
from keras.src import backend
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.losses.loss import Loss
from keras.src.losses.loss import squeeze_or_expand_to_same_rank
from keras.src.saving import serialization_lib
from keras.src.utils.numerical_utils import build_pos_neg_masks
from keras.src.utils.numerical_utils import normalize


class LossFunctionWrapper(Loss):
    def __init__(
        self,
        fn,
        reduction="sum_over_batch_size",
        name=None,
        dtype=None,
        **kwargs,
    ):
        super().__init__(name=name, reduction=reduction, dtype=dtype)
        self.fn = fn
        self._fn_kwargs = kwargs

    def call(self, y_true, y_pred):
        # Squeeze or expand each (y_true, y_pred) leaf pair to the same rank
        # before delegating to the wrapped loss function.
        y_true_y_pred = tree.map_structure(
            squeeze_or_expand_to_same_rank, y_true, y_pred
        )
        y_true = tree.map_structure_up_to(
            y_true, lambda x: x[0], y_true_y_pred
        )
        y_pred = tree.map_structure_up_to(
            y_pred, lambda x: x[1], y_true_y_pred
        )
        return self.fn(y_true, y_pred, **self._fn_kwargs)

    def get_config(self):
        config = super().get_config()
        config.update(
            {"fn": serialization_lib.serialize_keras_object(self.fn)}
        )
        config.update(
            serialization_lib.serialize_keras_object(self._fn_kwargs)
        )
        return config

    @classmethod
    def from_config(cls, config):
        if "fn" in config:
            config = serialization_lib.deserialize_keras_object(config)
        return cls(**config)

    def __repr__(self):
        return f"<LossFunctionWrapper({self.fn}, kwargs={self._fn_kwargs})>"


@keras_export("keras.losses.MeanSquaredError")
class MeanSquaredError(LossFunctionWrapper):
    """Computes the mean of squares of errors between labels and predictions.

    Formula:

    ```python
    loss = mean(square(y_true - y_pred))
    ```
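
    Example (an illustrative sketch, not from the Keras docs; assumes NumPy
    inputs and the default `"sum_over_batch_size"` reduction):

    ```python
    y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
    y_pred = np.array([[1.0, 1.0], [1.0, 0.0]])
    mse = keras.losses.MeanSquaredError()
    # Per-sample means of square(y_true - y_pred) are [0.5, 0.5],
    # so the reduced loss is 0.5.
    mse(y_true, y_pred)
    ```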

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="mean_squared_error",
        dtype=None,
    ):
        super().__init__(
            mean_squared_error, name=name, reduction=reduction, dtype=dtype
        )

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.MeanAbsoluteError")
class MeanAbsoluteError(LossFunctionWrapper):
    """Computes the mean of absolute difference between labels and predictions.

    Formula:

    ```python
    loss = mean(abs(y_true - y_pred))
    ```
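
    Example (an illustrative sketch, assuming NumPy inputs):

    ```python
    y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
    y_pred = np.array([[1.0, 1.0], [1.0, 0.0]])
    mae = keras.losses.MeanAbsoluteError()
    # Per-sample means of abs(y_true - y_pred) are [0.5, 0.5],
    # so the reduced loss is 0.5.
    mae(y_true, y_pred)
    ```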

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="mean_absolute_error",
        dtype=None,
    ):
        super().__init__(
            mean_absolute_error, name=name, reduction=reduction, dtype=dtype
        )

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(LossFunctionWrapper):
    """Computes the mean absolute percentage error between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = 100 * mean(abs((y_true - y_pred) / y_true))
    ```
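
    Example (an illustrative sketch, assuming NumPy inputs):

    ```python
    y_true = np.array([[2.0, 1.0], [2.0, 3.0]])
    y_pred = np.array([[1.0, 1.0], [1.0, 3.0]])
    mape = keras.losses.MeanAbsolutePercentageError()
    # abs((y_true - y_pred) / y_true) averages to 0.25 per sample,
    # so the loss is roughly 100 * 0.25 = 25.
    mape(y_true, y_pred)
    ```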

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="mean_absolute_percentage_error",
        dtype=None,
    ):
        super().__init__(
            mean_absolute_percentage_error,
            name=name,
            reduction=reduction,
            dtype=dtype,
        )

    def get_config(self):
        return Loss.get_config(self)


@keras_export("keras.losses.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(LossFunctionWrapper):
    """Computes the mean squared logarithmic error between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = mean(square(log(y_true + 1) - log(y_pred + 1)))
    ```
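
    Example (an illustrative sketch, assuming NumPy inputs):

    ```python
    y_true = np.array([[np.e - 1.0, 0.0]])
    y_pred = np.array([[0.0, 0.0]])
    msle = keras.losses.MeanSquaredLogarithmicError()
    # log(y_true + 1) - log(y_pred + 1) is [1, 0], so the squared
    # mean is approximately 0.5.
    msle(y_true, y_pred)
    ```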

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    """

    def __init__(
        self,
        reduction="sum_over_batch_size",
        name="mean_squared_logarithmic_error",
        dtype=None,
    ):
        super().__init__(
            mean_squared_logarithmic_error,
            name=name,
            reduction=reduction,
            dtype=dtype,
        )

    def get_config(self):
        return Loss.get_config(self)
    c                 4    t         |   t        |||       y r   )r   r   mean_squared_logarithmic_errorrB   s       r   r   z$MeanSquaredLogarithmicError.__init__   rR   r   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   z&MeanSquaredLogarithmicError.get_config   rF   r   )r7   rX   NrG   r=   s   @r   rV   rV      rT   r   rV   zkeras.losses.CosineSimilarityc                   2     e Zd ZdZ	 	 	 	 d fd	Zd Z xZS )CosineSimilaritya  Computes the cosine similarity between `y_true` & `y_pred`.

    Note that it is a number between -1 and 1. When it is a negative number
    between -1 and 0, 0 indicates orthogonality and values closer to -1
    indicate greater similarity. This makes it usable as a loss function in a
    setting where you try to maximize the proximity between predictions and
    targets. If either `y_true` or `y_pred` is a zero vector, cosine similarity
    will be 0 regardless of the proximity between predictions and targets.

    Formula:

    ```python
    loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
    ```

    Args:
        axis: The axis along which the cosine similarity is computed
            (the features axis). Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 6    t         |   t        ||||       y N)r   r   r   axis)r   r   cosine_similarity)r   r^   r   r   r   r   s        r   r   zCosineSimilarity.__init__  s&     	 	 	
r   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zCosineSimilarity.get_config  rF   r   )r7   r_   NrG   r=   s   @r   r[   r[      s!     H ' 
%r   r[   zkeras.losses.Huberc                   2     e Zd ZdZ	 	 	 	 d fd	Zd Z xZS )Hubera  Computes the Huber loss between `y_true` & `y_pred`.

    Formula:

    ```python
    for x in error:
        if abs(x) <= delta:
            loss.append(0.5 * x^2)
        elif abs(x) > delta:
            loss.append(delta * abs(x) - 0.5 * delta^2)

    loss = mean(loss, axis=-1)
    ```
    See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
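
    Example (an illustrative sketch, assuming NumPy inputs and the default
    `delta=1.0`):

    ```python
    y_true = np.array([[0.0, 2.0]])
    y_pred = np.array([[0.5, 0.0]])
    huber = keras.losses.Huber(delta=1.0)
    # abs(error) is [0.5, 2.0]: the first term is quadratic
    # (0.5 * 0.5**2 = 0.125), the second is linear
    # (1.0 * 2.0 - 0.5 * 1.0**2 = 1.5), so the mean is about 0.8125.
    huber(y_true, y_pred)
    ```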

    Args:
        delta: A float, the point where the Huber loss function changes from a
            quadratic to linear.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 6    t         |   t        ||||       y )N)r   r   r   delta)r   r   huber)r   re   r   r   r   r   s        r   r   zHuber.__init__8  s&     	 	 	
r   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zHuber.get_configG  rF   r   )      ?r7   
huber_lossNrG   r=   s   @r   rc   rc     s!     H '
%r   rc   zkeras.losses.LogCoshc                   0     e Zd ZdZ	 	 	 d fd	Zd Z xZS )LogCosha  Computes the logarithm of the hyperbolic cosine of the prediction error.

    Formula:

    ```python
    error = y_pred - y_true
    logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)
    ```
    where `error = y_pred - y_true`.
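
    Example (an illustrative sketch, assuming NumPy inputs):

    ```python
    y_true = np.array([[0.0, 1.0]])
    y_pred = np.array([[1.0, 1.0]])
    logcosh = keras.losses.LogCosh()
    # log(cosh(1.0)) is roughly 0.4338 and log(cosh(0.0)) is 0.0,
    # so the mean over the last axis is roughly 0.217.
    logcosh(y_true, y_pred)
    ```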

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   log_coshrB   s       r   r   zLogCosh.__init__h  s     		Or   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zLogCosh.get_configp  rF   r   )r7   rm   NrG   r=   s   @r   rk   rk   K  s    : (	P%r   rk   zkeras.losses.Hingec                   0     e Zd ZdZ	 	 	 d fd	Zd Z xZS )Hingea  Computes the hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = maximum(1 - y_true * y_pred, 0)
    ```

    `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided we will convert them to -1 or 1.
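
    Example (an illustrative sketch, assuming labels already in {-1, 1}):

    ```python
    y_true = np.array([[-1.0, 1.0]])
    y_pred = np.array([[0.3, 0.4]])
    hinge = keras.losses.Hinge()
    # maximum(1 - y_true * y_pred, 0) is [1.3, 0.6], so the mean is 0.95.
    hinge(y_true, y_pred)
    ```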

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   hingerB   s       r   r   zHinge.__init__  s     	TYeLr   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zHinge.get_config  rF   r   )r7   rr   NrG   r=   s   @r   rp   rp   t  s    < (	M%r   rp   zkeras.losses.SquaredHingec                   ,     e Zd ZdZ	 d fd	Zd Z xZS )SquaredHingea  Computes the squared hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = square(maximum(1 - y_true * y_pred, 0))
    ```

    `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
    provided we will convert them to -1 or 1.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   squared_hingerB   s       r   r   zSquaredHinge.__init__        		 	 	
r   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zSquaredHinge.get_config  rF   r   )r7   rw   NrG   r=   s   @r   ru   ru     s    : LP
%r   ru   zkeras.losses.CategoricalHingec                   0     e Zd ZdZ	 	 	 d fd	Zd Z xZS )CategoricalHingea  Computes the categorical hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = maximum(neg - pos + 1, 0)
    ```

    where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
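
    Example (an illustrative sketch, assuming one-hot `y_true`):

    ```python
    y_true = np.array([[0.0, 1.0]])
    y_pred = np.array([[0.4, 0.6]])
    cat_hinge = keras.losses.CategoricalHinge()
    # pos = sum(y_true * y_pred) = 0.6 and neg = max((1 - y_true) * y_pred)
    # = 0.4, so the loss is maximum(0.4 - 0.6 + 1, 0) = 0.8.
    cat_hinge(y_true, y_pred)
    ```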

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   categorical_hingerB   s       r   r   zCategoricalHinge.__init__  s      	DIU 	 	
r   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zCategoricalHinge.get_config  rF   r   )r7   r}   NrG   r=   s   @r   r{   r{     s    : ( 	
%r   r{   zkeras.losses.KLDivergencec                   ,     e Zd ZdZ	 d fd	Zd Z xZS )KLDivergencea  Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = y_true * log(y_true / y_pred)
    ```

    `y_true` and `y_pred` are expected to be probability
    distributions, with values between 0 and 1. They will get
    clipped to the `[0, 1]` range.
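
    Example (an illustrative sketch, assuming valid probability
    distributions as inputs):

    ```python
    y_true = np.array([[0.5, 0.5]])
    y_pred = np.array([[0.25, 0.75]])
    kld = keras.losses.KLDivergence()
    # 0.5 * log(0.5 / 0.25) + 0.5 * log(0.5 / 0.75) is roughly 0.144.
    kld(y_true, y_pred)
    ```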

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   kl_divergencerB   s       r   r   zKLDivergence.__init__  rx   r   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zKLDivergence.get_config  rF   r   )r7   r   NrG   r=   s   @r   r   r     s    < LP
%r   r   zkeras.losses.Poissonc                   ,     e Zd ZdZ	 d fd	Zd Z xZS )Poissonar  Computes the Poisson loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = y_pred - y_true * log(y_pred)
    ```

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   poissonrB   s       r   r   zPoisson.__init__7  s     	tyNr   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zPoisson.get_config<  rF   r   )r7   r   NrG   r=   s   @r   r   r     s    4 FJO
%r   r   zkeras.losses.BinaryCrossentropyc                   6     e Zd ZdZ	 	 	 	 	 	 d fd	Zd Z xZS )BinaryCrossentropya  Computes the cross-entropy loss between true labels and predicted labels.

    Use this cross-entropy loss for binary (0 or 1) classification applications.
    The loss function requires the following inputs:

    - `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e, a single
        floating-point value which either represents a
        [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
        when `from_logits=True`) or a probability (i.e, value in [0., 1.] when
        `from_logits=False`).

    Args:
        from_logits: Whether to interpret `y_pred` as a tensor of
            [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` is probabilities (i.e., values in [0, 1]).
        label_smoothing: Float in range [0, 1]. When 0, no smoothing occurs.
            When > 0, we compute the loss between the predicted labels
            and a smoothed version of the true labels, where the smoothing
            squeezes the labels towards 0.5. Larger values of
            `label_smoothing` correspond to heavier smoothing.
        axis: The axis along which to compute crossentropy (the features axis).
            Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    **Recommended Usage:** (set `from_logits=True`)

    With `compile()` API:

    ```python
    model.compile(
        loss=keras.losses.BinaryCrossentropy(from_logits=True),
        ...
    )
    ```

    As a standalone function:

    >>> # Example 1: (batch_size = 1, number of samples = 4)
    >>> y_true = np.array([0, 1, 0, 0])
    >>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
    >>> bce(y_true, y_pred)
    0.8654

    >>> # Example 2: (batch_size = 2, number of samples = 4)
    >>> y_true = np.array([[0, 1], [0, 0]])
    >>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
    >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True)
    >>> bce(y_true, y_pred)
    0.8654
    >>> # Using 'sample_weight' attribute
    >>> bce(y_true, y_pred, sample_weight=[0.8, 0.2])
    0.243
    >>> # Using 'sum' reduction` type.
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
    ...     reduction="sum")
    >>> bce(y_true, y_pred)
    1.730
    >>> # Using 'none' reduction type.
    >>> bce = keras.losses.BinaryCrossentropy(from_logits=True,
    ...     reduction=None)
    >>> bce(y_true, y_pred)
    array([0.235, 1.496], dtype=float32)

    **Default Usage:** (set `from_logits=False`)

    >>> # Make the following updates to the above "Recommended Usage" section
    >>> # 1. Set `from_logits=False`
    >>> keras.losses.BinaryCrossentropy() # OR ...('from_logits=False')
    >>> # 2. Update `y_pred` to use probabilities instead of logits
    >>> y_pred = [0.6, 0.3, 0.2, 0.8] # OR [[0.6, 0.3], [0.2, 0.8]]
    c           	      d    t         |   t        ||||||       || _        || _        || _        y N)r   r   r   from_logitslabel_smoothingr^   )r   r   binary_crossentropyr   r   r^   r   r   r   r^   r   r   r   r   s          r   r   zBinaryCrossentropy.__init__  sE     	#+ 	 	
 '.	r   c                     t        j                  |       }|j                  | j                  | j                  | j
                  d       |S N)r   r   r^   r   r,   r-   r   r   r^   r   r/   s     r   r,   zBinaryCrossentropy.get_config  A    &#//#'#7#7			
 r   )F        ra   r7   r   NrG   r=   s   @r   r   r   @  s(    Xx '",	r   r   z$keras.losses.BinaryFocalCrossentropyc                   <     e Zd ZdZ	 	 	 	 	 	 	 	 	 d fd	Zd Z xZS )BinaryFocalCrossentropya  Computes focal cross-entropy loss between true labels and predictions.

    Binary cross-entropy loss is often used for binary (0 or 1) classification
    tasks. The loss function requires the following inputs:

    - `y_true` (true label): This is either 0 or 1.
    - `y_pred` (predicted value): This is the model's prediction, i.e, a single
        floating-point value which either represents a
        [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]
        when `from_logits=True`) or a probability (i.e, value in `[0., 1.]` when
        `from_logits=False`).

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a "focal factor" to down-weight easy examples and focus more
    on hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma=0`, this function is
    equivalent to the binary crossentropy loss.

    Args:
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in reference [Lin et al., 2018](
            https://arxiv.org/pdf/1708.02002.pdf).  The weight for class 0 is
            `1.0 - alpha`.
        gamma: A focusing parameter used to compute the focal factor, default is
            `2.0` as mentioned in the reference
            [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).
        from_logits: Whether to interpret `y_pred` as a tensor of
            [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
            assume that `y_pred` are probabilities (i.e., values in `[0, 1]`).
        label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.
            When > `0`, we compute the loss between the predicted labels
            and a smoothed version of the true labels, where the smoothing
            squeezes the labels towards `0.5`.
            Larger values of `label_smoothing` correspond to heavier smoothing.
        axis: The axis along which to compute crossentropy (the features axis).
            Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    With the `compile()` API:

    ```python
    model.compile(
        loss=keras.losses.BinaryFocalCrossentropy(
            gamma=2.0, from_logits=True),
        ...
    )
    ```

    As a standalone function:

    >>> # Example 1: (batch_size = 1, number of samples = 4)
    >>> y_true = np.array([0, 1, 0, 0])
    >>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...    gamma=2, from_logits=True)
    >>> loss(y_true, y_pred)
    0.691

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=2, from_logits=True)
    >>> loss(y_true, y_pred)
    0.51

    >>> # Example 2: (batch_size = 2, number of samples = 4)
    >>> y_true = np.array([[0, 1], [0, 0]])
    >>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])
    >>> # Using default 'auto'/'sum_over_batch_size' reduction type.
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=3, from_logits=True)
    >>> loss(y_true, y_pred)
    0.647

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...      apply_class_balancing=True, gamma=3, from_logits=True)
    >>> loss(y_true, y_pred)
    0.482

    >>> # Using 'sample_weight' attribute with focal effect
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=3, from_logits=True)
    >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
    0.133

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...      apply_class_balancing=True, gamma=3, from_logits=True)
    >>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])
    0.097

    >>> # Using 'sum' reduction` type.
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=4, from_logits=True,
    ...     reduction="sum")
    >>> loss(y_true, y_pred)
    1.222

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=4, from_logits=True,
    ...     reduction="sum")
    >>> loss(y_true, y_pred)
    0.914

    >>> # Using 'none' reduction type.
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     gamma=5, from_logits=True,
    ...     reduction=None)
    >>> loss(y_true, y_pred)
    array([0.0017 1.1561], dtype=float32)

    >>> # Apply class weight
    >>> loss = keras.losses.BinaryFocalCrossentropy(
    ...     apply_class_balancing=True, gamma=5, from_logits=True,
    ...     reduction=None)
    >>> loss(y_true, y_pred)
    array([0.0004 0.8670], dtype=float32)
    c
                     t         
|   t        |||	||||||
       || _        || _        || _        || _        || _        || _        y )N)	r   r   r   apply_class_balancingalphagammar   r   r^   )	r   r   binary_focal_crossentropyr   r   r^   r   r   r   )r   r   r   r   r   r   r^   r   r   r   r   s             r   r   z BinaryFocalCrossentropy.__init__L  sd     	%"7#+ 	 	
 '.	%:"

r   c           	          t        j                  |       }|j                  | j                  | j                  | j
                  | j                  | j                  | j                  d       |S )N)r   r   r^   r   r   r   )	r   r,   r-   r   r   r^   r   r   r   r   s     r   r,   z"BinaryFocalCrossentropy.get_configk  sX    &#//#'#7#7		)-)C)C		
 r   )	F      ?       @Fr   ra   r7   r   NrG   r=   s   @r   r   r     s1    J\ $'(>r   r   z$keras.losses.CategoricalCrossentropyc                   6     e Zd ZdZ	 	 	 	 	 	 d fd	Zd Z xZS )CategoricalCrossentropya  Computes the crossentropy loss between the labels and predictions.

    Use this crossentropy loss function when there are two or more label
    classes. We expect labels to be provided in a `one_hot` representation. If
    you want to provide labels as integers, please use
    `SparseCategoricalCrossentropy` loss. There should be `num_classes` floating
    point values per feature, i.e., the shape of both `y_pred` and `y_true` are
    `[batch_size, num_classes]`.

    Args:
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
            meaning the confidence on label values are relaxed. For example, if
            `0.1`, use `0.1 / num_classes` for non-target labels and
            `0.9 + 0.1 / num_classes` for target labels.
        axis: The axis along which to compute crossentropy (the features
            axis). Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    Standalone usage:

    >>> y_true = np.array([[0, 1, 0], [0, 0, 1]])
    >>> y_pred = np.array([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cce = keras.losses.CategoricalCrossentropy()
    >>> cce(y_true, y_pred)
    1.177

    >>> # Calling with 'sample_weight'.
    >>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
    0.814

    >>> # Using 'sum' reduction type.
    >>> cce = keras.losses.CategoricalCrossentropy(
    ...     reduction="sum")
    >>> cce(y_true, y_pred)
    2.354

    >>> # Using 'none' reduction type.
    >>> cce = keras.losses.CategoricalCrossentropy(
    ...     reduction=None)
    >>> cce(y_true, y_pred)
    array([0.0513, 2.303], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=keras.losses.CategoricalCrossentropy())
    ```
    c           	      d    t         |   t        ||||||       || _        || _        || _        y r   )r   r   categorical_crossentropyr   r   r^   r   s          r   r   z CategoricalCrossentropy.__init__  sE     	$#+ 	 	
 '.	r   c                     t        j                  |       }|j                  | j                  | j                  | j
                  d       |S r   r   r   s     r   r,   z"CategoricalCrossentropy.get_config  r   r   )Fr   ra   r7   r   NrG   r=   s   @r   r   r   z  s(    BL '',	r   r   z)keras.losses.CategoricalFocalCrossentropyc                   :     e Zd ZdZ	 	 	 	 	 	 	 	 d fd	Zd Z xZS )CategoricalFocalCrossentropya	  Computes the alpha balanced focal crossentropy loss.

    Use this crossentropy loss function when there are two or more label
    classes and if you want to handle class imbalance without using
    `class_weights`. We expect labels to be provided in a `one_hot`
    representation.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more on
    hard examples. The general formula for the focal loss (FL)
    is as follows:

    `FL(p_t) = (1 - p_t) ** gamma * log(p_t)`

    where `p_t` is defined as follows:
    `p_t = output if y_true == 1, else 1 - output`

    `(1 - p_t) ** gamma` is the `modulating_factor`, where `gamma` is a focusing
    parameter. When `gamma` = 0, there is no focal effect on the cross entropy.
    `gamma` reduces the importance given to simple examples in a smooth manner.

    The authors use alpha-balanced variant of focal loss (FL) in the paper:
    `FL(p_t) = -alpha * (1 - p_t) ** gamma * log(p_t)`

    where `alpha` is the weight factor for the classes. If `alpha` = 1, the
    loss won't be able to handle class imbalance properly as all
    classes will have the same weight. This can be a constant or a list of
    constants. If alpha is a list, it must have the same length as the number
    of classes.

    The formula above can be generalized to:
    `FL(p_t) = alpha * (1 - p_t) ** gamma * CrossEntropy(y_true, y_pred)`

    where minus comes from `CrossEntropy(y_true, y_pred)` (CE).

    Extending this to multi-class case is straightforward:
    `FL(p_t) = alpha * (1 - p_t) ** gamma * CategoricalCE(y_true, y_pred)`

    In the snippet below, there are `num_classes` floating point values per
    example. The shape of both `y_pred` and `y_true` is
    `(batch_size, num_classes)`.

    Args:
        alpha: A weight balancing factor for all classes, default is `0.25` as
            mentioned in the reference. It can be a list of floats or a scalar.
            In the multi-class case, alpha may be set by inverse class
            frequency by using `compute_class_weight` from `sklearn.utils`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference. It helps to gradually reduce the importance given to
            simple (easy) examples in a smooth manner.
        from_logits: Whether `output` is expected to be a logits tensor. By
            default, we consider that `output` encodes a probability
            distribution.
        label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
            meaning the confidence on label values are relaxed. For example, if
            `0.1`, use `0.1 / num_classes` for non-target labels and
            `0.9 + 0.1 / num_classes` for target labels.
        axis: The axis along which to compute crossentropy (the features
            axis). Defaults to `-1`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    Standalone usage:

    >>> y_true = [[0., 1., 0.], [0., 0., 1.]]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> cce = keras.losses.CategoricalFocalCrossentropy()
    >>> cce(y_true, y_pred)
    0.23315276

    >>> # Calling with 'sample_weight'.
    >>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
    0.1632

    >>> # Using 'sum' reduction type.
    >>> cce = keras.losses.CategoricalFocalCrossentropy(
    ...     reduction="sum")
    >>> cce(y_true, y_pred)
    0.46631

    >>> # Using 'none' reduction type.
    >>> cce = keras.losses.CategoricalFocalCrossentropy(
    ...     reduction=None)
    >>> cce(y_true, y_pred)
    array([3.2058331e-05, 4.6627346e-01], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.CategoricalFocalCrossentropy())
    ```
    c	                     t         	|   t        ||||||||	       || _        || _        || _        || _        || _        y)z4Initializes `CategoricalFocalCrossentropy` instance.)r   r   r   r   r   r   r   r^   N)r   r   categorical_focal_crossentropyr   r   r^   r   r   )
r   r   r   r   r   r^   r   r   r   r   s
            r   r   z%CategoricalFocalCrossentropy.__init__Q  sY     	*#+ 	 
	
 '.	

r   c                     t        j                  |       }|j                  | j                  | j                  | j
                  | j                  | j                  d       |S )N)r   r   r^   r   r   )r   r,   r-   r   r   r^   r   r   r   s     r   r,   z'CategoricalFocalCrossentropy.get_confign  sO    &#//#'#7#7			
 r   )r   r   Fr   ra   r7   r   NrG   r=   s   @r   r   r     s.    k^ '-:r   r   z*keras.losses.SparseCategoricalCrossentropyc                   6     e Zd ZdZ	 	 	 	 	 	 d fd	Zd Z xZS )SparseCategoricalCrossentropyaO  Computes the crossentropy loss between the labels and predictions.

    Use this crossentropy loss function when there are two or more label
    classes.  We expect labels to be provided as integers. If you want to
    provide labels using `one-hot` representation, please use
    `CategoricalCrossentropy` loss.  There should be `# classes` floating point
    values per feature for `y_pred` and a single floating point value per
    feature for `y_true`.

    In the snippet below, there is a single floating point value per example for
    `y_true` and `num_classes` floating point values per example for
    `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred`
    is `[batch_size, num_classes]`.

    Args:
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        axis: The axis along which to compute crossentropy (the features
            axis). Defaults to `-1`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    >>> y_true = [1, 2]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> # Using 'auto'/'sum_over_batch_size' reduction type.
    >>> scce = keras.losses.SparseCategoricalCrossentropy()
    >>> scce(y_true, y_pred)
    1.177

    >>> # Calling with 'sample_weight'.
    >>> scce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))
    0.814

    >>> # Using 'sum' reduction type.
    >>> scce = keras.losses.SparseCategoricalCrossentropy(
    ...     reduction="sum")
    >>> scce(y_true, y_pred)
    2.354

    >>> # Using 'none' reduction type.
    >>> scce = keras.losses.SparseCategoricalCrossentropy(
    ...     reduction=None)
    >>> scce(y_true, y_pred)
    array([0.0513, 2.303], dtype=float32)

    Usage with the `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss=keras.losses.SparseCategoricalCrossentropy())
    ```
    c           	      V    t         |   t        ||||||       || _        || _        y )N)r   r   r   r   ignore_classr^   )r   r   sparse_categorical_crossentropyr   r   )r   r   r   r   r^   r   r   r   s          r   r   z&SparseCategoricalCrossentropy.__init__  s>     	+#% 	 	
 '(r   c                     t        j                  |       }|j                  | j                  | j                  d       |S )N)r   r   )r   r,   r-   r   r   r   s     r   r,   z(SparseCategoricalCrossentropy.get_config  s:    &#// $ 1 1	
 r   )FNr7   ra   r   NrG   r=   s   @r   r   r   |  s(    AJ '.)*r   r   zkeras.losses.CTCc                   *     e Zd ZdZd fd	Zd Z xZS )CTCa  CTC (Connectionist Temporal Classification) loss.

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.
    c                 4    t         |   t        |||       y r   )r   r   ctcrB   s       r   r   zCTC.__init__  s    49EJr   c                 ,    t        j                  |       S rD   rE   r5   s    r   r,   zCTC.get_config  rF   r   )r7   r   NrG   r=   s   @r   r   r     s    &K%r   r   zkeras.losses.Dicec                   2     e Zd ZdZ	 	 	 	 d fd	Zd Z xZS )DiceaP  Computes the Dice loss value between `y_true` and `y_pred`.

    Formula:
    ```python
    loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))
    ```

    Args:
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        axis: Tuple for which dimensions the loss is calculated. Defaults to
            `None`.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Returns:
        Dice loss value.

    Example:

    >>> y_true = [[[[1.0], [1.0]], [[0.0], [0.0]]],
    ...           [[[1.0], [1.0]], [[0.0], [0.0]]]]
    >>> y_pred = [[[[0.0], [1.0]], [[0.0], [1.0]]],
    ...           [[[0.4], [0.0]], [[0.0], [0.9]]]]
    >>> axis = (1, 2, 3)
    >>> loss = keras.losses.Dice(axis=axis, reduction=None)(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.5, 0.75757575], shape=(2,), dtype=float32)

    >>> loss = keras.losses.Dice()(y_true, y_pred)
    >>> assert loss.shape == ()
    >>> loss
    array(0.6164384, shape=(), dtype=float32)

    >>> y_true = np.array(y_true)
    >>> y_pred = np.array(y_pred)
    >>> loss = keras.losses.Dice(axis=axis, reduction=None)(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.5, 0.75757575], shape=(2,), dtype=float32)

    c                 D    t         |   t        ||||       || _        y r]   )r   r   dicer^   )r   r   r   r^   r   r   s        r   r   zDice.__init__5  s+     	tyD 	 	
 	r   c                 j    t        j                  |       }|j                  d| j                  i       |S )Nr^   )r   r,   r-   r^   r   s     r   r,   zDice.get_configA  s*    &vtyy)*r   )r7   r   NNrG   r=   s   @r   r   r     s!    4p (
r   r   zkeras.losses.Tverskyc                   6     e Zd ZdZ	 	 	 	 	 	 d fd	Zd Z xZS )TverskyaL  Computes the Tversky loss value between `y_true` and `y_pred`.

    This loss function is weighted by the alpha and beta coefficients
    that penalize false positives and false negatives.

    With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to
    Dice Loss.

    Args:
        alpha: The coefficient controlling incidence of false positives.
            Defaults to `0.5`.
        beta: The coefficient controlling incidence of false negatives.
            Defaults to `0.5`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Returns:
        Tversky loss value.

    Reference:

    - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721)
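
    Example:

    A minimal standalone sketch (the inputs below are illustrative only; the
    resulting loss depends on the chosen `alpha`/`beta` weighting and on the
    backend):

    ```python
    import numpy as np
    import keras

    y_true = np.array([[1.0, 1.0, 0.0, 0.0]])
    y_pred = np.array([[0.8, 0.6, 0.3, 0.1]])
    # beta > alpha penalizes false negatives more heavily than false positives.
    loss = keras.losses.Tversky(alpha=0.3, beta=0.7)(y_true, y_pred)
    ```
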
    """

    def __init__(
        self,
        alpha=0.5,
        beta=0.5,
        reduction="sum_over_batch_size",
        name="tversky",
        axis=None,
        dtype=None,
    ):
        super().__init__(
            tversky,
            name=name,
            reduction=reduction,
            dtype=dtype,
            alpha=alpha,
            beta=beta,
            axis=axis,
        )
        self.alpha = alpha
        self.beta = beta
        self.axis = axis

    def get_config(self):
        config = Loss.get_config(self)
        config.update(
            {"alpha": self.alpha, "beta": self.beta, "axis": self.axis}
        )
        return config


@keras_export("keras.losses.Circle")
class Circle(LossFunctionWrapper):
    """Computes Circle Loss between integer labels and L2-normalized embeddings.

    This is a metric learning loss designed to minimize within-class distance
    and maximize between-class distance in a flexible manner by dynamically
    adjusting the penalty strength based on optimization status of each
    similarity score.

    To use Circle Loss effectively, the model should output embeddings without
    an activation function (such as a `Dense` layer with `activation=None`)
    followed by a `UnitNormalization` layer to ensure unit-norm embeddings.

    Args:
        gamma: Scaling factor that determines the largest scale of each
            similarity score. Defaults to `80`.
        margin: The relaxation factor. Below this distance, negatives are
            up-weighted and positives are down-weighted; above this distance,
            negatives are down-weighted and positives are up-weighted.
            Defaults to `0.4`.
        remove_diagonal: Boolean, whether to remove self-similarities from the
            positive mask. Defaults to `True`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Examples:

    Usage with the `compile()` API:

    ```python
    model = models.Sequential([
        keras.layers.Input(shape=(224, 224, 3)),
        keras.layers.Conv2D(16, (3, 3), activation='relu'),
        keras.layers.Flatten(),
        keras.layers.Dense(64, activation=None),  # No activation
        keras.layers.UnitNormalization()  # L2 normalization
    ])

    model.compile(optimizer="adam", loss=keras.losses.Circle())
    ```
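
    Standalone usage (an illustrative sketch; the embeddings below are random
    and are explicitly L2-normalized, since the loss expects unit-norm inputs):

    ```python
    import numpy as np
    import keras

    labels = np.array([0, 0, 1, 1])  # integer class labels
    embeddings = np.random.normal(size=(4, 16)).astype("float32")
    # Normalize to unit length so the pairwise cosine similarities are valid.
    embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True)
    loss = keras.losses.Circle(gamma=80, margin=0.4)(labels, embeddings)
    ```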

    Reference:
    - [Yifan Sun et al., 2020](https://arxiv.org/abs/2002.10857)

    c           	      d    t         |   t        ||||||       || _        || _        || _        y )N)r   r   r   r   marginremove_diagonal)r   r   circler   r   r   )r   r   r   r   r   r   r   r   s          r   r   zCircle.__init__  sD     	+ 	 	
 
.r   c                     t        j                  |       }|j                  | j                  | j                  | j
                  d       |S )N)r   r   r   )r   r,   r-   r   r   r   r   s     r   r,   zCircle.get_config  s?    &++#'#7#7	
 r   )g      T@皙?Tr7   r   NrG   r=   s   @r   r   r     s'    6t '/,	r   r   z/keras.losses.CategoricalGeneralizedCrossEntropyc                   2     e Zd ZdZ	 	 	 	 d fd	Zd Z xZS )"CategoricalGeneralizedCrossEntropya  Computes the Generalized Cross Entropy loss between `y_true` & `y_pred`.

    Generalized Cross Entropy (GCE) is a noise-robust loss function
    that provides better robustness against noisy labels than
    standard cross entropy.
    It generalizes both cross entropy and mean absolute error through
    the parameter q, where values closer to 1 make the loss more robust
    to noisy labels.

    Formula:
    ```python
    loss = (1 - p**q) / q
    ```
    where `p` is the predicted probability for the true class and `q`
    is the noise parameter.

    Args:
        q: Float in range `(0, 1)`. It is the noise parameter that
            controls the behavior of the loss:
            - As `q` approaches 0: behaves more like cross entropy.
            - As `q` approaches 1: behaves more like mean absolute error.
            Defaults to `0.5`.
        reduction: Type of reduction to apply to the loss. In almost all cases
            this should be `"sum_over_batch_size"`. Supported options are
            `"sum"`, `"sum_over_batch_size"`, `"mean"`,
            `"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
            `"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
            sample size, and `"mean_with_sample_weight"` sums the loss and
            divides by the sum of the sample weights. `"none"` and `None`
            perform no aggregation. Defaults to `"sum_over_batch_size"`.
        name: Optional name for the loss instance.
        dtype: The dtype of the loss's computations. Defaults to `None`, which
            means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
            `"float32"` unless set to different value
            (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
            provided, then the `compute_dtype` will be utilized.

    Example:
    ```python
    y_true = np.array([0, 1, 0, 1])
    y_pred = np.array([[0.7, 0.3], [0.2, 0.8], [0.6, 0.4], [0.4, 0.6]])
    keras.losses.CategoricalGeneralizedCrossEntropy()(y_true, y_pred)
    ```

    References:
        - [Zhang, Sabuncu, 2018](https://arxiv.org/abs/1805.07836)
          ("Generalized Cross Entropy Loss for Training
            Deep Neural Networks with Noisy Labels")
    c                     d|cxk  rdk  st        d       t        d      t        | 	  t        ||||       || _        y )Nr   r$   z q must be in the interval (0, 1))r   r   r   q)
ValueErrorr   r   %categorical_generalized_cross_entropyr   )r   r   r   r   r   r   s        r   r   z+CategoricalGeneralizedCrossEntropy.__init__  sW     1yqy?@@ ?@@1 	 	
 r   c                 j    t        j                  |       }|j                  d| j                  i       |S )Nr   )r   r,   r-   r   r   s     r   r,   z-CategoricalGeneralizedCrossEntropy.get_config-  s1    &TVV	

 r   )r   r7   r   NrG   r=   s   @r   r   r     s!    0h '4$r   r   c                      t        j                   d      }t        j                   d      }t        j                  t        j                  ||            } fd} fd}t        j                  |||      }|S )zCConverts binary labels into -1/1 for hinge loss/metric calculation.r   r$   c                      d z  dz
  S )Nr   rh   r   r'   s   r   _convert_binary_labelsz>convert_binary_labels_to_hinge.<locals>._convert_binary_labels=  s    V|c!!r   c                       S rD   r   r   s   r   _return_labels_unconvertedzBconvert_binary_labels_to_hinge.<locals>._return_labels_unconvertedA  s    r   )r   equalall
logical_orcond)r'   	are_zerosare_ones	is_binaryr   r   updated_y_trues   `      r   convert_binary_labels_to_hinger   7  sg    		&!$Iyy#H	8<>I" XX)+EN r   zkeras.metrics.hingezkeras.losses.hingec                    t        j                  |      }t        j                  | |j                        } t        j                  |       } t	        |       } t        j
                  t        j                  d| |z  z
  d      d      S )a  Computes the hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)
    ```

    Args:
        y_true: The ground truth values. `y_true` values are expected to be -1
            or 1. If binary (0 or 1) labels are provided they will be converted
            to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Hinge loss values with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = np.random.choice([-1, 1], size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.hinge(y_true, y_pred)
    r   rh   r   ra   r^   )r   convert_to_tensorcastr   r   meanmaximumr'   r(   s     r   rr   rr   K  sg    < ""6*FXXfFLL1F""6*F+F3F88CKKfvo 5s;"EEr   zkeras.metrics.squared_hingezkeras.losses.squared_hingec           	      
   t        j                  |      }t        j                  | |j                        } t	        |       } t        j
                  t        j                  t        j                  d| |z  z
  d            d      S )a  Computes the squared hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)
    ```

    Args:
        y_true: The ground truth values. `y_true` values are expected to be -1
            or 1. If binary (0 or 1) labels are provided we will convert them
            to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Squared hinge loss values with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = np.random.choice([-1, 1], size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.squared_hinge(y_true, y_pred)
    rh   r   ra   r   )r   r   r   r   r   r   squarer   r   s     r   rw   rw   p  se    < ""6*FXXffll+F+F3F88

3;;sVf_4c:;" r   zkeras.metrics.categorical_hingezkeras.losses.categorical_hingec                 R   t        j                  |      }t        j                  | |j                        } t        j                  | |z  d      }t        j
                  d| z
  |z  d      }t        j                  d|j                        }t        j                  ||z
  dz   |      S )a<  Computes the categorical hinge loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = maximum(neg - pos + 1, 0)
    ```

    where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`

    Args:
        y_true: The ground truth values. `y_true` values are expected to be
            either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor) with
            shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Categorical hinge loss values with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = np.random.randint(0, 3, size=(2,))
    >>> y_true = np.eye(np.max(y_true) + 1)[y_true]
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.categorical_hinge(y_true, y_pred)
    ra   r   rh   r   )r   r   r   r   summaxr   )r'   r(   posnegzeros        r   r}   r}     s    B ""6*FXXffll+F
''&6/
+C
''3<6)
3C88C&D;;sSy3--r   )z keras.metrics.mean_squared_errorzkeras.losses.mean_squared_errorzkeras._legacy.losses.msezkeras._legacy.losses.MSEzkeras._legacy.metrics.msezkeras._legacy.metrics.MSEc                     t        j                  |      }t        j                  | |j                        } t        | |      \  } }t        j                  t        j
                  | |z
        d      S )aL  Computes the mean squared error between labels and predictions.

    Formula:

    ```python
    loss = mean(square(y_true - y_pred), axis=-1)
    ```

    Example:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.mean_squared_error(y_true, y_pred)

    Args:
        y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean squared error values with shape = `[batch_size, d0, .. dN-1]`.
    r   ra   r   )r   r   r   r   r   r   r   s     r   rA   rA     sY    B ""6*F""6>F3FFCNFF88CJJv/b99r   )z!keras.metrics.mean_absolute_errorz keras.losses.mean_absolute_errorzkeras._legacy.losses.MAEzkeras._legacy.losses.maezkeras._legacy.metrics.MAEzkeras._legacy.metrics.maec                     t        j                  |      }t        j                  | |j                        } t        | |      \  } }t        j                  t        j
                  | |z
        d      S )a>  Computes the mean absolute error between labels and predictions.

    ```python
    loss = mean(abs(y_true - y_pred), axis=-1)
    ```

    Args:
        y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean absolute error values with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.mean_absolute_error(y_true, y_pred)
    r   ra   r   )r   r   r   r   r   absr   s     r   rL   rL     sX    > ""6*F""6>F3FFCNFF88CGGFVO,266r   )z,keras.metrics.mean_absolute_percentage_errorz+keras.losses.mean_absolute_percentage_errorzkeras._legacy.losses.mapezkeras._legacy.losses.MAPEzkeras._legacy.metrics.mapezkeras._legacy.metrics.MAPEc                    t        j                  |      }t        j                  | |j                        } t        j                  t        j                         |j                        }t        | |      \  } }t        j                  | |z
  t        j                  t        j                  |       |      z        }dt        j                  |d      z  S )a  Computes the mean absolute percentage error between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)
    ```

    Division by zero is prevented by dividing by `maximum(y_true, epsilon)`
    where `epsilon = keras.backend.epsilon()`
    (default to `1e-7`).

    Args:
        y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean absolute percentage error values with shape = `[batch_size, d0, ..
        dN-1]`.

    Example:

    >>> y_true = np.random.random(size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.mean_absolute_percentage_error(y_true, y_pred)
    r   g      Y@ra   r   )	r   r   r   r   epsilonr   r   r   r   )r'   r(   r   diffs       r   rQ   rQ     s    L ""6*F""6>F##GOO$5V\\JG3FFCNFF77FVOs{{3776?G'LLMD388Dr***r   )z,keras.metrics.mean_squared_logarithmic_errorz+keras.losses.mean_squared_logarithmic_errorzkeras._legacy.losses.mslezkeras._legacy.losses.MSLEzkeras._legacy.metrics.mslezkeras._legacy.metrics.MSLEc                    t        j                  t        j                               }t        j                  |      }t        j                  | |j                        } t        | |      \  } }t        j                  t        j                  ||      dz         }t        j                  t        j                  | |      dz         }t        j                  t        j                  ||z
        d      S )a4  Computes the mean squared logarithmic error between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)
    ```

    Note that `y_pred` and `y_true` cannot be less than or equal to 0. Negative
    values and 0 values will be replaced with `keras.backend.epsilon()`
    (default to `1e-7`).

    Args:
        y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Mean squared logarithmic error values with shape = `[batch_size, d0, ..
        dN-1]`.

    Example:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
    r   rh   ra   r   )
r   r   r   r   r   r   logr   r   r   )r'   r(   r   	first_log
second_logs        r   rX   rX   9  s    L ##GOO$56G""6*F""6>F3FFCNFFFG4s:;IVW5;<J88CJJy:56R@@r   zkeras.losses.cosine_similarityc                     t        j                  |      }t        j                  | |j                        } t        | |      \  } }t	        ||      }t	        | |      } t        j
                  | |z  |       S )a  Computes the cosine similarity between labels and predictions.

    Formula:
    ```python
    loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
    ```

    Note that it is a number between -1 and 1. When it is a negative number
    between -1 and 0, 0 indicates orthogonality and values closer to -1
    indicate greater similarity. This makes it usable as a loss function in a
    setting where you try to maximize the proximity between predictions and
    targets. If either `y_true` or `y_pred` is a zero vector, cosine
    similarity will be 0 regardless of the proximity between predictions
    and targets.

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.
        axis: Axis along which to determine similarity. Defaults to `-1`.

    Returns:
        Cosine similarity tensor.

    Example:

    >>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
    >>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
    >>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
    [-0., -0.99999994, 0.99999994]
    r   r   )r   r   r   r   r   r   )r'   r(   r^   s      r   r_   r_   h  sk    @ ""6*F""6>F3FFCNFFvD)FvD)FGGFVO$///r   zkeras.losses.huberzkeras.metrics.huberc                 (   t        j                  |      }t        j                  | |j                        } t        | |      \  } }t        j                  ||j                        }t        j                  ||       }t        j
                  |      }t        j                  d|j                        }t        j                  t        j                  ||k  |t        j                  |      z  ||z  |t        j                  |      z  z
        d      S )a  Computes Huber loss value.

    Formula:
    ```python
    for x in error:
        if abs(x) <= delta:
            loss.append(0.5 * x^2)
        elif abs(x) > delta:
            loss.append(delta * abs(x) - 0.5 * delta^2)

    loss = mean(loss, axis=-1)
    ```
    See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).

    Example:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> loss = keras.losses.huber(y_true, y_pred)
    0.155


    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        delta: A float, the point where the Huber loss function changes from a
            quadratic to linear. Defaults to `1.0`.

    Returns:
        Tensor with one scalar loss entry per sample.
    r   r   ra   r   )	r   r   r   r   subtractr   r   wherer   )r'   r(   re   error	abs_errorhalfs         r   rf   rf     s    B ""6*F""6>F3FFCNFF!!%v||<ELL(EI  IOO<D88		3::e$$Iszz%'8 88	

  r   )zkeras.losses.log_coshzkeras.metrics.log_coshzkeras._legacy.losses.logcoshzkeras._legacy.metrics.logcoshc                 @   t        j                  |      }t        j                  | |j                        } t        | |      \  } }t        j                  t        j                  d      |j                        fd}t        j
                   ||| z
        d      S )aM  Logarithm of the hyperbolic cosine of the prediction error.

    Formula:
    ```python
    loss = mean(log(cosh(y_pred - y_true)), axis=-1)
    ```

    Note that `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small
    `x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works
    mostly like the mean squared error, but will not be so strongly affected by
    the occasional wildly incorrect prediction.

    Example:

    >>> y_true = [[0., 1.], [0., 0.]]
    >>> y_pred = [[1., 1.], [0., 0.]]
    >>> loss = keras.losses.log_cosh(y_true, y_pred)
    0.108

    Args:
        y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.

    Returns:
        Logcosh error values with shape = `[batch_size, d0, .. dN-1]`.
    r   r   c                 @    | t        j                  | dz        z   z
  S )Ng       )r   softplus)r    log2s    r   _logcoshzlog_cosh.<locals>._logcosh  s     3<<D))D00r   ra   r   )r   r   r   r   r   r   )r'   r(   r  r  s      @r   rm   rm     s{    H ""6*F""6>F3FFCNFF  V\\BD1 88HVf_-B77r   )zkeras.metrics.kl_divergencezkeras.losses.kl_divergencezkeras._legacy.losses.KLDzkeras._legacy.losses.kldz0keras._legacy.losses.kullback_leibler_divergencezkeras._legacy.metrics.KLDzkeras._legacy.metrics.kldz1keras._legacy.metrics.kullback_leibler_divergencec                 p   t        j                  |      }t        j                  | |j                        } t        j                  | t	        j
                         d      } t        j                  |t	        j
                         d      }t        j                  | t        j                  | |z        z  d      S )a  Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.

    Formula:

    ```python
    loss = y_true * log(y_true / y_pred)
    ```

    `y_true` and `y_pred` are expected to be probability
    distributions, with values between 0 and 1. They will get
    clipped to the `[0, 1]` range.

    Args:
        y_true: Tensor of true targets.
        y_pred: Tensor of predicted targets.

    Returns:
        KL Divergence loss values with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32)
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.kl_divergence(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> y_true = ops.clip(y_true, 1e-7, 1)
    >>> y_pred = ops.clip(y_pred, 1e-7, 1)
    >>> assert np.array_equal(
    ...     loss, np.sum(y_true * np.log(y_true / y_pred), axis=-1))
    r$   ra   r   )r   r   r   clipr   r   r   r   r   s     r   r   r     s    X ""6*F""66<<8FXXfgoo/3FXXfgoo/3F776CGGFVO442>>r   zkeras.metrics.poissonzkeras.losses.poissonc                 :   t        j                  |      }t        j                  | |j                        } t        j                  t        j                         |j                        }t        j
                  || t        j                  ||z         z  z
  d      S )a  Computes the Poisson loss between y_true and y_pred.

    Formula:

    ```python
    loss = y_pred - y_true * log(y_pred)
    ```

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
        Poisson loss values with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = np.random.randint(0, 2, size=(2, 3))
    >>> y_pred = np.random.random(size=(2, 3))
    >>> loss = keras.losses.poisson(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> y_pred = y_pred + 1e-7
    >>> assert np.allclose(
    ...     loss, np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
    ...     atol=1e-5)
    r   ra   r   )r   r   r   r   r   r   r   )r'   r(   r   s      r   r   r   $  sq    B ""6*F""6>F##GOO$5V\\JG88FVcggfw.>&???bIIr   z&keras.metrics.categorical_crossentropyz%keras.losses.categorical_crossentropyc                    t        |t              rt        d| dt        |             t	        j
                  |      }t	        j                  | |j                        } |j                  d   dk(  r*t        j                  d|j                   dt        d       |rDt	        j                  t	        j                  |       d   |j                        }| d	|z
  z  ||z  z   } t	        j                  | |||
      S )a  Computes the categorical crossentropy loss.

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
            example, if `0.1`, use `0.1 / num_classes` for non-target labels
            and `0.9 + 0.1 / num_classes` for target labels.
        axis: Defaults to `-1`. The dimension along which the entropy is
            computed.

    Returns:
        Categorical crossentropy loss value.

    Example:

    >>> y_true = [[0, 1, 0], [0, 0, 1]]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> loss = keras.losses.categorical_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.0513, 2.303], dtype=float32)
    -`axis` must be of type `int`. Received: axis=	 of type ra   r$   zIn loss categorical_crossentropy, expected y_pred.shape to be (batch_size, num_classes) with num_classes > 1. Received: y_pred.shape=B. Consider using 'binary_crossentropy' if you only have 2 classes.   
stacklevelrh   r   r^   )
isinstanceboolr   typer   r   r   r   shapewarningswarnSyntaxWarningr   )r'   r(   r   r   r^   num_classess         r   r   r   K  s    D $"V9T$ZL:
 	
 ""6*FXXffll+F||B1<<BLL> JOO 	
 hhsyy04fllC301k)
 ''Kd r   z,keras.metrics.categorical_focal_crossentropyz+keras.losses.categorical_focal_crossentropyc                    t        |t              rt        d| dt        |             t	        j
                  |      }t	        j                  | |j                        } |j                  d   dk(  r*t        j                  d|j                   dt        d       |rDt	        j                  t	        j                  |       d   |j                        }| d	|z
  z  ||z  z   } |rt	        j                  ||
      }|t	        j                  ||d      z  }t	        j                  |t        j                          d	t        j                          z
        }|  t	        j"                  |      z  }	t	        j$                  d	|z
  |      }
t	        j&                  |
|      }t	        j&                  ||	      }t	        j                  ||
      }|S )a4  Computes the categorical focal crossentropy loss.

    Args:
        y_true: Tensor of one-hot true targets.
        y_pred: Tensor of predicted targets.
        alpha: A weight balancing factor for all classes, default is `0.25` as
            mentioned in the reference. It can be a list of floats or a scalar.
            In the multi-class case, alpha may be set by inverse class
            frequency by using `compute_class_weight` from `sklearn.utils`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference. It helps to gradually reduce the importance given to
            simple examples in a smooth manner. When `gamma` = 0, there is
            no focal effect on the categorical crossentropy.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability
            distribution.
        label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
            example, if `0.1`, use `0.1 / num_classes` for non-target labels
            and `0.9 + 0.1 / num_classes` for target labels.
        axis: Defaults to `-1`. The dimension along which the entropy is
            computed.

    Returns:
        Categorical focal crossentropy loss value.

    Example:

    >>> y_true = [[0, 1, 0], [0, 0, 1]]
    >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]]
    >>> loss = keras.losses.categorical_focal_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([2.63401289e-04, 6.75912094e-01], dtype=float32)
    r
  r  ra   r$   zIn loss categorical_focal_crossentropy, expected y_pred.shape to be (batch_size, num_classes) with num_classes > 1. Received: y_pred.shape=r  r  r  rh   r   T)r^   keepdims)r  r  r   r  r   r   r   r   r  r  r  r  softmaxr   r  r   r   r   powermultiply)r'   r(   r   r   r   r   r^   r  outputccemodulating_factorweighting_factor	focal_cces                r   r   r     s   b $"V9T$ZL:
 	
 ""6*FXXffll+F||B1<<BLL> JOO 	
 hhsyy04fllC301k)
 V$/ cggf4$??FXXfgoo/w7H1HIF 'CGGFO
#C 		#,6||$5u= -s3I	-Ir   z-keras.metrics.sparse_categorical_crossentropyz,keras.losses.sparse_categorical_crossentropyc                    t        | j                        t        |j                        k(  r)| j                  d   dk(  rt        j                  | d      } |t        j                  |      dd }t        j                  | t        j
                  ||j                              }| t        j
                  || j                        z  } |t        j
                  t        j                  |d      |j                        z  }t        j                  | |||      }|Dt        j                        }t        j                  ||d      }t        j                  ||       |S )a  Computes the sparse categorical crossentropy loss.

    Args:
        y_true: Ground truth values.
        y_pred: The predicted values.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        ignore_class: Optional integer. The ID of a class to be ignored during
            loss computation. This is useful, for example, in segmentation
            problems featuring a "void" class (commonly -1 or 255) in
            segmentation maps. By default (`ignore_class=None`), all classes are
            considered.
        axis: Defaults to `-1`. The dimension along which the entropy is
            computed.

    Returns:
        Sparse categorical crossentropy loss value.

    Examples:

    >>> y_true = [1, 2]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> loss = keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.0513, 2.303], dtype=float32)
    ra   r$   r   Nr  r   )mask)lenr  r   squeeze	not_equalr   r   expand_dimsr   reshaper   r   set_keras_mask)r'   r(   r   r   r^   	res_shape
valid_maskress           r   r   r     s   J 6<<C--&,,r2Ba2GV"-IIf%cr*	]]6388L&,,+OP
#((:v||<<#((OOJ+V\\
 
 
-
-	C [[Y7
ii
C-s4Jr   z!keras.metrics.binary_crossentropyz keras.losses.binary_crossentropyc                     t        j                  |      }t        j                  | |j                        } |r| d|z
  z  d|z  z   } t        j                  t        j
                  | ||      |      S )a  Computes the binary crossentropy loss.

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
            squeezing them towards 0.5, that is,
            using `1. - 0.5 * label_smoothing` for the target class
            and `0.5 * label_smoothing` for the non-target class.
        axis: The axis along which the mean is computed. Defaults to `-1`.

    Returns:
        Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> loss = keras.losses.binary_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.916 , 0.714], dtype=float32)
    rh   r   )r   r   )r   r   r   r   r   r   )r'   r(   r   r   r^   s        r   r   r   )	  sk    D ""6*FXXffll+F301C/4II88KH r   z'keras.metrics.binary_focal_crossentropyz&keras.losses.binary_focal_crossentropyc                    t        j                  |      }t        j                  | |j                        } |r| d|z
  z  d|z  z   } |rt        j                  |      }t        j
                  | |d      }| |z  d| z
  d|z
  z  z   }	t        j                  d|	z
  |      }
|
|z  }|r| |z  d| z
  d|z
  z  z   }||z  }t        j                  ||      S )a
  Computes the binary focal crossentropy loss.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more on
    hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
    effect on the binary crossentropy loss.

    If `apply_class_balancing == True`, this function also takes into account a
    weight balancing factor for the binary classes 0 and 1 as follows:

    `weight = alpha` for class 1 (`target == 1`)
    `weight = 1 - alpha` for class 0
    where `alpha` is a float in the range of `[0, 1]`.

    Args:
        y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`.
        y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
        apply_class_balancing: A bool, whether to apply weight balancing on the
            binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in the reference. The weight for class 0 is `1.0 - alpha`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability distribution.
        label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels by
            squeezing them towards 0.5, that is,
            using `1. - 0.5 * label_smoothing` for the target class
            and `0.5 * label_smoothing` for the non-target class.
        axis: The axis along which the mean is computed. Defaults to `-1`.

    Returns:
        Binary focal crossentropy loss value
        with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # In this instance, the first sample in the second batch is the
    >>> # 'easier' example.
    >>> focal_loss = keras.losses.binary_focal_crossentropy(
    ...        y_true, y_pred, gamma=2)
    >>> assert loss.shape == (2,)
    >>> focal_loss
    array([0.330, 0.206], dtype=float32)
    >>> # Compare with binary_crossentropy
    >>> bce_loss = keras.losses.binary_focal_crossentropy(
    ...        y_true, y_pred)
    >>> bce_loss
    array([0.916, 0.714], dtype=float32)
    >>> # Binary focal crossentropy loss attributes more importance to the
    >>> # harder example which results in a higher loss for the first batch
    >>> # when normalized by binary cross entropy loss
    >>> focal_loss/bce_loss
    array([0.360, 0.289])
    rh   r   F)targetr  r   r$   r   )r   r   r   r   sigmoidr   r  r   )r'   r(   r   r   r   r   r   r^   bcep_tfocal_factor	focal_bceweights                r   r   r   W	  s    Z ""6*FXXffll+F301C/4IIV$

!
!C 6/QZAJ7
7C99S3Y.Ls"I%1v:!e)"<<Y&	88ID))r   zkeras.losses.ctcc                 6   t        t        j                  |             dk7  r!t        dt        j                  |              t        t        j                  |            dk7  r!t        dt        j                  |             d}t        j                  |      d   }t        j                  |      d   }|t        j                  |fd      z  }t        j
                  t        j                  | |k7  d	
      d      }t        j                  | ||||      S )a  CTC (Connectionist Temporal Classification) loss.

    Args:
        y_true: A tensor of shape `(batch_size, max_length)` containing
            the true labels in integer format. `0` always represents
            the blank/mask index and should not be used for classes.
        y_pred: A tensor of shape `(batch_size, max_length, num_classes)`
            containing logits (the output of your model).
            They should *not* be normalized via softmax.
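
    Example:

    A shape-only sketch (random logits and `0`-padded labels; it assumes the
    blank index is `0`, as described above, and the values are illustrative):

    ```python
    import numpy as np
    import keras

    y_true = np.array([[1, 2, 3, 0], [2, 2, 0, 0]])  # 0-padded label sequences
    y_pred = np.random.random((2, 10, 6)).astype("float32")  # raw logits
    loss = keras.losses.ctc(y_true, y_pred)  # per-sample loss, shape (2,)
    ```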
    r  z{Targets `y_true` are expected to be a tensor of shape `(batch_size, max_length)` in integer format. Received: y_true.shape=   zuLogits `y_pred` are expected to be a tensor of shape `(batch_size, max_length, num_classes)`. Received: y_pred.shape=r   r$   int32r   ra   r   )
mask_index)r%  r   r  r   onesr   r   ctc_loss)r'   r(   r:  batch_lengthinput_lengthlabel_lengths         r   r   r   	  s    399V"&&)ii&7%8:
 	

 399V"&&)ii&7%8:
 	
 J99V$Q'L99V$Q'L#((L?'"JJL88*$2.gL <<lz r   zkeras.losses.dicec                 h   t        j                  |      }t        j                  | |j                        } | }|}t        j                  ||z  |      }t        j
                  d|z  t        j                  | |      t        j                  ||      z   t        j                         z         }d|z
  S )a  Computes the Dice loss value between `y_true` and `y_pred`.

    Formula:
    ```python
    loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))
    ```

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        axis: tuple for which dimensions the loss is calculated

    Returns:
        Dice loss value.

    Example:

    >>> y_true = [[[[1.0], [1.0]], [[0.0], [0.0]]],
    ...           [[[1.0], [1.0]], [[0.0], [0.0]]]]
    >>> y_pred = [[[[0.0], [1.0]], [[0.0], [1.0]]],
    ...           [[[0.4], [0.0]], [[0.0], [0.9]]]]
    >>> axis = (1, 2, 3)
    >>> loss = keras.losses.dice(y_true, y_pred, axis=axis)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.5, 0.75757575], shape=(2,), dtype=float32)

    >>> loss = keras.losses.dice(y_true, y_pred)
    >>> assert loss.shape == ()
    >>> loss
    array(0.6164384, shape=(), dtype=float32)

    r   r   r$   r   r   r   r   r   divider   r   )r'   r(   r^   inputstargetsintersectionr   s          r   r   r   	  s    F ""6*FXXffll+FFG776G+$7L::lT"
''&t
$	%
//
	D t8Or   zkeras.losses.tverskyc                    t        j                  |      }t        j                  | |j                        } | }|}t        j                  ||z  |      }t        j                  d|z
  |z  |      }t        j                  |d|z
  z  |      }	t        j
                  ||||z  z   |	|z  z   t        j                         z         }
d|
z
  S )a  Computes the Tversky loss value between `y_true` and `y_pred`.

    This loss function is weighted by the alpha and beta coefficients
    that penalize false positives and false negatives.

    With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to
    Dice Loss.

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        alpha: coefficient controlling incidence of false positives.
        beta: coefficient controlling incidence of false negatives.
        axis: tuple for which dimensions the loss is calculated.

    Returns:
        Tversky loss value.

    Reference:

    - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721)
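
    Example:

    An illustrative sketch (with `beta > alpha`, false negatives are penalized
    more heavily; with the default `alpha=0.5` and `beta=0.5` the result
    reduces to `keras.losses.dice` on the same inputs):

    ```python
    import numpy as np
    import keras

    y_true = np.array([[1.0, 1.0, 0.0, 0.0]])
    y_pred = np.array([[0.9, 0.4, 0.2, 0.1]])
    loss = keras.losses.tversky(y_true, y_pred, alpha=0.3, beta=0.7)
    ```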
    r   r$   rA  )r'   r(   r   r   r^   rC  rD  rE  fpr   r   s              r   r   r   
  s    0 ""6*FXXffll+FFG776G+$7L	!g+'d	3B	AJ'd	3BjjrEz!BI-0AAG
 w;r   zkeras.losses.circlec           	         t        j                  |      }t        j                  | d      } ||nt        j                  |      }|| nt        j                  |d      }|}d|z   }|}	d|z
  }
dt        j                  |t        j                  |            z
  }t        j
                  |d      }t        | ||      \  }}t        j                  ||j                        }t        j                  ||j                        }||z   }||z  }t        j
                  |d      }||z
  }||z  }t        j
                  |d      }|	|z
  }|
|z
  }d|z  |z  |z  }||z  |z  }t        j                  t        j                  ||t        d            d      }t        j                  t        j                  ||t        d            d      }t        j                  ||z         }t        j                  ||d	kD         |S )
a]  Computes the Circle loss.

    It is designed to minimize within-class distances and maximize between-class
    distances in L2 normalized embedding space.

    Args:
        y_true: Tensor with ground truth labels in integer format.
        y_pred: Tensor with predicted L2 normalized embeddings.
        ref_labels: Optional integer tensor with labels for reference
            embeddings. If `None`, defaults to `y_true`.
        ref_embeddings: Optional tensor with L2 normalized reference embeddings.
            If `None`, defaults to `y_pred`.
        remove_diagonal: Boolean, whether to remove self-similarities from
            positive mask. Defaults to `True`.
        gamma: Float, scaling factor for the loss. Defaults to `80`.
        margin: Float, relaxation factor for the loss. Defaults to `0.4`.

    Returns:
        Circle loss value.
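
    Example:

    A minimal sketch with random, explicitly L2-normalized embeddings (the
    values are illustrative only):

    ```python
    import numpy as np
    import keras

    labels = np.array([0, 1, 0, 1])
    embeddings = np.random.normal(size=(4, 8)).astype("float32")
    embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True)
    loss = keras.losses.circle(labels, embeddings, gamma=80, margin=0.4)
    ```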
    r9  r$   r   )r   r   ra   z-infr   r   )r   r   r   matmul	transposer   r
   r   	logsumexpr   floatr  r   r*  )r'   r(   
ref_labelsref_embeddingsr   r   r   	optim_pos	optim_neg	delta_pos	delta_negpairwise_cosine_distancespositive_masknegative_maskpos_weightsneg_weights	pos_dists	neg_dists
pos_wdists
neg_wdistsp_lossn_losscircle_losss                          r   r   r   D
  s   < ""6*FXXfg&F ! 	"">2 
 &-388J3PJIF
IIF
I !CJJn-% ! !$,Es K#6'$ M=
 HH6<<M HH6<<M 77K-K++k3/K77K-K++k3/K55I55Iek)I5J$y0J]]		-U6];F ]]		-U6];F
 ,,v/K;a8r   z2keras.losses.categorical_generalized_cross_entropyc                 4   t        j                  t        j                  | d      t        j                  |      d         }t        j                  ||j                        }t        j
                  ||z  d      }dt        j                  ||      z
  |z  }|S )a  Computes the Generalized Cross Entropy loss.

    Generalized Cross Entropy (GCE) is a noise-robust loss function that
    provides better robustness against noisy labels than standard cross entropy.
    It generalizes both cross entropy and mean absolute error through
    the parameter q, where values closer to 1 make the loss more robust
    to noisy labels.

    Formula:
    ```python
    loss = (1 - p**q) / q
    ```
    where `p` is the predicted probability for the true class and `q`
    is the noise parameter.

    Args:
        y_true: Ground truth labels. Expected to contain *integer class indices*
            with shape `[batch_size]` or `[batch_size, 1]`.
        y_pred: The predicted class probabilities, with shape
            `[batch_size, num_classes]`.
        q: Float in range `(0, 1)`. It is the noise parameter.
           Controls the behavior of the loss:
            - As `q` approaches 0: Behaves more like cross entropy
            - As `q` approaches 1: Behaves more like mean absolute error

    Returns:
        GCE loss values with shape `[batch_size]`.

    References:
        - [Zhang, Sabuncu, 2018](https://arxiv.org/abs/1805.07836)
          ("Generalized Cross Entropy Loss for Training
            Deep Neural Networks with Noisy Labels")
    intra   )r  r   r$   )r   one_hotr   r  r   r   r  )r'   r(   r   y_true_one_hotpgce_losss         r   r   r   
  s|    L [[SYYv->r-BN XXnfll;N'b1A CIIaO#q(HOr   )ra   )rh   )Fr   ra   )r   r   Fr   ra   )FNra   )Fr   r   Fr   ra   rD   )r   r   N)NNTP   r   )=r  	keras.srcr   r   r   keras.src.api_exportr   keras.src.losses.lossr   r   keras.src.savingr	   keras.src.utils.numerical_utilsr
   r   r   r?   rJ   rO   rV   r[   rc   rk   rp   ru   r{   r   r   r   r   r   r   r   r   r   r   r   r   r   rr   rw   r}   rA   rL   rQ   rX   r_   rf   rm   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   <module>rk     sr       - & @ . ? 5"M$ "MJ -.%%* %% /%%P ./%%+ %% 0%%P 89(%"5 (% :(%V 89(%"5 (% :(%V -.3%* 3% /3%l "#3% 3% $3%l $%%%! %% &%%P "#&% &% $&%R )*%%& %% +%%P -.'%* '% /'%T )*&%& &% +&%R $% %!  % & %F /0z, z 1zz 45x1 x 6xv 45d1 d 6dN 9:V#6 V ;Vr :;a$7 a <aH  !%
 % "%6 !"F F #FR $%@! @ &@F #$X  X %Xv ?@L)< L AL^( FF> %$@ )( . .F 
:
:8 
7
74 
 +
 +F 
!A
!AH ./$0 0$0N #%:;<. =.b #8#8L 
#?#?L JJB 0/ BD66r 65 
	WWt 76 @B66r +* BD%%P 10  

	`*`*F  !" ""J !"0 #0f $%& &&R #$ 
T %Tn BC/ D/r   