
from keras.src.api_export import keras_export
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.metrics import reduction_metrics


@keras_export("keras.metrics.KLDivergence")
class KLDivergence(reduction_metrics.MeanMetricWrapper):
    """Computes Kullback-Leibler divergence metric between `y_true` and
    `y_pred`.

    Formula:

    ```python
    metric = y_true * log(y_true / y_pred)
    ```

    `y_true` and `y_pred` are expected to be probability
    distributions, with values between 0 and 1. They will get
    clipped to the `[0, 1]` range.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.KLDivergence()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result()
    0.45814306

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.9162892

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.KLDivergence()])
    ```
    """

    def __init__(self, name="kl_divergence", dtype=None):
        super().__init__(fn=kl_divergence, name=name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.Poisson")
class Poisson(reduction_metrics.MeanMetricWrapper):
    """Computes the Poisson metric between `y_true` and `y_pred`.

    Formula:

    ```python
    metric = y_pred - y_true * log(y_pred)
    ```

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.Poisson()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.99999994

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.Poisson()])
    ```
    """

    def __init__(self, name="poisson", dtype=None):
        super().__init__(fn=poisson, name=name, dtype=dtype)

    def get_config(self):
        return {"name": self.name, "dtype": self.dtype}


@keras_export("keras.metrics.BinaryCrossentropy")
class BinaryCrossentropy(reduction_metrics.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    This is the crossentropy metric class to be used when there are only two
    label classes (0 and 1).

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_logits: (Optional) Whether output is expected
            to be a logits tensor. By default, we consider
            that output encodes a probability distribution.
        label_smoothing: (Optional) Float in `[0, 1]`.
            When > 0, label values are smoothed,
            meaning the confidence on label values is relaxed.
            e.g. `label_smoothing=0.2` means that we will use
            a value of 0.1 for label "0" and 0.9 for label "1".

    Examples:

    >>> m = keras.metrics.BinaryCrossentropy()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result()
    0.81492424

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result()
    0.9162905

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.BinaryCrossentropy()])
    ```
    """

    def __init__(
        self,
        name="binary_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
    ):
        super().__init__(
            binary_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
        )
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing

        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {
            "name": self.name,
            "dtype": self.dtype,
            "from_logits": self.from_logits,
            "label_smoothing": self.label_smoothing,
        }


@keras_export("keras.metrics.CategoricalCrossentropy")
class CategoricalCrossentropy(reduction_metrics.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    This is the crossentropy metric class to be used when there are multiple
    label classes (2 or more). It assumes that labels are one-hot encoded,
    e.g., when label values are `[2, 0, 1]`, then
    `y_true` is `[[0, 0, 1], [1, 0, 0], [0, 1, 0]]`.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_logits: (Optional) Whether output is expected to be
            a logits tensor. By default, we consider that output
            encodes a probability distribution.
        label_smoothing: (Optional) Float in `[0, 1]`.
            When > 0, label values are smoothed, meaning the confidence
            on label values is relaxed. e.g. `label_smoothing=0.2` means
            that we will use a value of 0.1 for label
            "0" and 0.9 for label "1".
        axis: (Optional) Defaults to `-1`.
            The dimension along which entropy is computed.

    Examples:

    >>> # EPSILON = 1e-7, y = y_true, y` = y_pred
    >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(y'), axis = -1)
    >>> #      = -((log 0.95), (log 0.1))
    >>> #      = [0.051, 2.302]
    >>> # Reduced xent = (0.051 + 2.302) / 2
    >>> m = keras.metrics.CategoricalCrossentropy()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=np.array([0.3, 0.7]))
    >>> m.result()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.CategoricalCrossentropy()])
    ```
    """

    def __init__(
        self,
        name="categorical_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
        axis=-1,
    ):
        super().__init__(
            categorical_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
        self.from_logits = from_logits
        self.label_smoothing = label_smoothing
        self.axis = axis

        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {
            "name": self.name,
            "dtype": self.dtype,
            "from_logits": self.from_logits,
            "label_smoothing": self.label_smoothing,
            "axis": self.axis,
        }


@keras_export("keras.metrics.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(reduction_metrics.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    Use this crossentropy metric when there are two or more label classes.
    It expects labels to be provided as integers. If you want to provide labels
    that are one-hot encoded, please use the `CategoricalCrossentropy`
    metric instead.

    There should be `num_classes` floating point values per feature for `y_pred`
    and a single floating point value per feature for `y_true`.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        from_logits: (Optional) Whether output is expected
            to be a logits tensor. By default, we consider that output
            encodes a probability distribution.
        axis: (Optional) Defaults to `-1`.
            The dimension along which entropy is computed.

    Examples:

    >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
    >>> # logits = log(y_pred)
    >>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
    >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(softmax), 1)
    >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
    >>> #                [-2.3026, -0.2231, -2.3026]]
    >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
    >>> # xent = [0.0513, 2.3026]
    >>> # Reduced xent = (0.0513 + 2.3026) / 2
    >>> m = keras.metrics.SparseCategoricalCrossentropy()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=np.array([0.3, 0.7]))
    >>> m.result()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[keras.metrics.SparseCategoricalCrossentropy()])
    ```
    """

    def __init__(
        self,
        name="sparse_categorical_crossentropy",
        dtype=None,
        from_logits=False,
        axis=-1,
    ):
        super().__init__(
            sparse_categorical_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            axis=axis,
        )
        self.from_logits = from_logits
        self.axis = axis

        # Metric should be minimized during optimization.
        self._direction = "down"

    def get_config(self):
        return {
            "name": self.name,
            "dtype": self.dtype,
            "from_logits": self.from_logits,
            "axis": self.axis,
        }
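

if __name__ == "__main__":
    # Standalone sanity checks: a minimal sketch reproducing the docstring
    # values for KLDivergence and Poisson with plain NumPy, assuming the
    # conventional backend fuzz factor of 1e-7 for numerical stability.
    # Run this file directly to print both values.
    import numpy as np

    epsilon = 1e-7  # assumed backend epsilon

    # KLDivergence docstring value (~0.45814306):
    # metric = y_true * log(y_true / y_pred), with both tensors clipped to
    # [epsilon, 1], summed over the last axis and averaged over the batch.
    y_true = np.clip(np.array([[0.0, 1.0], [0.0, 0.0]]), epsilon, 1.0)
    y_pred = np.clip(np.array([[0.6, 0.4], [0.4, 0.6]]), epsilon, 1.0)
    kl_per_sample = np.sum(y_true * np.log(y_true / y_pred), axis=-1)
    print("KLDivergence:", kl_per_sample.mean())  # ~0.45814306

    # Poisson docstring value (~0.49999997):
    # metric = y_pred - y_true * log(y_pred), with an epsilon guard inside
    # the log, averaged over the last axis and then over the batch.
    y_true = np.array([[0.0, 1.0], [0.0, 0.0]])
    y_pred = np.array([[1.0, 1.0], [0.0, 0.0]])
    poisson_per_sample = np.mean(
        y_pred - y_true * np.log(y_pred + epsilon), axis=-1
    )
    print("Poisson:", poisson_per_sample.mean())  # ~0.5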