
import numpy as np

from keras.src import activations
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.metrics import metrics_utils
from keras.src.metrics.metric import Metric
from keras.src.utils.python_utils import to_list


class _ConfusionMatrixConditionCount(Metric):
    """Calculates the number of the given confusion matrix condition.

    Args:
        confusion_matrix_cond: One of `metrics_utils.ConfusionMatrix`
            conditions.
        thresholds: (Optional) Defaults to `0.5`. A float value or a python list
            / tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
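
    Example (illustrative; this private base class is not exported, so the
    sketch uses the `TruePositives` subclass, which inherits this behavior):
    with `thresholds=[0.3, 0.7]`, `y_true=[0, 1, 1, 1]` and
    `y_pred=[0.2, 0.6, 0.9, 0.4]`, the metric reports one count per
    threshold, `[3, 1]` here, because three positive predictions exceed
    `0.3` but only one exceeds `0.7`.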
    """

    def __init__(
        self, confusion_matrix_cond, thresholds=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        self._confusion_matrix_cond = confusion_matrix_cond
        self.init_thresholds = thresholds
        self.thresholds = metrics_utils.parse_init_thresholds(
            thresholds, default_threshold=0.5
        )
        self._thresholds_distributed_evenly = (
            metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
        )
        self.accumulator = self.add_variable(
            shape=(len(self.thresholds),),
            initializer=initializers.Zeros(),
            name="accumulator",
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the metric statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        """
        return metrics_utils.update_confusion_matrix_variables(
            {self._confusion_matrix_cond: self.accumulator},
            y_true,
            y_pred,
            thresholds=self.thresholds,
            thresholds_distributed_evenly=self._thresholds_distributed_evenly,
            sample_weight=sample_weight,
        )

    def result(self):
        if len(self.thresholds) == 1:
            result = self.accumulator[0]
        else:
            result = self.accumulator
        return ops.convert_to_tensor(result)

    def get_config(self):
        config = {"thresholds": self.init_thresholds}
        base_config = super().get_config()
        return {**base_config, **config}


@keras_export("keras.metrics.FalsePositives")
class FalsePositives(_ConfusionMatrixConditionCount):
    """Calculates the number of false positives.

    If `sample_weight` is given, calculates the sum of the weights of
    false positives. This metric creates one local variable, `accumulator`
    that is used to keep track of the number of false positives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Examples:

    >>> m = keras.metrics.FalsePositives()
    >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    """

    def __init__(self, thresholds=None, name=None, dtype=None):
        super().__init__(
            confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES,
            thresholds=thresholds,
            name=name,
            dtype=dtype,
        )


@keras_export("keras.metrics.FalseNegatives")
class FalseNegatives(_ConfusionMatrixConditionCount):
    """Calculates the number of false negatives.

    If `sample_weight` is given, calculates the sum of the weights of
    false negatives. This metric creates one local variable, `accumulator`
    that is used to keep track of the number of false negatives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.FalseNegatives()
    >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    """

    def __init__(self, thresholds=None, name=None, dtype=None):
        super().__init__(
            confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_NEGATIVES,
            thresholds=thresholds,
            name=name,
            dtype=dtype,
        )


@keras_export("keras.metrics.TrueNegatives")
class TrueNegatives(_ConfusionMatrixConditionCount):
    """Calculates the number of true negatives.

    If `sample_weight` is given, calculates the sum of the weights of
    true negatives. This metric creates one local variable, `accumulator`
    that is used to keep track of the number of true negatives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.TrueNegatives()
    >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    """

    def __init__(self, thresholds=None, name=None, dtype=None):
        super().__init__(
            confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
            thresholds=thresholds,
            name=name,
            dtype=dtype,
        )


@keras_export("keras.metrics.TruePositives")
class TruePositives(_ConfusionMatrixConditionCount):
    """Calculates the number of true positives.

    If `sample_weight` is given, calculates the sum of the weights of
    true positives. This metric creates one local variable, `true_positives`
    that is used to keep track of the number of true positives.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
            list/tuple of float threshold values in `[0, 1]`. A threshold is
            compared with prediction values to determine the truth value of
            predictions (i.e., above the threshold is `True`, below is `False`).
            If used with a loss function that sets `from_logits=True` (i.e. no
            sigmoid applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.TruePositives()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    2.0

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
    """

    def __init__(self, thresholds=None, name=None, dtype=None):
        super().__init__(
            confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES,
            thresholds=thresholds,
            name=name,
            dtype=dtype,
        )


@keras_export("keras.metrics.Precision")
class Precision(Metric):
    """Computes the precision of the predictions with respect to the labels.

    The metric creates two local variables, `true_positives` and
    `false_positives` that are used to compute the precision. This value is
    ultimately returned as `precision`, an idempotent operation that simply
    divides `true_positives` by the sum of `true_positives` and
    `false_positives`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `top_k` is set, we'll calculate precision as how often on average a class
    among the top-k classes with the highest predicted values of a batch entry
    is correct and can be found in the label for that entry.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold and/or in
    the top-k highest predictions, and computing the fraction of them for which
    `class_id` is indeed a correct label.

    Args:
        thresholds: (Optional) A float value, or a Python list/tuple of float
            threshold values in `[0, 1]`. A threshold is compared with
            prediction values to determine the truth value of predictions (i.e.,
            above the threshold is `True`, below is `False`). If used with a
            loss function that sets `from_logits=True` (i.e. no sigmoid applied
            to predictions), `thresholds` should be set to 0. One metric value
            is generated for each threshold value. If neither `thresholds` nor
            `top_k` are set, the default is to calculate precision with
            `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.Precision()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    0.6666667

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0

    >>> # With top_k=2, it will calculate precision over y_true[:2]
    >>> # and y_pred[:2]
    >>> m = keras.metrics.Precision(top_k=2)
    >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
    >>> m.result()
    0.0

    >>> # With top_k=4, it will calculate precision over y_true[:4]
    >>> # and y_pred[:4]
    >>> m = keras.metrics.Precision(top_k=4)
    >>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])
    >>> m.result()
    0.5
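
    >>> # With class_id=1 and one-hot labels (an illustrative sketch), only
    >>> # the class-1 column is compared against the default 0.5 threshold.
    >>> m = keras.metrics.Precision(class_id=1)
    >>> m.update_state([[0, 1], [1, 0]], [[0.2, 0.8], [0.3, 0.7]])
    >>> m.result()
    0.5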

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Precision()])
    ```

    Usage with a loss with `from_logits=True`:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.Precision(thresholds=0)])
    ```
    """

    def __init__(
        self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        self._direction = "up"
        self.init_thresholds = thresholds
        self.top_k = top_k
        self.class_id = class_id

        default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
        self.thresholds = metrics_utils.parse_init_thresholds(
            thresholds, default_threshold=default_threshold
        )
        self._thresholds_distributed_evenly = (
            metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
        )
        self.true_positives = self.add_variable(
            shape=(len(self.thresholds),),
            initializer=initializers.Zeros(),
            name="true_positives",
        )
        self.false_positives = self.add_variable(
            shape=(len(self.thresholds),),
            initializer=initializers.Zeros(),
            name="false_positives",
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates true positive and false positive statistics.

        Args:
            y_true: The ground truth values, with the same dimensions as
                `y_pred`. Will be cast to `bool`.
            y_pred: The predicted values. Each element must be in the range
                `[0, 1]`.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        """
        metrics_utils.update_confusion_matrix_variables(
            {
                metrics_utils.ConfusionMatrix.TRUE_POSITIVES: (
                    self.true_positives
                ),
                metrics_utils.ConfusionMatrix.FALSE_POSITIVES: (
                    self.false_positives
                ),
            },
            y_true,
            y_pred,
            thresholds=self.thresholds,
            thresholds_distributed_evenly=self._thresholds_distributed_evenly,
            top_k=self.top_k,
            class_id=self.class_id,
            sample_weight=sample_weight,
        )

    def result(self):
        result = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_positives),
        )
        return result[0] if len(self.thresholds) == 1 else result

    def reset_state(self):
        num_thresholds = len(to_list(self.thresholds))
        self.true_positives.assign(ops.zeros((num_thresholds,)))
        self.false_positives.assign(ops.zeros((num_thresholds,)))

    def get_config(self):
        config = {
            "thresholds": self.init_thresholds,
            "top_k": self.top_k,
            "class_id": self.class_id,
        }
        base_config = super().get_config()
        return {**base_config, **config}


@keras_export("keras.metrics.Recall")
class Recall(Metric):
    """Computes the recall of the predictions with respect to the labels.

    This metric creates two local variables, `true_positives` and
    `false_negatives`, that are used to compute the recall. This value is
    ultimately returned as `recall`, an idempotent operation that simply divides
    `true_positives` by the sum of `true_positives` and `false_negatives`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `top_k` is set, recall will be computed as how often on average a class
    among the labels of a batch entry is in the top-k predictions.

    If `class_id` is specified, we calculate recall by considering only the
    entries in the batch for which `class_id` is in the label, and computing the
    fraction of them for which `class_id` is above the threshold and/or in the
    top-k predictions.

    Args:
        thresholds: (Optional) A float value, or a Python list/tuple of float
            threshold values in `[0, 1]`. A threshold is compared with
            prediction values to determine the truth value of predictions (i.e.,
            above the threshold is `True`, below is `False`). If used with a
            loss function that sets `from_logits=True` (i.e. no sigmoid
            applied to predictions), `thresholds` should be set to 0.
            One metric value is generated for each threshold value.
            If neither `thresholds` nor `top_k` are set,
            the default is to calculate recall with `thresholds=0.5`.
        top_k: (Optional) Unset by default. An int value specifying the top-k
            predictions to consider when calculating recall.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.Recall()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])
    >>> m.result()
    0.6666667

    >>> m.reset_state()
    >>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])
    >>> m.result()
    1.0
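
    >>> # With top_k=2 (an illustrative sketch), a label only counts as
    >>> # recalled if its class is among the two highest-scoring predictions.
    >>> m = keras.metrics.Recall(top_k=2)
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.2, 0.1, 0.7], [0.6, 0.1, 0.3]])
    >>> m.result()
    0.5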

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='binary_crossentropy',
                  metrics=[keras.metrics.Recall()])
    ```

    Usage with a loss with `from_logits=True`:

    ```python
    model.compile(optimizer='adam',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.Recall(thresholds=0)])
    ```
    """

    def __init__(
        self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None
    ):
        super().__init__(name=name, dtype=dtype)
        self._direction = "up"
        self.init_thresholds = thresholds
        self.top_k = top_k
        self.class_id = class_id

        default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF
        self.thresholds = metrics_utils.parse_init_thresholds(
            thresholds, default_threshold=default_threshold
        )
        self._thresholds_distributed_evenly = (
            metrics_utils.is_evenly_distributed_thresholds(self.thresholds)
        )
        self.true_positives = self.add_variable(
            shape=(len(self.thresholds),),
            initializer=initializers.Zeros(),
            name="true_positives",
        )
        self.false_negatives = self.add_variable(
            shape=(len(self.thresholds),),
            initializer=initializers.Zeros(),
            name="false_negatives",
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates true positive and false negative statistics.
        Args:
            y_true: The ground truth values, with the same dimensions as
                `y_pred`. Will be cast to `bool`.
            y_pred: The predicted values. Each element must be in the range
                `[0, 1]`.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        rg   N)r   r+   rG   rY   r^   rP   r}   r   r   ra   rb   r,   s       r&   r/   zRecall.update_state  rh   r'   c                     t        j                  | j                  t        j                  | j                  | j                              }t        | j                        dk(  r|d   S |S r1   )r   rj   r^   rk   r}   r!   r   r4   s     r&   r5   zRecall.result  rl   r'   c                     t        t        | j                              }| j                  j	                  t        j                  |f             | j                  j	                  t        j                  |f             y r<   )r!   r
   r   r^   rn   r   ro   r}   rp   s     r&   rr   zRecall.reset_state  rs   r'   c                 t    | j                   | j                  | j                  d}t        |          }i ||S ru   rv   r8   s      r&   r7   zRecall.get_config$  rw   r'   rx   r<   ry   rB   s   @r&   r{   r{     s0    ?D LP
:
4BB
) )r'   r{   c                   D     e Zd ZdZ	 d fd	ZddZd Z fdZd Z xZ	S )	SensitivitySpecificityBasezAbstract base class for computing sensitivity and specificity.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).
    c                    t         |   ||       d| _        |dk  rt        d|       || _        || _        |dk(  rdg| _        d| _        n=t        |dz
        D cg c]  }|dz   d	z  |dz
  z   }}d
g|z   d	gz   | _        d| _        | j                  t        | j                        ft        j                         d      | _        | j                  t        | j                        ft        j                         d      | _        | j                  t        | j                        ft        j                         d      | _        | j                  t        | j                        ft        j                         d      | _        y c c}w )Nr   r]   r   zKArgument `num_thresholds` must be an integer > 0. Received: num_thresholds=r2   r   F         ?        Tr^   r   r_   true_negativesr}   )r   r   r`   
ValueErrorvaluerb   r   r   ranger    r!   r   r"   r^   r_   r   r}   )	r#   r   rq   rb   r   r   ir   r%   s	           r&   r   z#SensitivitySpecificityBase.__init__5  s    	d%0Q,,:+;=  
  Q"eDO27D/ ~12 Q#!!34J   #ej0C58DO26D/"//t')$**,! 0 

  $00t')$**,"  1  

 #//t')$**,! 0 

  $00t')$**,"  1  
-s    F
c           
         t        j                  t         j                  j                  | j                  t         j                  j
                  | j                  t         j                  j                  | j                  t         j                  j                  | j                  i||| j                  | j                  | j                  |       y)at  Accumulates confusion matrix statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Defaults to `1`.
                Can be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`.
        """
        metrics_utils.update_confusion_matrix_variables(
            {
                metrics_utils.ConfusionMatrix.TRUE_POSITIVES: (
                    self.true_positives
                ),
                metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: (
                    self.true_negatives
                ),
                metrics_utils.ConfusionMatrix.FALSE_POSITIVES: (
                    self.false_positives
                ),
                metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: (
                    self.false_negatives
                ),
            },
            y_true,
            y_pred,
            thresholds=self.thresholds,
            class_id=self.class_id,
            sample_weight=sample_weight,
        )

    def reset_state(self):
        num_thresholds = len(self.thresholds)
        self.true_positives.assign(ops.zeros((num_thresholds,)))
        self.false_positives.assign(ops.zeros((num_thresholds,)))
        self.true_negatives.assign(ops.zeros((num_thresholds,)))
        self.false_negatives.assign(ops.zeros((num_thresholds,)))

    def get_config(self):
        config = {"class_id": self.class_id}
        base_config = super().get_config()
        return {**base_config, **config}

    def _find_max_under_constraint(self, constrained, dependent, predicate):
        """Returns the maximum of dependent_statistic that satisfies the
        constraint.

        Args:
            constrained: Over these values the constraint is specified. A rank-1
                tensor.
            dependent: From these values the maximum that satiesfies the
                constraint is selected. Values in this tensor and in
                `constrained` are linked by having the same threshold at each
                position, hence this tensor must have the same shape.
            predicate: A binary boolean functor to be applied to arguments
                `constrained` and `self.value`, e.g. `ops.greater`.
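
        Example (illustrative): with `constrained = [0.3, 0.7, 0.9]`,
        `dependent = [0.8, 0.6, 0.2]`, `self.value = 0.5` and
        `predicate = ops.greater`, the feasible positions are the last two,
        so the returned value is `0.6`.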

        Returns:
            maximal dependent value, if no value satisfies the constraint 0.0.
        """
        feasible = ops.nonzero(predicate(constrained, self.value))
        feasible_exists = ops.greater(ops.size(feasible), 0)
        max_dependent = ops.max(ops.take(dependent, feasible), initial=0)
        return ops.where(feasible_exists, max_dependent, 0.0)


@keras_export("keras.metrics.SensitivityAtSpecificity")
class SensitivityAtSpecificity(SensitivitySpecificityBase):
    """Computes best sensitivity where specificity is >= specified value.

    `Sensitivity` measures the proportion of actual positives that are correctly
    identified as such `(tp / (tp + fn))`.
    `Specificity` measures the proportion of actual negatives that are correctly
    identified as such `(tn / (tn + fp))`.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the sensitivity at the given specificity. The threshold for the
    given specificity value is computed and used to evaluate the corresponding
    sensitivity.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

    Args:
        specificity: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds to
            use for matching the given specificity.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SensitivityAtSpecificity(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[1, 1, 2, 2, 1])
    >>> m.result()
    0.333333
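
    In the first example above, the metric sweeps its `num_thresholds`
    candidate thresholds and keeps the best sensitivity among those whose
    specificity is at least `0.5` (an illustrative walk-through): around the
    `0.5` threshold only one of the three negatives scores above it, so
    specificity is `2/3`, while one of the two positives scores above it,
    giving the reported sensitivity of `0.5`.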

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.SensitivityAtSpecificity(specificity=0.5)])
    ```
    """

    def __init__(
        self,
        specificity,
        num_thresholds=200,
        class_id=None,
        name=None,
        dtype=None,
    ):
        if specificity < 0 or specificity > 1:
            raise ValueError(
                "Argument `specificity` must be in the range [0, 1]. "
                f"Received: specificity={specificity}"
            )
        self.specificity = specificity
        self.num_thresholds = num_thresholds
        super().__init__(
            specificity,
            num_thresholds=num_thresholds,
            class_id=class_id,
            name=name,
            dtype=dtype,
        )

    def result(self):
        sensitivities = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_negatives),
        )
        specificities = ops.divide_no_nan(
            self.true_negatives,
            ops.add(self.true_negatives, self.false_positives),
        )
        return self._find_max_under_constraint(
            specificities, sensitivities, ops.greater_equal
        )

    def get_config(self):
        config = {
            "num_thresholds": self.num_thresholds,
            "specificity": self.specificity,
        }
        base_config = super().get_config()
        return {**base_config, **config}


@keras_export("keras.metrics.SpecificityAtSensitivity")
class SpecificityAtSensitivity(SensitivitySpecificityBase):
    """Computes best specificity where sensitivity is >= specified value.

    `Sensitivity` measures the proportion of actual positives that are correctly
    identified as such `(tp / (tp + fn))`.
    `Specificity` measures the proportion of actual negatives that are correctly
    identified as such `(tn / (tn + fp))`.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the specificity at the given sensitivity. The threshold for the
    given sensitivity value is computed and used to evaluate the corresponding
    specificity.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    For additional information about specificity and sensitivity, see
    [the following](https://en.wikipedia.org/wiki/Sensitivity_and_specificity).

    Args:
        sensitivity: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds to
            use for matching the given sensitivity.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.SpecificityAtSensitivity(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.66666667

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[1, 1, 2, 2, 2])
    >>> m.result()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.SpecificityAtSensitivity()])
    ```
    """

    def __init__(
        self,
        sensitivity,
        num_thresholds=200,
        class_id=None,
        name=None,
        dtype=None,
    ):
        if sensitivity < 0 or sensitivity > 1:
            raise ValueError(
                "Argument `sensitivity` must be in the range [0, 1]. "
                f"Received: sensitivity={sensitivity}"
            )
        self.sensitivity = sensitivity
        self.num_thresholds = num_thresholds
        super().__init__(
            sensitivity,
            num_thresholds=num_thresholds,
            class_id=class_id,
            name=name,
            dtype=dtype,
        )

    def result(self):
        sensitivities = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_negatives),
        )
        specificities = ops.divide_no_nan(
            self.true_negatives,
            ops.add(self.true_negatives, self.false_positives),
        )
        return self._find_max_under_constraint(
            sensitivities, specificities, ops.greater_equal
        )

    def get_config(self):
        config = {
            "num_thresholds": self.num_thresholds,
            "sensitivity": self.sensitivity,
        }
        base_config = super().get_config()
        return {**base_config, **config}


@keras_export("keras.metrics.PrecisionAtRecall")
class PrecisionAtRecall(SensitivitySpecificityBase):
    """Computes best precision where recall is >= specified value.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the precision at the given recall. The threshold for the given
    recall value is computed and used to evaluate the corresponding precision.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    Args:
        recall: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds to
            use for matching the given recall.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.PrecisionAtRecall(0.5)
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],
    ...                sample_weight=[2, 2, 2, 1, 1])
    >>> m.result()
    0.33333333

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.PrecisionAtRecall(recall=0.8)])
    ```
    """

    def __init__(
        self, recall, num_thresholds=200, class_id=None, name=None, dtype=None
    ):
        if recall < 0 or recall > 1:
            raise ValueError(
                "Argument `recall` must be in the range [0, 1]. "
                f"Received: recall={recall}"
            )
        self.recall = recall
        self.num_thresholds = num_thresholds
        super().__init__(
            value=recall,
            num_thresholds=num_thresholds,
            class_id=class_id,
            name=name,
            dtype=dtype,
        )

    def result(self):
        recalls = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_negatives),
        )
        precisions = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_positives),
        )
        return self._find_max_under_constraint(
            recalls, precisions, ops.greater_equal
        )

    def get_config(self):
        config = {"num_thresholds": self.num_thresholds, "recall": self.recall}
        base_config = super().get_config()
        return {**base_config, **config}


@keras_export("keras.metrics.RecallAtPrecision")
class RecallAtPrecision(SensitivitySpecificityBase):
    """Computes best recall where precision is >= specified value.

    For a given score-label-distribution the required precision might not
    be achievable, in this case 0.0 is returned as recall.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the recall at the given precision. The threshold for the given
    precision value is computed and used to evaluate the corresponding recall.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    If `class_id` is specified, we calculate precision by considering only the
    entries in the batch for which `class_id` is above the threshold
    predictions, and computing the fraction of them for which `class_id` is
    indeed a correct label.

    Args:
        precision: A scalar value in range `[0, 1]`.
        num_thresholds: (Optional) Defaults to 200. The number of thresholds
            to use for matching the given precision.
        class_id: (Optional) Integer class ID for which we want binary metrics.
            This must be in the half-open interval `[0, num_classes)`, where
            `num_classes` is the last dimension of predictions.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Example:

    >>> m = keras.metrics.RecallAtPrecision(0.8)
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
    >>> m.result()
    0.5

    >>> m.reset_state()
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result()
    1.0
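
    If the requested precision cannot be reached at any threshold, the metric
    falls back to a recall of `0.0` (an illustrative sketch):

    >>> m = keras.metrics.RecallAtPrecision(0.9)
    >>> m.update_state([1, 0], [0.2, 0.8])
    >>> m.result()
    0.0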

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='binary_crossentropy',
        metrics=[keras.metrics.RecallAtPrecision(precision=0.8)])
    ```
    """

    def __init__(
        self,
        precision,
        num_thresholds=200,
        class_id=None,
        name=None,
        dtype=None,
    ):
        if precision < 0 or precision > 1:
            raise ValueError(
                "Argument `precision` must be in the range [0, 1]. "
                f"Received: precision={precision}"
            )
        self.precision = precision
        self.num_thresholds = num_thresholds
        super().__init__(
            value=precision,
            num_thresholds=num_thresholds,
            class_id=class_id,
            name=name,
            dtype=dtype,
        )

    def result(self):
        recalls = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_negatives),
        )
        precisions = ops.divide_no_nan(
            self.true_positives,
            ops.add(self.true_positives, self.false_positives),
        )
        return self._find_max_under_constraint(
            precisions, recalls, ops.greater_equal
        )

    def get_config(self):
        config = {
            "num_thresholds": self.num_thresholds,
            "precision": self.precision,
        }
        base_config = super().get_config()
        return {**base_config, **config}
 fd	Zed        Zd ZddZd Z	d Z
d Z fd	Z xZS )AUCa  Approximates the AUC (Area under the curve) of the ROC or PR curves.

    The AUC (Area under the curve) of the ROC (Receiver operating
    characteristic; default) or PR (Precision Recall) curves are quality
    measures of binary classifiers. Unlike the accuracy, and like cross-entropy
    losses, ROC-AUC and PR-AUC evaluate all the operational points of a model.

    This class approximates AUCs using a Riemann sum. During the metric
    accumulation phrase, predictions are accumulated within predefined buckets
    by value. The AUC is then computed by interpolating per-bucket averages.
    These buckets define the evaluated operational points.

    This metric creates four local variables, `true_positives`,
    `true_negatives`, `false_positives` and `false_negatives` that are used to
    compute the AUC.  To discretize the AUC curve, a linearly spaced set of
    thresholds is used to compute pairs of recall and precision values. The area
    under the ROC-curve is therefore computed using the height of the recall
    values by the false positive rate, while the area under the PR-curve is the
    computed using the height of the precision values by the recall.

    This value is ultimately returned as `auc`, an idempotent operation that
    computes the area under a discretized curve of precision versus recall
    values (computed using the aforementioned variables). The `num_thresholds`
    variable controls the degree of discretization with larger numbers of
    thresholds more closely approximating the true AUC. The quality of the
    approximation may vary dramatically depending on `num_thresholds`. The
    `thresholds` parameter can be used to manually specify thresholds which
    split the predictions more evenly.

    For a best approximation of the real AUC, `predictions` should be
    distributed approximately uniformly in the range `[0, 1]` (if
    `from_logits=False`). The quality of the AUC approximation may be poor if
    this is not the case. Setting `summation_method` to 'minoring' or 'majoring'
    can help quantify the error in the approximation by providing lower or upper
    bound estimate of the AUC.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        num_thresholds: (Optional) The number of thresholds to
            use when discretizing the roc curve. Values must be > 1.
            Defaults to `200`.
        curve: (Optional) Specifies the name of the curve to be computed,
            `'ROC'` (default) or `'PR'` for the Precision-Recall-curve.
        summation_method: (Optional) Specifies the [Riemann summation method](
              https://en.wikipedia.org/wiki/Riemann_sum) used.
              'interpolation' (default) applies mid-point summation scheme for
              `ROC`.  For PR-AUC, interpolates (true/false) positives but not
              the ratio that is precision (see Davis & Goadrich 2006 for
              details); 'minoring' applies left summation for increasing
              intervals and right summation for decreasing intervals; 'majoring'
              does the opposite.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        thresholds: (Optional) A list of floating point values to use as the
            thresholds for discretizing the curve. If set, the `num_thresholds`
            parameter is ignored. Values should be in `[0, 1]`. Endpoint
            thresholds equal to {`-epsilon`, `1+epsilon`} for a small positive
            epsilon value will be automatically included with these to correctly
            handle predictions equal to exactly 0 or 1.
        multi_label: boolean indicating whether multilabel data should be
            treated as such, wherein AUC is computed separately for each label
            and then averaged across labels, or (when `False`) if the data
            should be flattened into a single label before AUC computation. In
            the latter case, when multilabel data is passed to AUC, each
            label-prediction pair is treated as an individual data point. Should
            be set to `False` for multi-class data.
        num_labels: (Optional) The number of labels, used when `multi_label` is
            True. If `num_labels` is not specified, then state variables get
            created on the first call to `update_state`.
        label_weights: (Optional) list, array, or tensor of non-negative weights
            used to compute AUCs for multilabel data. When `multi_label` is
            True, the weights are applied to the individual label AUCs when they
            are averaged to produce the multi-label AUC. When it's False, they
            are used to weight the individual label predictions in computing the
            confusion matrix on the flattened data. Note that this is unlike
            `class_weights` in that `class_weights` weights the example
            depending on the value of its label, whereas `label_weights` depends
            only on the index of that label before flattening; therefore
            `label_weights` should not be used for multi-class data.
        from_logits: boolean indicating whether the predictions (`y_pred` in
        `update_state`) are probabilities or sigmoid logits. As a rule of thumb,
        when using a keras loss, the `from_logits` constructor argument of the
        loss should match the AUC `from_logits` constructor argument.

    Example:

    >>> m = keras.metrics.AUC(num_thresholds=3)
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])
    >>> # threshold values are [0 - 1e-7, 0.5, 1 + 1e-7]
    >>> # tp = [2, 1, 0], fp = [2, 0, 0], fn = [0, 1, 2], tn = [0, 2, 2]
    >>> # tp_rate = recall = [1, 0.5, 0], fp_rate = [1, 0, 0]
    >>> # auc = ((((1 + 0.5) / 2) * (1 - 0)) + (((0.5 + 0) / 2) * (0 - 0)))
    >>> #     = 0.75
    >>> m.result()
    0.75

    >>> m.reset_state()
    >>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result()
    1.0

    Usage with `compile()` API:

    ```python
    # Reports the AUC of a model outputting a probability.
    model.compile(optimizer='sgd',
                  loss=keras.losses.BinaryCrossentropy(),
                  metrics=[keras.metrics.AUC()])

    # Reports the AUC of a model outputting a logit.
    model.compile(optimizer='sgd',
                  loss=keras.losses.BinaryCrossentropy(from_logits=True),
                  metrics=[keras.metrics.AUC(from_logits=True)])
    ```
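
    When `multi_label=True`, one AUC is computed per label and the per-label
    values are averaged (a minimal sketch, assuming a two-label problem):

    ```python
    m = keras.metrics.AUC(multi_label=True, num_labels=2)
    m.update_state([[0, 1], [1, 0]], [[0.4, 0.6], [0.7, 0.3]])
    # Each label has one positive and one negative example, ranked correctly,
    # so each per-label AUC is 1.0 and the averaged result is 1.0.
    ```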
    c                    d| _         t        |t        j                        rC|t	        t        j                        vr(t        d| dt	        t        j                               t        |t        j                        rC|t	        t        j                        vr(t        d| dt	        t        j                               |d u| _        |Tt        |      dz   | _	        t        |      }t        j                  t        j                  dg|z   dgz               | _        nH|dk  rt        d	|       || _	        t        |dz
        D cg c]  }|dz   dz  |dz
  z   }}d
| _        t        j                  dt!        j"                         z
  g|z   dt!        j"                         z   gz         | _        t        |t        j                        r|| _        n$t        j                  j)                  |      | _        t        |t        j                        r|| _        n$t        j                  j)                  |      | _        t,        | ]  ||       || _        || _        |	)t5        j                  |	| j6                        }	|	| _        nd | _        |
| _        d| _        | j0                  r|rd |g}| j?                  |       y y |rt        d      | j?                  d        y c c}w )Nr]   z Invalid `curve` argument value "z". Expected one of: z+Invalid `summation_method` argument value "r   r   r   r2   zKArgument `num_thresholds` must be an integer > 1. Received: num_thresholds=Tr   )r   Fz7`num_labels` is needed only when `multi_label` is True.) r`   
isinstancer   AUCCurvelistr   AUCSummationMethod_init_from_thresholdsr!   rq   sortedr   nparrayr   r   r   epsilon_thresholdscurvefrom_strsummation_methodr   r   multi_label
num_labelsr   r   label_weights_from_logits_built_build)r#   rq   r   r   r   r   r   r   r   r   from_logitsr   r   r%   s                r&   r   zAUC.__init__  s     e]334d""G
 :
 25' :$$()?)?$@#AC  m>>
d=+K+K&LL##3"4 5$$()I)I$J#KM  &0t%;"!"%j/A"5D
+J>>HHcUZ/3%78 / " 00>/?A  #1D ~12 Q#!!34J  37D/ 887??$$%
2cGOO<M6M5NN
 e]334DJ&//88?DJ&(H(HI$4D!$1$D$D$M$M %D! 	d%0 '$$IIm4::FM!.D "&D'z*E"   M  KK]s   K%c                 ,    t        | j                        S )z'The thresholds used for evaluating AUC.)r   r   )r#   s    r&   r   zAUC.thresholds
  s     D$$%%r'   c                 @   | j                   rKt        |      dk7  rt        dt        |       d|       |d   | _        | j                  | j                  g}n| j                  g}|| _        | j                  |t        j                         d      | _	        | j                  |t        j                         d      | _
        | j                  |t        j                         d      | _        | j                  |t        j                         d	      | _        d
| _        y)zKInitialize TP, FP, TN, and FN tensors, given the shape of the
        data.r   z>`y_pred` must have rank 2 when `multi_label=True`. Found rank z$. Full shape received for `y_pred`: r2   r^   r   r_   r   r}   TN)r   r!   r   _num_labelsrq   _build_input_shaper    r   r"   r^   r_   r   r}   r   )r#   r   variable_shapes      r&   r   z
AUC._build  s4    5zQ ""%e* .99>A 
  %QxD"1143C3CDN"112N"'"// $**,! 0 

  $00 $**,"  1  

 #// $**,! 0 

  $00 $**,"  1  
 r'   c           
         | j                   s| j                  |j                         | j                  s| j                  |dfg}| j                  rE|j                  | j                  df| j                  df| j                  df| j                  dfg       | j                  |j                  | j                  df       | j                  rdn| j                  }| j                  rt        j                  |      }t        j                  t        j                   j"                  | j                  t        j                   j$                  | j                  t        j                   j&                  | j                  t        j                   j(                  | j                  i||| j*                  | j,                  || j                  |       y)a  Accumulates confusion matrix statistics.

        Args:
            y_true: The ground truth values.
            y_pred: The predicted values.
            sample_weight: Optional weighting of each example. Can
                be a tensor whose rank is either 0, or the same rank as
                `y_true`, and must be broadcastable to `y_true`. Defaults to
                `1`.
        N)NL)Tr   )r   )r)   r*   r   r   )r   r   r   r   r   extendr^   r   r_   r}   appendr   r   sigmoidr   r+   rG   rY   rT   rH   rP   r   r   )r#   r-   r.   r*   shapesr   s         r&   r/   zAUC.update_state7  s    {{KK% 2 2 >z*+F ,,j9,,j9--z:--z:	 !!- t116:;
 !% 0 0d6H6H ((0F77--<<d>Q>Q--<<d>Q>Q--==t?S?S--==t?S?S	 *.*M*M'(('	
r'   c                    t        j                  | j                  d| j                  dz
   | j                  dd       }t        j                  | j                  | j
                        }t        j                  |d| j                  dz
   |dd       }t        j                  |t        j                  |d            }t        j                  | j                  dd t        j                  ||dd             }t        j                  t        j                  |d| j                  dz
   dkD  |dd dkD        t        j                  |d| j                  dz
   t        j                  |dd d            t        j                  |dd             }t        j                  t        j                  |t        j                  |t        j                  |t        j                  |                        t        j                  t        j                  | j                  dd | j                  dd       d            }| j                  rt        j                  |d      }| j                   t        j"                  |      S t        j                  t        j                  t        j                  || j                               t        j                  | j                               S t        j                  |      S )a  Interpolation formula inspired by section 4 of Davis & Goadrich 2006.

        https://www.biostat.wisc.edu/~page/rocpr.pdf

        Note here we derive & use a closed formula not present in the paper
        as follows:

            Precision = TP / (TP + FP) = TP / P

        Modeling all of TP (true positive), FP (false positive) and their sum
        P = TP + FP (predicted positive) as varying linearly within each
        interval [A, B] between successive thresholds, we get

            Precision slope = dTP / dP
                            = (TP_B - TP_A) / (P_B - P_A)
                            = (TP - TP_A) / (P - P_A)
            Precision = (TP_A + slope * (P - P_A)) / P

        The area within the interval is (slope / total_pos_weight) times

            int_A^B{Precision.dP} = int_A^B{(TP_A + slope * (P - P_A)) * dP / P}
            int_A^B{Precision.dP} = int_A^B{slope * dP + intercept * dP / P}

        where intercept = TP_A - slope * P_A = TP_B - slope * P_B, resulting in

            int_A^B{Precision.dP} = TP_B - TP_A + intercept * log(P_B / P_A)

        Bringing back the factor (slope / total_pos_weight) we'd put aside, we
        get

            slope * [dTP + intercept *  log(P_B / P_A)] / total_pos_weight

        where dTP == TP_B - TP_A.

        Note that when P_A == 0 the above calculation simplifies into

            int_A^B{Precision.dTP} = int_A^B{slope * dTP}
                                   = slope * (TP_B - TP_A)

        which is really equivalent to imputing constant precision throughout the
        first bucket having >0 true positives.

        Returns:
            pr_auc: an approximation of the area under the P-R curve.
        Nr2   r   axis)r   subtractr^   rq   rk   r_   rj   maximummultiplyr   logical_and	ones_likelogr}   r   sumr   mean)	r#   dtppdp
prec_slope	interceptsafe_p_ratiopr_auc_incrementby_label_aucs	            r&   interpolate_pr_auczAUC.interpolate_pr_aucp  sh   ^ ll 9$"5"5"9:#
 GGD'')=)=>\\!5d11A56!">&&sCKKA,>?
LL#S\\*ae%D
	 yyOOA7 3 3a 781<aeaiH+D''!+,ckk!AB%.C MM!AB% 
 ,,LLcll9cggl6KLM KK++AB/1E1Eab1IJA
 77#3!<L!!)xx-- ((GGCLLt7I7IJKGGD../ 
 77+,,r'   c                    | j                   t        j                  j                  k(  r7| j                  t        j
                  j                  k(  r| j                         S t        j                  | j                  t        j                  | j                  | j                              }| j                   t        j                  j                  k(  rMt        j                  | j                  t        j                  | j                  | j                              }|}|}nLt        j                  | j                  t        j                  | j                  | j                              }|}|}| j                  t        j
                  j                  k(  r>t        j                   t        j                  |d | j"                  dz
   |dd        d      }nz| j                  t        j
                  j$                  k(  r*t        j&                  |d | j"                  dz
   |dd        }n)t        j(                  |d | j"                  dz
   |dd        }t        j*                  t        j,                  |d | j"                  dz
   |dd        |      }| j.                  rt        j0                  |d      }| j2                  t        j4                  |      S t        j                  t        j0                  t        j*                  || j2                              t        j0                  | j2                              S t        j0                  |      S )Nr2   g       @r   r   )r   r   r   PRr   r   INTERPOLATIONr   r   rj   r^   rk   r}   ROCr_   r   dividerq   MINORINGminimumr   r   r   r   r   r   r   )	r#   r   fp_ratexyr   heightsriemann_termsr   s	            r&   r5   z
AUC.result  s   JJ-00333%%//==> **,, ""GGD'')=)=>
 :://333''$$,,d.A.ABG AA))##++T-A-ABI AA !!//==> jj3D//!34ae<cG ""m&F&F&O&OOkk!$=d&9&9A&=">!"FG kk!$=d&9&9A&=">!"FG LL4T00145qu=w
 77=q9L!!)xx-- ((GGCLLt7I7IJKGGD../ 
 77=))r'   c                    | j                   r| j                  r| j                  | j                  f}n| j                  f}| j                  j                  t        j                  |             | j                  j                  t        j                  |             | j                  j                  t        j                  |             | j                  j                  t        j                  |             y y r<   )r   r   rq   r   r^   rn   r   ro   r_   r   r}   )r#   r   s     r&   rr   zAUC.reset_state	  s    ;;"&"5"5t7G7G!H"&"5"5!7&&syy'@A  ''		.(AB&&syy'@A  ''		.(AB r'   c                 4   | j                   }| j                  | j                  j                  | j                  j                  | j
                  | j                  || j                  d}| j                  r| j                  dd |d<   t        | -         }i ||S )N)rq   r   r   r   r   r   r   r2   r   )r   rq   r   r   r   r   r   r   r   r   r   r7   )r#   r   r9   r:   r%   s       r&   r7   zAUC.get_config  s    **"11ZZ%% $ 5 5 ; ;++//*,,
 %% $(??1R#8F< g(*(+(((r'   )
r   r  interpolationNNNFNNFr<   )r=   r>   r?   r@   r   propertyr   r   r/   r   r5   rr   r7   rA   rB   s   @r&   r   r   +  sn    ur (dL & &&P7
rX-t=*~
C) )r'   r   )numpyr   	keras.srcr   r   r   r   keras.src.api_exportr   keras.src.metricsr   keras.src.metrics.metricr	   keras.src.utils.python_utilsr
   r   rD   rN   rR   rW   r[   r{   r   r   r   r   r   r    r'   r&   <module>r     s    !  "  - + + 0>)V >)B ,-(
3 (
 .(
V ,-(
3 (
 .(
V +,(
2 (
 -(
V +,(
2 (
 -(
V '(\) \) )\)~ $%L)V L) &L)^q> q>h 67d)9 d) 8d)N 67d)9 d) 8d)N /0S)2 S) 1S)l /0^)2 ^) 1^)B !"|)& |) #|)r'   