
    2VhY                    
   d Z ddlZddlmZ ddlmZ ddlmZ ddlmZ ddlm	Z	 ddl
mZ dd	lmZ dd
lmZ ddlmZ ddlmZ  G d de      Z eddg      d        Z G d de      Z eddg      d        Z G d de      Z eddg      d        Z G d de      Z eddg      d         Z G d! d"e      Z ed#d$g      d%        Z G d& d'e      Z ed(d)g      d*        Z G d+ d,e      Z  ed-d.g      dd/       Z! G d0 d1e      Z" ed2d3g      d4        Z# G d5 d6e      Z$ eg d7      d8        Z% G d9 d:e      Z& ed;d<g      dd=       Z' G d> d?e      Z( ed@dAg      dB        Z) G dC dDe      Z* edEdFg      ddG       Z+ G dH dIe      Z, edJdKg      dL        Z- G dM dNe      Z. eg dO      dP        Z/ G dQ dRe      Z0 edSdTg      ddU       Z1 G dV dWe      Z2 edXdYg      dZ        Z3 G d[ d\e      Z4 ed]d^g      dd_       Z5 G d` dae      Z6 edbdcg      ddd       Z7 G de dfe      Z8 edgdhg      ddi       Z9 G dj dke      Z: edldmg      dn        Z; G do dpe      Z< edqdrg      ds        Z= G dt due      Z> edvdwg      ddx       Z? G dy dze      Z@ ed{d|g      d}        ZA G d~ de      ZB eddg      dd       ZC G d de      ZD eddg      dd       ZE G d de      ZF eddg      dd       ZG G d de      ZH eddg      	 	 	 dd       ZI G d de      ZJ eddg      	 	 	 dd       ZK G d de      ZL eddg      	 	 	 	 dd       ZM G d de      ZN eddg      	 	 	 	 dd       ZO G d de      ZP eddg      	 	 	 	 dd       ZQ G d de      ZR eddg      	 	 	 	 dd       ZS G d de      ZT eddg      dd       ZU G d de      ZV eddg      dd       ZW G d de      ZX eddg      dd       ZY G d de      ZZ eddg      dd       Z[ G d de      Z\ eddg      	 d dÄ       Z] G dĄ de      Z^ eddg      ddȄ       Z_ G dɄ de      Z` eddg      	 dd̈́       Za G d΄ de      Zb eddg      dd҄       Zc G dӄ de      Zd eddg      	 	 	 	 	 ddׄ       Ze G d؄ de      Zf eddg      dd܄       Zgdd݄Zh G dބ de      Zi eddg      d        Zj G d de      Zk eddg      	 	 	 	 	 	 dd       Zl G d de      Zm eddg      dd       ZnddZo G d de      Zp eddg      d        Zqd Zry(  z>Commonly-used neural network operations not included in NumPy.    N)backend)keras_export)KerasTensor)any_symbolic_tensors)standardize_data_format)#compute_conv_transpose_output_shape)is_keras_tensor)operation_utils)	Operation)reduce_shapec                       e Zd Zd Zd Zy)Reluc                 @    t         j                  j                  |      S N)r   nnreluselfxs     @/home/dcms/DCMS/lib/python3.12/site-packages/keras/src/ops/nn.pycallz	Relu.call       zzq!!    c                 D    t        |j                  |j                        S Ndtyper   shaper   r   s     r   compute_output_speczRelu.compute_output_spec       177!''22r   N__name__
__module____qualname__r   r     r   r   r   r          "3r   r   zkeras.ops.reluzkeras.ops.nn.reluc                     t        | f      rt               j                  |       S t        j                  j                  |       S )aQ  Rectified linear unit activation function.

    It is defined as `f(x) = max(0, x)`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x1 = keras.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2])
    >>> keras.ops.relu(x1)
    array([0.0, 0.0, 1.0, 0.2], dtype=float32)
    )r   r   symbolic_callr   r   r   r   s    r   r   r      s4    $ QD!v##A&&::??1r   c                       e Zd Zd Zd Zy)Relu6c                 @    t         j                  j                  |      S r   )r   r   relu6r   s     r   r   z
Relu6.call3   s    zz""r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zRelu6.compute_output_spec6   r!   r   Nr"   r&   r   r   r,   r,   2   s    #3r   r,   zkeras.ops.relu6zkeras.ops.nn.relu6c                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Rectified linear unit activation function with upper bound of 6.

    It is defined as `f(x) = np.clip(x, 0, 6)`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0])
    >>> keras.ops.relu6(x)
    array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)
    )r   r,   r)   r   r   r.   r*   s    r   r.   r.   :   s6    $ QD!w$$Q''::Ar   c                       e Zd Zd Zd Zy)Sigmoidc                 @    t         j                  j                  |      S r   )r   r   sigmoidr   s     r   r   zSigmoid.callR   s    zz!!!$$r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSigmoid.compute_output_specU   r!   r   Nr"   r&   r   r   r2   r2   Q   s    %3r   r2   zkeras.ops.sigmoidzkeras.ops.nn.sigmoidc                     t        | f      rt               j                  |       S t        j                  j                  |       S )ap  Sigmoid activation function.

    It is defined as `f(x) = 1 / (1 + exp(-x))`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
    >>> keras.ops.sigmoid(x)
    array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)

    )r   r2   r)   r   r   r4   r*   s    r   r4   r4   Y   s6    & QD!y&&q))::a  r   c                       e Zd Zd Zd Zy)SparseSigmoidc                 @    t         j                  j                  |      S r   )r   r   sparse_sigmoidr   s     r   r   zSparseSigmoid.callr   s    zz((++r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    z!SparseSigmoid.compute_output_specu   r!   r   Nr"   r&   r   r   r8   r8   q   s    ,3r   r8   zkeras.ops.sparse_sigmoidzkeras.ops.nn.sparse_sigmoidc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Sparse sigmoid activation function.

    It is defined as

    `f(x) = 0` for `x <= -1`,
    `f(x) = 0.5 * (x + 1)` for `-1 < x < 1`,
    `f(x) = 1` for `x >= 1`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
    >>> keras.ops.sparse_sigmoid(x)
    array([0. , 1. , 0.5, 1. , 1. ], dtype=float32)

    )r   r8   r)   r   r   r:   r*   s    r   r:   r:   y   s6    . QD!,,Q//::$$Q''r   c                       e Zd Zd Zd Zy)Softplusc                 @    t         j                  j                  |      S r   )r   r   softplusr   s     r   r   zSoftplus.call       zz""1%%r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSoftplus.compute_output_spec   r!   r   Nr"   r&   r   r   r>   r>          &3r   r>   zkeras.ops.softpluszkeras.ops.nn.softplusc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Softplus activation function.

    It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural
    logarithm and `exp` is the exponential function.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555])
    >>> keras.ops.softplus(x)
    array([0.45366603, 0.6931472, 1.008666], dtype=float32)

    )r   r>   r)   r   r   r@   r*   s    r   r@   r@      s6    ( QD!z''**::q!!r   c                       e Zd Zd Zd Zy)Softsignc                 @    t         j                  j                  |      S r   )r   r   softsignr   s     r   r   zSoftsign.call   rA   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSoftsign.compute_output_spec   r!   r   Nr"   r&   r   r   rF   rF      rC   r   rF   zkeras.ops.softsignzkeras.ops.nn.softsignc                     t        | f      rt               j                  |       S t        j                  j                  |       S )as  Softsign activation function.

    It is defined as `f(x) = x / (abs(x) + 1)`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-0.100, -10.0, 1.0, 0.0, 100.0])
    >>> keras.ops.softsign(x)
    Array([-0.09090909, -0.90909094, 0.5, 0.0, 0.990099], dtype=float32)

    )r   rF   r)   r   r   rH   r*   s    r   rH   rH      s6    & QD!z''**::q!!r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )
SoftShrinkc                 0    t         |           || _        y r   super__init__	thresholdr   rQ   	__class__s     r   rP   zSoftShrink.__init__       "r   c                 V    t         j                  j                  || j                        S r   )r   r   soft_shrinkrQ   r   s     r   r   zSoftShrink.call       zz%%a88r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSoftShrink.compute_output_spec   r!   r   g      ?r#   r$   r%   rP   r   r    __classcell__rS   s   @r   rL   rL          #93r   rL   zkeras.ops.soft_shrinkzkeras.ops.nn.soft_shrinkc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )a  Soft Shrink activation function.

    It is defined as

    `f(x) = x - threshold` if `x > threshold`,
    `f(x) = x + threshold` if `x < -threshold`,
    `f(x) = 0` otherwise.

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1.0, 0.0, 1.0])
    >>> x_soft_shrink = keras.ops.soft_shrink(x)
    >>> print(x_soft_shrink)
    array([-0.5  0.   0.5], shape=(3,), dtype=float64)

    )r   rL   r)   r   r   rV   r   rQ   s     r   rV   rV      s;    2 QD!)$22155::!!!Y//r   c                       e Zd Zd Zd Zy)
SparsePlusc                 @    t         j                  j                  |      S r   )r   r   sparse_plusr   s     r   r   zSparsePlus.call      zz%%a((r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSparsePlus.compute_output_spec  r!   r   Nr"   r&   r   r   ra   ra          )3r   ra   zkeras.ops.sparse_pluszkeras.ops.nn.sparse_plusc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  SparsePlus activation function.

    It is defined as

    `f(x) = 0` for `x <= -1`.
    `f(x) = (1/4) * (x + 1)^2` for `-1 < x < 1`.
    `f(x) = x` for `x >= 1`.


    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1.0, 0.0, 1.0])
    >>> x_sparse_plus = keras.ops.sparse_plus(x)
    >>> print(x_sparse_plus)
    Array([0.   0.25 1.  ], shape=(3,), dtype=float32)

    )r   ra   r)   r   r   rc   r*   s    r   rc   rc     s6    2 QD!|))!,,::!!!$$r   c                       e Zd Zd Zd Zy)Siluc                 @    t         j                  j                  |      S r   )r   r   silur   s     r   r   z	Silu.call'  r   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSilu.compute_output_spec*  r!   r   Nr"   r&   r   r   ri   ri   &  r'   r   ri   )zkeras.ops.siluzkeras.ops.nn.siluzkeras.ops.swishzkeras.ops.nn.swishc                     t        | f      rt               j                  |       S t        j                  j                  |       S )aZ  Sigmoid Linear Unit (SiLU) activation function, also known as Swish.

    The SiLU activation function is computed by the sigmoid function multiplied
    by its input. It is defined as `f(x) = x * sigmoid(x)`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
    >>> keras.ops.sigmoid(x)
    array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
    >>> keras.ops.silu(x)
    array([-0.0148357, 0.7310586, 0.0, 0.7310586, 5.9851646], dtype=float32)

    )r   ri   r)   r   r   rk   r*   s    r   rk   rk   .  s4    : QD!v##A&&::??1r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )
Squareplusc                 0    t         |           || _        y r   )rO   rP   b)r   rq   rS   s     r   rP   zSquareplus.__init__Q  s    r   c                 V    t         j                  j                  || j                        S r   )r   r   
squareplusrq   r   s     r   r   zSquareplus.callU  s    zz$$Q//r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSquareplus.compute_output_specX  r!   r      rZ   r\   s   @r   ro   ro   P  s    03r   ro   zkeras.ops.squarepluszkeras.ops.nn.squareplusc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )a  Squareplus activation function.

    The Squareplus activation function is defined as:

    `f(x) = (x + sqrt(x^2 + b)) / 2`

    Args:
        x: Input tensor.
        b: Smoothness parameter. Defaults to 4.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1.0, 0.0, 1.0])
    >>> x_squareplus = keras.ops.squareplus(x)
    >>> print(x_squareplus)
    array([0.6180, 1.0000, 1.6180], dtype=float32)

    )r   ro   r)   r   r   rs   )r   rq   s     r   rs   rs   \  s:    . QD!!}**1--::  A&&r   c                       e Zd Zd Zd Zy)
LogSigmoidc                 @    t         j                  j                  |      S r   )r   r   log_sigmoidr   s     r   r   zLogSigmoid.cally  rd   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zLogSigmoid.compute_output_spec|  r!   r   Nr"   r&   r   r   ry   ry   x  rf   r   ry   zkeras.ops.log_sigmoidzkeras.ops.nn.log_sigmoidc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Logarithm of the sigmoid activation function.

    It is defined as `f(x) = log(1 / (1 + exp(-x)))`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0])
    >>> keras.ops.log_sigmoid(x)
    array([-1.0000418, -0.6931472, -0.474077, -0.00671535], dtype=float32)

    )r   ry   r)   r   r   r{   r*   s    r   r{   r{     s6    0 QD!|))!,,::!!!$$r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )	LeakyReluc                 0    t         |           || _        y r   )rO   rP   negative_slope)r   r   rS   s     r   rP   zLeakyRelu.__init__  s    ,r   c                 V    t         j                  j                  || j                        S r   )r   r   
leaky_relur   r   s     r   r   zLeakyRelu.call  s    zz$$Q(;(;<<r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zLeakyRelu.compute_output_spec  r!   r   g?rZ   r\   s   @r   r   r     s    -=3r   r   zkeras.ops.leaky_reluzkeras.ops.nn.leaky_reluc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )aF  Leaky version of a Rectified Linear Unit activation function.

    It allows a small gradient when the unit is not active, it is defined as:

    `f(x) = alpha * x for x < 0` or `f(x) = x for x >= 0`.

    Args:
        x: Input tensor.
        negative_slope: Slope of the activation function at x < 0.
            Defaults to `0.2`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_leaky_relu = keras.ops.leaky_relu(x)
    >>> print(x_leaky_relu)
    array([-0.2,  0. ,  1. ], shape=(3,), dtype=float64)

    )r   )r   r   r)   r   r   r   )r   r   s     r   r   r     s>    0 QD!(66q99::  > BBr   c                       e Zd Zd Zd Zy)HardSigmoidc                 @    t         j                  j                  |      S r   )r   r   hard_sigmoidr   s     r   r   zHardSigmoid.call  s    zz&&q))r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zHardSigmoid.compute_output_spec  r!   r   Nr"   r&   r   r   r   r     s    *3r   r   zkeras.ops.hard_sigmoidzkeras.ops.nn.hard_sigmoidc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Hard sigmoid activation function.

    It is defined as:

    `0 if x < -2.5`, `1 if x > 2.5`, `(0.2 * x) + 0.5 if -2.5 <= x <= 2.5`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_hard_sigmoid = keras.ops.hard_sigmoid(x)
    >>> print(x_hard_sigmoid)
    array([0.3, 0.5, 0.7], shape=(3,), dtype=float64)

    )r   r   r)   r   r   r   r*   s    r   r   r     s6    6 QD!}**1--::""1%%r   c                       e Zd Zd Zd Zy)HardSiluc                 @    t         j                  j                  |      S r   )r   r   	hard_silur   s     r   r   zHardSilu.call      zz##A&&r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zHardSilu.compute_output_spec  r!   r   Nr"   r&   r   r   r   r     s    '3r   r   )zkeras.ops.hard_siluzkeras.ops.nn.hard_siluzkeras.ops.hard_swishzkeras.ops.nn.hard_swishc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Hard SiLU activation function, also known as Hard Swish.

    It is defined as:

    - `0` if `if x < -3`
    - `x` if `x > 3`
    - `x * (x + 3) / 6` if `-3 <= x <= 3`

    It's a faster, piecewise linear approximation of the silu activation.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-3.0, -1.0, 0.0, 1.0, 3.0])
    >>> keras.ops.hard_silu(x)
    array([-0.0, -0.3333333, 0.0, 0.6666667, 3.0], shape=(5,), dtype=float32)

    )r   r   r)   r   r   r   r*   s    r   r   r     s7    @ QD!z''**::""r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )Eluc                 0    t         |           || _        y r   rO   rP   alphar   r   rS   s     r   rP   zElu.__init__      
r   c                 X    t         j                  j                  || j                        S )Nr   )r   r   elur   r   s     r   r   zElu.call   s    zz~~atzz~22r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zElu.compute_output_spec#  r!   r         ?rZ   r\   s   @r   r   r     s    33r   r   zkeras.ops.eluzkeras.ops.nn.eluc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )a  Exponential Linear Unit activation function.

    It is defined as:

    `f(x) =  alpha * (exp(x) - 1.) for x < 0`, `f(x) = x for x >= 0`.

    Args:
        x: Input tensor.
        alpha: A scalar, slope of positive section. Defaults to `1.0`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_elu = keras.ops.elu(x)
    >>> print(x_elu)
    array([-0.63212055, 0., 1.], shape=(3,), dtype=float64)

    r   )r   r   r)   r   r   r   r   r   s     r   r   r   '  s:    . QD!5z''**::>>!5>))r   c                       e Zd Zd Zd Zy)Seluc                 @    t         j                  j                  |      S r   )r   r   selur   s     r   r   z	Selu.callD  r   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSelu.compute_output_specG  r!   r   Nr"   r&   r   r   r   r   C  r'   r   r   zkeras.ops.seluzkeras.ops.nn.seluc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Scaled Exponential Linear Unit (SELU) activation function.

    It is defined as:

    `f(x) =  scale * alpha * (exp(x) - 1.) for x < 0`,
    `f(x) = scale * x for x >= 0`.

    Args:
        x: Input tensor.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_selu = keras.ops.selu(x)
    >>> print(x_selu)
    array([-1.11133055, 0., 1.05070098], shape=(3,), dtype=float64)

    )r   r   r)   r   r   r   r*   s    r   r   r   K  s4    . QD!v##A&&::??1r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )Geluc                 0    t         |           || _        y r   )rO   rP   approximate)r   r   rS   s     r   rP   zGelu.__init__h      &r   c                 V    t         j                  j                  || j                        S r   )r   r   gelur   r   s     r   r   z	Gelu.calll  s    zzq$"2"233r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zGelu.compute_output_speco  r!   r   TrZ   r\   s   @r   r   r   g  s    '43r   r   zkeras.ops.geluzkeras.ops.nn.geluc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )a  Gaussian Error Linear Unit (GELU) activation function.

    If `approximate` is `True`, it is defined as:
    `f(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`

    Or if `approximate` is `False`, it is defined as:
    `f(x) = x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
    where `P(X) ~ N(0, 1)`.

    Args:
        x: Input tensor.
        approximate: Approximate version of GELU activation. Defaults to `True`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_gelu = keras.ops.gelu(x)
    >>> print(x_gelu)
    array([-0.15865525, 0., 0.84134475], shape=(3,), dtype=float64)

    )r   r   r)   r   r   r   )r   r   s     r   r   r   s  s9    4 QD!K ..q11::??1k**r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )Celuc                 0    t         |           || _        y r   r   r   s     r   rP   zCelu.__init__  r   r   c                 V    t         j                  j                  || j                        S r   )r   r   celur   r   s     r   r   z	Celu.call  s    zzq$**--r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zCelu.compute_output_spec  r!   r   r   rZ   r\   s   @r   r   r     s    .3r   r   zkeras.ops.celuzkeras.ops.nn.celuc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )u  Continuously-differentiable exponential linear unit.

    It is defined as:

    `f(x) =  alpha * (exp(x / alpha) - 1) for x < 0`, `f(x) = x for x >= 0`.

    Args:
        x: Input tensor.
        alpha: the α value for the CELU formulation. Defaults to `1.0`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_celu = keras.ops.celu(x)
    >>> print(x_celu)
    array([-0.63212056, 0. , 1. ], shape=(3,), dtype=float64)

    )r   r   r)   r   r   r   r   s     r   r   r     s8    . QD!E{((++::??1e$$r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )Gluc                 0    t         |           || _        y r   rO   rP   axisr   r   rS   s     r   rP   zGlu.__init__      	r   c                 X    t         j                  j                  || j                        S Nr   )r   r   glur   r   s     r   r   zGlu.call  s    zz~~adii~00r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zGlu.compute_output_spec  r!   r   rZ   r\   s   @r   r   r     s    13r   r   zkeras.ops.gluzkeras.ops.nn.gluc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )a  Gated Linear Unit (GLU) activation function.

    It is defined as:

    `f(x) = a * sigmoid(b)`
    where `x` is split into `a` and `b` along the given axis.

    Args:
        x: Input tensor.
        axis: The axis along which to split the input tensor. Defaults to `-1`.

    Returns:
        A tensor with the same shape as half of the input.

    Example:

    >>> x = np.array([-1., 0., 1. , 1.])
    >>> x_glu = keras.ops.glu(x)
    >>> print(x_glu)
    array([-0.73105858, 0. ], shape=(2,), dtype=float64)

    r   )r   r   r)   r   r   r   r   r   s     r   r   r     s:    0 QD!4y&&q))::>>!$>''r   c                   *     e Zd Z fdZd Zd Z xZS )
TanhShrinkc                 "    t         |           y r   rO   rP   r   rS   s    r   rP   zTanhShrink.__init__      r   c                 @    t         j                  j                  |      S r   )r   r   tanh_shrinkr   s     r   r   zTanhShrink.call  rd   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zTanhShrink.compute_output_spec  r!   r   rZ   r\   s   @r   r   r     s    )3r   r   zkeras.ops.tanh_shrinkzkeras.ops.nn.tanh_shrinkc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Applies the tanh shrink function element-wise.

    It is defined as:

    `f(x) = x - tanh(x)`.

    Args:
        x: Input tensor.

    Returns:
        Output tensor of the same shape as `x`, where each element is
        transformed according to the tanh shrink operation.

    Example:

    >>> x = np.array([ -1., 0., 1.])
    >>> x_tanh_shrink = keras.ops.tanh_shrink(x)
    >>> print(x_tanh_shrink)
    array([-0.23840584  0.  0.23840584], shape=(3,), dtype=float64)

    )r   r   r)   r   r   r   r*   s    r   r   r     s6    . QD!|))!,,::!!!$$r   c                   *     e Zd Z fdZd Zd Z xZS )HardTanhc                 "    t         |           y r   r   r   s    r   rP   zHardTanh.__init__  r   r   c                 @    t         j                  j                  |      S r   )r   r   	hard_tanhr   s     r   r   zHardTanh.call  r   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zHardTanh.compute_output_spec  r!   r   rZ   r\   s   @r   r   r   
  s    '3r   r   zkeras.ops.hard_tanhzkeras.ops.nn.hard_tanhc                     t        | f      rt               j                  |       S t        j                  j                  |       S )a  Applies the HardTanh function element-wise.

    It is defined as:

    `f(x) = -1 for x < -1`, `f(x) = x for -1 <= x <= 1`, `f(x) = 1 for x > 1`.

    Args:
        x: Input tensor.

    Returns:
        Output tensor of same shape as `x`
        where values are clamped between -1 and 1.

    Example:

    >>> x = np.array([-2., -1., 0., 1., 2.])
    >>> x_hard_tanh = keras.ops.hard_tanh(x)
    >>> print(x_hard_tanh)
    array([-1. -1.  0.  1.  1.], shape=(5,), dtype=float64)

    )r   r   r)   r   r   r   r*   s    r   r   r     s6    . QD!z''**::""r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )
HardShrinkc                 0    t         |           || _        y r   rN   rR   s     r   rP   zHardShrink.__init__2  rT   r   c                 V    t         j                  j                  || j                        S r   )r   r   hard_shrinkrQ   r   s     r   r   zHardShrink.call6  rW   r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zHardShrink.compute_output_spec9  r!   r   rY   rZ   r\   s   @r   r   r   1  r]   r   r   zkeras.ops.hard_shrinkzkeras.ops.nn.hard_shrinkc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )a  Hard Shrink activation function.

    The Hard Shrink function is a thresholding operation defined as:

    `f(x) = x` if `|x| > threshold`,
    `f(x) = 0` otherwise.

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-0.5, 0., 1.])
    >>> x_hard_shrink = keras.ops.hard_shrink(x)
    >>> print(x_hard_shrink)
    array([0. 0. 1.], shape=(3,), dtype=float64)

    )r   r   r)   r   r   r   r_   s     r   r   r   =  s;    0 QD!)$22155::!!!Y//r   c                   *     e Zd Z fdZd Zd Z xZS )	Thresholdc                 >    t         |           || _        || _        y r   )rO   rP   threshold_valuevalue)r   r   r   rS   s      r   rP   zThreshold.__init__[  s    .
r   c                 l    t         j                  j                  || j                  | j                        S r   )r   r   rQ   r   r   r   s     r   r   zThreshold.call`  s%    zz##At';';TZZHHr   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zThreshold.compute_output_specc  r!   r   rZ   r\   s   @r   r   r   Z  s    
I3r   r   zkeras.ops.thresholdzkeras.ops.nn.thresholdc                     t        | f      rt        ||      j                  |       S t        j                  j                  | ||      S )aH  Threshold activation function.

    The function thresholds the input `x` as follows:
    `f(x) = x` if `x > threshold`,
    `f(x) = default_value` otherwise.

    Args:
        x: Input tensor.
        threshold: The value that decides when to retain or replace x.
        default_value: Value to assign when `x <= threshold`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1.0, 0.0, 1.0, 2.0])
    >>> x_threshold = keras.ops.threshold(x, 1, 0)
    >>> print(x_threshold)
    array([0., 0., 0., 2.], shape=(4,), dtype=float64)

    )r   r   r)   r   r   rQ   )r   rQ   default_values      r   rQ   rQ   g  s?    0 QD!M2@@CC::9m<<r   c                   ,     e Zd Zd fd	Zd Zd Z xZS )Softmaxc                 0    t         |           || _        y r   r   r   s     r   rP   zSoftmax.__init__  r   r   c                 X    t         j                  j                  || j                        S r   )r   r   softmaxr   r   s     r   r   zSoftmax.call  s     zz!!!$))!44r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSoftmax.compute_output_spec  r!   r   r   rZ   r\   s   @r   r   r     s    53r   r   zkeras.ops.softmaxzkeras.ops.nn.softmaxc           	         t        |t              r8| j                  |   dk(  r&t        j                  d| d| j                   d       t        | f      rt        |      j                  |       S t        |t              r.t        t        | j                              D cg c]	  }||vs| }}t        j                  j                  | g ||      }t        j                  j                  |g |D cg c]  }| j                  |    c}d      }t        j                  j!                  |d      } t        j                  j                  | |j                        } t        j                  j                  | t#        t        j                  j%                  g ||                  } | S t        j                  j!                  | |      S c c}w c c}w )a  Softmax activation function.

    The elements of the output vector lie within the range `(0, 1)`, and their
    total sum is exactly 1 (excluding the floating point rounding error).

    Each vector is processed independently. The `axis` argument specifies the
    axis along which the function is applied within the input.

    It is defined as:
    `f(x) = exp(x) / sum(exp(x))`

    Args:
        x: Input tensor.
        axis: Integer, axis along which the softmax is applied.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_softmax = keras.ops.softmax(x)
    >>> print(x_softmax)
    array([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64)

       z"You are using a softmax over axis z of a tensor of shape z. This axis has size 1. The softmax operation will always return the value 1, which is likely not what you intended. Did you mean to use a sigmoid instead?axesr   r   )
isinstanceintr   warningswarnr   r   r)   tuplerangelenr   numpy	transposereshaper   r   listargsortr   r   vaxis_to_keepx_transposed
x_reshapeds         r   r   r     s   > $!!30 7$$%GG9 -55	
 QD!t}**1--$#(QWW#6Ha!4-HH}}..q7M7M7M.N]]**C>AQWWQZ>CC

 JJz3MM!!!\%7%78MM##D../E/E/EFG $ 
 zz!!!$!// I ?s    	G*G5Gc                   ,     e Zd Zd fd	Zd Zd Z xZS )
LogSoftmaxc                 0    t         |           || _        y r   r   r   s     r   rP   zLogSoftmax.__init__  r   r   c                 X    t         j                  j                  || j                        S r   )r   r   log_softmaxr   r   s     r   r   zLogSoftmax.call  s     zz%%adii%88r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zLogSoftmax.compute_output_spec  r!   r   r   rZ   r\   s   @r   r  r    s    93r   r  zkeras.ops.log_softmaxzkeras.ops.nn.log_softmaxc           	      "   t        | f      rt        |      j                  |       S t        |t              r.t        t        | j                              D cg c]	  }||vs| }}t        j                  j                  | g ||      }t        j                  j                  |g |D cg c]  }| j                  |    c}d      }t        j                  j                  |d      } t        j                  j                  | |j                        } t        j                  j                  | t        t        j                  j                  g ||                  } | S t        j                  j                  | |      S c c}w c c}w )a  Log-softmax activation function.

    It is defined as:
    `f(x) = x - max(x) - log(sum(exp(x - max(x))))`

    Args:
        x: Input tensor.
        axis: Integer, axis along which the log-softmax is applied.
            Defaults to `-1`.

    Returns:
        A tensor with the same shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_log_softmax = keras.ops.log_softmax(x)
    >>> print(x_log_softmax)
    array([-2.40760596, -1.40760596, -0.40760596], shape=(3,), dtype=float64)

    r   r   r   )r   r  r)   r   r   r   r   r   r   r   r   r   r   r
  r   r   r  s         r   r
  r
    sO   8 QD!$--a00$#(QWW#6Ha!4-HH}}..q7M7M7M.N]]**C>AQWWQZ>CC

 JJ"":B"7MM!!!\%7%78MM##D../E/E/EFG $ 
 zz%%ad%33 I ?s   	F"F-Fc                   ,     e Zd Zd fd	Zd Zd Z xZS )	Sparsemaxc                 0    t         |           || _        y r   r   r   s     r   rP   zSparsemax.__init__
  r   r   c                 X    t         j                  j                  || j                        S r   )r   r   	sparsemaxr   r   s     r   r   zSparsemax.call  s     zz##ADII#66r   c                 D    t        |j                  |j                        S r   r   r   s     r   r    zSparsemax.compute_output_spec  r!   r   r   rZ   r\   s   @r   r  r  	  s    73r   r  zkeras.ops.sparsemaxzkeras.ops.nn.sparsemaxc                     t        | f      rt        |      j                  |       S t        j                  j                  | |      S )uD  Sparsemax activation function.

    For each batch `i`, and class `j`,
    sparsemax activation function is defined as:

    `sparsemax(x)[i, j] = max(x[i, j] - τ(x[i, :]), 0).`

    Args:
        x: Input tensor.
        axis: `int`, axis along which the sparsemax operation is applied.

    Returns:
        A tensor, output of sparsemax transformation. Has the same type and
        shape as `x`.

    Example:

    >>> x = np.array([-1., 0., 1.])
    >>> x_sparsemax = keras.ops.sparsemax(x)
    >>> print(x_sparsemax)
    array([0., 0., 1.], shape=(3,), dtype=float64)

    r   )r   r  r)   r   r   r  r   s     r   r  r    s=    2 QD!,,Q//::--r   c                   2     e Zd Z	 	 	 d fd	Zd Zd Z xZS )MaxPoolc                 v    t         |           || _        || _        |j	                         | _        || _        y r   rO   rP   	pool_sizestrideslowerpaddingdata_formatr   r  r  r  r  rS   s        r   rP   zMaxPool.__init__4  3     	"}}&r   c                     t         j                  j                  || j                  | j                  | j
                  | j                        S r   )r   r   max_poolr  r  r  r  r   inputss     r   r   zMaxPool.callA  s:    zz""NNLLLL
 	
r   c                     t        j                  |j                  | j                  | j                  | j
                  | j                        }t        ||j                        S r   	r
   compute_pooling_output_shaper   r  r  r  r  r   r   r   r"  output_shapes      r   r    zMaxPool.compute_output_specJ  J    &CCLLNNLLLL
 <v||<<r   NvalidNrZ   r\   s   @r   r  r  3       '
=r   r  zkeras.ops.max_poolzkeras.ops.nn.max_poolc                     t        |      }|j                         }t        | f      rt        ||||      j	                  |       S t
        j                  j                  | ||||      S )a  Max pooling operation.

    Args:
        inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
            `data_format="channels_last"`, or
            `(batch_size, num_channels) + inputs_spatial_shape` if
            `data_format="channels_first"`. Pooling happens over the spatial
            dimensions only.
        pool_size: int or tuple/list of integers of size
            `len(inputs_spatial_shape)`, specifying the size of the pooling
            window for each spatial dimension of the input tensor. If
            `pool_size` is int, then every spatial dimension shares the same
            `pool_size`.
        strides: int or tuple/list of integers of size
            `len(inputs_spatial_shape)`. The stride of the sliding window for
            each spatial dimension of the input tensor. If `strides` is int,
            then every spatial dimension shares the same `strides`.
        padding: string, either `"valid"` or `"same"`. `"valid"` means no
            padding is applied, and `"same"` results in padding evenly to the
            left/right or up/down of the input such that output has the
            same height/width dimension as the input when `strides=1`.
        data_format: A string, either `"channels_last"` or `"channels_first"`.
            `data_format` determines the ordering of the dimensions in the
            inputs. If `data_format="channels_last"`, `inputs` is of shape
            `(batch_size, ..., channels)` while if
            `data_format="channels_first"`, `inputs` is of shape
            `(batch_size, channels, ...)`.

    Returns:
        A tensor of rank N+2, the result of the max pooling operation.
    )r   r  r   r  r)   r   r   r   r"  r  r  r  r  s        r   r   r   U  sh    P *+6KmmoGVI&	

 -
	  ::vy'7KPPr   c                   2     e Zd Z	 	 	 d fd	Zd Zd Z xZS )AveragePoolc                 v    t         |           || _        || _        |j	                         | _        || _        y r   r  r  s        r   rP   zAveragePool.__init__  r  r   c                     t         j                  j                  || j                  | j                  | j
                  | j                        S r   )r   r   average_poolr  r  r  r  r!  s     r   r   zAveragePool.call  s:    zz&&NNLLLL
 	
r   c                     t        j                  |j                  | j                  | j                  | j
                  | j                        }t        ||j                        S r   r$  r&  s      r   r    zAveragePool.compute_output_spec  r(  r   r)  rZ   r\   s   @r   r/  r/    r+  r   r/  zkeras.ops.average_poolzkeras.ops.nn.average_poolc                     t        |      }|j                         }t        | f      rt        ||||      j	                  |       S t
        j                  j                  | ||||      S )a  Average pooling operation.

    Args:
        inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
            `data_format="channels_last"`, or
            `(batch_size, num_channels) + inputs_spatial_shape` if
            `data_format="channels_first"`. Pooling happens over the spatial
            dimensions only.
        pool_size: int or tuple/list of integers of size
            `len(inputs_spatial_shape)`, specifying the size of the pooling
            window for each spatial dimension of the input tensor. If
            `pool_size` is int, then every spatial dimension shares the same
            `pool_size`.
        strides: int or tuple/list of integers of size
            `len(inputs_spatial_shape)`. The stride of the sliding window for
            each spatial dimension of the input tensor. If `strides` is int,
            then every spatial dimension shares the same `strides`.
        padding: string, either `"valid"` or `"same"`. `"valid"` means no
            padding is applied, and `"same"` results in padding evenly to the
            left/right or up/down of the input such that output has the
            same height/width dimension as the input when `strides=1`.
        data_format: A string, either `"channels_last"` or `"channels_first"`.
            `data_format` determines the ordering of the dimensions in the
            inputs. If `data_format="channels_last"`, `inputs` is of shape
            `(batch_size, ..., channels)` while if
            `data_format="channels_first"`, `inputs` is of shape
            `(batch_size, channels, ...)`.

    Returns:
        A tensor of rank N+2, the result of the average pooling operation.
    )r   r  r   r/  r)   r   r   r2  r-  s        r   r2  r2    sm    Z *+6KmmoGVI&	

 -
	  ::""	7G[ r   c                   4     e Zd Z	 	 	 	 d fd	Zd Zd Z xZS )Convc                 v    t         |           || _        |j                         | _        || _        || _        y r   rO   rP   r  r  r  r  dilation_rater   r  r  r  r9  rS   s        r   rP   zConv.__init__  4     	}}&*r   c                     t         j                  j                  ||| j                  | j                  | j
                  | j                        S )N)r  r  r  r9  )r   r   convr  r  r  r9  r   r"  kernels      r   r   z	Conv.call  sB    zzLLLL((,,  
 	
r   c           	          t        j                  |j                  |j                  d   |j                  d d | j                  | j                  | j
                  | j                        }t        ||j                        S Nr   r   	r
   compute_conv_output_shaper   r  r  r  r9  r   r   r   r"  r?  r'  s       r   r    zConv.compute_output_spec  sf    &@@LLLLLL"LLLL
 <v||<<r   r   r*  Nr   rZ   r\   s   @r   r6  r6          +

=r   r6  zkeras.ops.convzkeras.ops.nn.convc                     t        |      }|j                         }t        | f      rt        ||||      j	                  | |      S t
        j                  j                  | |||||      S )aZ  General N-D convolution.

    This ops supports 1D, 2D and 3D convolution.

    Args:
        inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
            `data_format="channels_last"`, or
            `(batch_size, num_channels) + inputs_spatial_shape` if
            `data_format="channels_first"`.
        kernel: Tensor of rank N+2. `kernel` has shape
            `(kernel_spatial_shape, num_input_channels, num_output_channels)`.
            `num_input_channels` should match the number of channels in
            `inputs`.
        strides: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the strides of the convolution along each spatial
            dimension. If `strides` is int, then every spatial dimension shares
            the same `strides`.
        padding: string, either `"valid"` or `"same"`. `"valid"` means no
            padding is applied, and `"same"` results in padding evenly to the
            left/right or up/down of the input such that output has the
            same height/width dimension as the input when `strides=1`.
        data_format: A string, either `"channels_last"` or `"channels_first"`.
            `data_format` determines the ordering of the dimensions in the
            inputs. If `data_format="channels_last"`, `inputs` is of shape
            `(batch_size, ..., channels)` while if
            `data_format="channels_first"`, `inputs` is of shape
            `(batch_size, channels, ...)`.
        dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the dilation rate to use for dilated convolution. If
            `dilation_rate` is int, then every spatial dimension shares
            the same `dilation_rate`.

    Returns:
        A tensor of rank N+2, the result of the conv operation.
    )r   r  r   r6  r)   r   r   r=  r"  r?  r  r  r  r9  s         r   r=  r=    sk    Z *+6KmmoGVI&GWk=AOOF
 	
 ::??+} r   c                   4     e Zd Z	 	 	 	 d fd	Zd Zd Z xZS )DepthwiseConvc                 v    t         |           || _        |j                         | _        || _        || _        y r   r8  r:  s        r   rP   zDepthwiseConv.__init__D  r;  r   c                     t         j                  j                  ||| j                  | j                  | j
                  | j                        S r   )r   r   depthwise_convr  r  r  r9  r>  s      r   r   zDepthwiseConv.callQ  s?    zz((LLLL
 	
r   c           	         t        j                  |j                  |j                  d   |j                  d   z  |j                  d d | j                  | j                  | j
                  | j                        }t        ||j                        S rA  rC  rE  s       r   r    z!DepthwiseConv.compute_output_spec[  st    &@@LLLLv||B//LL"LLLL
 <v||<<r   rF  rZ   r\   s   @r   rK  rK  C  rG  r   rK  zkeras.ops.depthwise_convzkeras.ops.nn.depthwise_convc                     t        |      }|j                         }t        | f      rt        ||||      j	                  | |      S t
        j                  j                  | |||||      S )av  General N-D depthwise convolution.

    This ops supports 1D and 2D depthwise convolution.

    Args:
        inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
            `data_format="channels_last"`, or
            `(batch_size, num_channels) + inputs_spatial_shape` if
            `data_format="channels_first"`.
        kernel: Tensor of rank N+2. `kernel` has shape
            [kernel_spatial_shape, num_input_channels, num_channels_multiplier],
            `num_input_channels` should match the number of channels in
            `inputs`.
        strides: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the strides of the convolution along each spatial
            dimension. If `strides` is int, then every spatial dimension shares
            the same `strides`.
        padding: string, either `"valid"` or `"same"`. `"valid"` means no
            padding is applied, and `"same"` results in padding evenly to the
            left/right or up/down of the input such that output has the
            same height/width dimension as the input when `strides=1`.
        data_format: A string, either `"channels_last"` or `"channels_first"`.
            `data_format` determines the ordering of the dimensions in the
            inputs. If `data_format="channels_last"`, `inputs` is of shape
            `(batch_size, ..., channels)` while if
            `data_format="channels_first"`, `inputs` is of shape
            `(batch_size, channels, ...)`.
        dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the dilation rate to use for dilated convolution. If
            `dilation_rate` is int, then every spatial dimension shares
            the same `dilation_rate`.

    Returns:
        A tensor of rank N+2, the result of the depthwise conv operation.
    )r   r  r   rK  r)   r   r   rN  rI  s         r   rN  rN  h  sq    d *+6KmmoGVI&Wk=

-
'	( ::$$ r   c                   4     e Zd Z	 	 	 	 d fd	Zd Zd Z xZS )SeparableConvc                 v    t         |           || _        |j                         | _        || _        || _        y r   r8  r:  s        r   rP   zSeparableConv.__init__  r;  r   c           	          t         j                  j                  |||| j                  | j                  | j
                  | j                        S r   )r   r   separable_convr  r  r  r9  )r   r"  depthwise_kernelpointwise_kernels       r   r   zSeparableConv.call  sB    zz((LLLL
 	
r   c           
      .   t        t        ||| j                  | j                  | j                  | j
                        j                        }| j                  dk(  r|j                  d   |d<   n|j                  d   |d<   t        ||j                        S )Nchannels_lastr   r   r   )	r   rN  r  r  r  r9  r   r   r   )r   r"  rV  rW  r'  s        r   r    z!SeparableConv.compute_output_spec  s       "" e	
 ./55b9L.44R8LO<v||<<r   rF  rZ   r\   s   @r   rR  rR    s      +	
=r   rR  zkeras.ops.separable_convzkeras.ops.nn.separable_convc           	          t        |      }|j                         }t        | f      rt        ||||      j	                  | ||      S t
        j                  j                  | ||||||      S )a  General N-D separable convolution.

    This ops supports 1D and 2D separable convolution. `separable_conv` is
    a depthwise conv followed by a pointwise conv.

    Args:
        inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
            `data_format="channels_last"`, or
            `(batch_size, num_channels) + inputs_spatial_shape` if
            `data_format="channels_first"`.
        depthwise_kernel: Tensor of rank N+2. `depthwise_kernel` has shape
            [kernel_spatial_shape, num_input_channels, num_channels_multiplier],
            `num_input_channels` should match the number of channels in
            `inputs`.
        pointwise_kernel: Tensor of rank N+2. `pointwise_kernel` has shape
            `(*ones_like(kernel_spatial_shape),
            num_input_channels * num_channels_multiplier, num_output_channels)`.
        strides: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the strides of the convolution along each spatial
            dimension. If `strides` is int, then every spatial dimension shares
            the same `strides`.
        padding: string, either `"valid"` or `"same"`. `"valid"` means no
            padding is applied, and `"same"` results in padding evenly to the
            left/right or up/down of the input such that output has the
            same height/width dimension as the input when `strides=1`.
        data_format: A string, either `"channels_last"` or `"channels_first"`.
            `data_format` determines the ordering of the dimensions in the
            inputs. If `data_format="channels_last"`, `inputs` is of shape
            `(batch_size, ..., channels)` while if
            `data_format="channels_first"`, `inputs` is of shape
            `(batch_size, channels, ...)`.
        dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the dilation rate to use for dilated convolution. If
            `dilation_rate` is int, then every spatial dimension shares
            the same `dilation_rate`.

    Returns:
        A tensor of rank N+2, the result of the separable conv operation.
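
    Example:

    >>> # Illustrative 1D sketch (assumed shapes, default "channels_last"
    >>> # data format): a depthwise stage with multiplier 1 followed by a
    >>> # pointwise stage mapping 12 channels to 5.
    >>> import numpy as np
    >>> inputs = np.random.rand(4, 10, 12).astype("float32")
    >>> depthwise_kernel = np.random.rand(3, 12, 1).astype("float32")
    >>> pointwise_kernel = np.random.rand(1, 12, 5).astype("float32")
    >>> keras.ops.separable_conv(
    ...     inputs, depthwise_kernel, pointwise_kernel, padding="valid"
    ... ).shape
    (4, 8, 5)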
    """
    data_format = standardize_data_format(data_format)
    padding = padding.lower()
    if any_symbolic_tensors((inputs,)):
        return SeparableConv(
            strides, padding, data_format, dilation_rate
        ).symbolic_call(inputs, depthwise_kernel, pointwise_kernel)
    return backend.nn.separable_conv(
        inputs,
        depthwise_kernel,
        pointwise_kernel,
        strides,
        padding,
        data_format,
        dilation_rate,
    )


class ConvTranspose(Operation):
    def __init__(
        self,
        strides,
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=1,
    ):
        super().__init__()
        self.strides = strides
        self.output_padding = output_padding
        self.padding = padding.lower()
        self.data_format = data_format
        self.dilation_rate = dilation_rate

    def call(self, inputs, kernel):
        return backend.nn.conv_transpose(
            inputs,
            kernel,
            self.strides,
            self.output_padding,
            self.padding,
            self.data_format,
            self.dilation_rate,
        )

    def compute_output_spec(self, inputs, kernel):
        kernel_size = kernel.shape[:-2]
        filters = kernel.shape[-2]
        output_shape = compute_conv_transpose_output_shape(
            inputs.shape,
            kernel_size,
            filters,
            self.strides,
            self.padding,
            self.output_padding,
            self.data_format,
            self.dilation_rate,
        )
        return KerasTensor(output_shape, dtype=inputs.dtype)


@keras_export(["keras.ops.conv_transpose", "keras.ops.nn.conv_transpose"])
def conv_transpose(
    inputs,
    kernel,
    strides,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """General N-D convolution transpose.

    Also known as deconvolution. This op supports 1D, 2D and 3D
    transposed convolution.

    Args:
        inputs: Tensor of rank N+2. `inputs` has shape
            `(batch_size,) + inputs_spatial_shape + (num_channels,)` if
            `data_format="channels_last"`, or
            `(batch_size, num_channels) + inputs_spatial_shape` if
            `data_format="channels_first"`.
        kernel: Tensor of rank N+2. `kernel` has shape
            [kernel_spatial_shape, num_output_channels, num_input_channels],
            `num_input_channels` should match the number of channels in
            `inputs`.
        strides: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the strides of the convolution along each spatial
            dimension. If `strides` is int, then every spatial dimension shares
            the same `strides`.
        padding: string, either `"valid"` or `"same"`. `"valid"` means no
            padding is applied, and `"same"` results in padding evenly to the
            left/right or up/down of the input such that output has the
            same height/width dimension as the input when `strides=1`.
        output_padding: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the amount of padding along the height and width of
            the output tensor. Can be a single integer to specify the same
            value for all spatial dimensions. The amount of output padding
            along a given dimension must be lower than the stride along that
            same dimension. If set to `None` (default), the output shape is
            inferred.
        data_format: A string, either `"channels_last"` or `"channels_first"`.
            `data_format` determines the ordering of the dimensions in the
            inputs. If `data_format="channels_last"`, `inputs` is of shape
            `(batch_size, ..., channels)` while if
            `data_format="channels_first"`, `inputs` is of shape
            `(batch_size, channels, ...)`.
        dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
            specifying the dilation rate to use for dilated convolution. If
            `dilation_rate` is int, then every spatial dimension shares
            the same `dilation_rate`.

    Returns:
        A tensor of rank N+2, the result of the transposed conv operation.
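
    Example:

    >>> # Illustrative 1D upsampling sketch (assumed shapes, default
    >>> # "channels_last" data format); the kernel maps 12 input channels
    >>> # to 5 output channels and `strides=2` doubles the spatial size.
    >>> import numpy as np
    >>> inputs = np.random.rand(4, 10, 12).astype("float32")
    >>> kernel = np.random.rand(3, 5, 12).astype("float32")
    >>> keras.ops.conv_transpose(inputs, kernel, 2, padding="same").shape
    (4, 20, 5)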
    """
    data_format = standardize_data_format(data_format)
    padding = padding.lower()
    if any_symbolic_tensors((inputs,)):
        return ConvTranspose(
            strides, padding, output_padding, data_format, dilation_rate
        ).symbolic_call(inputs, kernel)
    return backend.nn.conv_transpose(
        inputs,
        kernel,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
    )


class OneHot(Operation):
    def __init__(self, num_classes, axis=-1, dtype=None, sparse=False):
        super().__init__()
        self.num_classes = num_classes
        self.axis = axis
        self.dtype = dtype or backend.floatx()
        self.sparse = sparse

    def call(self, x):
        return backend.nn.one_hot(
            x,
            self.num_classes,
            axis=self.axis,
            dtype=self.dtype,
            sparse=self.sparse,
        )

    def compute_output_spec(self, x):
        x_shape = list(getattr(x, "shape", []))
        if self.axis == -1:
            x_shape.append(self.num_classes)
        elif self.axis >= 0 and self.axis <= len(x_shape):
            x_shape.insert(self.axis, self.num_classes)
        else:
            raise ValueError(
                f"axis must be -1 or between [0, {len(x.shape)}), but "
                f"received {self.axis}."
            )
        return KerasTensor(x_shape, dtype=self.dtype, sparse=self.sparse)


@keras_export(["keras.ops.one_hot", "keras.ops.nn.one_hot"])
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """Converts integer tensor `x` into a one-hot tensor.

    The one-hot encoding is a representation where each integer value is
    converted into a binary vector with a length equal to `num_classes`,
    and the index corresponding to the integer value is marked as 1, while
    all other indices are marked as 0.

    Args:
        x: Integer tensor to be encoded. The shape can be
            arbitrary, but the dtype should be integer.
        num_classes: Number of classes for the one-hot encoding.
        axis: Axis along which the encoding is performed.
            `-1` represents the last axis. Defaults to `-1`.
        dtype: (Optional) Data type of the output tensor. If not
            provided, it defaults to the default data type of the backend.
        sparse: Whether to return a sparse tensor; for backends that support
            sparse tensors.

    Returns:
        Tensor: One-hot encoded tensor with the same shape as `x`
        except for the specified `axis` dimension, which will have
        a length of `num_classes`. The dtype of the output tensor
        is determined by `dtype` or the default data type of the backend.

    Example:

    >>> x = keras.ops.convert_to_tensor([1, 3, 2, 0])
    >>> one_hot(x, num_classes=4)
    array([[0. 1. 0. 0.]
           [0. 0. 0. 1.]
           [0. 0. 1. 0.]
           [1. 0. 0. 0.]], shape=(4, 4), dtype=float32)
    """
    if any_symbolic_tensors((x,)):
        return OneHot(
            num_classes, axis=axis, dtype=dtype, sparse=sparse
        ).symbolic_call(x)
    return backend.nn.one_hot(
        x,
        num_classes,
        axis=axis,
        dtype=dtype or backend.floatx(),
        sparse=sparse,
    )


class BinaryCrossentropy(Operation):
    def __init__(self, from_logits=False):
        super().__init__()
        self.from_logits = from_logits

    def call(self, target, output):
        return backend.nn.binary_crossentropy(
            target, output, from_logits=self.from_logits
        )

    def compute_output_spec(self, target, output):
        if target.shape != output.shape:
            raise ValueError(
                "Arguments `target` and `output` must have the same shape. "
                f"Received: target.shape={target.shape}, "
                f"output.shape={output.shape}"
            )
        return KerasTensor(output.shape, dtype=output.dtype)


@keras_export(
    ["keras.ops.binary_crossentropy", "keras.ops.nn.binary_crossentropy"]
)
def binary_crossentropy(target, output, from_logits=False):
    """Computes binary cross-entropy loss between target and output tensor.

    The binary cross-entropy loss is commonly used in binary
    classification tasks where each input sample belongs to one
    of the two classes. It measures the dissimilarity between the
    target and output probabilities or logits.

    Args:
        target: The target tensor representing the true binary labels.
            Its shape should match the shape of the `output` tensor.
        output: The output tensor representing the predicted probabilities
            or logits. Its shape should match the shape of the
            `target` tensor.
        from_logits: (optional) Whether `output` is a tensor of logits or
            probabilities.
            Set it to `True` if `output` represents logits; otherwise,
            set it to `False` if `output` represents probabilities.
            Defaults to `False`.

    Returns:
        Tensor: The computed binary cross-entropy loss between
        `target` and `output`.

    Example:

    >>> target = keras.ops.convert_to_tensor([0, 1, 1, 0])
    >>> output = keras.ops.convert_to_tensor([0.1, 0.9, 0.8, 0.2])
    >>> binary_crossentropy(target, output)
    array([0.10536054 0.10536054 0.22314355 0.22314355],
          shape=(4,), dtype=float32)
    """
    if any_symbolic_tensors((target, output)):
        return BinaryCrossentropy(from_logits=from_logits).symbolic_call(
            target, output
        )
    return backend.nn.binary_crossentropy(
        target, output, from_logits=from_logits
    )


class CategoricalCrossentropy(Operation):
    def __init__(self, from_logits=False, axis=-1):
        super().__init__()
        self.from_logits = from_logits
        self.axis = axis

    def call(self, target, output):
        return backend.nn.categorical_crossentropy(
            target, output, from_logits=self.from_logits, axis=self.axis
        )

    def compute_output_spec(self, target, output):
        if target.shape != output.shape:
            raise ValueError(
                "Arguments `target` and `output` must have the same shape. "
                f"Received: target.shape={target.shape}, "
                f"output.shape={output.shape}"
            )
        if len(target.shape) < 1:
            raise ValueError(
                "Arguments `target` and `output` must be at least rank 1. "
                f"Received: target.shape={target.shape}, "
                f"output.shape={output.shape}"
            )
        return KerasTensor(output.shape[:-1], dtype=output.dtype)


@keras_export(
    [
        "keras.ops.categorical_crossentropy",
        "keras.ops.nn.categorical_crossentropy",
    ]
)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Computes categorical cross-entropy loss between target and output tensor.

    The categorical cross-entropy loss is commonly used in multi-class
    classification tasks where each input sample can belong to one of
    multiple classes. It measures the dissimilarity
    between the target and output probabilities or logits.

    Args:
        target: The target tensor representing the true categorical labels.
            Its shape should match the shape of the `output` tensor
            except for the last dimension.
        output: The output tensor representing the predicted probabilities
            or logits. Its shape should match the shape of the `target`
            tensor except for the last dimension.
        from_logits: (optional) Whether `output` is a tensor of logits or
            probabilities.
            Set it to `True` if `output` represents logits; otherwise,
            set it to `False` if `output` represents probabilities.
            Defaults to `False`.
        axis: (optional) The axis along which the categorical cross-entropy
            is computed.
            Defaults to `-1`, which corresponds to the last dimension of
            the tensors.

    Returns:
        Tensor: The computed categorical cross-entropy loss between
        `target` and `output`.

    Example:

    >>> target = keras.ops.convert_to_tensor(
    ... [[1, 0, 0],
    ...  [0, 1, 0],
    ...  [0, 0, 1]])
    >>> output = keras.ops.convert_to_tensor(
    ... [[0.9, 0.05, 0.05],
    ...  [0.1, 0.8, 0.1],
    ...  [0.2, 0.3, 0.5]])
    >>> categorical_crossentropy(target, output)
    array([0.10536054 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
    """
    if any_symbolic_tensors((target, output)):
        return CategoricalCrossentropy(
            from_logits=from_logits, axis=axis
        ).symbolic_call(target, output)
    return backend.nn.categorical_crossentropy(
        target, output, from_logits=from_logits, axis=axis
    )


class SparseCategoricalCrossentropy(Operation):
    def __init__(self, from_logits=False, axis=-1):
        super().__init__()
        self.from_logits = from_logits
        self.axis = axis

    def call(self, target, output):
        return backend.nn.sparse_categorical_crossentropy(
            target, output, from_logits=self.from_logits, axis=self.axis
        )

    def compute_output_spec(self, target, output):
        if len(output.shape) < 1:
            raise ValueError(
                "Argument `output` must be at least rank 1. "
                f"Received: output.shape={output.shape}"
            )
        target_shape = target.shape
        if len(target_shape) == len(output.shape) and target_shape[-1] == 1:
            target_shape = target_shape[:-1]
        if target_shape != output.shape[:-1]:
            raise ValueError(
                "Arguments `target` and `output` must have the same shape "
                "up until the last dimension: "
                f"target.shape={target.shape}, output.shape={output.shape}"
            )
        return KerasTensor(output.shape[:-1], dtype=output.dtype)


@keras_export(
    [
        "keras.ops.sparse_categorical_crossentropy",
        "keras.ops.nn.sparse_categorical_crossentropy",
    ]
)
def sparse_categorical_crossentropy(
    target, output, from_logits=False, axis=-1
):
    """Computes sparse categorical cross-entropy loss.

    The sparse categorical cross-entropy loss is similar to categorical
    cross-entropy, but it is used when the target tensor contains integer
    class labels instead of one-hot encoded vectors. It measures the
    dissimilarity between the target and output probabilities or logits.

    Args:
        target: The target tensor representing the true class labels as
            integers. Its shape should match the shape of the `output`
            tensor except for the last dimension.
        output: The output tensor representing the predicted probabilities
            or logits.
            Its shape should match the shape of the `target` tensor except
            for the last dimension.
        from_logits: (optional) Whether `output` is a tensor of logits
            or probabilities.
            Set it to `True` if `output` represents logits; otherwise,
            set it to `False` if `output` represents probabilities.
            Defaults to `False`.
        axis: (optional) The axis along which the sparse categorical
            cross-entropy is computed.
            Defaults to `-1`, which corresponds to the last dimension
            of the tensors.

    Returns:
        Tensor: The computed sparse categorical cross-entropy
        loss between `target` and `output`.

    Example:

    >>> target = keras.ops.convert_to_tensor([0, 1, 2], dtype=int32)
    >>> output = keras.ops.convert_to_tensor(
    ... [[0.9, 0.05, 0.05],
    ...  [0.1, 0.8, 0.1],
    ...  [0.2, 0.3, 0.5]])
    >>> sparse_categorical_crossentropy(target, output)
    array([0.10536056 0.22314355 0.6931472 ], shape=(3,), dtype=float32)
    """
    if any_symbolic_tensors((target, output)):
        return SparseCategoricalCrossentropy(
            from_logits=from_logits, axis=axis
        ).symbolic_call(target, output)
    return backend.nn.sparse_categorical_crossentropy(
        target, output, from_logits=from_logits, axis=axis
    )


class MultiHot(Operation):
    def __init__(
        self, num_classes=None, axis=-1, dtype=None, sparse=False, **kwargs
    ):
        if num_classes is None and "num_tokens" in kwargs:
            num_classes = kwargs.pop("num_tokens")
        if num_classes is None:
            raise ValueError("Argument `num_classes` must be specified.")
        super().__init__(**kwargs)
        self.num_classes = num_classes
        self.axis = axis
        self.dtype = dtype or backend.floatx()
        self.sparse = sparse

    def call(self, inputs):
        return backend.nn.multi_hot(
            inputs,
            num_classes=self.num_classes,
            axis=self.axis,
            dtype=self.dtype,
        )

    def compute_output_spec(self, inputs):
        x_shape = list(getattr(inputs, "shape", []))
        if self.axis == -1:
            x_shape.append(self.num_classes)
        elif self.axis >= 0 and self.axis <= len(x_shape):
            x_shape.insert(self.axis, self.num_classes)
        else:
            raise ValueError(
                f"axis must be -1 or between [0, {len(inputs.shape)}), "
                f"but received {self.axis}."
            )
        if len(x_shape) == 2:
            x_shape = [x_shape[-1]]
        else:
            x_shape = [x_shape[0]] + x_shape[2:]
        return KerasTensor(x_shape, dtype=inputs.dtype, sparse=self.sparse)


@keras_export(["keras.ops.multi_hot", "keras.ops.nn.multi_hot"])
def multi_hot(
    inputs, num_classes=None, axis=-1, dtype=None, sparse=False, **kwargs
):
    """Encodes integer labels as multi-hot vectors.

    This function encodes integer labels as multi-hot vectors, where each label
    is mapped to a binary value in the resulting vector.

    Args:
        inputs: Tensor of integer labels to be converted to multi-hot vectors.
        num_classes: Integer, the total number of unique classes.
        axis: (optional) Axis along which the multi-hot encoding should be
            added. Defaults to `-1`, which corresponds to the last dimension.
        dtype: (optional) The data type of the resulting tensor. Default
            is backend's float type.
        sparse: Whether to return a sparse tensor; for backends that support
            sparse tensors.

    Returns:
        Tensor: The multi-hot encoded tensor.

    Example:

    >>> data = keras.ops.convert_to_tensor([0, 4])
    >>> keras.ops.multi_hot(data, num_classes=5)
    array([1.0, 0.0, 0.0, 0.0, 1.0], dtype=float32)
    """
    if num_classes is None and "num_tokens" in kwargs:
        num_classes = kwargs.pop("num_tokens")
    if num_classes is None:
        raise ValueError("Argument `num_classes` must be specified.")
    if any_symbolic_tensors((inputs,)):
        return MultiHot(num_classes, axis, dtype, sparse).symbolic_call(inputs)
    return backend.nn.multi_hot(inputs, num_classes, axis, dtype, sparse)


class Moments(Operation):
    def __init__(self, axes, keepdims=False, synchronized=False):
        super().__init__()
        self.axes = axes
        self.keepdims = keepdims
        self.synchronized = synchronized

    def call(self, x):
        return backend.nn.moments(
            x,
            axes=self.axes,
            keepdims=self.keepdims,
            synchronized=self.synchronized,
        )

    def compute_output_spec(self, x):
        return (
            KerasTensor(
                reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims),
                dtype=x.dtype,
            ),
            KerasTensor(
                reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims),
                dtype=x.dtype,
            ),
        )


@keras_export(["keras.ops.moments", "keras.ops.nn.moments"])
def moments(x, axes, keepdims=False, synchronized=False):
    """Calculates the mean and variance of `x`.

    The mean and variance are calculated by aggregating the contents of `x`
    across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
    variance of a vector.

    Args:
        x: Input tensor.
        axes: A list of axes which to compute mean and variance.
        keepdims: If this is set to `True`, the axes which are reduced are left
            in the result as dimensions with size one.
        synchronized: Only applicable with the TensorFlow backend.
            If `True`, synchronizes the global batch statistics (mean and
            variance) across all devices at each training step in a
            distributed training strategy. If `False`, each replica uses its own
            local batch statistics.

    Returns:
        A tuple containing two tensors - mean and variance.

    Example:

    >>> x = keras.ops.convert_to_tensor([0, 1, 2, 3, 100], dtype="float32")
    >>> keras.ops.moments(x, axes=[0])
    (array(21.2, dtype=float32), array(1553.3601, dtype=float32))
    """
    if any_symbolic_tensors((x,)):
        return Moments(
            axes, keepdims, synchronized=synchronized
        ).symbolic_call(x)
    return backend.nn.moments(x, axes, keepdims, synchronized=synchronized)


class BatchNorm(Operation):
    def __init__(self, axis, epsilon=1e-3):
        super().__init__()
        self.axis = axis
        self.epsilon = epsilon

    def _check_shape(self, name, shape, expected_shape):
        if shape != expected_shape:
            raise ValueError(
                f"Arguments `{name}` must be a vector of length "
                f"`x.shape[axis]`. Expected: `{expected_shape}`. "
                f"Received: `{shape}`."
            )

    def compute_output_spec(self, x, mean, variance, offset, scale):
        shape = (x.shape[self.axis],)
        self._check_shape("mean", tuple(mean.shape), shape)
        self._check_shape("variance", tuple(variance.shape), shape)
        if offset is not None:
            self._check_shape("offset", tuple(offset.shape), shape)
        if scale is not None:
            self._check_shape("scale", tuple(scale.shape), shape)
        return KerasTensor(x.shape, dtype=x.dtype)


@keras_export(
    ["keras.ops.batch_normalization", "keras.ops.nn.batch_normalization"]
)
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Normalizes `x` by `mean` and `variance`.

    This op is typically used by the batch normalization step in a neural
    network. It normalizes the input tensor along the given axis.

    Args:
        x: Input tensor.
        mean: A mean vector of the same length as the `axis` dimension of the
            input tensor.
        variance: A variance vector of the same length as the `axis` dimension
            of the input tensor.
        axis: Integer, the axis that should be normalized.
        offset: An offset vector of the same length as the `axis` dimension of
            the input tensor. If not `None`, `offset` is added to the normalized
            tensor. Defaults to `None`.
        scale: A scale vector of the same length as the `axis` dimension of the
            input tensor. If not `None`, the normalized tensor is multiplied by
            `scale`. Defaults to `None`.
        epsilon: Small float added to variance to avoid dividing by zero.
            Defaults to 1e-3.

    Returns:
        The normalized tensor.

    Example:

    >>> x = keras.ops.convert_to_tensor(
    ...     [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]
    ... )
    >>> keras.ops.batch_normalization(
    ...     x,
    ...     mean=[0.4, 0.5, 0.6],
    ...     variance=[0.67, 0.67, 0.67],
    ...     axis=-1
    ... )
    array([[-3.6624e-01, -3.6624e-01, -3.6624e-01],
           [-4.6445e-09,  0.0000e+00, -1.8578e-08],
           [ 3.6624e-01,  3.6624e-01,  3.6624e-01]])

    """
    if any_symbolic_tensors((x, mean, variance, offset, scale)):
        return BatchNorm(axis, epsilon).symbolic_call(
            x, mean, variance, offset, scale
        )
    return backend.nn.batch_normalization(
        x, mean, variance, axis, offset, scale, epsilon
    )


class CTCLoss(Operation):
    def __init__(self, mask_index=0):
        super().__init__()
        self.mask_index = mask_index

    def call(self, target, output, target_length, output_length):
        return backend.nn.ctc_loss(
            target, output, target_length, output_length, self.mask_index
        )

    def _check_shape_first_dim(self, name1, shape1, name2, shape2):
        if shape1[0] != shape2[0]:
            raise ValueError(
                f"Arguments `{name1}` and `{name2}` must have the same first "
                f"dimension. Received shapes: `{shape1}` and `{shape2}`."
            )

    def compute_output_spec(
        self, target, output, target_length, output_length
    ):
        self._check_shape_first_dim(
            "target", target.shape, "output", output.shape
        )
        self._check_shape_first_dim(
            "target_length", target_length.shape, "target", target.shape
        )
        self._check_shape_first_dim(
            "output_length", output_length.shape, "output", output.shape
        )
        dtype = backend.result_type(output.dtype, "float32")
        return KerasTensor((target.shape[0],), dtype=dtype)


@keras_export(["keras.ops.ctc_loss", "keras.ops.nn.ctc_loss"])
def ctc_loss(target, output, target_length, output_length, mask_index=0):
    """CTC (Connectionist Temporal Classification) loss.

    Args:
        target: A tensor of shape `(batch_size, max_length)` containing
            the true labels in integer format.
        output: A tensor of shape `(batch_size, max_length, num_classes)`
            containing logits (the output of your model).
        target_length: A tensor of shape `(batch_size,)` containing the
            true label lengths.
        output_length: A tensor of shape `(batch_size,)` containing the
            output lengths.
        mask_index: The index of the mask character in the vocabulary.
            Defaults to `0`.
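
    Returns:
        A tensor of shape `(batch_size,)` containing the CTC loss for each
        sequence in the batch.

    Example:

    >>> # Illustrative sketch with assumed shapes; the logits are random,
    >>> # so only the output shape is shown.
    >>> import numpy as np
    >>> target = np.array([[1, 2, 1, 0], [1, 2, 0, 0]], dtype="int32")
    >>> output = np.random.rand(2, 5, 4).astype("float32")
    >>> target_length = np.array([3, 2], dtype="int32")
    >>> output_length = np.array([5, 5], dtype="int32")
    >>> keras.ops.ctc_loss(target, output, target_length, output_length).shape
    (2,)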
    """
    if any_symbolic_tensors((target, output, target_length, output_length)):
        return CTCLoss(mask_index).symbolic_call(
            target, output, target_length, output_length
        )
    return backend.nn.ctc_loss(
        target, output, target_length, output_length, mask_index
    )


class CTCDecode(Operation):
    def __init__(
        self,
        strategy="greedy",
        beam_width=100,
        top_paths=1,
        merge_repeated=True,
        mask_index=0,
    ):
        super().__init__()
        self.strategy = strategy
        self.beam_width = beam_width
        self.top_paths = top_paths
        self.merge_repeated = merge_repeated
        self.mask_index = mask_index

    def call(self, inputs, sequence_lengths):
        return backend.nn.ctc_decode(
            inputs,
            sequence_lengths,
            strategy=self.strategy,
            beam_width=self.beam_width,
            top_paths=self.top_paths,
            merge_repeated=self.merge_repeated,
            mask_index=self.mask_index,
        )

    def compute_output_spec(self, inputs, sequence_lengths):
        inputs_shape = inputs.shape
        if self.strategy == "greedy":
            top_paths = 1
        else:
            top_paths = self.top_paths
        dtype = backend.result_type(inputs.dtype, "float32")
        return (
            KerasTensor(
                (top_paths, inputs_shape[0], inputs_shape[1]), dtype="int32"
            ),
            KerasTensor((inputs_shape[0], top_paths), dtype=dtype),
        )


@keras_export(["keras.ops.ctc_decode", "keras.ops.nn.ctc_decode"])
def ctc_decode(
    inputs,
    sequence_lengths,
    strategy="greedy",
    beam_width=100,
    top_paths=1,
    merge_repeated=True,
    mask_index=0,
):
    """Decodes the output of a CTC model.

    Args:
        inputs: A tensor of shape `(batch_size, max_length, num_classes)`
            containing the logits (the output of the model).
            They should *not* be normalized via softmax.
        sequence_lengths: A tensor of shape `(batch_size,)` containing the
            sequence lengths for the batch.
        strategy: A string for the decoding strategy. Supported values are
            `"greedy"` and `"beam_search"`.
        beam_width: An integer scalar beam width used in beam search.
            Defaults to 100.
        top_paths: An integer scalar, the number of top paths to return.
            Defaults to 1.
        merge_repeated: A boolean scalar, whether to merge repeated
            labels in the output. Defaults to `True`.
        mask_index: An integer scalar, the index of the mask character in
            the vocabulary. Defaults to `0`.

    Returns:
        A tuple containing:
        - The tensor representing the list of decoded sequences. If
            `strategy="greedy"`, the shape is `(1, batch_size, max_length)`. If
            `strategy="beam_search"`, the shape is
            `(top_paths, batch_size, max_length)`. Note that: `-1` indicates the
            blank label.
        - If `strategy="greedy"`, a tensor of shape `(batch_size, 1)`
            representing the negative of the sum of the probability logits for
            each sequence. If `strategy="beam_search"`, a tensor of shape
            `(batch_size, top_paths)` representing the log probability for each
            sequence.
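
    Example:

    >>> # Illustrative greedy-decoding sketch with assumed shapes; the
    >>> # logits are random, so only the output shapes are shown.
    >>> import numpy as np
    >>> inputs = np.random.rand(2, 7, 5).astype("float32")
    >>> sequence_lengths = np.array([7, 5], dtype="int32")
    >>> decoded, scores = keras.ops.ctc_decode(
    ...     inputs, sequence_lengths, strategy="greedy"
    ... )
    >>> decoded.shape, scores.shape
    ((1, 2, 7), (2, 1))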
    """
    if any_symbolic_tensors((inputs, sequence_lengths)):
        return CTCDecode(
            strategy=strategy,
            beam_width=beam_width,
            top_paths=top_paths,
            merge_repeated=merge_repeated,
            mask_index=mask_index,
        ).symbolic_call(inputs, sequence_lengths)
    return backend.nn.ctc_decode(
        inputs,
        sequence_lengths,
        strategy=strategy,
        beam_width=beam_width,
        top_paths=top_paths,
        merge_repeated=merge_repeated,
        mask_index=mask_index,
    )


class Normalize(Operation):
    def __init__(self, axis=-1, order=2, epsilon=None):
        super().__init__()
        self.axis = axis
        self.order = order
        self.epsilon = epsilon

    def compute_output_spec(self, x):
        return KerasTensor(shape=x.shape)

    def call(self, x):
        return _normalize(
            x, axis=self.axis, order=self.order, epsilon=self.epsilon
        )


@keras_export(["keras.ops.normalize", "keras.ops.nn.normalize"])
def normalize(x, axis=-1, order=2, epsilon=None):
    """Normalizes `x` over the specified axis.

    It is defined as: `normalize(x) = x / max(norm(x), epsilon)`.

    Args:
        x: Input tensor.
        axis: The axis or axes along which to perform normalization.
            Defaults to `-1`.
        order: The exponent value in the norm formulation.
            Defaults to 2.
        epsilon: A lower bound value for the norm.
            Defaults to `backend.epsilon()`.

    Returns:
        The normalized array.

    Example:

    >>> x = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
    >>> x_norm = keras.ops.normalize(x)
    >>> print(x_norm)
    array([[0.26726124 0.5345225  0.8017837 ]
           [0.45584232 0.5698029  0.68376344]], shape=(2, 3), dtype=float32)

    """
    if any_symbolic_tensors((x,)):
        return Normalize(
            axis=axis, order=order, epsilon=epsilon
        ).symbolic_call(x)
    return _normalize(x, axis=axis, order=order, epsilon=epsilon)


def _normalize(x, axis=-1, order=2, epsilon=None):
    if not isinstance(order, int) or not order >= 1:
        raise ValueError(
            f"Argument `order` must be an int >= 1. Received: order={order}"
        )
    x = backend.convert_to_tensor(x)
    if len(x.shape) == 0:
        x = backend.numpy.expand_dims(x, axis=0)
    if epsilon is None:
        epsilon = backend.epsilon()
    if 2 == order:
        # A special case: L2 normalization with `x * rsqrt(...)`
        # instead of `x / sqrt(...)`.
        square_sum = backend.numpy.sum(
            backend.numpy.square(x), axis=axis, keepdims=True
        )
        inv_norm = backend.math.rsqrt(square_sum)
        # Apply a lower bound so we never divide by zero.
        inv_norm = backend.numpy.minimum(inv_norm, 1.0 / epsilon)
        return x * inv_norm
    norm = backend.linalg.norm(x, ord=order, axis=axis, keepdims=True)
    denom = backend.numpy.maximum(norm, epsilon)
    return backend.numpy.divide(x, denom)


class PSNR(Operation):
    def __init__(self, max_val):
        super().__init__()
        self.max_val = max_val

    def call(self, x1, x2):
        return backend.nn.psnr(x1=x1, x2=x2, max_val=self.max_val)

    def compute_output_spec(self, x1, x2):
        if len(x1.shape) != len(x2.shape):
            raise ValueError("Inputs must have the same rank")
        return KerasTensor(shape=())


@keras_export(["keras.ops.psnr", "keras.ops.nn.psnr"])
def psnr(x1, x2, max_val):
    """Peak Signal-to-Noise Ratio (PSNR) function.

    This function computes the Peak Signal-to-Noise Ratio between two signals,
    `x1` and `x2`. PSNR is a measure of the quality of a reconstructed signal.
    The higher the PSNR, the closer the reconstructed signal is to the original
    signal. Note that it can become negative when the signal power is
    smaller than the noise power.

    Args:
        x1: The first input signal.
        x2: The second input signal. Must have the same shape as `x1`.
        max_val: The maximum possible value in the signals.

    Returns:
        float: The PSNR value between `x1` and `x2`.

    Examples:

    >>> x1 = keras.random.normal((2, 4, 4, 3))
    >>> x2 = keras.random.normal((2, 4, 4, 3))
    >>> max_val = 1.0
    >>> keras.ops.nn.psnr(x1, x2, max_val)
    -3.1697404
    """
    if any_symbolic_tensors((x1, x2)):
        return PSNR(max_val).symbolic_call(x1, x2)
    return backend.nn.psnr(x1, x2, max_val)


class DotProductAttention(Operation):
    def __init__(self, is_causal=False):
        super().__init__()
        self.is_causal = is_causal

    def call(
        self,
        query,
        key,
        value,
        bias=None,
        mask=None,
        scale=None,
        flash_attention=None,
        attn_logits_soft_cap=None,
    ):
        return backend.nn.dot_product_attention(
            query,
            key,
            value,
            bias=bias,
            mask=mask,
            scale=scale,
            is_causal=self.is_causal,
            flash_attention=flash_attention,
            attn_logits_soft_cap=attn_logits_soft_cap,
        )

    def compute_output_spec(
        self,
        query,
        key,
        value,
        bias=None,
        mask=None,
        scale=None,
        flash_attention=None,
        attn_logits_soft_cap=None,
    ):
        return KerasTensor(query.shape, dtype=query.dtype)


@keras_export(
    ["keras.ops.dot_product_attention", "keras.ops.nn.dot_product_attention"]
)
def dot_product_attention(
    query,
    key,
    value,
    bias=None,
    mask=None,
    scale=None,
    is_causal=False,
    flash_attention=None,
    attn_logits_soft_cap=None,
):
    """Scaled dot product attention function.

    Computes the attention function on Q (`query`), K (`key`), and V (`value`):
    `attention(Q, K, V) = softmax(Q * K / sqrt(d)) * V`. If we define `logits`
    as the output of `Q * K` and the `probs` as the output of `softmax`.

    Throughout this function, we utilize the following notation to represent the
    shapes of the arrays:
    - B: batch size
    - S: length of the key/value
    - T: length of the query
    - N: number of attention heads
    - H: dimensions of each attention head
    - K: number of key/value heads
    - G: number of groups, which equals `N // K`

    Args:
        query: The query array with the shape of `(B, T, N, H)`.
        key: The key array with the shape of `(B, S, K, H)`. When `K` equals
            `N`, multi-headed attention (MHA) is performed. Otherwise, grouped
            query attention (GQA) is performed if `N` is a multiple of `K`, and
            multi-query attention (MQA) is performed if `K==1` (a special case
            of GQA).
        value: The value array with the same shape of `key`.
        bias: Optional bias array to be added to logits. The shape must be
            broadcastable to `(B, N, T, S)`.
        mask: Optional mask array used to filter out logits. It is a boolean
            mask where `True` indicates the element should take part in
            attention. For an additive mask, users should pass it to bias. The
            shape must be broadcastable to `(B, N, T, S)`.
        scale: Optional scale for the logits. If `None`, the scale will be set
            to `1.0 / sqrt(H)`.
        is_causal: Whether to apply causal mask.
        flash_attention: Whether to use flash attention. If `None`, it will
            attempt to use flash attention if the required conditions are met.
            Typically, the inputs must be in float16 and bfloat16 dtype and the
            input layout requirements may vary depending on the backend.
        attn_logits_soft_cap: The value limit for maximum value of the
            attention logits before the softmax function is applied. This is
            only supported in JAX TPU backend. Defaults to None.

    Returns:
        An array of the attention output with the same shape of `query`.

    Example:

    >>> query = keras.random.normal((2, 4, 8, 16))
    >>> key = keras.random.normal((2, 6, 8, 16))
    >>> value = keras.random.normal((2, 6, 8, 16))
    >>> keras.ops.nn.dot_product_attention(query, key, value).shape
    (2, 4, 8, 16)
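
    Grouped query attention follows the same call; with `N=8` query heads
    and `K=2` key/value heads, every group of 4 query heads attends with
    one shared key/value head (illustrative shapes only):

    >>> query = keras.random.normal((2, 4, 8, 16))
    >>> key = keras.random.normal((2, 6, 2, 16))
    >>> value = keras.random.normal((2, 6, 2, 16))
    >>> keras.ops.nn.dot_product_attention(query, key, value).shape
    (2, 4, 8, 16)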
    """
    if attn_logits_soft_cap is not None:
        if backend.backend() == "jax":
            import jax

            if jax.devices()[0].platform != "tpu":
                raise ValueError(
                    "attn_logits_soft_cap is only supported for JAX on TPU. "
                    "Set attn_logits_soft_cap=None when not using JAX on TPU."
                )
        else:
            raise ValueError(
                "attn_logits_soft_cap is only supported for JAX on TPU. "
                "Set attn_logits_soft_cap=None when not using JAX on TPU."
            )
    if any_symbolic_tensors((query, key, value)):
        return DotProductAttention(is_causal=is_causal).symbolic_call(
            query,
            key,
            value,
            bias=bias,
            mask=mask,
            scale=scale,
            flash_attention=flash_attention,
            attn_logits_soft_cap=attn_logits_soft_cap,
        )
    return backend.nn.dot_product_attention(
        query,
        key,
        value,
        bias=bias,
        mask=mask,
        scale=scale,
        is_causal=is_causal,
        flash_attention=flash_attention,
        attn_logits_soft_cap=attn_logits_soft_cap,
    )


class RMSNorm(Operation):
    def __init__(self, scale=1, axis=-1, epsilon=None):
        super().__init__()
        self.axis = axis
        self.scale = scale
        self.epsilon = epsilon

    def compute_output_spec(self, x):
        return KerasTensor(shape=x.shape)

    def call(self, x):
        return _rms_normalization(
            x, scale=self.scale, axis=self.axis, epsilon=self.epsilon
        )


@keras_export(
    ["keras.ops.rms_normalization", "keras.ops.nn.rms_normalization"]
)
def rms_normalization(x, scale=1, axis=-1, epsilon=None):
    """Performs Root Mean Square (RMS) normalization on `x`.

    The Keras operation implements the operation as described in
    [Root Mean Square Layer Normalization](https://arxiv.org/pdf/1910.07467)
    by Biao Zhang et al.

    The operation is different from LayerNormalization with RMS scaling.

    It is defined as `rms_normalization(x) = x * rsqrt(mean(square(x))) * scale`

    Args:
        x: Input tensor.
        axis: The axis or axes along which to perform normalization.
            Defaults to `-1`.
        scale: Optional scaling factor for the normalization.
        epsilon: A lower bound value for the norm.
            Defaults to `backend.epsilon()`.

    Returns:
        The normalized array.

    Example:

    >>> x = np.random.rand(1, 10)
    >>> x_norm = keras.ops.rms_normalization(x)
    >>> print(x_norm)
    array([[0.69384296, 0.94444374, 0.16551171, 0.05749961, 1.11008865,
        0.52475186, 1.57686807, 1.69893307, 1.27292764, 0.30819128]])
    """
    if any_symbolic_tensors((x,)):
        return RMSNorm(
            scale=scale, axis=axis, epsilon=epsilon
        ).symbolic_call(x)
    return _rms_normalization(x, scale=scale, axis=axis, epsilon=epsilon)


def _rms_normalization(x, scale=1, axis=-1, epsilon=None):
    x = backend.convert_to_tensor(x)
    if len(x.shape) == 0:
        x = backend.numpy.expand_dims(x, axis=0)
    if epsilon is None:
        epsilon = backend.epsilon()
    if not is_keras_tensor(scale):
        scale = backend.convert_to_tensor(scale, dtype=x.dtype)
    if not is_keras_tensor(epsilon):
        epsilon = backend.convert_to_tensor(epsilon, dtype=x.dtype)
    rrms = backend.math.rsqrt(
        backend.numpy.mean(backend.numpy.square(x), axis=axis, keepdims=True)
        + epsilon
    )
    return (x * rrms) * scale


class Polar(Operation):
    def __init__(self):
        super().__init__()

    def compute_output_spec(self, abs_, angle):
        return KerasTensor(shape=abs_.shape)

    def call(self, abs_, angle):
        return _polar(abs_, angle)


@keras_export(["keras.ops.polar", "keras.ops.nn.polar"])
def polar(abs_, angle):
    """Constructs a complex tensor whose elements are Cartesian
    coordinates corresponding to the polar coordinates
    with absolute value `abs` and angle `angle`.

    The operation is numerically equivalent to `torch.polar()`.
    It is not equivalent to `scipy.linalg.polar()` which performs
    Singular Value Decomposition.

    Given the magnitude (`abs_`) and angle (`angle`), this function computes the
    corresponding complex number in the form of `real + imaginary * 1j`, where:
    - `real = abs_ * cos(angle)`
    - `imaginary = abs_ * sin(angle)`

    Args:
        abs_: The magnitude (absolute value) of the complex number.
        angle: The angle (in radians) of the complex number.

    Returns:
        A complex number (or array of complex numbers) with the same shape as
        `abs_` and `angle`.

    Example:

    >>> abs_ = keras.random.normal((1, 2))
    >>> angle = keras.random.normal((1, 2))
    >>> keras.ops.nn.polar(abs_, angle).shape
    (1, 2)
    >>> keras.ops.nn.polar(abs_, angle)
    Array([[0.63185346-0.59370506j, 0.48960376-0.31677645j]], dtype=complex64)
    """
    if any_symbolic_tensors((abs_, angle)):
        return Polar().symbolic_call(abs_, angle)
    return _polar(abs_, angle)


def _polar(abs_, angle):
    """Internal implementation of the polar function.

    Args:
        abs_: The magnitude (absolute value) of the complex number.
        angle: The angle (in radians) of the complex number.

    Returns:
        A complex number (or array of complex numbers) with the same shape as
        `abs_` and `angle`.
    """
    abs_ = backend.convert_to_tensor(abs_)
    angle = backend.convert_to_tensor(angle)
    real = abs_ * backend.numpy.cos(angle)
    imaginary = abs_ * backend.numpy.sin(angle)
    result = backend.math._get_complex_tensor_from_tuple((real, imaginary))
    return result