
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export


@keras_export("keras.activations.relu")
def relu(x, negative_slope=0.0, max_value=None, threshold=0.0):
    """Applies the rectified linear unit activation function.

    With default values, this returns the standard ReLU activation:
    `max(x, 0)`, the element-wise maximum of 0 and the input tensor.

    Modifying default parameters allows you to use non-zero thresholds,
    change the max value of the activation,
    and to use a non-zero multiple of the input for values below the threshold.

    Examples:

    >>> x = [-10, -5, 0.0, 5, 10]
    >>> keras.activations.relu(x)
    [ 0.,  0.,  0.,  5., 10.]
    >>> keras.activations.relu(x, negative_slope=0.5)
    [-5. , -2.5,  0. ,  5. , 10. ]
    >>> keras.activations.relu(x, max_value=5.)
    [0., 0., 0., 5., 5.]
    >>> keras.activations.relu(x, threshold=5.)
    [-0., -0.,  0.,  0., 10.]

    Args:
        x: Input tensor.
        negative_slope: A `float` that controls the slope
            for values lower than the threshold.
        max_value: A `float` that sets the saturation threshold (the largest
            value the function will return).
        threshold: A `float` giving the threshold value of the activation
            function below which values will be damped or set to zero.

    Returns:
        A tensor with the same shape and dtype as input `x`.
    """
    if backend.any_symbolic_tensors((x,)):
        return ReLU(
            negative_slope=negative_slope,
            max_value=max_value,
            threshold=threshold,
        )(x)
    return ReLU.static_call(
        x,
        negative_slope=negative_slope,
        max_value=max_value,
        threshold=threshold,
    )


class ReLU(ops.Operation):
    def __init__(
        self, negative_slope=0.0, max_value=None, threshold=0.0, name=None
    ):
        super().__init__(name=name)
        self.negative_slope = negative_slope
        self.max_value = max_value
        self.threshold = threshold

    def call(self, x):
        return self.static_call(
            x,
            negative_slope=self.negative_slope,
            max_value=self.max_value,
            threshold=self.threshold,
        )

    def compute_output_spec(self, x):
        return backend.KerasTensor(x.shape, x.dtype)

    @staticmethod
    def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
        x = backend.convert_to_tensor(x)
        if negative_slope != 0.0:
            if max_value is None and threshold == 0:
                return backend.nn.leaky_relu(
                    x, negative_slope=negative_slope
                )
            if threshold != 0:
                negative_part = backend.nn.relu(-x + threshold)
            else:
                negative_part = backend.nn.relu(-x)

        clip_max = max_value is not None
        if threshold != 0:
            # Computes x for x > threshold, else 0.
            threshold = ops.cast(threshold, dtype=x.dtype)
            x = x * backend.cast(
                backend.numpy.greater(x, threshold), dtype=x.dtype
            )
        elif max_value == 6:
            # If no threshold, the native relu6 op can be used for performance.
            x = backend.nn.relu6(x)
            clip_max = False
        else:
            x = backend.nn.relu(x)

        if clip_max:
            min_value = ops.cast(0.0, dtype=x.dtype)
            max_value = ops.cast(max_value, dtype=x.dtype)
            x = ops.clip(x, min_value, max_value)

        if negative_slope != 0.0:
            x -= negative_slope * negative_part
        return x


@keras_export("keras.activations.leaky_relu")
def leaky_relu(x, negative_slope=0.2):
    """Leaky relu activation function.

    Args:
        x: Input tensor.
        negative_slope: A `float` that controls the slope
            for values lower than the threshold.
    """
    return ops.leaky_relu(x, negative_slope=negative_slope)


@keras_export("keras.activations.relu6")
def relu6(x):
    """Relu6 activation function.

    It's the ReLU function, but truncated to a maximum value of 6.
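
    Example (illustrative; the output is shown as a plain rounded list, in
    the style of the `relu` docstring, rather than as a backend tensor):

    >>> x = [-3.0, 1.0, 5.0, 8.0]
    >>> keras.activations.relu6(x)
    [0., 1., 5., 6.]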

    Args:
        x: Input tensor.
    """
    return ops.relu6(x)


@keras_export("keras.activations.softmax")
def softmax(x, axis=-1):
    """Softmax converts a vector of values to a probability distribution.

    The elements of the output vector are in range `[0, 1]` and sum to 1.

    Each input vector is handled independently.
    The `axis` argument sets which axis of the input the function
    is applied along.

    Softmax is often used as the activation for the last
    layer of a classification network because the result could be interpreted as
    a probability distribution.

    The softmax of each vector x is computed as
    `exp(x) / sum(exp(x))`.

    The input values are the log-odds of the resulting probabilities.
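
    Example (illustrative; output shown as a plain list):

    >>> keras.activations.softmax([0.0, 0.0, 0.0, 0.0])
    [0.25, 0.25, 0.25, 0.25]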

    Args:
        x: Input tensor.
        axis: Integer, axis along which the softmax is applied.
    """
    output = ops.softmax(x, axis=axis)
    # Cache the logits to use for crossentropy loss.
    try:
        output._keras_logits = x
    except AttributeError:
        # We're dealing with a C-type.
        pass
    return output


@keras_export("keras.activations.elu")
def elu(x, alpha=1.0):
    """Exponential Linear Unit.

    The exponential linear unit (ELU) with `alpha > 0` is defined as:

    - `x` if `x > 0`
    - `alpha * (exp(x) - 1)` if `x < 0`

    ELUs have negative values which pushes the mean of the activations
    closer to zero.

    Mean activations that are closer to zero enable faster learning as they
    bring the gradient closer to the natural gradient.
    ELUs saturate to a negative value when the argument gets smaller.
    Saturation means a small derivative which decreases the variation
    and the information that is propagated to the next layer.
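
    Example (illustrative; for very negative inputs the output saturates to
    `-alpha`, and values are shown as a plain rounded list):

    >>> keras.activations.elu([-1000.0, 0.0, 3.0])
    [-1., 0., 3.]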

    Args:
        x: Input tensor.
        alpha: A scalar, slope of the negative section. Defaults to `1.0`.

    Reference:

    - [Clevert et al., 2016](https://arxiv.org/abs/1511.07289)
    """
    return ops.elu(x, alpha=alpha)


@keras_export("keras.activations.selu")
def selu(x):
    """Scaled Exponential Linear Unit (SELU).

    The Scaled Exponential Linear Unit (SELU) activation function is defined as:

    - `scale * x` if `x > 0`
    - `scale * alpha * (exp(x) - 1)` if `x < 0`

    where `alpha` and `scale` are pre-defined constants
    (`alpha=1.67326324` and `scale=1.05070098`).

    Basically, the SELU activation function multiplies `scale` (> 1) with the
    output of the `keras.activations.elu` function to ensure a slope larger
    than one for positive inputs.

    The values of `alpha` and `scale` are
    chosen so that the mean and variance of the inputs are preserved
    between two consecutive layers as long as the weights are initialized
    correctly (see `keras.initializers.LecunNormal` initializer)
    and the number of input units is "large enough"
    (see reference paper for more information).

    Args:
        x: Input tensor.

    Notes:

    - To be used together with the
        `keras.initializers.LecunNormal` initializer.
    - To be used together with the dropout variant
        `keras.layers.AlphaDropout` (rather than regular dropout).

    Reference:

    - [Klambauer et al., 2017](https://arxiv.org/abs/1706.02515)
    """
    return ops.selu(x)


@keras_export("keras.activations.softplus")
def softplus(x):
    """Softplus activation function.

    It is defined as: `softplus(x) = log(exp(x) + 1)`.
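
    Example (illustrative; values are approximate and shown as a plain list):

    >>> keras.activations.softplus([-1000.0, 0.0, 1000.0])
    [0., 0.6931, 1000.]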

    Args:
        x: Input tensor.
    """
    return ops.softplus(x)


@keras_export("keras.activations.softsign")
def softsign(x):
    """Softsign activation function.

    Softsign is defined as: `softsign(x) = x / (abs(x) + 1)`.
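
    Example (illustrative; output shown as a plain list):

    >>> keras.activations.softsign([-1.0, 0.0, 1.0, 9.0])
    [-0.5, 0., 0.5, 0.9]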

    Args:
        x: Input tensor.
    """
    return ops.softsign(x)


@keras_export("keras.activations.soft_shrink")
def soft_shrink(x, threshold=0.5):
    """Soft Shrink activation function.

    It is defined as:

    `soft_shrink(x) = x - threshold` if `x > threshold`,
    `soft_shrink(x) = x + threshold` if `x < -threshold`,
    `soft_shrink(x) = 0` otherwise.
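
    Example (illustrative, using the default `threshold=0.5`; output shown
    as a plain list):

    >>> keras.activations.soft_shrink([-1.0, -0.5, 0.0, 0.5, 1.0])
    [-0.5, 0., 0., 0., 0.5]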

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.

    """
    return ops.soft_shrink(x, threshold=threshold)


@keras_export("keras.activations.sparse_plus")
def sparse_plus(x):
    """SparsePlus activation function.

    SparsePlus is defined as:

    `sparse_plus(x) = 0` for `x <= -1`.
    `sparse_plus(x) = (1/4) * (x + 1)^2` for `-1 < x < 1`.
    `sparse_plus(x) = x` for `x >= 1`.
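
    Example (illustrative; output shown as a plain list):

    >>> keras.activations.sparse_plus([-2.0, 0.0, 1.0, 3.0])
    [0., 0.25, 1., 3.]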

    Args:
        x: Input tensor.

    """
    return ops.sparse_plus(x)


@keras_export(["keras.activations.silu", "keras.activations.swish"])
def silu(x):
    """Swish (or Silu) activation function.

    It is defined as: `swish(x) = x * sigmoid(x)`.

    The Swish (or Silu) activation function is a smooth,
    non-monotonic function that is unbounded above and
    bounded below.

    Args:
        x: Input tensor.

    Reference:

    - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)
    """
    return ops.silu(x)


@keras_export("keras.activations.squareplus")
def squareplus(x, b=4):
    """Squareplus activation function.

    The Squareplus activation function is defined as:

    `f(x) = (x + sqrt(x^2 + b)) / 2`

    Where `b` is a smoothness parameter.
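
    Example (illustrative, using the default `b=4`; output shown as a
    plain list):

    >>> keras.activations.squareplus([-1.5, 0.0, 1.5])
    [0.5, 1., 2.]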

    Args:
        x: Input tensor.
        b: Smoothness parameter. Defaults to 4.

    Reference:

    - [Ramachandran et al., 2021](https://arxiv.org/abs/2112.11687)
    """
    return ops.squareplus(x, b=b)


@keras_export("keras.activations.gelu")
def gelu(x, approximate=False):
    """Gaussian error linear unit (GELU) activation function.

    The Gaussian error linear unit (GELU) is defined as:

    `gelu(x) = x * P(X <= x)` where `P(X) ~ N(0, 1)`,
    i.e. `gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))`.

    GELU weights inputs by their value, rather than gating
    inputs by their sign as in ReLU.

    Args:
        x: Input tensor.
        approximate: A `bool`, whether to enable approximation.

    Reference:

    - [Hendrycks et al., 2016](https://arxiv.org/abs/1606.08415)
    """
    return ops.gelu(x, approximate=approximate)


@keras_export("keras.activations.celu")
def celu(x, alpha=1.0):
    """Continuously Differentiable Exponential Linear Unit.

    The CeLU activation function is defined as:

    `celu(x) = alpha * (exp(x / alpha) - 1)` for `x < 0`,
    `celu(x) = x` for `x >= 0`.

    where `alpha` is a scaling parameter that controls the activation's shape.

    Args:
        x: Input tensor.
        alpha: The α value for the CeLU formulation. Defaults to `1.0`.

    Reference:

    - [Barron, J. T., 2017](https://arxiv.org/abs/1704.07483)
    """
    return ops.celu(x, alpha=alpha)


@keras_export("keras.activations.glu")
def glu(x, axis=-1):
    """Gated Linear Unit (GLU) activation function.

    The GLU activation function is defined as:

    `glu(x) = a * sigmoid(b)`,

    where `x` is split into two equal parts `a` and `b` along the given axis.
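
    Example (illustrative; the four inputs split into `a = [1., 2.]` and
    `b = [0., 0.]`, and the output is shown as a plain list):

    >>> keras.activations.glu([1.0, 2.0, 0.0, 0.0])
    [0.5, 1.]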

    Args:
        x: Input tensor.
        axis: The axis along which to split the input tensor. Defaults to `-1`.

    Reference:

    - [Dauphin et al., 2017](https://arxiv.org/abs/1612.08083)
    """
    return ops.glu(x, axis=axis)


@keras_export("keras.activations.tanh")
def tanh(x):
    """Hyperbolic tangent activation function.

    It is defined as:
    `tanh(x) = sinh(x) / cosh(x)`, i.e.
    `tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.

    Args:
        x: Input tensor.
    """
    return ops.tanh(x)


@keras_export("keras.activations.tanh_shrink")
def tanh_shrink(x):
    """Tanh shrink activation function.

    It is defined as:

    `f(x) = x - tanh(x)`.

    Args:
        x: Input tensor.
    """
    return ops.tanh_shrink(x)


@keras_export("keras.activations.hard_tanh")
def hard_tanh(x):
    """HardTanh activation function.

    It is defined as:
    `hard_tanh(x) = -1 for x < -1`,
    `hard_tanh(x) = x for -1 <= x <= 1`,
    `hard_tanh(x) = 1 for x > 1`.
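
    Example (illustrative; output shown as a plain list):

    >>> keras.activations.hard_tanh([-2.0, -0.5, 0.5, 2.0])
    [-1., -0.5, 0.5, 1.]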

    Args:
        x: Input tensor.
    """
    return ops.hard_tanh(x)


@keras_export("keras.activations.hard_shrink")
def hard_shrink(x, threshold=0.5):
    """Hard Shrink activation function.

    It is defined as:

    `hard_shrink(x) = x` if `|x| > threshold`,
    `hard_shrink(x) = 0` otherwise.
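
    Example (illustrative, using the default `threshold=0.5`; output shown
    as a plain list):

    >>> keras.activations.hard_shrink([-1.0, -0.5, 0.0, 0.3, 1.0])
    [-1., 0., 0., 0., 1.]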

    Args:
        x: Input tensor.
        threshold: Threshold value. Defaults to 0.5.

    """
    return ops.hard_shrink(x, threshold=threshold)


@keras_export("keras.activations.threshold")
def threshold(x, threshold, default_value):
    """Threshold activation function.

    It is defined as:

    `threshold(x) = x` if `x > threshold`,
    `threshold(x) = default_value` otherwise.
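
    Example (illustrative, with `threshold=2` and `default_value=0`; output
    shown as a plain list):

    >>> keras.activations.threshold([-1.0, 2.0, 3.0], 2, 0)
    [0., 0., 3.]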

    Args:
        x: Input tensor.
        threshold: The value that decides when to retain or replace x.
        default_value: Value to assign when `x <= threshold`.

    """
    return ops.threshold(x, threshold, default_value)


@keras_export("keras.activations.sigmoid")
def sigmoid(x):
    """Sigmoid activation function.

    It is defined as: `sigmoid(x) = 1 / (1 + exp(-x))`.

    For small values (<-5),
    `sigmoid` returns a value close to zero, and for large values (>5)
    the result of the function gets close to 1.

    Sigmoid is equivalent to a 2-element softmax, where the second element is
    assumed to be zero. The sigmoid function always returns a value between
    0 and 1.

    Args:
        x: Input tensor.
    """
    output = ops.sigmoid(x)
    # Cache the logits to use for crossentropy loss.
    try:
        output._keras_logits = x
    except AttributeError:
        # We're dealing with a C-type.
        pass
    return output


@keras_export("keras.activations.exponential")
def exponential(x):
    """Exponential activation function.

    Args:
        x: Input tensor.
    """
    return ops.exp(x)


@keras_export("keras.activations.hard_sigmoid")
def hard_sigmoid(x):
    """Hard sigmoid activation function.

    The hard sigmoid activation is defined as:

    - `0` if `x <= -3`
    - `1` if `x >= 3`
    - `(x/6) + 0.5` if `-3 < x < 3`

    It's a faster, piecewise linear approximation
    of the sigmoid activation.
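
    Example (illustrative; output shown as a plain list):

    >>> keras.activations.hard_sigmoid([-6.0, -3.0, 0.0, 1.5, 3.0, 6.0])
    [0., 0., 0.5, 0.75, 1., 1.]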

    Args:
        x: Input tensor.

    Reference:

    - [Wikipedia "Hard sigmoid"](https://en.wikipedia.org/wiki/Hard_sigmoid)
    """
    return ops.hard_sigmoid(x)


@keras_export("keras.activations.log_sigmoid")
def log_sigmoid(x):
    """Logarithm of the sigmoid activation function.

    It is defined as `f(x) = log(1 / (1 + exp(-x)))`.

    Args:
        x: Input tensor.

    """
    return ops.log_sigmoid(x)


@keras_export("keras.activations.sparse_sigmoid")
def sparse_sigmoid(x):
    """Sparse sigmoid activation function.

    It is defined as:

    `f(x) = 0` for `x <= -1`,
    `f(x) = 0.5 * (x + 1)` for `-1 < x < 1`,
    `f(x) = 1` for `x >= 1`.
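
    Example (illustrative; output shown as a plain list):

    >>> keras.activations.sparse_sigmoid([-2.0, -1.0, 0.0, 0.5, 1.0, 2.0])
    [0., 0., 0.5, 0.75, 1., 1.]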

    Args:
        x: Input tensor.

    Reference:

    - [M. Blondel, A. F. T. Martins, V. Niculae, 2019](https://arxiv.org/pdf/1901.02324)

    """
    return ops.sparse_sigmoid(x)


@keras_export(["keras.activations.hard_silu", "keras.activations.hard_swish"])
def hard_silu(x):
    """Hard SiLU activation function, also known as Hard Swish.

    It is defined as:

    - `0` if `x < -3`
    - `x` if `x > 3`
    - `x * (x + 3) / 6` if `-3 <= x <= 3`

    It's a faster, piecewise linear approximation of the silu activation.
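
    Example (illustrative; output shown as a plain rounded list):

    >>> keras.activations.hard_silu([-4.0, -1.5, 0.0, 1.5, 4.0])
    [0., -0.375, 0., 1.125, 4.]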

    Args:
        x: Input tensor.

    Reference:

    - [A Howard, 2019](https://arxiv.org/abs/1905.02244)
    """
    x = backend.convert_to_tensor(x)
    return ops.hard_silu(x)


@keras_export("keras.activations.linear")
def linear(x):
    """Linear activation function (pass-through).

    A "linear" activation is an identity function:
    it returns the input, unmodified.

    Args:
        x: Input tensor.
    """
    return x


class Mish(ops.Operation):
    def call(self, x):
        return self.static_call(x)

    def compute_output_spec(self, x):
        return backend.KerasTensor(x.shape, x.dtype)

    @staticmethod
    def static_call(x):
        return x * backend.nn.tanh(backend.nn.softplus(x))


@keras_export("keras.activations.mish")
def mish(x):
    """Mish activation function.

    It is defined as:

    `mish(x) = x * tanh(softplus(x))`

    where `softplus` is defined as:

    `softplus(x) = log(exp(x) + 1)`

    Args:
        x: Input tensor.

    Reference:

    - [Misra, 2019](https://arxiv.org/abs/1908.08681)
    """
    x = backend.convert_to_tensor(x)
    return Mish.static_call(x)


@keras_export("keras.activations.log_softmax")
def log_softmax(x, axis=-1):
    """Log-Softmax activation function.

    Each input vector is handled independently.
    The `axis` argument sets which axis of the input the function
    is applied along.

    Args:
        x: Input tensor.
        axis: Integer, axis along which the softmax is applied.
    """
    return ops.log_softmax(x, axis=axis)


@keras_export("keras.activations.sparsemax")
def sparsemax(x, axis=-1):
    """Sparsemax activation function.

    For each batch `i`, and class `j`,
    sparsemax activation function is defined as:

    `sparsemax(x)[i, j] = max(x[i, j] - τ(x[i, :]), 0).`
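
    Example (illustrative; only the largest logit survives here, and the
    output is shown as a plain list):

    >>> keras.activations.sparsemax([1.0, 2.0, 3.0])
    [0., 0., 1.]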

    Args:
        x: Input tensor.
        axis: `int`, axis along which the sparsemax operation is applied.

    Returns:
        A tensor, output of sparsemax transformation. Has the same type and
        shape as `x`.

    Reference:

    - [Martins et al., 2016](https://arxiv.org/abs/1602.02068)
    """
    x = backend.convert_to_tensor(x)
    return ops.sparsemax(x, axis=axis)