
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export(["keras.optimizers.Adam"])
class Adam(optimizer.Optimizer):
    """Optimizer that implements the Adam algorithm.

    Adam optimization is a stochastic gradient descent method that is based on
    adaptive estimation of first-order and second-order moments.

    According to
    [Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
    the method is "*computationally
    efficient, has little memory requirement, invariant to diagonal rescaling of
    gradients, and is well suited for problems that are large in terms of
    data/parameters*".
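
    As a rough scalar sketch of the update applied by `update_step` below
    (using the "epsilon hat" formulation; `w` is a parameter, `g` its
    gradient, `m` and `v` the first- and second-moment accumulators
    initialized to zero, and `t` the 1-based step count -- these names are
    illustrative, not part of the API):

    ```python
    m = beta_1 * m + (1 - beta_1) * g
    v = beta_2 * v + (1 - beta_2) * g**2
    alpha = learning_rate * (1 - beta_2**t) ** 0.5 / (1 - beta_1**t)
    w = w - alpha * m / (v**0.5 + epsilon)
    ```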

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        beta_1: A float value or a constant float tensor, or a callable
            that takes no arguments and returns the actual value to use. The
            exponential decay rate for the 1st moment estimates. Defaults to
            `0.9`.
        beta_2: A float value or a constant float tensor, or a callable
            that takes no arguments and returns the actual value to use. The
            exponential decay rate for the 2nd moment estimates. Defaults to
            `0.999`.
        epsilon: A small constant for numerical stability. This epsilon is
            "epsilon hat" in the Kingma and Ba paper (in the formula just before
            Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
            to `1e-7`.
        amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
            from the paper "On the Convergence of Adam and beyond". Defaults
            to `False`.
        {{base_optimizer_keyword_args}}
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="adam",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            **kwargs,
        )
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.amsgrad = amsgrad

    def build(self, var_list):
        """Initialize optimizer variables.

        Adam optimizer has 3 types of variables: momentums, velocities and
        velocity_hat (only set when amsgrad is applied).

        Args:
            var_list: list of model variables to build Adam variables on.
        """
        if self.built:
            return
        super().build(var_list)
        self._momentums, self._velocities = self.add_optimizer_variables(
            var_list, ["momentum", "velocity"]
        )
        if self.amsgrad:
            self._velocity_hats = self.add_optimizer_variables(
                var_list, "velocity_hat"
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        lr = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)
        local_step = ops.cast(self.iterations + 1, variable.dtype)
        beta_1_power = ops.power(
            ops.cast(self.beta_1, variable.dtype), local_step
        )
        beta_2_power = ops.power(
            ops.cast(self.beta_2, variable.dtype), local_step
        )

        m = self._momentums[self._get_variable_index(variable)]
        v = self._velocities[self._get_variable_index(variable)]

        # Bias-corrected step size ("alpha_t" in Kingma & Ba).
        alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)

        # Update the biased first and second moment estimates in place.
        self.assign_add(
            m, ops.multiply(ops.subtract(gradient, m), 1 - self.beta_1)
        )
        self.assign_add(
            v,
            ops.multiply(
                ops.subtract(ops.square(gradient), v), 1 - self.beta_2
            ),
        )
        if self.amsgrad:
            # AMSGrad: divide by the running maximum of the second moment
            # estimate instead of the current estimate.
            v_hat = self._velocity_hats[self._get_variable_index(variable)]
            self.assign(v_hat, ops.maximum(v_hat, v))
            v = v_hat
        self.assign_sub(
            variable,
            ops.divide(
                ops.multiply(m, alpha), ops.add(ops.sqrt(v), self.epsilon)
            ),
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "beta_1": self.beta_1,
                "beta_2": self.beta_2,
                "epsilon": self.epsilon,
                "amsgrad": self.amsgrad,
            }
        )
        return config


Adam.__doc__ = Adam.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
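
# Illustrative usage sketch (comment only, not executed as part of this
# module; assumes the standard public Keras API):
#
#     import keras
#
#     model = keras.Sequential([keras.layers.Dense(1)])
#     model.compile(
#         optimizer=keras.optimizers.Adam(learning_rate=1e-3), loss="mse"
#     )
#     # model.fit(x, y) would then apply the Adam update defined above.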