
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
    """Gradient descent (with momentum) optimizer.

    Update rule for parameter `w` with gradient `g` when `momentum` is 0:

    ```python
    w = w - learning_rate * g
    ```

    Update rule when `momentum` is larger than 0:

    ```python
    velocity = momentum * velocity - learning_rate * g
    w = w + velocity
    ```

    When `nesterov=True`, this rule becomes:

    ```python
    velocity = momentum * velocity - learning_rate * g
    w = w + momentum * velocity - learning_rate * g
    ```
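
    For concreteness, here is an illustrative one-step trace of the Nesterov
    form (the values are arbitrary; note that `velocity` on the second line
    is the freshly updated value from the line above):

    ```python
    # momentum = 0.9, learning_rate = 0.1, g = 1.0, starting from w = 1.0
    velocity = 0.9 * 0.0 - 0.1 * 1.0      # -0.1 (velocity starts at 0)
    w = 1.0 + 0.9 * velocity - 0.1 * 1.0  # 0.81
    ```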

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.01`.
        momentum: float hyperparameter >= 0 that accelerates gradient descent in
            the relevant direction and dampens oscillations. 0 is vanilla
            gradient descent. Defaults to `0.0`.
        nesterov: boolean. Whether to apply Nesterov momentum.
            Defaults to `False`.
        {{base_optimizer_keyword_args}}
    """

    def __init__(
        self,
        learning_rate=0.01,
        momentum=0.0,
        nesterov=False,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="SGD",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            **kwargs,
        )
        if not isinstance(momentum, float) or momentum < 0 or momentum > 1:
            raise ValueError("`momentum` must be a float between [0, 1].")
        self.momentum = momentum
        self.nesterov = nesterov

    def build(self, variables):
        """Initialize optimizer variables.

        SGD optimizer has one variable `momentums`, only set if `self.momentum`
        is not 0.

        Args:
          var_list: list of model variables to build SGD variables on.
        """
        if self.built:
            return
        super().build(variables)
        self.momentums = []
        if self.momentum != 0:
            # One velocity slot per trainable variable, created lazily so
            # that plain (momentum == 0) SGD allocates no extra state.
            self.momentums = self.add_optimizer_variables(
                variables, "momentum"
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        learning_rate = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)

        m = None
        if self.momentum != 0:
            m = self.momentums[self._get_variable_index(variable)]

        if m is not None:
            momentum = ops.cast(self.momentum, variable.dtype)
            # velocity = momentum * velocity - learning_rate * g
            self.assign(
                m,
                ops.subtract(
                    ops.multiply(m, momentum),
                    ops.multiply(gradient, learning_rate),
                ),
            )
            if self.nesterov:
                # Nesterov: w = w + momentum * velocity - learning_rate * g
                self.assign_add(
                    variable,
                    ops.subtract(
                        ops.multiply(m, momentum),
                        ops.multiply(gradient, learning_rate),
                    ),
                )
            else:
                # w = w + velocity
                self.assign_add(variable, m)
        else:
            # Vanilla gradient descent: w = w - learning_rate * g
            self.assign_sub(variable, ops.multiply(gradient, learning_rate))

    def get_config(self):
        config = super().get_config()

        config.update(
            {
                "momentum": self.momentum,
                "nesterov": self.nesterov,
            }
        )
        return config


SGD.__doc__ = SGD.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)