from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export(["keras.optimizers.RMSprop"])
class RMSprop(optimizer.Optimizer):
    """Optimizer that implements the RMSprop algorithm.

    The gist of RMSprop is to:

    - Maintain a moving (discounted) average of the square of gradients
    - Divide the gradient by the root of this average
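
    In update-rule form, a loose sketch of the uncentered, zero-momentum
    case (using the same names as the implementation below):

    - `velocity = rho * velocity + (1 - rho) * gradient ** 2`
    - `variable -= learning_rate * gradient / sqrt(velocity + epsilon)`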

    This implementation of RMSprop uses plain momentum, not Nesterov momentum.

    The centered version additionally maintains a moving average of the
    gradients, and uses that average to estimate the variance.
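
    In that case the denominator becomes, roughly,
    `sqrt(velocity - average_gradient ** 2 + epsilon)`: an estimate of the
    gradient's standard deviation rather than the raw second moment.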

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        rho: float, defaults to 0.9. Discounting factor for the old gradients.
        momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks
            the momentum value, with a decay rate equal to `1 - momentum`.
        epsilon: A small constant for numerical stability. This epsilon is
            "epsilon hat" in the Kingma and Ba paper (in the formula just before
            Section 2.1), not the epsilon in Algorithm 1 of the paper. Defaults
            to 1e-7.
        centered: Boolean. If `True`, gradients are normalized by the estimated
            variance of the gradient; if False, by the uncentered second moment.
            Setting this to `True` may help with training, but is slightly more
            expensive in terms of computation and memory. Defaults to `False`.
        {{base_optimizer_keyword_args}}

    Example:

    >>> opt = keras.optimizers.RMSprop(learning_rate=0.1)
    >>> var1 = keras.backend.Variable(10.0)
    >>> loss = lambda: (var1 ** 2) / 2.0  # d(loss) / d(var1) = var1
    >>> opt.minimize(loss, [var1])
    >>> var1
    9.683772
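
    The momentum and centered variants are enabled through the constructor,
    e.g. (a usage sketch):

    >>> opt = keras.optimizers.RMSprop(
    ...     learning_rate=0.01, momentum=0.9, centered=True
    ... )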

    Reference:

    - [Hinton, 2012](
        http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    """

    def __init__(
        self,
        learning_rate=0.001,
        rho=0.9,
        momentum=0.0,
        epsilon=1e-7,
        centered=False,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="rmsprop",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            **kwargs,
        )
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.centered = centered

    def build(self, var_list):
        if self.built:
            return
        super().build(var_list)

        # One velocity (squared-gradient average) slot per trainable variable.
        self._velocities = self.add_optimizer_variables(var_list, "velocity")

        self._momentums = []
        if self.momentum > 0:
            self._momentums = self.add_optimizer_variables(
                var_list, "momentum"
            )

        self._average_gradients = []
        if self.centered:
            self._average_gradients = self.add_optimizer_variables(
                var_list, "average_gradient"
            )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        lr = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)
        velocity = self._velocities[self._get_variable_index(variable)]

        momentum = None
        if self.momentum > 0:
            momentum = self._momentums[self._get_variable_index(variable)]

        average_grad = None
        if self.centered:
            average_grad = self._average_gradients[
                self._get_variable_index(variable)
            ]

        rho = self.rho

        # velocity = rho * velocity + (1 - rho) * gradient ** 2
        self.assign(
            velocity,
            ops.add(
                ops.multiply(rho, velocity),
                ops.multiply(1 - rho, ops.square(gradient)),
            ),
        )
        if self.centered:
            # Centered variant: also track the discounted mean gradient, and
            # use the variance estimate `velocity - mean ** 2` instead of the
            # raw second moment in the denominator.
            self.assign(
                average_grad,
                ops.add(
                    ops.multiply(rho, average_grad),
                    ops.multiply(1 - rho, gradient),
                ),
            )
            denominator = velocity - ops.square(average_grad) + self.epsilon
        else:
            denominator = ops.add(velocity, self.epsilon)
        increment = ops.divide(
            ops.multiply(lr, gradient), ops.sqrt(denominator)
        )
        if momentum is not None:
            # momentum = self.momentum * momentum + increment
            self.assign(
                momentum,
                ops.add(ops.multiply(self.momentum, momentum), increment),
            )
            self.assign_sub(variable, momentum)
        else:
            self.assign_sub(variable, increment)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "rho": self.rho,
                "momentum": self.momentum,
                "epsilon": self.epsilon,
                "centered": self.centered,
            }
        )
        return config


RMSprop.__doc__ = RMSprop.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)