
import re
import warnings

from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.optimizers.schedules import learning_rate_schedule
from keras.src.saving import serialization_lib
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils import tracking
from keras.src.utils.naming import auto_name


class BaseOptimizer(KerasSaveable):
    """Abstract optimizer base class.

    If you intend to create your own optimization algorithm, please inherit from
    this class and override the following methods:

    - `build`: Create your optimizer-related variables, such as momentum
        variables in the SGD optimizer.
    - `update_step`: Implement your optimizer's variable updating logic.
    - `get_config`: Return the optimizer configuration for serialization.

    Example:

    ```python
    class SGD(Optimizer):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.momentum = 0.9
            self.nesterov = False

        def build(self, variables):
            super().build(variables)
            self.momentums = []
            for variable in variables:
                self.momentums.append(
                    self.add_variable_from_reference(
                        reference_variable=variable, name="momentum"
                    )
                )

        def update_step(self, gradient, variable, learning_rate):
            learning_rate = ops.cast(learning_rate, variable.dtype)
            gradient = ops.cast(gradient, variable.dtype)
            m = self.momentums[self._get_variable_index(variable)]
            self.assign(
                m,
                ops.subtract(
                    ops.multiply(m, ops.cast(self.momentum, variable.dtype)),
                    ops.multiply(gradient, learning_rate),
                ),
            )
            self.assign_add(variable, m)

        def get_config(self):
            config = super().get_config()
            config.update(
                {
                    "momentum": self.momentum,
                    "nesterov": self.nesterov,
                }
            )
            return config
    ```
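
    Usage sketch (illustrative; `model` and `grads` are placeholders for a
    Keras model and gradients computed by the backend):

    ```python
    optimizer = SGD(learning_rate=0.01)
    optimizer.build(model.trainable_variables)
    optimizer.apply(grads, model.trainable_variables)
    ```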
    """

    def __init__(
        self,
        learning_rate,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name=None,
        **kwargs,
    ):
        self._lock = False

        if kwargs.pop("decay", None) is not None:
            warnings.warn(
                "Argument `decay` is no longer supported and will be ignored."
            )
        if kwargs:
            raise ValueError(f"Argument(s) not recognized: {kwargs}")

        if name is None:
            name = auto_name(self.__class__.__name__)
        self.name = name
        self.weight_decay = weight_decay
        self.clipnorm = clipnorm
        self.global_clipnorm = global_clipnorm
        self.clipvalue = clipvalue
        self.use_ema = use_ema
        self.loss_scale_factor = loss_scale_factor
        self.gradient_accumulation_steps = gradient_accumulation_steps
        if gradient_accumulation_steps:
            if gradient_accumulation_steps < 2:
                raise ValueError(
                    "`gradient_accumulation_steps` must be an integer >= 2. "
                    "Received: gradient_accumulation_steps="
                    f"{gradient_accumulation_steps}"
                )
        if use_ema:
            # Verify the arguments related to EMA.
            if ema_momentum > 1 or ema_momentum < 0:
                raise ValueError(
                    "`ema_momentum` must be in the range [0, 1]. "
                    f"Received: ema_momentum={ema_momentum}"
                )
            if ema_overwrite_frequency and (
                not isinstance(ema_overwrite_frequency, int)
                or ema_overwrite_frequency < 1
            ):
                raise ValueError(
                    "`ema_overwrite_frequency` must be an integer >= 1 or "
                    "None. Received: ema_overwrite_frequency="
                    f"{ema_overwrite_frequency}"
                )
        self.ema_momentum = ema_momentum
        self.ema_overwrite_frequency = ema_overwrite_frequency

        clip_args_sum = sum(
            a is not None for a in (clipnorm, clipvalue, global_clipnorm)
        )
        if clip_args_sum > 1:
            raise ValueError(
                "Only one of `clipnorm`, `clipvalue` and `global_clipnorm` can "
                f"be set. Received: clipnorm={clipnorm}, "
                f"clipvalue={clipvalue}, global_clipnorm={global_clipnorm}"
            )
        self.built = False

        # Set up variable tracking.
        self._variables = []
        self._trainable_variables = []
        self._tracker = tracking.Tracker(
            {
                "variables": (
                    lambda x: isinstance(x, backend.Variable),
                    self._variables,
                ),
            }
        )
        self._trainable_variables_indices = {}

        # Create the iteration counter variable.
        with backend.name_scope(self.name, caller=self):
            iterations = backend.Variable(
                0,
                name="iteration",
                dtype="int",
                trainable=False,
                aggregation="only_first_replica",
            )
        self._track_variable(iterations)
        self._iterations = iterations

        # Create the learning rate (schedule, callable, or variable).
        if isinstance(
            learning_rate, learning_rate_schedule.LearningRateSchedule
        ):
            self._learning_rate = learning_rate
        elif callable(learning_rate):
            self._learning_rate = learning_rate
        else:
            if not isinstance(learning_rate, float):
                raise ValueError(
                    "Argument `learning_rate` should be float, or an instance "
                    "of LearningRateSchedule, or a callable "
                    "(that takes in the current iteration value and returns "
                    "the corresponding learning rate value). Received "
                    f"instead: learning_rate={learning_rate}"
                )
            with backend.name_scope(self.name, caller=self):
                learning_rate = backend.Variable(
                    learning_rate,
                    name="learning_rate",
                    dtype=backend.floatx(),
                    trainable=False,
                    aggregation="only_first_replica",
                )
            self._track_variable(learning_rate)
            self._learning_rate = learning_rate

    @property
    def iterations(self):
        if self.gradient_accumulation_steps:
            return ops.floor_divide(
                self._iterations, self.gradient_accumulation_steps
            )
        return self._iterations

    def _track_variable(self, variable):
        self._tracker.add_to_store("variables", variable)

    def _overwrite_variable_with_gradient(self, variable):
        return getattr(variable, "overwrite_with_gradient", False)

    @tracking.no_automatic_dependency_tracking
    def build(self, variables):
        if self.use_ema:
            self._model_variables_moving_average = (
                self.add_optimizer_variables(variables, "average")
            )
        if self.gradient_accumulation_steps:
            self._accumulated_gradients = []
        for i, variable in enumerate(variables):
            self._trainable_variables_indices[self._var_key(variable)] = i
            if self.gradient_accumulation_steps:
                self._accumulated_gradients.append(
                    self.add_variable_from_reference(
                        variable,
                        name="gradient_accumulator",
                    )
                )
        self._trainable_variables = variables[:]
        self.built = True

    def _var_key(self, variable):
        return id(variable)

    @property
    def variables(self):
        return self._variables[:]

    def _get_variable_index(self, variable):
        return self._trainable_variables_indices[self._var_key(variable)]

    def add_variable(
        self,
        shape,
        initializer="zeros",
        dtype=None,
        aggregation="none",
        layout=None,
        name=None,
    ):
        """Add a variable to the optimizer.
        Args:
            shape: Shape tuple for the variable. Must be fully-defined
                (no `None` entries).
            initializer: Initializer object to use to populate the initial
                variable value, or string name of a built-in initializer
                (e.g. `"random_normal"`). Defaults to `"zeros"`.
            dtype: Dtype of the variable to create, e.g. `"float32"`. If
                unspecified, defaults to the `keras.backend.floatx()`.
            aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
                `"sum"` or `"only_first_replica"`. Annotates the variable with
                the type of multi-replica aggregation to be used for this
                variable when writing custom data parallel training loops.
                Defaults to `"none"`.
            layout: Optional tensor layout.  Defaults to `None`.
            name: String name of the variable. Useful for debugging purposes.

        Returns:
            An optimizer variable, in the format of `keras.Variable`.
        r   F)initializershaper%   r&   r'   layoutr$   N)_check_super_calledr   getr   r@   r$   r   rA   )rH   rk   rj   r%   r'   rl   r$   rS   s           r   add_variablezBaseOptimizer.add_variable   s    < 	  ""&&{3		$7 		''''H		 	X&		 		s   A??Bc                 L   |xs d}t        |d      r#|j                  j                  dd      dz   |z   }n;t        |j                        j                  dd      j                  dd      dz   |z   }| j                  |j                  ||j                  |t        |dd            S )	a]  Add an optimizer variable from the model variable.

        Create an optimizer variable based on the information of model variable.
        For example, in SGD optimizer momemtum, for each model variable, a
        corresponding momemtum variable is created of the same shape and dtype.

        Args:
            reference_variable: `keras.Variable`. The corresponding model
                variable to the optimizer variable to be created.
            name: Optional string. The name prefix of the optimizer variable to
                be created. If not provided, it will be set to `"var"`. The
                variable name will follow the pattern
                `{variable_name}_{reference_variable.name}`,
                e.g., `momemtum/dense_1`. Defaults to `None`.
            initializer: Initializer object to use to populate the initial
                variable value, or string name of a built-in initializer
                (e.g. `"random_normal"`). If unspecified, defaults to
                `"zeros"`.

        Returns:
            An optimizer variable, in the format of `keras.Variable`.
        varpath/_:_layoutN)rk   rj   r%   r$   rl   )	hasattrrr   replacestrr$   ro   rk   r%   rV   )rH   reference_variabler$   rj   s       r   ra   z)BaseOptimizer.add_variable_from_reference  s    2 }u%v.%**223<sBTID &++,44S#>FFsCP 
   $**#$**-y$? ! 
 	
r   c           	      f   |}|}t        |t              r|g}|g}n9t        |t              st        |t        j                        r|gt	        |      z  }t	        |      t	        |      k7  rt        d| d| d      t        d |D              }|D ]  }| j                  |      sGt        t        ||            D ].  \  }\  }	}
||   j                  | j                  ||	|
             0 [t        t	        |            D ]  }||   j                  d         t        |t              r|d   S |S )a  Add optimizer variables from the list of trainable model variables.

        Create an optimizer variable based on the information of the supplied
        model variables. For example, in SGD optimizer momentum, for each model
        variable, a corresponding momentum variable is created of the same shape
        and dtype.

        Note that for trainable variables with
        `v.overwrite_with_gradient == True`, `None` is inserted into the output
        list, since the corresponding optimizer variable would never be used
        and creating it would be wasteful.

        Args:
            trainable_variables: `keras.Variable`, the corresponding model
                variable to the optimizer variable to be created.
            name: The name prefix(es) of the optimizer variable(s) to be
                created. Can be a single string or list of strings.  If a
                list of strings, will create an optimizer variable for each
                prefix.  The variable name will follow the pattern
                `{variable_name}_{trainable_variable.name}`, e.g.,
                `momentum/dense_1`.
            initializer: Initializer object(s) to use to populate the initial
                variable value(s), or string name of a built-in initializer
                (e.g. `"random_normal"`). If unspecified, defaults to
                `"zeros"`.
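
        Example:

        ```python
        # Illustrative: one optimizer variable per prefix for every
        # trainable model variable.
        momentums, velocities = self.add_optimizer_variables(
            trainable_variables, ["momentum", "velocity"]
        )
        ```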

        Returns:
            A list of optimizer variables, in the format of `keras.Variable`s.
            If multiple names are provided, returns a tuple of lists.
        z]The number of provided names must match the number of provided initializers.  Received name='z', initializer=''c              3       K   | ]  }g   y wr   r   )r   rt   s     r   r   z8BaseOptimizer.add_optimizer_variables.<locals>.<genexpr>~  s     #:1B#:s   )r$   rj   Nr   )r   ry   r   Initializerlenr-   tuplerW   r^   zipr`   ra   range)rH   trainable_variablesr$   rj   	name_listinitializer_listoptimizer_variablesrS   rb   var_namevar_inits              r   r[   z%BaseOptimizer.add_optimizer_variablesH  si   @ 	&dC I +} +s+z\550 %0=3y>#A y>S!122::> @  +}A/  $#:	#::+ 	8H99(C/8	#340 	+A+( (*1188$!)(0 9 	 s9~. 8A'*11$78	8$ dC &q))""r   c                 j    |D ].  }| j                  |      | j                  vs!t        d| d       y )NzUnknown variable: z. This optimizer can only be called for the variables it was originally built with. When working with a new set of variables, you should recreate a new optimizer instance.)r_   r?   r-   )rH   r   vs      r   _check_variables_are_knownz(BaseOptimizer._check_variables_are_known  sF     	A}}Qt'H'HH ( ,9 9 	r   c                 &    |j                  |       y)a  Assign a value to a variable.

        This should be used in optimizers instead of `variable.assign(value)` to
        support backend specific optimizations.
        Note that the variable can be a model variable or an optimizer variable;
        it can be a backend native variable or a Keras variable.

        Args:
            variable: The variable to update.
            value: The value to add to the variable.
        N)assignrH   rS   values      r   r   zBaseOptimizer.assign  s     	r   c                 &    |j                  |       y)a  Add a value to a variable.

        This should be used in optimizers instead of
        `variable.assign_add(value)` to support backend specific optimizations.
        Note that the variable can be a model variable or an optimizer variable;
        it can be a backend native variable or a Keras variable.

        Args:
            variable: The variable to update.
            value: The value to add to the variable.
        N)
assign_addr   s      r   r   zBaseOptimizer.assign_add       	E"r   c                 &    |j                  |       y)a  Subtract a value from a variable.

        This should be used in optimizers instead of
        `variable.assign_sub(value)` to support backend specific optimizations.
        Note that the variable can be a model variable or an optimizer variable;
        it can be a backend native variable or a Keras variable.

        Args:
            variable: The variable to update.
            value: The value to add to the variable.
        N)
assign_subr   s      r   r   zBaseOptimizer.assign_sub  r   r   c                     t         r   )NotImplementedError)rH   gradientrS   r(   s       r   update_stepzBaseOptimizer.update_step  s    !!r   c                 T    t        | \  }}| j                  ||       | j                  S r   )r   applyrB   )rH   grads_and_varsgradsr   s       r   apply_gradientszBaseOptimizer.apply_gradients  s-    %(.%9""

5-.r   c                    t        |      dk(  ry|s| j                  st        d      t        |      t        | j                        k7  r.t        dt        |       dt        | j                         d      | j                  }njt        |      }| j                  sBt        j                  | j                  |       5  | j                  |       ddd       d| _        | j                  |       t        j                  | j                  |       5  | j                  ||      \  }}| j                  ||      \  }}t        t        |            dkD  rj| j                  }||D cg c]  }||n||z   }}| j                  ||       |D ]/  }|j                  |j!                  |j                  |             1 ddd       | j"                  j%                  d	       y# 1 sw Y   xY wc c}w # 1 sw Y   7xY w)
a3  Update traininable variables according to provided gradient values.

        `grads` should be a list of gradient tensors
        with 1:1 mapping to the list of variables the optimizer was built with.

        `trainable_variables` can be provided
        on the first call to build the optimizer.
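
        Example:

        ```python
        # Illustrative: `grads` is a list of gradient tensors aligned
        # one-to-one with `model.trainable_variables`.
        optimizer.apply(grads, model.trainable_variables)
        ```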
        r   NzWhen passing `grads` without `variables`, the optimizer must already be built on a list of variables. Call `optimizer.build(trainable_variables)` first. zWhen passing `grads` as a list of gradient tensors, the gradients must match `optimizer.variables` one-to-on. Received a list of z* gradients, but the optimizer is tracking z trainable variables.r   Tr   )r   r:   r-   r?   r<   listr   r@   r$   rc   r   _filter_empty_gradients,_overwrite_variables_directly_with_gradientsr5   _backend_apply_gradients
constraintr   rB   r   )rH   r   r   scalegrS   s         r   r   zBaseOptimizer.apply  s    u:? &:: J 
 5zS!B!BCC **-e* 6--01J1J-K,L M++  #'";";"&':";::''		$? 4JJ234!
++,?@		$7 	G)-)E)E**&E& AA. 'E& 4;!#..$DIJq!)QU:JEJ --e5HI 3 GH**6 (;(;H(EFG/	G8 	##A&C4 4. K%	G 	Gs1   =G AG2$G-4%G2"G2 G*-G22G;c                 r     j                   rs j                  dz    j                   z  dk(  }D cg c]   } j                   j                  |         " c} fdt	        j
                  |fd fd       n? j                         j                          j                   j                          j                  rc j                   j                          j                  r; j                  dz    j                  z  dk(  }t	        j
                  | fdd        yyyc c}w )	ar  Apply method that can be overridden by different backends.

        JAX overrides it in order to deal with statelessness in gradient
        accumulation and EMA handling.

        The below implementation is intended to be generally backend-agnostic,
        but may not work with all backends.

        This method does 4 things:
        - Call the optimizer's update_step() to update trainable variables
            and optimizer variables.
        - Update EMA variables, if EMA is configured.
        - Update gradient accumulators, if gradient accumulation is configured.
        - Update the iteration counter.
        r   r   c                    j                   }t        |       D cg c]  \  }}||z   |z   } }}j                  |       } j                  |       j	                  | |j
                         j                          y c c}}w r   )r6   r   _clip_gradients_apply_weight_decay_backend_update_stepr(   $_backend_reset_gradient_accumulators)r   r   stepsr   acc_g	acc_gradsrH   s        r   _update_step_fnz?BaseOptimizer._backend_apply_gradients.<locals>._update_step_fn4  s    888;E98M,4AuQY%' 
 ,,U3(()<=)).0B0B 99;s   Bc                              S r   r   )r   r   r   s   r   r   z8BaseOptimizer._backend_apply_gradients.<locals>.<lambda>F  s    /BC r   c                  (    j                         S r   )(_backend_increment_gradient_accumulators)r   r   rH   s   r   r   z8BaseOptimizer._backend_apply_gradients.<locals>.<lambda>G  s    EE9 r   c                  :     j                   j                        S r   )-_overwrite_model_variables_with_average_valuer<   rO   s   r   r   z8BaseOptimizer._backend_apply_gradients.<locals>.<lambda>a  s    DNN11 r   c                       y r   r   r   r   r   r   z8BaseOptimizer._backend_apply_gradients.<locals>.<lambda>d  s    r   N)r6   rB   r]   rh   r   condr   r   r   r(   r4   &_update_model_variables_moving_averager<   r8   rK   )rH   r   r   is_update_stepr   should_overwrite_model_varsr   r   s   ```   @@r   r   z&BaseOptimizer._backend_apply_gradients  s>     ++  1$001456N - ++D,D,DQ,GHI
<  HHC ((/E$$%89 %%*D,>,> <<77)) ++ OOa'00/145/6+ / ! ,	 Ms   %D4c                 R    t        ||      D ]  \  }}| j                  |||        y)zCollective update_step that can be overridden by the backend.

        It is overridden by torch for performance reasons, and
        by TF to support tf.distribute.
        N)r   r   )rH   r   r   r(   gradrq   s         r   r   z"BaseOptimizer._backend_update_stepg  s1     U$78 	7ID#T36	7r   c                     | j                   D ]?  }||j                  t        j                  |j                  |j
                               A y N)r%   )r]   r   r   zerosrk   r%   )rH   g_accs     r   r   z2BaseOptimizer._backend_reset_gradient_accumulatorsp  s>    00 	HE SYYu{{%++FG	Hr   c                     t        ||      D cg c]
  \  }}||z    }}}t        ||      D ]  \  }}|j                  |        y c c}}w r   )r   r   )rH   r   r   r   r   
new_g_accsn_g_accr   s           r   r   z6BaseOptimizer._backend_increment_gradient_accumulatorsu  sP    25eY2GHhaq5yH
H!*i8 	"NGULL!	" Is   Ac           
         | j                          | j                  s#t        d| j                  j                   d      t        |      t        | j                        k7  rEt        d| j                  j                   dt        |       dt        | j                         d      t        |      t        | j                        k7  r.t        dt        |       dt        | j                         d      t        t        | j                  |            t        t        | j                  |            z   }t        j                  |	      5 }| j                  |       d d d        g }| j                  D ]8  }j                  |      }||j                  |       (|j                  |       : g }| j                  D ]8  }j                  |      }||j                  |       (|j                  |       : ||fS # 1 sw Y   xY w)
NzTo call `stateless_apply`, zx must be built (i.e. its variables must have been created). You can build it via `optimizer.build(trainable_variables)`.zNArgument `optimizer_variables` must be a list of tensors corresponding 1:1 to z(().variables. Received list with length z, but expected z variables.zArgument `optimizer_variables` must be a list of tensors corresponding 1:1 to the trainable variables list that the optimizer was built with. Received len(trainable_variables) == z& whereas the optimizer was built with )state_mapping)rm   r:   r-   r.   r/   r   r   r<   r   r   r   StatelessScoper   get_current_valuer`   )rH   r   r   r   mappingscoper   new_vs           r   stateless_applyzBaseOptimizer.stateless_applyz  s     "zz-dnn.E.E-F GO O 
 "#s4>>'::((,(?(?'@ A--01D-E,F G/0=  "#s4+D+D'EE/ 033F/G.H I8t0012+?  ))+>?
T^^%89:;
 ##': 	eJJu	 !** 	.A++A.E #**51#**1-	. ! 	.A++A.E #**51#**1-	. #$777%	 	s   
G::Hc                 <    | j                   || j                   z  S |S )zScale the loss before computing gradients.

        Scales the loss before gradients are computed in a `train_step`. This
        is primarily useful during mixed precision training to prevent numeric
        underflow.
        )r5   )rH   losss     r   
scale_losszBaseOptimizer.scale_loss  s&     !!-$0000r   c                 "    | j                         S r   )_get_current_learning_raterO   s    r   r(   zBaseOptimizer.learning_rate  s    ..00r   c                    t        | j                  t        j                        r| j                  }nd }t        |t        j
                        r|| _        n]t        |      r|| _        nJt        | j                  t        j
                        rt        d      | j                  j                  |       |7t        | j                  t        j                        s| j                  |       y y y )Na  This optimizer was created with a `LearningRateSchedule` object as its `learning_rate` constructor argument, hence its learning rate is not settable. If you need the learning rate to be settable, you should instantiate the optimizer with a float `learning_rate` argument.)
r   rD   r   r   r   rC   rE   	TypeErrorr   _untrack_variable)rH   r(   prev_lr_vars      r   r(   zBaseOptimizer.learning_rate  s    d))7+;+;<--KK1FF
 #0Dm$"/D##%;%P%P  K  &&}5":!1!1,
 "";/	,
"r   c                 V   | j                   st        d      t        | j                  |      D ]y  \  }}|j                  |j                  k7  rJt        d| j                  |       dt        |j                         dt        |j                         d      |j                  |       { y)z!Set the weights of the optimizer.zYou are calling `set_weights()` on an optimizer that has not yet been built. Please call `optimizer.build(trainable_variables)` to create the optimizer weights before calling `set_weights()`.zOptimizer variable z has shape z+ not compatible with provided weight shape .N)r:   r-   r   r;   rk   r_   ry   r   )rH   weightsrS   weights       r   set_weightszBaseOptimizer.set_weights  s    zzD  !$DOOW = 	$Hf~~- )$--*A)B+8>>*+ ,$$'$5#6a9 
 OOF#	$r   c                 v    t        | j                        D ]!  \  }}|j                         |t        |      <   # y)z'Get the state of this optimizer object.N)r^   r   numpyry   )rH   storerb   rS   s       r   save_own_variablesz BaseOptimizer.save_own_variables  s2    $T^^4 	-KAx$NN,E#a&M	-r   c           	         t        |j                               t        | j                        k7  rud| j                   dt        | j                         dt        |j                                d}t        | j                        dk(  r|dz  }t	        j
                  |d       y	t        | j                        D ]"  \  }}|j                  |t        |                $ y	)
z'Set the state of this optimizer object.z)Skipping variable loading for optimizer 'z', because it has z+ variables whereas the saved optimizer has z variables. r   zCThis is likely because the optimizer has not been called/built yet.r   )
stacklevelN)	r   keysr   r$   r+   r,   r^   r   ry   )rH   r   msgrb   rS   s        r   load_own_variablesz BaseOptimizer.load_own_variables  s    uzz|DNN 33;DII; G""%dnn"5!6 7++.uzz|+<*=\K 
 4>>"a'( MM#!,$T^^4 	+KAxOOE#a&M*	+r   c                     t        | j                  t        j                        r| j                  | j                        S t        | j                        r| j                         S | j                  S r   )r   rD   r   rC   rB   rE   rO   s    r   r   z(BaseOptimizer._get_current_learning_rate  s^    !7!L!L
 &&t'7'788d))*&&(("""r   c                    
 t         fd|D              s||fS t        |      }t        |      }t        t        |      dz
  dd      D ]  }||   ||   c j	                        s! j
                  r j
                  } j                  dz   |z  dk(  } j                   j                           
t        j                  |fd
fd      }t        j                  |
fdfd      t        j                  |fd	fd
      }	j                  |	       
j                  |       nj                         |j                  |       |j                  |        ||fS )a.  Overwrite the variables directly by their gradients.

        This method is designed for a special case where we want to overwrite
        the variable directly with its computed gradient. For example, in float8
        training, new `scale` and `amax_history` are computed as gradients, and
        we want to overwrite them directly instead of following the typical
        procedure such as gradient descent with a learning rate, gradient
        clipping and weight decaying.

        After the update, the processed pairs will be filtered out.
        c              3   @   K   | ]  }j                  |        y wr   )rW   )r   r   rH   s     r   r   zMBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<genexpr>  s     K499!<K   r   r   c                  Z    t        j                   j                   j                        S r   )r   r   rk   r%   r   s   r   r   zLBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<lambda>1  s    		!'' A r   c                  0    t        j                         S r   r   maximumr   r   s   r   r   zLBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<lambda>2      Au 5 r   c                  0    t        j                         S r   r   r   s   r   r   zLBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<lambda>6  r   r   c                       S r   r   r   s   r   r   zLBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<lambda>7  s     r   c                       S r   r   )new_gs   r   r   zLBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<lambda>:  s     r   c                       j                   S r   )r   )r   s   r   r   zLBaseOptimizer._overwrite_variables_directly_with_gradients.<locals>.<lambda>:  s    qww r   )anyr   r   r   rW   r6   rB   r]   rh   r   r   r   r*   )rH   r   varsfiltered_gradsfiltered_varsrb   r   r   	new_g_accr   r   r   r   r   s   `         @@@@r   r   z:BaseOptimizer._overwrite_variables_directly_with_gradients  sZ    KdKK$; eT
 s>*Q.B7 	%A!!$mA&6DAq55a833 <<E&*&6&6&:e%Cq%HN 77003E
 !$&A5!I
  HH&5!E
  HH&E HHUOLL+HHQK""1%!!!$=	%> },,r   c                    t        |      }t        |      }g }t        t        |      dz
  dd      D ]F  }||   	|j                  |       |j                  |      }	 |j	                  |j
                         H |st        d      |r+t        j                  dt        t        |             d       ||fS # t        $ r |j	                  |j                         Y w xY w)Nr   r   z'No gradients provided for any variable.z%Gradients do not exist for variables zc when minimizing the loss. If using `model.compile()`, did you forget to provide a `loss` argument?)r   r   r   r*   r`   rr   AttributeErrorr$   r-   r+   r,   reversed)rH   r   r   r   r   missing_grad_varsrb   r   s           r   r   z%BaseOptimizer._filter_empty_gradientsD  s    eT
 s>*Q.B7 	5Aa (""1%!%%a(5%,,QVV4	5 FGGMM7!2345 6## },, & 5%,,QVV45s   B99$C C c                    | j                   r2| j                   dkD  r#|D cg c]  }|| j                  |      n| c}S | j                  r%| j                  dkD  rt        || j                        S | j                  rE| j                  dkD  r6| j                  }|D cg c]  }|t        j                  || |      n|  c}S |S c c}w c c}w )Nr   )r1   _clip_by_normr2   clip_by_global_normr3   r   clip)rH   r   r   r   s       r   r   zBaseOptimizer._clip_gradients_  s    ==T]]Q.GLBC""1%A=  !!d&:&:Q&>&ud.B.BCC^^ 2AHMN1!-CHHQA&Q>NNL Os   C#Cc                 T    t         d      r j                  rt        d      |rt         fd|D               _        nt                _        |rAt        |      dkD  r3t        j                  dj                  t        |                   _	        nd _	        t                _        y)a  Exclude variables from weight decay.

        This method must be called before the optimizer's `build` method is
        called. You can set specific variables to exclude out, or set a list of
        strings as the anchor words, if any of which appear in a variable's
        name, then the variable is excluded.

        Args:
            var_list: A list of `Variable`s to exclude from weight decay.
            var_names: A list of strings. If any string in `var_names` appear
                in the model variable's name, then this model variable is
                excluded from weight decay. For example, `var_names=['bias']`
                excludes all bias variables from weight decay.
        _builtzS`exclude_from_weight_decay()` can only be configured before the optimizer is built.c              3   @   K   | ]  }j                  |        y wr   )r_   )r   rS   rH   s     r   r   z:BaseOptimizer.exclude_from_weight_decay.<locals>.<genexpr>  s      2,4h'2r   r   |N)rw   r  r-   set_exclude_from_weight_decayr   recompilejoin"_exclude_from_weight_decay_patterndict _exclude_from_weight_decay_cache)rH   var_list	var_namess   `  r   exclude_from_weight_decayz'BaseOptimizer.exclude_from_weight_decayl  s     4"t{{*  .1 28@2 /D+ /2eD+ Y!+68jjY(7D3 7;D3 15-r   c                    | j                  |      }t        | d      st               | _        || j                  v r| j                  |   S t	        | dt                     }t	        | dd       }||v rd| j                  |<   y|1t        j                  ||j                        	 d| j                  |<   yd| j                  |<   y)Nr  r  r	  FT)	r_   rw   r
  r  rV   r  r  searchr$   )rH   rS   variable_idr  !exclude_from_weight_decay_patterns        r   _use_weight_decayzBaseOptimizer._use_weight_decay  s    mmH- t?@48FD1$???88EE %,.%
! -46-
) 33AFD11+>,8		;X]]K FK55kB=A--k:r   c                 ,   | j                   y |D ]  }| j                  |      st        j                  | j                  |j
                        }t        j                  | j                   |j
                        }|j                  |||z  |z  z
          y r   )r0   r  r   castr(   r%   r   )rH   r   rS   lrwds        r   r   z!BaseOptimizer._apply_weight_decay  s|    $! 	?H%%h/XXd00(..AXXd//@8b=2+= =>		?r   c                 b    t        | d      s#t        d| j                  j                   d      y )Nr)   zIn optimizer 'zh', you forgot to call `super().__init__()` as the first statement in the `__init__()` method. Go add it!)rw   RuntimeErrorr.   r/   rO   s    r   rm   z!BaseOptimizer._check_super_called  s:    tW% !8!8 9 :   &r   c                 4   | j                   rt        || j                        D ]r  \  }}|	t        j                  | j
                  d      }t        j                  ||j                        | j                  z  }|j                  ||z  d|z
  |z  z          t yy)z8Update the stored moving average using the latest value.Nr   r   )
r4   r   r\   r   	not_equalrK   r  r%   r7   r   )rH   r   rq   rY   not_first_stepmomentums         r   r   z4BaseOptimizer._update_model_variables_moving_average  s    << ##T%I%I! NW &%(]]4??A%FN;d>O>OO  NN8g#5X8L#LMN r   c                    t        |      t        | j                        k7  r.t        dt        |       dt        | j                         d      t        || j                        D ]  \  }}|	|j	                  |        y)z2Overwrite model variables with its moving average.zThe length of model variables (zT) to override does not match the length of model variables stored in the optimizer (z:). Please check if the optimizer was called on your model.N)r   r\   r-   r   r   )rH   r   rq   average_vars       r   r   z;BaseOptimizer._overwrite_model_variables_with_average_value  s     "#s00(
 
 1#6I2J1K L <<=> ?CC  !$!E!E!
 	(C &

;'		(r   c                 @    | j                   r| j                  |       yy)a  Set the final value of model's trainable variables.

        Sometimes there are some extra steps before ending the variable updates,
        such as overriding the model variables with its average value.

        Args:
          var_list: list of model variables.
        N)r4   r   )rH   r  s     r   finalize_variable_valuesz&BaseOptimizer.finalize_variable_values  s      << >>xH	 r   c                      y)N	Optimizerr   rO   s    r   	_obj_typezBaseOptimizer._obj_type  s    r   c                    t        | j                  t        j                        r t        j                  | j                        }nt        | j                  t
        j                        r$t        | j                  j                               }nlt        j                  | j                        rt        | j                        }n7t        | j                        r t        j                  | j                        }nd}| j                  || j                  | j                   | j"                  | j$                  | j&                  | j(                  | j*                  | j,                  | j.                  d}|S )a  Returns the config of the optimizer.

        An optimizer config is a Python dictionary (serializable)
        containing the configuration of an optimizer.
        The same optimizer can be reinstantiated later
        (without any saved state) from this configuration.

        Subclass optimizer should override this method to include other
        hyperparameters.

        Returns:
            Python dictionary.
        g      ?)r$   r(   r0   r1   r2   r3   r4   r7   r8   r5   r6   )r   rD   r   rC   	serializer   r   rF   r   r   	is_tensorrE   r   serialize_keras_objectr$   r0   r1   r2   r3   r4   r7   r8   r5   r6   )rH   r(   configs      r   
get_configzBaseOptimizer.get_config  s    !7!L!L
 3<<##M ++W-=-=>!$"5"5";";"=>M]]4../!$"5"56Md))*-DD##M  M II* --#33|| --'+'C'C!%!7!7+/+K+K
 r   c                 z    d|v r0t        |d   t              rt        j                  |d   |      |d<    | di |S )a  Creates an optimizer from its config.

        This method is the reverse of `get_config`, capable of instantiating the
        same optimizer from the config dictionary.

        Args:
            config: A Python dictionary, typically the output of get_config.
            custom_objects: A Python dictionary mapping names to additional
              user-defined Python objects needed to recreate this optimizer.

        Returns:
            An optimizer instance.
        r(   )custom_objectsr   )r   r
  r   deserialize_keras_object)clsr)  r,  s      r   from_configzBaseOptimizer.from_config%  sK     f$&148%>>/ '
 }V}r   c                     |dk7  r| j                          t        | d      r| j                  j                  |      }t        |   ||      S )Nr)   r>   )rm   rw   r>   tracksuper__setattr__)rH   r$   r   r.   s      r   r3  zBaseOptimizer.__setattr__=  sH     7?$$&4$MM''.Ew"4//r   c                    t        j                  t        j                  |      |d      }|dkD  }t        j                  ||t        j                  |            }t        j                  |t        j
                  |      |      }t        j                  || j                        }t        j                  |      t        j                  || j                        z  }|S )NT)keepdimsr   )
r   r9   squarewhere	ones_likesqrtmultiplyr1   convert_to_tensorr   )	rH   valuesaxesl2sumpred
l2sum_safel2normintermediatevalues_clips	            r   r   zBaseOptimizer._clip_by_normH  s    

6*D4@qyYYtUCMM%,@A
4*!5u=||FDMM:++L9CKKDMM=
 
 r   c                     | j                   j                  }| j                   j                          | j                   j                  |       |du r| j                   j	                          y y )NT)r>   lockedunlockuntracklock)rH   rS   previous_lock_states      r   r   zBaseOptimizer._untrack_variableU  sQ    "mm22h'$&MM  'r   )
NNNNFgGz?NNNN)r   NnoneNN)Nr   )r   r   )NN)7r/   
__module____qualname____doc__rL   propertyrK   rA   rW   r	    no_automatic_dependency_trackingrc   r_   r   rh   ro   ra   r[   r   r   r   r   r   r   r   r   r   r   r   r   r   r(   setterr   r   r   r   r   r   r   r  r  r   rm   r   r   r!  r$  r*  classmethodr/  r3  r   r   __classcell__)r.   s   @r   r   r      s   3p  $$(}0~    :C .. /& " "J +\ :A(
V 6=L#\##" C'JM^7H
"
28h	 1 1 0 0:$$-
+$#5-n-6&7P:?N((I-^  .	0!r   r   a2  name: String. The name to use
            for momentum accumulator weights created by
            the optimizer.
        weight_decay: Float. If set, weight decay is applied.
        clipnorm: Float. If set, the gradient of each weight is individually
            clipped so that its norm is no higher than this value.
        clipvalue: Float. If set, the gradient of each weight is clipped to be
            no higher than this value.
        global_clipnorm: Float. If set, the gradient of all weights is clipped
            so that their global norm is no higher than this value.
        use_ema: Boolean, defaults to `False`.
            If `True`, exponential moving average
            (EMA) is applied. EMA consists of computing an exponential moving
            average of the weights of the model (as the weight values change
            after each training batch), and periodically overwriting the
            weights with their moving average.
        ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`.
            This is the momentum to use when computing
            the EMA of the model's weights:
            `new_average = ema_momentum * old_average + (1 - ema_momentum) *
            current_variable_value`.
        ema_overwrite_frequency: Int or None, defaults to None. Only used if
            `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,
            we overwrite the model variable by its moving average.
            If None, the optimizer
            does not overwrite model variables in the middle of training,
            and you need to explicitly overwrite the variables
            at the end of training by calling
            `optimizer.finalize_variable_values()` (which updates the model
            variables in-place). When using the built-in `fit()` training loop,
            this happens automatically after the last epoch,
            and you don't need to do anything.
        loss_scale_factor: Float or `None`. If a float, the scale factor will
            be multiplied with the loss before computing gradients, and the
            inverse of the scale factor will be multiplied by the gradients
            before updating variables. Useful for preventing underflow during
            mixed precision training. Alternatively,
            `keras.optimizers.LossScaleOptimizer` will
            automatically set a loss scale factor.
        gradient_accumulation_steps: Int or `None`. If an int, model & optimizer
            variables will not be updated at every step; instead they will be
            updated every `gradient_accumulation_steps` steps, using the average
            value of the gradients since the last update. This is known as
            "gradient accumulation". This can be useful
            when your batch size is very small, in order to reduce gradient
            noise at each update step. EMA frequency will look at "accumulated"
            iterations value (optimizer steps // gradient_accumulation_steps).
            Learning rate schedules will look at "real" iterations value
            (optimizer steps)."""


def global_norm(value_list):
    """Computes the global norm of multiple tensors."""
    squared_norms = [
        ops.sum(ops.square(v)) for v in value_list if v is not None
    ]
    squared_norm = ops.sum(ops.stack(squared_norms))
    return ops.sqrt(squared_norm)


def clip_by_global_norm(value_list, clip_norm):
    use_norm = global_norm(value_list)
    # Clip elements by the ratio of `clip_norm` to the global L2-norm.
    scale_for_finite = clip_norm * ops.minimum(1.0 / use_norm, 1.0 / clip_norm)
    # If `use_norm` is finite, adding (use_norm - use_norm) is a no-op;
    # if it is NaN or Inf, this propagates NaN to all clipped values.
    scale = scale_for_finite + (use_norm - use_norm)
    return [v * scale if v is not None else v for v in value_list]