
"""Version 2 of class Optimizer."""

import abc
import contextlib
import functools
import warnings

from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.saved_model import revived_types
from tensorflow.python.trackable import base as trackable
from tensorflow.python.util import nest

_DEFAULT_VALID_DTYPES = frozenset([
    dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64,
    dtypes.complex64, dtypes.complex128
])


def _deduplicate_indexed_slices(values, indices):
  """Sums `values` associated with any non-unique `indices`.

  Args:
    values: A `Tensor` with rank >= 1.
    indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).

  Returns:
    A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
    de-duplicated version of `indices` and `summed_values` contains the sum of
    `values` slices associated with each unique index.
  """
  unique_indices, new_index_positions = array_ops.unique(indices)
  summed_values = math_ops.unsorted_segment_sum(
      values, new_index_positions,
      array_ops.shape(unique_indices)[0])
  return (summed_values, unique_indices)


class NullContextmanager(object):

  def __init__(self, *args, **kwargs):
    pass

  def __enter__(self):
    pass

  def __exit__(self, type_arg, value_arg, traceback_arg):
    return False  # False values do not suppress exceptions


def name_scope_only_in_function_or_graph(name):
  """Internal-only entry point for `name_scope*`.

  Enters a compat.v1.name_scope only when in a function or graph,
  not when running fully eagerly.

  Args:
    name: The name argument that is passed to the op function.

  Returns:
    `name_scope*` context manager.
  """
  if not context.executing_eagerly():
    return ops.name_scope_v1(name)
  else:
    return NullContextmanager()


class OptimizerV2(trackable.Trackable):
  """Base class for Keras optimizers.

  You should not use this class directly, but instead instantiate one of its
  subclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`, etc.

  ### Usage

  ```python
  # Create an optimizer with the desired parameters.
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  # `loss` is a callable that takes no argument and returns the value
  # to minimize.
  loss = lambda: 3 * var1 * var1 + 2 * var2 * var2
  # In graph mode, returns op that minimizes the loss by updating the listed
  # variables.
  opt_op = opt.minimize(loss, var_list=[var1, var2])
  opt_op.run()
  # In eager mode, simply call minimize to update the list of variables.
  opt.minimize(loss, var_list=[var1, var2])
  ```

  ### Usage in custom training loops

  In Keras models, sometimes variables are created when the model is first
  called, instead of construction time. Examples include 1) sequential models
  without a pre-defined input shape, or 2) subclassed models. Pass `var_list` as
  a callable in these cases.

  Example:

  ```python
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(num_hidden, activation='relu'))
  model.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))
  loss_fn = lambda: tf.keras.losses.mse(model(input), output)
  var_list_fn = lambda: model.trainable_weights
  for input, output in data:
    opt.minimize(loss_fn, var_list_fn)
  ```

  ### Processing gradients before applying them

  Calling `minimize()` takes care of both computing the gradients and
  applying them to the variables.  If you want to process the gradients
  before applying them you can instead use the optimizer in three steps:

  1.  Compute the gradients with `tf.GradientTape`.
  2.  Process the gradients as you wish.
  3.  Apply the processed gradients with `apply_gradients()`.

  Example:

  ```python
  # Create an optimizer.
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)

  # Compute the gradients for a list of variables.
  with tf.GradientTape() as tape:
    loss = <call_loss_function>
  vars = <list_of_variables>
  grads = tape.gradient(loss, vars)

  # Process the gradients, for example cap them, etc.
  # capped_grads = [MyCapper(g) for g in grads]
  processed_grads = [process_gradient(g) for g in grads]

  # Ask the optimizer to apply the processed gradients.
  opt.apply_gradients(zip(processed_grads, var_list))
  ```

  ### Use with `tf.distribute.Strategy`

  This optimizer class is `tf.distribute.Strategy` aware, which means it
  automatically sums gradients across all replicas. To average gradients,
  you divide your loss by the global batch size, which is done
  automatically if you use `tf.keras` built-in training or evaluation loops.
  See the `reduction` argument of your loss: it should be set to
  `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or to
  `tf.keras.losses.Reduction.SUM` for summing without averaging.

  To aggregate gradients yourself, call `apply_gradients` with
  `experimental_aggregate_gradients` set to False. This is useful if you need to
  process aggregated gradients.

  If you are not using these and you want to average gradients, you should use
  `tf.math.reduce_sum` to add up your per-example losses and then divide by the
  global batch size. Note that when using `tf.distribute.Strategy`, the first
  component of a tensor's shape is the *replica-local* batch size, which is off
  by a factor equal to the number of replicas being used to compute a single
  step. As a result, using `tf.math.reduce_mean` will give the wrong answer,
  resulting in gradients that can be many times too big.
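
  For example, a minimal sketch of this loss scaling (the `make_model` and
  `GLOBAL_BATCH_SIZE` names are placeholders you define yourself, not part of
  this API):

  ```python
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():
    model = make_model()  # placeholder: build the model under the scope
    opt = tf.keras.optimizers.SGD(learning_rate=0.1)

  @tf.function
  def train_step(features, labels):
    def step_fn(features, labels):
      with tf.GradientTape() as tape:
        per_example_loss = tf.keras.losses.mse(labels, model(features))
        # Sum per-example losses and divide by the *global* batch size;
        # `tf.math.reduce_mean` would divide by the replica-local size instead.
        loss = tf.math.reduce_sum(per_example_loss) / GLOBAL_BATCH_SIZE
      grads = tape.gradient(loss, model.trainable_variables)
      opt.apply_gradients(zip(grads, model.trainable_variables))
    strategy.run(step_fn, args=(features, labels))
  ```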

  ### Variable Constraints

  All Keras optimizers respect variable constraints. If a constraint function is
  passed to any variable, the constraint will be applied to the variable after
  the gradient has been applied to it.
  Important: If the gradient is a sparse tensor, variable constraints are not
  supported.

  ### Thread Compatibility

  The entire optimizer is currently thread compatible, not thread-safe. The user
  needs to perform synchronization if necessary.

  ### Slots

  Many optimizer subclasses, such as `Adam` and `Adagrad` allocate and manage
  additional variables associated with the variables to train.  These are called
  <i>Slots</i>.  Slots have names and you can ask the optimizer for the names of
  the slots that it uses.  Once you have a slot name you can ask the optimizer
  for the variable it created to hold the slot value.

  This can be useful if you want to log or debug a training algorithm, report stats
  about the slots, etc.
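
  For example, `tf.keras.optimizers.SGD` with momentum keeps a "momentum" slot
  per trainable variable (a small sketch):

  ```python
  var = tf.Variable(1.0)
  opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)
  opt.minimize(lambda: var * var, var_list=[var])
  print(opt.get_slot_names())           # ['momentum']
  momentum_slot = opt.get_slot(var, 'momentum')
  print(momentum_slot.numpy())          # current momentum accumulator value
  ```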

  ### Hyperparameters

  These are arguments passed to the optimizer subclass constructor
  (the `__init__` method), and then passed to `self._set_hyper()`.
  They can be either regular Python values (like 1.0), tensors, or
  callables. If they are callable, the callable will be called during
  `apply_gradients()` to get the value for the hyper parameter.

  Hyperparameters can be overwritten through user code:

  Example:

  ```python
  # Create an optimizer with the desired parameters.
  opt = tf.keras.optimizers.SGD(learning_rate=0.1)
  # `loss` is a callable that takes no argument and returns the value
  # to minimize.
  loss = lambda: 3 * var1 + 2 * var2
  # In eager mode, simply call minimize to update the list of variables.
  opt.minimize(loss, var_list=[var1, var2])
  # update learning rate
  opt.learning_rate = 0.05
  opt.minimize(loss, var_list=[var1, var2])
  ```

  ### Callable learning rate

  Optimizer accepts a callable learning rate in two ways. The first way is
  through built-in or customized
  `tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be
  called on each iteration with `schedule(iteration)`, a `tf.Variable`
  owned by the optimizer.

  Example:

  >>> var = tf.Variable(np.random.random(size=(1,)))
  >>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
  ... initial_learning_rate=.01, decay_steps=20, decay_rate=.1)
  >>> opt = tf.keras.optimizers.SGD(learning_rate=learning_rate)
  >>> loss = lambda: 3 * var
  >>> opt.minimize(loss, var_list=[var])
  <tf.Variable...

  The second way is through a callable function that
  does not accept any arguments.

  Example:

  >>> var = tf.Variable(np.random.random(size=(1,)))
  >>> def lr_callable():
  ...   return .1
  >>> opt = tf.keras.optimizers.SGD(learning_rate=lr_callable)
  >>> loss = lambda: 3 * var
  >>> opt.minimize(loss, var_list=[var])
  <tf.Variable...

  ### Creating a custom optimizer

  If you intend to create your own optimization algorithm, simply inherit from
  this class and override the following methods:

    - `_resource_apply_dense` (update variable given gradient tensor is a dense
      `tf.Tensor`)
    - `_resource_apply_sparse` (update variable given gradient tensor is a
      sparse `tf.IndexedSlices`. The most common way for this to happen
      is if you are taking the gradient through a `tf.gather`.)
    - `_create_slots`
      (if your optimizer algorithm requires additional variables)
    - `get_config`
      (serialization of the optimizer, include all hyper parameters)
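
  A minimal sketch of a custom optimizer built this way (plain gradient
  descent; `MyOptimizer` is an illustrative name, and the sketch assumes this
  `OptimizerV2` base class is the one exposed as
  `tf.keras.optimizers.Optimizer`):

  ```python
  class MyOptimizer(tf.keras.optimizers.Optimizer):

    def __init__(self, learning_rate=0.01, name="MyOptimizer", **kwargs):
      super(MyOptimizer, self).__init__(name, **kwargs)
      self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))

    def _resource_apply_dense(self, grad, var, apply_state=None):
      lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
      return var.assign_sub(lr * grad)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
      lr = self._get_hyper("learning_rate", var.dtype.base_dtype)
      return var.scatter_sub(tf.IndexedSlices(lr * grad, indices))

    def get_config(self):
      config = super(MyOptimizer, self).get_config()
      config.update(
          {"learning_rate": self._serialize_hyperparameter("learning_rate")})
      return config
  ```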
  FNc                    h d}|D ]c  }||vrt        dt        |      z         ||   &||   dk  rt        dj                  |||               |dk(  sOt	        j
                  d       e d| _        | j                  |       i | _        i | _	        g | _
        g | _        d| _        i | _        |j                  d	d
      }|d
k  rt        dj                  |            || _        d| _        t#        j$                         rt#        j&                         | _        nd| _        |t*        j,                  }|| _        |g }|| _        |j                  dd      | _        |j                  dd      | _        | j2                  ;| j4                  /t        dj                  | j2                  | j4                              |j                  dd      | _        y)a  Create a new Optimizer.

    This must be called by the constructors of subclasses.
    Note that Optimizer instances should not bind to a single graph,
    and so shouldn't keep Tensors as member variables. Generally
    you should be able to use the _set_hyper()/state.get_hyper()
    facility instead.

    This class is stateful and thread-compatible.

    Example of custom gradient transformations:

    ```python
    def my_gradient_transformer(grads_and_vars):
      # Simple example, double the gradients.
      return [(2. * g, v) for g, v in grads_and_vars]

    optimizer = tf.keras.optimizers.SGD(
        1e-3, gradient_transformers=[my_gradient_transformer])
    ```

    Args:
      name: String. The name to use for momentum accumulator weights created
        by the optimizer.
      gradient_aggregator: The function to use to aggregate gradients across
        devices (when using `tf.distribute.Strategy`). If `None`, defaults to
        summing the gradients across devices. The function should accept and
        return a list of `(gradient, variable)` tuples.
      gradient_transformers: Optional. List of functions to use to transform
        gradients before applying updates to Variables. The functions are
        applied after `gradient_aggregator`. The functions should accept and
        return a list of `(gradient, variable)` tuples.
      **kwargs: keyword arguments. Allowed arguments are `clipvalue`,
        `clipnorm`, `global_clipnorm`.
        If `clipvalue` (float) is set, the gradient of each weight
        is clipped to be no higher than this value.
        If `clipnorm` (float) is set, the gradient of each weight
        is individually clipped so that its norm is no higher than this value.
        If `global_clipnorm` (float) is set the gradient of all weights is
        clipped so that their global norm is no higher than this value.

    Raises:
      ValueError: in case of any invalid argument.
    >   lrdecayclipnorm	clipvalueglobal_clipnormz1Unexpected keyword argument passed to optimizer: Nr   zExpected {} >= 0, received: {}rJ   z=The `lr` argument is deprecated, use `learning_rate` instead.TrK           zdecay cannot be less than 0: {}FrL   rN   z_Cannot accept both `clipnorm` and `global_clipnorm`, passed `clipnorm` {}, `global_clipnorm` {}rM   )	TypeErrorstr
ValueErrorformatwarningswarn_use_locking_init_set_name_hyper_slots_slot_names_weights_iterations_deferred_slot_restorationspop_initial_decay_hypers_createdr   has_strategyget_strategy_distribution_strategyoptimizer_utilsall_reduce_sum_gradientsgradient_aggregatorgradient_transformersrL   rN   rM   )r1   rE   rf   rg   r3   allowed_kwargskrK   s           r)   r4   zOptimizerV2.__init__5  s   b QN 	M	
.	  025a&9 : 	: 
	6!9q=9@@F1INOO	
dK	M	M DDKDKDDMD (*D$JJw$Erz8??FGGD D ""$$2$?$?$Ad!$(d! "+DD2D$ !6DJJz40DM!::&7>D}} T%9%9%E DDJF==$*>*>E@A A ZZT2DNr+   c                     | j                   S z=`float` or `None`. If set, clips gradients to a maximum norm.)	_clipnormr7   s    r)   rL   zOptimizerV2.clipnorm  s     >>r+   c                     | j                   S rk   )_global_clipnormr7   s    r)   rN   zOptimizerV2.global_clipnorm  s        r+   c                     || j                   rt        d      || _        t        j                  | j                        | _        y Nz`clipnorm` cannot be set when `gradient_transformers` is set. Instead, use the `gradient_transformers` to specify clipping and other transformations.)rg   rR   rl   rd   make_gradient_clipnorm_fn_clipnorm_fnr1   vals     r)   rL   zOptimizerV2.clipnorm  sG    
455 E F F DN'AADr+   c                     || j                   rt        d      || _        t        j                  | j                        | _        y rp   )rg   rR   rn   rd    make_global_gradient_clipnorm_fn_global_clipnorm_fnrs   s     r)   rN   zOptimizerV2.global_clipnorm  sJ    
455 E F F  D.OO Dr+   c                     | j                   S )z>`float` or `None`. If set, clips gradients to a maximum value.)
_clipvaluer7   s    r)   rM   zOptimizerV2.clipvalue  s     ??r+   c                     || j                   rt        d      || _        t        j                  | j                        | _        y )Nz`clipvalue` cannot be set when `gradient_transformers` is set. Instead, use the `gradient_transformers` to specify clipping and other transformations.)rg   rR   ry   rd   make_gradient_clipvalue_fn_clipvalue_fnrs   s     r)   rM   zOptimizerV2.clipvalue  sG    
455 E F F DO(CCDr+   c                     |S )zCCalled in `.minimize` to transform loss before computing gradients.r0   )r1   losss     r)   _transform_losszOptimizerV2._transform_loss  s    Kr+   c                 R    |j                  |||      }t        t        ||            S )z4Called in `minimize` to compute gradients from loss.)gradientlistzip)r1   taper~   var_list	grad_lossgradss         r)   _get_gradientszOptimizerV2._get_gradients  s&    MM$)4EE8$%%r+   c                     |S )z8Called in `apply_gradients` before gradient aggregation.r0   r1   grads_and_varss     r)   !_transform_unaggregated_gradientsz-OptimizerV2._transform_unaggregated_gradients  s    r+   c                 $    | j                  |      S )ai  Called in `apply_gradients` to aggregate gradients across devices.

    Note that user subclasses may override this, so the interface should not be
    changed.

    Args:
      grads_and_vars: List of (gradient, variable) pairs.

    Returns:
      A list of (aggregrated_gradient, variable) pairs. By default, this calls
      `self.gradient_aggregator`.
    )rf   r   s     r)   _aggregate_gradientsz OptimizerV2._aggregate_gradients  s     ##N33r+   c                     | j                   | j                  |      }| j                  | j                  |      }| j                  | j                  |      }| j                  D ]
  } ||      } |S )z.Called in `apply_gradients` after aggregation.)ry   r|   rl   rr   rn   rw   rg   )r1   r   fns      r)   _transform_gradientsz OptimizerV2._transform_gradients  sw    ")).9n~~!((8n(//?n(( *.)n*r+   c                 R    | j                  ||||      }| j                  ||      S )a9  Minimize `loss` by updating `var_list`.

    This method simply computes gradient using `tf.GradientTape` and calls
    `apply_gradients()`. If you want to process the gradient before applying
    then call `tf.GradientTape` and `apply_gradients()` explicitly instead
    of using this function.

    Args:
      loss: `Tensor` or callable. If a callable, `loss` should take no arguments
        and return the value to minimize. If a `Tensor`, the `tape` argument
        must be passed.
      var_list: list or tuple of `Variable` objects to update to minimize
        `loss`, or a callable returning the list or tuple of `Variable` objects.
        Use callable when the variable list would otherwise be incomplete before
        `minimize` since the variables are created at the first time `loss` is
        called.
      grad_loss: (Optional). A `Tensor` holding the gradient computed for
        `loss`.
      name: (Optional) str. Name for the returned operation.
      tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,
        the tape that computed the `loss` must be provided.

    Returns:
      An `Operation` that updates the variables in `var_list`. The `iterations`
      will be automatically increased by 1.

    Raises:
      ValueError: If some of the variables are not `Variable` objects.

    )r   r   r   rD   )_compute_gradientsapply_gradients)r1   r~   r   r   rE   r   r   s          r)   minimizezOptimizerV2.minimize  s9    > ,,x94 - ANT::r+   c                    t        |      s|t        d      ||nt        j                         }t        |      r@|5  t        |      s|j	                  |        |       }t        |      r |       }ddd       |5  | j                  |      }ddd       t        j                  |      }t        j                  | j                  dz         5  | j                  ||||      }ddd       | j                  D cg c]&  \  }}||j                  t        j                  k7  r|( c}}       |S # 1 sw Y   xY w# 1 sw Y   xY w# 1 sw Y   fxY wc c}}w )a[  Compute gradients of `loss` for the variables in `var_list`.

    This is the first part of `minimize()`.  It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable".  Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.

    Args:
      loss: `Tensor` or callable. If a callable, `loss` should take no
        arguments and return the value to minimize. If a `Tensor`, the `tape`
        argument must be passed.
      var_list: list or tuple of `Variable` objects to update to minimize
        `loss`, or a callable returning the list or tuple of `Variable` objects.
        Use callable when the variable list would otherwise be incomplete before
        `minimize` and the variables are created at the first time when `loss`
        is called.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
      tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,
        the tape that computed the `loss` must be provided.

    Returns:
      A list of (gradient, variable) pairs. Variable is always present, but
      gradient can be `None`.

    Raises:
      TypeError: If `var_list` contains anything else than `Variable` objects.
      ValueError: If some arguments are invalid, or var_list is None.
    Nz2`tape` is required when a `Tensor` loss is passed.
/gradients)callablerR   r   GradientTapewatchr   r    flattenr   name_scope_v2_namer   _assert_valid_dtypesdtyper
   resource)r1   r~   r   r   r   r   gvs           r)   r   zOptimizerV2._compute_gradients  sC   > D>dlKLL#4)>)>)@D~  !
**X
vHZ(  
 (!!$'d( ||H%H			4::4	5 L**4xKnL 	$a=QWW7 	
 
 '   ( (L Ls)   6D1>D=E	<+E
1D:=E	Ec                    t        j                  |      }|D cg c]  \  }}|	 }}}t        j                  | j                        5  t        j
                         5  | j                  |       ddd       |st        j                         cddd       S t        j                         rt        d      t        j                         }|sU|rSt        |t        j                  t         j"                  t$        j&                  t$        j(                  f      rt+        d      | j-                  |      }|r"| j/                  |      }| j1                  |      }| j3                  |      }t        j4                         r| j7                  ||||      cddd       S t        j8                         j;                  t=        j>                  | j6                  |      |fd|i      cddd       S c c}}w # 1 sw Y   xY w# 1 sw Y   yxY w)aL  Apply gradients to variables.

    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.

    The method sums gradients from all replicas in the presence of
    `tf.distribute.Strategy` by default. You can aggregate gradients yourself by
    passing `experimental_aggregate_gradients=False`.

    Example:

    ```python
    grads = tape.gradient(loss, vars)
    grads = tf.distribute.get_replica_context().all_reduce('sum', grads)
    # Processing aggregated gradients.
    optimizer.apply_gradients(zip(grads, vars),
        experimental_aggregate_gradients=False)

    ```

    Args:
      grads_and_vars: List of (gradient, variable) pairs.
      name: Optional name for the returned operation. Default to the name passed
        to the `Optimizer` constructor.
      experimental_aggregate_gradients: Whether to sum gradients from different
        replicas in the presence of `tf.distribute.Strategy`. If False, it is the
        user's responsibility to aggregate the gradients. Defaults to True.

    Returns:
      An `Operation` that applies the specified gradients. The `iterations`
      will be automatically increased by 1.

    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
      RuntimeError: If called in a cross-replica context.
    Nzx`apply_gradients() cannot be called in cross-replica context. Use `tf.distribute.Strategy.run` to enter replica context.zo`experimental_aggregate_gradients=False is not supported for ParameterServerStrategy and CentralStorageStrategy)apply_staterE   )r2   r3   ) rd   filter_empty_gradientsr   r   r   
init_scope_create_all_weightsr   no_opr   in_cross_replica_contextRuntimeErrorrb   
isinstancer   ParameterServerStrategyV1r   ParameterServerStrategyV2r   CentralStorageStrategyCentralStorageStrategyV1NotImplementedError_preparer   r   r   strategy_supports_no_merge_call_distributed_applyget_replica_context
merge_call	functoolspartial)	r1   r   rE    experimental_aggregate_gradients_r   r   strategyr   s	            r)   r   zOptimizerV2.apply_gradientsQ  s   R %;;NKN ./fq!/H/			4::	& *>> +  *+   %%'* * 
	0	0	2 	
  ,,.h.8
X/II2LL.EE.GGIJ
 "AB 	B MM(+k	)??O22>B00@n		8	8	:&&x'24E* *J 113>>d55;O " ? K* * 0+ +* *s7   G)	G<G/0G<C7G<AG</G9	4G<<Hc           
           fd}t        j                         }g }t        |xs  j                        5  |D ]  \  }}	|j                  j                  |	      5  t        |rdnd|	j                  j                  z         5  |j                  j                  |	||fd      }
t        j                         r|j                  |
       n|j                  |
       ddd       ddd        t        d |D              }t        j                         r|rt!        j"                  |      j%                         5  t        j&                  t)        j*                  |      g      5   j,                  j/                  dd	      cddd       cddd       cddd       S  j,                  j/                  d      cddd       S # 1 sw Y   xY w# 1 sw Y   xY w# 1 sw Y   nxY w	 ddd       S# 1 sw Y   \xY w# 1 sw Y   yxY w)
z1`apply_gradients` using a `DistributionStrategy`.c                 0   t        | t        j                        rt        d|       i }t        |t        j
                        rS| j                  t        d      dj                  v r|d<    j                  |j                  | |j                  fi |S dj                  v r|d<    j                  || fi |}| j                  @t        j                  |g      5  | j!                  | j                  |             cddd       S |S # 1 sw Y   yxY w)zApply gradient to variable.zTrying to update a Tensor Nz6Cannot use a constraint function on a sparse variable.r   )r   r   Tensorr   r   IndexedSlices
constraintr   _sparse_apply_args(_resource_apply_sparse_duplicate_indicesr   r%   _dense_apply_args_resource_apply_denser   control_dependenciesassign)vargradapply_kwargs	update_opr   r1   s       r)   apply_grad_to_update_varz@OptimizerV2._distributed_apply.<locals>.apply_grad_to_update_var  s   	C	'!">DDl	D.66	7>>%FH HD333(3,}
%<t<<KKdll<.:< 	< 
$00	0&1]#,$,,T3G,Gi		#%%yk2 	1CNN3/0	1 	1 	1 	1s     DDupdateupdate_F)r2   groupNc              3   ~   K   | ]5  }t        |t        j                        xs t        j                  |       7 y wr/   )r   r   	Operationr   is_symbolic_tensor).0is     r)   	<genexpr>z1OptimizerV2._distributed_apply.<locals>.<genexpr>  s=      M<= $As}}5 8!44Q78 Ms   ;=   )
read_value)r   #executing_eagerly_outside_functionsrF   r   extendedcolocate_vars_withoprE   r   r   r   extendappendanyr	   rB   r   _current_graph
as_defaultr   r   r   r\   
assign_add)r1   distributionr   rE   r   r   eagerly_outside_functions
update_opsr   r   r   any_symbolics   `   `       r)   r   zOptimizerV2._distributed_apply  s   0 !$ G G IJ	-d.@djj	A ,% +)$ ""55c: 	+33hffkk: + %--44-TG5 5 JI668 	* 	*+	+ 	++$  MAKM Ml&&(L ##J/::< 	D'')9)?)?
)K(LM D##..qU.CD D	D 	D3, ,: ((+;, ,
+ +	+ 	+,D D D	D 	D 	D3, ,sy   $G:'GAF?	G#AG:8*G."G?	G.	G:G:?GGGG:G!G.%	G:.G7	3G::Hc                    t        j                  |      }t        j                         j	                         5  t        j
                  | j                  dz         5  t        j                  ||      }t        ||      D ]!  \  }}|	t        dj                  |             	 ddd       ddd       S # 1 sw Y   xY w# 1 sw Y   S xY w)a[  Returns gradients of `loss` with respect to `params`.

    Should be used only in legacy v1 graph mode.

    Args:
      loss: Loss tensor.
      params: List of variables.

    Returns:
      List of gradient tensors.

    Raises:
      ValueError: In case any gradient cannot be computed (e.g. if gradient
        function not implemented).
    r   NzVariable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.)r    r   r   	get_graphr   
name_scoper   r   r   rR   rS   )r1   r~   paramsr   r   params         r)   get_gradientszOptimizerV2.get_gradients  s      \\&!F					'	'	) 	G7+=+=djj>J?K ,L 	G!!$/eUF+ G+$< 8 9?u	G GG	G 	G L	G 	G 	G Ls)   #C+B5B5#C5B>	:CCc                    | j                  ||      }t        t        ||            }| j                  |D cg c]&  \  }}||j                  t
        j                  k7  r|( c}}       | j                  |      gS c c}}w r/   )r   r   r   r   r   r
   r   r   )r1   r~   r   r   r   r   r   s          r)   get_updateszOptimizerV2.get_updates  s|    tV,E#eV,-N$a=QWW7 	
    011	s   +A=
c                    t        |t        j                        r| j                  ||d       || j                  vr|| j                  |<   y| j                  |   }t        |      sNt        |t        j                  t        t        t        j                  f      st        |t        j                        r|| j                  |<   yt        j                  | j                  |   |       y)zBset hyper `name` to value. value can be callable, tensor, numeric.T)	overwriteN)r   	trackable	Trackable_track_trackablerX   r   r   r   intfloatr   LearningRateScheduler   	set_value)r1   rE   value
prev_values       r)   
_set_hyperzOptimizerV2._set_hyper  s    %,,-
E4484;;dkk$;;t$j
:

U/DDFG 5JJK!D$++d+U3r+   c                     | j                   s| j                          | j                  |   }t        |t        j
                        r|S t        |      r |       }|rt        j                  ||      S |S r/   )	r`   _create_hypersrX   r   r   r   r   r   cast)r1   rE   r   r   s       r)   
_get_hyperzOptimizerV2._get_hyper  sb    
KKE%/DDElge]]5%((lr+   c                      y r/   r0   )r1   r   s     r)   _create_slotszOptimizerV2._create_slots)  r5   r+   c                 ^    | j                   }| j                          | j                  |       y)a  Creates all weights, including iterations, hyperparameters and slot vars.

    This will add newly created variables to `optimizer.weights`.

    New variables are only created when this method is called the first time, or
    when called with different variables in the var_list.

    Args:
      var_list: list or tuple of `Variable` objects that will be minimized
        using this optimizer.
    N)
iterationsr   r   )r1   r   r   s      r)   r   zOptimizerV2._create_all_weights,  s'     	Ax r+   c                     	 t         t        |   |      S # t        $ r9}|dk(  r||dk(  rd}|| j                  v r| j                  |      cY d}~S |d}~ww xY w)z,Overridden to support hyperparameter access.rX   rJ   learning_rateN)superrH   __getattribute__AttributeErrorrX   r   )r1   rE   e	__class__s      r)   r  zOptimizerV2.__getattribute__=  sc    ;6t<< 						t$$g	s     	A,AAAAc                     t        t        t        |                }d|v rJ|| j                  j                         z  }d| j                  j                         v r|j                  d       t        |      S )NrX   r  rJ   )setr  rH   __dir__rX   keysaddr   )r1   resultr  s     r)   r	  zOptimizerV2.__dir__L  sa    {D134F6  ""f	DKK,,.	.

4<r+   c                     |dk(  rd}t        | d      r!|| j                  v r| j                  ||       yt        t        |   ||       y)z;Override setattr to support dynamic hyperparameter setting.rJ   r  rX   N)hasattrrX   r   r  rH   __setattr__)r1   rE   r   r  s      r)   r  zOptimizerV2.__setattr__T  sE     t|dtX44;;#6
oodE"K*47r+   c                     | j                   S )z+A list of names for this optimizer's slots.)rZ   r7   s    r)   get_slot_nameszOptimizerV2.get_slot_names^  s    r+   zerosc                    || j                   vr| j                   j                  |       t        |      }| j                  j	                  |i       }|j                  |d      }|t        |t              st        |      rct        j
                  |      }t        |t        j                        s||}n|j                  }t        j                  |||j                        }	n|}	| j!                         5  t#        j$                         }
|
j&                  j)                  |      st+        dj-                  |
|            |
j&                  j/                  |      5  t1        j2                  |j4                  d||j                  d|	      }ddd       ddd       t7        j8                  |       |||<   | j;                  |||       | j<                  j                  |       |S # 1 sw Y   \xY w# 1 sw Y   `xY w)a  Add a new slot variable for `var`.

    A slot variable is an additional variable associated with `var` to train.
    It is allocated and managed by optimizers, e.g. `Adam`.

    Args:
      var: a `Variable` object.
      slot_name: name of the slot variable.
      initializer: initializer of the slot variable
      shape: (Optional) shape of the slot variable. If not set, it will default
      to the shape of `var`.

    Returns:
      A slot variable.
    N)r$   r   a0  Trying to create optimizer slot variable under the scope for tf.distribute.Strategy ({}), which is different from the scope used for the original variable ({}). Make sure the slot variables are created under the same strategy scope. This may happen if you're restoring from a checkpoint outside the scope/F)rE   r   	trainableinitial_value)	slot_namevariableslot_variable)rZ   r   _var_keyrY   
setdefaultgetr   rQ   r   r   r   CheckpointInitialValueCallabler$   r   r   r   _distribution_strategy_scoper   rb   r   variable_created_in_scoperR   rS   r   tf_variablesVariable_shared_namer   track_variable_restore_slot_variabler[   )r1   r   r  initializerr$   var_key	slot_dictweight
slot_shaper  r   s              r)   add_slotzOptimizerV2.add_slotb  s     (((
i(smG&&w3I]]9d+F~	K	%+)>"&&{3446:?:K*yy*!))z< $,,. +!..0  ::3?O
 vh$& & 11#6 	+((!..	:II)	+&	++" V$#i	
!! "   mm6"M	+ 	++ +s%   6A&G63G*G6*G3	/G66G?c                 @    t        |      }| j                  |   }||   S r/   )r  rY   )r1   r   r  r&  r'  s        r)   get_slotzOptimizerV2.get_slot  s$    smGG$IYr+   c                    t               }|D ]f  }t        |t        j                        r|j                  }n|j
                  g}|j                  j                  }|D ]  }|j                  ||f        h i }|D ]=  \  }}i |||f<   t        j
                  |      5  | j                  |||       d d d        ? |S # 1 sw Y   KxY wr/   )r  r   	ds_valuesDistributedValues_devicesdevicer   
base_dtyper  r   _prepare_local)r1   r   r
  r   var_devices	var_dtype
var_devicer   s           r)   r   zOptimizerV2._prepare  s    5D *	C44	5llzzl))&&i# ***i()** K!% @
I-/k:y)*::j! @J	;?@ @@
 @ @s   B<<C	c                 ~    d| j                   v r/t        j                  | j                  |            }||||f   d<   y y )Nr  lr_t)rX   r   identity_decayed_lr)r1   r6  r5  r   r8  s        r)   r3  zOptimizerV2._prepare_local  sB    $++% 0 0 ;<d59k:y)*62 &r+   c                 B    ||fi i}| j                  |||       |||f   S )zACompatibility for subclasses that don't pass apply_state through.)r3  )r1   r6  r5  r   s       r)   _fallback_apply_statez!OptimizerV2._fallback_apply_state  s3    	*B/K
I{;
I.//r+   c           	         | j                   ry | j                         5  t        | j                  j	                               D ]v  \  }}t        |t        j                  t        j                  f      st        |      r<| j                  |g d|t        j                  j                        | j                  |<   x 	 d d d        d| _         y # 1 sw Y   d| _         y xY w)NF)r$   r  r%  aggregationT)r`   r  sortedrX   itemsr   r   r   r   r!  r   
add_weightVariableAggregationONLY_FIRST_REPLICA)r1   rE   r   s      r)   r   zOptimizerV2._create_hypers  s    		*	*	, O 1 1 34 O+$FMM<#8#89;>Fuo "oo&::MM . O$++d
OO   D!O   Ds   BCCc                 H   | j                   | j                         5  | j                  dg t        j                  dt
        j                  j                        | _         ddd       | j                  j                  | j                          | j                   S # 1 sw Y   :xY w)z>Variable. The number of training steps this Optimizer has run.NiterF)r$   r   r  r>  )
r\   r  rA  r
   int64r   rB  rC  r[   r   r7   s    r)   r   zOptimizerV2.iterations  s     ,,. M??,,$88KK + MM mm4++,M Ms   ABB!c                     | j                   t        d      || _         | j                  j                  | j                          y )NzWCannot set `iterations` to a new Variable after the Optimizer weights have been created)r\   r   r[   r   )r1   r  s     r)   r   zOptimizerV2.iterations  sC    # C D DDMM))*r+   c                    | j                  d|      }t        |t        j                        r<t	        j
                  | j                  |      }t	        j
                   ||      |      }| j                  dkD  rKt	        j
                  | j                  |      }t	        j
                  | j                  |      }|d||z  z   z  }|S )z;Get decayed learning rate as a Tensor with dtype=var_dtype.r  rO   g      ?)r   r   r   r   r   r   r   r_   )r1   r5  r8  
local_stepdecay_ts        r)   r:  zOptimizerV2._decayed_lr  s    ???I6D$.CCD==)<j]]4
+Y7dR==)<jd119=gR'J../dKr+   c                     d| j                   i}| j                  | j                  |d<   | j                  | j                  |d<   | j                  | j                  |d<   |S )a-  Returns the config of the optimizer.

    An optimizer config is a Python dictionary (serializable)
    containing the configuration of an optimizer.
    The same optimizer can be reinstantiated later
    (without any saved state) from this configuration.

    Returns:
        Python dictionary.
    rE   rL   rM   rN   )r   rL   rM   rN   )r1   configs     r)   
get_configzOptimizerV2.get_config  sd     djj!F}} ==fZ~~! NNf['"&"6"6fMr+   c                     d|v r|j                  d      |d<   d|v r0t        |d   t              rt        j                  |d   |      |d<    | di |S )a  Creates an optimizer from its config.

    This method is the reverse of `get_config`,
    capable of instantiating the same optimizer from the config
    dictionary.

    Args:
        config: A Python dictionary, typically the output of get_config.
        custom_objects: A Python dictionary mapping names to additional Python
          objects used to create this optimizer, such as a function used for a
          hyperparameter.

    Returns:
        An optimizer instance.
    rJ   r  )custom_objectsr0   )r^   r   dictr   deserialize)clsrL  rO  s      r)   from_configzOptimizerV2.from_config  sa    " v~ &

4 0f_& 	F?+T	2"8"D"D?#N#D==r+   c                     | j                   |   }t        |t        j                        rt        j                  |      S t        |      r |       S t        j                  |      rt        j                  |      S |S )zDSerialize a hyperparameter that can be a float, callable, or Tensor.)
rX   r   r   r   	serializer   r   
is_tf_typer   	get_value)r1   hyperparameter_namer   s      r)   _serialize_hyperparameterz%OptimizerV2._serialize_hyperparameter&  se    KK+,E%/DDE#--e44Wne$u%%Lr+   c                     | j                   S z?Returns variables of this Optimizer based on the order created.r[   r7   s    r)   r   zOptimizerV2.variables1  s    ==r+   c                     | j                   S r[  r\  r7   s    r)   weightszOptimizerV2.weights5  s     ==r+   c                 D    | j                   }t        j                  |      S )a  Returns the current weights of the optimizer.

    The weights of an optimizer are its state (ie, variables).
    This function returns the weight values associated with this
    optimizer as a list of Numpy arrays. The first value is always the
    iterations count of the optimizer, followed by the optimizer's state
    variables in the order they were created. The returned list can in turn
    be used to load state into similarly parameterized optimizers.

    For example, the RMSprop optimizer for this simple model returns a list of
    three values-- the iteration count, followed by the root-mean-square value
    of the kernel and bias of the single Dense layer:

    >>> opt = tf.keras.optimizers.RMSprop()
    >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> m.compile(opt, loss='mse')
    >>> data = np.arange(100).reshape(5, 20)
    >>> labels = np.zeros(5)
    >>> results = m.fit(data, labels)  # Training.
    >>> len(opt.get_weights())
    3

    Returns:
        Weights values as a list of numpy arrays.
    )r^  r   batch_get_value)r1   r   s     r)   get_weightszOptimizerV2.get_weights:  s    4 \\F""6**r+   c                 Z   | j                   }t        |      t        |      k7  r]t        d| j                  z   dz   t	        t        |            z   dz   t	        t        |            z   dz   t	        |      dd z   dz         |syg }t        j                  |      }t        |||      D ]l  \  }}}|j                  |j                  k7  r:t        dt	        |j                        z   d	z   t	        |j                        z         |j                  ||f       n t        j                  |       y)
a  Set the weights of the optimizer.

    The weights of an optimizer are its state (ie, variables).
    This function takes the weight values associated with this
    optimizer as a list of Numpy arrays. The first value is always the
    iterations count of the optimizer, followed by the optimizer's state
    variables in the order they are created. The passed values are used to set
    the new state of the optimizer.

    For example, the RMSprop optimizer for this simple model takes a list of
    three values-- the iteration count, followed by the root-mean-square value
    of the kernel and bias of the single Dense layer:

    >>> opt = tf.keras.optimizers.RMSprop()
    >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
    >>> m.compile(opt, loss='mse')
    >>> data = np.arange(100).reshape(5, 20)
    >>> labels = np.zeros(5)
    >>> results = m.fit(data, labels)  # Training.
    >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])]
    >>> opt.set_weights(new_weights)
    >>> opt.iterations
    <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10>

    Args:
        weights: weight values as a list of numpy arrays.
    z/You called `set_weights(weights)` on optimizer z with a  weight list of length z", but the optimizer was expecting z weights. Provided weights: N2   z...zOptimizer weight shape z+ not compatible with provided weight shape )r^  lenrR   r   rQ   r   r`  r   r$   r   batch_set_value)r1   r^  r   weight_value_tuplesparam_valuespvpws           r)   set_weightszOptimizerV2.set_weightsX  sC   8 \\F
6{c'l"
;djj
H
+,.1#g,.?@
./14S[1AB )) ,/w<+<= @EEF F
 **62Lfg6 )Aq	QWW	2S]B2247LA B 	B   !Q() /0r+   c                 h   |t         j                  }t        |t              st	        |      rt        j                  |      }|t        j                  j                  k(  r|rt        d      d}n|d}| j                  ||t        j                  d|||d||
      }t        j                  |       |S )NzSynchronization value can be set to VariableSynchronization.ON_READ only for non-trainable variables. You have specified trainable=True and synchronization=VariableSynchronization.ON_READ.FT)
rE   r$   getterr   r%  r   r  use_resourcesynchronizationr>  )r
   float32r   rQ   r   r   r  r   VariableSynchronizationON_READrR    _add_variable_with_custom_getterr   make_variabler   r#  )	r1   rE   r$   r   r%  r  ro  r>  r  s	            r)   rA  zOptimizerV2.add_weight  s     }nne+s#x'< $$[1k,>>FFF	?@ 	@ 			i44--' 5 
!H 8$Or+   c                     |sDt        j                  t        j                  | j                  j
                        |      | _        y || _        y )N)
zero_based)r   unique_object_namer   to_snake_caser  r>   r   )r1   rE   rv  s      r)   rW   zOptimizerV2._init_set_name  s;    --

%
%dnn&=&=
>!dj djr+   c                     | j                         }|D ]I  }|j                  j                  }||vst        d|d|j                  d|D cg c]  }| c}d       yc c}w )zAsserts tensors are all valid types (see `_valid_dtypes`).

    Args:
      tensors: Tensors to check.

    Raises:
      ValueError: If any tensor is not a valid type.
    zInvalid type z for z, expected: .N)_valid_dtypesr   r2  rR   rE   )r1   tensorsvalid_dtypestr   r   s         r)   r   z OptimizerV2._assert_valid_dtypes  sk     %%'L Egg  e	l	"\)B!)BD E 	EE *Cs   	A c                     t         S )zValid types for loss, variables and gradients.

    Subclasses should override to allow other float types.

    Returns:
      Valid types for loss, variables and gradients.
    )_DEFAULT_VALID_DTYPESr7   s    r)   r{  zOptimizerV2._valid_dtypes  s
     ! r+   c                 *    t        |      r |       S |S )z'Call the function if param is callable.)r   )r1   r   s     r)   _call_if_callablezOptimizerV2._call_if_callable  s    uo57050r+   c                     t        d      )an  Add ops to apply dense gradients to the variable `handle`.

    Args:
      grad: a `Tensor` representing the gradient.
      handle: a `Tensor` of dtype `resource` which points to the variable to be
        updated.
      apply_state: A dict which is used across multiple apply calls.

    Returns:
      An `Operation` which updates the value of the variable.
    "Must be implemented in subclasses.r   )r1   r   handler   s       r)   r   z!OptimizerV2._resource_apply_dense  s     B
CCr+   c                 L    t        ||      \  }} | j                  |||fi |S )a  Add ops to apply sparse gradients to `handle`, with repeated indices.

    Optimizers which override this method must deal with repeated indices. See
    the docstring of `_apply_sparse_duplicate_indices` for details. By default
    the correct behavior, to sum non-unique indices and their associated
    gradients, is enforced by first pre-processing `grad` and `indices` and
    passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
    with duplicate indices may instead override this method to avoid the
    overhead of summing.

    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable to be
        updated.
      indices: a `Tensor` of integral type representing the indices for which
        the gradient is nonzero. Indices may be repeated.
      **kwargs: May optionally contain `apply_state`

    Returns:
      An `Operation` which updates the value of the variable.
    )r   r%   )r*   _resource_apply_sparse)r1   r   r  r%   r3   summed_gradr&   s          r)   r   z4OptimizerV2._resource_apply_sparse_duplicate_indices  s;    . #>W#&K&4&&{FN 1)/1 1r+   c                     t        d      )a  Add ops to apply sparse gradients to the variable `handle`.

    Similar to `_apply_sparse`, the `indices` argument to this method has been
    de-duplicated. Optimizers which deal correctly with non-unique indices may
    instead override `_resource_apply_sparse_duplicate_indices` to avoid this
    overhead.

    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable to be
        updated.
      indices: a `Tensor` of integral type representing the indices for which
        the gradient is nonzero. Indices are unique.
      apply_state: A dict which is used across multiple apply calls.

    Returns:
      An `Operation` which updates the value of the variable.
    r  r  )r1   r   r  r%   r   s        r)   r  z"OptimizerV2._resource_apply_sparse  s    & B
CCr+   c                     t        j                  t        j                  |j                  ||      g      5  |j                         cd d d        S # 1 sw Y   y xY wN)r   r%   updates)r   r   r   ResourceScatterAddr  r   r1   xr   r   s       r)   _resource_scatter_addz!OptimizerV2._resource_scatter_add  sN    		!	!!44XXq!	5# 
  WWY	     AAc                     t        j                  t        j                  |j                  ||      g      5  |j                         cd d d        S # 1 sw Y   y xY wr  )r   r   r   ResourceScatterUpdater  r   r  s       r)   _resource_scatter_updatez$OptimizerV2._resource_scatter_update  sN    		!	!	"	8	8XXq!
5 	6
7  WWY  r  c                 T    t        j                  | j                        j                  S r/   )r   getfullargspecr   r2   r7   s    r)   r   zOptimizerV2._dense_apply_args#  s!     $$T%?%?@EEEr+   c                 T    t        j                  | j                        j                  S r/   )r   r  r  r2   r7   s    r)   r   zOptimizerV2._sparse_apply_args(  s!     $$T%@%@AFFFr+   c                     t        |      }| j                  j                  |i       j                  |g       }|j	                  d d       |D ]  }|j                  |        y)z.Restore a newly created slot variable's value.c                     | j                   S r/   )restore_uid)positions    r)   <lambda>z4OptimizerV2._restore_slot_variable.<locals>.<lambda>8  s    H4H4H r+   T)keyreverseN)r  r]   r  r^   sortrestore)r1   r  r  r  variable_keydeferred_restorationscheckpoint_positions          r)   r$  z"OptimizerV2._restore_slot_variable1  so    H%L <<@@2s<,  #H'+  -4 1!!-01r+   c                 &   t        |      }| j                  j                  |i       }|j                  |d      }|t        j                         rs|j                         rct        j                         j                  r| j                  r9t        j                  |      }| j                  ||||j                               }||j                  |       y| j                  j!                  |i       j!                  |g       j#                  |       y)a  Restore a slot variable's value, possibly creating it.

    Called when a variable which has an associated slot variable is created or
    restored. When executing eagerly, we create the slot variable with a
    restoring initializer.

    No new variables are created when graph building. Instead,
    _restore_slot_variable catches these after normal creation and adds restore
    ops to the graph. This method is nonetheless important when graph building
    for the case when a slot variable has already been created but `variable`
    has just been added to a dependency graph (causing us to realize that the
    slot variable needs to be restored).

    Args:
      slot_variable_position: A `trackable._CheckpointPosition` object
        indicating the slot variable `Trackable` object to be restored.
      slot_name: The name of this `Optimizer`'s slot to restore into.
      variable: The variable object this slot is being created for.
    N)r  )r   r%  r  r$   )r  rY   r  r	   rB   is_simple_variabler   get_default_graph_variable_creator_stackrc   r   r  r*  value_shaper  r]   r  r   )r1   slot_variable_positionr  r  r  r'  r  r%  s           r)    _create_or_restore_slot_variablez,OptimizerV2._create_or_restore_slot_variable=  s    * H%Lb1IMM)T2M'";";"=113 &&(@@((<<46kmm!&224	 $ 6m   $$]3 &&11
R#L"5ff$7&r+   c              #      K   | j                   rTt        j                         s@| j                   j                         5  | j                   j                          ddd       yd y# 1 sw Y   yxY ww)zFReturns the `tf.distribute.Strategy` this optimizer was created under.N)rc   r   ra   scoper7   s    r)   r  z(OptimizerV2._distribution_strategy_scope  sa      "">+F+F+H&&,,. 2))//112 2 2 2s   ;A4A(A4(A1-A4)NNr/   )NNN)NT)r  N)T)Ir>   r?   r@   __doc___HAS_AGGREGATE_GRADr4   propertyrL   rN   setterrM   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r  r	  r  r  r*  r,  r   r3  r<  r   r   r:  abcabstractmethodrM  classmethodrS  rY  r   r^  ra  rk  r   rq  AUTOrB  NONErA  rW   r   r{  r  r   r   r  r  r  r   cached_per_instancer   r   r$  r  
contextlibcontextmanagerr  __classcell__r  s   @r)   rH   rH   o   s   zF  $(%)h3T   ! ! ??       &
4!;F7v  7;Vp:,x:24"	!"8:x 
&:
0 *   + +
  (  0	  +<-1d $!-!E!E!J!J)==BB(TE !1D18D* ""F # F ""G # G
1B&H  r+   rH   c                     t        | d      r| j                         } | j                  r| j                  S | j                  S )aP  Key for representing a primary variable, for looking up slots.

  In graph mode the name is derived from the var shared name.
  In eager mode the name is derived from the var unique id.
  If distribution strategy exists, get the primary variable first.

  Args:
    var: the variable.

  Returns:
    the unique name of the variable.
  """
  # Get the distributed variable if it exists.
  if hasattr(var, "_distributed_container"):
    var = var._distributed_container()
  if var._in_graph_mode:
    return var._shared_name
  return var._unique_id


def _get_slot_key_from_var(var, slot_name):
  """Get the slot key for the variable: var_name/slot_name."""
  name = _var_key(var)
  return name + "/" + slot_name


class RestoredOptimizer(OptimizerV2):
  """A non-functional Optimizer implementation for checkpoint compatibility.

  Holds slot variables and hyperparameters when an optimizer is restored from a
  SavedModel. These variables may be referenced in functions along with ops
  created by the original optimizer, but currently we do not support using the
  optimizer object itself (e.g. through `apply_gradients`).
  """

  def __init__(self):
    super(RestoredOptimizer, self).__init__("RestoredOptimizer")
    self._hypers_created = True

  def get_config(self):
    raise NotImplementedError(
        "Restoring functional Optimizers from SavedModels is not currently "
        "supported. Please file a feature request if this limitation bothers "
        "you.")


revived_types.register_revived_type(
    "tf_deprecated_optimizer",
    lambda obj: isinstance(obj, OptimizerV2),
    versions=[
        revived_types.VersionedTypeRegistration(
            object_factory=lambda proto: RestoredOptimizer(),
            version=1,
            min_producer_version=1,
            min_consumer_version=1,
            setter=RestoredOptimizer._set_hyper)
    ])