
"""Adagrad for TensorFlow."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_training_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["train.AdagradOptimizer"])
class AdagradOptimizer(optimizer.Optimizer):
  """Optimizer that implements the Adagrad algorithm.

  References:
    Adaptive Subgradient Methods for Online Learning and Stochastic Optimization:
      [Duchi et al., 2011](http://jmlr.org/papers/v12/duchi11a.html)
      ([pdf](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf))
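
  The variable update follows the standard formulation from the paper. As an
  informal sketch (not the exact fused kernel this optimizer dispatches to),
  the dense update per step looks roughly like the NumPy helper below; the
  function and variable names are illustrative only:

  ```python
  import numpy as np

  def adagrad_step(var, accum, grad, learning_rate):
    # Accumulate squared gradients, then scale the step by 1 / sqrt(accumulator).
    accum = accum + grad * grad
    var = var - learning_rate * grad / np.sqrt(accum)
    return var, accum
  ```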

  @compatibility(TF2)
  tf.compat.v1.train.AdagradOptimizer is compatible with eager mode and
  `tf.function`.
  When eager execution is enabled, `learning_rate` can be a callable that
  takes no arguments and returns the actual value to use. This can be useful
  for changing the learning rate across different invocations of optimizer
  functions.
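
  For example, a minimal sketch (the names below are illustrative only):

  ```python
  def lr():
    # Re-evaluated when the optimizer prepares its update ops.
    return 0.1

  optimizer = tf.compat.v1.train.AdagradOptimizer(learning_rate=lr)
  ```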

  To switch to native TF2 style, use [`tf.keras.optimizers.Adagrad`]
  (https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adagrad)
  instead. Please note that, due to implementation differences,
  `tf.keras.optimizers.Adagrad` and
  `tf.compat.v1.train.AdagradOptimizer` may differ slightly in
  floating point numerics even though the formula used for the variable
  updates still matches.

  #### Structural mapping to native TF2

  Before:

  ```python
  optimizer = tf.compat.v1.train.AdagradOptimizer(
    learning_rate=learning_rate,
    initial_accumulator_value=initial_accumulator_value)
  ```

  After:

  ```python
  optimizer = tf.keras.optimizers.Adagrad(
    learning_rate=learning_rate,
    initial_accumulator_value=initial_accumulator_value,
    epsilon=1e-07)
  ```

  #### How to map arguments
  | TF1 Arg Name       | TF2 Arg Name    | Note                             |
  | ------------------ | --------------- | -------------------------------- |
  | `learning_rate`    | `learning_rate` | Be careful when setting a        |
  : : : `learning_rate` tensor value computed from the global step.        :
  : : : In TF1 this usually implied a dynamic learning rate that would be  :
  : : : recomputed at each step. In TF2 (eager + function) it is treated   :
  : : : as a scalar value that is computed only once, rather than a        :
  : : : symbolic placeholder re-evaluated each time.                       :
  | `initial_accumulator_value` | `initial_accumulator_value` | The        |
  : : : argument can be a value of zero in TF2, which is not accepted in   :
  : : : TF1.                                                               :
  | - | `epsilon`      | `epsilon` is configurable in TF2. Its default     |
  : : : value is changed from 1e-8 to 1e-07.                               :
  | `use_locking`      | -               | Not applicable in TF2.          |

  #### Before & after usage example
  Before:

  ```python
  x = tf.Variable([1,2,3], dtype=tf.float32)
  grad = tf.constant([0.1, 0.2, 0.3])
  optimizer = tf.compat.v1.train.AdagradOptimizer(learning_rate=0.001)
  optimizer.apply_gradients(zip([grad], [x]))
  ```

  After:

  ```python
  x = tf.Variable([1,2,3], dtype=tf.float32)
  grad = tf.constant([0.1, 0.2, 0.3])
  optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.001)
  optimizer.apply_gradients(zip([grad], [x]))
  ```

  @end_compatibility
  """

  def __init__(self, learning_rate, initial_accumulator_value=0.1,
               use_locking=False, name="Adagrad"):
    """Construct a new Adagrad optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value.  The learning rate.
      initial_accumulator_value: A floating point value.
        Starting value for the accumulators, must be positive.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to "Adagrad".

    Raises:
      ValueError: If the `initial_accumulator_value` is invalid.
    """
    if initial_accumulator_value <= 0.0:
      raise ValueError("initial_accumulator_value must be positive: %s" %
                       initial_accumulator_value)
    super(AdagradOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._initial_accumulator_value = initial_accumulator_value
    # Converted to a tensor in _prepare().
    self._learning_rate_tensor = None

  def _create_slots(self, var_list):
    for v in var_list:
      dtype = v.dtype.base_dtype
      if v.get_shape().is_fully_defined():
        init = init_ops.constant_initializer(self._initial_accumulator_value,
                                             dtype=dtype)
      else:
        init = self._init_constant_op(v, dtype)
      self._get_or_make_slot_with_initializer(v, init, v.get_shape(), dtype,
                                              "accumulator", self._name)

  def _init_constant_op(self, v, dtype):
    def init():
      # Use a Tensor instead of an initializer if the variable does not have
      # a static shape.
      init_constant = gen_array_ops.fill(array_ops.shape(v),
                                         self._initial_accumulator_value)
      return math_ops.cast(init_constant, dtype)
    return init

  def _prepare(self):
    learning_rate = self._call_if_callable(self._learning_rate)
    self._learning_rate_tensor = ops.convert_to_tensor(
        learning_rate, name="learning_rate")

  def _apply_dense(self, grad, var):
    acc = self.get_slot(var, "accumulator")
    return gen_training_ops.apply_adagrad(
        var,
        acc,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad,
        use_locking=self._use_locking)

  def _resource_apply_dense(self, grad, var):
    acc = self.get_slot(var, "accumulator")
    return gen_training_ops.resource_apply_adagrad(
        var.handle,
        acc.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
        grad,
        use_locking=self._use_locking)

  def _apply_sparse(self, grad, var):
    acc = self.get_slot(var, "accumulator")
    return gen_training_ops.sparse_apply_adagrad(
        var,
        acc,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        grad.values,
        grad.indices,
        use_locking=self._use_locking)

  def _resource_apply_sparse(self, grad, var, indices):
    acc = self.get_slot(var, "accumulator")
    return gen_training_ops.resource_sparse_apply_adagrad(
        var.handle,
        acc.handle,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        grad,
        indices,
        use_locking=self._use_locking)