
"""Implements the graph generation for computation of gradients."""

# Note: most of the *_grad and op modules imported below are not referenced
# directly in this file; importing them registers their gradient functions.
from tensorflow.compiler.jit.ops import xla_ops_grad
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import image_grad
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import linalg_grad
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import lookup_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import optional_grad
from tensorflow.python.ops import parsing_grad
from tensorflow.python.ops import proto_ops
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sdca_ops
from tensorflow.python.ops import sets
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import while_loop
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_grad
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["gradients"])
def gradients(ys,
              xs,
              grad_ys=None,
              name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None,
              stop_gradients=None,
              unconnected_gradients=UnconnectedGradients.NONE):
  """Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.

  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the derivatives of `ys` with
  respect to `xs`.  It returns a list of `Tensor` of length `len(xs)` where
  each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`.  When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).
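
  For example (an illustrative sketch; the tensors below are arbitrary),
  `grad_ys` can be used to weight the contribution of each `y`:

  ```python
  x = tf.constant(1.)
  y1 = 2. * x
  y2 = 3. * x
  # Weight dy1/dx by 0.5 and dy2/dx by 2.0 instead of the default 1.0.
  g = tf.gradients([y1, y2], [x],
                   grad_ys=[tf.constant(0.5), tf.constant(2.0)])
  sess.run(g)  # [7.0] == 0.5 * 2.0 + 2.0 * 3.0
  ```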

  `stop_gradients` is a `Tensor` or a list of tensors to be considered constant
  with respect to all `xs`. These tensors will not be backpropagated through,
  as though they had been explicitly disconnected using `stop_gradient`.  Among
  other things, this allows computation of partial derivatives as opposed to
  total derivatives. For example:

  ```python
  a = tf.constant(0.)
  b = 2 * a
  g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
  ```

  Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
  total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
  influence of `a` on `b` and evaluate to `[3.0, 1.0]`.  Note that the above is
  equivalent to:

  ```python
  a = tf.stop_gradient(tf.constant(0.))
  b = tf.stop_gradient(2 * a)
  g = tf.gradients(a + b, [a, b])
  ```

  `stop_gradients` provides a way of stopping gradient after the graph has
  already been constructed, as compared to `tf.stop_gradient` which is used
  during graph construction.  When the two approaches are combined,
  backpropagation stops at both `tf.stop_gradient` nodes and nodes in
  `stop_gradients`, whichever is encountered first.
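
  For example (an illustrative sketch with made-up values):

  ```python
  a = tf.constant(1.)
  b = tf.stop_gradient(2. * a)  # cuts the path during graph construction
  c = 3. * b
  # Backpropagation also stops at `b` because it is listed in
  # `stop_gradients`; either mechanism alone leaves `a` unconnected to `c`.
  g = tf.gradients(c, [a, b], stop_gradients=[b])
  sess.run(g[1])  # 3.0, while g[0] is None
  ```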

  All integer tensors are considered constant with respect to all `xs`, as if
  they were included in `stop_gradients`.

  `unconnected_gradients` determines the value returned for each x in xs if it
  is unconnected in the graph to ys. By default this is None to safeguard
  against errors. Mathematically these gradients are zero which can be requested
  using the `'zero'` option. `tf.UnconnectedGradients` provides the
  following options and behaviors:

  ```python
  a = tf.ones([1, 2])
  b = tf.ones([3, 1])
  g1 = tf.gradients([b], [a], unconnected_gradients='none')
  sess.run(g1)  # [None]

  g2 = tf.gradients([b], [a], unconnected_gradients='zero')
  sess.run(g2)  # [array([[0., 0.]], dtype=float32)]
  ```

  Let us take one practical example that comes up during the backpropagation
  phase. This function is used to evaluate the derivatives of the cost function
  with respect to weights `Ws` and biases `bs`. The sample implementation below
  shows what it is actually used for:

  ```python
  Ws = tf.constant(0.)
  bs = 2 * Ws
  cost = Ws + bs  # This is just an example. So, please ignore the formulas.
  g = tf.gradients(cost, [Ws, bs])
  dCost_dW, dCost_db = g
  ```


  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      Defaults to 'gradients'.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operation.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.
    stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
      through.
    unconnected_gradients: Optional. Specifies the gradient value returned when
      the given input tensors are unconnected. Accepted values are constants
      defined in the class `tf.UnconnectedGradients` and the default value is
      `none`.

  Returns:
    A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
    for y in `ys` and for x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.
    RuntimeError: if called in Eager mode.

  """
  # Creating the gradient graph for control flow mutates Operations.
  # `_mutation_lock` ensures a `Session.run` call cannot occur between
  # creating and mutating new ops.
  # pylint: disable=protected-access
  with ops.get_default_graph()._mutation_lock():
    return gradients_util._GradientsHelper(
        ys, xs, grad_ys, name, colocate_gradients_with_ops, gate_gradients,
        aggregation_method, stop_gradients, unconnected_gradients)
  # pylint: enable=protected-access


@tf_export("gradients", v1=[])
def gradients_v2(ys,  # pylint: disable=invalid-name
                 xs,
                 grad_ys=None,
                 name="gradients",
                 gate_gradients=False,
                 aggregation_method=None,
                 stop_gradients=None,
                 unconnected_gradients=UnconnectedGradients.NONE):
  """Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.

  `tf.gradients` is only valid in a graph context. In particular,
  it is valid in the context of a `tf.function` wrapper, where code
  is executing as a graph.

  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the derivatives of `ys` with
  respect to `xs`.  It returns a list of `Tensor` of length `len(xs)` where
  each tensor is the `sum(dy/dx)` for y in `ys` and for x in `xs`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`.  When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).
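
  For example (an illustrative sketch; the tensors below are arbitrary),
  `grad_ys` can be used to weight the contribution of each `y`:

  >>> @tf.function
  ... def example():
  ...   x = tf.constant(1.)
  ...   y1 = 2. * x
  ...   y2 = 3. * x
  ...   # Weight dy1/dx by 0.5 and dy2/dx by 2.0 instead of the default 1.0.
  ...   return tf.gradients([y1, y2], [x],
  ...                       grad_ys=[tf.constant(0.5), tf.constant(2.0)])
  >>> example()
  [<tf.Tensor: shape=(), dtype=float32, numpy=7.0>]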

  `stop_gradients` is a `Tensor` or a list of tensors to be considered constant
  with respect to all `xs`. These tensors will not be backpropagated through,
  as though they had been explicitly disconnected using `stop_gradient`.  Among
  other things, this allows computation of partial derivatives as opposed to
  total derivatives. For example:

  >>> @tf.function
  ... def example():
  ...   a = tf.constant(0.)
  ...   b = 2 * a
  ...   return tf.gradients(a + b, [a, b], stop_gradients=[a, b])
  >>> example()
  [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,
  <tf.Tensor: shape=(), dtype=float32, numpy=1.0>]

  Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
  total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
  influence of `a` on `b` and evaluate to `[3.0, 1.0]`.  Note that the above is
  equivalent to:

  >>> @tf.function
  ... def example():
  ...   a = tf.stop_gradient(tf.constant(0.))
  ...   b = tf.stop_gradient(2 * a)
  ...   return tf.gradients(a + b, [a, b])
  >>> example()
  [<tf.Tensor: shape=(), dtype=float32, numpy=1.0>,
  <tf.Tensor: shape=(), dtype=float32, numpy=1.0>]

  `stop_gradients` provides a way of stopping gradient after the graph has
  already been constructed, as compared to `tf.stop_gradient` which is used
  during graph construction.  When the two approaches are combined,
  backpropagation stops at both `tf.stop_gradient` nodes and nodes in
  `stop_gradients`, whichever is encountered first.

  All integer tensors are considered constant with respect to all `xs`, as if
  they were included in `stop_gradients`.

  `unconnected_gradients` determines the value returned for each x in xs if it
  is unconnected in the graph to ys. By default this is None to safeguard
  against errors. Mathematically these gradients are zero which can be requested
  using the `'zero'` option. `tf.UnconnectedGradients` provides the
  following options and behaviors:

  >>> @tf.function
  ... def example(use_zero):
  ...   a = tf.ones([1, 2])
  ...   b = tf.ones([3, 1])
  ...   if use_zero:
  ...     return tf.gradients([b], [a], unconnected_gradients='zero')
  ...   else:
  ...     return tf.gradients([b], [a], unconnected_gradients='none')
  >>> example(False)
  [None]
  >>> example(True)
  [<tf.Tensor: shape=(1, 2), dtype=float32, numpy=array([[0., 0.]], ...)>]

  Let us take one practical example that comes up during the backpropagation
  phase. This function is used to evaluate the derivatives of the cost function
  with respect to weights `Ws` and biases `bs`. The sample implementation below
  shows what it is actually used for:

  >>> @tf.function
  ... def example():
  ...   Ws = tf.constant(0.)
  ...   bs = 2 * Ws
  ...   cost = Ws + bs  # This is just an example. Please ignore the formulas.
  ...   g = tf.gradients(cost, [Ws, bs])
  ...   dCost_dW, dCost_db = g
  ...   return dCost_dW, dCost_db
  >>> example()
  (<tf.Tensor: shape=(), dtype=float32, numpy=3.0>,
  <tf.Tensor: shape=(), dtype=float32, numpy=1.0>)

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      Defaults to 'gradients'.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operation.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.
    stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
      through.
    unconnected_gradients: Optional. Specifies the gradient value returned when
      the given input tensors are unconnected. Accepted values are constants
      defined in the class `tf.UnconnectedGradients` and the default value is
      `none`.

  Returns:
    A list of `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
    for y in `ys` and for x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.
    RuntimeError: if called in Eager mode.

  """
  # Creating the gradient graph for control flow mutates Operations.
  # `_mutation_lock` ensures a `Session.run` call cannot occur between
  # creating and mutating new ops.
  # pylint: disable=protected-access
  with ops.get_default_graph()._mutation_lock():
    return gradients_util._GradientsHelper(
        ys, xs, grad_ys, name, True, gate_gradients,
        aggregation_method, stop_gradients, unconnected_gradients)
  # pylint: enable=protected-access


def _hessian_vector_product(ys, xs, v):
  """Multiply the Hessian of `ys` wrt `xs` by `v`.

  This is an efficient construction that uses a backprop-like approach
  to compute the product between the Hessian and another vector. The
  Hessian is usually too large to be explicitly computed or even
  represented, but this method allows us to at least multiply by it
  for the same big-O cost as backprop.

  Implicit Hessian-vector products are the main practical, scalable way
  of using second derivatives with neural networks. They allow us to
  do things like construct Krylov subspaces and approximate conjugate
  gradient descent.

  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` will return an expression that evaluates to the same values
  as 1/2 (A + A.T) `v`.
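
  For example (an illustrative sketch; `A`, `x`, `v`, and `hvp_example` are
  made-up names and values):

  ```python
  @tf.function
  def hvp_example():
    A = tf.constant([[2., 1.], [1., 4.]])
    x = tf.constant([1., 2.])
    y = 0.5 * tf.tensordot(x, tf.linalg.matvec(A, x), axes=1)
    v = [tf.constant([1., 0.])]
    # For this symmetric A the Hessian of y is A itself, so the result is a
    # one-element list holding A @ v[0] == [2., 1.].
    return _hessian_vector_product(y, [x], v)
  ```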

  Args:
    ys: A scalar value, or a tensor or list of tensors to be summed to
        yield a scalar.
    xs: A list of tensors that we should construct the Hessian over.
    v: A list of tensors, with the same shapes as xs, that we want to
       multiply by the Hessian.

  Returns:
    A list of tensors (or if the list would be length 1, a single tensor)
    containing the product between the Hessian and `v`.

  Raises:
    ValueError: `xs` and `v` have different length.

  """
  # Validate the input.
  length = len(xs)
  if len(v) != length:
    raise ValueError("xs and v must have the same length.")

  # First backprop.
  grads = gradients(ys, xs)

  assert len(grads) == length

  elemwise_products = [
      math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
      for grad_elem, v_elem in zip(grads, v)
      if grad_elem is not None
  ]

  # Second backprop.
  return gradients(elemwise_products, xs)


@tf_export(v1=["hessians"])
def hessians(ys,
             xs,
             name="hessians",
             colocate_gradients_with_ops=False,
             gate_gradients=False,
             aggregation_method=None):
  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
  with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)`
  where each tensor is the Hessian of `sum(ys)`.

  The Hessian is a matrix of second-order partial derivatives of a scalar
  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
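
  For example (an illustrative sketch; the tensors below are arbitrary):

  ```python
  # Inside a graph context (e.g. under `tf.Graph` or `tf.function`):
  x = tf.constant([1., 2.])
  y = tf.reduce_sum(x ** 3)
  h = tf.hessians(y, x)[0]  # evaluates to the 2x2 matrix diag(6 * x)
  ```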

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    name: Optional name to use for grouping all the gradient ops together.
      Defaults to 'hessians'.
    colocate_gradients_with_ops: See `gradients()` documentation for details.
    gate_gradients: See `gradients()` documentation for details.
    aggregation_method: See `gradients()` documentation for details.

  Returns:
    A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.

  Raises:
    LookupError: if one of the operations between `xs` and `ys` does not
      have a registered gradient function.
  """
  xs = gradients_util._AsList(xs)
  kwargs = {
      "colocate_gradients_with_ops": colocate_gradients_with_ops,
      "gate_gradients": gate_gradients,
      "aggregation_method": aggregation_method
  }
  # Compute first-order derivatives and iterate for each x in xs.
  hessians = []
  _gradients = gradients(ys, xs, **kwargs)
  for gradient, x in zip(_gradients, xs):
    # Flatten the gradient to one dimension without introducing graph
    # branching.
    gradient = array_ops.reshape(gradient, [-1])

    # Declare an iterator and tensor array loop variables for the gradients.
    n = array_ops.size(x)
    loop_vars = [
        array_ops.constant(0, dtypes.int32),
        tensor_array_ops.TensorArray(x.dtype, n)
    ]
    # Iterate over all elements of the gradient and compute second order
    # derivatives.
    _, hessian = while_loop.while_loop(
        lambda j, _: j < n,
        lambda j, result: (j + 1,
                           result.write(j, gradients(gradient[j], x)[0])),
        loop_vars
    )

    _shape = array_ops.shape(x)
    _reshaped_hessian = array_ops.reshape(
        hessian.stack(), array_ops.concat((_shape, _shape), 0))
    hessians.append(_reshaped_hessian)
  return hessians


@tf_export("hessians", v1=[])
def HessiansV2(ys,
               xs,
               gate_gradients=False,
               aggregation_method=None,
               name="hessians"):
  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
  with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)`
  where each tensor is the Hessian of `sum(ys)`.

  The Hessian is a matrix of second-order partial derivatives of a scalar
  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    gate_gradients: See `gradients()` documentation for details.
    aggregation_method: See `gradients()` documentation for details.
    name: Optional name to use for grouping all the gradient ops together.
      Defaults to 'hessians'.

  Returns:
    A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.

  Raises:
    LookupError: if one of the operations between `xs` and `ys` does not
      have a registered gradient function.
  """
  return hessians(
      ys,
      xs,
      name=name,
      colocate_gradients_with_ops=True,
      gate_gradients=gate_gradients,
      aggregation_method=aggregation_method)