
"""Contains functions to use mixed precision with the graph rewrite."""

from tensorflow.python.framework import config
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import optimizer
from tensorflow.python.training.experimental import loss_scale_optimizer as loss_scale_optimizer_v1
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


# Maps an optimizer base class to a (wrapper_fn, wrapper_cls) pair used to
# wrap it with loss scaling. See `register_loss_scale_wrapper`.
_REGISTERED_WRAPPER_OPTIMIZER_CLS = {
    optimizer.Optimizer:
        (loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer,) * 2,
}


@tf_export('__internal__.mixed_precision.register_loss_scale_wrapper', v1=[])
def register_loss_scale_wrapper(optimizer_cls, wrapper_fn, wrapper_cls=None):
  """Registers a loss scale optimizer wrapper.

  `tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite`
  automatically wraps an optimizer with an optimizer wrapper that performs loss
  scaling. This function registers a
  `(base_cls, wrapper_fn, wrapper_cls)` triple
  that is used by `enable_mixed_precision_graph_rewrite`, where
  `wrapper_fn` is called to create a `wrapper_cls` instance that wraps an
  `optimizer_cls` instance.

  Args:
    optimizer_cls: A base optimizer class, e.g. `tf.keras.optimizers.Optimizer`.
    wrapper_fn: A function that takes in arguments "optimizer" and
      "loss_scale", and returns a loss scale optimizer of type "wrapper_cls"
      that wraps "optimizer".
    wrapper_cls: A loss scale optimizer class. Defaults to `wrapper_fn`, in
      which case `wrapper_fn` should be a loss scale optimizer class whose
      constructor takes in arguments "optimizer" and "loss_scale".
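
  For illustration, a hypothetical wrapper could be registered as follows. The
  `MyOptimizer` and `MyLossScaleOptimizer` classes below are placeholders, not
  part of TensorFlow; any pair whose constructors match the documented
  signatures would be registered the same way:

  ```python
  class MyOptimizer(tf.compat.v1.train.Optimizer):  # hypothetical optimizer
    pass

  class MyLossScaleOptimizer(MyOptimizer):  # hypothetical loss scale wrapper

    def __init__(self, optimizer, loss_scale):
      super().__init__(use_locking=False, name='MyLossScaleOptimizer')
      self._optimizer = optimizer
      self._loss_scale = loss_scale

  # With no separate `wrapper_fn`, the wrapper class itself is passed and
  # `wrapper_cls` defaults to it.
  tf.__internal__.mixed_precision.register_loss_scale_wrapper(
      MyOptimizer, MyLossScaleOptimizer)
  ```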
  N)!_REGISTERED_WRAPPER_OPTIMIZER_CLS)optimizer_cls
wrapper_fnwrapper_clss      g/home/dcms/DCMS/lib/python3.12/site-packages/tensorflow/python/training/experimental/mixed_precision.pyregister_loss_scale_wrapperr   #   s    , ++6-#M2    c                 $   t         j                         D ]6  \  }}t        | |      st        dj	                  |j
                               t         j                         D ]  \  }\  }}t        | |      s || |      c S  t        d| z        )z-Wraps an optimizer with a LossScaleOptimizer.z"opt" must not already be an instance of a {cls}. `enable_mixed_precision_graph_rewrite` will automatically wrap the optimizer with a {cls}.)clsza"opt" must be an instance of a tf.train.Optimizer or a tf.keras.optimizers.Optimizer, but got: %s)r   values
isinstance
ValueErrorformat__name__items)opt
loss_scale_wrapper_optimizerr   r   s         r   _wrap_optimizerr    <   s     @FFH @a	#()   #4#=#=>	@ @@ (--/)$m_j!#}%Z(()
 	 @BEF 	G Gr   z7train.experimental.enable_mixed_precision_graph_rewritez4mixed_precision.enable_mixed_precision_graph_rewritec                    t        j                         rt        d      t        j                         rt	        j
                  d       t        | |      } t        j                  ddi       t        j                  d       | S )a  Enable mixed precision via a graph rewrite.

  Mixed precision is the use of both float32 and float16 data types when
  training a model to improve performance. This is achieved via a graph rewrite
  operation and a loss-scale optimizer.

  Performing arithmetic operations in float16 takes advantage of specialized
  processing units, such as NVIDIA Tensor Cores, for much higher arithmetic
  throughput. However, due to the smaller representable range, performing the
  entire training with float16 can result in gradient underflow, that is, small
  gradient values becoming zeroes. Instead, performing only select arithmetic
  operations in float16 results in higher throughput and decreased training
  time when using compatible hardware accelerators while also reducing memory
  usage, typically without sacrificing model accuracy.

  Note: While the mixed precision rewrite changes the datatype of various
  layers throughout the model, the same accuracy reached in float32 is
  expected. If a `NaN` gradient occurs with dynamic loss scaling, the model
  update for that batch is skipped. In this case, the global step count is not
  incremented, and the `LossScaleOptimizer` attempts to decrease the loss
  scaling value to avoid `NaN` values in subsequent iterations. This approach
  has been shown to achieve the same accuracy as float32 and, in most cases,
  better training throughput.

  Example:

  ```python
  model = tf.keras.models.Sequential([
      tf.keras.layers.Dense(64, activation='relu'),
      tf.keras.layers.Dense(64, activation='softmax'),
  ])

  opt = tf.keras.optimizers.SGD()
  opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
  model.compile(loss="mse", optimizer=opt)

  x_train = np.random.random((1024, 64))
  y_train = np.random.random((1024, 64))
  model.fit(x_train, y_train)
  ```

  Calling `enable_mixed_precision_graph_rewrite(opt)` enables the graph rewrite
  operation before computing gradients. The function additionally returns an
  `Optimizer` (`opt`) wrapped with a `LossScaleOptimizer`. This prevents
  underflow in the float16 tensors during the backward pass. An optimizer of
  type `tf.train.Optimizer` or `tf.keras.optimizers.Optimizer` must be passed
  to this function, which will then be wrapped to use loss scaling.

  The graph rewrite operation changes the `dtype` of certain operations in the
  graph from float32 to float16. There are several categories of operations
  that are either included or excluded by this rewrite operation. The following
  categories of Ops are defined inside corresponding functions under the class
  `AutoMixedPrecisionLists` in
  <a href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/
  core/grappler/optimizers/auto_mixed_precision_lists.h">
  auto_mixed_precision_lists.h</a>:

  * `ClearList`: Ops that do not have numerically significant adverse effects.
  E.g. `ArgMax` and `Floor`.
  * `AllowList`: Ops that are considered numerically safe for execution in
  float16, and thus are always converted. E.g. `Conv2D`.
  * `DenyList`: Ops that are numerically unsafe to execute in float16 and
  can negatively affect downstream nodes. E.g. `Softmax`.
  * `GrayList`: Ops that are considered numerically safe for execution in
  float16 unless downstream from a DenyList Op. E.g. `Add` and `AvgPool`.

  When this function is used, gradients should only be computed and applied
  with the returned optimizer, either by calling `opt.minimize()` or
  `opt.compute_gradients()` followed by `opt.apply_gradients()`.
  Gradients should not be computed with `tf.gradients` or `tf.GradientTape`.
  This is because the returned optimizer will apply loss scaling, and
  `tf.gradients` or `tf.GradientTape` will not. If you do directly use
  `tf.gradients` or `tf.GradientTape`, your model may not converge due to
  float16 underflow problems.
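
  For example, in graph mode the returned optimizer would typically be used
  along these lines (a sketch; `loss` and `var_list` are assumed to be defined
  elsewhere):

  ```python
  # `opt` is the loss-scale-wrapped optimizer returned by this function.
  train_op = opt.minimize(loss, var_list=var_list)

  # Or, equivalently, using the two-step API through the same wrapped `opt`:
  grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
  train_op = opt.apply_gradients(grads_and_vars)
  ```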

  When eager execution is enabled, the mixed precision graph rewrite is only
  enabled within `tf.function`s, as outside `tf.function`s, there is no graph.

  For NVIDIA GPUs with Tensor cores, as a general performance guide, dimensions
  (such as batch size, input size, output size, and channel counts)
  should be powers of two if under 256, or otherwise divisible by 8 if above
  256. For more information, check out the
  [NVIDIA Deep Learning Performance Guide](
  https://docs.nvidia.com/deeplearning/sdk/dl-performance-guide/index.html).

  Currently, mixed precision is only enabled on NVIDIA Tensor Core GPUs with
  Compute Capability 7.0 and above (Volta, Turing, or newer architectures). The
  parts of the graph on CPUs and TPUs are untouched by the graph rewrite.

  Raises:
    `ValueError`, if the `tf.keras.mixed_precision` API is also used by calling
    `tf.keras.mixed_precision.set_global_policy`. Only one mixed precision
    API can be used.

  Args:
    opt: An instance of a `tf.keras.optimizers.Optimizer` or a
      `tf.train.Optimizer`.
    loss_scale: Either an int/float, the string `"dynamic"`, or an instance of
      a `tf.mixed_precision.experimental.LossScale`. The loss scale to use. It
      is recommended to keep this as its default value of `"dynamic"`, which
      will adjust the scaling automatically to prevent `Inf` or `NaN` values
      (a fixed scale is shown in the last example below).

  Returns:
    A version of `opt` that will use loss scaling to prevent underflow.
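
  For instance, to use a fixed loss scale instead of the default `"dynamic"`
  (128 below is an arbitrary illustrative value, not a recommendation):

  ```python
  opt = tf.keras.optimizers.SGD()
  opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(
      opt, loss_scale=128)
  ```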
  """
  if mixed_precision_global_state.is_using_mixed_precision_policy():
    raise ValueError(
        'The mixed precision graph rewrite cannot be enabled, because the '
        'global Keras dtype Policy has been set to a mixed precision policy. '
        'At most, one of the following can be called:\n\n'
        '  1. tf.keras.mixed_precision.set_global_policy() with a mixed '
        'precision policy (You called this first)\n\n'
        '  2. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
        '(You called this second)\n'
        'You called both functions, which is an error, because both functions '
        'enable you to use mixed precision. If in doubt which function to '
        'use, use the first, as it supports Eager execution and is more '
        'customizable.')

  if mixed_precision_global_state.non_mixed_precision_session_created():
    tf_logging.warn('You already have existing Sessions that do not use mixed '
                    'precision. enable_mixed_precision_graph_rewrite() will '
                    'not affect these Sessions.')
  opt = _wrap_optimizer(opt, loss_scale)
  config.set_optimizer_experimental_options({'auto_mixed_precision': True})
  mixed_precision_global_state.set_mixed_precision_graph_rewrite_enabled(True)
  return opt

	  "EEG OO 1 2 	Z(#++-CT,JKHHN	*r   z8train.experimental.disable_mixed_precision_graph_rewritez5mixed_precision.disable_mixed_precision_graph_rewritec                      t        j                         st        j                  d       t	        j
                  ddi       t        j                  d       y)a]  Disables the mixed precision graph rewrite.

  After this is called, the mixed precision graph rewrite will no longer run for
  new Sessions, and so float32 operations will no longer be converted to float16
  in such Sessions. However, any existing Sessions will continue to have the
  graph rewrite enabled if they were created after
  `enable_mixed_precision_graph_rewrite` was called but before
  `disable_mixed_precision_graph_rewrite` was called.

  This does not undo the effects of loss scaling. Any optimizers wrapped with a
  LossScaleOptimizer will continue to do loss scaling, although this loss
  scaling will no longer be useful if the optimizer is used in new Sessions, as
  the graph rewrite no longer converts the graph to use float16.

  This function is useful for unit testing. A unit test can test using the
  mixed precision graph rewrite, then disable it so future unit tests continue
  using float32. If this is done, unit tests should not share a single session,
  as `enable_mixed_precision_graph_rewrite` and
  `disable_mixed_precision_graph_rewrite` have no effect on existing sessions.
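
  For example, a test might enable the rewrite, exercise it, and then restore
  the default state (a sketch using the `tf.compat.v1` API; the graph built in
  between is elided):

  ```python
  opt = tf.compat.v1.train.GradientDescentOptimizer(1.0)
  opt = tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(
      opt)
  # ... build a graph with `opt` and run it in a fresh Session ...
  tf.compat.v1.train.experimental.disable_mixed_precision_graph_rewrite()
  ```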
  """
  if not mixed_precision_global_state.is_mixed_precision_graph_rewrite_enabled(
  ):
    tf_logging.warn('disable_mixed_precision_graph_rewrite() called when '
                    'mixed precision is already disabled.')
  config.set_optimizer_experimental_options({'auto_mixed_precision': False})
  mixed_precision_global_state.set_mixed_precision_graph_rewrite_enabled(False)