
"""Options for saving SavedModels."""

import enum

from tensorflow.python.checkpoint.sharding import sharding_util
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export

is_oss = True


@tf_export("saved_model.experimental.VariablePolicy")
class VariablePolicy(enum.Enum):
  """Enum defining options for variable handling when saving.

  NONE
    No policy applied: Distributed variables are saved as one variable, with no
    device attached.

  SAVE_VARIABLE_DEVICES
    When saving variables, also save their device assignment.
    This is useful if one wants to hardcode devices in saved models, but it also
    makes them non-portable if soft device placement is disabled (more details
    in `tf.config.set_soft_device_placement`). This is currently not
    fully supported by `saved_model.load`, and is mainly intended to be used
    when one will be reading the saved model at a lower API level. In the
    example below, the graph saved by the call to `saved_model.save` will have
    the variable devices correctly specified:
    ```python
    exported = tf.train.Checkpoint()
    with tf.device('/GPU:0'):
      exported.x_gpu = tf.Variable(1.0)
    with tf.device('/CPU:0'):
      exported.x_cpu = tf.Variable(1.0)
    tf.saved_model.save(exported, export_dir,
        options = tf.saved_model.SaveOptions(
            experimental_variable_policy=
              tf.saved_model.experimental.VariablePolicy.SAVE_VARIABLE_DEVICES))
    ```
    Distributed variables are still saved as one variable under this policy.

  EXPAND_DISTRIBUTED_VARIABLES
    Distributed variables will be saved with information about their components,
    allowing for their restoration on load. Also, the saved graph will contain
    references to those variables. This is useful when one wants to use the
    model for training in environments where the original distribution strategy
    is not available.
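
    The sketch below is illustrative only: `export_dir` is a placeholder path
    and the `MirroredStrategy` setup merely stands in for whatever distribution
    strategy created the variables. Passing the policy's value string is
    equivalent to passing the enum member itself:
    ```python
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
      exported = tf.train.Checkpoint()
      exported.v = tf.Variable(1.0)
    tf.saved_model.save(exported, export_dir,
        options=tf.saved_model.SaveOptions(
            experimental_variable_policy='expand_distributed_variables'))
    ```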
  Nsave_variable_devicesexpand_distributed_variablesc                 (    | t         j                  k7  S )z0Checks whether variable devices should be saved.)r   NONEselfs    Z/home/dcms/DCMS/lib/python3.12/site-packages/tensorflow/python/saved_model/save_options.py_save_variable_devicesz%VariablePolicy._save_variable_devicesG   s    >&&&&    c                 (    | t         j                  k(  S )z8Checks whether distributed variables should be expanded.)r   EXPAND_DISTRIBUTED_VARIABLESr   s    r   _expand_distributed_variablesz,VariablePolicy._expand_distributed_variablesK   s    >>>>>r   c                     | t         j                  S t        | t               r| S t        |       j	                         }t         D ]  }||j
                  k(  s|c S  t        d|  d      )z4Tries to convert `obj` to a VariablePolicy instance.z'Received invalid VariablePolicy value: .)r   r   
isinstancestrlowervalue
ValueError)objkeypolicys      r   from_objzVariablePolicy.from_objO   sk     {   #~&j
c(..
C  		 >se1E
FFr   )__name__
__module____qualname____doc__r   SAVE_VARIABLE_DEVICESr   r   r   staticmethodr    r   r   r   r      s;    "H 
$1!?'? 
G 
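

# Illustrative behaviour of `VariablePolicy.from_obj` (kept as comments so
# nothing runs at import time). The helper accepts the enum itself, one of its
# value strings in any case, or `None`:
#
#   VariablePolicy.from_obj(None)                     # -> VariablePolicy.NONE
#   VariablePolicy.from_obj("SAVE_VARIABLE_DEVICES")  # -> SAVE_VARIABLE_DEVICES
#   VariablePolicy.from_obj(VariablePolicy.NONE)      # -> VariablePolicy.NONE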
Gr   r   zsaved_model.SaveOptionsc                   2    e Zd ZdZdZ	 	 	 	 	 	 	 	 	 	 	 ddZy)SaveOptionszOptions for saving to SavedModel.

  An instance of this class may be passed as the `options` argument of
  functions that save a SavedModel (`tf.saved_model.save`,
  `tf.keras.models.save_model`).
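
  A minimal usage sketch (here `model` and the `'/tmp/saved_model'` path are
  placeholders, not defined in this module):

  ```python
  options = tf.saved_model.SaveOptions(save_debug_info=True)
  tf.saved_model.save(model, '/tmp/saved_model', options=options)
  ```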
  """

  __slots__ = (
      "namespace_whitelist",
      "save_debug_info",
      "function_aliases",
      "experimental_debug_stripper",
      "experimental_io_device",
      "experimental_variable_policy",
      "experimental_custom_gradients",
      "experimental_image_format",
      "experimental_skip_saver",
      "experimental_sharding_callback",
      "extra_tags",
  )

  def __init__(
      self,
      namespace_whitelist=None,
      save_debug_info=False,
      function_aliases=None,
      experimental_debug_stripper=False,
      experimental_io_device=None,
      experimental_variable_policy=None,
      experimental_custom_gradients=True,
      experimental_image_format=False,
      experimental_skip_saver=False,
      experimental_sharding_callback=None,
      extra_tags=None,
  ):
    """Creates an object that stores options for SavedModel saving.

    Args:
      namespace_whitelist: List of strings containing op namespaces to whitelist
        when saving a model. Saving an object that uses namespaced ops must
        explicitly add all namespaces to the whitelist. The namespaced ops must
        be registered into the framework when loading the SavedModel. If no
        whitelist is provided, all namespaced ops will be allowed.
      save_debug_info: Boolean indicating whether debug information is saved. If
        True, then a debug/saved_model_debug_info.pb file will be written with
        the contents of a GraphDebugInfo binary protocol buffer containing stack
        trace information for all ops and functions that are saved.
      function_aliases: Python dict. Mapping from string to object returned by
        @tf.function. A single tf.function can generate many ConcreteFunctions.
        If a downstream tool wants to refer to all concrete functions generated
        by a single tf.function you can use the `function_aliases` argument to
        store a map from the alias name to all concrete function names. E.g.
        >>> class Adder(tf.Module):
        ...   @tf.function
        ...   def double(self, x):
        ...     return x + x
        >>> model = Adder()
        >>> model.double.get_concrete_function(
        ...   tf.TensorSpec(shape=[], dtype=tf.float32, name="float_input"))
        >>> model.double.get_concrete_function(
        ...   tf.TensorSpec(shape=[], dtype=tf.string, name="string_input"))
        >>> options = tf.saved_model.SaveOptions(
        ...   function_aliases={'double': model.double})
        >>> tf.saved_model.save(model, '/tmp/adder', options=options)
      experimental_debug_stripper: bool. If set to True, this strips the debug
        nodes from the graph, from both the nodes and the function defs. Note
        that this currently only strips the `Assert` nodes from the graph and
        converts them into `NoOp`s instead.
      experimental_io_device: string. Applies in a distributed setting.
        Tensorflow device to use to access the filesystem. If `None` (default)
        then for each variable the filesystem is accessed from the CPU:0 device
        of the host where that variable is assigned. If specified, the
        filesystem is instead accessed from that device for all variables.  This
        is for example useful if you want to save to a local directory, such as
        "/tmp" when running in a distributed setting. In that case pass a device
        for the host where the "/tmp" directory is accessible.
      experimental_variable_policy: The policy to apply to variables when
        saving. This is either a `saved_model.experimental.VariablePolicy` enum
        instance or one of its value strings (case is not important). See that
        enum documentation for details. A value of `None` corresponds to the
        default policy.
      experimental_custom_gradients: Boolean. When True, will save traced
        gradient functions for the functions decorated by `tf.custom_gradient`.
        Defaults to `True`.
      experimental_image_format: New (highly) experimental format that is
        capable of saving models larger than the 2GB protobuf limit. Enabling
        this option will likely break compatibility with downstream consumers.
        This option is currently disabled in OSS.
      experimental_skip_saver: If True, will prevent SavedModel from creating
        its native checkpointing ops - this is for models that do not use
        SavedModel's native checkpointing functionality to avoid the costs
        associated with creating and serializing those ops.
      experimental_sharding_callback: `tf.train.experimental.ShardingCallback`.
        A pre-made or custom callback that determines how checkpoints are
        sharded on disk. Pre-made callback options are
        `tf.train.experimental.ShardByDevicePolicy` and
        `tf.train.experimental.MaxShardSizePolicy`. You may also write a custom
        callback, see `tf.train.experimental.ShardingCallback`. A sketch with
        a pre-made callback is given in the example below.
      extra_tags: Extra tags to be saved with the MetaGraph in the SavedModel.
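
    The following sketch illustrates `experimental_io_device` together with a
    pre-made sharding callback; `model` and the paths are placeholders, not
    part of this module:

    ```python
    options = tf.saved_model.SaveOptions(
        experimental_io_device='/job:localhost',
        experimental_sharding_callback=(
            tf.train.experimental.ShardByDevicePolicy()))
    tf.saved_model.save(model, '/tmp/model', options=options)
    ```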
    """
    self.namespace_whitelist = _validate_namespace_whitelist(
        namespace_whitelist)
    self.save_debug_info = save_debug_info
    self.function_aliases = function_aliases if function_aliases else dict()
    self.experimental_debug_stripper = experimental_debug_stripper
    self.experimental_custom_gradients = experimental_custom_gradients
    self.experimental_io_device = experimental_io_device
    self.experimental_variable_policy = VariablePolicy.from_obj(
        experimental_variable_policy)
    self.experimental_skip_saver = experimental_skip_saver

    if experimental_image_format and is_oss:
      raise ValueError(
          "The option `experimental_image_format` is disabled in OSS.")
    self.experimental_image_format = experimental_image_format

    if experimental_sharding_callback is not None:
      if not isinstance(
          experimental_sharding_callback, sharding_util.ShardingCallback):
        raise ValueError(
            "The experimental_sharding_callback checkpoint option "
            "must be of type ShardingCallback. The option provided "
            f"was of type {type(experimental_sharding_callback)}.")
    self.experimental_sharding_callback = experimental_sharding_callback
    self.extra_tags = extra_tags
@
{#$
%Q	(  )& /ii%
9Kyi 14  V]]9-./ 
r   )r"   enum%tensorflow.python.checkpoint.shardingr   tensorflow.python.utilr    tensorflow.python.util.tf_exportr   r6   Enumr   r'   r4   r%   r   r   <module>rG      sn    &  ? ) 6 
 45>GTYY >G 6>GB $%A! A! &A!Hr   