"""Options for saving Checkpoints."""

import copy
import inspect

from tensorflow.python.checkpoint.sharding import sharding_util
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.tf_export import tf_export


@tf_export("train.CheckpointOptions")
class CheckpointOptions(object):
  """Options for constructing a Checkpoint.

  Used as the `options` argument to the `tf.train.Checkpoint.save()` and
  `tf.train.Checkpoint.restore()` methods to adjust how variables are
  saved/restored.

  Example: Run IO ops on "localhost" while saving a checkpoint:

  ```
  step = tf.Variable(0, name="step")
  checkpoint = tf.train.Checkpoint(step=step)
  options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
  checkpoint.save("/tmp/ckpt", options=options)
  ```
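
  Example: Save asynchronously and run a callback once the write completes.
  This is an illustrative sketch: `on_write` is just a sample callback name,
  and callbacks may optionally accept the resulting save path.

  ```
  def on_write(save_path):
    # Illustrative callback; receives the path returned by save()/write().
    print("checkpoint written to", save_path)

  options = tf.train.CheckpointOptions(
      enable_async=True, experimental_write_callbacks=[on_write])
  checkpoint.save("/tmp/ckpt", options=options)
  ```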
  """

  # Define object attributes in __slots__ for improved memory and performance.
  __slots__ = (
      "experimental_io_device",
      "experimental_enable_async_checkpoint",
      "experimental_write_callbacks",
      "enable_async",
      "experimental_sharding_callback",
      "experimental_skip_slot_variables",
  )

  @deprecated_args(
      None, "Use enable_async instead", "experimental_enable_async_checkpoint"
  )
  def __init__(
      self,
      experimental_io_device=None,
      experimental_enable_async_checkpoint=False,
      experimental_write_callbacks=None,
      enable_async=False,
      experimental_skip_slot_variables=False,
      experimental_sharding_callback=None,
  ):
    """Creates an object that stores options for a Checkpoint.

    Args:
      experimental_io_device: string. Applies in a distributed setting.
        Tensorflow device to use to access the filesystem. If `None` (default)
        then for each variable the filesystem is accessed from the CPU:0 device
        of the host where that variable is assigned. If specified, the
        filesystem is instead accessed from that device for all variables.  This
        is for example useful if you want to save to a local directory, such as
        "/tmp", when running in a distributed setting. In that case pass a device
        for the host where the "/tmp" directory is accessible.
      experimental_enable_async_checkpoint: bool. Deprecated; please use
        the `enable_async` option instead.
      experimental_write_callbacks: List[Callable]. A list of callback functions
        that will be executed after each saving event finishes (i.e. after
        `save()` or `write()`). For async checkpoint, the callbacks will be
        executed only after the async thread finishes saving.  The return values
        of the callback(s) will be ignored. The callback(s) can optionally take
        the `save_path` (the result of `save()` or `write()`) as an argument.
        The callbacks will be executed in the same order as this list after the
        checkpoint has been written.
      enable_async: bool. Indicates whether async checkpointing is enabled.
        Default is False, i.e., no async checkpoint.  Async checkpoint moves the
        checkpoint file writing off the main thread, so that the model can
        continue to train while the checkpoint file writing runs in the
        background. Async checkpointing reduces TPU device idle cycles and
        speeds up the model training process, though memory consumption may
        increase.
      experimental_skip_slot_variables: bool. If true, ignores slot variables
        during restore. Context: TPU Embedding layers for Serving do not
        properly restore slot variables. This option is a way to omit
        restoring slot variables, which are not required for the Serving use
        case anyway (b/315912101).
      experimental_sharding_callback: `tf.train.experimental.ShardingCallback`.
        A pre-made or custom callback that determines how checkpoints are
        sharded on disk. Pre-made callback options are
        `tf.train.experimental.ShardByDevicePolicy` and
        `tf.train.experimental.MaxShardSizePolicy`. You may also write a custom
        callback, see `tf.train.experimental.ShardingCallback`.
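
    Example: Shard the checkpoint by device with the pre-made
    `ShardByDevicePolicy` (an illustrative sketch; any
    `tf.train.experimental.ShardingCallback` works here):

    ```
    options = tf.train.CheckpointOptions(
        experimental_sharding_callback=(
            tf.train.experimental.ShardByDevicePolicy()))
    checkpoint.save("/tmp/ckpt", options=options)
    ```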
    """
    self.experimental_io_device = experimental_io_device
    self.enable_async = experimental_enable_async_checkpoint or enable_async
    # Keep the deprecated attribute in sync so code reading either name sees
    # the same value.
    self.experimental_enable_async_checkpoint = self.enable_async

    if experimental_write_callbacks is not None:
      for callback in experimental_write_callbacks:
        # Each callback may take at most one argument: the save path.
        assert len(inspect.signature(callback).parameters) <= 1
    self.experimental_write_callbacks = experimental_write_callbacks

    if experimental_sharding_callback is not None:
      if not isinstance(
          experimental_sharding_callback, sharding_util.ShardingCallback):
        raise ValueError("The experimental_sharding_callback checkpoint "
                         "option must be of type ShardingCallback. The "
                         "option provided was of type "
                         f"{type(experimental_sharding_callback)}.")
    self.experimental_sharding_callback = experimental_sharding_callback
    self.experimental_skip_slot_variables = experimental_skip_slot_variables

  def __copy__(self):
    # `copy.copy(super())` copies `self` via `object.__reduce_ex__`, which
    # bypasses this `__copy__` override and so avoids infinite recursion.
    # Only `experimental_write_callbacks` then needs an explicit shallow copy,
    # so the copy does not share one callback list with the original.
    result = copy.copy(super())
    result.experimental_write_callbacks = copy.copy(
        self.experimental_write_callbacks)
    return result