"""Utils to create distributed datasets based on TF version."""

from tensorflow.python import tf2
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute.v1 import input_lib as input_lib_v1


def get_distributed_dataset(dataset,
                            input_workers,
                            strategy,
                            num_replicas_in_sync=None,
                            input_context=None,
                            options=None,
                            build=True,
                            replica_order=None):
  """Returns a distributed dataset from the given tf.data.Dataset instance.

  This is a common function that is used by all strategies to return a
  distributed dataset. The distributed dataset instance returned is different
  depending on if we are in a TF 1 or TF 2 context. The distributed dataset
  instances returned differ from each other in the APIs supported by each of
  them.

  Args:
    dataset: a tf.data.Dataset instance.
    input_workers: an InputWorkers object which specifies devices on which
      iterators should be created.
    strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
      handle last partial batch.
    num_replicas_in_sync: Optional integer. If this is not None, the value is
      used to decide how to rebatch datasets into smaller batches so that the
      total batch size for each step (across all workers and replicas) adds up
      to `dataset`'s batch size.
    input_context: `InputContext` for sharding. Only pass this in for between
      graph multi-worker cases where there is only one `input_worker`. In these
      cases, we will shard based on the `input_pipeline_id` and
      `num_input_pipelines` in the `InputContext`.
    options: Default is None. `tf.distribute.InputOptions` used to control
      options on how this dataset is distributed.
    build: whether to build underlying datasets when a DistributedDataset is
      created. This is only useful for `ParameterServerStrategy` now.
    replica_order: the order of the replicas, which will be used to reorder the
      iterators to match the device order.

  Returns:
    A distributed dataset instance.
  """
  if tf2.enabled():
    return input_lib.DistributedDataset(
        input_workers,
        strategy,
        dataset,
        num_replicas_in_sync=num_replicas_in_sync,
        input_context=input_context,
        build=build,
        options=options,
        replica_order=replica_order,
    )
  else:
    return input_lib_v1.DistributedDatasetV1(
        dataset,
        input_workers,
        strategy,
        num_replicas_in_sync=num_replicas_in_sync,
        input_context=input_context,
        options=options)
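

# Illustrative usage sketch, not part of the original module: a rough idea of
# how a strategy-owned call site might use `get_distributed_dataset`. The
# device strings, the single-worker `InputWorkers` layout, and the use of
# `MirroredStrategy` are assumptions for illustration only (assumes
# `import tensorflow as tf`).
#
#   strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
#   dataset = tf.data.Dataset.range(8).batch(4)
#   input_workers = input_lib.InputWorkers([("/cpu:0", ("/cpu:0",))])
#   dist_dataset = get_distributed_dataset(
#       dataset, input_workers, strategy, num_replicas_in_sync=1)
#   for batch in dist_dataset:
#     ...  # each element is a per-replica value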


def get_distributed_datasets_from_function(dataset_fn,
                                           input_workers,
                                           input_contexts,
                                           strategy,
                                           options=None,
                                           build=True,
                                           replica_order=None):
  """Returns a distributed dataset from the given input function.

  This is a common function that is used by all strategies to return a
  distributed dataset. The distributed dataset instance returned is different
  depending on if we are in a TF 1 or TF 2 context. The distributed dataset
  instances returned differ from each other in the APIs supported by each of
  them.

  Args:
    dataset_fn: a function that returns a tf.data.Dataset instance.
    input_workers: an InputWorkers object which specifies devices on which
      iterators should be created.
    input_contexts: A list of `InputContext` instances to be passed to call(s)
      to `dataset_fn`. Length and order should match worker order in
      `worker_device_pairs`.
    strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
      handle last partial batch.
    options: Default is None. `tf.distribute.InputOptions` used to control
      options on how this dataset is distributed.
    build: whether to build underlying datasets when a
      `DistributedDatasetFromFunction` is created. This is only useful for
      `ParameterServerStrategy` now.
    replica_order: the order of the replicas, which will be used to reorder the
      iterators to match the device order.

  Returns:
    A distributed dataset instance.

  Raises:
    ValueError: if `options.experimental_replication_mode` and
    `options.experimental_place_dataset_on_device` are not consistent
  """
  if (options is not None and
      options.experimental_replication_mode !=
      input_lib.InputReplicationMode.PER_REPLICA and
      options.experimental_place_dataset_on_device):
    raise ValueError(
        "When `experimental_place_dataset_on_device` is set for dataset "
        "placement, you must also specify `PER_REPLICA` for the "
        "replication mode")

  if (options is not None and
      options.experimental_replication_mode ==
      input_lib.InputReplicationMode.PER_REPLICA and
      options.experimental_fetch_to_device and
      options.experimental_place_dataset_on_device):
    raise ValueError(
        "`experimental_place_dataset_on_device` can not be set to True "
        "when experimental_fetch_to_device is True and "
        "replication mode is set to `PER_REPLICA`")

  if tf2.enabled():
    return input_lib.DistributedDatasetsFromFunction(
        input_workers,
        strategy,
        input_contexts=input_contexts,
        dataset_fn=dataset_fn,
        options=options,
        build=build,
        replica_order=replica_order,
    )
  return input_lib_v1.DistributedDatasetsFromFunctionV1(
      input_workers, strategy, input_contexts, dataset_fn, options)