
"""Unified high-level distribution APIs across backends.

Currently only the JAX backend is supported. The TensorFlow backend
will be supported in the future (via tf.dtensor API).
    N)keras_export)KerasTensor)distribution_lib)global_statebatchdistributionzkeras.distribution.list_devicesc                 ,    t        j                  |       S )a  Return all the available devices based on the device type.

    Note: in a distributed setting, global devices are returned.
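
    Example (illustrative; the exact device strings depend on the backend
    and the available hardware):

    ```python
    import keras

    devices = keras.distribution.list_devices()
    # e.g. ['gpu:0', 'gpu:1', 'gpu:2', 'gpu:3'] on a machine with 4 GPUs.

    cpu_devices = keras.distribution.list_devices("cpu")
    ```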

    Args:
        device_type: string, one of `"cpu"`, `"gpu"` or `"tpu"`.
            Defaults to `"gpu"` or `"tpu"` if available when
            `device_type` is not provided. Otherwise
            will return the `"cpu"` devices.

    Return:
        List of devices that are available for distributed computation.
    """
    return distribution_lib.list_devices(device_type)


@keras_export("keras.distribution.initialize")
def initialize(job_addresses=None, num_processes=None, process_id=None):
    """Initialize the distribution system for multi-host/process setting.

    Calling `initialize` will prepare the backend for execution on multi-host
    GPUs or TPUs. It should be called before any computations.

    Note that the parameters can also be injected via environment variables,
    which can be better controlled by the launch script at startup time.
    For certain backends that also rely on the environment variables to
    configure, Keras will properly forward them.

    Args:
        job_addresses: string. Comma-separated IP addresses for all the jobs
            that will form the whole computation cluster. Note that for JAX
            backend, only the address for job 0 (coordinator) is needed. For
            certain runtime like cloud TPU, this value can be `None`, and the
            backend will figure it out with the TPU environment variables. You
            can also configure this value via the environment variable
            `KERAS_DISTRIBUTION_JOB_ADDRESSES`.
        num_processes: int. The number of worker/processes that will form the
            whole computation cluster. For certain runtime like cloud TPU, this
            value can be `None`, and the backend will figure it out with the TPU
            environment variables. You can also configure this value via
            environment variable `KERAS_DISTRIBUTION_NUM_PROCESSES`.
        process_id: int. The ID number of the current worker/process. The value
            should range from `0` to `num_processes - 1`. `0` indicates that
            the current worker/process is the master/coordinator job. You can
            also configure this value via environment variable
            `KERAS_DISTRIBUTION_PROCESS_ID`.

        Example:
            Suppose there are two GPU processes, and process 0 is running at
            address `10.0.0.1:1234`, and process 1 is running at address
            `10.0.0.2:2345`. To configure such a cluster, you can run

        On process 0:
        ```python
        keras.distribution.initialize(
            job_addresses="10.0.0.1:1234,10.0.0.2:2345",
            num_processes=2,
            process_id=0)
        ```

        On process 1:
        ```python
        keras.distribution.initialize(
            job_addresses="10.0.0.1:1234,10.0.0.2:2345",
            num_processes=2,
            process_id=1)
        ```

        or via the environment variables:
        On process 0:
        ```python
        os.environ[
            "KERAS_DISTRIBUTION_JOB_ADDRESSES"] = "10.0.0.1:1234,10.0.0.2:2345"
        os.environ["KERAS_DISTRIBUTION_NUM_PROCESSES"] = "2"
        os.environ["KERAS_DISTRIBUTION_PROCESS_ID"] = "0"
        keras.distribution.initialize()
        ```

        On process 1:
        ```python
        os.environ[
            "KERAS_DISTRIBUTION_JOB_ADDRESSES"] = "10.0.0.1:1234,10.0.0.2:2345"
        os.environ["KERAS_DISTRIBUTION_NUM_PROCESSES"] = "2"
        os.environ["KERAS_DISTRIBUTION_PROCESS_ID"] = "1"
        keras.distribution.initialize()
        ```

        Also note that for JAX backend, the `job_addresses` can be further
        reduced to just the master/coordinator address, which is
        `10.0.0.1:1234`.
    """
    # Fall back to the environment variables when arguments are not given.
    if (
        job_addresses is None
        and "KERAS_DISTRIBUTION_JOB_ADDRESSES" in os.environ
    ):
        job_addresses = os.environ["KERAS_DISTRIBUTION_JOB_ADDRESSES"]
    if (
        num_processes is None
        and "KERAS_DISTRIBUTION_NUM_PROCESSES" in os.environ
    ):
        num_processes = int(os.environ["KERAS_DISTRIBUTION_NUM_PROCESSES"])
    if process_id is None and "KERAS_DISTRIBUTION_PROCESS_ID" in os.environ:
        process_id = int(os.environ["KERAS_DISTRIBUTION_PROCESS_ID"])
    distribution_lib.initialize(job_addresses, num_processes, process_id)


@keras_export("keras.distribution.DeviceMesh")
class DeviceMesh:
    """A cluster of computation devices for distributed computation.

    This API is aligned with `jax.sharding.Mesh` and `tf.dtensor.Mesh`, which
    represents the computation devices in the global context.

    See more details in [jax.sharding.Mesh](
        https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.Mesh)
    and [tf.dtensor.Mesh](
        https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh).
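
    Example (a minimal sketch; assumes 8 accessible devices):

    ```python
    import keras

    devices = keras.distribution.list_devices()  # 8 devices in this sketch.

    # 2-way data parallelism ("batch") x 4-way model parallelism ("model").
    device_mesh = keras.distribution.DeviceMesh(
        shape=(2, 4),
        axis_names=["batch", "model"],
        devices=devices,
    )
    ```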

    Args:
        shape: tuple or list of integers. The shape of the overall
            `DeviceMesh`, e.g. `(8,)` for a data parallel only distribution,
            or `(4, 2)` for a model+data parallel distribution.
        axis_names: list of strings. The logical name of each axis of
            the `DeviceMesh`. The length of `axis_names` should match the
            rank of `shape`. The `axis_names` will be used to
            match/create the `TensorLayout` when distributing the data and
            variables.
        devices: Optional list of devices. Defaults to all the available
            devices locally from `keras.distribution.list_devices()`.
    """

    def __init__(self, shape, axis_names, devices=None):
        if not shape or not axis_names:
            raise ValueError(
                "Shape and axis_names cannot be empty. Received: "
                f"shape={shape}, axis_names={axis_names}"
            )
        if len(shape) != len(axis_names):
            raise ValueError(
                "Shape and axis_names should have same size. "
                f"Received: shape={shape}, axis_names={axis_names}"
            )
        if devices is None:
            devices = list_devices()
        devices = np.array(devices)
        if np.prod(shape) != np.prod(devices.shape):
            raise ValueError(
                "Shape does not match the number of devices. "
                f"Received: shape={shape}; devices.shape={devices.shape}"
            )

        self._shape = shape
        self._axis_names = axis_names
        self._devices = np.reshape(devices, shape)

    @property
    def shape(self):
        return self._shape

    @property
    def axis_names(self):
        return self._axis_names

    @property
    def devices(self):
        return self._devices

    @property
    def backend_mesh(self):
        if not hasattr(self, "_backend_mesh"):
            self._backend_mesh = distribution_lib._to_backend_mesh(self)
        return self._backend_mesh

    def __repr__(self):
        return (
            f"<{self.__class__.__name__} "
            f"shape={self.shape}, axis_names={self.axis_names}>"
        )

    def __str__(self):
        return self.__repr__()
r   c                 "    | j                         S r,   r:   r-   s    r   __str__zDeviceMesh.__str__       }}r   r,   )r9   
__module____qualname____doc__r*   propertyr"   r(   r)   r4   r:   r=    r   r   r   r      sk    6 	3>         " "

r   r   zkeras.distribution.TensorLayoutc                   ~    e Zd ZdZddZed        Zed        Zej                  d        Zed        Z	d Z
d	 Zd
 Zy)TensorLayouta  A layout to apply to a tensor.

    This API is aligned with `jax.sharding.NamedSharding`
    and `tf.dtensor.Layout`.

    See more details in [jax.sharding.NamedSharding](
        https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.NamedSharding)
    and [tf.dtensor.Layout](
        https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Layout).

    Args:
        axes: tuple of strings that should map to the `axis_names` in
            a `DeviceMesh`. For any dimension that doesn't need sharding,
            `None` can be used as a placeholder.
        device_mesh: Optional `DeviceMesh` that will be used to create
            the layout. The actual mapping of tensor to physical device
            is not known until the mesh is specified.
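
    Example (a sketch; assumes a `DeviceMesh` with an axis named `"model"`,
    such as the 2 x 4 mesh shown in the `DeviceMesh` docstring):

    ```python
    # `device_mesh` is the mesh created in the `DeviceMesh` example.
    # Replicate dimension 0 and shard dimension 1 along the "model" axis.
    layout = keras.distribution.TensorLayout(
        axes=(None, "model"), device_mesh=device_mesh
    )
    ```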
    Nc                 R    t        |      | _        || _        | j                          y r,   )tuple_axes_device_mesh_validate_axes)r'   axesdevice_meshs      r   r*   zTensorLayout.__init__   s!    4[
'r   c                     | j                   S r,   )rH   r-   s    r   rK   zTensorLayout.axes   s    zzr   c                     | j                   S r,   rI   r-   s    r   rL   zTensorLayout.device_mesh          r   c                 z    | j                   t        d| j                          || _         | j                          y )Nz5Cannot override device mesh value. Existing value is )rI   r   rJ   r'   rL   s     r   rL   zTensorLayout.device_mesh   sG    ( --.0  (r   c                 f    t        | d      st        j                  |       | _        | j                  S )N_backend_layout)r2   r   _to_backend_layoutrT   r-   s    r   backend_layoutzTensorLayout.backend_layout  s,    t./#3#F#Ft#LD ###r   c                     | j                   rXt        | j                   j                        }t        | j                        t        d g      z
  }||z
  rt	        d| d|       y y )Nz1Invalid axis names for Layout. Valid axis names: z, Got )rI   setr(   rH   r   )r'   valid_axis_namesr(   s      r   rJ   zTensorLayout._validate_axes
  sq    "4#4#4#?#?@TZZ3v;6J,, ./vj\C  - r   c                 j    d| j                   j                   d| j                   d| j                   dS )Nr6   z axes=z, device_mesh=r7   )r8   r9   rK   rL   r-   s    r   r:   zTensorLayout.__repr__  s<    ''( )II;nT-=-=,>aA	
r   c                 "    | j                         S r,   r<   r-   s    r   r=   zTensorLayout.__str__  r>   r   r,   )r9   r?   r@   rA   r*   rB   rK   rL   setterrV   rJ   r:   r=   rC   r   r   rE   rE      sq    &
   ! !   $ $

r   rE   c                       e Zd ZdZddZd Zd Zd Zej                  d        Z
ed        Zed	        Zd
 Zd Zd Zy)Distributiona  Base class for variable distribution strategies.

    A `Distribution` has the following key functionalities:

    1. Distribute the model variables to a `DeviceMesh`.
    2. Distribute the input data to a `DeviceMesh`.
    3. Distribute an intermediate state tensor in the model.

    It can create a context scope so that the framework can properly detect
    the `Distribution` and distribute the variables/data accordingly.

    Args:
        device_mesh: A `DeviceMesh` instance.
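
    Example (a minimal sketch; `Distribution` itself is abstract, and
    `DataParallel` below is one concrete subclass):

    ```python
    distribution = keras.distribution.DataParallel()
    with distribution.scope():
        model = keras.Sequential([keras.layers.Dense(2)])
    ```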
    Nc                      || _         || _        y r,   )rI   _batch_dim_name)r'   rL   batch_dim_names      r   r*   zDistribution.__init__.  s    '-r   c                     t               )a,  Retrieve the `TensorLayout` for the input data.

        Args:
            data_shape: shape for the input data in list or tuple format.

        Returns:
            The `TensorLayout` for the data, which can be used by
            `backend.distribute_value()` to redistribute the input data.
        NotImplementedError)r'   
data_shapes     r   get_data_layoutzDistribution.get_data_layout2       "##r   c                     t               )a  Retrieve the `TensorLayout` for the variable.

        Args:
            variable: A `Variable` instance.

        return:
            The `TensorLayout` for the variable, which can be used by
            `backend.distribute_value()` to redistribute a variable.
        rc   )r'   variables     r   get_variable_layoutz Distribution.get_variable_layout>  rg   r   c                     t               )aL  Retrieve the `TensorLayout` for the intermediate tensor.

        Args:
            path: a string path for the corresponding tensor.

        return:
            The `TensorLayout` for the intermediate tensor, which can be used
            by `backend.relayout()` to reshard the tensor. Could also return
            None.
        rc   r'   paths     r   get_tensor_layoutzDistribution.get_tensor_layoutJ       "##r   c              #   v   K   t               }t        |        	 d t        |       y# t        |       w xY ww)z3Context manager to make the `Distribution` current.N)r   set_distribution)r'   original_scopes     r   scopezDistribution.scopeW  s0      &	-^,^,s   9) 969c                     | j                   S r,   rO   r-   s    r   rL   zDistribution.device_mesha  rP   r   c                     | j                   S r,   )r`   r-   s    r   ra   zDistribution.batch_dim_namee  s    ###r   c                     t               )ae  Create a distributed dataset instance from the original user dataset.

        Args:
            dataset: the original global dataset instance. Only
            `tf.data.Dataset` is supported at the moment.

        Returns:
            a sharded `tf.data.Dataset` instance, which will produce data for
            the current local worker/process.
        rc   )r'   datasets     r   distribute_datasetzDistribution.distribute_dataseti  ro   r   c                 P    d| j                   j                   d| j                   dS )Nr6   z device_mesh=r7   )r8   r9   rL   r-   s    r   r:   zDistribution.__repr__v  s)    4>>**+=9I9I8J!LLr   c                 "    | j                         S r,   r<   r-   s    r   r=   zDistribution.__str__y  r>   r   r,   )r9   r?   r@   rA   r*   rf   rj   rn   
contextlibcontextmanagerrs   rB   rL   ra   rx   r:   r=   rC   r   r   r^   r^     sm    .
$
$$ - - ! ! $ $$Mr   r^   zkeras.distribution.DataParallelc                   V     e Zd ZdZd
dZ fdZ fdZ fdZd Zd Z	d Z
d	 Z xZS )DataParallelat  Distribution for data parallelism.

    You can choose to create this instance by either specifying
    the `device_mesh` or `devices` arguments (but not both).

    The `device_mesh` argument is expected to be a `DeviceMesh` instance,
    and is expected to be 1D only. If the mesh has multiple axes,
    then the first axis will be treated as the data parallel dimension
    (and a warning will be raised).

    When a list of `devices` is provided, it will be used to construct a
    1D mesh.

    When both `device_mesh` and `devices` are absent, `list_devices()`
    will be used to detect any available devices and create a 1D mesh from
    them.

    Args:
        device_mesh: Optional `DeviceMesh` instance.
        devices: Optional list of devices.
        auto_shard_dataset: Automatically shard the dataset amongst processes.
            Defaults to `True`.
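
    Example (a minimal sketch; uses all devices visible to the current
    process):

    ```python
    import keras

    distribution = keras.distribution.DataParallel()
    keras.distribution.set_distribution(distribution)

    # Variables created from here on are replicated across devices, and
    # each input batch is split along the batch dimension.
    model = keras.Sequential([keras.layers.Dense(2)])
    ```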
    c                    |r| j                  |       n$|r| j                  |       n| j                          t        j                         | _        t        j                         | _        | j
                  dkD  | _        || _	        y )N   )
_initialize_with_device_mesh_initialize_mesh_from_devices"_initialize_mesh_from_list_devicesr   r   _num_processr   _process_id_is_multi_process_auto_shard_dataset)r'   rL   r)   auto_shard_datasets       r   r*   zDataParallel.__init__  so    --k:..w7335 -::<+668!%!2!2Q!6#5 r   c                 2   t        |t              st        d| dt        |       d      t        |   ||j                  d          | j                  j                  j                  dk7  r+t        j                  d|j                  j                         y y )Nz@Expect `mesh` to be an instance of `DeviceMesh`. Received: mesh=z
 (of type )r   r   zzExpect the input mesh to be 1D, but received mesh.devices.ndim=%d. The first axis will be used for data-parallel sharding.)
isinstancer   r   typesuperr*   r(   rL   r)   ndimwarningswarn)r'   rL   r8   s     r   r   z)DataParallel._initialize_with_device_mesh  s    +z2""-jk9J8K1N  	k&<&<Q&?@##((A-MMJ ##((	 .r   c                     t        j                  |      }t        |j                  t        g|      }t
        |   |t               y N)r"   r(   r)   )r   r    r   r"   DEFAULT_BATCH_DIM_NAMEr   r*   r'   r)   rL   r8   s      r   r   z*DataParallel._initialize_mesh_from_devices  s=    ((7# --./

 	&<=r   c                     t        j                  t                     }t        |j                  t
        g|      }t        |   |t
               y r   )r   r    r
   r   r"   r   r   r*   r   s      r   r   z/DataParallel._initialize_mesh_from_list_devices  s?    ((<>* --./

 	&<=r   c                 j    d gt        |      z  }| j                  |d<   t        || j                        S Nr   r   ra   rE   rL   r'   re   data_shard_specs      r   rf   zDataParallel.get_data_layout  5    &3z?2!00OT-=-=>>r   c                     t        |dd       |j                  S d gt        |j                        z  }t	        || j
                        S N_layout)getattrr   r   r"   rE   rL   )r'   ri   variable_shard_specs      r   rj   z DataParallel.get_variable_layout  sG    8Y-9####fs8>>'::/1A1ABBr   c                      y r,   rC   rl   s     r   rn   zDataParallel.get_tensor_layout  s    r   c                 >   ddl m} ddlm} t	        ||j
                  j                        st        dt        |             | j                  r| j                  s|S |j                  |      }|j                         dk  rt        d      |j                  || j                  d| j                        }|j!                  |      }|j#                  || j                  | j                  | j                        }|j%                  |j
                  j&                        S )	Nr   
distribute
tensorflow6Only `tf.data.Dataset` is supported for sharding, got The batch size of the input dataset is unknown. Please config the batch size for the input dataset, e.g via `dataset.batch(batch_size)`r   )global_batch_sizenum_workersnum_replicas_per_workerworker_index)r   indexnum_replicas)'tensorflow.python.data.experimental.opsr   keras.src.utils.module_utilsr   r   dataDatasetr   r   r   r   compute_batch_sizenumpybatch_sizes_for_workerr   r   rebatch_AutoShardDatasetprefetchAUTOTUNE)r'   rw   tf_data_distributetf
batch_sizeper_worker_batch_sizedistributed_datasets          r   rx   zDataParallel.distribute_dataset  s    	
 	B'277??3!!%g1  %%T-E-EN'::7C
!I 
 !3 I I())$%))	 !J !
 &oo.CD0BB))""**	 C 
 #++BGG,<,<==r   )NNT)r9   r?   r@   rA   r*   r   r   r   rf   rj   rn   rx   __classcell__r8   s   @r   r~   r~   }  s1    06>>?
C#>r   r~   z keras.distribution.ModelParallelc                   B     e Zd ZdZddd fd
Zd Zd Zd Zd Z xZ	S )	ModelParallela2  Distribution that shards model variables.

    Compare to `DataParallel` which replicates the variables across all devices,
    `ModelParallel` allows you to shard variables in addition to the input data.

    To construct a `ModelParallel` distribution, you need to provide a
    `DeviceMesh` and a `LayoutMap`.

    1. `DeviceMesh` contains physical device information. The axis names in
        the mesh will be used to map the variable and data layout.
    2. `LayoutMap` contains the mapping between variable paths to their
        corresponding `TensorLayout`.

    Example:

    ```python
    devices = list_devices()    # Assume there are 8 devices.

    # Create a mesh with 2 devices for data parallelism and 4 devices for
    # model parallelism.
    device_mesh = DeviceMesh(shape=(2, 4), axis_names=('batch', 'model'),
                             devices=devices)
    # Create a layout map that shard the `Dense` layer and `Conv2D`
    # layer variables on the last dimension.
    # Based on the `device_mesh`, this means the variables
    # will be split across 4 devices. Any other variable that doesn't
    # match any key in the layout map will be fully replicated.
    layout_map = LayoutMap(device_mesh)
    layout_map['dense.*kernel'] = (None, 'model')
    layout_map['dense.*bias'] = ('model',)
    layout_map['conv2d.*kernel'] = (None, None, None, 'model')
    layout_map['conv2d.*bias'] = ('model',)

    distribution = ModelParallel(
        layout_map=layout_map,
        batch_dim_name='batch',
    )

    # Set the global distribution, or via `with distribution.scope():`
    set_distribution(distribution)

    model = model_creation()
    model.compile()
    model.fit(data)
    ```

    You can quickly update the device mesh shape to change the sharding factor
    of the variables. E.g.

    ```python
    # With only the shape change for the device mesh, the variables will be
    # sharded across 8 devices instead of 4, which further reduces the memory
    # footprint of variables on each of the device.
    device_mesh = DeviceMesh(
        shape=(1, 8),
        axis_names=('batch', 'model'),
        devices=devices,
    )
    ```

    To figure out a proper layout mapping rule for all the model variables, you
    can first list out all the model variable paths, which will be used as the
    key to map the variables to `TensorLayout`.

    e.g.

    ```python
    model = create_model()
    for v in model.variables:
        print(v.path)
    ```

    Args:
        layout_map: `LayoutMap` instance which map the variable path to the
            corresponding tensor layout.
        batch_dim_name: Optional string, the axis name in the device mesh
            (of the `layout_map` object)
            that will be used to distribute data. If unspecified, the
            first axis from the device mesh will be used.
    N)
layout_mapra   c                   |j                  dd        |t        d      t        |t              st        d|       |j                  }t
        |   |       || _        |xs | j                  j                  d   | _	        t        j                         | _        t        j                         | _        | j                  dkD  | _        y )NrL   z'You must specify a layout_map argument.zKArgument `layout_map` must be a `LayoutMap` instance. Received: layout_map=r   r   )popr   r   	LayoutMaprL   r   r*   _layout_mapr(   r`   r   r   r   r   r   r   )r'   r   ra   kwargsrL   r8   s        r   r*   zModelParallel.__init__P  s    

=$'FGG*i0((2|5  !,,%%-O1A1A1L1LQ1O -::<+668!%!2!2Q!6r   c                 j    d gt        |      z  }| j                  |d<   t        || j                        S r   r   r   s      r   rf   zModelParallel.get_data_layoutc  r   r   c                     t        |dd       |j                  S | j                  |j                     }||S d gt	        |j
                        z  }t        || j                        S r   )r   r   r   rm   r   r"   rE   rL   )r'   ri   variable_layoutr   s       r   rj   z!ModelParallel.get_variable_layouth  sg    8Y-9###**8==9&""#fs8>>'::/1A1ABBr   c                      | j                   |   S r,   )r   rl   s     r   rn   zModelParallel.get_tensor_layouts  s    %%r   c                 J   ddl m} ddlm} t	        ||j
                  j                        st        dt        |             | j                  s|S |j                  |      }|j                         dk  rt        d      | j                  j                  j                  | j                        }| j                  j                   |   }|dk(  r%|j#                  |j
                  j$                        S || j&                  z  }|dk\  r|| j&                  z  dk7  rt        d| d| j&                         || j&                  z  }|j)                  |      }	|	j+                  | j&                  | j,                  	      }	|	j#                  |j
                  j$                        S ||z  dk7  rt        d
| d|       ||z  }|j)                  |      }	| j&                  |z  }
| j,                  |
z  }|	j+                  ||	      }	|	j#                  |j
                  j$                        S )Nr   r   r   r   r   r   zTGlobal batch size must be divisible by the number of processes. `global_batch_size`=z and `num_process`=)
num_shardsr   zSGlobal batch size must be divisible by the number of replicas. `global_batch_size`=z and `num_model_replicas`=)r   r   r   r   r   r   r   r   r   r   r   r   rL   r(   r   ra   r"   r   r   r   r   shardr   )r'   rw   r   r   r   mesh_batch_dim_indexnum_model_replicasnum_model_replicas_per_processper_process_batch_sizer   processes_per_replicadata_shard_ids               r   rx   z ModelParallel.distribute_datasetv  sd   	
 	B'277??3!!%g1  %%N.AA'J""$q(I   $//::@@ 
 "--334HI" ##BGG$4$455);d>O>O)O&)Q. !4#4#449 66G5H I%%)%6%6$79 
 &7$:K:K%K"")//2H"I"5";";,,&& #< # '//0@0@AA
 !#55: 55F4G H,,>+?A 
 &7:L%L"")//2H"I$($5$59K$K! ,,/DDM"5";";-# #< # '//0@0@AAr   )
r9   r?   r@   rA   r*   rf   rj   rn   rx   r   r   s   @r   r   r     s-    Ob &*$ 7&?
	C&HBr   r   zkeras.distribution.LayoutMapc                   J    e Zd ZdZd Zd Zd Zd Zd Zd Z	e
d        Zd	 Zy
)r   a  A dict-like object that maps string to `TensorLayout` instances.

    `LayoutMap` uses a string as key and a `TensorLayout` as value. There is a
    behavior difference between a normal Python dict and this class. The string
    key will be treated as a regex when retrieving the value. See the docstring
    of `get` for more details.

    See below for a usage example. You can define the naming schema
    of the `TensorLayout`, and then retrieve the corresponding
    `TensorLayout` instance.

    In the normal case, the key to query is usually the `variable.path`, which
    is the identifier of the variable.

    As a shortcut, a tuple or list of axis names is also allowed when
    inserting a value, and it will be converted to a `TensorLayout`.

    ```python
    layout_map = LayoutMap(device_mesh)
    layout_map['dense.*kernel'] = (None, 'model')
    layout_map['dense.*bias'] = ('model',)
    layout_map['conv2d.*kernel'] = (None, None, None, 'model')
    layout_map['conv2d.*bias'] = ('model',)

    layout_1 = layout_map['dense_1.kernel']             # layout_1 == layout_2d
    layout_2 = layout_map['dense_1.bias']               # layout_2 == layout_1d
    layout_3 = layout_map['dense_2.kernel']             # layout_3 == layout_2d
    layout_4 = layout_map['dense_2.bias']               # layout_4 == layout_1d
    layout_5 = layout_map['my_model/conv2d_123/kernel'] # layout_5 == layout_4d
    layout_6 = layout_map['my_model/conv2d_123/bias']   # layout_6 == layout_1d
    layout_7 = layout_map['my_model/conv3d_1/kernel']   # layout_7 == None
    layout_8 = layout_map['my_model/conv3d_1/bias']     # layout_8 == None
    ```

    Args:
        device_mesh: `keras.distribution.DeviceMesh` instance.
    c                 D    t        j                         | _        || _        y r,   )collectionsOrderedDictr   rI   rR   s     r   r*   zLayoutMap.__init__  s    &224'r   c                 4   || j                   v r| j                   |   S g }| j                   D ]*  }t        j                  ||      s|j                  |       , t	        |      dkD  rt        d| d| d      t	        |      dk(  r| j                   |d      S y)a  Retrieves the corresponding layout by the string key.

        When there isn't an exact match, all the existing keys in the layout map
        will be treated as a regex and map against the input key again. When
        there are multiple matches for the regex, an `ValueError` will be
        raised. Returns `None` if there isn't any match found.

        Args:
            key: String key to query a layout.

        Returns:
            Corresponding layout based on the query.
        r   zPath 'z.' matches multiple layout specification keys: zp. Please make sure each tensor/variable path only matches at most one layout specification key in the LayoutMap.r   N)r   researchappendr   r   )r'   keymatching_keysks       r   __getitem__zLayoutMap.__getitem__  s     $"""##C((!! 	(AyyC $$Q'	( }! ''4o 6AA  1$##M!$455r   c                 (   || j                   v rt        | d| j                   |    d      t        |t              rt	        |d      }t        |t              st        | dt        |             | j                  |       || j                   |<   y)a  Insert TensorLayout to the LayoutMap.

        Args:
            key: String key for the `TensorLayout`.
            layout: The `TensorLayout`. As a shortcut, tuple of string and None
                are also acceptable, and will be converted to `TensorLayout`.
        z+ already exist in the LayoutMap with value z.. Please make sure to not use duplicated keys.N)rK   rL   z$ should be a TensorLayout type, got )r   r   r   rG   rE   r   _maybe_populate_device_mesh)r'   r   layouts      r   __setitem__zLayoutMap.__setitem__  s     $"""% ))#./ 0++ 
 fe$!v4@F&,/(>tF|nM  	((0 &r   c                 8    | j                   j                  |      S r,   )r   r   )r'   r   s     r   __delitem__zLayoutMap.__delitem__%  s    ##C((r   c                 ,    t        | j                        S r,   )r   r   r-   s    r   __len__zLayoutMap.__len__)  s    4##$$r   c                 ,    t        | j                        S r,   )iterr   r-   s    r   __iter__zLayoutMap.__iter__,  s    D$$%%r   c                     | j                   S r,   rO   r-   s    r   rL   zLayoutMap.device_mesh/  rP   r   c                 Z    |j                   | j                   | j                   |_         y y y r,   )rL   )r'   r   s     r   r   z%LayoutMap._maybe_populate_device_mesh3  s0    %$*:*:*F!%!1!1F +G%r   N)r9   r?   r@   rA   r*   r   r   r   r   r   rB   rL   r   rC   r   r   r   r     s@    $L(@'0)%& ! !2r   r   z$keras.distribution.distribute_tensorc                 R    t        | t              r| S t        j                  | |      S )a   Change the layout of a Tensor value in the jit function execution.

    Args:
        tensor: a Tensor to change the layout.
        layout: `TensorLayout` to be applied on the value.

    Returns:
        a new value with the specified tensor layout.
    )r   r   r   distribute_tensor)tensorr   s     r   r   r   ;  s(     &+& --ff==r   zkeras.distribution.distributionc                  4    t        j                  t              S )z6Retrieve the current distribution from global context.)r   get_global_attributeGLOBAL_ATTRIBUTE_NAMErC   r   r   r   r   M  s     ,,-BCCr   z#keras.distribution.set_distributionc                 8    t        j                  t        |        y)zrSet the distribution as the global distribution setting.

    Args:
        value: a `Distribution` instance.
    N)r   set_global_attributer   )values    r   rq   rq   S  s     %%&;UCr   r,   )NNN) rA   r   r{   r   r   r   r   r   keras.src.api_exportr   keras.src.backendr   r   keras.src.backend.commonr   r   r   r
   r   r   rE   r^   r~   r   abcMutableMappingr   r   getr   r   rq   rC   r   r   <module>r      s     	 	   - ) . 1  &  /06 16" -.VJ /VJr -.P P /Pf /0B B 1BJ\ \~ /0|>< |> 1|>~ 01@BL @B 2@BF ,-s2.. s2 .s2l "--55	  45> 6>" /0D 1D
 34D 5Dr   