
"""!!!DO NOT USE!!!

Distribution related class for Tensorflow backend.

This is just a prototype and we might want to unify it
with other backends in the future.
    N)dtensorc                    | r| j                         nd} t        j                  j                  |       }g }g }|D ]B  }|j                  j                         dk(  r|j                  |       2|j                  |       D | t        |      dkD  r|n|}|D cg c]=  }|j                  j                          d|j                  j                  d      d    ? c}S c c}w )a  Return all the available devices based on the device type.

    Note that this should return the global devices in a distributed setting.

    Args:
        device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to `gpu`
        or `tpu` if available when `device_type` is not provided; otherwise
        the `cpu` devices are returned.

    Returns:
        List of devices that are available for distribute computation.
    """
    device_type = device_type.upper() if device_type else None
    tf_devices = tf.config.list_logical_devices(device_type)
    cpu_devices = []
    other_devices = []
    for device in tf_devices:
        if device.device_type.lower() == "cpu":
            cpu_devices.append(device)
        else:
            other_devices.append(device)
    if device_type is None:
        # Prefer accelerator (GPU/TPU) devices and fall back to CPU.
        tf_devices = other_devices if len(other_devices) > 0 else cpu_devices
    # Normalize to `"<device_type>:<index>"` strings, e.g. `"gpu:0"`.
    return [
        f"{device.device_type.lower()}:{device.name.split(':')[-1]}"
        for device in tf_devices
    ]


def distribute_value(value, tensor_layout):
    # Not implemented in this prototype backend.
    pass


def _to_backend_mesh(device_mesh):
    """Convert the DeviceMesh to Tensorflow backend specific Mesh.

    Args:
        device_mesh: DeviceMesh instance to convert.

    Returns:
        A `tf.dtensor.Mesh` instance.
    """
    mesh_dims = list(zip(device_mesh.axis_names, device_mesh.shape))
    return dtensor.create_distributed_mesh(
        mesh_dims=mesh_dims, local_devices=device_mesh.devices.flatten()
    )


def _to_backend_layout(tensor_layout):
    """Convert the TensorLayout to Tensorflow backend specific Sharding.

    Args:
        tensor_layout: TensorLayout instance to convert.

    Returns:
        A `tf.dtensor.Layout` instance.
    """
    if tensor_layout.device_mesh is None:
        raise ValueError(
            "Cannot create sharding when device mesh is not set for "
            "TensorLayout."
        )
    sharding_specs = [
        axis if axis else dtensor.UNSHARDED for axis in tensor_layout.axes
    ]
    dtensor_mesh = tensor_layout.device_mesh.backend_mesh
    return dtensor.Layout(sharding_specs=sharding_specs, mesh=dtensor_mesh)
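

# Illustrative usage sketch, kept as comments because this is library code.
# It assumes the `DeviceMesh` and `TensorLayout` classes from
# `keras.distribution` and a single-host setup; the actual device strings
# depend on the machine, so treat the values below as examples only.
#
#   >>> list_devices("cpu")  # e.g. ["cpu:0"] on a single-CPU host
#   >>> from keras.distribution import DeviceMesh, TensorLayout
#   >>> mesh = DeviceMesh(
#   ...     shape=(1,), axis_names=("batch",), devices=list_devices("cpu")
#   ... )
#   >>> tf_mesh = _to_backend_mesh(mesh)  # tf.experimental.dtensor.Mesh
#   >>> layout = TensorLayout(axes=("batch", None), device_mesh=mesh)
#   >>> # `_to_backend_layout` reads `device_mesh.backend_mesh`, so the mesh
#   >>> # is expected to have been converted and attached by the distribution
#   >>> # machinery before this call.
#   >>> tf_layout = _to_backend_layout(layout)  # tf.experimental.dtensor.Layout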