
    Vh"                     <   d Z ddlmZ ddlmZ ddlZddlmZmZ g dZ	de
fd	Zdefd
Zddedeej                     fdZde
fdZ  ede      e      ZdeddfdZ  ede      e      Zddedej*                  fdZdej*                  ddfdZddeddfdZy)z]
This package introduces support for the current :ref:`accelerator<accelerators>` in Python.
"""

from typing import Optional

from typing_extensions import deprecated

import torch

from ._utils import _device_t, _get_device_index

__all__ = [
    "current_accelerator",
    "current_device_idx",  # deprecated alias of current_device_index
    "current_device_index",
    "current_stream",
    "device_count",
    "is_available",
    "set_device_idx",  # deprecated alias of set_device_index
    "set_device_index",
    "set_stream",
    "synchronize",
]


def device_count() -> int:
    r"""Return the number of devices available for the current :ref:`accelerator<accelerators>`.

    Returns:
        int: the number of devices available for the current :ref:`accelerator<accelerators>`.
            If there is no available accelerator, return 0.
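
    Example::

        >>> # An illustrative sketch; the result depends on the host and is 0 when
        >>> # no accelerator is available.
        >>> cnt = torch.accelerator.device_count()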
    """
    return torch._C._accelerator_deviceCount()


def is_available() -> bool:
    r"""Check if the current accelerator is available at runtime: it was built, all the
    required drivers are available and at least one device is visible.
    See :ref:`accelerator<accelerators>` for details.

    Returns:
        bool: A boolean indicating if there is an available :ref:`accelerator<accelerators>`.

    Example::

        >>> assert torch.accelerator.is_available(), "No available accelerators detected."
    """
    acc = current_accelerator()
    if acc is None:
        return False

    mod = torch.get_device_module(acc)
    return mod.is_available()


def current_accelerator(check_available: bool = False) -> Optional[torch.device]:
    r"""Return the device of the accelerator available at compilation time.
    If no accelerator was available at compilation time, returns None.
    See :ref:`accelerator<accelerators>` for details.

    Args:
        check_available (bool, optional): if True, also perform a runtime check that
            the device is actually available (see :func:`torch.accelerator.is_available`)
            on top of the compile-time check.
            Default: ``False``

    Returns:
        torch.device: the current accelerator as a :class:`torch.device`.

    .. note:: The index of the returned :class:`torch.device` will be ``None``; use
        :func:`torch.accelerator.current_device_index` to get the index of the device
        currently in use.

    Example::

        >>> # xdoctest:
        >>> # If an accelerator is available, send the model to it
        >>> model = torch.nn.Linear(2, 2)
        >>> if (current_device := current_accelerator(check_available=True)) is not None:
        >>>     model.to(current_device)
    """
    if (acc := torch._C._accelerator_getAccelerator()) is not None:
        if not check_available or (check_available and is_available()):
            return acc
    return None


def current_device_index() -> int:
    r"""Return the index of a currently selected device for the current :ref:`accelerator<accelerators>`.

    Returns:
        int: the index of a currently selected device.
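
    Example::

        >>> # An illustrative sketch; assumes at least one accelerator device is visible.
        >>> idx = torch.accelerator.current_device_index()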
    """
    return torch._C._accelerator_getDeviceIndex()


current_device_idx = deprecated(
    "Use `current_device_index` instead.",
    category=FutureWarning,
)(current_device_index)


def set_device_index(device: _device_t) -> None:
    r"""Set the current device index to a given device.

    Args:
        device (:class:`torch.device`, str, int): a given device that must match the current
            :ref:`accelerator<accelerators>` device type.

    .. note:: This function is a no-op if this device index is negative.
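
    Example::

        >>> # An illustrative sketch; assumes device index 0 exists for the
        >>> # current accelerator.
        >>> torch.accelerator.set_device_index(0)
        >>> assert torch.accelerator.current_device_index() == 0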
    """
    device_index = _get_device_index(device)
    torch._C._accelerator_setDeviceIndex(device_index)


set_device_idx = deprecated(
    "Use `set_device_index` instead.",
    category=FutureWarning,
)(set_device_index)


def current_stream(device: _device_t = None) -> torch.Stream:
    r"""Return the currently selected stream for a given device.

    Args:
        device (:class:`torch.device`, str, int, optional): a given device that must match the current
            :ref:`accelerator<accelerators>` device type. If not given,
            use :func:`torch.accelerator.current_device_index` by default.

    Returns:
        torch.Stream: the currently selected stream for a given device.
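
    Example::

        >>> # An illustrative sketch; assumes an available accelerator.
        >>> s = torch.accelerator.current_stream()  # stream of the current device
        >>> s0 = torch.accelerator.current_stream(0)  # stream of device 0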
    """
    device_index = _get_device_index(device, True)
    return torch._C._accelerator_getStream(device_index)


def set_stream(stream: torch.Stream) -> None:
    r"""Set the current stream to a given stream.

    Args:
        stream (torch.Stream): a given stream that must match the current :ref:`accelerator<accelerators>` device type.

    .. note:: This function will set the current device index to the device index of the given stream.
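
    Example::

        >>> # An illustrative sketch; assumes an available accelerator and uses
        >>> # torch.Stream to create a new stream on it.
        >>> acc = torch.accelerator.current_accelerator()
        >>> s = torch.Stream(device=acc)
        >>> torch.accelerator.set_stream(s)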
    """
    torch._C._accelerator_setStream(stream)


def synchronize(device: _device_t = None) -> None:
    r"""Wait for all kernels in all streams on the given device to complete.

    Args:
        device (:class:`torch.device`, str, int, optional): device for which to synchronize. It must match
            the current :ref:`accelerator<accelerators>` device type. If not given,
            use :func:`torch.accelerator.current_device_index` by default.

    .. note:: This function is a no-op if the current :ref:`accelerator<accelerators>` is not initialized.

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> assert torch.accelerator.is_available(), "No available accelerators detected."
        >>> start_event = torch.Event(enable_timing=True)
        >>> end_event = torch.Event(enable_timing=True)
        >>> start_event.record()
        >>> tensor = torch.randn(100, device=torch.accelerator.current_accelerator())
        >>> sum = torch.sum(tensor)
        >>> end_event.record()
        >>> torch.accelerator.synchronize()
        >>> elapsed_time_ms = start_event.elapsed_time(end_event)
    """
    device_index = _get_device_index(device, True)
    torch._C._accelerator_synchronizeDevice(device_index)