
import contextlib

import torch
import torch.utils._pytree as pytree


@contextlib.contextmanager
def set_autograd_fallback_mode(mode):
    """Temporarily set the autograd fallback mode, restoring the previous mode on exit."""
    prev = torch._C._get_autograd_fallback_mode()
    try:
        torch._C._set_autograd_fallback_mode(mode)
        yield
    finally:
        torch._C._set_autograd_fallback_mode(prev)


def autograd_registration_check(op, args, kwargs):
    """Check if autograd was registered correctly (for the operator).

    Operators should have "autograd support" registered directly to an
    autograd dispatch key.
    An incorrect registration may lead to unexpected silent incorrectness.
    Note that this check won't catch all problems but will catch
    the most common ones.

    Example usage:
        >>> x = torch.randn(3, requires_grad=True)
        >>> autograd_registration_check(torch.ops.aten.sin.default, (x,), {})

    Here are some best practices if you do find your autograd is
    registered incorrectly:
    - If the operator is composite (i.e. consists of other PyTorch ops)
      and you wish the operator to decompose and get autograd support
      that way, then please register the implementation to
      DispatchKey::CompositeImplicitAutograd
    - If you're adding an autograd formula for the operator, the correct
      thing to do is to register an autograd.Function to
      DispatchKey::Autograd (preferred) or one of the
      DispatchKey::Autograd<BACKEND> keys. It is NOT OK to register
      an autograd.Function to a backend (e.g. CPU/CUDA) key; a sketch of
      the preferred pattern follows this list.
    - If your operator is non-differentiable, then you should register
      an implementation to the Autograd key that uses
      AutoDispatchBelowAutograd and re-invokes the operator.
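
    The following is a rough, illustrative sketch of the preferred pattern
    (an autograd.Function registered to the Autograd key). The op name
    mylib::my_sin, its CPU kernel, and the exact torch.library calls are
    assumptions for illustration and may need adjusting for your operator:

        >>> # xdoctest: +SKIP
        >>> lib = torch.library.Library("mylib", "FRAGMENT")
        >>> lib.define("my_sin(Tensor x) -> Tensor")
        >>> lib.impl("my_sin", torch.sin, "CPU")  # backend kernel only
        >>>
        >>> class MySin(torch.autograd.Function):
        ...     @staticmethod
        ...     def forward(ctx, x):
        ...         ctx.save_for_backward(x)
        ...         # Re-dispatch below Autograd so the backend kernel runs.
        ...         with torch._C._AutoDispatchBelowAutograd():
        ...             return torch.ops.mylib.my_sin(x)
        ...
        ...     @staticmethod
        ...     def backward(ctx, grad):
        ...         x, = ctx.saved_tensors
        ...         return grad * x.cos()
        >>>
        >>> # Register the autograd.Function to the Autograd key, not CPU/CUDA:
        >>> lib.impl("my_sin", MySin.apply, "Autograd")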

    c              3   4   K   | ]  }|j                     y wr   )requires_grad).0ts     r   	<genexpr>z.autograd_registration_check.<locals>.<genexpr>M   s     41q4s   zautograd_registration_check: no inputs have requires_grad=True so we are unable to actually perform this test. Please pass inputs that do require grad.cpucudazBautograd_registration_check: NYI devices other than CPU/CUDA, got AutogradCUDAAutogradCPUNAutogradCompositeImplicitAutogradnothingc                 <    | j                   syt        |       v ryy)NFT)r   id)tensorinp_idss    r   not_an_input_and_requires_gradzCautograd_registration_check.<locals>.not_an_input_and_requires_gradp   s!    ##f:     aQ  : at least one output of this operator has requires_grad=True but the operator does not have an autograd kernel defined at an autograd key (e.g. DispatchKey::Autograd). This could mean that you have incorrectly registered an autograd kernel to a non-Autograd DispatchKey, which may lead to silently incorrect results. If your operator consists of regular PyTorch operations, consider not using an operator at all or registering your operator as CompositeImplicitAutograd. If you have an autograd.Function registered to a backend (CPU/CUDA) key, the correct location for it is the Autograd key.)
    """
    assert isinstance(op, torch._ops.OpOverload)
    # The check requires at least one Tensor input that requires grad;
    # otherwise there is nothing to test.
    flat_args = pytree.arg_tree_leaves(*args, **kwargs)
    all_tensors = [arg for arg in flat_args if isinstance(arg, torch.Tensor)]
    if not any(t.requires_grad for t in all_tensors):
        raise RuntimeError(
            "autograd_registration_check: no inputs have requires_grad=True so "
            "we are unable to actually perform this test. Please pass inputs "
            "that do require grad."
        )

    # Determine which Autograd<BACKEND> key to check.
    all_device_types = {arg.device.type for arg in all_tensors}
    if not all_device_types.issubset(["cpu", "cuda"]):
        raise NotImplementedError(
            f"autograd_registration_check: NYI devices other than CPU/CUDA, "
            f"got {all_device_types}"
        )
    if "cuda" in all_device_types:
        key = "AutogradCUDA"
    elif "cpu" in all_device_types:
        key = "AutogradCPU"

    # If the operator has a kernel at an autograd key (or decomposes via
    # CompositeImplicitAutograd), the registration is fine.
    if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), key):
        return
    if torch._C._dispatch_has_kernel_for_dispatch_key(op.name(), "Autograd"):
        return
    if torch._C._dispatch_has_kernel_for_dispatch_key(
        op.name(), "CompositeImplicitAutograd"
    ):
        return

    # Otherwise, run the operator with the autograd fallback set to "nothing".
    # If some output that is not an input still comes back with
    # requires_grad=True, a non-autograd kernel must be setting up autograd
    # metadata, which means autograd was registered at the wrong dispatch key.
    with set_autograd_fallback_mode("nothing"):
        all_outs = op(*args, **kwargs)

    inp_ids = {id(arg) for arg in flat_args}

    def not_an_input_and_requires_grad(tensor):
        if not tensor.requires_grad:
            return False
        if id(tensor) in inp_ids:
            return False
        return True

    if not pytree.tree_any_only(
        torch.Tensor, not_an_input_and_requires_grad, all_outs
    ):
        return

    raise AssertionError(
        f"{op.name()}: at least one output of this operator has "
        f"requires_grad=True but the operator does not have an autograd "
        f"kernel defined at an autograd key (e.g. DispatchKey::Autograd). "
        f"This could mean that you have incorrectly registered an autograd "
        f"kernel to a non-Autograd DispatchKey, which may lead to silently "
        f"incorrect results. If your operator consists of regular PyTorch "
        f"operations, consider not using an operator at all or registering "
        f"your operator as CompositeImplicitAutograd. If you have an "
        f"autograd.Function registered to a backend (CPU/CUDA) key, the "
        f"correct location for it is the Autograd key."
    )