
from __future__ import annotations

import contextlib
from typing import Callable

import torch
import torch._ops
import torch.func
import torch.fx
from torch._subclasses import fake_tensor
from torch.fx.experimental import proxy_tensor
from torch.onnx._internal.fx import _pass, diagnostics
from torch.onnx._internal.fx.passes import _utils
from torch.utils import _pytree as pytree


class Functionalize(_pass.Transform):
    """Functionalize a GraphModule.

    This pass utilizes the ``functionalization`` utility of ``torch._functorch`` to convert
    a GraphModule into a functional form. The two main functionalities are (copied from
    its documentation):

    * ``functionalization`` removes (intermediate) mutations and aliasing from a
    function, while preserving the function's semantics.

    * ``functionalization`` also removes mutations (and views) that were performed
    on function inputs. However, to preserve semantics, functionalize will "fix up" the
    mutations after the transform has finished running, by detecting if any tensor inputs
    "should have" been mutated, and copying the new data back to the inputs if necessary.
    For example, consider::

        def fn(a, b):
            a.add_(b)
            return a

    For a call like `fn(x, y)`, the variable `x` outside is also mutated. Hence just
    functionalizing is not enough for preserving the original semantics. A "special"
    input mutation step needs to be inserted at the end::

        # After functionalization, without input mutation "fix up".
        # This is not semantically the same. The variable outside the function call that
        # was passed in as `a` is not mutated.
        def fn(a, b):
            new_a = a + b
            return new_a

        # Functionalization with input mutation "fix up" that preserves semantics.
        def fn(a, b):
            new_a = a + b

            # Copying the new data back to the inputs
            a.copy_(new_a)

            return new_a

    For ONNX inference, it is recommended to run ``RemoveInputMutation`` after this pass.
    ``RemoveInputMutation`` removes the "fix up" nodes that were added by ``Functionalize``,
    which are not needed for ONNX inference.
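
    A rough usage sketch follows. It assumes the base ``Transform.run`` entry point and
    uses illustrative names (``diagnostic_context``, ``graph_module``, ``example_args``);
    in practice this pass is driven by the ONNX exporter rather than called directly::

        # Convert the captured FX graph into functional form.
        functionalized = Functionalize(
            diagnostic_context, graph_module, enable_dynamic_axes=False
        ).run(*example_args)

        # For inference export, drop the input-mutation "fix up" nodes.
        inference_ready = RemoveInputMutation(
            diagnostic_context, functionalized
        ).run(*example_args)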
    """

    def __init__(
        self,
        diagnostic_context: diagnostics.DiagnosticContext,
        module: torch.fx.GraphModule,
        enable_dynamic_axes: bool,
        allow_fake_constant: bool | None = False,
    ):
        super().__init__(diagnostic_context, module)
        self.enable_dynamic_axes = enable_dynamic_axes
        self.allow_fake_constant = allow_fake_constant

    def _functionalize(self, function: Callable) -> Callable:
        def wrapped(*inputs):
            # Wrap all tensor inputs as functional tensors so that mutations and
            # views inside `function` are tracked instead of applied in place.
            inputs_functional = pytree.tree_map_only(
                torch.Tensor, torch._to_functional_tensor, inputs
            )
            torch._enable_functionalization(reapply_views=True)
            try:
                out = function(*inputs_functional)
            finally:
                torch._disable_functionalization()
            # Sync pending mutations back onto the functional inputs, then unwrap
            # the outputs into regular tensors.
            flat_inputs_functional = pytree.tree_leaves(inputs_functional)
            for input_functional in flat_inputs_functional:
                if isinstance(input_functional, torch.Tensor):
                    torch._sync(input_functional)
            out = pytree.tree_map(torch._from_functional_tensor, out)
            return out

        return wrapped

    def _run(self, *args) -> torch.fx.GraphModule:
        # Wrap the module so that node meta (e.g. stack traces) survives `make_fx`.
        module = _utils.wrap_graph_module_for_node_meta_preservation(self.module)

        functionalized_callable = self._functionalize(module)

        fake_mode = self.fake_mode
        maybe_fake_args = self._maybe_fakefy_args(fake_mode, *args)
        if fake_mode is not None:
            # Reuse the existing fake mode as context; "real" tells `make_fx` not to
            # create a new one.
            tracing_mode = "real"
        else:
            # No existing fake mode found; let `make_fx` create one.
            fake_mode = contextlib.nullcontext()  # type: ignore[assignment]
            tracing_mode = "symbolic" if self.enable_dynamic_axes else "fake"

        assert fake_mode is not None
        with fake_tensor.unset_fake_temporarily(), fake_mode:
            graph_module = proxy_tensor.make_fx(
                functionalized_callable,
                decomposition_table={},
                tracing_mode=tracing_mode,
                _allow_non_fake_inputs=True,
                _allow_fake_constant=bool(self.allow_fake_constant),
            )(*maybe_fake_args)

        # Restore the original placeholder names and targets that are lost during
        # re-tracing.
        _utils.replace_placeholder_name_and_target(graph_module, self.module)

        return graph_module


class RemoveInputMutation(_pass.Transform):
    """Remove `aten.copy_.default` nodes that mutate module inputs.

    This pass is recommended to be used after the ``Functionalize`` pass.
    ``Functionalize`` adds `aten.copy_.default` nodes to the graph
    when it detects mutations to inputs. These nodes are not needed for ONNX export
    for inference. They could be useful for training.
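
    Concretely, a sketch of the pattern this pass looks for (not exact exporter
    output): a graph ending with an input "fix up" such as::

        %copy_ = call_function[target=torch.ops.aten.copy_.default](args = (%arg0, %new_a))

    has that `copy_` node erased, provided the node has no users and its first
    argument is a module input (``placeholder``) node.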
    """

    def _run(self, *args) -> torch.fx.GraphModule:
        for node in reversed(self.module.graph.nodes):
            if (
                node.op == "call_function"
                and node.target == torch.ops.aten.copy_.default
                and len(node.users) == 0
                and isinstance(node.args[0], torch.fx.Node)
                and node.args[0].op == "placeholder"
            ):
                # The node writes back into a graph input and nothing consumes its
                # result, so it is safe to drop for inference export.
                self.module.graph.erase_node(node)
        return self.module