
# torch/ao/quantization/pt2e/export_utils.py
import types

import torch
import torch.nn.functional as F

from torch.ao.quantization.utils import _assert_and_get_unique_device


__all__ = [
    "model_is_exported",
]

_EXPORTED_TRAINING_ATTR = "_exported_training"


class _WrapperModule(torch.nn.Module):
    """Class to wrap a callable in an :class:`torch.nn.Module`. Use this if you
    are trying to export a callable.
    """

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, *args, **kwargs):
        """Simple forward that just calls the ``fn`` provided to :meth:`WrapperModule.__init__`."""
        return self.fn(*args, **kwargs)


def model_is_exported(m: torch.nn.Module) -> bool:
    """
    Return True if the `torch.nn.Module` was exported, False otherwise
    (e.g. if the model was FX symbolically traced or not traced at all).
    """
    return isinstance(m, torch.fx.GraphModule) and any(
        "val" in n.meta for n in m.graph.nodes
    )
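
# --------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). ``M`` below is a
# hypothetical nn.Module used only for illustration; the point is that
# `model_is_exported` keys off the "val" node metadata that `torch.export`
# attaches, which plain FX symbolic tracing does not:
#
#   ep = torch.export.export(M().eval(), (torch.randn(1, 3),))
#   model_is_exported(ep.module())                    # True
#   model_is_exported(torch.fx.symbolic_trace(M()))   # False
#   model_is_exported(M())                            # False (not a GraphModule)
# --------------------------------------------------------------------------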


def _replace_dropout(m: torch.fx.GraphModule, train_to_eval: bool):
    """
    Switch dropout patterns in the model between train and eval modes.

    Dropout has different behavior in train vs eval mode. For exported models,
    however, calling `model.train()` or `model.eval()` does not automatically switch
    the dropout behavior between the two modes, so here we need to rewrite the aten
    dropout patterns manually to achieve the same effect.

    See https://github.com/pytorch/pytorch/issues/103681.
    """
    # Avoid circular dependencies
    from .utils import _get_aten_graph_module_for_pattern

    # Needed to ensure subgraph matches are self-contained
    m.graph.eliminate_dead_code()
    m.recompile()

    for inplace in [False, True]:

        def dropout_train(x):
            return F.dropout(x, p=0.5, training=True, inplace=inplace)

        def dropout_eval(x):
            return F.dropout(x, p=0.5, training=False, inplace=inplace)

        example_inputs = (torch.randn(1),)
        if train_to_eval:
            match_pattern = _get_aten_graph_module_for_pattern(
                _WrapperModule(dropout_train), example_inputs
            )
            replacement_pattern = _get_aten_graph_module_for_pattern(
                _WrapperModule(dropout_eval), example_inputs
            )
        else:
            match_pattern = _get_aten_graph_module_for_pattern(
                _WrapperModule(dropout_eval), example_inputs
            )
            replacement_pattern = _get_aten_graph_module_for_pattern(
                _WrapperModule(dropout_train), example_inputs
            )

        from torch.fx.subgraph_rewriter import replace_pattern_with_filters

        replace_pattern_with_filters(
            m,
            match_pattern,
            replacement_pattern,
            match_filters=[],
            ignore_literals=True,
        )
        m.recompile()
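
# --------------------------------------------------------------------------
# Editor's note: the rewrite above is ordinary FX subgraph rewriting, with the
# pattern/replacement first lowered to aten ops via
# _get_aten_graph_module_for_pattern. A minimal sketch of the same mechanism
# using the public torch.fx API (names here are illustrative only):
#
#   import torch.fx as fx
#
#   class Toy(torch.nn.Module):
#       def forward(self, x):
#           return torch.relu(x)
#
#   def pattern(x):
#       return torch.relu(x)
#
#   def replacement(x):
#       return torch.sigmoid(x)
#
#   gm = fx.symbolic_trace(Toy())
#   fx.replace_pattern(gm, pattern, replacement)   # rewrites relu -> sigmoid in place
# --------------------------------------------------------------------------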


def _replace_batchnorm(m: torch.fx.GraphModule, train_to_eval: bool):
    """
    Switch batchnorm patterns in the model between train and eval modes.

    Batchnorm has different behavior in train vs eval mode. For exported models,
    however, calling `model.train()` or `model.eval()` does not automatically switch
    the batchnorm behavior between the two modes, so here we need to rewrite the aten
    batchnorm patterns manually to achieve the same effect.
    """
    # Avoid circular dependencies
    from .utils import _get_aten_graph_module_for_pattern

    # Needed to ensure subgraph matches are self-contained
    m.graph.eliminate_dead_code()
    m.recompile()

    def bn_train(
        x: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
    ):
        return F.batch_norm(
            x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=True
        )

    def bn_eval(
        x: torch.Tensor,
        bn_weight: torch.Tensor,
        bn_bias: torch.Tensor,
        bn_running_mean: torch.Tensor,
        bn_running_var: torch.Tensor,
    ):
        return F.batch_norm(
            x, bn_running_mean, bn_running_var, bn_weight, bn_bias, training=False
        )

    example_inputs = (
        torch.randn(1, 1, 3, 3),  # x
        torch.randn(1),  # bn_weight
        torch.randn(1),  # bn_bias
        torch.randn(1),  # bn_running_mean
        torch.randn(1),  # bn_running_var
    )

    device = _assert_and_get_unique_device(m)
    is_cuda = device is not None and device.type == "cuda"
    bn_train_aten = _get_aten_graph_module_for_pattern(
        _WrapperModule(bn_train),
        example_inputs,
        is_cuda,
    )
    bn_eval_aten = _get_aten_graph_module_for_pattern(
        _WrapperModule(bn_eval),
        example_inputs,
        is_cuda,
    )

    if train_to_eval:
        match_pattern = bn_train_aten
        replacement_pattern = bn_eval_aten
    else:
        match_pattern = bn_eval_aten
        replacement_pattern = bn_train_aten

    from torch.fx.subgraph_rewriter import replace_pattern_with_filters

    replace_pattern_with_filters(
        m,
        match_pattern,
        replacement_pattern,
        match_filters=[],
        ignore_literals=True,
    )
    m.recompile()


def _move_exported_model_to_eval(model: torch.fx.GraphModule):
    """
    Move an exported GraphModule to eval mode.

    This is equivalent to model.eval() but only for certain special ops like dropout, batchnorm.
    QAT users should call this before performing inference on the model.

    This call is idempotent; if the model is already in eval mode, nothing will happen.
    """
    is_training = getattr(model, _EXPORTED_TRAINING_ATTR, True)
    if not is_training:
        return model
    setattr(model, _EXPORTED_TRAINING_ATTR, False)
    _replace_dropout(model, train_to_eval=True)
    _replace_batchnorm(model, train_to_eval=True)
    return model


def _move_exported_model_to_train(model: torch.fx.GraphModule):
    """
    Move an exported GraphModule to train mode.

    This is equivalent to model.train() but only for certain special ops like dropout, batchnorm.
    QAT users should call this before performing training on the model.

    This call is idempotent; if the model is already in train mode, nothing will happen.
    """
    is_training = getattr(model, _EXPORTED_TRAINING_ATTR, False)
    if is_training:
        return model
    setattr(model, _EXPORTED_TRAINING_ATTR, True)
    _replace_dropout(model, train_to_eval=False)
    _replace_batchnorm(model, train_to_eval=False)
    return model
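
# --------------------------------------------------------------------------
# Editor's usage sketch for the two helpers above (illustrative; `MyQATModel`
# and `example_inputs` are hypothetical, standing for any module exported with
# torch.export, e.g. in a PT2E QAT flow):
#
#   exported_gm = torch.export.export(MyQATModel(), example_inputs).module()
#   _move_exported_model_to_train(exported_gm)   # aten dropout/BN use train behavior
#   # ... run QAT fine-tuning ...
#   _move_exported_model_to_eval(exported_gm)    # aten dropout/BN use eval behavior
#   # Both calls are idempotent thanks to the _exported_training attribute.
# --------------------------------------------------------------------------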


def _allow_exported_model_train_eval(model: torch.fx.GraphModule):
    """
    Allow users to call `model.train()` and `model.eval()` on an exported model,
    but with the effect of changing behavior between the two modes limited to special
    ops only, which are currently dropout and batchnorm.

    Note: This does not achieve the same effect as what `model.train()` and `model.eval()`
    does in eager models, but only provides an approximation. In particular, user code
    branching on `training` flag will not function correctly in general because the branch
    is already specialized at export time. Additionally, other ops beyond dropout and batchnorm
    that have different train/eval behavior will also not be converted properly.
    """

    def _train(self, mode: bool = True):
        if mode:
            _move_exported_model_to_train(self)
        else:
            _move_exported_model_to_eval(self)

    def _eval(self):
        _move_exported_model_to_eval(self)

    # Bind the replacements as methods on this particular GraphModule instance.
    model.train = types.MethodType(_train, model)
    model.eval = types.MethodType(_eval, model)
    return model
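
# --------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the original module): after
# patching, the familiar nn.Module calls toggle only dropout/batchnorm behavior.
# `MyModel` and `example_inputs` are hypothetical placeholders.
#
#   exported_gm = torch.export.export(MyModel(), example_inputs).module()
#   _allow_exported_model_train_eval(exported_gm)
#   exported_gm.train()   # switches aten dropout/BN patterns to train mode
#   exported_gm.eval()    # switches them back to eval mode
#   # Code that branched on `self.training` was already specialized at export
#   # time, so this is only an approximation of eager train()/eval().
# --------------------------------------------------------------------------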