
    Vh%                     :   d dl mZmZ d dlZd dlmZ d dlmZ d dlm	Z	m
Z
mZ d dlmZmZmZ g dZ G d dej"                  j$                  j&                        Z G d	 d
eej(                        Z G d deej*                        Z G d deej,                        Zy)    )ClassVarUnionN)_FusedModule)	_size_1_t	_size_2_t	_size_3_t)_pair_single_triple)Conv1dConv2dConv3dc                       e Zd ZU eeej                  j                  j                        e	d<   	 	 	 dde
de
dee
df   dee
df   deeee
df   f   d	ee
df   d
edee
df   de
dededdfdZd Zedd       Zd Zy)_ConvNd_FLOAT_MODULENin_channelsout_channelskernel_size.stridepaddingdilation
transposedoutput_paddinggroupsbiaspadding_modereturnc                     ||d}t        j                  j                  j                  j                  | |||||||||	|
|fi | |sJ d       || _        |j                  |      | _        y )N)devicedtypez'qconfig must be provided for QAT module)factory_kwargs)nnmodulesconvr   __init__qconfigweightweight_fake_quant)selfr   r   r   r   r   r   r   r   r   r   r   r&   r   r    r!   s                   L/home/dcms/DCMS/lib/python3.12/site-packages/torch/ao/nn/qat/modules/conv.pyr%   z_ConvNd.__init__   s    " %+U;


((	
 	
 AAAw!(~!N    c                 n    | j                  || j                  | j                        | j                        S N_conv_forwardr(   r'   r   r)   inputs     r*   forwardz_ConvNd.forward6   *    !!%)?)?)LdiiXXr+   c                 R   t        |      | j                  k(  s.J d| j                  z   dz   | j                  j                  z          t        |d      sJ d       |j                  sJ d       t        t        |      t              r|d   }|j                  } | |j                  |j                  |j                  |j                  |j                  |j                  |j                  |j                  du|j                  |
      }|j                   |_        |j                  |_        |S )	zCreate a qat module from a float module

        Args:
           `mod`: a float module, either produced by torch.ao.quantization utilities
           or directly from user
        zqat.z.from_float only works for r&   z,Input float module must have qconfig definedz,Input float module must have a valid qconfigr   N)r   r   r   r   r   r   r&   )typer   __name__hasattrr&   
issubclassr   r   r   r   r   r   r   r   r   r   r'   )clsmoduse_precomputed_fake_quantr&   qat_convs        r*   
from_floatz_ConvNd.from_float9   s    CyC--- 	
ll+, (()	
- sI&V(VV&{{JJJ{d3i.a&C++OOOO::KK\\::%))
 **r+   c                    t        |       }|j                  | j                  | j                  | j                  | j
                  | j                  | j                  | j                  | j                  du| j                  	      }t        j                  j                  | j                  j                               |_        | j                  <t        j                  j                  | j                  j                               |_	        t!        |t"              r^|g}t%        |d      sJ |j'                         }|j)                  |        |j*                  | }|j-                  | j.                         |S |S )zThis works for both single qat conv, and the qat conv - relu modules
        to convert the qat module to a floating point module
        N_FLOAT_RELU_MODULE)r5   _FLOAT_CONV_MODULEr   r   r   r   r   r   r   r   r   torchr"   	Parameterr'   detachr8   r   r7   r?   appendr   traintraining)r)   r9   r$   r#   relufuseds         r*   to_floatz_ConvNd.to_float\   s#    4j%%KKLLMMKKIIT!

 hh((););)=>99 **499+;+;+=>DIc<(fG3 4555))+DNN4 %C%%w/EKK&LKr+   )NNNF)r6   
__module____qualname__r   r5   r"   r#   r$   r   __annotations__inttupler   strboolr%   r2   staticmethodr=   rI    r+   r*   r   r      s   D!8!89:: #O#O #O 38_	#O
 c3h#O sE#s(O+,#O S/#O #O c3h#O #O #O #O  
!#OJY    Dr+   r   c                       e Zd ZU dZej
                  Zeeej
                        e	d<   ej
                  Z
eeej
                        e	d<   	 	 	 	 	 	 	 	 	 ddedededed	eeef   d
ededededdf fdZed fd	       Z xZS )r   aZ  
    A Conv1d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as :class:`~torch.nn.Conv1d`

    Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    r   r@   Nr   r   r   r   r   r   r   r   r   r   c                     t        |      }t        |      }t        |t              r|n
t        |      }t        |      }t        |   ||||||dt        d      |||	|
||       y NFr   )r   r   r   r   r   r   r   r   r&   r   r    )r
   
isinstancerP   superr%   r)   r   r   r   r   r   r   r   r   r   r&   r   r    kernel_size_stride_padding_	dilation_	__class__s                    r*   r%   zConv1d.__init__   w     {+&/(#67GG<LH%	"1:% 	 	
r+   c                 (    t         |   | ||      S N)r;   rX   r=   r9   r:   r;   r^   s      r*   r=   zConv1d.from_float   "    w!1K " 
 	
r+   	   r   rf   rf   TzerosNNNrJ   )r6   rK   rL   __doc__r"   r   r   r   r5   rM   r@   rN   r   r   rP   rQ   r%   classmethodr=   __classcell__r^   s   @r*   r   r   |   s     02yyM8DO,846IIbii1= )*#"
"
 "
 	"

 "
 sI~&"
 "
 "
 "
 "
 
"
H 
 
r+   r   c                       e Zd ZU dZej
                  Zeeej
                        e	d<   ej
                  Z
eeej
                        e	d<   	 	 	 	 	 	 	 	 	 ddedededed	eeef   d
ededededdf fdZd Zed fd	       Z xZS )r   a  
    A Conv2d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv2d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
    for documentation.

    Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    r   r@   Nr   r   r   r   r   r   r   r   r   r   c                     t        |      }t        |      }t        |t              r|n
t        |      }t        |      }t        |   ||||||dt        d      |||	|
||       y rV   )r	   rW   rP   rX   r%   rY   s                    r*   r%   zConv2d.__init__   su     [)-(#67E'N(O	 8% 	 	
r+   c                 n    | j                  || j                  | j                        | j                        S r-   r.   r0   s     r*   r2   zConv2d.forward   r3   r+   c                 (    t         |   | ||      S ra   rb   rc   s      r*   r=   zConv2d.from_float   rd   r+   re   rJ   )r6   rK   rL   rh   r"   r   r   r   r5   rM   r@   rN   r   r   rP   rQ   r%   r2   ri   r=   rj   rk   s   @r*   r   r           02yyM8DO,846IIbii1= )*#"
"
 "
 	"

 "
 sI~&"
 "
 "
 "
 "
 
"
HY 
 
r+   r   c                       e Zd ZU dZej
                  Zeeej
                        e	d<   ej
                  Z
eeej
                        e	d<   	 	 	 	 	 	 	 	 	 ddedededed	eeef   d
ededededdf fdZd Zed fd	       Z xZS )r   a  
    A Conv3d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv3d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
    for documentation.

    Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    r   r@   Nr   r   r   r   r   r   r   r   r   r   c                     t        |      }t        |      }t        |t              r|n
t        |      }t        |      }t        |   ||||||dt        d      |||	|
||       y rV   )r   rW   rP   rX   r%   rY   s                    r*   r%   zConv3d.__init__	  r_   r+   c                 n    | j                  || j                  | j                        | j                        S r-   r.   r0   s     r*   r2   zConv3d.forward-  r3   r+   c                 (    t         |   | ||      S ra   rb   rc   s      r*   r=   zConv3d.from_float0  rd   r+   re   rJ   )r6   rK   rL   rh   r"   r   r   r   r5   rM   r@   rN   r   r   rP   rQ   r%   r2   ri   r=   rj   rk   s   @r*   r   r      rp   r+   r   )typingr   r   rA   torch.nnr"   torch.ao.nn.intrinsicr   torch.nn.common_typesr   r   r   torch.nn.modules.utilsr	   r
   r   __all__r#   r$   r   r   r   r   rS   r+   r*   <module>r{      sy    "   . A A : : )kbjjoo%% k\8
Wbii 8
v=
Wbii =
@=
Wbii =
r+   
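

# A minimal usage sketch, not part of the upstream module: it shows how a float
# nn.Conv2d is swapped for the QAT Conv2d defined above via from_float, run with
# fake-quantized weights, and converted back with to_float. It assumes the
# standard torch.ao.quantization helper get_default_qat_qconfig; the "fbgemm"
# backend string and the tensor shapes are illustrative choices only.
if __name__ == "__main__":
    from torch.ao.quantization import get_default_qat_qconfig

    float_conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
    float_conv.qconfig = get_default_qat_qconfig("fbgemm")

    # from_float copies the float weight/bias and attaches the weight fake-quant
    # module built from the qconfig's `weight` entry.
    qat_conv = Conv2d.from_float(float_conv)
    out = qat_conv(torch.randn(1, 3, 16, 16))  # forward uses fake-quantized weights
    print(out.shape)

    # After (simulated) QAT, recover a plain float nn.Conv2d.
    restored = qat_conv.to_float()
    print(type(restored))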