
from typing import Any, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.common_types import _size_1_t

from .utils import ReferenceQuantizedModule

__all__ = ["Conv1d", "Conv2d", "Conv3d",
           "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d"]


class _ConvNd(torch.nn.modules.conv._ConvNd, ReferenceQuantizedModule):
    """A reference version of nn.quantized.Conv2d
    we will not pack the parameters in this module, since weight packing is an
    optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
    this is useful when users want to use this module in other backends like Glow.
    """

    __annotations__ = {"bias": Optional[torch.Tensor]}
    _IS_REFERENCE = True

    @staticmethod
    def from_float(cls, float_conv, weight_qparams):
        qref_conv = cls(
            float_conv.in_channels, float_conv.out_channels, float_conv.kernel_size,
            float_conv.stride, float_conv.padding, float_conv.dilation,
            float_conv.groups, float_conv.bias is not None, float_conv.padding_mode,
            device=float_conv.weight.device, dtype=float_conv.weight.dtype,
            weight_qparams=weight_qparams,
        )
        qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
        if float_conv.bias is not None:
            qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
        return qref_conv


class Conv1d(_ConvNd, nn.Conv1d):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t,
                 stride: _size_1_t = 1, padding: _size_1_t = 0, dilation: _size_1_t = 1,
                 groups: int = 1, bias: bool = True, padding_mode: str = "zeros",
                 device=None, dtype=None,
                 weight_qparams: Optional[dict[str, Any]] = None):
        nn.Conv1d.__init__(self, in_channels, out_channels, kernel_size, stride, padding,
                           dilation, groups, bias, padding_mode, device, dtype)
        self._init_weight_qparams(weight_qparams, device)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        we have:
        w(float) -- quant - dequant \
        x(float) ------------- F.conv1d ---

        In the full model, we will see
        w(float) -- quant - *dequant \
        x -- quant --- *dequant --  *F.conv1d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv1d
        """
        weight_quant_dequant = self.get_weight()
        result = F.conv1d(x, weight_quant_dequant, self.bias, self.stride,
                          self.padding, self.dilation, self.groups)
        return result

    def _get_name(self):
        return "QuantizedConv1d(Reference)"

    @classmethod
    def from_float(cls, float_conv, weight_qparams):
        return _ConvNd.from_float(cls, float_conv, weight_qparams)
d Zed        Zy)
r	   Nr   c                 |    t         j                  j                  | |||||||||	|
|       | j                  ||
       y r5   )r    r	   r6   r7   r8   s                r'   r6   zConv2d.__init__v   r:   r)   r;   r<   c           	          | j                         }t        j                  ||| j                  | j                  | j
                  | j                  | j                        }|S )aR  
        we have:
        w(float) -- quant - dequant         x(float) ------------- F.conv2d ---

        In the full model, we will see
        w(float) -- quant - *dequant         x -- quant --- *dequant --  *F.conv2d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv2d
        )r>   r?   conv2dr   r   r   r   r   rA   s       r'   rD   zConv2d.forward   rE   r)   c                      y)NzQuantizedConv2d(Reference)r2   rG   s    r'   rH   zConv2d._get_name   rI   r)   c                 0    t         j                  | ||      S r5   rK   rL   s      r'   r(   zConv2d.from_float   rM   r)   rN   r*   r+   r,   r   rS   rR   r   r6   r   r.   rD   rH   rT   r(   r2   r)   r'   r	   r	   u   q     37: !c3h0:> %,, ., C Cr)   r	   c                       e Zd Z	 	 	 	 	 	 	 	 	 d	deeeef      fdZdej                  dej                  fdZ
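
# Illustrative equivalence (a sketch, not an additional API): as the docstring above states,
# the reference module runs a plain float convolution over the quant-dequantized weight, so
#
#   w_qdq = ref_conv.get_weight()  # weight after quantize-dequantize with weight_qparams
#   y_ref = ref_conv(x)
#   y_chk = F.conv2d(x, w_qdq, ref_conv.bias, ref_conv.stride,
#                    ref_conv.padding, ref_conv.dilation, ref_conv.groups)
#
# should yield the same tensor; `ref_conv` here is a Conv2d built via `from_float` above.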
d Zed        Zy)
r
   Nr   c                 |    t         j                  j                  | |||||||||	|
|       | j                  ||
       y r5   )r    r
   r6   r7   r8   s                r'   r6   zConv3d.__init__   r:   r)   r;   r<   c           	          | j                         }t        j                  ||| j                  | j                  | j
                  | j                  | j                        }|S )aR  
        we have:
        w(float) -- quant - dequant         x(float) ------------- F.conv3d ---

        In the full model, we will see
        w(float) -- quant - *dequant         x -- quant --- *dequant --  *F.conv3d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv3d
        )r>   r?   conv3dr   r   r   r   r   rA   s       r'   rD   zConv3d.forward   rE   r)   c                      y)NzQuantizedConv3d(Reference)r2   rG   s    r'   rH   zConv3d._get_name   rI   r)   c                 0    t         j                  | ||      S r5   rK   rL   s      r'   r(   zConv3d.from_float   rM   r)   rN   r[   r2   r)   r'   r
   r
      r\   r)   r


class _ConvTransposeNd(_ConvNd, torch.nn.modules.conv._ConvTransposeNd):
    """A reference version of nn.quantized.ConvTranspose2d
    we will not pack the parameters in this module, since weight packing is an
    optimization for quantized backends supported in PyTorch (fbgemm/qnnpack),
    this is useful when users want to use this module in other backends like Glow.
    """

    @staticmethod
    def from_float(cls, float_conv, weight_qparams):
        qref_conv = cls(
            float_conv.in_channels, float_conv.out_channels, float_conv.kernel_size,
            float_conv.stride, float_conv.padding, float_conv.output_padding,
            float_conv.groups, float_conv.bias is not None, float_conv.dilation,
            float_conv.padding_mode,
            device=float_conv.weight.device, dtype=float_conv.weight.dtype,
            weight_qparams=weight_qparams,
        )
        qref_conv.weight = torch.nn.Parameter(float_conv.weight.detach())
        if float_conv.bias is not None:
            qref_conv.bias = torch.nn.Parameter(float_conv.bias.detach())
        return qref_conv
	 ddej                  deee      dej                  fdZd Zed        Zy)r   Nr   r   r   r   r   rf   r   r   r   r   r   c                 ~    t         j                  j                  | |||||||||	|
||       | j                  ||       y r5   )r    r   r6   r7   r9   r   r   r   r   r   rf   r   r   r   r   r   r   r   s                 r'   r6   zConvTranspose1d.__init__  Q      	##	
 	!!.&9r)   r;   output_sizer<   c           
      ~   t        | j                  t              sJ | j                  t        || j
                  | j                  | j                  | j                        }| j                         }t        j                  ||| j                  | j
                  | j                  || j                  | j                        }|S )ac  
        we have:
        w(float) -- quant - dequant         x(float) ------------- F.convTranspose1d ---
        In the full model, we will see
        w(float) -- quant - *dequant         x -- quant --- *dequant --  *F.convTranspose1d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv1d
        )
isinstancer   tuple_output_paddinginputr   r   r   r>   r?   conv_transpose1dr   r   r9   r;   rk   rf   rB   rC   s         r'   rD   zConvTranspose1d.forward3       $,,... --KKLLMM
  $0## IIKKLLKKMM	
 r)   c                      y)Nz#QuantizedConvTranspose1d(Reference)r2   rG   s    r'   rH   zConvTranspose1d._get_nameY      4r)   c                 0    t         j                  | ||      S r5   rd   r(   rL   s      r'   r(   zConvTranspose1d.from_float\      **3
NKKr)   
r   r   r   r   Tr   rO   NNNr5   )r*   r+   r,   rP   r   rQ   rR   r   rS   r   r6   r   r.   listrD   rH   rT   r(   r2   r)   r'   r   r     s     $%#37:: : 	:
 : : ": : : : : !c3h0:D CG$$,4T#Y,?$	$L5 L Lr)   r   c                       e Zd Z	 	 	 	 	 	 	 	 	 	 d
deeeef      fdZ	 ddej                  dee
e      dej                  fdZd Zed	        Zy)r   Nr   c                 ~    t         j                  j                  | |||||||||	|
||       | j                  ||       y r5   )r    r   r6   r7   ri   s                 r'   r6   zConvTranspose2d.__init__b  rj   r)   r;   rk   r<   c           
      ~   t        | j                  t              sJ | j                  t        || j
                  | j                  | j                  | j                        }| j                         }t        j                  ||| j                  | j
                  | j                  || j                  | j                        }|S )ac  
        we have:
        w(float) -- quant - dequant         x(float) ------------- F.convTranspose2d ---
        In the full model, we will see
        w(float) -- quant - *dequant         x -- quant --- *dequant --  *F.convTranspose2d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv2d
        )rm   r   rn   ro   rp   r   r   r   r>   r?   conv_transpose2dr   r   rr   s         r'   rD   zConvTranspose2d.forward  s     $,,... --KKLLMM
  $0## IIKKLLKKMM	
 r)   c                      y)Nz#QuantizedConvTranspose2d(Reference)r2   rG   s    r'   rH   zConvTranspose2d._get_name  ru   r)   c                 0    t         j                  | ||      S r5   rw   rL   s      r'   r(   zConvTranspose2d.from_float  rx   r)   ry   r5   r*   r+   r,   r   rS   rR   r   r6   r   r.   rz   rP   rD   rH   rT   r(   r2   r)   r'   r   r   a  s     37: !c3h0:D CG%%,4T#Y,?%	%N5 L Lr)   r   c                       e Zd Z	 	 	 	 	 	 	 	 	 	 d
deeeef      fdZ	 ddej                  dee
e      dej                  fdZd Zed	        Zy)r   Nr   c                 ~    t         j                  j                  | |||||||||	|
||       | j                  ||       y r5   )r    r   r6   r7   ri   s                 r'   r6   zConvTranspose3d.__init__  rj   r)   r;   rk   r<   c           
      ~   t        | j                  t              sJ | j                  t        || j
                  | j                  | j                  | j                        }| j                         }t        j                  ||| j                  | j
                  | j                  || j                  | j                        }|S )ac  
        we have:
        w(float) -- quant - dequant         x(float) ------------- F.convTranspose3d ---
        In the full model, we will see
        w(float) -- quant - *dequant         x -- quant --- *dequant --  *F.convTranspose3d --- *quant - dequant
        and the backend should be able to fuse the ops with `*` into a quantized conv3d
        )rm   r   rn   ro   rp   r   r   r   r>   r?   conv_transpose3dr   r   rr   s         r'   rD   zConvTranspose3d.forward  rs   r)   c                      y)Nz#QuantizedConvTranspose3d(Reference)r2   rG   s    r'   rH   zConvTranspose3d._get_name  ru   r)   c                 0    t         j                  | ||      S r5   rw   rL   s      r'   r(   zConvTranspose3d.from_float  rx   r)   ry   r5   r   r2   r)   r'   r   r     s     37: !c3h0:D CG$$,4T#Y,?$	$L5 L Lr)   r   )typingr   r   r   torch.nnr    torch.nn.functional
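
# Note (an assumption mirroring nn.ConvTranspose*): the optional `output_size` argument of
# the transposed variants only resolves the output-shape ambiguity when stride > 1, via
# `_output_padding`, e.g. `ref_deconv(x, output_size=[h_out, w_out])` for the 2d case.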
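
# End-to-end sketch (illustrative only; names other than the classes above are assumptions):
#
#   float_conv = nn.Conv2d(3, 8, kernel_size=3)
#   ref_conv = Conv2d.from_float(float_conv, weight_qparams)  # weight_qparams as sketched above
#   y = ref_conv(torch.randn(1, 3, 32, 32))  # float output from the fake-quantized weight
#
# Lowering backends are expected to pattern-match the surrounding quant/dequant ops and fuse
# them with the float convolution into a true quantized convolution.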