
    VhG                        d Z ddlZddlmZmZ ddlZddlmc mc m	Z
 ddlmZ ddlmc mZ ddlmZ ddlmZ ddlmZ ddlmZ ddlmZmZmZ g d	Z G d
 de
j6                        Z G d de
j8                        Z G d de
j:                        Z G d de
j<                        Z G d de
j>                        Z G d de
j@                        Z y)z*Dynamically quantized convolution modules.    N)ClassVarOptional)Tensor)ops)_reverse_repeat_padding)	_size_1_t)_pair_single_triple)Conv1dConv2dConv3dConvTranspose1dConvTranspose2dConvTranspose3dc                   $    e Zd ZU dZej
                  Zeeej
                        e	d<   dZ
eeeej                           e	d<   dZeeeej                           e	d<   	 	 	 	 	 	 	 	 	 ddededed	ed
ededededef fdZd ZddededefdZ xZS )r   a  A dynamically quantized conv module with floating point tensors as inputs and outputs.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv1d` and :class:`~torch.ao.nn.quantized.dynamic.Conv1d`.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv1d` for other attributes.

    Examples::

        >>> # xdoctest: +SKIP
        >>> m = nn.quantized.dynamic.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 100)
        >>> output = m(input)

    """

    _FLOAT_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d
    _NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = None
    _NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t,
                 stride: _size_1_t = 1, padding: _size_1_t = 0, dilation: _size_1_t = 1,
                 groups: int = 1, bias: bool = True, padding_mode: str = "zeros",
                 device=None, dtype=None, reduce_range=True):
        warnings.warn(
            f"The current implementation of the {self._get_name()} module "
            "has poor numerical accuracy and its use is not recommended"
        )
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _single(kernel_size)
        stride = _single(stride)
        padding = padding if isinstance(padding, str) else _single(padding)
        dilation = _single(dilation)
        super().__init__(in_channels, out_channels, kernel_size, stride, padding,
                         dilation, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return "DynamicQuantizedConv1d"

    def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
        # Weights are pre-packed and quantized; the float input is quantized
        # dynamically inside the quantized op.
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        if self.padding_mode != "zeros":
            # F.pad expects (pad_left, pad_right); build it from the single
            # Conv1d padding value.
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding[:1])
            input = F.pad(input, _reversed_padding_repeated_twice, mode=self.padding_mode)
        return ops.quantized.conv1d_dynamic(input, self._packed_params, reduce_range)


class Conv2d(nnq.Conv2d):
    r"""A dynamically quantized conv module with floating point tensors as inputs and outputs.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv2d` and :class:`~torch.ao.nn.quantized.dynamic.Conv2d`.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv2d` for other attributes.

    Examples::

        >>> # xdoctest: +SKIP
        >>> # With square kernels and equal stride
        >>> m = nn.quantized.dynamic.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.dynamic.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)

    """

    _FLOAT_MODULE: ClassVar[type[nn.Conv2d]] = nn.Conv2d
    _NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = None
    _NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, padding_mode="zeros",
                 device=None, dtype=None):
        warnings.warn(
            f"The current implementation of the {self._get_name()} module "
            "has poor numerical accuracy and its use is not recommended"
        )
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super().__init__(in_channels, out_channels, kernel_size, stride, padding,
                         dilation, groups, bias, padding_mode, **factory_kwargs)

    def _get_name(self):
        return "DynamicQuantizedConv2d"

    def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        if self.padding_mode != "zeros":
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice, mode=self.padding_mode)
        return ops.quantized.conv2d_dynamic(input, self._packed_params, reduce_range)


class Conv3d(nnq.Conv3d):
    r"""A dynamically quantized conv module with floating point tensors as inputs and outputs.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.Conv3d` and :class:`~torch.ao.nn.quantized.dynamic.Conv3d`.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point

    See :class:`~torch.nn.Conv3d` for other attributes.

    Examples::

        >>> # xdoctest: +SKIP
        >>> # With square kernels and equal stride
        >>> m = nn.quantized.dynamic.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.quantized.dynamic.Conv3d(16, 33, (3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), dilation=(1, 2, 2))
        >>> input = torch.randn(20, 16, 56, 56, 56)
        >>> output = m(input)

    """

    _FLOAT_MODULE: ClassVar[type[nn.Conv3d]] = nn.Conv3d
    _NNIQAT_CONV_BN_MODULE: ClassVar[Optional[type[nn.Module]]] = None
    _NNI_CONV_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = None

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, padding_mode="zeros",
                 device=None, dtype=None):
        warnings.warn(
            f"The current implementation of the {self._get_name()} module "
            "has poor numerical accuracy and its use is not recommended"
        )
        assert padding_mode != "reflect", "Conv3d does not support reflection padding"
        factory_kwargs = {"device": device, "dtype": dtype}
        kernel_size = _triple(kernel_size)
        stride = _triple(stride)
        padding = _triple(padding)
        dilation = _triple(dilation)
        # _init takes the full argument list of the quantized conv base class,
        # including transposed=False and output_padding=0.
        super()._init(in_channels, out_channels, kernel_size, stride, padding,
                      dilation, False, _triple(0), groups, bias, padding_mode,
                      **factory_kwargs)

    def _get_name(self):
        return "DynamicQuantizedConv3d"

    def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, D, H, W)`!")
        if self.padding_mode != "zeros":
            _reversed_padding_repeated_twice = _reverse_repeat_padding(self.padding)
            input = F.pad(input, _reversed_padding_repeated_twice, mode=self.padding_mode)
        return ops.quantized.conv3d_dynamic(input, self._packed_params, reduce_range)


class ConvTranspose1d(nnq.ConvTranspose1d):
    r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose1d`.

    For special notes, please see :class:`~torch.ao.nn.quantized.dynamic.Conv1d`.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose1d` for other attributes.

    Examples::

        >>> # xdoctest: +SKIP
        >>> # With square kernels and equal stride
        >>> m = nndq.ConvTranspose1d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nndq.ConvTranspose1d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> output = m(input)
        >>> # exact output size can also be specified as an argument
        >>> downsample = nndq.Conv1d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nndq.ConvTranspose1d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12])
    """

    _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose1d]] = nn.ConvTranspose1d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode="zeros", device=None, dtype=None):
        warnings.warn(
            f"The current implementation of the {self._get_name()} module "
            "has poor numerical accuracy and its use is not recommended"
        )
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(in_channels, out_channels, kernel_size, stride, padding,
                         output_padding, groups, bias, dilation, padding_mode,
                         **factory_kwargs)

    def _get_name(self):
        return "DynamicQuantizedConvTranspose1d"

    def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
        if len(input.shape) != 3:
            raise ValueError("Input shape must be `(N, C, L)`!")
        return torch.ops.quantized.conv_transpose1d_dynamic(
            input, self._packed_params, reduce_range
        )


class ConvTranspose2d(nnq.ConvTranspose2d):
    r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose2d`.

    For special notes, please see :class:`~torch.ao.nn.quantized.dynamic.Conv2d`.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose2d` for other attributes.

    Examples::

        >>> # xdoctest: +SKIP
        >>> # With square kernels and equal stride
        >>> m = nndq.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nndq.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> output = m(input)
        >>> # exact output size can also be specified as an argument
        >>> downsample = nndq.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nndq.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])
    """

    _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose2d]] = nn.ConvTranspose2d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode="zeros", device=None, dtype=None):
        warnings.warn(
            f"The current implementation of the {self._get_name()} module "
            "has poor numerical accuracy and its use is not recommended"
        )
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(in_channels, out_channels, kernel_size, stride, padding,
                         output_padding, groups, bias, dilation, padding_mode,
                         **factory_kwargs)

    def _get_name(self):
        return "DynamicQuantizedConvTranspose2d"

    def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
        if len(input.shape) != 4:
            raise ValueError("Input shape must be `(N, C, H, W)`!")
        return ops.quantized.conv_transpose2d_dynamic(
            input, self._packed_params, reduce_range
        )


class ConvTranspose3d(nnq.ConvTranspose3d):
    r"""A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs.

    For details on input arguments, parameters, and implementation see
    :class:`~torch.nn.ConvTranspose3d`.

    For special notes, please see :class:`~torch.ao.nn.quantized.dynamic.Conv3d`.

    Attributes:
        weight (Tensor):     packed tensor derived from the learnable weight
                             parameter.
        scale (Tensor):      scalar for the output scale
        zero_point (Tensor): scalar for the output zero point
    See :class:`~torch.nn.ConvTranspose3d` for other attributes.

    Examples::

        >>> # xdoctest: +SKIP
        >>> # With cubic kernels and equal stride
        >>> m = nndq.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-cubic kernels and unequal stride and with padding
        >>> m = nndq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2))
        >>> output = m(input)
        >>> # exact output size can also be specified as an argument
        >>> downsample = nndq.Conv3d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nndq.ConvTranspose3d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12, 12])
    """

    _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose3d]] = nn.ConvTranspose3d

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode="zeros", device=None, dtype=None):
        warnings.warn(
            f"The current implementation of the {self._get_name()} module "
            "has poor numerical accuracy and its use is not recommended"
        )
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(in_channels, out_channels, kernel_size, stride, padding,
                         output_padding, groups, bias, dilation, padding_mode,
                         **factory_kwargs)

    def _get_name(self):
        return "DynamicQuantizedConvTranspose3d"

    def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor:
        if len(input.shape) != 5:
            raise ValueError("Input shape must be `(N, C, T, H, W)`!")
        return ops.quantized.conv_transpose3d_dynamic(
            input, self._packed_params, reduce_range
        )
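
# A minimal usage sketch, kept as a comment so nothing executes at import time.
# It restates what the docstrings above already imply: these modules hold
# pre-quantized, packed weights and take/return ordinary float tensors,
# quantizing activations on the fly inside the quantized op. The alias
# ``nnqd`` and the tensor shapes below are illustrative choices, not part of
# this file.
#
#     import torch
#     import torch.ao.nn.quantized.dynamic as nnqd
#
#     m = nnqd.Conv2d(16, 33, 3, stride=2)   # emits the accuracy warning above
#     x = torch.randn(20, 16, 50, 100)       # float input
#     y = m(x)                               # float output
#
#     # forward() also accepts ``reduce_range`` (default True), which trades a
#     # bit of activation precision for overflow safety on some backends.
#     y = m(x, reduce_range=False)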