
from typing import Optional, TYPE_CHECKING, Union

import torch

if TYPE_CHECKING:
    from torch.ao.quantization.qconfig import QConfig

__all__ = ["Linear"]


class Linear(torch.ao.nn.qat.Linear):
    r"""
    A linear module attached with FakeQuantize modules for weight,
    used for dynamic quantization aware training.

    We adopt the same interface as `torch.nn.Linear`; please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Linear
    for documentation.

    Similar to `torch.nn.Linear`, with FakeQuantize modules initialized to
    default.
    Nin_featuresout_featuresbiasqconfigr   devicedtypereturnc                     t         |   ||||||       t        j                  j                  j
                  j                  |      st        d      y )NzqDynamic QAT requires a memoryless observer.This means a MovingAverage observer with averaging constant equal to 1)super__init__torchaoquantizationr   _activation_is_memoryless
ValueError)selfr	   r
   r   r   r   r   	__class__s          V/home/dcms/DCMS/lib/python3.12/site-packages/torch/ao/nn/qat/dynamic/modules/linear.pyr   zLinear.__init__   sS     	lD'65Qxx$$,,FFwO[  P    )TNNN)__name__
__module____qualname____doc__intboolr   r   strr   r   r   __classcell__)r   s   @r   r   r      s    
  '+:>#  	
 )$ sC567 } 
 r   )typingr   r   r   r   torch.ao.quantization.qconfigr   __all__r   nnqatr    r   r   <module>r*      s=    1 1  5 *UXX[[__## r   