
from typing import Any, Optional

import torch
from torch import nn
from torch.ao.quantization import QConfig


__all__ = ["QuantStub", "DeQuantStub", "QuantWrapper"]


class QuantStub(nn.Module):
    r"""Quantize stub module; before calibration this is the same as an observer,
    and it will be swapped for `nnq.Quantize` in `convert`.

    Args:
        qconfig: quantization configuration for the tensor;
            if qconfig is not provided, we will get the qconfig from the parent modules
    """

    def __init__(self, qconfig: Optional[QConfig] = None) -> None:
        super().__init__()
        if qconfig:
            self.qconfig = qconfig

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x


class DeQuantStub(nn.Module):
    r"""Dequantize stub module; before calibration this is the same as identity,
    and it will be swapped for `nnq.DeQuantize` in `convert`.

    Args:
        qconfig: quantization configuration for the tensor;
            if qconfig is not provided, we will get the qconfig from the parent modules
    """

    def __init__(self, qconfig: Optional[Any] = None) -> None:
        super().__init__()
        if qconfig:
            self.qconfig = qconfig

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x


class QuantWrapper(nn.Module):
    r"""A wrapper class that wraps the input module, adds QuantStub and
    DeQuantStub, and surrounds the call to the module with calls to the quant
    and dequant modules.

    This is used by the `quantization` utility functions to add the quant and
    dequant modules. Before `convert`, `QuantStub` is just an observer: it
    observes the input tensor. After `convert`, `QuantStub` is swapped for
    `nnq.Quantize`, which does the actual quantization. Similarly for
    `DeQuantStub`.
    """

    quant: QuantStub
    dequant: DeQuantStub
    module: nn.Module

    def __init__(self, module: nn.Module) -> None:
        super().__init__()
        # Reuse the wrapped module's qconfig (if any) for the quant/dequant stubs.
        qconfig = getattr(module, "qconfig", None)
        self.add_module("quant", QuantStub(qconfig))
        self.add_module("dequant", DeQuantStub(qconfig))
        self.add_module("module", module)
        # Keep the wrapper's training mode in sync with the wrapped module.
        self.train(module.training)

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        X = self.quant(X)
        X = self.module(X)
        return self.dequant(X)
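

# Usage sketch (illustrative only; not part of the original module): how
# QuantWrapper is typically combined with eager-mode post-training static
# quantization. The toy nn.Linear model, the random calibration batch, and
# the "fbgemm" backend below are assumptions made solely for this example.
if __name__ == "__main__":
    float_model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    # Attach a qconfig so QuantWrapper picks it up via getattr(module, "qconfig", None).
    float_model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
    wrapped = QuantWrapper(float_model)
    wrapped.eval()

    # prepare() replaces QuantStub with an observer that records activation ranges.
    prepared = torch.ao.quantization.prepare(wrapped)
    prepared(torch.randn(2, 4))  # calibration pass

    # convert() swaps QuantStub -> nnq.Quantize and DeQuantStub -> nnq.DeQuantize.
    quantized = torch.ao.quantization.convert(prepared)
    print(quantized)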