
import copy
import typing_extensions
import warnings
from typing import Any, Optional, Union

import torch
from torch.fx import GraphModule
from torch.fx.graph_module import _USER_PRESERVED_ATTRIBUTES_KEY

from .backend_config import BackendConfig, get_tensorrt_backend_config  # noqa: F401
from .fx.convert import convert
from .fx.custom_config import (
    ConvertCustomConfig,
    FuseCustomConfig,
    PrepareCustomConfig,
)
from .fx.fuse import fuse
from .fx.graph_module import ObservedGraphModule  # noqa: F401
from .fx.prepare import prepare
from .fx.tracer import QuantizationTracer, Scope, ScopeContextManager  # noqa: F401
from .fx.utils import (
    get_custom_module_class_keys,  # noqa: F401
    get_skipped_module_name_and_classes,
)
from .qconfig_mapping import QConfigMapping
from .utils import DEPRECATION_WARNING


def attach_preserved_attrs_to_model(
    model: Union[GraphModule, torch.nn.Module],
    preserved_attrs: dict[str, Any],
) -> None:
    """Store preserved attributes to the model.meta so that it can be preserved during deepcopy"""
    model.meta[_USER_PRESERVED_ATTRIBUTES_KEY] = copy.copy(preserved_attrs)
    # also set the preserved attributes on the model directly, so that user code can
    # keep accessing them the same way as before fx graph mode quantization
    for attr_name, attr in model.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
        setattr(model, attr_name, attr)


def _check_is_graph_module(model: torch.nn.Module) -> None:
    if not isinstance(model, GraphModule):
        raise ValueError(
            "input model must be a GraphModule, Got type:"
            + str(type(model))
            + " Please make "
            + "sure to follow the tutorials."
        )


def _attach_meta_to_node_if_not_exist(model: GraphModule) -> None:
    """Attach meta field to all nodes of the graph if it does not exist,
    meta field is a field that stores some meta information about the node, such
    as dtype and shape information for output of the node, this only exists
    if the program is captured by make_fx (used in quantize_pt2e flow), if
    the program is captured by torch.fx symbolic tracing, this field may not exist,
    so we add it here to avoid checking this all over the places
    """
    for node in model.graph.nodes:
        if not hasattr(node, "meta"):
            node.meta = {}


def _swap_ff_with_fxff(model: torch.nn.Module) -> None:
    r"""Swap FloatFunctional with FXFloatFunctional"""
    modules_to_swap = []
    for name, module in model.named_children():
        if isinstance(module, torch.ao.nn.quantized.FloatFunctional):
            modules_to_swap.append(name)
        else:
            _swap_ff_with_fxff(module)

    for name in modules_to_swap:
        del model._modules[name]
        model._modules[name] = torch.ao.nn.quantized.FXFloatFunctional()


def _fuse_fx(
    model: GraphModule,
    is_qat: bool,
    fuse_custom_config: Union[FuseCustomConfig, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""Internal helper function to fuse modules in preparation for quantization

    Args:
        model: GraphModule object from symbolic tracing (torch.fx.symbolic_trace)
    """
    _check_is_graph_module(model)
    return fuse(model, is_qat, fuse_custom_config, backend_config)


def _prepare_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any]],
    is_qat: bool,
    example_inputs: tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, dict[str, Any], None] = None,
    _equalization_config: Optional[Union[QConfigMapping, dict[str, Any]]] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
    is_standalone_module: bool = False,
) -> GraphModule:
    r"""Internal helper function for prepare_fx

    Args:
      `model`, `qconfig_mapping`, `prepare_custom_config`, `_equalization_config`:
      see docs for :func:`~torch.ao.quantization.prepare_fx`
      `is_standalone_module`: a boolean flag that indicates whether we are
      quantizing a standalone module or not, a standalone module
      is a submodule of the parent module that is not inlined in the
      forward graph of the parent module,
      the way we quantize standalone module is described in:
      :func:`~torch.ao.quantization._prepare_standalone_module_fx`
    """
    if prepare_custom_config is None:
        prepare_custom_config = PrepareCustomConfig()
    if _equalization_config is None:
        _equalization_config = QConfigMapping()

    if isinstance(prepare_custom_config, dict):
        warnings.warn(
            "Passing a prepare_custom_config_dict to prepare is deprecated and will not "
            "be supported in a future version. Please pass in a PrepareCustomConfig instead.",
            FutureWarning,
            stacklevel=2,
        )
        prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)

    # swap FloatFunctional with FXFloatFunctional
    _swap_ff_with_fxff(model)

    skipped_module_names, skipped_module_classes = get_skipped_module_name_and_classes(
        prepare_custom_config, is_standalone_module
    )
    preserved_attr_names = prepare_custom_config.preserved_attributes
    preserved_attrs = {
        attr: getattr(model, attr)
        for attr in preserved_attr_names
        if hasattr(model, attr)
    }

    # symbolically trace the model
    tracer = QuantizationTracer(skipped_module_names, skipped_module_classes)
    graph_module = GraphModule(model, tracer.trace(model))
    _attach_meta_to_node_if_not_exist(graph_module)

    fuse_custom_config = FuseCustomConfig().set_preserved_attributes(
        prepare_custom_config.preserved_attributes
    )
    graph_module = _fuse_fx(graph_module, is_qat, fuse_custom_config, backend_config)
    prepared = prepare(
        graph_module,
        qconfig_mapping,
        is_qat,
        tracer.node_name_to_scope,
        example_inputs=example_inputs,
        prepare_custom_config=prepare_custom_config,
        _equalization_config=_equalization_config,
        backend_config=backend_config,
        is_standalone_module=is_standalone_module,
    )

    attach_preserved_attrs_to_model(prepared, preserved_attrs)
    return prepared


def _prepare_standalone_module_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any]],
    is_qat: bool,
    example_inputs: tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""[Internal use only] Prepare a standalone module, so that it can be used when quantizing the
    parent module.
    standalone_module means it is a submodule that is not inlined in the parent module,
    and will be quantized separately as one unit.

    How the standalone module is observed is specified by `input_quantized_idxs` and
    `output_quantized_idxs` in the prepare_custom_config for the standalone module

    Returns:

        * model(GraphModule): prepared standalone module. It has these attributes in
          model.meta:

            * `standalone_module_input_quantized_idxs(List[Int])`: a list of
              indices for the graph input that is expected to be quantized,
              same as input_quantized_idxs configuration provided
              for the standalone module
            * `standalone_module_output_quantized_idxs(List[Int])`: a list of
              indices for the graph output that is quantized,
              same as output_quantized_idxs configuration provided
              for the standalone module

    """
    return _prepare_fx(
        model,
        qconfig_mapping,
        is_qat,
        example_inputs,
        prepare_custom_config,
        backend_config=backend_config,
        is_standalone_module=True,
    )


def fuse_fx(
    model: torch.nn.Module,
    fuse_custom_config: Union[FuseCustomConfig, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""Fuse modules like conv+bn, conv+bn+relu etc, model must be in eval mode.
    Fusion rules are defined in torch.ao.quantization.fx.fusion_pattern.py

    Args:

        * `model` (torch.nn.Module): a torch.nn.Module model
        * `fuse_custom_config` (FuseCustomConfig): custom configurations for fuse_fx.
            See :class:`~torch.ao.quantization.fx.custom_config.FuseCustomConfig` for more details

    Example::

        from torch.ao.quantization import fuse_fx

        m = Model().eval()
        m = fuse_fx(m)

    """
    if fuse_custom_config is None:
        fuse_custom_config = FuseCustomConfig()

    if isinstance(fuse_custom_config, dict):
        warnings.warn(
            "Passing a fuse_custom_config_dict to fuse is deprecated and will not "
            "be supported in a future version. Please pass in a FuseCustomConfig instead.",
            FutureWarning,
            stacklevel=2,
        )
        fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)

    torch._C._log_api_usage_once("quantization_api.quantize_fx.fuse_fx")
    preserved_attr_names = fuse_custom_config.preserved_attributes
    preserved_attrs = {
        attr: getattr(model, attr)
        for attr in preserved_attr_names
        if hasattr(model, attr)
    }

    graph_module = torch.fx.symbolic_trace(model)
    _attach_meta_to_node_if_not_exist(graph_module)
    graph_module = _fuse_fx(graph_module, False, fuse_custom_config, backend_config)
    attach_preserved_attrs_to_model(graph_module, preserved_attrs)
    return graph_module


@typing_extensions.deprecated(DEPRECATION_WARNING)
def prepare_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any]],
    example_inputs: tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, dict[str, Any], None] = None,
    _equalization_config: Optional[Union[QConfigMapping, dict[str, Any]]] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""Prepare a model for post training quantization

    Args:
      * `model` (torch.nn.Module): torch.nn.Module model

      * `qconfig_mapping` (QConfigMapping): QConfigMapping object to configure how a model is
         quantized, see :class:`~torch.ao.quantization.qconfig_mapping.QConfigMapping`
         for more details

      * `example_inputs` (Tuple[Any, ...]): Example inputs for forward function of the model,
         Tuple of positional args (keyword args can be passed as positional args as well)

      * `prepare_custom_config` (PrepareCustomConfig): customization configuration for quantization tool.
          See :class:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig` for more details

      * `_equalization_config`: config for specifying how to perform equalization on the model

      * `backend_config` (BackendConfig): config that specifies how operators are quantized
         in a backend, this includes how the operators are observed,
         supported fusion patterns, how quantize/dequantize ops are
         inserted, supported dtypes etc. See :class:`~torch.ao.quantization.backend_config.BackendConfig` for more details

    Return:
      A GraphModule with observer (configured by qconfig_mapping), ready for calibration

    Example::

        import torch
        from torch.ao.quantization import get_default_qconfig_mapping
        from torch.ao.quantization.quantize_fx import prepare_fx

        class Submodule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
            def forward(self, x):
                x = self.linear(x)
                return x

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
                self.sub = Submodule()

            def forward(self, x):
                x = self.linear(x)
                x = self.sub(x) + x
                return x

        # initialize a floating point model
        float_model = M().eval()

        # define calibration function
        def calibrate(model, data_loader):
            model.eval()
            with torch.no_grad():
                for image, target in data_loader:
                    model(image)

        # qconfig is the configuration for how we insert observers for a particular
        # operator
        # qconfig = get_default_qconfig("fbgemm")
        # Example of customizing qconfig:
        # qconfig = torch.ao.quantization.QConfig(
        #    activation=MinMaxObserver.with_args(dtype=torch.qint8),
        #    weight=MinMaxObserver.with_args(dtype=torch.qint8))
        # `activation` and `weight` are constructors of observer module

        # qconfig_mapping is a collection of quantization configurations, user can
        # set the qconfig for each operator (torch op calls, functional calls, module calls)
        # in the model through qconfig_mapping
        # the following call will get the qconfig_mapping that works best for models
        # that target "fbgemm" backend
        qconfig_mapping = get_default_qconfig_mapping("fbgemm")

        # We can customize qconfig_mapping in different ways.
        # e.g. set the global qconfig, which means we will use the same qconfig for
        # all operators in the model, this can be overwritten by other settings
        # qconfig_mapping = QConfigMapping().set_global(qconfig)
        # e.g. quantize the linear submodule with a specific qconfig
        # qconfig_mapping = QConfigMapping().set_module_name("linear", qconfig)
        # e.g. quantize all nn.Linear modules with a specific qconfig
        # qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
        # for a more complete list, please see the docstring for the
        # :class:`torch.ao.quantization.QConfigMapping` argument

        # example_inputs is a tuple of inputs, that is used to infer the type of the
        # outputs in the model
        # currently it's not used, but please make sure model(*example_inputs) runs
        example_inputs = (torch.randn(1, 3, 224, 224),)

        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        # `prepare_fx` inserts observers in the model based on qconfig_mapping and
        # backend_config. If the configuration for an operator in qconfig_mapping
        # is supported in the backend_config (meaning it's supported by the target
        # hardware), we'll insert observer modules according to the qconfig_mapping
        # otherwise the configuration in qconfig_mapping will be ignored
        #
        # Example:
        # in qconfig_mapping, user sets linear module to be quantized with quint8 for
        # activation and qint8 for weight:
        # qconfig = torch.ao.quantization.QConfig(
        #     activation=MinMaxObserver.with_args(dtype=torch.quint8),
        #     weight=MinMaxObserver.with_args(dtype=torch.qint8))
        # Note: current qconfig api does not support setting output observer, but
        # we may extend this to support more fine grained control in the
        # future
        #
        # qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
        # in the backend config, the linear module also supports this configuration:
        # weighted_int8_dtype_config = DTypeConfig(
        #   input_dtype=torch.quint8,
        #   output_dtype=torch.quint8,
        #   weight_dtype=torch.qint8,
        #   bias_dtype=torch.float)

        # linear_pattern_config = BackendPatternConfig(torch.nn.Linear) \
        #    .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
        #    .add_dtype_config(weighted_int8_dtype_config) \
        #    ...

        # backend_config = BackendConfig().set_backend_pattern_config(linear_pattern_config)
        # `prepare_fx` will check that the setting requested by user in qconfig_mapping
        # is supported by the backend_config and insert observers and fake quant modules
        # in the model
        prepared_model = prepare_fx(float_model, qconfig_mapping, example_inputs)
        # Run calibration
        calibrate(prepared_model, sample_inference_data)
    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_fx")
    return _prepare_fx(
        model,
        qconfig_mapping,
        False,  # is_qat
        example_inputs,
        prepare_custom_config,
        _equalization_config,
        backend_config,
    )


@typing_extensions.deprecated(DEPRECATION_WARNING)
def prepare_qat_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any]],
    example_inputs: tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""Prepare a model for quantization aware training

    Args:
      * `model` (torch.nn.Module): torch.nn.Module model
      * `qconfig_mapping` (QConfigMapping): see :func:`~torch.ao.quantization.prepare_fx`
      * `example_inputs` (Tuple[Any, ...]): see :func:`~torch.ao.quantization.prepare_fx`
      * `prepare_custom_config` (PrepareCustomConfig): see :func:`~torch.ao.quantization.prepare_fx`
      * `backend_config` (BackendConfig): see :func:`~torch.ao.quantization.prepare_fx`

    Return:
      A GraphModule with fake quant modules (configured by qconfig_mapping and backend_config), ready for
      quantization aware training

    Example::

        import torch
        from torch.ao.quantization import get_default_qat_qconfig_mapping
        from torch.ao.quantization.quantize_fx import prepare_qat_fx


        class Submodule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)

            def forward(self, x):
                x = self.linear(x)
                return x


        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
                self.sub = Submodule()

            def forward(self, x):
                x = self.linear(x)
                x = self.sub(x) + x
                return x


        # initialize a floating point model
        float_model = M().train()
        # (optional, but preferred) load the weights from pretrained model
        # float_model.load_weights(...)


        # define the training loop for quantization aware training
        def train_loop(model, train_data):
            model.train()
            for image, target in train_data:
                ...


        # qconfig is the configuration for how we insert observers for a particular
        # operator
        # qconfig = get_default_qconfig("fbgemm")
        # Example of customizing qconfig:
        # qconfig = torch.ao.quantization.QConfig(
        #    activation=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)),
        #    weight=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)))
        # `activation` and `weight` are constructors of observer module

        # qconfig_mapping is a collection of quantization configurations, user can
        # set the qconfig for each operator (torch op calls, functional calls, module calls)
        # in the model through qconfig_mapping
        # the following call will get the qconfig_mapping that works best for models
        # that target "fbgemm" backend
        qconfig_mapping = get_default_qat_qconfig_mapping("fbgemm")

        # We can customize qconfig_mapping in different ways, please take a look at
        # the docstring for :func:`~torch.ao.quantization.prepare_fx` for different ways
        # to configure this

        # example_inputs is a tuple of inputs, that is used to infer the type of the
        # outputs in the model
        # currently it's not used, but please make sure model(*example_inputs) runs
        example_inputs = (torch.randn(1, 3, 224, 224),)

        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        # `prepare_qat_fx` inserts observers in the model based on qconfig_mapping and
        # backend_config, if the configuration for an operator in qconfig_mapping
        # is supported in the backend_config (meaning it's supported by the target
        # hardware), we'll insert fake_quantize modules according to the qconfig_mapping
        # otherwise the configuration in qconfig_mapping will be ignored
        # see :func:`~torch.ao.quantization.prepare_fx` for a detailed explanation of
        # how qconfig_mapping interacts with backend_config
        prepared_model = prepare_qat_fx(float_model, qconfig_mapping, example_inputs)
        # Run training
        train_loop(prepared_model, train_data)
    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_qat_fx")
    return _prepare_fx(
        model,
        qconfig_mapping,
        True,  # is_qat
        example_inputs,
        prepare_custom_config,
        backend_config=backend_config,
    )


def _convert_fx(
    graph_module: GraphModule,
    is_reference: bool,
    convert_custom_config: Union[ConvertCustomConfig, dict[str, Any], None] = None,
    is_standalone_module: bool = False,
    _remove_qconfig: bool = True,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
    is_decomposed: bool = False,
    keep_original_weights: bool = False,
) -> GraphModule:
    """`is_standalone_module`: see docs in :func:`~torch.ao.quantization.prepare_standalone_module_fx`"""
    if convert_custom_config is None:
        convert_custom_config = ConvertCustomConfig()

    if isinstance(convert_custom_config, dict):
        warnings.warn(
            "Passing a convert_custom_config_dict to convert is deprecated and will not "
            "be supported in a future version. Please pass in a ConvertCustomConfig instead.",
            FutureWarning,
            stacklevel=2,
        )
        convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

    _check_is_graph_module(graph_module)
    preserved_attr_names = convert_custom_config.preserved_attributes
    preserved_attrs = {
        attr: getattr(graph_module, attr)
        for attr in preserved_attr_names
        if hasattr(graph_module, attr)
    }

    quantized = convert(
        graph_module,
        is_reference,
        convert_custom_config,
        is_standalone_module,
        _remove_qconfig_flag=_remove_qconfig,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
        is_decomposed=is_decomposed,
        keep_original_weights=keep_original_weights,
    )

    attach_preserved_attrs_to_model(quantized, preserved_attrs)
    return quantized


@typing_extensions.deprecated(DEPRECATION_WARNING)
def convert_fx(
    graph_module: GraphModule,
    convert_custom_config: Union[ConvertCustomConfig, dict[str, Any], None] = None,
    _remove_qconfig: bool = True,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
    keep_original_weights: bool = False,
) -> GraphModule:
    r"""Convert a calibrated or trained model to a quantized model

    Args:
        * `graph_module` (torch.fx.GraphModule): A prepared and calibrated/trained model (GraphModule)

        * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function.
            See :class:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig` for more details

        * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert.

        * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization.

           The keys must include the ones in the qconfig_mapping passed to `prepare_fx` or `prepare_qat_fx`,
           with the same values or `None`. Additional keys can be specified with values set to `None`.

          For each entry whose value is set to None, we skip quantizing that entry in the model::

            qconfig_mapping = QConfigMapping() \
                .set_global(qconfig_from_prepare) \
                .set_object_type(torch.nn.functional.add, None)  # skip quantizing torch.nn.functional.add \
                .set_object_type(torch.nn.functional.linear, qconfig_from_prepare) \
                .set_module_name("foo.bar", None)  # skip quantizing module "foo.bar"

        * `backend_config` (BackendConfig): A configuration for the backend which describes how
            operators should be quantized in the backend, this includes quantization
            mode support (static/dynamic/weight_only), dtype support (quint8/qint8 etc.),
            observer placement for each operators and fused operators.
            See :class:`~torch.ao.quantization.backend_config.BackendConfig` for more details

    Return:
        A quantized model (torch.nn.Module)

    Example::

        # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
        # convert_fx converts a calibrated/trained model to a quantized model for the
        # target hardware, this includes converting the model first to a reference
        # quantized model, and then lowering the reference quantized model to a backend
        # Currently, the supported backends are fbgemm (onednn), qnnpack (xnnpack) and
        # they share the same set of quantized operators, so we are using the same
        # lowering procedure
        #
        # backend_config defines the corresponding reference quantized module for
        # the weighted modules in the model, e.g. nn.Linear
        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        quantized_model = convert_fx(prepared_model)

    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_fx")
    return _convert_fx(
        graph_module,
        is_reference=False,
        convert_custom_config=convert_custom_config,
        _remove_qconfig=_remove_qconfig,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
        keep_original_weights=keep_original_weights,
    )


def convert_to_reference_fx(
    graph_module: GraphModule,
    convert_custom_config: Union[ConvertCustomConfig, dict[str, Any], None] = None,
    _remove_qconfig: bool = True,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""Convert a calibrated or trained model to a reference quantized model,
    see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details,
    reference quantized model is a standard representation of a quantized model provided
    by FX Graph Mode Quantization, it can be further lowered to run on the target
    hardware, like accelerators

    Args:
        * `graph_module` (GraphModule): A prepared and calibrated/trained model (GraphModule)

        * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

        * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert.

        * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

        * `backend_config` (BackendConfig): A configuration for the backend which describes how
            operators should be quantized in the backend. See
            :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

    Return:
        A reference quantized model (GraphModule)

    Example::

        # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        reference_quantized_model = convert_to_reference_fx(prepared_model)

    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_to_reference_fx")
    return _convert_fx(
        graph_module,
        is_reference=True,
        convert_custom_config=convert_custom_config,
        _remove_qconfig=_remove_qconfig,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
    )


def _convert_to_reference_decomposed_fx(
    graph_module: GraphModule,
    convert_custom_config: Union[ConvertCustomConfig, dict[str, Any], None] = None,
    qconfig_mapping: Union[QConfigMapping, dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""Convert a calibrated or trained model to a reference quantized model, with
    decomposed representation for quantized Tensor
    see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details,
    reference quantized model is a standard representation of a quantized model provided
    by FX Graph Mode Quantization, it can be further lowered to run on the target
    hardware, like accelerators

    Note: this is not public API

    Args:
        * `graph_module` (GraphModule): A prepared and calibrated/trained model (GraphModule)

        * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

        * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

        * `backend_config` (BackendConfig): A configuration for the backend which describes how
            operators should be quantized in the backend. See
            :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

    Return:
        A reference quantized model (GraphModule) with operators working with decomposed quantized Tensor

    Example::

        # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        reference_quantized_model = _convert_to_reference_decomposed_fx(prepared_model)

    """
    torch._C._log_api_usage_once(
        "quantization_api.quantize_fx._convert_to_reference_decomposed_fx"
    )
    return _convert_fx(
        graph_module,
        is_reference=True,
        convert_custom_config=convert_custom_config,
        _remove_qconfig=False,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
        is_decomposed=True,
    )


def _convert_standalone_module_fx(
    graph_module: GraphModule,
    is_reference: bool = False,
    convert_custom_config: Union[ConvertCustomConfig, dict[str, Any], None] = None,
) -> GraphModule:
    r"""[Internal use only] Convert a model produced by :func:`~torch.ao.quantization.prepare_standalone_module_fx`
    and convert it to a quantized model

    Returns a quantized standalone module; whether input/output is quantized is
    specified by the `input_quantized_idxs` and `output_quantized_idxs` entries of
    prepare_custom_config, please see docs for prepare_fx for details
    """
    return _convert_fx(
        graph_module,
        is_reference,
        convert_custom_config,
        is_standalone_module=True,
    )
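
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal
# post-training quantization flow wiring prepare_fx -> calibration ->
# convert_fx together, following the workflow described in the docstrings
# above. The toy model, the number of calibration batches, and the choice of
# the default "fbgemm" qconfig mapping are assumptions made for illustration
# only. Guarded under __main__ so importing this module is unaffected.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch.ao.quantization import get_default_qconfig_mapping

    class _DemoModel(torch.nn.Module):
        """Hypothetical toy model used only for this demo."""

        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(8, 8)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return torch.nn.functional.relu(self.linear(x))

    float_model = _DemoModel().eval()
    example_inputs = (torch.randn(4, 8),)

    # Insert observers according to the default qconfig mapping.
    prepared = prepare_fx(
        float_model, get_default_qconfig_mapping("fbgemm"), example_inputs
    )

    # "Calibrate" with a few batches so the observers record activation ranges.
    with torch.no_grad():
        for _ in range(4):
            prepared(torch.randn(4, 8))

    # Lower the calibrated model to a quantized model and inspect the graph.
    quantized = convert_fx(prepared)
    print(quantized.graph)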