
    9i              
          S SK Jr  S SKJr  S SKJrJr  S SKJr  S SK	J
r
  S SKJr  S SKJr  SS	KJr  \R"                  " \R$                  \R&                  S
9\R"                  " \R(                  \R&                  S
9\R"                  " \R*                  \R&                  S
9\R"                  " \R,                  \R&                  S
9 " S S\\5      5       5       5       5       rg)    ) RobertaForSequenceClassification)Models)Model
TorchModel)MODELS)&AttentionTextClassificationModelOutput)Tasks)parse_labels_in_order   )
VecoConfig)module_namec                   R   ^  \ rS rSrSr\rU 4S jrU 4S jr\	U 4S j5       r
SrU =r$ )VecoForSequenceClassification   a  Veco Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.

This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)

This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.

Preprocessor:
    This is the text classification model of Veco, the preprocessor of this model
    is `modelscope.preprocessors.TextClassificationTransformersPreprocessor`.

Trainer:
    This model should be trained by dataset which has mixed languages,
    and evaluated by datasets of languages one by one.
    For example, if the training dataset is xnli (which has sub datasets of multiple languages), then you
    should mix the sub-datasets with the languages you want to train to one training dataset, and evaluate
    the model one sub-dataset by one sub-dataset of different languages.
    This procedure can be done by custom code. If you are using trainer of ModelScope,
    the `VecoTrainer` is suggested to use to train this model. This trainer overrides the basic evaluation
    loop, and will call the evaluation dataset one by one. Besides, this trainer will use the `VecoTaskDataset`
    to mix the input datasets to one, you can check the API Doc for the details.

    To check the complete example please
    view the unittest `test_veco_xnli` in `tests.trainers.test_finetune_sequence_classification.py`

Parameters:
    config ([`VecoConfig`]): Model configuration class with all the parameters of the
        model. Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model
        weights.

This class overrides [`RobertaForSequenceClassification`]. Please check the superclass for the
appropriate documentation alongside usage examples.
c                 b   > [         TU ]  " UR                  40 UD6  [         [        U ]  U5        g )N)super__init__name_or_pathr   )selfconfigkwargs	__class__s      n/var/www/html/land-doc-ocr/venv/lib/python3.13/site-packages/modelscope/models/nlp/veco/text_classification.pyr   &VecoForSequenceClassification.__init__M   s*    ,,77eT#F+    c                    > SUS'   [         [        U ]
  " U0 UD6n[        UR                  UR
                  UR                  UR                  S9$ )u  
Returns:
    Returns `modelscope.outputs.AttentionTextClassificationModelOutput`

Examples:
    >>> from modelscope.models import Model
    >>> from modelscope.preprocessors import Preprocessor
    >>> model = Model.from_pretrained('damo/nlp_veco_fill-mask-large',
    >>>                               task='text-classification', num_labels=2)
    >>> preprocessor = Preprocessor.from_pretrained('damo/nlp_veco_fill-mask-large',
    >>>                                             label2id={'0': 0, '1': 1})
    >>> # Call the model, return some tensors
    >>> print(model(**preprocessor('这是个测试')))
    >>> # Call the pipeline, the result may be incorrect
    >>> from modelscope.pipelines import pipeline
    >>> pipeline_ins = pipeline('text-classification', pipeline_name='text-classification',
    >>>                         model=model, preprocessor=preprocessor)
    >>> print(pipeline_ins('这是个测试'))
Treturn_dict)losslogitshidden_states
attentions)r   r   forwardr   r   r   r    r!   )r   argsr   outputsr   s       r   r"   %VecoForSequenceClassification.forwardQ   sR    * !%}t,d=f=5>>!//))	
 	
r   c                    > UR                  SS5      nUR                  SS5      n[        X#40 UD6nUc  [        S0 UD6nU " U5      nU$ [        [        U ]  " SSU0UD6nU$ )a$  Instantiate the model.

Args:
    kwargs: Input args.
            model_dir: The model dir used to load the checkpoint and the label information.
            num_labels: An optional arg to tell the model how many classes to initialize.
                            Method will call utils.parse_label_mapping if num_labels is not input.
            label2id: An optional label2id mapping, which will cover the label2id in configuration (if exists).

Returns:
    The loaded model, which is initialized by transformers.PreTrainedModel.from_pretrained
	model_dirNcfgpretrained_model_name_or_path )popr
   r   r   r   from_pretrained)clsr   r'   r(   
model_argsr   modelr   s          r   _instantiate*VecoForSequenceClassification._instantiateo   s     JJ{D1	jj%*9DVD
-*-FKE  %5 G.7G;EGEr   r*   )__name__
__module____qualname____firstlineno____doc__r   config_classr   r"   classmethodr0   __static_attributes____classcell__)r   s   @r   r   r      s.    %N L,
<  r   r   N)transformersr   modelscope.metainfor   modelscope.modelsr   r   modelscope.models.builderr   modelscope.outputsr   modelscope.utils.constantr	   modelscope.utils.nlp.utilsr
   configurationr   register_modulenlivecosentiment_classificationsentence_similaritytext_classificationr   r*   r   r   <module>rI      s   $ : & / , E + < % 		v{{;	""=11v{{K11v{{KfJ$Df L L= <
fr   