
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np

from modelscope.outputs.outputs import ModelOutputBase

Tensor = Union['torch.Tensor', 'tf.Tensor']


@dataclass
class BackboneModelOutput(ModelOutputBase):
g)BackboneModelOutput   a  The output class for text classification models.

Args:
    last_hidden_state (`Tensor`, *optional*): Sequence of hidden-states at
        the output of the last layer of the model.
    pooler_output (`Tensor`, *optional*) The tensor of the pooled hidden state.
    hidden_states (`Tensor`, *optional*) Hidden-states of the model at
        the output of each layer plus the optional initial embedding outputs.
Nlast_hidden_statepooler_outputhidden_states )__name__
__module____qualname____firstlineno____doc__r   Tensor__annotations__r   r   __static_attributes__r       ^/var/www/html/land-doc-ocr/venv/lib/python3.13/site-packages/modelscope/outputs/nlp_outputs.pyr
   r
      s(     !%v$ M6  M6 r   r
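
# A minimal usage sketch (illustrative only, not part of the library API).
# It assumes `torch` is installed and uses made-up shapes; the dataclass
# itself places no constraints on the tensors it carries:
#
#     import torch
#     output = BackboneModelOutput(
#         last_hidden_state=torch.randn(2, 16, 768),  # (batch, seq, hidden)
#         pooler_output=torch.randn(2, 768))
#     sentence_repr = output.pooler_output            # pooled [CLS]-style vector
#     assert output.last_hidden_state.shape == (2, 16, 768)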


@dataclass
class AttentionBackboneModelOutput(BackboneModelOutput):
g)AttentionBackboneModelOutput   a  The output class for backbones of attention based models.

Args:
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when
    `output_attentions=True` is passed or when
    `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape
        `(batch_size, num_heads, sequence_length, sequence_length)`.

        Attentions weights after the attention softmax, used to compute the
        weighted average in the self-attention heads.
    cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when
    `output_attentions=True` and `config.add_cross_attention=True` is passed
    or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape
        `(batch_size, num_heads, sequence_length, sequence_length)`.

        Attentions weights of the decoder's cross-attention layer, after the
        attention softmax, used to compute the weighted average in the
        cross-attention heads.
    past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned
    when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`,
        with each tuple having 2 tensors of shape `(batch_size, num_heads,
        sequence_length, embed_size_per_head)`) and optionally if
        `config.is_encoder_decoder=True` 2 additional tensors of shape
        `(batch_size, num_heads, encoder_sequence_length,
        embed_size_per_head)`.

        Contains pre-computed hidden-states (key and values in the
        self-attention blocks and optionally if
        `config.is_encoder_decoder=True` in the cross-attention blocks) that
        can be used (see `past_key_values` input) to speed up sequential
        decoding.
N
    attentions: Tensor = None
    past_key_values: Tensor = None
    cross_attentions: Tensor = None


@dataclass
class Seq2SeqModelOutput(ModelOutputBase):
    """
    Base class for model encoder's outputs that also contains pre-computed
    hidden states that can speed up sequential decoding.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size,
        sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the
            decoder of the model.

            If `past_key_values` is used, only the last hidden-state of the
            sequences of shape `(batch_size, 1, hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned
        when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`,
            with each tuple having 2 tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)` and 2 additional tensors of
            shape `(batch_size, num_heads, encoder_sequence_length,
            embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the
            self-attention blocks and in the cross-attention blocks) that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings,
            if the model has an embedding layer, + one for the output of each
            layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the
            optional initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_attentions=True` is passed or when
        `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape
            `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used
            to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when
        `output_attentions=True` is passed or when
        `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape
            `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the
            attention softmax, used to compute the weighted average in the
            cross-attention heads.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size,
        sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the
            encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings,
            if the model has an embedding layer, + one for the output of each
            layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the
            optional initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_attentions=True` is passed or when
        `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape
            `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used
            to compute the weighted average in the self-attention heads.
    """
    last_hidden_state: Tensor = None
    past_key_values: Optional[Tuple[Tuple[Tensor]]] = None
    decoder_hidden_states: Optional[Tuple[Tensor]] = None
    decoder_attentions: Optional[Tuple[Tensor]] = None
    cross_attentions: Optional[Tuple[Tensor]] = None
    encoder_last_hidden_state: Optional[Tensor] = None
    encoder_hidden_states: Optional[Tuple[Tensor]] = None
    encoder_attentions: Optional[Tuple[Tensor]] = None


@dataclass
class FaqQuestionAnsweringOutput(ModelOutputBase):
    """The output class for FAQ question answering models.
    """
    scores: Tensor = None
    labels: Tensor = None
    loss: Tensor = None
    logits: Tensor = None


@dataclass
class FeatureExtractionOutput(ModelOutputBase):
    """The output class for feature extraction models.
    Ntext_embeddingr   )	r   r   r   r   r   r2   r   r   r   r   r   r   r0   r0      s     "NF!r   r0   c                   R    \ rS rSr% SrSr\\S'   Sr\\S'   Sr	\\S'   Sr
\\S'   Srg)	FillMaskModelOutput   a  The output class for fill mask models.

Args:
    logits (`Tensor`): The logits output of the model.
    loss (`Tensor`, *optional*) The loss of the model, available when training.
    input_ids (`Tensor`, *optional*) The input id tensor fed into the model.
    hidden_states (`Tensor`, *optional*) Hidden-states of the model at the
        output of each layer plus the optional initial embedding outputs.
Nr.   r-   	input_idsr   r   )r   r   r   r   r   r.   r   r   r-   r6   r   r   r   r   r   r4   r4      s1     FFD&Iv M6 r   r4   c                   (    \ rS rSr% SrSr\\S'   Srg)AttentionFillMaskModelOutput   zThe output class for the fill mask and attention based models.

Args:
    attentions (`tuple(Tensor)`, *optional* Attentions weights after the
    attention softmax, used to compute the weighted average in the
    self-attention heads.
Nr   r   )	r   r   r   r   r   r   r   r   r   r   r   r   r8   r8      s     Jr   r8   c                   <    \ rS rSr% SrSr\R                  \S'   Sr	g)InformationExtractionOutput   8The output class for information extraction models.
    Nspo_listr   )
r   r   r   r   r   r>   npndarrayr   r   r   r   r   r;   r;      s      Hbjjr   r;   c                       \ rS rSr% SrSr\\   \S'   Sr	\\S'   Sr
\\\\         \S'   Sr\\\      \S'   Sr\\\      \S'   Sr\\\      \S	'   Sr\\   \S
'   Sr\\\      \S'   Sr\\\      \S'   Srg)Seq2SeqLMOutput   at  
    Base class for sequence-to-sequence language model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when
        `labels` is provided):
            Language modeling loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length,
        config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each
            vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned
        when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`,
            with each tuple having 2 tensors of shape `(batch_size, num_heads,
            sequence_length, embed_size_per_head)` and 2 additional tensors of
            shape `(batch_size, num_heads, encoder_sequence_length,
            embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the
            self-attention blocks and in the cross-attention blocks) that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings,
            if the model has an embedding layer, + one for the output of each
            layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the
            initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_attentions=True` is passed or when
        `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape
            `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used
            to compute the weighted average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when
        `output_attentions=True` is passed or when
        `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape
            `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the
            attention softmax, used to compute the weighted average in the
            cross-attention heads.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size,
        sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the
            encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings,
            if the model has an embedding layer, + one for the output of each
            layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the
            initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned
        when `output_attentions=True` is passed or when
        `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape
            `(batch_size, num_heads, sequence_length, sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used
            to compute the weighted average in the self-attention heads.
    """
    loss: Optional[Tensor] = None
    logits: Tensor = None
    past_key_values: Optional[Tuple[Tuple[Tensor]]] = None
    decoder_hidden_states: Optional[Tuple[Tensor]] = None
    decoder_attentions: Optional[Tuple[Tensor]] = None
    cross_attentions: Optional[Tuple[Tensor]] = None
    encoder_last_hidden_state: Optional[Tensor] = None
    encoder_hidden_states: Optional[Tuple[Tensor]] = None
    encoder_attentions: Optional[Tuple[Tensor]] = None


@dataclass
class TextClassificationModelOutput(ModelOutputBase):
    """The output class for text classification models.

    Args:
        logits (`Tensor`): The logits output of the model.
        loss (`Tensor`, *optional*): The loss of the model, available when training.
        hidden_states (`Tensor`, *optional*): Hidden-states of the model at the
            output of each layer plus the optional initial embedding outputs.
    """
    logits: Tensor = None
    loss: Tensor = None
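
# Illustrative only: turning `logits` into predicted labels. Assumes `torch`
# is installed; nothing here is required by the dataclass itself:
#
#     import torch
#     out = TextClassificationModelOutput(logits=torch.randn(4, 3))
#     pred_ids = out.logits.argmax(dim=-1)    # shape (4,), one label per row
#     probs = out.logits.softmax(dim=-1)      # normalized class probabilities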


@dataclass
class AttentionTextClassificationModelOutput(TextClassificationModelOutput):
    """The output class for attention based text classification models.

    Args:
        attentions (`tuple(Tensor)`, *optional*): Attention weights after the
            attention softmax, used to compute the weighted average in the
            self-attention heads.
    """
    attentions: Tensor = None
    hidden_states: Tensor = None
    past_key_values: Tensor = None


@dataclass
class TextErrorCorrectionOutput(ModelOutputBase):
    """The output class for text error correction models.
    """
    predictions: Tensor = None


@dataclass
class WordAlignmentOutput(ModelOutputBase):
    """The output class for word alignment models.
    """
    predictions: Tensor = None


@dataclass
class TextGenerationModelOutput(ModelOutputBase):
    """The output class for text generation models.

    Args:
        logits (`Tensor`): The logits output of the model.
        loss (`Tensor`, *optional*): The loss of the model, available when training.
        hidden_states (`Tensor`, *optional*): Hidden-states of the model at the
            output of each layer plus the optional initial embedding outputs.
    """
    logits: Tensor = None
    loss: Tensor = None


@dataclass
class AttentionTextGenerationModelOutput(TextGenerationModelOutput):
g)"AttentionTextGenerationModelOutputic  a_  The output class for text generation of attention based models.

Args:
    logits (`Tensor`): The logits output of the model. loss (`Tensor`,
    *optional*) The loss of the model, available when training.
    hidden_states (`Tensor`, *optional*) Hidden-states of the model at the
    output of each layer plus the optional initial embedding outputs.
Nr   r   r   r   rJ   r   r   r   rU   rU   c  s'     J M6 "OV"r   rU   c                       \ rS rSr% SrSr\\S'   Sr\	\
\      \S'   Sr\	\
\
\         \S'   Sr\	\
\
\         \S'   Srg)	TokenGeneratorOutputir  aG  
    The output class for the generate method of text generation models.

    Args:
        sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
            The generated sequences. The second dimension (sequence_length) is
            either equal to `max_length` or shorter if all batches finished
            early due to the `eos_token_id`.
        scores (`tuple(torch.FloatTensor)`, *optional*, returned when
        `output_scores=True` is passed or when `config.output_scores=True`):
            Processed prediction scores of the language modeling head (scores
            for each vocabulary token before SoftMax) at each generation step.
            Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements
            (one element for each generated token), with each tensor of shape
            `(batch_size*num_return_sequences, config.vocab_size)`.
        attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when
        `output_attentions=True` is passed or `config.output_attentions=True`):
            Tuple (one element for each generated token) of tuples (one element
            for each layer of the decoder) of `torch.FloatTensor` of shape
            `(num_return_sequences*batch_size, num_heads, generated_length,
            sequence_length)`.
        hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned
        when `output_hidden_states=True` is passed or when
        `config.output_hidden_states=True`):
            Tuple (one element for each generated token) of tuples (one element
            for each layer of the decoder) of `torch.FloatTensor` of shape
            `(num_return_sequences*batch_size, generated_length, hidden_size)`.
    """
    sequences: Tensor = None
    scores: Optional[Tuple[Tensor]] = None
    attentions: Optional[Tuple[Tuple[Tensor]]] = None
    hidden_states: Optional[Tuple[Tuple[Tensor]]] = None


@dataclass
class TokenClassificationModelOutput(ModelOutputBase):
    """The output class for token classification models.

    Args:
        logits (`Tensor`): The logits output of the model.
        loss (`Tensor`, *optional*): The loss of the model, available when training.
        predictions (`Tensor`): A tensor of the best tag sequence for each
            batch, of shape `(nbest, batch_size, seq_length)`.
        offset_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,
        sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the sentence.
            Selected in the range ``[0, sequence_length - 1]``.
    """
    logits: Tensor = None
    loss: Tensor = None
    offset_mapping: Tensor = None
    predictions: Tensor = None
    label_mask: Tensor = None


@dataclass
class AttentionTokenClassificationModelOutput(TokenClassificationModelOutput):
    """The output class for attention based token classification models.

    Args:
        attentions (`tuple(Tensor)`, *optional*): Attention weights after the
            attention softmax, used to compute the weighted average in the
            self-attention heads.
    """
    attentions: Tensor = None
    hidden_states: Tensor = None


@dataclass
class DialogueUserSatisfactionEstimationModelOutput(ModelOutputBase):
    """The output class for user satisfaction estimation.

    Args:
        logits (`Tensor`): The logits output of the model.
    """
    logits: Tensor = None


@dataclass
class SentencEmbeddingModelOutput(ModelOutputBase):
g)SentencEmbeddingModelOutputi  a(  The output class for text classification models.

Args:
    query_embs (`Tensor`, *optional*): The tensor of the query embeddings.
    doc_embs (`Tensor`, *optional*) Then tensor of the doc embeddings.
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*): Sentence Embedding modeling loss.
Nquery_embeddingsdoc_embeddingsr-   r   )r   r   r   r   r   rc   r   r   rd   r-   r   r   r   r   rb   rb     s(      $f#!NF!D&r   rb   c                   J    \ rS rSr% SrSr\\S'   Sr\\S'   Sr	\
\   \S'   Srg)TranslationEvaluationOutputi  z8The output class for translation evaluation models.
    Nscorer-   input_formatr   )r   r   r   r   r   rg   r   r   r-   rh   r   strr   r   r   r   rf   rf     s+     E6D&"L$s)"r   rf   c                       \ rS rSr% SrSr\\   \S'   Sr	\\   \S'   Sr
\\S'   Sr\\\      \S'   Sr\\\      \S'   Sr\\S	'   S
rg)!MachineReadingComprehensionOutputi  a  The output class for machine reading comprehension models.

Args:
    loss (`Tensor`, *optional*): The training loss of the current batch
    match_loss (`Tensor`, *optinal*): The match loss of the current batch
    span_logits (`Tensor`): The logits of the span matrix output by the model
    hidden_states (`Tuple[Tensor]`, *optinal*): The hidden states output by the model
    attentions (`Tuple[Tensor]`, *optinal*):  The attention scores output by the model
    input_ids (`Tensor`): The token ids of the input sentence

Nr-   
match_lossspan_logitsr   r   r6   r   )r   r   r   r   r   r-   r   r   r   rl   rm   r   r   r   r6   r   r   r   r   rk   rk     sa    
 "D(6
!#'J 'K-1M8E&M*1*.Jv'.Ivr   rk   )"dataclassesr   typingr   r   r   r   numpyr?   modelscope.outputs.outputsr   r   r
   r   r!   r)   r0   r4   r8   r;   rB   rE   rI   rL   rQ   rS   rU   rW   rZ   r^   r`   rb   rf   rk   r   r   r   <module>rr      s   ! / /  6	*	+ !/ ! !  &$#6 &$ &$R P7 P7 P7f    "o " " !/ ! !" #6    /     P7o P7 P7f O   
#-J 
# 
#    /      #)B # # 9? 9 9> _  & !.L ! ! O   /   #/ # #   r   