
    9i#                     \    S SK r S SKJr  S SKJr  \R
                  " 5       r " S S\5      rg)    N)PretrainedConfig)loggingc                      ^  \ rS rSrSrSr                               SU 4S jjr\S 5       rSr	U =r
$ )
GPT3Config   a   
Configuration class for the GPT-3 model.

Class attributes:

- **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, can be used to recreate
  the correct object in [`~transformers.AutoConfig`].

Args:
    vocab_size (`int`, *optional*, defaults to 25600):
        Vocabulary size of the GPT model. Defines the number of different
        tokens that can be represented by the `input_ids` passed when
        calling [`GPT3Model`].
    hidden_size (`int`, *optional*, defaults to 768):
        Dimensionality of the decoder layers and the pooler layer.
    ffn_hidden_size (`int`, *optional*, defaults to None):
        Dimensionality of the feed-forward (FFN) layer. If `None`, it defaults
        to four times `hidden_size`.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer decoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the
        Transformer decoder.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (often named feed-forward)
        layer in the Transformer decoder.
    hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the
        decoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and
        `"gelu_new"` are supported.
    hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the
        embeddings, decoder, and pooler.
    attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    max_position_embeddings (`int`, *optional*, defaults to 512):
        The maximum sequence length that this model might ever be used with.
        Typically set this to something large just in case (e.g., 512 or
        1024 or 2048).
    type_vocab_size (`int`, *optional*, defaults to 2):
        The vocabulary size of the `token_type_ids` passed when calling
        [`GPT3Model`].
    layernorm_epsilon (`float`, *optional*, defaults to 1e-12):
        The epsilon used by the layer normalization layers.
    bias_gelu_fusion (`bool`, *optional*, defaults to True):
        Whether to fuse the bias addition and the GeLU activation into a single kernel.
    fp32_residual_connection (`bool`, *optional*, defaults to False):
        Whether to use fp32 for residual connection
        between layers to improve accuracy.
    sequence_parallel (`bool`, *optional*, defaults to False):
        Whether to use sequence parallel during training.
    bf16 (`bool`, *optional*, defaults to `False`):
        Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training.
        Requires Ampere or higher NVIDIA architecture or using CPU (no_cuda).
        This is an experimental API and it may change.
    fp16 (`bool`, *optional*, defaults to `False`):
        Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.
    apply_query_key_layer_scaling (`bool`, *optional*, defaults to `True`):
        Whether to scale the query-key attention scores by the layer number to
        improve numerical stability during training.
    init_method_std (`float`, *optional*, defaults to `0.02`):
        The standard deviation of the normal distribution for initialization process.
    eod_id (`int`, *optional*, defaults to `1`):
        The id of the end-of-document token used by the tokenizer; it also
        marks the end of generation.
    tokens_to_generate (`int`, *optional*, defaults to 100):
        Number of tokens to generate.
    top_k (`int`, *optional*, defaults to 0):
        Number of highest probability vocabulary tokens to keep for
        top-k-filtering that will be used by default in
        the `generate` method of the model.
    top_p (`float`, *optional*, defaults to 0.9):
        Value that will be used by default in the `generate` method of the model
        for `top_p`. If set to float < 1,
        only the most probable tokens with probabilities that add up to `top_p`
        or higher are kept for generation.
    temperature (`float`, *optional*, defaults to 1.0):
        The value used to modulate the next token probabilities, used by default
        in the `generate` method of the model. Must be strictly positive.
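
    Example (a minimal usage sketch; the import path below is assumed from this
    module's location and the field values are purely illustrative):

    ```python
    >>> from modelscope.models.nlp.gpt3.configuration import GPT3Config

    >>> # Build a configuration with the default hyper-parameters.
    >>> configuration = GPT3Config()

    >>> # Override a few fields, e.g. for a smaller model with custom sampling defaults.
    >>> configuration = GPT3Config(
    ...     hidden_size=512,
    ...     num_hidden_layers=6,
    ...     num_attention_heads=8,
    ...     top_k=50,
    ...     top_p=0.95)

    >>> # When `ffn_hidden_size` is left as None it is derived as 4 * hidden_size.
    >>> configuration.ffn_hidden_size
    2048
    ```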
    """

    model_type = 'gpt3'

    def __init__(self,
                 vocab_size=25600,
                 hidden_size=768,
                 ffn_hidden_size=None,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act='gelu',
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 layernorm_epsilon=1e-12,
                 bias_gelu_fusion=True,
                 fp32_residual_connection=False,
                 sequence_parallel=False,
                 fp16=False,
                 bf16=False,
                 apply_query_key_layer_scaling=True,
                 attention_softmax_in_fp32=False,
                 kv_channels=None,
                 masked_softmax_fusion=True,
                 attention_dropout=0.1,
                 bias_dropout_fusion=True,
                 apply_residual_connection_post_layernorm=False,
                 hidden_dropout=0.1,
                 init_method_std=0.02,
                 eod_id=1,
                 tokens_to_generate=100,
                 top_k=0,
                 top_p=0.9,
                 temperature=1.0,
                 **kwargs):
        super().__init__(layer_norm_eps=layernorm_epsilon, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # The FFN width defaults to four times the hidden size when not given.
        self.ffn_hidden_size = 4 * hidden_size if ffn_hidden_size is None else ffn_hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layernorm_epsilon = layernorm_epsilon
        self.bias_gelu_fusion = bias_gelu_fusion
        self.fp32_residual_connection = fp32_residual_connection
        self.sequence_parallel = sequence_parallel
        self.fp16 = fp16
        self.bf16 = bf16
        # fp16 and bf16 are mutually exclusive precision modes.
        assert not (fp16 and bf16)
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        if kv_channels is None:
            # Derive the per-head key/value width from the hidden size.
            assert hidden_size % num_attention_heads == 0
            kv_channels = hidden_size // num_attention_heads
        self.kv_channels = kv_channels
        self.masked_softmax_fusion = masked_softmax_fusion
        self.attention_dropout = attention_dropout
        self.bias_dropout_fusion = bias_dropout_fusion
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.init_method_std = init_method_std
        self.eod_id = eod_id
        self.tokens_to_generate = tokens_to_generate
        self.top_k = top_k
        self.top_p = top_p
        self.temperature = temperature

        # Persistent fused layer-norm kernels require torch >= 1.11.
        TORCH_MAJOR = int(torch.__version__.split('.')[0])
        TORCH_MINOR = int(torch.__version__.split('.')[1])
        self.no_persist_layer_norm = TORCH_MAJOR < 1 or (
            TORCH_MAJOR == 1 and TORCH_MINOR < 11)

    @property
    def params_dtype(self):
        if self.fp16:
            return torch.half
        elif self.bf16:
            return torch.bfloat16
        else:
            return torch.float