| | """ Shell configuration""" |

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class CodeShellConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`CodeShellModel`]. It is used to instantiate a
    CodeShell model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 70144):
            Vocabulary size of the CodeShell model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`CodeShellModel`].
        n_positions (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 42):
            Number of hidden layers in the Transformer decoder.
        n_head (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        n_inner (`int`, *optional*, defaults to `None`):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new",
            "gelu_pytorch_tanh"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether to call the fused softmax in float32.
        scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):
            Whether to scale the attention softmax in float32.
        group_query_attention (`bool`, *optional*, defaults to `True`):
            Whether to use grouped-query attention. With `num_query_groups=1` this reduces to Multi-Query Attention;
            setting it to `n_head` groups recovers standard Multi-Head Attention.
        num_query_groups (`int`, *optional*, defaults to 1):
            Number of key/value groups shared across the query heads when `group_query_attention` is enabled.
        position_embedding_type (`str`, *optional*, defaults to `"learned_absolute"`):
            Type of position embedding to use, either `"learned_absolute"` or `"rope"`.
        rope_scaling (`dict`, *optional*, defaults to `None`):
            Optional scaling configuration for the rotary position embeddings, only relevant when
            `position_embedding_type` is `"rope"`.
    Example:

    ```python
    >>> from configuration_codeshell import CodeShellConfig
    >>> from modeling_codeshell import CodeShellForCausalLM

    >>> # Initializing a CodeShell configuration
    >>> configuration = CodeShellConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = CodeShellForCausalLM(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
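
    >>> # Illustrative addition (values chosen for the example only): overriding defaults for a
    >>> # much smaller model that uses rotary ("rope") position embeddings
    >>> small_configuration = CodeShellConfig(n_layer=2, n_embd=256, n_head=8, position_embedding_type="rope")

    >>> # `hidden_size` is resolved to `n_embd` through `attribute_map`
    >>> small_configuration.hidden_size
    256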
    ```"""

    model_type = "codeshell"
    keys_to_ignore_at_inference = ["past_key_values"]
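    # Map the standard Transformers attribute names (e.g. `hidden_size`) to the GPT-2-style names used here.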
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=70144,
        n_positions=8192,
        n_embd=4096,
        n_layer=42,
        n_head=32,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=70000,
        eos_token_id=70000,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        group_query_attention=True,
        num_query_groups=1,
        position_embedding_type="learned_absolute",
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
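        # num_query_groups == 1 corresponds to Multi-Query Attention (a single shared key/value head);
        # num_query_groups == n_head would recover standard Multi-Head Attention.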
        self.group_query_attention = group_query_attention
        self.num_query_groups = num_query_groups
        self.position_embedding_type = position_embedding_type
        self.rope_scaling = rope_scaling
        assert self.position_embedding_type in [
            "learned_absolute", "rope"
        ], "position_embedding_type must be one of ['learned_absolute', 'rope']"

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

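        # PretrainedConfig stores the special token ids and consumes any remaining keyword arguments.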
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
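

# A minimal usage sketch (an assumption, not part of the original module): registering this config
# class with the Auto API so that `AutoConfig` can resolve the custom `model_type`:
#
#     from transformers import AutoConfig
#     AutoConfig.register("codeshell", CodeShellConfig)
#
# After registration, `AutoConfig.for_model("codeshell")` returns a `CodeShellConfig` instance.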