""" TELECHAT configuration""" |
|
|
|
from transformers.configuration_utils import PretrainedConfig |
|
from transformers.utils import logging |
|
|
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
TELECHAT_PRETRAINED_CONFIG_ARCHIVE_MAP = { |
|
} |
|
|
|
|
|
class TELECHATConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`TELECHAT`] model. It is used to instantiate a
    TELECHAT model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 80000):
            Vocabulary size of the TELECHAT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`TELECHAT`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*, defaults to None):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        summary_type (`string`, *optional*, defaults to `"cls_index"`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Has to be one of the following options:

                - `"last"`: Take the last token hidden state (like XLNet).
                - `"first"`: Take the first token hidden state (like BERT).
                - `"mean"`: Take the mean of all tokens hidden states.
                - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used for the multiple choice head in
            [`GPT2DoubleHeadsModel`].

            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            The dropout ratio to be used after the projection and activation.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast the attention
            dot-product/softmax to float32 when training with mixed precision.
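        bos_token_id (`int`, *optional*):
            Id of the beginning-of-sequence token in the vocabulary.
        eos_token_id (`int`, *optional*):
            Id of the end-of-sequence token in the vocabulary.
        sep_token_id (`int`, *optional*):
            Id of the separator token in the vocabulary.
        pad_token_id (`int`, *optional*):
            Id of the padding token in the vocabulary.
        unk_token_id (`int`, *optional*):
            Id of the unknown token in the vocabulary.
        relative_encoding (`str`, *optional*):
            Which relative position encoding scheme the model uses, if any.
        rotary_theta (`int`, *optional*, defaults to 10000):
            Base period (theta) of the rotary position embeddings.
        rotary_use_xpos (`bool`, *optional*, defaults to `True`):
            Whether to use the xPos variant of rotary position embeddings.
        rotary_xpos_scale_base (`int`, *optional*, defaults to 512):
            Scale base used by the xPos rotary position embeddings.
        use_mup (`bool`, *optional*, defaults to `False`):
            Whether to apply muP (Maximal Update Parametrization) scaling.
        mup_scale_factor (`float`, *optional*, defaults to 1.0):
            Width scale factor applied when `use_mup` is enabled.
        output_mult (`float`, *optional*, defaults to 1.0):
            Output multiplier applied when `use_mup` is enabled.
        input_mult (`float`, *optional*, defaults to 1.0):
            Input multiplier applied when `use_mup` is enabled.
        mup_base_width (`int`, *optional*, defaults to 256):
            Base model width that muP scaling is computed relative to.
        enable_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use FlashAttention for the attention computation when it is available.
        use_RMSNorm (`bool`, *optional*, defaults to `False`):
            Whether to use RMSNorm instead of LayerNorm for the normalization layers.
        add_bias_linear (`bool`, *optional*, defaults to `True`):
            Whether the linear (projection) layers include bias terms.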
|
    Example:

    ```python
    >>> from configuration_telechat import TELECHATConfig
    >>> from modeling_telechat import TELECHAT

    >>> # Initializing a TELECHAT configuration
    >>> configuration = TELECHATConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = TELECHAT(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "telechat"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
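    # The mapping above lets generic lookups such as `config.hidden_size` and
    # `config.num_hidden_layers` resolve to the GPT-2-style attributes `n_embd`
    # and `n_layer` defined in `__init__` below.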
|
|
|
    def __init__(
        self,
        vocab_size=80000,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=None,
        eos_token_id=None,
        sep_token_id=None,
        pad_token_id=None,
        unk_token_id=None,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        relative_encoding=None,
        rotary_theta=10000,
        rotary_use_xpos=True,
        rotary_xpos_scale_base=512,
        use_mup=False,
        mup_scale_factor=1.0,
        output_mult=1.0,
        input_mult=1.0,
        mup_base_width=256,
        enable_flash_attn=True,
        use_RMSNorm=False,
        add_bias_linear=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.relative_encoding = relative_encoding
        self.use_RMSNorm = use_RMSNorm
        self.add_bias_linear = add_bias_linear

        self.rotary_theta = rotary_theta
        self.rotary_use_xpos = rotary_use_xpos
        self.rotary_xpos_scale_base = rotary_xpos_scale_base

        self.use_mup = use_mup
        self.mup_scale_factor = mup_scale_factor
        self.output_mult = output_mult
        self.input_mult = input_mult
        self.mup_base_width = mup_base_width

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.unk_token_id = unk_token_id
        self.sep_token_id = sep_token_id
        self.pad_token_id = pad_token_id

        self.enable_flash_attn = enable_flash_attn

        self.architectures = ["TELECHAT"]
        self.auto_map = {
            "AutoConfig": "configuration_telechat.TELECHATConfig",
            "AutoModel": "modeling_telechat.TELECHAT",
            "AutoModelForCausalLM": "modeling_telechat.TELECHAT",
        }

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            pad_token_id=pad_token_id,
            **kwargs,
        )
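

# A minimal usage sketch, assuming this file sits next to `modeling_telechat.py` inside a
# model repository (the checkpoint path below is a placeholder). The `auto_map` set in
# `__init__` above is what lets the generic Auto classes resolve to these custom classes
# when the checkpoint is loaded with `trust_remote_code=True`:
#
#     from transformers import AutoConfig, AutoModelForCausalLM
#
#     config = AutoConfig.from_pretrained("path/to/telechat-checkpoint", trust_remote_code=True)
#     model = AutoModelForCausalLM.from_pretrained(
#         "path/to/telechat-checkpoint", trust_remote_code=True
#     )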
|
|