Hzfinfdu committed
Commit 20ca061
1 Parent(s): c02b0f5

Upload configuration_moss.py with huggingface_hub

Files changed (1):
  1. configuration_moss.py +94 -0
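The commit message says the file was pushed with the huggingface_hub client. A minimal sketch of such an upload is shown below; the repo id is a placeholder, not taken from this page, and the actual call used for this commit is not shown in the diff.

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Push the configuration file to a model repo; "user/moss-model" is a placeholder repo id.
api.upload_file(
    path_or_fileobj="configuration_moss.py",
    path_in_repo="configuration_moss.py",
    repo_id="user/moss-model",
    commit_message="Upload configuration_moss.py with huggingface_hub",
)
```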
configuration_moss.py ADDED
@@ -0,0 +1,94 @@
+ """ Moss model configuration"""
+
+ from transformers.utils import logging
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class MossConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MossModel`]. It is used to instantiate a
+     Moss model according to the specified arguments, defining the model architecture. Configuration objects
+     inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from
+     [`PretrainedConfig`] for more information.
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 92494):
+             Vocabulary size of the Moss model. Defines the number of different tokens that can be represented by
+             the `input_ids` passed when calling [`MossModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something
+             large just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the RMS normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the input and output word embeddings.
+     Example:
+
+     ```python
+     >>> from transformers import MossModel, MossConfig
+
+     >>> # Initializing a Moss-7b style configuration
+     >>> configuration = MossConfig()
+
+     >>> # Initializing a model from the Moss-7b style configuration
+     >>> model = MossModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+     model_type = "moss"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=92494,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-05,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
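Since `MossConfig` inherits from `PretrainedConfig`, it can be instantiated with field overrides and round-tripped through a `config.json` like any other transformers configuration. A minimal sketch follows, assuming `configuration_moss.py` is importable from the working directory; the override values and the output directory name are illustrative only and do not correspond to any official Moss variant.

```python
from configuration_moss import MossConfig  # the file added in this commit

# Defaults from the class: vocab_size=92494, hidden_size=4096, 32 layers, 32 heads.
config = MossConfig()
print(config.hidden_size // config.num_attention_heads)  # per-head dimension: 128

# Overriding a few fields (illustrative values only).
small = MossConfig(
    hidden_size=1024,
    intermediate_size=2816,
    num_hidden_layers=8,
    num_attention_heads=8,
)

# PretrainedConfig provides JSON serialization out of the box.
small.save_pretrained("moss-small")             # writes moss-small/config.json
reloaded = MossConfig.from_pretrained("moss-small")
assert reloaded.num_hidden_layers == 8
```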