Yixin Song committed
Commit 16eb470
1 Parent(s): e181620

add model weight

added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|im_end|>": 57000,
+   "<|im_start|>": 57001
+ }
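
added_tokens.json appends the two ChatML-style markers after the base SentencePiece vocabulary. A minimal sketch of checking that they resolve to the IDs above once the tokenizer files from this commit are loaded (the local path is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder: a local checkout of this repository.
tok = AutoTokenizer.from_pretrained("./SuperSparseMixtral")

# The two added markers should map to the IDs recorded in added_tokens.json.
print(tok.convert_tokens_to_ids("<|im_end|>"))    # 57000
print(tok.convert_tokens_to_ids("<|im_start|>"))  # 57001
```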
configuration_supersparsemixtral.py ADDED
@@ -0,0 +1,170 @@
+ # coding=utf-8
+ # Copyright 2023 Mistral AI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ This model config is based on the Mistral config """
+ """ Viola model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+
+ logger = logging.get_logger(__name__)
+
+ class SuperSparseMixtralConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate a
+     Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a configuration similar to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1.
+
+     [mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B)
+     [mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1)
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`MixtralModel`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 14336):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 8):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+             The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
+             allows sequences of up to 4096*32 tokens.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-05):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             The id of the padding token.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             The id of the "beginning-of-sequence" token.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             The id of the "end-of-sequence" token.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 1000000.0):
+             The base period of the RoPE embeddings.
+         sliding_window (`int`, *optional*):
+             Sliding window attention window size. If not specified, will default to `4096`.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         num_experts_per_tok (`int`, *optional*, defaults to 2):
+             The number of experts to route per token; can also be interpreted as the `top-k` routing
+             parameter.
+         num_local_experts (`int`, *optional*, defaults to 8):
+             Number of experts per Sparse MLP layer.
+         output_router_logits (`bool`, *optional*, defaults to `False`):
+             Whether or not the router logits should be returned by the model. Enabling this will also
+             allow the model to output the auxiliary loss. See [here]() for more details
+         router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
+             The aux loss factor for the total loss.
+         router_jitter_noise (`float`, *optional*, defaults to 0.0):
+             Amount of noise to add to the router.
+
+     ```python
+     >>> from transformers import MixtralModel, MixtralConfig
+
+     >>> # Initializing a Mixtral 7B style configuration
+     >>> configuration = MixtralConfig()
+
+     >>> # Initializing a model from the Mixtral 7B style configuration
+     >>> model = MixtralModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "mixtral"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=4096,
+         intermediate_size=14336,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=8,
+         hidden_act="silu",
+         max_position_embeddings=4096 * 32,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         rope_theta=1e6,
+         sliding_window=None,
+         attention_dropout=0.0,
+         num_experts_per_tok=2,
+         num_local_experts=8,
+         output_router_logits=False,
+         router_aux_loss_coef=0.001,
+         router_jitter_noise=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.sliding_window = sliding_window
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.attention_dropout = attention_dropout
+
+         self.num_experts_per_tok = num_experts_per_tok
+         self.num_local_experts = num_local_experts
+         self.output_router_logits = output_router_logits
+         self.router_aux_loss_coef = router_aux_loss_coef
+         self.router_jitter_noise = router_jitter_noise
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
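
Since the configuration class ships with the repository rather than with transformers, a quick way to inspect the defaults is to import it directly from a local checkout. A minimal sketch (loading the full model would additionally go through modeling_supersparsemixtral.py and, via the Auto classes, trust_remote_code=True):

```python
# Run from a local checkout of this repository so the module is importable.
from configuration_supersparsemixtral import SuperSparseMixtralConfig

config = SuperSparseMixtralConfig()       # Mixtral-8x7B-style defaults
print(config.num_local_experts)           # 8 experts per sparse MLP layer
print(config.num_experts_per_tok)         # 2 (top-k routing)
print(config.max_position_embeddings)     # 131072 == 4096 * 32

# Any field can be overridden through the constructor, e.g. a small debug config:
tiny = SuperSparseMixtralConfig(
    hidden_size=512,
    intermediate_size=1024,
    num_hidden_layers=4,
    num_local_experts=4,
)
```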
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.41.0",
+   "use_cache": false
+ }
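
generation_config.json carries the defaults transformers applies at generate() time; it keeps the base bos/eos IDs (1 and 2) and disables the KV cache. A minimal sketch of reading it back (the local path is a placeholder):

```python
from transformers import GenerationConfig

# Placeholder: a local checkout of this repository.
gen_cfg = GenerationConfig.from_pretrained("./SuperSparseMixtral")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.use_cache)  # 1 2 False
```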
model-00001-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:275d158ca62b7de17092c313f92bc9d8da64b4cb28d3cd54bb6159c15329131f
+ size 4991378288
model-00002-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a22d2c5771e434a27c9b79fcb5bed89a458ec5f5e5792113b69def3e6abc6f3
+ size 4984497264
model-00003-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e72d9c9c4aa4dcb78f4c85579700514e54154becd1fa78453c001d813ef87c29
+ size 4995589920
model-00004-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db5b5de815ba1e224a31753cd27d053575ff2436ecce5aa1884437f4b9d80f44
+ size 4966229656
model-00005-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8ae778b31a16ae12b8e32f1b5b873e8ce3451232ee99da9044f7dbcb67c833d
+ size 4920009856
model-00006-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cf9362792a224329239fa3533dab46868a6ed266295047f19df7a806f079ca1
+ size 4966229656
model-00007-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:071ae55b8f789b3ad58da8a26c8da3f76d7b9a1c776fffc0d6baadd313d9753a
+ size 4999701512
model-00008-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7dfd4a9c3de39e22941838a70806e16fe6e676b817c80eb885c6418d1bb3ddc9
+ size 4886538096
model-00009-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86d57916b06a5ca45e050e69cd96f8423d09dcecf27f8546419a568bba022fb3
+ size 4896416680
model-00010-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94fb70ea45ff2ab501a90140fd9be95a3a3b0c4b3e5e577d89be3fd4549e0010
+ size 4970948320
model-00011-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a19047bfaa4a760f871ecbc34f2a48d6810231534264be4ac88b7a22d2da619d
+ size 4920009928
model-00012-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b360d6d76ef9c62ace02d58b9eb3cec124bf52ee4304054e093d09e4c037df9
+ size 4966229728
model-00013-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11941794d3cbcc1c5c5fb6b0bdeb049d071ba78a2d4fec4c2f305f28a08d2098
+ size 4999701552
model-00014-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cef345d73c5519e8acdbf78f5cdbc2cfa816bfe3ab127919a987281bc660f028
+ size 4886538096
model-00015-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83cccb41ba88c635f3103c4e35c33901c9f7d5d70af200ba86683d96b73a25fc
+ size 4966229736
model-00016-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c4484f8f8b447dff514da4ca15e4742f9aec2ce576ff6bacf8dac399b84d338
+ size 4999701552
model-00017-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c9a4dc6bd7aed888a65fcf9ad83d8c6b697b749c26c0250f4c0ce0234f312ea
+ size 4886538104
model-00018-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db323e0560c384d1571f928d6bed81be23523e1673e56fee6589f6e8ccf5b8a9
+ size 4894057384
model-00019-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4d3c56e39e95d06f363d2c4dfb45b6f9c95f28fbf36b5033879069726328e41
+ size 4994541288
model-00020-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c7f0a0727c932355172137820b5fc242bb6d1e61ef2865a0454916bdd9d81c3
+ size 4950680776
model-00021-of-00021.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:581202d994dd4a89033ccaa5a38bf3e1157d514687be26284eeee35f242fe28b
+ size 4550934408
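
Each of the 21 shard entries above is a Git LFS pointer rather than the weights themselves: a spec version, a sha256 oid, and a byte size. A minimal sketch of verifying a downloaded shard against its pointer (values copied from model-00021-of-00021.safetensors above):

```python
import hashlib
import os


def matches_lfs_pointer(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256


print(matches_lfs_pointer(
    "model-00021-of-00021.safetensors",
    "581202d994dd4a89033ccaa5a38bf3e1157d514687be26284eeee35f242fe28b",
    4550934408,
))
```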
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
modeling_supersparsemixtral.py ADDED
The diff for this file is too large to render. See raw diff
 
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e53953f87fd01194d381ec675b068b3ab8ad821b92137fa8d05dcadb5cc7d7a
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>"
+   ],
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86840d604f9e18ebbdc35aa937cfc2486fe774534ceea0fd3f667a72bc7584b2
+ size 925420
tokenizer_config.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "57000": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "57001": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>"
+   ],
+   "bos_token": "<s>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\\n' + system_message + '<|im_end|>\\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "model_max_length": 32768,
+   "pad_token": "</s>",
+   "padding_side": "right",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "split_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
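
The chat_template above is ChatML-style: an optional system turn, <|im_start|>user ... <|im_end|> blocks, and an assistant turn that the template itself opens after every user message (the <|im_end|> marker also serves as the eos token). A minimal sketch of rendering a prompt with it (the local path is a placeholder):

```python
from transformers import AutoTokenizer

# Placeholder: a local checkout of this repository.
tok = AutoTokenizer.from_pretrained("./SuperSparseMixtral")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant
```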