Tamnemtf committed
Commit 1f30b14
1 Parent(s): 4cfe6fb

Upload MistralForCausalLM

config.json CHANGED
@@ -1,28 +1,26 @@
 {
-  "_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
+  "_name_or_path": "Viet-Mistral/Vistral-7B-Chat",
   "architectures": [
-    "LlamaForCausalLM"
+    "MistralForCausalLM"
   ],
-  "attention_bias": false,
   "attention_dropout": 0.0,
   "bos_token_id": 1,
   "eos_token_id": 2,
   "hidden_act": "silu",
   "hidden_size": 4096,
   "initializer_range": 0.02,
-  "intermediate_size": 11008,
-  "max_position_embeddings": 4096,
-  "model_type": "llama",
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
   "num_attention_heads": 32,
   "num_hidden_layers": 32,
-  "num_key_value_heads": 32,
-  "pretraining_tp": 1,
+  "num_key_value_heads": 8,
   "rms_norm_eps": 1e-05,
-  "rope_scaling": null,
   "rope_theta": 10000.0,
+  "sliding_window": 4096,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
   "transformers_version": "4.37.2",
   "use_cache": true,
-  "vocab_size": 32000
+  "vocab_size": 38369
 }
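
The change above swaps the base config from a Llama 2 checkpoint to the Mistral architecture of Viet-Mistral/Vistral-7B-Chat: grouped-query attention (8 KV heads), a 32768-token position limit with a 4096-token sliding window, a 14336-wide MLP, and an extended 38369-entry vocabulary. A minimal loading sketch with transformers; the repo id below is a placeholder, not something stated in this diff:

# Minimal sketch: load the checkpoint as it exists after this commit.
# "your-username/your-repo" is a placeholder repo id (assumption).
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/your-repo"

config = AutoConfig.from_pretrained(repo_id)
assert config.model_type == "mistral"       # was "llama" before this commit
assert config.num_key_value_heads == 8      # grouped-query attention
assert config.vocab_size == 38369           # extended vocabulary

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,  # matches "torch_dtype": "float16" in config.json
)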
generation_config.json CHANGED
@@ -1,10 +1,7 @@
 {
+  "_from_model_config": true,
   "bos_token_id": 1,
-  "do_sample": true,
   "eos_token_id": 2,
-  "max_length": 4096,
-  "pad_token_id": 0,
-  "temperature": 0.6,
-  "top_p": 0.9,
-  "transformers_version": "4.37.2"
+  "transformers_version": "4.37.2",
+  "use_cache": false
 }
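
The new generation_config.json drops the sampling defaults (do_sample, temperature, top_p, max_length, pad_token_id) and marks itself _from_model_config, so sampled decoding now has to be requested explicitly at generate() time. A minimal sketch under the same placeholder repo id assumption:

# Minimal sketch: the pruned generation_config.json no longer carries sampling
# defaults, so the caller supplies them when generating.
# "your-username/your-repo" is a placeholder repo id (assumption).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

repo_id = "your-username/your-repo"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16)

gen_config = GenerationConfig.from_pretrained(repo_id)
print(gen_config.do_sample)  # False after this commit; "do_sample": true was removed

inputs = tokenizer("Xin chào", return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,      # previously a repo-level default
    temperature=0.6,     # previously "temperature": 0.6
    top_p=0.9,           # previously "top_p": 0.9
    max_new_tokens=256,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))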
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fa5b958e69193e8ddd6079344f728d1cb370809beff022ed61a1bfe37ea0b134
-size 4938985248
+oid sha256:76a179e74c9bd6cb72791a55cc985ca323e73a1408d65e426ded2b44f6ad809c
+size 4995337088
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e2091994e27cfd683bad1aa18a67b25db35bdb06f18201f66669f7ab2ff2881c
-size 4947390768
+oid sha256:d8160f2ba59a21443b6041e34075a53fe45202b28dfa223456fb38c2b932dfb9
+size 4999819232
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7b00db1905ff1b05dac589b377fdfe76f19f06a013d1ad5076a5e112b7eea82
-size 3590488736
+oid sha256:6dc632ed601eb7b77793b747ef61bfba35f1638cf3fab0c2638512da530204f7
+size 4592691112
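
Each .safetensors entry is a Git LFS pointer, so only the sha256 oid and the byte size change when a shard is replaced. A minimal verification sketch, using the new pointer values for model-00001-of-00003.safetensors and an assumed local download path:

# Minimal sketch: verify a downloaded shard against its Git LFS pointer
# (oid = sha256 of the file contents, size = byte length).
import hashlib
import os

shard_path = "model-00001-of-00003.safetensors"  # assumed local path
expected_oid = "76a179e74c9bd6cb72791a55cc985ca323e73a1408d65e426ded2b44f6ad809c"
expected_size = 4995337088

assert os.path.getsize(shard_path) == expected_size

sha = hashlib.sha256()
with open(shard_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid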
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 13476831232
+    "total_size": 14587813888
   },
   "weight_map": {
     "lm_head.weight": "model-00003-of-00003.safetensors",
@@ -23,24 +23,24 @@
     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.input_layernorm.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
-    "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
@@ -140,24 +140,24 @@
     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.input_layernorm.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
-    "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
-    "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
-    "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",