Upload model
- config.json +4 -33
- model-00001.safetensors +2 -2
- model-00002.safetensors +2 -2
- model.safetensors.index.json +43 -43
- special_tokens_map.json +1 -2
- tokenizer.json +2 -2
- tokenizer_config.json +2 -3
config.json CHANGED
@@ -5,8 +5,8 @@
   ],
   "attention_bias": false,
   "attention_dropout": 0.0,
-  "bos_token_id":
-  "eos_token_id":
+  "bos_token_id": 65536,
+  "eos_token_id": 65537,
   "head_dim": 128,
   "hidden_act": "silu",
   "hidden_size": 4096,
@@ -19,35 +19,6 @@
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
   "pretraining_tp": 1,
-  "quantization_config": {
-    "config_groups": {
-      "group_0": {
-        "input_activations": null,
-        "output_activations": null,
-        "targets": [
-          "Linear"
-        ],
-        "weights": {
-          "actorder": null,
-          "block_structure": null,
-          "dynamic": false,
-          "group_size": null,
-          "num_bits": 8,
-          "observer": "minmax",
-          "observer_kwargs": {},
-          "strategy": "channel",
-          "symmetric": true,
-          "type": "int"
-        }
-      }
-    },
-    "format": "pack-quantized",
-    "global_compression_ratio": 1.4619484106630287,
-    "ignore": [],
-    "kv_cache_scheme": null,
-    "quant_method": "compressed-tensors",
-    "quantization_status": "compressed"
-  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
     "factor": 8.0,
@@ -61,5 +32,5 @@
   "torch_dtype": "float32",
   "transformers_version": "4.46.0.dev0",
   "use_cache": true,
-  "vocab_size":
-}
+  "vocab_size": 65538
+}
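For a quick sanity check of the updated config, the new token ids and vocabulary size can be read back with `transformers`, and the removed `quantization_config` block should no longer be present. A minimal sketch, assuming `transformers` is installed and using a hypothetical repo id (`your-org/your-model`) in place of the real one:

```python
from transformers import AutoConfig

# Hypothetical repo id; substitute the actual model repository.
config = AutoConfig.from_pretrained("your-org/your-model")

# Values introduced by this commit.
assert config.bos_token_id == 65536
assert config.eos_token_id == 65537
assert config.vocab_size == 65538

# The quantization_config block was removed in this commit, so it should be absent.
print(getattr(config, "quantization_config", None))  # expected: None
```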
model-00001.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:644c7e8c9d89b656ed00686ffb9c0777f52cf7f728cecaa6c944682a92b4804b
+size 2127429138
model-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2403e31af850bba0af949ad5113fed4ca735f791ace8d28c82dfca0500b075ad
+size 1442447190
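Both weight shards are tracked with Git LFS, so the repository stores only pointer files (version line, `oid sha256:`, `size`). A minimal sketch of how locally downloaded shards could be verified against the pointers above, assuming the files sit in the current directory:

```python
import hashlib
import os

# sha256 digests and byte sizes taken from the LFS pointer files in this commit.
expected = {
    "model-00001.safetensors": ("644c7e8c9d89b656ed00686ffb9c0777f52cf7f728cecaa6c944682a92b4804b", 2127429138),
    "model-00002.safetensors": ("2403e31af850bba0af949ad5113fed4ca735f791ace8d28c82dfca0500b075ad", 1442447190),
}

for name, (oid, size) in expected.items():
    assert os.path.getsize(name) == size, f"{name}: unexpected size"
    digest = hashlib.sha256()
    with open(name, "rb") as f:
        # Hash the file in 1 MiB chunks to keep memory use flat.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    assert digest.hexdigest() == oid, f"{name}: checksum mismatch"
    print(f"{name}: OK")
```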
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size":
+    "total_size": 3569631824
   },
   "weight_map": {
     "model.layers.0.self_attn.q_proj.weight.shape": "model-00001.safetensors",
@@ -873,6 +873,20 @@
     "model.layers.29.mlp.up_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
     "model.layers.29.mlp.down_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
     "model.layers.29.mlp.down_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.q_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.q_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.k_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.k_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.v_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.v_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.o_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.self_attn.o_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.mlp.gate_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.mlp.gate_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.mlp.up_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.mlp.up_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
+    "model.layers.30.mlp.down_proj.weight.gptq_4_bit_weight_packed.dtype": "model-00001.safetensors",
+    "model.layers.30.mlp.down_proj.weight.gptq_4_bit_scale.dtype": "model-00001.safetensors",
     "model.embed_tokens.weight.dtype": "model-00001.safetensors",
     "model.layers.0.input_layernorm.weight.dtype": "model-00001.safetensors",
     "model.layers.0.post_attention_layernorm.weight.dtype": "model-00001.safetensors",
@@ -934,20 +948,6 @@
     "model.layers.28.post_attention_layernorm.weight.dtype": "model-00001.safetensors",
     "model.layers.29.input_layernorm.weight.dtype": "model-00001.safetensors",
     "model.layers.29.post_attention_layernorm.weight.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.q_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.q_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.k_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.k_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.v_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.v_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.o_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.self_attn.o_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
-    "model.layers.30.mlp.gate_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.mlp.gate_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
-    "model.layers.30.mlp.up_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.mlp.up_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
-    "model.layers.30.mlp.down_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
-    "model.layers.30.mlp.down_proj.weight.gptq_8_bit_scale.dtype": "model-00001.safetensors",
     "model.layers.30.input_layernorm.weight.dtype": "model-00001.safetensors",
     "model.layers.30.post_attention_layernorm.weight.dtype": "model-00001.safetensors",
     "model.layers.31.self_attn.q_proj.weight.gptq_8_bit_weight_packed.dtype": "model-00001.safetensors",
@@ -1392,6 +1392,20 @@
     "model.layers.29.mlp.up_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
     "model.layers.29.mlp.down_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
     "model.layers.29.mlp.down_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.q_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.q_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.k_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.k_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.v_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.v_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.o_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.self_attn.o_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.mlp.gate_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.mlp.gate_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.mlp.up_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.mlp.up_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
+    "model.layers.30.mlp.down_proj.weight.gptq_4_bit_weight_packed.shape": "model-00001.safetensors",
+    "model.layers.30.mlp.down_proj.weight.gptq_4_bit_scale.shape": "model-00001.safetensors",
     "model.layers.0.input_layernorm.weight.shape": "model-00001.safetensors",
     "model.layers.0.post_attention_layernorm.weight.shape": "model-00001.safetensors",
     "model.layers.1.input_layernorm.weight.shape": "model-00001.safetensors",
@@ -1452,20 +1466,6 @@
     "model.layers.28.post_attention_layernorm.weight.shape": "model-00001.safetensors",
     "model.layers.29.input_layernorm.weight.shape": "model-00001.safetensors",
     "model.layers.29.post_attention_layernorm.weight.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.q_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.q_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.k_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.k_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.v_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.v_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.o_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.self_attn.o_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
-    "model.layers.30.mlp.gate_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.mlp.gate_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
-    "model.layers.30.mlp.up_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.mlp.up_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
-    "model.layers.30.mlp.down_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
-    "model.layers.30.mlp.down_proj.weight.gptq_8_bit_scale.shape": "model-00001.safetensors",
     "model.layers.30.input_layernorm.weight.shape": "model-00001.safetensors",
     "model.layers.30.post_attention_layernorm.weight.shape": "model-00001.safetensors",
     "model.layers.31.self_attn.q_proj.weight.gptq_8_bit_weight_packed.shape": "model-00001.safetensors",
@@ -1913,6 +1913,20 @@
     "model.layers.29.mlp.up_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
     "model.layers.29.mlp.down_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
     "model.layers.29.mlp.down_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.q_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.q_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.k_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.k_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.v_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.v_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.o_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.self_attn.o_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.mlp.gate_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.mlp.gate_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.mlp.up_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.mlp.up_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
+    "model.layers.30.mlp.down_proj.weight.gptq_4_bit_weight_packed.compressed": "model-00002.safetensors",
+    "model.layers.30.mlp.down_proj.weight.gptq_4_bit_scale.compressed": "model-00002.safetensors",
     "model.layers.0.input_layernorm.weight.compressed": "model-00002.safetensors",
     "model.layers.0.post_attention_layernorm.weight.compressed": "model-00002.safetensors",
     "model.layers.1.input_layernorm.weight.compressed": "model-00002.safetensors",
@@ -1973,20 +1987,6 @@
     "model.layers.28.post_attention_layernorm.weight.compressed": "model-00002.safetensors",
     "model.layers.29.input_layernorm.weight.compressed": "model-00002.safetensors",
     "model.layers.29.post_attention_layernorm.weight.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.q_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.q_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.k_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.k_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.v_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.v_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.o_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.self_attn.o_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
-    "model.layers.30.mlp.gate_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.mlp.gate_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
-    "model.layers.30.mlp.up_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.mlp.up_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
-    "model.layers.30.mlp.down_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
-    "model.layers.30.mlp.down_proj.weight.gptq_8_bit_scale.compressed": "model-00002.safetensors",
     "model.layers.30.input_layernorm.weight.compressed": "model-00002.safetensors",
     "model.layers.30.post_attention_layernorm.weight.compressed": "model-00002.safetensors",
     "model.layers.31.self_attn.q_proj.weight.gptq_8_bit_weight_packed.compressed": "model-00002.safetensors",
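The index maps every stored tensor entry to its shard, and this commit renames the layer-30 entries from the gptq_8_bit_* naming to gptq_4_bit_* while updating the metadata total_size. A minimal sketch for inspecting the updated index locally, assuming model.safetensors.index.json is in the current directory:

```python
import json
from collections import Counter

# Read the shard index shipped in this commit.
with open("model.safetensors.index.json") as f:
    index = json.load(f)

print("total_size:", index["metadata"]["total_size"])  # 3569631824 in this commit

# Count how many entries point at each shard file.
for shard, count in sorted(Counter(index["weight_map"].values()).items()):
    print(f"{shard}: {count} entries")

# Layer-30 entries should now use the gptq_4_bit_* naming, not gptq_8_bit_*.
layer30 = [k for k in index["weight_map"] if k.startswith("model.layers.30.")]
assert not any("gptq_8_bit" in k for k in layer30)
```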
special_tokens_map.json CHANGED
@@ -12,6 +12,5 @@
     "normalized": false,
     "rstrip": false,
     "single_word": false
-  },
-  "pad_token": "<|end_of_text|>"
+  }
 }
tokenizer.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5916f74b4ae404700bdca21094462c34ad42a61c06e79dee8f203b34b6e9bf31
+size 8803976
tokenizer_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "added_tokens_decoder": {
-    "
+    "65538": {
       "content": "<|begin_of_text|>",
       "lstrip": false,
       "normalized": false,
@@ -8,7 +8,7 @@
       "single_word": false,
       "special": true
     },
-    "
+    "65539": {
       "content": "<|end_of_text|>",
       "lstrip": false,
       "normalized": false,
@@ -25,6 +25,5 @@
     "attention_mask"
   ],
   "model_max_length": 131072,
-  "pad_token": "<|end_of_text|>",
   "tokenizer_class": "PreTrainedTokenizerFast"
 }
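This commit moves the added_tokens_decoder keys to 65538 (<|begin_of_text|>) and 65539 (<|end_of_text|>) and drops the explicit pad_token, while config.json declares bos_token_id 65536 and eos_token_id 65537. A minimal sketch for comparing how the tokenizer and config resolve these ids after the update, using the same hypothetical repo id as above:

```python
from transformers import AutoConfig, AutoTokenizer

repo = "your-org/your-model"  # hypothetical repo id; substitute the real repository
config = AutoConfig.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(repo)

# Ids of the special tokens as the tokenizer resolves them.
print("<|begin_of_text|> ->", tokenizer.convert_tokens_to_ids("<|begin_of_text|>"))
print("<|end_of_text|>  ->", tokenizer.convert_tokens_to_ids("<|end_of_text|>"))

# Ids declared in config.json by this commit.
print("config bos/eos:", config.bos_token_id, config.eos_token_id)

# pad_token was removed from tokenizer_config.json and special_tokens_map.json.
print("pad_token:", tokenizer.pad_token)  # expected: None after this commit
```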