vxbrandon committed on
Commit 831e0df
1 Parent(s): 03bfd9b

Training in progress, step 500

README.md ADDED
@@ -0,0 +1,71 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: Mistral_Sparse_refined_web_70p_2024-02-27
+   results: []
+ ---
+ 
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+ 
+ # Mistral_Sparse_refined_web_70p_2024-02-27
+ 
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.2339
+ 
+ ## Model description
+ 
+ More information needed
+ 
+ ## Intended uses & limitations
+ 
+ More information needed
+ 
+ ## Training and evaluation data
+ 
+ More information needed
+ 
+ ## Training procedure
+ 
+ ### Training hyperparameters
+ 
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 0
+ - distributed_type: multi-GPU
+ - num_devices: 4
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 16
+ - total_eval_batch_size: 4
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - training_steps: 250
+ 
+ ### Training results
+ 
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 2.7195        | 0.0   | 25   | 2.8171          |
+ | 2.4252        | 0.01  | 50   | 2.6962          |
+ | 2.4132        | 0.01  | 75   | 2.6180          |
+ | 2.3614        | 0.02  | 100  | 2.5748          |
+ | 2.3289        | 0.02  | 125  | 2.5433          |
+ | 2.4043        | 0.02  | 150  | 2.5219          |
+ | 2.3267        | 0.03  | 175  | 2.5078          |
+ | 2.3059        | 0.03  | 200  | 2.4980          |
+ | 2.3307        | 0.04  | 225  | 2.4895          |
+ | 2.3628        | 0.04  | 250  | 2.4807          |
+ 
+ 
+ ### Framework versions
+ 
+ - Transformers 4.36.2
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
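
The card has no usage section yet; as a hedged sketch (editorial, not part of the commit), loading this checkpoint would look like the following. The repo id is assumed from the model name, and `trust_remote_code=True` is needed because config.json maps the architecture to classes in `sparsification_sftt.py`.

```python
# Hedged sketch: load the sparse checkpoint together with its custom modeling code.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "vxbrandon/Mistral_Sparse_refined_web_70p_2024-02-27"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    trust_remote_code=True,  # pulls SparseMistralforCausalLM from sparsification_sftt.py
    torch_dtype="auto",      # config stores bfloat16 weights
)
out = model.generate(**tokenizer("Hello", return_tensors="pt"), max_new_tokens=8)
print(tokenizer.decode(out[0]))
```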
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "gate_proj",
+     "up_proj",
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
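
For reference, the same adapter setup can be expressed directly with peft's API; a minimal sketch with the field values copied from the JSON above:

```python
# Hedged sketch: the LoRA configuration above, reconstructed with peft.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,            # LoRA rank
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    target_modules=["down_proj", "gate_proj", "up_proj", "q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```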
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aca4b774005d18574ff9e80a4c22eee908c37be7ae801134e8b2484ca8afbd54
+ size 281061608
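
This file is a Git LFS pointer: the ~281 MB adapter weights live in LFS storage and are identified by the sha256 oid. A hedged sketch of fetching the real file from the Hub and checking it against the oid (repo id assumed):

```python
# Hedged sketch: download the actual file and verify it against the pointer's oid.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    "vxbrandon/Mistral_Sparse_refined_web_70p_2024-02-27",  # assumed repo id
    "adapter_model.safetensors",
)
with open(path, "rb") as f:
    print(hashlib.sha256(f.read()).hexdigest())  # should equal the sha256 oid above
```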
config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "architectures": [
+     "SparseMistralforCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "sparsification_sftt.SparseMistralConfig",
+     "AutoModelForCausalLM": "sparsification_sftt.SparseMistralforCausalLM"
+   },
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "sparse_mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "thresholds": [
+     0.027081236243247986,
+     0.037111327052116394,
+     0.07923770695924759,
+     0.07923770695924759,
+     0.09327983111143112,
+     0.10732196271419525,
+     0.11133399605751038,
+     0.11735205352306366,
+     0.12136408686637878,
+     0.12136408686637878,
+     0.13340020179748535,
+     0.13540621101856232,
+     0.14543630182743073,
+     0.14744232594966888,
+     0.15546639263629913,
+     0.1735205501317978,
+     0.19558675587177277,
+     0.21765294671058655,
+     0.22968906164169312,
+     0.24373118579387665,
+     0.24774321913719177,
+     0.2577733099460602,
+     0.2577733099460602,
+     0.26379138231277466,
+     0.26379138231277466,
+     0.26379138231277466,
+     0.26379138231277466,
+     0.26579737663269043,
+     0.26579737663269043,
+     0.26980942487716675,
+     0.2838515639305115,
+     0.5145436525344849
+   ],
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.36.2",
+   "use_cache": false,
+   "use_relu": false,
+   "use_sparse_model": true,
+   "use_sparse_predictor": false,
+   "use_sparse_regularization": false,
+   "vocab_size": 32000
+ }
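
The `thresholds` list holds one dead threshold per decoder layer (32 entries, generally growing with depth); `SparseMistralforCausalLM` in sparsification_sftt.py applies them at load time. A hedged way to inspect these settings without downloading weights (repo id assumed):

```python
# Hedged sketch: read the sparsity settings from the config alone.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(
    "vxbrandon/Mistral_Sparse_refined_web_70p_2024-02-27",  # assumed repo id
    trust_remote_code=True,
)
print(cfg.use_sparse_model, len(cfg.thresholds))  # True, 32 (one per layer)
```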
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.36.2"
+ }
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e0392d4524168100070adc76133b2c83d49b7a1c611d70b3eb3ea880bd78e564
+ size 4943163992
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ebac2d36e99aae45dd4f083ce112fbcdca58d4cb0bbff6d8235fad84dcf6c0d
+ size 4999821144
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be6d87ea341714a53d7a78167465e28add7c2f4deb659f6935d6ce999594d638
+ size 4540517840
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483464192
+   },
+   "weight_map": {
+     "base_model.model.lm_head.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "base_model.model.model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "base_model.model.model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "base_model.model.model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
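
The index maps each parameter name to the shard that stores it, so a reader can pull a single tensor without loading all three files. A minimal sketch, assuming the shards sit in the current directory:

```python
# Hedged sketch: resolve a tensor's shard via the index and read just that tensor.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "base_model.model.model.layers.0.mlp.gate_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00003.safetensors"
with safe_open(shard, framework="pt") as st:
    print(st.get_tensor(name).shape)
```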
sparsification_sftt.py ADDED
@@ -0,0 +1,962 @@
+ from transformers import TrainerCallback, Trainer
+ from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
+ from peft import PeftModel
+ from datasets import Dataset
+ from transformers.utils import is_sagemaker_mp_enabled, is_sagemaker_dp_enabled
+ from typing import Any, Dict, Union, Optional, Tuple
+ from torch.nn import MSELoss
+ 
+ import warnings
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import time
+ import os
+ import copy
+ 
+ from transformers.models.mistral.modeling_mistral import (
+     MistralMLP,
+     MistralAttention,
+     MistralModel,
+     MistralDecoderLayer,
+     MistralConfig,
+     MISTRAL_ATTENTION_CLASSES,
+     MistralRMSNorm,
+     MistralForCausalLM,
+ )
+ from experiments.models.sparse_mistral.svd_router import (
+     low_rank_approximation,
+     SparsePredictor,
+ )
+ from utils.utils import (
+     print_size_of_model,
+     is_running_deepspeed,
+     is_mainprocess,
+     get_datetime,
+     ds_print,
+ )
+ 
+ 
+ class SparseSFTTTrainer(SFTTrainer):
+     def __init__(self, *args, **kwargs):
+         self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
+         self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
+         self.use_spm_loss = False
+         self.freeze_original_weights = False
+         self.regularization_type = kwargs.pop(
+             "regularization_type", "L1 positive activation"
+         )
+         assert self.regularization_type in [
+             "L2 activation",
+             "L1 positive activation",
+         ], f"Invalid regularization type: {self.regularization_type}"
+         self.sparse_layers = []
+         self.sparse_decoder_layers = []
+         super(SparseSFTTTrainer, self).__init__(*args, **kwargs)
+ 
+     def initialize_sparse_silu_layers(self, model):
+         self.sparse_layers = [
+             m for m in model.modules() if isinstance(m, MistralSparseSiluMLP)
+         ]
+ 
+     def initialize_sparse_decoder_layers(self, model):
+         self.sparse_decoder_layers = [
+             m for m in model.modules() if isinstance(m, SparseMistralDecoderLayer)
+         ]
+ 
+     def training_step(
+         self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]
+     ) -> torch.Tensor:
+         """
+         Override Hugging Face's training_step function to add a regularization term.
+         The regularization term is computed from intermediate values, which are freed after `backward()`.
+         Set `retain_graph=True` inside the `backward` call to keep those values alive.
+         """
+         model.train()
+         inputs = self._prepare_inputs(inputs)
+ 
+         with self.compute_loss_context_manager():
+             loss = self.compute_loss(model, inputs)
+ 
+         if self.args.n_gpu > 1:
+             loss = loss.mean()  # mean() to average on multi-gpu parallel training
+         if not self.freeze_original_weights:
+             if loss is not None:
+                 self.accelerator.backward(loss, retain_graph=False)
+ 
+         if self.use_sparse_regularization:
+             regularization_loss = self.compute_regularization(model)
+             if self.args.n_gpu > 1:
+                 regularization_loss = regularization_loss.mean()
+             if regularization_loss is not None:
+                 self.accelerator.backward(regularization_loss, retain_graph=True)
+                 loss += regularization_loss
+ 
+         if self.use_spm_loss:
+             spm_loss = self.compute_spm_loss(model)
+             if self.args.n_gpu > 1:
+                 spm_loss = spm_loss.mean()
+             if spm_loss is not None:
+                 self.accelerator.backward(spm_loss, retain_graph=False)
+                 loss += spm_loss
+ 
+         return loss.detach() / self.args.gradient_accumulation_steps
+ 
+     def compute_regularization(self, model):
+         """
+         Compute a sparse regularization loss for SiLU
+         """
+         loss = 0
+         if len(self.sparse_layers) == 0:
+             self.initialize_sparse_silu_layers(model)
+         num_layers = len(self.sparse_layers)
+ 
+         for module in self.sparse_layers:
+             if module.activation_norm is not None:
+                 loss += module.activation_norm
+ 
+         loss /= num_layers
+         loss *= self.regularization_coefficient
+ 
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Negative regularizer loss: ", loss.item())
+         return loss
+ 
+     def compute_spm_loss(self, model):
+         loss = 0
+         if len(self.sparse_decoder_layers) == 0:
+             self.initialize_sparse_decoder_layers(model)
+         for module in self.sparse_decoder_layers:
+             if module.distill_loss is not None:
+                 loss += module.distill_loss
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Sparse Predictor Distillation loss: ", loss.item())
+         return loss
+ 
+     # def compute_loss(self, model, inputs, return_outputs=False):
+     #     loss = super().compute_loss(model, inputs, return_outputs)
+     #
+     #     if is_sagemaker_mp_enabled():
+     #         import smdistributed.modelparallel.torch as smp
+     #         @smp.step()
+     #         def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
+     #             outputs = model(**inputs)
+     #             loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
+     #             loss /= gradient_accumulation_steps
+     #             model.backward(loss)
+     #             return loss
+     #
+     #         loss_mb = smp_forward_backward(
+     #             model, inputs, self.args.gradient_accumulation_steps
+     #         )
+     #         if self.use_sparse_regularization:
+     #             return loss_mb.reduce_mean().detach().to(
+     #                 self.args.device
+     #             ) + self.regularization_coefficient * self.compute_regularization(model)
+     #         else:
+     #             return loss_mb.reduce_mean().detach().to(self)
+     #
+     #     if return_outputs:
+     #         classification_loss, outputs = loss
+     #     else:
+     #         classification_loss = loss
+     #
+     #     loss = classification_loss
+     #     if self.use_sparse_regularization:
+     #         regularization_loss = self.compute_regularization(model)
+     #         loss += self.regularization_coefficient * regularization_loss
+     #
+     #     return (loss, outputs) if return_outputs else loss
+ 
+ 
+ class SparseTrainer(Trainer):
+     def __init__(self, *args, **kwargs):
+         self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
+         self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
+         self.use_spm_loss = False
+         self.freeze_original_weights = False
+         self.regularization_type = kwargs.pop(
+             "regularization_type", "L1 positive activation"
+         )
+         assert self.regularization_type in [
+             "L2 activation",
+             "L1 positive activation",
+         ], f"Invalid regularization type: {self.regularization_type}"
+         self.sparse_layers = []
+         self.sparse_decoder_layers = []
+         super(SparseTrainer, self).__init__(*args, **kwargs)
+ 
+     def initialize_sparse_silu_layers(self, model):
+         self.sparse_layers = [
+             m for m in model.modules() if isinstance(m, MistralSparseSiluMLP)
+         ]
+ 
+     def initialize_sparse_decoder_layers(self, model):
+         self.sparse_decoder_layers = [
+             m for m in model.modules() if isinstance(m, SparseMistralDecoderLayer)
+         ]
+ 
+     def training_step(
+         self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]
+     ) -> torch.Tensor:
+         """
+         Override Hugging Face's training_step function to add a regularization term.
+         The regularization term is computed from intermediate values, which are freed after `backward()`.
+         Set `retain_graph=True` inside the `backward` call to keep those values alive.
+         """
+         model.train()
+         inputs = self._prepare_inputs(inputs)
+ 
+         with self.compute_loss_context_manager():
+             loss = self.compute_loss(model, inputs)
+ 
+         if self.args.n_gpu > 1:
+             loss = loss.mean()  # mean() to average on multi-gpu parallel training
+         if not self.freeze_original_weights:
+             if loss is not None:
+                 self.accelerator.backward(loss, retain_graph=False)
+ 
+         if self.use_sparse_regularization:
+             regularization_loss = self.compute_regularization(model)
+             if self.args.n_gpu > 1:
+                 regularization_loss = regularization_loss.mean()
+             if regularization_loss is not None:
+                 self.accelerator.backward(regularization_loss, retain_graph=True)
+                 loss += regularization_loss
+ 
+         if self.use_spm_loss:
+             spm_loss = self.compute_spm_loss(model)
+             if self.args.n_gpu > 1:
+                 spm_loss = spm_loss.mean()
+             if spm_loss is not None:
+                 self.accelerator.backward(spm_loss, retain_graph=False)
+                 loss += spm_loss
+ 
+         return loss.detach() / self.args.gradient_accumulation_steps
+ 
+     def compute_regularization(self, model):
+         """
+         Compute a sparse regularization loss for SiLU
+         """
+         loss = 0
+         if len(self.sparse_layers) == 0:
+             self.initialize_sparse_silu_layers(model)
+         num_layers = len(self.sparse_layers)
+ 
+         for module in self.sparse_layers:
+             if module.activation_norm is not None:
+                 loss += module.activation_norm
+ 
+         loss /= num_layers
+         loss *= self.regularization_coefficient
+ 
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Negative regularizer loss: ", loss.item())
+         return loss
+ 
+     def compute_spm_loss(self, model):
+         loss = 0
+         if len(self.sparse_decoder_layers) == 0:
+             self.initialize_sparse_decoder_layers(model)
+         for module in self.sparse_decoder_layers:
+             if module.distill_loss is not None:
+                 loss += module.distill_loss
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Sparse Predictor Distillation loss: ", loss.item())
+         return loss
+ 
+ 
+ class SparseSiLU(nn.SiLU):
+     def __init__(self, threshold):
+         super(SparseSiLU, self).__init__()
+         self.threshold = threshold
+         self.m = nn.Threshold(self.threshold, 0)
+ 
+     def set_new_threshold(self, threshold):
+         self.threshold = threshold
+         self.m = nn.Threshold(threshold, 0)
+ 
+     def forward(self, x):
+         act = super(SparseSiLU, self).forward(x)
+         return self.m(act) - self.m(-act)
+ 
+ 
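A quick numeric check (editorial, not part of the commit) of what `SparseSiLU` does: it leaves SiLU outputs alone when their magnitude exceeds the threshold and zeroes them otherwise, on both sides of zero.

```python
# SparseSiLU keeps |SiLU(x)| > threshold and zeroes the rest.
import torch

act = SparseSiLU(threshold=0.1)
x = torch.tensor([-2.0, -0.1, 0.0, 0.1, 2.0])
print(torch.nn.functional.silu(x))  # tensor([-0.2384, -0.0475,  0.0000,  0.0525,  1.7616])
print(act(x))                       # small-magnitude outputs are clamped to exactly 0
```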
+ class MistralSparseSiluMLP(MistralMLP):
+     def __init__(self, config, *args, **kwargs):
+         super().__init__(config)
+         self.swish_outputs = None
+         self.relu = nn.ReLU()
+ 
+         self.kill_sparse_swish_outputs = False
+         self.dead_percentage = 0
+         self.is_stats = False
+         self.visit_counts = 0
+ 
+         # Hyperparameters to tune
+         self.dead_threshold = kwargs.pop("dead_threshold", 0)
+         self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", True)
+         self.regularization_type = kwargs.pop(
+             "regularization_type", "L1 regularization"
+         )
+         self.regularization_threshold = kwargs.pop("regularization_threshold", 0.5)
+         self.use_relu = kwargs.pop("use_relu", False)
+         self.activation_norm = None
+ 
+         # Activation Histograms
+         self.is_collect_histogram = False
+         num_bins = 1000
+         self.histogram_bins = torch.linspace(-1, 1, num_bins - 2)
+         self.histogram_bins = torch.cat(
+             [torch.tensor([-torch.inf]), self.histogram_bins, torch.tensor([torch.inf])]
+         )
+         self.pre_act_hist_counts = torch.zeros(num_bins - 1)
+         self.post_act_hist_counts = torch.zeros(num_bins - 1)
+         self.t = 0
+         self.agg_sparsity = 0
+ 
+         # Sparse activation function
+         self.sparse_act_fn = SparseSiLU(threshold=self.dead_threshold)
+ 
+     def activate_stats(self, is_collect_histogram: bool = True):
+         self.is_stats = True
+         self.dead_percentage = 0
+         self.visit_counts = 0
+         self.is_collect_histogram = is_collect_histogram
+         self.histogram_counts = torch.zeros(2000)  # .to(self.down_proj.weight.device)
+ 
+     def deactivate_stats(self):
+         self.is_stats = False
+ 
+     def collect_stats(self, pre_activation, post_activation):
+         start_time = time.time()
+         pre_activation = pre_activation.float().cpu().detach()
+         post_activation = post_activation.float().cpu().detach()
+         # self.histogram_bins = self.histogram_bins.to(pre_activation.device).type(pre_activation.dtype)
+         self.pre_act_hist_counts += torch.histogram(
+             pre_activation, bins=self.histogram_bins
+         )[0]
+         self.post_act_hist_counts += torch.histogram(
+             torch.abs(post_activation), bins=self.histogram_bins
+         )[0]
+         self.t += time.time() - start_time
+         if self.visit_counts % 30 == 0:
+             print(f"Time taken to collect stats: {self.t}s.")
+ 
+     def forward(
+         self,
+         x,
+         sp_mask: torch.Tensor = None,
+     ):
+         """
+         If kill_sparse_swish_outputs is set to False, this layer functions exactly like a normal MLP layer.
+         """
+         if sp_mask is not None:  # When a sparse mask is given
+             return self.down_proj(
+                 self.sparse_act_fn(self.gate_proj(x) * sp_mask) * self.up_proj(x)
+             )  # TODO: this does not accelerate runtime (it slows it down instead)
+ 
+         elif self.use_relu:
+             post_act = self.relu(self.gate_proj(x))
+ 
+             if self.is_stats:
+                 dead_neurons = post_act == 0
+                 dead_percentage = dead_neurons.float().mean()
+                 agg_sparsity = dead_neurons.all(dim=0).float().mean()
+ 
+                 self.dead_percentage = (
+                     self.dead_percentage * self.visit_counts + dead_percentage
+                 ) / (self.visit_counts + 1)
+                 self.agg_sparsity = (
+                     self.agg_sparsity * self.visit_counts + agg_sparsity
+                 ) / (self.visit_counts + 1)
+                 self.visit_counts += 1
+ 
+             return self.down_proj(post_act * self.up_proj(x))
+ 
+         else:
+             pre_act = self.gate_proj(x)
+             post_act = self.act_fn(pre_act)
+             if self.kill_sparse_swish_outputs:
+                 dead_neurons = post_act.abs() <= self.dead_threshold
+                 # print("pre act sparsity: ", (pre_act == 0).float().mean())
+ 
+                 dead_percentage = dead_neurons.float().mean()
+                 agg_sparsity = dead_neurons.all(dim=0).float().mean()
+ 
+                 if self.is_stats:
+                     self.dead_percentage = (
+                         self.dead_percentage * self.visit_counts + dead_percentage
+                     ) / (self.visit_counts + 1)
+                     self.agg_sparsity = (
+                         self.agg_sparsity * self.visit_counts + agg_sparsity
+                     ) / (self.visit_counts + 1)
+                     self.visit_counts += 1
+ 
+                     # print(self.agg_sparsity)
+ 
+                     # Collect histogram stats
+                     if (
+                         self.is_collect_histogram
+                         and pre_act.eq(0).float().mean() < 0.99
+                     ):  # Padded dataset
+                         self.collect_stats(pre_act, post_act)
+ 
+                 post_act[dead_neurons] = 0
+ 
+             out = self.down_proj(post_act * self.up_proj(x))
+             if self.use_sparse_regularization:
+                 if self.regularization_type == "L1 regularization":
+                     self.activation_norm = torch.abs(post_act)[
+                         post_act < self.regularization_threshold
+                     ].mean()
+                 elif self.regularization_type == "L2 regularization":
+                     self.activation_norm = torch.sqrt(
+                         torch.square(post_act)[post_act < self.regularization_threshold]
+                     ).mean()
+ 
+             return out
+ 
+ 
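A hedged smoke test of the sparse MLP (editorial; the config sizes below are made up, not the 7B values): set a dead threshold, enable stats, and read back the measured fraction of killed activations.

```python
# Hedged sketch: measure the activation sparsity induced by a dead threshold.
import torch
from transformers.models.mistral.modeling_mistral import MistralConfig

cfg = MistralConfig(
    hidden_size=64, intermediate_size=128,  # toy sizes for a quick check
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
)
mlp = MistralSparseSiluMLP(cfg, dead_threshold=0.1)
mlp.kill_sparse_swish_outputs = True
mlp.activate_stats(is_collect_histogram=False)  # skip the (slow) histogram path
_ = mlp(torch.randn(4, 16, 64))
print(f"dead fraction: {float(mlp.dead_percentage):.2%}")
```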
+ class SparseMistralDecoderLayer(MistralDecoderLayer):
+     def __init__(
+         self,
+         config: MistralConfig,
+         layer_idx: int,
+         decoder_layer: MistralDecoderLayer,
+         init_svd: bool = True,
+         *args,
+         **kwargs,
+     ):
+         assert isinstance(
+             decoder_layer.mlp, MistralSparseSiluMLP
+         ), f"{type(decoder_layer.mlp)} should be MistralSparseSiluMLP."
+ 
+         super().__init__(config, layer_idx)
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+ 
+         self.init_svd = init_svd
+         self.self_attn = decoder_layer.self_attn
+ 
+         self.mlp = decoder_layer.mlp
+         self.input_layernorm = decoder_layer.input_layernorm
+         self.post_attention_layernorm = decoder_layer.post_attention_layernorm
+ 
+         # Sparse predictor for the MLP (initialized with an SVD-decomposed matrix)
+         self.low_rank = kwargs.pop("low_rank", 64)
+         self.sparse_act_func = decoder_layer.mlp.sparse_act_fn
+ 
+         print(
+             f"Setting {layer_idx}th mlp layer's sparse predictor... svd init: {init_svd}"
+         )
+         self.sp_mlp = low_rank_approximation(
+             decoder_layer.mlp.gate_proj,
+             act_func=self.sparse_act_func,
+             init_svd=init_svd,
+         )
+         self.use_async = kwargs.pop("use_async", False)
+         self.use_sparse_predictor = False
+         self.distill_loss = None
+ 
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         **kwargs,
+     ) -> Tuple[
+         torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
+     ]:
+         print("hidden_states shape: ", hidden_states.shape)
+         if "padding_mask" in kwargs:
+             warnings.warn(
+                 "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
+             )
+ 
+         residual = hidden_states
+         sp_mask = None
+ 
+         if self.use_async:
+             sp_mask = self.sp_mlp(hidden_states)
+ 
+         hidden_states = self.input_layernorm(hidden_states)
+ 
+         # Self Attention
+         hidden_states, self_attn_weights, present_key_value = self.self_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_value=past_key_value,
+             output_attentions=output_attentions,
+             use_cache=use_cache,
+         )
+         hidden_states = residual + hidden_states
+ 
+         # Fully Connected
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+ 
+         if not self.use_async:
+             sp_mask = self.sp_mlp(hidden_states)
+ 
+         # Compute distillation loss
+         gating_output = self.mlp.sparse_act_fn(self.mlp.gate_proj(hidden_states))
+         loss_func = MSELoss()
+         self.distill_loss = loss_func(sp_mask, gating_output)
+ 
+         # Convert the sparse mask into binary form
+         sp_mask = sp_mask > 0
+ 
+         if self.training:
+             sp_mask = None
+             # if not self.use_sparse_predictor:
+             #     sp_mask = None
+ 
+         hidden_states = self.mlp(hidden_states, sp_mask)
+         hidden_states = residual + hidden_states
+ 
+         outputs = (hidden_states,)
+ 
+         if output_attentions:
+             outputs += (self_attn_weights,)
+ 
+         if use_cache:
+             outputs += (present_key_value,)
+ 
+         return outputs
+ 
533
+
534
+ class SparseMistralConfig(MistralConfig):
+     model_type = "sparse_mistral"
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+
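+ # Note: the distinct `model_type` above lets this config round-trip through
+ # save_pretrained/from_pretrained. Hedged aside: exposing it through the Auto*
+ # classes would additionally require e.g.
+ # AutoConfig.register("sparse_mistral", SparseMistralConfig), which this diff
+ # does not do.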
+ class SparseMistralforCausalLM(MistralForCausalLM):
+     config_class = SparseMistralConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.config = config
+         if config.use_sparse_model:
+             self.apply_sparse_mlp()
+             if config.thresholds is not None:
+                 # Restore the per-layer dead thresholds saved in the config.
+                 for idx, m in enumerate(self.model.layers):
+                     if isinstance(m.mlp, MistralSparseSiluMLP):
+                         m.mlp.dead_threshold = config.thresholds[idx]
+                         m.mlp.sparse_act_fn.set_new_threshold(m.mlp.dead_threshold)
+                         m.mlp.kill_sparse_swish_outputs = True
+                         m.mlp.use_relu = config.use_relu
+         if config.use_sparse_predictor:
+             self.apply_sparse_predictor(init_svd=config.init_svd)
+
+     def apply_sparse_mlp(self):
+         apply_mistral_sparse_silu_mlp(
+             self,
+             config=self.config,
+             use_sparse_regularization=self.config.use_sparse_regularization,
+         )
+
+     def apply_sparse_predictor(self, init_svd: bool = True):
+         apply_mistral_sparse_decoder_layer(self, config=self.config, init_svd=init_svd)
+
+
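+ # Hedged usage sketch (illustrative, not part of this diff): converting a dense
+ # checkpoint into the sparse variant defined above.
+ #
+ #     base_config = MistralConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
+ #     sparse_config = get_sparse_mistral_config(base_config, use_sparse_model=True)
+ #     model = SparseMistralforCausalLM.from_pretrained(
+ #         "mistralai/Mistral-7B-v0.1", config=sparse_config
+ #     )
+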
+ class GracefulRegularizationScheduler(TrainerCallback):
+     def __init__(
+         self,
+         num_warmup_steps=40,
+         is_enabled: bool = False,
+         model_name: str = "mistral",
+         test_dataset: Dataset = None,
+         targeted_sparsity: float = 0.5,
+         keep_regularization_with_kill: bool = False,
+     ):
+         """Scheduler that regularizes the model first, before applying the dead threshold.
+
+         :param num_warmup_steps: number of training steps before the dead threshold is applied, defaults to 40
+         :param targeted_sparsity: desired fraction of activations to zero out once the threshold kicks in
+         """
+         self.num_warmup_steps = num_warmup_steps
+         self.is_enabled = is_enabled
+         self.model_name = model_name
+         self.test_dataset = test_dataset
+         self.targeted_sparsity = targeted_sparsity
+         self.keep_regularization_with_kill = keep_regularization_with_kill
+         self.act_hist_path = (
+             f"/matx/u/vxbrando/histograms/warm_up_reg_{targeted_sparsity}/act_hist.pt"
+         )
+         if self.is_enabled:
+             print("GracefulRegularizationScheduler is enabled.")
+         self.trainer = None
+
+     def set_trainer(self, trainer):
+         self.trainer = trainer
+
+     def on_step_end(self, args, state, control, **kwargs):
+         if not self.is_enabled:
+             return
+
+         model = kwargs["model"]
+         if isinstance(model, PeftModel):
+             base_model = model.get_base_model()
+         else:
+             base_model = model
+
+         if state.global_step == 1:
+             ds_print("Setting an initial reg threshold to 0.1")
+             set_regularization_threshold(base_model, 0.1)
+
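+         # At exactly `num_warmup_steps`: switch on activation statistics, run one
+         # evaluation pass to populate the activation histograms, derive per-layer
+         # dead thresholds for the targeted sparsity, then freeze statistics again.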
+         # if state.global_step >= self.num_warmup_steps and state.global_step % 50 == 0:
+         if state.global_step == self.num_warmup_steps:
+             activate_stats(base_model)
+             enable_sparse_silu(base_model)
+             self.trainer.evaluate()
+             save_act_hist(base_model, self.act_hist_path)
+             set_sparse_threshold(base_model, self.targeted_sparsity, True)
+             deactivate_stats(base_model)
+             self.trainer.use_sparse_regularization = self.keep_regularization_with_kill
+             # set_layer_specific_regularization(model.get_base_model())
+             print_dead_neuron_stats(base_model)
+
+         if state.global_step % 2000 == 0:
+             if is_mainprocess():
+                 ds_print(
+                     f"Saving to /scr/lukeai/{self.model_name}_{state.global_step}.pt",
+                 )
+                 torch.save(
+                     model.state_dict(),
+                     f"/scr/lukeai/{self.model_name}_{state.global_step}.pt",
+                 )
+
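+ # Hedged usage sketch: the schedulers here are standard HF `TrainerCallback`s; the
+ # graceful variant also needs a back-reference to the trainer for its evaluation pass.
+ #
+ #     scheduler = GracefulRegularizationScheduler(is_enabled=True, targeted_sparsity=0.7)
+ #     trainer = Trainer(model=model, args=training_args, callbacks=[scheduler])
+ #     scheduler.set_trainer(trainer)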
+
+ class GradualSparsificationScheduler(TrainerCallback):
+     def __init__(
+         self,
+         num_warmup_steps=40,
+         increment_ratio=0.5,
+         is_enabled: bool = False,
+         model_name: str = "mistral",
+     ):
+         """Scheduler that gradually increases the dead threshold until it reaches the desired value.
+
+         :param num_warmup_steps: number of training steps required to reach the dead threshold, defaults to 40
+         :param increment_ratio: by how much to increase the dead threshold at each step.
+             For example, 0.5 means "increase the threshold by 0.5 * desired threshold".
+         """
+         self.num_warmup_steps = num_warmup_steps
+         self.increment_ratio = increment_ratio
+         self.step_size = int(num_warmup_steps * increment_ratio)
+         self.is_enabled = is_enabled
+         self.model_name = model_name
+
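+     # Worked example: with num_warmup_steps=40 and increment_ratio=0.5, step_size is
+     # 20, so at steps 2, 22, 42, ... each sparse MLP's current_dead_threshold grows
+     # by 0.5 * dead_threshold and is clamped at the desired value, which it reaches
+     # after two increments.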
+     def on_step_end(self, args, state, control, **kwargs):
+         model = kwargs["model"]
+
+         if not self.is_enabled:
+             if state.global_step <= 10:
+                 for module in model.modules():
+                     if isinstance(module, MistralSparseSiluMLP):
+                         module.current_dead_threshold = module.dead_threshold
+             return
+
+         current_dead_threshold = 0
+         desired_dead_threshold = 0
+
+         if is_mainprocess():
+             ds_print(state.global_step)
+
+         if state.global_step % self.step_size == 2:
+             for module in model.modules():
+                 if isinstance(module, MistralSparseSiluMLP):
+                     desired_dead_threshold = copy.deepcopy(module.dead_threshold)
+                     current_dead_threshold = module.current_dead_threshold
+                     current_dead_threshold += (
+                         self.increment_ratio * desired_dead_threshold
+                     )
+                     module.current_dead_threshold = min(
+                         desired_dead_threshold, current_dead_threshold
+                     )
+
+         if is_running_deepspeed and is_mainprocess():
+             ds_print(
+                 state.global_step,
+                 current_dead_threshold,
+                 desired_dead_threshold,
+             )
+
+         if state.global_step % 2000 == 0:
+             if is_running_deepspeed and is_mainprocess():
+                 ds_print(
+                     f"Saving to /matx/u/lukeai/{self.model_name}_{state.global_step - 2}.pt",
+                 )
+                 torch.save(
+                     model.state_dict(),
+                     f"/matx/u/lukeai/{self.model_name}_{state.global_step - 2}.pt",
+                 )
+
+
+ def get_sparse_mistral_config(
+     config: MistralConfig,
+     use_sparse_model=False,
+     use_sparse_predictor=False,
+     use_sparse_regularization=False,
+     thresholds=None,
+ ):
+     new_config = SparseMistralConfig()
+     new_config.__dict__.update(config.__dict__)
+     config = new_config
+     config.use_sparse_model = use_sparse_model
+     config.use_sparse_predictor = use_sparse_predictor
+     config.use_sparse_regularization = use_sparse_regularization
+     config.thresholds = thresholds
+
+     return config
+
+
+ def apply_mistral_sparse_silu_mlp(
+     model,
+     config,
+     use_sparse_regularization: bool = False,
+ ):
+     # counts = 0
+     for layer in model.model.layers:
+         # counts += 1
+         # if counts < 4:
+         #     continue
+         original_mlp = layer.mlp
+         new_mlp = MistralSparseSiluMLP(
+             config, use_sparse_regularization=use_sparse_regularization
+         )
+         # Reuse (not copy) the original projection modules, so the swap costs no
+         # extra memory and preserves the pretrained weights.
+         new_mlp.gate_proj = original_mlp.gate_proj
+         new_mlp.up_proj = original_mlp.up_proj
+         new_mlp.down_proj = original_mlp.down_proj
+         layer.mlp = new_mlp
+
+
+ def apply_mistral_sparse_decoder_layer(
+     model,
+     config,
+     init_svd: bool = True,
+ ):
+     assert isinstance(model.model, MistralModel), "model.model must be a MistralModel."
+     new_layers = []
+     for layer_idx, layer in enumerate(model.model.layers):
+         if isinstance(layer.mlp, MistralSparseSiluMLP):
+             new_layers.append(
+                 SparseMistralDecoderLayer(
+                     config=config,
+                     layer_idx=layer_idx,
+                     decoder_layer=layer,
+                     init_svd=init_svd,
+                 )
+             )
+             print(f"{layer_idx}th mlp layer activation: {layer.mlp.sparse_act_fn}")
+         else:
+             new_layers.append(layer)
+     model.model.layers = nn.ModuleList(new_layers)
+
+
+ def enable_sparse_predictor(
+     model,
+ ):
+     for layer_idx, layer in enumerate(model.model.layers):
+         if isinstance(layer, MistralDecoderLayer):
+             layer.use_sparse_predictor = True
+
+
+ def disable_sparse_predictor(
+     model,
+ ):
+     for layer_idx, layer in enumerate(model.model.layers):
+         if isinstance(layer, MistralDecoderLayer):
+             layer.use_sparse_predictor = False
+
+
+ def activate_stats(model, is_collect_histogram: bool = True):
+     for layer in model.model.layers:
+         if isinstance(layer.mlp, MistralSparseSiluMLP):
+             layer.mlp.activate_stats(is_collect_histogram=is_collect_histogram)
+
+
+ def deactivate_stats(model):
+     for layer in model.model.layers:
+         if isinstance(layer.mlp, MistralSparseSiluMLP):
+             layer.mlp.deactivate_stats()
+
+
+ def enable_sparse_silu(model):
+     print("Enabling SparseSilu")
+     for i, layer in enumerate(model.model.layers):
+         if isinstance(layer.mlp, MistralSparseSiluMLP):
+             layer.mlp.kill_sparse_swish_outputs = True
+
+
+ def print_dead_neuron_stats(model):
+     total_sparsity = 0
+     counts = 0
+     for i, layer in enumerate(model.model.layers):
+         if isinstance(layer.mlp, MistralSparseSiluMLP):
+             dead_percentage = layer.mlp.dead_percentage * 100
+             agg_sparsity = layer.mlp.agg_sparsity * 100
+             print(f"layer {i} sparsity: {dead_percentage:.3f}%")
+             print(f"layer {i} agg sparsity: {agg_sparsity:.3f}%")
+             total_sparsity += dead_percentage
+             counts += 1
+
+     print(f"Total sparsity: {total_sparsity / counts:.3f}%")
+     return total_sparsity / counts
+
+
+ def get_sparse_layers(model: MistralModel):
+     sparse_layers = [
+         m.mlp for m in model.layers if isinstance(m.mlp, MistralSparseSiluMLP)
+     ]
+     return sparse_layers
+
+
+ def get_threshold(
+     bin_edges: torch.Tensor, histogram_counts: torch.Tensor, sparsity_level: float
+ ):  # Only for L1 regularization
+     assert (
+         len(bin_edges.shape) == len(histogram_counts.shape) == 1
+     ), "bin_edges and histogram_counts are expected to be 1-dimensional."
+     histogram_counts /= histogram_counts.sum()
+     threshold_idx = torch.searchsorted(
+         histogram_counts.cumsum(0), sparsity_level, side="right"
+     )
+
+     return bin_edges[threshold_idx]
+
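+ # Worked example: with bin_edges = [0.0, 0.1, 0.2, 0.3, 0.4] and normalized counts
+ # [0.5, 0.3, 0.15, 0.05] (CDF [0.5, 0.8, 0.95, 1.0]), sparsity_level=0.9 selects
+ # threshold_idx=2, the first CDF entry above 0.9, i.e. a dead threshold of
+ # bin_edges[2] = 0.2; the achieved sparsity matches the target up to the
+ # histogram's bin resolution.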
+
+ def set_regularization_threshold(model, threshold: float = 0.1):
+     for i, layer in enumerate(model.model.layers):
+         if (
+             isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+         ):  # The threshold can be set only once the relevant statistics have been collected.
+             layer.mlp.regularization_threshold = threshold  # TODO: find better param
+
+
+ def set_sparse_threshold(model, sparsity_level: float, use_relu: bool = False):
+     for i, layer in enumerate(model.model.layers):
+         if (
+             isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+         ):  # The threshold can be set only once the relevant statistics have been collected.
+             if use_relu:
+                 layer.mlp.sparse_act_fn = nn.ReLU()
+                 layer.mlp.use_relu = True
+             else:
+                 layer.mlp.dead_threshold = get_threshold(
+                     layer.mlp.histogram_bins,
+                     layer.mlp.post_act_hist_counts,
+                     sparsity_level,
+                 )
+                 layer.mlp.sparse_act_fn.set_new_threshold(layer.mlp.dead_threshold)
+                 layer.mlp.regularization_threshold = (
+                     layer.mlp.dead_threshold * 1.2
+                 )  # TODO: find better param
+
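+ # Design note: with use_relu=True the gate nonlinearity is simply replaced by ReLU
+ # (exact zeros, no threshold needed); otherwise the SiLU-style activation keeps its
+ # shape and a histogram-derived dead threshold zeroes near-zero outputs, with the
+ # regularization threshold set slightly above it (1.2x) as a soft margin.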
+
+ def plot_histogram(
+     bin_edges,
+     histogram_counts: torch.Tensor,
+     title: str = "Activation Distribution",
+     fig_dir: str = "figures",
+ ):
+     plt.bar(
+         bin_edges[:-1], histogram_counts, width=np.diff(bin_edges), edgecolor="black"
+     )
+     plt.title(title)
+     plt.xlabel("Activation Value")
+     plt.ylabel("Frequency")
+     os.makedirs(fig_dir, exist_ok=True)
+     plt.savefig(f"{fig_dir}/{title}.png")
+     # plt.show()
+     plt.clf()
+
+
+ def plot_act(model, fig_dir: str = "figures"):
882
+ for i, layer in enumerate(model.model.layers):
883
+ if (
884
+ isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
885
+ ): # Can set the threshold only the relevant statistics is collected.
886
+ plot_title = f"Layer: {i} Pre-Activation Distribution"
887
+ plot_histogram(
888
+ layer.mlp.histogram_bins, layer.mlp.pre_act_hist_counts, plot_title
889
+ )
890
+
891
+ plot_title = f"Layer: {i} Post-Activation Absolute Distribution"
892
+ plot_histogram(
893
+ layer.mlp.histogram_bins, layer.mlp.post_act_hist_counts, plot_title
894
+ )
895
+
896
+
897
+ def save_act_hist(
+     model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"
+ ):
+     os.makedirs(os.path.dirname(filename), exist_ok=True)
+     act_dict = {}
+     for i, layer in enumerate(model.model.layers):
+         if (
+             isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+         ):  # Histograms exist only where the relevant statistics have been collected.
+             act_dict[i] = (
+                 layer.mlp.histogram_bins,
+                 layer.mlp.pre_act_hist_counts,
+                 layer.mlp.post_act_hist_counts,
+             )
+     print("Saving activation histograms...\n\n\n")
+     torch.save(act_dict, filename)
+
+
+ def load_act_hist(
+     model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"
+ ):
+     assert os.path.exists(
+         filename
+     ), f"{filename} does not exist when loading pre/post-activation histograms of MistralSparseSiluMLP."
+     print("Loading activation histograms...\n\n\n")
+
+     act_dict = torch.load(filename)
+     for i, layer in enumerate(model.model.layers):
+         if (
+             isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+         ):  # Histograms are restored only where the relevant statistics were collected.
+             (
+                 layer.mlp.histogram_bins,
+                 layer.mlp.pre_act_hist_counts,
+                 layer.mlp.post_act_hist_counts,
+             ) = act_dict[i]
+
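+ # The two helpers below truncate the model to a contiguous block of decoder layers
+ # (the hard-coded 32 matches Mistral-7B's layer count). Re-assigning `layer_idx` on
+ # each module and its attention keeps KV-cache indexing consistent after truncation.
+ # Hedged note: they read `model.model.original_layers`, which the caller is expected
+ # to have stashed beforehand; it is not set anywhere in this diff.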
+
+ def enable_last_k_modules(model, start_module_idx: int):
+     assert 32 > start_module_idx >= 0
+     new_modules = []
+     new_idx = 0
+     for idx in range(start_module_idx, len(model.model.original_layers)):
+         module = model.model.original_layers[idx]
+         module.layer_idx = new_idx
+         module.self_attn.layer_idx = new_idx
+         new_modules.append(module)
+         new_idx += 1
+         print(module.layer_idx)
+
+     model.model.layers = nn.ModuleList(new_modules)
+
+
+ def enable_first_k_modules(model, end_module_idx: int):
+     assert 32 > end_module_idx >= 0
+     new_modules = []
+     new_idx = 0
+     for idx in range(0, end_module_idx + 1):
+         module = model.model.original_layers[idx]
+         module.layer_idx = new_idx
+         module.self_attn.layer_idx = new_idx
+         new_modules.append(module)
+         new_idx += 1
+         print(module.layer_idx)
+
+     model.model.layers = nn.ModuleList(new_modules)
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e4a67b2e270a0df0223b0668e343f2623e59131b75c6904e9debc861313d368
+ size 6456