vxbrandon committed on
Commit df3d0fd
1 Parent(s): 57f199e

Training in progress, step 503
README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ license: apache-2.0
+ base_model: mistralai/Mistral-7B-v0.1
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: Mistral_Sparse_refined_web_50p_2024-03-21
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Mistral_Sparse_refined_web_50p_2024-03-21
+
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unspecified dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 2.1512
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 0
+ - distributed_type: multi-GPU
+ - num_devices: 3
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 12
+ - total_eval_batch_size: 3
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - training_steps: 501
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 2.4177        | 0.0   | 25   | 2.6401          |
+ | 2.5407        | 0.01  | 50   | 2.5820          |
+ | 2.3887        | 0.01  | 75   | 2.5299          |
+ | 2.2849        | 0.01  | 100  | 2.4991          |
+ | 2.2042        | 0.01  | 125  | 2.4802          |
+ | 2.2574        | 0.02  | 150  | 2.4609          |
+ | 2.2353        | 0.02  | 175  | 2.4473          |
+ | 2.3355        | 0.02  | 200  | 2.4449          |
+ | 2.3044        | 0.03  | 225  | 2.4381          |
+ | 2.2664        | 0.03  | 250  | 2.4348          |
+ | 2.1999        | 0.03  | 275  | 2.4263          |
+ | 2.2631        | 0.04  | 300  | 2.4247          |
+ | 2.2918        | 0.04  | 325  | 2.4184          |
+ | 2.1426        | 0.04  | 350  | 2.4185          |
+ | 2.149         | 0.04  | 375  | 2.4158          |
+ | 2.1937        | 0.05  | 400  | 2.4129          |
+ | 2.2372        | 0.05  | 425  | 2.4134          |
+ | 2.1997        | 0.05  | 450  | 2.4123          |
+ | 2.2937        | 0.06  | 475  | 2.4086          |
+ | 2.3067        | 0.06  | 500  | 2.4052          |
+
+
+ ### Framework versions
+
+ - Transformers 4.36.2
+ - PyTorch 2.1.2+cu121
+ - Datasets 2.15.0
+ - Tokenizers 0.15.0
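
For reference, a minimal sketch (not part of the generated card) of how the hyperparameters above map onto `transformers.TrainingArguments`. The `output_dir` value is a placeholder, and the 3-GPU launch implied by `num_devices: 3` would come from the launcher (`accelerate`/`torchrun`), not from these arguments:

```python
# Sketch only: reconstructing the training setup from the hyperparameter list.
# 3 GPUs x per-device batch 1 x grad-accum 4 = total train batch 12.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="Mistral_Sparse_refined_web_50p_2024-03-21",  # placeholder
    learning_rate=1e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=0,
    gradient_accumulation_steps=4,
    max_steps=501,                 # training_steps: 501
    lr_scheduler_type="linear",
)
```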
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "v_proj",
+     "q_proj",
+     "up_proj",
+     "gate_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
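
A hedged sketch of how this adapter (LoRA with r=64, alpha=16, dropout=0.1 on the listed projection modules) could be attached with `peft`; the repo path is a placeholder, and since this commit also ships merged full-model shards, loading the adapter separately is optional:

```python
# Sketch only: attach the adapter described by adapter_config.json.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
model = PeftModel.from_pretrained(base, "path/to/this/repo")  # placeholder path
```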
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8eccc59fac77b72db240a1cd499d891f9ebe0aa2d95c9e4600775cc840102e9f
+ size 281061608
config.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "architectures": [
+     "SparseMistralforCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "sparsification_sftt.SparseMistralConfig",
+     "AutoModelForCausalLM": "sparsification_sftt.SparseMistralforCausalLM"
+   },
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 32768,
+   "model_type": "sparse_mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 10000.0,
+   "sliding_window": 4096,
+   "thresholds": [
+     0.023069201037287712,
+     0.03309928998351097,
+     0.04312938079237938,
+     0.05516548827290535,
+     0.07522567361593246,
+     0.09327983111143112,
+     0.10531593859195709,
+     0.11935807019472122,
+     0.12738214433193207,
+     0.12738214433193207,
+     0.1313941776752472,
+     0.13340020179748535,
+     0.13941824436187744,
+     0.1414242684841156,
+     0.15546639263629913,
+     0.1675025075674057,
+     0.18555666506290436,
+     0.19157472252845764,
+     0.20762285590171814,
+     0.2196589708328247,
+     0.22768303751945496,
+     0.23771312832832336,
+     0.2357071191072464,
+     0.23771312832832336,
+     0.24172517657279968,
+     0.24172517657279968,
+     0.24172517657279968,
+     0.24172517657279968,
+     0.24172517657279968,
+     0.23971915245056152,
+     0.2357071191072464,
+     0.225677028298378
+   ],
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.36.2",
+   "use_cache": false,
+   "use_relu": false,
+   "use_resilu": false,
+   "use_sparse_model": true,
+   "use_sparse_predictor": false,
+   "use_sparse_regularization": false,
+   "vocab_size": 32000
+ }
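
Because the `auto_map` entries route `AutoConfig` and `AutoModelForCausalLM` to the custom classes in `sparsification_sftt.py`, loading this checkpoint requires `trust_remote_code=True`. A minimal sketch (repo path is a placeholder):

```python
# Sketch only: trust_remote_code executes this repo's sparsification_sftt.py.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("path/to/this/repo", trust_remote_code=True)
print(config.model_type)       # "sparse_mistral"
print(len(config.thresholds))  # 32, one activation threshold per decoder layer
model = AutoModelForCausalLM.from_pretrained(
    "path/to/this/repo", trust_remote_code=True, torch_dtype=torch.bfloat16
)
```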
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.36.2"
+ }
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95729f8aae782d1dc5112c74b463100926416d372874db0b1b025e1ee4f6aacd
+ size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1a91505db655a8e3306808cdd365de8f920952ea30ef28ebbdca5864e81b0fd
+ size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7eaf0b6353ca719e0ec6fd4f52c905bc677a752fa7bead4ca9e27509ec8f532
+ size 4540516344
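
The three `*.safetensors` entries above are git-lfs pointers rather than the weights themselves; after downloading, a shard's SHA-256 should match its `oid` line. A small verification sketch (assumes the file sits in the working directory):

```python
# Sketch only: verify a downloaded shard against the git-lfs "oid" above.
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert lfs_sha256("model-00003-of-00003.safetensors") == (
    "c7eaf0b6353ca719e0ec6fd4f52c905bc677a752fa7bead4ca9e27509ec8f532"
)
```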
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 14483464192
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00003-of-00003.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
+     "model.norm.weight": "model-00003-of-00003.safetensors"
+   }
+ }
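
A quick, hedged sanity check of the index above (assumes the files were downloaded locally). Note that `total_size` counts tensor-data bytes only (~14.48 GB, i.e. about 7.24B bf16 parameters), so it is slightly below the sum of the shard file sizes, which also include safetensors headers:

```python
# Sketch only: verify the weight map is complete and self-consistent.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

assert sorted(set(index["weight_map"].values())) == [
    f"model-0000{i}-of-00003.safetensors" for i in (1, 2, 3)
]
# 3 top-level tensors + 32 layers x 9 tensors each = 291 entries
assert len(index["weight_map"]) == 291
print(index["metadata"]["total_size"])  # 14483464192
```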
sparsification_sftt.py ADDED
@@ -0,0 +1,1574 @@
+ from transformers import TrainerCallback, Trainer
+ from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
+ from peft import PeftModel
+ from datasets import Dataset
+ from transformers.utils import is_sagemaker_mp_enabled, is_sagemaker_dp_enabled
+ from typing import Any, Dict, Union, Optional, Tuple
+ from torch.nn import MSELoss
+ from transformers.utils import is_flash_attn_2_available, logging
+ import inspect
+ import warnings
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import time
+ import os
+ import copy
+
+ from transformers.models.mistral.modeling_mistral import (
+     MistralMLP,
+     MistralAttention,
+     MistralModel,
+     MistralDecoderLayer,
+     MistralConfig,
+     MISTRAL_ATTENTION_CLASSES,
+     MistralRMSNorm,
+     MistralForCausalLM,
+     MistralFlashAttention2,
+ )
+ from experiments.models.sparse_mistral.svd_router import (
+     low_rank_approximation,
+     SparsePredictor,
+ )
+ from utils.utils import (
+     print_size_of_model,
+     is_running_deepspeed,
+     is_mainprocess,
+     get_datetime,
+     ds_print,
+ )
+
+ if is_flash_attn_2_available():
+     from flash_attn import flash_attn_func, flash_attn_varlen_func
+     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+
+     _flash_supports_window_size = "window_size" in list(
+         inspect.signature(flash_attn_func).parameters
+     )
+ logger = logging.get_logger(__name__)
+
+
+ class SparseSFTTTrainer(SFTTrainer):
+     def __init__(self, *args, **kwargs):
+         self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
+         self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
+         self.use_spm_loss = False
+         self.freeze_original_weights = False
+         self.regularization_type = kwargs.pop(
+             "regularization_type", "L1 positive activation"
+         )
+         assert self.regularization_type in [
+             "L2 activation",
+             "L1 positive activation",
+         ], f"Invalid regularization type: {self.regularization_type}"
+         self.sparse_layers = []
+         self.sparse_decoder_layers = []
+         super(SparseSFTTTrainer, self).__init__(*args, **kwargs)
+
+     def initialize_sparse_silu_layers(self, model):
+         self.sparse_layers = [
+             m for m in model.modules() if isinstance(m, MistralSparseSiluMLP)
+         ]
+
+     def initialize_sparse_decoder_layers(self, model):
+         self.sparse_decoder_layers = [
+             m for m in model.modules() if isinstance(m, SparseMistralDecoderLayer)
+         ]
+
+     def training_step(
+         self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]
+     ) -> torch.Tensor:
+         """
+         Override Hugging Face's training_step function to add a regularization term.
+         The regularization term is computed from intermediate values, which are freed after `backward()`;
+         set `retain_graph=True` in the `backward` call to keep them alive.
+         """
+         model.train()
+         inputs = self._prepare_inputs(inputs)
+
+         with self.compute_loss_context_manager():
+             loss = self.compute_loss(model, inputs)
+
+         if self.args.n_gpu > 1:
+             loss = loss.mean()  # mean() to average on multi-gpu parallel training
+         if not self.freeze_original_weights:
+             if loss is not None:
+                 self.accelerator.backward(loss, retain_graph=False)
+
+         if self.use_sparse_regularization:
+             regularization_loss = self.compute_regularization(model)
+             if self.args.n_gpu > 1:
+                 regularization_loss = regularization_loss.mean()
+             if regularization_loss is not None:
+                 self.accelerator.backward(regularization_loss, retain_graph=True)
+             loss += regularization_loss
+
+         if self.use_spm_loss:
+             spm_loss = self.compute_spm_loss(model)
+             if self.args.n_gpu > 1:
+                 spm_loss = spm_loss.mean()
+             if spm_loss is not None:
+                 self.accelerator.backward(spm_loss, retain_graph=False)
+             loss += spm_loss
+
+         return loss.detach() / self.args.gradient_accumulation_steps
+
+     def compute_regularization(self, model):
+         """
+         Compute a sparse regularization loss for SiLU
+         """
+         loss = 0
+         if len(self.sparse_layers) == 0:
+             self.initialize_sparse_silu_layers(model)
+         num_layers = len(self.sparse_layers)
+
+         for module in self.sparse_layers:
+             if module.activation_norm is not None:
+                 loss += module.activation_norm
+
+         loss /= num_layers
+         loss *= self.regularization_coefficient
+
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Negative regularizer loss: ", loss.item())
+         return loss
+
+     def compute_spm_loss(self, model):
+         loss = 0
+         if len(self.sparse_decoder_layers) == 0:
+             self.initialize_sparse_decoder_layers(model)
+         for module in self.sparse_decoder_layers:
+             if module.distill_loss is not None:
+                 loss += module.distill_loss
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Sparse Predictor Distillation loss: ", loss.item())
+         return loss
+
+     # def compute_loss(self, model, inputs, return_outputs=False):
+     #     loss = super().compute_loss(model, inputs, return_outputs)
+     #
+     #     if is_sagemaker_mp_enabled():
+     #         import smdistributed.modelparallel.torch as smp
+     #         @smp.step()
+     #         def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
+     #             outputs = model(**inputs)
+     #             loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
+     #             loss /= gradient_accumulation_steps
+     #             model.backward(loss)
+     #             return loss
+     #
+     #         loss_mb = smp_forward_backward(
+     #             model, inputs, self.args.gradient_accumulation_steps
+     #         )
+     #         if self.use_sparse_regularization:
+     #             return loss_mb.reduce_mean().detach().to(
+     #                 self.args.device
+     #             ) + self.regularization_coefficient * self.compute_regularization(model)
+     #         else:
+     #             return loss_mb.reduce_mean().detach().to(self)
+     #
+     #     if return_outputs:
+     #         classification_loss, outputs = loss
+     #     else:
+     #         classification_loss = loss
+     #
+     #     loss = classification_loss
+     #     if self.use_sparse_regularization:
+     #         regularization_loss = self.compute_regularization(model)
+     #         loss += self.regularization_coefficient * regularization_loss
+     #
+     #     return (loss, outputs) if return_outputs else loss
+
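Since the trainer pops its sparsity-specific kwargs in `__init__` before delegating to the parent constructor, construction looks like a standard `SFTTrainer` call plus those extras. A minimal usage sketch (the `model`, `training_args`, and `train_dataset` names are placeholders, not part of this file):

```python
# Sketch only: the popped kwargs enable the regularization path in training_step.
trainer = SparseSFTTTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    dataset_text_field="text",
    use_sparse_regularization=True,   # adds compute_regularization() each step
    regularization_coefficient=10,
    regularization_type="L1 positive activation",
)
trainer.train()
```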
+
+ class SparseTrainer(Trainer):
+     def __init__(self, *args, **kwargs):
+         self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
+         self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
+         self.use_spm_loss = False
+         self.freeze_original_weights = False
+         self.regularization_type = kwargs.pop(
+             "regularization_type", "L1 positive activation"
+         )
+         assert self.regularization_type in [
+             "L2 activation",
+             "L1 positive activation",
+         ], f"Invalid regularization type: {self.regularization_type}"
+         self.sparse_layers = []
+         self.sparse_decoder_layers = []
+         super(SparseTrainer, self).__init__(*args, **kwargs)
+
+     def initialize_sparse_silu_layers(self, model):
+         self.sparse_layers = [
+             m for m in model.modules() if isinstance(m, MistralSparseSiluMLP)
+         ]
+
+     def initialize_sparse_decoder_layers(self, model):
+         self.sparse_decoder_layers = [
+             m for m in model.modules() if isinstance(m, SparseMistralDecoderLayer)
+         ]
+
+     def training_step(
+         self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]
+     ) -> torch.Tensor:
+         """
+         Override Hugging Face's training_step function to add a regularization term.
+         The regularization term is computed from intermediate values, which are freed after `backward()`;
+         set `retain_graph=True` in the `backward` call to keep them alive.
+         """
+         model.train()
+         inputs = self._prepare_inputs(inputs)
+
+         with self.compute_loss_context_manager():
+             loss = self.compute_loss(model, inputs)
+
+         if self.args.n_gpu > 1:
+             loss = loss.mean()  # mean() to average on multi-gpu parallel training
+         if not self.freeze_original_weights:
+             if loss is not None:
+                 self.accelerator.backward(loss, retain_graph=False)
+
+         if self.use_sparse_regularization:
+             regularization_loss = self.compute_regularization(model)
+             if self.args.n_gpu > 1:
+                 regularization_loss = regularization_loss.mean()
+             if regularization_loss is not None:
+                 self.accelerator.backward(regularization_loss, retain_graph=True)
+             loss += regularization_loss
+
+         if self.use_spm_loss:
+             spm_loss = self.compute_spm_loss(model)
+             if self.args.n_gpu > 1:
+                 spm_loss = spm_loss.mean()
+             if spm_loss is not None:
+                 self.accelerator.backward(spm_loss, retain_graph=False)
+             loss += spm_loss
+
+         return loss.detach() / self.args.gradient_accumulation_steps
+
+     def compute_regularization(self, model):
+         """
+         Compute a sparse regularization loss for SiLU
+         """
+         loss = 0
+         if len(self.sparse_layers) == 0:
+             self.initialize_sparse_silu_layers(model)
+         num_layers = len(self.sparse_layers)
+
+         for module in self.sparse_layers:
+             if module.activation_norm is not None:
+                 loss += module.activation_norm
+
+         loss /= num_layers
+         loss *= self.regularization_coefficient
+
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Negative regularizer loss: ", loss.item())
+         return loss
+
+     def compute_spm_loss(self, model):
+         loss = 0
+         if len(self.sparse_decoder_layers) == 0:
+             self.initialize_sparse_decoder_layers(model)
+         for module in self.sparse_decoder_layers:
+             if module.distill_loss is not None:
+                 loss += module.distill_loss
+         if self.state.global_step % 20 == 0 and loss != 0:
+             print("Sparse Predictor Distillation loss: ", loss.item())
+         return loss
+
+
+ class SparseSiLU(nn.SiLU):
+     def __init__(self, threshold):
+         super(SparseSiLU, self).__init__()
+         self.threshold = threshold
+         self.m = nn.Threshold(self.threshold, 0)
+
+     def set_new_threshold(self, threshold):
+         self.threshold = threshold
+         self.m = nn.Threshold(threshold, 0)
+
+     def forward(self, x):
+         act = super(SparseSiLU, self).forward(x)
+         return self.m(act) - self.m(-act)
+
+
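For intuition, a small sketch of what `SparseSiLU` computes: `nn.Threshold(t, 0)` zeroes values at or below `t`, so `m(act) - m(-act)` keeps activations with magnitude above the threshold and zeroes the rest. The tensor values below are illustrative only:

```python
# Sketch only: SiLU followed by magnitude thresholding.
import torch

silu = SparseSiLU(threshold=0.1)
x = torch.tensor([-2.0, -0.05, 0.0, 0.05, 2.0])
print(torch.nn.functional.silu(x))  # dense SiLU activations
print(silu(x))                      # entries with |SiLU(x)| <= 0.1 become 0
```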
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`):
+             The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+             used to pass offset position ids when working with a KV-cache.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+     sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(
+         batch, num_key_value_heads, n_rep, slen, head_dim
+     )
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ def _get_unpad_data(attention_mask):
+     seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+     indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+     max_seqlen_in_batch = seqlens_in_batch.max().item()
+     cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+     return (
+         indices,
+         cu_seqlens,
+         max_seqlen_in_batch,
+     )
+
+
359
+ class SparseMistralFlashAttention(MistralFlashAttention2):
360
+ """
361
+ Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
362
+ and "Generating Long Sequences with Sparse Transformers".
363
+ """
364
+
365
+ def __init__(self, *args, **kwargs):
366
+ super().__init__(*args, **kwargs)
367
+ self.counts = 0
368
+ self.pre_attn_sparsity = 0
369
+ self.visit_counts = 0
370
+ self.is_stats = False
371
+ self.cutoff_value = 0
372
+
373
+ def activate_stats(self):
374
+ self.is_stats = True
375
+ self.visit_counts = 0
376
+ self.pre_attn_sparsity = 0
377
+ self.cutoff_value = 0
378
+
379
+ def forward(
380
+ self,
381
+ hidden_states: torch.Tensor,
382
+ attention_mask: Optional[torch.Tensor] = None,
383
+ position_ids: Optional[torch.LongTensor] = None,
384
+ past_key_value: Optional = None,
385
+ output_attentions: bool = False,
386
+ use_cache: bool = False,
387
+ **kwargs,
388
+ ):
389
+ if "padding_mask" in kwargs:
390
+ warnings.warn(
391
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
392
+ )
393
+
394
+ # overwrite attention_mask with padding_mask
395
+ attention_mask = kwargs.pop("padding_mask")
396
+ bsz, q_len, _ = hidden_states.size()
397
+ mask = abs(hidden_states - hidden_states.mean()) < 0.6 * hidden_states.std()
398
+ hidden_states[mask] = 0
399
+ self.counts += 1
400
+
401
+ if self.is_stats:
402
+ self.pre_attn_sparsity = (
403
+ self.pre_attn_sparsity * self.visit_counts + (hidden_states == 0).float().mean()
404
+ ) / (self.visit_counts + 1)
405
+ self.cutoff_value += hidden_states.std()
406
+ self.visit_counts += 1
407
+
408
+ if 10 <= self.counts <= 11:
409
+ print(f"Attention {self.layer_idx}: ", (hidden_states == 0).float().mean())
410
+ print(
411
+ mask.shape,
412
+ )
413
+
414
+ query_states = self.q_proj(hidden_states)
415
+ key_states = self.k_proj(hidden_states)
416
+ value_states = self.v_proj(hidden_states)
417
+
418
+ query_states = query_states.view(
419
+ bsz, q_len, self.num_heads, self.head_dim
420
+ ).transpose(1, 2)
421
+ key_states = key_states.view(
422
+ bsz, q_len, self.num_key_value_heads, self.head_dim
423
+ ).transpose(1, 2)
424
+ value_states = value_states.view(
425
+ bsz, q_len, self.num_key_value_heads, self.head_dim
426
+ ).transpose(1, 2)
427
+
428
+ kv_seq_len = key_states.shape[-2]
429
+ if past_key_value is not None:
430
+ if self.layer_idx is None:
431
+ raise ValueError(
432
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
433
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
434
+ "with a layer index."
435
+ )
436
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
437
+
438
+ # Because the input can be padded, the absolute sequence length depends on the max position id.
439
+ rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
440
+ cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
441
+
442
+ query_states, key_states = apply_rotary_pos_emb(
443
+ query_states, key_states, cos, sin, position_ids
444
+ )
445
+
446
+ use_sliding_windows = (
447
+ _flash_supports_window_size
448
+ and getattr(self.config, "sliding_window", None) is not None
449
+ and kv_seq_len > self.config.sliding_window
450
+ )
451
+
452
+ if not _flash_supports_window_size:
453
+ logger.warning_once(
454
+ "The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
455
+ " make sure to upgrade flash-attn library."
456
+ )
457
+
458
+ if past_key_value is not None:
459
+ # Activate slicing cache only if the config has a value `sliding_windows` attribute
460
+ cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
461
+ if (
462
+ getattr(self.config, "sliding_window", None) is not None
463
+ and kv_seq_len > self.config.sliding_window
464
+ and cache_has_contents
465
+ ):
466
+ slicing_tokens = 1 - self.config.sliding_window
467
+
468
+ past_key = past_key_value[self.layer_idx][0]
469
+ past_value = past_key_value[self.layer_idx][1]
470
+
471
+ past_key = past_key[:, :, slicing_tokens:, :].contiguous()
472
+ past_value = past_value[:, :, slicing_tokens:, :].contiguous()
473
+
474
+ if past_key.shape[-2] != self.config.sliding_window - 1:
475
+ raise ValueError(
476
+ f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
477
+ f" {past_key.shape}"
478
+ )
479
+
480
+ if attention_mask is not None:
481
+ attention_mask = attention_mask[:, slicing_tokens:]
482
+ attention_mask = torch.cat(
483
+ [attention_mask, torch.ones_like(attention_mask[:, -1:])],
484
+ dim=-1,
485
+ )
486
+
487
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
488
+ key_states, value_states = past_key_value.update(
489
+ key_states, value_states, self.layer_idx, cache_kwargs
490
+ )
491
+
492
+ # repeat k/v heads if n_kv_heads < n_heads
493
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
494
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
495
+ dropout_rate = 0.0 if not self.training else self.attention_dropout
496
+
497
+ # In PEFT, usually we cast the layer norms in float32 for training stability reasons
498
+ # therefore the input hidden states gets silently casted in float32. Hence, we need
499
+ # cast them back in float16 just to be sure everything works as expected.
500
+ input_dtype = query_states.dtype
501
+ if input_dtype == torch.float32:
502
+ if torch.is_autocast_enabled():
503
+ target_dtype = torch.get_autocast_gpu_dtype()
504
+ # Handle the case where the model is quantized
505
+ elif hasattr(self.config, "_pre_quantization_dtype"):
506
+ target_dtype = self.config._pre_quantization_dtype
507
+ else:
508
+ target_dtype = self.q_proj.weight.dtype
509
+
510
+ logger.warning_once(
511
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
512
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
513
+ f" {target_dtype}."
514
+ )
515
+
516
+ query_states = query_states.to(target_dtype)
517
+ key_states = key_states.to(target_dtype)
518
+ value_states = value_states.to(target_dtype)
519
+
520
+ # Reashape to the expected shape for Flash Attention
521
+ query_states = query_states.transpose(1, 2)
522
+ key_states = key_states.transpose(1, 2)
523
+ value_states = value_states.transpose(1, 2)
524
+
525
+ attn_output = self._flash_attention_forward(
526
+ query_states,
527
+ key_states,
528
+ value_states,
529
+ attention_mask,
530
+ q_len,
531
+ dropout=dropout_rate,
532
+ use_sliding_windows=use_sliding_windows,
533
+ )
534
+
535
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
536
+ attn_output = self.o_proj(attn_output)
537
+
538
+ if not output_attentions:
539
+ attn_weights = None
540
+
541
+ return attn_output, attn_weights, past_key_value
542
+
543
+ def _flash_attention_forward(
544
+ self,
545
+ query_states,
546
+ key_states,
547
+ value_states,
548
+ attention_mask,
549
+ query_length,
550
+ dropout=0.0,
551
+ softmax_scale=None,
552
+ use_sliding_windows=False,
553
+ ):
554
+ """
555
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
556
+ first unpad the input, then computes the attention scores and pad the final attention scores.
557
+
558
+ Args:
559
+ query_states (`torch.Tensor`):
560
+ Input query states to be passed to Flash Attention API
561
+ key_states (`torch.Tensor`):
562
+ Input key states to be passed to Flash Attention API
563
+ value_states (`torch.Tensor`):
564
+ Input value states to be passed to Flash Attention API
565
+ attention_mask (`torch.Tensor`):
566
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
567
+ position of padding tokens and 1 for the position of non-padding tokens.
568
+ dropout (`float`):
569
+ Attention dropout
570
+ softmax_scale (`float`, *optional*):
571
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
572
+ use_sliding_windows (`bool`, *optional*):
573
+ Whether to activate sliding window attention.
574
+ """
575
+ if not self._flash_attn_uses_top_left_mask:
576
+ causal = self.is_causal
577
+ else:
578
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
579
+ causal = self.is_causal and query_length != 1
580
+
581
+ # Contains at least one padding token in the sequence
582
+ if attention_mask is not None:
583
+ batch_size = query_states.shape[0]
584
+ (
585
+ query_states,
586
+ key_states,
587
+ value_states,
588
+ indices_q,
589
+ cu_seq_lens,
590
+ max_seq_lens,
591
+ ) = self._upad_input(
592
+ query_states, key_states, value_states, attention_mask, query_length
593
+ )
594
+
595
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
596
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
597
+
598
+ if not use_sliding_windows:
599
+ attn_output_unpad = flash_attn_varlen_func(
600
+ query_states,
601
+ key_states,
602
+ value_states,
603
+ cu_seqlens_q=cu_seqlens_q,
604
+ cu_seqlens_k=cu_seqlens_k,
605
+ max_seqlen_q=max_seqlen_in_batch_q,
606
+ max_seqlen_k=max_seqlen_in_batch_k,
607
+ dropout_p=dropout,
608
+ softmax_scale=softmax_scale,
609
+ causal=causal,
610
+ )
611
+ else:
612
+ attn_output_unpad = flash_attn_varlen_func(
613
+ query_states,
614
+ key_states,
615
+ value_states,
616
+ cu_seqlens_q=cu_seqlens_q,
617
+ cu_seqlens_k=cu_seqlens_k,
618
+ max_seqlen_q=max_seqlen_in_batch_q,
619
+ max_seqlen_k=max_seqlen_in_batch_k,
620
+ dropout_p=dropout,
621
+ softmax_scale=softmax_scale,
622
+ causal=causal,
623
+ window_size=(
624
+ self.config.sliding_window,
625
+ self.config.sliding_window,
626
+ ),
627
+ )
628
+
629
+ attn_output = pad_input(
630
+ attn_output_unpad, indices_q, batch_size, query_length
631
+ )
632
+ else:
633
+ if not use_sliding_windows:
634
+ attn_output = flash_attn_func(
635
+ query_states,
636
+ key_states,
637
+ value_states,
638
+ dropout,
639
+ softmax_scale=softmax_scale,
640
+ causal=causal,
641
+ )
642
+ else:
643
+ attn_output = flash_attn_func(
644
+ query_states,
645
+ key_states,
646
+ value_states,
647
+ dropout,
648
+ softmax_scale=softmax_scale,
649
+ causal=causal,
650
+ window_size=(
651
+ self.config.sliding_window,
652
+ self.config.sliding_window,
653
+ ),
654
+ )
655
+
656
+ return attn_output
657
+
658
+ def _upad_input(
659
+ self, query_layer, key_layer, value_layer, attention_mask, query_length
660
+ ):
661
+ batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
662
+
663
+ # On the first iteration we need to properly re-create the padding mask
664
+ # by slicing it at the proper position
665
+ if kv_seq_len != attention_mask.shape[-1]:
666
+ attention_mask_num_tokens = attention_mask.shape[-1]
667
+ attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
668
+
669
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
670
+
671
+ key_layer = index_first_axis(
672
+ key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
673
+ )
674
+ value_layer = index_first_axis(
675
+ value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
676
+ )
677
+
678
+ if query_length == kv_seq_len:
679
+ query_layer = index_first_axis(
680
+ query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim),
681
+ indices_k,
682
+ )
683
+ cu_seqlens_q = cu_seqlens_k
684
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
685
+ indices_q = indices_k
686
+ elif query_length == 1:
687
+ max_seqlen_in_batch_q = 1
688
+ cu_seqlens_q = torch.arange(
689
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
690
+ ) # There is a memcpy here, which is very bad.
691
+ indices_q = cu_seqlens_q[:-1]
692
+ query_layer = query_layer.squeeze(1)
693
+ else:
694
+ # The -q_len: slice assumes left padding.
695
+ attention_mask = attention_mask[:, -query_length:]
696
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(
697
+ query_layer, attention_mask
698
+ )
699
+
700
+ return (
701
+ query_layer,
702
+ key_layer,
703
+ value_layer,
704
+ indices_q,
705
+ (cu_seqlens_q, cu_seqlens_k),
706
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
707
+ )
708
+
709
+
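+ # Hedged illustration (not part of the original commit): for an attention_mask
+ # of [[1, 1, 1], [1, 1, 0]], _get_unpad_data yields the indices of the five
+ # real tokens and cu_seqlens = [0, 3, 5], the packed sequence boundaries that
+ # flash_attn_varlen_func expects after _upad_input flattens the batch.
+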
710
+ class SparseMistralAttention(MistralAttention):
711
+ def __init__(self, *args, **kwargs):
712
+ super().__init__(*args, **kwargs)
713
+
714
+ self.counts = 0
715
+
716
+ def forward(
717
+ self,
718
+ hidden_states: torch.Tensor,
719
+ attention_mask: Optional[torch.Tensor] = None,
720
+ position_ids: Optional[torch.LongTensor] = None,
721
+ past_key_value: Optional = None,
722
+ output_attentions: bool = False,
723
+ use_cache: bool = False,
724
+ **kwargs,
725
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
726
+ if "padding_mask" in kwargs:
727
+ warnings.warn(
728
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
729
+ )
730
+ bsz, q_len, _ = hidden_states.size()
731
+ mask = abs(hidden_states - hidden_states.mean()) < 0.6 * hidden_states.std()
732
+ hidden_states[mask] = 0
733
+ self.counts += 1
734
+ if 10 <= self.counts <= 11:
735
+ print(f"Attention {self.layer_idx}: ", (hidden_states == 0).float().mean())
736
+ self.counts += 1
737
+
738
+ query_states = self.q_proj(hidden_states)
739
+ key_states = self.k_proj(hidden_states)
740
+ value_states = self.v_proj(hidden_states)
741
+
742
+ query_states = query_states.view(
743
+ bsz, q_len, self.num_heads, self.head_dim
744
+ ).transpose(1, 2)
745
+ key_states = key_states.view(
746
+ bsz, q_len, self.num_key_value_heads, self.head_dim
747
+ ).transpose(1, 2)
748
+ value_states = value_states.view(
749
+ bsz, q_len, self.num_key_value_heads, self.head_dim
750
+ ).transpose(1, 2)
751
+
752
+ kv_seq_len = key_states.shape[-2]
753
+ if past_key_value is not None:
754
+ if self.layer_idx is None:
755
+ raise ValueError(
756
+ f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
757
+ "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
758
+ "with a layer index."
759
+ )
760
+ kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
761
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
762
+ query_states, key_states = apply_rotary_pos_emb(
763
+ query_states, key_states, cos, sin, position_ids
764
+ )
765
+
766
+ if past_key_value is not None:
767
+ cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models
768
+ key_states, value_states = past_key_value.update(
769
+ key_states, value_states, self.layer_idx, cache_kwargs
770
+ )
771
+
772
+ # repeat k/v heads if n_kv_heads < n_heads
773
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
774
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
775
+
776
+ attn_weights = torch.matmul(
777
+ query_states, key_states.transpose(2, 3)
778
+ ) / math.sqrt(self.head_dim)
779
+
780
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
781
+ raise ValueError(
782
+ f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
783
+ f" {attn_weights.size()}"
784
+ )
785
+
786
+ if attention_mask is not None:
787
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
788
+ raise ValueError(
789
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
790
+ )
791
+
792
+ attn_weights = attn_weights + attention_mask
793
+
794
+ # upcast attention to fp32
795
+ attn_weights = nn.functional.softmax(
796
+ attn_weights, dim=-1, dtype=torch.float32
797
+ ).to(query_states.dtype)
798
+ attn_weights = nn.functional.dropout(
799
+ attn_weights, p=self.attention_dropout, training=self.training
800
+ )
801
+ attn_output = torch.matmul(attn_weights, value_states)
802
+
803
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
804
+ raise ValueError(
805
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
806
+ f" {attn_output.size()}"
807
+ )
808
+
809
+ attn_output = attn_output.transpose(1, 2).contiguous()
810
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
811
+
812
+ attn_output = self.o_proj(attn_output)
813
+
814
+ if not output_attentions:
815
+ attn_weights = None
816
+
817
+ return attn_output, attn_weights, past_key_value
818
+
819
+
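+ # A minimal, hypothetical sketch (not used elsewhere in this file) of the
+ # std-based sparsification that SparseMistralAttention.forward applies to its
+ # input: entries within `ratio` standard deviations of the mean are zeroed.
+ def _example_std_sparsify(hidden_states: torch.Tensor, ratio: float = 0.6) -> torch.Tensor:
+     mask = (hidden_states - hidden_states.mean()).abs() < ratio * hidden_states.std()
+     return hidden_states.masked_fill(mask, 0.0)  # zero out near-mean activations
+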
820
+ class MistralSparseSiluMLP(MistralMLP):
821
+ def __init__(self, config, *args, **kwargs):
822
+ super().__init__(config)
823
+ self.swish_outputs = None
824
+ self.relu = nn.ReLU()
825
+ self.resilu = nn.Sequential(nn.ReLU(), nn.SiLU())  # ReLU followed by SiLU, matching set_sparse_threshold
826
+
827
+ self.kill_sparse_swish_outputs = False
828
+ self.dead_percentage = 0
829
+ self.is_stats = False
830
+ self.visit_counts = 0
831
+ self.pre_mlp_sparsity = 0
832
+
833
+ # Hyperparameters to tune
834
+ self.dead_threshold = kwargs.pop("dead_threshold", 0)
835
+ self.pre_mlp_threshold = kwargs.pop("pre_mlp_threshold", 0)
836
+ self.pre_mlp_dead_threshold = kwargs.pop("pre_mlp_dead_threshold", 0)
837
+ self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", True)
838
+ self.regularization_type = kwargs.pop(
839
+ "regularization_type", "L1 regularization"
840
+ )
841
+ self.regularization_threshold = kwargs.pop("regularization_threshold", 0.5)
842
+ self.use_relu = kwargs.pop("use_relu", False)
843
+ self.use_resilu = kwargs.pop("use_resilu", False)
844
+ self.activation_norm = None
845
+
846
+ # Activation Histograms
847
+ self.is_collect_histogram = False
848
+ num_bins = 1000
849
+ self.histogram_bins = torch.linspace(-1, 1, num_bins - 2)
850
+ self.histogram_bins = torch.cat(
851
+ [torch.tensor([-torch.inf]), self.histogram_bins, torch.tensor([torch.inf])]
852
+ )
853
+ self.pre_mlp_hist_counts = torch.zeros(num_bins - 1)
854
+ self.pre_act_hist_counts = torch.zeros(num_bins - 1)
855
+ self.post_act_hist_counts = torch.zeros(num_bins - 1)
856
+ self.t = 0
857
+ self.count = 0
858
+ self.agg_sparsity = 0
859
+
860
+ # Sparse activation function
861
+ self.sparse_act_fn = SparseSiLU(threshold=self.dead_threshold)
862
+
863
+ def activate_stats(self, is_collect_histogram: bool = True):
864
+ self.is_stats = True
865
+ self.dead_percentage = 0
866
+ self.visit_counts = 0
867
+ self.is_collect_histogram = is_collect_histogram
868
+ self.histogram_counts = torch.zeros(2000) # .to(self.down_proj.weight.device)
869
+
870
+ def deactivate_stats(self):
871
+ self.is_stats = False
872
+
873
+ def collect_stats(
874
+ self,
875
+ pre_mlp,
876
+ pre_activation,
877
+ post_activation,
878
+ ):
879
+ start_time = time.time()
880
+ pre_activation = pre_activation.float().cpu().detach()
881
+ post_activation = post_activation.float().cpu().detach()
882
+ # self.histogram_bins=self.histogram_bins.to(pre_activation.device).type(pre_activation.dtype)
883
+ self.pre_mlp_hist_counts += torch.histogram(pre_mlp.float().cpu().detach(), bins=self.histogram_bins)[0]  # accumulate on CPU like the other histograms
884
+ self.pre_act_hist_counts += torch.histogram(
885
+ pre_activation, bins=self.histogram_bins
886
+ )[0]
887
+ self.post_act_hist_counts += torch.histogram(
888
+ torch.abs(post_activation), bins=self.histogram_bins
889
+ )[0]
890
+ self.t += time.time() - start_time
891
+ if self.visit_counts % 30 == 0:
892
+ print(f"Time taken to collect stats: {self.t}s.")
893
+
894
+ def forward(
895
+ self,
896
+ x,
897
+ sp_mask: torch.Tensor = None,
898
+ ):
899
+ """
900
+ If kill_sparse_swish_outputs is set to False, this layer functions exactly like a normal MLP layer.
901
+ """
902
+ if sp_mask is not None:  # When a sparse mask is given
903
+ return self.down_proj(
904
+ self.sparse_act_fn(self.gate_proj(x) * sp_mask) * self.up_proj(x)
905
+ ) # TODO: This doesn't accelerate runtime (it actually slows it down)
906
+
907
+ elif self.use_relu or self.use_resilu:
908
+ if self.use_relu:
909
+ post_act = self.relu(self.gate_proj(x))
910
+ else:
911
+ post_act = self.resilu(self.gate_proj(x))
912
+ self.count += 1
913
+ if self.count <= 1:
914
+ print("USING RELU or ReSiLU!!!!")
915
+
916
+ if self.is_stats:
917
+ dead_neurons = post_act == 0
918
+ dead_percentage = dead_neurons.float().mean()
919
+ agg_sparsity = dead_neurons.all(dim=0).float().mean()
920
+
921
+ self.dead_percentage = (
922
+ self.dead_percentage * self.visit_counts + dead_percentage
923
+ ) / (self.visit_counts + 1)
924
+ self.agg_sparsity = (
925
+ self.agg_sparsity * self.visit_counts + agg_sparsity
926
+ ) / (self.visit_counts + 1)
927
+ self.visit_counts += 1
928
+
929
+ return self.down_proj(post_act * self.up_proj(x))
930
+
931
+ else:
932
+ self.count += 1
933
+ # x[abs(x) < 0.6 * x.std()] = 0
934
+ if self.count <= 1:
935
+ print("USING SparseSILU!!!!")
936
+ # print(x.mean(), x.std(), x.max(), x.min())
937
+ # print(f"pre mlp sparsity: {(x==0).float().mean()}")
938
+ pre_act = self.gate_proj(x)
939
+ post_act = self.act_fn(pre_act)
940
+ if self.kill_sparse_swish_outputs:
941
+ dead_neurons = post_act.abs() <= self.dead_threshold
942
+ # print("pre act sparsity: ", (pre_act==0).float().mean())
943
+
944
+ dead_percentage = dead_neurons.float().mean()
945
+ agg_sparsity = dead_neurons.all(dim=0).float().mean()
946
+
947
+ if self.is_stats:
948
+ self.dead_percentage = (
949
+ self.dead_percentage * self.visit_counts + dead_percentage
950
+ ) / (self.visit_counts + 1)
951
+ self.agg_sparsity = (
952
+ self.agg_sparsity * self.visit_counts + agg_sparsity
953
+ ) / (self.visit_counts + 1)
954
+ self.pre_mlp_sparsity = (
955
+ self.pre_mlp_sparsity * self.visit_counts
956
+ + (x == 0).float().mean()
957
+ ) / (self.visit_counts + 1)
958
+ self.visit_counts += 1
959
+
960
+ self.a = dead_percentage
961
+
962
+ # print(self.agg_sparsity)
963
+
964
+ # Collect histogram stats
965
+ if (
966
+ self.is_collect_histogram
967
+ and pre_act.eq(0).float().mean() < 0.99
968
+ ): # Skip (almost) fully padded batches
969
+ self.collect_stats(x, pre_act, post_act)
970
+
971
+ post_act[dead_neurons] = 0
972
+ if 10 <= self.count <= 11:
973
+ print(
974
+ f"sparsity: {dead_percentage}/ pre-activation sparsity: {(x==0).float().mean()}"
975
+ )
976
+
977
+ out = self.down_proj(post_act * self.up_proj(x))
978
+ if self.use_sparse_regularization:
979
+ if self.regularization_type == "L1 regularization":
980
+ self.activation_norm = torch.abs(post_act)[
981
+ post_act < self.regularization_threshold
982
+ ].mean()
983
+ elif self.regularization_type == "L2 regularization":
984
+ self.activation_norm = torch.sqrt(
985
+ torch.square(post_act)[post_act < self.regularization_threshold]
986
+ ).mean()
987
+
988
+ return out
989
+
990
+
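+ # A minimal, hypothetical sketch of the dead-neuron masking and the
+ # "L1 regularization" term computed in MistralSparseSiluMLP.forward above;
+ # the name and signature are illustrative only.
+ def _example_sparse_silu(pre_act: torch.Tensor, dead_threshold: float, reg_threshold: float):
+     post_act = nn.functional.silu(pre_act)
+     dead_neurons = post_act.abs() <= dead_threshold  # neurons treated as inactive
+     post_act = post_act.masked_fill(dead_neurons, 0.0)
+     # mean absolute value of the small activations, as in the L1 branch above
+     activation_norm = post_act.abs()[post_act < reg_threshold].mean()
+     return post_act, activation_norm
+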
991
+ class SparseMistralDecoderLayer(MistralDecoderLayer):
992
+ def __init__(
993
+ self,
994
+ config: MistralConfig,
995
+ layer_idx: int,
996
+ decoder_layer: MistralDecoderLayer,
997
+ init_svd: bool = True,
998
+ *args,
999
+ **kwargs,
1000
+ ):
1001
+ assert isinstance(
1002
+ decoder_layer.mlp, MistralSparseSiluMLP
1003
+ ), f"{type(decoder_layer.mlp)} should MistralSparseSiluMLP."
1004
+
1005
+ super().__init__(config, layer_idx)
1006
+ self.hidden_size = config.hidden_size
1007
+ self.intermediate_size = config.intermediate_size
1008
+
1009
+ self.init_svd = init_svd
1010
+ self.self_attn = decoder_layer.self_attn
1011
+
1012
+ self.mlp = decoder_layer.mlp
1013
+ self.input_layernorm = decoder_layer.input_layernorm
1014
+ self.post_attention_layernorm = decoder_layer.post_attention_layernorm
1015
+
1016
+ # Sparse predictor for mlp (initialized with SVD decomposed matrix)
1017
+ self.low_rank = kwargs.pop("low_rank", 64)
1018
+ self.sparse_act_func = decoder_layer.mlp.sparse_act_fn
1019
+
1020
+ print(
1021
+ f"Setting {layer_idx}th mlp layer's sparse predictor... svd init: {init_svd}"
1022
+ )
1023
+ self.sp_mlp = low_rank_approximation(
1024
+ decoder_layer.mlp.gate_proj,
1025
+ act_func=self.sparse_act_func,
1026
+ init_svd=init_svd,
1027
+ )
1028
+ self.use_async = kwargs.pop("use_async", False)
1029
+ self.use_sparse_predictor = False
1030
+ self.distill_loss = None
1031
+
1032
+ def forward(
1033
+ self,
1034
+ hidden_states: torch.Tensor,
1035
+ attention_mask: Optional[torch.Tensor] = None,
1036
+ position_ids: Optional[torch.LongTensor] = None,
1037
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1038
+ output_attentions: Optional[bool] = False,
1039
+ use_cache: Optional[bool] = False,
1040
+ **kwargs,
1041
+ ) -> Tuple[
1042
+ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
1043
+ ]:
1044
+ print("hidden_states shape: ", hidden_states.shape)
1045
+ if "padding_mask" in kwargs:
1046
+ warnings.warn(
1047
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
1048
+ )
1049
+
1050
+ residual = hidden_states
1051
+ sp_mask = None
1052
+
1053
+ if self.use_async:
1054
+ sp_mask = self.sp_mlp(hidden_states)
1055
+
1056
+ hidden_states = self.input_layernorm(hidden_states)
1057
+
1058
+ # Self Attention
1059
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1060
+ hidden_states=hidden_states,
1061
+ attention_mask=attention_mask,
1062
+ position_ids=position_ids,
1063
+ past_key_value=past_key_value,
1064
+ output_attentions=output_attentions,
1065
+ use_cache=use_cache,
1066
+ )
1067
+ hidden_states = residual + hidden_states
1068
+
1069
+ # Fully Connected
1070
+ residual = hidden_states
1071
+ hidden_states = self.post_attention_layernorm(hidden_states)
1072
+
1073
+ if not self.use_async:
1074
+ sp_mask = self.sp_mlp(hidden_states)
1075
+
1076
+ # Compute distillation loss
1077
+ gating_output = self.mlp.sparse_act_fn(self.mlp.gate_proj(hidden_states))
1078
+ loss_func = MSELoss()
1079
+ self.distill_loss = loss_func(sp_mask, gating_output)
1080
+
1081
+ # Convert sp mask into binary form
1082
+ sp_mask = sp_mask > 0
1083
+
1084
+ if self.training:
1085
+ sp_mask = None
1086
+ # if not self.use_sparse_predictor:
1087
+ # sp_mask = None
1088
+
1089
+ hidden_states = self.mlp(hidden_states, sp_mask)
1090
+ hidden_states = residual + hidden_states
1091
+
1092
+ outputs = (hidden_states,)
1093
+
1094
+ if output_attentions:
1095
+ outputs += (self_attn_weights,)
1096
+
1097
+ if use_cache:
1098
+ outputs += (present_key_value,)
1099
+
1100
+ return outputs
1101
+
1102
+
1103
+ class SparseMistralConfig(MistralConfig):
1104
+ model_type = "sparse_mistral"
1105
+
1106
+ def __init__(self, **kwargs):
1107
+ super().__init__(**kwargs)
1108
+
1109
+
1110
+ class SparseMistralforCausalLM(MistralForCausalLM):
1111
+ config_class = SparseMistralConfig
1112
+
1113
+ def __init__(self, config):
1114
+ super().__init__(config)
1115
+ self.config = config
1116
+ if config.use_sparse_model:
1117
+ self.apply_sparse_mlp()
1118
+ if config.thresholds is not None:
1119
+ for idx, m in enumerate(self.model.layers):
1120
+ if isinstance(m.mlp, MistralSparseSiluMLP):
1121
+ m.mlp.dead_threshold = config.thresholds[idx]
1122
+ m.mlp.sparse_act_fn.set_new_threshold(m.mlp.dead_threshold)
1123
+ m.mlp.kill_sparse_swish_outputs = True
1124
+ m.mlp.use_relu = getattr(config, "use_relu", False)
1125
+ m.mlp.use_resilu = getattr(config, "use_resilu", False)
1126
+ if config.use_sparse_predictor:
1127
+ self.apply_sparse_predictor(init_svd=config.init_svd)
1128
+
1129
+ def apply_sparse_mlp(self):
1130
+ apply_mistral_sparse_silu_mlp(
1131
+ self,
1132
+ config=self.config,
1133
+ use_sparse_regularization=self.config.use_sparse_regularization,
1134
+ )
1135
+
1136
+ def apply_sparse_predictor(self, init_svd: bool = True):
1137
+ apply_mistral_sparse_decoder_layer(self, config=self.config, init_svd=init_svd)
1138
+
1139
+
1140
+ class GracefulRegularizationScheduler(TrainerCallback):
1141
+ def __init__(
1142
+ self,
1143
+ num_warmup_steps=40,
1144
+ is_enabled: bool = False,
1145
+ model_name: str = "mistral",
1146
+ test_dataset: Dataset = None,
1147
+ targeted_sparsity: float = 0.5,
1148
+ keep_regularization_with_kill: bool = False,
1149
+ ):
1150
+ """Scheduler for regularizing the model first before applying the dead threshold.
1151
+
1152
+ :param num_warmup_steps: number of training steps required to reach the dead threshold, defaults to 40
1153
+ :param targeted_sparsity: targeted sparsity level used to set the dead threshold once the warmup ends
1155
+ """
1156
+ self.num_warmup_steps = num_warmup_steps
1157
+ self.is_enabled = is_enabled
1158
+ self.model_name = model_name
1159
+ self.test_dataset = test_dataset
1160
+ self.targeted_sparsity = targeted_sparsity
1161
+ self.keep_regularization_with_kill = keep_regularization_with_kill
1162
+ self.act_hist_path = (
1163
+ f"/matx/u/vxbrando/histograms/warm_up_reg_{targeted_sparsity}/act_hist.pt"
1164
+ )
1165
+ if self.is_enabled:
1166
+ print("GracefulRegularizationScheduler is enabled.")
1167
+ self.trainer = None
1168
+
1169
+ def set_trainer(self, trainer):
1170
+ self.trainer = trainer
1171
+
1172
+ def on_step_end(self, args, state, control, **kwargs):
1173
+ if not self.is_enabled:
1174
+ return
1175
+
1176
+ model = kwargs["model"]
1177
+ if isinstance(model, PeftModel):
1178
+ base_model = model.get_base_model()
1179
+ else:
1180
+ base_model = model
1181
+
1182
+ if state.global_step == 1:
1183
+ ds_print("Setting an initial reg threshold to 0.1")
1184
+ set_regularization_threshold(base_model, 0.1)
1185
+
1186
+ # if state.global_step >= self.num_warmup_steps and state.global_step % 50 == 0:
1187
+ if state.global_step == self.num_warmup_steps:
1188
+ activate_stats(base_model)
1189
+ enable_sparse_silu(base_model)
1190
+ self.trainer.evaluate()
1191
+ save_act_hist(base_model, self.act_hist_path)
1192
+ set_sparse_threshold(base_model, self.targeted_sparsity, True)
1193
+ deactivate_stats(base_model)
1194
+ self.trainer.use_sparse_regularization = self.keep_regularization_with_kill
1195
+ # set_layer_specific_regularization(model.get_base_model())
1196
+ print_dead_neuron_stats(model.get_base_model())
1197
+
1198
+ if state.global_step % 2000 == 0:
1199
+ if is_mainprocess():
1200
+ ds_print(
1201
+ f"Saving to /scr/lukeai/{self.model_name}_{state.global_step}.pt",
1202
+ )
1203
+ torch.save(
1204
+ model.state_dict(),
1205
+ f"/scr/lukeai/{self.model_name}_{state.global_step}.pt",
1206
+ )
1207
+
1208
+
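+ # Hedged usage sketch (assumes a standard transformers Trainer named `trainer`
+ # has been built elsewhere; not part of the original commit):
+ #
+ #     scheduler = GracefulRegularizationScheduler(
+ #         num_warmup_steps=40, is_enabled=True, targeted_sparsity=0.5
+ #     )
+ #     trainer.add_callback(scheduler)
+ #     scheduler.set_trainer(trainer)  # lets on_step_end call trainer.evaluate()
+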
1209
+ class GradualSparsificationScheduler(TrainerCallback):
1210
+ def __init__(
1211
+ self,
1212
+ num_warmup_steps=40,
1213
+ increment_ratio=0.5,
1214
+ is_enabled: bool = False,
1215
+ model_name: str = "mistral",
1216
+ ):
1217
+ """Scheduler for gradually increasing a dead threshold until it reaches the desired threshold.
1218
+
1219
+ :param num_warmup_steps: number of training steps required to reach the dead threshold, defaults to 40
1220
+ :param increment_ratio: by how much to increase the dead threshold.
1221
+ For example, 0.5 means "increase the threshold by 0.5 * desired threshold".
1222
+ """
1223
+ self.num_warmup_steps = num_warmup_steps
1224
+ self.increment_ratio = increment_ratio
1225
+ self.step_size = int(num_warmup_steps * increment_ratio)
1226
+ self.is_enabled = is_enabled
1227
+ self.model_name = model_name
1228
+
1229
+ def on_step_end(self, args, state, control, **kwargs):
1230
+ model = kwargs["model"]
1231
+
1232
+ if not self.is_enabled:
1233
+ if state.global_step <= 10:
1234
+ for module in model.modules():
1235
+ if isinstance(module, MistralSparseSiluMLP):
1236
+ module.current_dead_threshold = module.dead_threshold
1237
+ return
1238
+
1239
+ current_dead_threshold = 0
1240
+ desired_dead_threshold = 0
1241
+
1242
+ if is_mainprocess():
1243
+ ds_print(state.global_step)
1244
+
1245
+ if state.global_step % self.step_size == 2:
1246
+ for module in model.modules():
1247
+ if isinstance(module, MistralSparseSiluMLP):
1248
+ desired_dead_threshold = copy.deepcopy(module.dead_threshold)
1249
+ current_dead_threshold = module.current_dead_threshold
1250
+ current_dead_threshold += (
1251
+ self.increment_ratio * desired_dead_threshold
1252
+ )
1253
+ module.current_dead_threshold = min(
1254
+ desired_dead_threshold, current_dead_threshold
1255
+ )
1256
+
1257
+ if is_running_deepspeed and is_mainprocess():
1258
+ ds_print(
1259
+ state.global_step,
1260
+ current_dead_threshold,
1261
+ desired_dead_threshold,
1262
+ )
1263
+
1264
+ if state.global_step % 2000 == 0:
1265
+ if is_running_deepspeed and is_mainprocess():
1266
+ ds_print(
1267
+ f"Saving to /matx/u/lukeai/{self.model_name}_{state.global_step - 2}.pt",
1268
+ )
1269
+ torch.save(
1270
+ model.state_dict(),
1271
+ f"/matx/u/lukeai/{self.model_name}_{state.global_step - 2}.pt",
1272
+ )
1273
+
1274
+
1275
+ def get_sparse_mistral_config(
1276
+ config: MistralConfig,
1277
+ use_sparse_model=False,
1278
+ use_sparse_predictor=False,
1279
+ use_sparse_regularization=False,
1280
+ thresholds=None,
1281
+ ):
1282
+ new_config = SparseMistralConfig()
1283
+ new_config.__dict__.update(config.__dict__)
1284
+ config = new_config
1285
+ config.use_sparse_model = use_sparse_model
1286
+ config.use_sparse_predictor = use_sparse_predictor
1287
+ config.use_sparse_regularization = use_sparse_regularization
1288
+ config.thresholds = thresholds
1289
+
1290
+ return config
1291
+
1292
+
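+ # Hedged usage sketch (the model name and flags are illustrative assumptions):
+ #
+ #     config = get_sparse_mistral_config(
+ #         MistralConfig.from_pretrained("mistralai/Mistral-7B-v0.1"),
+ #         use_sparse_model=True,
+ #     )
+ #     model = SparseMistralforCausalLM.from_pretrained(
+ #         "mistralai/Mistral-7B-v0.1", config=config
+ #     )
+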
1293
+ def apply_mistral_sparse_silu_mlp(
1294
+ model, config, use_sparse_regularization: bool = False, use_flash_attn: bool = False
1295
+ ):
1296
+ for layer in model.model.layers:
1297
+ # counts += 1
1298
+ # if counts < 4:
1299
+ # continue
1300
+ original_mlp = layer.mlp
1301
+ new_mlp = MistralSparseSiluMLP(
1302
+ config, use_sparse_regularization=use_sparse_regularization
1303
+ )
1304
+ new_mlp.gate_proj = original_mlp.gate_proj
1305
+ new_mlp.up_proj = original_mlp.up_proj
1306
+ new_mlp.down_proj = original_mlp.down_proj
1307
+ layer.mlp = new_mlp
1308
+
1309
+ # for layer in model.model.layers:
1310
+ # original_attention = layer.self_attn
1311
+ # if use_flash_attn:
1312
+ # new_attention = SparseMistralFlashAttention(
1313
+ # config=original_attention.config, layer_idx=original_attention.layer_idx
1314
+ # )
1315
+ #
1316
+ # else:
1317
+ # new_attention = SparseMistralAttention(
1318
+ # config=original_attention.config, layer_idx=original_attention.layer_idx
1319
+ # )
1320
+ # for attr in vars(original_attention):
1321
+ # setattr(new_attention, attr, getattr(original_attention, attr))
1322
+ # layer.self_attn = new_attention
1323
+
1324
+
1325
+ def apply_mistral_sparse_attention(
1326
+ model,
1327
+ config,
1328
+ ):
1329
+ for layer in model.model.layers:
1330
+ layer.self_attention = layer.self_attention  # no-op placeholder (the attention swap is commented out above)
1331
+
1332
+
1333
+ def apply_mistral_sparse_decoder_layer(
1334
+ model,
1335
+ config,
1336
+ init_svd: bool = True,
1337
+ ):
1338
+ assert isinstance(model.model, MistralModel), "model.model must be a MistralModel."
1339
+ new_layers = []
1340
+ for layer_idx, layer in enumerate(model.model.layers):
1341
+ if isinstance(layer.mlp, MistralSparseSiluMLP):
1342
+ new_layers.append(
1343
+ SparseMistralDecoderLayer(
1344
+ config=config,
1345
+ layer_idx=layer_idx,
1346
+ decoder_layer=layer,
1347
+ init_svd=init_svd,
1348
+ )
1349
+ )
1350
+ print(f"{layer_idx}th mlp layer activation: {layer.mlp.sparse_act_fn}")
1351
+ else:
1352
+ new_layers.append(layer)
1353
+ model.model.layers = nn.ModuleList(new_layers)
1354
+
1355
+
1356
+ def enable_sparse_predictor(
1357
+ model,
1358
+ ):
1359
+ for layer_idx, layer in enumerate(model.model.layers):
1360
+ if isinstance(layer, MistralDecoderLayer):
1361
+ layer.use_sparse_predictor = True
1362
+
1363
+
1364
+ def disable_sparse_predictor(
1365
+ model,
1366
+ ):
1367
+ for layer_idx, layer in enumerate(model.model.layers):
1368
+ if isinstance(layer, MistralDecoderLayer):
1369
+ layer.use_sparse_predictor = False
1370
+
1371
+
1372
+ def activate_stats(model, is_collect_histogram: bool = True):
1373
+ for layer in model.model.layers:
1374
+ if isinstance(layer.mlp, MistralSparseSiluMLP):
1375
+ layer.mlp.activate_stats(is_collect_histogram=is_collect_histogram)
1376
+ if isinstance(layer.self_attn, SparseMistralAttention):
1377
+ layer.self_attn.activate_stats()
1378
+
1379
+
1380
+ def deactivate_stats(model):
1381
+ for layer in model.model.layers:
1382
+ if isinstance(layer.mlp, MistralSparseSiluMLP):
1383
+ layer.mlp.deactivate_stats()
1384
+
1385
+
1386
+ def enable_sparse_silu(model):
1387
+ print("Enabling SparseSilu")
1388
+ for i, layer in enumerate(model.model.layers):
1389
+ if isinstance(layer.mlp, MistralSparseSiluMLP):
1390
+ layer.mlp.kill_sparse_swish_outputs = True
1391
+
1392
+
1393
+ def print_dead_neuron_stats(model):
1394
+ total_sparsity = 0
1395
+ counts = 0
1396
+ for i, layer in enumerate(model.model.layers):
1397
+ if isinstance(layer.mlp, MistralSparseSiluMLP):
1398
+ dead_percentage = layer.mlp.dead_percentage * 100
1399
+ agg_sparsity = layer.mlp.agg_sparsity * 100
1400
+ pre_mlp_sparsity = layer.mlp.pre_mlp_sparsity * 100
1401
+ print(f"layer {i} sparsity: {dead_percentage:.3f}%")
1402
+ print(f"layer {i} agg sparsity: {agg_sparsity:.3f}%")
1403
+ print(f"layer {i} pre_mlp_sparsity: {pre_mlp_sparsity:.3f}%")
1404
+
1405
+ total_sparsity += dead_percentage
1406
+ counts += 1
1407
+ if isinstance(layer.self_attn, (SparseMistralAttention, SparseMistralFlashAttention)):
1408
+ print(
1409
+ f"Attention layer {i} sparsity: {layer.self_attn.pre_attn_sparsity * 100: .3f}%"
1410
+ )
1411
+
1412
+ print(f"Total sparsity: {total_sparsity/counts: .3f}%")
1413
+ return total_sparsity / counts
1414
+
1415
+
1416
+ def get_sparse_layers(model: MistralModel):
1417
+ sparse_layers = [
1418
+ m.mlp for m in model.layers if isinstance(m.mlp, MistralSparseSiluMLP)
1419
+ ]
1420
+ return sparse_layers
1421
+
1422
+
1423
+ def get_threshold(
1424
+ bin_edges: torch.Tensor, histogram_counts: torch.Tensor, sparsity_level: float
1425
+ ): # Only for L1 Regularization
1426
+ assert (
1427
+ len(bin_edges.shape) == len(histogram_counts.shape) == 1
1428
+ ), "bin_edges and histogram are expected to be 1-dimensional."
1429
+ histogram_counts /= histogram_counts.sum()
1430
+ threshold_idx = torch.searchsorted(
1431
+ histogram_counts.cumsum(0), sparsity_level, side="right"
1432
+ )
1433
+
1434
+ return bin_edges[threshold_idx]
1435
+
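+ # Hedged usage sketch: with the MLP's accumulated absolute post-activation
+ # histogram, this picks the bin edge where the empirical CDF first exceeds
+ # the target sparsity, e.g.
+ #
+ #     threshold = get_threshold(mlp.histogram_bins, mlp.post_act_hist_counts, 0.5)
+ #
+ # so that roughly half of the post-activation magnitudes fall below it.
+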
1436
+
1437
+ def set_regularization_threshold(model, threshold: float = 0.1):
1438
+ for i, layer in enumerate(model.model.layers):
1439
+ if (
1440
+ isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
1441
+ ): # The threshold can be set only once the relevant statistics have been collected.
1442
+ layer.mlp.regularization_threshold = threshold # TODO: find better param
1443
+
1444
+
1445
+ def set_sparse_threshold(
1446
+ model, sparsity_level: float, use_relu: bool = False, use_resilu: bool = False
1447
+ ):
1448
+ assert not (use_relu and use_resilu), "It's not allowed to use both relu and resilu"
1449
+ for i, layer in enumerate(model.model.layers):
1450
+ if (
1451
+ isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
1452
+ ): # The threshold can be set only once the relevant statistics have been collected.
1453
+ if use_relu:
1454
+ layer.mlp.sparse_act_fn = nn.ReLU()
1455
+ layer.mlp.use_relu = True
1456
+ layer.mlp.use_resilu = False
1457
+ elif use_resilu:
1458
+ layer.mlp.sparse_act_fn = nn.Sequential(nn.ReLU(), nn.SiLU())
1459
+ layer.mlp.use_resilu = True
1460
+ layer.mlp.use_relu = False
1461
+ else:
1462
+ layer.mlp.dead_threshold = get_threshold(
1463
+ layer.mlp.histogram_bins,
1464
+ layer.mlp.post_act_hist_counts,
1465
+ sparsity_level,
1466
+ )
1467
+ layer.mlp.sparse_act_fn.set_new_threshold(layer.mlp.dead_threshold)
1468
+ layer.mlp.regularization_threshold = (
1469
+ layer.mlp.dead_threshold * 1.2
1470
+ ) # TODO: find better param
1471
+
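+ # Hedged end-to-end sketch of the calibration flow also used by
+ # GracefulRegularizationScheduler.on_step_end above:
+ #
+ #     activate_stats(model)                      # start collecting histograms
+ #     trainer.evaluate()                         # stream data through the model
+ #     set_sparse_threshold(model, sparsity_level=0.5)
+ #     deactivate_stats(model)
+ #     print_dead_neuron_stats(model)
+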
1472
+
1473
+ def plot_histogram(
1474
+ bin_edges,
1475
+ histogram_counts: torch.Tensor,
1476
+ title: str = "Activation Distribution",
1477
+ fig_dir: str = "figures",
1478
+ ):
1479
+ plt.bar(
1480
+ bin_edges[:-1], histogram_counts, width=np.diff(bin_edges), edgecolor="black"
1481
+ )
1482
+ plt.title(title)
1483
+ plt.xlabel("Activation Value")
1484
+ plt.ylabel("Frequency")
1485
+ os.makedirs(fig_dir, exist_ok=True)
1486
+ plt.savefig(f"{fig_dir}/{title}.png")
1487
+ # plt.show()
1488
+ plt.clf()
1489
+
1490
+
1491
+ def plot_act(model, fig_dir: str = "figures"):
1492
+ for i, layer in enumerate(model.model.layers):
1493
+ if (
1494
+ isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
1495
+ ): # Plot only when the relevant statistics have been collected.
1496
+ plot_title = f"Layer: {i} Pre-Activation Distribution"
1497
+ plot_histogram(
1498
+ layer.mlp.histogram_bins, layer.mlp.pre_act_hist_counts, plot_title
1499
+ )
1500
+
1501
+ plot_title = f"Layer: {i} Post-Activation Absolute Distribution"
1502
+ plot_histogram(
1503
+ layer.mlp.histogram_bins, layer.mlp.post_act_hist_counts, plot_title
1504
+ )
1505
+
1506
+
1507
+ def save_act_hist(
1508
+ model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"
1509
+ ):
1510
+ os.makedirs(os.path.dirname(filename), exist_ok=True)
1511
+ act_dict = {}
1512
+ for i, layer in enumerate(model.model.layers):
1513
+ if (
1514
+ isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
1515
+ ): # Save only when the relevant statistics have been collected.
1516
+ act_dict[i] = (
1517
+ layer.mlp.histogram_bins,
1518
+ # layer.mlp.pre_mlp_hist_counts,
1519
+ layer.mlp.pre_act_hist_counts,
1520
+ layer.mlp.post_act_hist_counts,
1521
+ )
1522
+ print("Saving activation histograms...\n\n\n")
1523
+ torch.save(act_dict, filename)
1524
+
1525
+
1526
+ def load_act_hist(
1527
+ model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"
1528
+ ):
1529
+ assert os.path.exists(
1530
+ filename
1531
+ ), f"{filename} does not exist when loading pre/post-activation histogram of SparseMistralSiluMLP."
1532
+ print("Loading activation histograms...\n\n\n")
1533
+
1534
+ act_dict = torch.load(filename)
1535
+ for i, layer in enumerate(model.model.layers):
1536
+ if (
1537
+ isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
1538
+ ): # Load only when the relevant statistics have been collected.
1539
+ (
1540
+ layer.mlp.histogram_bins,
1541
+ # layer.mlp.pre_mlp_hist_counts,
1542
+ layer.mlp.pre_act_hist_counts,
1543
+ layer.mlp.post_act_hist_counts,
1544
+ ) = act_dict[i]
1545
+
1546
+
1547
+ def enable_last_k_modules(model, start_module_idx: int):
1548
+ assert 32 > start_module_idx >= 0
1549
+ new_modules = []
1550
+ new_idx = 0
1551
+ for idx in range(start_module_idx, len(model.model.original_layers)):
1552
+ module = model.model.original_layers[idx]
1553
+ module.layer_idx = new_idx
1554
+ module.self_attn.layer_idx = new_idx
1555
+ new_modules.append(module)
1556
+ new_idx += 1
1557
+ print(module.layer_idx)
1558
+
1559
+ model.model.layers = nn.ModuleList(new_modules)
1560
+
1561
+
1562
+ def enable_first_k_modules(model, end_module_idx: int):
1563
+ assert 32 > end_module_idx >= 0
1564
+ new_modules = []
1565
+ new_idx = 0
1566
+ for idx in range(0, end_module_idx + 1):
1567
+ module = model.model.original_layers[idx]
1568
+ module.layer_idx = new_idx
1569
+ module.self_attn.layer_idx = new_idx
1570
+ new_modules.append(module)
1571
+ new_idx += 1
1572
+ print(module.layer_idx)
1573
+
1574
+ model.model.layers = nn.ModuleList(new_modules)
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "</s>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "added_tokens_decoder": {
5
+ "0": {
6
+ "content": "<unk>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "1": {
14
+ "content": "<s>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "2": {
22
+ "content": "</s>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ }
29
+ },
30
+ "additional_special_tokens": [],
31
+ "bos_token": "<s>",
32
+ "clean_up_tokenization_spaces": false,
33
+ "eos_token": "</s>",
34
+ "legacy": true,
35
+ "model_max_length": 1000000000000000019884624838656,
36
+ "pad_token": "</s>",
37
+ "sp_model_kwargs": {},
38
+ "spaces_between_special_tokens": false,
39
+ "tokenizer_class": "LlamaTokenizer",
40
+ "unk_token": "<unk>",
41
+ "use_default_system_prompt": false
42
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a7f727fbdd379a7ca3d793ae91c9d375cbe553ad805bc5f2805c488855f7db32
3
+ size 6456