vxbrandon committed on
Commit
2a1e815
1 Parent(s): 69bc423

Training in progress, step 500

README.md ADDED
@@ -0,0 +1,75 @@
---
license: apache-2.0
base_model: mistralai/Mistral-7B-v0.1
tags:
- generated_from_trainer
model-index:
- name: sparse_mistral_7b_refined_web_50p_2024-04-12
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# sparse_mistral_7b_refined_web_50p_2024-04-12

This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 2.2135

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 1
- eval_batch_size: 4
- seed: 0
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 8
- total_train_batch_size: 32
- total_eval_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 350

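The total train batch size of 32 is the product of the per-device batch size, the number of devices, and the gradient-accumulation steps: 1 × 4 × 8 = 32 examples per optimizer step.
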
### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.3391        | 0.01  | 25   | 2.4196          |
| 2.2711        | 0.02  | 50   | 2.3577          |
| 2.3054        | 0.02  | 75   | 2.3158          |
| 2.2795        | 0.03  | 100  | 2.2966          |
| 2.3175        | 0.04  | 125  | 2.2846          |
| 2.2388        | 0.05  | 150  | 2.2766          |
| 2.1679        | 0.06  | 175  | 2.2705          |
| 2.2996        | 0.06  | 200  | 2.2678          |
| 2.2788        | 0.07  | 225  | 2.2647          |
| 2.2448        | 0.08  | 250  | 2.2637          |
| 2.1813        | 0.09  | 275  | 2.2619          |
| 2.2059        | 0.10  | 300  | 2.2602          |
| 2.2689        | 0.10  | 325  | 2.2582          |
| 2.2238        | 0.11  | 350  | 2.2579          |


### Framework versions

- Transformers 4.36.2
- Pytorch 2.1.2+cu121
- Datasets 2.15.0
- Tokenizers 0.15.0
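
## Example usage

The `auto_map` entry in `config.json` points `AutoModelForCausalLM` at the custom `ugly_utils.SparseMistralforCausalLM` class, so loading this checkpoint requires `trust_remote_code=True`. A minimal loading sketch (the repo id below is assumed from the committer and model name; substitute the actual path if it differs):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "vxbrandon/sparse_mistral_7b_refined_web_50p_2024-04-12"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype="auto",      # the checkpoint is stored in bfloat16
    trust_remote_code=True,  # required for ugly_utils.SparseMistralforCausalLM
)

inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```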
adapter_config.json ADDED
@@ -0,0 +1,29 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "down_proj",
    "q_proj",
    "up_proj",
    "gate_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
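
The adapter config above describes the LoRA adapter stored in `adapter_model.safetensors`: rank `r=64` with `lora_alpha=16` (so updates are scaled by alpha/r = 0.25), dropout 0.1, applied to the attention `q_proj`/`v_proj` and the MLP `gate_proj`/`up_proj`/`down_proj` projections. A minimal sketch of attaching it to the base model with PEFT (repo id assumed as above; the repo also ships fully merged shards, so this step is optional):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", torch_dtype=torch.bfloat16
)
# Attach the LoRA weights from this repo (assumed id) on top of the base model.
model = PeftModel.from_pretrained(
    base, "vxbrandon/sparse_mistral_7b_refined_web_50p_2024-04-12"
)
model = model.merge_and_unload()  # optionally fold the adapter into the base weights
```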
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d6179150f0744f210ef1094e253c38b788ddc960d935fc05bd455b1241e71412
size 281061608
config.json ADDED
@@ -0,0 +1,72 @@
{
  "_name_or_path": "mistralai/Mistral-7B-v0.1",
  "architectures": [
    "SparseMistralforCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoModelForCausalLM": "ugly_utils.SparseMistralforCausalLM"
  },
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "sparse_llama",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "sliding_window": 4096,
  "thresholds": [
    0.01905716396868229,
    0.027081236243247986,
    0.04513540118932724,
    0.05315947160124779,
    0.06720159947872162,
    0.07923770695924759,
    0.08726178109645844,
    0.09528584778308868,
    0.10130390524864197,
    0.0992978885769844,
    0.10732196271419525,
    0.10732196271419525,
    0.11735205352306366,
    0.11735205352306366,
    0.12738214433193207,
    0.14343027770519257,
    0.16349045932292938,
    0.17552657425403595,
    0.19157472252845764,
    0.20762285590171814,
    0.21364091336727142,
    0.22968906164169312,
    0.22768303751945496,
    0.2357071191072464,
    0.23971915245056152,
    0.23971915245056152,
    0.2357071191072464,
    0.23771312832832336,
    0.2357071191072464,
    0.23370109498500824,
    0.23971915245056152,
    0.24774321913719177
  ],
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.36.2",
  "us_sparse_regularization": false,
  "use_cache": false,
  "use_graceful_regularization": false,
  "use_relu": false,
  "use_sparse_model": true,
  "use_sparse_predictor": false,
  "use_sparse_regularization": false,
  "vocab_size": 32000
}
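
The 32-entry `thresholds` list carries one cutoff per decoder layer. Judging from `ugly_utils.py` below, each value becomes that layer's MLP `dead_threshold`: post-SiLU activations whose magnitude is at or below it are zeroed (the "50p" in the model name suggests the cutoffs were calibrated for roughly 50% activation sparsity). A minimal sketch of the thresholding rule (shapes and the sample threshold are illustrative only):

```python
import torch
import torch.nn.functional as F

def thresholded_silu(x: torch.Tensor, threshold: float) -> torch.Tensor:
    """Zero out SiLU activations whose magnitude is at or below threshold."""
    act = F.silu(x)
    return torch.where(act.abs() > threshold, act, torch.zeros_like(act))

x = torch.randn(8, 14336)       # a batch of gate_proj outputs (intermediate_size = 14336)
y = thresholded_silu(x, 0.019)  # e.g. the layer-0 threshold from the list above
print((y == 0).float().mean())  # fraction of activations zeroed
```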
generation_config.json ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.36.2"
}
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07b9355dab70bf7351570f1681f5de0e42ae8988305728dec9567a8a7db48fd8
size 4943162336
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:643924457a6246b2cfef7c06c92bef2b7653837e5b0879c5f740206ab222e044
size 4999819336
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98ead7c7ae12dcd392b05ff80c4eb800fc228e51c2b3c5647b1cd6dc9a6a62c2
size 4540516344
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
{
  "metadata": {
    "total_size": 14483464192
  },
  "weight_map": {
    "lm_head.weight": "model-00003-of-00003.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.norm.weight": "model-00003-of-00003.safetensors"
  }
}
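
A quick consistency check on the sizes: `total_size` is 14,483,464,192 bytes of tensor data, i.e. about 7.24B parameters at 2 bytes each in bfloat16, as expected for a Mistral-7B checkpoint. The three shard files sum to 14,483,498,016 bytes; the small excess over `total_size` is the per-file safetensors headers.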
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "</s>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "</s>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": false
}
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3005f306c11cdfa8d1e146e732facb51e217a023f0136dc27c3cd10a195c34a8
size 6456
ugly_utils.py ADDED
@@ -0,0 +1,1386 @@
from typing import Any, Dict, Optional, Tuple, Union
import copy
import os
import time
import warnings

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MSELoss

import flash_gemv
from datasets import Dataset
from peft import PeftModel
from transformers import Trainer, TrainerCallback
from transformers.configuration_utils import PretrainedConfig

# from experiments.models.sparse_silu.utils import get_mlp_class, get_decoder_class

from utils.utils import (
    is_running_deepspeed,
    is_mainprocess,
    ds_print,
    get_model_type,
    get_model_type_from_name,
)
from utils.constants import MISTRAL

# Mistral
from transformers.models.mistral.modeling_mistral import (
    MistralMLP,
    MistralDecoderLayer,
    MistralConfig,
    MistralForCausalLM,
    MistralModel,
)
from experiments.models.sparse_mistral.svd_router import (
    low_rank_approximation,
)

# Llama
from transformers.models.llama.modeling_llama import (
    LlamaModel,
    LlamaMLP,
    LlamaDecoderLayer,
    LlamaConfig,
    LlamaForCausalLM,
)


def get_mlp_class(model):
    model_type = get_model_type(model)
    return MistralSparseSiluMLP if model_type == MISTRAL else LlamaSparseSiluMLP


def get_decoder_class(model):
    model_type = get_model_type(model)
    return (
        SparseMistralDecoderLayer if model_type == MISTRAL else LlamaSparseDecoderLayer
    )


def get_model_class(model):
    model_type = get_model_type(model)
    return MistralModel if model_type == MISTRAL else LlamaModel


class SparseSiLU(nn.SiLU):
    def __init__(self, threshold):
        super(SparseSiLU, self).__init__()
        self.threshold = threshold
        self.m = nn.Threshold(self.threshold, 0)

    def set_new_threshold(self, threshold):
        self.threshold = threshold
        self.m = nn.Threshold(threshold, 0)

    def forward(self, x):
        act = super(SparseSiLU, self).forward(x)
        return self.m(act) - self.m(-act)


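# Example (SparseSiLU): nn.Threshold(t, 0) keeps only values strictly greater
# than t, so m(act) - m(-act) zeroes every activation whose SiLU output has
# magnitude <= threshold and passes everything else through unchanged:
#
#     act = SparseSiLU(threshold=0.1)
#     x = torch.tensor([-2.0, -0.1, 0.05, 0.3, 2.0])
#     y = act(x)  # equals F.silu(x) with |F.silu(x)| <= 0.1 mapped to 0

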
def get_sparse_config(
    config: PretrainedConfig,
    model_type: Optional[str] = None,
    use_sparse_model=False,
    use_sparse_predictor=False,
    use_sparse_regularization=False,
    use_graceful_regularization=False,
    thresholds=None,
):
    if model_type == MISTRAL:
        new_config = SparseMistralConfig()
    else:
        new_config = SparseLlamaConfig()
    new_config.__dict__.update(config.__dict__)
    config = new_config
    config.use_sparse_model = use_sparse_model
    config.use_sparse_predictor = use_sparse_predictor
    config.use_sparse_regularization = use_sparse_regularization
    config.use_graceful_regularization = use_graceful_regularization
    config.thresholds = thresholds

    return config


def apply_sparse_silu_mlp(
    model,
    config,
    use_sparse_regularization: bool = False,
):
    SparseMLP = get_mlp_class(model)
    for i, layer in enumerate(model.model.layers):
        original_mlp = layer.mlp
        new_mlp = SparseMLP(config, use_sparse_regularization=use_sparse_regularization)
        print(f"layer {i} is_profile: {new_mlp.is_profile}")
        new_mlp.gate_proj = original_mlp.gate_proj
        new_mlp.up_proj = original_mlp.up_proj
        new_mlp.down_proj = original_mlp.down_proj
        layer.mlp = new_mlp


def apply_sparse_decoder_layer(
    model,
    config,
    init_svd: bool = True,
):
    Model = get_model_class(model)
    SparseMLP = get_mlp_class(model)
    DecoderLayer = get_decoder_class(model)

    assert isinstance(model.model, Model), "model.model must be a MistralModel or LlamaModel."
    new_layers = []
    for layer_idx, layer in enumerate(model.model.layers):
        if isinstance(layer.mlp, SparseMLP):
            new_layers.append(
                DecoderLayer(
                    config=config,
                    layer_idx=layer_idx,
                    decoder_layer=layer,
                    init_svd=init_svd,
                )
            )
            print(f"{layer_idx}th mlp layer activation: {layer.mlp.sparse_act_fn}")
        else:
            new_layers.append(layer)
    model.model.layers = nn.ModuleList(new_layers)


def enable_sparse_predictor(
    model,
):
    DecoderLayer = get_decoder_class(model)
    for layer_idx, layer in enumerate(model.model.layers):
        if isinstance(layer, DecoderLayer):
            layer.use_sparse_predictor = True


def disable_sparse_predictor(
    model,
):
    DecoderLayer = get_decoder_class(model)
    for layer_idx, layer in enumerate(model.model.layers):
        if isinstance(layer, DecoderLayer):
            layer.use_sparse_predictor = False


def activate_stats(model, is_collect_histogram: bool = True):
    SparseMLP = get_mlp_class(model)
    for layer in model.model.layers:
        if isinstance(layer.mlp, SparseMLP):
            layer.mlp.activate_stats(is_collect_histogram=is_collect_histogram)


def deactivate_stats(
    model,
):
    SparseMLP = get_mlp_class(model)
    for layer in model.model.layers:
        if isinstance(layer.mlp, SparseMLP):
            layer.mlp.deactivate_stats()


def enable_sparse_silu(model):
    print("Enabling SparseSilu")
    SparseMLP = get_mlp_class(model)
    for i, layer in enumerate(model.model.layers):
        if isinstance(layer.mlp, SparseMLP):
            layer.mlp.kill_sparse_swish_outputs = True


def disable_sparse_silu(model):
    print("Disabling SparseSilu")
    SparseMLP = get_mlp_class(model)
    for i, layer in enumerate(model.model.layers):
        if isinstance(layer.mlp, SparseMLP):
            layer.mlp.kill_sparse_swish_outputs = False


def print_dead_neuron_stats(model):
    SparseMLP = get_mlp_class(model)
    total_sparsity = 0
    counts = 0
    for i, layer in enumerate(model.model.layers):
        if isinstance(layer.mlp, SparseMLP):
            dead_percentage = layer.mlp.dead_percentage * 100
            agg_sparsity = layer.mlp.agg_sparsity * 100
            ds_print(f"layer {i} sparsity: {dead_percentage:.3f}%")
            ds_print(f"layer {i} agg sparsity: {agg_sparsity:.3f}%")
            total_sparsity += dead_percentage
            counts += 1

    ds_print(f"Total sparsity: {total_sparsity / counts:.3f}%")
    return total_sparsity / counts


def get_sparse_layers(model):
    SparseMLP = get_mlp_class(model)
    sparse_layers = [
        layer.mlp for layer in model.model.layers if isinstance(layer.mlp, SparseMLP)
    ]
    return sparse_layers


def get_threshold(
    bin_edges: torch.Tensor, histogram_counts: torch.Tensor, sparsity_level: float
):  # Only for L1 regularization
    assert (
        len(bin_edges.shape) == len(histogram_counts.shape) == 1
    ), "bin_edges and histogram_counts are expected to be 1-dimensional."
    histogram_counts /= histogram_counts.sum()
    threshold_idx = torch.searchsorted(
        histogram_counts.cumsum(0), sparsity_level, side="right"
    )

    return bin_edges[threshold_idx]


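# Example: get_threshold normalizes the histogram, takes its CDF, and returns
# the bin edge at which the cumulative mass would first exceed sparsity_level:
#
#     bin_edges        = torch.tensor([0.0, 0.1, 0.2, 0.3, 0.4])
#     histogram_counts = torch.tensor([4.0, 3.0, 2.0, 1.0])  # CDF: .4, .7, .9, 1.
#     get_threshold(bin_edges, histogram_counts, 0.5)        # -> tensor(0.1000)
#
# Zeroing activations below 0.1 kills the 40% of mass in the first bin; the
# next edge (0.2) would already remove 70%, overshooting the 50% target.

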
def set_regularization_threshold(model, threshold: float = 0.1):
    SparseMLP = get_mlp_class(model)
    for i, layer in enumerate(model.model.layers):
        if (
            isinstance(layer.mlp, SparseMLP) and layer.mlp.is_stats
        ):  # The threshold can be set only after the relevant statistics have been collected.
            layer.mlp.regularization_threshold = threshold  # TODO: find a better parameter


def set_sparse_threshold(model, sparsity_level: float, use_relu: bool = False):
    SparseMLP = get_mlp_class(model)
    for i, layer in enumerate(model.model.layers):
        if (
            isinstance(layer.mlp, SparseMLP) and layer.mlp.is_stats
        ):  # The threshold can be set only after the relevant statistics have been collected.
            if use_relu:
                layer.mlp.sparse_act_fn = nn.ReLU()
                layer.mlp.use_relu = True
            else:
                layer.mlp.dead_threshold = get_threshold(
                    layer.mlp.histogram_bins,
                    layer.mlp.post_act_hist_counts,
                    sparsity_level,
                )
                layer.mlp.sparse_act_fn.set_new_threshold(layer.mlp.dead_threshold)
                layer.mlp.regularization_threshold = (
                    layer.mlp.dead_threshold * 1.2
                )  # TODO: find a better parameter


def plot_histogram(
    bin_edges,
    histogram_counts: torch.Tensor,
    threshold: float = 0.5,
    title: str = "Activation Distribution",
    fig_dir: str = "figures",
    layer_index: int = 0,
):
    if is_mainprocess():
        torch.save(bin_edges, f"{fig_dir}/bin_edges_{layer_index}.pt")
        torch.save(histogram_counts, f"{fig_dir}/histogram_counts_{layer_index}.pt")

    fig, ax = plt.subplots()

    # Determine bars within the threshold
    within_threshold_mask = (bin_edges[:-1] >= -threshold) & (bin_edges[:-1] <= threshold)

    # Bars within the threshold
    ax.bar(
        bin_edges[:-1][within_threshold_mask],
        histogram_counts[within_threshold_mask],
        width=np.diff(bin_edges)[within_threshold_mask],
        color="#227CF6",
        alpha=0.2,
        label="Within Threshold",
    )

    # Bars outside the threshold
    outside_threshold_mask = ~within_threshold_mask
    ax.bar(
        bin_edges[:-1][outside_threshold_mask],
        histogram_counts[outside_threshold_mask],
        width=np.diff(bin_edges)[outside_threshold_mask],
        color="#227CF6",
        alpha=1.0,
        label="Outside Threshold",
    )

    # KDE plot
    bin_midpoints = (bin_edges[:-1] + bin_edges[1:]) / 2
    sns.kdeplot(x=bin_midpoints, weights=histogram_counts, bw_adjust=0.2, ax=ax, color="#227CF6", label="KDE")

    # Threshold lines
    plt.axvline(x=threshold, color="red", linestyle="--", label=f"Threshold (+/-{threshold})")
    plt.axvline(x=-threshold, color="red", linestyle="--")

    # Labels and title
    ax.set_title(title)
    ax.set_xlabel("Activation Value")
    ax.set_ylabel("Frequency")
    ax.legend()

    # Save the plot
    os.makedirs(fig_dir, exist_ok=True)
    plt.savefig(f"{fig_dir}/{title}_layer_{layer_index}.png")
    plt.close(fig)


def plot_act(model, fig_dir: str = "figures"):
    SparseMLP = get_mlp_class(model)

    for i, layer in enumerate(model.model.layers):
        if (
            isinstance(layer.mlp, SparseMLP) and layer.mlp.is_stats
        ):  # Histograms can be plotted only after the relevant statistics have been collected.
            # plot_title = f"Layer: {i} Pre-Activation Distribution"
            # plot_histogram(layer.mlp.histogram_bins, layer.mlp.pre_act_hist_counts, plot_title, fig_dir, layer_index=i)

            plot_title = f"Layer: {i} Post-Activation Absolute Distribution"
            plot_histogram(
                layer.mlp.histogram_bins,
                layer.mlp.post_act_hist_counts,
                layer.mlp.dead_threshold,
                plot_title,
                fig_dir,
                layer_index=i,
            )


def save_act_hist(
    model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"
):
    SparseMLP = get_mlp_class(model)
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    act_dict = {}
    for i, layer in enumerate(model.model.layers):
        if (
            isinstance(layer.mlp, SparseMLP) and layer.mlp.is_stats
        ):  # Histograms exist only for layers whose statistics have been collected.
            act_dict[i] = (
                layer.mlp.histogram_bins,
                layer.mlp.pre_act_hist_counts,
                layer.mlp.post_act_hist_counts,
            )
    print("Saving activation histograms...\n\n\n")
    torch.save(act_dict, filename)


def load_act_hist(
    model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"
):
    assert os.path.exists(
        filename
    ), f"{filename} does not exist when loading the pre/post-activation histograms of SparseMistralSiluMLP."
    SparseMLP = get_mlp_class(model)

    print("Loading activation histograms...\n\n\n")

    act_dict = torch.load(filename)
    for i, layer in enumerate(model.model.layers):
        if (
            isinstance(layer.mlp, SparseMLP) and layer.mlp.is_stats
        ):  # Histograms are restored only for layers whose statistics are active.
            (
                layer.mlp.histogram_bins,
                layer.mlp.pre_act_hist_counts,
                layer.mlp.post_act_hist_counts,
            ) = act_dict[i]


def enable_last_k_modules(model, start_module_idx: int):
    assert 32 > start_module_idx >= 0
    new_modules = []
    new_idx = 0
    for idx in range(start_module_idx, len(model.model.original_layers)):
        module = model.model.original_layers[idx]
        module.layer_idx = new_idx
        module.self_attn.layer_idx = new_idx
        new_modules.append(module)
        new_idx += 1
        print(module.layer_idx)

    model.model.layers = nn.ModuleList(new_modules)


def enable_first_k_modules(model, end_module_idx: int):
    assert 32 > end_module_idx >= 0
    new_modules = []
    new_idx = 0
    for idx in range(0, end_module_idx + 1):
        module = model.model.original_layers[idx]
        module.layer_idx = new_idx
        module.self_attn.layer_idx = new_idx
        new_modules.append(module)
        new_idx += 1
        print(module.layer_idx)

    model.model.layers = nn.ModuleList(new_modules)


499
+ # MISTRAL
500
+
501
+
502
+ class MistralSparseSiluMLP(MistralMLP):
503
+ def __init__(self, config, *args, **kwargs):
504
+ super().__init__(config)
505
+ self.swish_outputs = None
506
+ self.relu = nn.ReLU()
507
+
508
+ self.kill_sparse_swish_outputs = False
509
+ self.dead_percentage = 0
510
+ self.is_stats = False
511
+ self.visit_counts = 0
512
+ self.is_profile = False
513
+
514
+ # Hyperparameters to tune
515
+ self.dead_threshold = kwargs.pop("dead_threshold", 0)
516
+ self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", True)
517
+ self.regularization_type = kwargs.pop(
518
+ "regularization_type", "L1 regularization"
519
+ )
520
+ self.regularization_threshold = kwargs.pop("regularization_threshold", 0.5)
521
+ self.use_relu = kwargs.pop("use_relu", False)
522
+ self.activation_norm = None
523
+
524
+ # Activation Histograms
525
+ self.is_collect_histogram = False
526
+ num_bins = 1000
527
+ self.histogram_bins = torch.linspace(-1, 1, num_bins - 2)
528
+ self.histogram_bins = torch.cat(
529
+ [torch.tensor([-torch.inf]), self.histogram_bins, torch.tensor([torch.inf])]
530
+ )
531
+ self.pre_act_hist_counts = torch.zeros(num_bins - 1)
532
+ self.abs_post_act_hist_counts = torch.zeros(num_bins - 1)
533
+ self.post_act_hist_counts = torch.zeros(num_bins - 1)
534
+ self.t = 0
535
+ self.count = 0
536
+ self.agg_sparsity = 0
537
+
538
+ # Sparse activation function
539
+ self.sparse_act_fn = SparseSiLU(threshold=self.dead_threshold)
540
+
541
+ def activate_stats(self, is_collect_histogram: bool = True):
542
+ self.is_stats = True
543
+ self.dead_percentage = 0
544
+ self.visit_counts = 0
545
+ self.is_collect_histogram = is_collect_histogram
546
+ self.histogram_counts = torch.zeros(2000) # .to(self.down_proj.weight.device)
547
+
548
+ def deactivate_stats(self):
549
+ self.is_stats = False
550
+
551
+ def collect_stats(self, pre_activation, post_activation):
552
+ start_time = time.time()
553
+ pre_activation = pre_activation.float().cpu().detach()
554
+ post_activation = post_activation.float().cpu().detach()
555
+ # self.histogram_bins=self.histogram_bins.to(pre_activation.device).type(pre_activation.dtype)
556
+ self.pre_act_hist_counts += torch.histogram(
557
+ pre_activation, bins=self.histogram_bins
558
+ )[0]
559
+ self.post_act_hist_counts += torch.histogram(
560
+ torch.abs(post_activation), bins=self.histogram_bins
561
+ )[0]
562
+ # self.post_act_hist_counts += torch.histogram(post_activation, bins=self.histogram_bins)[0]
563
+ self.t += time.time() - start_time
564
+ # if self.visit_counts % 30 == 0:
565
+ # print(f"Time taken to collect stats: {self.t}s.")
566
+
567
+ def forward(
568
+ self,
569
+ x,
570
+ sp_mask: torch.tensor = None,
571
+ ):
572
+ """
573
+ If kill_sparse_swish_outputs is set to False, this layer functions exactly like a normal MLP layer.
574
+ """
575
+ if sp_mask != None: # When sparse mask is given
576
+ return self.down_proj(
577
+ self.sparse_act_fn(self.gate_proj(x) * sp_mask) * self.up_proj(x)
578
+ ) # Todo: This doesn't accelerate runtime (instead slowing down)
579
+
580
+ if self.is_profile:
581
+ if x.shape[1] == 1:
582
+ if self.sp_method == 1:
583
+ return flash_gemv.flag_gemv_gemv_inner_bf16(
584
+ x,
585
+ self.gate_proj.weight,
586
+ self.up_proj.weight,
587
+ self.down_proj.weight,
588
+ self.dead_threshold,
589
+ )
590
+ elif self.sp_method == 2:
591
+ return flash_gemv.gemv_gemv_triton(
592
+ x,
593
+ self.act_fn(self.gate_proj(x)),
594
+ self.up_proj.weight,
595
+ self.wdown_t,
596
+ self.dead_threshold,
597
+ )
598
+ else:
599
+ post_act = self.act_fn(self.gate_proj(x))
600
+ dead_neurons = post_act.abs() <= self.dead_threshold
601
+ post_act[dead_neurons] = 0
602
+ return self.down_proj(post_act * self.up_proj(x))
603
+ else:
604
+ post_act = self.act_fn(self.gate_proj(x))
605
+ dead_neurons = post_act.abs() <= self.dead_threshold
606
+ post_act[dead_neurons] = 0
607
+ return self.down_proj(post_act * self.up_proj(x))
608
+
609
+ elif self.use_relu:
610
+ post_act = self.relu(self.gate_proj(x))
611
+ self.count += 1
612
+ if self.count <= 1:
613
+ print("USING RELU!!!!")
614
+
615
+ if self.is_stats:
616
+ dead_neurons = post_act == 0
617
+ dead_percentage = dead_neurons.float().mean()
618
+ agg_sparsity = dead_neurons.all(dim=0).float().mean()
619
+
620
+ self.dead_percentage = (
621
+ self.dead_percentage * self.visit_counts + dead_percentage
622
+ ) / (self.visit_counts + 1)
623
+ self.agg_sparsity = (
624
+ self.agg_sparsity * self.visit_counts + agg_sparsity
625
+ ) / (self.visit_counts + 1)
626
+ self.visit_counts += 1
627
+
628
+ return self.down_proj(post_act * self.up_proj(x))
629
+
630
+ else:
631
+ self.count += 1
632
+ if self.count <= 1:
633
+ ds_print("USING SparseSILU!!!!")
634
+ pre_act = self.gate_proj(x)
635
+ post_act = self.act_fn(pre_act)
636
+ if self.kill_sparse_swish_outputs:
637
+ dead_neurons = post_act.abs() <= self.dead_threshold
638
+ # print("pre act sparsity: ", (pre_act==0).float().mean())
639
+
640
+ dead_percentage = dead_neurons.float().mean()
641
+ agg_sparsity = dead_neurons.all(dim=0).float().mean()
642
+
643
+ if self.is_stats:
644
+ self.dead_percentage = (
645
+ self.dead_percentage * self.visit_counts + dead_percentage
646
+ ) / (self.visit_counts + 1)
647
+ self.agg_sparsity = (
648
+ self.agg_sparsity * self.visit_counts + agg_sparsity
649
+ ) / (self.visit_counts + 1)
650
+ self.visit_counts += 1
651
+
652
+ self.a = dead_percentage
653
+
654
+ # Collect histogram stats
655
+ if (
656
+ self.is_collect_histogram
657
+ and pre_act.eq(0).float().mean() < 0.99
658
+ ): # Skip batches that are mostly padding
659
+ self.collect_stats(pre_act, post_act)
660
+
661
+ if self.count <= 1:
662
+ ds_print("KILL!")
663
+ post_act[dead_neurons] = 0
664
+
665
+ out = self.down_proj(post_act * self.up_proj(x))
666
+ if self.use_sparse_regularization:
667
+ if self.regularization_type == "L1 regularization":
668
+ self.activation_norm = torch.abs(post_act)[
669
+ torch.abs(post_act) < self.regularization_threshold
670
+ ].mean()
671
+ elif self.regularization_type == "L2 regularization":
672
+ self.activation_norm = torch.sqrt(
673
+ torch.square(post_act)[
674
+ torch.abs(post_act) < self.regularization_threshold
675
+ ]
676
+ ).mean()
677
+
678
+ return out
679
+
680
+
681
+ class SparseMistralDecoderLayer(MistralDecoderLayer):
682
+ def __init__(
683
+ self,
684
+ config: MistralConfig,
685
+ layer_idx: int,
686
+ decoder_layer: MistralDecoderLayer,
687
+ init_svd: bool = True,
688
+ *args,
689
+ **kwargs,
690
+ ):
691
+ assert isinstance(
692
+ decoder_layer.mlp, MistralSparseSiluMLP
693
+ ), f"{type(decoder_layer.mlp)} should MistralSparseSiluMLP."
694
+
695
+ super().__init__(config, layer_idx)
696
+ self.hidden_size = config.hidden_size
697
+ self.intermediate_size = config.intermediate_size
698
+
699
+ self.init_svd = init_svd
700
+ self.self_attn = decoder_layer.self_attn
701
+
702
+ self.mlp = decoder_layer.mlp
703
+ self.input_layernorm = decoder_layer.input_layernorm
704
+ self.post_attention_layernorm = decoder_layer.post_attention_layernorm
705
+
706
+ # Sparse predictor for mlp (initialized with SVD decomposed matrix)
707
+ self.low_rank = kwargs.pop("low_rank", 64)
708
+ self.sparse_act_func = decoder_layer.mlp.sparse_act_fn
709
+
710
+ print(
711
+ f"Setting {layer_idx}th mlp layer's sparse predictor... svd init: {init_svd}"
712
+ )
713
+ self.sp_mlp = low_rank_approximation(
714
+ decoder_layer.mlp.gate_proj,
715
+ act_func=self.sparse_act_func,
716
+ init_svd=init_svd,
717
+ )
718
+ self.use_async = kwargs.pop("use_async", False)
719
+ self.use_sparse_predictor = False
720
+ self.distill_loss = None
721
+
722
+ def forward(
723
+ self,
724
+ hidden_states: torch.Tensor,
725
+ attention_mask: Optional[torch.Tensor] = None,
726
+ position_ids: Optional[torch.LongTensor] = None,
727
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
728
+ output_attentions: Optional[bool] = False,
729
+ use_cache: Optional[bool] = False,
730
+ **kwargs,
731
+ ) -> Tuple[
732
+ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
733
+ ]:
734
+ print("hidden_states shape: ", hidden_states.shape)
735
+ if "padding_mask" in kwargs:
736
+ warnings.warn(
737
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
738
+ )
739
+
740
+ residual = hidden_states
741
+ sp_mask = None
742
+
743
+ if self.use_async:
744
+ sp_mask = self.sp_mlp(hidden_states)
745
+
746
+ hidden_states = self.input_layernorm(hidden_states)
747
+
748
+ # Self Attention
749
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
750
+ hidden_states=hidden_states,
751
+ attention_mask=attention_mask,
752
+ position_ids=position_ids,
753
+ past_key_value=past_key_value,
754
+ output_attentions=output_attentions,
755
+ use_cache=use_cache,
756
+ )
757
+ hidden_states = residual + hidden_states
758
+
759
+ # Fully Connected
760
+ residual = hidden_states
761
+ hidden_states = self.post_attention_layernorm(hidden_states)
762
+
763
+ if not self.use_async:
764
+ sp_mask = self.sp_mlp(hidden_states)
765
+
766
+ # Compute distillation loss
767
+ gating_output = self.mlp.sparse_act_fn(self.mlp.gate_proj(hidden_states))
768
+ loss_func = MSELoss()
769
+ self.distill_loss = loss_func(sp_mask, gating_output)
770
+
771
+ # Convert sp mask into binary form
772
+ sp_mask = sp_mask > 0
773
+
774
+ if self.training:
775
+ sp_mask = None
776
+ # if not self.use_sparse_predictor:
777
+ # sp_mask = None
778
+
779
+ hidden_states = self.mlp(hidden_states, sp_mask)
780
+ hidden_states = residual + hidden_states
781
+
782
+ outputs = (hidden_states,)
783
+
784
+ if output_attentions:
785
+ outputs += (self_attn_weights,)
786
+
787
+ if use_cache:
788
+ outputs += (present_key_value,)
789
+
790
+ return outputs
791
+
792
+
793
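`low_rank_approximation` is defined elsewhere in this file. Assuming it follows the usual SVD recipe for initializing a rank-r predictor from `gate_proj`, a minimal sketch could look like the following (the function name `svd_low_rank_predictor` is illustrative, not the author's API):

# Illustrative sketch, not part of the commit: SVD-based low-rank init in the
# spirit of the `low_rank_approximation` call above.
import torch
import torch.nn as nn

def svd_low_rank_predictor(linear: nn.Linear, rank: int = 64) -> nn.Sequential:
    # Decompose W ~= U_r @ diag(S_r) @ V_r^T and split it into two thin layers.
    U, S, Vh = torch.linalg.svd(linear.weight.data.float(), full_matrices=False)
    first = nn.Linear(linear.in_features, rank, bias=False)
    second = nn.Linear(rank, linear.out_features, bias=False)
    first.weight.data = (torch.diag(S[:rank]) @ Vh[:rank]).to(linear.weight.dtype)
    second.weight.data = U[:, :rank].to(linear.weight.dtype)
    return nn.Sequential(first, second)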
+ class SparseMistralConfig(MistralConfig):
794
+ model_type = "sparse_mistral"
795
+
796
+ def __init__(self, **kwargs):
797
+ super().__init__(**kwargs)
798
+
799
+
800
+ class SparseMistralforCausalLM(MistralForCausalLM):
801
+ config_class = SparseMistralConfig
802
+
803
+ def __init__(self, config):
804
+ super().__init__(config)
805
+ self.config = config
806
+ if config.use_sparse_model:
807
+ self.apply_sparse_mlp()
808
+ if config.thresholds is not None:
809
+ for idx, m in enumerate(self.model.layers):
810
+ if isinstance(m.mlp, MistralSparseSiluMLP):
811
+ m.mlp.dead_threshold = config.thresholds[idx]
812
+ m.mlp.sparse_act_fn.set_new_threshold(m.mlp.dead_threshold)
813
+ m.mlp.kill_sparse_swish_outputs = True
814
+ m.mlp.use_relu = config.use_relu
815
+ if config.use_sparse_predictor:
816
+ self.apply_sparse_predictor(init_svd=config.init_svd)
817
+
818
+ def apply_sparse_mlp(self):
819
+ apply_sparse_silu_mlp(
820
+ self,
821
+ config=self.config,
822
+ use_sparse_regularization=self.config.use_sparse_regularization,
823
+ )
824
+
825
+ def apply_sparse_predictor(self, init_svd: bool = True):
826
+ apply_sparse_decoder_layer(self, config=self.config, init_svd=init_svd)
827
+
828
+
829
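A hedged usage sketch for wiring the sparse Mistral classes into `transformers`' auto classes. The two `register` calls are standard `transformers` API; the config attributes are simply the ones read in `SparseMistralforCausalLM.__init__` above, and loading a dense checkpoint this way (plus the 0.1 threshold value) is an assumption:

# Hypothetical usage, not part of the commit.
from transformers import AutoConfig, AutoModelForCausalLM

AutoConfig.register("sparse_mistral", SparseMistralConfig)
AutoModelForCausalLM.register(SparseMistralConfig, SparseMistralforCausalLM)

config = SparseMistralConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
config.use_sparse_model = True
config.use_sparse_regularization = False
config.use_sparse_predictor = False
config.use_relu = False
config.thresholds = [0.1] * config.num_hidden_layers  # assumed per-layer thresholds
model = SparseMistralforCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", config=config
)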
+ # LLAMA
830
+
831
+
832
+ class SparseLlamaConfig(LlamaConfig):
833
+ model_type = "sparse_llama"
834
+
835
+ def __init__(self, **kwargs):
836
+ super().__init__(**kwargs)
837
+
838
+
839
+ class SparseLlamaForCausalLM(LlamaForCausalLM):
840
+ config_class = SparseLlamaConfig
841
+
842
+ def __init__(self, config):
843
+ super().__init__(config)
844
+ self.config = config
845
+ if config.use_sparse_model:
846
+ self.apply_sparse_mlp()
847
+ if config.thresholds is not None:
848
+ for idx, m in enumerate(self.model.layers):
849
+ if isinstance(m.mlp, LlamaSparseSiluMLP):
850
+ m.mlp.dead_threshold = config.thresholds[idx]
851
+ m.mlp.sparse_act_fn.set_new_threshold(m.mlp.dead_threshold)
852
+ m.mlp.kill_sparse_swish_outputs = True
853
+ m.mlp.use_relu = config.use_relu
854
+ if config.use_sparse_predictor:
855
+ self.apply_sparse_predictor(init_svd=config.init_svd)
856
+
857
+ def apply_sparse_mlp(self):
858
+ apply_sparse_silu_mlp(
859
+ self,
860
+ config=self.config,
861
+ use_sparse_regularization=self.config.use_sparse_regularization,
862
+ )
863
+
864
+ def apply_sparse_predictor(self, init_svd: bool = True):
865
+ apply_sparse_decoder_layer(self, config=self.config, init_svd=init_svd)
866
+
867
+
868
+ class LlamaSparseSiluMLP(LlamaMLP):
869
+ def __init__(self, config, *args, **kwargs):
870
+ super().__init__(config)
871
+ self.swish_outputs = None
872
+ self.relu = nn.ReLU()
873
+
874
+ self.kill_sparse_swish_outputs = False
875
+ self.dead_percentage = 0
876
+ self.is_stats = False
877
+ self.visit_counts = 0
878
+ self.is_profile = False
879
+
880
+ # Hyperparameters to tune
881
+ self.dead_threshold = kwargs.pop("dead_threshold", 0)
882
+ self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", True)
883
+ self.regularization_type = kwargs.pop(
884
+ "regularization_type", "L1 regularization"
885
+ )
886
+ self.regularization_threshold = kwargs.pop("regularization_threshold", 0.5)
887
+ self.use_relu = kwargs.pop("use_relu", False)
888
+ self.activation_norm = None
889
+
890
+ # Activation Histograms
891
+ self.is_collect_histogram = False
892
+ num_bins = 1000
893
+ self.histogram_bins = torch.linspace(-1, 1, num_bins - 2)
894
+ self.histogram_bins = torch.cat(
895
+ [torch.tensor([-torch.inf]), self.histogram_bins, torch.tensor([torch.inf])]
896
+ )
897
+ self.pre_act_hist_counts = torch.zeros(num_bins - 1)
898
+ self.abs_post_act_hist_counts = torch.zeros(num_bins - 1)
899
+ self.post_act_hist_counts = torch.zeros(num_bins - 1)
900
+ self.t = 0
901
+ self.count = 0
902
+ self.agg_sparsity = 0
903
+
904
+ # Sparse activation function
905
+ self.sparse_act_fn = SparseSiLU(threshold=self.dead_threshold)
906
+
907
+ def activate_stats(self, is_collect_histogram: bool = True):
908
+ self.is_stats = True
909
+ self.dead_percentage = 0
910
+ self.visit_counts = 0
911
+ self.is_collect_histogram = is_collect_histogram
912
+ self.histogram_counts = torch.zeros(2000) # .to(self.down_proj.weight.device)
913
+
914
+ def deactivate_stats(self):
915
+ self.is_stats = False
916
+
917
+ def collect_stats(self, pre_activation, post_activation):
918
+ start_time = time.time()
919
+ pre_activation = pre_activation.float().cpu().detach()
920
+ post_activation = post_activation.float().cpu().detach()
921
+ # self.histogram_bins=self.histogram_bins.to(pre_activation.device).type(pre_activation.dtype)
922
+ self.pre_act_hist_counts += torch.histogram(
923
+ pre_activation, bins=self.histogram_bins
924
+ )[0]
925
+ self.post_act_hist_counts += torch.histogram(
926
+ torch.abs(post_activation), bins=self.histogram_bins
927
+ )[0]
928
+ # self.post_act_hist_counts += torch.histogram(post_activation, bins=self.histogram_bins)[0]
929
+ self.t += time.time() - start_time
930
+ # if self.visit_counts % 30 == 0:
931
+ # print(f"Time taken to collect stats: {self.t}s.")
932
+
933
+ def forward(
934
+ self,
935
+ x,
936
+ sp_mask: Optional[torch.Tensor] = None,
937
+ ):
938
+ """
939
+ If kill_sparse_swish_outputs is set to False, this layer functions exactly like a normal MLP layer.
940
+ """
941
+ if sp_mask is not None: # When a sparse mask is given
942
+ return self.down_proj(
943
+ self.sparse_act_fn(self.gate_proj(x) * sp_mask) * self.up_proj(x)
944
+ ) # TODO: this does not accelerate runtime (it actually slows it down)
945
+
946
+ if self.is_profile:
947
+ if x.shape[1] == 1:
948
+ if self.sp_method == 1:
949
+ return flash_gemv.flag_gemv_gemv_inner_bf16(
950
+ x,
951
+ self.gate_proj.weight,
952
+ self.up_proj.weight,
953
+ self.down_proj.weight,
954
+ self.dead_threshold,
955
+ )
956
+ elif self.sp_method == 2:
957
+ return flash_gemv.gemv_gemv_triton(
958
+ x,
959
+ self.act_fn(self.gate_proj(x)),
960
+ self.up_proj.weight,
961
+ self.wdown_t,
962
+ self.dead_threshold,
963
+ )
964
+ else:
965
+ post_act = self.act_fn(self.gate_proj(x))
966
+ dead_neurons = post_act.abs() <= self.dead_threshold
967
+ post_act[dead_neurons] = 0
968
+ return self.down_proj(post_act * self.up_proj(x))
969
+ else:
970
+ post_act = self.act_fn(self.gate_proj(x))
971
+ dead_neurons = post_act.abs() <= self.dead_threshold
972
+ post_act[dead_neurons] = 0
973
+ return self.down_proj(post_act * self.up_proj(x))
974
+
975
+ elif self.use_relu:
976
+ post_act = self.relu(self.gate_proj(x))
977
+ self.count += 1
978
+ if self.count <= 1:
979
+ print("USING RELU!!!!")
980
+
981
+ if self.is_stats:
982
+ dead_neurons = post_act == 0
983
+ dead_percentage = dead_neurons.float().mean()
984
+ agg_sparsity = dead_neurons.all(dim=0).float().mean()
985
+
986
+ self.dead_percentage = (
987
+ self.dead_percentage * self.visit_counts + dead_percentage
988
+ ) / (self.visit_counts + 1)
989
+ self.agg_sparsity = (
990
+ self.agg_sparsity * self.visit_counts + agg_sparsity
991
+ ) / (self.visit_counts + 1)
992
+ self.visit_counts += 1
993
+
994
+ return self.down_proj(post_act * self.up_proj(x))
995
+
996
+ else:
997
+ self.count += 1
998
+ if self.count <= 1:
999
+ ds_print("USING SparseSILU!!!!")
1000
+ ds_print(self.dead_threshold)
1001
+ pre_act = self.gate_proj(x)
1002
+ post_act = self.act_fn(pre_act)
1003
+ if self.kill_sparse_swish_outputs:
1004
+ dead_neurons = post_act.abs() <= self.dead_threshold
1005
+ dead_percentage = dead_neurons.float().mean()
1006
+ agg_sparsity = dead_neurons.all(dim=0).float().mean()
1007
+
1008
+ if self.is_stats:
1009
+ self.dead_percentage = (
1010
+ self.dead_percentage * self.visit_counts + dead_percentage
1011
+ ) / (self.visit_counts + 1)
1012
+ self.agg_sparsity = (
1013
+ self.agg_sparsity * self.visit_counts + agg_sparsity
1014
+ ) / (self.visit_counts + 1)
1015
+ self.visit_counts += 1
1016
+
1017
+ self.a = dead_percentage
1018
+
1019
+ # Collect histogram stats
1020
+ # if self.is_collect_histogram and pre_act.eq(0).float().mean() < 0.99: # Padded dataset
1021
+ if self.is_collect_histogram: # Unlike the Mistral variant, the padding filter above is disabled here
1022
+ self.collect_stats(pre_act, post_act)
1023
+
1024
+ if self.count <= 1:
1025
+ ds_print("KILL!")
1026
+ post_act[dead_neurons] = 0
1027
+
1028
+ out = self.down_proj(post_act * self.up_proj(x))
1029
+ if self.use_sparse_regularization:
1030
+ if self.regularization_type == "L1 regularization":
1031
+ self.activation_norm = torch.abs(post_act)[
1032
+ torch.abs(post_act) < self.regularization_threshold
1033
+ ].mean()
1034
+ elif self.regularization_type == "L2 regularization":
1035
+ self.activation_norm = torch.sqrt(
1036
+ torch.square(post_act)[
1037
+ torch.abs(post_act) < self.regularization_threshold
1038
+ ]
1039
+ ).mean()
1040
+
1041
+ return out
1042
+
1043
+
1044
+ class LlamaSparseDecoderLayer(LlamaDecoderLayer):
1045
+ def __init__(
1046
+ self,
1047
+ config: LlamaConfig,
1048
+ layer_idx: int,
1049
+ decoder_layer: LlamaDecoderLayer,
1050
+ init_svd: bool = True,
1051
+ *args,
1052
+ **kwargs,
1053
+ ):
1054
+ assert isinstance(
1055
+ decoder_layer.mlp, LlamaSparseSiluMLP
1056
+ ), f"{type(decoder_layer.mlp)} should be LlamaSparseSiluMLP."
1057
+
1058
+ super().__init__(config, layer_idx)
1059
+ self.hidden_size = config.hidden_size
1060
+ self.intermediate_size = config.intermediate_size
1061
+
1062
+ self.init_svd = init_svd
1063
+ self.self_attn = decoder_layer.self_attn
1064
+
1065
+ self.mlp = decoder_layer.mlp
1066
+ self.input_layernorm = decoder_layer.input_layernorm
1067
+ self.post_attention_layernorm = decoder_layer.post_attention_layernorm
1068
+
1069
+ # Sparse predictor for mlp (initialized with SVD decomposed matrix)
1070
+ self.low_rank = kwargs.pop("low_rank", 64)
1071
+ self.sparse_act_func = decoder_layer.mlp.sparse_act_fn
1072
+
1073
+ print(
1074
+ f"Setting {layer_idx}th mlp layer's sparse predictor... svd init: {init_svd}"
1075
+ )
1076
+ self.sp_mlp = low_rank_approximation(
1077
+ decoder_layer.mlp.gate_proj,
1078
+ act_func=self.sparse_act_func,
1079
+ init_svd=init_svd,
1080
+ )
1081
+ self.use_async = kwargs.pop("use_async", False)
1082
+ self.use_sparse_predictor = False
1083
+ self.distill_loss = None
1084
+
1085
+ def forward(
1086
+ self,
1087
+ hidden_states: torch.Tensor,
1088
+ attention_mask: Optional[torch.Tensor] = None,
1089
+ position_ids: Optional[torch.LongTensor] = None,
1090
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
1091
+ output_attentions: Optional[bool] = False,
1092
+ use_cache: Optional[bool] = False,
1093
+ **kwargs,
1094
+ ) -> Tuple[
1095
+ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
1096
+ ]:
1097
+ print("hidden_states shape: ", hidden_states.shape)
1098
+ if "padding_mask" in kwargs:
1099
+ warnings.warn(
1100
+ "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
1101
+ )
1102
+
1103
+ residual = hidden_states
1104
+ sp_mask = None
1105
+
1106
+ if self.use_async:
1107
+ sp_mask = self.sp_mlp(hidden_states)
1108
+
1109
+ hidden_states = self.input_layernorm(hidden_states)
1110
+
1111
+ # Self Attention
1112
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
1113
+ hidden_states=hidden_states,
1114
+ attention_mask=attention_mask,
1115
+ position_ids=position_ids,
1116
+ past_key_value=past_key_value,
1117
+ output_attentions=output_attentions,
1118
+ use_cache=use_cache,
1119
+ **kwargs,
1120
+ )
1121
+ hidden_states = residual + hidden_states
1122
+
1123
+ # Fully Connected
1124
+ residual = hidden_states
1125
+ hidden_states = self.post_attention_layernorm(hidden_states)
1126
+
1127
+ if not self.use_async:
1128
+ sp_mask = self.sp_mlp(hidden_states)
1129
+
1130
+ # Compute distillation loss
1131
+ gating_output = self.mlp.sparse_act_fn(self.mlp.gate_proj(hidden_states))
1132
+ loss_func = MSELoss()
1133
+ self.distill_loss = loss_func(sp_mask, gating_output)
1134
+
1135
+ # Convert sp mask into binary form
1136
+ sp_mask = sp_mask > 0
1137
+
1138
+ if self.training:
1139
+ sp_mask = None
1140
+ # if not self.use_sparse_predictor:
1141
+ # sp_mask = None
1142
+
1143
+ hidden_states = self.mlp(hidden_states, sp_mask)
1144
+ hidden_states = residual + hidden_states
1145
+
1146
+ outputs = (hidden_states,)
1147
+
1148
+ if output_attentions:
1149
+ outputs += (self_attn_weights,)
1150
+
1151
+ if use_cache:
1152
+ outputs += (present_key_value,)
1153
+
1154
+ return outputs
1155
+
1156
+
1157
+ # Callbacks
1158
+
1159
+
1160
+ class GracefulRegularizationScheduler(TrainerCallback):
1161
+ def __init__(
1162
+ self,
1163
+ num_warmup_steps=40,
1164
+ is_enabled: bool = False,
1165
+ model_name: str = "mistral",
1166
+ test_dataset: Dataset = None,
1167
+ targeted_sparsity: float = 0.5,
1168
+ keep_regularization_with_kill: bool = False,
1169
+ ):
1170
+ """Scheduler for regularizing the model first before applying the dead threshold.
1171
+
1172
+ :param num_warmup_steps: number of training steps required to reach the dead threshold, defaults to 40
1173
+ :param increment_ratio: by how much to increase the dead threshold.
1174
+ For example, 0.5 means "increase the threshold by 0.5 * desired threshold
1175
+ """
1176
+ self.num_warmup_steps = num_warmup_steps
1177
+ self.is_enabled = is_enabled
1178
+ self.model_name = model_name
1179
+ self.test_dataset = test_dataset
1180
+ self.targeted_sparsity = targeted_sparsity
1181
+ self.keep_regularization_with_kill = keep_regularization_with_kill
1182
+ self.act_hist_path = (
1183
+ f"/scr/lukeai/histograms/warm_up_reg_{targeted_sparsity}/act_hist.pt"
1184
+ )
1185
+ if self.is_enabled:
1186
+ print("GracefulRegularizationScheduler is enabled.")
1187
+ self.trainer = None
1188
+
1189
+ def set_trainer(self, trainer):
1190
+ self.trainer = trainer
1191
+
1192
+ def on_step_end(self, args, state, control, **kwargs):
1193
+ if not self.is_enabled:
1194
+ return
1195
+
1196
+ model = kwargs["model"]
1197
+ if isinstance(model, PeftModel):
1198
+ base_model = model.get_base_model()
1199
+ else:
1200
+ base_model = model
1201
+
1202
+ if state.global_step == 1:
1203
+ ds_print("Setting an initial reg threshold to 0.1")
1204
+ set_regularization_threshold(base_model, 0.1)
1205
+ disable_sparse_silu(base_model)
1206
+
1207
+ if state.global_step == self.num_warmup_steps:
1208
+ activate_stats(base_model)
1209
+ enable_sparse_silu(base_model)
1210
+ self.trainer.evaluate()
1211
+ save_act_hist(base_model, self.act_hist_path)
1212
+ set_sparse_threshold(base_model, self.targeted_sparsity, False)
1213
+ deactivate_stats(base_model)
1214
+ self.trainer.use_sparse_regularization = self.keep_regularization_with_kill
1215
+ print_dead_neuron_stats(model.get_base_model())
1216
+
1217
+
1218
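`set_sparse_threshold` is defined elsewhere in this file. Assuming it derives thresholds from the absolute post-activation histograms accumulated by `collect_stats`, a minimal sketch of that histogram-to-threshold step could be:

# Illustrative sketch under the assumption above; not the author's implementation.
import torch

def threshold_for_sparsity(bin_edges: torch.Tensor,
                           abs_counts: torch.Tensor,
                           target_sparsity: float) -> float:
    """Pick the smallest t with P(|activation| <= t) >= target_sparsity."""
    cdf = torch.cumsum(abs_counts, dim=0) / abs_counts.sum()
    idx = int(torch.searchsorted(cdf, torch.tensor(target_sparsity)))
    # Use the upper edge of the selected bin as the dead threshold.
    return float(bin_edges[min(idx + 1, bin_edges.numel() - 1)])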
+ class GradualSparsificationScheduler(TrainerCallback):
1219
+ def __init__(
1220
+ self,
1221
+ num_warmup_steps=40,
1222
+ increment_ratio=0.5,
1223
+ is_enabled: bool = False,
1224
+ model_name: str = "mistral",
1225
+ ):
1226
+ """Scheduler for gradually increasing a dead threshold until it reaches the desired threshold.
1227
+
1228
+ :param num_warmup_steps: number of training steps required to reach the dead threshold, defaults to 40
1229
+ :param increment_ratio: by how much to increase the dead threshold.
1230
+ For example, 0.5 means "increase the threshold by 0.5 * desired threshold
1231
+ """
1232
+ self.num_warmup_steps = num_warmup_steps
1233
+ self.increment_ratio = increment_ratio
1234
+ self.step_size = int(num_warmup_steps * increment_ratio)
1235
+ self.is_enabled = is_enabled
1236
+ self.model_name = model_name
1237
+ self.model_type = get_model_type(model_name)
1238
+ self.mlp_type = (
1239
+ MistralSparseSiluMLP if self.model_type == MISTRAL else LlamaSparseSiluMLP
1240
+ )
1241
+
1242
+ def on_step_end(self, args, state, control, **kwargs):
1243
+ model = kwargs["model"]
1244
+
1245
+ if not self.is_enabled:
1246
+ if state.global_step <= 10:
1247
+ for module in model.modules():
1248
+ if isinstance(module, self.mlp_type):
1249
+ module.current_dead_threshold = module.dead_threshold
1250
+ return
1251
+
1252
+ current_dead_threshold = 0
1253
+ desired_dead_threshold = 0
1254
+
1255
+ if is_mainprocess():
1256
+ ds_print(state.global_step)
1257
+
1258
+ if state.global_step % self.step_size == 2:
1259
+ for module in model.modules():
1260
+ if isinstance(module, self.mlp_type):
1261
+ desired_dead_threshold = copy.deepcopy(module.dead_threshold)
1262
+ current_dead_threshold = module.current_dead_threshold
1263
+ current_dead_threshold += (
1264
+ self.increment_ratio * desired_dead_threshold
1265
+ )
1266
+ module.current_dead_threshold = min(
1267
+ desired_dead_threshold, current_dead_threshold
1268
+ )
1269
+
1270
+ if is_running_deepspeed and is_mainprocess():
1271
+ ds_print(
1272
+ state.global_step,
1273
+ current_dead_threshold,
1274
+ desired_dead_threshold,
1275
+ )
1276
+
1277
+ if state.global_step % 2000 == 0:
1278
+ if is_running_deepspeed and is_mainprocess():
1279
+ ds_print(
1280
+ f"Saving to /matx/u/lukeai/{self.model_name}_{state.global_step - 2}.pt",
1281
+ )
1282
+ torch.save(
1283
+ model.state_dict(),
1284
+ f"/matx/u/lukeai/{self.model_name}_{state.global_step - 2}.pt",
1285
+ )
1286
+
1287
+
1288
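To make the warm-up arithmetic concrete, here is a small worked example of the incremental schedule implemented in `on_step_end` above (the threshold values are illustrative):

# With num_warmup_steps=40 and increment_ratio=0.5, step_size = int(40 * 0.5) = 20,
# and the threshold is raised whenever global_step % 20 == 2.
desired = 0.1   # module.dead_threshold
current = 0.0   # module.current_dead_threshold
for step in (2, 22, 42):
    current = min(desired, current + 0.5 * desired)
    print(step, round(current, 3))  # 2 -> 0.05, 22 -> 0.1, 42 -> 0.1 (capped)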
+ # Trainer
1289
+
1290
+
1291
+ class SparseTrainer(Trainer):
1292
+ def __init__(self, *args, **kwargs):
1293
+ self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
1294
+ self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
1295
+ self.use_spm_loss = False
1296
+ self.freeze_original_weights = False
1297
+ self.regularization_type = kwargs.pop(
1298
+ "regularization_type", "L1 positive activation"
1299
+ )
1300
+ assert self.regularization_type in [
1301
+ "L2 activation",
1302
+ "L1 positive activation",
1303
+ ], f"Invalid regularization type: {self.regularization_type}"
1304
+ self.sparse_layers = []
1305
+ self.sparse_decoder_layers = []
1306
+ super(SparseTrainer, self).__init__(*args, **kwargs)
1307
+
1308
+ def initialize_sparse_silu_layers(self, model):
1309
+ SparseMLP = get_mlp_class(model)
1310
+ self.sparse_layers = [m for m in model.modules() if isinstance(m, SparseMLP)]
1311
+
1312
+ def initialize_sparse_decoder_layers(self, model):
1313
+ SparseDecoder = get_decoder_class(model)
1314
+ self.sparse_decoder_layers = [
1315
+ m for m in model.modules() if isinstance(m, SparseDecoder)
1316
+ ]
1317
+
1318
+ def training_step(
1319
+ self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]
1320
+ ) -> torch.Tensor:
1321
+ """
1322
+ Override Hugging Face's training_step to add a regularization term.
1323
+ The regularization term is computed from intermediate activations, which are freed after backward().
1324
+ Pass `retain_graph=True` to `backward` so that those values are kept.
1325
+ """
1326
+ model.train()
1327
+ inputs = self._prepare_inputs(inputs)
1328
+
1329
+ with self.compute_loss_context_manager():
1330
+ loss = self.compute_loss(model, inputs)
1331
+
1332
+ if self.args.n_gpu > 1:
1333
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
1334
+
1335
+ if not self.freeze_original_weights:
1336
+ if loss is not None:
1337
+ self.accelerator.backward(loss, retain_graph=True)
1338
+
1339
+ if self.use_sparse_regularization:
1340
+ regularization_loss = self.compute_regularization(model)
1341
+ if self.args.n_gpu > 1:
1342
+ regularization_loss = regularization_loss.mean()
1343
+ if regularization_loss is not None:
1344
+ self.accelerator.backward(regularization_loss, retain_graph=True)
1345
+ loss += regularization_loss
1346
+
1347
+ if self.use_spm_loss:
1348
+ spm_loss = self.compute_spm_loss(model)
1349
+ if self.args.n_gpu > 1:
1350
+ spm_loss = spm_loss.mean()
1351
+ if spm_loss is not None:
1352
+ self.accelerator.backward(spm_loss, retain_graph=False)
1353
+ loss += spm_loss
1354
+
1355
+ return loss.detach() / self.args.gradient_accumulation_steps
1356
+
1357
+ def compute_regularization(self, model):
1358
+ """
1359
+ Compute a sparse regularization loss for SiLU
1360
+ """
1361
+ loss = 0
1362
+ if len(self.sparse_layers) == 0:
1363
+ self.initialize_sparse_silu_layers(model)
1364
+ num_layers = len(self.sparse_layers)
1365
+
1366
+ for module in self.sparse_layers:
1367
+ if module.activation_norm is not None:
1368
+ loss += module.activation_norm
1369
+
1370
+ loss /= num_layers
1371
+ loss *= self.regularization_coefficient
1372
+
1373
+ if self.state.global_step % 20 == 0 and loss != 0:
1374
+ print("Negative relularizer loss: ", loss.item())
1375
+ return loss
1376
+
1377
+ def compute_spm_loss(self, model):
1378
+ loss = 0
1379
+ if len(self.sparse_decoder_layers) == 0:
1380
+ self.initialize_sparse_decoder_layers(model)
1381
+ for module in self.sparse_decoder_layers:
1382
+ if module.distill_loss is not None:
1383
+ loss += module.distill_loss
1384
+ if self.state.global_step % 20 == 0 and loss != 0:
1385
+ print("Sparse Predictor Distillation loss: ", loss.item())
1386
+ return loss
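Finally, a hypothetical end-to-end wiring of the trainer and callback above. The keyword names are exactly the kwargs popped in `SparseTrainer.__init__` and `GracefulRegularizationScheduler.__init__`; `model`, `training_args`, and the datasets are assumed to exist:

# Hypothetical usage, not part of the commit.
scheduler = GracefulRegularizationScheduler(
    num_warmup_steps=40, is_enabled=True, targeted_sparsity=0.5
)
trainer = SparseTrainer(
    model=model,
    args=training_args,  # a transformers.TrainingArguments instance
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    callbacks=[scheduler],
    use_sparse_regularization=True,
    regularization_coefficient=10,
)
scheduler.set_trainer(trainer)  # the callback calls trainer.evaluate() at warm-up end
trainer.train()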