lukeleeai committed
Commit 9b89b1d
1 Parent(s): 57fe6fa

Training in progress, step 50
adapter_config.json CHANGED
@@ -19,11 +19,11 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
+    "up_proj",
     "gate_proj",
     "down_proj",
     "v_proj",
-    "up_proj"
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59e80c1e61a30dc0140aa28a60ee2d4d1b500e3efacf24d67c4a9bc030815e33
+oid sha256:523c9ce01eaa3d800afc789bb9c6b6b98e9dadb742c5568fc237f2f5e48006c9
 size 281061608
config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "_name_or_path": "mistralai/Mistral-7B-v0.1",
+  "architectures": [
+    "SparseMistralforCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoConfig": "sparsification_sftt.SparseMistralConfig",
+    "AutoModelForCausalLM": "sparsification_sftt.SparseMistralforCausalLM"
+  },
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 32768,
+  "model_type": "sparse_mistral",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 32,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "thresholds": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.37.2",
+  "us_sparse_regularization": true,
+  "use_cache": false,
+  "use_sparse_model": true,
+  "use_sparse_predictor": false,
+  "use_sparse_regularization": false,
+  "vocab_size": 32000
+}
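Because this config registers custom classes via `auto_map`, loading it pulls `sparsification_sftt.py` from the repo, which requires `trust_remote_code=True`. A minimal loading sketch (the repo id below is a placeholder, not part of this commit):

    from transformers import AutoConfig, AutoModelForCausalLM

    # Placeholder repo id; auto_map resolves SparseMistralConfig and
    # SparseMistralforCausalLM from sparsification_sftt.py inside the repo.
    repo = "user/sparse-mistral-repo"
    config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(repo, config=config, trust_remote_code=True)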
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.37.2"
+}
model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ca3d821f1ab32a3430ca3cadad63d0470fd6ff7ad3f47f2fbe6add8d44dd3ba
+size 4943163992
model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61d229bfa164c4bfdfb89aa3796f216fe18af11a4b016a5ce5bf84b7ef289153
+size 4999821144
model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de9f483f216b3e9a46b133a3fe463e88b8b8bf1983d755beca4384474c874134
+size 4540517840
sparsification_sftt.py ADDED
@@ -0,0 +1,861 @@
+from transformers import TrainerCallback, Trainer
+from trl import SFTTrainer, DataCollatorForCompletionOnlyLM
+from peft import PeftModel
+from datasets import Dataset
+from typing import Any, Dict, Union, Optional, Tuple
+from torch.nn import MSELoss
+
+import warnings
+import torch
+import torch.nn as nn
+import matplotlib.pyplot as plt
+import numpy as np
+import time
+import os
+import copy
+
+from transformers.models.mistral.modeling_mistral import (
+    MistralMLP,
+    MistralModel,
+    MistralDecoderLayer,
+    MistralConfig,
+    MistralForCausalLM,
+)
+from experiments.models.sparse_mistral.svd_router import (
+    low_rank_approximation,
+    SparsePredictor,
+)
+from utils.utils import (
+    print_size_of_model,
+    is_running_deepspeed,
+    is_mainprocess,
+    get_datetime,
+    ds_print,
+)
+
+
+class SparseSFTTTrainer(SFTTrainer):
+    def __init__(self, *args, **kwargs):
+        self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
+        self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
+        self.use_spm_loss = False
+        self.freeze_original_weights = False
+        self.regularization_type = kwargs.pop("regularization_type", "L1 positive activation")
+        assert self.regularization_type in [
+            "L2 activation",
+            "L1 positive activation",
+        ], f"Invalid regularization type: {self.regularization_type}"
+        self.sparse_layers = []
+        self.sparse_decoder_layers = []
+        super(SparseSFTTTrainer, self).__init__(*args, **kwargs)
+
+    def initialize_sparse_silu_layers(self, model):
+        self.sparse_layers = [m for m in model.modules() if isinstance(m, MistralSparseSiluMLP)]
+
+    def initialize_sparse_decoder_layers(self, model):
+        self.sparse_decoder_layers = [m for m in model.modules() if isinstance(m, SparseMistralDecoderLayer)]
+
+    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
+        """
+        Override Hugging Face's training_step to add a regularization term.
+        The regularization term is computed from intermediate values that are freed after `backward()`,
+        so `retain_graph=True` must be passed to `backward` to keep them alive.
+        """
+        model.train()
+        inputs = self._prepare_inputs(inputs)
+
+        with self.compute_loss_context_manager():
+            loss = self.compute_loss(model, inputs)
+
+        if self.args.n_gpu > 1:
+            loss = loss.mean()  # mean() to average on multi-gpu parallel training
+        if not self.freeze_original_weights:
+            if loss is not None:
+                self.accelerator.backward(loss, retain_graph=True)
+
+        if self.use_sparse_regularization:
+            regularization_loss = self.compute_regularization(model)
+            if self.args.n_gpu > 1:
+                regularization_loss = regularization_loss.mean()
+            if regularization_loss is not None:
+                self.accelerator.backward(regularization_loss, retain_graph=True)
+                loss += regularization_loss
+
+            if self.state.global_step % 5 == 0:
+                ds_print("Regularization loss: ", regularization_loss.item())
+
+        if self.use_spm_loss:
+            spm_loss = self.compute_spm_loss(model)
+            if self.args.n_gpu > 1:
+                spm_loss = spm_loss.mean()
+            if spm_loss is not None:
+                self.accelerator.backward(spm_loss, retain_graph=False)
+                loss += spm_loss
+
+        return loss.detach() / self.args.gradient_accumulation_steps
+
+    def compute_regularization(self, model):
+        """
+        Compute a sparse regularization loss for SiLU.
+        """
+        loss = 0
+        if len(self.sparse_layers) == 0:
+            self.initialize_sparse_silu_layers(model)
+        num_layers = len(self.sparse_layers)
+
+        for module in self.sparse_layers:
+            if module.activation_norm is not None:
+                loss += module.activation_norm
+
+        loss /= num_layers
+        loss *= self.regularization_coefficient
+
+        if self.state.global_step % 20 == 0 and loss != 0:
+            print("Negative regularizer loss: ", loss.item())
+        return loss
+
+    def compute_spm_loss(self, model):
+        loss = 0
+        if len(self.sparse_decoder_layers) == 0:
+            self.initialize_sparse_decoder_layers(model)
+        for module in self.sparse_decoder_layers:
+            if module.distill_loss is not None:
+                loss += module.distill_loss
+        if self.state.global_step % 20 == 0 and loss != 0:
+            print("Sparse Predictor Distillation loss: ", loss.item())
+        return loss
+
+    # def compute_loss(self, model, inputs, return_outputs=False):
+    #     loss = super().compute_loss(model, inputs, return_outputs)
+    #
+    #     if is_sagemaker_mp_enabled():
+    #         import smdistributed.modelparallel.torch as smp
+    #         @smp.step()
+    #         def smp_forward_backward(model, inputs, gradient_accumulation_steps=1):
+    #             outputs = model(**inputs)
+    #             loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
+    #             loss /= gradient_accumulation_steps
+    #             model.backward(loss)
+    #             return loss
+    #
+    #         loss_mb = smp_forward_backward(
+    #             model, inputs, self.args.gradient_accumulation_steps
+    #         )
+    #         if self.use_sparse_regularization:
+    #             return loss_mb.reduce_mean().detach().to(
+    #                 self.args.device
+    #             ) + self.regularization_coefficient * self.compute_regularization(model)
+    #         else:
+    #             return loss_mb.reduce_mean().detach().to(self)
+    #
+    #     if return_outputs:
+    #         classification_loss, outputs = loss
+    #     else:
+    #         classification_loss = loss
+    #
+    #     loss = classification_loss
+    #     if self.use_sparse_regularization:
+    #         regularization_loss = self.compute_regularization(model)
+    #         loss += self.regularization_coefficient * regularization_loss
+    #
+    #     return (loss, outputs) if return_outputs else loss
+
+
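`SparseSFTTTrainer` pops its extra keyword arguments before delegating to `SFTTrainer`, so construction looks like a normal SFT setup plus the regularization switches. A hedged sketch (the model, args, and dataset variables are placeholders, and `SFTTrainer` accepts further arguments not shown here):

    trainer = SparseSFTTTrainer(
        model=model,                      # placeholder model
        args=training_args,               # placeholder TrainingArguments
        train_dataset=train_dataset,      # placeholder Dataset
        use_sparse_regularization=True,   # popped in __init__ above
        regularization_coefficient=10,
        regularization_type="L1 positive activation",
    )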
+class SparseTrainer(Trainer):
+    def __init__(self, *args, **kwargs):
+        self.regularization_coefficient = kwargs.pop("regularization_coefficient", 10)
+        self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", False)
+        self.use_spm_loss = False
+        self.freeze_original_weights = False
+        self.regularization_type = kwargs.pop("regularization_type", "L1 positive activation")
+        assert self.regularization_type in [
+            "L2 activation",
+            "L1 positive activation",
+        ], f"Invalid regularization type: {self.regularization_type}"
+        self.sparse_layers = []
+        self.sparse_decoder_layers = []
+        super(SparseTrainer, self).__init__(*args, **kwargs)
+
+    def initialize_sparse_silu_layers(self, model):
+        self.sparse_layers = [m for m in model.modules() if isinstance(m, MistralSparseSiluMLP)]
+
+    def initialize_sparse_decoder_layers(self, model):
+        self.sparse_decoder_layers = [m for m in model.modules() if isinstance(m, SparseMistralDecoderLayer)]
+
+    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
+        """
+        Override Hugging Face's training_step to add a regularization term.
+        The regularization term is computed from intermediate values that are freed after `backward()`,
+        so `retain_graph=True` must be passed to `backward` to keep them alive.
+        """
+        model.train()
+        inputs = self._prepare_inputs(inputs)
+
+        with self.compute_loss_context_manager():
+            loss = self.compute_loss(model, inputs)
+
+        if self.args.n_gpu > 1:
+            loss = loss.mean()  # mean() to average on multi-gpu parallel training
+        if not self.freeze_original_weights:
+            if loss is not None:
+                self.accelerator.backward(loss, retain_graph=True)
+
+        if self.use_sparse_regularization:
+            regularization_loss = self.compute_regularization(model)
+            if self.args.n_gpu > 1:
+                regularization_loss = regularization_loss.mean()
+            if regularization_loss is not None:
+                self.accelerator.backward(regularization_loss, retain_graph=True)
+                loss += regularization_loss
+
+        if self.use_spm_loss:
+            spm_loss = self.compute_spm_loss(model)
+            if self.args.n_gpu > 1:
+                spm_loss = spm_loss.mean()
+            if spm_loss is not None:
+                self.accelerator.backward(spm_loss, retain_graph=False)
+                loss += spm_loss
+
+        return loss.detach() / self.args.gradient_accumulation_steps
+
+    def compute_regularization(self, model):
+        """
+        Compute a sparse regularization loss for SiLU.
+        """
+        loss = 0
+        if len(self.sparse_layers) == 0:
+            self.initialize_sparse_silu_layers(model)
+        num_layers = len(self.sparse_layers)
+
+        for module in self.sparse_layers:
+            if module.activation_norm is not None:
+                loss += module.activation_norm
+
+        loss /= num_layers
+        loss *= self.regularization_coefficient
+
+        if self.state.global_step % 20 == 0 and loss != 0:
+            print("Negative regularizer loss: ", loss.item())
+        return loss
+
+    def compute_spm_loss(self, model):
+        loss = 0
+        if len(self.sparse_decoder_layers) == 0:
+            self.initialize_sparse_decoder_layers(model)
+        for module in self.sparse_decoder_layers:
+            if module.distill_loss is not None:
+                loss += module.distill_loss
+        if self.state.global_step % 20 == 0 and loss != 0:
+            print("Sparse Predictor Distillation loss: ", loss.item())
+        return loss
+
+
+class SparseSiLU(nn.SiLU):
+    def __init__(self, threshold):
+        super(SparseSiLU, self).__init__()
+        self.threshold = threshold
+        self.m = nn.Threshold(self.threshold, 0)
+
+    def set_new_threshold(self, threshold):
+        self.threshold = threshold
+        self.m = nn.Threshold(threshold, 0)
+
+    def forward(self, x):
+        act = super(SparseSiLU, self).forward(x)
+        return self.m(act) - self.m(-act)
+
+
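`SparseSiLU` combines two `nn.Threshold` passes so that SiLU outputs whose magnitude is at or below `threshold` are zeroed, while larger positive and negative values pass through unchanged. A quick standalone check (illustrative values only):

    import torch

    act = SparseSiLU(threshold=0.1)
    x = torch.tensor([-2.0, -0.05, 0.05, 2.0])
    # silu(±0.05) has magnitude below 0.1 and is zeroed; the rest survive.
    print(act(x))  # approx. tensor([-0.2384, 0.0000, 0.0000, 1.7616])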
+class MistralSparseSiluMLP(MistralMLP):
+    def __init__(self, config, *args, **kwargs):
+        super().__init__(config)
+        self.swish_outputs = None
+        self.relu = nn.ReLU()
+
+        self.kill_sparse_swish_outputs = False
+        self.dead_percentage = 0
+        self.is_stats = False
+        self.visit_counts = 0
+
+        # Hyperparameters to tune
+        self.dead_threshold = kwargs.pop("dead_threshold", 0)
+        self.use_sparse_regularization = kwargs.pop("use_sparse_regularization", True)
+        self.regularization_type = kwargs.pop("regularization_type", "L1 regularization")
+        self.regularization_threshold = kwargs.pop("regularization_threshold", 0.5)
+        self.use_relu = kwargs.pop("use_relu", False)
+        self.activation_norm = None
+
+        # Activation Histograms
+        self.is_collect_histogram = False
+        num_bins = 1000
+        self.histogram_bins = torch.linspace(-1, 1, num_bins - 2)
+        self.histogram_bins = torch.cat([torch.tensor([-torch.inf]), self.histogram_bins, torch.tensor([torch.inf])])
+        self.pre_act_hist_counts = torch.zeros(num_bins - 1)
+        self.post_act_hist_counts = torch.zeros(num_bins - 1)
+        self.t = 0
+        self.agg_sparsity = 0
+
+        # Sparse activation function
+        self.sparse_act_fn = SparseSiLU(threshold=self.dead_threshold)
+
+    def activate_stats(self, is_collect_histogram: bool = True):
+        self.is_stats = True
+        self.dead_percentage = 0
+        self.visit_counts = 0
+        self.is_collect_histogram = is_collect_histogram
+        self.histogram_counts = torch.zeros(2000)  # .to(self.down_proj.weight.device)
+
+    def deactivate_stats(self):
+        self.is_stats = False
+
+    def collect_stats(self, pre_activation, post_activation):
+        start_time = time.time()
+        pre_activation = pre_activation.float().cpu().detach()
+        post_activation = post_activation.float().cpu().detach()
+        # self.histogram_bins = self.histogram_bins.to(pre_activation.device).type(pre_activation.dtype)
+        self.pre_act_hist_counts += torch.histogram(pre_activation, bins=self.histogram_bins)[0]
+        self.post_act_hist_counts += torch.histogram(torch.abs(post_activation), bins=self.histogram_bins)[0]
+        self.t += time.time() - start_time
+        if self.visit_counts % 30 == 0:
+            print(f"Time taken to collect stats: {self.t}s.")
+
+    def forward(
+        self,
+        x,
+        sp_mask: Optional[torch.Tensor] = None,
+    ):
+        """
+        If kill_sparse_swish_outputs is set to False, this layer functions exactly like a normal MLP layer.
+        """
+        if sp_mask is not None:  # When a sparse mask is given
+            return self.down_proj(
+                self.sparse_act_fn(self.gate_proj(x) * sp_mask) * self.up_proj(x)
+            )  # TODO: this does not accelerate runtime (it slows it down instead)
+
+        elif self.use_relu:
+            return self.down_proj(self.relu(self.gate_proj(x)) * self.up_proj(x))
+
+        else:
+            pre_act = self.gate_proj(x)
+            post_act = self.act_fn(pre_act)
+            if self.kill_sparse_swish_outputs:
+                dead_neurons = post_act.abs() <= self.dead_threshold
+
+                dead_percentage = dead_neurons.float().mean()
+                agg_sparsity = dead_neurons.all(dim=0).float().mean()
+
+                if self.is_stats:
+                    self.dead_percentage = (self.dead_percentage * self.visit_counts + dead_percentage) / (self.visit_counts + 1)
+                    self.agg_sparsity = (self.agg_sparsity * self.visit_counts + agg_sparsity) / (self.visit_counts + 1)
+                    self.visit_counts += 1
+
+                    # print(self.agg_sparsity)
+
+                    # Collect histogram stats
+                    if self.is_collect_histogram:
+                        self.collect_stats(pre_act, post_act)
+
+                post_act[dead_neurons] = 0
+
+            out = self.down_proj(post_act * self.up_proj(x))
+            if self.use_sparse_regularization:
+                if self.regularization_type == "L1 regularization":
+                    self.activation_norm = torch.abs(post_act)[post_act < self.regularization_threshold].mean()
+                elif self.regularization_type == "L2 regularization":
+                    self.activation_norm = torch.sqrt(torch.square(post_act)[post_act < self.regularization_threshold]).mean()
+
+            return out
+
+
+class SparseMistralDecoderLayer(MistralDecoderLayer):
+    def __init__(
+        self,
+        config: MistralConfig,
+        layer_idx: int,
+        decoder_layer: MistralDecoderLayer,
+        init_svd: bool = True,
+        *args,
+        **kwargs,
+    ):
+        assert isinstance(decoder_layer.mlp, MistralSparseSiluMLP), f"{type(decoder_layer.mlp)} should be MistralSparseSiluMLP."
+
+        super().__init__(config, layer_idx)
+        self.hidden_size = config.hidden_size
+        self.intermediate_size = config.intermediate_size
+
+        self.init_svd = init_svd
+        self.self_attn = decoder_layer.self_attn
+
+        self.mlp = decoder_layer.mlp
+        self.input_layernorm = decoder_layer.input_layernorm
+        self.post_attention_layernorm = decoder_layer.post_attention_layernorm
+
+        # Sparse predictor for the MLP (initialized with an SVD-decomposed matrix)
+        self.low_rank = kwargs.pop("low_rank", 64)
+        self.sparse_act_func = decoder_layer.mlp.sparse_act_fn
+
+        print(f"Setting {layer_idx}th mlp layer's sparse predictor... svd init: {init_svd}")
+        self.sp_mlp = low_rank_approximation(
+            decoder_layer.mlp.gate_proj,
+            act_func=self.sparse_act_func,
+            init_svd=init_svd,
+        )
+        self.use_async = kwargs.pop("use_async", False)
+        self.use_sparse_predictor = False
+        self.distill_loss = None
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        attention_mask: Optional[torch.Tensor] = None,
+        position_ids: Optional[torch.LongTensor] = None,
+        past_key_value: Optional[Tuple[torch.Tensor]] = None,
+        output_attentions: Optional[bool] = False,
+        use_cache: Optional[bool] = False,
+        **kwargs,
+    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+        if "padding_mask" in kwargs:
+            warnings.warn(
+                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
+            )
+
+        residual = hidden_states
+        sp_mask = None
+
+        if self.use_async:
+            sp_mask = self.sp_mlp(hidden_states)
+
+        hidden_states = self.input_layernorm(hidden_states)
+
+        # Self Attention
+        hidden_states, self_attn_weights, present_key_value = self.self_attn(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            past_key_value=past_key_value,
+            output_attentions=output_attentions,
+            use_cache=use_cache,
+        )
+        hidden_states = residual + hidden_states
+
+        # Fully Connected
+        residual = hidden_states
+        hidden_states = self.post_attention_layernorm(hidden_states)
+
+        if not self.use_async:
+            sp_mask = self.sp_mlp(hidden_states)
+
+        # Compute distillation loss
+        gating_output = self.mlp.sparse_act_fn(self.mlp.gate_proj(hidden_states))
+        loss_func = MSELoss()
+        self.distill_loss = loss_func(sp_mask, gating_output)
+
+        # Convert the sparse mask into binary form
+        sp_mask = sp_mask > 0
+
+        if self.training:
+            sp_mask = None
+            # if not self.use_sparse_predictor:
+            #     sp_mask = None
+
+        hidden_states = self.mlp(hidden_states, sp_mask)
+        hidden_states = residual + hidden_states
+
+        outputs = (hidden_states,)
+
+        if output_attentions:
+            outputs += (self_attn_weights,)
+
+        if use_cache:
+            outputs += (present_key_value,)
+
+        return outputs
+
+
+class SparseMistralConfig(MistralConfig):
+    model_type = "sparse_mistral"
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+
+class SparseMistralforCausalLM(MistralForCausalLM):
+    config_class = SparseMistralConfig
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.config = config
+        if config.use_sparse_model:
+            self.apply_sparse_mlp()
+            if config.thresholds is not None:
+                for idx, m in enumerate(self.model.layers):
+                    if isinstance(m.mlp, MistralSparseSiluMLP):
+                        m.mlp.dead_threshold = config.thresholds[idx]
+                        m.mlp.sparse_act_fn.set_new_threshold(m.mlp.dead_threshold)
+                        m.mlp.kill_sparse_swish_outputs = True
+                        print("Setting a threshold")
+        if config.use_sparse_predictor:
+            self.apply_sparse_predictor(init_svd=config.init_svd)
+
+    def apply_sparse_mlp(self):
+        apply_mistral_sparse_silu_mlp(
+            self,
+            config=self.config,
+            use_sparse_regularization=self.config.use_sparse_regularization,
+        )
+
+    def apply_sparse_predictor(self, init_svd: bool = True):
+        apply_mistral_sparse_decoder_layer(self, config=self.config, init_svd=init_svd)
+
+
+class GracefulRegularizationScheduler(TrainerCallback):
+    def __init__(
+        self,
+        num_warmup_steps=40,
+        is_enabled: bool = False,
+        model_name: str = "mistral",
+        test_dataset: Dataset = None,
+        targeted_sparsity: float = 0.5,
+        keep_regularization_with_kill: bool = False,
+    ):
+        """Scheduler that regularizes the model first, before the dead threshold is applied.
+
+        :param num_warmup_steps: number of training steps before the dead threshold is applied, defaults to 40
+        """
+        self.num_warmup_steps = num_warmup_steps
+        self.is_enabled = is_enabled
+        self.model_name = model_name
+        self.test_dataset = test_dataset
+        self.targeted_sparsity = targeted_sparsity
+        self.keep_regularization_with_kill = keep_regularization_with_kill
+        self.act_hist_path = f"/matx/u/lukeai/histograms/graceful_reg_{targeted_sparsity}/act_hist.pt"
+        if self.is_enabled:
+            print("GracefulRegularizationScheduler is enabled.")
+        self.trainer = None
+
+    def set_trainer(self, trainer):
+        self.trainer = trainer
+
+    def on_step_end(self, args, state, control, **kwargs):
+        if not self.is_enabled:
+            return
+
+        model = kwargs["model"]
+        if isinstance(model, PeftModel):
+            base_model = model.get_base_model()
+        else:
+            base_model = model
+
+        if state.global_step == 1:
+            ds_print("Setting an initial reg threshold to 0.1")
+            set_regularization_threshold(base_model, 0.1)
+
+        # if state.global_step >= self.num_warmup_steps and state.global_step % 50 == 0:
+        if state.global_step == self.num_warmup_steps:
+            activate_stats(base_model)
+            enable_sparse_silu(base_model)
+            self.trainer.evaluate()
+            save_act_hist(base_model, self.act_hist_path)
+            set_sparse_threshold(base_model, self.targeted_sparsity, False)
+            deactivate_stats(base_model)
+            self.trainer.use_sparse_regularization = self.keep_regularization_with_kill
+            # set_layer_specific_regularization(model.get_base_model())
+            print_dead_neuron_stats(model.get_base_model())
+
+        if state.global_step % 2000 == 0:
+            if is_mainprocess():
+                ds_print(
+                    f"Saving to /scr/lukeai/{self.model_name}_{state.global_step}.pt",
+                )
+                torch.save(
+                    model.state_dict(),
+                    f"/scr/lukeai/{self.model_name}_{state.global_step}.pt",
+                )
+
+
+class GradualSparsificationScheduler(TrainerCallback):
+    def __init__(
+        self,
+        num_warmup_steps=40,
+        increment_ratio=0.5,
+        is_enabled: bool = False,
+        model_name: str = "mistral",
+    ):
+        """Scheduler that gradually increases the dead threshold until it reaches the desired value.
+
+        :param num_warmup_steps: number of training steps required to reach the dead threshold, defaults to 40
+        :param increment_ratio: by how much to increase the dead threshold.
+            For example, 0.5 means "increase the threshold by 0.5 * desired threshold".
+        """
+        self.num_warmup_steps = num_warmup_steps
+        self.increment_ratio = increment_ratio
+        self.step_size = int(num_warmup_steps * increment_ratio)
+        self.is_enabled = is_enabled
+        self.model_name = model_name
+
+    def on_step_end(self, args, state, control, **kwargs):
+        model = kwargs["model"]
+
+        if not self.is_enabled:
+            if state.global_step <= 10:
+                for module in model.modules():
+                    if isinstance(module, MistralSparseSiluMLP):
+                        module.current_dead_threshold = module.dead_threshold
+            return
+
+        current_dead_threshold = 0
+        desired_dead_threshold = 0
+
+        if is_mainprocess():
+            ds_print(state.global_step)
+
+        if state.global_step % self.step_size == 2:
+            for module in model.modules():
+                if isinstance(module, MistralSparseSiluMLP):
+                    desired_dead_threshold = copy.deepcopy(module.dead_threshold)
+                    current_dead_threshold = module.current_dead_threshold
+                    current_dead_threshold += self.increment_ratio * desired_dead_threshold
+                    module.current_dead_threshold = min(desired_dead_threshold, current_dead_threshold)
+
+        if is_running_deepspeed and is_mainprocess():
+            ds_print(
+                state.global_step,
+                current_dead_threshold,
+                desired_dead_threshold,
+            )
+
+
+def get_sparse_mistral_config(
+    config: MistralConfig,
+    use_sparse_model=False,
+    use_sparse_predictor=False,
+    use_sparse_regularization=False,
+    thresholds=None,
+):
+    new_config = SparseMistralConfig()
+    new_config.__dict__.update(config.__dict__)
+    config = new_config
+    config.use_sparse_model = use_sparse_model
+    config.use_sparse_predictor = use_sparse_predictor
+    config.use_sparse_regularization = use_sparse_regularization
+    config.thresholds = thresholds
+
+    return config
+
+
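`get_sparse_mistral_config` copies every field of an existing `MistralConfig` into a `SparseMistralConfig` and sets the sparsity flags on top of it. A short usage sketch:

    from transformers import AutoConfig

    base_config = AutoConfig.from_pretrained("mistralai/Mistral-7B-v0.1")
    # Same fields as the base config, plus use_sparse_model and friends.
    sparse_config = get_sparse_mistral_config(base_config, use_sparse_model=True)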
+def apply_mistral_sparse_silu_mlp(
+    model,
+    config,
+    use_sparse_regularization: bool = False,
+):
+    # counts = 0
+    for layer in model.model.layers:
+        # counts += 1
+        # if counts < 4:
+        #     continue
+        original_mlp = layer.mlp
+        new_mlp = MistralSparseSiluMLP(config, use_sparse_regularization=use_sparse_regularization)
+        new_mlp.gate_proj = original_mlp.gate_proj
+        new_mlp.up_proj = original_mlp.up_proj
+        new_mlp.down_proj = original_mlp.down_proj
+        layer.mlp = new_mlp
+
+
+def apply_mistral_sparse_decoder_layer(
+    model,
+    config,
+    init_svd: bool = True,
+):
+    assert isinstance(model.model, MistralModel), "model.model must be a MistralModel."
+    new_layers = []
+    for layer_idx, layer in enumerate(model.model.layers):
+        if isinstance(layer.mlp, MistralSparseSiluMLP):
+            new_layers.append(
+                SparseMistralDecoderLayer(
+                    config=config,
+                    layer_idx=layer_idx,
+                    decoder_layer=layer,
+                    init_svd=init_svd,
+                )
+            )
+            print(f"{layer_idx}th mlp layer activation: {layer.mlp.sparse_act_fn}")
+        else:
+            new_layers.append(layer)
+    model.model.layers = nn.ModuleList(new_layers)
+
+
+def enable_sparse_predictor(
+    model,
+):
+    for layer_idx, layer in enumerate(model.model.layers):
+        if isinstance(layer, MistralDecoderLayer):
+            layer.use_sparse_predictor = True
+
+
+def disable_sparse_predictor(
+    model,
+):
+    for layer_idx, layer in enumerate(model.model.layers):
+        if isinstance(layer, MistralDecoderLayer):
+            layer.use_sparse_predictor = False
+
+
+def activate_stats(model, is_collect_histogram: bool = True):
+    for layer in model.model.layers:
+        if isinstance(layer.mlp, MistralSparseSiluMLP):
+            layer.mlp.activate_stats(is_collect_histogram=is_collect_histogram)
+
+
+def deactivate_stats(model):
+    for layer in model.model.layers:
+        if isinstance(layer.mlp, MistralSparseSiluMLP):
+            layer.mlp.deactivate_stats()
+
+
+def enable_sparse_silu(model):
+    print("Enabling SparseSilu")
+    for i, layer in enumerate(model.model.layers):
+        if isinstance(layer.mlp, MistralSparseSiluMLP):
+            layer.mlp.kill_sparse_swish_outputs = True
+
+
+def print_dead_neuron_stats(model):
+    total_sparsity = 0
+    counts = 0
+    for i, layer in enumerate(model.model.layers):
+        if isinstance(layer.mlp, MistralSparseSiluMLP):
+            dead_percentage = layer.mlp.dead_percentage * 100
+            agg_sparsity = layer.mlp.agg_sparsity * 100
+            ds_print(f"layer {i} threshold: {layer.mlp.dead_threshold:.3f}")
+            ds_print(f"layer {i} sparsity: {dead_percentage:.3f}%")
+            ds_print(f"layer {i} agg sparsity: {agg_sparsity:.3f}%")
+            total_sparsity += dead_percentage
+            counts += 1
+
+    ds_print(f"Total sparsity: {total_sparsity / counts:.3f}%")
+    return total_sparsity / counts
+
+
+def get_sparse_layers(model: MistralModel):
+    sparse_layers = [m.mlp for m in model.layers if isinstance(m.mlp, MistralSparseSiluMLP)]
+    return sparse_layers
+
+
+def get_threshold(bin_edges: torch.Tensor, histogram_counts: torch.Tensor, sparsity_level: float):  # Only for L1 regularization
+    assert len(bin_edges.shape) == len(histogram_counts.shape) == 1, "bin_edges and histogram are expected to be 1-dimensional."
+    histogram_counts /= histogram_counts.sum()
+    threshold_idx = torch.searchsorted(histogram_counts.cumsum(0), sparsity_level, side="right")
+
+    return bin_edges[threshold_idx]
+
+
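`get_threshold` normalizes the histogram counts into a probability mass, then `torch.searchsorted` on the cumulative sum finds the first bin edge whose CDF exceeds `sparsity_level`. A hand-checkable sketch with toy numbers (not from any training run):

    import torch

    bin_edges = torch.tensor([0.0, 0.1, 0.2, 0.3, 0.4])
    histogram_counts = torch.tensor([10.0, 20.0, 30.0, 40.0])
    # Normalized CDF: [0.10, 0.30, 0.60, 1.00]; the first entry above 0.5
    # sits at index 2, so the returned threshold is bin_edges[2] == 0.2.
    print(get_threshold(bin_edges, histogram_counts, sparsity_level=0.5))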
+def set_regularization_threshold(model, threshold: float = 0.1):
+    for i, layer in enumerate(model.model.layers):
+        if (
+            isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+        ):  # The threshold can only be set once the relevant statistics have been collected.
+            layer.mlp.regularization_threshold = threshold  # TODO: find better param
+
+
+def set_sparse_threshold(model, sparsity_level: float, use_relu: bool = False):
+    for i, layer in enumerate(model.model.layers):
+        if (
+            isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+        ):  # The threshold can only be set once the relevant statistics have been collected.
+            if use_relu:
+                layer.mlp.sparse_act_fn = nn.ReLU()
+                layer.mlp.use_relu = True
+            else:
+                layer.mlp.dead_threshold = get_threshold(
+                    layer.mlp.histogram_bins,
+                    layer.mlp.post_act_hist_counts,
+                    sparsity_level,
+                )
+                layer.mlp.sparse_act_fn.set_new_threshold(layer.mlp.dead_threshold)
+                layer.mlp.regularization_threshold = layer.mlp.dead_threshold * 1.2  # TODO: find better param
+
+
+def plot_histogram(bin_edges, histogram_counts: torch.Tensor, title: str = "Activation Distribution"):
+    plt.bar(bin_edges[:-1], histogram_counts, width=np.diff(bin_edges), edgecolor="black")
+    plt.title(title)
+    plt.xlabel("Activation Value")
+    plt.ylabel("Frequency")
+    os.makedirs("figures", exist_ok=True)
+    plt.savefig(f"figures/{title}.png")
+    # plt.show()
+    plt.clf()
+
+
+def plot_act(model):
+    for i, layer in enumerate(model.model.layers):
+        if (
+            isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+        ):  # Plots are only available once the relevant statistics have been collected.
+            plot_title = f"Layer: {i} Pre-Activation Distribution"
+            plot_histogram(layer.mlp.histogram_bins, layer.mlp.pre_act_hist_counts, plot_title)
+
+            plot_title = f"Layer: {i} Post-Activation Absolute Distribution"
+            plot_histogram(layer.mlp.histogram_bins, layer.mlp.post_act_hist_counts, plot_title)
+
+
+def save_act_hist(model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"):
+    os.makedirs(os.path.dirname(filename), exist_ok=True)
+    act_dict = {}
+    for i, layer in enumerate(model.model.layers):
+        if (
+            isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+        ):  # Histograms exist only once the relevant statistics have been collected.
+            act_dict[i] = (
+                layer.mlp.histogram_bins,
+                layer.mlp.pre_act_hist_counts,
+                layer.mlp.post_act_hist_counts,
+            )
+    print("Saving activation histograms...\n\n\n")
+    torch.save(act_dict, filename)
+
+
+def load_act_hist(model, filename="/scr/jay/models/mistral/pre_finetune/cola_act_hist.pt"):
+    assert os.path.exists(filename), f"{filename} does not exist when loading pre/post-activation histogram of SparseMistralSiluMLP."
+    print("Loading activation histograms...\n\n\n")
+
+    act_dict = torch.load(filename)
+    for i, layer in enumerate(model.model.layers):
+        if (
+            isinstance(layer.mlp, MistralSparseSiluMLP) and layer.mlp.is_stats
+        ):  # Histograms are only loaded for layers whose statistics were collected.
+            (
+                layer.mlp.histogram_bins,
+                layer.mlp.pre_act_hist_counts,
+                layer.mlp.post_act_hist_counts,
+            ) = act_dict[i]
+
+
+def enable_last_k_modules(model, start_module_idx: int):
+    assert 32 > start_module_idx >= 0
+    new_modules = []
+    new_idx = 0
+    for idx in range(start_module_idx, len(model.model.original_layers)):
+        module = model.model.original_layers[idx]
+        module.layer_idx = new_idx
+        module.self_attn.layer_idx = new_idx
+        new_modules.append(module)
+        new_idx += 1
+        print(module.layer_idx)
+
+    model.model.layers = nn.ModuleList(new_modules)
+
+
+def enable_first_k_modules(model, end_module_idx: int):
+    assert 32 > end_module_idx >= 0
+    new_modules = []
+    new_idx = 0
+    for idx in range(0, end_module_idx + 1):
+        module = model.model.original_layers[idx]
+        module.layer_idx = new_idx
+        module.self_attn.layer_idx = new_idx
+        new_modules.append(module)
+        new_idx += 1
+        print(module.layer_idx)
+
+    model.model.layers = nn.ModuleList(new_modules)
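Taken together, these helpers implement one calibration recipe, the same sequence `GracefulRegularizationScheduler.on_step_end` runs at the end of warmup: collect activation histograms on evaluation data, choose per-layer dead thresholds for a target sparsity, then freeze the statistics. A condensed sketch, assuming `model` wraps `MistralSparseSiluMLP` layers and `trainer` is one of the trainers above:

    # Calibration pass: gather pre-/post-activation histograms.
    activate_stats(model)
    enable_sparse_silu(model)
    trainer.evaluate()

    # Pick thresholds that zero roughly half of the activations,
    # then stop collecting statistics and report per-layer sparsity.
    set_sparse_threshold(model, sparsity_level=0.5)
    deactivate_stats(model)
    print_dead_neuron_stats(model)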
tmp-checkpoint-150/README.md ADDED
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: mistralai/Mistral-7B-v0.1
+---
+
+# Model Card for Model ID
+
+<!-- Provide a quick summary of what the model is/does. -->
+
+
+
+## Model Details
+
+### Model Description
+
+<!-- Provide a longer summary of what this model is. -->
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+<!-- Provide the basic links for the model. -->
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+### Direct Use
+
+<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+<!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+[More Information Needed]
+
+### Recommendations
+
+<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+[More Information Needed]
+
+### Training Procedure
+
+<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+#### Speeds, Sizes, Times [optional]
+
+<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+[More Information Needed]
+
+## Evaluation
+
+<!-- This section describes the evaluation protocols and provides the results. -->
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+<!-- This should link to a Dataset Card if possible. -->
+
+[More Information Needed]
+
+#### Factors
+
+<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+[More Information Needed]
+
+#### Metrics
+
+<!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+<!-- Relevant interpretability work for the model goes here -->
+
+[More Information Needed]
+
+## Environmental Impact
+
+<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
tmp-checkpoint-150/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.1,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 64,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "gate_proj",
+    "down_proj",
+    "v_proj",
+    "up_proj",
+    "q_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
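This checkpoint stores a LoRA adapter (r=64, lora_alpha=16) over `mistralai/Mistral-7B-v0.1` rather than full model weights, so it is used by attaching it to the base model with PEFT. A minimal sketch (the adapter path is a placeholder):

    from peft import PeftModel
    from transformers import AutoModelForCausalLM

    base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
    # Placeholder path to a directory containing adapter_config.json
    # and adapter_model.safetensors.
    model = PeftModel.from_pretrained(base, "path/to/checkpoint")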
tmp-checkpoint-150/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3e1066dadfef03ea4836f81d5bff2fafd2f380341ae4e2a17370a00b6470d05
+size 281061608
tmp-checkpoint-150/global_step200/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45c43de59f6971c9029185d5117bd1c6973ac53bad3efba539d674f65398d69e
+size 421539920
tmp-checkpoint-150/global_step200/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d30cb539e90fe77db8fe0744cfe2c63b2a79c0229dd460f546ad711dd93fb8d
+size 421539920
tmp-checkpoint-150/global_step200/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5b8609c192b2db07740ce2349466d979ef964e99ea33d5ac2923b5b5abfb189
+size 421539984
tmp-checkpoint-150/global_step200/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d17e32779c1a13e348727007d3258e76f73bb3d761f6ffa80d0668654d675ed
+size 421539984
tmp-checkpoint-150/global_step200/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74ce7580a0b0cc1e716d1e8a9a4ff554f99bd39b2252c475db5b95eba4c3e22a
+size 281267244
tmp-checkpoint-150/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tmp-checkpoint-150/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tmp-checkpoint-150/tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
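These tokenizer files restore a `LlamaTokenizer` with `</s>` reused as the padding token, which matters for batched fine-tuning. A minimal sketch (the checkpoint directory is a placeholder):

    from transformers import AutoTokenizer

    # Placeholder path; any directory holding the tokenizer files above works.
    tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")
    assert tokenizer.pad_token == tokenizer.eos_token == "</s>"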
tmp-checkpoint-150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1d16669460f3a6b81a3754ad01106c55e9b9ff5f29b51e86fb16757a5dff89ea
+size 6520
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea31c24da55d47344e48dcbadd0aba88b07fb984253e769b1170b588b215b505
+oid sha256:11154dfc64f012ecf478b1a2fe9121dbebe10acc689a5ad1653d7d80f4db4611
 size 6520