mlabonne committed
Commit 7315603
1 Parent(s): deb7de6

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,202 +1,146 @@
  ---
+ license: apache-2.0
  library_name: peft
+ tags:
+ - generated_from_trainer
  base_model: ai21labs/Jamba-v0.1
+ model-index:
+ - name: out
+   results: []
  ---

- # Model Card for Model ID

- <!-- Provide a quick summary of what the model is/does. -->


- ## Model Details
-
- ### Model Description
-
- <!-- Provide a longer summary of what this model is. -->
-
-
-
- - **Developed by:** [More Information Needed]
- - **Funded by [optional]:** [More Information Needed]
- - **Shared by [optional]:** [More Information Needed]
- - **Model type:** [More Information Needed]
- - **Language(s) (NLP):** [More Information Needed]
- - **License:** [More Information Needed]
- - **Finetuned from model [optional]:** [More Information Needed]
-
- ### Model Sources [optional]
-
- <!-- Provide the basic links for the model. -->
-
- - **Repository:** [More Information Needed]
- - **Paper [optional]:** [More Information Needed]
- - **Demo [optional]:** [More Information Needed]
-
- ## Uses
-
- <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
-
- ### Direct Use
-
- <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
-
- [More Information Needed]
-
- ### Downstream Use [optional]
-
- <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
-
- [More Information Needed]
-
- ### Out-of-Scope Use
-
- <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
- [More Information Needed]
-
- ## Bias, Risks, and Limitations
-
- <!-- This section is meant to convey both technical and sociotechnical limitations. -->
-
- [More Information Needed]
-
- ### Recommendations
-
- <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
-
- Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
- ## How to Get Started with the Model
-
- Use the code below to get started with the model.
-
- [More Information Needed]
-
- ## Training Details
-
- ### Training Data
-
- <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
-
- [More Information Needed]
-
- ### Training Procedure
-
- <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-
- #### Preprocessing [optional]
-
- [More Information Needed]
-
-
- #### Training Hyperparameters
-
- - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
-
- #### Speeds, Sizes, Times [optional]
-
- <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
-
- [More Information Needed]
-
- ## Evaluation
-
- <!-- This section describes the evaluation protocols and provides the results. -->
-
- ### Testing Data, Factors & Metrics
-
- #### Testing Data
-
- <!-- This should link to a Dataset Card if possible. -->
-
- [More Information Needed]
-
- #### Factors
-
- <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
-
- [More Information Needed]
-
- #### Metrics
-
- <!-- These are the evaluation metrics being used, ideally with a description of why. -->
-
- [More Information Needed]
-
- ### Results
-
- [More Information Needed]
-
- #### Summary
-
-
-
- ## Model Examination [optional]
-
- <!-- Relevant interpretability work for the model goes here -->
-
- [More Information Needed]
-
- ## Environmental Impact
-
- <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
-
- Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
- - **Hardware Type:** [More Information Needed]
- - **Hours used:** [More Information Needed]
- - **Cloud Provider:** [More Information Needed]
- - **Compute Region:** [More Information Needed]
- - **Carbon Emitted:** [More Information Needed]
-
- ## Technical Specifications [optional]
-
- ### Model Architecture and Objective
-
- [More Information Needed]
-
- ### Compute Infrastructure
-
- [More Information Needed]
-
- #### Hardware
-
- [More Information Needed]
-
- #### Software
-
- [More Information Needed]
-
- ## Citation [optional]
-
- <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
-
- **BibTeX:**
-
- [More Information Needed]
-
- **APA:**
-
- [More Information Needed]
-
- ## Glossary [optional]
-
- <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
-
- [More Information Needed]
-
- ## More Information [optional]
-
- [More Information Needed]
-
- ## Model Card Authors [optional]
-
- [More Information Needed]
-
- ## Model Card Contact
-
- [More Information Needed]
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.0`
+ ```yaml
+
+ base_model: ai21labs/Jamba-v0.1
+ trust_remote_code: true
+
+ load_in_8bit: false
+ load_in_4bit: true
+ strict: false
+
+ datasets:
+   - path: mhenrichsen/alpaca_2k_test
+     type: alpaca
+ chat_template: chatml
+ dataset_prepared_path:
+ val_set_size: 0.01
+ output_dir: ./out
+
+ sequence_len: 4096
+ sample_packing: true
+ pad_to_sequence_len: true
+ eval_sample_packing: false
+
+ use_wandb: true
+ wandb_project: axolotl
+ wandb_entity:
+ wandb_watch:
+ wandb_name: Jambalpaca-v0.1
+ wandb_log_model:
+
+ adapter: qlora
+ lora_r: 32
+ lora_alpha: 64
+ lora_dropout: 0.05
+ lora_target_linear: true
+
+ low_cpu_mem_usage: true
+ gradient_accumulation_steps: 8
+ micro_batch_size: 1
+ num_epochs: 1
+ optimizer: adamw_bnb_8bit
+ adam_beta2: 0.95
+ adam_epsilon: 0.00001
+ max_grad_norm: 1.0
+ lr_scheduler: cosine
+ learning_rate: 0.0002
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: true
+ gradient_checkpointing_kwargs:
+   use_reentrant: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+
+ warmup_steps: 10
+ evals_per_epoch: 4
+ saves_per_epoch: 4
+ save_total_limit: 2
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ special_tokens:
+
+ ```
+
+ </details><br>
+
+ # out
+
+ This model is a fine-tuned version of [ai21labs/Jamba-v0.1](https://huggingface.co/ai21labs/Jamba-v0.1) on the [mhenrichsen/alpaca_2k_test](https://huggingface.co/datasets/mhenrichsen/alpaca_2k_test) dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9899
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 16
+ - total_eval_batch_size: 2
+ - optimizer: Adam with betas=(0.9,0.95) and epsilon=1e-05
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.9332        | 0.17  | 1    | 1.0365          |
+ | 0.9677        | 0.35  | 2    | 1.0337          |
+ | 0.9337        | 0.7   | 4    | 0.9899          |

  ### Framework versions

- - PEFT 0.10.0
+ - PEFT 0.10.0
+ - Transformers 4.40.0.dev0
+ - Pytorch 2.1.2+cu118
+ - Datasets 2.18.0
+ - Tokenizers 0.15.0
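Note that the reported total_train_batch_size of 16 is micro_batch_size (1) × gradient_accumulation_steps (8) × num_devices (2). For reference, a minimal sketch of loading the resulting QLoRA adapter on the 4-bit base model; the adapter repo id `mlabonne/Jambalpaca-v0.1` is assumed from the wandb run name above, so adjust it to the actual repo:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the Jamba base in 4-bit, matching the training setup
# (load_in_4bit: true and trust_remote_code: true in the axolotl config).
base = AutoModelForCausalLM.from_pretrained(
    "ai21labs/Jamba-v0.1",
    load_in_4bit=True,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("ai21labs/Jamba-v0.1")

# Attach the LoRA adapter weights from this repo (assumed id).
model = PeftModel.from_pretrained(base, "mlabonne/Jambalpaca-v0.1")
model.eval()
```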
adapter_config.json CHANGED
@@ -10,28 +10,28 @@
    "layers_pattern": null,
    "layers_to_transform": null,
    "loftq_config": {},
-   "lora_alpha": 32,
+   "lora_alpha": 64,
    "lora_dropout": 0.05,
    "megatron_config": null,
    "megatron_core": "megatron.core",
    "modules_to_save": null,
    "peft_type": "LORA",
-   "r": 16,
+   "r": 32,
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "k_proj",
      "dt_proj",
      "router",
-     "o_proj",
+     "v_proj",
+     "k_proj",
      "gate_proj",
-     "down_proj",
+     "o_proj",
      "in_proj",
      "out_proj",
-     "x_proj",
+     "up_proj",
+     "down_proj",
      "q_proj",
-     "v_proj",
-     "up_proj"
+     "x_proj"
    ],
    "task_type": "CAUSAL_LM",
    "use_dora": false,
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d9ea9e35900b80b03fbab00de7812f2c67d75592e30574f5bcb55022dff414c7
- size 531653306
+ oid sha256:7e9e338f0e33e3dc4ce519b70b1b77c9a5d9e6691cf6913030df1c3b6a13bab2
+ size 1062990522
checkpoint-2/adapter_config.json CHANGED
@@ -10,28 +10,28 @@
    "layers_pattern": null,
    "layers_to_transform": null,
    "loftq_config": {},
-   "lora_alpha": 32,
+   "lora_alpha": 64,
    "lora_dropout": 0.05,
    "megatron_config": null,
    "megatron_core": "megatron.core",
    "modules_to_save": null,
    "peft_type": "LORA",
-   "r": 16,
+   "r": 32,
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "k_proj",
      "dt_proj",
      "router",
-     "o_proj",
+     "v_proj",
+     "k_proj",
      "gate_proj",
-     "down_proj",
+     "o_proj",
      "in_proj",
      "out_proj",
-     "x_proj",
+     "up_proj",
+     "down_proj",
      "q_proj",
-     "v_proj",
-     "up_proj"
+     "x_proj"
    ],
    "task_type": "CAUSAL_LM",
    "use_dora": false,
checkpoint-2/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fcae7e5c26cc873cdb18a0549ed5008cd4d3b73fb81b5b837d8dec0f6070341e
- size 531611600
+ oid sha256:9ef9f98fc5b708273fb8972835559e2d33bcc26bd962f1052114864ed821d0e6
+ size 1062949440
checkpoint-2/global_step2/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de60e75dd34d3ed2b2376fb277cc48b10fb49b9a585f72f331be5c8846b0d076
- size 797643792
+ oid sha256:5993166e16c39c6bbfc435fa6f8b54f90dd9999e5869f58f80f821f2b2e543ab
+ size 1595168528
checkpoint-2/global_step2/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1581d777820e99083cfabe82792a1d8dd69138c72c58709b631391aab0c5aa62
- size 797644432
+ oid sha256:5d7dc3155c81728105f27df8de402cd044eb226b43d82df659c61d99ca1c890c
+ size 1595169168
checkpoint-2/global_step2/mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7e32a978fa8f59a21f7aff3cb07ee15ca9ca84b0cc4f4f9bb72285420c1d9529
- size 1345660121
+ oid sha256:d8aa23b1b9f9aa86ee7ba859cf0a317cdd442058654252029798d013c6f6af5f
+ size 1876997337
checkpoint-2/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:86f3c3a184fce736e425ae79f301d03b453a47c412a22e4e09d037b087704f0f
+ oid sha256:1a4b811df8764f131ee4217c664f8ae90c4d52049ee577ec3e69510f8a346554
  size 14512
checkpoint-2/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a138b4ddc65b2fe41cb403b7899483fd77c70e0840aca5e683d6c8c4f71adec0
+ oid sha256:cfe192ca207a82c15a9ed16e5e2684e33702bbca61ec28f1d6f223db0900da3e
  size 14512
checkpoint-2/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.35555555555555557,
+   "epoch": 0.34782608695652173,
    "eval_steps": 2,
    "global_step": 2,
    "is_hyper_param_search": false,
@@ -9,33 +9,33 @@
    "is_world_process_zero": true,
    "log_history": [
      {
-       "epoch": 0.18,
-       "grad_norm": 0.3344474727310718,
+       "epoch": 0.17,
+       "grad_norm": 0.4166610439239046,
        "learning_rate": 2e-05,
-       "loss": 0.959,
+       "loss": 0.9332,
        "step": 1
      },
      {
-       "epoch": 0.18,
-       "eval_loss": 1.0038232803344727,
-       "eval_runtime": 60.0979,
-       "eval_samples_per_second": 1.664,
-       "eval_steps_per_second": 0.832,
+       "epoch": 0.17,
+       "eval_loss": 1.0365231037139893,
+       "eval_runtime": 11.8979,
+       "eval_samples_per_second": 1.681,
+       "eval_steps_per_second": 0.84,
        "step": 1
      },
      {
-       "epoch": 0.36,
-       "grad_norm": 0.2992521282333887,
+       "epoch": 0.35,
+       "grad_norm": 0.42192077388393007,
        "learning_rate": 4e-05,
-       "loss": 0.8876,
+       "loss": 0.9677,
        "step": 2
      },
      {
-       "epoch": 0.36,
-       "eval_loss": 1.0015610456466675,
-       "eval_runtime": 60.6066,
-       "eval_samples_per_second": 1.65,
-       "eval_steps_per_second": 0.825,
+       "epoch": 0.35,
+       "eval_loss": 1.0336682796478271,
+       "eval_runtime": 11.9106,
+       "eval_samples_per_second": 1.679,
+       "eval_steps_per_second": 0.84,
        "step": 2
      }
    ],
@@ -44,7 +44,7 @@
    "num_input_tokens_seen": 0,
    "num_train_epochs": 1,
    "save_steps": 2,
-   "total_flos": 4.055437828895539e+16,
+   "total_flos": 4.076330697306931e+16,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null
checkpoint-2/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:820ccf8abb8da9bad49cd1410e42902ccc5037976c458c48cab90b812216d4fb
+ oid sha256:61901b679dccdf55dbfdcf3c43fe290d636408cbe93d2b8c510b8fd666f5c664
  size 6968
checkpoint-4/adapter_config.json CHANGED
@@ -10,28 +10,28 @@
    "layers_pattern": null,
    "layers_to_transform": null,
    "loftq_config": {},
-   "lora_alpha": 32,
+   "lora_alpha": 64,
    "lora_dropout": 0.05,
    "megatron_config": null,
    "megatron_core": "megatron.core",
    "modules_to_save": null,
    "peft_type": "LORA",
-   "r": 16,
+   "r": 32,
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "k_proj",
      "dt_proj",
      "router",
-     "o_proj",
+     "v_proj",
+     "k_proj",
      "gate_proj",
-     "down_proj",
+     "o_proj",
      "in_proj",
      "out_proj",
-     "x_proj",
+     "up_proj",
+     "down_proj",
      "q_proj",
-     "v_proj",
-     "up_proj"
+     "x_proj"
    ],
    "task_type": "CAUSAL_LM",
    "use_dora": false,
checkpoint-4/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ff06a0ff5975f349280efec949d0a4be206f80b6bef536929707c516bf7e9730
- size 531611600
+ oid sha256:37e6f48b6648f7e4547d9758f4b24cbddfc2110b5ead1621190a4e8098fb1772
+ size 1062949440
checkpoint-4/global_step4/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:63dc1f4cb313b53fe2379098fb96db31e6944456103b0b99ebe654a6e46bc105
- size 797643792
+ oid sha256:9c7b6a8864c56b8ea786cf6277e3672e0e4c84eda57a7856655a55692b910643
+ size 1595168528
checkpoint-4/global_step4/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:048140af6f88ae217a6829913d85f07a8c73b9957f60dab0061002f498bfcf8f
- size 797644432
+ oid sha256:df223962689167de3fba124b9dae1fcbd1d3950b854cc10b42c29fa7cccecc1f
+ size 1595169168
checkpoint-4/global_step4/mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6d072c3664c55da80a9511be50ed9b9189657a961290902704cf98c3d55fe6ce
- size 1345660121
+ oid sha256:8c95e7abbee4bb105443249ab1b181cde9e307374d14817517d24270c64fe6e0
+ size 1876997337
checkpoint-4/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:52aa8540fb424d44116caecb2d10a8c57abc9f134b64c8a041a34ce99ae48a96
+ oid sha256:e1896c134cf413c501b55d73d93ad6c62dd0356f4dffc4308bdbb16230b65f9f
  size 14512
checkpoint-4/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92ee6fbd548321da2b037cae26c5b5ee91b1c1750d8e4b5a4ebc38df67f24b78
+ oid sha256:cae4ba58ea382764cb92c2f5f138cfe1bbde97b1a3cfcd7c16a9ab4156c2df46
  size 14512
checkpoint-4/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
  {
    "best_metric": null,
    "best_model_checkpoint": null,
-   "epoch": 0.7111111111111111,
+   "epoch": 0.6956521739130435,
    "eval_steps": 2,
    "global_step": 4,
    "is_hyper_param_search": false,
@@ -9,55 +9,55 @@
    "is_world_process_zero": true,
    "log_history": [
      {
-       "epoch": 0.18,
-       "grad_norm": 0.3344474727310718,
+       "epoch": 0.17,
+       "grad_norm": 0.4166610439239046,
        "learning_rate": 2e-05,
-       "loss": 0.959,
+       "loss": 0.9332,
        "step": 1
      },
      {
-       "epoch": 0.18,
-       "eval_loss": 1.0038232803344727,
-       "eval_runtime": 60.0979,
-       "eval_samples_per_second": 1.664,
-       "eval_steps_per_second": 0.832,
+       "epoch": 0.17,
+       "eval_loss": 1.0365231037139893,
+       "eval_runtime": 11.8979,
+       "eval_samples_per_second": 1.681,
+       "eval_steps_per_second": 0.84,
        "step": 1
      },
      {
-       "epoch": 0.36,
-       "grad_norm": 0.2992521282333887,
+       "epoch": 0.35,
+       "grad_norm": 0.42192077388393007,
        "learning_rate": 4e-05,
-       "loss": 0.8876,
+       "loss": 0.9677,
        "step": 2
      },
      {
-       "epoch": 0.36,
-       "eval_loss": 1.0015610456466675,
-       "eval_runtime": 60.6066,
-       "eval_samples_per_second": 1.65,
-       "eval_steps_per_second": 0.825,
+       "epoch": 0.35,
+       "eval_loss": 1.0336682796478271,
+       "eval_runtime": 11.9106,
+       "eval_samples_per_second": 1.679,
+       "eval_steps_per_second": 0.84,
        "step": 2
      },
      {
-       "epoch": 0.53,
-       "grad_norm": 0.3010397456722721,
+       "epoch": 0.52,
+       "grad_norm": 0.4249677147944204,
        "learning_rate": 6e-05,
-       "loss": 0.9691,
+       "loss": 0.9551,
        "step": 3
      },
      {
-       "epoch": 0.71,
-       "grad_norm": 0.3476376689274444,
+       "epoch": 0.7,
+       "grad_norm": 0.4384769363231807,
        "learning_rate": 8e-05,
-       "loss": 0.9616,
+       "loss": 0.9337,
        "step": 4
      },
      {
-       "epoch": 0.71,
-       "eval_loss": 0.9824349880218506,
-       "eval_runtime": 60.7946,
-       "eval_samples_per_second": 1.645,
-       "eval_steps_per_second": 0.822,
+       "epoch": 0.7,
+       "eval_loss": 0.9899237751960754,
+       "eval_runtime": 11.9484,
+       "eval_samples_per_second": 1.674,
+       "eval_steps_per_second": 0.837,
        "step": 4
      }
    ],
@@ -66,7 +66,7 @@
    "num_input_tokens_seen": 0,
    "num_train_epochs": 1,
    "save_steps": 2,
-   "total_flos": 8.110875657791078e+16,
+   "total_flos": 8.152661394613862e+16,
    "train_batch_size": 1,
    "trial_name": null,
    "trial_params": null
checkpoint-4/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:820ccf8abb8da9bad49cd1410e42902ccc5037976c458c48cab90b812216d4fb
+ oid sha256:61901b679dccdf55dbfdcf3c43fe290d636408cbe93d2b8c510b8fd666f5c664
  size 6968
config.json CHANGED
@@ -37,13 +37,28 @@
    "num_key_value_heads": 8,
    "output_router_logits": false,
    "pad_token_id": 0,
+   "quantization_config": {
+     "_load_in_4bit": true,
+     "_load_in_8bit": false,
+     "bnb_4bit_compute_dtype": "bfloat16",
+     "bnb_4bit_quant_storage": "bfloat16",
+     "bnb_4bit_quant_type": "nf4",
+     "bnb_4bit_use_double_quant": true,
+     "llm_int8_enable_fp32_cpu_offload": false,
+     "llm_int8_has_fp16_weight": false,
+     "llm_int8_skip_modules": null,
+     "llm_int8_threshold": 6.0,
+     "load_in_4bit": true,
+     "load_in_8bit": false,
+     "quant_method": "bitsandbytes"
+   },
    "rms_norm_eps": 1e-06,
    "router_aux_loss_coef": 0.001,
    "sliding_window": null,
    "tie_word_embeddings": false,
-   "torch_dtype": "float16",
+   "torch_dtype": "bfloat16",
    "transformers_version": "4.40.0.dev0",
-   "use_cache": true,
+   "use_cache": false,
    "use_mamba_kernels": true,
    "vocab_size": 65536
  }
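The updated config records the bitsandbytes 4-bit settings used at load time, switches torch_dtype from float16 to bfloat16, and sets use_cache to false (the KV cache is typically disabled while training with gradient checkpointing, which the axolotl config enables). A sketch of the equivalent quantization setup in transformers:

```python
import torch
from transformers import BitsAndBytesConfig

# Equivalent of the quantization_config block above: 4-bit NF4 weights,
# double quantization, bfloat16 compute and storage dtypes.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_quant_storage=torch.bfloat16,
)
```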
tokenizer_config.json CHANGED
@@ -36,6 +36,7 @@
      }
    },
    "bos_token": "<|startoftext|>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
    "clean_up_tokenization_spaces": false,
    "eos_token": "<|endoftext|>",
    "model_max_length": 1000000000000000019884624838656