nguyenthanhdo committed
Commit d7eab89
Parent: 60cca79

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. Qwen_Qwen2-7B-Instruct-lora-2024-07-01-14-29-26.yml +70 -0
  2. README.md +170 -0
  3. adapter_config.json +34 -0
  4. adapter_model.bin +3 -0
  5. added_tokens.json +5 -0
  6. checkpoint-106/README.md +202 -0
  7. checkpoint-106/adapter_config.json +34 -0
  8. checkpoint-106/adapter_model.safetensors +3 -0
  9. checkpoint-106/added_tokens.json +5 -0
  10. checkpoint-106/merges.txt +0 -0
  11. checkpoint-106/optimizer.pt +3 -0
  12. checkpoint-106/rng_state.pth +3 -0
  13. checkpoint-106/scheduler.pt +3 -0
  14. checkpoint-106/special_tokens_map.json +20 -0
  15. checkpoint-106/tokenizer.json +0 -0
  16. checkpoint-106/tokenizer_config.json +43 -0
  17. checkpoint-106/trainer_state.json +855 -0
  18. checkpoint-106/training_args.bin +3 -0
  19. checkpoint-106/vocab.json +0 -0
  20. checkpoint-159/README.md +202 -0
  21. checkpoint-159/adapter_config.json +34 -0
  22. checkpoint-159/adapter_model.safetensors +3 -0
  23. checkpoint-159/added_tokens.json +5 -0
  24. checkpoint-159/merges.txt +0 -0
  25. checkpoint-159/optimizer.pt +3 -0
  26. checkpoint-159/rng_state.pth +3 -0
  27. checkpoint-159/scheduler.pt +3 -0
  28. checkpoint-159/special_tokens_map.json +20 -0
  29. checkpoint-159/tokenizer.json +0 -0
  30. checkpoint-159/tokenizer_config.json +43 -0
  31. checkpoint-159/trainer_state.json +1266 -0
  32. checkpoint-159/training_args.bin +3 -0
  33. checkpoint-159/vocab.json +0 -0
  34. checkpoint-212/README.md +202 -0
  35. checkpoint-212/adapter_config.json +34 -0
  36. checkpoint-212/adapter_model.safetensors +3 -0
  37. checkpoint-212/added_tokens.json +5 -0
  38. checkpoint-212/merges.txt +0 -0
  39. checkpoint-212/optimizer.pt +3 -0
  40. checkpoint-212/rng_state.pth +3 -0
  41. checkpoint-212/scheduler.pt +3 -0
  42. checkpoint-212/special_tokens_map.json +20 -0
  43. checkpoint-212/tokenizer.json +0 -0
  44. checkpoint-212/tokenizer_config.json +43 -0
  45. checkpoint-212/trainer_state.json +1677 -0
  46. checkpoint-212/training_args.bin +3 -0
  47. checkpoint-212/vocab.json +0 -0
  48. checkpoint-265/README.md +202 -0
  49. checkpoint-265/adapter_config.json +34 -0
  50. checkpoint-265/adapter_model.safetensors +3 -0
Qwen_Qwen2-7B-Instruct-lora-2024-07-01-14-29-26.yml ADDED
@@ -0,0 +1,70 @@
+ base_model: Qwen/Qwen2-7B-Instruct
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+ - path: /workspace/axolotl/vinh/PAL/input_output_qwen.json
+ type: input_output
+ dataset_prepared_path:
+ val_set_size: 0.05
+ eval_sample_packing: false
+ output_dir: /workspace/axolotl/vinh/Qwen_Qwen2-7B-Instruct-lora-2024-07-01-14-29-26
+
+ sequence_len: 2048
+ sample_packing: false
+ pad_to_sequence_len: false
+
+ adapter: lora
+ lora_model_dir:
+ lora_r: 64
+ lora_alpha: 128
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 128
+ micro_batch_size: 1
+ num_epochs: 3
+ optimizer: paged_adamw_32bit
+ lr_scheduler: cosine
+ learning_rate: 2e-4
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+ s2_attention:
+
+ loss_watchdog_threshold: 5.0
+ loss_watchdog_patience: 3
+
+ warmup_steps: 10
+ evals_per_epoch: 10
+ eval_table_size:
+ eval_max_new_tokens: 512
+ saves_per_epoch: 2
+ save_total_limit: 20
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
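For orientation, the schedule this config implies can be checked with a few lines of arithmetic. This is a sketch: `max_steps` is taken from the trainer_state.json further down in this commit, and the dataset sizes are inferences from those logs, not values stated anywhere in the repository.

```python
# Hedged back-of-envelope math for the axolotl config above.
micro_batch_size = 1                 # from the config
gradient_accumulation_steps = 128    # from the config
num_epochs = 3                       # from the config
max_steps = 315                      # from trainer_state.json below

effective_batch = micro_batch_size * gradient_accumulation_steps  # 128 samples/optimizer step
steps_per_epoch = max_steps // num_epochs                         # 105
train_samples = steps_per_epoch * effective_batch                 # ~13,440 (inferred)
eval_samples = round(train_samples / 0.95 * 0.05)                 # ~707, from val_set_size: 0.05
print(effective_batch, steps_per_epoch, train_samples, eval_samples)
```

The eval logs below (eval_runtime ≈ 33.6 s at ≈ 21.1 samples/s, i.e. ≈ 711 samples) are consistent with this estimate.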
README.md ADDED
@@ -0,0 +1,170 @@
+ ---
+ base_model: Qwen/Qwen2-7B-Instruct
+ library_name: peft
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: workspace/axolotl/vinh/Qwen_Qwen2-7B-Instruct-lora-2024-07-01-14-29-26
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
+ <details><summary>See axolotl config</summary>
+
+ axolotl version: `0.4.1`
+ ```yaml
+ base_model: Qwen/Qwen2-7B-Instruct
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+ - path: /workspace/axolotl/vinh/PAL/input_output_qwen.json
+ type: input_output
+ dataset_prepared_path:
+ val_set_size: 0.05
+ eval_sample_packing: false
+ output_dir: /workspace/axolotl/vinh/Qwen_Qwen2-7B-Instruct-lora-2024-07-01-14-29-26
+
+ sequence_len: 2048
+ sample_packing: false
+ pad_to_sequence_len: false
+
+ adapter: lora
+ lora_model_dir:
+ lora_r: 64
+ lora_alpha: 128
+ lora_dropout: 0.05
+ lora_target_linear: true
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_name:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 128
+ micro_batch_size: 1
+ num_epochs: 3
+ optimizer: paged_adamw_32bit
+ lr_scheduler: cosine
+ learning_rate: 2e-4
+
+ train_on_inputs: false
+ group_by_length: false
+ bf16: auto
+ fp16:
+ tf32: false
+
+ gradient_checkpointing: false
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention: true
+ s2_attention:
+
+ loss_watchdog_threshold: 5.0
+ loss_watchdog_patience: 3
+
+ warmup_steps: 10
+ evals_per_epoch: 10
+ eval_table_size:
+ eval_max_new_tokens: 512
+ saves_per_epoch: 2
+ save_total_limit: 20
+ debug:
+ deepspeed:
+ weight_decay: 0.0
+ fsdp:
+ fsdp_config:
+
+ ```
+
+ </details><br>
+
+ # workspace/axolotl/vinh/Qwen_Qwen2-7B-Instruct-lora-2024-07-01-14-29-26
+
+ This model is a fine-tuned version of [Qwen/Qwen2-7B-Instruct](https://huggingface.co/Qwen/Qwen2-7B-Instruct) on the None dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0356
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - gradient_accumulation_steps: 128
+ - total_train_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 10
+ - num_epochs: 3
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 0.4503 | 0.0095 | 1 | 0.4264 |
+ | 0.0836 | 0.1043 | 11 | 0.0792 |
+ | 0.0532 | 0.2086 | 22 | 0.0566 |
+ | 0.0511 | 0.3129 | 33 | 0.0496 |
+ | 0.0511 | 0.4172 | 44 | 0.0457 |
+ | 0.0475 | 0.5214 | 55 | 0.0436 |
+ | 0.0435 | 0.6257 | 66 | 0.0420 |
+ | 0.0361 | 0.7300 | 77 | 0.0407 |
+ | 0.0406 | 0.8343 | 88 | 0.0391 |
+ | 0.0349 | 0.9386 | 99 | 0.0384 |
+ | 0.0304 | 1.0429 | 110 | 0.0373 |
+ | 0.0305 | 1.1472 | 121 | 0.0374 |
+ | 0.0251 | 1.2515 | 132 | 0.0365 |
+ | 0.0288 | 1.3558 | 143 | 0.0370 |
+ | 0.0251 | 1.4600 | 154 | 0.0366 |
+ | 0.0236 | 1.5643 | 165 | 0.0353 |
+ | 0.0266 | 1.6686 | 176 | 0.0353 |
+ | 0.0281 | 1.7729 | 187 | 0.0348 |
+ | 0.0246 | 1.8772 | 198 | 0.0340 |
+ | 0.0249 | 1.9815 | 209 | 0.0339 |
+ | 0.0169 | 2.0858 | 220 | 0.0349 |
+ | 0.0155 | 2.1901 | 231 | 0.0371 |
+ | 0.0178 | 2.2943 | 242 | 0.0369 |
+ | 0.0194 | 2.3986 | 253 | 0.0361 |
+ | 0.0139 | 2.5029 | 264 | 0.0357 |
+ | 0.0157 | 2.6072 | 275 | 0.0356 |
+ | 0.0197 | 2.7115 | 286 | 0.0357 |
+ | 0.0188 | 2.8158 | 297 | 0.0357 |
+ | 0.0163 | 2.9201 | 308 | 0.0356 |
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.41.1
+ - Pytorch 2.1.2+cu118
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
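The card stops at framework versions; for completeness, here is a minimal inference sketch, assuming the standard transformers 4.41 / peft 0.11 APIs and a placeholder path for this adapter:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

ADAPTER = "path/to/this/adapter"  # placeholder: local clone or hub repo id

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")
base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, ADAPTER)  # attach the LoRA weights

messages = [{"role": "user", "content": "Solve 23 * 17 with a short Python program."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids, max_new_tokens=512)  # matches eval_max_new_tokens
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```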
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2-7B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
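Because `lora_target_linear: true` expanded to all seven attention/MLP projections listed in `target_modules`, the adapter can also be folded back into the base weights for adapter-free serving. A sketch using peft's `merge_and_unload` (paths are placeholders):

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-7B-Instruct")
merged = PeftModel.from_pretrained(base, "path/to/this/adapter").merge_and_unload()
merged.save_pretrained("qwen2-7b-pal-merged")  # standalone checkpoint, no peft needed
```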
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e00c1b5096a925d96989d6434bdc4970c69ee77b28630ad66d23b66d6505deb9
+ size 323103018
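As with every large binary in this commit, what git actually stores is a three-line Git LFS pointer (spec version, sha256 oid, payload size), not the ~323 MB tensor file itself. A small sketch that parses such a pointer:

```python
def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>", e.g. "size 323103018".
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e00c1b5096a925d96989d6434bdc4970c69ee77b28630ad66d23b66d6505deb9
size 323103018"""

info = parse_lfs_pointer(pointer)
assert int(info["size"]) == 323103018  # bytes, ~323 MB adapter checkpoint
```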
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-106/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-106/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2-7B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-106/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43a9169c4abff3d527fc65649d8e6a5f97f53efb4233eba9469aea349ea853f1
+ size 323014560
checkpoint-106/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-106/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-106/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faec1afe03ad0c8a35021a00aab498ad21e593a2693378cf0de1fd7803702a85
+ size 1292086650
checkpoint-106/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ae807f52bcc5748d1ce37e576f225498f6e018ded4795088f5f981908b52514
+ size 14244
checkpoint-106/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5c6ccb73029ede53fae960610ee97663cfd6cf8234ebdac5276096c0b56e952
+ size 1064
checkpoint-106/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-106/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-106/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
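The `chat_template` above is plain ChatML, with a default system prompt injected whenever the conversation does not start with one. A sketch of what it renders, assuming the tokenizer is loaded from this checkpoint directory:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-106")  # placeholder local path
messages = [{"role": "user", "content": "Hello"}]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# Expected output:
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant
```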
checkpoint-106/trainer_state.json ADDED
@@ -0,0 +1,855 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0049625953633063,
+ "eval_steps": 11,
+ "global_step": 106,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.009480779201540626,
+ "grad_norm": 1.421875,
+ "learning_rate": 2e-05,
+ "loss": 0.4503,
+ "step": 1
+ },
+ {
+ "epoch": 0.009480779201540626,
+ "eval_loss": 0.4263582229614258,
+ "eval_runtime": 33.6286,
+ "eval_samples_per_second": 21.143,
+ "eval_steps_per_second": 21.143,
+ "step": 1
+ },
+ {
+ "epoch": 0.018961558403081252,
+ "grad_norm": 1.359375,
+ "learning_rate": 4e-05,
+ "loss": 0.412,
+ "step": 2
+ },
+ {
+ "epoch": 0.02844233760462188,
+ "grad_norm": 1.1875,
+ "learning_rate": 6e-05,
+ "loss": 0.4214,
+ "step": 3
+ },
+ {
+ "epoch": 0.037923116806162505,
+ "grad_norm": 0.80859375,
+ "learning_rate": 8e-05,
+ "loss": 0.2924,
+ "step": 4
+ },
+ {
+ "epoch": 0.04740389600770313,
+ "grad_norm": 0.51953125,
+ "learning_rate": 0.0001,
+ "loss": 0.1896,
+ "step": 5
+ },
+ {
+ "epoch": 0.05688467520924376,
+ "grad_norm": 0.4453125,
+ "learning_rate": 0.00012,
+ "loss": 0.1531,
+ "step": 6
+ },
+ {
+ "epoch": 0.06636545441078438,
+ "grad_norm": 0.27734375,
+ "learning_rate": 0.00014,
+ "loss": 0.1181,
+ "step": 7
+ },
+ {
+ "epoch": 0.07584623361232501,
+ "grad_norm": 0.263671875,
+ "learning_rate": 0.00016,
+ "loss": 0.1143,
+ "step": 8
+ },
+ {
+ "epoch": 0.08532701281386564,
+ "grad_norm": 0.2197265625,
+ "learning_rate": 0.00018,
+ "loss": 0.0952,
+ "step": 9
+ },
+ {
+ "epoch": 0.09480779201540626,
+ "grad_norm": 0.1708984375,
+ "learning_rate": 0.0002,
+ "loss": 0.0767,
+ "step": 10
+ },
+ {
+ "epoch": 0.10428857121694689,
+ "grad_norm": 0.1484375,
+ "learning_rate": 0.00019999469523400122,
+ "loss": 0.0836,
+ "step": 11
+ },
+ {
+ "epoch": 0.10428857121694689,
+ "eval_loss": 0.07918477058410645,
+ "eval_runtime": 33.4231,
+ "eval_samples_per_second": 21.273,
+ "eval_steps_per_second": 21.273,
+ "step": 11
+ },
+ {
+ "epoch": 0.11376935041848751,
+ "grad_norm": 0.154296875,
+ "learning_rate": 0.00019997878149881574,
+ "loss": 0.0757,
+ "step": 12
+ },
+ {
+ "epoch": 0.12325012962002814,
+ "grad_norm": 0.1513671875,
+ "learning_rate": 0.0001999522604828164,
+ "loss": 0.0767,
+ "step": 13
+ },
+ {
+ "epoch": 0.13273090882156877,
+ "grad_norm": 0.1416015625,
+ "learning_rate": 0.00019991513499975882,
+ "loss": 0.0809,
+ "step": 14
+ },
+ {
+ "epoch": 0.1422116880231094,
+ "grad_norm": 0.09765625,
+ "learning_rate": 0.00019986740898848306,
+ "loss": 0.0634,
+ "step": 15
+ },
+ {
+ "epoch": 0.15169246722465002,
+ "grad_norm": 0.099609375,
+ "learning_rate": 0.00019980908751249555,
+ "loss": 0.0674,
+ "step": 16
+ },
+ {
+ "epoch": 0.16117324642619066,
+ "grad_norm": 0.119140625,
+ "learning_rate": 0.00019974017675943192,
+ "loss": 0.0667,
+ "step": 17
+ },
+ {
+ "epoch": 0.17065402562773127,
+ "grad_norm": 0.09619140625,
+ "learning_rate": 0.0001996606840404006,
+ "loss": 0.0632,
+ "step": 18
+ },
+ {
+ "epoch": 0.1801348048292719,
+ "grad_norm": 0.09130859375,
+ "learning_rate": 0.00019957061778920701,
+ "loss": 0.0488,
+ "step": 19
+ },
+ {
+ "epoch": 0.18961558403081252,
+ "grad_norm": 0.0947265625,
+ "learning_rate": 0.0001994699875614589,
+ "loss": 0.0627,
+ "step": 20
+ },
+ {
+ "epoch": 0.19909636323235316,
+ "grad_norm": 0.08203125,
+ "learning_rate": 0.00019935880403355253,
+ "loss": 0.0528,
+ "step": 21
+ },
+ {
+ "epoch": 0.20857714243389378,
+ "grad_norm": 0.1123046875,
+ "learning_rate": 0.00019923707900153982,
+ "loss": 0.0532,
+ "step": 22
+ },
+ {
+ "epoch": 0.20857714243389378,
+ "eval_loss": 0.056647635996341705,
+ "eval_runtime": 33.3595,
+ "eval_samples_per_second": 21.313,
+ "eval_steps_per_second": 21.313,
+ "step": 22
+ },
+ {
+ "epoch": 0.21805792163543442,
+ "grad_norm": 0.10693359375,
+ "learning_rate": 0.00019910482537987702,
+ "loss": 0.0583,
+ "step": 23
+ },
+ {
+ "epoch": 0.22753870083697503,
+ "grad_norm": 0.0791015625,
+ "learning_rate": 0.0001989620572000544,
+ "loss": 0.0554,
+ "step": 24
+ },
+ {
+ "epoch": 0.23701948003851567,
+ "grad_norm": 0.123046875,
+ "learning_rate": 0.00019880878960910772,
+ "loss": 0.0688,
+ "step": 25
+ },
+ {
+ "epoch": 0.24650025924005628,
+ "grad_norm": 0.10498046875,
+ "learning_rate": 0.00019864503886801106,
+ "loss": 0.0655,
+ "step": 26
+ },
+ {
+ "epoch": 0.2559810384415969,
+ "grad_norm": 0.07421875,
+ "learning_rate": 0.00019847082234995171,
+ "loss": 0.0471,
+ "step": 27
+ },
+ {
+ "epoch": 0.26546181764313753,
+ "grad_norm": 0.080078125,
+ "learning_rate": 0.00019828615853848688,
+ "loss": 0.0518,
+ "step": 28
+ },
+ {
+ "epoch": 0.27494259684467814,
+ "grad_norm": 0.07421875,
+ "learning_rate": 0.00019809106702558277,
+ "loss": 0.0481,
+ "step": 29
+ },
+ {
+ "epoch": 0.2844233760462188,
+ "grad_norm": 0.0703125,
+ "learning_rate": 0.0001978855685095358,
+ "loss": 0.0464,
+ "step": 30
+ },
+ {
+ "epoch": 0.2939041552477594,
+ "grad_norm": 0.0849609375,
+ "learning_rate": 0.00019766968479277683,
+ "loss": 0.0566,
+ "step": 31
+ },
+ {
+ "epoch": 0.30338493444930004,
+ "grad_norm": 0.0859375,
+ "learning_rate": 0.00019744343877955788,
+ "loss": 0.0517,
+ "step": 32
+ },
+ {
+ "epoch": 0.3128657136508407,
+ "grad_norm": 0.07177734375,
+ "learning_rate": 0.00019720685447352209,
+ "loss": 0.0511,
+ "step": 33
+ },
+ {
+ "epoch": 0.3128657136508407,
+ "eval_loss": 0.04964344948530197,
+ "eval_runtime": 33.3741,
+ "eval_samples_per_second": 21.304,
+ "eval_steps_per_second": 21.304,
+ "step": 33
+ },
+ {
+ "epoch": 0.3223464928523813,
+ "grad_norm": 0.076171875,
+ "learning_rate": 0.0001969599569751571,
+ "loss": 0.045,
+ "step": 34
+ },
+ {
+ "epoch": 0.33182727205392193,
+ "grad_norm": 0.07568359375,
+ "learning_rate": 0.00019670277247913205,
+ "loss": 0.0543,
+ "step": 35
+ },
+ {
+ "epoch": 0.34130805125546254,
+ "grad_norm": 0.0791015625,
+ "learning_rate": 0.0001964353282715183,
+ "loss": 0.0444,
+ "step": 36
+ },
+ {
+ "epoch": 0.3507888304570032,
+ "grad_norm": 0.0771484375,
+ "learning_rate": 0.00019615765272689461,
+ "loss": 0.0506,
+ "step": 37
+ },
+ {
+ "epoch": 0.3602696096585438,
+ "grad_norm": 0.080078125,
+ "learning_rate": 0.00019586977530533677,
+ "loss": 0.0558,
+ "step": 38
+ },
+ {
+ "epoch": 0.36975038886008443,
+ "grad_norm": 0.078125,
+ "learning_rate": 0.00019557172654929196,
+ "loss": 0.0507,
+ "step": 39
+ },
+ {
+ "epoch": 0.37923116806162505,
+ "grad_norm": 0.06396484375,
+ "learning_rate": 0.00019526353808033825,
+ "loss": 0.0452,
+ "step": 40
+ },
+ {
+ "epoch": 0.3887119472631657,
+ "grad_norm": 0.07275390625,
+ "learning_rate": 0.00019494524259582992,
+ "loss": 0.0481,
+ "step": 41
+ },
+ {
+ "epoch": 0.3981927264647063,
+ "grad_norm": 0.06591796875,
+ "learning_rate": 0.00019461687386542826,
+ "loss": 0.0464,
+ "step": 42
+ },
+ {
+ "epoch": 0.40767350566624694,
+ "grad_norm": 0.06591796875,
+ "learning_rate": 0.00019427846672751873,
+ "loss": 0.0431,
+ "step": 43
+ },
+ {
+ "epoch": 0.41715428486778755,
+ "grad_norm": 0.06982421875,
+ "learning_rate": 0.00019393005708551498,
+ "loss": 0.0511,
+ "step": 44
+ },
+ {
+ "epoch": 0.41715428486778755,
+ "eval_loss": 0.04574437811970711,
+ "eval_runtime": 33.3514,
+ "eval_samples_per_second": 21.318,
+ "eval_steps_per_second": 21.318,
+ "step": 44
+ },
+ {
+ "epoch": 0.4266350640693282,
+ "grad_norm": 0.06787109375,
+ "learning_rate": 0.00019357168190404936,
+ "loss": 0.0443,
+ "step": 45
+ },
+ {
+ "epoch": 0.43611584327086883,
+ "grad_norm": 0.0751953125,
+ "learning_rate": 0.00019320337920505153,
+ "loss": 0.0545,
+ "step": 46
+ },
+ {
+ "epoch": 0.44559662247240944,
+ "grad_norm": 0.07666015625,
+ "learning_rate": 0.00019282518806371414,
+ "loss": 0.0542,
+ "step": 47
+ },
+ {
+ "epoch": 0.45507740167395005,
+ "grad_norm": 0.07666015625,
+ "learning_rate": 0.0001924371486043473,
+ "loss": 0.0589,
+ "step": 48
+ },
+ {
+ "epoch": 0.4645581808754907,
+ "grad_norm": 0.059326171875,
+ "learning_rate": 0.0001920393019961217,
+ "loss": 0.0444,
+ "step": 49
+ },
+ {
+ "epoch": 0.47403896007703133,
+ "grad_norm": 0.0732421875,
+ "learning_rate": 0.0001916316904487005,
+ "loss": 0.0485,
+ "step": 50
+ },
+ {
+ "epoch": 0.48351973927857195,
+ "grad_norm": 0.060546875,
+ "learning_rate": 0.00019121435720776122,
+ "loss": 0.0408,
+ "step": 51
+ },
+ {
+ "epoch": 0.49300051848011256,
+ "grad_norm": 0.068359375,
+ "learning_rate": 0.0001907873465504076,
+ "loss": 0.0466,
+ "step": 52
+ },
+ {
+ "epoch": 0.5024812976816532,
+ "grad_norm": 0.07763671875,
+ "learning_rate": 0.00019035070378047204,
+ "loss": 0.0426,
+ "step": 53
+ },
+ {
+ "epoch": 0.5119620768831938,
+ "grad_norm": 0.0654296875,
+ "learning_rate": 0.00018990447522370884,
+ "loss": 0.0407,
+ "step": 54
+ },
+ {
+ "epoch": 0.5214428560847345,
+ "grad_norm": 0.080078125,
+ "learning_rate": 0.00018944870822287956,
+ "loss": 0.0475,
+ "step": 55
+ },
+ {
+ "epoch": 0.5214428560847345,
+ "eval_loss": 0.043636418879032135,
+ "eval_runtime": 33.266,
+ "eval_samples_per_second": 21.373,
+ "eval_steps_per_second": 21.373,
+ "step": 55
+ },
+ {
+ "epoch": 0.5309236352862751,
+ "grad_norm": 0.0673828125,
+ "learning_rate": 0.00018898345113272998,
+ "loss": 0.0404,
+ "step": 56
+ },
+ {
+ "epoch": 0.5404044144878157,
+ "grad_norm": 0.06201171875,
+ "learning_rate": 0.00018850875331485995,
+ "loss": 0.036,
+ "step": 57
+ },
+ {
+ "epoch": 0.5498851936893563,
+ "grad_norm": 0.0751953125,
+ "learning_rate": 0.00018802466513248632,
+ "loss": 0.0412,
+ "step": 58
+ },
+ {
+ "epoch": 0.559365972890897,
+ "grad_norm": 0.068359375,
+ "learning_rate": 0.00018753123794509974,
+ "loss": 0.044,
+ "step": 59
+ },
+ {
+ "epoch": 0.5688467520924376,
+ "grad_norm": 0.0673828125,
+ "learning_rate": 0.00018702852410301554,
+ "loss": 0.0458,
+ "step": 60
+ },
+ {
+ "epoch": 0.5783275312939782,
+ "grad_norm": 0.07080078125,
+ "learning_rate": 0.0001865165769418196,
+ "loss": 0.0464,
+ "step": 61
+ },
+ {
+ "epoch": 0.5878083104955188,
+ "grad_norm": 0.0732421875,
+ "learning_rate": 0.00018599545077670985,
+ "loss": 0.0427,
+ "step": 62
+ },
+ {
+ "epoch": 0.5972890896970595,
+ "grad_norm": 0.059814453125,
+ "learning_rate": 0.0001854652008967335,
+ "loss": 0.0403,
+ "step": 63
+ },
+ {
+ "epoch": 0.6067698688986001,
+ "grad_norm": 0.07080078125,
+ "learning_rate": 0.00018492588355892124,
+ "loss": 0.0475,
+ "step": 64
+ },
+ {
+ "epoch": 0.6162506481001407,
+ "grad_norm": 0.072265625,
+ "learning_rate": 0.00018437755598231856,
+ "loss": 0.0454,
+ "step": 65
+ },
+ {
+ "epoch": 0.6257314273016814,
+ "grad_norm": 0.06982421875,
+ "learning_rate": 0.00018382027634191524,
+ "loss": 0.0435,
+ "step": 66
+ },
+ {
+ "epoch": 0.6257314273016814,
+ "eval_loss": 0.042031481862068176,
+ "eval_runtime": 33.3825,
+ "eval_samples_per_second": 21.299,
+ "eval_steps_per_second": 21.299,
+ "step": 66
+ },
+ {
+ "epoch": 0.635212206503222,
+ "grad_norm": 0.0712890625,
+ "learning_rate": 0.00018325410376247294,
+ "loss": 0.0429,
+ "step": 67
+ },
+ {
+ "epoch": 0.6446929857047626,
+ "grad_norm": 0.0625,
+ "learning_rate": 0.0001826790983122527,
+ "loss": 0.0402,
+ "step": 68
+ },
+ {
+ "epoch": 0.6541737649063032,
+ "grad_norm": 0.0673828125,
+ "learning_rate": 0.00018209532099664174,
+ "loss": 0.0437,
+ "step": 69
+ },
+ {
+ "epoch": 0.6636545441078439,
+ "grad_norm": 0.07568359375,
+ "learning_rate": 0.00018150283375168114,
+ "loss": 0.0442,
+ "step": 70
+ },
+ {
+ "epoch": 0.6731353233093845,
+ "grad_norm": 0.06982421875,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.0462,
+ "step": 71
+ },
+ {
+ "epoch": 0.6826161025109251,
+ "grad_norm": 0.07568359375,
+ "learning_rate": 0.00018029198183161998,
+ "loss": 0.0578,
+ "step": 72
+ },
+ {
+ "epoch": 0.6920968817124658,
+ "grad_norm": 0.06494140625,
+ "learning_rate": 0.00017967374562224132,
+ "loss": 0.0443,
+ "step": 73
+ },
+ {
+ "epoch": 0.7015776609140064,
+ "grad_norm": 0.0703125,
+ "learning_rate": 0.00017904705640132718,
+ "loss": 0.0462,
+ "step": 74
+ },
+ {
+ "epoch": 0.711058440115547,
+ "grad_norm": 0.06640625,
+ "learning_rate": 0.00017841198065767107,
+ "loss": 0.0362,
+ "step": 75
+ },
+ {
+ "epoch": 0.7205392193170876,
+ "grad_norm": 0.06396484375,
+ "learning_rate": 0.00017776858576983712,
+ "loss": 0.0431,
+ "step": 76
+ },
+ {
+ "epoch": 0.7300199985186282,
+ "grad_norm": 0.0615234375,
+ "learning_rate": 0.0001771169399990119,
+ "loss": 0.0361,
+ "step": 77
+ },
+ {
+ "epoch": 0.7300199985186282,
+ "eval_loss": 0.040743011981248856,
+ "eval_runtime": 33.5015,
+ "eval_samples_per_second": 21.223,
+ "eval_steps_per_second": 21.223,
+ "step": 77
+ },
+ {
+ "epoch": 0.7395007777201689,
+ "grad_norm": 0.0576171875,
+ "learning_rate": 0.00017645711248176195,
+ "loss": 0.0371,
+ "step": 78
+ },
+ {
+ "epoch": 0.7489815569217095,
+ "grad_norm": 0.060791015625,
+ "learning_rate": 0.00017578917322269886,
+ "loss": 0.0395,
+ "step": 79
+ },
+ {
+ "epoch": 0.7584623361232501,
+ "grad_norm": 0.06787109375,
+ "learning_rate": 0.00017511319308705198,
+ "loss": 0.0387,
+ "step": 80
+ },
+ {
+ "epoch": 0.7679431153247908,
+ "grad_norm": 0.0634765625,
+ "learning_rate": 0.0001744292437931502,
+ "loss": 0.0374,
+ "step": 81
+ },
+ {
+ "epoch": 0.7774238945263314,
+ "grad_norm": 0.0732421875,
+ "learning_rate": 0.00017373739790481262,
+ "loss": 0.042,
+ "step": 82
+ },
+ {
+ "epoch": 0.786904673727872,
+ "grad_norm": 0.052734375,
+ "learning_rate": 0.00017303772882365016,
+ "loss": 0.0314,
+ "step": 83
+ },
+ {
+ "epoch": 0.7963854529294127,
+ "grad_norm": 0.07177734375,
+ "learning_rate": 0.00017233031078127788,
+ "loss": 0.0404,
+ "step": 84
+ },
+ {
+ "epoch": 0.8058662321309532,
+ "grad_norm": 0.0791015625,
+ "learning_rate": 0.00017161521883143934,
+ "loss": 0.0472,
+ "step": 85
+ },
+ {
+ "epoch": 0.8153470113324939,
+ "grad_norm": 0.07568359375,
+ "learning_rate": 0.00017089252884204377,
+ "loss": 0.0434,
+ "step": 86
+ },
+ {
+ "epoch": 0.8248277905340345,
+ "grad_norm": 0.0732421875,
+ "learning_rate": 0.0001701623174871168,
+ "loss": 0.0419,
+ "step": 87
+ },
+ {
+ "epoch": 0.8343085697355751,
+ "grad_norm": 0.0732421875,
+ "learning_rate": 0.0001694246622386658,
+ "loss": 0.0406,
+ "step": 88
+ },
+ {
+ "epoch": 0.8343085697355751,
+ "eval_loss": 0.039096854627132416,
+ "eval_runtime": 33.3174,
+ "eval_samples_per_second": 21.34,
+ "eval_steps_per_second": 21.34,
+ "step": 88
+ },
+ {
+ "epoch": 0.8437893489371158,
+ "grad_norm": 0.056640625,
+ "learning_rate": 0.00016867964135846043,
+ "loss": 0.0331,
+ "step": 89
+ },
+ {
+ "epoch": 0.8532701281386564,
+ "grad_norm": 0.07177734375,
+ "learning_rate": 0.00016792733388972932,
+ "loss": 0.0439,
+ "step": 90
+ },
+ {
+ "epoch": 0.862750907340197,
+ "grad_norm": 0.059326171875,
+ "learning_rate": 0.0001671678196487741,
+ "loss": 0.0422,
+ "step": 91
+ },
+ {
+ "epoch": 0.8722316865417377,
+ "grad_norm": 0.07470703125,
+ "learning_rate": 0.00016640117921650117,
+ "loss": 0.0463,
+ "step": 92
+ },
+ {
+ "epoch": 0.8817124657432783,
+ "grad_norm": 0.057373046875,
+ "learning_rate": 0.00016562749392987254,
+ "loss": 0.037,
+ "step": 93
+ },
+ {
+ "epoch": 0.8911932449448189,
+ "grad_norm": 0.057373046875,
+ "learning_rate": 0.0001648468458732762,
+ "loss": 0.0361,
+ "step": 94
+ },
+ {
+ "epoch": 0.9006740241463596,
+ "grad_norm": 0.061279296875,
+ "learning_rate": 0.00016405931786981755,
+ "loss": 0.039,
+ "step": 95
+ },
+ {
+ "epoch": 0.9101548033479001,
+ "grad_norm": 0.0634765625,
+ "learning_rate": 0.00016326499347253207,
+ "loss": 0.0393,
+ "step": 96
+ },
+ {
+ "epoch": 0.9196355825494408,
+ "grad_norm": 0.05712890625,
+ "learning_rate": 0.00016246395695552085,
+ "loss": 0.0376,
+ "step": 97
+ },
+ {
+ "epoch": 0.9291163617509814,
+ "grad_norm": 0.054931640625,
+ "learning_rate": 0.00016165629330500952,
+ "loss": 0.035,
+ "step": 98
+ },
+ {
+ "epoch": 0.938597140952522,
+ "grad_norm": 0.060791015625,
+ "learning_rate": 0.0001608420882103315,
+ "loss": 0.0349,
+ "step": 99
+ },
+ {
+ "epoch": 0.938597140952522,
+ "eval_loss": 0.03842462599277496,
+ "eval_runtime": 33.3873,
+ "eval_samples_per_second": 21.296,
+ "eval_steps_per_second": 21.296,
+ "step": 99
+ },
+ {
+ "epoch": 0.9480779201540627,
+ "grad_norm": 0.0703125,
+ "learning_rate": 0.00016002142805483685,
+ "loss": 0.0379,
+ "step": 100
+ },
+ {
+ "epoch": 0.9575586993556033,
+ "grad_norm": 0.06884765625,
+ "learning_rate": 0.0001591943999067273,
+ "loss": 0.0382,
+ "step": 101
+ },
+ {
+ "epoch": 0.9670394785571439,
+ "grad_norm": 0.060791015625,
+ "learning_rate": 0.00015836109150981886,
+ "loss": 0.0345,
+ "step": 102
+ },
+ {
+ "epoch": 0.9765202577586846,
+ "grad_norm": 0.0703125,
+ "learning_rate": 0.00015752159127423263,
+ "loss": 0.0399,
+ "step": 103
+ },
+ {
+ "epoch": 0.9860010369602251,
+ "grad_norm": 0.06787109375,
+ "learning_rate": 0.0001566759882670146,
+ "loss": 0.0358,
+ "step": 104
+ },
+ {
+ "epoch": 0.9954818161617658,
+ "grad_norm": 0.06982421875,
+ "learning_rate": 0.00015582437220268647,
+ "loss": 0.0408,
+ "step": 105
+ },
+ {
+ "epoch": 1.0049625953633063,
+ "grad_norm": 0.05908203125,
+ "learning_rate": 0.0001549668334337271,
+ "loss": 0.0321,
+ "step": 106
+ }
+ ],
+ "logging_steps": 1,
+ "max_steps": 315,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 53,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.907469225639936e+17,
+ "train_batch_size": 1,
+ "trial_name": null,
+ "trial_params": null
+ }
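`log_history` above interleaves train entries (keyed by `loss`) with eval entries (keyed by `eval_loss`), so recovering the two curves is a two-line filter. A sketch over this file:

```python
import json

with open("checkpoint-106/trainer_state.json") as f:
    state = json.load(f)

# Split the mixed log into the train-loss and eval-loss series.
train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(train[-1])  # (106, 0.0321)
print(evals[-1])  # (99, 0.03842462599277496)
```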
checkpoint-106/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:040d0a78d4967c05b5ae4923ea1ba23193e5db226aec4c55dfa817cfdd1347a0
+ size 6008
checkpoint-106/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-159/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-159/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2-7B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
checkpoint-159/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93bd2fe1c778710c91da462fdd0b52277f5762876969f8db8726d1ed12042e2e
+ size 323014560
checkpoint-159/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-159/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-159/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05ae7ed81e894918ec0b19d1e1625b97ca554b00ff90257857294c81fba3f6e8
+ size 1292086650
checkpoint-159/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c545908143cadbd286b6c69fbeb188ca3abbbd3efcc3e60abd4c6f6b057c5b16
+ size 14244
checkpoint-159/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19809e3e9f97211f78c8259943f11fd298a3c4adde59e98dc5f42c9c642f8a96
+ size 1064
checkpoint-159/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-159/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-159/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
checkpoint-159/trainer_state.json ADDED
@@ -0,0 +1,1266 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 1.5074438930449596,
5
+ "eval_steps": 11,
6
+ "global_step": 159,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.009480779201540626,
13
+ "grad_norm": 1.421875,
14
+ "learning_rate": 2e-05,
15
+ "loss": 0.4503,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.009480779201540626,
20
+ "eval_loss": 0.4263582229614258,
21
+ "eval_runtime": 33.6286,
22
+ "eval_samples_per_second": 21.143,
23
+ "eval_steps_per_second": 21.143,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.018961558403081252,
28
+ "grad_norm": 1.359375,
29
+ "learning_rate": 4e-05,
30
+ "loss": 0.412,
31
+ "step": 2
32
+ },
33
+ {
34
+ "epoch": 0.02844233760462188,
35
+ "grad_norm": 1.1875,
36
+ "learning_rate": 6e-05,
37
+ "loss": 0.4214,
38
+ "step": 3
39
+ },
40
+ {
41
+ "epoch": 0.037923116806162505,
42
+ "grad_norm": 0.80859375,
43
+ "learning_rate": 8e-05,
44
+ "loss": 0.2924,
45
+ "step": 4
46
+ },
47
+ {
48
+ "epoch": 0.04740389600770313,
49
+ "grad_norm": 0.51953125,
50
+ "learning_rate": 0.0001,
51
+ "loss": 0.1896,
52
+ "step": 5
53
+ },
54
+ {
55
+ "epoch": 0.05688467520924376,
56
+ "grad_norm": 0.4453125,
57
+ "learning_rate": 0.00012,
58
+ "loss": 0.1531,
59
+ "step": 6
60
+ },
61
+ {
62
+ "epoch": 0.06636545441078438,
63
+ "grad_norm": 0.27734375,
64
+ "learning_rate": 0.00014,
65
+ "loss": 0.1181,
66
+ "step": 7
67
+ },
68
+ {
69
+ "epoch": 0.07584623361232501,
70
+ "grad_norm": 0.263671875,
71
+ "learning_rate": 0.00016,
72
+ "loss": 0.1143,
73
+ "step": 8
74
+ },
75
+ {
76
+ "epoch": 0.08532701281386564,
77
+ "grad_norm": 0.2197265625,
78
+ "learning_rate": 0.00018,
79
+ "loss": 0.0952,
80
+ "step": 9
81
+ },
82
+ {
83
+ "epoch": 0.09480779201540626,
84
+ "grad_norm": 0.1708984375,
85
+ "learning_rate": 0.0002,
86
+ "loss": 0.0767,
87
+ "step": 10
88
+ },
89
+ {
90
+ "epoch": 0.10428857121694689,
91
+ "grad_norm": 0.1484375,
92
+ "learning_rate": 0.00019999469523400122,
93
+ "loss": 0.0836,
94
+ "step": 11
95
+ },
96
+ {
97
+ "epoch": 0.10428857121694689,
98
+ "eval_loss": 0.07918477058410645,
99
+ "eval_runtime": 33.4231,
100
+ "eval_samples_per_second": 21.273,
101
+ "eval_steps_per_second": 21.273,
102
+ "step": 11
103
+ },
104
+ {
105
+ "epoch": 0.11376935041848751,
106
+ "grad_norm": 0.154296875,
107
+ "learning_rate": 0.00019997878149881574,
108
+ "loss": 0.0757,
109
+ "step": 12
110
+ },
111
+ {
112
+ "epoch": 0.12325012962002814,
113
+ "grad_norm": 0.1513671875,
114
+ "learning_rate": 0.0001999522604828164,
115
+ "loss": 0.0767,
116
+ "step": 13
117
+ },
118
+ {
119
+ "epoch": 0.13273090882156877,
120
+ "grad_norm": 0.1416015625,
121
+ "learning_rate": 0.00019991513499975882,
122
+ "loss": 0.0809,
123
+ "step": 14
124
+ },
125
+ {
126
+ "epoch": 0.1422116880231094,
127
+ "grad_norm": 0.09765625,
128
+ "learning_rate": 0.00019986740898848306,
129
+ "loss": 0.0634,
130
+ "step": 15
131
+ },
132
+ {
133
+ "epoch": 0.15169246722465002,
134
+ "grad_norm": 0.099609375,
135
+ "learning_rate": 0.00019980908751249555,
136
+ "loss": 0.0674,
137
+ "step": 16
138
+ },
139
+ {
140
+ "epoch": 0.16117324642619066,
141
+ "grad_norm": 0.119140625,
142
+ "learning_rate": 0.00019974017675943192,
143
+ "loss": 0.0667,
144
+ "step": 17
145
+ },
146
+ {
147
+ "epoch": 0.17065402562773127,
148
+ "grad_norm": 0.09619140625,
149
+ "learning_rate": 0.0001996606840404006,
150
+ "loss": 0.0632,
151
+ "step": 18
152
+ },
153
+ {
154
+ "epoch": 0.1801348048292719,
155
+ "grad_norm": 0.09130859375,
156
+ "learning_rate": 0.00019957061778920701,
157
+ "loss": 0.0488,
158
+ "step": 19
159
+ },
160
+ {
161
+ "epoch": 0.18961558403081252,
162
+ "grad_norm": 0.0947265625,
163
+ "learning_rate": 0.0001994699875614589,
164
+ "loss": 0.0627,
165
+ "step": 20
166
+ },
167
+ {
168
+ "epoch": 0.19909636323235316,
169
+ "grad_norm": 0.08203125,
170
+ "learning_rate": 0.00019935880403355253,
171
+ "loss": 0.0528,
172
+ "step": 21
173
+ },
174
+ {
175
+ "epoch": 0.20857714243389378,
176
+ "grad_norm": 0.1123046875,
177
+ "learning_rate": 0.00019923707900153982,
178
+ "loss": 0.0532,
179
+ "step": 22
180
+ },
181
+ {
182
+ "epoch": 0.20857714243389378,
183
+ "eval_loss": 0.056647635996341705,
184
+ "eval_runtime": 33.3595,
185
+ "eval_samples_per_second": 21.313,
186
+ "eval_steps_per_second": 21.313,
187
+ "step": 22
188
+ },
189
+ {
190
+ "epoch": 0.21805792163543442,
191
+ "grad_norm": 0.10693359375,
192
+ "learning_rate": 0.00019910482537987702,
193
+ "loss": 0.0583,
194
+ "step": 23
195
+ },
196
+ {
197
+ "epoch": 0.22753870083697503,
198
+ "grad_norm": 0.0791015625,
199
+ "learning_rate": 0.0001989620572000544,
200
+ "loss": 0.0554,
201
+ "step": 24
202
+ },
203
+ {
204
+ "epoch": 0.23701948003851567,
205
+ "grad_norm": 0.123046875,
206
+ "learning_rate": 0.00019880878960910772,
207
+ "loss": 0.0688,
208
+ "step": 25
209
+ },
210
+ {
211
+ "epoch": 0.24650025924005628,
212
+ "grad_norm": 0.10498046875,
213
+ "learning_rate": 0.00019864503886801106,
214
+ "loss": 0.0655,
215
+ "step": 26
216
+ },
217
+ {
218
+ "epoch": 0.2559810384415969,
219
+ "grad_norm": 0.07421875,
220
+ "learning_rate": 0.00019847082234995171,
221
+ "loss": 0.0471,
222
+ "step": 27
223
+ },
224
+ {
225
+ "epoch": 0.26546181764313753,
226
+ "grad_norm": 0.080078125,
227
+ "learning_rate": 0.00019828615853848688,
228
+ "loss": 0.0518,
229
+ "step": 28
230
+ },
231
+ {
232
+ "epoch": 0.27494259684467814,
233
+ "grad_norm": 0.07421875,
234
+ "learning_rate": 0.00019809106702558277,
235
+ "loss": 0.0481,
236
+ "step": 29
237
+ },
238
+ {
239
+ "epoch": 0.2844233760462188,
240
+ "grad_norm": 0.0703125,
241
+ "learning_rate": 0.0001978855685095358,
242
+ "loss": 0.0464,
243
+ "step": 30
244
+ },
245
+ {
246
+ "epoch": 0.2939041552477594,
247
+ "grad_norm": 0.0849609375,
248
+ "learning_rate": 0.00019766968479277683,
249
+ "loss": 0.0566,
250
+ "step": 31
251
+ },
252
+ {
253
+ "epoch": 0.30338493444930004,
254
+ "grad_norm": 0.0859375,
255
+ "learning_rate": 0.00019744343877955788,
256
+ "loss": 0.0517,
257
+ "step": 32
258
+ },
259
+ {
260
+ "epoch": 0.3128657136508407,
261
+ "grad_norm": 0.07177734375,
262
+ "learning_rate": 0.00019720685447352209,
263
+ "loss": 0.0511,
264
+ "step": 33
265
+ },
266
+ {
267
+ "epoch": 0.3128657136508407,
268
+ "eval_loss": 0.04964344948530197,
269
+ "eval_runtime": 33.3741,
270
+ "eval_samples_per_second": 21.304,
271
+ "eval_steps_per_second": 21.304,
272
+ "step": 33
273
+ },
274
+ {
275
+ "epoch": 0.3223464928523813,
276
+ "grad_norm": 0.076171875,
277
+ "learning_rate": 0.0001969599569751571,
278
+ "loss": 0.045,
279
+ "step": 34
280
+ },
281
+ {
282
+ "epoch": 0.33182727205392193,
283
+ "grad_norm": 0.07568359375,
284
+ "learning_rate": 0.00019670277247913205,
285
+ "loss": 0.0543,
286
+ "step": 35
287
+ },
288
+ {
289
+ "epoch": 0.34130805125546254,
290
+ "grad_norm": 0.0791015625,
291
+ "learning_rate": 0.0001964353282715183,
292
+ "loss": 0.0444,
293
+ "step": 36
294
+ },
295
+ {
296
+ "epoch": 0.3507888304570032,
297
+ "grad_norm": 0.0771484375,
298
+ "learning_rate": 0.00019615765272689461,
299
+ "loss": 0.0506,
300
+ "step": 37
301
+ },
302
+ {
303
+ "epoch": 0.3602696096585438,
304
+ "grad_norm": 0.080078125,
305
+ "learning_rate": 0.00019586977530533677,
306
+ "loss": 0.0558,
307
+ "step": 38
308
+ },
309
+ {
310
+ "epoch": 0.36975038886008443,
311
+ "grad_norm": 0.078125,
312
+ "learning_rate": 0.00019557172654929196,
313
+ "loss": 0.0507,
314
+ "step": 39
315
+ },
316
+ {
317
+ "epoch": 0.37923116806162505,
318
+ "grad_norm": 0.06396484375,
319
+ "learning_rate": 0.00019526353808033825,
320
+ "loss": 0.0452,
321
+ "step": 40
322
+ },
323
+ {
324
+ "epoch": 0.3887119472631657,
325
+ "grad_norm": 0.07275390625,
326
+ "learning_rate": 0.00019494524259582992,
327
+ "loss": 0.0481,
328
+ "step": 41
329
+ },
330
+ {
331
+ "epoch": 0.3981927264647063,
332
+ "grad_norm": 0.06591796875,
333
+ "learning_rate": 0.00019461687386542826,
334
+ "loss": 0.0464,
335
+ "step": 42
336
+ },
337
+ {
338
+ "epoch": 0.40767350566624694,
339
+ "grad_norm": 0.06591796875,
340
+ "learning_rate": 0.00019427846672751873,
341
+ "loss": 0.0431,
342
+ "step": 43
343
+ },
344
+ {
345
+ "epoch": 0.41715428486778755,
346
+ "grad_norm": 0.06982421875,
347
+ "learning_rate": 0.00019393005708551498,
348
+ "loss": 0.0511,
349
+ "step": 44
350
+ },
351
+ {
352
+ "epoch": 0.41715428486778755,
353
+ "eval_loss": 0.04574437811970711,
354
+ "eval_runtime": 33.3514,
355
+ "eval_samples_per_second": 21.318,
356
+ "eval_steps_per_second": 21.318,
357
+ "step": 44
358
+ },
359
+ {
360
+ "epoch": 0.4266350640693282,
361
+ "grad_norm": 0.06787109375,
362
+ "learning_rate": 0.00019357168190404936,
363
+ "loss": 0.0443,
364
+ "step": 45
365
+ },
366
+ {
367
+ "epoch": 0.43611584327086883,
368
+ "grad_norm": 0.0751953125,
369
+ "learning_rate": 0.00019320337920505153,
370
+ "loss": 0.0545,
371
+ "step": 46
372
+ },
373
+ {
374
+ "epoch": 0.44559662247240944,
375
+ "grad_norm": 0.07666015625,
376
+ "learning_rate": 0.00019282518806371414,
377
+ "loss": 0.0542,
378
+ "step": 47
379
+ },
380
+ {
381
+ "epoch": 0.45507740167395005,
382
+ "grad_norm": 0.07666015625,
383
+ "learning_rate": 0.0001924371486043473,
384
+ "loss": 0.0589,
385
+ "step": 48
386
+ },
387
+ {
388
+ "epoch": 0.4645581808754907,
389
+ "grad_norm": 0.059326171875,
390
+ "learning_rate": 0.0001920393019961217,
391
+ "loss": 0.0444,
392
+ "step": 49
393
+ },
394
+ {
395
+ "epoch": 0.47403896007703133,
396
+ "grad_norm": 0.0732421875,
397
+ "learning_rate": 0.0001916316904487005,
398
+ "loss": 0.0485,
399
+ "step": 50
400
+ },
401
+ {
402
+ "epoch": 0.48351973927857195,
403
+ "grad_norm": 0.060546875,
404
+ "learning_rate": 0.00019121435720776122,
405
+ "loss": 0.0408,
406
+ "step": 51
407
+ },
408
+ {
409
+ "epoch": 0.49300051848011256,
410
+ "grad_norm": 0.068359375,
411
+ "learning_rate": 0.0001907873465504076,
412
+ "loss": 0.0466,
413
+ "step": 52
414
+ },
415
+ {
416
+ "epoch": 0.5024812976816532,
417
+ "grad_norm": 0.07763671875,
418
+ "learning_rate": 0.00019035070378047204,
419
+ "loss": 0.0426,
420
+ "step": 53
421
+ },
422
+ {
423
+ "epoch": 0.5119620768831938,
424
+ "grad_norm": 0.0654296875,
425
+ "learning_rate": 0.00018990447522370884,
426
+ "loss": 0.0407,
427
+ "step": 54
428
+ },
429
+ {
430
+ "epoch": 0.5214428560847345,
431
+ "grad_norm": 0.080078125,
432
+ "learning_rate": 0.00018944870822287956,
433
+ "loss": 0.0475,
434
+ "step": 55
435
+ },
436
+ {
437
+ "epoch": 0.5214428560847345,
438
+ "eval_loss": 0.043636418879032135,
439
+ "eval_runtime": 33.266,
440
+ "eval_samples_per_second": 21.373,
441
+ "eval_steps_per_second": 21.373,
442
+ "step": 55
443
+ },
444
+ {
445
+ "epoch": 0.5309236352862751,
446
+ "grad_norm": 0.0673828125,
447
+ "learning_rate": 0.00018898345113272998,
448
+ "loss": 0.0404,
449
+ "step": 56
450
+ },
451
+ {
452
+ "epoch": 0.5404044144878157,
453
+ "grad_norm": 0.06201171875,
454
+ "learning_rate": 0.00018850875331485995,
455
+ "loss": 0.036,
456
+ "step": 57
457
+ },
458
+ {
459
+ "epoch": 0.5498851936893563,
460
+ "grad_norm": 0.0751953125,
461
+ "learning_rate": 0.00018802466513248632,
462
+ "loss": 0.0412,
463
+ "step": 58
464
+ },
465
+ {
466
+ "epoch": 0.559365972890897,
467
+ "grad_norm": 0.068359375,
468
+ "learning_rate": 0.00018753123794509974,
469
+ "loss": 0.044,
470
+ "step": 59
471
+ },
472
+ {
473
+ "epoch": 0.5688467520924376,
474
+ "grad_norm": 0.0673828125,
475
+ "learning_rate": 0.00018702852410301554,
476
+ "loss": 0.0458,
477
+ "step": 60
478
+ },
479
+ {
480
+ "epoch": 0.5783275312939782,
481
+ "grad_norm": 0.07080078125,
482
+ "learning_rate": 0.0001865165769418196,
483
+ "loss": 0.0464,
484
+ "step": 61
485
+ },
486
+ {
487
+ "epoch": 0.5878083104955188,
488
+ "grad_norm": 0.0732421875,
489
+ "learning_rate": 0.00018599545077670985,
490
+ "loss": 0.0427,
491
+ "step": 62
492
+ },
493
+ {
494
+ "epoch": 0.5972890896970595,
495
+ "grad_norm": 0.059814453125,
496
+ "learning_rate": 0.0001854652008967335,
497
+ "loss": 0.0403,
498
+ "step": 63
499
+ },
500
+ {
501
+ "epoch": 0.6067698688986001,
502
+ "grad_norm": 0.07080078125,
503
+ "learning_rate": 0.00018492588355892124,
504
+ "loss": 0.0475,
505
+ "step": 64
506
+ },
507
+ {
508
+ "epoch": 0.6162506481001407,
509
+ "grad_norm": 0.072265625,
510
+ "learning_rate": 0.00018437755598231856,
511
+ "loss": 0.0454,
512
+ "step": 65
513
+ },
514
+ {
515
+ "epoch": 0.6257314273016814,
516
+ "grad_norm": 0.06982421875,
517
+ "learning_rate": 0.00018382027634191524,
518
+ "loss": 0.0435,
519
+ "step": 66
520
+ },
521
+ {
522
+ "epoch": 0.6257314273016814,
523
+ "eval_loss": 0.042031481862068176,
524
+ "eval_runtime": 33.3825,
525
+ "eval_samples_per_second": 21.299,
526
+ "eval_steps_per_second": 21.299,
527
+ "step": 66
528
+ },
529
+ {
530
+ "epoch": 0.635212206503222,
531
+ "grad_norm": 0.0712890625,
532
+ "learning_rate": 0.00018325410376247294,
533
+ "loss": 0.0429,
534
+ "step": 67
535
+ },
536
+ {
537
+ "epoch": 0.6446929857047626,
538
+ "grad_norm": 0.0625,
539
+ "learning_rate": 0.0001826790983122527,
540
+ "loss": 0.0402,
541
+ "step": 68
542
+ },
543
+ {
544
+ "epoch": 0.6541737649063032,
545
+ "grad_norm": 0.0673828125,
546
+ "learning_rate": 0.00018209532099664174,
547
+ "loss": 0.0437,
548
+ "step": 69
549
+ },
550
+ {
551
+ "epoch": 0.6636545441078439,
552
+ "grad_norm": 0.07568359375,
553
+ "learning_rate": 0.00018150283375168114,
554
+ "loss": 0.0442,
555
+ "step": 70
556
+ },
557
+ {
558
+ "epoch": 0.6731353233093845,
559
+ "grad_norm": 0.06982421875,
560
+ "learning_rate": 0.00018090169943749476,
561
+ "loss": 0.0462,
562
+ "step": 71
563
+ },
564
+ {
565
+ "epoch": 0.6826161025109251,
566
+ "grad_norm": 0.07568359375,
567
+ "learning_rate": 0.00018029198183161998,
568
+ "loss": 0.0578,
569
+ "step": 72
570
+ },
571
+ {
572
+ "epoch": 0.6920968817124658,
573
+ "grad_norm": 0.06494140625,
574
+ "learning_rate": 0.00017967374562224132,
575
+ "loss": 0.0443,
576
+ "step": 73
577
+ },
578
+ {
579
+ "epoch": 0.7015776609140064,
580
+ "grad_norm": 0.0703125,
581
+ "learning_rate": 0.00017904705640132718,
582
+ "loss": 0.0462,
583
+ "step": 74
584
+ },
585
+ {
586
+ "epoch": 0.711058440115547,
587
+ "grad_norm": 0.06640625,
588
+ "learning_rate": 0.00017841198065767107,
589
+ "loss": 0.0362,
590
+ "step": 75
591
+ },
592
+ {
593
+ "epoch": 0.7205392193170876,
594
+ "grad_norm": 0.06396484375,
595
+ "learning_rate": 0.00017776858576983712,
596
+ "loss": 0.0431,
597
+ "step": 76
598
+ },
599
+ {
600
+ "epoch": 0.7300199985186282,
601
+ "grad_norm": 0.0615234375,
602
+ "learning_rate": 0.0001771169399990119,
603
+ "loss": 0.0361,
604
+ "step": 77
605
+ },
606
+ {
607
+ "epoch": 0.7300199985186282,
608
+ "eval_loss": 0.040743011981248856,
609
+ "eval_runtime": 33.5015,
610
+ "eval_samples_per_second": 21.223,
611
+ "eval_steps_per_second": 21.223,
612
+ "step": 77
613
+ },
614
+ {
615
+ "epoch": 0.7395007777201689,
616
+ "grad_norm": 0.0576171875,
617
+ "learning_rate": 0.00017645711248176195,
618
+ "loss": 0.0371,
619
+ "step": 78
620
+ },
621
+ {
622
+ "epoch": 0.7489815569217095,
623
+ "grad_norm": 0.060791015625,
624
+ "learning_rate": 0.00017578917322269886,
625
+ "loss": 0.0395,
626
+ "step": 79
627
+ },
628
+ {
629
+ "epoch": 0.7584623361232501,
630
+ "grad_norm": 0.06787109375,
631
+ "learning_rate": 0.00017511319308705198,
632
+ "loss": 0.0387,
633
+ "step": 80
634
+ },
635
+ {
636
+ "epoch": 0.7679431153247908,
637
+ "grad_norm": 0.0634765625,
638
+ "learning_rate": 0.0001744292437931502,
639
+ "loss": 0.0374,
640
+ "step": 81
641
+ },
642
+ {
643
+ "epoch": 0.7774238945263314,
644
+ "grad_norm": 0.0732421875,
645
+ "learning_rate": 0.00017373739790481262,
646
+ "loss": 0.042,
647
+ "step": 82
648
+ },
649
+ {
650
+ "epoch": 0.786904673727872,
651
+ "grad_norm": 0.052734375,
652
+ "learning_rate": 0.00017303772882365016,
653
+ "loss": 0.0314,
654
+ "step": 83
655
+ },
656
+ {
657
+ "epoch": 0.7963854529294127,
658
+ "grad_norm": 0.07177734375,
659
+ "learning_rate": 0.00017233031078127788,
660
+ "loss": 0.0404,
661
+ "step": 84
662
+ },
663
+ {
664
+ "epoch": 0.8058662321309532,
665
+ "grad_norm": 0.0791015625,
666
+ "learning_rate": 0.00017161521883143934,
667
+ "loss": 0.0472,
668
+ "step": 85
669
+ },
670
+ {
671
+ "epoch": 0.8153470113324939,
672
+ "grad_norm": 0.07568359375,
673
+ "learning_rate": 0.00017089252884204377,
674
+ "loss": 0.0434,
675
+ "step": 86
676
+ },
677
+ {
678
+ "epoch": 0.8248277905340345,
679
+ "grad_norm": 0.0732421875,
680
+ "learning_rate": 0.0001701623174871168,
681
+ "loss": 0.0419,
682
+ "step": 87
683
+ },
684
+ {
685
+ "epoch": 0.8343085697355751,
686
+ "grad_norm": 0.0732421875,
687
+ "learning_rate": 0.0001694246622386658,
688
+ "loss": 0.0406,
689
+ "step": 88
690
+ },
691
+ {
692
+ "epoch": 0.8343085697355751,
693
+ "eval_loss": 0.039096854627132416,
694
+ "eval_runtime": 33.3174,
695
+ "eval_samples_per_second": 21.34,
696
+ "eval_steps_per_second": 21.34,
697
+ "step": 88
698
+ },
699
+ {
700
+ "epoch": 0.8437893489371158,
701
+ "grad_norm": 0.056640625,
702
+ "learning_rate": 0.00016867964135846043,
703
+ "loss": 0.0331,
704
+ "step": 89
705
+ },
706
+ {
707
+ "epoch": 0.8532701281386564,
708
+ "grad_norm": 0.07177734375,
709
+ "learning_rate": 0.00016792733388972932,
710
+ "loss": 0.0439,
711
+ "step": 90
712
+ },
713
+ {
714
+ "epoch": 0.862750907340197,
715
+ "grad_norm": 0.059326171875,
716
+ "learning_rate": 0.0001671678196487741,
717
+ "loss": 0.0422,
718
+ "step": 91
719
+ },
720
+ {
721
+ "epoch": 0.8722316865417377,
722
+ "grad_norm": 0.07470703125,
723
+ "learning_rate": 0.00016640117921650117,
724
+ "loss": 0.0463,
725
+ "step": 92
726
+ },
727
+ {
728
+ "epoch": 0.8817124657432783,
729
+ "grad_norm": 0.057373046875,
730
+ "learning_rate": 0.00016562749392987254,
731
+ "loss": 0.037,
732
+ "step": 93
733
+ },
734
+ {
735
+ "epoch": 0.8911932449448189,
736
+ "grad_norm": 0.057373046875,
737
+ "learning_rate": 0.0001648468458732762,
738
+ "loss": 0.0361,
739
+ "step": 94
740
+ },
741
+ {
742
+ "epoch": 0.9006740241463596,
743
+ "grad_norm": 0.061279296875,
744
+ "learning_rate": 0.00016405931786981755,
745
+ "loss": 0.039,
746
+ "step": 95
747
+ },
748
+ {
749
+ "epoch": 0.9101548033479001,
750
+ "grad_norm": 0.0634765625,
751
+ "learning_rate": 0.00016326499347253207,
752
+ "loss": 0.0393,
753
+ "step": 96
754
+ },
755
+ {
756
+ "epoch": 0.9196355825494408,
757
+ "grad_norm": 0.05712890625,
758
+ "learning_rate": 0.00016246395695552085,
759
+ "loss": 0.0376,
760
+ "step": 97
761
+ },
762
+ {
763
+ "epoch": 0.9291163617509814,
764
+ "grad_norm": 0.054931640625,
765
+ "learning_rate": 0.00016165629330500952,
766
+ "loss": 0.035,
767
+ "step": 98
768
+ },
769
+ {
770
+ "epoch": 0.938597140952522,
771
+ "grad_norm": 0.060791015625,
772
+ "learning_rate": 0.0001608420882103315,
773
+ "loss": 0.0349,
774
+ "step": 99
775
+ },
776
+ {
777
+ "epoch": 0.938597140952522,
778
+ "eval_loss": 0.03842462599277496,
779
+ "eval_runtime": 33.3873,
780
+ "eval_samples_per_second": 21.296,
781
+ "eval_steps_per_second": 21.296,
782
+ "step": 99
783
+ },
784
+ {
785
+ "epoch": 0.9480779201540627,
786
+ "grad_norm": 0.0703125,
787
+ "learning_rate": 0.00016002142805483685,
788
+ "loss": 0.0379,
789
+ "step": 100
790
+ },
791
+ {
792
+ "epoch": 0.9575586993556033,
793
+ "grad_norm": 0.06884765625,
794
+ "learning_rate": 0.0001591943999067273,
795
+ "loss": 0.0382,
796
+ "step": 101
797
+ },
798
+ {
799
+ "epoch": 0.9670394785571439,
800
+ "grad_norm": 0.060791015625,
801
+ "learning_rate": 0.00015836109150981886,
802
+ "loss": 0.0345,
803
+ "step": 102
804
+ },
805
+ {
806
+ "epoch": 0.9765202577586846,
807
+ "grad_norm": 0.0703125,
808
+ "learning_rate": 0.00015752159127423263,
809
+ "loss": 0.0399,
810
+ "step": 103
811
+ },
812
+ {
813
+ "epoch": 0.9860010369602251,
814
+ "grad_norm": 0.06787109375,
815
+ "learning_rate": 0.0001566759882670146,
816
+ "loss": 0.0358,
817
+ "step": 104
818
+ },
819
+ {
820
+ "epoch": 0.9954818161617658,
821
+ "grad_norm": 0.06982421875,
822
+ "learning_rate": 0.00015582437220268647,
823
+ "loss": 0.0408,
824
+ "step": 105
825
+ },
826
+ {
827
+ "epoch": 1.0049625953633063,
828
+ "grad_norm": 0.05908203125,
829
+ "learning_rate": 0.0001549668334337271,
830
+ "loss": 0.0321,
831
+ "step": 106
832
+ },
833
+ {
834
+ "epoch": 1.0144433745648471,
835
+ "grad_norm": 0.06298828125,
836
+ "learning_rate": 0.0001541034629409865,
837
+ "loss": 0.0313,
838
+ "step": 107
839
+ },
840
+ {
841
+ "epoch": 1.0239241537663877,
842
+ "grad_norm": 0.061279296875,
843
+ "learning_rate": 0.00015323435232403337,
844
+ "loss": 0.0318,
845
+ "step": 108
846
+ },
847
+ {
848
+ "epoch": 1.0334049329679282,
849
+ "grad_norm": 0.049072265625,
850
+ "learning_rate": 0.00015235959379143678,
851
+ "loss": 0.0274,
852
+ "step": 109
853
+ },
854
+ {
855
+ "epoch": 1.042885712169469,
856
+ "grad_norm": 0.056396484375,
857
+ "learning_rate": 0.0001514792801509831,
858
+ "loss": 0.0304,
859
+ "step": 110
860
+ },
861
+ {
862
+ "epoch": 1.042885712169469,
863
+ "eval_loss": 0.03729868680238724,
864
+ "eval_runtime": 33.6565,
865
+ "eval_samples_per_second": 21.125,
866
+ "eval_steps_per_second": 21.125,
867
+ "step": 110
868
+ },
869
+ {
870
+ "epoch": 1.0523664913710096,
871
+ "grad_norm": 0.054931640625,
872
+ "learning_rate": 0.00015059350479982965,
873
+ "loss": 0.0271,
874
+ "step": 111
875
+ },
876
+ {
877
+ "epoch": 1.0618472705725501,
878
+ "grad_norm": 0.05712890625,
879
+ "learning_rate": 0.0001497023617145958,
880
+ "loss": 0.0285,
881
+ "step": 112
882
+ },
883
+ {
884
+ "epoch": 1.071328049774091,
885
+ "grad_norm": 0.054931640625,
886
+ "learning_rate": 0.0001488059454413923,
887
+ "loss": 0.0272,
888
+ "step": 113
889
+ },
890
+ {
891
+ "epoch": 1.0808088289756315,
892
+ "grad_norm": 0.06201171875,
893
+ "learning_rate": 0.00014790435108579048,
894
+ "loss": 0.0294,
895
+ "step": 114
896
+ },
897
+ {
898
+ "epoch": 1.090289608177172,
899
+ "grad_norm": 0.05908203125,
900
+ "learning_rate": 0.000146997674302732,
901
+ "loss": 0.0273,
902
+ "step": 115
903
+ },
904
+ {
905
+ "epoch": 1.0997703873787126,
906
+ "grad_norm": 0.059326171875,
907
+ "learning_rate": 0.00014608601128638027,
908
+ "loss": 0.0279,
909
+ "step": 116
910
+ },
911
+ {
912
+ "epoch": 1.1092511665802534,
913
+ "grad_norm": 0.06298828125,
914
+ "learning_rate": 0.00014516945875991472,
915
+ "loss": 0.0327,
916
+ "step": 117
917
+ },
918
+ {
919
+ "epoch": 1.118731945781794,
920
+ "grad_norm": 0.060302734375,
921
+ "learning_rate": 0.00014424811396526892,
922
+ "loss": 0.0256,
923
+ "step": 118
924
+ },
925
+ {
926
+ "epoch": 1.1282127249833347,
927
+ "grad_norm": 0.07177734375,
928
+ "learning_rate": 0.00014332207465281364,
929
+ "loss": 0.0312,
930
+ "step": 119
931
+ },
932
+ {
933
+ "epoch": 1.1376935041848752,
934
+ "grad_norm": 0.072265625,
935
+ "learning_rate": 0.0001423914390709861,
936
+ "loss": 0.0314,
937
+ "step": 120
938
+ },
939
+ {
940
+ "epoch": 1.1471742833864158,
941
+ "grad_norm": 0.06298828125,
942
+ "learning_rate": 0.00014145630595586607,
943
+ "loss": 0.0305,
944
+ "step": 121
945
+ },
946
+ {
947
+ "epoch": 1.1471742833864158,
948
+ "eval_loss": 0.03735668212175369,
949
+ "eval_runtime": 33.3809,
950
+ "eval_samples_per_second": 21.3,
951
+ "eval_steps_per_second": 21.3,
952
+ "step": 121
953
+ },
954
+ {
955
+ "epoch": 1.1566550625879564,
956
+ "grad_norm": 0.057373046875,
957
+ "learning_rate": 0.00014051677452070065,
958
+ "loss": 0.027,
959
+ "step": 122
960
+ },
961
+ {
962
+ "epoch": 1.1661358417894971,
963
+ "grad_norm": 0.058837890625,
964
+ "learning_rate": 0.00013957294444537808,
965
+ "loss": 0.027,
966
+ "step": 123
967
+ },
968
+ {
969
+ "epoch": 1.1756166209910377,
970
+ "grad_norm": 0.06494140625,
971
+ "learning_rate": 0.0001386249158658522,
972
+ "loss": 0.0362,
973
+ "step": 124
974
+ },
975
+ {
976
+ "epoch": 1.1850974001925783,
977
+ "grad_norm": 0.060791015625,
978
+ "learning_rate": 0.00013767278936351854,
979
+ "loss": 0.0301,
980
+ "step": 125
981
+ },
982
+ {
983
+ "epoch": 1.194578179394119,
984
+ "grad_norm": 0.06201171875,
985
+ "learning_rate": 0.00013671666595454295,
986
+ "loss": 0.0241,
987
+ "step": 126
988
+ },
989
+ {
990
+ "epoch": 1.2040589585956596,
991
+ "grad_norm": 0.05859375,
992
+ "learning_rate": 0.00013575664707914448,
993
+ "loss": 0.0259,
994
+ "step": 127
995
+ },
996
+ {
997
+ "epoch": 1.2135397377972001,
998
+ "grad_norm": 0.064453125,
999
+ "learning_rate": 0.0001347928345908329,
1000
+ "loss": 0.0325,
1001
+ "step": 128
1002
+ },
1003
+ {
1004
+ "epoch": 1.223020516998741,
1005
+ "grad_norm": 0.05908203125,
1006
+ "learning_rate": 0.00013382533074560255,
1007
+ "loss": 0.0284,
1008
+ "step": 129
1009
+ },
1010
+ {
1011
+ "epoch": 1.2325012962002815,
1012
+ "grad_norm": 0.05908203125,
1013
+ "learning_rate": 0.0001328542381910835,
1014
+ "loss": 0.0267,
1015
+ "step": 130
1016
+ },
1017
+ {
1018
+ "epoch": 1.241982075401822,
1019
+ "grad_norm": 0.0546875,
1020
+ "learning_rate": 0.00013187965995565098,
1021
+ "loss": 0.0274,
1022
+ "step": 131
1023
+ },
1024
+ {
1025
+ "epoch": 1.2514628546033628,
1026
+ "grad_norm": 0.05078125,
1027
+ "learning_rate": 0.00013090169943749476,
1028
+ "loss": 0.0251,
1029
+ "step": 132
1030
+ },
1031
+ {
1032
+ "epoch": 1.2514628546033628,
1033
+ "eval_loss": 0.036460939794778824,
1034
+ "eval_runtime": 33.4627,
1035
+ "eval_samples_per_second": 21.248,
1036
+ "eval_steps_per_second": 21.248,
1037
+ "step": 132
1038
+ },
1039
+ {
1040
+ "epoch": 1.2609436338049034,
1041
+ "grad_norm": 0.0546875,
1042
+ "learning_rate": 0.00012992046039364893,
1043
+ "loss": 0.0273,
1044
+ "step": 133
1045
+ },
1046
+ {
1047
+ "epoch": 1.270424413006444,
1048
+ "grad_norm": 0.064453125,
1049
+ "learning_rate": 0.0001289360469289838,
1050
+ "loss": 0.0263,
1051
+ "step": 134
1052
+ },
1053
+ {
1054
+ "epoch": 1.2799051922079845,
1055
+ "grad_norm": 0.06298828125,
1056
+ "learning_rate": 0.00012794856348516095,
1057
+ "loss": 0.0273,
1058
+ "step": 135
1059
+ },
1060
+ {
1061
+ "epoch": 1.2893859714095253,
1062
+ "grad_norm": 0.06640625,
1063
+ "learning_rate": 0.00012695811482955227,
1064
+ "loss": 0.0277,
1065
+ "step": 136
1066
+ },
1067
+ {
1068
+ "epoch": 1.2988667506110658,
1069
+ "grad_norm": 0.057861328125,
1070
+ "learning_rate": 0.00012596480604412484,
1071
+ "loss": 0.0297,
1072
+ "step": 137
1073
+ },
1074
+ {
1075
+ "epoch": 1.3083475298126066,
1076
+ "grad_norm": 0.057861328125,
1077
+ "learning_rate": 0.000124968742514292,
1078
+ "loss": 0.023,
1079
+ "step": 138
1080
+ },
1081
+ {
1082
+ "epoch": 1.3178283090141472,
1083
+ "grad_norm": 0.06005859375,
1084
+ "learning_rate": 0.00012397002991773275,
1085
+ "loss": 0.0274,
1086
+ "step": 139
1087
+ },
1088
+ {
1089
+ "epoch": 1.3273090882156877,
1090
+ "grad_norm": 0.06591796875,
1091
+ "learning_rate": 0.0001229687742131796,
1092
+ "loss": 0.0262,
1093
+ "step": 140
1094
+ },
1095
+ {
1096
+ "epoch": 1.3367898674172283,
1097
+ "grad_norm": 0.06396484375,
1098
+ "learning_rate": 0.00012196508162917677,
1099
+ "loss": 0.0312,
1100
+ "step": 141
1101
+ },
1102
+ {
1103
+ "epoch": 1.346270646618769,
1104
+ "grad_norm": 0.0693359375,
1105
+ "learning_rate": 0.00012095905865281025,
1106
+ "loss": 0.0291,
1107
+ "step": 142
1108
+ },
1109
+ {
1110
+ "epoch": 1.3557514258203096,
1111
+ "grad_norm": 0.0625,
1112
+ "learning_rate": 0.00011995081201840956,
1113
+ "loss": 0.0288,
1114
+ "step": 143
1115
+ },
1116
+ {
1117
+ "epoch": 1.3557514258203096,
1118
+ "eval_loss": 0.036965224891901016,
1119
+ "eval_runtime": 33.3733,
1120
+ "eval_samples_per_second": 21.304,
1121
+ "eval_steps_per_second": 21.304,
1122
+ "step": 143
1123
+ },
1124
+ {
1125
+ "epoch": 1.3652322050218502,
1126
+ "grad_norm": 0.058837890625,
1127
+ "learning_rate": 0.00011894044869622403,
1128
+ "loss": 0.0279,
1129
+ "step": 144
1130
+ },
1131
+ {
1132
+ "epoch": 1.374712984223391,
1133
+ "grad_norm": 0.056640625,
1134
+ "learning_rate": 0.00011792807588107357,
1135
+ "loss": 0.0247,
1136
+ "step": 145
1137
+ },
1138
+ {
1139
+ "epoch": 1.3841937634249315,
1140
+ "grad_norm": 0.06689453125,
1141
+ "learning_rate": 0.00011691380098097597,
1142
+ "loss": 0.0315,
1143
+ "step": 146
1144
+ },
1145
+ {
1146
+ "epoch": 1.393674542626472,
1147
+ "grad_norm": 0.0537109375,
1148
+ "learning_rate": 0.0001158977316057513,
1149
+ "loss": 0.0244,
1150
+ "step": 147
1151
+ },
1152
+ {
1153
+ "epoch": 1.4031553218280126,
1154
+ "grad_norm": 0.054443359375,
1155
+ "learning_rate": 0.00011487997555560503,
1156
+ "loss": 0.0292,
1157
+ "step": 148
1158
+ },
1159
+ {
1160
+ "epoch": 1.4126361010295534,
1161
+ "grad_norm": 0.064453125,
1162
+ "learning_rate": 0.00011386064080969094,
1163
+ "loss": 0.0293,
1164
+ "step": 149
1165
+ },
1166
+ {
1167
+ "epoch": 1.422116880231094,
1168
+ "grad_norm": 0.059814453125,
1169
+ "learning_rate": 0.00011283983551465511,
1170
+ "loss": 0.0254,
1171
+ "step": 150
1172
+ },
1173
+ {
1174
+ "epoch": 1.4315976594326347,
1175
+ "grad_norm": 0.06640625,
1176
+ "learning_rate": 0.0001118176679731619,
1177
+ "loss": 0.0284,
1178
+ "step": 151
1179
+ },
1180
+ {
1181
+ "epoch": 1.4410784386341753,
1182
+ "grad_norm": 0.060791015625,
1183
+ "learning_rate": 0.00011079424663240372,
1184
+ "loss": 0.0306,
1185
+ "step": 152
1186
+ },
1187
+ {
1188
+ "epoch": 1.4505592178357158,
1189
+ "grad_norm": 0.058349609375,
1190
+ "learning_rate": 0.00010976968007259519,
1191
+ "loss": 0.028,
1192
+ "step": 153
1193
+ },
1194
+ {
1195
+ "epoch": 1.4600399970372564,
1196
+ "grad_norm": 0.0537109375,
1197
+ "learning_rate": 0.00010874407699545328,
1198
+ "loss": 0.0251,
1199
+ "step": 154
1200
+ },
1201
+ {
1202
+ "epoch": 1.4600399970372564,
1203
+ "eval_loss": 0.03655948117375374,
1204
+ "eval_runtime": 33.4084,
1205
+ "eval_samples_per_second": 21.282,
1206
+ "eval_steps_per_second": 21.282,
1207
+ "step": 154
1208
+ },
1209
+ {
1210
+ "epoch": 1.4695207762387972,
1211
+ "grad_norm": 0.06298828125,
1212
+ "learning_rate": 0.00010771754621266466,
1213
+ "loss": 0.031,
1214
+ "step": 155
1215
+ },
1216
+ {
1217
+ "epoch": 1.4790015554403377,
1218
+ "grad_norm": 0.05419921875,
1219
+ "learning_rate": 0.00010669019663434117,
1220
+ "loss": 0.0262,
1221
+ "step": 156
1222
+ },
1223
+ {
1224
+ "epoch": 1.4884823346418785,
1225
+ "grad_norm": 0.06298828125,
1226
+ "learning_rate": 0.00010566213725746506,
1227
+ "loss": 0.0317,
1228
+ "step": 157
1229
+ },
1230
+ {
1231
+ "epoch": 1.497963113843419,
1232
+ "grad_norm": 0.052734375,
1233
+ "learning_rate": 0.00010463347715432488,
1234
+ "loss": 0.0255,
1235
+ "step": 158
1236
+ },
1237
+ {
1238
+ "epoch": 1.5074438930449596,
1239
+ "grad_norm": 0.058837890625,
1240
+ "learning_rate": 0.00010360432546094341,
1241
+ "loss": 0.028,
1242
+ "step": 159
1243
+ }
1244
+ ],
1245
+ "logging_steps": 1,
1246
+ "max_steps": 315,
1247
+ "num_input_tokens_seen": 0,
1248
+ "num_train_epochs": 3,
1249
+ "save_steps": 53,
1250
+ "stateful_callbacks": {
1251
+ "TrainerControl": {
1252
+ "args": {
1253
+ "should_epoch_stop": false,
1254
+ "should_evaluate": false,
1255
+ "should_log": false,
1256
+ "should_save": true,
1257
+ "should_training_stop": false
1258
+ },
1259
+ "attributes": {}
1260
+ }
1261
+ },
1262
+ "total_flos": 2.8648279883317248e+17,
1263
+ "train_batch_size": 1,
1264
+ "trial_name": null,
1265
+ "trial_params": null
1266
+ }
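trainer_state.json is plain JSON, so the training curve can be pulled straight out of `log_history`: per-step records carry `loss`, `grad_norm`, and `learning_rate`, while every 11th step (`eval_steps: 11`) adds a record with `eval_loss`; in this checkpoint the eval loss falls from 0.4264 at step 1 to roughly 0.0366 by step 154. A small extraction sketch, assuming a local download of the repo:

```python
import json

with open("checkpoint-159/trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the entries in log_history that carry "eval_loss";
# the rest are per-step training records.
evals = [(rec["step"], rec["eval_loss"])
         for rec in state["log_history"] if "eval_loss" in rec]
for step, loss in evals:
    print(f"step {step:4d}  eval_loss {loss:.4f}")
```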
checkpoint-159/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:040d0a78d4967c05b5ae4923ea1ba23193e5db226aec4c55dfa817cfdd1347a0
+ size 6008
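The binary artifacts in this commit (training_args.bin, optimizer.pt, the safetensors weights, and so on) are stored through Git LFS, so the diff shows only the three-line pointer file: the spec version, the SHA-256 of the payload, and its size in bytes (6008 bytes here). A sketch of parsing such a pointer; the helper name is ours, not part of any library:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split a Git LFS pointer file into its 'key value' fields."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:040d0a78d4967c05b5ae4923ea1ba23193e5db226aec4c55dfa817cfdd1347a0
size 6008"""
info = parse_lfs_pointer(pointer)
assert info["size_bytes"] == 6008
```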
checkpoint-159/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-212/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: Qwen/Qwen2-7B-Instruct
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-212/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen2-7B-Instruct",
+ "bias": "none",
+ "fan_in_fan_out": null,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 128,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "k_proj",
+ "up_proj",
+ "o_proj",
+ "down_proj",
+ "gate_proj",
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
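This adapter_config.json describes a standard LoRA setup: rank r=64 with lora_alpha=128 (a scaling factor of alpha/r = 2) applied to all seven attention and MLP projection matrices. A minimal loading sketch, assuming PEFT is installed and the checkpoint directory is available locally (paths are illustrative):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the frozen base model, then attach the LoRA adapter from this checkpoint.
base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2-7B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "checkpoint-212")
tokenizer = AutoTokenizer.from_pretrained("checkpoint-212")

# Optionally fold the low-rank deltas into the base weights for standalone inference.
model = model.merge_and_unload()
```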
checkpoint-212/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d45ee1c590f4437b1cffd64a90e68be6895d370a859dc6df1f23e66bd7cd50cd
+ size 323014560
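At roughly 323 MB, the safetensors file holds only the LoRA deltas (a lora_A/lora_B pair per targeted projection in each layer), not the 7B base weights. A quick inspection sketch using the safetensors library; the path is illustrative, and we assume PEFT's usual base_model.model...lora_A/lora_B tensor-naming convention:

```python
from safetensors import safe_open

# List adapter tensor names and shapes without materializing the whole file.
with safe_open("checkpoint-212/adapter_model.safetensors", framework="pt") as f:
    for name in sorted(f.keys())[:6]:
        print(name, tuple(f.get_tensor(name).shape))
```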
checkpoint-212/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
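added_tokens.json pins the ChatML control tokens to fixed IDs at the top of the vocabulary. As a sanity-check sketch (local path illustrative), the mapping should round-trip through the tokenizer and agree with the eos/pad choices in tokenizer_config.json:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-212")
assert tok.convert_tokens_to_ids("<|endoftext|>") == 151643
assert tok.convert_tokens_to_ids("<|im_start|>") == 151644
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
# eos is <|im_end|>, padding reuses <|endoftext|>, per tokenizer_config.json.
assert tok.eos_token_id == 151645 and tok.pad_token_id == 151643
```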
checkpoint-212/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-212/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ed947d8bb500c97c84e6c56b9fb6a993435bf55abbb304341f24d0058202091
+ size 1292086650
checkpoint-212/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f459e4c94d30d81b86fbd716cd83ffed98e68fb8ce9392aaa2b40c81de74ba25
+ size 14244
checkpoint-212/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a214afa28cba8a61e5d4cc1d5f61fa340ffd678157072501b9e9dc32eae8fe12
+ size 1064
checkpoint-212/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-212/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-212/tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
checkpoint-212/trainer_state.json ADDED
@@ -0,0 +1,1677 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.0099251907266127,
5
+ "eval_steps": 11,
6
+ "global_step": 212,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.009480779201540626,
13
+ "grad_norm": 1.421875,
14
+ "learning_rate": 2e-05,
15
+ "loss": 0.4503,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.009480779201540626,
20
+ "eval_loss": 0.4263582229614258,
21
+ "eval_runtime": 33.6286,
22
+ "eval_samples_per_second": 21.143,
23
+ "eval_steps_per_second": 21.143,
24
+ "step": 1
25
+ },
26
+ {
27
+ "epoch": 0.018961558403081252,
28
+ "grad_norm": 1.359375,
29
+ "learning_rate": 4e-05,
30
+ "loss": 0.412,
31
+ "step": 2
32
+ },
33
+ {
34
+ "epoch": 0.02844233760462188,
35
+ "grad_norm": 1.1875,
36
+ "learning_rate": 6e-05,
37
+ "loss": 0.4214,
38
+ "step": 3
39
+ },
40
+ {
41
+ "epoch": 0.037923116806162505,
42
+ "grad_norm": 0.80859375,
43
+ "learning_rate": 8e-05,
44
+ "loss": 0.2924,
45
+ "step": 4
46
+ },
47
+ {
48
+ "epoch": 0.04740389600770313,
49
+ "grad_norm": 0.51953125,
50
+ "learning_rate": 0.0001,
51
+ "loss": 0.1896,
52
+ "step": 5
53
+ },
54
+ {
55
+ "epoch": 0.05688467520924376,
56
+ "grad_norm": 0.4453125,
57
+ "learning_rate": 0.00012,
58
+ "loss": 0.1531,
59
+ "step": 6
60
+ },
61
+ {
62
+ "epoch": 0.06636545441078438,
63
+ "grad_norm": 0.27734375,
64
+ "learning_rate": 0.00014,
65
+ "loss": 0.1181,
66
+ "step": 7
67
+ },
68
+ {
69
+ "epoch": 0.07584623361232501,
70
+ "grad_norm": 0.263671875,
71
+ "learning_rate": 0.00016,
72
+ "loss": 0.1143,
73
+ "step": 8
74
+ },
75
+ {
76
+ "epoch": 0.08532701281386564,
77
+ "grad_norm": 0.2197265625,
78
+ "learning_rate": 0.00018,
79
+ "loss": 0.0952,
80
+ "step": 9
81
+ },
82
+ {
83
+ "epoch": 0.09480779201540626,
84
+ "grad_norm": 0.1708984375,
85
+ "learning_rate": 0.0002,
86
+ "loss": 0.0767,
87
+ "step": 10
88
+ },
89
+ {
90
+ "epoch": 0.10428857121694689,
91
+ "grad_norm": 0.1484375,
92
+ "learning_rate": 0.00019999469523400122,
93
+ "loss": 0.0836,
94
+ "step": 11
95
+ },
96
+ {
97
+ "epoch": 0.10428857121694689,
98
+ "eval_loss": 0.07918477058410645,
99
+ "eval_runtime": 33.4231,
100
+ "eval_samples_per_second": 21.273,
101
+ "eval_steps_per_second": 21.273,
102
+ "step": 11
103
+ },
104
+ {
105
+ "epoch": 0.11376935041848751,
106
+ "grad_norm": 0.154296875,
107
+ "learning_rate": 0.00019997878149881574,
108
+ "loss": 0.0757,
109
+ "step": 12
110
+ },
111
+ {
112
+ "epoch": 0.12325012962002814,
113
+ "grad_norm": 0.1513671875,
114
+ "learning_rate": 0.0001999522604828164,
115
+ "loss": 0.0767,
116
+ "step": 13
117
+ },
118
+ {
119
+ "epoch": 0.13273090882156877,
120
+ "grad_norm": 0.1416015625,
121
+ "learning_rate": 0.00019991513499975882,
122
+ "loss": 0.0809,
123
+ "step": 14
124
+ },
125
+ {
126
+ "epoch": 0.1422116880231094,
127
+ "grad_norm": 0.09765625,
128
+ "learning_rate": 0.00019986740898848306,
129
+ "loss": 0.0634,
130
+ "step": 15
131
+ },
132
+ {
133
+ "epoch": 0.15169246722465002,
134
+ "grad_norm": 0.099609375,
135
+ "learning_rate": 0.00019980908751249555,
136
+ "loss": 0.0674,
137
+ "step": 16
138
+ },
139
+ {
140
+ "epoch": 0.16117324642619066,
141
+ "grad_norm": 0.119140625,
142
+ "learning_rate": 0.00019974017675943192,
143
+ "loss": 0.0667,
144
+ "step": 17
145
+ },
146
+ {
147
+ "epoch": 0.17065402562773127,
148
+ "grad_norm": 0.09619140625,
149
+ "learning_rate": 0.0001996606840404006,
150
+ "loss": 0.0632,
151
+ "step": 18
152
+ },
153
+ {
154
+ "epoch": 0.1801348048292719,
155
+ "grad_norm": 0.09130859375,
156
+ "learning_rate": 0.00019957061778920701,
157
+ "loss": 0.0488,
158
+ "step": 19
159
+ },
160
+ {
161
+ "epoch": 0.18961558403081252,
162
+ "grad_norm": 0.0947265625,
163
+ "learning_rate": 0.0001994699875614589,
164
+ "loss": 0.0627,
165
+ "step": 20
166
+ },
167
+ {
168
+ "epoch": 0.19909636323235316,
169
+ "grad_norm": 0.08203125,
170
+ "learning_rate": 0.00019935880403355253,
171
+ "loss": 0.0528,
172
+ "step": 21
173
+ },
174
+ {
175
+ "epoch": 0.20857714243389378,
176
+ "grad_norm": 0.1123046875,
177
+ "learning_rate": 0.00019923707900153982,
178
+ "loss": 0.0532,
179
+ "step": 22
180
+ },
181
+ {
182
+ "epoch": 0.20857714243389378,
183
+ "eval_loss": 0.056647635996341705,
184
+ "eval_runtime": 33.3595,
185
+ "eval_samples_per_second": 21.313,
186
+ "eval_steps_per_second": 21.313,
187
+ "step": 22
188
+ },
189
+ {
190
+ "epoch": 0.21805792163543442,
191
+ "grad_norm": 0.10693359375,
192
+ "learning_rate": 0.00019910482537987702,
193
+ "loss": 0.0583,
194
+ "step": 23
195
+ },
196
+ {
197
+ "epoch": 0.22753870083697503,
198
+ "grad_norm": 0.0791015625,
199
+ "learning_rate": 0.0001989620572000544,
200
+ "loss": 0.0554,
201
+ "step": 24
202
+ },
203
+ {
204
+ "epoch": 0.23701948003851567,
205
+ "grad_norm": 0.123046875,
206
+ "learning_rate": 0.00019880878960910772,
207
+ "loss": 0.0688,
208
+ "step": 25
209
+ },
210
+ {
211
+ "epoch": 0.24650025924005628,
212
+ "grad_norm": 0.10498046875,
213
+ "learning_rate": 0.00019864503886801106,
214
+ "loss": 0.0655,
215
+ "step": 26
216
+ },
217
+ {
218
+ "epoch": 0.2559810384415969,
219
+ "grad_norm": 0.07421875,
220
+ "learning_rate": 0.00019847082234995171,
221
+ "loss": 0.0471,
222
+ "step": 27
223
+ },
224
+ {
225
+ "epoch": 0.26546181764313753,
226
+ "grad_norm": 0.080078125,
227
+ "learning_rate": 0.00019828615853848688,
228
+ "loss": 0.0518,
229
+ "step": 28
230
+ },
231
+ {
232
+ "epoch": 0.27494259684467814,
233
+ "grad_norm": 0.07421875,
234
+ "learning_rate": 0.00019809106702558277,
235
+ "loss": 0.0481,
236
+ "step": 29
237
+ },
238
+ {
239
+ "epoch": 0.2844233760462188,
240
+ "grad_norm": 0.0703125,
241
+ "learning_rate": 0.0001978855685095358,
242
+ "loss": 0.0464,
243
+ "step": 30
244
+ },
245
+ {
246
+ "epoch": 0.2939041552477594,
247
+ "grad_norm": 0.0849609375,
248
+ "learning_rate": 0.00019766968479277683,
249
+ "loss": 0.0566,
250
+ "step": 31
251
+ },
252
+ {
253
+ "epoch": 0.30338493444930004,
254
+ "grad_norm": 0.0859375,
255
+ "learning_rate": 0.00019744343877955788,
256
+ "loss": 0.0517,
257
+ "step": 32
258
+ },
259
+ {
260
+ "epoch": 0.3128657136508407,
261
+ "grad_norm": 0.07177734375,
262
+ "learning_rate": 0.00019720685447352209,
263
+ "loss": 0.0511,
264
+ "step": 33
265
+ },
266
+ {
267
+ "epoch": 0.3128657136508407,
268
+ "eval_loss": 0.04964344948530197,
269
+ "eval_runtime": 33.3741,
270
+ "eval_samples_per_second": 21.304,
271
+ "eval_steps_per_second": 21.304,
272
+ "step": 33
273
+ },
274
+ {
275
+ "epoch": 0.3223464928523813,
276
+ "grad_norm": 0.076171875,
277
+ "learning_rate": 0.0001969599569751571,
278
+ "loss": 0.045,
279
+ "step": 34
280
+ },
281
+ {
282
+ "epoch": 0.33182727205392193,
283
+ "grad_norm": 0.07568359375,
284
+ "learning_rate": 0.00019670277247913205,
285
+ "loss": 0.0543,
286
+ "step": 35
287
+ },
288
+ {
289
+ "epoch": 0.34130805125546254,
290
+ "grad_norm": 0.0791015625,
291
+ "learning_rate": 0.0001964353282715183,
292
+ "loss": 0.0444,
293
+ "step": 36
294
+ },
295
+ {
296
+ "epoch": 0.3507888304570032,
297
+ "grad_norm": 0.0771484375,
298
+ "learning_rate": 0.00019615765272689461,
299
+ "loss": 0.0506,
300
+ "step": 37
301
+ },
302
+ {
303
+ "epoch": 0.3602696096585438,
304
+ "grad_norm": 0.080078125,
305
+ "learning_rate": 0.00019586977530533677,
306
+ "loss": 0.0558,
307
+ "step": 38
308
+ },
309
+ {
310
+ "epoch": 0.36975038886008443,
311
+ "grad_norm": 0.078125,
312
+ "learning_rate": 0.00019557172654929196,
313
+ "loss": 0.0507,
314
+ "step": 39
315
+ },
316
+ {
317
+ "epoch": 0.37923116806162505,
318
+ "grad_norm": 0.06396484375,
319
+ "learning_rate": 0.00019526353808033825,
320
+ "loss": 0.0452,
321
+ "step": 40
322
+ },
323
+ {
324
+ "epoch": 0.3887119472631657,
325
+ "grad_norm": 0.07275390625,
326
+ "learning_rate": 0.00019494524259582992,
327
+ "loss": 0.0481,
328
+ "step": 41
329
+ },
330
+ {
331
+ "epoch": 0.3981927264647063,
332
+ "grad_norm": 0.06591796875,
333
+ "learning_rate": 0.00019461687386542826,
334
+ "loss": 0.0464,
335
+ "step": 42
336
+ },
337
+ {
338
+ "epoch": 0.40767350566624694,
339
+ "grad_norm": 0.06591796875,
340
+ "learning_rate": 0.00019427846672751873,
341
+ "loss": 0.0431,
342
+ "step": 43
343
+ },
344
+ {
345
+ "epoch": 0.41715428486778755,
346
+ "grad_norm": 0.06982421875,
347
+ "learning_rate": 0.00019393005708551498,
348
+ "loss": 0.0511,
349
+ "step": 44
350
+ },
351
+ {
352
+ "epoch": 0.41715428486778755,
353
+ "eval_loss": 0.04574437811970711,
354
+ "eval_runtime": 33.3514,
355
+ "eval_samples_per_second": 21.318,
356
+ "eval_steps_per_second": 21.318,
357
+ "step": 44
358
+ },
359
+ {
360
+ "epoch": 0.4266350640693282,
361
+ "grad_norm": 0.06787109375,
362
+ "learning_rate": 0.00019357168190404936,
363
+ "loss": 0.0443,
364
+ "step": 45
365
+ },
366
+ {
367
+ "epoch": 0.43611584327086883,
368
+ "grad_norm": 0.0751953125,
369
+ "learning_rate": 0.00019320337920505153,
370
+ "loss": 0.0545,
371
+ "step": 46
372
+ },
373
+ {
374
+ "epoch": 0.44559662247240944,
375
+ "grad_norm": 0.07666015625,
376
+ "learning_rate": 0.00019282518806371414,
377
+ "loss": 0.0542,
378
+ "step": 47
379
+ },
380
+ {
381
+ "epoch": 0.45507740167395005,
382
+ "grad_norm": 0.07666015625,
383
+ "learning_rate": 0.0001924371486043473,
384
+ "loss": 0.0589,
385
+ "step": 48
386
+ },
387
+ {
388
+ "epoch": 0.4645581808754907,
389
+ "grad_norm": 0.059326171875,
390
+ "learning_rate": 0.0001920393019961217,
391
+ "loss": 0.0444,
392
+ "step": 49
393
+ },
394
+ {
395
+ "epoch": 0.47403896007703133,
396
+ "grad_norm": 0.0732421875,
397
+ "learning_rate": 0.0001916316904487005,
398
+ "loss": 0.0485,
399
+ "step": 50
400
+ },
401
+ {
402
+ "epoch": 0.48351973927857195,
403
+ "grad_norm": 0.060546875,
404
+ "learning_rate": 0.00019121435720776122,
405
+ "loss": 0.0408,
406
+ "step": 51
407
+ },
408
+ {
409
+ "epoch": 0.49300051848011256,
410
+ "grad_norm": 0.068359375,
411
+ "learning_rate": 0.0001907873465504076,
412
+ "loss": 0.0466,
413
+ "step": 52
414
+ },
415
+ {
416
+ "epoch": 0.5024812976816532,
417
+ "grad_norm": 0.07763671875,
418
+ "learning_rate": 0.00019035070378047204,
419
+ "loss": 0.0426,
420
+ "step": 53
421
+ },
422
+ {
423
+ "epoch": 0.5119620768831938,
424
+ "grad_norm": 0.0654296875,
425
+ "learning_rate": 0.00018990447522370884,
426
+ "loss": 0.0407,
427
+ "step": 54
428
+ },
429
+ {
430
+ "epoch": 0.5214428560847345,
431
+ "grad_norm": 0.080078125,
432
+ "learning_rate": 0.00018944870822287956,
433
+ "loss": 0.0475,
434
+ "step": 55
435
+ },
436
+ {
437
+ "epoch": 0.5214428560847345,
438
+ "eval_loss": 0.043636418879032135,
439
+ "eval_runtime": 33.266,
440
+ "eval_samples_per_second": 21.373,
441
+ "eval_steps_per_second": 21.373,
442
+ "step": 55
443
+ },
444
+ {
445
+ "epoch": 0.5309236352862751,
446
+ "grad_norm": 0.0673828125,
447
+ "learning_rate": 0.00018898345113272998,
448
+ "loss": 0.0404,
449
+ "step": 56
450
+ },
451
+ {
452
+ "epoch": 0.5404044144878157,
453
+ "grad_norm": 0.06201171875,
454
+ "learning_rate": 0.00018850875331485995,
455
+ "loss": 0.036,
456
+ "step": 57
457
+ },
458
+ {
459
+ "epoch": 0.5498851936893563,
460
+ "grad_norm": 0.0751953125,
461
+ "learning_rate": 0.00018802466513248632,
462
+ "loss": 0.0412,
463
+ "step": 58
464
+ },
465
+ {
466
+ "epoch": 0.559365972890897,
467
+ "grad_norm": 0.068359375,
468
+ "learning_rate": 0.00018753123794509974,
469
+ "loss": 0.044,
470
+ "step": 59
471
+ },
472
+ {
473
+ "epoch": 0.5688467520924376,
474
+ "grad_norm": 0.0673828125,
475
+ "learning_rate": 0.00018702852410301554,
476
+ "loss": 0.0458,
477
+ "step": 60
478
+ },
479
+ {
480
+ "epoch": 0.5783275312939782,
481
+ "grad_norm": 0.07080078125,
482
+ "learning_rate": 0.0001865165769418196,
483
+ "loss": 0.0464,
484
+ "step": 61
485
+ },
486
+ {
487
+ "epoch": 0.5878083104955188,
488
+ "grad_norm": 0.0732421875,
489
+ "learning_rate": 0.00018599545077670985,
490
+ "loss": 0.0427,
491
+ "step": 62
492
+ },
493
+ {
494
+ "epoch": 0.5972890896970595,
495
+ "grad_norm": 0.059814453125,
496
+ "learning_rate": 0.0001854652008967335,
497
+ "loss": 0.0403,
498
+ "step": 63
499
+ },
500
+ {
501
+ "epoch": 0.6067698688986001,
502
+ "grad_norm": 0.07080078125,
503
+ "learning_rate": 0.00018492588355892124,
504
+ "loss": 0.0475,
505
+ "step": 64
506
+ },
507
+ {
508
+ "epoch": 0.6162506481001407,
509
+ "grad_norm": 0.072265625,
510
+ "learning_rate": 0.00018437755598231856,
511
+ "loss": 0.0454,
512
+ "step": 65
513
+ },
514
+ {
515
+ "epoch": 0.6257314273016814,
516
+ "grad_norm": 0.06982421875,
517
+ "learning_rate": 0.00018382027634191524,
518
+ "loss": 0.0435,
519
+ "step": 66
520
+ },
521
+ {
522
+ "epoch": 0.6257314273016814,
523
+ "eval_loss": 0.042031481862068176,
524
+ "eval_runtime": 33.3825,
525
+ "eval_samples_per_second": 21.299,
526
+ "eval_steps_per_second": 21.299,
527
+ "step": 66
528
+ },
529
+ {
530
+ "epoch": 0.635212206503222,
531
+ "grad_norm": 0.0712890625,
532
+ "learning_rate": 0.00018325410376247294,
533
+ "loss": 0.0429,
534
+ "step": 67
535
+ },
536
+ {
537
+ "epoch": 0.6446929857047626,
538
+ "grad_norm": 0.0625,
539
+ "learning_rate": 0.0001826790983122527,
540
+ "loss": 0.0402,
541
+ "step": 68
542
+ },
543
+ {
544
+ "epoch": 0.6541737649063032,
545
+ "grad_norm": 0.0673828125,
546
+ "learning_rate": 0.00018209532099664174,
547
+ "loss": 0.0437,
548
+ "step": 69
549
+ },
550
+ {
551
+ "epoch": 0.6636545441078439,
552
+ "grad_norm": 0.07568359375,
553
+ "learning_rate": 0.00018150283375168114,
554
+ "loss": 0.0442,
555
+ "step": 70
556
+ },
557
+ {
558
+ "epoch": 0.6731353233093845,
559
+ "grad_norm": 0.06982421875,
560
+ "learning_rate": 0.00018090169943749476,
561
+ "loss": 0.0462,
562
+ "step": 71
563
+ },
564
+ {
565
+ "epoch": 0.6826161025109251,
566
+ "grad_norm": 0.07568359375,
567
+ "learning_rate": 0.00018029198183161998,
568
+ "loss": 0.0578,
569
+ "step": 72
570
+ },
571
+ {
572
+ "epoch": 0.6920968817124658,
573
+ "grad_norm": 0.06494140625,
574
+ "learning_rate": 0.00017967374562224132,
575
+ "loss": 0.0443,
576
+ "step": 73
577
+ },
578
+ {
579
+ "epoch": 0.7015776609140064,
580
+ "grad_norm": 0.0703125,
581
+ "learning_rate": 0.00017904705640132718,
582
+ "loss": 0.0462,
583
+ "step": 74
584
+ },
585
+ {
586
+ "epoch": 0.711058440115547,
587
+ "grad_norm": 0.06640625,
588
+ "learning_rate": 0.00017841198065767107,
589
+ "loss": 0.0362,
590
+ "step": 75
591
+ },
592
+ {
593
+ "epoch": 0.7205392193170876,
594
+ "grad_norm": 0.06396484375,
595
+ "learning_rate": 0.00017776858576983712,
596
+ "loss": 0.0431,
597
+ "step": 76
598
+ },
599
+ {
600
+ "epoch": 0.7300199985186282,
601
+ "grad_norm": 0.0615234375,
602
+ "learning_rate": 0.0001771169399990119,
603
+ "loss": 0.0361,
604
+ "step": 77
605
+ },
606
+ {
607
+ "epoch": 0.7300199985186282,
608
+ "eval_loss": 0.040743011981248856,
609
+ "eval_runtime": 33.5015,
610
+ "eval_samples_per_second": 21.223,
611
+ "eval_steps_per_second": 21.223,
612
+ "step": 77
613
+ },
614
+ {
615
+ "epoch": 0.7395007777201689,
616
+ "grad_norm": 0.0576171875,
617
+ "learning_rate": 0.00017645711248176195,
618
+ "loss": 0.0371,
619
+ "step": 78
620
+ },
621
+ {
622
+ "epoch": 0.7489815569217095,
623
+ "grad_norm": 0.060791015625,
624
+ "learning_rate": 0.00017578917322269886,
625
+ "loss": 0.0395,
626
+ "step": 79
627
+ },
628
+ {
629
+ "epoch": 0.7584623361232501,
630
+ "grad_norm": 0.06787109375,
631
+ "learning_rate": 0.00017511319308705198,
632
+ "loss": 0.0387,
633
+ "step": 80
634
+ },
635
+ {
636
+ "epoch": 0.7679431153247908,
637
+ "grad_norm": 0.0634765625,
638
+ "learning_rate": 0.0001744292437931502,
639
+ "loss": 0.0374,
640
+ "step": 81
641
+ },
642
+ {
643
+ "epoch": 0.7774238945263314,
644
+ "grad_norm": 0.0732421875,
645
+ "learning_rate": 0.00017373739790481262,
646
+ "loss": 0.042,
647
+ "step": 82
648
+ },
649
+ {
650
+ "epoch": 0.786904673727872,
651
+ "grad_norm": 0.052734375,
652
+ "learning_rate": 0.00017303772882365016,
653
+ "loss": 0.0314,
654
+ "step": 83
655
+ },
656
+ {
657
+ "epoch": 0.7963854529294127,
658
+ "grad_norm": 0.07177734375,
659
+ "learning_rate": 0.00017233031078127788,
660
+ "loss": 0.0404,
661
+ "step": 84
662
+ },
663
+ {
664
+ "epoch": 0.8058662321309532,
665
+ "grad_norm": 0.0791015625,
666
+ "learning_rate": 0.00017161521883143934,
667
+ "loss": 0.0472,
668
+ "step": 85
669
+ },
670
+ {
671
+ "epoch": 0.8153470113324939,
672
+ "grad_norm": 0.07568359375,
673
+ "learning_rate": 0.00017089252884204377,
674
+ "loss": 0.0434,
675
+ "step": 86
676
+ },
677
+ {
678
+ "epoch": 0.8248277905340345,
679
+ "grad_norm": 0.0732421875,
680
+ "learning_rate": 0.0001701623174871168,
681
+ "loss": 0.0419,
682
+ "step": 87
683
+ },
684
+ {
685
+ "epoch": 0.8343085697355751,
686
+ "grad_norm": 0.0732421875,
687
+ "learning_rate": 0.0001694246622386658,
688
+ "loss": 0.0406,
689
+ "step": 88
690
+ },
691
+ {
692
+ "epoch": 0.8343085697355751,
693
+ "eval_loss": 0.039096854627132416,
694
+ "eval_runtime": 33.3174,
695
+ "eval_samples_per_second": 21.34,
696
+ "eval_steps_per_second": 21.34,
697
+ "step": 88
698
+ },
699
+ {
700
+ "epoch": 0.8437893489371158,
701
+ "grad_norm": 0.056640625,
702
+ "learning_rate": 0.00016867964135846043,
703
+ "loss": 0.0331,
704
+ "step": 89
705
+ },
706
+ {
707
+ "epoch": 0.8532701281386564,
708
+ "grad_norm": 0.07177734375,
709
+ "learning_rate": 0.00016792733388972932,
710
+ "loss": 0.0439,
711
+ "step": 90
712
+ },
713
+ {
714
+ "epoch": 0.862750907340197,
715
+ "grad_norm": 0.059326171875,
716
+ "learning_rate": 0.0001671678196487741,
717
+ "loss": 0.0422,
718
+ "step": 91
719
+ },
720
+ {
721
+ "epoch": 0.8722316865417377,
722
+ "grad_norm": 0.07470703125,
723
+ "learning_rate": 0.00016640117921650117,
724
+ "loss": 0.0463,
725
+ "step": 92
726
+ },
727
+ {
728
+ "epoch": 0.8817124657432783,
729
+ "grad_norm": 0.057373046875,
730
+ "learning_rate": 0.00016562749392987254,
731
+ "loss": 0.037,
732
+ "step": 93
733
+ },
734
+ {
735
+ "epoch": 0.8911932449448189,
736
+ "grad_norm": 0.057373046875,
737
+ "learning_rate": 0.0001648468458732762,
738
+ "loss": 0.0361,
739
+ "step": 94
740
+ },
741
+ {
742
+ "epoch": 0.9006740241463596,
743
+ "grad_norm": 0.061279296875,
744
+ "learning_rate": 0.00016405931786981755,
745
+ "loss": 0.039,
746
+ "step": 95
747
+ },
748
+ {
749
+ "epoch": 0.9101548033479001,
750
+ "grad_norm": 0.0634765625,
751
+ "learning_rate": 0.00016326499347253207,
752
+ "loss": 0.0393,
753
+ "step": 96
754
+ },
755
+ {
756
+ "epoch": 0.9196355825494408,
757
+ "grad_norm": 0.05712890625,
758
+ "learning_rate": 0.00016246395695552085,
759
+ "loss": 0.0376,
760
+ "step": 97
761
+ },
762
+ {
763
+ "epoch": 0.9291163617509814,
764
+ "grad_norm": 0.054931640625,
765
+ "learning_rate": 0.00016165629330500952,
766
+ "loss": 0.035,
767
+ "step": 98
768
+ },
769
+ {
770
+ "epoch": 0.938597140952522,
771
+ "grad_norm": 0.060791015625,
772
+ "learning_rate": 0.0001608420882103315,
773
+ "loss": 0.0349,
774
+ "step": 99
775
+ },
776
+ {
777
+ "epoch": 0.938597140952522,
778
+ "eval_loss": 0.03842462599277496,
779
+ "eval_runtime": 33.3873,
780
+ "eval_samples_per_second": 21.296,
781
+ "eval_steps_per_second": 21.296,
782
+ "step": 99
783
+ },
784
+ {
785
+ "epoch": 0.9480779201540627,
786
+ "grad_norm": 0.0703125,
787
+ "learning_rate": 0.00016002142805483685,
788
+ "loss": 0.0379,
789
+ "step": 100
790
+ },
791
+ {
792
+ "epoch": 0.9575586993556033,
793
+ "grad_norm": 0.06884765625,
794
+ "learning_rate": 0.0001591943999067273,
795
+ "loss": 0.0382,
796
+ "step": 101
797
+ },
798
+ {
799
+ "epoch": 0.9670394785571439,
800
+ "grad_norm": 0.060791015625,
801
+ "learning_rate": 0.00015836109150981886,
802
+ "loss": 0.0345,
803
+ "step": 102
804
+ },
805
+ {
806
+ "epoch": 0.9765202577586846,
807
+ "grad_norm": 0.0703125,
808
+ "learning_rate": 0.00015752159127423263,
809
+ "loss": 0.0399,
810
+ "step": 103
811
+ },
812
+ {
813
+ "epoch": 0.9860010369602251,
814
+ "grad_norm": 0.06787109375,
815
+ "learning_rate": 0.0001566759882670146,
816
+ "loss": 0.0358,
817
+ "step": 104
818
+ },
819
+ {
820
+ "epoch": 0.9954818161617658,
821
+ "grad_norm": 0.06982421875,
822
+ "learning_rate": 0.00015582437220268647,
823
+ "loss": 0.0408,
824
+ "step": 105
825
+ },
826
+ {
827
+ "epoch": 1.0049625953633063,
828
+ "grad_norm": 0.05908203125,
829
+ "learning_rate": 0.0001549668334337271,
830
+ "loss": 0.0321,
831
+ "step": 106
832
+ },
833
+ {
834
+ "epoch": 1.0144433745648471,
835
+ "grad_norm": 0.06298828125,
836
+ "learning_rate": 0.0001541034629409865,
837
+ "loss": 0.0313,
838
+ "step": 107
839
+ },
840
+ {
841
+ "epoch": 1.0239241537663877,
842
+ "grad_norm": 0.061279296875,
843
+ "learning_rate": 0.00015323435232403337,
844
+ "loss": 0.0318,
845
+ "step": 108
846
+ },
847
+ {
848
+ "epoch": 1.0334049329679282,
849
+ "grad_norm": 0.049072265625,
850
+ "learning_rate": 0.00015235959379143678,
851
+ "loss": 0.0274,
852
+ "step": 109
853
+ },
854
+ {
855
+ "epoch": 1.042885712169469,
856
+ "grad_norm": 0.056396484375,
857
+ "learning_rate": 0.0001514792801509831,
858
+ "loss": 0.0304,
859
+ "step": 110
860
+ },
861
+ {
862
+ "epoch": 1.042885712169469,
863
+ "eval_loss": 0.03729868680238724,
864
+ "eval_runtime": 33.6565,
865
+ "eval_samples_per_second": 21.125,
866
+ "eval_steps_per_second": 21.125,
867
+ "step": 110
868
+ },
869
+ {
870
+ "epoch": 1.0523664913710096,
871
+ "grad_norm": 0.054931640625,
872
+ "learning_rate": 0.00015059350479982965,
873
+ "loss": 0.0271,
874
+ "step": 111
875
+ },
876
+ {
877
+ "epoch": 1.0618472705725501,
878
+ "grad_norm": 0.05712890625,
879
+ "learning_rate": 0.0001497023617145958,
880
+ "loss": 0.0285,
881
+ "step": 112
882
+ },
883
+ {
884
+ "epoch": 1.071328049774091,
885
+ "grad_norm": 0.054931640625,
886
+ "learning_rate": 0.0001488059454413923,
887
+ "loss": 0.0272,
888
+ "step": 113
889
+ },
890
+ {
891
+ "epoch": 1.0808088289756315,
892
+ "grad_norm": 0.06201171875,
893
+ "learning_rate": 0.00014790435108579048,
894
+ "loss": 0.0294,
895
+ "step": 114
896
+ },
897
+ {
898
+ "epoch": 1.090289608177172,
899
+ "grad_norm": 0.05908203125,
900
+ "learning_rate": 0.000146997674302732,
901
+ "loss": 0.0273,
902
+ "step": 115
903
+ },
904
+ {
905
+ "epoch": 1.0997703873787126,
906
+ "grad_norm": 0.059326171875,
907
+ "learning_rate": 0.00014608601128638027,
908
+ "loss": 0.0279,
909
+ "step": 116
910
+ },
911
+ {
912
+ "epoch": 1.1092511665802534,
913
+ "grad_norm": 0.06298828125,
914
+ "learning_rate": 0.00014516945875991472,
915
+ "loss": 0.0327,
916
+ "step": 117
917
+ },
918
+ {
919
+ "epoch": 1.118731945781794,
920
+ "grad_norm": 0.060302734375,
921
+ "learning_rate": 0.00014424811396526892,
922
+ "loss": 0.0256,
923
+ "step": 118
924
+ },
925
+ {
926
+ "epoch": 1.1282127249833347,
927
+ "grad_norm": 0.07177734375,
928
+ "learning_rate": 0.00014332207465281364,
929
+ "loss": 0.0312,
930
+ "step": 119
931
+ },
932
+ {
933
+ "epoch": 1.1376935041848752,
934
+ "grad_norm": 0.072265625,
935
+ "learning_rate": 0.0001423914390709861,
936
+ "loss": 0.0314,
937
+ "step": 120
938
+ },
939
+ {
940
+ "epoch": 1.1471742833864158,
941
+ "grad_norm": 0.06298828125,
942
+ "learning_rate": 0.00014145630595586607,
943
+ "loss": 0.0305,
944
+ "step": 121
945
+ },
946
+ {
947
+ "epoch": 1.1471742833864158,
948
+ "eval_loss": 0.03735668212175369,
949
+ "eval_runtime": 33.3809,
950
+ "eval_samples_per_second": 21.3,
951
+ "eval_steps_per_second": 21.3,
952
+ "step": 121
953
+ },
954
+ {
955
+ "epoch": 1.1566550625879564,
956
+ "grad_norm": 0.057373046875,
957
+ "learning_rate": 0.00014051677452070065,
958
+ "loss": 0.027,
959
+ "step": 122
960
+ },
961
+ {
962
+ "epoch": 1.1661358417894971,
963
+ "grad_norm": 0.058837890625,
964
+ "learning_rate": 0.00013957294444537808,
965
+ "loss": 0.027,
966
+ "step": 123
967
+ },
968
+ {
969
+ "epoch": 1.1756166209910377,
970
+ "grad_norm": 0.06494140625,
971
+ "learning_rate": 0.0001386249158658522,
972
+ "loss": 0.0362,
973
+ "step": 124
974
+ },
975
+ {
976
+ "epoch": 1.1850974001925783,
977
+ "grad_norm": 0.060791015625,
978
+ "learning_rate": 0.00013767278936351854,
979
+ "loss": 0.0301,
980
+ "step": 125
981
+ },
982
+ {
983
+ "epoch": 1.194578179394119,
984
+ "grad_norm": 0.06201171875,
985
+ "learning_rate": 0.00013671666595454295,
986
+ "loss": 0.0241,
987
+ "step": 126
988
+ },
989
+ {
990
+ "epoch": 1.2040589585956596,
991
+ "grad_norm": 0.05859375,
992
+ "learning_rate": 0.00013575664707914448,
993
+ "loss": 0.0259,
994
+ "step": 127
995
+ },
996
+ {
997
+ "epoch": 1.2135397377972001,
998
+ "grad_norm": 0.064453125,
999
+ "learning_rate": 0.0001347928345908329,
1000
+ "loss": 0.0325,
1001
+ "step": 128
1002
+ },
1003
+ {
1004
+ "epoch": 1.223020516998741,
1005
+ "grad_norm": 0.05908203125,
1006
+ "learning_rate": 0.00013382533074560255,
1007
+ "loss": 0.0284,
1008
+ "step": 129
1009
+ },
1010
+ {
1011
+ "epoch": 1.2325012962002815,
1012
+ "grad_norm": 0.05908203125,
1013
+ "learning_rate": 0.0001328542381910835,
1014
+ "loss": 0.0267,
1015
+ "step": 130
1016
+ },
1017
+ {
1018
+ "epoch": 1.241982075401822,
1019
+ "grad_norm": 0.0546875,
1020
+ "learning_rate": 0.00013187965995565098,
1021
+ "loss": 0.0274,
1022
+ "step": 131
1023
+ },
1024
+ {
1025
+ "epoch": 1.2514628546033628,
1026
+ "grad_norm": 0.05078125,
1027
+ "learning_rate": 0.00013090169943749476,
1028
+ "loss": 0.0251,
1029
+ "step": 132
1030
+ },
1031
+ {
1032
+ "epoch": 1.2514628546033628,
1033
+ "eval_loss": 0.036460939794778824,
1034
+ "eval_runtime": 33.4627,
1035
+ "eval_samples_per_second": 21.248,
1036
+ "eval_steps_per_second": 21.248,
1037
+ "step": 132
1038
+ },
1039
+ {
1040
+ "epoch": 1.2609436338049034,
1041
+ "grad_norm": 0.0546875,
1042
+ "learning_rate": 0.00012992046039364893,
1043
+ "loss": 0.0273,
1044
+ "step": 133
1045
+ },
1046
+ {
1047
+ "epoch": 1.270424413006444,
1048
+ "grad_norm": 0.064453125,
1049
+ "learning_rate": 0.0001289360469289838,
1050
+ "loss": 0.0263,
1051
+ "step": 134
1052
+ },
1053
+ {
1054
+ "epoch": 1.2799051922079845,
1055
+ "grad_norm": 0.06298828125,
1056
+ "learning_rate": 0.00012794856348516095,
1057
+ "loss": 0.0273,
1058
+ "step": 135
1059
+ },
1060
+ {
1061
+ "epoch": 1.2893859714095253,
1062
+ "grad_norm": 0.06640625,
1063
+ "learning_rate": 0.00012695811482955227,
1064
+ "loss": 0.0277,
1065
+ "step": 136
1066
+ },
1067
+ {
1068
+ "epoch": 1.2988667506110658,
1069
+ "grad_norm": 0.057861328125,
1070
+ "learning_rate": 0.00012596480604412484,
1071
+ "loss": 0.0297,
1072
+ "step": 137
1073
+ },
1074
+ {
1075
+ "epoch": 1.3083475298126066,
1076
+ "grad_norm": 0.057861328125,
1077
+ "learning_rate": 0.000124968742514292,
1078
+ "loss": 0.023,
1079
+ "step": 138
1080
+ },
1081
+ {
1082
+ "epoch": 1.3178283090141472,
1083
+ "grad_norm": 0.06005859375,
1084
+ "learning_rate": 0.00012397002991773275,
1085
+ "loss": 0.0274,
1086
+ "step": 139
1087
+ },
1088
+ {
1089
+ "epoch": 1.3273090882156877,
1090
+ "grad_norm": 0.06591796875,
1091
+ "learning_rate": 0.0001229687742131796,
1092
+ "loss": 0.0262,
1093
+ "step": 140
1094
+ },
1095
+ {
1096
+ "epoch": 1.3367898674172283,
1097
+ "grad_norm": 0.06396484375,
1098
+ "learning_rate": 0.00012196508162917677,
1099
+ "loss": 0.0312,
1100
+ "step": 141
1101
+ },
1102
+ {
1103
+ "epoch": 1.346270646618769,
1104
+ "grad_norm": 0.0693359375,
1105
+ "learning_rate": 0.00012095905865281025,
1106
+ "loss": 0.0291,
1107
+ "step": 142
1108
+ },
1109
+ {
1110
+ "epoch": 1.3557514258203096,
1111
+ "grad_norm": 0.0625,
1112
+ "learning_rate": 0.00011995081201840956,
1113
+ "loss": 0.0288,
1114
+ "step": 143
1115
+ },
1116
+ {
1117
+ "epoch": 1.3557514258203096,
1118
+ "eval_loss": 0.036965224891901016,
1119
+ "eval_runtime": 33.3733,
1120
+ "eval_samples_per_second": 21.304,
1121
+ "eval_steps_per_second": 21.304,
1122
+ "step": 143
1123
+ },
1124
+ {
1125
+ "epoch": 1.3652322050218502,
1126
+ "grad_norm": 0.058837890625,
1127
+ "learning_rate": 0.00011894044869622403,
1128
+ "loss": 0.0279,
1129
+ "step": 144
1130
+ },
1131
+ {
1132
+ "epoch": 1.374712984223391,
1133
+ "grad_norm": 0.056640625,
1134
+ "learning_rate": 0.00011792807588107357,
1135
+ "loss": 0.0247,
1136
+ "step": 145
1137
+ },
1138
+ {
1139
+ "epoch": 1.3841937634249315,
1140
+ "grad_norm": 0.06689453125,
1141
+ "learning_rate": 0.00011691380098097597,
1142
+ "loss": 0.0315,
1143
+ "step": 146
1144
+ },
1145
+ {
1146
+ "epoch": 1.393674542626472,
1147
+ "grad_norm": 0.0537109375,
1148
+ "learning_rate": 0.0001158977316057513,
1149
+ "loss": 0.0244,
1150
+ "step": 147
1151
+ },
1152
+ {
1153
+ "epoch": 1.4031553218280126,
1154
+ "grad_norm": 0.054443359375,
1155
+ "learning_rate": 0.00011487997555560503,
1156
+ "loss": 0.0292,
1157
+ "step": 148
1158
+ },
1159
+ {
1160
+ "epoch": 1.4126361010295534,
1161
+ "grad_norm": 0.064453125,
1162
+ "learning_rate": 0.00011386064080969094,
1163
+ "loss": 0.0293,
1164
+ "step": 149
1165
+ },
1166
+ {
1167
+ "epoch": 1.422116880231094,
1168
+ "grad_norm": 0.059814453125,
1169
+ "learning_rate": 0.00011283983551465511,
1170
+ "loss": 0.0254,
1171
+ "step": 150
1172
+ },
1173
+ {
1174
+ "epoch": 1.4315976594326347,
1175
+ "grad_norm": 0.06640625,
1176
+ "learning_rate": 0.0001118176679731619,
1177
+ "loss": 0.0284,
1178
+ "step": 151
1179
+ },
1180
+ {
1181
+ "epoch": 1.4410784386341753,
1182
+ "grad_norm": 0.060791015625,
1183
+ "learning_rate": 0.00011079424663240372,
1184
+ "loss": 0.0306,
1185
+ "step": 152
1186
+ },
1187
+ {
1188
+ "epoch": 1.4505592178357158,
1189
+ "grad_norm": 0.058349609375,
1190
+ "learning_rate": 0.00010976968007259519,
1191
+ "loss": 0.028,
1192
+ "step": 153
1193
+ },
1194
+ {
1195
+ "epoch": 1.4600399970372564,
1196
+ "grad_norm": 0.0537109375,
1197
+ "learning_rate": 0.00010874407699545328,
1198
+ "loss": 0.0251,
1199
+ "step": 154
1200
+ },
1201
+ {
1202
+ "epoch": 1.4600399970372564,
1203
+ "eval_loss": 0.03655948117375374,
1204
+ "eval_runtime": 33.4084,
1205
+ "eval_samples_per_second": 21.282,
1206
+ "eval_steps_per_second": 21.282,
1207
+ "step": 154
1208
+ },
1209
+ {
1210
+ "epoch": 1.4695207762387972,
1211
+ "grad_norm": 0.06298828125,
1212
+ "learning_rate": 0.00010771754621266466,
1213
+ "loss": 0.031,
1214
+ "step": 155
1215
+ },
1216
+ {
1217
+ "epoch": 1.4790015554403377,
1218
+ "grad_norm": 0.05419921875,
1219
+ "learning_rate": 0.00010669019663434117,
1220
+ "loss": 0.0262,
1221
+ "step": 156
1222
+ },
1223
+ {
1224
+ "epoch": 1.4884823346418785,
1225
+ "grad_norm": 0.06298828125,
1226
+ "learning_rate": 0.00010566213725746506,
1227
+ "loss": 0.0317,
1228
+ "step": 157
1229
+ },
1230
+ {
1231
+ "epoch": 1.497963113843419,
1232
+ "grad_norm": 0.052734375,
1233
+ "learning_rate": 0.00010463347715432488,
1234
+ "loss": 0.0255,
1235
+ "step": 158
1236
+ },
1237
+ {
1238
+ "epoch": 1.5074438930449596,
1239
+ "grad_norm": 0.058837890625,
1240
+ "learning_rate": 0.00010360432546094341,
1241
+ "loss": 0.028,
1242
+ "step": 159
1243
+ },
1244
+ {
1245
+ "epoch": 1.5169246722465002,
1246
+ "grad_norm": 0.059814453125,
1247
+ "learning_rate": 0.00010257479136549889,
1248
+ "loss": 0.029,
1249
+ "step": 160
1250
+ },
1251
+ {
1252
+ "epoch": 1.5264054514480407,
1253
+ "grad_norm": 0.061767578125,
1254
+ "learning_rate": 0.00010154498409674051,
1255
+ "loss": 0.0299,
1256
+ "step": 161
1257
+ },
1258
+ {
1259
+ "epoch": 1.5358862306495815,
1260
+ "grad_norm": 0.06640625,
1261
+ "learning_rate": 0.00010051501291240008,
1262
+ "loss": 0.0329,
1263
+ "step": 162
1264
+ },
1265
+ {
1266
+ "epoch": 1.5453670098511223,
1267
+ "grad_norm": 0.06005859375,
1268
+ "learning_rate": 9.948498708759993e-05,
1269
+ "loss": 0.0275,
1270
+ "step": 163
1271
+ },
1272
+ {
1273
+ "epoch": 1.5548477890526629,
1274
+ "grad_norm": 0.059814453125,
1275
+ "learning_rate": 9.845501590325948e-05,
1276
+ "loss": 0.0263,
1277
+ "step": 164
1278
+ },
1279
+ {
1280
+ "epoch": 1.5643285682542034,
1281
+ "grad_norm": 0.05810546875,
1282
+ "learning_rate": 9.742520863450115e-05,
1283
+ "loss": 0.0236,
1284
+ "step": 165
1285
+ },
1286
+ {
1287
+ "epoch": 1.5643285682542034,
1288
+ "eval_loss": 0.03534528240561485,
1289
+ "eval_runtime": 33.413,
1290
+ "eval_samples_per_second": 21.279,
1291
+ "eval_steps_per_second": 21.279,
1292
+ "step": 165
1293
+ },
1294
+ {
1295
+ "epoch": 1.573809347455744,
1296
+ "grad_norm": 0.06298828125,
1297
+ "learning_rate": 9.639567453905661e-05,
1298
+ "loss": 0.0289,
1299
+ "step": 166
1300
+ },
1301
+ {
1302
+ "epoch": 1.5832901266572845,
1303
+ "grad_norm": 0.046875,
1304
+ "learning_rate": 9.536652284567513e-05,
1305
+ "loss": 0.0222,
1306
+ "step": 167
1307
+ },
1308
+ {
1309
+ "epoch": 1.5927709058588253,
1310
+ "grad_norm": 0.061767578125,
1311
+ "learning_rate": 9.433786274253495e-05,
1312
+ "loss": 0.029,
1313
+ "step": 168
1314
+ },
1315
+ {
1316
+ "epoch": 1.6022516850603659,
1317
+ "grad_norm": 0.054443359375,
1318
+ "learning_rate": 9.330980336565887e-05,
1319
+ "loss": 0.0246,
1320
+ "step": 169
1321
+ },
1322
+ {
1323
+ "epoch": 1.6117324642619066,
1324
+ "grad_norm": 0.06396484375,
1325
+ "learning_rate": 9.228245378733537e-05,
1326
+ "loss": 0.0279,
1327
+ "step": 170
1328
+ },
1329
+ {
1330
+ "epoch": 1.6212132434634472,
1331
+ "grad_norm": 0.06884765625,
1332
+ "learning_rate": 9.125592300454676e-05,
1333
+ "loss": 0.0328,
1334
+ "step": 171
1335
+ },
1336
+ {
1337
+ "epoch": 1.6306940226649878,
1338
+ "grad_norm": 0.05517578125,
1339
+ "learning_rate": 9.023031992740488e-05,
1340
+ "loss": 0.024,
1341
+ "step": 172
1342
+ },
1343
+ {
1344
+ "epoch": 1.6401748018665283,
1345
+ "grad_norm": 0.05810546875,
1346
+ "learning_rate": 8.920575336759629e-05,
1347
+ "loss": 0.0278,
1348
+ "step": 173
1349
+ },
1350
+ {
1351
+ "epoch": 1.6496555810680689,
1352
+ "grad_norm": 0.06591796875,
1353
+ "learning_rate": 8.818233202683814e-05,
1354
+ "loss": 0.0305,
1355
+ "step": 174
1356
+ },
1357
+ {
1358
+ "epoch": 1.6591363602696096,
1359
+ "grad_norm": 0.06298828125,
1360
+ "learning_rate": 8.71601644853449e-05,
1361
+ "loss": 0.028,
1362
+ "step": 175
1363
+ },
1364
+ {
1365
+ "epoch": 1.6686171394711504,
1366
+ "grad_norm": 0.0576171875,
1367
+ "learning_rate": 8.613935919030907e-05,
1368
+ "loss": 0.0266,
1369
+ "step": 176
1370
+ },
1371
+ {
1372
+ "epoch": 1.6686171394711504,
1373
+ "eval_loss": 0.03533780202269554,
1374
+ "eval_runtime": 33.4436,
1375
+ "eval_samples_per_second": 21.26,
1376
+ "eval_steps_per_second": 21.26,
1377
+ "step": 176
1378
+ },
1379
+ {
1380
+ "epoch": 1.678097918672691,
1381
+ "grad_norm": 0.057861328125,
1382
+ "learning_rate": 8.512002444439502e-05,
1383
+ "loss": 0.0258,
1384
+ "step": 177
1385
+ },
1386
+ {
1387
+ "epoch": 1.6875786978742315,
1388
+ "grad_norm": 0.0615234375,
1389
+ "learning_rate": 8.410226839424871e-05,
1390
+ "loss": 0.0292,
1391
+ "step": 178
1392
+ },
1393
+ {
1394
+ "epoch": 1.697059477075772,
1395
+ "grad_norm": 0.05712890625,
1396
+ "learning_rate": 8.308619901902406e-05,
1397
+ "loss": 0.0227,
1398
+ "step": 179
1399
+ },
1400
+ {
1401
+ "epoch": 1.7065402562773127,
1402
+ "grad_norm": 0.057373046875,
1403
+ "learning_rate": 8.207192411892646e-05,
1404
+ "loss": 0.0245,
1405
+ "step": 180
1406
+ },
1407
+ {
1408
+ "epoch": 1.7160210354788534,
1409
+ "grad_norm": 0.056884765625,
1410
+ "learning_rate": 8.1059551303776e-05,
1411
+ "loss": 0.0243,
1412
+ "step": 181
1413
+ },
1414
+ {
1415
+ "epoch": 1.7255018146803942,
1416
+ "grad_norm": 0.054443359375,
1417
+ "learning_rate": 8.004918798159045e-05,
1418
+ "loss": 0.0241,
1419
+ "step": 182
1420
+ },
1421
+ {
1422
+ "epoch": 1.7349825938819348,
1423
+ "grad_norm": 0.05615234375,
1424
+ "learning_rate": 7.904094134718976e-05,
1425
+ "loss": 0.0269,
1426
+ "step": 183
1427
+ },
1428
+ {
1429
+ "epoch": 1.7444633730834753,
1430
+ "grad_norm": 0.0712890625,
1431
+ "learning_rate": 7.803491837082324e-05,
1432
+ "loss": 0.0343,
1433
+ "step": 184
1434
+ },
1435
+ {
1436
+ "epoch": 1.7539441522850159,
1437
+ "grad_norm": 0.0712890625,
1438
+ "learning_rate": 7.703122578682046e-05,
1439
+ "loss": 0.0293,
1440
+ "step": 185
1441
+ },
1442
+ {
1443
+ "epoch": 1.7634249314865564,
1444
+ "grad_norm": 0.05419921875,
1445
+ "learning_rate": 7.602997008226726e-05,
1446
+ "loss": 0.025,
1447
+ "step": 186
1448
+ },
1449
+ {
1450
+ "epoch": 1.7729057106880972,
1451
+ "grad_norm": 0.05859375,
1452
+ "learning_rate": 7.5031257485708e-05,
1453
+ "loss": 0.0281,
1454
+ "step": 187
1455
+ },
1456
+ {
1457
+ "epoch": 1.7729057106880972,
1458
+ "eval_loss": 0.034808043390512466,
1459
+ "eval_runtime": 33.3499,
1460
+ "eval_samples_per_second": 21.319,
1461
+ "eval_steps_per_second": 21.319,
1462
+ "step": 187
1463
+ },
1464
+ {
1465
+ "epoch": 1.7823864898896378,
1466
+ "grad_norm": 0.054443359375,
1467
+ "learning_rate": 7.403519395587521e-05,
1468
+ "loss": 0.0255,
1469
+ "step": 188
1470
+ },
1471
+ {
1472
+ "epoch": 1.7918672690911785,
1473
+ "grad_norm": 0.05029296875,
1474
+ "learning_rate": 7.304188517044774e-05,
1475
+ "loss": 0.0248,
1476
+ "step": 189
1477
+ },
1478
+ {
1479
+ "epoch": 1.801348048292719,
1480
+ "grad_norm": 0.058837890625,
1481
+ "learning_rate": 7.205143651483906e-05,
1482
+ "loss": 0.0247,
1483
+ "step": 190
1484
+ },
1485
+ {
1486
+ "epoch": 1.8108288274942597,
1487
+ "grad_norm": 0.061767578125,
1488
+ "learning_rate": 7.106395307101621e-05,
1489
+ "loss": 0.0253,
1490
+ "step": 191
1491
+ },
1492
+ {
1493
+ "epoch": 1.8203096066958002,
1494
+ "grad_norm": 0.0595703125,
1495
+ "learning_rate": 7.007953960635109e-05,
1496
+ "loss": 0.0261,
1497
+ "step": 192
1498
+ },
1499
+ {
1500
+ "epoch": 1.8297903858973408,
1501
+ "grad_norm": 0.0673828125,
1502
+ "learning_rate": 6.909830056250527e-05,
1503
+ "loss": 0.0285,
1504
+ "step": 193
1505
+ },
1506
+ {
1507
+ "epoch": 1.8392711650988816,
1508
+ "grad_norm": 0.056884765625,
1509
+ "learning_rate": 6.812034004434903e-05,
1510
+ "loss": 0.0254,
1511
+ "step": 194
1512
+ },
1513
+ {
1514
+ "epoch": 1.8487519443004223,
1515
+ "grad_norm": 0.056884765625,
1516
+ "learning_rate": 6.714576180891654e-05,
1517
+ "loss": 0.0241,
1518
+ "step": 195
1519
+ },
1520
+ {
1521
+ "epoch": 1.858232723501963,
1522
+ "grad_norm": 0.056396484375,
1523
+ "learning_rate": 6.617466925439746e-05,
1524
+ "loss": 0.0239,
1525
+ "step": 196
1526
+ },
1527
+ {
1528
+ "epoch": 1.8677135027035034,
1529
+ "grad_norm": 0.05419921875,
1530
+ "learning_rate": 6.520716540916709e-05,
1531
+ "loss": 0.0252,
1532
+ "step": 197
1533
+ },
1534
+ {
1535
+ "epoch": 1.877194281905044,
1536
+ "grad_norm": 0.056884765625,
1537
+ "learning_rate": 6.424335292085553e-05,
1538
+ "loss": 0.0246,
1539
+ "step": 198
1540
+ },
1541
+ {
1542
+ "epoch": 1.877194281905044,
1543
+ "eval_loss": 0.034031398594379425,
1544
+ "eval_runtime": 33.489,
1545
+ "eval_samples_per_second": 21.231,
1546
+ "eval_steps_per_second": 21.231,
1547
+ "step": 198
1548
+ },
1549
+ {
1550
+ "epoch": 1.8866750611065846,
1551
+ "grad_norm": 0.057373046875,
1552
+ "learning_rate": 6.32833340454571e-05,
1553
+ "loss": 0.0261,
1554
+ "step": 199
1555
+ },
1556
+ {
1557
+ "epoch": 1.8961558403081253,
1558
+ "grad_norm": 0.0576171875,
1559
+ "learning_rate": 6.232721063648148e-05,
1560
+ "loss": 0.0261,
1561
+ "step": 200
1562
+ },
1563
+ {
1564
+ "epoch": 1.905636619509666,
1565
+ "grad_norm": 0.059814453125,
1566
+ "learning_rate": 6.137508413414784e-05,
1567
+ "loss": 0.0276,
1568
+ "step": 201
1569
+ },
1570
+ {
1571
+ "epoch": 1.9151173987112067,
1572
+ "grad_norm": 0.056640625,
1573
+ "learning_rate": 6.0427055554621913e-05,
1574
+ "loss": 0.0262,
1575
+ "step": 202
1576
+ },
1577
+ {
1578
+ "epoch": 1.9245981779127472,
1579
+ "grad_norm": 0.0537109375,
1580
+ "learning_rate": 5.948322547929939e-05,
1581
+ "loss": 0.023,
1582
+ "step": 203
1583
+ },
1584
+ {
1585
+ "epoch": 1.9340789571142878,
1586
+ "grad_norm": 0.055908203125,
1587
+ "learning_rate": 5.854369404413398e-05,
1588
+ "loss": 0.0199,
1589
+ "step": 204
1590
+ },
1591
+ {
1592
+ "epoch": 1.9435597363158283,
1593
+ "grad_norm": 0.058837890625,
1594
+ "learning_rate": 5.7608560929013946e-05,
1595
+ "loss": 0.0253,
1596
+ "step": 205
1597
+ },
1598
+ {
1599
+ "epoch": 1.9530405155173691,
1600
+ "grad_norm": 0.0654296875,
1601
+ "learning_rate": 5.667792534718639e-05,
1602
+ "loss": 0.0295,
1603
+ "step": 206
1604
+ },
1605
+ {
1606
+ "epoch": 1.9625212947189097,
1607
+ "grad_norm": 0.05810546875,
1608
+ "learning_rate": 5.5751886034731115e-05,
1609
+ "loss": 0.0257,
1610
+ "step": 207
1611
+ },
1612
+ {
1613
+ "epoch": 1.9720020739204505,
1614
+ "grad_norm": 0.05126953125,
1615
+ "learning_rate": 5.483054124008528e-05,
1616
+ "loss": 0.023,
1617
+ "step": 208
1618
+ },
1619
+ {
1620
+ "epoch": 1.981482853121991,
1621
+ "grad_norm": 0.056640625,
1622
+ "learning_rate": 5.391398871361972e-05,
1623
+ "loss": 0.0249,
1624
+ "step": 209
1625
+ },
1626
+ {
1627
+ "epoch": 1.981482853121991,
1628
+ "eval_loss": 0.03389376401901245,
1629
+ "eval_runtime": 33.4678,
1630
+ "eval_samples_per_second": 21.244,
1631
+ "eval_steps_per_second": 21.244,
1632
+ "step": 209
1633
+ },
1634
+ {
1635
+ "epoch": 1.9909636323235316,
1636
+ "grad_norm": 0.06298828125,
1637
+ "learning_rate": 5.300232569726804e-05,
1638
+ "loss": 0.0261,
1639
+ "step": 210
1640
+ },
1641
+ {
1642
+ "epoch": 2.000444411525072,
1643
+ "grad_norm": 0.0654296875,
1644
+ "learning_rate": 5.2095648914209525e-05,
1645
+ "loss": 0.0322,
1646
+ "step": 211
1647
+ },
1648
+ {
1649
+ "epoch": 2.0099251907266127,
1650
+ "grad_norm": 0.046630859375,
1651
+ "learning_rate": 5.119405455860772e-05,
1652
+ "loss": 0.0193,
1653
+ "step": 212
1654
+ }
1655
+ ],
1656
+ "logging_steps": 1,
1657
+ "max_steps": 315,
1658
+ "num_input_tokens_seen": 0,
1659
+ "num_train_epochs": 3,
1660
+ "save_steps": 53,
1661
+ "stateful_callbacks": {
1662
+ "TrainerControl": {
1663
+ "args": {
1664
+ "should_epoch_stop": false,
1665
+ "should_evaluate": false,
1666
+ "should_log": false,
1667
+ "should_save": true,
1668
+ "should_training_stop": false
1669
+ },
1670
+ "attributes": {}
1671
+ }
1672
+ },
1673
+ "total_flos": 3.813883143271219e+17,
1674
+ "train_batch_size": 1,
1675
+ "trial_name": null,
1676
+ "trial_params": null
1677
+ }
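
The `trainer_state.json` shown above mixes per-step training entries with periodic evaluation records (the objects carrying an `eval_loss` key, logged every 11 steps here). For anyone who downloads a checkpoint and wants the eval-loss curve, a minimal sketch follows; the checkpoint path is illustrative, and it assumes the entries sit under the standard Hugging Face Trainer `log_history` key.

```python
import json

# Hypothetical local path to one of the checkpoints in this upload.
with open("checkpoint-212/trainer_state.json") as f:
    state = json.load(f)

# Keep only evaluation records; training steps lack the "eval_loss" key.
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals:
    print(f"step {step:4d}  eval_loss {loss:.4f}")
```
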
checkpoint-212/training_args.bin ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:040d0a78d4967c05b5ae4923ea1ba23193e5db226aec4c55dfa817cfdd1347a0
size 6008
checkpoint-212/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-265/README.md ADDED
---
base_model: Qwen/Qwen2-7B-Instruct
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->


## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->


- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]
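
In the meantime, since this repository ships LoRA adapter weights for `Qwen/Qwen2-7B-Instruct` (see `adapter_config.json` and the PEFT framework version at the end of this card), a minimal loading sketch might look like the following. The adapter directory name is illustrative; point it at whichever checkpoint you download (e.g. `checkpoint-265`).

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "Qwen/Qwen2-7B-Instruct"

# Load the frozen base model and tokenizer, then attach the LoRA adapter.
tokenizer = AutoTokenizer.from_pretrained(BASE)
base_model = AutoModelForCausalLM.from_pretrained(BASE, device_map="auto")
model = PeftModel.from_pretrained(base_model, "checkpoint-265")  # hypothetical local path

prompt = "Write a Python function that returns the n-th Fibonacci number."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
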

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary


## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.11.1
checkpoint-265/adapter_config.json ADDED
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "Qwen/Qwen2-7B-Instruct",
  "bias": "none",
  "fan_in_fan_out": null,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 128,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 64,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "k_proj",
    "up_proj",
    "o_proj",
    "down_proj",
    "gate_proj",
    "v_proj",
    "q_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
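
For anyone reproducing this setup, the same adapter configuration can be expressed programmatically with PEFT's `LoraConfig`; a minimal sketch, with every field taken from the JSON above:

```python
from peft import LoraConfig

# Mirrors checkpoint-265/adapter_config.json: rank-64 LoRA with alpha 128 and
# dropout 0.05, applied to every attention and MLP projection in Qwen2.
lora_config = LoraConfig(
    r=64,
    lora_alpha=128,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)
```
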
checkpoint-265/adapter_model.safetensors ADDED
version https://git-lfs.github.com/spec/v1
oid sha256:0ba7800a5b2065f8d7099205e1f3d35666270512a83fcb76caad52c9c31edff2
size 323014560
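
These three lines are a Git LFS pointer, not the weights themselves: `oid` is the SHA-256 of the real file's contents and `size` its byte count. Once the actual safetensors file has been materialized (e.g. after `git lfs pull`), a quick integrity check is possible; a minimal sketch:

```python
import hashlib
import os

path = "checkpoint-265/adapter_model.safetensors"  # local path after `git lfs pull`

# Stream the file in 1 MiB chunks so a multi-hundred-MB checkpoint
# never has to sit in memory at once.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == 323014560
assert digest.hexdigest() == "0ba7800a5b2065f8d7099205e1f3d35666270512a83fcb76caad52c9c31edff2"
```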