svjack committed on
Commit 69a9d9d
1 Parent(s): 390991c

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. README.md +59 -0
  2. adapter_config.json +29 -0
  3. adapter_model.safetensors +3 -0
  4. added_tokens.json +5 -0
  5. all_results.json +8 -0
  6. checkpoint-100/README.md +202 -0
  7. checkpoint-100/adapter_config.json +29 -0
  8. checkpoint-100/adapter_model.safetensors +3 -0
  9. checkpoint-100/added_tokens.json +5 -0
  10. checkpoint-100/merges.txt +0 -0
  11. checkpoint-100/optimizer.pt +3 -0
  12. checkpoint-100/rng_state.pth +3 -0
  13. checkpoint-100/scheduler.pt +3 -0
  14. checkpoint-100/special_tokens_map.json +20 -0
  15. checkpoint-100/tokenizer.json +0 -0
  16. checkpoint-100/tokenizer_config.json +44 -0
  17. checkpoint-100/trainer_state.json +173 -0
  18. checkpoint-100/training_args.bin +3 -0
  19. checkpoint-100/vocab.json +0 -0
  20. checkpoint-1000/README.md +202 -0
  21. checkpoint-1000/adapter_config.json +29 -0
  22. checkpoint-1000/adapter_model.safetensors +3 -0
  23. checkpoint-1000/added_tokens.json +5 -0
  24. checkpoint-1000/merges.txt +0 -0
  25. checkpoint-1000/optimizer.pt +3 -0
  26. checkpoint-1000/rng_state.pth +3 -0
  27. checkpoint-1000/scheduler.pt +3 -0
  28. checkpoint-1000/special_tokens_map.json +20 -0
  29. checkpoint-1000/tokenizer.json +0 -0
  30. checkpoint-1000/tokenizer_config.json +44 -0
  31. checkpoint-1000/trainer_state.json +1433 -0
  32. checkpoint-1000/training_args.bin +3 -0
  33. checkpoint-1000/vocab.json +0 -0
  34. checkpoint-1100/README.md +202 -0
  35. checkpoint-1100/adapter_config.json +29 -0
  36. checkpoint-1100/adapter_model.safetensors +3 -0
  37. checkpoint-1100/added_tokens.json +5 -0
  38. checkpoint-1100/merges.txt +0 -0
  39. checkpoint-1100/optimizer.pt +3 -0
  40. checkpoint-1100/rng_state.pth +3 -0
  41. checkpoint-1100/scheduler.pt +3 -0
  42. checkpoint-1100/special_tokens_map.json +20 -0
  43. checkpoint-1100/tokenizer.json +0 -0
  44. checkpoint-1100/tokenizer_config.json +44 -0
  45. checkpoint-1100/trainer_state.json +1573 -0
  46. checkpoint-1100/training_args.bin +3 -0
  47. checkpoint-1100/vocab.json +0 -0
  48. checkpoint-1200/README.md +202 -0
  49. checkpoint-1200/adapter_config.json +29 -0
  50. checkpoint-1200/adapter_model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: other
+ library_name: peft
+ tags:
+ - llama-factory
+ - lora
+ - generated_from_trainer
+ base_model: Qwen/Qwen1.5-7B-Chat
+ model-index:
+ - name: train_2024-05-23-01-40-51
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # train_2024-05-23-01-40-51
+
+ This model is a fine-tuned version of [Qwen/Qwen1.5-7B-Chat](https://huggingface.co/Qwen/Qwen1.5-7B-Chat) on the instruction_genshin_impact_roleplay, the genshin_impact_background and the sharegpt_genshin_impact_roleplay datasets.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 4
+ - eval_batch_size: 8
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - num_epochs: 5.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.11.1
+ - Transformers 4.41.0
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
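
The card above describes a LoRA adapter for Qwen/Qwen1.5-7B-Chat trained with LLaMA-Factory. A minimal sketch of how such an adapter is typically loaded with PEFT; the repository id below is a placeholder assumption, not something confirmed by this commit:

```python
# Sketch only: load this LoRA adapter on top of its base model with PEFT.
# "svjack/<this-repo>" is a placeholder; substitute the actual Hub repo id or a local path.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B-Chat", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B-Chat")
model = PeftModel.from_pretrained(base, "svjack/<this-repo>")  # placeholder id
model.eval()
```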
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen1.5-7B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
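
For reference, the config above maps onto a peft LoraConfig roughly as follows (a sketch, not code from this commit). Note the effective LoRA scaling is lora_alpha / r = 32 / 16 = 2.0:

```python
from peft import LoraConfig

# Sketch reconstructing the adapter_config.json above as a LoraConfig.
lora_config = LoraConfig(
    r=16,                                 # LoRA rank
    lora_alpha=32,                        # effective scaling = 32 / 16 = 2.0
    lora_dropout=0.0,
    bias="none",
    target_modules=["q_proj", "v_proj"],  # only attention query/value projections
    task_type="CAUSAL_LM",
)
```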
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73f45227ed90cce5f698ccbf5105931974935bbdbfb1f4ced8f047888909f72b
+ size 33571624
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 5.0,
+   "total_flos": 1.3943478340182344e+18,
+   "train_loss": 2.0002955956245536,
+   "train_runtime": 31085.0395,
+   "train_samples_per_second": 1.379,
+   "train_steps_per_second": 0.043
+ }
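
The throughput figures above are mutually consistent: with the effective batch size of 32 from the README (train_batch_size 4 × gradient_accumulation_steps 8), samples per second should be roughly steps per second times 32. A quick check:

```python
# Quick consistency check on all_results.json (values copied from above).
train_steps_per_second = 0.043
effective_batch_size = 4 * 8  # train_batch_size * gradient_accumulation_steps
print(train_steps_per_second * effective_batch_size)  # ~1.376 vs. reported 1.379
```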
checkpoint-100/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ library_name: peft
+ base_model: Qwen/Qwen1.5-7B-Chat
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-100/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen1.5-7B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
checkpoint-100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9bd53017d8c3d55ef68bbc716cb38bc86122539490076a84bd28072adec894e5
+ size 33571624
checkpoint-100/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d53e7a8914730bf3c32e3ab6e3be6c5a692143615524e20eb24a4e9820fde90e
+ size 67217018
checkpoint-100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:386fcc8cc1089aade9450d86fb239ea3483f455fd2d78d8378645feecfec9d69
+ size 14244
checkpoint-100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fad61784a1b4ca36d371827e35da5e2212d09345fd119ba0c0b03a4c78e9fa80
+ size 1064
checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-100/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
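
The chat_template above is the ChatML format used by Qwen1.5, with a default system prompt. A sketch of how it renders a conversation; the checkpoint path is an assumption about where these files live locally:

```python
from transformers import AutoTokenizer

# Sketch: render a prompt with the ChatML chat_template defined above.
tok = AutoTokenizer.from_pretrained("checkpoint-100")  # assumed local path
prompt = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
)
# Note: this template already appends '<|im_start|>assistant\n' after each user turn, so
# prompt == "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
#           "<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n"
```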
checkpoint-100/trainer_state.json ADDED
@@ -0,0 +1,173 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.373134328358209,
+   "eval_steps": 500,
+   "global_step": 100,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.018656716417910446,
+       "grad_norm": 2.403158187866211,
+       "learning_rate": 4.9998282347929784e-05,
+       "loss": 3.3875,
+       "step": 5
+     },
+     {
+       "epoch": 0.03731343283582089,
+       "grad_norm": 2.301710367202759,
+       "learning_rate": 4.99931296277454e-05,
+       "loss": 2.9015,
+       "step": 10
+     },
+     {
+       "epoch": 0.055970149253731345,
+       "grad_norm": 1.271048665046692,
+       "learning_rate": 4.998454254749331e-05,
+       "loss": 2.6229,
+       "step": 15
+     },
+     {
+       "epoch": 0.07462686567164178,
+       "grad_norm": 1.069893717765808,
+       "learning_rate": 4.997252228714279e-05,
+       "loss": 2.3704,
+       "step": 20
+     },
+     {
+       "epoch": 0.09328358208955224,
+       "grad_norm": 0.9044906497001648,
+       "learning_rate": 4.9957070498423854e-05,
+       "loss": 2.3782,
+       "step": 25
+     },
+     {
+       "epoch": 0.11194029850746269,
+       "grad_norm": 0.9635376334190369,
+       "learning_rate": 4.993818930460026e-05,
+       "loss": 2.3576,
+       "step": 30
+     },
+     {
+       "epoch": 0.13059701492537312,
+       "grad_norm": 0.8513979315757751,
+       "learning_rate": 4.9915881300177725e-05,
+       "loss": 2.4603,
+       "step": 35
+     },
+     {
+       "epoch": 0.14925373134328357,
+       "grad_norm": 0.845267117023468,
+       "learning_rate": 4.9890149550547454e-05,
+       "loss": 2.2033,
+       "step": 40
+     },
+     {
+       "epoch": 0.16791044776119404,
+       "grad_norm": 0.6632418036460876,
+       "learning_rate": 4.98609975915649e-05,
+       "loss": 2.1851,
+       "step": 45
+     },
+     {
+       "epoch": 0.1865671641791045,
+       "grad_norm": 0.6857479810714722,
+       "learning_rate": 4.982842942906386e-05,
+       "loss": 2.3592,
+       "step": 50
+     },
+     {
+       "epoch": 0.20522388059701493,
+       "grad_norm": 0.7204287648200989,
+       "learning_rate": 4.979244953830608e-05,
+       "loss": 2.1323,
+       "step": 55
+     },
+     {
+       "epoch": 0.22388059701492538,
+       "grad_norm": 0.6864420175552368,
+       "learning_rate": 4.9753062863366276e-05,
+       "loss": 2.2138,
+       "step": 60
+     },
+     {
+       "epoch": 0.24253731343283583,
+       "grad_norm": 0.7536088228225708,
+       "learning_rate": 4.971027481645274e-05,
+       "loss": 2.2584,
+       "step": 65
+     },
+     {
+       "epoch": 0.26119402985074625,
+       "grad_norm": 0.9708526134490967,
+       "learning_rate": 4.966409127716367e-05,
+       "loss": 2.2669,
+       "step": 70
+     },
+     {
+       "epoch": 0.2798507462686567,
+       "grad_norm": 0.7516190409660339,
+       "learning_rate": 4.96145185916792e-05,
+       "loss": 2.2133,
+       "step": 75
+     },
+     {
+       "epoch": 0.29850746268656714,
+       "grad_norm": 0.7864778637886047,
+       "learning_rate": 4.95615635718894e-05,
+       "loss": 2.1683,
+       "step": 80
+     },
+     {
+       "epoch": 0.31716417910447764,
+       "grad_norm": 0.7846741080284119,
+       "learning_rate": 4.950523349445824e-05,
+       "loss": 2.1274,
+       "step": 85
+     },
+     {
+       "epoch": 0.3358208955223881,
+       "grad_norm": 0.816838800907135,
+       "learning_rate": 4.944553609982363e-05,
+       "loss": 2.2033,
+       "step": 90
+     },
+     {
+       "epoch": 0.35447761194029853,
+       "grad_norm": 0.7661916017532349,
+       "learning_rate": 4.938247959113386e-05,
+       "loss": 2.1492,
+       "step": 95
+     },
+     {
+       "epoch": 0.373134328358209,
+       "grad_norm": 0.8964986205101013,
+       "learning_rate": 4.931607263312032e-05,
+       "loss": 2.0862,
+       "step": 100
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 1340,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.0457337537390182e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
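
trainer_state.json files like the one above carry the full logged loss curve. A short sketch for extracting it, assuming the checkpoint directory is available locally:

```python
import json

# Sketch: extract (step, loss) pairs from a trainer_state.json such as the one above.
with open("checkpoint-100/trainer_state.json") as f:  # assumed local path
    state = json.load(f)

pairs = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(pairs[:3])  # [(5, 3.3875), (10, 2.9015), (15, 2.6229)]
```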
checkpoint-100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67f11670c2c9e329be00355fdb1e7f6d48dffafc103eefbd6bc3474fa9f6e67c
+ size 5304
checkpoint-100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ library_name: peft
+ base_model: Qwen/Qwen1.5-7B-Chat
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-1000/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "Qwen/Qwen1.5-7B-Chat",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
checkpoint-1000/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51d383fd956926e1b33d0726a0064a2413bf995aa290c3c0d33ea86819c1c24a
+ size 33571624
checkpoint-1000/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
checkpoint-1000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d2591d45c8b37d47b5754133b1fe510643011091cd40ee4f06b37d77864e162
+ size 67217018
checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d138cfe3a4adf21f048848ee35837c9a757a0a3616ff7adbb45b69aac247435
+ size 14244
checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20c5b035af9e2c5c7974db41c58d92df9a9680679afb92eb6393974f9ff739a1
+ size 1064
checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,1433 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.7313432835820897,
+   "eval_steps": 500,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.018656716417910446,
+       "grad_norm": 2.403158187866211,
+       "learning_rate": 4.9998282347929784e-05,
+       "loss": 3.3875,
+       "step": 5
+     },
+     {
+       "epoch": 0.03731343283582089,
+       "grad_norm": 2.301710367202759,
+       "learning_rate": 4.99931296277454e-05,
+       "loss": 2.9015,
+       "step": 10
+     },
+     {
+       "epoch": 0.055970149253731345,
+       "grad_norm": 1.271048665046692,
+       "learning_rate": 4.998454254749331e-05,
+       "loss": 2.6229,
+       "step": 15
+     },
+     {
+       "epoch": 0.07462686567164178,
+       "grad_norm": 1.069893717765808,
+       "learning_rate": 4.997252228714279e-05,
+       "loss": 2.3704,
+       "step": 20
+     },
+     {
+       "epoch": 0.09328358208955224,
+       "grad_norm": 0.9044906497001648,
+       "learning_rate": 4.9957070498423854e-05,
+       "loss": 2.3782,
+       "step": 25
+     },
+     {
+       "epoch": 0.11194029850746269,
+       "grad_norm": 0.9635376334190369,
+       "learning_rate": 4.993818930460026e-05,
+       "loss": 2.3576,
+       "step": 30
+     },
+     {
+       "epoch": 0.13059701492537312,
+       "grad_norm": 0.8513979315757751,
+       "learning_rate": 4.9915881300177725e-05,
+       "loss": 2.4603,
+       "step": 35
+     },
+     {
+       "epoch": 0.14925373134328357,
+       "grad_norm": 0.845267117023468,
+       "learning_rate": 4.9890149550547454e-05,
+       "loss": 2.2033,
+       "step": 40
+     },
+     {
+       "epoch": 0.16791044776119404,
+       "grad_norm": 0.6632418036460876,
+       "learning_rate": 4.98609975915649e-05,
+       "loss": 2.1851,
+       "step": 45
+     },
+     {
+       "epoch": 0.1865671641791045,
+       "grad_norm": 0.6857479810714722,
+       "learning_rate": 4.982842942906386e-05,
+       "loss": 2.3592,
+       "step": 50
+     },
+     {
+       "epoch": 0.20522388059701493,
+       "grad_norm": 0.7204287648200989,
+       "learning_rate": 4.979244953830608e-05,
+       "loss": 2.1323,
+       "step": 55
+     },
+     {
+       "epoch": 0.22388059701492538,
+       "grad_norm": 0.6864420175552368,
+       "learning_rate": 4.9753062863366276e-05,
+       "loss": 2.2138,
+       "step": 60
+     },
+     {
+       "epoch": 0.24253731343283583,
+       "grad_norm": 0.7536088228225708,
+       "learning_rate": 4.971027481645274e-05,
+       "loss": 2.2584,
+       "step": 65
+     },
+     {
+       "epoch": 0.26119402985074625,
+       "grad_norm": 0.9708526134490967,
+       "learning_rate": 4.966409127716367e-05,
+       "loss": 2.2669,
+       "step": 70
+     },
+     {
+       "epoch": 0.2798507462686567,
+       "grad_norm": 0.7516190409660339,
+       "learning_rate": 4.96145185916792e-05,
+       "loss": 2.2133,
+       "step": 75
+     },
+     {
+       "epoch": 0.29850746268656714,
+       "grad_norm": 0.7864778637886047,
+       "learning_rate": 4.95615635718894e-05,
+       "loss": 2.1683,
+       "step": 80
+     },
+     {
+       "epoch": 0.31716417910447764,
+       "grad_norm": 0.7846741080284119,
+       "learning_rate": 4.950523349445824e-05,
+       "loss": 2.1274,
+       "step": 85
+     },
+     {
+       "epoch": 0.3358208955223881,
+       "grad_norm": 0.816838800907135,
+       "learning_rate": 4.944553609982363e-05,
+       "loss": 2.2033,
+       "step": 90
+     },
+     {
+       "epoch": 0.35447761194029853,
+       "grad_norm": 0.7661916017532349,
+       "learning_rate": 4.938247959113386e-05,
+       "loss": 2.1492,
+       "step": 95
+     },
+     {
+       "epoch": 0.373134328358209,
+       "grad_norm": 0.8964986205101013,
+       "learning_rate": 4.931607263312032e-05,
+       "loss": 2.0862,
+       "step": 100
+     },
+     {
+       "epoch": 0.3917910447761194,
+       "grad_norm": 0.8603547215461731,
+       "learning_rate": 4.924632435090696e-05,
+       "loss": 2.1444,
+       "step": 105
+     },
+     {
+       "epoch": 0.41044776119402987,
+       "grad_norm": 0.8611045479774475,
+       "learning_rate": 4.917324432875627e-05,
+       "loss": 2.1202,
+       "step": 110
+     },
+     {
+       "epoch": 0.4291044776119403,
+       "grad_norm": 0.9499636888504028,
+       "learning_rate": 4.909684260875235e-05,
+       "loss": 2.1285,
+       "step": 115
+     },
+     {
+       "epoch": 0.44776119402985076,
+       "grad_norm": 0.8490393161773682,
+       "learning_rate": 4.9017129689421e-05,
+       "loss": 2.236,
+       "step": 120
+     },
+     {
+       "epoch": 0.4664179104477612,
+       "grad_norm": 0.9628555178642273,
+       "learning_rate": 4.893411652428712e-05,
+       "loss": 2.1219,
+       "step": 125
+     },
+     {
+       "epoch": 0.48507462686567165,
+       "grad_norm": 1.1119599342346191,
+       "learning_rate": 4.8847814520369475e-05,
+       "loss": 2.2537,
+       "step": 130
+     },
+     {
+       "epoch": 0.503731343283582,
+       "grad_norm": 0.9489665627479553,
+       "learning_rate": 4.875823553661334e-05,
+       "loss": 2.1018,
+       "step": 135
+     },
+     {
+       "epoch": 0.5223880597014925,
+       "grad_norm": 0.9434083700180054,
+       "learning_rate": 4.8665391882260856e-05,
+       "loss": 2.0809,
+       "step": 140
+     },
+     {
+       "epoch": 0.5410447761194029,
+       "grad_norm": 0.8856557607650757,
+       "learning_rate": 4.856929631515964e-05,
+       "loss": 2.0807,
+       "step": 145
+     },
+     {
+       "epoch": 0.5597014925373134,
+       "grad_norm": 0.8770031929016113,
+       "learning_rate": 4.846996204000967e-05,
+       "loss": 2.0843,
+       "step": 150
+     },
+     {
+       "epoch": 0.5783582089552238,
+       "grad_norm": 0.8374930620193481,
+       "learning_rate": 4.8367402706548805e-05,
+       "loss": 2.1869,
+       "step": 155
+     },
+     {
+       "epoch": 0.5970149253731343,
+       "grad_norm": 1.0829132795333862,
+       "learning_rate": 4.8261632407677174e-05,
+       "loss": 2.028,
+       "step": 160
+     },
+     {
+       "epoch": 0.6156716417910447,
+       "grad_norm": 0.9735206365585327,
+       "learning_rate": 4.815266567752059e-05,
+       "loss": 2.0966,
+       "step": 165
+     },
+     {
+       "epoch": 0.6343283582089553,
+       "grad_norm": 1.087944746017456,
+       "learning_rate": 4.804051748943343e-05,
+       "loss": 2.0863,
+       "step": 170
+     },
+     {
+       "epoch": 0.6529850746268657,
+       "grad_norm": 0.8176729083061218,
+       "learning_rate": 4.792520325394111e-05,
+       "loss": 2.1135,
+       "step": 175
+     },
+     {
+       "epoch": 0.6716417910447762,
+       "grad_norm": 0.9173070788383484,
+       "learning_rate": 4.780673881662242e-05,
+       "loss": 2.0564,
+       "step": 180
+     },
+     {
+       "epoch": 0.6902985074626866,
+       "grad_norm": 0.9463202953338623,
+       "learning_rate": 4.7685140455932267e-05,
+       "loss": 2.1579,
+       "step": 185
+     },
+     {
+       "epoch": 0.7089552238805971,
+       "grad_norm": 1.149950385093689,
+       "learning_rate": 4.756042488096471e-05,
+       "loss": 2.1447,
+       "step": 190
+     },
+     {
+       "epoch": 0.7276119402985075,
+       "grad_norm": 0.940965473651886,
+       "learning_rate": 4.743260922915701e-05,
+       "loss": 2.0823,
+       "step": 195
+     },
+     {
+       "epoch": 0.746268656716418,
+       "grad_norm": 0.9384671449661255,
+       "learning_rate": 4.730171106393466e-05,
+       "loss": 2.1445,
+       "step": 200
+     },
+     {
+       "epoch": 0.7649253731343284,
+       "grad_norm": 0.8937250971794128,
+       "learning_rate": 4.716774837229804e-05,
+       "loss": 2.014,
+       "step": 205
+     },
+     {
+       "epoch": 0.7835820895522388,
+       "grad_norm": 0.8928058743476868,
+       "learning_rate": 4.7030739562350713e-05,
+       "loss": 2.1882,
+       "step": 210
+     },
+     {
+       "epoch": 0.8022388059701493,
+       "grad_norm": 1.0239906311035156,
+       "learning_rate": 4.6890703460769955e-05,
+       "loss": 2.1042,
+       "step": 215
+     },
+     {
+       "epoch": 0.8208955223880597,
+       "grad_norm": 1.0555064678192139,
+       "learning_rate": 4.674765931021976e-05,
+       "loss": 2.015,
+       "step": 220
+     },
+     {
+       "epoch": 0.8395522388059702,
+       "grad_norm": 1.084709882736206,
+       "learning_rate": 4.6601626766706626e-05,
+       "loss": 2.0603,
+       "step": 225
+     },
+     {
+       "epoch": 0.8582089552238806,
+       "grad_norm": 0.9265861511230469,
+       "learning_rate": 4.645262589687861e-05,
+       "loss": 2.1006,
+       "step": 230
+     },
+     {
+       "epoch": 0.8768656716417911,
+       "grad_norm": 1.0058296918869019,
+       "learning_rate": 4.6300677175267914e-05,
+       "loss": 2.063,
+       "step": 235
+     },
+     {
+       "epoch": 0.8955223880597015,
+       "grad_norm": 1.0766576528549194,
+       "learning_rate": 4.614580148147744e-05,
+       "loss": 2.0781,
+       "step": 240
+     },
+     {
+       "epoch": 0.914179104477612,
+       "grad_norm": 1.0215730667114258,
+       "learning_rate": 4.598802009731167e-05,
+       "loss": 2.1774,
+       "step": 245
+     },
+     {
+       "epoch": 0.9328358208955224,
+       "grad_norm": 0.9870419502258301,
+       "learning_rate": 4.582735470385229e-05,
+       "loss": 1.9636,
+       "step": 250
+     },
+     {
+       "epoch": 0.9514925373134329,
+       "grad_norm": 1.1921675205230713,
+       "learning_rate": 4.5663827378478975e-05,
+       "loss": 2.0141,
+       "step": 255
+     },
+     {
+       "epoch": 0.9701492537313433,
+       "grad_norm": 1.0618964433670044,
+       "learning_rate": 4.5497460591835615e-05,
+       "loss": 2.0508,
+       "step": 260
+     },
+     {
+       "epoch": 0.9888059701492538,
+       "grad_norm": 0.9723111391067505,
+       "learning_rate": 4.532827720474268e-05,
+       "loss": 2.0312,
+       "step": 265
+     },
+     {
+       "epoch": 1.007462686567164,
+       "grad_norm": 0.9339023232460022,
+       "learning_rate": 4.515630046505575e-05,
+       "loss": 2.1107,
+       "step": 270
+     },
+     {
+       "epoch": 1.0261194029850746,
+       "grad_norm": 1.0588074922561646,
+       "learning_rate": 4.498155400447107e-05,
+       "loss": 2.0963,
+       "step": 275
+     },
+     {
+       "epoch": 1.044776119402985,
+       "grad_norm": 1.0709750652313232,
+       "learning_rate": 4.480406183527823e-05,
+       "loss": 2.0359,
+       "step": 280
+     },
+     {
+       "epoch": 1.0634328358208955,
+       "grad_norm": 1.2172249555587769,
+       "learning_rate": 4.462384834706058e-05,
+       "loss": 2.1083,
+       "step": 285
+     },
+     {
+       "epoch": 1.0820895522388059,
+       "grad_norm": 1.1719626188278198,
+       "learning_rate": 4.4440938303343804e-05,
+       "loss": 2.1259,
+       "step": 290
+     },
+     {
+       "epoch": 1.1007462686567164,
+       "grad_norm": 1.051269292831421,
+       "learning_rate": 4.425535683819312e-05,
+       "loss": 2.0901,
+       "step": 295
+     },
+     {
+       "epoch": 1.1194029850746268,
+       "grad_norm": 1.3167760372161865,
+       "learning_rate": 4.406712945275955e-05,
+       "loss": 2.0032,
+       "step": 300
+     },
+     {
+       "epoch": 1.1380597014925373,
+       "grad_norm": 1.2565367221832275,
+       "learning_rate": 4.387628201177577e-05,
+       "loss": 2.0148,
+       "step": 305
+     },
+     {
+       "epoch": 1.1567164179104479,
+       "grad_norm": 1.1141688823699951,
+       "learning_rate": 4.368284074000193e-05,
+       "loss": 2.0217,
+       "step": 310
+     },
+     {
+       "epoch": 1.1753731343283582,
+       "grad_norm": 1.1642612218856812,
+       "learning_rate": 4.348683221862212e-05,
+       "loss": 2.0194,
+       "step": 315
+     },
+     {
+       "epoch": 1.1940298507462686,
+       "grad_norm": 1.1613104343414307,
+       "learning_rate": 4.328828338159173e-05,
+       "loss": 1.9371,
+       "step": 320
+     },
+     {
+       "epoch": 1.212686567164179,
+       "grad_norm": 1.2319557666778564,
+       "learning_rate": 4.3087221511936434e-05,
+       "loss": 2.0227,
+       "step": 325
+     },
+     {
+       "epoch": 1.2313432835820897,
+       "grad_norm": 1.2520420551300049,
+       "learning_rate": 4.288367423800319e-05,
+       "loss": 1.9883,
+       "step": 330
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.0452089309692383,
+       "learning_rate": 4.267766952966369e-05,
+       "loss": 1.9912,
+       "step": 335
+     },
+     {
+       "epoch": 1.2686567164179103,
+       "grad_norm": 0.9965611100196838,
+       "learning_rate": 4.2469235694471043e-05,
+       "loss": 1.983,
+       "step": 340
+     },
+     {
+       "epoch": 1.287313432835821,
+       "grad_norm": 1.0808607339859009,
+       "learning_rate": 4.225840137376993e-05,
+       "loss": 1.9514,
+       "step": 345
+     },
+     {
+       "epoch": 1.3059701492537314,
+       "grad_norm": 1.102575659751892,
+       "learning_rate": 4.204519553876095e-05,
+       "loss": 2.0286,
+       "step": 350
+     },
+     {
+       "epoch": 1.3246268656716418,
+       "grad_norm": 1.0246608257293701,
+       "learning_rate": 4.1829647486519596e-05,
+       "loss": 2.0265,
+       "step": 355
+     },
+     {
+       "epoch": 1.3432835820895521,
+       "grad_norm": 1.0723367929458618,
+       "learning_rate": 4.161178683597054e-05,
+       "loss": 2.0077,
+       "step": 360
+     },
+     {
+       "epoch": 1.3619402985074627,
+       "grad_norm": 1.4298617839813232,
+       "learning_rate": 4.139164352381758e-05,
+       "loss": 2.0898,
+       "step": 365
+     },
+     {
+       "epoch": 1.3805970149253732,
+       "grad_norm": 1.1437115669250488,
+       "learning_rate": 4.116924780042997e-05,
+       "loss": 2.024,
+       "step": 370
+     },
+     {
+       "epoch": 1.3992537313432836,
+       "grad_norm": 1.326556921005249,
+       "learning_rate": 4.094463022568569e-05,
+       "loss": 2.2252,
+       "step": 375
+     },
+     {
+       "epoch": 1.417910447761194,
+       "grad_norm": 1.2549344301223755,
+       "learning_rate": 4.071782166477213e-05,
+       "loss": 1.9777,
+       "step": 380
+     },
+     {
+       "epoch": 1.4365671641791045,
+       "grad_norm": 1.1226497888565063,
+       "learning_rate": 4.0488853283944806e-05,
+       "loss": 2.0062,
+       "step": 385
+     },
+     {
+       "epoch": 1.455223880597015,
+       "grad_norm": 1.2250981330871582,
+       "learning_rate": 4.0257756546244804e-05,
+       "loss": 1.9147,
+       "step": 390
+     },
+     {
+       "epoch": 1.4738805970149254,
+       "grad_norm": 1.3552589416503906,
+       "learning_rate": 4.0024563207175316e-05,
+       "loss": 1.9709,
+       "step": 395
+     },
+     {
+       "epoch": 1.4925373134328357,
+       "grad_norm": 1.3661599159240723,
+       "learning_rate": 3.978930531033807e-05,
+       "loss": 1.9748,
+       "step": 400
+     },
+     {
+       "epoch": 1.5111940298507462,
+       "grad_norm": 1.1794605255126953,
+       "learning_rate": 3.9552015183030136e-05,
+       "loss": 2.0367,
+       "step": 405
+     },
+     {
+       "epoch": 1.5298507462686568,
+       "grad_norm": 1.19724440574646,
+       "learning_rate": 3.93127254318018e-05,
+       "loss": 1.9545,
+       "step": 410
+     },
+     {
+       "epoch": 1.5485074626865671,
+       "grad_norm": 1.310658574104309,
+       "learning_rate": 3.907146893797599e-05,
+       "loss": 1.9933,
+       "step": 415
+     },
+     {
+       "epoch": 1.5671641791044775,
+       "grad_norm": 1.2032736539840698,
+       "learning_rate": 3.882827885312999e-05,
+       "loss": 2.0442,
+       "step": 420
+     },
+     {
+       "epoch": 1.585820895522388,
+       "grad_norm": 1.2670124769210815,
+       "learning_rate": 3.858318859454001e-05,
+       "loss": 1.974,
+       "step": 425
+     },
+     {
+       "epoch": 1.6044776119402986,
+       "grad_norm": 1.5301685333251953,
+       "learning_rate": 3.833623184058926e-05,
+       "loss": 2.0865,
+       "step": 430
+     },
+     {
+       "epoch": 1.623134328358209,
+       "grad_norm": 1.3863707780838013,
+       "learning_rate": 3.808744252614012e-05,
+       "loss": 1.9614,
+       "step": 435
+     },
+     {
+       "epoch": 1.6417910447761193,
+       "grad_norm": 1.2591431140899658,
+       "learning_rate": 3.783685483787105e-05,
+       "loss": 1.949,
+       "step": 440
+     },
+     {
+       "epoch": 1.6604477611940298,
+       "grad_norm": 1.2093037366867065,
+       "learning_rate": 3.758450320957899e-05,
+       "loss": 1.9618,
+       "step": 445
+     },
+     {
+       "epoch": 1.6791044776119404,
+       "grad_norm": 1.1593824625015259,
+       "learning_rate": 3.7330422317447685e-05,
+       "loss": 2.0124,
+       "step": 450
+     },
+     {
+       "epoch": 1.6977611940298507,
+       "grad_norm": 1.7013437747955322,
+       "learning_rate": 3.707464707528275e-05,
+       "loss": 2.0613,
+       "step": 455
+     },
+     {
+       "epoch": 1.716417910447761,
+       "grad_norm": 1.2550350427627563,
+       "learning_rate": 3.681721262971413e-05,
+       "loss": 2.1354,
+       "step": 460
+     },
+     {
+       "epoch": 1.7350746268656716,
+       "grad_norm": 1.1735903024673462,
+       "learning_rate": 3.6558154355366506e-05,
+       "loss": 1.9618,
+       "step": 465
+     },
+     {
+       "epoch": 1.7537313432835822,
+       "grad_norm": 1.331148624420166,
+       "learning_rate": 3.6297507849998344e-05,
+       "loss": 1.9245,
+       "step": 470
+     },
+     {
+       "epoch": 1.7723880597014925,
+       "grad_norm": 1.3502494096755981,
+       "learning_rate": 3.6035308929610446e-05,
+       "loss": 1.9758,
+       "step": 475
+     },
+     {
+       "epoch": 1.7910447761194028,
+       "grad_norm": 1.2406198978424072,
+       "learning_rate": 3.5771593623524265e-05,
+       "loss": 1.9824,
+       "step": 480
+     },
+     {
+       "epoch": 1.8097014925373134,
+       "grad_norm": 1.224885106086731,
+       "learning_rate": 3.550639816943111e-05,
+       "loss": 2.069,
+       "step": 485
+     },
+     {
+       "epoch": 1.828358208955224,
+       "grad_norm": 1.4666011333465576,
+       "learning_rate": 3.5239759008412666e-05,
+       "loss": 2.0797,
+       "step": 490
+     },
+     {
+       "epoch": 1.8470149253731343,
+       "grad_norm": 1.2758076190948486,
+       "learning_rate": 3.497171277993346e-05,
+       "loss": 2.0195,
+       "step": 495
+     },
+     {
+       "epoch": 1.8656716417910446,
+       "grad_norm": 1.1991291046142578,
+       "learning_rate": 3.4702296316806244e-05,
+       "loss": 1.9558,
+       "step": 500
+     },
+     {
+       "epoch": 1.8843283582089554,
+       "grad_norm": 1.2548415660858154,
+       "learning_rate": 3.443154664013067e-05,
+       "loss": 1.9805,
+       "step": 505
+     },
+     {
+       "epoch": 1.9029850746268657,
+       "grad_norm": 1.5407222509384155,
+       "learning_rate": 3.415950095420616e-05,
+       "loss": 1.9152,
+       "step": 510
+     },
+     {
+       "epoch": 1.921641791044776,
+       "grad_norm": 1.285704493522644,
+       "learning_rate": 3.3886196641419545e-05,
+       "loss": 2.0442,
+       "step": 515
+     },
+     {
+       "epoch": 1.9402985074626866,
+       "grad_norm": 1.377465844154358,
+       "learning_rate": 3.361167125710832e-05,
+       "loss": 2.0537,
+       "step": 520
+     },
+     {
+       "epoch": 1.9589552238805972,
+       "grad_norm": 1.186889410018921,
+       "learning_rate": 3.333596252440008e-05,
+       "loss": 1.9798,
+       "step": 525
+     },
+     {
+       "epoch": 1.9776119402985075,
+       "grad_norm": 1.4855142831802368,
+       "learning_rate": 3.305910832902884e-05,
+       "loss": 2.0984,
+       "step": 530
+     },
+     {
+       "epoch": 1.9962686567164178,
+       "grad_norm": 1.47159743309021,
+       "learning_rate": 3.278114671412917e-05,
+       "loss": 1.9932,
+       "step": 535
+     },
+     {
+       "epoch": 2.014925373134328,
+       "grad_norm": 1.4078409671783447,
+       "learning_rate": 3.2502115875008524e-05,
+       "loss": 1.9457,
+       "step": 540
+     },
+     {
+       "epoch": 2.033582089552239,
+       "grad_norm": 1.1386340856552124,
+       "learning_rate": 3.222205415389877e-05,
+       "loss": 1.9334,
+       "step": 545
+     },
+     {
+       "epoch": 2.0522388059701493,
+       "grad_norm": 1.666084885597229,
+       "learning_rate": 3.1941000034687515e-05,
+       "loss": 1.9716,
+       "step": 550
+     },
+     {
+       "epoch": 2.0708955223880596,
+       "grad_norm": 1.3137987852096558,
+       "learning_rate": 3.165899213762995e-05,
+       "loss": 1.9189,
+       "step": 555
+     },
+     {
+       "epoch": 2.08955223880597,
+       "grad_norm": 1.2372797727584839,
+       "learning_rate": 3.1376069214041913e-05,
+       "loss": 2.0234,
+       "step": 560
+     },
+     {
+       "epoch": 2.1082089552238807,
+       "grad_norm": 1.3149720430374146,
+       "learning_rate": 3.109227014097505e-05,
+       "loss": 2.0271,
+       "step": 565
+     },
+     {
+       "epoch": 2.126865671641791,
+       "grad_norm": 1.4162675142288208,
+       "learning_rate": 3.0807633915874584e-05,
+       "loss": 1.8236,
+       "step": 570
+     },
+     {
+       "epoch": 2.1455223880597014,
+       "grad_norm": 1.4029136896133423,
+       "learning_rate": 3.052219965122062e-05,
+       "loss": 2.1821,
+       "step": 575
+     },
+     {
+       "epoch": 2.1641791044776117,
+       "grad_norm": 1.5424753427505493,
+       "learning_rate": 3.0236006569153617e-05,
+       "loss": 1.9496,
+       "step": 580
+     },
+     {
+       "epoch": 2.1828358208955225,
+       "grad_norm": 1.274217963218689,
+       "learning_rate": 2.9949093996084747e-05,
+       "loss": 2.0439,
+       "step": 585
+     },
+     {
+       "epoch": 2.201492537313433,
+       "grad_norm": 1.2068248987197876,
+       "learning_rate": 2.9661501357292033e-05,
+       "loss": 2.0805,
+       "step": 590
+     },
+     {
+       "epoch": 2.220149253731343,
+       "grad_norm": 1.2352491617202759,
+       "learning_rate": 2.9373268171502777e-05,
+       "loss": 1.975,
+       "step": 595
+     },
+     {
+       "epoch": 2.2388059701492535,
+       "grad_norm": 1.3039956092834473,
+       "learning_rate": 2.9084434045463255e-05,
+       "loss": 1.9834,
+       "step": 600
+     },
+     {
+       "epoch": 2.2574626865671643,
+       "grad_norm": 1.3400136232376099,
+       "learning_rate": 2.8795038668496222e-05,
+       "loss": 1.84,
+       "step": 605
+     },
+     {
+       "epoch": 2.2761194029850746,
+       "grad_norm": 1.458132028579712,
+       "learning_rate": 2.850512180704715e-05,
+       "loss": 1.9518,
+       "step": 610
+     },
+     {
+       "epoch": 2.294776119402985,
+       "grad_norm": 1.446595311164856,
+       "learning_rate": 2.821472329921981e-05,
+       "loss": 1.8977,
+       "step": 615
+     },
+     {
+       "epoch": 2.3134328358208958,
+       "grad_norm": 1.432244062423706,
+       "learning_rate": 2.792388304930207e-05,
+       "loss": 1.9742,
+       "step": 620
+     },
+     {
+       "epoch": 2.332089552238806,
+       "grad_norm": 1.499017596244812,
+       "learning_rate": 2.7632641022282502e-05,
+       "loss": 1.9379,
+       "step": 625
+     },
+     {
+       "epoch": 2.3507462686567164,
+       "grad_norm": 1.6504281759262085,
+       "learning_rate": 2.7341037238358774e-05,
+       "loss": 1.9175,
+       "step": 630
+     },
+     {
+       "epoch": 2.3694029850746268,
+       "grad_norm": 1.6585911512374878,
+       "learning_rate": 2.704911176743833e-05,
+       "loss": 2.0449,
+       "step": 635
+     },
+     {
+       "epoch": 2.388059701492537,
+       "grad_norm": 1.545623779296875,
+       "learning_rate": 2.6756904723632324e-05,
+       "loss": 2.0096,
+       "step": 640
+     },
+     {
+       "epoch": 2.406716417910448,
+       "grad_norm": 1.468853235244751,
+       "learning_rate": 2.646445625974347e-05,
+       "loss": 1.878,
+       "step": 645
+     },
+     {
+       "epoch": 2.425373134328358,
+       "grad_norm": 1.3598605394363403,
+       "learning_rate": 2.6171806561748502e-05,
+       "loss": 1.9625,
+       "step": 650
+     },
+     {
+       "epoch": 2.4440298507462686,
+       "grad_norm": 1.3197077512741089,
+       "learning_rate": 2.5878995843276204e-05,
+       "loss": 1.9375,
+       "step": 655
+     },
+     {
+       "epoch": 2.4626865671641793,
+       "grad_norm": 1.5469880104064941,
+       "learning_rate": 2.5586064340081516e-05,
+       "loss": 1.8402,
+       "step": 660
+     },
+     {
+       "epoch": 2.4813432835820897,
+       "grad_norm": 1.4435440301895142,
+       "learning_rate": 2.529305230451666e-05,
+       "loss": 1.8795,
+       "step": 665
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 1.505194067955017,
+       "learning_rate": 2.5e-05,
+       "loss": 1.9938,
+       "step": 670
+     },
+     {
+       "epoch": 2.5186567164179103,
+       "grad_norm": 1.3251738548278809,
+       "learning_rate": 2.4706947695483348e-05,
+       "loss": 1.956,
+       "step": 675
+     },
+     {
+       "epoch": 2.5373134328358207,
+       "grad_norm": 1.4197183847427368,
+       "learning_rate": 2.441393565991849e-05,
+       "loss": 1.906,
+       "step": 680
+     },
+     {
+       "epoch": 2.5559701492537314,
+       "grad_norm": 1.4905989170074463,
+       "learning_rate": 2.4121004156723802e-05,
+       "loss": 1.9073,
+       "step": 685
+     },
+     {
+       "epoch": 2.574626865671642,
+       "grad_norm": 1.3891818523406982,
+       "learning_rate": 2.3828193438251497e-05,
+       "loss": 2.1399,
+       "step": 690
+     },
+     {
+       "epoch": 2.593283582089552,
+       "grad_norm": 1.6372982263565063,
+       "learning_rate": 2.3535543740256536e-05,
+       "loss": 1.873,
+       "step": 695
+     },
+     {
+       "epoch": 2.611940298507463,
+       "grad_norm": 1.5683703422546387,
+       "learning_rate": 2.3243095276367685e-05,
+       "loss": 1.8899,
+       "step": 700
+     },
+     {
+       "epoch": 2.6305970149253732,
+       "grad_norm": 1.585425615310669,
+       "learning_rate": 2.2950888232561672e-05,
+       "loss": 2.0511,
+       "step": 705
+     },
+     {
+       "epoch": 2.6492537313432836,
+       "grad_norm": 1.3682692050933838,
+       "learning_rate": 2.2658962761641232e-05,
+       "loss": 2.0364,
+       "step": 710
+     },
+     {
+       "epoch": 2.667910447761194,
+       "grad_norm": 1.7755306959152222,
+ "learning_rate": 2.23673589777175e-05,
1009
+ "loss": 2.0033,
1010
+ "step": 715
1011
+ },
1012
+ {
1013
+ "epoch": 2.6865671641791042,
1014
+ "grad_norm": 1.4118067026138306,
1015
+ "learning_rate": 2.207611695069794e-05,
1016
+ "loss": 2.102,
1017
+ "step": 720
1018
+ },
1019
+ {
1020
+ "epoch": 2.705223880597015,
1021
+ "grad_norm": 1.5786772966384888,
1022
+ "learning_rate": 2.17852767007802e-05,
1023
+ "loss": 1.9894,
1024
+ "step": 725
1025
+ },
1026
+ {
1027
+ "epoch": 2.7238805970149254,
1028
+ "grad_norm": 1.4233230352401733,
1029
+ "learning_rate": 2.1494878192952855e-05,
1030
+ "loss": 1.9355,
1031
+ "step": 730
1032
+ },
1033
+ {
1034
+ "epoch": 2.7425373134328357,
1035
+ "grad_norm": 1.5830904245376587,
1036
+ "learning_rate": 2.1204961331503787e-05,
1037
+ "loss": 1.9399,
1038
+ "step": 735
1039
+ },
1040
+ {
1041
+ "epoch": 2.7611940298507465,
1042
+ "grad_norm": 1.2974706888198853,
1043
+ "learning_rate": 2.0915565954536744e-05,
1044
+ "loss": 1.9814,
1045
+ "step": 740
1046
+ },
1047
+ {
1048
+ "epoch": 2.779850746268657,
1049
+ "grad_norm": 1.2366008758544922,
1050
+ "learning_rate": 2.0626731828497225e-05,
1051
+ "loss": 1.9275,
1052
+ "step": 745
1053
+ },
1054
+ {
1055
+ "epoch": 2.798507462686567,
1056
+ "grad_norm": 1.5165388584136963,
1057
+ "learning_rate": 2.0338498642707977e-05,
1058
+ "loss": 1.9444,
1059
+ "step": 750
1060
+ },
1061
+ {
1062
+ "epoch": 2.8171641791044775,
1063
+ "grad_norm": 1.429136037826538,
1064
+ "learning_rate": 2.005090600391526e-05,
1065
+ "loss": 1.9831,
1066
+ "step": 755
1067
+ },
1068
+ {
1069
+ "epoch": 2.835820895522388,
1070
+ "grad_norm": 1.4274283647537231,
1071
+ "learning_rate": 1.9763993430846395e-05,
1072
+ "loss": 2.0005,
1073
+ "step": 760
1074
+ },
1075
+ {
1076
+ "epoch": 2.8544776119402986,
1077
+ "grad_norm": 1.502812147140503,
1078
+ "learning_rate": 1.947780034877938e-05,
1079
+ "loss": 2.0224,
1080
+ "step": 765
1081
+ },
1082
+ {
1083
+ "epoch": 2.873134328358209,
1084
+ "grad_norm": 1.556489109992981,
1085
+ "learning_rate": 1.9192366084125425e-05,
1086
+ "loss": 1.9519,
1087
+ "step": 770
1088
+ },
1089
+ {
1090
+ "epoch": 2.8917910447761193,
1091
+ "grad_norm": 1.467826008796692,
1092
+ "learning_rate": 1.890772985902496e-05,
1093
+ "loss": 1.9947,
1094
+ "step": 775
1095
+ },
1096
+ {
1097
+ "epoch": 2.91044776119403,
1098
+ "grad_norm": 1.6837282180786133,
1099
+ "learning_rate": 1.8623930785958092e-05,
1100
+ "loss": 1.9335,
1101
+ "step": 780
1102
+ },
1103
+ {
1104
+ "epoch": 2.9291044776119404,
1105
+ "grad_norm": 1.446560025215149,
1106
+ "learning_rate": 1.8341007862370056e-05,
1107
+ "loss": 1.9258,
1108
+ "step": 785
1109
+ },
1110
+ {
1111
+ "epoch": 2.9477611940298507,
1112
+ "grad_norm": 1.453008770942688,
1113
+ "learning_rate": 1.8058999965312484e-05,
1114
+ "loss": 1.9039,
1115
+ "step": 790
1116
+ },
1117
+ {
1118
+ "epoch": 2.966417910447761,
1119
+ "grad_norm": 1.3427950143814087,
1120
+ "learning_rate": 1.777794584610124e-05,
1121
+ "loss": 1.8156,
1122
+ "step": 795
1123
+ },
1124
+ {
1125
+ "epoch": 2.9850746268656714,
1126
+ "grad_norm": 1.7210839986801147,
1127
+ "learning_rate": 1.749788412499149e-05,
1128
+ "loss": 2.0007,
1129
+ "step": 800
1130
+ },
1131
+ {
1132
+ "epoch": 3.003731343283582,
1133
+ "grad_norm": 1.8247441053390503,
1134
+ "learning_rate": 1.721885328587083e-05,
1135
+ "loss": 1.8995,
1136
+ "step": 805
1137
+ },
1138
+ {
1139
+ "epoch": 3.0223880597014925,
1140
+ "grad_norm": 1.3744760751724243,
1141
+ "learning_rate": 1.694089167097116e-05,
1142
+ "loss": 1.9604,
1143
+ "step": 810
1144
+ },
1145
+ {
1146
+ "epoch": 3.041044776119403,
1147
+ "grad_norm": 1.1527031660079956,
1148
+ "learning_rate": 1.6664037475599923e-05,
1149
+ "loss": 1.8479,
1150
+ "step": 815
1151
+ },
1152
+ {
1153
+ "epoch": 3.0597014925373136,
1154
+ "grad_norm": 1.412294626235962,
1155
+ "learning_rate": 1.638832874289168e-05,
1156
+ "loss": 1.9622,
1157
+ "step": 820
1158
+ },
1159
+ {
1160
+ "epoch": 3.078358208955224,
1161
+ "grad_norm": 1.5206471681594849,
1162
+ "learning_rate": 1.611380335858047e-05,
1163
+ "loss": 1.8965,
1164
+ "step": 825
1165
+ },
1166
+ {
1167
+ "epoch": 3.0970149253731343,
1168
+ "grad_norm": 1.426445484161377,
1169
+ "learning_rate": 1.5840499045793843e-05,
1170
+ "loss": 1.9118,
1171
+ "step": 830
1172
+ },
1173
+ {
1174
+ "epoch": 3.1156716417910446,
1175
+ "grad_norm": 1.556396245956421,
1176
+ "learning_rate": 1.5568453359869334e-05,
1177
+ "loss": 1.8189,
1178
+ "step": 835
1179
+ },
1180
+ {
1181
+ "epoch": 3.1343283582089554,
1182
+ "grad_norm": 1.5185908079147339,
1183
+ "learning_rate": 1.5297703683193752e-05,
1184
+ "loss": 1.9363,
1185
+ "step": 840
1186
+ },
1187
+ {
1188
+ "epoch": 3.1529850746268657,
1189
+ "grad_norm": 1.4425839185714722,
1190
+ "learning_rate": 1.502828722006655e-05,
1191
+ "loss": 1.9708,
1192
+ "step": 845
1193
+ },
1194
+ {
1195
+ "epoch": 3.171641791044776,
1196
+ "grad_norm": 1.6175637245178223,
1197
+ "learning_rate": 1.4760240991587337e-05,
1198
+ "loss": 1.9008,
1199
+ "step": 850
1200
+ },
1201
+ {
1202
+ "epoch": 3.1902985074626864,
1203
+ "grad_norm": 1.5075782537460327,
1204
+ "learning_rate": 1.4493601830568887e-05,
1205
+ "loss": 1.9626,
1206
+ "step": 855
1207
+ },
1208
+ {
1209
+ "epoch": 3.208955223880597,
1210
+ "grad_norm": 1.7610998153686523,
1211
+ "learning_rate": 1.4228406376475742e-05,
1212
+ "loss": 1.9749,
1213
+ "step": 860
1214
+ },
1215
+ {
1216
+ "epoch": 3.2276119402985075,
1217
+ "grad_norm": 1.538076400756836,
1218
+ "learning_rate": 1.396469107038956e-05,
1219
+ "loss": 1.9565,
1220
+ "step": 865
1221
+ },
1222
+ {
1223
+ "epoch": 3.246268656716418,
1224
+ "grad_norm": 1.4104888439178467,
1225
+ "learning_rate": 1.3702492150001659e-05,
1226
+ "loss": 1.9042,
1227
+ "step": 870
1228
+ },
1229
+ {
1230
+ "epoch": 3.264925373134328,
1231
+ "grad_norm": 1.5483851432800293,
1232
+ "learning_rate": 1.34418456446335e-05,
1233
+ "loss": 1.8595,
1234
+ "step": 875
1235
+ },
1236
+ {
1237
+ "epoch": 3.283582089552239,
1238
+ "grad_norm": 1.8045192956924438,
1239
+ "learning_rate": 1.3182787370285865e-05,
1240
+ "loss": 1.8968,
1241
+ "step": 880
1242
+ },
1243
+ {
1244
+ "epoch": 3.3022388059701493,
1245
+ "grad_norm": 1.5665298700332642,
1246
+ "learning_rate": 1.292535292471726e-05,
1247
+ "loss": 1.8853,
1248
+ "step": 885
1249
+ },
1250
+ {
1251
+ "epoch": 3.3208955223880596,
1252
+ "grad_norm": 1.4902681112289429,
1253
+ "learning_rate": 1.2669577682552319e-05,
1254
+ "loss": 1.8916,
1255
+ "step": 890
1256
+ },
1257
+ {
1258
+ "epoch": 3.33955223880597,
1259
+ "grad_norm": 1.3823623657226562,
1260
+ "learning_rate": 1.2415496790421011e-05,
1261
+ "loss": 1.8614,
1262
+ "step": 895
1263
+ },
1264
+ {
1265
+ "epoch": 3.3582089552238807,
1266
+ "grad_norm": 1.4400016069412231,
1267
+ "learning_rate": 1.2163145162128947e-05,
1268
+ "loss": 1.9052,
1269
+ "step": 900
1270
+ },
1271
+ {
1272
+ "epoch": 3.376865671641791,
1273
+ "grad_norm": 1.7787601947784424,
1274
+ "learning_rate": 1.1912557473859895e-05,
1275
+ "loss": 2.0061,
1276
+ "step": 905
1277
+ },
1278
+ {
1279
+ "epoch": 3.3955223880597014,
1280
+ "grad_norm": 1.5302358865737915,
1281
+ "learning_rate": 1.1663768159410748e-05,
1282
+ "loss": 1.9656,
1283
+ "step": 910
1284
+ },
1285
+ {
1286
+ "epoch": 3.4141791044776117,
1287
+ "grad_norm": 1.6571131944656372,
1288
+ "learning_rate": 1.1416811405459993e-05,
1289
+ "loss": 1.9289,
1290
+ "step": 915
1291
+ },
1292
+ {
1293
+ "epoch": 3.4328358208955225,
1294
+ "grad_norm": 1.8324801921844482,
1295
+ "learning_rate": 1.1171721146870015e-05,
1296
+ "loss": 1.8982,
1297
+ "step": 920
1298
+ },
1299
+ {
1300
+ "epoch": 3.451492537313433,
1301
+ "grad_norm": 1.5971417427062988,
1302
+ "learning_rate": 1.0928531062024017e-05,
1303
+ "loss": 1.9105,
1304
+ "step": 925
1305
+ },
1306
+ {
1307
+ "epoch": 3.470149253731343,
1308
+ "grad_norm": 1.5357367992401123,
1309
+ "learning_rate": 1.0687274568198208e-05,
1310
+ "loss": 1.9997,
1311
+ "step": 930
1312
+ },
1313
+ {
1314
+ "epoch": 3.4888059701492535,
1315
+ "grad_norm": 1.6085304021835327,
1316
+ "learning_rate": 1.0447984816969874e-05,
1317
+ "loss": 1.9139,
1318
+ "step": 935
1319
+ },
1320
+ {
1321
+ "epoch": 3.5074626865671643,
1322
+ "grad_norm": 1.3676837682724,
1323
+ "learning_rate": 1.021069468966194e-05,
1324
+ "loss": 1.9389,
1325
+ "step": 940
1326
+ },
1327
+ {
1328
+ "epoch": 3.5261194029850746,
1329
+ "grad_norm": 1.6692901849746704,
1330
+ "learning_rate": 9.975436792824691e-06,
1331
+ "loss": 1.835,
1332
+ "step": 945
1333
+ },
1334
+ {
1335
+ "epoch": 3.544776119402985,
1336
+ "grad_norm": 1.579232931137085,
1337
+ "learning_rate": 9.742243453755202e-06,
1338
+ "loss": 1.822,
1339
+ "step": 950
1340
+ },
1341
+ {
1342
+ "epoch": 3.5634328358208958,
1343
+ "grad_norm": 1.587336778640747,
1344
+ "learning_rate": 9.5111467160552e-06,
1345
+ "loss": 1.9564,
1346
+ "step": 955
1347
+ },
1348
+ {
1349
+ "epoch": 3.582089552238806,
1350
+ "grad_norm": 1.6467562913894653,
1351
+ "learning_rate": 9.282178335227884e-06,
1352
+ "loss": 1.9029,
1353
+ "step": 960
1354
+ },
1355
+ {
1356
+ "epoch": 3.6007462686567164,
1357
+ "grad_norm": 1.3579665422439575,
1358
+ "learning_rate": 9.05536977431431e-06,
1359
+ "loss": 2.0225,
1360
+ "step": 965
1361
+ },
1362
+ {
1363
+ "epoch": 3.6194029850746268,
1364
+ "grad_norm": 1.641695261001587,
1365
+ "learning_rate": 8.830752199570033e-06,
1366
+ "loss": 1.939,
1367
+ "step": 970
1368
+ },
1369
+ {
1370
+ "epoch": 3.638059701492537,
1371
+ "grad_norm": 1.5209190845489502,
1372
+ "learning_rate": 8.608356476182424e-06,
1373
+ "loss": 2.01,
1374
+ "step": 975
1375
+ },
1376
+ {
1377
+ "epoch": 3.656716417910448,
1378
+ "grad_norm": 1.769853115081787,
1379
+ "learning_rate": 8.38821316402946e-06,
1380
+ "loss": 1.9972,
1381
+ "step": 980
1382
+ },
1383
+ {
1384
+ "epoch": 3.675373134328358,
1385
+ "grad_norm": 1.7627779245376587,
1386
+ "learning_rate": 8.170352513480408e-06,
1387
+ "loss": 1.8508,
1388
+ "step": 985
1389
+ },
1390
+ {
1391
+ "epoch": 3.6940298507462686,
1392
+ "grad_norm": 1.8104168176651,
1393
+ "learning_rate": 7.954804461239053e-06,
1394
+ "loss": 1.994,
1395
+ "step": 990
1396
+ },
1397
+ {
1398
+ "epoch": 3.7126865671641793,
1399
+ "grad_norm": 1.6434147357940674,
1400
+ "learning_rate": 7.741598626230079e-06,
1401
+ "loss": 1.9354,
1402
+ "step": 995
1403
+ },
1404
+ {
1405
+ "epoch": 3.7313432835820897,
1406
+ "grad_norm": 1.871103286743164,
1407
+ "learning_rate": 7.530764305528959e-06,
1408
+ "loss": 2.004,
1409
+ "step": 1000
1410
+ }
1411
+ ],
1412
+ "logging_steps": 5,
1413
+ "max_steps": 1340,
1414
+ "num_input_tokens_seen": 0,
1415
+ "num_train_epochs": 5,
1416
+ "save_steps": 100,
1417
+ "stateful_callbacks": {
1418
+ "TrainerControl": {
1419
+ "args": {
1420
+ "should_epoch_stop": false,
1421
+ "should_evaluate": false,
1422
+ "should_log": false,
1423
+ "should_save": true,
1424
+ "should_training_stop": false
1425
+ },
1426
+ "attributes": {}
1427
+ }
1428
+ },
1429
+ "total_flos": 1.0410725611963023e+18,
1430
+ "train_batch_size": 4,
1431
+ "trial_name": null,
1432
+ "trial_params": null
1433
+ }
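`trainer_state.json` is plain JSON written by the `transformers` `Trainer`, so the loss curve can be pulled straight out of `log_history`. A minimal sketch in Python, assuming a local copy of this checkpoint folder:

```python
import json

# Load the trainer state saved with the checkpoint (assumed local path).
with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

# log_history holds one record per logging step; keep the (step, loss) pairs.
pairs = [(rec["step"], rec["loss"]) for rec in state["log_history"] if "loss" in rec]

# Quick sanity check on the tail of the curve.
for step, loss in pairs[-5:]:
    print(f"step {step:>4}: loss {loss:.4f}")
```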
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67f11670c2c9e329be00355fdb1e7f6d48dffafc103eefbd6bc3474fa9f6e67c
+ size 5304
checkpoint-1000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1100/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ library_name: peft
+ base_model: Qwen/Qwen1.5-7B-Chat
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
checkpoint-1100/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen1.5-7B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
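The config above describes a rank-16 LoRA (`lora_alpha` 32) on the `q_proj` and `v_proj` projections of Qwen/Qwen1.5-7B-Chat. A minimal loading sketch with `peft` and `transformers`, assuming this checkpoint folder is available locally as `checkpoint-1100`:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model named in adapter_config.json, then attach the LoRA
# weights stored in adapter_model.safetensors (assumed local path).
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B-Chat")
model = PeftModel.from_pretrained(base, "checkpoint-1100")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B-Chat")

# Optionally fold the adapter into the base weights for plain inference.
model = model.merge_and_unload()
```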
checkpoint-1100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f169a98e4cffbba3b209a0e1eb829a684bf1e49bd5240f055634ce25d4493a09
+ size 33571624
checkpoint-1100/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "<|endoftext|>": 151643,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644
+ }
checkpoint-1100/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1100/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6a15d7ad90f4459f2b2d6326efc554c39c336f32d232d551d76248d79f110d0
+ size 67217018
checkpoint-1100/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b3ee827a7a00012c0a116546df467feee35e70376d81a7a85b1a70eb90414d3
+ size 14244
checkpoint-1100/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:595b7ae12b9873b2b8e24bfa338f23d8744b2fea2db53e52bc67cd4dbb89d1b8
+ size 1064
checkpoint-1100/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
checkpoint-1100/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1100/tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{% set system_message = 'You are a helpful assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<|im_start|>system\n' + system_message + '<|im_end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "model_max_length": 32768,
+ "pad_token": "<|endoftext|>",
+ "padding_side": "right",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
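The `chat_template` above is the ChatML layout used by Qwen1.5: each turn is framed by `<|im_start|>`/`<|im_end|>`, with `You are a helpful assistant.` as the default system prompt. A short sketch of rendering a prompt through it, assuming the folder is available locally:

```python
from transformers import AutoTokenizer

# Load the tokenizer shipped with this checkpoint (assumed local path).
tokenizer = AutoTokenizer.from_pretrained("checkpoint-1100")

messages = [{"role": "user", "content": "Hello!"}]

# apply_chat_template fills in the default system prompt and, with
# add_generation_prompt=True, the trailing '<|im_start|>assistant\n'.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```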
checkpoint-1100/trainer_state.json ADDED
@@ -0,0 +1,1573 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 4.104477611940299,
+ "eval_steps": 500,
+ "global_step": 1100,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ { "epoch": 0.018656716417910446, "grad_norm": 2.403158187866211, "learning_rate": 4.9998282347929784e-05, "loss": 3.3875, "step": 5 },
+ { "epoch": 0.03731343283582089, "grad_norm": 2.301710367202759, "learning_rate": 4.99931296277454e-05, "loss": 2.9015, "step": 10 },
+ { "epoch": 0.055970149253731345, "grad_norm": 1.271048665046692, "learning_rate": 4.998454254749331e-05, "loss": 2.6229, "step": 15 },
+ { "epoch": 0.07462686567164178, "grad_norm": 1.069893717765808, "learning_rate": 4.997252228714279e-05, "loss": 2.3704, "step": 20 },
+ { "epoch": 0.09328358208955224, "grad_norm": 0.9044906497001648, "learning_rate": 4.9957070498423854e-05, "loss": 2.3782, "step": 25 },
+ { "epoch": 0.11194029850746269, "grad_norm": 0.9635376334190369, "learning_rate": 4.993818930460026e-05, "loss": 2.3576, "step": 30 },
+ { "epoch": 0.13059701492537312, "grad_norm": 0.8513979315757751, "learning_rate": 4.9915881300177725e-05, "loss": 2.4603, "step": 35 },
+ { "epoch": 0.14925373134328357, "grad_norm": 0.845267117023468, "learning_rate": 4.9890149550547454e-05, "loss": 2.2033, "step": 40 },
+ { "epoch": 0.16791044776119404, "grad_norm": 0.6632418036460876, "learning_rate": 4.98609975915649e-05, "loss": 2.1851, "step": 45 },
+ { "epoch": 0.1865671641791045, "grad_norm": 0.6857479810714722, "learning_rate": 4.982842942906386e-05, "loss": 2.3592, "step": 50 },
+ { "epoch": 0.20522388059701493, "grad_norm": 0.7204287648200989, "learning_rate": 4.979244953830608e-05, "loss": 2.1323, "step": 55 },
+ { "epoch": 0.22388059701492538, "grad_norm": 0.6864420175552368, "learning_rate": 4.9753062863366276e-05, "loss": 2.2138, "step": 60 },
+ { "epoch": 0.24253731343283583, "grad_norm": 0.7536088228225708, "learning_rate": 4.971027481645274e-05, "loss": 2.2584, "step": 65 },
+ { "epoch": 0.26119402985074625, "grad_norm": 0.9708526134490967, "learning_rate": 4.966409127716367e-05, "loss": 2.2669, "step": 70 },
+ { "epoch": 0.2798507462686567, "grad_norm": 0.7516190409660339, "learning_rate": 4.96145185916792e-05, "loss": 2.2133, "step": 75 },
+ { "epoch": 0.29850746268656714, "grad_norm": 0.7864778637886047, "learning_rate": 4.95615635718894e-05, "loss": 2.1683, "step": 80 },
+ { "epoch": 0.31716417910447764, "grad_norm": 0.7846741080284119, "learning_rate": 4.950523349445824e-05, "loss": 2.1274, "step": 85 },
+ { "epoch": 0.3358208955223881, "grad_norm": 0.816838800907135, "learning_rate": 4.944553609982363e-05, "loss": 2.2033, "step": 90 },
+ { "epoch": 0.35447761194029853, "grad_norm": 0.7661916017532349, "learning_rate": 4.938247959113386e-05, "loss": 2.1492, "step": 95 },
+ { "epoch": 0.373134328358209, "grad_norm": 0.8964986205101013, "learning_rate": 4.931607263312032e-05, "loss": 2.0862, "step": 100 },
+ { "epoch": 0.3917910447761194, "grad_norm": 0.8603547215461731, "learning_rate": 4.924632435090696e-05, "loss": 2.1444, "step": 105 },
+ { "epoch": 0.41044776119402987, "grad_norm": 0.8611045479774475, "learning_rate": 4.917324432875627e-05, "loss": 2.1202, "step": 110 },
+ { "epoch": 0.4291044776119403, "grad_norm": 0.9499636888504028, "learning_rate": 4.909684260875235e-05, "loss": 2.1285, "step": 115 },
+ { "epoch": 0.44776119402985076, "grad_norm": 0.8490393161773682, "learning_rate": 4.9017129689421e-05, "loss": 2.236, "step": 120 },
+ { "epoch": 0.4664179104477612, "grad_norm": 0.9628555178642273, "learning_rate": 4.893411652428712e-05, "loss": 2.1219, "step": 125 },
+ { "epoch": 0.48507462686567165, "grad_norm": 1.1119599342346191, "learning_rate": 4.8847814520369475e-05, "loss": 2.2537, "step": 130 },
+ { "epoch": 0.503731343283582, "grad_norm": 0.9489665627479553, "learning_rate": 4.875823553661334e-05, "loss": 2.1018, "step": 135 },
+ { "epoch": 0.5223880597014925, "grad_norm": 0.9434083700180054, "learning_rate": 4.8665391882260856e-05, "loss": 2.0809, "step": 140 },
+ { "epoch": 0.5410447761194029, "grad_norm": 0.8856557607650757, "learning_rate": 4.856929631515964e-05, "loss": 2.0807, "step": 145 },
+ { "epoch": 0.5597014925373134, "grad_norm": 0.8770031929016113, "learning_rate": 4.846996204000967e-05, "loss": 2.0843, "step": 150 },
+ { "epoch": 0.5783582089552238, "grad_norm": 0.8374930620193481, "learning_rate": 4.8367402706548805e-05, "loss": 2.1869, "step": 155 },
+ { "epoch": 0.5970149253731343, "grad_norm": 1.0829132795333862, "learning_rate": 4.8261632407677174e-05, "loss": 2.028, "step": 160 },
+ { "epoch": 0.6156716417910447, "grad_norm": 0.9735206365585327, "learning_rate": 4.815266567752059e-05, "loss": 2.0966, "step": 165 },
+ { "epoch": 0.6343283582089553, "grad_norm": 1.087944746017456, "learning_rate": 4.804051748943343e-05, "loss": 2.0863, "step": 170 },
+ { "epoch": 0.6529850746268657, "grad_norm": 0.8176729083061218, "learning_rate": 4.792520325394111e-05, "loss": 2.1135, "step": 175 },
+ { "epoch": 0.6716417910447762, "grad_norm": 0.9173070788383484, "learning_rate": 4.780673881662242e-05, "loss": 2.0564, "step": 180 },
+ { "epoch": 0.6902985074626866, "grad_norm": 0.9463202953338623, "learning_rate": 4.7685140455932267e-05, "loss": 2.1579, "step": 185 },
+ { "epoch": 0.7089552238805971, "grad_norm": 1.149950385093689, "learning_rate": 4.756042488096471e-05, "loss": 2.1447, "step": 190 },
+ { "epoch": 0.7276119402985075, "grad_norm": 0.940965473651886, "learning_rate": 4.743260922915701e-05, "loss": 2.0823, "step": 195 },
+ { "epoch": 0.746268656716418, "grad_norm": 0.9384671449661255, "learning_rate": 4.730171106393466e-05, "loss": 2.1445, "step": 200 },
+ { "epoch": 0.7649253731343284, "grad_norm": 0.8937250971794128, "learning_rate": 4.716774837229804e-05, "loss": 2.014, "step": 205 },
+ { "epoch": 0.7835820895522388, "grad_norm": 0.8928058743476868, "learning_rate": 4.7030739562350713e-05, "loss": 2.1882, "step": 210 },
+ { "epoch": 0.8022388059701493, "grad_norm": 1.0239906311035156, "learning_rate": 4.6890703460769955e-05, "loss": 2.1042, "step": 215 },
+ { "epoch": 0.8208955223880597, "grad_norm": 1.0555064678192139, "learning_rate": 4.674765931021976e-05, "loss": 2.015, "step": 220 },
+ { "epoch": 0.8395522388059702, "grad_norm": 1.084709882736206, "learning_rate": 4.6601626766706626e-05, "loss": 2.0603, "step": 225 },
+ { "epoch": 0.8582089552238806, "grad_norm": 0.9265861511230469, "learning_rate": 4.645262589687861e-05, "loss": 2.1006, "step": 230 },
+ { "epoch": 0.8768656716417911, "grad_norm": 1.0058296918869019, "learning_rate": 4.6300677175267914e-05, "loss": 2.063, "step": 235 },
+ { "epoch": 0.8955223880597015, "grad_norm": 1.0766576528549194, "learning_rate": 4.614580148147744e-05, "loss": 2.0781, "step": 240 },
+ { "epoch": 0.914179104477612, "grad_norm": 1.0215730667114258, "learning_rate": 4.598802009731167e-05, "loss": 2.1774, "step": 245 },
+ { "epoch": 0.9328358208955224, "grad_norm": 0.9870419502258301, "learning_rate": 4.582735470385229e-05, "loss": 1.9636, "step": 250 },
+ { "epoch": 0.9514925373134329, "grad_norm": 1.1921675205230713, "learning_rate": 4.5663827378478975e-05, "loss": 2.0141, "step": 255 },
+ { "epoch": 0.9701492537313433, "grad_norm": 1.0618964433670044, "learning_rate": 4.5497460591835615e-05, "loss": 2.0508, "step": 260 },
+ { "epoch": 0.9888059701492538, "grad_norm": 0.9723111391067505, "learning_rate": 4.532827720474268e-05, "loss": 2.0312, "step": 265 },
+ { "epoch": 1.007462686567164, "grad_norm": 0.9339023232460022, "learning_rate": 4.515630046505575e-05, "loss": 2.1107, "step": 270 },
+ { "epoch": 1.0261194029850746, "grad_norm": 1.0588074922561646, "learning_rate": 4.498155400447107e-05, "loss": 2.0963, "step": 275 },
+ { "epoch": 1.044776119402985, "grad_norm": 1.0709750652313232, "learning_rate": 4.480406183527823e-05, "loss": 2.0359, "step": 280 },
+ { "epoch": 1.0634328358208955, "grad_norm": 1.2172249555587769, "learning_rate": 4.462384834706058e-05, "loss": 2.1083, "step": 285 },
+ { "epoch": 1.0820895522388059, "grad_norm": 1.1719626188278198, "learning_rate": 4.4440938303343804e-05, "loss": 2.1259, "step": 290 },
+ { "epoch": 1.1007462686567164, "grad_norm": 1.051269292831421, "learning_rate": 4.425535683819312e-05, "loss": 2.0901, "step": 295 },
+ { "epoch": 1.1194029850746268, "grad_norm": 1.3167760372161865, "learning_rate": 4.406712945275955e-05, "loss": 2.0032, "step": 300 },
+ { "epoch": 1.1380597014925373, "grad_norm": 1.2565367221832275, "learning_rate": 4.387628201177577e-05, "loss": 2.0148, "step": 305 },
+ { "epoch": 1.1567164179104479, "grad_norm": 1.1141688823699951, "learning_rate": 4.368284074000193e-05, "loss": 2.0217, "step": 310 },
+ { "epoch": 1.1753731343283582, "grad_norm": 1.1642612218856812, "learning_rate": 4.348683221862212e-05, "loss": 2.0194, "step": 315 },
+ { "epoch": 1.1940298507462686, "grad_norm": 1.1613104343414307, "learning_rate": 4.328828338159173e-05, "loss": 1.9371, "step": 320 },
+ { "epoch": 1.212686567164179, "grad_norm": 1.2319557666778564, "learning_rate": 4.3087221511936434e-05, "loss": 2.0227, "step": 325 },
+ { "epoch": 1.2313432835820897, "grad_norm": 1.2520420551300049, "learning_rate": 4.288367423800319e-05, "loss": 1.9883, "step": 330 },
+ { "epoch": 1.25, "grad_norm": 1.0452089309692383, "learning_rate": 4.267766952966369e-05, "loss": 1.9912, "step": 335 },
+ { "epoch": 1.2686567164179103, "grad_norm": 0.9965611100196838, "learning_rate": 4.2469235694471043e-05, "loss": 1.983, "step": 340 },
+ { "epoch": 1.287313432835821, "grad_norm": 1.0808607339859009, "learning_rate": 4.225840137376993e-05, "loss": 1.9514, "step": 345 },
+ { "epoch": 1.3059701492537314, "grad_norm": 1.102575659751892, "learning_rate": 4.204519553876095e-05, "loss": 2.0286, "step": 350 },
+ { "epoch": 1.3246268656716418, "grad_norm": 1.0246608257293701, "learning_rate": 4.1829647486519596e-05, "loss": 2.0265, "step": 355 },
+ { "epoch": 1.3432835820895521, "grad_norm": 1.0723367929458618, "learning_rate": 4.161178683597054e-05, "loss": 2.0077, "step": 360 },
+ { "epoch": 1.3619402985074627, "grad_norm": 1.4298617839813232, "learning_rate": 4.139164352381758e-05, "loss": 2.0898, "step": 365 },
+ { "epoch": 1.3805970149253732, "grad_norm": 1.1437115669250488, "learning_rate": 4.116924780042997e-05, "loss": 2.024, "step": 370 },
+ { "epoch": 1.3992537313432836, "grad_norm": 1.326556921005249, "learning_rate": 4.094463022568569e-05, "loss": 2.2252, "step": 375 },
+ { "epoch": 1.417910447761194, "grad_norm": 1.2549344301223755, "learning_rate": 4.071782166477213e-05, "loss": 1.9777, "step": 380 },
+ { "epoch": 1.4365671641791045, "grad_norm": 1.1226497888565063, "learning_rate": 4.0488853283944806e-05, "loss": 2.0062, "step": 385 },
+ { "epoch": 1.455223880597015, "grad_norm": 1.2250981330871582, "learning_rate": 4.0257756546244804e-05, "loss": 1.9147, "step": 390 },
+ { "epoch": 1.4738805970149254, "grad_norm": 1.3552589416503906, "learning_rate": 4.0024563207175316e-05, "loss": 1.9709, "step": 395 },
+ { "epoch": 1.4925373134328357, "grad_norm": 1.3661599159240723, "learning_rate": 3.978930531033807e-05, "loss": 1.9748, "step": 400 },
+ { "epoch": 1.5111940298507462, "grad_norm": 1.1794605255126953, "learning_rate": 3.9552015183030136e-05, "loss": 2.0367, "step": 405 },
+ { "epoch": 1.5298507462686568, "grad_norm": 1.19724440574646, "learning_rate": 3.93127254318018e-05, "loss": 1.9545, "step": 410 },
+ { "epoch": 1.5485074626865671, "grad_norm": 1.310658574104309, "learning_rate": 3.907146893797599e-05, "loss": 1.9933, "step": 415 },
+ { "epoch": 1.5671641791044775, "grad_norm": 1.2032736539840698, "learning_rate": 3.882827885312999e-05, "loss": 2.0442, "step": 420 },
+ { "epoch": 1.585820895522388, "grad_norm": 1.2670124769210815, "learning_rate": 3.858318859454001e-05, "loss": 1.974, "step": 425 },
+ { "epoch": 1.6044776119402986, "grad_norm": 1.5301685333251953, "learning_rate": 3.833623184058926e-05, "loss": 2.0865, "step": 430 },
+ { "epoch": 1.623134328358209, "grad_norm": 1.3863707780838013, "learning_rate": 3.808744252614012e-05, "loss": 1.9614, "step": 435 },
+ { "epoch": 1.6417910447761193, "grad_norm": 1.2591431140899658, "learning_rate": 3.783685483787105e-05, "loss": 1.949, "step": 440 },
+ { "epoch": 1.6604477611940298, "grad_norm": 1.2093037366867065, "learning_rate": 3.758450320957899e-05, "loss": 1.9618, "step": 445 },
+ { "epoch": 1.6791044776119404, "grad_norm": 1.1593824625015259, "learning_rate": 3.7330422317447685e-05, "loss": 2.0124, "step": 450 },
+ { "epoch": 1.6977611940298507, "grad_norm": 1.7013437747955322, "learning_rate": 3.707464707528275e-05, "loss": 2.0613, "step": 455 },
+ { "epoch": 1.716417910447761, "grad_norm": 1.2550350427627563, "learning_rate": 3.681721262971413e-05, "loss": 2.1354, "step": 460 },
+ { "epoch": 1.7350746268656716, "grad_norm": 1.1735903024673462, "learning_rate": 3.6558154355366506e-05, "loss": 1.9618, "step": 465 },
+ { "epoch": 1.7537313432835822, "grad_norm": 1.331148624420166, "learning_rate": 3.6297507849998344e-05, "loss": 1.9245, "step": 470 },
+ { "epoch": 1.7723880597014925, "grad_norm": 1.3502494096755981, "learning_rate": 3.6035308929610446e-05, "loss": 1.9758, "step": 475 },
+ { "epoch": 1.7910447761194028, "grad_norm": 1.2406198978424072, "learning_rate": 3.5771593623524265e-05, "loss": 1.9824, "step": 480 },
+ { "epoch": 1.8097014925373134, "grad_norm": 1.224885106086731, "learning_rate": 3.550639816943111e-05, "loss": 2.069, "step": 485 },
+ { "epoch": 1.828358208955224, "grad_norm": 1.4666011333465576, "learning_rate": 3.5239759008412666e-05, "loss": 2.0797, "step": 490 },
+ { "epoch": 1.8470149253731343, "grad_norm": 1.2758076190948486, "learning_rate": 3.497171277993346e-05, "loss": 2.0195, "step": 495 },
+ { "epoch": 1.8656716417910446, "grad_norm": 1.1991291046142578, "learning_rate": 3.4702296316806244e-05, "loss": 1.9558, "step": 500 },
+ { "epoch": 1.8843283582089554, "grad_norm": 1.2548415660858154, "learning_rate": 3.443154664013067e-05, "loss": 1.9805, "step": 505 },
+ { "epoch": 1.9029850746268657, "grad_norm": 1.5407222509384155, "learning_rate": 3.415950095420616e-05, "loss": 1.9152, "step": 510 },
+ { "epoch": 1.921641791044776, "grad_norm": 1.285704493522644, "learning_rate": 3.3886196641419545e-05, "loss": 2.0442, "step": 515 },
+ { "epoch": 1.9402985074626866, "grad_norm": 1.377465844154358, "learning_rate": 3.361167125710832e-05, "loss": 2.0537, "step": 520 },
+ { "epoch": 1.9589552238805972, "grad_norm": 1.186889410018921, "learning_rate": 3.333596252440008e-05, "loss": 1.9798, "step": 525 },
+ { "epoch": 1.9776119402985075, "grad_norm": 1.4855142831802368, "learning_rate": 3.305910832902884e-05, "loss": 2.0984, "step": 530 },
+ { "epoch": 1.9962686567164178, "grad_norm": 1.47159743309021, "learning_rate": 3.278114671412917e-05, "loss": 1.9932, "step": 535 },
+ { "epoch": 2.014925373134328, "grad_norm": 1.4078409671783447, "learning_rate": 3.2502115875008524e-05, "loss": 1.9457, "step": 540 },
+ { "epoch": 2.033582089552239, "grad_norm": 1.1386340856552124, "learning_rate": 3.222205415389877e-05, "loss": 1.9334, "step": 545 },
+ { "epoch": 2.0522388059701493, "grad_norm": 1.666084885597229, "learning_rate": 3.1941000034687515e-05, "loss": 1.9716, "step": 550 },
+ { "epoch": 2.0708955223880596, "grad_norm": 1.3137987852096558, "learning_rate": 3.165899213762995e-05, "loss": 1.9189, "step": 555 },
+ { "epoch": 2.08955223880597, "grad_norm": 1.2372797727584839, "learning_rate": 3.1376069214041913e-05, "loss": 2.0234, "step": 560 },
+ { "epoch": 2.1082089552238807, "grad_norm": 1.3149720430374146, "learning_rate": 3.109227014097505e-05, "loss": 2.0271, "step": 565 },
+ { "epoch": 2.126865671641791, "grad_norm": 1.4162675142288208, "learning_rate": 3.0807633915874584e-05, "loss": 1.8236, "step": 570 },
+ { "epoch": 2.1455223880597014, "grad_norm": 1.4029136896133423, "learning_rate": 3.052219965122062e-05, "loss": 2.1821, "step": 575 },
+ { "epoch": 2.1641791044776117, "grad_norm": 1.5424753427505493, "learning_rate": 3.0236006569153617e-05, "loss": 1.9496, "step": 580 },
+ { "epoch": 2.1828358208955225, "grad_norm": 1.274217963218689, "learning_rate": 2.9949093996084747e-05, "loss": 2.0439, "step": 585 },
+ { "epoch": 2.201492537313433, "grad_norm": 1.2068248987197876, "learning_rate": 2.9661501357292033e-05, "loss": 2.0805, "step": 590 },
+ { "epoch": 2.220149253731343, "grad_norm": 1.2352491617202759, "learning_rate": 2.9373268171502777e-05, "loss": 1.975, "step": 595 },
+ { "epoch": 2.2388059701492535, "grad_norm": 1.3039956092834473, "learning_rate": 2.9084434045463255e-05, "loss": 1.9834, "step": 600 },
+ { "epoch": 2.2574626865671643, "grad_norm": 1.3400136232376099, "learning_rate": 2.8795038668496222e-05, "loss": 1.84, "step": 605 },
+ { "epoch": 2.2761194029850746, "grad_norm": 1.458132028579712, "learning_rate": 2.850512180704715e-05, "loss": 1.9518, "step": 610 },
+ { "epoch": 2.294776119402985, "grad_norm": 1.446595311164856, "learning_rate": 2.821472329921981e-05, "loss": 1.8977, "step": 615 },
+ { "epoch": 2.3134328358208958, "grad_norm": 1.432244062423706, "learning_rate": 2.792388304930207e-05, "loss": 1.9742, "step": 620 },
+ { "epoch": 2.332089552238806, "grad_norm": 1.499017596244812, "learning_rate": 2.7632641022282502e-05, "loss": 1.9379, "step": 625 },
+ { "epoch": 2.3507462686567164, "grad_norm": 1.6504281759262085, "learning_rate": 2.7341037238358774e-05, "loss": 1.9175, "step": 630 },
+ { "epoch": 2.3694029850746268, "grad_norm": 1.6585911512374878, "learning_rate": 2.704911176743833e-05, "loss": 2.0449, "step": 635 },
+ { "epoch": 2.388059701492537, "grad_norm": 1.545623779296875, "learning_rate": 2.6756904723632324e-05, "loss": 2.0096, "step": 640 },
+ { "epoch": 2.406716417910448, "grad_norm": 1.468853235244751, "learning_rate": 2.646445625974347e-05, "loss": 1.878, "step": 645 },
+ { "epoch": 2.425373134328358, "grad_norm": 1.3598605394363403, "learning_rate": 2.6171806561748502e-05, "loss": 1.9625, "step": 650 },
+ { "epoch": 2.4440298507462686, "grad_norm": 1.3197077512741089, "learning_rate": 2.5878995843276204e-05, "loss": 1.9375, "step": 655 },
+ { "epoch": 2.4626865671641793, "grad_norm": 1.5469880104064941, "learning_rate": 2.5586064340081516e-05, "loss": 1.8402, "step": 660 },
+ { "epoch": 2.4813432835820897, "grad_norm": 1.4435440301895142, "learning_rate": 2.529305230451666e-05, "loss": 1.8795, "step": 665 },
+ { "epoch": 2.5, "grad_norm": 1.505194067955017, "learning_rate": 2.5e-05, "loss": 1.9938, "step": 670 },
+ { "epoch": 2.5186567164179103, "grad_norm": 1.3251738548278809, "learning_rate": 2.4706947695483348e-05, "loss": 1.956, "step": 675 },
+ { "epoch": 2.5373134328358207, "grad_norm": 1.4197183847427368, "learning_rate": 2.441393565991849e-05, "loss": 1.906, "step": 680 },
+ { "epoch": 2.5559701492537314, "grad_norm": 1.4905989170074463, "learning_rate": 2.4121004156723802e-05, "loss": 1.9073, "step": 685 },
+ { "epoch": 2.574626865671642, "grad_norm": 1.3891818523406982, "learning_rate": 2.3828193438251497e-05, "loss": 2.1399, "step": 690 },
+ { "epoch": 2.593283582089552, "grad_norm": 1.6372982263565063, "learning_rate": 2.3535543740256536e-05, "loss": 1.873, "step": 695 },
+ { "epoch": 2.611940298507463, "grad_norm": 1.5683703422546387, "learning_rate": 2.3243095276367685e-05, "loss": 1.8899, "step": 700 },
+ { "epoch": 2.6305970149253732, "grad_norm": 1.585425615310669, "learning_rate": 2.2950888232561672e-05, "loss": 2.0511, "step": 705 },
+ { "epoch": 2.6492537313432836, "grad_norm": 1.3682692050933838, "learning_rate": 2.2658962761641232e-05, "loss": 2.0364, "step": 710 },
+ { "epoch": 2.667910447761194, "grad_norm": 1.7755306959152222, "learning_rate": 2.23673589777175e-05, "loss": 2.0033, "step": 715 },
+ { "epoch": 2.6865671641791042, "grad_norm": 1.4118067026138306, "learning_rate": 2.207611695069794e-05, "loss": 2.102, "step": 720 },
+ { "epoch": 2.705223880597015, "grad_norm": 1.5786772966384888, "learning_rate": 2.17852767007802e-05, "loss": 1.9894, "step": 725 },
+ { "epoch": 2.7238805970149254, "grad_norm": 1.4233230352401733, "learning_rate": 2.1494878192952855e-05, "loss": 1.9355, "step": 730 },
+ { "epoch": 2.7425373134328357, "grad_norm": 1.5830904245376587, "learning_rate": 2.1204961331503787e-05, "loss": 1.9399, "step": 735 },
+ { "epoch": 2.7611940298507465, "grad_norm": 1.2974706888198853, "learning_rate": 2.0915565954536744e-05, "loss": 1.9814, "step": 740 },
+ { "epoch": 2.779850746268657, "grad_norm": 1.2366008758544922, "learning_rate": 2.0626731828497225e-05, "loss": 1.9275, "step": 745 },
+ { "epoch": 2.798507462686567, "grad_norm": 1.5165388584136963, "learning_rate": 2.0338498642707977e-05, "loss": 1.9444, "step": 750 },
+ { "epoch": 2.8171641791044775, "grad_norm": 1.429136037826538, "learning_rate": 2.005090600391526e-05, "loss": 1.9831, "step": 755 },
+ { "epoch": 2.835820895522388, "grad_norm": 1.4274283647537231, "learning_rate": 1.9763993430846395e-05, "loss": 2.0005, "step": 760 },
+ { "epoch": 2.8544776119402986, "grad_norm": 1.502812147140503, "learning_rate": 1.947780034877938e-05, "loss": 2.0224, "step": 765 },
+ { "epoch": 2.873134328358209, "grad_norm": 1.556489109992981, "learning_rate": 1.9192366084125425e-05, "loss": 1.9519, "step": 770 },
+ { "epoch": 2.8917910447761193, "grad_norm": 1.467826008796692, "learning_rate": 1.890772985902496e-05, "loss": 1.9947, "step": 775 },
+ { "epoch": 2.91044776119403, "grad_norm": 1.6837282180786133, "learning_rate": 1.8623930785958092e-05, "loss": 1.9335, "step": 780 },
+ { "epoch": 2.9291044776119404, "grad_norm": 1.446560025215149, "learning_rate": 1.8341007862370056e-05, "loss": 1.9258, "step": 785 },
+ { "epoch": 2.9477611940298507, "grad_norm": 1.453008770942688, "learning_rate": 1.8058999965312484e-05, "loss": 1.9039, "step": 790 },
+ { "epoch": 2.966417910447761, "grad_norm": 1.3427950143814087, "learning_rate": 1.777794584610124e-05, "loss": 1.8156, "step": 795 },
+ { "epoch": 2.9850746268656714, "grad_norm": 1.7210839986801147, "learning_rate": 1.749788412499149e-05, "loss": 2.0007, "step": 800 },
+ { "epoch": 3.003731343283582, "grad_norm": 1.8247441053390503, "learning_rate": 1.721885328587083e-05, "loss": 1.8995, "step": 805 },
+ { "epoch": 3.0223880597014925, "grad_norm": 1.3744760751724243, "learning_rate": 1.694089167097116e-05, "loss": 1.9604, "step": 810 },
+ { "epoch": 3.041044776119403, "grad_norm": 1.1527031660079956, "learning_rate": 1.6664037475599923e-05, "loss": 1.8479, "step": 815 },
+ { "epoch": 3.0597014925373136, "grad_norm": 1.412294626235962, "learning_rate": 1.638832874289168e-05, "loss": 1.9622, "step": 820 },
+ { "epoch": 3.078358208955224, "grad_norm": 1.5206471681594849, "learning_rate": 1.611380335858047e-05, "loss": 1.8965, "step": 825 },
+ { "epoch": 3.0970149253731343, "grad_norm": 1.426445484161377, "learning_rate": 1.5840499045793843e-05, "loss": 1.9118, "step": 830 },
+ { "epoch": 3.1156716417910446, "grad_norm": 1.556396245956421, "learning_rate": 1.5568453359869334e-05, "loss": 1.8189, "step": 835 },
+ { "epoch": 3.1343283582089554, "grad_norm": 1.5185908079147339, "learning_rate": 1.5297703683193752e-05, "loss": 1.9363, "step": 840 },
+ { "epoch": 3.1529850746268657, "grad_norm": 1.4425839185714722, "learning_rate": 1.502828722006655e-05, "loss": 1.9708, "step": 845 },
+ { "epoch": 3.171641791044776, "grad_norm": 1.6175637245178223, "learning_rate": 1.4760240991587337e-05, "loss": 1.9008, "step": 850 },
+ { "epoch": 3.1902985074626864, "grad_norm": 1.5075782537460327, "learning_rate": 1.4493601830568887e-05, "loss": 1.9626, "step": 855 },
+ { "epoch": 3.208955223880597, "grad_norm": 1.7610998153686523, "learning_rate": 1.4228406376475742e-05, "loss": 1.9749, "step": 860 },
+ { "epoch": 3.2276119402985075, "grad_norm": 1.538076400756836, "learning_rate": 1.396469107038956e-05, "loss": 1.9565, "step": 865 },
+ { "epoch": 3.246268656716418, "grad_norm": 1.4104888439178467, "learning_rate": 1.3702492150001659e-05, "loss": 1.9042, "step": 870 },
+ { "epoch": 3.264925373134328, "grad_norm": 1.5483851432800293, "learning_rate": 1.34418456446335e-05, "loss": 1.8595, "step": 875 },
+ {
+ "epoch": 3.283582089552239,
+ "grad_norm": 1.8045192956924438,
+ "learning_rate": 1.3182787370285865e-05,
+ "loss": 1.8968,
1241
+ "step": 880
1242
+ },
1243
+ {
1244
+ "epoch": 3.3022388059701493,
1245
+ "grad_norm": 1.5665298700332642,
1246
+ "learning_rate": 1.292535292471726e-05,
1247
+ "loss": 1.8853,
1248
+ "step": 885
1249
+ },
1250
+ {
1251
+ "epoch": 3.3208955223880596,
1252
+ "grad_norm": 1.4902681112289429,
1253
+ "learning_rate": 1.2669577682552319e-05,
1254
+ "loss": 1.8916,
1255
+ "step": 890
1256
+ },
1257
+ {
1258
+ "epoch": 3.33955223880597,
1259
+ "grad_norm": 1.3823623657226562,
1260
+ "learning_rate": 1.2415496790421011e-05,
1261
+ "loss": 1.8614,
1262
+ "step": 895
1263
+ },
1264
+ {
1265
+ "epoch": 3.3582089552238807,
1266
+ "grad_norm": 1.4400016069412231,
1267
+ "learning_rate": 1.2163145162128947e-05,
1268
+ "loss": 1.9052,
1269
+ "step": 900
1270
+ },
1271
+ {
1272
+ "epoch": 3.376865671641791,
1273
+ "grad_norm": 1.7787601947784424,
1274
+ "learning_rate": 1.1912557473859895e-05,
1275
+ "loss": 2.0061,
1276
+ "step": 905
1277
+ },
1278
+ {
1279
+ "epoch": 3.3955223880597014,
1280
+ "grad_norm": 1.5302358865737915,
1281
+ "learning_rate": 1.1663768159410748e-05,
1282
+ "loss": 1.9656,
1283
+ "step": 910
1284
+ },
1285
+ {
1286
+ "epoch": 3.4141791044776117,
1287
+ "grad_norm": 1.6571131944656372,
1288
+ "learning_rate": 1.1416811405459993e-05,
1289
+ "loss": 1.9289,
1290
+ "step": 915
1291
+ },
1292
+ {
1293
+ "epoch": 3.4328358208955225,
1294
+ "grad_norm": 1.8324801921844482,
1295
+ "learning_rate": 1.1171721146870015e-05,
1296
+ "loss": 1.8982,
1297
+ "step": 920
1298
+ },
1299
+ {
1300
+ "epoch": 3.451492537313433,
1301
+ "grad_norm": 1.5971417427062988,
1302
+ "learning_rate": 1.0928531062024017e-05,
1303
+ "loss": 1.9105,
1304
+ "step": 925
1305
+ },
1306
+ {
1307
+ "epoch": 3.470149253731343,
1308
+ "grad_norm": 1.5357367992401123,
1309
+ "learning_rate": 1.0687274568198208e-05,
1310
+ "loss": 1.9997,
1311
+ "step": 930
1312
+ },
1313
+ {
1314
+ "epoch": 3.4888059701492535,
1315
+ "grad_norm": 1.6085304021835327,
1316
+ "learning_rate": 1.0447984816969874e-05,
1317
+ "loss": 1.9139,
1318
+ "step": 935
1319
+ },
1320
+ {
1321
+ "epoch": 3.5074626865671643,
1322
+ "grad_norm": 1.3676837682724,
1323
+ "learning_rate": 1.021069468966194e-05,
1324
+ "loss": 1.9389,
1325
+ "step": 940
1326
+ },
1327
+ {
1328
+ "epoch": 3.5261194029850746,
1329
+ "grad_norm": 1.6692901849746704,
1330
+ "learning_rate": 9.975436792824691e-06,
1331
+ "loss": 1.835,
1332
+ "step": 945
1333
+ },
1334
+ {
1335
+ "epoch": 3.544776119402985,
1336
+ "grad_norm": 1.579232931137085,
1337
+ "learning_rate": 9.742243453755202e-06,
1338
+ "loss": 1.822,
1339
+ "step": 950
1340
+ },
1341
+ {
1342
+ "epoch": 3.5634328358208958,
1343
+ "grad_norm": 1.587336778640747,
1344
+ "learning_rate": 9.5111467160552e-06,
1345
+ "loss": 1.9564,
1346
+ "step": 955
1347
+ },
1348
+ {
1349
+ "epoch": 3.582089552238806,
1350
+ "grad_norm": 1.6467562913894653,
1351
+ "learning_rate": 9.282178335227884e-06,
1352
+ "loss": 1.9029,
1353
+ "step": 960
1354
+ },
1355
+ {
1356
+ "epoch": 3.6007462686567164,
1357
+ "grad_norm": 1.3579665422439575,
1358
+ "learning_rate": 9.05536977431431e-06,
1359
+ "loss": 2.0225,
1360
+ "step": 965
1361
+ },
1362
+ {
1363
+ "epoch": 3.6194029850746268,
1364
+ "grad_norm": 1.641695261001587,
1365
+ "learning_rate": 8.830752199570033e-06,
1366
+ "loss": 1.939,
1367
+ "step": 970
1368
+ },
1369
+ {
1370
+ "epoch": 3.638059701492537,
1371
+ "grad_norm": 1.5209190845489502,
1372
+ "learning_rate": 8.608356476182424e-06,
1373
+ "loss": 2.01,
1374
+ "step": 975
1375
+ },
1376
+ {
1377
+ "epoch": 3.656716417910448,
1378
+ "grad_norm": 1.769853115081787,
1379
+ "learning_rate": 8.38821316402946e-06,
1380
+ "loss": 1.9972,
1381
+ "step": 980
1382
+ },
1383
+ {
1384
+ "epoch": 3.675373134328358,
1385
+ "grad_norm": 1.7627779245376587,
1386
+ "learning_rate": 8.170352513480408e-06,
1387
+ "loss": 1.8508,
1388
+ "step": 985
1389
+ },
1390
+ {
1391
+ "epoch": 3.6940298507462686,
1392
+ "grad_norm": 1.8104168176651,
1393
+ "learning_rate": 7.954804461239053e-06,
1394
+ "loss": 1.994,
1395
+ "step": 990
1396
+ },
1397
+ {
1398
+ "epoch": 3.7126865671641793,
1399
+ "grad_norm": 1.6434147357940674,
1400
+ "learning_rate": 7.741598626230079e-06,
1401
+ "loss": 1.9354,
1402
+ "step": 995
1403
+ },
1404
+ {
1405
+ "epoch": 3.7313432835820897,
1406
+ "grad_norm": 1.871103286743164,
1407
+ "learning_rate": 7.530764305528959e-06,
1408
+ "loss": 2.004,
1409
+ "step": 1000
1410
+ },
1411
+ {
1412
+ "epoch": 3.75,
1413
+ "grad_norm": 1.7736434936523438,
1414
+ "learning_rate": 7.3223304703363135e-06,
1415
+ "loss": 2.0723,
1416
+ "step": 1005
1417
+ },
1418
+ {
1419
+ "epoch": 3.7686567164179103,
1420
+ "grad_norm": 1.3735415935516357,
1421
+ "learning_rate": 7.116325761996817e-06,
1422
+ "loss": 1.9994,
1423
+ "step": 1010
1424
+ },
1425
+ {
1426
+ "epoch": 3.7873134328358207,
1427
+ "grad_norm": 1.5601403713226318,
1428
+ "learning_rate": 6.91277848806356e-06,
1429
+ "loss": 2.0251,
1430
+ "step": 1015
1431
+ },
1432
+ {
1433
+ "epoch": 3.8059701492537314,
1434
+ "grad_norm": 1.7761123180389404,
1435
+ "learning_rate": 6.711716618408281e-06,
1436
+ "loss": 1.9377,
1437
+ "step": 1020
1438
+ },
1439
+ {
1440
+ "epoch": 3.824626865671642,
1441
+ "grad_norm": 1.8018087148666382,
1442
+ "learning_rate": 6.513167781377885e-06,
1443
+ "loss": 1.9374,
1444
+ "step": 1025
1445
+ },
1446
+ {
1447
+ "epoch": 3.843283582089552,
1448
+ "grad_norm": 1.8684526681900024,
1449
+ "learning_rate": 6.317159259998073e-06,
1450
+ "loss": 1.8859,
1451
+ "step": 1030
1452
+ },
1453
+ {
1454
+ "epoch": 3.861940298507463,
1455
+ "grad_norm": 1.4688338041305542,
1456
+ "learning_rate": 6.123717988224237e-06,
1457
+ "loss": 1.8078,
1458
+ "step": 1035
1459
+ },
1460
+ {
1461
+ "epoch": 3.8805970149253732,
1462
+ "grad_norm": 1.566017508506775,
1463
+ "learning_rate": 5.932870547240454e-06,
1464
+ "loss": 1.82,
1465
+ "step": 1040
1466
+ },
1467
+ {
1468
+ "epoch": 3.8992537313432836,
1469
+ "grad_norm": 1.6024701595306396,
1470
+ "learning_rate": 5.74464316180689e-06,
1471
+ "loss": 1.9388,
1472
+ "step": 1045
1473
+ },
1474
+ {
1475
+ "epoch": 3.917910447761194,
1476
+ "grad_norm": 1.615678310394287,
1477
+ "learning_rate": 5.559061696656198e-06,
1478
+ "loss": 1.8754,
1479
+ "step": 1050
1480
+ },
1481
+ {
1482
+ "epoch": 3.9365671641791042,
1483
+ "grad_norm": 1.7218761444091797,
1484
+ "learning_rate": 5.37615165293942e-06,
1485
+ "loss": 1.8457,
1486
+ "step": 1055
1487
+ },
1488
+ {
1489
+ "epoch": 3.955223880597015,
1490
+ "grad_norm": 1.5426040887832642,
1491
+ "learning_rate": 5.1959381647217666e-06,
1492
+ "loss": 1.944,
1493
+ "step": 1060
1494
+ },
1495
+ {
1496
+ "epoch": 3.9738805970149254,
1497
+ "grad_norm": 1.607737421989441,
1498
+ "learning_rate": 5.018445995528931e-06,
1499
+ "loss": 1.8135,
1500
+ "step": 1065
1501
+ },
1502
+ {
1503
+ "epoch": 3.9925373134328357,
1504
+ "grad_norm": 1.6999655961990356,
1505
+ "learning_rate": 4.843699534944257e-06,
1506
+ "loss": 1.8664,
1507
+ "step": 1070
1508
+ },
1509
+ {
1510
+ "epoch": 4.0111940298507465,
1511
+ "grad_norm": 1.7595158815383911,
1512
+ "learning_rate": 4.671722795257327e-06,
1513
+ "loss": 1.8825,
1514
+ "step": 1075
1515
+ },
1516
+ {
1517
+ "epoch": 4.029850746268656,
1518
+ "grad_norm": 1.6721343994140625,
1519
+ "learning_rate": 4.502539408164386e-06,
1520
+ "loss": 1.7858,
1521
+ "step": 1080
1522
+ },
1523
+ {
1524
+ "epoch": 4.048507462686567,
1525
+ "grad_norm": 1.8451169729232788,
1526
+ "learning_rate": 4.336172621521034e-06,
1527
+ "loss": 1.9469,
1528
+ "step": 1085
1529
+ },
1530
+ {
1531
+ "epoch": 4.067164179104478,
1532
+ "grad_norm": 1.5332179069519043,
1533
+ "learning_rate": 4.1726452961477146e-06,
1534
+ "loss": 1.8153,
1535
+ "step": 1090
1536
+ },
1537
+ {
1538
+ "epoch": 4.085820895522388,
1539
+ "grad_norm": 1.5114800930023193,
1540
+ "learning_rate": 4.01197990268834e-06,
1541
+ "loss": 1.92,
1542
+ "step": 1095
1543
+ },
1544
+ {
1545
+ "epoch": 4.104477611940299,
1546
+ "grad_norm": 1.642686367034912,
1547
+ "learning_rate": 3.8541985185225645e-06,
1548
+ "loss": 1.9352,
1549
+ "step": 1100
1550
+ }
1551
+ ],
1552
+ "logging_steps": 5,
1553
+ "max_steps": 1340,
1554
+ "num_input_tokens_seen": 0,
1555
+ "num_train_epochs": 5,
1556
+ "save_steps": 100,
1557
+ "stateful_callbacks": {
1558
+ "TrainerControl": {
1559
+ "args": {
1560
+ "should_epoch_stop": false,
1561
+ "should_evaluate": false,
1562
+ "should_log": false,
1563
+ "should_save": true,
1564
+ "should_training_stop": false
1565
+ },
1566
+ "attributes": {}
1567
+ }
1568
+ },
1569
+ "total_flos": 1.1450083189999534e+18,
1570
+ "train_batch_size": 4,
1571
+ "trial_name": null,
1572
+ "trial_params": null
1573
+ }
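Each record in the `log_history` array above is one logging event (every 5 optimizer steps, per `logging_steps`), carrying the epoch, gradient norm, learning rate, and training loss at that point. The learning-rate column is consistent with a cosine schedule decaying from a peak of 5e-5 over the 1340 `max_steps`: for example, 0.5 × 5e-5 × (1 + cos(π · 690 / 1340)) ≈ 2.383e-5, matching the step-690 entry. Below is a minimal sketch, assuming the checkpoint directory has been downloaded locally, that extracts the loss curve and checks that cosine fit:

```python
import json
import math

# Load the Trainer state shown in the diff above.
with open("checkpoint-1100/trainer_state.json") as f:
    state = json.load(f)

# The peak LR is inferred from the logged values; it is not stored in this file.
peak_lr, max_steps = 5e-5, state["max_steps"]

for entry in state["log_history"]:
    # One record every `logging_steps` (5) optimizer steps.
    cosine_lr = 0.5 * peak_lr * (1 + math.cos(math.pi * entry["step"] / max_steps))
    print(f'step {entry["step"]:>4}  epoch {entry["epoch"]:.2f}  '
          f'loss {entry["loss"]:.4f}  lr {entry["learning_rate"]:.3e}  '
          f'(cosine fit {cosine_lr:.3e})')
```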
checkpoint-1100/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67f11670c2c9e329be00355fdb1e7f6d48dffafc103eefbd6bc3474fa9f6e67c
+ size 5304
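Like the other binary files in this upload, `training_args.bin` is committed as a Git LFS pointer: the three lines above record the pointer spec version, the SHA-256 of the payload, and its size in bytes, while the binary itself lives in LFS storage. A small sketch of parsing such a pointer, assuming you are reading the raw pointer text as committed (what a plain `git clone` without `git lfs` checks out):

```python
# Parse a Git LFS pointer file (one "key value" pair per line) into a dict.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

meta = parse_lfs_pointer("checkpoint-1100/training_args.bin")
print(meta["oid"])        # sha256:67f11670c2c9...
print(int(meta["size"]))  # 5304
```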
checkpoint-1100/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-1200/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ library_name: peft
+ base_model: Qwen/Qwen1.5-7B-Chat
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.11.1
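The card's "How to Get Started with the Model" section above is still a placeholder, so here is a minimal sketch of loading this checkpoint: attach the LoRA adapter to the base model named in the adapter_config.json below. The local path `checkpoint-1200` is an assumption; point it at wherever the adapter files were downloaded.

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "Qwen/Qwen1.5-7B-Chat"  # base_model from adapter_config.json
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")

# "checkpoint-1200" is assumed to be the locally downloaded adapter directory.
model = PeftModel.from_pretrained(base, "checkpoint-1200")

inputs = tokenizer("Hello, Traveler!", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```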
checkpoint-1200/adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "Qwen/Qwen1.5-7B-Chat",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 32,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 16,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
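For reference, this JSON is what PEFT serializes from a `LoraConfig`: a rank-16 LoRA with alpha 32 and no dropout on the attention query/value projections, trained as a causal-LM task. A sketch of the equivalent construction (the keyword names map one-to-one onto the keys above):

```python
from peft import LoraConfig

# Equivalent of the adapter_config.json above; the remaining fields keep
# their PEFT defaults.
config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.0,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```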
checkpoint-1200/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b08f126d206f3994c062dc74fa06dd6dbf964894c90161aac9de2ea7b05b6cc
+ size 33571624