End of training

- README.md +17 -32
- adapter_config.json +4 -4
- last-checkpoint/adapter_config.json +4 -4
- last-checkpoint/optimizer.pt +2 -2
- last-checkpoint/rng_state.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +51 -689
- last-checkpoint/training_args.bin +1 -1
- training_args.bin +1 -1
README.md
CHANGED
@@ -6,7 +6,7 @@ tags:
 - axolotl
 - generated_from_trainer
 model-index:
-- name:
+- name: 93c52fbb-53ce-4004-8ccc-12e99bc530fe
 results: []
 ---
 
@@ -18,12 +18,6 @@ should probably proofread and complete it, then remove this comment. -->
 
 axolotl version: `0.4.1`
 ```yaml
-accelerate_config:
-dynamo_backend: inductor
-mixed_precision: bf16
-num_machines: 1
-num_processes: auto
-use_cpu: false
 adapter: lora
 base_model: fxmarty/tiny-random-GemmaForCausalLM
 bf16: auto
@@ -44,7 +38,6 @@ datasets:
 system_prompt: ''
 debug: null
 deepspeed: null
-device_map: auto
 early_stopping_patience: null
 eval_max_new_tokens: 128
 eval_table_size: null
@@ -53,14 +46,16 @@ flash_attention: false
 fp16: null
 fsdp: null
 fsdp_config: null
-gradient_accumulation_steps:
-gradient_checkpointing:
+gradient_accumulation_steps: 4
+gradient_checkpointing: false
 group_by_length: false
 hub_model_id: null
 hub_repo: null
 hub_strategy: checkpoint
 hub_token: null
-learning_rate: 0.
+learning_rate: 0.0002
+load_in_4bit: false
+load_in_8bit: false
 local_rank: null
 logging_steps: 1
 lora_alpha: 16
@@ -69,13 +64,8 @@ lora_fan_in_fan_out: null
 lora_model_dir: null
 lora_r: 8
 lora_target_linear: true
-lora_target_modules:
-- q_proj
-- v_proj
 lr_scheduler: cosine
-
-0: 70GiB
-max_steps: 100
+max_steps: 10
 micro_batch_size: 2
 mlflow_experiment_name: /tmp/9845ef35490c4ed8_train_data.json
 model_type: AutoModelForCausalLM
@@ -83,9 +73,6 @@ num_epochs: 1
 optimizer: adamw_bnb_8bit
 output_dir: miner_id_24
 pad_to_sequence_len: true
-quantization_config:
-llm_int8_enable_fp32_cpu_offload: true
-load_in_8bit: true
 resume_from_checkpoint: null
 s2_attention: null
 sample_packing: false
@@ -94,14 +81,13 @@ sequence_len: 512
 strict: false
 tf32: false
 tokenizer_type: AutoTokenizer
-torch_compile: true
 train_on_inputs: false
 trust_remote_code: true
 val_set_size: 0.05
 wandb_entity: null
 wandb_mode: online
 wandb_name: 922da73d-0a26-4f27-a883-0f02b8fbbb93
-wandb_project: Gradients-On-Demand
+wandb_project: Birthday-SN56-11-Gradients-On-Demand
 wandb_run: your_name
 wandb_runid: 922da73d-0a26-4f27-a883-0f02b8fbbb93
 warmup_steps: 10
@@ -112,7 +98,7 @@ xformers_attention: null
 
 </details><br>
 
-#
+# 93c52fbb-53ce-4004-8ccc-12e99bc530fe
 
 This model is a fine-tuned version of [fxmarty/tiny-random-GemmaForCausalLM](https://huggingface.co/fxmarty/tiny-random-GemmaForCausalLM) on the None dataset.
 It achieves the following results on the evaluation set:
@@ -135,26 +121,25 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.
+- learning_rate: 0.0002
 - train_batch_size: 2
 - eval_batch_size: 2
 - seed: 42
-- gradient_accumulation_steps:
-- total_train_batch_size:
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 8
 - optimizer: Use OptimizerNames.ADAMW_BNB with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: cosine
 - lr_scheduler_warmup_steps: 10
-- training_steps:
+- training_steps: 10
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:------:|:----:|:---------------:|
-| 0.0 | 0.
-| 0.0 | 0.
-| 0.0 | 0.
-| 0.0 | 0.
-| 0.0 | 0.0450 | 100 | nan |
+| 0.0 | 0.0001 | 1 | nan |
+| 0.0 | 0.0003 | 3 | nan |
+| 0.0 | 0.0007 | 6 | nan |
+| 0.0 | 0.0010 | 9 | nan |
 
 
 ### Framework versions
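The card above documents a LoRA adapter (lora_r 8, lora_alpha 16) trained for 10 steps on top of `fxmarty/tiny-random-GemmaForCausalLM`; the effective train batch size of 8 is simply micro_batch_size 2 × gradient_accumulation_steps 4. As a rough illustration that is not part of the original card, loading such an adapter with `peft` might look like the sketch below; the adapter path is a placeholder for a local checkout of this repository.

```python
# Minimal sketch (assumptions: peft + transformers installed, adapter files
# from this repo available at a local path; the path below is hypothetical).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "fxmarty/tiny-random-GemmaForCausalLM"
adapter_path = "./"  # placeholder: local checkout of this repository

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)
model = PeftModel.from_pretrained(base_model, adapter_path)  # attaches the LoRA weights

inputs = tokenizer("Hello", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=8)[0]))
```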
adapter_config.json
CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
     "q_proj",
-    "
+    "down_proj",
+    "k_proj",
     "v_proj",
+    "gate_proj",
     "up_proj",
-    "o_proj"
-    "down_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
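The updated `target_modules` list above covers every linear projection in the Gemma decoder blocks (attention q/k/v/o plus the MLP gate/up/down), which is what `lora_target_linear: true` in the training config requests. A hedged sketch, not taken from the repo, of the equivalent `peft` configuration with r and alpha from the README config:

```python
# Sketch only: mirrors adapter_config.json's target modules; r/lora_alpha come
# from the training config shown in the README (lora_r: 8, lora_alpha: 16).
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    task_type="CAUSAL_LM",
)
```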
last-checkpoint/adapter_config.json
CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
     "q_proj",
-    "
+    "down_proj",
+    "k_proj",
     "v_proj",
+    "gate_proj",
     "up_proj",
-    "o_proj"
-    "down_proj"
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:82fedc521b22f7161ab3611d76e7c81304b7cc24627d50a72854bbfbae2f8dda
+size 26562
last-checkpoint/rng_state.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d53389285e1a574b35c681421511ed4fd865190d0dbcc7ca577aba5847d929e5
 size 14244
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
 size 1064
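The optimizer, RNG, and scheduler checkpoints above are stored as Git LFS pointer stubs: three text lines carrying the spec version, the sha256 of the real binary, and its size in bytes. A small illustrative sketch, not from the original commit, of parsing such a pointer file:

```python
# Sketch: split a Git LFS pointer file ("key value" per line) into a dict.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

pointer = read_lfs_pointer("last-checkpoint/scheduler.pt")
print(pointer.get("oid"), pointer.get("size"))  # e.g. sha256:... 1064
```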
last-checkpoint/trainer_state.json
CHANGED
@@ -1,759 +1,121 @@
|
|
1 |
{
|
2 |
"best_metric": null,
|
3 |
"best_model_checkpoint": null,
|
4 |
-
"epoch": 0.
|
5 |
-
"eval_steps":
|
6 |
-
"global_step":
|
7 |
"is_hyper_param_search": false,
|
8 |
"is_local_process_zero": true,
|
9 |
"is_world_process_zero": true,
|
10 |
"log_history": [
|
11 |
{
|
12 |
-
"epoch": 0.
|
13 |
"grad_norm": NaN,
|
14 |
-
"learning_rate":
|
15 |
"loss": 0.0,
|
16 |
"step": 1
|
17 |
},
|
18 |
{
|
19 |
-
"epoch": 0.
|
20 |
"eval_loss": NaN,
|
21 |
-
"eval_runtime":
|
22 |
-
"eval_samples_per_second":
|
23 |
-
"eval_steps_per_second":
|
24 |
"step": 1
|
25 |
},
|
26 |
{
|
27 |
-
"epoch": 0.
|
28 |
-
"grad_norm": NaN,
|
29 |
-
"learning_rate": 2e-05,
|
30 |
-
"loss": 0.0,
|
31 |
-
"step": 2
|
32 |
-
},
|
33 |
-
{
|
34 |
-
"epoch": 0.001348807148677888,
|
35 |
-
"grad_norm": NaN,
|
36 |
-
"learning_rate": 3e-05,
|
37 |
-
"loss": 0.0,
|
38 |
-
"step": 3
|
39 |
-
},
|
40 |
-
{
|
41 |
-
"epoch": 0.0017984095315705174,
|
42 |
"grad_norm": NaN,
|
43 |
"learning_rate": 4e-05,
|
44 |
"loss": 0.0,
|
45 |
-
"step":
|
46 |
-
},
|
47 |
-
{
|
48 |
-
"epoch": 0.0022480119144631465,
|
49 |
-
"grad_norm": NaN,
|
50 |
-
"learning_rate": 5e-05,
|
51 |
-
"loss": 0.0,
|
52 |
-
"step": 5
|
53 |
},
|
54 |
{
|
55 |
-
"epoch": 0.
|
56 |
"grad_norm": NaN,
|
57 |
"learning_rate": 6e-05,
|
58 |
"loss": 0.0,
|
59 |
-
"step":
|
60 |
},
|
61 |
{
|
62 |
-
"epoch": 0.
|
63 |
-
"
|
64 |
-
"
|
65 |
-
"
|
66 |
-
"
|
|
|
67 |
},
|
68 |
{
|
69 |
-
"epoch": 0.
|
70 |
"grad_norm": NaN,
|
71 |
"learning_rate": 8e-05,
|
72 |
"loss": 0.0,
|
73 |
-
"step":
|
74 |
-
},
|
75 |
-
{
|
76 |
-
"epoch": 0.004046421446033664,
|
77 |
-
"grad_norm": NaN,
|
78 |
-
"learning_rate": 9e-05,
|
79 |
-
"loss": 0.0,
|
80 |
-
"step": 9
|
81 |
},
|
82 |
{
|
83 |
-
"epoch": 0.
|
84 |
"grad_norm": NaN,
|
85 |
"learning_rate": 0.0001,
|
86 |
"loss": 0.0,
|
87 |
-
"step":
|
88 |
-
},
|
89 |
-
{
|
90 |
-
"epoch": 0.0049456262118189225,
|
91 |
-
"grad_norm": NaN,
|
92 |
-
"learning_rate": 9.99695413509548e-05,
|
93 |
-
"loss": 0.0,
|
94 |
-
"step": 11
|
95 |
-
},
|
96 |
-
{
|
97 |
-
"epoch": 0.005395228594711552,
|
98 |
-
"grad_norm": NaN,
|
99 |
-
"learning_rate": 9.987820251299122e-05,
|
100 |
-
"loss": 0.0,
|
101 |
-
"step": 12
|
102 |
-
},
|
103 |
-
{
|
104 |
-
"epoch": 0.005844830977604181,
|
105 |
-
"grad_norm": NaN,
|
106 |
-
"learning_rate": 9.972609476841367e-05,
|
107 |
-
"loss": 0.0,
|
108 |
-
"step": 13
|
109 |
-
},
|
110 |
-
{
|
111 |
-
"epoch": 0.006294433360496811,
|
112 |
-
"grad_norm": NaN,
|
113 |
-
"learning_rate": 9.951340343707852e-05,
|
114 |
-
"loss": 0.0,
|
115 |
-
"step": 14
|
116 |
-
},
|
117 |
-
{
|
118 |
-
"epoch": 0.00674403574338944,
|
119 |
-
"grad_norm": NaN,
|
120 |
-
"learning_rate": 9.924038765061042e-05,
|
121 |
-
"loss": 0.0,
|
122 |
-
"step": 15
|
123 |
-
},
|
124 |
-
{
|
125 |
-
"epoch": 0.0071936381262820695,
|
126 |
-
"grad_norm": NaN,
|
127 |
-
"learning_rate": 9.890738003669029e-05,
|
128 |
-
"loss": 0.0,
|
129 |
-
"step": 16
|
130 |
-
},
|
131 |
-
{
|
132 |
-
"epoch": 0.007643240509174699,
|
133 |
-
"grad_norm": NaN,
|
134 |
-
"learning_rate": 9.851478631379982e-05,
|
135 |
-
"loss": 0.0,
|
136 |
-
"step": 17
|
137 |
-
},
|
138 |
-
{
|
139 |
-
"epoch": 0.008092842892067327,
|
140 |
-
"grad_norm": NaN,
|
141 |
-
"learning_rate": 9.806308479691595e-05,
|
142 |
-
"loss": 0.0,
|
143 |
-
"step": 18
|
144 |
-
},
|
145 |
-
{
|
146 |
-
"epoch": 0.008542445274959957,
|
147 |
-
"grad_norm": NaN,
|
148 |
-
"learning_rate": 9.755282581475769e-05,
|
149 |
-
"loss": 0.0,
|
150 |
-
"step": 19
|
151 |
-
},
|
152 |
-
{
|
153 |
-
"epoch": 0.008992047657852586,
|
154 |
-
"grad_norm": NaN,
|
155 |
-
"learning_rate": 9.698463103929542e-05,
|
156 |
-
"loss": 0.0,
|
157 |
-
"step": 20
|
158 |
-
},
|
159 |
-
{
|
160 |
-
"epoch": 0.009441650040745216,
|
161 |
-
"grad_norm": NaN,
|
162 |
-
"learning_rate": 9.635919272833938e-05,
|
163 |
-
"loss": 0.0,
|
164 |
-
"step": 21
|
165 |
-
},
|
166 |
-
{
|
167 |
-
"epoch": 0.009891252423637845,
|
168 |
-
"grad_norm": NaN,
|
169 |
-
"learning_rate": 9.567727288213005e-05,
|
170 |
-
"loss": 0.0,
|
171 |
-
"step": 22
|
172 |
-
},
|
173 |
-
{
|
174 |
-
"epoch": 0.010340854806530474,
|
175 |
-
"grad_norm": NaN,
|
176 |
-
"learning_rate": 9.493970231495835e-05,
|
177 |
-
"loss": 0.0,
|
178 |
-
"step": 23
|
179 |
-
},
|
180 |
-
{
|
181 |
-
"epoch": 0.010790457189423104,
|
182 |
-
"grad_norm": NaN,
|
183 |
-
"learning_rate": 9.414737964294636e-05,
|
184 |
-
"loss": 0.0,
|
185 |
-
"step": 24
|
186 |
-
},
|
187 |
-
{
|
188 |
-
"epoch": 0.011240059572315733,
|
189 |
-
"grad_norm": NaN,
|
190 |
-
"learning_rate": 9.330127018922194e-05,
|
191 |
-
"loss": 0.0,
|
192 |
-
"step": 25
|
193 |
-
},
|
194 |
-
{
|
195 |
-
"epoch": 0.011240059572315733,
|
196 |
-
"eval_loss": NaN,
|
197 |
-
"eval_runtime": 13.6049,
|
198 |
-
"eval_samples_per_second": 275.415,
|
199 |
-
"eval_steps_per_second": 137.744,
|
200 |
-
"step": 25
|
201 |
-
},
|
202 |
-
{
|
203 |
-
"epoch": 0.011689661955208363,
|
204 |
-
"grad_norm": NaN,
|
205 |
-
"learning_rate": 9.24024048078213e-05,
|
206 |
-
"loss": 0.0,
|
207 |
-
"step": 26
|
208 |
-
},
|
209 |
-
{
|
210 |
-
"epoch": 0.012139264338100992,
|
211 |
-
"grad_norm": NaN,
|
212 |
-
"learning_rate": 9.145187862775209e-05,
|
213 |
-
"loss": 0.0,
|
214 |
-
"step": 27
|
215 |
-
},
|
216 |
-
{
|
217 |
-
"epoch": 0.012588866720993621,
|
218 |
-
"grad_norm": NaN,
|
219 |
-
"learning_rate": 9.045084971874738e-05,
|
220 |
-
"loss": 0.0,
|
221 |
-
"step": 28
|
222 |
-
},
|
223 |
-
{
|
224 |
-
"epoch": 0.01303846910388625,
|
225 |
-
"grad_norm": NaN,
|
226 |
-
"learning_rate": 8.940053768033609e-05,
|
227 |
-
"loss": 0.0,
|
228 |
-
"step": 29
|
229 |
-
},
|
230 |
-
{
|
231 |
-
"epoch": 0.01348807148677888,
|
232 |
-
"grad_norm": NaN,
|
233 |
-
"learning_rate": 8.83022221559489e-05,
|
234 |
-
"loss": 0.0,
|
235 |
-
"step": 30
|
236 |
-
},
|
237 |
-
{
|
238 |
-
"epoch": 0.01393767386967151,
|
239 |
-
"grad_norm": NaN,
|
240 |
-
"learning_rate": 8.715724127386972e-05,
|
241 |
-
"loss": 0.0,
|
242 |
-
"step": 31
|
243 |
-
},
|
244 |
-
{
|
245 |
-
"epoch": 0.014387276252564139,
|
246 |
-
"grad_norm": NaN,
|
247 |
-
"learning_rate": 8.596699001693255e-05,
|
248 |
-
"loss": 0.0,
|
249 |
-
"step": 32
|
250 |
-
},
|
251 |
-
{
|
252 |
-
"epoch": 0.014836878635456768,
|
253 |
-
"grad_norm": NaN,
|
254 |
-
"learning_rate": 8.473291852294987e-05,
|
255 |
-
"loss": 0.0,
|
256 |
-
"step": 33
|
257 |
-
},
|
258 |
-
{
|
259 |
-
"epoch": 0.015286481018349398,
|
260 |
-
"grad_norm": NaN,
|
261 |
-
"learning_rate": 8.345653031794292e-05,
|
262 |
-
"loss": 0.0,
|
263 |
-
"step": 34
|
264 |
-
},
|
265 |
-
{
|
266 |
-
"epoch": 0.015736083401242027,
|
267 |
-
"grad_norm": NaN,
|
268 |
-
"learning_rate": 8.213938048432697e-05,
|
269 |
-
"loss": 0.0,
|
270 |
-
"step": 35
|
271 |
-
},
|
272 |
-
{
|
273 |
-
"epoch": 0.016185685784134655,
|
274 |
-
"grad_norm": NaN,
|
275 |
-
"learning_rate": 8.07830737662829e-05,
|
276 |
-
"loss": 0.0,
|
277 |
-
"step": 36
|
278 |
-
},
|
279 |
-
{
|
280 |
-
"epoch": 0.016635288167027286,
|
281 |
-
"grad_norm": NaN,
|
282 |
-
"learning_rate": 7.938926261462366e-05,
|
283 |
-
"loss": 0.0,
|
284 |
-
"step": 37
|
285 |
-
},
|
286 |
-
{
|
287 |
-
"epoch": 0.017084890549919914,
|
288 |
-
"grad_norm": NaN,
|
289 |
-
"learning_rate": 7.795964517353735e-05,
|
290 |
-
"loss": 0.0,
|
291 |
-
"step": 38
|
292 |
-
},
|
293 |
-
{
|
294 |
-
"epoch": 0.017534492932812545,
|
295 |
-
"grad_norm": NaN,
|
296 |
-
"learning_rate": 7.649596321166024e-05,
|
297 |
-
"loss": 0.0,
|
298 |
-
"step": 39
|
299 |
-
},
|
300 |
-
{
|
301 |
-
"epoch": 0.017984095315705172,
|
302 |
-
"grad_norm": NaN,
|
303 |
-
"learning_rate": 7.500000000000001e-05,
|
304 |
-
"loss": 0.0,
|
305 |
-
"step": 40
|
306 |
-
},
|
307 |
-
{
|
308 |
-
"epoch": 0.018433697698597803,
|
309 |
-
"grad_norm": NaN,
|
310 |
-
"learning_rate": 7.347357813929454e-05,
|
311 |
-
"loss": 0.0,
|
312 |
-
"step": 41
|
313 |
-
},
|
314 |
-
{
|
315 |
-
"epoch": 0.01888330008149043,
|
316 |
-
"grad_norm": NaN,
|
317 |
-
"learning_rate": 7.191855733945387e-05,
|
318 |
-
"loss": 0.0,
|
319 |
-
"step": 42
|
320 |
-
},
|
321 |
-
{
|
322 |
-
"epoch": 0.019332902464383062,
|
323 |
-
"grad_norm": NaN,
|
324 |
-
"learning_rate": 7.033683215379002e-05,
|
325 |
-
"loss": 0.0,
|
326 |
-
"step": 43
|
327 |
-
},
|
328 |
-
{
|
329 |
-
"epoch": 0.01978250484727569,
|
330 |
-
"grad_norm": NaN,
|
331 |
-
"learning_rate": 6.873032967079561e-05,
|
332 |
-
"loss": 0.0,
|
333 |
-
"step": 44
|
334 |
-
},
|
335 |
-
{
|
336 |
-
"epoch": 0.02023210723016832,
|
337 |
-
"grad_norm": NaN,
|
338 |
-
"learning_rate": 6.710100716628344e-05,
|
339 |
-
"loss": 0.0,
|
340 |
-
"step": 45
|
341 |
-
},
|
342 |
-
{
|
343 |
-
"epoch": 0.02068170961306095,
|
344 |
-
"grad_norm": NaN,
|
345 |
-
"learning_rate": 6.545084971874738e-05,
|
346 |
-
"loss": 0.0,
|
347 |
-
"step": 46
|
348 |
-
},
|
349 |
-
{
|
350 |
-
"epoch": 0.02113131199595358,
|
351 |
-
"grad_norm": NaN,
|
352 |
-
"learning_rate": 6.378186779084995e-05,
|
353 |
-
"loss": 0.0,
|
354 |
-
"step": 47
|
355 |
-
},
|
356 |
-
{
|
357 |
-
"epoch": 0.021580914378846208,
|
358 |
-
"grad_norm": NaN,
|
359 |
-
"learning_rate": 6.209609477998338e-05,
|
360 |
-
"loss": 0.0,
|
361 |
-
"step": 48
|
362 |
-
},
|
363 |
-
{
|
364 |
-
"epoch": 0.02203051676173884,
|
365 |
-
"grad_norm": NaN,
|
366 |
-
"learning_rate": 6.0395584540887963e-05,
|
367 |
-
"loss": 0.0,
|
368 |
-
"step": 49
|
369 |
},
|
370 |
{
|
371 |
-
"epoch": 0.
|
372 |
"grad_norm": NaN,
|
373 |
-
"learning_rate":
|
374 |
"loss": 0.0,
|
375 |
-
"step":
|
376 |
},
|
377 |
{
|
378 |
-
"epoch": 0.
|
379 |
"eval_loss": NaN,
|
380 |
-
"eval_runtime":
|
381 |
-
"eval_samples_per_second":
|
382 |
-
"eval_steps_per_second":
|
383 |
-
"step":
|
384 |
-
},
|
385 |
-
{
|
386 |
-
"epoch": 0.022929721527524097,
|
387 |
-
"grad_norm": NaN,
|
388 |
-
"learning_rate": 5.695865504800327e-05,
|
389 |
-
"loss": 0.0,
|
390 |
-
"step": 51
|
391 |
-
},
|
392 |
-
{
|
393 |
-
"epoch": 0.023379323910416725,
|
394 |
-
"grad_norm": NaN,
|
395 |
-
"learning_rate": 5.522642316338268e-05,
|
396 |
-
"loss": 0.0,
|
397 |
-
"step": 52
|
398 |
-
},
|
399 |
-
{
|
400 |
-
"epoch": 0.023828926293309356,
|
401 |
-
"grad_norm": NaN,
|
402 |
-
"learning_rate": 5.348782368720626e-05,
|
403 |
-
"loss": 0.0,
|
404 |
-
"step": 53
|
405 |
-
},
|
406 |
-
{
|
407 |
-
"epoch": 0.024278528676201984,
|
408 |
-
"grad_norm": NaN,
|
409 |
-
"learning_rate": 5.174497483512506e-05,
|
410 |
-
"loss": 0.0,
|
411 |
-
"step": 54
|
412 |
-
},
|
413 |
-
{
|
414 |
-
"epoch": 0.02472813105909461,
|
415 |
-
"grad_norm": NaN,
|
416 |
-
"learning_rate": 5e-05,
|
417 |
-
"loss": 0.0,
|
418 |
-
"step": 55
|
419 |
-
},
|
420 |
-
{
|
421 |
-
"epoch": 0.025177733441987243,
|
422 |
-
"grad_norm": NaN,
|
423 |
-
"learning_rate": 4.825502516487497e-05,
|
424 |
-
"loss": 0.0,
|
425 |
-
"step": 56
|
426 |
-
},
|
427 |
-
{
|
428 |
-
"epoch": 0.02562733582487987,
|
429 |
-
"grad_norm": NaN,
|
430 |
-
"learning_rate": 4.6512176312793736e-05,
|
431 |
-
"loss": 0.0,
|
432 |
-
"step": 57
|
433 |
-
},
|
434 |
-
{
|
435 |
-
"epoch": 0.0260769382077725,
|
436 |
-
"grad_norm": NaN,
|
437 |
-
"learning_rate": 4.477357683661734e-05,
|
438 |
-
"loss": 0.0,
|
439 |
-
"step": 58
|
440 |
-
},
|
441 |
-
{
|
442 |
-
"epoch": 0.02652654059066513,
|
443 |
-
"grad_norm": NaN,
|
444 |
-
"learning_rate": 4.3041344951996746e-05,
|
445 |
-
"loss": 0.0,
|
446 |
-
"step": 59
|
447 |
-
},
|
448 |
-
{
|
449 |
-
"epoch": 0.02697614297355776,
|
450 |
-
"grad_norm": NaN,
|
451 |
-
"learning_rate": 4.131759111665349e-05,
|
452 |
-
"loss": 0.0,
|
453 |
-
"step": 60
|
454 |
-
},
|
455 |
-
{
|
456 |
-
"epoch": 0.027425745356450388,
|
457 |
-
"grad_norm": NaN,
|
458 |
-
"learning_rate": 3.960441545911204e-05,
|
459 |
-
"loss": 0.0,
|
460 |
-
"step": 61
|
461 |
-
},
|
462 |
-
{
|
463 |
-
"epoch": 0.02787534773934302,
|
464 |
-
"grad_norm": NaN,
|
465 |
-
"learning_rate": 3.790390522001662e-05,
|
466 |
-
"loss": 0.0,
|
467 |
-
"step": 62
|
468 |
-
},
|
469 |
-
{
|
470 |
-
"epoch": 0.028324950122235647,
|
471 |
-
"grad_norm": NaN,
|
472 |
-
"learning_rate": 3.6218132209150045e-05,
|
473 |
-
"loss": 0.0,
|
474 |
-
"step": 63
|
475 |
-
},
|
476 |
-
{
|
477 |
-
"epoch": 0.028774552505128278,
|
478 |
-
"grad_norm": NaN,
|
479 |
-
"learning_rate": 3.4549150281252636e-05,
|
480 |
-
"loss": 0.0,
|
481 |
-
"step": 64
|
482 |
-
},
|
483 |
-
{
|
484 |
-
"epoch": 0.029224154888020906,
|
485 |
-
"grad_norm": NaN,
|
486 |
-
"learning_rate": 3.289899283371657e-05,
|
487 |
-
"loss": 0.0,
|
488 |
-
"step": 65
|
489 |
-
},
|
490 |
-
{
|
491 |
-
"epoch": 0.029673757270913537,
|
492 |
-
"grad_norm": NaN,
|
493 |
-
"learning_rate": 3.12696703292044e-05,
|
494 |
-
"loss": 0.0,
|
495 |
-
"step": 66
|
496 |
-
},
|
497 |
-
{
|
498 |
-
"epoch": 0.030123359653806164,
|
499 |
-
"grad_norm": NaN,
|
500 |
-
"learning_rate": 2.9663167846209998e-05,
|
501 |
-
"loss": 0.0,
|
502 |
-
"step": 67
|
503 |
-
},
|
504 |
-
{
|
505 |
-
"epoch": 0.030572962036698795,
|
506 |
-
"grad_norm": NaN,
|
507 |
-
"learning_rate": 2.8081442660546125e-05,
|
508 |
-
"loss": 0.0,
|
509 |
-
"step": 68
|
510 |
-
},
|
511 |
-
{
|
512 |
-
"epoch": 0.031022564419591423,
|
513 |
-
"grad_norm": NaN,
|
514 |
-
"learning_rate": 2.6526421860705473e-05,
|
515 |
-
"loss": 0.0,
|
516 |
-
"step": 69
|
517 |
-
},
|
518 |
-
{
|
519 |
-
"epoch": 0.031472166802484054,
|
520 |
-
"grad_norm": NaN,
|
521 |
-
"learning_rate": 2.500000000000001e-05,
|
522 |
-
"loss": 0.0,
|
523 |
-
"step": 70
|
524 |
-
},
|
525 |
-
{
|
526 |
-
"epoch": 0.03192176918537668,
|
527 |
-
"grad_norm": NaN,
|
528 |
-
"learning_rate": 2.350403678833976e-05,
|
529 |
-
"loss": 0.0,
|
530 |
-
"step": 71
|
531 |
-
},
|
532 |
-
{
|
533 |
-
"epoch": 0.03237137156826931,
|
534 |
-
"grad_norm": NaN,
|
535 |
-
"learning_rate": 2.2040354826462668e-05,
|
536 |
-
"loss": 0.0,
|
537 |
-
"step": 72
|
538 |
},
|
539 |
{
|
540 |
-
"epoch": 0.
|
541 |
"grad_norm": NaN,
|
542 |
-
"learning_rate":
|
543 |
"loss": 0.0,
|
544 |
-
"step":
|
545 |
},
|
546 |
{
|
547 |
-
"epoch": 0.
|
548 |
"grad_norm": NaN,
|
549 |
-
"learning_rate":
|
550 |
"loss": 0.0,
|
551 |
-
"step":
|
552 |
},
|
553 |
{
|
554 |
-
"epoch": 0.
|
555 |
"grad_norm": NaN,
|
556 |
-
"learning_rate":
|
557 |
"loss": 0.0,
|
558 |
-
"step":
|
559 |
},
|
560 |
{
|
561 |
-
"epoch": 0.
|
562 |
"eval_loss": NaN,
|
563 |
-
"eval_runtime":
|
564 |
-
"eval_samples_per_second":
|
565 |
-
"eval_steps_per_second":
|
566 |
-
"step":
|
567 |
-
},
|
568 |
-
{
|
569 |
-
"epoch": 0.03416978109983983,
|
570 |
-
"grad_norm": NaN,
|
571 |
-
"learning_rate": 1.6543469682057106e-05,
|
572 |
-
"loss": 0.0,
|
573 |
-
"step": 76
|
574 |
-
},
|
575 |
-
{
|
576 |
-
"epoch": 0.03461938348273246,
|
577 |
-
"grad_norm": NaN,
|
578 |
-
"learning_rate": 1.526708147705013e-05,
|
579 |
-
"loss": 0.0,
|
580 |
-
"step": 77
|
581 |
-
},
|
582 |
-
{
|
583 |
-
"epoch": 0.03506898586562509,
|
584 |
-
"grad_norm": NaN,
|
585 |
-
"learning_rate": 1.4033009983067452e-05,
|
586 |
-
"loss": 0.0,
|
587 |
-
"step": 78
|
588 |
-
},
|
589 |
-
{
|
590 |
-
"epoch": 0.03551858824851772,
|
591 |
-
"grad_norm": NaN,
|
592 |
-
"learning_rate": 1.2842758726130283e-05,
|
593 |
-
"loss": 0.0,
|
594 |
-
"step": 79
|
595 |
-
},
|
596 |
-
{
|
597 |
-
"epoch": 0.035968190631410345,
|
598 |
-
"grad_norm": NaN,
|
599 |
-
"learning_rate": 1.1697777844051105e-05,
|
600 |
-
"loss": 0.0,
|
601 |
-
"step": 80
|
602 |
-
},
|
603 |
-
{
|
604 |
-
"epoch": 0.03641779301430297,
|
605 |
-
"grad_norm": NaN,
|
606 |
-
"learning_rate": 1.0599462319663905e-05,
|
607 |
-
"loss": 0.0,
|
608 |
-
"step": 81
|
609 |
-
},
|
610 |
-
{
|
611 |
-
"epoch": 0.03686739539719561,
|
612 |
-
"grad_norm": NaN,
|
613 |
-
"learning_rate": 9.549150281252633e-06,
|
614 |
-
"loss": 0.0,
|
615 |
-
"step": 82
|
616 |
-
},
|
617 |
-
{
|
618 |
-
"epoch": 0.037316997780088235,
|
619 |
-
"grad_norm": NaN,
|
620 |
-
"learning_rate": 8.548121372247918e-06,
|
621 |
-
"loss": 0.0,
|
622 |
-
"step": 83
|
623 |
-
},
|
624 |
-
{
|
625 |
-
"epoch": 0.03776660016298086,
|
626 |
-
"grad_norm": NaN,
|
627 |
-
"learning_rate": 7.597595192178702e-06,
|
628 |
-
"loss": 0.0,
|
629 |
-
"step": 84
|
630 |
-
},
|
631 |
-
{
|
632 |
-
"epoch": 0.03821620254587349,
|
633 |
-
"grad_norm": NaN,
|
634 |
-
"learning_rate": 6.698729810778065e-06,
|
635 |
-
"loss": 0.0,
|
636 |
-
"step": 85
|
637 |
-
},
|
638 |
-
{
|
639 |
-
"epoch": 0.038665804928766125,
|
640 |
-
"grad_norm": NaN,
|
641 |
-
"learning_rate": 5.852620357053651e-06,
|
642 |
-
"loss": 0.0,
|
643 |
-
"step": 86
|
644 |
-
},
|
645 |
-
{
|
646 |
-
"epoch": 0.03911540731165875,
|
647 |
-
"grad_norm": NaN,
|
648 |
-
"learning_rate": 5.060297685041659e-06,
|
649 |
-
"loss": 0.0,
|
650 |
-
"step": 87
|
651 |
-
},
|
652 |
-
{
|
653 |
-
"epoch": 0.03956500969455138,
|
654 |
-
"grad_norm": NaN,
|
655 |
-
"learning_rate": 4.322727117869951e-06,
|
656 |
-
"loss": 0.0,
|
657 |
-
"step": 88
|
658 |
-
},
|
659 |
-
{
|
660 |
-
"epoch": 0.04001461207744401,
|
661 |
-
"grad_norm": NaN,
|
662 |
-
"learning_rate": 3.6408072716606346e-06,
|
663 |
-
"loss": 0.0,
|
664 |
-
"step": 89
|
665 |
-
},
|
666 |
-
{
|
667 |
-
"epoch": 0.04046421446033664,
|
668 |
-
"grad_norm": NaN,
|
669 |
-
"learning_rate": 3.0153689607045845e-06,
|
670 |
-
"loss": 0.0,
|
671 |
-
"step": 90
|
672 |
-
},
|
673 |
-
{
|
674 |
-
"epoch": 0.04091381684322927,
|
675 |
-
"grad_norm": NaN,
|
676 |
-
"learning_rate": 2.4471741852423237e-06,
|
677 |
-
"loss": 0.0,
|
678 |
-
"step": 91
|
679 |
-
},
|
680 |
-
{
|
681 |
-
"epoch": 0.0413634192261219,
|
682 |
-
"grad_norm": NaN,
|
683 |
-
"learning_rate": 1.9369152030840556e-06,
|
684 |
-
"loss": 0.0,
|
685 |
-
"step": 92
|
686 |
-
},
|
687 |
-
{
|
688 |
-
"epoch": 0.041813021609014525,
|
689 |
-
"grad_norm": NaN,
|
690 |
-
"learning_rate": 1.4852136862001764e-06,
|
691 |
-
"loss": 0.0,
|
692 |
-
"step": 93
|
693 |
-
},
|
694 |
-
{
|
695 |
-
"epoch": 0.04226262399190716,
|
696 |
-
"grad_norm": NaN,
|
697 |
-
"learning_rate": 1.0926199633097157e-06,
|
698 |
-
"loss": 0.0,
|
699 |
-
"step": 94
|
700 |
-
},
|
701 |
-
{
|
702 |
-
"epoch": 0.04271222637479979,
|
703 |
-
"grad_norm": NaN,
|
704 |
-
"learning_rate": 7.596123493895991e-07,
|
705 |
-
"loss": 0.0,
|
706 |
-
"step": 95
|
707 |
-
},
|
708 |
-
{
|
709 |
-
"epoch": 0.043161828757692415,
|
710 |
-
"grad_norm": NaN,
|
711 |
-
"learning_rate": 4.865965629214819e-07,
|
712 |
-
"loss": 0.0,
|
713 |
-
"step": 96
|
714 |
-
},
|
715 |
-
{
|
716 |
-
"epoch": 0.04361143114058504,
|
717 |
-
"grad_norm": NaN,
|
718 |
-
"learning_rate": 2.7390523158633554e-07,
|
719 |
-
"loss": 0.0,
|
720 |
-
"step": 97
|
721 |
-
},
|
722 |
-
{
|
723 |
-
"epoch": 0.04406103352347768,
|
724 |
-
"grad_norm": NaN,
|
725 |
-
"learning_rate": 1.2179748700879012e-07,
|
726 |
-
"loss": 0.0,
|
727 |
-
"step": 98
|
728 |
-
},
|
729 |
-
{
|
730 |
-
"epoch": 0.044510635906370305,
|
731 |
-
"grad_norm": NaN,
|
732 |
-
"learning_rate": 3.04586490452119e-08,
|
733 |
-
"loss": 0.0,
|
734 |
-
"step": 99
|
735 |
},
|
736 |
{
|
737 |
-
"epoch": 0.
|
738 |
"grad_norm": NaN,
|
739 |
-
"learning_rate": 0.
|
740 |
"loss": 0.0,
|
741 |
-
"step":
|
742 |
-
},
|
743 |
-
{
|
744 |
-
"epoch": 0.04496023828926293,
|
745 |
-
"eval_loss": NaN,
|
746 |
-
"eval_runtime": 6.0311,
|
747 |
-
"eval_samples_per_second": 621.278,
|
748 |
-
"eval_steps_per_second": 310.722,
|
749 |
-
"step": 100
|
750 |
}
|
751 |
],
|
752 |
"logging_steps": 1,
|
753 |
-
"max_steps":
|
754 |
"num_input_tokens_seen": 0,
|
755 |
"num_train_epochs": 1,
|
756 |
-
"save_steps":
|
757 |
"stateful_callbacks": {
|
758 |
"TrainerControl": {
|
759 |
"args": {
|
@@ -766,7 +128,7 @@
|
|
766 |
"attributes": {}
|
767 |
}
|
768 |
},
|
769 |
-
"total_flos":
|
770 |
"train_batch_size": 2,
|
771 |
"trial_name": null,
|
772 |
"trial_params": null
|
 {
   "best_metric": null,
   "best_model_checkpoint": null,
+  "epoch": 0.0011240059572315733,
+  "eval_steps": 3,
+  "global_step": 10,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
+      "epoch": 0.00011240059572315734,
       "grad_norm": NaN,
+      "learning_rate": 2e-05,
       "loss": 0.0,
       "step": 1
     },
     {
+      "epoch": 0.00011240059572315734,
       "eval_loss": NaN,
+      "eval_runtime": 18.5088,
+      "eval_samples_per_second": 202.445,
+      "eval_steps_per_second": 101.249,
       "step": 1
     },
     {
+      "epoch": 0.00022480119144631467,
       "grad_norm": NaN,
       "learning_rate": 4e-05,
       "loss": 0.0,
+      "step": 2
     },
     {
+      "epoch": 0.000337201787169472,
       "grad_norm": NaN,
       "learning_rate": 6e-05,
       "loss": 0.0,
+      "step": 3
     },
     {
+      "epoch": 0.000337201787169472,
+      "eval_loss": NaN,
+      "eval_runtime": 17.2302,
+      "eval_samples_per_second": 217.467,
+      "eval_steps_per_second": 108.762,
+      "step": 3
     },
     {
+      "epoch": 0.00044960238289262934,
       "grad_norm": NaN,
       "learning_rate": 8e-05,
       "loss": 0.0,
+      "step": 4
     },
     {
+      "epoch": 0.0005620029786157866,
       "grad_norm": NaN,
       "learning_rate": 0.0001,
       "loss": 0.0,
+      "step": 5
     },
     {
+      "epoch": 0.000674403574338944,
       "grad_norm": NaN,
+      "learning_rate": 0.00012,
       "loss": 0.0,
+      "step": 6
     },
     {
+      "epoch": 0.000674403574338944,
       "eval_loss": NaN,
+      "eval_runtime": 17.4401,
+      "eval_samples_per_second": 214.85,
+      "eval_steps_per_second": 107.453,
+      "step": 6
     },
     {
+      "epoch": 0.0007868041700621013,
       "grad_norm": NaN,
+      "learning_rate": 0.00014,
       "loss": 0.0,
+      "step": 7
     },
     {
+      "epoch": 0.0008992047657852587,
       "grad_norm": NaN,
+      "learning_rate": 0.00016,
       "loss": 0.0,
+      "step": 8
     },
     {
+      "epoch": 0.001011605361508416,
       "grad_norm": NaN,
+      "learning_rate": 0.00018,
       "loss": 0.0,
+      "step": 9
     },
     {
+      "epoch": 0.001011605361508416,
       "eval_loss": NaN,
+      "eval_runtime": 17.3807,
+      "eval_samples_per_second": 215.584,
+      "eval_steps_per_second": 107.821,
+      "step": 9
     },
     {
+      "epoch": 0.0011240059572315733,
       "grad_norm": NaN,
+      "learning_rate": 0.0002,
       "loss": 0.0,
+      "step": 10
     }
   ],
   "logging_steps": 1,
+  "max_steps": 10,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
+  "save_steps": 3,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {

       "attributes": {}
     }
   },
+  "total_flos": 1392771072.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
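The new trainer state records a 0.0 training loss and NaN gradient norm / eval loss at every logged step. A hedged sketch, not part of the checkpoint itself, of reading the log history back out of trainer_state.json:

```python
# Sketch: load the checkpoint's trainer_state.json and print each logged step.
# Python's json module parses the bare NaN literals in this file to float("nan").
import json

with open("last-checkpoint/trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

for entry in state["log_history"]:
    loss = entry.get("loss", entry.get("eval_loss"))
    print(f"step {entry.get('step')}: loss={loss}, grad_norm={entry.get('grad_norm')}")
```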
last-checkpoint/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:da8d4b8596323039091b12417dccd4a38b26dbcf81aa9432ceb95bb9b73f1251
 size 6776
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:da8d4b8596323039091b12417dccd4a38b26dbcf81aa9432ceb95bb9b73f1251
 size 6776
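Unlike the JSON files, `training_args.bin` is a torch-serialized `TrainingArguments` object. A cautious sketch of inspecting it, assuming you trust the file (it is a pickle) and noting that recent torch versions require `weights_only=False` to be passed explicitly:

```python
# Sketch: unpickle the saved TrainingArguments (only for files you trust).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```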