leixa committed on
Commit 36e01bf
1 Parent(s): 4f96ccf

Training in progress, step 84, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:421d33d054daeb271659f629f667a37ecef24e273e8dfa833bcde0144fc04530
+ oid sha256:88a479e925ede0c3da3821d664013f8c545b67f4c0210547884d9612a19eef4f
  size 671149168
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f931aa37152dedf153064b3be0776d8e1dacee8778f5d5df791f5d97f83e6a74
+ oid sha256:27a7e3cd7b75a685cbd36c0f64b79a56218ed9625b7edaae67dc35cc2696380b
  size 341314196
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:543a07122da3e253f249468a50ec3be9293aece3c127574755dcffd0eea2d380
+ oid sha256:ab838fbe570b6d415c10780e3a0e3a3e25daa76549fd9a5954f6f1727ad17437
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d24225e147d7b9425c760fd15a44cb007389a8d29dfe49f25bc43f19fd631f65
+ oid sha256:2df224011d0e75c4a97901f6c1b2930bba4bc3a9aa7c877e6c91e796bec6013f
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.13680781758957655,
+ "epoch": 0.2736156351791531,
  "eval_steps": 42,
- "global_step": 42,
+ "global_step": 84,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -121,6 +121,112 @@
  "eval_samples_per_second": 23.359,
  "eval_steps_per_second": 5.874,
  "step": 42
+ },
+ {
+ "epoch": 0.1465798045602606,
+ "grad_norm": 2.9569833278656006,
+ "learning_rate": 9.874639560909117e-05,
+ "loss": 1.6317,
+ "step": 45
+ },
+ {
+ "epoch": 0.1563517915309446,
+ "grad_norm": 2.9722719192504883,
+ "learning_rate": 9.852339101019574e-05,
+ "loss": 1.4734,
+ "step": 48
+ },
+ {
+ "epoch": 0.16612377850162866,
+ "grad_norm": 3.663701295852661,
+ "learning_rate": 9.828243544427796e-05,
+ "loss": 1.7673,
+ "step": 51
+ },
+ {
+ "epoch": 0.1758957654723127,
+ "grad_norm": 2.7686305046081543,
+ "learning_rate": 9.802361805155097e-05,
+ "loss": 1.5428,
+ "step": 54
+ },
+ {
+ "epoch": 0.18566775244299674,
+ "grad_norm": 2.781644582748413,
+ "learning_rate": 9.774703458011453e-05,
+ "loss": 1.7697,
+ "step": 57
+ },
+ {
+ "epoch": 0.19543973941368079,
+ "grad_norm": 2.7254538536071777,
+ "learning_rate": 9.745278735053343e-05,
+ "loss": 1.5856,
+ "step": 60
+ },
+ {
+ "epoch": 0.20521172638436483,
+ "grad_norm": 2.544527769088745,
+ "learning_rate": 9.714098521798465e-05,
+ "loss": 1.5917,
+ "step": 63
+ },
+ {
+ "epoch": 0.21498371335504887,
+ "grad_norm": 3.226173162460327,
+ "learning_rate": 9.681174353198687e-05,
+ "loss": 1.5432,
+ "step": 66
+ },
+ {
+ "epoch": 0.2247557003257329,
+ "grad_norm": 2.7061498165130615,
+ "learning_rate": 9.64651840937276e-05,
+ "loss": 1.6754,
+ "step": 69
+ },
+ {
+ "epoch": 0.23452768729641693,
+ "grad_norm": 2.9291999340057373,
+ "learning_rate": 9.610143511100354e-05,
+ "loss": 1.5638,
+ "step": 72
+ },
+ {
+ "epoch": 0.24429967426710097,
+ "grad_norm": 2.4796836376190186,
+ "learning_rate": 9.572063115079063e-05,
+ "loss": 1.474,
+ "step": 75
+ },
+ {
+ "epoch": 0.254071661237785,
+ "grad_norm": 2.7676734924316406,
+ "learning_rate": 9.53229130894619e-05,
+ "loss": 1.5904,
+ "step": 78
+ },
+ {
+ "epoch": 0.26384364820846906,
+ "grad_norm": 2.8210036754608154,
+ "learning_rate": 9.490842806067095e-05,
+ "loss": 1.6858,
+ "step": 81
+ },
+ {
+ "epoch": 0.2736156351791531,
+ "grad_norm": 2.67210054397583,
+ "learning_rate": 9.44773294009206e-05,
+ "loss": 1.5434,
+ "step": 84
+ },
+ {
+ "epoch": 0.2736156351791531,
+ "eval_loss": 1.6323200464248657,
+ "eval_runtime": 22.1692,
+ "eval_samples_per_second": 23.321,
+ "eval_steps_per_second": 5.864,
+ "step": 84
  }
  ],
  "logging_steps": 3,
@@ -140,7 +246,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3.1678785153662976e+16,
+ "total_flos": 6.335757030732595e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null