DuongTrongChi committed (verified)
Commit f905251 · 1 Parent(s): 23a38de

Training in progress, step 193, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:bcab26ada2c2637d79ec11d98df950fe228da88d062fd59cebde5c23cc576bd9
+ oid sha256:6d08662b031cdd86ed774881debe128c873e00074cf7929595f42b6a63d84048
 size 100198584
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:c3777d03e8d2baa5c405d941ac93d97f6e70f91bdf36b70dc8613461422b413d
+ oid sha256:fa10ee462c7e49a82fafb47e9d4304eed05d8aaf2fa6327757fcf5a538adb758
 size 50675156
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:ae12e87061029d0002bd195ec24682167e09466b871fd07c9121b60003cd8e79
+ oid sha256:5ac39c24740490f5e39e7ce5934c2a2903951fd3baae22c89e765d403647b6d1
 size 1064
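The three files above are stored with Git LFS, so the diff only touches the small pointer file: the `oid` (the SHA-256 of the actual payload) changes while the recorded `size` stays the same. Below is a minimal sketch of checking a pulled checkpoint file against its pointer; the path is illustrative and assumes the LFS objects have already been fetched (e.g. via `git lfs pull`).

```python
import hashlib
from pathlib import Path

def lfs_oid(path: Path, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest that Git LFS records as the pointer's oid."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Illustrative path; adjust to wherever this checkpoint was pulled locally.
weights = Path("last-checkpoint/adapter_model.safetensors")
print(lfs_oid(weights))        # should match the new oid in the pointer above
print(weights.stat().st_size)  # should match the recorded size (100198584)
```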
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
- "epoch": 0.23522965939183635,
+ "epoch": 0.28198338051319516,
 "eval_steps": 500,
- "global_step": 161,
+ "global_step": 193,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -1134,6 +1134,230 @@
 "learning_rate": 1.791095890410959e-05,
 "loss": 1.3022,
 "step": 161
+ },
+ {
+ "epoch": 0.23669071317687881,
+ "grad_norm": 0.16716252267360687,
+ "learning_rate": 1.7876712328767125e-05,
+ "loss": 1.2339,
+ "step": 162
+ },
+ {
+ "epoch": 0.23815176696192128,
+ "grad_norm": 0.1609048694372177,
+ "learning_rate": 1.7842465753424658e-05,
+ "loss": 1.3374,
+ "step": 163
+ },
+ {
+ "epoch": 0.23961282074696375,
+ "grad_norm": 0.17205393314361572,
+ "learning_rate": 1.7808219178082194e-05,
+ "loss": 1.2043,
+ "step": 164
+ },
+ {
+ "epoch": 0.2410738745320062,
+ "grad_norm": 0.17497386038303375,
+ "learning_rate": 1.7773972602739727e-05,
+ "loss": 1.2267,
+ "step": 165
+ },
+ {
+ "epoch": 0.24253492831704868,
+ "grad_norm": 0.17982399463653564,
+ "learning_rate": 1.773972602739726e-05,
+ "loss": 1.2363,
+ "step": 166
+ },
+ {
+ "epoch": 0.24399598210209114,
+ "grad_norm": 0.17415454983711243,
+ "learning_rate": 1.7705479452054797e-05,
+ "loss": 1.2339,
+ "step": 167
+ },
+ {
+ "epoch": 0.2454570358871336,
+ "grad_norm": 0.17761662602424622,
+ "learning_rate": 1.767123287671233e-05,
+ "loss": 1.2549,
+ "step": 168
+ },
+ {
+ "epoch": 0.24691808967217604,
+ "grad_norm": 0.17029732465744019,
+ "learning_rate": 1.7636986301369866e-05,
+ "loss": 1.2175,
+ "step": 169
+ },
+ {
+ "epoch": 0.2483791434572185,
+ "grad_norm": 0.1831396073102951,
+ "learning_rate": 1.76027397260274e-05,
+ "loss": 1.2975,
+ "step": 170
+ },
+ {
+ "epoch": 0.24984019724226098,
+ "grad_norm": 0.1648183912038803,
+ "learning_rate": 1.7568493150684932e-05,
+ "loss": 1.317,
+ "step": 171
+ },
+ {
+ "epoch": 0.25130125102730344,
+ "grad_norm": 0.19559577107429504,
+ "learning_rate": 1.7534246575342465e-05,
+ "loss": 1.2482,
+ "step": 172
+ },
+ {
+ "epoch": 0.2527623048123459,
+ "grad_norm": 0.17486748099327087,
+ "learning_rate": 1.7500000000000002e-05,
+ "loss": 1.2638,
+ "step": 173
+ },
+ {
+ "epoch": 0.25422335859738837,
+ "grad_norm": 0.18816767632961273,
+ "learning_rate": 1.7465753424657538e-05,
+ "loss": 1.2739,
+ "step": 174
+ },
+ {
+ "epoch": 0.25568441238243084,
+ "grad_norm": 0.1921810507774353,
+ "learning_rate": 1.743150684931507e-05,
+ "loss": 1.2028,
+ "step": 175
+ },
+ {
+ "epoch": 0.2571454661674733,
+ "grad_norm": 0.20229558646678925,
+ "learning_rate": 1.7397260273972604e-05,
+ "loss": 1.2518,
+ "step": 176
+ },
+ {
+ "epoch": 0.25860651995251577,
+ "grad_norm": 0.21454092860221863,
+ "learning_rate": 1.7363013698630137e-05,
+ "loss": 1.2257,
+ "step": 177
+ },
+ {
+ "epoch": 0.26006757373755823,
+ "grad_norm": 0.20349366962909698,
+ "learning_rate": 1.7328767123287674e-05,
+ "loss": 1.229,
+ "step": 178
+ },
+ {
+ "epoch": 0.2615286275226007,
+ "grad_norm": 0.22348977625370026,
+ "learning_rate": 1.7294520547945207e-05,
+ "loss": 1.3047,
+ "step": 179
+ },
+ {
+ "epoch": 0.26298968130764316,
+ "grad_norm": 0.18413174152374268,
+ "learning_rate": 1.726027397260274e-05,
+ "loss": 1.2372,
+ "step": 180
+ },
+ {
+ "epoch": 0.2644507350926856,
+ "grad_norm": 0.22073839604854584,
+ "learning_rate": 1.7226027397260273e-05,
+ "loss": 1.2719,
+ "step": 181
+ },
+ {
+ "epoch": 0.26591178887772804,
+ "grad_norm": 0.25906509160995483,
+ "learning_rate": 1.719178082191781e-05,
+ "loss": 1.2132,
+ "step": 182
+ },
+ {
+ "epoch": 0.2673728426627705,
+ "grad_norm": 0.22006724774837494,
+ "learning_rate": 1.7157534246575346e-05,
+ "loss": 1.2595,
+ "step": 183
+ },
+ {
+ "epoch": 0.26883389644781297,
+ "grad_norm": 0.2628309428691864,
+ "learning_rate": 1.712328767123288e-05,
+ "loss": 1.1812,
+ "step": 184
+ },
+ {
+ "epoch": 0.27029495023285544,
+ "grad_norm": 0.21583111584186554,
+ "learning_rate": 1.7089041095890412e-05,
+ "loss": 1.2631,
+ "step": 185
+ },
+ {
+ "epoch": 0.2717560040178979,
+ "grad_norm": 0.19774451851844788,
+ "learning_rate": 1.7054794520547945e-05,
+ "loss": 1.2511,
+ "step": 186
+ },
+ {
+ "epoch": 0.27321705780294037,
+ "grad_norm": 0.199305459856987,
+ "learning_rate": 1.702054794520548e-05,
+ "loss": 1.2836,
+ "step": 187
+ },
+ {
+ "epoch": 0.27467811158798283,
+ "grad_norm": 0.17964106798171997,
+ "learning_rate": 1.6986301369863014e-05,
+ "loss": 1.2784,
+ "step": 188
+ },
+ {
+ "epoch": 0.2761391653730253,
+ "grad_norm": 0.15635524690151215,
+ "learning_rate": 1.695205479452055e-05,
+ "loss": 1.2959,
+ "step": 189
+ },
+ {
+ "epoch": 0.27760021915806776,
+ "grad_norm": 0.14683344960212708,
+ "learning_rate": 1.6917808219178084e-05,
+ "loss": 1.2453,
+ "step": 190
+ },
+ {
+ "epoch": 0.27906127294311023,
+ "grad_norm": 0.1619580239057541,
+ "learning_rate": 1.6883561643835617e-05,
+ "loss": 1.2602,
+ "step": 191
+ },
+ {
+ "epoch": 0.2805223267281527,
+ "grad_norm": 0.19470493495464325,
+ "learning_rate": 1.6849315068493153e-05,
+ "loss": 1.1052,
+ "step": 192
+ },
+ {
+ "epoch": 0.28198338051319516,
+ "grad_norm": 0.17526264488697052,
+ "learning_rate": 1.6815068493150686e-05,
+ "loss": 1.2553,
+ "step": 193
 }
 ],
 "logging_steps": 1,
@@ -1153,7 +1377,7 @@
 "attributes": {}
 }
 },
- "total_flos": 1.8126428980702003e+17,
+ "total_flos": 2.1621842547974554e+17,
 "train_batch_size": 4,
 "trial_name": null,
 "trial_params": null