{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.012745,
"global_step": 100000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 1e-05,
"loss": 6.5159,
"step": 1000
},
{
"epoch": 0.01,
"learning_rate": 2e-05,
"loss": 4.7939,
"step": 2000
},
{
"epoch": 0.01,
"learning_rate": 3e-05,
"loss": 4.1435,
"step": 3000
},
{
"epoch": 0.02,
"learning_rate": 4e-05,
"loss": 3.9199,
"step": 4000
},
{
"epoch": 0.03,
"learning_rate": 5e-05,
"loss": 3.3094,
"step": 5000
},
{
"epoch": 1.0,
"learning_rate": 6e-05,
"loss": 2.3907,
"step": 6000
},
{
"epoch": 1.01,
"learning_rate": 7e-05,
"loss": 2.0078,
"step": 7000
},
{
"epoch": 1.01,
"learning_rate": 8e-05,
"loss": 1.7064,
"step": 8000
},
{
"epoch": 1.02,
"learning_rate": 9e-05,
"loss": 1.6833,
"step": 9000
},
{
"epoch": 1.02,
"learning_rate": 0.0001,
"loss": 1.4878,
"step": 10000
},
{
"epoch": 2.0,
"learning_rate": 9.999316524962345e-05,
"loss": 1.4635,
"step": 11000
},
{
"epoch": 2.01,
"learning_rate": 9.997266286704631e-05,
"loss": 1.4416,
"step": 12000
},
{
"epoch": 2.01,
"learning_rate": 9.993849845741524e-05,
"loss": 1.3392,
"step": 13000
},
{
"epoch": 2.02,
"learning_rate": 9.989068136093873e-05,
"loss": 1.3889,
"step": 14000
},
{
"epoch": 2.02,
"learning_rate": 9.98292246503335e-05,
"loss": 1.2742,
"step": 15000
},
{
"epoch": 3.0,
"learning_rate": 9.975414512725057e-05,
"loss": 1.2841,
"step": 16000
},
{
"epoch": 3.01,
"learning_rate": 9.966546331768191e-05,
"loss": 1.2706,
"step": 17000
},
{
"epoch": 3.01,
"learning_rate": 9.956320346634876e-05,
"loss": 1.2599,
"step": 18000
},
{
"epoch": 3.02,
"learning_rate": 9.944739353007344e-05,
"loss": 1.2441,
"step": 19000
},
{
"epoch": 3.02,
"learning_rate": 9.931806517013612e-05,
"loss": 1.1784,
"step": 20000
},
{
"epoch": 4.0,
"learning_rate": 9.917525374361912e-05,
"loss": 1.2054,
"step": 21000
},
{
"epoch": 4.01,
"learning_rate": 9.901899829374047e-05,
"loss": 1.1766,
"step": 22000
},
{
"epoch": 4.01,
"learning_rate": 9.884934153917997e-05,
"loss": 1.2071,
"step": 23000
},
{
"epoch": 4.02,
"learning_rate": 9.86663298624003e-05,
"loss": 1.1654,
"step": 24000
},
{
"epoch": 4.02,
"learning_rate": 9.847001329696653e-05,
"loss": 1.1619,
"step": 25000
},
{
"epoch": 5.0,
"learning_rate": 9.826044551386744e-05,
"loss": 1.1389,
"step": 26000
},
{
"epoch": 5.01,
"learning_rate": 9.803768380684242e-05,
"loss": 1.0962,
"step": 27000
},
{
"epoch": 5.01,
"learning_rate": 9.780178907671789e-05,
"loss": 1.1754,
"step": 28000
},
{
"epoch": 5.02,
"learning_rate": 9.755282581475769e-05,
"loss": 1.1028,
"step": 29000
},
{
"epoch": 5.02,
"learning_rate": 9.729086208503174e-05,
"loss": 1.1469,
"step": 30000
},
{
"epoch": 6.0,
"learning_rate": 9.701596950580806e-05,
"loss": 1.078,
"step": 31000
},
{
"epoch": 6.01,
"learning_rate": 9.672822322997305e-05,
"loss": 1.0601,
"step": 32000
},
{
"epoch": 6.01,
"learning_rate": 9.642770192448536e-05,
"loss": 1.1506,
"step": 33000
},
{
"epoch": 6.02,
"learning_rate": 9.611448774886924e-05,
"loss": 1.0549,
"step": 34000
},
{
"epoch": 6.02,
"learning_rate": 9.578866633275288e-05,
"loss": 1.1374,
"step": 35000
},
{
"epoch": 7.0,
"learning_rate": 9.545032675245813e-05,
"loss": 1.0301,
"step": 36000
},
{
"epoch": 7.01,
"learning_rate": 9.509956150664796e-05,
"loss": 1.0283,
"step": 37000
},
{
"epoch": 7.01,
"learning_rate": 9.473646649103818e-05,
"loss": 1.1236,
"step": 38000
},
{
"epoch": 7.02,
"learning_rate": 9.43611409721806e-05,
"loss": 1.0206,
"step": 39000
},
{
"epoch": 7.02,
"learning_rate": 9.397368756032445e-05,
"loss": 1.1147,
"step": 40000
},
{
"epoch": 7.03,
"learning_rate": 9.357421218136386e-05,
"loss": 1.0182,
"step": 41000
},
{
"epoch": 8.0,
"learning_rate": 9.316282404787871e-05,
"loss": 1.0063,
"step": 42000
},
{
"epoch": 8.01,
"learning_rate": 9.273963562927695e-05,
"loss": 1.0877,
"step": 43000
},
{
"epoch": 8.01,
"learning_rate": 9.230476262104677e-05,
"loss": 0.9996,
"step": 44000
},
{
"epoch": 8.02,
"learning_rate": 9.185832391312644e-05,
"loss": 1.0898,
"step": 45000
},
{
"epoch": 8.02,
"learning_rate": 9.140044155740101e-05,
"loss": 0.9928,
"step": 46000
},
{
"epoch": 9.0,
"learning_rate": 9.093124073433463e-05,
"loss": 1.0028,
"step": 47000
},
{
"epoch": 9.01,
"learning_rate": 9.045084971874738e-05,
"loss": 1.0667,
"step": 48000
},
{
"epoch": 9.01,
"learning_rate": 8.995939984474624e-05,
"loss": 0.9789,
"step": 49000
},
{
"epoch": 9.02,
"learning_rate": 8.945702546981969e-05,
"loss": 1.0668,
"step": 50000
},
{
"epoch": 9.02,
"eval_loss": 0.7346000671386719,
"eval_runtime": 17.1312,
"eval_samples_per_second": 291.864,
"eval_steps_per_second": 2.335,
"step": 50000
},
{
"epoch": 9.02,
"learning_rate": 8.894386393810563e-05,
"loss": 0.9766,
"step": 51000
},
{
"epoch": 10.0,
"learning_rate": 8.842005554284296e-05,
"loss": 0.9989,
"step": 52000
},
{
"epoch": 10.01,
"learning_rate": 8.788574348801675e-05,
"loss": 1.0319,
"step": 53000
},
{
"epoch": 10.01,
"learning_rate": 8.73410738492077e-05,
"loss": 0.9828,
"step": 54000
},
{
"epoch": 10.02,
"learning_rate": 8.678619553365659e-05,
"loss": 1.0349,
"step": 55000
},
{
"epoch": 10.02,
"learning_rate": 8.622126023955446e-05,
"loss": 0.9663,
"step": 56000
},
{
"epoch": 11.0,
"learning_rate": 8.564642241456986e-05,
"loss": 0.9874,
"step": 57000
},
{
"epoch": 11.01,
"learning_rate": 8.506183921362443e-05,
"loss": 0.9959,
"step": 58000
},
{
"epoch": 11.01,
"learning_rate": 8.44676704559283e-05,
"loss": 1.0047,
"step": 59000
},
{
"epoch": 11.02,
"learning_rate": 8.386407858128706e-05,
"loss": 1.0018,
"step": 60000
},
{
"epoch": 11.02,
"learning_rate": 8.32512286056924e-05,
"loss": 0.9518,
"step": 61000
},
{
"epoch": 12.0,
"learning_rate": 8.262928807620843e-05,
"loss": 0.9936,
"step": 62000
},
{
"epoch": 12.01,
"learning_rate": 8.199842702516583e-05,
"loss": 0.9534,
"step": 63000
},
{
"epoch": 12.01,
"learning_rate": 8.135881792367686e-05,
"loss": 1.0122,
"step": 64000
},
{
"epoch": 12.02,
"learning_rate": 8.07106356344834e-05,
"loss": 0.9651,
"step": 65000
},
{
"epoch": 12.02,
"learning_rate": 8.005405736415126e-05,
"loss": 0.9925,
"step": 66000
},
{
"epoch": 13.0,
"learning_rate": 7.938926261462366e-05,
"loss": 0.946,
"step": 67000
},
{
"epoch": 13.01,
"learning_rate": 7.871643313414718e-05,
"loss": 0.9306,
"step": 68000
},
{
"epoch": 13.01,
"learning_rate": 7.803575286758364e-05,
"loss": 1.0195,
"step": 69000
},
{
"epoch": 13.02,
"learning_rate": 7.734740790612136e-05,
"loss": 0.9357,
"step": 70000
},
{
"epoch": 13.02,
"learning_rate": 7.66515864363997e-05,
"loss": 0.9933,
"step": 71000
},
{
"epoch": 14.0,
"learning_rate": 7.594847868906076e-05,
"loss": 0.9245,
"step": 72000
},
{
"epoch": 14.01,
"learning_rate": 7.52382768867422e-05,
"loss": 0.9189,
"step": 73000
},
{
"epoch": 14.01,
"learning_rate": 7.452117519152542e-05,
"loss": 1.0111,
"step": 74000
},
{
"epoch": 14.02,
"learning_rate": 7.379736965185368e-05,
"loss": 0.9169,
"step": 75000
},
{
"epoch": 14.02,
"learning_rate": 7.30670581489344e-05,
"loss": 1.0053,
"step": 76000
},
{
"epoch": 15.0,
"learning_rate": 7.233044034264034e-05,
"loss": 0.9057,
"step": 77000
},
{
"epoch": 15.01,
"learning_rate": 7.158771761692464e-05,
"loss": 0.9039,
"step": 78000
},
{
"epoch": 15.01,
"learning_rate": 7.083909302476453e-05,
"loss": 0.9972,
"step": 79000
},
{
"epoch": 15.02,
"learning_rate": 7.008477123264848e-05,
"loss": 0.9042,
"step": 80000
},
{
"epoch": 15.02,
"learning_rate": 6.932495846462261e-05,
"loss": 0.9975,
"step": 81000
},
{
"epoch": 15.03,
"learning_rate": 6.855986244591104e-05,
"loss": 0.9056,
"step": 82000
},
{
"epoch": 16.0,
"learning_rate": 6.778969234612584e-05,
"loss": 0.903,
"step": 83000
},
{
"epoch": 16.01,
"learning_rate": 6.701465872208216e-05,
"loss": 0.9749,
"step": 84000
},
{
"epoch": 16.01,
"learning_rate": 6.623497346023418e-05,
"loss": 0.8956,
"step": 85000
},
{
"epoch": 16.02,
"learning_rate": 6.545084971874738e-05,
"loss": 0.9854,
"step": 86000
},
{
"epoch": 16.02,
"learning_rate": 6.466250186922325e-05,
"loss": 0.891,
"step": 87000
},
{
"epoch": 17.0,
"learning_rate": 6.387014543809223e-05,
"loss": 0.9065,
"step": 88000
},
{
"epoch": 17.01,
"learning_rate": 6.307399704769099e-05,
"loss": 0.9685,
"step": 89000
},
{
"epoch": 17.01,
"learning_rate": 6.227427435703997e-05,
"loss": 0.8875,
"step": 90000
},
{
"epoch": 17.02,
"learning_rate": 6.147119600233758e-05,
"loss": 0.9719,
"step": 91000
},
{
"epoch": 17.02,
"learning_rate": 6.066498153718735e-05,
"loss": 0.8849,
"step": 92000
},
{
"epoch": 18.0,
"learning_rate": 5.985585137257401e-05,
"loss": 0.9135,
"step": 93000
},
{
"epoch": 18.01,
"learning_rate": 5.90440267166055e-05,
"loss": 0.9328,
"step": 94000
},
{
"epoch": 18.01,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.9044,
"step": 95000
},
{
"epoch": 18.02,
"learning_rate": 5.74131823855921e-05,
"loss": 0.9439,
"step": 96000
},
{
"epoch": 18.02,
"learning_rate": 5.6594608567103456e-05,
"loss": 0.8858,
"step": 97000
},
{
"epoch": 19.0,
"learning_rate": 5.577423184847932e-05,
"loss": 0.912,
"step": 98000
},
{
"epoch": 19.01,
"learning_rate": 5.495227651252315e-05,
"loss": 0.9085,
"step": 99000
},
{
"epoch": 19.01,
"learning_rate": 5.4128967273616625e-05,
"loss": 0.9313,
"step": 100000
},
{
"epoch": 19.01,
"eval_loss": 0.6309990286827087,
"eval_runtime": 10.4505,
"eval_samples_per_second": 478.448,
"eval_steps_per_second": 3.828,
"step": 100000
}
],
"max_steps": 200000,
"num_train_epochs": 9223372036854775807,
"total_flos": 1.684779907512533e+18,
"trial_name": null,
"trial_params": null
}