{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.985976939856653,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 4.2e-05,
      "loss": 0.8909,
      "step": 50
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.1e-05,
      "loss": 0.8023,
      "step": 100
    },
    {
      "epoch": 0.12,
      "eval_loss": 0.7678152918815613,
      "eval_runtime": 2324.1166,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 100
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.895140664961637e-05,
      "loss": 0.7815,
      "step": 150
    },
    {
      "epoch": 0.25,
      "learning_rate": 9.767263427109975e-05,
      "loss": 0.8184,
      "step": 200
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.7796664834022522,
      "eval_runtime": 2323.4202,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 200
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.639386189258313e-05,
      "loss": 0.7501,
      "step": 250
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.51150895140665e-05,
      "loss": 0.7786,
      "step": 300
    },
    {
      "epoch": 0.37,
      "eval_loss": 0.7461986541748047,
      "eval_runtime": 2323.156,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 300
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.383631713554988e-05,
      "loss": 0.7247,
      "step": 350
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.255754475703325e-05,
      "loss": 0.699,
      "step": 400
    },
    {
      "epoch": 0.5,
      "eval_loss": 0.7154579162597656,
      "eval_runtime": 2326.716,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 400
    },
    {
      "epoch": 0.56,
      "learning_rate": 9.127877237851663e-05,
      "loss": 0.7498,
      "step": 450
    },
    {
      "epoch": 0.62,
      "learning_rate": 9e-05,
      "loss": 0.6729,
      "step": 500
    },
    {
      "epoch": 0.62,
      "eval_loss": 0.6918142437934875,
      "eval_runtime": 2326.9582,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 500
    },
    {
      "epoch": 0.69,
      "learning_rate": 8.872122762148338e-05,
      "loss": 0.6821,
      "step": 550
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.744245524296676e-05,
      "loss": 0.7392,
      "step": 600
    },
    {
      "epoch": 0.75,
      "eval_loss": 0.6949509978294373,
      "eval_runtime": 2328.8704,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 600
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.616368286445013e-05,
      "loss": 0.6848,
      "step": 650
    },
    {
      "epoch": 0.87,
      "learning_rate": 8.488491048593351e-05,
      "loss": 0.7601,
      "step": 700
    },
    {
      "epoch": 0.87,
      "eval_loss": 0.6541839241981506,
      "eval_runtime": 2322.3854,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 700
    },
    {
      "epoch": 0.93,
      "learning_rate": 8.360613810741689e-05,
      "loss": 0.7163,
      "step": 750
    },
    {
      "epoch": 1.0,
      "learning_rate": 8.232736572890026e-05,
      "loss": 0.7121,
      "step": 800
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.7020866274833679,
      "eval_runtime": 2329.0246,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 800
    },
    {
      "epoch": 1.06,
      "learning_rate": 8.104859335038364e-05,
      "loss": 0.6303,
      "step": 850
    },
    {
      "epoch": 1.12,
      "learning_rate": 7.992327365728901e-05,
      "loss": 0.834,
      "step": 900
    },
    {
      "epoch": 1.12,
      "eval_loss": 0.8280826210975647,
      "eval_runtime": 2322.7118,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 900
    },
    {
      "epoch": 1.18,
      "learning_rate": 7.867007672634272e-05,
      "loss": 0.7102,
      "step": 950
    },
    {
      "epoch": 1.25,
      "learning_rate": 7.73913043478261e-05,
      "loss": 0.6845,
      "step": 1000
    },
    {
      "epoch": 1.25,
      "eval_loss": 0.684939444065094,
      "eval_runtime": 2322.9147,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 1000
    },
    {
      "epoch": 1.31,
      "learning_rate": 7.611253196930947e-05,
      "loss": 0.6918,
      "step": 1050
    },
    {
      "epoch": 1.37,
      "learning_rate": 7.483375959079285e-05,
      "loss": 0.7139,
      "step": 1100
    },
    {
      "epoch": 1.37,
      "eval_loss": 0.682366132736206,
      "eval_runtime": 2323.8642,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 1100
    },
    {
      "epoch": 1.43,
      "learning_rate": 7.355498721227622e-05,
      "loss": 0.6513,
      "step": 1150
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.22762148337596e-05,
      "loss": 0.6515,
      "step": 1200
    },
    {
      "epoch": 1.5,
      "eval_loss": 0.6614900827407837,
      "eval_runtime": 2324.2619,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 1200
    },
    {
      "epoch": 1.56,
      "learning_rate": 7.099744245524297e-05,
      "loss": 0.6325,
      "step": 1250
    },
    {
      "epoch": 1.62,
      "learning_rate": 6.971867007672635e-05,
      "loss": 0.6658,
      "step": 1300
    },
    {
      "epoch": 1.62,
      "eval_loss": 0.6691812872886658,
      "eval_runtime": 2322.8992,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 1300
    },
    {
      "epoch": 1.68,
      "learning_rate": 6.843989769820973e-05,
      "loss": 0.6709,
      "step": 1350
    },
    {
      "epoch": 1.75,
      "learning_rate": 6.71611253196931e-05,
      "loss": 0.6571,
      "step": 1400
    },
    {
      "epoch": 1.75,
      "eval_loss": 0.7406654357910156,
      "eval_runtime": 2323.316,
      "eval_samples_per_second": 0.633,
      "eval_steps_per_second": 0.633,
      "step": 1400
    },
    {
      "epoch": 1.81,
      "learning_rate": 6.588235294117648e-05,
      "loss": 0.6527,
      "step": 1450
    },
    {
      "epoch": 1.87,
      "learning_rate": 6.460358056265985e-05,
      "loss": 0.6432,
      "step": 1500
    },
    {
      "epoch": 1.87,
      "eval_loss": 0.6645843982696533,
      "eval_runtime": 2325.3057,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 1500
    },
    {
      "epoch": 1.93,
      "learning_rate": 6.332480818414323e-05,
      "loss": 0.6998,
      "step": 1550
    },
    {
      "epoch": 1.99,
      "learning_rate": 6.20460358056266e-05,
      "loss": 0.6777,
      "step": 1600
    },
    {
      "epoch": 1.99,
      "eval_loss": 0.7001596093177795,
      "eval_runtime": 2324.698,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 1600
    },
    {
      "epoch": 2.06,
      "learning_rate": 6.0767263427109976e-05,
      "loss": 0.63,
      "step": 1650
    },
    {
      "epoch": 2.12,
      "learning_rate": 5.948849104859335e-05,
      "loss": 0.6027,
      "step": 1700
    },
    {
      "epoch": 2.12,
      "eval_loss": 0.6740280389785767,
      "eval_runtime": 2325.0824,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 1700
    },
    {
      "epoch": 2.18,
      "learning_rate": 5.820971867007673e-05,
      "loss": 0.6171,
      "step": 1750
    },
    {
      "epoch": 2.24,
      "learning_rate": 5.6930946291560104e-05,
      "loss": 0.5711,
      "step": 1800
    },
    {
      "epoch": 2.24,
      "eval_loss": 0.6770710349082947,
      "eval_runtime": 2324.7006,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 1800
    },
    {
      "epoch": 2.31,
      "learning_rate": 5.565217391304348e-05,
      "loss": 0.6193,
      "step": 1850
    },
    {
      "epoch": 2.37,
      "learning_rate": 5.4373401534526856e-05,
      "loss": 0.6544,
      "step": 1900
    },
    {
      "epoch": 2.37,
      "eval_loss": 0.7468971014022827,
      "eval_runtime": 2324.7323,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 1900
    },
    {
      "epoch": 2.43,
      "learning_rate": 5.309462915601023e-05,
      "loss": 0.6559,
      "step": 1950
    },
    {
      "epoch": 2.49,
      "learning_rate": 5.181585677749361e-05,
      "loss": 0.6981,
      "step": 2000
    },
    {
      "epoch": 2.49,
      "eval_loss": 0.6552309393882751,
      "eval_runtime": 2325.8405,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2000
    },
    {
      "epoch": 2.56,
      "learning_rate": 5.0537084398976984e-05,
      "loss": 0.6161,
      "step": 2050
    },
    {
      "epoch": 2.62,
      "learning_rate": 4.925831202046036e-05,
      "loss": 0.6425,
      "step": 2100
    },
    {
      "epoch": 2.62,
      "eval_loss": 0.6608405113220215,
      "eval_runtime": 2324.8602,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2100
    },
    {
      "epoch": 2.68,
      "learning_rate": 4.7979539641943737e-05,
      "loss": 0.5966,
      "step": 2150
    },
    {
      "epoch": 2.74,
      "learning_rate": 4.670076726342711e-05,
      "loss": 0.6435,
      "step": 2200
    },
    {
      "epoch": 2.74,
      "eval_loss": 0.6722382307052612,
      "eval_runtime": 2325.4748,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2200
    },
    {
      "epoch": 2.8,
      "learning_rate": 4.542199488491049e-05,
      "loss": 0.6493,
      "step": 2250
    },
    {
      "epoch": 2.87,
      "learning_rate": 4.4143222506393865e-05,
      "loss": 0.6281,
      "step": 2300
    },
    {
      "epoch": 2.87,
      "eval_loss": 0.6553158760070801,
      "eval_runtime": 2324.9442,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2300
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.286445012787724e-05,
      "loss": 0.5495,
      "step": 2350
    },
    {
      "epoch": 2.99,
      "learning_rate": 4.158567774936061e-05,
      "loss": 0.6129,
      "step": 2400
    },
    {
      "epoch": 2.99,
      "eval_loss": 0.6536511182785034,
      "eval_runtime": 2325.6669,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2400
    },
    {
      "epoch": 3.05,
      "learning_rate": 4.0306905370843986e-05,
      "loss": 0.5085,
      "step": 2450
    },
    {
      "epoch": 3.12,
      "learning_rate": 3.902813299232736e-05,
      "loss": 0.5443,
      "step": 2500
    },
    {
      "epoch": 3.12,
      "eval_loss": 0.659809947013855,
      "eval_runtime": 2326.2914,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2500
    },
    {
      "epoch": 3.18,
      "learning_rate": 3.774936061381074e-05,
      "loss": 0.5447,
      "step": 2550
    },
    {
      "epoch": 3.24,
      "learning_rate": 3.6470588235294114e-05,
      "loss": 0.535,
      "step": 2600
    },
    {
      "epoch": 3.24,
      "eval_loss": 0.6724178194999695,
      "eval_runtime": 2327.4156,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2600
    },
    {
      "epoch": 3.3,
      "learning_rate": 3.519181585677749e-05,
      "loss": 0.6094,
      "step": 2650
    },
    {
      "epoch": 3.37,
      "learning_rate": 3.3913043478260867e-05,
      "loss": 0.5347,
      "step": 2700
    },
    {
      "epoch": 3.37,
      "eval_loss": 0.6599487662315369,
      "eval_runtime": 2327.143,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2700
    },
    {
      "epoch": 3.43,
      "learning_rate": 3.263427109974424e-05,
      "loss": 0.572,
      "step": 2750
    },
    {
      "epoch": 3.49,
      "learning_rate": 3.135549872122762e-05,
      "loss": 0.5493,
      "step": 2800
    },
    {
      "epoch": 3.49,
      "eval_loss": 0.6231494545936584,
      "eval_runtime": 2326.2678,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2800
    },
    {
      "epoch": 3.55,
      "learning_rate": 3.0076726342710998e-05,
      "loss": 0.5566,
      "step": 2850
    },
    {
      "epoch": 3.61,
      "learning_rate": 2.8797953964194374e-05,
      "loss": 0.5564,
      "step": 2900
    },
    {
      "epoch": 3.61,
      "eval_loss": 0.6593871116638184,
      "eval_runtime": 2327.2395,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 2900
    },
    {
      "epoch": 3.68,
      "learning_rate": 2.751918158567775e-05,
      "loss": 0.5787,
      "step": 2950
    },
    {
      "epoch": 3.74,
      "learning_rate": 2.6240409207161126e-05,
      "loss": 0.509,
      "step": 3000
    },
    {
      "epoch": 3.74,
      "eval_loss": 0.6348086595535278,
      "eval_runtime": 2326.4619,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 3000
    },
    {
      "epoch": 3.8,
      "learning_rate": 2.4961636828644502e-05,
      "loss": 0.5356,
      "step": 3050
    },
    {
      "epoch": 3.86,
      "learning_rate": 2.368286445012788e-05,
      "loss": 0.52,
      "step": 3100
    },
    {
      "epoch": 3.86,
      "eval_loss": 0.6384520530700684,
      "eval_runtime": 2327.6186,
      "eval_samples_per_second": 0.632,
      "eval_steps_per_second": 0.632,
      "step": 3100
    },
    {
      "epoch": 3.93,
      "learning_rate": 2.2404092071611255e-05,
      "loss": 0.5418,
      "step": 3150
    },
    {
      "epoch": 3.99,
      "learning_rate": 2.112531969309463e-05,
      "loss": 0.5569,
      "step": 3200
    },
    {
      "epoch": 3.99,
      "eval_loss": 0.6191104650497437,
      "eval_runtime": 2329.026,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3200
    },
    {
      "epoch": 4.05,
      "learning_rate": 1.9846547314578007e-05,
      "loss": 0.4716,
      "step": 3250
    },
    {
      "epoch": 4.11,
      "learning_rate": 1.8567774936061383e-05,
      "loss": 0.501,
      "step": 3300
    },
    {
      "epoch": 4.11,
      "eval_loss": 0.6244591474533081,
      "eval_runtime": 2328.4154,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3300
    },
    {
      "epoch": 4.18,
      "learning_rate": 1.728900255754476e-05,
      "loss": 0.4853,
      "step": 3350
    },
    {
      "epoch": 4.24,
      "learning_rate": 1.6010230179028135e-05,
      "loss": 0.5105,
      "step": 3400
    },
    {
      "epoch": 4.24,
      "eval_loss": 0.6227459907531738,
      "eval_runtime": 2328.6203,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3400
    },
    {
      "epoch": 4.3,
      "learning_rate": 1.4731457800511508e-05,
      "loss": 0.4757,
      "step": 3450
    },
    {
      "epoch": 4.36,
      "learning_rate": 1.3452685421994884e-05,
      "loss": 0.5064,
      "step": 3500
    },
    {
      "epoch": 4.36,
      "eval_loss": 0.628924548625946,
      "eval_runtime": 2328.7385,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3500
    },
    {
      "epoch": 4.43,
      "learning_rate": 1.2173913043478261e-05,
      "loss": 0.5107,
      "step": 3550
    },
    {
      "epoch": 4.49,
      "learning_rate": 1.0895140664961638e-05,
      "loss": 0.5088,
      "step": 3600
    },
    {
      "epoch": 4.49,
      "eval_loss": 0.620337724685669,
      "eval_runtime": 2329.0799,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3600
    },
    {
      "epoch": 4.55,
      "learning_rate": 9.616368286445012e-06,
      "loss": 0.5184,
      "step": 3650
    },
    {
      "epoch": 4.61,
      "learning_rate": 8.337595907928388e-06,
      "loss": 0.5246,
      "step": 3700
    },
    {
      "epoch": 4.61,
      "eval_loss": 0.618162989616394,
      "eval_runtime": 2327.9739,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3700
    },
    {
      "epoch": 4.67,
      "learning_rate": 7.058823529411765e-06,
      "loss": 0.5027,
      "step": 3750
    },
    {
      "epoch": 4.74,
      "learning_rate": 5.780051150895141e-06,
      "loss": 0.4927,
      "step": 3800
    },
    {
      "epoch": 4.74,
      "eval_loss": 0.6198447942733765,
      "eval_runtime": 2328.8672,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3800
    },
    {
      "epoch": 4.8,
      "learning_rate": 4.501278772378517e-06,
      "loss": 0.5568,
      "step": 3850
    },
    {
      "epoch": 4.86,
      "learning_rate": 3.2225063938618927e-06,
      "loss": 0.5038,
      "step": 3900
    },
    {
      "epoch": 4.86,
      "eval_loss": 0.6127236485481262,
      "eval_runtime": 2329.1619,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 3900
    },
    {
      "epoch": 4.92,
      "learning_rate": 1.943734015345269e-06,
      "loss": 0.5114,
      "step": 3950
    },
    {
      "epoch": 4.99,
      "learning_rate": 6.649616368286445e-07,
      "loss": 0.5494,
      "step": 4000
    },
    {
      "epoch": 4.99,
      "eval_loss": 0.614413321018219,
      "eval_runtime": 2328.9991,
      "eval_samples_per_second": 0.631,
      "eval_steps_per_second": 0.631,
      "step": 4000
    }
  ],
  "max_steps": 4010,
  "num_train_epochs": 5,
  "total_flos": 3.2509882552308204e+18,
  "trial_name": null,
  "trial_params": null
}