new_model3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0251447504387619,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 0.0849609375,
"learning_rate": 3.8000000000000005e-06,
"loss": 1.2889,
"step": 5
},
{
"epoch": 0.04,
"grad_norm": 0.083984375,
"learning_rate": 7.600000000000001e-06,
"loss": 1.2871,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 0.08447265625,
"learning_rate": 1.1400000000000001e-05,
"loss": 1.2972,
"step": 15
},
{
"epoch": 0.08,
"grad_norm": 0.0869140625,
"learning_rate": 1.5200000000000002e-05,
"loss": 1.2867,
"step": 20
},
{
"epoch": 0.1,
"grad_norm": 0.0908203125,
"learning_rate": 1.9e-05,
"loss": 1.2884,
"step": 25
},
{
"epoch": 0.12,
"grad_norm": 0.09130859375,
"learning_rate": 2.2800000000000002e-05,
"loss": 1.2734,
"step": 30
},
{
"epoch": 0.14,
"grad_norm": 0.08642578125,
"learning_rate": 2.66e-05,
"loss": 1.2667,
"step": 35
},
{
"epoch": 0.16,
"grad_norm": 0.0849609375,
"learning_rate": 3.0400000000000004e-05,
"loss": 1.2469,
"step": 40
},
{
"epoch": 0.18,
"grad_norm": 0.08203125,
"learning_rate": 3.4200000000000005e-05,
"loss": 1.2343,
"step": 45
},
{
"epoch": 0.21,
"grad_norm": 0.08935546875,
"learning_rate": 3.8e-05,
"loss": 1.2214,
"step": 50
},
{
"epoch": 0.23,
"grad_norm": 0.087890625,
"learning_rate": 3.705e-05,
"loss": 1.2089,
"step": 55
},
{
"epoch": 0.25,
"grad_norm": 0.0810546875,
"learning_rate": 3.61e-05,
"loss": 1.1902,
"step": 60
},
{
"epoch": 0.27,
"grad_norm": 0.0810546875,
"learning_rate": 3.515e-05,
"loss": 1.18,
"step": 65
},
{
"epoch": 0.29,
"grad_norm": 0.087890625,
"learning_rate": 3.4200000000000005e-05,
"loss": 1.1726,
"step": 70
},
{
"epoch": 0.31,
"grad_norm": 0.0947265625,
"learning_rate": 3.325e-05,
"loss": 1.168,
"step": 75
},
{
"epoch": 0.33,
"grad_norm": 0.099609375,
"learning_rate": 3.23e-05,
"loss": 1.1664,
"step": 80
},
{
"epoch": 0.35,
"grad_norm": 0.103515625,
"learning_rate": 3.135e-05,
"loss": 1.1573,
"step": 85
},
{
"epoch": 0.37,
"grad_norm": 0.10595703125,
"learning_rate": 3.0400000000000004e-05,
"loss": 1.1506,
"step": 90
},
{
"epoch": 0.39,
"grad_norm": 0.10791015625,
"learning_rate": 2.945e-05,
"loss": 1.1515,
"step": 95
},
{
"epoch": 0.41,
"grad_norm": 0.11474609375,
"learning_rate": 2.85e-05,
"loss": 1.1445,
"step": 100
},
{
"epoch": 0.43,
"grad_norm": 0.12060546875,
"learning_rate": 2.7550000000000002e-05,
"loss": 1.1444,
"step": 105
},
{
"epoch": 0.45,
"grad_norm": 0.126953125,
"learning_rate": 2.66e-05,
"loss": 1.145,
"step": 110
},
{
"epoch": 0.47,
"grad_norm": 0.126953125,
"learning_rate": 2.5650000000000003e-05,
"loss": 1.1398,
"step": 115
},
{
"epoch": 0.49,
"grad_norm": 0.12890625,
"learning_rate": 2.47e-05,
"loss": 1.129,
"step": 120
},
{
"epoch": 0.51,
"grad_norm": 0.1123046875,
"learning_rate": 2.375e-05,
"loss": 1.1365,
"step": 125
},
{
"epoch": 0.53,
"grad_norm": 0.10791015625,
"learning_rate": 2.2800000000000002e-05,
"loss": 1.1243,
"step": 130
},
{
"epoch": 0.55,
"grad_norm": 0.09765625,
"learning_rate": 2.185e-05,
"loss": 1.1268,
"step": 135
},
{
"epoch": 0.57,
"grad_norm": 0.0771484375,
"learning_rate": 2.0900000000000003e-05,
"loss": 1.1283,
"step": 140
},
{
"epoch": 0.59,
"grad_norm": 0.0625,
"learning_rate": 1.995e-05,
"loss": 1.1224,
"step": 145
},
{
"epoch": 0.62,
"grad_norm": 0.0517578125,
"learning_rate": 1.9e-05,
"loss": 1.1284,
"step": 150
},
{
"epoch": 0.64,
"grad_norm": 0.044921875,
"learning_rate": 1.805e-05,
"loss": 1.1182,
"step": 155
},
{
"epoch": 0.66,
"grad_norm": 0.0419921875,
"learning_rate": 1.7100000000000002e-05,
"loss": 1.1255,
"step": 160
},
{
"epoch": 0.68,
"grad_norm": 0.04052734375,
"learning_rate": 1.615e-05,
"loss": 1.1197,
"step": 165
},
{
"epoch": 0.7,
"grad_norm": 0.0390625,
"learning_rate": 1.5200000000000002e-05,
"loss": 1.1243,
"step": 170
},
{
"epoch": 0.72,
"grad_norm": 0.037353515625,
"learning_rate": 1.425e-05,
"loss": 1.1249,
"step": 175
},
{
"epoch": 0.74,
"grad_norm": 0.03759765625,
"learning_rate": 1.33e-05,
"loss": 1.1235,
"step": 180
},
{
"epoch": 0.76,
"grad_norm": 0.037353515625,
"learning_rate": 1.235e-05,
"loss": 1.1184,
"step": 185
},
{
"epoch": 0.78,
"grad_norm": 0.036376953125,
"learning_rate": 1.1400000000000001e-05,
"loss": 1.1152,
"step": 190
},
{
"epoch": 0.8,
"grad_norm": 0.0361328125,
"learning_rate": 1.0450000000000002e-05,
"loss": 1.1201,
"step": 195
},
{
"epoch": 0.82,
"grad_norm": 0.035888671875,
"learning_rate": 9.5e-06,
"loss": 1.1206,
"step": 200
},
{
"epoch": 0.84,
"grad_norm": 0.034912109375,
"learning_rate": 8.550000000000001e-06,
"loss": 1.1155,
"step": 205
},
{
"epoch": 0.86,
"grad_norm": 0.033935546875,
"learning_rate": 7.600000000000001e-06,
"loss": 1.1136,
"step": 210
},
{
"epoch": 0.88,
"grad_norm": 0.03515625,
"learning_rate": 6.65e-06,
"loss": 1.1167,
"step": 215
},
{
"epoch": 0.9,
"grad_norm": 0.034423828125,
"learning_rate": 5.7000000000000005e-06,
"loss": 1.1177,
"step": 220
},
{
"epoch": 0.92,
"grad_norm": 0.0341796875,
"learning_rate": 4.75e-06,
"loss": 1.1191,
"step": 225
},
{
"epoch": 0.94,
"grad_norm": 0.03369140625,
"learning_rate": 3.8000000000000005e-06,
"loss": 1.1215,
"step": 230
},
{
"epoch": 0.96,
"grad_norm": 0.032958984375,
"learning_rate": 2.8500000000000002e-06,
"loss": 1.1171,
"step": 235
},
{
"epoch": 0.98,
"grad_norm": 0.033935546875,
"learning_rate": 1.9000000000000002e-06,
"loss": 1.119,
"step": 240
},
{
"epoch": 1.0,
"grad_norm": 0.033447265625,
"learning_rate": 9.500000000000001e-07,
"loss": 1.1161,
"step": 245
},
{
"epoch": 1.03,
"grad_norm": 0.035400390625,
"learning_rate": 0.0,
"loss": 1.1181,
"step": 250
}
],
"logging_steps": 5,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"total_flos": 5.8437883477546985e+20,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
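
A minimal sketch of how the `log_history` entries above could be inspected, assuming Python with matplotlib installed and the file saved locally as trainer_state.json (the file name and output path are illustrative, not part of the original repo):

```python
import json
import matplotlib.pyplot as plt

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry holds step, epoch, loss, learning_rate, grad_norm.
history = state["log_history"]
steps = [entry["step"] for entry in history]
losses = [entry["loss"] for entry in history]
lrs = [entry["learning_rate"] for entry in history]

# Plot the training loss and the learning-rate schedule side by side.
fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
fig.savefig("training_curves.png")
```

Plotted this way, the log shows the warmup-then-linear-decay learning-rate schedule (peaking at 3.8e-05 around step 50 and reaching 0.0 at step 250) alongside the loss falling from about 1.29 to about 1.12 over the run.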