category_cleaning / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 100,
"global_step": 1176,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.26,
"grad_norm": 23.253705978393555,
"learning_rate": 4.5748299319727895e-05,
"loss": 3.0173,
"step": 100
},
{
"epoch": 0.26,
"eval_loss": 4.248861312866211,
"eval_runtime": 33.2541,
"eval_samples_per_second": 125.699,
"eval_steps_per_second": 1.985,
"step": 100
},
{
"epoch": 0.51,
"grad_norm": 24.05482292175293,
"learning_rate": 4.149659863945579e-05,
"loss": 2.8146,
"step": 200
},
{
"epoch": 0.51,
"eval_loss": 4.2605061531066895,
"eval_runtime": 33.1469,
"eval_samples_per_second": 126.105,
"eval_steps_per_second": 1.991,
"step": 200
},
{
"epoch": 0.77,
"grad_norm": 19.086837768554688,
"learning_rate": 3.724489795918368e-05,
"loss": 2.7177,
"step": 300
},
{
"epoch": 0.77,
"eval_loss": 4.254702091217041,
"eval_runtime": 33.1625,
"eval_samples_per_second": 126.046,
"eval_steps_per_second": 1.99,
"step": 300
},
{
"epoch": 1.02,
"grad_norm": 19.842321395874023,
"learning_rate": 3.2993197278911564e-05,
"loss": 2.6338,
"step": 400
},
{
"epoch": 1.02,
"eval_loss": 4.386436462402344,
"eval_runtime": 33.0306,
"eval_samples_per_second": 126.549,
"eval_steps_per_second": 1.998,
"step": 400
},
{
"epoch": 1.28,
"grad_norm": 25.054931640625,
"learning_rate": 2.8741496598639456e-05,
"loss": 2.102,
"step": 500
},
{
"epoch": 1.28,
"eval_loss": 4.353818893432617,
"eval_runtime": 32.9823,
"eval_samples_per_second": 126.734,
"eval_steps_per_second": 2.001,
"step": 500
},
{
"epoch": 1.53,
"grad_norm": 22.865522384643555,
"learning_rate": 2.448979591836735e-05,
"loss": 2.1015,
"step": 600
},
{
"epoch": 1.53,
"eval_loss": 4.339511871337891,
"eval_runtime": 33.0575,
"eval_samples_per_second": 126.446,
"eval_steps_per_second": 1.997,
"step": 600
},
{
"epoch": 1.79,
"grad_norm": 25.445798873901367,
"learning_rate": 2.023809523809524e-05,
"loss": 2.0691,
"step": 700
},
{
"epoch": 1.79,
"eval_loss": 4.336267471313477,
"eval_runtime": 32.9895,
"eval_samples_per_second": 126.707,
"eval_steps_per_second": 2.001,
"step": 700
},
{
"epoch": 2.04,
"grad_norm": 22.133098602294922,
"learning_rate": 1.5986394557823133e-05,
"loss": 1.9422,
"step": 800
},
{
"epoch": 2.04,
"eval_loss": 4.633069038391113,
"eval_runtime": 32.9021,
"eval_samples_per_second": 127.044,
"eval_steps_per_second": 2.006,
"step": 800
},
{
"epoch": 2.3,
"grad_norm": 31.207412719726562,
"learning_rate": 1.1734693877551021e-05,
"loss": 1.4405,
"step": 900
},
{
"epoch": 2.3,
"eval_loss": 4.565266132354736,
"eval_runtime": 33.1329,
"eval_samples_per_second": 126.158,
"eval_steps_per_second": 1.992,
"step": 900
},
{
"epoch": 2.55,
"grad_norm": 19.457901000976562,
"learning_rate": 7.482993197278912e-06,
"loss": 1.3896,
"step": 1000
},
{
"epoch": 2.55,
"eval_loss": 4.576756477355957,
"eval_runtime": 33.1251,
"eval_samples_per_second": 126.188,
"eval_steps_per_second": 1.992,
"step": 1000
},
{
"epoch": 2.81,
"grad_norm": 20.708255767822266,
"learning_rate": 3.231292517006803e-06,
"loss": 1.3735,
"step": 1100
},
{
"epoch": 2.81,
"eval_loss": 4.561042308807373,
"eval_runtime": 33.6147,
"eval_samples_per_second": 124.35,
"eval_steps_per_second": 1.963,
"step": 1100
},
{
"epoch": 3.0,
"step": 1176,
"total_flos": 4376310425967942.0,
"train_loss": 2.093686363324016,
"train_runtime": 1727.448,
"train_samples_per_second": 43.554,
"train_steps_per_second": 0.681
}
],
"logging_steps": 100,
"max_steps": 1176,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 4376310425967942.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}
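
The JSON above is the standard trainer_state.json written by the Hugging Face Transformers Trainer at the end of a run. Below is a minimal sketch, assuming the file is saved locally as "trainer_state.json" (the path is an assumption), showing how the log_history entries can be read with the standard library: training logs carry a "loss" key, evaluation logs carry "eval_loss", and both record "epoch" and "step".

import json

# Minimal sketch: read the trainer state and summarize its log_history.
# The path "trainer_state.json" is an assumption; adjust it as needed.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries have "loss"; evaluation entries have "eval_loss".
# The final summary entry uses "train_loss" and is excluded by both filters.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]
eval_by_step = {e["step"]: e["eval_loss"] for e in eval_logs}

print(f"{'step':>6} {'epoch':>6} {'train loss':>11} {'eval loss':>10}")
for entry in train_logs:
    step = entry["step"]
    eval_loss = eval_by_step.get(step)
    eval_str = f"{eval_loss:10.4f}" if eval_loss is not None else " " * 10
    print(f"{step:>6} {entry['epoch']:>6.2f} {entry['loss']:>11.4f} {eval_str}")

best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"\nlowest eval_loss: {best['eval_loss']:.4f} at step {best['step']}")

Running this against the state above would report the lowest eval_loss of roughly 4.249 at step 100; later evaluations drift upward while the training loss keeps falling, a pattern consistent with overfitting over the three epochs.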