{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.976,
"eval_steps": 500,
"global_step": 93,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 0.7531044483184814,
"learning_rate": 0.00019857697953148037,
"loss": 2.1283,
"step": 5
},
{
"epoch": 0.32,
"grad_norm": 2.2025675773620605,
"learning_rate": 0.00019434841787099803,
"loss": 1.8,
"step": 10
},
{
"epoch": 0.48,
"grad_norm": 0.9267807602882385,
"learning_rate": 0.00018743466161445823,
"loss": 1.5595,
"step": 15
},
{
"epoch": 0.64,
"grad_norm": 0.6875874996185303,
"learning_rate": 0.0001780324790952092,
"loss": 1.6097,
"step": 20
},
{
"epoch": 0.8,
"grad_norm": 0.7420657873153687,
"learning_rate": 0.00016640946027672392,
"loss": 1.2942,
"step": 25
},
{
"epoch": 0.96,
"grad_norm": 0.6181728839874268,
"learning_rate": 0.00015289640103269625,
"loss": 1.462,
"step": 30
},
{
"epoch": 1.12,
"grad_norm": 0.8008699417114258,
"learning_rate": 0.0001378778885610576,
"loss": 1.4432,
"step": 35
},
{
"epoch": 1.28,
"grad_norm": 1.0314457416534424,
"learning_rate": 0.00012178135587488515,
"loss": 1.3561,
"step": 40
},
{
"epoch": 1.44,
"grad_norm": 0.47113487124443054,
"learning_rate": 0.00010506491688387127,
"loss": 1.3299,
"step": 45
},
{
"epoch": 1.6,
"grad_norm": 0.41678062081336975,
"learning_rate": 8.820432828491542e-05,
"loss": 1.2208,
"step": 50
},
{
"epoch": 1.76,
"grad_norm": 0.4888109862804413,
"learning_rate": 7.16794493317696e-05,
"loss": 1.4574,
"step": 55
},
{
"epoch": 1.92,
"grad_norm": 0.3696904480457306,
"learning_rate": 5.596058484423656e-05,
"loss": 1.1423,
"step": 60
},
{
"epoch": 2.08,
"grad_norm": 0.6482197046279907,
"learning_rate": 4.149510014046922e-05,
"loss": 1.3134,
"step": 65
},
{
"epoch": 2.24,
"grad_norm": 0.7164911031723022,
"learning_rate": 2.869468883687798e-05,
"loss": 1.3384,
"step": 70
},
{
"epoch": 2.4,
"grad_norm": 0.3950205147266388,
"learning_rate": 1.7923655879272393e-05,
"loss": 1.3873,
"step": 75
},
{
"epoch": 2.56,
"grad_norm": 0.387712687253952,
"learning_rate": 9.488549274967872e-06,
"loss": 1.2214,
"step": 80
},
{
"epoch": 2.7199999999999998,
"grad_norm": 0.2879863679409027,
"learning_rate": 3.6294356110059157e-06,
"loss": 1.3198,
"step": 85
},
{
"epoch": 2.88,
"grad_norm": 0.9349786043167114,
"learning_rate": 5.130676608104845e-07,
"loss": 1.3832,
"step": 90
},
{
"epoch": 2.976,
"step": 93,
"total_flos": 1234235324694528.0,
"train_loss": 1.4291518221619308,
"train_runtime": 192.8949,
"train_samples_per_second": 7.776,
"train_steps_per_second": 0.482
}
],
"logging_steps": 5,
"max_steps": 93,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 1234235324694528.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}