{
  "best_metric": 27.5466,
  "best_model_checkpoint": "/content/drive/MyDrive/ArabartModel/checkpoint-7486",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 7486,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1335826876836762,
      "grad_norm": 3.816420078277588,
      "learning_rate": 4.7773621871938735e-05,
      "loss": 2.0568,
      "step": 500
    },
    {
      "epoch": 0.2671653753673524,
      "grad_norm": 4.217596054077148,
      "learning_rate": 4.554724374387746e-05,
      "loss": 1.949,
      "step": 1000
    },
    {
      "epoch": 0.4007480630510286,
      "grad_norm": 3.4469385147094727,
      "learning_rate": 4.332086561581619e-05,
      "loss": 1.9236,
      "step": 1500
    },
    {
      "epoch": 0.5343307507347048,
      "grad_norm": 3.3545796871185303,
      "learning_rate": 4.1094487487754926e-05,
      "loss": 1.9083,
      "step": 2000
    },
    {
      "epoch": 0.667913438418381,
      "grad_norm": 3.905182361602783,
      "learning_rate": 3.886810935969365e-05,
      "loss": 1.8793,
      "step": 2500
    },
    {
      "epoch": 0.8014961261020572,
      "grad_norm": 3.746492862701416,
      "learning_rate": 3.6641731231632384e-05,
      "loss": 1.8767,
      "step": 3000
    },
    {
      "epoch": 0.9350788137857333,
      "grad_norm": 3.5829365253448486,
      "learning_rate": 3.441535310357111e-05,
      "loss": 1.8613,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_loss": 2.4695754051208496,
      "eval_rouge1": 27.3427,
      "eval_rouge2": 11.5623,
      "eval_rougeL": 23.8752,
      "eval_runtime": 854.6158,
      "eval_samples_per_second": 5.487,
      "eval_steps_per_second": 0.549,
      "step": 3743
    },
    {
      "epoch": 1.0686615014694096,
      "grad_norm": 3.6381685733795166,
      "learning_rate": 3.218897497550984e-05,
      "loss": 1.7299,
      "step": 4000
    },
    {
      "epoch": 1.2022441891530857,
      "grad_norm": 3.850755453109741,
      "learning_rate": 2.9962596847448572e-05,
      "loss": 1.6235,
      "step": 4500
    },
    {
      "epoch": 1.3358268768367618,
      "grad_norm": 3.5649123191833496,
      "learning_rate": 2.77362187193873e-05,
      "loss": 1.6276,
      "step": 5000
    },
    {
      "epoch": 1.4694095645204381,
      "grad_norm": 3.684051036834717,
      "learning_rate": 2.5509840591326034e-05,
      "loss": 1.635,
      "step": 5500
    },
    {
      "epoch": 1.6029922522041145,
      "grad_norm": 3.216923236846924,
      "learning_rate": 2.3283462463264763e-05,
      "loss": 1.6329,
      "step": 6000
    },
    {
      "epoch": 1.7365749398877905,
      "grad_norm": 3.666384220123291,
      "learning_rate": 2.1057084335203492e-05,
      "loss": 1.628,
      "step": 6500
    },
    {
      "epoch": 1.8701576275714666,
      "grad_norm": 3.5083119869232178,
      "learning_rate": 1.883070620714222e-05,
      "loss": 1.619,
      "step": 7000
    },
    {
      "epoch": 2.0,
      "eval_loss": 2.552595376968384,
      "eval_rouge1": 27.5466,
      "eval_rouge2": 11.7619,
      "eval_rougeL": 24.0393,
      "eval_runtime": 855.3721,
      "eval_samples_per_second": 5.482,
      "eval_steps_per_second": 0.548,
      "step": 7486
    }
  ],
  "logging_steps": 500,
  "max_steps": 11229,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.390892214460416e+16,
  "train_batch_size": 10,
  "trial_name": null,
  "trial_params": null
}