{
"best_global_step": 1037,
"best_metric": 0.5065046548843384,
"best_model_checkpoint": "./mcqa_model_test_source/checkpoint-1037",
"epoch": 0.9995180722891567,
"eval_steps": 500,
"global_step": 1037,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04819277108433735,
"grad_norm": 40.22263717651367,
"learning_rate": 4.326923076923077e-07,
"loss": 0.7966,
"step": 50
},
{
"epoch": 0.0963855421686747,
"grad_norm": 88.02349853515625,
"learning_rate": 9.038461538461538e-07,
"loss": 0.6168,
"step": 100
},
{
"epoch": 0.14457831325301204,
"grad_norm": 64.6302719116211,
"learning_rate": 9.571275455519828e-07,
"loss": 0.6088,
"step": 150
},
{
"epoch": 0.1927710843373494,
"grad_norm": 48.12834930419922,
"learning_rate": 9.035369774919614e-07,
"loss": 0.5406,
"step": 200
},
{
"epoch": 0.24096385542168675,
"grad_norm": 37.15648651123047,
"learning_rate": 8.4994640943194e-07,
"loss": 0.6103,
"step": 250
},
{
"epoch": 0.2891566265060241,
"grad_norm": 70.53849029541016,
"learning_rate": 7.963558413719184e-07,
"loss": 0.5637,
"step": 300
},
{
"epoch": 0.3373493975903614,
"grad_norm": 43.44021987915039,
"learning_rate": 7.42765273311897e-07,
"loss": 0.5857,
"step": 350
},
{
"epoch": 0.3855421686746988,
"grad_norm": 71.3525390625,
"learning_rate": 6.891747052518756e-07,
"loss": 0.5508,
"step": 400
},
{
"epoch": 0.43373493975903615,
"grad_norm": 57.75139617919922,
"learning_rate": 6.355841371918542e-07,
"loss": 0.5528,
"step": 450
},
{
"epoch": 0.4819277108433735,
"grad_norm": 54.9525146484375,
"learning_rate": 5.819935691318327e-07,
"loss": 0.5326,
"step": 500
},
{
"epoch": 0.5301204819277109,
"grad_norm": 50.39666748046875,
"learning_rate": 5.284030010718113e-07,
"loss": 0.5683,
"step": 550
},
{
"epoch": 0.5783132530120482,
"grad_norm": 59.521488189697266,
"learning_rate": 4.748124330117899e-07,
"loss": 0.5337,
"step": 600
},
{
"epoch": 0.6265060240963856,
"grad_norm": 40.77716827392578,
"learning_rate": 4.2122186495176846e-07,
"loss": 0.5917,
"step": 650
},
{
"epoch": 0.6746987951807228,
"grad_norm": 83.60078430175781,
"learning_rate": 3.6763129689174703e-07,
"loss": 0.5459,
"step": 700
},
{
"epoch": 0.7228915662650602,
"grad_norm": 46.46372604370117,
"learning_rate": 3.140407288317256e-07,
"loss": 0.5251,
"step": 750
},
{
"epoch": 0.7710843373493976,
"grad_norm": 42.664146423339844,
"learning_rate": 2.6045016077170417e-07,
"loss": 0.4937,
"step": 800
},
{
"epoch": 0.8192771084337349,
"grad_norm": 78.2928466796875,
"learning_rate": 2.0685959271168274e-07,
"loss": 0.5579,
"step": 850
},
{
"epoch": 0.8674698795180723,
"grad_norm": 56.88075256347656,
"learning_rate": 1.532690246516613e-07,
"loss": 0.5014,
"step": 900
},
{
"epoch": 0.9156626506024096,
"grad_norm": 27.720346450805664,
"learning_rate": 9.967845659163988e-08,
"loss": 0.5585,
"step": 950
},
{
"epoch": 0.963855421686747,
"grad_norm": 54.69992446899414,
"learning_rate": 4.608788853161844e-08,
"loss": 0.4791,
"step": 1000
},
{
"epoch": 0.9995180722891567,
"eval_loss": 0.5065046548843384,
"eval_runtime": 69.1291,
"eval_samples_per_second": 45.003,
"eval_steps_per_second": 5.627,
"step": 1037
}
],
"logging_steps": 50,
"max_steps": 1037,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7892894912348160.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}