MNLP_M3_mcqa_model_source_fixed/trainer_state.json
{
"best_global_step": 1037,
"best_metric": 0.49640655517578125,
"best_model_checkpoint": "./mcqa_model_test_source_fixed/checkpoint-1037",
"epoch": 0.9995180722891567,
"eval_steps": 500,
"global_step": 1037,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04819277108433735,
"grad_norm": 42.9211540222168,
"learning_rate": 4.2307692307692304e-07,
"loss": 0.7894,
"step": 50
},
{
"epoch": 0.0963855421686747,
"grad_norm": 72.75926208496094,
"learning_rate": 9.038461538461538e-07,
"loss": 0.6269,
"step": 100
},
{
"epoch": 0.14457831325301204,
"grad_norm": 73.69217681884766,
"learning_rate": 9.571275455519828e-07,
"loss": 0.5828,
"step": 150
},
{
"epoch": 0.1927710843373494,
"grad_norm": 49.02070617675781,
"learning_rate": 9.035369774919614e-07,
"loss": 0.5468,
"step": 200
},
{
"epoch": 0.24096385542168675,
"grad_norm": 36.791542053222656,
"learning_rate": 8.4994640943194e-07,
"loss": 0.6211,
"step": 250
},
{
"epoch": 0.2891566265060241,
"grad_norm": 76.28284454345703,
"learning_rate": 7.963558413719184e-07,
"loss": 0.5783,
"step": 300
},
{
"epoch": 0.3373493975903614,
"grad_norm": 42.93614196777344,
"learning_rate": 7.42765273311897e-07,
"loss": 0.5927,
"step": 350
},
{
"epoch": 0.3855421686746988,
"grad_norm": 68.8043212890625,
"learning_rate": 6.891747052518756e-07,
"loss": 0.5652,
"step": 400
},
{
"epoch": 0.43373493975903615,
"grad_norm": 58.556663513183594,
"learning_rate": 6.355841371918542e-07,
"loss": 0.5515,
"step": 450
},
{
"epoch": 0.4819277108433735,
"grad_norm": 47.458282470703125,
"learning_rate": 5.819935691318327e-07,
"loss": 0.5392,
"step": 500
},
{
"epoch": 0.5301204819277109,
"grad_norm": 53.92032241821289,
"learning_rate": 5.284030010718113e-07,
"loss": 0.5744,
"step": 550
},
{
"epoch": 0.5783132530120482,
"grad_norm": 61.76318359375,
"learning_rate": 4.748124330117899e-07,
"loss": 0.5361,
"step": 600
},
{
"epoch": 0.6265060240963856,
"grad_norm": 40.69456481933594,
"learning_rate": 4.2122186495176846e-07,
"loss": 0.6077,
"step": 650
},
{
"epoch": 0.6746987951807228,
"grad_norm": 80.4998550415039,
"learning_rate": 3.6763129689174703e-07,
"loss": 0.5567,
"step": 700
},
{
"epoch": 0.7228915662650602,
"grad_norm": 45.61492156982422,
"learning_rate": 3.140407288317256e-07,
"loss": 0.5114,
"step": 750
},
{
"epoch": 0.7710843373493976,
"grad_norm": 47.459102630615234,
"learning_rate": 2.6045016077170417e-07,
"loss": 0.4868,
"step": 800
},
{
"epoch": 0.8192771084337349,
"grad_norm": 86.90262603759766,
"learning_rate": 2.0685959271168274e-07,
"loss": 0.5324,
"step": 850
},
{
"epoch": 0.8674698795180723,
"grad_norm": 45.50479507446289,
"learning_rate": 1.532690246516613e-07,
"loss": 0.5092,
"step": 900
},
{
"epoch": 0.9156626506024096,
"grad_norm": 44.45884323120117,
"learning_rate": 9.967845659163988e-08,
"loss": 0.556,
"step": 950
},
{
"epoch": 0.963855421686747,
"grad_norm": 54.122867584228516,
"learning_rate": 4.608788853161844e-08,
"loss": 0.4876,
"step": 1000
},
{
"epoch": 0.9995180722891567,
"eval_loss": 0.49640655517578125,
"eval_runtime": 69.1307,
"eval_samples_per_second": 45.002,
"eval_steps_per_second": 5.627,
"step": 1037
}
],
"logging_steps": 50,
"max_steps": 1037,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7892894912348160.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
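For reference, a minimal sketch (an assumption, not part of the uploaded file) of how this trainer state can be read back with Python's standard json module to recover the logged training losses and the best checkpoint; it assumes the file is saved locally under the name trainer_state.json shown above.

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Entries with a "loss" key are the periodic training logs
# (every `logging_steps` = 50 steps here); the entry with
# "eval_loss" is the end-of-epoch evaluation at step 1037.
train_logs = [e for e in state["log_history"] if "loss" in e]
print(f"{len(train_logs)} training log entries, "
      f"last loss {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")

# The best checkpoint and its eval_loss are stored at the top level.
print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss = {state['best_metric']})")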