{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.237432327919567,
"eval_steps": 200,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 0.5968947410583496,
"learning_rate": 9.896800825593395e-05,
"loss": 2.3935,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 0.6443502306938171,
"learning_rate": 9.793601651186791e-05,
"loss": 2.1763,
"step": 20
},
{
"epoch": 0.09,
"grad_norm": 0.4056578576564789,
"learning_rate": 9.690402476780186e-05,
"loss": 1.9601,
"step": 30
},
{
"epoch": 0.12,
"grad_norm": 0.24697279930114746,
"learning_rate": 9.587203302373582e-05,
"loss": 1.9009,
"step": 40
},
{
"epoch": 0.15,
"grad_norm": 0.24417832493782043,
"learning_rate": 9.484004127966977e-05,
"loss": 1.8698,
"step": 50
},
{
"epoch": 0.19,
"grad_norm": 0.19971579313278198,
"learning_rate": 9.380804953560372e-05,
"loss": 1.8502,
"step": 60
},
{
"epoch": 0.22,
"grad_norm": 0.21794500946998596,
"learning_rate": 9.277605779153768e-05,
"loss": 1.8139,
"step": 70
},
{
"epoch": 0.25,
"grad_norm": 0.20081552863121033,
"learning_rate": 9.174406604747162e-05,
"loss": 1.8246,
"step": 80
},
{
"epoch": 0.28,
"grad_norm": 0.2045777291059494,
"learning_rate": 9.071207430340559e-05,
"loss": 1.8009,
"step": 90
},
{
"epoch": 0.31,
"grad_norm": 0.21513579785823822,
"learning_rate": 8.968008255933953e-05,
"loss": 1.7745,
"step": 100
},
{
"epoch": 0.34,
"grad_norm": 0.2838321924209595,
"learning_rate": 8.864809081527348e-05,
"loss": 1.7831,
"step": 110
},
{
"epoch": 0.37,
"grad_norm": 0.19812746345996857,
"learning_rate": 8.761609907120744e-05,
"loss": 1.7554,
"step": 120
},
{
"epoch": 0.4,
"grad_norm": 0.30143260955810547,
"learning_rate": 8.658410732714138e-05,
"loss": 1.7707,
"step": 130
},
{
"epoch": 0.43,
"grad_norm": 0.21341949701309204,
"learning_rate": 8.555211558307535e-05,
"loss": 1.7476,
"step": 140
},
{
"epoch": 0.46,
"grad_norm": 0.24006041884422302,
"learning_rate": 8.452012383900929e-05,
"loss": 1.7653,
"step": 150
},
{
"epoch": 0.49,
"grad_norm": 0.25095027685165405,
"learning_rate": 8.348813209494324e-05,
"loss": 1.7438,
"step": 160
},
{
"epoch": 0.53,
"grad_norm": 0.2602318525314331,
"learning_rate": 8.24561403508772e-05,
"loss": 1.7728,
"step": 170
},
{
"epoch": 0.56,
"grad_norm": 0.26738253235816956,
"learning_rate": 8.142414860681114e-05,
"loss": 1.7433,
"step": 180
},
{
"epoch": 0.59,
"grad_norm": 0.25230053067207336,
"learning_rate": 8.039215686274511e-05,
"loss": 1.7304,
"step": 190
},
{
"epoch": 0.62,
"grad_norm": 0.2360549122095108,
"learning_rate": 7.936016511867905e-05,
"loss": 1.7297,
"step": 200
},
{
"epoch": 0.62,
"eval_loss": 1.744746446609497,
"eval_runtime": 294.0669,
"eval_samples_per_second": 35.172,
"eval_steps_per_second": 4.397,
"step": 200
},
{
"epoch": 0.65,
"grad_norm": 0.2905316948890686,
"learning_rate": 6.74922600619195e-05,
"loss": 1.727,
"step": 210
},
{
"epoch": 0.68,
"grad_norm": 0.25649935007095337,
"learning_rate": 6.594427244582044e-05,
"loss": 1.7224,
"step": 220
},
{
"epoch": 0.71,
"grad_norm": 0.23987528681755066,
"learning_rate": 6.439628482972137e-05,
"loss": 1.7389,
"step": 230
},
{
"epoch": 0.74,
"grad_norm": 0.2479698807001114,
"learning_rate": 6.28482972136223e-05,
"loss": 1.7255,
"step": 240
},
{
"epoch": 0.77,
"grad_norm": 0.25272852182388306,
"learning_rate": 6.130030959752322e-05,
"loss": 1.7354,
"step": 250
},
{
"epoch": 0.8,
"grad_norm": 0.2447136789560318,
"learning_rate": 5.9752321981424155e-05,
"loss": 1.7443,
"step": 260
},
{
"epoch": 0.84,
"grad_norm": 0.26579201221466064,
"learning_rate": 5.8204334365325074e-05,
"loss": 1.7462,
"step": 270
},
{
"epoch": 0.87,
"grad_norm": 0.31005680561065674,
"learning_rate": 5.6656346749226006e-05,
"loss": 1.7144,
"step": 280
},
{
"epoch": 0.9,
"grad_norm": 0.2663085460662842,
"learning_rate": 5.510835913312694e-05,
"loss": 1.7094,
"step": 290
},
{
"epoch": 0.93,
"grad_norm": 0.28601768612861633,
"learning_rate": 5.3560371517027864e-05,
"loss": 1.6838,
"step": 300
},
{
"epoch": 0.96,
"grad_norm": 0.2900325059890747,
"learning_rate": 5.20123839009288e-05,
"loss": 1.7042,
"step": 310
},
{
"epoch": 0.99,
"grad_norm": 0.28358617424964905,
"learning_rate": 5.046439628482973e-05,
"loss": 1.7057,
"step": 320
},
{
"epoch": 1.02,
"grad_norm": 0.3409838378429413,
"learning_rate": 4.891640866873065e-05,
"loss": 1.7228,
"step": 330
},
{
"epoch": 1.05,
"grad_norm": 0.28400272130966187,
"learning_rate": 4.736842105263158e-05,
"loss": 1.7157,
"step": 340
},
{
"epoch": 1.08,
"grad_norm": 0.33671486377716064,
"learning_rate": 4.582043343653251e-05,
"loss": 1.709,
"step": 350
},
{
"epoch": 1.11,
"grad_norm": 0.29089200496673584,
"learning_rate": 4.427244582043344e-05,
"loss": 1.7018,
"step": 360
},
{
"epoch": 1.14,
"grad_norm": 0.2736106514930725,
"learning_rate": 4.2724458204334365e-05,
"loss": 1.6888,
"step": 370
},
{
"epoch": 1.18,
"grad_norm": 0.272792786359787,
"learning_rate": 4.11764705882353e-05,
"loss": 1.7191,
"step": 380
},
{
"epoch": 1.21,
"grad_norm": 0.282695472240448,
"learning_rate": 3.962848297213623e-05,
"loss": 1.688,
"step": 390
},
{
"epoch": 1.24,
"grad_norm": 0.29477787017822266,
"learning_rate": 3.8080495356037155e-05,
"loss": 1.7292,
"step": 400
},
{
"epoch": 1.24,
"eval_loss": 1.7159068584442139,
"eval_runtime": 296.4339,
"eval_samples_per_second": 34.891,
"eval_steps_per_second": 4.362,
"step": 400
}
],
"logging_steps": 10,
"max_steps": 646,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"total_flos": 1.7617367955800064e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}