{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6187161639597835,
"eval_steps": 200,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"grad_norm": 0.5968947410583496,
"learning_rate": 9.896800825593395e-05,
"loss": 2.3935,
"step": 10
},
{
"epoch": 0.06,
"grad_norm": 0.6443502306938171,
"learning_rate": 9.793601651186791e-05,
"loss": 2.1763,
"step": 20
},
{
"epoch": 0.09,
"grad_norm": 0.4056578576564789,
"learning_rate": 9.690402476780186e-05,
"loss": 1.9601,
"step": 30
},
{
"epoch": 0.12,
"grad_norm": 0.24697279930114746,
"learning_rate": 9.587203302373582e-05,
"loss": 1.9009,
"step": 40
},
{
"epoch": 0.15,
"grad_norm": 0.24417832493782043,
"learning_rate": 9.484004127966977e-05,
"loss": 1.8698,
"step": 50
},
{
"epoch": 0.19,
"grad_norm": 0.19971579313278198,
"learning_rate": 9.380804953560372e-05,
"loss": 1.8502,
"step": 60
},
{
"epoch": 0.22,
"grad_norm": 0.21794500946998596,
"learning_rate": 9.277605779153768e-05,
"loss": 1.8139,
"step": 70
},
{
"epoch": 0.25,
"grad_norm": 0.20081552863121033,
"learning_rate": 9.174406604747162e-05,
"loss": 1.8246,
"step": 80
},
{
"epoch": 0.28,
"grad_norm": 0.2045777291059494,
"learning_rate": 9.071207430340559e-05,
"loss": 1.8009,
"step": 90
},
{
"epoch": 0.31,
"grad_norm": 0.21513579785823822,
"learning_rate": 8.968008255933953e-05,
"loss": 1.7745,
"step": 100
},
{
"epoch": 0.34,
"grad_norm": 0.2838321924209595,
"learning_rate": 8.864809081527348e-05,
"loss": 1.7831,
"step": 110
},
{
"epoch": 0.37,
"grad_norm": 0.19812746345996857,
"learning_rate": 8.761609907120744e-05,
"loss": 1.7554,
"step": 120
},
{
"epoch": 0.4,
"grad_norm": 0.30143260955810547,
"learning_rate": 8.658410732714138e-05,
"loss": 1.7707,
"step": 130
},
{
"epoch": 0.43,
"grad_norm": 0.21341949701309204,
"learning_rate": 8.555211558307535e-05,
"loss": 1.7476,
"step": 140
},
{
"epoch": 0.46,
"grad_norm": 0.24006041884422302,
"learning_rate": 8.452012383900929e-05,
"loss": 1.7653,
"step": 150
},
{
"epoch": 0.49,
"grad_norm": 0.25095027685165405,
"learning_rate": 8.348813209494324e-05,
"loss": 1.7438,
"step": 160
},
{
"epoch": 0.53,
"grad_norm": 0.2602318525314331,
"learning_rate": 8.24561403508772e-05,
"loss": 1.7728,
"step": 170
},
{
"epoch": 0.56,
"grad_norm": 0.26738253235816956,
"learning_rate": 8.142414860681114e-05,
"loss": 1.7433,
"step": 180
},
{
"epoch": 0.59,
"grad_norm": 0.25230053067207336,
"learning_rate": 8.039215686274511e-05,
"loss": 1.7304,
"step": 190
},
{
"epoch": 0.62,
"grad_norm": 0.2360549122095108,
"learning_rate": 7.936016511867905e-05,
"loss": 1.7297,
"step": 200
},
{
"epoch": 0.62,
"eval_loss": 1.744746446609497,
"eval_runtime": 294.0669,
"eval_samples_per_second": 35.172,
"eval_steps_per_second": 4.397,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 969,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 200,
"total_flos": 8.880783974203392e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}