{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.986666666666667,
  "eval_steps": 100,
  "global_step": 168,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.7499012351036072,
      "learning_rate": 4.989080197352834e-05,
      "loss": 0.9542,
      "num_input_tokens_seen": 47168,
      "step": 5
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.6703861355781555,
      "learning_rate": 4.956416183083221e-05,
      "loss": 0.7834,
      "num_input_tokens_seen": 100640,
      "step": 10
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 0.6384798884391785,
      "learning_rate": 4.9022933048627496e-05,
      "loss": 0.7296,
      "num_input_tokens_seen": 150048,
      "step": 15
    },
    {
      "epoch": 0.35555555555555557,
      "grad_norm": 0.36834755539894104,
      "learning_rate": 4.827184371610511e-05,
      "loss": 0.6653,
      "num_input_tokens_seen": 196928,
      "step": 20
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.2962658405303955,
      "learning_rate": 4.731745523109029e-05,
      "loss": 0.683,
      "num_input_tokens_seen": 244592,
      "step": 25
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 0.33365580439567566,
      "learning_rate": 4.6168104980707107e-05,
      "loss": 0.5651,
      "num_input_tokens_seen": 293488,
      "step": 30
    },
    {
      "epoch": 0.6222222222222222,
      "grad_norm": 0.4523426294326782,
      "learning_rate": 4.4833833507280884e-05,
      "loss": 0.5411,
      "num_input_tokens_seen": 338160,
      "step": 35
    },
    {
      "epoch": 0.7111111111111111,
      "grad_norm": 0.41214215755462646,
      "learning_rate": 4.332629679574566e-05,
      "loss": 0.572,
      "num_input_tokens_seen": 382768,
      "step": 40
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.3807964026927948,
      "learning_rate": 4.16586644488001e-05,
      "loss": 0.6106,
      "num_input_tokens_seen": 430928,
      "step": 45
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.45064786076545715,
      "learning_rate": 3.9845504639337535e-05,
      "loss": 0.5552,
      "num_input_tokens_seen": 477520,
      "step": 50
    },
    {
      "epoch": 0.9777777777777777,
      "grad_norm": 0.4501620829105377,
      "learning_rate": 3.790265684518767e-05,
      "loss": 0.5659,
      "num_input_tokens_seen": 526640,
      "step": 55
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.24308599531650543,
      "learning_rate": 3.5847093477938956e-05,
      "loss": 0.58,
      "num_input_tokens_seen": 577184,
      "step": 60
    },
    {
      "epoch": 1.1555555555555554,
      "grad_norm": 0.33691608905792236,
      "learning_rate": 3.369677161463068e-05,
      "loss": 0.5429,
      "num_input_tokens_seen": 625344,
      "step": 65
    },
    {
      "epoch": 1.2444444444444445,
      "grad_norm": 0.2771952748298645,
      "learning_rate": 3.147047612756302e-05,
      "loss": 0.4384,
      "num_input_tokens_seen": 670224,
      "step": 70
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 0.4307265281677246,
      "learning_rate": 2.918765558261841e-05,
      "loss": 0.5749,
      "num_input_tokens_seen": 714880,
      "step": 75
    },
    {
      "epoch": 1.4222222222222223,
      "grad_norm": 0.4149227440357208,
      "learning_rate": 2.686825233966061e-05,
      "loss": 0.4222,
      "num_input_tokens_seen": 752944,
      "step": 80
    },
    {
      "epoch": 1.511111111111111,
      "grad_norm": 0.44140687584877014,
      "learning_rate": 2.4532528339227452e-05,
      "loss": 0.556,
      "num_input_tokens_seen": 802064,
      "step": 85
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.376556396484375,
      "learning_rate": 2.2200888097417307e-05,
      "loss": 0.4963,
      "num_input_tokens_seen": 852112,
      "step": 90
    },
    {
      "epoch": 1.6888888888888889,
      "grad_norm": 0.3596157431602478,
      "learning_rate": 1.9893700455257996e-05,
      "loss": 0.5085,
      "num_input_tokens_seen": 905184,
      "step": 95
    },
    {
      "epoch": 1.7777777777777777,
      "grad_norm": 0.4776301085948944,
      "learning_rate": 1.7631120639727393e-05,
      "loss": 0.4726,
      "num_input_tokens_seen": 949424,
      "step": 100
    },
    {
      "epoch": 1.7777777777777777,
      "eval_loss": 0.39410004019737244,
      "eval_runtime": 11.4337,
      "eval_samples_per_second": 8.746,
      "eval_steps_per_second": 4.373,
      "num_input_tokens_seen": 949424,
      "step": 100
    },
    {
      "epoch": 1.8666666666666667,
      "grad_norm": 0.26946938037872314,
      "learning_rate": 1.5432914190872757e-05,
      "loss": 0.4874,
      "num_input_tokens_seen": 996112,
      "step": 105
    },
    {
      "epoch": 1.9555555555555557,
      "grad_norm": 0.37138107419013977,
      "learning_rate": 1.331828429317345e-05,
      "loss": 0.5235,
      "num_input_tokens_seen": 1047520,
      "step": 110
    },
    {
      "epoch": 2.0444444444444443,
      "grad_norm": 0.33912232518196106,
      "learning_rate": 1.130570401955322e-05,
      "loss": 0.4984,
      "num_input_tokens_seen": 1096832,
      "step": 115
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.4000737965106964,
      "learning_rate": 9.412754953531663e-06,
      "loss": 0.4777,
      "num_input_tokens_seen": 1151104,
      "step": 120
    },
    {
      "epoch": 2.2222222222222223,
      "grad_norm": 0.5536447167396545,
      "learning_rate": 7.65597359928646e-06,
      "loss": 0.4918,
      "num_input_tokens_seen": 1194592,
      "step": 125
    },
    {
      "epoch": 2.311111111111111,
      "grad_norm": 0.42406854033470154,
      "learning_rate": 6.050706921363672e-06,
      "loss": 0.4573,
      "num_input_tokens_seen": 1235104,
      "step": 130
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.37090376019477844,
      "learning_rate": 4.610978276018496e-06,
      "loss": 0.4549,
      "num_input_tokens_seen": 1280560,
      "step": 135
    },
    {
      "epoch": 2.488888888888889,
      "grad_norm": 0.3070351779460907,
      "learning_rate": 3.3493649053890326e-06,
      "loss": 0.5422,
      "num_input_tokens_seen": 1329792,
      "step": 140
    },
    {
      "epoch": 2.5777777777777775,
      "grad_norm": 0.3824547529220581,
      "learning_rate": 2.2768880646947268e-06,
      "loss": 0.4968,
      "num_input_tokens_seen": 1380976,
      "step": 145
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 0.4585011899471283,
      "learning_rate": 1.4029167422908107e-06,
      "loss": 0.4361,
      "num_input_tokens_seen": 1423712,
      "step": 150
    },
    {
      "epoch": 2.7555555555555555,
      "grad_norm": 0.3712801933288574,
      "learning_rate": 7.350858136652261e-07,
      "loss": 0.4517,
      "num_input_tokens_seen": 1470000,
      "step": 155
    },
    {
      "epoch": 2.8444444444444446,
      "grad_norm": 0.4640035629272461,
      "learning_rate": 2.7922934437178695e-07,
      "loss": 0.4099,
      "num_input_tokens_seen": 1515168,
      "step": 160
    },
    {
      "epoch": 2.9333333333333336,
      "grad_norm": 0.24545951187610626,
      "learning_rate": 3.9329624554584884e-08,
      "loss": 0.4861,
      "num_input_tokens_seen": 1566432,
      "step": 165
    }
  ],
  "logging_steps": 5,
  "max_steps": 168,
  "num_input_tokens_seen": 1596080,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.467473931042816e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}