{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3175,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15748031496062992,
      "grad_norm": 1.2890625,
      "learning_rate": 0.00031446540880503143,
      "loss": 1.5497,
      "step": 100
    },
    {
      "epoch": 0.31496062992125984,
      "grad_norm": 0.92578125,
      "learning_rate": 0.0004997720451762572,
      "loss": 0.9254,
      "step": 200
    },
    {
      "epoch": 0.47244094488188976,
      "grad_norm": 0.90625,
      "learning_rate": 0.0004973084374349976,
      "loss": 0.8457,
      "step": 300
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 0.734375,
      "learning_rate": 0.0004921639131931859,
      "loss": 0.7922,
      "step": 400
    },
    {
      "epoch": 0.7874015748031497,
      "grad_norm": 0.80078125,
      "learning_rate": 0.00048439424102900066,
      "loss": 0.7571,
      "step": 500
    },
    {
      "epoch": 0.9448818897637795,
      "grad_norm": 0.73828125,
      "learning_rate": 0.00047408364711169396,
      "loss": 0.7314,
      "step": 600
    },
    {
      "epoch": 1.1023622047244095,
      "grad_norm": 0.6484375,
      "learning_rate": 0.00046134390215823,
      "loss": 0.5663,
      "step": 700
    },
    {
      "epoch": 1.2598425196850394,
      "grad_norm": 0.70703125,
      "learning_rate": 0.00044631310979666443,
      "loss": 0.5111,
      "step": 800
    },
    {
      "epoch": 1.4173228346456692,
      "grad_norm": 0.78125,
      "learning_rate": 0.0004291542094708612,
      "loss": 0.5099,
      "step": 900
    },
    {
      "epoch": 1.574803149606299,
      "grad_norm": 1.0078125,
      "learning_rate": 0.000410053210115622,
      "loss": 0.5007,
      "step": 1000
    },
    {
      "epoch": 1.7322834645669292,
      "grad_norm": 0.828125,
      "learning_rate": 0.00038921717374985584,
      "loss": 0.5068,
      "step": 1100
    },
    {
      "epoch": 1.889763779527559,
      "grad_norm": 0.74609375,
      "learning_rate": 0.0003668719708463959,
      "loss": 0.4938,
      "step": 1200
    },
    {
      "epoch": 2.047244094488189,
      "grad_norm": 0.78515625,
      "learning_rate": 0.00034325983181110047,
      "loss": 0.4232,
      "step": 1300
    },
    {
      "epoch": 2.204724409448819,
      "grad_norm": 0.8515625,
      "learning_rate": 0.00031863672111412524,
      "loss": 0.2999,
      "step": 1400
    },
    {
      "epoch": 2.362204724409449,
      "grad_norm": 0.921875,
      "learning_rate": 0.00029326956253877123,
      "loss": 0.3159,
      "step": 1500
    },
    {
      "epoch": 2.5196850393700787,
      "grad_norm": 0.79296875,
      "learning_rate": 0.00026743334562725617,
      "loss": 0.3034,
      "step": 1600
    },
    {
      "epoch": 2.677165354330709,
      "grad_norm": 0.86328125,
      "learning_rate": 0.00024140814469062377,
      "loss": 0.3046,
      "step": 1700
    },
    {
      "epoch": 2.8346456692913384,
      "grad_norm": 0.84375,
      "learning_rate": 0.0002154760826978469,
      "loss": 0.3078,
      "step": 1800
    },
    {
      "epoch": 2.9921259842519685,
      "grad_norm": 0.765625,
      "learning_rate": 0.00018991827295670777,
      "loss": 0.2941,
      "step": 1900
    },
    {
      "epoch": 3.1496062992125986,
      "grad_norm": 0.7265625,
      "learning_rate": 0.00016501177173978493,
      "loss": 0.2171,
      "step": 2000
    },
    {
      "epoch": 3.3070866141732282,
      "grad_norm": 0.765625,
      "learning_rate": 0.00014102657489022886,
      "loss": 0.2132,
      "step": 2100
    },
    {
      "epoch": 3.4645669291338583,
      "grad_norm": 0.65234375,
      "learning_rate": 0.00011822269096524812,
      "loss": 0.2121,
      "step": 2200
    },
    {
      "epoch": 3.622047244094488,
      "grad_norm": 0.84375,
      "learning_rate": 9.684732264553247e-05,
      "loss": 0.2142,
      "step": 2300
    },
    {
      "epoch": 3.779527559055118,
      "grad_norm": 0.66796875,
      "learning_rate": 7.713218696519558e-05,
      "loss": 0.2175,
      "step": 2400
    },
    {
      "epoch": 3.937007874015748,
      "grad_norm": 0.68359375,
      "learning_rate": 5.929100341195187e-05,
      "loss": 0.2124,
      "step": 2500
    },
    {
      "epoch": 4.094488188976378,
      "grad_norm": 0.5390625,
      "learning_rate": 4.351717712746703e-05,
      "loss": 0.1979,
      "step": 2600
    },
    {
      "epoch": 4.251968503937007,
      "grad_norm": 0.67578125,
      "learning_rate": 2.9981702322862735e-05,
      "loss": 0.1953,
      "step": 2700
    },
    {
      "epoch": 4.409448818897638,
      "grad_norm": 0.625,
      "learning_rate": 1.8831308637139e-05,
      "loss": 0.2015,
      "step": 2800
    },
    {
      "epoch": 4.566929133858268,
      "grad_norm": 0.58203125,
      "learning_rate": 1.0186870532686742e-05,
      "loss": 0.1981,
      "step": 2900
    },
    {
      "epoch": 4.724409448818898,
      "grad_norm": 0.5,
      "learning_rate": 4.1420969706420505e-06,
      "loss": 0.19,
      "step": 3000
    },
    {
      "epoch": 4.881889763779528,
      "grad_norm": 0.55859375,
      "learning_rate": 7.625155704936715e-07,
      "loss": 0.1945,
      "step": 3100
    },
    {
      "epoch": 5.0,
      "step": 3175,
      "total_flos": 1.94980087881216e+16,
      "train_loss": 0.42690239373154526,
      "train_runtime": 1498.3039,
      "train_samples_per_second": 16.946,
      "train_steps_per_second": 2.119
    }
  ],
  "logging_steps": 100,
  "max_steps": 3175,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.94980087881216e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}