{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.022013097793186946,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00011006548896593473,
"eval_loss": 1.3274837732315063,
"eval_runtime": 241.8919,
"eval_samples_per_second": 15.817,
"eval_steps_per_second": 7.908,
"step": 1
},
{
"epoch": 0.0011006548896593472,
"grad_norm": 0.49815604090690613,
"learning_rate": 0.0002,
"loss": 1.2469,
"step": 10
},
{
"epoch": 0.0022013097793186945,
"grad_norm": 0.4162350296974182,
"learning_rate": 0.0002,
"loss": 1.1037,
"step": 20
},
{
"epoch": 0.003301964668978042,
"grad_norm": 0.4359431564807892,
"learning_rate": 0.0002,
"loss": 1.1139,
"step": 30
},
{
"epoch": 0.004402619558637389,
"grad_norm": 0.34668853878974915,
"learning_rate": 0.0002,
"loss": 1.103,
"step": 40
},
{
"epoch": 0.005503274448296736,
"grad_norm": 0.36034461855888367,
"learning_rate": 0.0002,
"loss": 1.0576,
"step": 50
},
{
"epoch": 0.005503274448296736,
"eval_loss": 1.0534749031066895,
"eval_runtime": 240.7504,
"eval_samples_per_second": 15.892,
"eval_steps_per_second": 7.946,
"step": 50
},
{
"epoch": 0.006603929337956084,
"grad_norm": 0.42812806367874146,
"learning_rate": 0.0002,
"loss": 1.0562,
"step": 60
},
{
"epoch": 0.007704584227615431,
"grad_norm": 0.44979560375213623,
"learning_rate": 0.0002,
"loss": 1.0747,
"step": 70
},
{
"epoch": 0.008805239117274778,
"grad_norm": 0.444010466337204,
"learning_rate": 0.0002,
"loss": 1.031,
"step": 80
},
{
"epoch": 0.009905894006934125,
"grad_norm": 0.44595256447792053,
"learning_rate": 0.0002,
"loss": 1.0268,
"step": 90
},
{
"epoch": 0.011006548896593473,
"grad_norm": 0.46254387497901917,
"learning_rate": 0.0002,
"loss": 1.0139,
"step": 100
},
{
"epoch": 0.011006548896593473,
"eval_loss": 1.0108003616333008,
"eval_runtime": 240.8303,
"eval_samples_per_second": 15.887,
"eval_steps_per_second": 7.943,
"step": 100
},
{
"epoch": 0.01210720378625282,
"grad_norm": 0.47793570160865784,
"learning_rate": 0.0002,
"loss": 0.9809,
"step": 110
},
{
"epoch": 0.013207858675912168,
"grad_norm": 0.5524470806121826,
"learning_rate": 0.0002,
"loss": 1.0559,
"step": 120
},
{
"epoch": 0.014308513565571515,
"grad_norm": 0.5550538301467896,
"learning_rate": 0.0002,
"loss": 0.9841,
"step": 130
},
{
"epoch": 0.015409168455230863,
"grad_norm": 0.46083733439445496,
"learning_rate": 0.0002,
"loss": 1.0335,
"step": 140
},
{
"epoch": 0.01650982334489021,
"grad_norm": 0.4849002957344055,
"learning_rate": 0.0002,
"loss": 0.9981,
"step": 150
},
{
"epoch": 0.01650982334489021,
"eval_loss": 0.9864005446434021,
"eval_runtime": 240.7832,
"eval_samples_per_second": 15.89,
"eval_steps_per_second": 7.945,
"step": 150
},
{
"epoch": 0.017610478234549556,
"grad_norm": 0.5414583086967468,
"learning_rate": 0.0002,
"loss": 0.9461,
"step": 160
},
{
"epoch": 0.018711133124208905,
"grad_norm": 0.47824686765670776,
"learning_rate": 0.0002,
"loss": 1.0095,
"step": 170
},
{
"epoch": 0.01981178801386825,
"grad_norm": 0.47877955436706543,
"learning_rate": 0.0002,
"loss": 0.9779,
"step": 180
},
{
"epoch": 0.0209124429035276,
"grad_norm": 0.3664035499095917,
"learning_rate": 0.0002,
"loss": 1.0182,
"step": 190
},
{
"epoch": 0.022013097793186946,
"grad_norm": 0.4645732045173645,
"learning_rate": 0.0002,
"loss": 0.9779,
"step": 200
},
{
"epoch": 0.022013097793186946,
"eval_loss": 0.9666519165039062,
"eval_runtime": 240.7663,
"eval_samples_per_second": 15.891,
"eval_steps_per_second": 7.945,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.78621657923584e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}