{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0012437810945273632,
"eval_steps": 1000,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 1.9492,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 1.9778,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 4e-05,
"loss": 2.0908,
"step": 3
},
{
"epoch": 0.0,
"learning_rate": 4e-05,
"loss": 2.0456,
"step": 4
},
{
"epoch": 0.0,
"learning_rate": 8e-05,
"loss": 2.2288,
"step": 5
},
{
"epoch": 0.0,
"learning_rate": 0.00012,
"loss": 1.7521,
"step": 6
},
{
"epoch": 0.0,
"learning_rate": 0.00016,
"loss": 1.746,
"step": 7
},
{
"epoch": 0.0,
"learning_rate": 0.0002,
"loss": 1.6457,
"step": 8
},
{
"epoch": 0.0,
"learning_rate": 0.00019789473684210526,
"loss": 1.6226,
"step": 9
},
{
"epoch": 0.0,
"learning_rate": 0.00019578947368421054,
"loss": 1.643,
"step": 10
},
{
"epoch": 0.0,
"learning_rate": 0.0001936842105263158,
"loss": 1.7807,
"step": 11
},
{
"epoch": 0.0,
"learning_rate": 0.00019157894736842104,
"loss": 1.7257,
"step": 12
},
{
"epoch": 0.0,
"learning_rate": 0.00018947368421052632,
"loss": 1.6412,
"step": 13
},
{
"epoch": 0.0,
"learning_rate": 0.0001873684210526316,
"loss": 1.6792,
"step": 14
},
{
"epoch": 0.0,
"learning_rate": 0.00018526315789473685,
"loss": 1.4892,
"step": 15
},
{
"epoch": 0.0,
"learning_rate": 0.0001831578947368421,
"loss": 1.4418,
"step": 16
},
{
"epoch": 0.0,
"learning_rate": 0.00018105263157894739,
"loss": 1.5064,
"step": 17
},
{
"epoch": 0.0,
"learning_rate": 0.00017894736842105264,
"loss": 1.5943,
"step": 18
},
{
"epoch": 0.0,
"learning_rate": 0.0001768421052631579,
"loss": 1.419,
"step": 19
},
{
"epoch": 0.0,
"learning_rate": 0.00017473684210526317,
"loss": 1.5235,
"step": 20
},
{
"epoch": 0.0,
"learning_rate": 0.00017263157894736842,
"loss": 1.6562,
"step": 21
},
{
"epoch": 0.0,
"learning_rate": 0.0001705263157894737,
"loss": 1.4182,
"step": 22
},
{
"epoch": 0.0,
"learning_rate": 0.00016842105263157895,
"loss": 1.7132,
"step": 23
},
{
"epoch": 0.0,
"learning_rate": 0.00016631578947368423,
"loss": 1.7307,
"step": 24
},
{
"epoch": 0.0,
"learning_rate": 0.00016421052631578948,
"loss": 1.7079,
"step": 25
},
{
"epoch": 0.0,
"learning_rate": 0.00016210526315789473,
"loss": 1.4506,
"step": 26
},
{
"epoch": 0.0,
"learning_rate": 0.00016,
"loss": 1.673,
"step": 27
},
{
"epoch": 0.0,
"learning_rate": 0.00015789473684210527,
"loss": 1.5159,
"step": 28
},
{
"epoch": 0.0,
"learning_rate": 0.00015578947368421052,
"loss": 1.8044,
"step": 29
},
{
"epoch": 0.0,
"learning_rate": 0.0001536842105263158,
"loss": 1.311,
"step": 30
},
{
"epoch": 0.0,
"learning_rate": 0.00015157894736842108,
"loss": 1.5876,
"step": 31
},
{
"epoch": 0.0,
"learning_rate": 0.00014947368421052633,
"loss": 1.5843,
"step": 32
},
{
"epoch": 0.0,
"learning_rate": 0.00014736842105263158,
"loss": 1.7256,
"step": 33
},
{
"epoch": 0.0,
"learning_rate": 0.00014526315789473686,
"loss": 1.5446,
"step": 34
},
{
"epoch": 0.0,
"learning_rate": 0.0001431578947368421,
"loss": 1.9188,
"step": 35
},
{
"epoch": 0.0,
"learning_rate": 0.00014105263157894736,
"loss": 1.6284,
"step": 36
},
{
"epoch": 0.0,
"learning_rate": 0.00013894736842105264,
"loss": 1.6446,
"step": 37
},
{
"epoch": 0.0,
"learning_rate": 0.0001368421052631579,
"loss": 1.5255,
"step": 38
},
{
"epoch": 0.0,
"learning_rate": 0.00013473684210526317,
"loss": 1.4796,
"step": 39
},
{
"epoch": 0.0,
"learning_rate": 0.00013263157894736842,
"loss": 1.8069,
"step": 40
},
{
"epoch": 0.0,
"learning_rate": 0.0001305263157894737,
"loss": 1.5269,
"step": 41
},
{
"epoch": 0.0,
"learning_rate": 0.00012842105263157895,
"loss": 1.3387,
"step": 42
},
{
"epoch": 0.0,
"learning_rate": 0.0001263157894736842,
"loss": 1.3193,
"step": 43
},
{
"epoch": 0.0,
"learning_rate": 0.00012421052631578949,
"loss": 1.6728,
"step": 44
},
{
"epoch": 0.0,
"learning_rate": 0.00012210526315789474,
"loss": 1.5991,
"step": 45
},
{
"epoch": 0.0,
"learning_rate": 0.00012,
"loss": 1.6619,
"step": 46
},
{
"epoch": 0.0,
"learning_rate": 0.00011789473684210525,
"loss": 1.4312,
"step": 47
},
{
"epoch": 0.0,
"learning_rate": 0.00011578947368421053,
"loss": 1.4846,
"step": 48
},
{
"epoch": 0.0,
"learning_rate": 0.0001136842105263158,
"loss": 1.3929,
"step": 49
},
{
"epoch": 0.0,
"learning_rate": 0.00011157894736842105,
"loss": 1.647,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 100,
"num_train_epochs": 1,
"save_steps": 25,
"total_flos": 2841070876655616.0,
"trial_name": null,
"trial_params": null
}