{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.6217616580310881,
"eval_steps": 500,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"grad_norm": 4.99941873550415,
"learning_rate": 2e-05,
"loss": 9.9329,
"step": 10
},
{
"epoch": 0.04,
"grad_norm": 1.741065502166748,
"learning_rate": 4e-05,
"loss": 11.0746,
"step": 20
},
{
"epoch": 0.06,
"grad_norm": 1.4727320671081543,
"learning_rate": 6e-05,
"loss": 2.7159,
"step": 30
},
{
"epoch": 0.08,
"grad_norm": 0.1335960477590561,
"learning_rate": 8e-05,
"loss": 0.3969,
"step": 40
},
{
"epoch": 0.1,
"grad_norm": 0.0014472692273557186,
"learning_rate": 0.0001,
"loss": 0.0032,
"step": 50
},
{
"epoch": 0.12,
"grad_norm": 0.0010780546581372619,
"learning_rate": 0.0001,
"loss": 0.0002,
"step": 60
},
{
"epoch": 0.15,
"grad_norm": 1.03132963180542,
"learning_rate": 0.0001,
"loss": 0.0002,
"step": 70
},
{
"epoch": 0.17,
"grad_norm": 0.008827299810945988,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 80
},
{
"epoch": 0.19,
"grad_norm": 0.0002956670359708369,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 90
},
{
"epoch": 0.21,
"grad_norm": 0.0003419867134653032,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.23,
"grad_norm": 0.0003681881644297391,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 110
},
{
"epoch": 0.25,
"grad_norm": 0.0002884200366679579,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 120
},
{
"epoch": 0.27,
"grad_norm": 0.00011985149467363954,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 130
},
{
"epoch": 0.29,
"grad_norm": 0.0003195986500941217,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 140
},
{
"epoch": 0.31,
"grad_norm": 0.00010149635636480525,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 150
},
{
"epoch": 0.33,
"grad_norm": 0.00010508792183827609,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 160
},
{
"epoch": 0.35,
"grad_norm": 0.00011793687008321285,
"learning_rate": 0.0001,
"loss": 0.006,
"step": 170
},
{
"epoch": 0.37,
"grad_norm": 8.076676749624312e-05,
"learning_rate": 0.0001,
"loss": 0.0,
"step": 180
},
{
"epoch": 0.39,
"grad_norm": 0.0007808339432813227,
"learning_rate": 0.0001,
"loss": 0.006,
"step": 190
},
{
"epoch": 0.41,
"grad_norm": 0.11711683869361877,
"learning_rate": 0.0001,
"loss": 0.003,
"step": 200
},
{
"epoch": 0.44,
"grad_norm": 0.0002039404644165188,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 210
},
{
"epoch": 0.46,
"grad_norm": 0.00873592495918274,
"learning_rate": 0.0001,
"loss": 0.0209,
"step": 220
},
{
"epoch": 0.48,
"grad_norm": 3.0506539344787598,
"learning_rate": 0.0001,
"loss": 0.0201,
"step": 230
},
{
"epoch": 0.5,
"grad_norm": 0.05903371796011925,
"learning_rate": 0.0001,
"loss": 0.0026,
"step": 240
},
{
"epoch": 0.52,
"grad_norm": 0.0002484666183590889,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 250
},
{
"epoch": 0.54,
"grad_norm": 0.0003493047261144966,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 260
},
{
"epoch": 0.56,
"grad_norm": 0.0008058947860263288,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 270
},
{
"epoch": 0.58,
"grad_norm": 0.0004198936221655458,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 280
},
{
"epoch": 0.6,
"grad_norm": 0.0002983050071634352,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 290
},
{
"epoch": 0.62,
"grad_norm": 0.0002279053587699309,
"learning_rate": 0.0001,
"loss": 0.0001,
"step": 300
}
],
"logging_steps": 10,
"max_steps": 482,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 5.004587702980301e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}