SOLAR-10.7B-ko_alpaca / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 669,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 0.0002998660541859271,
"loss": 1.1033,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 0.0002994033387324446,
"loss": 0.9066,
"step": 20
},
{
"epoch": 0.04,
"learning_rate": 0.0002986112199359036,
"loss": 0.846,
"step": 30
},
{
"epoch": 0.06,
"learning_rate": 0.00029749144425379216,
"loss": 0.8246,
"step": 40
},
{
"epoch": 0.07,
"learning_rate": 0.000296205525182313,
"loss": 0.8011,
"step": 50
},
{
"epoch": 0.09,
"learning_rate": 0.00029447059587783353,
"loss": 0.7911,
"step": 60
},
{
"epoch": 0.1,
"learning_rate": 0.0002924171389041629,
"loss": 0.7719,
"step": 70
},
{
"epoch": 0.12,
"learning_rate": 0.0002903004140503956,
"loss": 0.7793,
"step": 80
},
{
"epoch": 0.13,
"learning_rate": 0.0002876548000410533,
"loss": 0.7633,
"step": 90
},
{
"epoch": 0.15,
"learning_rate": 0.00028470568577725704,
"loss": 0.7623,
"step": 100
},
{
"epoch": 0.16,
"learning_rate": 0.0002814595734436687,
"loss": 0.7519,
"step": 110
},
{
"epoch": 0.18,
"learning_rate": 0.0002779236200440321,
"loss": 0.7471,
"step": 120
},
{
"epoch": 0.19,
"learning_rate": 0.0002741056216214651,
"loss": 0.7505,
"step": 130
},
{
"epoch": 0.21,
"learning_rate": 0.00027001399606980383,
"loss": 0.7342,
"step": 140
},
{
"epoch": 0.22,
"learning_rate": 0.0002656577645738938,
"loss": 0.7443,
"step": 150
},
{
"epoch": 0.24,
"learning_rate": 0.0002610465317197509,
"loss": 0.7247,
"step": 160
},
{
"epoch": 0.25,
"learning_rate": 0.0002561904643184426,
"loss": 0.7319,
"step": 170
},
{
"epoch": 0.27,
"learning_rate": 0.00025110026899038105,
"loss": 0.7297,
"step": 180
},
{
"epoch": 0.28,
"learning_rate": 0.0002457871685594478,
"loss": 0.7373,
"step": 190
},
{
"epoch": 0.3,
"learning_rate": 0.0002402628773089967,
"loss": 0.7267,
"step": 200
},
{
"epoch": 0.31,
"learning_rate": 0.00023453957515429192,
"loss": 0.7341,
"step": 210
},
{
"epoch": 0.33,
"learning_rate": 0.0002286298807883231,
"loss": 0.7195,
"step": 220
},
{
"epoch": 0.34,
"learning_rate": 0.00022254682386020736,
"loss": 0.7221,
"step": 230
},
{
"epoch": 0.36,
"learning_rate": 0.00021630381624751795,
"loss": 0.7214,
"step": 240
},
{
"epoch": 0.37,
"learning_rate": 0.00020991462248587929,
"loss": 0.7192,
"step": 250
},
{
"epoch": 0.39,
"learning_rate": 0.00020339332942102297,
"loss": 0.7139,
"step": 260
},
{
"epoch": 0.4,
"learning_rate": 0.0001967543151502182,
"loss": 0.7164,
"step": 270
},
{
"epoch": 0.42,
"learning_rate": 0.00019001221732155218,
"loss": 0.7219,
"step": 280
},
{
"epoch": 0.43,
"learning_rate": 0.0001831819008609551,
"loss": 0.7237,
"step": 290
},
{
"epoch": 0.45,
"learning_rate": 0.00017627842519812485,
"loss": 0.7236,
"step": 300
},
{
"epoch": 0.46,
"learning_rate": 0.00016931701106361107,
"loss": 0.7038,
"step": 310
},
{
"epoch": 0.48,
"learning_rate": 0.00016231300693026433,
"loss": 0.7073,
"step": 320
},
{
"epoch": 0.49,
"learning_rate": 0.00015528185517304027,
"loss": 0.711,
"step": 330
},
{
"epoch": 0.51,
"learning_rate": 0.00014823905802176872,
"loss": 0.7029,
"step": 340
},
{
"epoch": 0.52,
"learning_rate": 0.0001412001433819556,
"loss": 0.7199,
"step": 350
},
{
"epoch": 0.54,
"learning_rate": 0.00013418063059897524,
"loss": 0.7229,
"step": 360
},
{
"epoch": 0.55,
"learning_rate": 0.00012719599624113573,
"loss": 0.7189,
"step": 370
},
{
"epoch": 0.57,
"learning_rate": 0.00012026163997705916,
"loss": 0.7147,
"step": 380
},
{
"epoch": 0.58,
"learning_rate": 0.00011339285062260957,
"loss": 0.6928,
"step": 390
},
{
"epoch": 0.6,
"learning_rate": 0.00010660477243222808,
"loss": 0.7002,
"step": 400
},
{
"epoch": 0.61,
"learning_rate": 9.991237170899614e-05,
"loss": 0.7198,
"step": 410
},
{
"epoch": 0.63,
"learning_rate": 9.33304038070446e-05,
"loss": 0.701,
"step": 420
},
{
"epoch": 0.64,
"learning_rate": 8.68733805990611e-05,
"loss": 0.6928,
"step": 430
},
{
"epoch": 0.66,
"learning_rate": 8.055553848062404e-05,
"loss": 0.696,
"step": 440
},
{
"epoch": 0.67,
"learning_rate": 7.439080698190579e-05,
"loss": 0.7038,
"step": 450
},
{
"epoch": 0.69,
"learning_rate": 6.839277805595081e-05,
"loss": 0.6949,
"step": 460
},
{
"epoch": 0.7,
"learning_rate": 6.257467611124105e-05,
"loss": 0.7054,
"step": 470
},
{
"epoch": 0.72,
"learning_rate": 5.6949328854621045e-05,
"loss": 0.7013,
"step": 480
},
{
"epoch": 0.73,
"learning_rate": 5.152913900886729e-05,
"loss": 0.6976,
"step": 490
},
{
"epoch": 0.75,
"learning_rate": 4.632605696725987e-05,
"loss": 0.6978,
"step": 500
},
{
"epoch": 0.76,
"learning_rate": 4.13515544454462e-05,
"loss": 0.6964,
"step": 510
},
{
"epoch": 0.78,
"learning_rate": 3.66165991886898e-05,
"loss": 0.7135,
"step": 520
},
{
"epoch": 0.79,
"learning_rate": 3.2131630790269134e-05,
"loss": 0.7026,
"step": 530
},
{
"epoch": 0.81,
"learning_rate": 2.790653767434181e-05,
"loss": 0.6986,
"step": 540
},
{
"epoch": 0.82,
"learning_rate": 2.3950635294022347e-05,
"loss": 0.6809,
"step": 550
},
{
"epoch": 0.84,
"learning_rate": 2.027264559274224e-05,
"loss": 0.6954,
"step": 560
},
{
"epoch": 0.85,
"learning_rate": 1.6880677774175354e-05,
"loss": 0.7028,
"step": 570
},
{
"epoch": 0.87,
"learning_rate": 1.3782210423127449e-05,
"loss": 0.6802,
"step": 580
},
{
"epoch": 0.88,
"learning_rate": 1.0984075016809528e-05,
"loss": 0.6969,
"step": 590
},
{
"epoch": 0.9,
"learning_rate": 8.492440862848787e-06,
"loss": 0.6995,
"step": 600
},
{
"epoch": 0.91,
"learning_rate": 6.312801497246206e-06,
"loss": 0.6908,
"step": 610
},
{
"epoch": 0.93,
"learning_rate": 4.44996257227021e-06,
"loss": 0.6933,
"step": 620
},
{
"epoch": 0.94,
"learning_rate": 2.908031260991195e-06,
"loss": 0.6993,
"step": 630
},
{
"epoch": 0.96,
"learning_rate": 1.6904072018175786e-06,
"loss": 0.6977,
"step": 640
},
{
"epoch": 0.97,
"learning_rate": 7.997750029989647e-07,
"loss": 0.6936,
"step": 650
},
{
"epoch": 0.99,
"learning_rate": 2.380983236223377e-07,
"loss": 0.6879,
"step": 660
},
{
"epoch": 1.0,
"step": 669,
"total_flos": 5.563883205708218e+18,
"train_loss": 0.7295773652993331,
"train_runtime": 13470.6062,
"train_samples_per_second": 6.356,
"train_steps_per_second": 0.05
}
],
"logging_steps": 10,
"max_steps": 669,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"total_flos": 5.563883205708218e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
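
The file above is the standard trainer_state.json written by the Hugging Face Transformers Trainer: one log_history entry per logging_steps interval (here every 10 steps) recording the running loss and scheduled learning rate, followed by a final summary entry with train_loss, train_runtime, and throughput. A minimal inspection sketch, assuming the JSON is saved locally as trainer_state.json (filename and paths are illustrative, not taken from the repo):

# Minimal sketch: load the Trainer state and summarize the logged run.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-interval entries carry "loss"; the last entry is the run summary instead.
logs = [e for e in state["log_history"] if "loss" in e]
summary = state["log_history"][-1]

print(f"logged intervals: {len(logs)} (every {state['logging_steps']} steps, "
      f"{state['max_steps']} steps total)")
print(f"first/last logged loss: {logs[0]['loss']:.4f} -> {logs[-1]['loss']:.4f}")
print(f"mean train loss reported by the Trainer: {summary['train_loss']:.4f}")
print(f"runtime: {summary['train_runtime'] / 3600:.2f} h, "
      f"{summary['train_samples_per_second']:.2f} samples/s")

For this particular state it reports 66 logged intervals over the single epoch, a loss dropping from 1.1033 at step 10 to 0.6879 at step 660, a mean train loss of 0.7296, and roughly 3.74 hours of training at about 6.36 samples/s.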