pythia-6.9b-HC3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9905956112852663,
"global_step": 158,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 2.5e-05,
"loss": 1.5684,
"step": 2
},
{
"epoch": 0.05,
"learning_rate": 5e-05,
"loss": 1.7603,
"step": 4
},
{
"epoch": 0.08,
"learning_rate": 7.500000000000001e-05,
"loss": 1.7578,
"step": 6
},
{
"epoch": 0.1,
"learning_rate": 0.0001,
"loss": 1.5327,
"step": 8
},
{
"epoch": 0.13,
"learning_rate": 9.995614150494293e-05,
"loss": 1.5737,
"step": 10
},
{
"epoch": 0.15,
"learning_rate": 9.982464296247522e-05,
"loss": 1.4917,
"step": 12
},
{
"epoch": 0.18,
"learning_rate": 9.96057350657239e-05,
"loss": 1.4312,
"step": 14
},
{
"epoch": 0.2,
"learning_rate": 9.929980185352526e-05,
"loss": 1.4136,
"step": 16
},
{
"epoch": 0.23,
"learning_rate": 9.890738003669029e-05,
"loss": 1.3789,
"step": 18
},
{
"epoch": 0.25,
"learning_rate": 9.842915805643155e-05,
"loss": 1.5537,
"step": 20
},
{
"epoch": 0.28,
"learning_rate": 9.786597487660337e-05,
"loss": 1.6504,
"step": 22
},
{
"epoch": 0.3,
"learning_rate": 9.721881851187406e-05,
"loss": 1.4834,
"step": 24
},
{
"epoch": 0.33,
"learning_rate": 9.648882429441257e-05,
"loss": 1.4912,
"step": 26
},
{
"epoch": 0.35,
"learning_rate": 9.567727288213005e-05,
"loss": 1.4409,
"step": 28
},
{
"epoch": 0.38,
"learning_rate": 9.478558801197065e-05,
"loss": 1.3872,
"step": 30
},
{
"epoch": 0.4,
"learning_rate": 9.381533400219318e-05,
"loss": 1.4736,
"step": 32
},
{
"epoch": 0.43,
"learning_rate": 9.276821300802534e-05,
"loss": 1.4048,
"step": 34
},
{
"epoch": 0.45,
"learning_rate": 9.164606203550497e-05,
"loss": 1.4502,
"step": 36
},
{
"epoch": 0.48,
"learning_rate": 9.045084971874738e-05,
"loss": 1.4141,
"step": 38
},
{
"epoch": 0.5,
"learning_rate": 8.9184672866292e-05,
"loss": 1.3911,
"step": 40
},
{
"epoch": 0.53,
"learning_rate": 8.784975278258783e-05,
"loss": 1.3379,
"step": 42
},
{
"epoch": 0.55,
"learning_rate": 8.644843137107059e-05,
"loss": 1.3379,
"step": 44
},
{
"epoch": 0.58,
"learning_rate": 8.498316702566828e-05,
"loss": 1.3242,
"step": 46
},
{
"epoch": 0.6,
"learning_rate": 8.345653031794292e-05,
"loss": 1.3091,
"step": 48
},
{
"epoch": 0.63,
"learning_rate": 8.18711994874345e-05,
"loss": 1.312,
"step": 50
},
{
"epoch": 0.65,
"learning_rate": 8.022995574311876e-05,
"loss": 1.3018,
"step": 52
},
{
"epoch": 0.68,
"learning_rate": 7.85356783842216e-05,
"loss": 1.3115,
"step": 54
},
{
"epoch": 0.7,
"learning_rate": 7.679133974894983e-05,
"loss": 1.3188,
"step": 56
},
{
"epoch": 0.73,
"learning_rate": 7.500000000000001e-05,
"loss": 1.314,
"step": 58
},
{
"epoch": 0.75,
"learning_rate": 7.316480175599309e-05,
"loss": 1.3062,
"step": 60
},
{
"epoch": 0.78,
"learning_rate": 7.128896457825364e-05,
"loss": 1.3076,
"step": 62
},
{
"epoch": 0.8,
"learning_rate": 6.937577932260515e-05,
"loss": 1.2847,
"step": 64
},
{
"epoch": 0.83,
"learning_rate": 6.742860236609077e-05,
"loss": 1.2837,
"step": 66
},
{
"epoch": 0.85,
"learning_rate": 6.545084971874738e-05,
"loss": 1.2798,
"step": 68
},
{
"epoch": 0.88,
"learning_rate": 6.344599103076329e-05,
"loss": 1.3247,
"step": 70
},
{
"epoch": 0.9,
"learning_rate": 6.141754350553279e-05,
"loss": 1.2842,
"step": 72
},
{
"epoch": 0.93,
"learning_rate": 5.9369065729286245e-05,
"loss": 1.2969,
"step": 74
},
{
"epoch": 0.95,
"learning_rate": 5.730415142812059e-05,
"loss": 1.2866,
"step": 76
},
{
"epoch": 0.98,
"learning_rate": 5.522642316338268e-05,
"loss": 1.2598,
"step": 78
},
{
"epoch": 0.99,
"eval_accuracy": 0.6496387020106186,
"eval_loss": 1.329074740409851,
"eval_runtime": 71.5884,
"eval_samples_per_second": 3.562,
"eval_steps_per_second": 1.788,
"step": 79
},
{
"epoch": 1.01,
"learning_rate": 5.313952597646568e-05,
"loss": 1.6475,
"step": 80
},
{
"epoch": 1.04,
"learning_rate": 5.104712099416785e-05,
"loss": 0.9348,
"step": 82
},
{
"epoch": 1.06,
"learning_rate": 4.895287900583216e-05,
"loss": 0.8953,
"step": 84
},
{
"epoch": 1.09,
"learning_rate": 4.6860474023534335e-05,
"loss": 0.8994,
"step": 86
},
{
"epoch": 1.11,
"learning_rate": 4.477357683661734e-05,
"loss": 0.8591,
"step": 88
},
{
"epoch": 1.14,
"learning_rate": 4.269584857187943e-05,
"loss": 0.8354,
"step": 90
},
{
"epoch": 1.16,
"learning_rate": 4.063093427071376e-05,
"loss": 0.863,
"step": 92
},
{
"epoch": 1.19,
"learning_rate": 3.858245649446721e-05,
"loss": 0.8845,
"step": 94
},
{
"epoch": 1.21,
"learning_rate": 3.655400896923672e-05,
"loss": 0.8325,
"step": 96
},
{
"epoch": 1.24,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.8989,
"step": 98
},
{
"epoch": 1.26,
"learning_rate": 3.257139763390925e-05,
"loss": 0.8782,
"step": 100
},
{
"epoch": 1.29,
"learning_rate": 3.062422067739485e-05,
"loss": 0.8052,
"step": 102
},
{
"epoch": 1.31,
"learning_rate": 2.8711035421746367e-05,
"loss": 0.8518,
"step": 104
},
{
"epoch": 1.34,
"learning_rate": 2.6835198244006927e-05,
"loss": 0.8516,
"step": 106
},
{
"epoch": 1.36,
"learning_rate": 2.500000000000001e-05,
"loss": 0.7979,
"step": 108
},
{
"epoch": 1.39,
"learning_rate": 2.3208660251050158e-05,
"loss": 0.8108,
"step": 110
},
{
"epoch": 1.41,
"learning_rate": 2.1464321615778422e-05,
"loss": 0.8547,
"step": 112
},
{
"epoch": 1.44,
"learning_rate": 1.977004425688126e-05,
"loss": 0.8684,
"step": 114
},
{
"epoch": 1.46,
"learning_rate": 1.8128800512565513e-05,
"loss": 0.8428,
"step": 116
},
{
"epoch": 1.49,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.7878,
"step": 118
},
{
"epoch": 1.51,
"learning_rate": 1.5016832974331724e-05,
"loss": 0.822,
"step": 120
},
{
"epoch": 1.54,
"learning_rate": 1.3551568628929434e-05,
"loss": 0.79,
"step": 122
},
{
"epoch": 1.56,
"learning_rate": 1.2150247217412186e-05,
"loss": 0.7979,
"step": 124
},
{
"epoch": 1.59,
"learning_rate": 1.0815327133708015e-05,
"loss": 0.8081,
"step": 126
},
{
"epoch": 1.61,
"learning_rate": 9.549150281252633e-06,
"loss": 0.7751,
"step": 128
},
{
"epoch": 1.64,
"learning_rate": 8.353937964495029e-06,
"loss": 0.7871,
"step": 130
},
{
"epoch": 1.66,
"learning_rate": 7.2317869919746705e-06,
"loss": 0.7722,
"step": 132
},
{
"epoch": 1.69,
"learning_rate": 6.184665997806832e-06,
"loss": 0.8198,
"step": 134
},
{
"epoch": 1.71,
"learning_rate": 5.214411988029355e-06,
"loss": 0.7859,
"step": 136
},
{
"epoch": 1.74,
"learning_rate": 4.322727117869951e-06,
"loss": 0.7458,
"step": 138
},
{
"epoch": 1.76,
"learning_rate": 3.511175705587433e-06,
"loss": 0.7554,
"step": 140
},
{
"epoch": 1.79,
"learning_rate": 2.7811814881259503e-06,
"loss": 0.7783,
"step": 142
},
{
"epoch": 1.82,
"learning_rate": 2.134025123396638e-06,
"loss": 0.7783,
"step": 144
},
{
"epoch": 1.84,
"learning_rate": 1.5708419435684462e-06,
"loss": 0.7793,
"step": 146
},
{
"epoch": 1.87,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.8064,
"step": 148
},
{
"epoch": 1.89,
"learning_rate": 7.001981464747565e-07,
"loss": 0.8042,
"step": 150
},
{
"epoch": 1.92,
"learning_rate": 3.9426493427611177e-07,
"loss": 0.791,
"step": 152
},
{
"epoch": 1.94,
"learning_rate": 1.753570375247815e-07,
"loss": 0.8147,
"step": 154
},
{
"epoch": 1.97,
"learning_rate": 4.385849505708084e-08,
"loss": 0.7617,
"step": 156
},
{
"epoch": 1.99,
"learning_rate": 0.0,
"loss": 0.7446,
"step": 158
},
{
"epoch": 1.99,
"eval_accuracy": 0.6768941789814655,
"eval_loss": 1.2372242212295532,
"eval_runtime": 71.61,
"eval_samples_per_second": 3.561,
"eval_steps_per_second": 1.787,
"step": 158
},
{
"epoch": 1.99,
"step": 158,
"total_flos": 4.14872908733612e+17,
"train_loss": 1.1195918215981013,
"train_runtime": 35849.6176,
"train_samples_per_second": 0.284,
"train_steps_per_second": 0.004
}
],
"max_steps": 158,
"num_train_epochs": 2,
"total_flos": 4.14872908733612e+17,
"trial_name": null,
"trial_params": null
}
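
The JSON above is the Hugging Face Trainer state saved alongside the checkpoint; entries in "log_history" with a "loss" key are optimizer-step logs, while "eval_loss"/"eval_accuracy" mark evaluation passes. A minimal sketch of how one might parse it and print the logged curves (reading from a local trainer_state.json is an assumption, not something the file itself specifies):

import json

# Illustrative sketch only: load the Trainer state and print the logged curves.
with open("trainer_state.json") as f:  # local path is an assumption
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:
        # Training log: step, scheduled learning rate, training loss
        print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")
    elif "eval_loss" in entry:
        # Evaluation log: loss and token-level accuracy on the eval split
        print(f"eval @ step {entry['step']}: loss {entry['eval_loss']:.4f}, accuracy {entry['eval_accuracy']:.4f}")

The final summary entry (with "train_loss" and "total_flos") matches neither branch and is simply skipped here.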