{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 35.0,
"global_step": 27335,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.64,
"learning_rate": 4.908542162063289e-05,
"loss": 6.7425,
"step": 500
},
{
"epoch": 1.28,
"learning_rate": 4.8170843241265775e-05,
"loss": 5.4902,
"step": 1000
},
{
"epoch": 1.92,
"learning_rate": 4.725626486189867e-05,
"loss": 4.9535,
"step": 1500
},
{
"epoch": 2.56,
"learning_rate": 4.6341686482531555e-05,
"loss": 4.5816,
"step": 2000
},
{
"epoch": 3.2,
"learning_rate": 4.5427108103164445e-05,
"loss": 4.3055,
"step": 2500
},
{
"epoch": 3.84,
"learning_rate": 4.451252972379733e-05,
"loss": 4.0758,
"step": 3000
},
{
"epoch": 4.48,
"learning_rate": 4.359795134443022e-05,
"loss": 3.8768,
"step": 3500
},
{
"epoch": 5.12,
"learning_rate": 4.268337296506311e-05,
"loss": 3.7165,
"step": 4000
},
{
"epoch": 5.76,
"learning_rate": 4.1768794585696e-05,
"loss": 3.5584,
"step": 4500
},
{
"epoch": 6.4,
"learning_rate": 4.085421620632888e-05,
"loss": 3.431,
"step": 5000
},
{
"epoch": 7.04,
"learning_rate": 3.993963782696177e-05,
"loss": 3.3197,
"step": 5500
},
{
"epoch": 7.68,
"learning_rate": 3.902505944759466e-05,
"loss": 3.2002,
"step": 6000
},
{
"epoch": 8.32,
"learning_rate": 3.811048106822755e-05,
"loss": 3.1084,
"step": 6500
},
{
"epoch": 8.96,
"learning_rate": 3.719590268886043e-05,
"loss": 3.0326,
"step": 7000
},
{
"epoch": 9.6,
"learning_rate": 3.628132430949332e-05,
"loss": 2.9372,
"step": 7500
},
{
"epoch": 10.24,
"learning_rate": 3.536674593012621e-05,
"loss": 2.8657,
"step": 8000
},
{
"epoch": 10.88,
"learning_rate": 3.44521675507591e-05,
"loss": 2.8072,
"step": 8500
},
{
"epoch": 11.52,
"learning_rate": 3.353758917139199e-05,
"loss": 2.7354,
"step": 9000
},
{
"epoch": 12.16,
"learning_rate": 3.2623010792024876e-05,
"loss": 2.6862,
"step": 9500
},
{
"epoch": 12.8,
"learning_rate": 3.1708432412657766e-05,
"loss": 2.6269,
"step": 10000
},
{
"epoch": 13.44,
"learning_rate": 3.0793854033290656e-05,
"loss": 2.5687,
"step": 10500
},
{
"epoch": 14.08,
"learning_rate": 2.9879275653923545e-05,
"loss": 2.5422,
"step": 11000
},
{
"epoch": 14.72,
"learning_rate": 2.896469727455643e-05,
"loss": 2.4796,
"step": 11500
},
{
"epoch": 15.36,
"learning_rate": 2.8050118895189322e-05,
"loss": 2.4468,
"step": 12000
},
{
"epoch": 16.01,
"learning_rate": 2.7135540515822205e-05,
"loss": 2.4179,
"step": 12500
},
{
"epoch": 16.65,
"learning_rate": 2.6220962136455095e-05,
"loss": 2.3558,
"step": 13000
},
{
"epoch": 17.29,
"learning_rate": 2.530638375708798e-05,
"loss": 2.3408,
"step": 13500
},
{
"epoch": 17.93,
"learning_rate": 2.439180537772087e-05,
"loss": 2.3129,
"step": 14000
},
{
"epoch": 18.57,
"learning_rate": 2.347722699835376e-05,
"loss": 2.2645,
"step": 14500
},
{
"epoch": 19.21,
"learning_rate": 2.2562648618986647e-05,
"loss": 2.2492,
"step": 15000
},
{
"epoch": 19.85,
"learning_rate": 2.1648070239619537e-05,
"loss": 2.2222,
"step": 15500
},
{
"epoch": 20.49,
"learning_rate": 2.0733491860252424e-05,
"loss": 2.1802,
"step": 16000
},
{
"epoch": 21.13,
"learning_rate": 1.9818913480885314e-05,
"loss": 2.173,
"step": 16500
},
{
"epoch": 21.77,
"learning_rate": 1.89043351015182e-05,
"loss": 2.1459,
"step": 17000
},
{
"epoch": 22.41,
"learning_rate": 1.7989756722151087e-05,
"loss": 2.118,
"step": 17500
},
{
"epoch": 23.05,
"learning_rate": 1.7075178342783976e-05,
"loss": 2.1077,
"step": 18000
},
{
"epoch": 23.69,
"learning_rate": 1.6160599963416863e-05,
"loss": 2.0791,
"step": 18500
},
{
"epoch": 24.33,
"learning_rate": 1.5246021584049754e-05,
"loss": 2.0544,
"step": 19000
},
{
"epoch": 24.97,
"learning_rate": 1.4331443204682643e-05,
"loss": 2.0541,
"step": 19500
},
{
"epoch": 25.61,
"learning_rate": 1.341686482531553e-05,
"loss": 2.0238,
"step": 20000
},
{
"epoch": 26.25,
"learning_rate": 1.2502286445948419e-05,
"loss": 2.0115,
"step": 20500
},
{
"epoch": 26.89,
"learning_rate": 1.1587708066581307e-05,
"loss": 1.9977,
"step": 21000
},
{
"epoch": 27.53,
"learning_rate": 1.0673129687214195e-05,
"loss": 1.979,
"step": 21500
},
{
"epoch": 28.17,
"learning_rate": 9.758551307847083e-06,
"loss": 1.9705,
"step": 22000
},
{
"epoch": 28.81,
"learning_rate": 8.843972928479972e-06,
"loss": 1.9635,
"step": 22500
},
{
"epoch": 29.45,
"learning_rate": 7.92939454911286e-06,
"loss": 1.9388,
"step": 23000
},
{
"epoch": 30.09,
"learning_rate": 7.014816169745747e-06,
"loss": 1.9347,
"step": 23500
},
{
"epoch": 30.73,
"learning_rate": 6.100237790378636e-06,
"loss": 1.9227,
"step": 24000
},
{
"epoch": 31.37,
"learning_rate": 5.1856594110115235e-06,
"loss": 1.9157,
"step": 24500
},
{
"epoch": 32.01,
"learning_rate": 4.271081031644412e-06,
"loss": 1.9104,
"step": 25000
},
{
"epoch": 32.65,
"learning_rate": 3.3565026522773006e-06,
"loss": 1.895,
"step": 25500
},
{
"epoch": 33.29,
"learning_rate": 2.4419242729101884e-06,
"loss": 1.8957,
"step": 26000
},
{
"epoch": 33.93,
"learning_rate": 1.5273458935430768e-06,
"loss": 1.8829,
"step": 26500
},
{
"epoch": 34.57,
"learning_rate": 6.127675141759649e-07,
"loss": 1.8824,
"step": 27000
},
{
"epoch": 35.0,
"step": 27335,
"total_flos": 5.713934155776e+16,
"train_loss": 2.6988527534168076,
"train_runtime": 23608.904,
"train_samples_per_second": 148.16,
"train_steps_per_second": 1.158
}
],
"max_steps": 27335,
"num_train_epochs": 35,
"total_flos": 5.713934155776e+16,
"trial_name": null,
"trial_params": null
}