GPT2-python-code-generator / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.922136886324097,
"global_step": 116000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.999957567785463e-05,
"loss": 2.0156,
"step": 1
},
{
"epoch": 0.08,
"learning_rate": 4.915135570925447e-05,
"loss": 1.305,
"step": 2000
},
{
"epoch": 0.17,
"learning_rate": 4.8302711418508934e-05,
"loss": 1.1809,
"step": 4000
},
{
"epoch": 0.25,
"learning_rate": 4.74540671277634e-05,
"loss": 1.1207,
"step": 6000
},
{
"epoch": 0.34,
"learning_rate": 4.6605422837017866e-05,
"loss": 1.0869,
"step": 8000
},
{
"epoch": 0.42,
"learning_rate": 4.575677854627233e-05,
"loss": 1.0604,
"step": 10000
},
{
"epoch": 0.51,
"learning_rate": 4.49081342555268e-05,
"loss": 1.0425,
"step": 12000
},
{
"epoch": 0.59,
"learning_rate": 4.4059489964781264e-05,
"loss": 1.0303,
"step": 14000
},
{
"epoch": 0.68,
"learning_rate": 4.321084567403573e-05,
"loss": 1.0101,
"step": 16000
},
{
"epoch": 0.76,
"learning_rate": 4.2362201383290196e-05,
"loss": 0.9935,
"step": 18000
},
{
"epoch": 0.85,
"learning_rate": 4.151355709254466e-05,
"loss": 0.9833,
"step": 20000
},
{
"epoch": 0.93,
"learning_rate": 4.066491280179913e-05,
"loss": 0.9833,
"step": 22000
},
{
"epoch": 1.0,
"eval_loss": 1.1714409589767456,
"eval_runtime": 285.104,
"eval_samples_per_second": 50.936,
"step": 23567
},
{
"epoch": 1.02,
"learning_rate": 3.9816268511053594e-05,
"loss": 0.9653,
"step": 24000
},
{
"epoch": 1.1,
"learning_rate": 3.896762422030806e-05,
"loss": 0.9378,
"step": 26000
},
{
"epoch": 1.19,
"learning_rate": 3.8118979929562526e-05,
"loss": 0.9398,
"step": 28000
},
{
"epoch": 1.27,
"learning_rate": 3.727033563881699e-05,
"loss": 0.9269,
"step": 30000
},
{
"epoch": 1.36,
"learning_rate": 3.642169134807146e-05,
"loss": 0.9311,
"step": 32000
},
{
"epoch": 1.44,
"learning_rate": 3.5573047057325924e-05,
"loss": 0.9179,
"step": 34000
},
{
"epoch": 1.53,
"learning_rate": 3.472440276658039e-05,
"loss": 0.91,
"step": 36000
},
{
"epoch": 1.61,
"learning_rate": 3.3875758475834856e-05,
"loss": 0.9146,
"step": 38000
},
{
"epoch": 1.7,
"learning_rate": 3.302711418508932e-05,
"loss": 0.9101,
"step": 40000
},
{
"epoch": 1.78,
"learning_rate": 3.217846989434379e-05,
"loss": 0.9024,
"step": 42000
},
{
"epoch": 1.87,
"learning_rate": 3.132982560359825e-05,
"loss": 0.9078,
"step": 44000
},
{
"epoch": 1.95,
"learning_rate": 3.048118131285272e-05,
"loss": 0.8919,
"step": 46000
},
{
"epoch": 2.0,
"eval_loss": 1.1437541246414185,
"eval_runtime": 285.2156,
"eval_samples_per_second": 50.916,
"step": 47134
},
{
"epoch": 2.04,
"learning_rate": 2.9632537022107186e-05,
"loss": 0.8866,
"step": 48000
},
{
"epoch": 2.12,
"learning_rate": 2.878389273136165e-05,
"loss": 0.876,
"step": 50000
},
{
"epoch": 2.21,
"learning_rate": 2.793524844061612e-05,
"loss": 0.8733,
"step": 52000
},
{
"epoch": 2.29,
"learning_rate": 2.708660414987058e-05,
"loss": 0.8685,
"step": 54000
},
{
"epoch": 2.38,
"learning_rate": 2.623795985912505e-05,
"loss": 0.8666,
"step": 56000
},
{
"epoch": 2.46,
"learning_rate": 2.5389315568379513e-05,
"loss": 0.8661,
"step": 58000
},
{
"epoch": 2.55,
"learning_rate": 2.4540671277633982e-05,
"loss": 0.8542,
"step": 60000
},
{
"epoch": 2.63,
"learning_rate": 2.3692026986888445e-05,
"loss": 0.8638,
"step": 62000
},
{
"epoch": 2.72,
"learning_rate": 2.284338269614291e-05,
"loss": 0.854,
"step": 64000
},
{
"epoch": 2.8,
"learning_rate": 2.1994738405397377e-05,
"loss": 0.8596,
"step": 66000
},
{
"epoch": 2.89,
"learning_rate": 2.1146094114651846e-05,
"loss": 0.8566,
"step": 68000
},
{
"epoch": 2.97,
"learning_rate": 2.0297449823906312e-05,
"loss": 0.8531,
"step": 70000
},
{
"epoch": 3.0,
"eval_loss": 1.1323479413986206,
"eval_runtime": 285.2208,
"eval_samples_per_second": 50.915,
"step": 70701
},
{
"epoch": 3.06,
"learning_rate": 1.944880553316078e-05,
"loss": 0.8331,
"step": 72000
},
{
"epoch": 3.14,
"learning_rate": 1.8600161242415244e-05,
"loss": 0.8402,
"step": 74000
},
{
"epoch": 3.22,
"learning_rate": 1.7751516951669707e-05,
"loss": 0.8381,
"step": 76000
},
{
"epoch": 3.31,
"learning_rate": 1.6902872660924173e-05,
"loss": 0.8362,
"step": 78000
},
{
"epoch": 3.39,
"learning_rate": 1.605422837017864e-05,
"loss": 0.8318,
"step": 80000
},
{
"epoch": 3.48,
"learning_rate": 1.5205584079433105e-05,
"loss": 0.835,
"step": 82000
},
{
"epoch": 3.56,
"learning_rate": 1.4356939788687573e-05,
"loss": 0.8344,
"step": 84000
},
{
"epoch": 3.65,
"learning_rate": 1.3508295497942039e-05,
"loss": 0.8354,
"step": 86000
},
{
"epoch": 3.73,
"learning_rate": 1.2659651207196505e-05,
"loss": 0.8283,
"step": 88000
},
{
"epoch": 3.82,
"learning_rate": 1.181100691645097e-05,
"loss": 0.8341,
"step": 90000
},
{
"epoch": 3.9,
"learning_rate": 1.0962362625705435e-05,
"loss": 0.8225,
"step": 92000
},
{
"epoch": 3.99,
"learning_rate": 1.0113718334959903e-05,
"loss": 0.8161,
"step": 94000
},
{
"epoch": 4.0,
"eval_loss": 1.1247462034225464,
"eval_runtime": 285.2246,
"eval_samples_per_second": 50.914,
"step": 94268
},
{
"epoch": 4.07,
"learning_rate": 9.265074044214369e-06,
"loss": 0.8112,
"step": 96000
},
{
"epoch": 4.16,
"learning_rate": 8.416429753468833e-06,
"loss": 0.8188,
"step": 98000
},
{
"epoch": 4.24,
"learning_rate": 7.567785462723299e-06,
"loss": 0.8161,
"step": 100000
},
{
"epoch": 4.33,
"learning_rate": 6.719141171977767e-06,
"loss": 0.8157,
"step": 102000
},
{
"epoch": 4.41,
"learning_rate": 5.870496881232232e-06,
"loss": 0.8058,
"step": 104000
},
{
"epoch": 4.5,
"learning_rate": 5.021852590486698e-06,
"loss": 0.8122,
"step": 106000
},
{
"epoch": 4.58,
"learning_rate": 4.173208299741164e-06,
"loss": 0.8135,
"step": 108000
},
{
"epoch": 4.67,
"learning_rate": 3.3245640089956294e-06,
"loss": 0.816,
"step": 110000
},
{
"epoch": 4.75,
"learning_rate": 2.475919718250096e-06,
"loss": 0.8163,
"step": 112000
},
{
"epoch": 4.84,
"learning_rate": 1.6272754275045616e-06,
"loss": 0.813,
"step": 114000
},
{
"epoch": 4.92,
"learning_rate": 7.786311367590275e-07,
"loss": 0.8102,
"step": 116000
}
],
"max_steps": 117835,
"num_train_epochs": 5,
"total_flos": 443431511673274368,
"trial_name": null,
"trial_params": null
}
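
A minimal sketch (not part of the uploaded file) of how this trainer_state.json could be inspected with Python's standard json module; the field names come from the file above, while the local path is an assumption:

import json

# Load the trainer state written by the Hugging Face Trainer (path is assumed).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; epoch-end evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"logged training points: {len(train_log)}, final training loss: {train_log[-1]['loss']}")
for e in eval_log:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.4f} "
          f"({e['eval_samples_per_second']:.1f} samples/s)")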