indobart-base / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.20630892698727074,
"eval_steps": 500,
"global_step": 10000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"grad_norm": 12.003094673156738,
"learning_rate": 0.00001,
"loss": 2.4799,
"step": 100
},
{
"epoch": 0,
"grad_norm": 2.0890064239501953,
"learning_rate": 0.00002,
"loss": 0.6329,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 2.033069133758545,
"learning_rate": 0.00003,
"loss": 0.5035,
"step": 300
},
{
"epoch": 0.01,
"grad_norm": 2.252149820327759,
"learning_rate": 0.00004,
"loss": 0.4943,
"step": 400
},
{
"epoch": 0.01,
"grad_norm": 2.118387460708618,
"learning_rate": 0.00005,
"loss": 0.5105,
"step": 500
},
{
"epoch": 0.01,
"grad_norm": 1.954121708869934,
"learning_rate": 0.000049473684210526315,
"loss": 0.5008,
"step": 600
},
{
"epoch": 0.01,
"grad_norm": 2.036804676055908,
"learning_rate": 0.000048947368421052635,
"loss": 0.4793,
"step": 700
},
{
"epoch": 0.02,
"grad_norm": 1.824407696723938,
"learning_rate": 0.00004842105263157895,
"loss": 0.469,
"step": 800
},
{
"epoch": 0.02,
"grad_norm": 1.724145531654358,
"learning_rate": 0.00004789473684210526,
"loss": 0.4633,
"step": 900
},
{
"epoch": 0.02,
"grad_norm": 1.3427892923355103,
"learning_rate": 0.00004736842105263158,
"loss": 0.4614,
"step": 1000
},
{
"epoch": 0.02,
"grad_norm": 1.0959577560424805,
"learning_rate": 0.0000468421052631579,
"loss": 0.4605,
"step": 1100
},
{
"epoch": 0.02,
"grad_norm": 1.5479639768600464,
"learning_rate": 0.000046315789473684214,
"loss": 0.46,
"step": 1200
},
{
"epoch": 0.03,
"grad_norm": 1.535975456237793,
"learning_rate": 0.000045789473684210527,
"loss": 0.4252,
"step": 1300
},
{
"epoch": 0.03,
"grad_norm": 1.5321663618087769,
"learning_rate": 0.000045263157894736846,
"loss": 0.4369,
"step": 1400
},
{
"epoch": 0.03,
"grad_norm": 1.5459933280944824,
"learning_rate": 0.00004473684210526316,
"loss": 0.4276,
"step": 1500
},
{
"epoch": 0.03,
"grad_norm": 1.7711249589920044,
"learning_rate": 0.00004421052631578947,
"loss": 0.4474,
"step": 1600
},
{
"epoch": 0.04,
"grad_norm": 1.376570701599121,
"learning_rate": 0.00004368421052631579,
"loss": 0.4064,
"step": 1700
},
{
"epoch": 0.04,
"grad_norm": 1.74457585811615,
"learning_rate": 0.000043157894736842105,
"loss": 0.4093,
"step": 1800
},
{
"epoch": 0.04,
"grad_norm": 1.4628654718399048,
"learning_rate": 0.000042631578947368425,
"loss": 0.4236,
"step": 1900
},
{
"epoch": 0.04,
"grad_norm": 1.0637551546096802,
"learning_rate": 0.00004210526315789474,
"loss": 0.4137,
"step": 2000
},
{
"epoch": 0.04,
"grad_norm": 1.385728359222412,
"learning_rate": 0.00004157894736842106,
"loss": 0.4071,
"step": 2100
},
{
"epoch": 0.05,
"grad_norm": 1.3536651134490967,
"learning_rate": 0.00004105263157894737,
"loss": 0.4215,
"step": 2200
},
{
"epoch": 0.05,
"grad_norm": 1.5586481094360352,
"learning_rate": 0.000040526315789473684,
"loss": 0.4041,
"step": 2300
},
{
"epoch": 0.05,
"grad_norm": 1.0527902841567993,
"learning_rate": 0.00004,
"loss": 0.3984,
"step": 2400
},
{
"epoch": 0.05,
"grad_norm": 1.5508724451065063,
"learning_rate": 0.000039473684210526316,
"loss": 0.406,
"step": 2500
},
{
"epoch": 0.05,
"grad_norm": 1.7268626689910889,
"learning_rate": 0.00003894736842105263,
"loss": 0.3978,
"step": 2600
},
{
"epoch": 0.06,
"grad_norm": 1.259959101676941,
"learning_rate": 0.00003842105263157895,
"loss": 0.4111,
"step": 2700
},
{
"epoch": 0.06,
"grad_norm": 1.5187910795211792,
"learning_rate": 0.00003789473684210527,
"loss": 0.3862,
"step": 2800
},
{
"epoch": 0.06,
"grad_norm": 1.4480005502700806,
"learning_rate": 0.00003736842105263158,
"loss": 0.4021,
"step": 2900
},
{
"epoch": 0.06,
"grad_norm": 1.2096664905548096,
"learning_rate": 0.000036842105263157895,
"loss": 0.4092,
"step": 3000
},
{
"epoch": 0.06,
"grad_norm": 1.0258468389511108,
"learning_rate": 0.000036315789473684214,
"loss": 0.3829,
"step": 3100
},
{
"epoch": 0.07,
"grad_norm": 1.336549162864685,
"learning_rate": 0.00003578947368421053,
"loss": 0.4082,
"step": 3200
},
{
"epoch": 0.07,
"grad_norm": 1.1610171794891357,
"learning_rate": 0.00003526315789473684,
"loss": 0.4216,
"step": 3300
},
{
"epoch": 0.07,
"grad_norm": 1.1225395202636719,
"learning_rate": 0.00003473684210526316,
"loss": 0.3866,
"step": 3400
},
{
"epoch": 0.07,
"grad_norm": 1.2079445123672485,
"learning_rate": 0.00003421052631578947,
"loss": 0.3827,
"step": 3500
},
{
"epoch": 0.07,
"grad_norm": 0.8150144815444946,
"learning_rate": 0.00003368421052631579,
"loss": 0.3833,
"step": 3600
},
{
"epoch": 0.08,
"grad_norm": 1.1681067943572998,
"learning_rate": 0.000033157894736842106,
"loss": 0.3953,
"step": 3700
},
{
"epoch": 0.08,
"grad_norm": 1.1494988203048706,
"learning_rate": 0.000032631578947368426,
"loss": 0.3785,
"step": 3800
},
{
"epoch": 0.08,
"grad_norm": 0.8699579238891602,
"learning_rate": 0.00003210526315789474,
"loss": 0.3802,
"step": 3900
},
{
"epoch": 0.08,
"grad_norm": 1.560229778289795,
"learning_rate": 0.00003157894736842105,
"loss": 0.3837,
"step": 4000
},
{
"epoch": 0.08,
"grad_norm": 1.1917518377304077,
"learning_rate": 0.00003105263157894737,
"loss": 0.3775,
"step": 4100
},
{
"epoch": 0.09,
"grad_norm": 1.050404667854309,
"learning_rate": 0.000030526315789473684,
"loss": 0.3781,
"step": 4200
},
{
"epoch": 0.09,
"grad_norm": 0.8547882437705994,
"learning_rate": 0.00003,
"loss": 0.3629,
"step": 4300
},
{
"epoch": 0.09,
"grad_norm": 1.0195717811584473,
"learning_rate": 0.000029473684210526314,
"loss": 0.3644,
"step": 4400
},
{
"epoch": 0.09,
"grad_norm": 1.0551220178604126,
"learning_rate": 0.000028947368421052634,
"loss": 0.3774,
"step": 4500
},
{
"epoch": 0.09,
"grad_norm": 0.9689894318580627,
"learning_rate": 0.00002842105263157895,
"loss": 0.3384,
"step": 4600
},
{
"epoch": 0.1,
"grad_norm": 1.3785911798477173,
"learning_rate": 0.000027894736842105263,
"loss": 0.3725,
"step": 4700
},
{
"epoch": 0.1,
"grad_norm": 1.1647924184799194,
"learning_rate": 0.000027368421052631583,
"loss": 0.3716,
"step": 4800
},
{
"epoch": 0.1,
"grad_norm": 1.0317350625991821,
"learning_rate": 0.000026842105263157896,
"loss": 0.3454,
"step": 4900
},
{
"epoch": 0.1,
"grad_norm": 1.2156306505203247,
"learning_rate": 0.000026315789473684212,
"loss": 0.3414,
"step": 5000
},
{
"epoch": 0.11,
"grad_norm": 1.0330926179885864,
"learning_rate": 0.00002578947368421053,
"loss": 0.3658,
"step": 5100
},
{
"epoch": 0.11,
"grad_norm": 1.1999331712722778,
"learning_rate": 0.000025263157894736845,
"loss": 0.3659,
"step": 5200
},
{
"epoch": 0.11,
"grad_norm": 0.8645588159561157,
"learning_rate": 0.000024736842105263158,
"loss": 0.3567,
"step": 5300
},
{
"epoch": 0.11,
"grad_norm": 1.3966509103775024,
"learning_rate": 0.000024210526315789474,
"loss": 0.3593,
"step": 5400
},
{
"epoch": 0.11,
"grad_norm": 0.9758176207542419,
"learning_rate": 0.00002368421052631579,
"loss": 0.3491,
"step": 5500
},
{
"epoch": 0.12,
"grad_norm": 1.0079020261764526,
"learning_rate": 0.000023157894736842107,
"loss": 0.3403,
"step": 5600
},
{
"epoch": 0.12,
"grad_norm": 1.0078275203704834,
"learning_rate": 0.000022631578947368423,
"loss": 0.3396,
"step": 5700
},
{
"epoch": 0.12,
"grad_norm": 1.2329459190368652,
"learning_rate": 0.000022105263157894736,
"loss": 0.3323,
"step": 5800
},
{
"epoch": 0.12,
"grad_norm": 1.5663444995880127,
"learning_rate": 0.000021578947368421053,
"loss": 0.332,
"step": 5900
},
{
"epoch": 0.12,
"grad_norm": 1.104218602180481,
"learning_rate": 0.00002105263157894737,
"loss": 0.346,
"step": 6000
},
{
"epoch": 0.13,
"grad_norm": 1.1730228662490845,
"learning_rate": 0.000020526315789473685,
"loss": 0.3485,
"step": 6100
},
{
"epoch": 0.13,
"grad_norm": 1.2409980297088623,
"learning_rate": 0.00002,
"loss": 0.3539,
"step": 6200
},
{
"epoch": 0.13,
"grad_norm": 0.933708906173706,
"learning_rate": 0.000019473684210526315,
"loss": 0.3177,
"step": 6300
},
{
"epoch": 0.13,
"grad_norm": 1.0058486461639404,
"learning_rate": 0.000018947368421052634,
"loss": 0.3394,
"step": 6400
},
{
"epoch": 0.13,
"grad_norm": 1.2994540929794312,
"learning_rate": 0.000018421052631578947,
"loss": 0.3447,
"step": 6500
},
{
"epoch": 0.14,
"grad_norm": 1.1113885641098022,
"learning_rate": 0.000017894736842105264,
"loss": 0.3572,
"step": 6600
},
{
"epoch": 0.14,
"grad_norm": 1.1501245498657227,
"learning_rate": 0.00001736842105263158,
"loss": 0.3468,
"step": 6700
},
{
"epoch": 0.14,
"grad_norm": 1.5754578113555908,
"learning_rate": 0.000016842105263157896,
"loss": 0.3461,
"step": 6800
},
{
"epoch": 0.14,
"grad_norm": 1.214449167251587,
"learning_rate": 0.000016315789473684213,
"loss": 0.3463,
"step": 6900
},
{
"epoch": 0.14,
"grad_norm": 1.38958740234375,
"learning_rate": 0.000015789473684210526,
"loss": 0.3432,
"step": 7000
},
{
"epoch": 0.15,
"grad_norm": 1.1833761930465698,
"learning_rate": 0.000015263157894736842,
"loss": 0.336,
"step": 7100
},
{
"epoch": 0.15,
"grad_norm": 1.2316564321517944,
"learning_rate": 0.000014736842105263157,
"loss": 0.3266,
"step": 7200
},
{
"epoch": 0.15,
"grad_norm": 1.5909250974655151,
"learning_rate": 0.000014210526315789475,
"loss": 0.3354,
"step": 7300
},
{
"epoch": 0.15,
"grad_norm": 1.1195193529129028,
"learning_rate": 0.000013684210526315791,
"loss": 0.3388,
"step": 7400
},
{
"epoch": 0.15,
"grad_norm": 1.1586064100265503,
"learning_rate": 0.000013157894736842106,
"loss": 0.3413,
"step": 7500
},
{
"epoch": 0.16,
"grad_norm": 0.935364305973053,
"learning_rate": 0.000012631578947368422,
"loss": 0.3359,
"step": 7600
},
{
"epoch": 0.16,
"grad_norm": 1.3456859588623047,
"learning_rate": 0.000012105263157894737,
"loss": 0.3328,
"step": 7700
},
{
"epoch": 0.16,
"grad_norm": 1.5139777660369873,
"learning_rate": 0.000011578947368421053,
"loss": 0.3499,
"step": 7800
},
{
"epoch": 0.16,
"grad_norm": 1.0874484777450562,
"learning_rate": 0.000011052631578947368,
"loss": 0.3123,
"step": 7900
},
{
"epoch": 0.17,
"grad_norm": 0.9059338569641113,
"learning_rate": 0.000010526315789473684,
"loss": 0.3305,
"step": 8000
},
{
"epoch": 0.17,
"grad_norm": 1.1520755290985107,
"learning_rate": 0.00001,
"loss": 0.3235,
"step": 8100
},
{
"epoch": 0.17,
"grad_norm": 1.2957168817520142,
"learning_rate": 0.000009473684210526317,
"loss": 0.3204,
"step": 8200
},
{
"epoch": 0.17,
"grad_norm": 1.0766379833221436,
"learning_rate": 0.000008947368421052632,
"loss": 0.3239,
"step": 8300
},
{
"epoch": 0.17,
"grad_norm": 1.1667858362197876,
"learning_rate": 0.000008421052631578948,
"loss": 0.3183,
"step": 8400
},
{
"epoch": 0.18,
"grad_norm": 1.364989161491394,
"learning_rate": 0.000007894736842105263,
"loss": 0.3326,
"step": 8500
},
{
"epoch": 0.18,
"grad_norm": 1.2203367948532104,
"learning_rate": 0.0000073684210526315784,
"loss": 0.3205,
"step": 8600
},
{
"epoch": 0.18,
"grad_norm": 1.254922866821289,
"learning_rate": 0.000006842105263157896,
"loss": 0.3448,
"step": 8700
},
{
"epoch": 0.18,
"grad_norm": 1.471166729927063,
"learning_rate": 0.000006315789473684211,
"loss": 0.3307,
"step": 8800
},
{
"epoch": 0.18,
"grad_norm": 1.1350233554840088,
"learning_rate": 0.000005789473684210527,
"loss": 0.3174,
"step": 8900
},
{
"epoch": 0.19,
"grad_norm": 0.953658401966095,
"learning_rate": 0.000005263157894736842,
"loss": 0.3327,
"step": 9000
},
{
"epoch": 0.19,
"grad_norm": 1.2155749797821045,
"learning_rate": 0.000004736842105263159,
"loss": 0.3251,
"step": 9100
},
{
"epoch": 0.19,
"grad_norm": 1.106632947921753,
"learning_rate": 0.000004210526315789474,
"loss": 0.3011,
"step": 9200
},
{
"epoch": 0.19,
"grad_norm": 1.2169803380966187,
"learning_rate": 0.0000036842105263157892,
"loss": 0.3331,
"step": 9300
},
{
"epoch": 0.19,
"grad_norm": 0.7549806237220764,
"learning_rate": 0.0000031578947368421056,
"loss": 0.319,
"step": 9400
},
{
"epoch": 0.2,
"grad_norm": 1.2598366737365723,
"learning_rate": 0.000002631578947368421,
"loss": 0.3201,
"step": 9500
},
{
"epoch": 0.2,
"grad_norm": 0.991303563117981,
"learning_rate": 0.000002105263157894737,
"loss": 0.3144,
"step": 9600
},
{
"epoch": 0.2,
"grad_norm": 1.120094656944275,
"learning_rate": 0.0000015789473684210528,
"loss": 0.2997,
"step": 9700
},
{
"epoch": 0.2,
"grad_norm": 0.9906071424484253,
"learning_rate": 0.0000010526315789473685,
"loss": 0.3244,
"step": 9800
},
{
"epoch": 0.2,
"grad_norm": 1.083082675933838,
"learning_rate": 5.263157894736843e-7,
"loss": 0.3382,
"step": 9900
},
{
"epoch": 0.21,
"grad_norm": 1.6393320560455322,
"learning_rate": 0,
"loss": 0.3064,
"step": 10000
}
],
"logging_steps": 100,
"max_steps": 10000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 5000,
"total_flos": 21671046021120000,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
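
The schedule recorded above warms up to 5e-5 over the first 500 steps, then decays linearly to 0 at step 10000, with the training loss logged every 100 steps. A minimal sketch, assuming this file is saved locally as `trainer_state.json` and that `matplotlib` is installed, for loading the `log_history` entries and plotting the loss curve and learning-rate schedule:

```python
# Sketch only: reads the trainer_state.json shown above and plots its
# log_history. The local file path and the use of matplotlib are assumptions,
# not part of the checkpoint itself.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json", "r", encoding="utf-8") as f:
    state = json.load(f)

# Every entry in this file's log_history carries step, loss, and learning_rate.
steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]
lrs = [entry["learning_rate"] for entry in state["log_history"]]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()
```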