arabart-gec-lora / last-checkpoint / trainer_state.json
{
"best_metric": 0.45563551783561707,
"best_model_checkpoint": "bart_lora_outputs\\checkpoint-1500",
"epoch": 2.4469820554649266,
"eval_steps": 100,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 2e-05,
"loss": 2.9281,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 4e-05,
"loss": 2.8201,
"step": 20
},
{
"epoch": 0.05,
"learning_rate": 6e-05,
"loss": 2.579,
"step": 30
},
{
"epoch": 0.07,
"learning_rate": 8e-05,
"loss": 2.4427,
"step": 40
},
{
"epoch": 0.08,
"learning_rate": 0.0001,
"loss": 2.1681,
"step": 50
},
{
"epoch": 0.1,
"learning_rate": 0.00012,
"loss": 1.7104,
"step": 60
},
{
"epoch": 0.11,
"learning_rate": 0.00014000000000000001,
"loss": 1.4177,
"step": 70
},
{
"epoch": 0.13,
"learning_rate": 0.00016,
"loss": 1.2515,
"step": 80
},
{
"epoch": 0.15,
"learning_rate": 0.00017999999999999998,
"loss": 1.1238,
"step": 90
},
{
"epoch": 0.16,
"learning_rate": 0.0002,
"loss": 1.0489,
"step": 100
},
{
"epoch": 0.16,
"eval_loss": 0.8469381928443909,
"eval_runtime": 5.9601,
"eval_samples_per_second": 196.474,
"eval_steps_per_second": 24.664,
"step": 100
},
{
"epoch": 0.18,
"learning_rate": 0.00022,
"loss": 1.0079,
"step": 110
},
{
"epoch": 0.2,
"learning_rate": 0.00024,
"loss": 1.0077,
"step": 120
},
{
"epoch": 0.21,
"learning_rate": 0.00026000000000000003,
"loss": 0.9416,
"step": 130
},
{
"epoch": 0.23,
"learning_rate": 0.00028000000000000003,
"loss": 0.8882,
"step": 140
},
{
"epoch": 0.24,
"learning_rate": 0.0003,
"loss": 0.8595,
"step": 150
},
{
"epoch": 0.26,
"learning_rate": 0.00032,
"loss": 0.8853,
"step": 160
},
{
"epoch": 0.28,
"learning_rate": 0.00034,
"loss": 0.7678,
"step": 170
},
{
"epoch": 0.29,
"learning_rate": 0.00035999999999999997,
"loss": 0.8595,
"step": 180
},
{
"epoch": 0.31,
"learning_rate": 0.00038,
"loss": 0.8514,
"step": 190
},
{
"epoch": 0.33,
"learning_rate": 0.0004,
"loss": 0.8128,
"step": 200
},
{
"epoch": 0.33,
"eval_loss": 0.6850531697273254,
"eval_runtime": 6.0279,
"eval_samples_per_second": 194.264,
"eval_steps_per_second": 24.387,
"step": 200
},
{
"epoch": 0.34,
"learning_rate": 0.00042,
"loss": 0.7782,
"step": 210
},
{
"epoch": 0.36,
"learning_rate": 0.00044,
"loss": 0.8064,
"step": 220
},
{
"epoch": 0.38,
"learning_rate": 0.00046,
"loss": 0.7627,
"step": 230
},
{
"epoch": 0.39,
"learning_rate": 0.00048,
"loss": 0.7447,
"step": 240
},
{
"epoch": 0.41,
"learning_rate": 0.0005,
"loss": 0.7652,
"step": 250
},
{
"epoch": 0.42,
"learning_rate": 0.0005200000000000001,
"loss": 0.7568,
"step": 260
},
{
"epoch": 0.44,
"learning_rate": 0.00054,
"loss": 0.7291,
"step": 270
},
{
"epoch": 0.46,
"learning_rate": 0.0005600000000000001,
"loss": 0.7118,
"step": 280
},
{
"epoch": 0.47,
"learning_rate": 0.00058,
"loss": 0.7462,
"step": 290
},
{
"epoch": 0.49,
"learning_rate": 0.0006,
"loss": 0.6866,
"step": 300
},
{
"epoch": 0.49,
"eval_loss": 0.629724383354187,
"eval_runtime": 6.0378,
"eval_samples_per_second": 193.943,
"eval_steps_per_second": 24.346,
"step": 300
},
{
"epoch": 0.51,
"learning_rate": 0.00062,
"loss": 0.6995,
"step": 310
},
{
"epoch": 0.52,
"learning_rate": 0.00064,
"loss": 0.724,
"step": 320
},
{
"epoch": 0.54,
"learning_rate": 0.00066,
"loss": 0.6698,
"step": 330
},
{
"epoch": 0.55,
"learning_rate": 0.00068,
"loss": 0.6516,
"step": 340
},
{
"epoch": 0.57,
"learning_rate": 0.0007,
"loss": 0.6657,
"step": 350
},
{
"epoch": 0.59,
"learning_rate": 0.0007199999999999999,
"loss": 0.6765,
"step": 360
},
{
"epoch": 0.6,
"learning_rate": 0.00074,
"loss": 0.6596,
"step": 370
},
{
"epoch": 0.62,
"learning_rate": 0.00076,
"loss": 0.6884,
"step": 380
},
{
"epoch": 0.64,
"learning_rate": 0.0007800000000000001,
"loss": 0.647,
"step": 390
},
{
"epoch": 0.65,
"learning_rate": 0.0008,
"loss": 0.713,
"step": 400
},
{
"epoch": 0.65,
"eval_loss": 0.5541791319847107,
"eval_runtime": 6.0716,
"eval_samples_per_second": 192.864,
"eval_steps_per_second": 24.211,
"step": 400
},
{
"epoch": 0.67,
"learning_rate": 0.00082,
"loss": 0.6593,
"step": 410
},
{
"epoch": 0.69,
"learning_rate": 0.00084,
"loss": 0.62,
"step": 420
},
{
"epoch": 0.7,
"learning_rate": 0.00086,
"loss": 0.6912,
"step": 430
},
{
"epoch": 0.72,
"learning_rate": 0.00088,
"loss": 0.6407,
"step": 440
},
{
"epoch": 0.73,
"learning_rate": 0.0009000000000000001,
"loss": 0.6444,
"step": 450
},
{
"epoch": 0.75,
"learning_rate": 0.00092,
"loss": 0.6591,
"step": 460
},
{
"epoch": 0.77,
"learning_rate": 0.00094,
"loss": 0.6329,
"step": 470
},
{
"epoch": 0.78,
"learning_rate": 0.00096,
"loss": 0.6097,
"step": 480
},
{
"epoch": 0.8,
"learning_rate": 0.00098,
"loss": 0.6444,
"step": 490
},
{
"epoch": 0.82,
"learning_rate": 0.001,
"loss": 0.6106,
"step": 500
},
{
"epoch": 0.82,
"eval_loss": 0.5469470024108887,
"eval_runtime": 6.1353,
"eval_samples_per_second": 190.861,
"eval_steps_per_second": 23.96,
"step": 500
},
{
"epoch": 0.83,
"learning_rate": 0.0009982238010657195,
"loss": 0.6725,
"step": 510
},
{
"epoch": 0.85,
"learning_rate": 0.0009964476021314388,
"loss": 0.6612,
"step": 520
},
{
"epoch": 0.86,
"learning_rate": 0.000994671403197158,
"loss": 0.6202,
"step": 530
},
{
"epoch": 0.88,
"learning_rate": 0.0009928952042628776,
"loss": 0.6085,
"step": 540
},
{
"epoch": 0.9,
"learning_rate": 0.0009911190053285969,
"loss": 0.6023,
"step": 550
},
{
"epoch": 0.91,
"learning_rate": 0.0009893428063943162,
"loss": 0.6209,
"step": 560
},
{
"epoch": 0.93,
"learning_rate": 0.0009875666074600357,
"loss": 0.6128,
"step": 570
},
{
"epoch": 0.95,
"learning_rate": 0.000985790408525755,
"loss": 0.5971,
"step": 580
},
{
"epoch": 0.96,
"learning_rate": 0.0009840142095914742,
"loss": 0.6028,
"step": 590
},
{
"epoch": 0.98,
"learning_rate": 0.0009822380106571937,
"loss": 0.6105,
"step": 600
},
{
"epoch": 0.98,
"eval_loss": 0.527148425579071,
"eval_runtime": 6.0982,
"eval_samples_per_second": 192.024,
"eval_steps_per_second": 24.105,
"step": 600
},
{
"epoch": 1.0,
"learning_rate": 0.000980461811722913,
"loss": 0.6101,
"step": 610
},
{
"epoch": 1.01,
"learning_rate": 0.0009786856127886323,
"loss": 0.5349,
"step": 620
},
{
"epoch": 1.03,
"learning_rate": 0.0009769094138543518,
"loss": 0.5903,
"step": 630
},
{
"epoch": 1.04,
"learning_rate": 0.0009751332149200711,
"loss": 0.6033,
"step": 640
},
{
"epoch": 1.06,
"learning_rate": 0.0009733570159857904,
"loss": 0.544,
"step": 650
},
{
"epoch": 1.08,
"learning_rate": 0.0009715808170515098,
"loss": 0.5582,
"step": 660
},
{
"epoch": 1.09,
"learning_rate": 0.0009698046181172292,
"loss": 0.5488,
"step": 670
},
{
"epoch": 1.11,
"learning_rate": 0.0009680284191829485,
"loss": 0.5799,
"step": 680
},
{
"epoch": 1.13,
"learning_rate": 0.0009662522202486678,
"loss": 0.5857,
"step": 690
},
{
"epoch": 1.14,
"learning_rate": 0.0009644760213143872,
"loss": 0.5857,
"step": 700
},
{
"epoch": 1.14,
"eval_loss": 0.5239382982254028,
"eval_runtime": 6.2471,
"eval_samples_per_second": 187.448,
"eval_steps_per_second": 23.531,
"step": 700
},
{
"epoch": 1.16,
"learning_rate": 0.0009626998223801065,
"loss": 0.5289,
"step": 710
},
{
"epoch": 1.17,
"learning_rate": 0.0009609236234458259,
"loss": 0.6082,
"step": 720
},
{
"epoch": 1.19,
"learning_rate": 0.0009591474245115453,
"loss": 0.5837,
"step": 730
},
{
"epoch": 1.21,
"learning_rate": 0.0009573712255772646,
"loss": 0.5571,
"step": 740
},
{
"epoch": 1.22,
"learning_rate": 0.000955595026642984,
"loss": 0.5948,
"step": 750
},
{
"epoch": 1.24,
"learning_rate": 0.0009538188277087034,
"loss": 0.5455,
"step": 760
},
{
"epoch": 1.26,
"learning_rate": 0.0009520426287744227,
"loss": 0.5858,
"step": 770
},
{
"epoch": 1.27,
"learning_rate": 0.0009502664298401421,
"loss": 0.5289,
"step": 780
},
{
"epoch": 1.29,
"learning_rate": 0.0009484902309058615,
"loss": 0.6011,
"step": 790
},
{
"epoch": 1.31,
"learning_rate": 0.0009467140319715807,
"loss": 0.5841,
"step": 800
},
{
"epoch": 1.31,
"eval_loss": 0.5005862712860107,
"eval_runtime": 5.9494,
"eval_samples_per_second": 196.827,
"eval_steps_per_second": 24.708,
"step": 800
},
{
"epoch": 1.32,
"learning_rate": 0.0009449378330373001,
"loss": 0.5403,
"step": 810
},
{
"epoch": 1.34,
"learning_rate": 0.0009431616341030196,
"loss": 0.558,
"step": 820
},
{
"epoch": 1.35,
"learning_rate": 0.0009413854351687389,
"loss": 0.5435,
"step": 830
},
{
"epoch": 1.37,
"learning_rate": 0.0009396092362344583,
"loss": 0.5341,
"step": 840
},
{
"epoch": 1.39,
"learning_rate": 0.0009378330373001777,
"loss": 0.5398,
"step": 850
},
{
"epoch": 1.4,
"learning_rate": 0.000936056838365897,
"loss": 0.5548,
"step": 860
},
{
"epoch": 1.42,
"learning_rate": 0.0009342806394316164,
"loss": 0.5438,
"step": 870
},
{
"epoch": 1.44,
"learning_rate": 0.0009325044404973358,
"loss": 0.5845,
"step": 880
},
{
"epoch": 1.45,
"learning_rate": 0.0009307282415630552,
"loss": 0.5146,
"step": 890
},
{
"epoch": 1.47,
"learning_rate": 0.0009289520426287745,
"loss": 0.5274,
"step": 900
},
{
"epoch": 1.47,
"eval_loss": 0.49163827300071716,
"eval_runtime": 6.0349,
"eval_samples_per_second": 194.04,
"eval_steps_per_second": 24.359,
"step": 900
},
{
"epoch": 1.48,
"learning_rate": 0.0009271758436944939,
"loss": 0.5999,
"step": 910
},
{
"epoch": 1.5,
"learning_rate": 0.0009253996447602132,
"loss": 0.5798,
"step": 920
},
{
"epoch": 1.52,
"learning_rate": 0.0009236234458259325,
"loss": 0.5465,
"step": 930
},
{
"epoch": 1.53,
"learning_rate": 0.0009218472468916519,
"loss": 0.5752,
"step": 940
},
{
"epoch": 1.55,
"learning_rate": 0.0009200710479573713,
"loss": 0.5544,
"step": 950
},
{
"epoch": 1.57,
"learning_rate": 0.0009182948490230906,
"loss": 0.5576,
"step": 960
},
{
"epoch": 1.58,
"learning_rate": 0.00091651865008881,
"loss": 0.5351,
"step": 970
},
{
"epoch": 1.6,
"learning_rate": 0.0009147424511545294,
"loss": 0.5584,
"step": 980
},
{
"epoch": 1.62,
"learning_rate": 0.0009129662522202487,
"loss": 0.5191,
"step": 990
},
{
"epoch": 1.63,
"learning_rate": 0.0009111900532859681,
"loss": 0.5304,
"step": 1000
},
{
"epoch": 1.63,
"eval_loss": 0.48665139079093933,
"eval_runtime": 5.92,
"eval_samples_per_second": 197.803,
"eval_steps_per_second": 24.831,
"step": 1000
},
{
"epoch": 1.65,
"learning_rate": 0.0009094138543516875,
"loss": 0.5042,
"step": 1010
},
{
"epoch": 1.66,
"learning_rate": 0.0009076376554174067,
"loss": 0.5495,
"step": 1020
},
{
"epoch": 1.68,
"learning_rate": 0.0009058614564831261,
"loss": 0.5309,
"step": 1030
},
{
"epoch": 1.7,
"learning_rate": 0.0009040852575488455,
"loss": 0.5207,
"step": 1040
},
{
"epoch": 1.71,
"learning_rate": 0.0009023090586145648,
"loss": 0.5209,
"step": 1050
},
{
"epoch": 1.73,
"learning_rate": 0.0009005328596802842,
"loss": 0.5172,
"step": 1060
},
{
"epoch": 1.75,
"learning_rate": 0.0008987566607460036,
"loss": 0.5284,
"step": 1070
},
{
"epoch": 1.76,
"learning_rate": 0.0008969804618117229,
"loss": 0.532,
"step": 1080
},
{
"epoch": 1.78,
"learning_rate": 0.0008952042628774423,
"loss": 0.534,
"step": 1090
},
{
"epoch": 1.79,
"learning_rate": 0.0008934280639431617,
"loss": 0.5299,
"step": 1100
},
{
"epoch": 1.79,
"eval_loss": 0.47891008853912354,
"eval_runtime": 5.951,
"eval_samples_per_second": 196.772,
"eval_steps_per_second": 24.702,
"step": 1100
},
{
"epoch": 1.81,
"learning_rate": 0.000891651865008881,
"loss": 0.5213,
"step": 1110
},
{
"epoch": 1.83,
"learning_rate": 0.0008898756660746004,
"loss": 0.5443,
"step": 1120
},
{
"epoch": 1.84,
"learning_rate": 0.0008880994671403197,
"loss": 0.5367,
"step": 1130
},
{
"epoch": 1.86,
"learning_rate": 0.0008863232682060391,
"loss": 0.5393,
"step": 1140
},
{
"epoch": 1.88,
"learning_rate": 0.0008845470692717584,
"loss": 0.5286,
"step": 1150
},
{
"epoch": 1.89,
"learning_rate": 0.0008827708703374778,
"loss": 0.5363,
"step": 1160
},
{
"epoch": 1.91,
"learning_rate": 0.0008809946714031972,
"loss": 0.4707,
"step": 1170
},
{
"epoch": 1.92,
"learning_rate": 0.0008792184724689165,
"loss": 0.5414,
"step": 1180
},
{
"epoch": 1.94,
"learning_rate": 0.0008774422735346359,
"loss": 0.508,
"step": 1190
},
{
"epoch": 1.96,
"learning_rate": 0.0008756660746003553,
"loss": 0.5238,
"step": 1200
},
{
"epoch": 1.96,
"eval_loss": 0.47723039984703064,
"eval_runtime": 11.5341,
"eval_samples_per_second": 101.525,
"eval_steps_per_second": 12.745,
"step": 1200
},
{
"epoch": 1.97,
"learning_rate": 0.0008738898756660746,
"loss": 0.527,
"step": 1210
},
{
"epoch": 1.99,
"learning_rate": 0.000872113676731794,
"loss": 0.537,
"step": 1220
},
{
"epoch": 2.01,
"learning_rate": 0.0008703374777975134,
"loss": 0.5252,
"step": 1230
},
{
"epoch": 2.02,
"learning_rate": 0.0008685612788632326,
"loss": 0.5252,
"step": 1240
},
{
"epoch": 2.04,
"learning_rate": 0.000866785079928952,
"loss": 0.501,
"step": 1250
},
{
"epoch": 2.06,
"learning_rate": 0.0008650088809946714,
"loss": 0.4979,
"step": 1260
},
{
"epoch": 2.07,
"learning_rate": 0.0008632326820603907,
"loss": 0.5041,
"step": 1270
},
{
"epoch": 2.09,
"learning_rate": 0.0008614564831261101,
"loss": 0.4837,
"step": 1280
},
{
"epoch": 2.1,
"learning_rate": 0.0008596802841918295,
"loss": 0.5124,
"step": 1290
},
{
"epoch": 2.12,
"learning_rate": 0.0008579040852575488,
"loss": 0.4876,
"step": 1300
},
{
"epoch": 2.12,
"eval_loss": 0.4801134765148163,
"eval_runtime": 5.9243,
"eval_samples_per_second": 197.662,
"eval_steps_per_second": 24.813,
"step": 1300
},
{
"epoch": 2.14,
"learning_rate": 0.0008561278863232682,
"loss": 0.4937,
"step": 1310
},
{
"epoch": 2.15,
"learning_rate": 0.0008543516873889876,
"loss": 0.4969,
"step": 1320
},
{
"epoch": 2.17,
"learning_rate": 0.0008525754884547069,
"loss": 0.4921,
"step": 1330
},
{
"epoch": 2.19,
"learning_rate": 0.0008507992895204263,
"loss": 0.5073,
"step": 1340
},
{
"epoch": 2.2,
"learning_rate": 0.0008490230905861456,
"loss": 0.4758,
"step": 1350
},
{
"epoch": 2.22,
"learning_rate": 0.000847246891651865,
"loss": 0.5329,
"step": 1360
},
{
"epoch": 2.23,
"learning_rate": 0.0008454706927175843,
"loss": 0.4786,
"step": 1370
},
{
"epoch": 2.25,
"learning_rate": 0.0008436944937833037,
"loss": 0.4819,
"step": 1380
},
{
"epoch": 2.27,
"learning_rate": 0.0008419182948490231,
"loss": 0.5125,
"step": 1390
},
{
"epoch": 2.28,
"learning_rate": 0.0008401420959147424,
"loss": 0.5048,
"step": 1400
},
{
"epoch": 2.28,
"eval_loss": 0.46734750270843506,
"eval_runtime": 13.0931,
"eval_samples_per_second": 89.436,
"eval_steps_per_second": 11.227,
"step": 1400
},
{
"epoch": 2.3,
"learning_rate": 0.0008383658969804618,
"loss": 0.5128,
"step": 1410
},
{
"epoch": 2.32,
"learning_rate": 0.0008365896980461812,
"loss": 0.5022,
"step": 1420
},
{
"epoch": 2.33,
"learning_rate": 0.0008348134991119005,
"loss": 0.4767,
"step": 1430
},
{
"epoch": 2.35,
"learning_rate": 0.00083303730017762,
"loss": 0.4959,
"step": 1440
},
{
"epoch": 2.37,
"learning_rate": 0.0008312611012433394,
"loss": 0.5147,
"step": 1450
},
{
"epoch": 2.38,
"learning_rate": 0.0008294849023090586,
"loss": 0.4922,
"step": 1460
},
{
"epoch": 2.4,
"learning_rate": 0.000827708703374778,
"loss": 0.4936,
"step": 1470
},
{
"epoch": 2.41,
"learning_rate": 0.0008259325044404974,
"loss": 0.5041,
"step": 1480
},
{
"epoch": 2.43,
"learning_rate": 0.0008241563055062167,
"loss": 0.491,
"step": 1490
},
{
"epoch": 2.45,
"learning_rate": 0.0008223801065719361,
"loss": 0.5096,
"step": 1500
},
{
"epoch": 2.45,
"eval_loss": 0.45563551783561707,
"eval_runtime": 13.1221,
"eval_samples_per_second": 89.239,
"eval_steps_per_second": 11.202,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 6130,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 2833006835073024.0,
"trial_name": null,
"trial_params": null
}
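
For reference (not part of the saved checkpoint itself), a minimal Python sketch of how a state file like this can be inspected offline. It assumes only the fields visible above (log_history, best_metric, best_model_checkpoint) and the last-checkpoint path from the file header; adapt the path to wherever the checkpoint actually lives.

import json

# Load the saved Trainer state from the checkpoint directory.
with open("last-checkpoint/trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# Separate training-loss entries from evaluation entries in log_history.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

# Report the best evaluation loss logged so far and where it was saved.
best = min(eval_log, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Show the most recent training loss and learning rate.
last = train_log[-1]
print(f"step {last['step']}: loss={last['loss']:.4f}, lr={last['learning_rate']:.2e}")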