{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 972,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030864197530864196,
"grad_norm": 5.446850666927686,
"learning_rate": 5e-06,
"loss": 0.9199,
"step": 10
},
{
"epoch": 0.06172839506172839,
"grad_norm": 2.663126142630872,
"learning_rate": 5e-06,
"loss": 0.821,
"step": 20
},
{
"epoch": 0.09259259259259259,
"grad_norm": 1.1642696107304236,
"learning_rate": 5e-06,
"loss": 0.7706,
"step": 30
},
{
"epoch": 0.12345679012345678,
"grad_norm": 4.280110588222716,
"learning_rate": 5e-06,
"loss": 0.7596,
"step": 40
},
{
"epoch": 0.15432098765432098,
"grad_norm": 4.412787627263132,
"learning_rate": 5e-06,
"loss": 0.746,
"step": 50
},
{
"epoch": 0.18518518518518517,
"grad_norm": 1.5711972402992538,
"learning_rate": 5e-06,
"loss": 0.7267,
"step": 60
},
{
"epoch": 0.21604938271604937,
"grad_norm": 0.9451756673135364,
"learning_rate": 5e-06,
"loss": 0.7186,
"step": 70
},
{
"epoch": 0.24691358024691357,
"grad_norm": 0.9020860720269491,
"learning_rate": 5e-06,
"loss": 0.7099,
"step": 80
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.7223983788610128,
"learning_rate": 5e-06,
"loss": 0.7001,
"step": 90
},
{
"epoch": 0.30864197530864196,
"grad_norm": 0.8632286972903407,
"learning_rate": 5e-06,
"loss": 0.6965,
"step": 100
},
{
"epoch": 0.3395061728395062,
"grad_norm": 0.6600509272889379,
"learning_rate": 5e-06,
"loss": 0.6878,
"step": 110
},
{
"epoch": 0.37037037037037035,
"grad_norm": 0.8445466577407515,
"learning_rate": 5e-06,
"loss": 0.685,
"step": 120
},
{
"epoch": 0.4012345679012346,
"grad_norm": 0.7606146778204088,
"learning_rate": 5e-06,
"loss": 0.6826,
"step": 130
},
{
"epoch": 0.43209876543209874,
"grad_norm": 0.6093510086614801,
"learning_rate": 5e-06,
"loss": 0.6828,
"step": 140
},
{
"epoch": 0.46296296296296297,
"grad_norm": 0.7249517817925787,
"learning_rate": 5e-06,
"loss": 0.6808,
"step": 150
},
{
"epoch": 0.49382716049382713,
"grad_norm": 0.559508936091321,
"learning_rate": 5e-06,
"loss": 0.6806,
"step": 160
},
{
"epoch": 0.5246913580246914,
"grad_norm": 0.6485112723957187,
"learning_rate": 5e-06,
"loss": 0.6755,
"step": 170
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.5734053780909245,
"learning_rate": 5e-06,
"loss": 0.6676,
"step": 180
},
{
"epoch": 0.5864197530864198,
"grad_norm": 0.7330429838481911,
"learning_rate": 5e-06,
"loss": 0.6695,
"step": 190
},
{
"epoch": 0.6172839506172839,
"grad_norm": 0.7031639101048125,
"learning_rate": 5e-06,
"loss": 0.669,
"step": 200
},
{
"epoch": 0.6481481481481481,
"grad_norm": 0.5618292117245931,
"learning_rate": 5e-06,
"loss": 0.6759,
"step": 210
},
{
"epoch": 0.6790123456790124,
"grad_norm": 0.5384920681614226,
"learning_rate": 5e-06,
"loss": 0.6626,
"step": 220
},
{
"epoch": 0.7098765432098766,
"grad_norm": 0.8695843119793333,
"learning_rate": 5e-06,
"loss": 0.6613,
"step": 230
},
{
"epoch": 0.7407407407407407,
"grad_norm": 0.8530303198305609,
"learning_rate": 5e-06,
"loss": 0.6622,
"step": 240
},
{
"epoch": 0.7716049382716049,
"grad_norm": 0.7246183829809985,
"learning_rate": 5e-06,
"loss": 0.6585,
"step": 250
},
{
"epoch": 0.8024691358024691,
"grad_norm": 0.6467615123805251,
"learning_rate": 5e-06,
"loss": 0.6663,
"step": 260
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.6051023696593527,
"learning_rate": 5e-06,
"loss": 0.6578,
"step": 270
},
{
"epoch": 0.8641975308641975,
"grad_norm": 0.5325735601636417,
"learning_rate": 5e-06,
"loss": 0.6628,
"step": 280
},
{
"epoch": 0.8950617283950617,
"grad_norm": 0.5624534504885511,
"learning_rate": 5e-06,
"loss": 0.6551,
"step": 290
},
{
"epoch": 0.9259259259259259,
"grad_norm": 0.5105392081865389,
"learning_rate": 5e-06,
"loss": 0.6585,
"step": 300
},
{
"epoch": 0.9567901234567902,
"grad_norm": 0.7882251080908214,
"learning_rate": 5e-06,
"loss": 0.6578,
"step": 310
},
{
"epoch": 0.9876543209876543,
"grad_norm": 0.7218610961797266,
"learning_rate": 5e-06,
"loss": 0.6542,
"step": 320
},
{
"epoch": 1.0,
"eval_loss": 0.6550179123878479,
"eval_runtime": 31.8484,
"eval_samples_per_second": 273.641,
"eval_steps_per_second": 1.099,
"step": 324
},
{
"epoch": 1.0185185185185186,
"grad_norm": 0.7148119037457841,
"learning_rate": 5e-06,
"loss": 0.624,
"step": 330
},
{
"epoch": 1.0493827160493827,
"grad_norm": 0.8285481607039845,
"learning_rate": 5e-06,
"loss": 0.6138,
"step": 340
},
{
"epoch": 1.0802469135802468,
"grad_norm": 0.5405908730469038,
"learning_rate": 5e-06,
"loss": 0.6154,
"step": 350
},
{
"epoch": 1.1111111111111112,
"grad_norm": 0.6837013751545337,
"learning_rate": 5e-06,
"loss": 0.613,
"step": 360
},
{
"epoch": 1.1419753086419753,
"grad_norm": 0.8086734597067436,
"learning_rate": 5e-06,
"loss": 0.6051,
"step": 370
},
{
"epoch": 1.1728395061728394,
"grad_norm": 0.6168727254851533,
"learning_rate": 5e-06,
"loss": 0.6047,
"step": 380
},
{
"epoch": 1.2037037037037037,
"grad_norm": 0.6222564529681911,
"learning_rate": 5e-06,
"loss": 0.6135,
"step": 390
},
{
"epoch": 1.2345679012345678,
"grad_norm": 0.6502199448782471,
"learning_rate": 5e-06,
"loss": 0.6092,
"step": 400
},
{
"epoch": 1.2654320987654322,
"grad_norm": 0.642957942043697,
"learning_rate": 5e-06,
"loss": 0.6151,
"step": 410
},
{
"epoch": 1.2962962962962963,
"grad_norm": 0.6973190504371732,
"learning_rate": 5e-06,
"loss": 0.6087,
"step": 420
},
{
"epoch": 1.3271604938271606,
"grad_norm": 0.5681720826825847,
"learning_rate": 5e-06,
"loss": 0.6058,
"step": 430
},
{
"epoch": 1.3580246913580247,
"grad_norm": 0.5560347945291988,
"learning_rate": 5e-06,
"loss": 0.6152,
"step": 440
},
{
"epoch": 1.3888888888888888,
"grad_norm": 0.5202152575422632,
"learning_rate": 5e-06,
"loss": 0.6112,
"step": 450
},
{
"epoch": 1.4197530864197532,
"grad_norm": 0.6652453548193578,
"learning_rate": 5e-06,
"loss": 0.6084,
"step": 460
},
{
"epoch": 1.4506172839506173,
"grad_norm": 0.5959726615804369,
"learning_rate": 5e-06,
"loss": 0.611,
"step": 470
},
{
"epoch": 1.4814814814814814,
"grad_norm": 0.5753042580268376,
"learning_rate": 5e-06,
"loss": 0.6099,
"step": 480
},
{
"epoch": 1.5123456790123457,
"grad_norm": 0.5657589973831328,
"learning_rate": 5e-06,
"loss": 0.6158,
"step": 490
},
{
"epoch": 1.5432098765432098,
"grad_norm": 0.48962281372551314,
"learning_rate": 5e-06,
"loss": 0.612,
"step": 500
},
{
"epoch": 1.574074074074074,
"grad_norm": 0.5867122795097997,
"learning_rate": 5e-06,
"loss": 0.6092,
"step": 510
},
{
"epoch": 1.6049382716049383,
"grad_norm": 0.567887162579392,
"learning_rate": 5e-06,
"loss": 0.6049,
"step": 520
},
{
"epoch": 1.6358024691358026,
"grad_norm": 0.5846370116192817,
"learning_rate": 5e-06,
"loss": 0.6102,
"step": 530
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.7372794706158421,
"learning_rate": 5e-06,
"loss": 0.6121,
"step": 540
},
{
"epoch": 1.6975308641975309,
"grad_norm": 0.5679832644741175,
"learning_rate": 5e-06,
"loss": 0.6039,
"step": 550
},
{
"epoch": 1.7283950617283952,
"grad_norm": 0.4951982048137007,
"learning_rate": 5e-06,
"loss": 0.6097,
"step": 560
},
{
"epoch": 1.7592592592592593,
"grad_norm": 0.5947044750562988,
"learning_rate": 5e-06,
"loss": 0.6108,
"step": 570
},
{
"epoch": 1.7901234567901234,
"grad_norm": 0.5672737096963988,
"learning_rate": 5e-06,
"loss": 0.6095,
"step": 580
},
{
"epoch": 1.8209876543209877,
"grad_norm": 0.5215432816515835,
"learning_rate": 5e-06,
"loss": 0.6096,
"step": 590
},
{
"epoch": 1.8518518518518519,
"grad_norm": 0.5356125614039279,
"learning_rate": 5e-06,
"loss": 0.6037,
"step": 600
},
{
"epoch": 1.882716049382716,
"grad_norm": 0.5290482585426443,
"learning_rate": 5e-06,
"loss": 0.6062,
"step": 610
},
{
"epoch": 1.9135802469135803,
"grad_norm": 0.6241816332212873,
"learning_rate": 5e-06,
"loss": 0.6106,
"step": 620
},
{
"epoch": 1.9444444444444444,
"grad_norm": 0.4959658493368521,
"learning_rate": 5e-06,
"loss": 0.6067,
"step": 630
},
{
"epoch": 1.9753086419753085,
"grad_norm": 0.5282675010462219,
"learning_rate": 5e-06,
"loss": 0.6069,
"step": 640
},
{
"epoch": 2.0,
"eval_loss": 0.6462317109107971,
"eval_runtime": 32.1855,
"eval_samples_per_second": 270.774,
"eval_steps_per_second": 1.087,
"step": 648
},
{
"epoch": 2.006172839506173,
"grad_norm": 0.7935581137695162,
"learning_rate": 5e-06,
"loss": 0.596,
"step": 650
},
{
"epoch": 2.037037037037037,
"grad_norm": 0.6261816641543,
"learning_rate": 5e-06,
"loss": 0.5645,
"step": 660
},
{
"epoch": 2.067901234567901,
"grad_norm": 0.5849160839297688,
"learning_rate": 5e-06,
"loss": 0.558,
"step": 670
},
{
"epoch": 2.0987654320987654,
"grad_norm": 0.6105130235265663,
"learning_rate": 5e-06,
"loss": 0.5591,
"step": 680
},
{
"epoch": 2.1296296296296298,
"grad_norm": 0.6033282280363377,
"learning_rate": 5e-06,
"loss": 0.5573,
"step": 690
},
{
"epoch": 2.1604938271604937,
"grad_norm": 0.7739145588445309,
"learning_rate": 5e-06,
"loss": 0.5571,
"step": 700
},
{
"epoch": 2.191358024691358,
"grad_norm": 0.626140801689634,
"learning_rate": 5e-06,
"loss": 0.56,
"step": 710
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.5916889343710152,
"learning_rate": 5e-06,
"loss": 0.5571,
"step": 720
},
{
"epoch": 2.253086419753086,
"grad_norm": 0.636584234517333,
"learning_rate": 5e-06,
"loss": 0.5638,
"step": 730
},
{
"epoch": 2.2839506172839505,
"grad_norm": 0.5507349524263677,
"learning_rate": 5e-06,
"loss": 0.5724,
"step": 740
},
{
"epoch": 2.314814814814815,
"grad_norm": 0.6732342797910674,
"learning_rate": 5e-06,
"loss": 0.567,
"step": 750
},
{
"epoch": 2.3456790123456788,
"grad_norm": 0.5395015214035337,
"learning_rate": 5e-06,
"loss": 0.5569,
"step": 760
},
{
"epoch": 2.376543209876543,
"grad_norm": 0.5992087208696053,
"learning_rate": 5e-06,
"loss": 0.5647,
"step": 770
},
{
"epoch": 2.4074074074074074,
"grad_norm": 0.5102699543572234,
"learning_rate": 5e-06,
"loss": 0.5622,
"step": 780
},
{
"epoch": 2.4382716049382718,
"grad_norm": 0.6025610989122395,
"learning_rate": 5e-06,
"loss": 0.5634,
"step": 790
},
{
"epoch": 2.4691358024691357,
"grad_norm": 0.6231368998767735,
"learning_rate": 5e-06,
"loss": 0.5594,
"step": 800
},
{
"epoch": 2.5,
"grad_norm": 0.5707370285498646,
"learning_rate": 5e-06,
"loss": 0.5685,
"step": 810
},
{
"epoch": 2.5308641975308643,
"grad_norm": 0.5549089552658856,
"learning_rate": 5e-06,
"loss": 0.5629,
"step": 820
},
{
"epoch": 2.5617283950617287,
"grad_norm": 0.542471712528822,
"learning_rate": 5e-06,
"loss": 0.5681,
"step": 830
},
{
"epoch": 2.5925925925925926,
"grad_norm": 0.5566494869370542,
"learning_rate": 5e-06,
"loss": 0.5663,
"step": 840
},
{
"epoch": 2.623456790123457,
"grad_norm": 0.5552433471790524,
"learning_rate": 5e-06,
"loss": 0.5635,
"step": 850
},
{
"epoch": 2.6543209876543212,
"grad_norm": 0.5049248358370862,
"learning_rate": 5e-06,
"loss": 0.5654,
"step": 860
},
{
"epoch": 2.685185185185185,
"grad_norm": 0.6071017582058569,
"learning_rate": 5e-06,
"loss": 0.5669,
"step": 870
},
{
"epoch": 2.7160493827160495,
"grad_norm": 0.5625590442980464,
"learning_rate": 5e-06,
"loss": 0.5642,
"step": 880
},
{
"epoch": 2.746913580246914,
"grad_norm": 0.5506548153090438,
"learning_rate": 5e-06,
"loss": 0.5638,
"step": 890
},
{
"epoch": 2.7777777777777777,
"grad_norm": 0.6470830808425209,
"learning_rate": 5e-06,
"loss": 0.5629,
"step": 900
},
{
"epoch": 2.808641975308642,
"grad_norm": 0.6202475264816772,
"learning_rate": 5e-06,
"loss": 0.566,
"step": 910
},
{
"epoch": 2.8395061728395063,
"grad_norm": 0.6200010347166321,
"learning_rate": 5e-06,
"loss": 0.5672,
"step": 920
},
{
"epoch": 2.8703703703703702,
"grad_norm": 0.5382597238916978,
"learning_rate": 5e-06,
"loss": 0.5674,
"step": 930
},
{
"epoch": 2.9012345679012346,
"grad_norm": 0.6294871904049958,
"learning_rate": 5e-06,
"loss": 0.5714,
"step": 940
},
{
"epoch": 2.932098765432099,
"grad_norm": 0.5413425077416242,
"learning_rate": 5e-06,
"loss": 0.5677,
"step": 950
},
{
"epoch": 2.962962962962963,
"grad_norm": 0.5307261656256946,
"learning_rate": 5e-06,
"loss": 0.5674,
"step": 960
},
{
"epoch": 2.993827160493827,
"grad_norm": 0.519858507326187,
"learning_rate": 5e-06,
"loss": 0.5688,
"step": 970
},
{
"epoch": 3.0,
"eval_loss": 0.6513996720314026,
"eval_runtime": 31.4148,
"eval_samples_per_second": 277.417,
"eval_steps_per_second": 1.114,
"step": 972
},
{
"epoch": 3.0,
"step": 972,
"total_flos": 1628136202567680.0,
"train_loss": 0.6230609294318368,
"train_runtime": 6074.7502,
"train_samples_per_second": 81.773,
"train_steps_per_second": 0.16
}
],
"logging_steps": 10,
"max_steps": 972,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1628136202567680.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}