{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.49930871025083945,
"eval_steps": 16,
"global_step": 79,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006320363420896702,
"grad_norm": 1.921875,
"learning_rate": 2e-05,
"loss": 0.6579,
"step": 1
},
{
"epoch": 0.006320363420896702,
"eval_loss": 0.6361147165298462,
"eval_runtime": 57.1659,
"eval_samples_per_second": 18.647,
"eval_steps_per_second": 18.647,
"step": 1
},
{
"epoch": 0.012640726841793404,
"grad_norm": 1.8359375,
"learning_rate": 4e-05,
"loss": 0.6363,
"step": 2
},
{
"epoch": 0.018961090262690106,
"grad_norm": 1.8671875,
"learning_rate": 6e-05,
"loss": 0.6081,
"step": 3
},
{
"epoch": 0.025281453683586808,
"grad_norm": 0.93359375,
"learning_rate": 8e-05,
"loss": 0.4346,
"step": 4
},
{
"epoch": 0.03160181710448351,
"grad_norm": 1.1171875,
"learning_rate": 0.0001,
"loss": 0.3671,
"step": 5
},
{
"epoch": 0.03792218052538021,
"grad_norm": 1.1484375,
"learning_rate": 0.00012,
"loss": 0.297,
"step": 6
},
{
"epoch": 0.04424254394627691,
"grad_norm": 0.59765625,
"learning_rate": 0.00014,
"loss": 0.2502,
"step": 7
},
{
"epoch": 0.050562907367173615,
"grad_norm": 0.451171875,
"learning_rate": 0.00016,
"loss": 0.2189,
"step": 8
},
{
"epoch": 0.05688327078807032,
"grad_norm": 0.31640625,
"learning_rate": 0.00018,
"loss": 0.1996,
"step": 9
},
{
"epoch": 0.06320363420896702,
"grad_norm": 0.3203125,
"learning_rate": 0.0002,
"loss": 0.2141,
"step": 10
},
{
"epoch": 0.06952399762986372,
"grad_norm": 0.294921875,
"learning_rate": 0.00019999770790755575,
"loss": 0.216,
"step": 11
},
{
"epoch": 0.07584436105076042,
"grad_norm": 0.306640625,
"learning_rate": 0.00019999083173529673,
"loss": 0.1861,
"step": 12
},
{
"epoch": 0.08216472447165712,
"grad_norm": 0.30859375,
"learning_rate": 0.00019997937179843937,
"loss": 0.1993,
"step": 13
},
{
"epoch": 0.08848508789255383,
"grad_norm": 0.310546875,
"learning_rate": 0.0001999633286223284,
"loss": 0.215,
"step": 14
},
{
"epoch": 0.09480545131345053,
"grad_norm": 0.259765625,
"learning_rate": 0.00019994270294241266,
"loss": 0.1796,
"step": 15
},
{
"epoch": 0.10112581473434723,
"grad_norm": 0.240234375,
"learning_rate": 0.00019991749570421146,
"loss": 0.1746,
"step": 16
},
{
"epoch": 0.10112581473434723,
"eval_loss": 0.1862088441848755,
"eval_runtime": 58.7553,
"eval_samples_per_second": 18.143,
"eval_steps_per_second": 18.143,
"step": 16
},
{
"epoch": 0.10744617815524393,
"grad_norm": 0.279296875,
"learning_rate": 0.0001998877080632712,
"loss": 0.1839,
"step": 17
},
{
"epoch": 0.11376654157614063,
"grad_norm": 0.26171875,
"learning_rate": 0.00019985334138511237,
"loss": 0.1906,
"step": 18
},
{
"epoch": 0.12008690499703734,
"grad_norm": 0.2294921875,
"learning_rate": 0.00019981439724516716,
"loss": 0.1756,
"step": 19
},
{
"epoch": 0.12640726841793404,
"grad_norm": 0.25390625,
"learning_rate": 0.0001997708774287068,
"loss": 0.1963,
"step": 20
},
{
"epoch": 0.13272763183883074,
"grad_norm": 0.2158203125,
"learning_rate": 0.00019972278393076023,
"loss": 0.1466,
"step": 21
},
{
"epoch": 0.13904799525972744,
"grad_norm": 0.22265625,
"learning_rate": 0.0001996701189560223,
"loss": 0.1519,
"step": 22
},
{
"epoch": 0.14536835868062414,
"grad_norm": 0.224609375,
"learning_rate": 0.00019961288491875278,
"loss": 0.1601,
"step": 23
},
{
"epoch": 0.15168872210152085,
"grad_norm": 0.2001953125,
"learning_rate": 0.00019955108444266585,
"loss": 0.1402,
"step": 24
},
{
"epoch": 0.15800908552241755,
"grad_norm": 0.248046875,
"learning_rate": 0.00019948472036080949,
"loss": 0.1897,
"step": 25
},
{
"epoch": 0.16432944894331425,
"grad_norm": 0.2177734375,
"learning_rate": 0.00019941379571543596,
"loss": 0.1408,
"step": 26
},
{
"epoch": 0.17064981236421095,
"grad_norm": 0.2001953125,
"learning_rate": 0.00019933831375786216,
"loss": 0.1563,
"step": 27
},
{
"epoch": 0.17697017578510765,
"grad_norm": 0.228515625,
"learning_rate": 0.00019925827794832056,
"loss": 0.1542,
"step": 28
},
{
"epoch": 0.18329053920600435,
"grad_norm": 0.1875,
"learning_rate": 0.00019917369195580063,
"loss": 0.1312,
"step": 29
},
{
"epoch": 0.18961090262690106,
"grad_norm": 0.201171875,
"learning_rate": 0.00019908455965788067,
"loss": 0.1597,
"step": 30
},
{
"epoch": 0.19593126604779776,
"grad_norm": 0.201171875,
"learning_rate": 0.00019899088514055004,
"loss": 0.1477,
"step": 31
},
{
"epoch": 0.20225162946869446,
"grad_norm": 0.1904296875,
"learning_rate": 0.00019889267269802176,
"loss": 0.1495,
"step": 32
},
{
"epoch": 0.20225162946869446,
"eval_loss": 0.15768449008464813,
"eval_runtime": 62.0983,
"eval_samples_per_second": 17.166,
"eval_steps_per_second": 17.166,
"step": 32
},
{
"epoch": 0.20857199288959116,
"grad_norm": 0.1875,
"learning_rate": 0.00019878992683253582,
"loss": 0.1341,
"step": 33
},
{
"epoch": 0.21489235631048786,
"grad_norm": 0.1904296875,
"learning_rate": 0.00019868265225415265,
"loss": 0.1413,
"step": 34
},
{
"epoch": 0.22121271973138457,
"grad_norm": 0.2158203125,
"learning_rate": 0.00019857085388053723,
"loss": 0.1507,
"step": 35
},
{
"epoch": 0.22753308315228127,
"grad_norm": 0.2265625,
"learning_rate": 0.00019845453683673368,
"loss": 0.1635,
"step": 36
},
{
"epoch": 0.23385344657317797,
"grad_norm": 0.1923828125,
"learning_rate": 0.00019833370645493047,
"loss": 0.1436,
"step": 37
},
{
"epoch": 0.24017380999407467,
"grad_norm": 0.205078125,
"learning_rate": 0.0001982083682742156,
"loss": 0.1545,
"step": 38
},
{
"epoch": 0.24649417341497137,
"grad_norm": 0.1728515625,
"learning_rate": 0.00019807852804032305,
"loss": 0.1132,
"step": 39
},
{
"epoch": 0.2528145368358681,
"grad_norm": 0.19140625,
"learning_rate": 0.00019794419170536916,
"loss": 0.1311,
"step": 40
},
{
"epoch": 0.2591349002567648,
"grad_norm": 0.19140625,
"learning_rate": 0.00019780536542758,
"loss": 0.1415,
"step": 41
},
{
"epoch": 0.2654552636776615,
"grad_norm": 0.1904296875,
"learning_rate": 0.00019766205557100868,
"loss": 0.1466,
"step": 42
},
{
"epoch": 0.2717756270985582,
"grad_norm": 0.2158203125,
"learning_rate": 0.00019751426870524407,
"loss": 0.1586,
"step": 43
},
{
"epoch": 0.2780959905194549,
"grad_norm": 0.19140625,
"learning_rate": 0.00019736201160510931,
"loss": 0.1446,
"step": 44
},
{
"epoch": 0.2844163539403516,
"grad_norm": 0.1943359375,
"learning_rate": 0.0001972052912503514,
"loss": 0.1464,
"step": 45
},
{
"epoch": 0.2907367173612483,
"grad_norm": 0.19140625,
"learning_rate": 0.00019704411482532116,
"loss": 0.1514,
"step": 46
},
{
"epoch": 0.297057080782145,
"grad_norm": 0.1923828125,
"learning_rate": 0.00019687848971864389,
"loss": 0.1529,
"step": 47
},
{
"epoch": 0.3033774442030417,
"grad_norm": 0.1845703125,
"learning_rate": 0.0001967084235228807,
"loss": 0.1288,
"step": 48
},
{
"epoch": 0.3033774442030417,
"eval_loss": 0.14587479829788208,
"eval_runtime": 61.6497,
"eval_samples_per_second": 17.291,
"eval_steps_per_second": 17.291,
"step": 48
},
{
"epoch": 0.3096978076239384,
"grad_norm": 0.1982421875,
"learning_rate": 0.00019653392403418043,
"loss": 0.1424,
"step": 49
},
{
"epoch": 0.3160181710448351,
"grad_norm": 0.193359375,
"learning_rate": 0.0001963549992519223,
"loss": 0.1495,
"step": 50
},
{
"epoch": 0.3223385344657318,
"grad_norm": 0.275390625,
"learning_rate": 0.00019617165737834916,
"loss": 0.1205,
"step": 51
},
{
"epoch": 0.3286588978866285,
"grad_norm": 0.1943359375,
"learning_rate": 0.0001959839068181914,
"loss": 0.1383,
"step": 52
},
{
"epoch": 0.3349792613075252,
"grad_norm": 0.2060546875,
"learning_rate": 0.00019579175617828187,
"loss": 0.1587,
"step": 53
},
{
"epoch": 0.3412996247284219,
"grad_norm": 0.1865234375,
"learning_rate": 0.00019559521426716118,
"loss": 0.129,
"step": 54
},
{
"epoch": 0.3476199881493186,
"grad_norm": 0.193359375,
"learning_rate": 0.0001953942900946739,
"loss": 0.1414,
"step": 55
},
{
"epoch": 0.3539403515702153,
"grad_norm": 0.18359375,
"learning_rate": 0.00019518899287155556,
"loss": 0.139,
"step": 56
},
{
"epoch": 0.360260714991112,
"grad_norm": 0.1943359375,
"learning_rate": 0.0001949793320090105,
"loss": 0.1391,
"step": 57
},
{
"epoch": 0.3665810784120087,
"grad_norm": 0.1982421875,
"learning_rate": 0.00019476531711828027,
"loss": 0.1439,
"step": 58
},
{
"epoch": 0.3729014418329054,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001945469580102031,
"loss": 0.1231,
"step": 59
},
{
"epoch": 0.3792218052538021,
"grad_norm": 0.1640625,
"learning_rate": 0.0001943242646947643,
"loss": 0.1129,
"step": 60
},
{
"epoch": 0.3855421686746988,
"grad_norm": 0.1943359375,
"learning_rate": 0.00019409724738063714,
"loss": 0.1324,
"step": 61
},
{
"epoch": 0.3918625320955955,
"grad_norm": 0.177734375,
"learning_rate": 0.00019386591647471506,
"loss": 0.1273,
"step": 62
},
{
"epoch": 0.3981828955164922,
"grad_norm": 0.1923828125,
"learning_rate": 0.00019363028258163447,
"loss": 0.1418,
"step": 63
},
{
"epoch": 0.4045032589373889,
"grad_norm": 0.19140625,
"learning_rate": 0.00019339035650328869,
"loss": 0.1508,
"step": 64
},
{
"epoch": 0.4045032589373889,
"eval_loss": 0.13675296306610107,
"eval_runtime": 61.5705,
"eval_samples_per_second": 17.313,
"eval_steps_per_second": 17.313,
"step": 64
},
{
"epoch": 0.4108236223582856,
"grad_norm": 0.1982421875,
"learning_rate": 0.0001931461492383327,
"loss": 0.1602,
"step": 65
},
{
"epoch": 0.4171439857791823,
"grad_norm": 0.166015625,
"learning_rate": 0.00019289767198167916,
"loss": 0.1166,
"step": 66
},
{
"epoch": 0.423464349200079,
"grad_norm": 0.19140625,
"learning_rate": 0.00019264493612398481,
"loss": 0.1382,
"step": 67
},
{
"epoch": 0.42978471262097573,
"grad_norm": 0.1689453125,
"learning_rate": 0.0001923879532511287,
"loss": 0.1127,
"step": 68
},
{
"epoch": 0.43610507604187243,
"grad_norm": 0.1826171875,
"learning_rate": 0.0001921267351436808,
"loss": 0.1227,
"step": 69
},
{
"epoch": 0.44242543946276913,
"grad_norm": 0.1943359375,
"learning_rate": 0.0001918612937763622,
"loss": 0.14,
"step": 70
},
{
"epoch": 0.44874580288366583,
"grad_norm": 0.19921875,
"learning_rate": 0.00019159164131749587,
"loss": 0.1403,
"step": 71
},
{
"epoch": 0.45506616630456254,
"grad_norm": 0.1923828125,
"learning_rate": 0.00019131779012844912,
"loss": 0.1214,
"step": 72
},
{
"epoch": 0.46138652972545924,
"grad_norm": 0.18359375,
"learning_rate": 0.00019103975276306678,
"loss": 0.1346,
"step": 73
},
{
"epoch": 0.46770689314635594,
"grad_norm": 0.177734375,
"learning_rate": 0.00019075754196709572,
"loss": 0.1195,
"step": 74
},
{
"epoch": 0.47402725656725264,
"grad_norm": 0.173828125,
"learning_rate": 0.0001904711706776006,
"loss": 0.1196,
"step": 75
},
{
"epoch": 0.48034761998814934,
"grad_norm": 0.1923828125,
"learning_rate": 0.00019018065202237083,
"loss": 0.1292,
"step": 76
},
{
"epoch": 0.48666798340904605,
"grad_norm": 0.15625,
"learning_rate": 0.00018988599931931866,
"loss": 0.1039,
"step": 77
},
{
"epoch": 0.49298834682994275,
"grad_norm": 0.181640625,
"learning_rate": 0.0001895872260758688,
"loss": 0.116,
"step": 78
},
{
"epoch": 0.49930871025083945,
"grad_norm": 0.1826171875,
"learning_rate": 0.00018928434598833912,
"loss": 0.1282,
"step": 79
}
],
"logging_steps": 1,
"max_steps": 474,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 79,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3281696018805555e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}