Bloomz_Lora/checkpoint-100/trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.28368794326241137,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0001994318181818182,
"loss": 2.7526,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 0.00019886363636363637,
"loss": 2.6231,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 0.00019829545454545455,
"loss": 2.5691,
"step": 3
},
{
"epoch": 0.01,
"learning_rate": 0.00019772727272727273,
"loss": 2.5755,
"step": 4
},
{
"epoch": 0.01,
"learning_rate": 0.00019715909090909094,
"loss": 2.5077,
"step": 5
},
{
"epoch": 0.02,
"learning_rate": 0.0001965909090909091,
"loss": 2.4698,
"step": 6
},
{
"epoch": 0.02,
"learning_rate": 0.00019602272727272727,
"loss": 2.4541,
"step": 7
},
{
"epoch": 0.02,
"learning_rate": 0.00019545454545454548,
"loss": 2.4764,
"step": 8
},
{
"epoch": 0.03,
"learning_rate": 0.00019488636363636366,
"loss": 2.4176,
"step": 9
},
{
"epoch": 0.03,
"learning_rate": 0.0001943181818181818,
"loss": 2.396,
"step": 10
},
{
"epoch": 0.03,
"learning_rate": 0.00019375000000000002,
"loss": 2.3929,
"step": 11
},
{
"epoch": 0.03,
"learning_rate": 0.0001931818181818182,
"loss": 2.405,
"step": 12
},
{
"epoch": 0.04,
"learning_rate": 0.00019261363636363635,
"loss": 2.3947,
"step": 13
},
{
"epoch": 0.04,
"learning_rate": 0.00019204545454545456,
"loss": 2.4164,
"step": 14
},
{
"epoch": 0.04,
"learning_rate": 0.00019147727272727274,
"loss": 2.373,
"step": 15
},
{
"epoch": 0.05,
"learning_rate": 0.00019090909090909092,
"loss": 2.3552,
"step": 16
},
{
"epoch": 0.05,
"learning_rate": 0.0001903409090909091,
"loss": 2.3988,
"step": 17
},
{
"epoch": 0.05,
"learning_rate": 0.00018977272727272728,
"loss": 2.3826,
"step": 18
},
{
"epoch": 0.05,
"learning_rate": 0.00018920454545454546,
"loss": 2.3672,
"step": 19
},
{
"epoch": 0.06,
"learning_rate": 0.00018863636363636364,
"loss": 2.3885,
"step": 20
},
{
"epoch": 0.06,
"learning_rate": 0.00018806818181818182,
"loss": 2.3043,
"step": 21
},
{
"epoch": 0.06,
"learning_rate": 0.0001875,
"loss": 2.2597,
"step": 22
},
{
"epoch": 0.07,
"learning_rate": 0.00018693181818181818,
"loss": 2.3457,
"step": 23
},
{
"epoch": 0.07,
"learning_rate": 0.00018636363636363636,
"loss": 2.3409,
"step": 24
},
{
"epoch": 0.07,
"learning_rate": 0.00018579545454545454,
"loss": 2.303,
"step": 25
},
{
"epoch": 0.07,
"learning_rate": 0.00018522727272727273,
"loss": 2.3253,
"step": 26
},
{
"epoch": 0.08,
"learning_rate": 0.00018465909090909093,
"loss": 2.3453,
"step": 27
},
{
"epoch": 0.08,
"learning_rate": 0.00018409090909090909,
"loss": 2.3151,
"step": 28
},
{
"epoch": 0.08,
"learning_rate": 0.00018352272727272727,
"loss": 2.3457,
"step": 29
},
{
"epoch": 0.09,
"learning_rate": 0.00018295454545454547,
"loss": 2.2792,
"step": 30
},
{
"epoch": 0.09,
"learning_rate": 0.00018238636363636365,
"loss": 2.3257,
"step": 31
},
{
"epoch": 0.09,
"learning_rate": 0.00018181818181818183,
"loss": 2.353,
"step": 32
},
{
"epoch": 0.09,
"learning_rate": 0.00018125000000000001,
"loss": 2.2633,
"step": 33
},
{
"epoch": 0.1,
"learning_rate": 0.0001806818181818182,
"loss": 2.3089,
"step": 34
},
{
"epoch": 0.1,
"learning_rate": 0.00018011363636363638,
"loss": 2.3085,
"step": 35
},
{
"epoch": 0.1,
"learning_rate": 0.00017954545454545456,
"loss": 2.2746,
"step": 36
},
{
"epoch": 0.1,
"learning_rate": 0.00017897727272727274,
"loss": 2.3212,
"step": 37
},
{
"epoch": 0.11,
"learning_rate": 0.00017840909090909092,
"loss": 2.2991,
"step": 38
},
{
"epoch": 0.11,
"learning_rate": 0.0001778409090909091,
"loss": 2.2807,
"step": 39
},
{
"epoch": 0.11,
"learning_rate": 0.00017727272727272728,
"loss": 2.3342,
"step": 40
},
{
"epoch": 0.12,
"learning_rate": 0.00017670454545454546,
"loss": 2.3144,
"step": 41
},
{
"epoch": 0.12,
"learning_rate": 0.00017613636363636366,
"loss": 2.3084,
"step": 42
},
{
"epoch": 0.12,
"learning_rate": 0.00017556818181818182,
"loss": 2.3032,
"step": 43
},
{
"epoch": 0.12,
"learning_rate": 0.000175,
"loss": 2.3105,
"step": 44
},
{
"epoch": 0.13,
"learning_rate": 0.0001744318181818182,
"loss": 2.3101,
"step": 45
},
{
"epoch": 0.13,
"learning_rate": 0.00017386363636363636,
"loss": 2.313,
"step": 46
},
{
"epoch": 0.13,
"learning_rate": 0.00017329545454545454,
"loss": 2.293,
"step": 47
},
{
"epoch": 0.14,
"learning_rate": 0.00017272727272727275,
"loss": 2.2807,
"step": 48
},
{
"epoch": 0.14,
"learning_rate": 0.00017215909090909093,
"loss": 2.2665,
"step": 49
},
{
"epoch": 0.14,
"learning_rate": 0.00017159090909090908,
"loss": 2.2626,
"step": 50
},
{
"epoch": 0.14,
"learning_rate": 0.0001710227272727273,
"loss": 2.2839,
"step": 51
},
{
"epoch": 0.15,
"learning_rate": 0.00017045454545454547,
"loss": 2.2865,
"step": 52
},
{
"epoch": 0.15,
"learning_rate": 0.00016988636363636365,
"loss": 2.2788,
"step": 53
},
{
"epoch": 0.15,
"learning_rate": 0.00016931818181818183,
"loss": 2.2622,
"step": 54
},
{
"epoch": 0.16,
"learning_rate": 0.00016875,
"loss": 2.3121,
"step": 55
},
{
"epoch": 0.16,
"learning_rate": 0.0001681818181818182,
"loss": 2.2958,
"step": 56
},
{
"epoch": 0.16,
"learning_rate": 0.00016761363636363637,
"loss": 2.2898,
"step": 57
},
{
"epoch": 0.16,
"learning_rate": 0.00016704545454545455,
"loss": 2.2903,
"step": 58
},
{
"epoch": 0.17,
"learning_rate": 0.00016647727272727273,
"loss": 2.3393,
"step": 59
},
{
"epoch": 0.17,
"learning_rate": 0.00016590909090909094,
"loss": 2.3086,
"step": 60
},
{
"epoch": 0.17,
"learning_rate": 0.0001653409090909091,
"loss": 2.2739,
"step": 61
},
{
"epoch": 0.18,
"learning_rate": 0.00016477272727272727,
"loss": 2.252,
"step": 62
},
{
"epoch": 0.18,
"learning_rate": 0.00016420454545454548,
"loss": 2.2868,
"step": 63
},
{
"epoch": 0.18,
"learning_rate": 0.00016363636363636366,
"loss": 2.2301,
"step": 64
},
{
"epoch": 0.18,
"learning_rate": 0.0001630681818181818,
"loss": 2.2563,
"step": 65
},
{
"epoch": 0.19,
"learning_rate": 0.00016250000000000002,
"loss": 2.2552,
"step": 66
},
{
"epoch": 0.19,
"learning_rate": 0.0001619318181818182,
"loss": 2.2615,
"step": 67
},
{
"epoch": 0.19,
"learning_rate": 0.00016136363636363635,
"loss": 2.3237,
"step": 68
},
{
"epoch": 0.2,
"learning_rate": 0.00016079545454545456,
"loss": 2.2741,
"step": 69
},
{
"epoch": 0.2,
"learning_rate": 0.00016022727272727274,
"loss": 2.2152,
"step": 70
},
{
"epoch": 0.2,
"learning_rate": 0.00015965909090909092,
"loss": 2.2558,
"step": 71
},
{
"epoch": 0.2,
"learning_rate": 0.0001590909090909091,
"loss": 2.2293,
"step": 72
},
{
"epoch": 0.21,
"learning_rate": 0.00015852272727272728,
"loss": 2.2278,
"step": 73
},
{
"epoch": 0.21,
"learning_rate": 0.00015795454545454546,
"loss": 2.2583,
"step": 74
},
{
"epoch": 0.21,
"learning_rate": 0.00015738636363636364,
"loss": 2.2609,
"step": 75
},
{
"epoch": 0.22,
"learning_rate": 0.00015681818181818182,
"loss": 2.2955,
"step": 76
},
{
"epoch": 0.22,
"learning_rate": 0.00015625,
"loss": 2.2359,
"step": 77
},
{
"epoch": 0.22,
"learning_rate": 0.00015568181818181818,
"loss": 2.2427,
"step": 78
},
{
"epoch": 0.22,
"learning_rate": 0.00015511363636363636,
"loss": 2.2632,
"step": 79
},
{
"epoch": 0.23,
"learning_rate": 0.00015454545454545454,
"loss": 2.2649,
"step": 80
},
{
"epoch": 0.23,
"learning_rate": 0.00015397727272727272,
"loss": 2.2347,
"step": 81
},
{
"epoch": 0.23,
"learning_rate": 0.00015340909090909093,
"loss": 2.2439,
"step": 82
},
{
"epoch": 0.24,
"learning_rate": 0.00015284090909090909,
"loss": 2.267,
"step": 83
},
{
"epoch": 0.24,
"learning_rate": 0.00015227272727272727,
"loss": 2.2682,
"step": 84
},
{
"epoch": 0.24,
"learning_rate": 0.00015170454545454547,
"loss": 2.2477,
"step": 85
},
{
"epoch": 0.24,
"learning_rate": 0.00015113636363636365,
"loss": 2.2534,
"step": 86
},
{
"epoch": 0.25,
"learning_rate": 0.0001505681818181818,
"loss": 2.2609,
"step": 87
},
{
"epoch": 0.25,
"learning_rate": 0.00015000000000000001,
"loss": 2.2381,
"step": 88
},
{
"epoch": 0.25,
"learning_rate": 0.0001494318181818182,
"loss": 2.2505,
"step": 89
},
{
"epoch": 0.26,
"learning_rate": 0.00014886363636363635,
"loss": 2.2628,
"step": 90
},
{
"epoch": 0.26,
"learning_rate": 0.00014829545454545455,
"loss": 2.1874,
"step": 91
},
{
"epoch": 0.26,
"learning_rate": 0.00014772727272727274,
"loss": 2.2024,
"step": 92
},
{
"epoch": 0.26,
"learning_rate": 0.00014715909090909092,
"loss": 2.2453,
"step": 93
},
{
"epoch": 0.27,
"learning_rate": 0.0001465909090909091,
"loss": 2.259,
"step": 94
},
{
"epoch": 0.27,
"learning_rate": 0.00014602272727272728,
"loss": 2.2304,
"step": 95
},
{
"epoch": 0.27,
"learning_rate": 0.00014545454545454546,
"loss": 2.2179,
"step": 96
},
{
"epoch": 0.28,
"learning_rate": 0.00014488636363636366,
"loss": 2.2497,
"step": 97
},
{
"epoch": 0.28,
"learning_rate": 0.00014431818181818182,
"loss": 2.2458,
"step": 98
},
{
"epoch": 0.28,
"learning_rate": 0.00014375,
"loss": 2.2438,
"step": 99
},
{
"epoch": 0.28,
"learning_rate": 0.0001431818181818182,
"loss": 2.2461,
"step": 100
}
],
"logging_steps": 1,
"max_steps": 352,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2.5310590559514624e+17,
"trial_name": null,
"trial_params": null
}
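
The JSON above is the standard `trainer_state.json` that the Hugging Face `Trainer` writes into each checkpoint folder: per-step entries in `log_history` plus the schedule metadata (`global_step`, `max_steps`, `logging_steps`, `save_steps`). A minimal sketch for inspecting it offline, assuming the checkpoint folder has been downloaded locally (the path below is illustrative, not part of this repo's code):

```python
import json

# Load this checkpoint's trainer_state.json and summarize the logged loss.
# Adjust the path to wherever the checkpoint folder was downloaded.
path = "Bloomz_Lora/checkpoint-100/trainer_state.json"

with open(path, "r", encoding="utf-8") as f:
    state = json.load(f)

steps = [entry["step"] for entry in state["log_history"]]
losses = [entry["loss"] for entry in state["log_history"]]

print(f"global_step: {state['global_step']} / max_steps: {state['max_steps']}")
print(f"epoch: {state['epoch']:.3f}")
print(f"loss: {losses[0]:.4f} (step {steps[0]}) -> {losses[-1]:.4f} (step {steps[-1]})")
```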