{
"best_metric": 1.4968100786209106,
"best_model_checkpoint": "./pippa-sharegpt-13b-qlora/checkpoint-150",
"epoch": 1.58311345646438,
"eval_steps": 50,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2e-05,
"loss": 1.9036,
"step": 1
},
{
"epoch": 0.02,
"learning_rate": 4e-05,
"loss": 1.9209,
"step": 2
},
{
"epoch": 0.03,
"learning_rate": 6e-05,
"loss": 1.9161,
"step": 3
},
{
"epoch": 0.04,
"learning_rate": 8e-05,
"loss": 1.8722,
"step": 4
},
{
"epoch": 0.05,
"learning_rate": 0.0001,
"loss": 1.866,
"step": 5
},
{
"epoch": 0.06,
"learning_rate": 0.00012,
"loss": 1.872,
"step": 6
},
{
"epoch": 0.07,
"learning_rate": 0.00014,
"loss": 1.9138,
"step": 7
},
{
"epoch": 0.08,
"learning_rate": 0.00016,
"loss": 1.8785,
"step": 8
},
{
"epoch": 0.09,
"learning_rate": 0.00018,
"loss": 1.8013,
"step": 9
},
{
"epoch": 0.11,
"learning_rate": 0.0002,
"loss": 1.7859,
"step": 10
},
{
"epoch": 0.12,
"learning_rate": 0.00019999332998034514,
"loss": 1.7805,
"step": 11
},
{
"epoch": 0.13,
"learning_rate": 0.00019997332081116373,
"loss": 1.7716,
"step": 12
},
{
"epoch": 0.14,
"learning_rate": 0.00019993997516168689,
"loss": 1.7453,
"step": 13
},
{
"epoch": 0.15,
"learning_rate": 0.00019989329748023725,
"loss": 1.7306,
"step": 14
},
{
"epoch": 0.16,
"learning_rate": 0.00019983329399363598,
"loss": 1.7535,
"step": 15
},
{
"epoch": 0.17,
"learning_rate": 0.0001997599727063717,
"loss": 1.7811,
"step": 16
},
{
"epoch": 0.18,
"learning_rate": 0.000199673343399533,
"loss": 1.7455,
"step": 17
},
{
"epoch": 0.19,
"learning_rate": 0.00019957341762950344,
"loss": 1.748,
"step": 18
},
{
"epoch": 0.2,
"learning_rate": 0.0001994602087264201,
"loss": 1.723,
"step": 19
},
{
"epoch": 0.21,
"learning_rate": 0.00019933373179239502,
"loss": 1.7539,
"step": 20
},
{
"epoch": 0.22,
"learning_rate": 0.000199194003699501,
"loss": 1.7293,
"step": 21
},
{
"epoch": 0.23,
"learning_rate": 0.0001990410430875205,
"loss": 1.6857,
"step": 22
},
{
"epoch": 0.24,
"learning_rate": 0.0001988748703614594,
"loss": 1.7209,
"step": 23
},
{
"epoch": 0.25,
"learning_rate": 0.00019869550768882455,
"loss": 1.7127,
"step": 24
},
{
"epoch": 0.26,
"learning_rate": 0.0001985029789966671,
"loss": 1.7024,
"step": 25
},
{
"epoch": 0.27,
"learning_rate": 0.0001982973099683902,
"loss": 1.6927,
"step": 26
},
{
"epoch": 0.28,
"learning_rate": 0.00019807852804032305,
"loss": 1.6584,
"step": 27
},
{
"epoch": 0.3,
"learning_rate": 0.0001978466623980609,
"loss": 1.7149,
"step": 28
},
{
"epoch": 0.31,
"learning_rate": 0.00019760174397257156,
"loss": 1.6815,
"step": 29
},
{
"epoch": 0.32,
"learning_rate": 0.0001973438054360693,
"loss": 1.7067,
"step": 30
},
{
"epoch": 0.33,
"learning_rate": 0.00019707288119765623,
"loss": 1.6799,
"step": 31
},
{
"epoch": 0.34,
"learning_rate": 0.00019678900739873226,
"loss": 1.7206,
"step": 32
},
{
"epoch": 0.35,
"learning_rate": 0.0001964922219081738,
"loss": 1.6458,
"step": 33
},
{
"epoch": 0.36,
"learning_rate": 0.00019618256431728194,
"loss": 1.6649,
"step": 34
},
{
"epoch": 0.37,
"learning_rate": 0.00019586007593450097,
"loss": 1.6684,
"step": 35
},
{
"epoch": 0.38,
"learning_rate": 0.000195524799779908,
"loss": 1.7411,
"step": 36
},
{
"epoch": 0.39,
"learning_rate": 0.00019517678057947384,
"loss": 1.6678,
"step": 37
},
{
"epoch": 0.4,
"learning_rate": 0.0001948160647590966,
"loss": 1.6738,
"step": 38
},
{
"epoch": 0.41,
"learning_rate": 0.00019444270043840852,
"loss": 1.6804,
"step": 39
},
{
"epoch": 0.42,
"learning_rate": 0.00019405673742435678,
"loss": 1.6728,
"step": 40
},
{
"epoch": 0.43,
"learning_rate": 0.00019365822720455916,
"loss": 1.6941,
"step": 41
},
{
"epoch": 0.44,
"learning_rate": 0.00019324722294043558,
"loss": 1.6861,
"step": 42
},
{
"epoch": 0.45,
"learning_rate": 0.00019282377946011652,
"loss": 1.6751,
"step": 43
},
{
"epoch": 0.46,
"learning_rate": 0.0001923879532511287,
"loss": 1.7175,
"step": 44
},
{
"epoch": 0.47,
"learning_rate": 0.00019193980245285966,
"loss": 1.6454,
"step": 45
},
{
"epoch": 0.49,
"learning_rate": 0.0001914793868488021,
"loss": 1.6934,
"step": 46
},
{
"epoch": 0.5,
"learning_rate": 0.0001910067678585786,
"loss": 1.6534,
"step": 47
},
{
"epoch": 0.51,
"learning_rate": 0.00019052200852974819,
"loss": 1.6797,
"step": 48
},
{
"epoch": 0.52,
"learning_rate": 0.00019002517352939598,
"loss": 1.6566,
"step": 49
},
{
"epoch": 0.53,
"learning_rate": 0.00018951632913550626,
"loss": 1.657,
"step": 50
},
{
"epoch": 0.53,
"eval_loss": 1.653587818145752,
"eval_runtime": 200.2473,
"eval_samples_per_second": 6.672,
"eval_steps_per_second": 0.21,
"step": 50
},
{
"epoch": 0.54,
"learning_rate": 0.0001889955432281212,
"loss": 1.6736,
"step": 51
},
{
"epoch": 0.55,
"learning_rate": 0.00018846288528028555,
"loss": 1.6696,
"step": 52
},
{
"epoch": 0.56,
"learning_rate": 0.00018791842634877898,
"loss": 1.6429,
"step": 53
},
{
"epoch": 0.57,
"learning_rate": 0.00018736223906463696,
"loss": 1.608,
"step": 54
},
{
"epoch": 0.58,
"learning_rate": 0.00018679439762346185,
"loss": 1.6245,
"step": 55
},
{
"epoch": 0.59,
"learning_rate": 0.00018621497777552507,
"loss": 1.6199,
"step": 56
},
{
"epoch": 0.6,
"learning_rate": 0.00018562405681566216,
"loss": 1.7028,
"step": 57
},
{
"epoch": 0.61,
"learning_rate": 0.00018502171357296144,
"loss": 1.6372,
"step": 58
},
{
"epoch": 0.62,
"learning_rate": 0.00018440802840024822,
"loss": 1.6198,
"step": 59
},
{
"epoch": 0.63,
"learning_rate": 0.00018378308316336584,
"loss": 1.6061,
"step": 60
},
{
"epoch": 0.64,
"learning_rate": 0.00018314696123025454,
"loss": 1.6051,
"step": 61
},
{
"epoch": 0.65,
"learning_rate": 0.00018249974745983023,
"loss": 1.6257,
"step": 62
},
{
"epoch": 0.66,
"learning_rate": 0.00018184152819066435,
"loss": 1.6295,
"step": 63
},
{
"epoch": 0.68,
"learning_rate": 0.00018117239122946615,
"loss": 1.6382,
"step": 64
},
{
"epoch": 0.69,
"learning_rate": 0.0001804924258393692,
"loss": 1.6032,
"step": 65
},
{
"epoch": 0.7,
"learning_rate": 0.000179801722728024,
"loss": 1.652,
"step": 66
},
{
"epoch": 0.71,
"learning_rate": 0.00017910037403549693,
"loss": 1.6431,
"step": 67
},
{
"epoch": 0.72,
"learning_rate": 0.00017838847332197938,
"loss": 1.6482,
"step": 68
},
{
"epoch": 0.73,
"learning_rate": 0.00017766611555530636,
"loss": 1.6276,
"step": 69
},
{
"epoch": 0.74,
"learning_rate": 0.00017693339709828792,
"loss": 1.611,
"step": 70
},
{
"epoch": 0.75,
"learning_rate": 0.00017619041569585418,
"loss": 1.6309,
"step": 71
},
{
"epoch": 0.76,
"learning_rate": 0.0001754372704620164,
"loss": 1.6316,
"step": 72
},
{
"epoch": 0.77,
"learning_rate": 0.00017467406186664474,
"loss": 1.5779,
"step": 73
},
{
"epoch": 0.78,
"learning_rate": 0.00017390089172206592,
"loss": 1.6015,
"step": 74
},
{
"epoch": 0.79,
"learning_rate": 0.0001731178631694811,
"loss": 1.6133,
"step": 75
},
{
"epoch": 0.8,
"learning_rate": 0.00017232508066520702,
"loss": 1.6048,
"step": 76
},
{
"epoch": 0.81,
"learning_rate": 0.00017152264996674136,
"loss": 1.5792,
"step": 77
},
{
"epoch": 0.82,
"learning_rate": 0.00017071067811865476,
"loss": 1.5756,
"step": 78
},
{
"epoch": 0.83,
"learning_rate": 0.00016988927343831095,
"loss": 1.5497,
"step": 79
},
{
"epoch": 0.84,
"learning_rate": 0.00016905854550141716,
"loss": 1.5604,
"step": 80
},
{
"epoch": 0.85,
"learning_rate": 0.00016821860512740671,
"loss": 1.5722,
"step": 81
},
{
"epoch": 0.87,
"learning_rate": 0.00016736956436465573,
"loss": 1.5943,
"step": 82
},
{
"epoch": 0.88,
"learning_rate": 0.00016651153647553567,
"loss": 1.538,
"step": 83
},
{
"epoch": 0.89,
"learning_rate": 0.00016564463592130428,
"loss": 1.5875,
"step": 84
},
{
"epoch": 0.9,
"learning_rate": 0.0001647689783468362,
"loss": 1.5835,
"step": 85
},
{
"epoch": 0.91,
"learning_rate": 0.00016388468056519612,
"loss": 1.5724,
"step": 86
},
{
"epoch": 0.92,
"learning_rate": 0.00016299186054205577,
"loss": 1.573,
"step": 87
},
{
"epoch": 0.93,
"learning_rate": 0.00016209063737995715,
"loss": 1.543,
"step": 88
},
{
"epoch": 0.94,
"learning_rate": 0.00016118113130242432,
"loss": 1.5438,
"step": 89
},
{
"epoch": 0.95,
"learning_rate": 0.00016026346363792567,
"loss": 1.5711,
"step": 90
},
{
"epoch": 0.96,
"learning_rate": 0.00015933775680368822,
"loss": 1.5542,
"step": 91
},
{
"epoch": 0.97,
"learning_rate": 0.00015840413428936767,
"loss": 1.5209,
"step": 92
},
{
"epoch": 0.98,
"learning_rate": 0.0001574627206405744,
"loss": 1.569,
"step": 93
},
{
"epoch": 0.99,
"learning_rate": 0.0001565136414422592,
"loss": 1.5657,
"step": 94
},
{
"epoch": 1.0,
"learning_rate": 0.00015555702330196023,
"loss": 1.5248,
"step": 95
},
{
"epoch": 1.01,
"learning_rate": 0.00015459299383291345,
"loss": 1.5482,
"step": 96
},
{
"epoch": 1.02,
"learning_rate": 0.000153621681637029,
"loss": 1.5186,
"step": 97
},
{
"epoch": 1.03,
"learning_rate": 0.0001526432162877356,
"loss": 1.5363,
"step": 98
},
{
"epoch": 1.04,
"learning_rate": 0.00015165772831269547,
"loss": 1.5221,
"step": 99
},
{
"epoch": 1.06,
"learning_rate": 0.00015066534917639195,
"loss": 1.5071,
"step": 100
},
{
"epoch": 1.06,
"eval_loss": 1.541548728942871,
"eval_runtime": 200.1636,
"eval_samples_per_second": 6.675,
"eval_steps_per_second": 0.21,
"step": 100
},
{
"epoch": 1.07,
"learning_rate": 0.00014966621126259183,
"loss": 1.5179,
"step": 101
},
{
"epoch": 1.08,
"learning_rate": 0.00014866044785668563,
"loss": 1.4923,
"step": 102
},
{
"epoch": 1.09,
"learning_rate": 0.00014764819312790707,
"loss": 1.5733,
"step": 103
},
{
"epoch": 1.1,
"learning_rate": 0.0001466295821114348,
"loss": 1.4586,
"step": 104
},
{
"epoch": 1.11,
"learning_rate": 0.00014560475069037894,
"loss": 1.5379,
"step": 105
},
{
"epoch": 1.12,
"learning_rate": 0.00014457383557765386,
"loss": 1.5059,
"step": 106
},
{
"epoch": 1.13,
"learning_rate": 0.00014353697429774084,
"loss": 1.512,
"step": 107
},
{
"epoch": 1.14,
"learning_rate": 0.0001424943051683422,
"loss": 1.4908,
"step": 108
},
{
"epoch": 1.15,
"learning_rate": 0.0001414459672819297,
"loss": 1.5067,
"step": 109
},
{
"epoch": 1.16,
"learning_rate": 0.00014039210048718949,
"loss": 1.4908,
"step": 110
},
{
"epoch": 1.17,
"learning_rate": 0.00013933284537036625,
"loss": 1.5299,
"step": 111
},
{
"epoch": 1.18,
"learning_rate": 0.000138268343236509,
"loss": 1.4836,
"step": 112
},
{
"epoch": 1.19,
"learning_rate": 0.00013719873609062077,
"loss": 1.5285,
"step": 113
},
{
"epoch": 1.2,
"learning_rate": 0.00013612416661871533,
"loss": 1.5493,
"step": 114
},
{
"epoch": 1.21,
"learning_rate": 0.0001350447781687826,
"loss": 1.4915,
"step": 115
},
{
"epoch": 1.22,
"learning_rate": 0.00013396071473166613,
"loss": 1.5139,
"step": 116
},
{
"epoch": 1.23,
"learning_rate": 0.00013287212092185464,
"loss": 1.4589,
"step": 117
},
{
"epoch": 1.25,
"learning_rate": 0.00013177914195819016,
"loss": 1.4869,
"step": 118
},
{
"epoch": 1.26,
"learning_rate": 0.00013068192364449618,
"loss": 1.5081,
"step": 119
},
{
"epoch": 1.27,
"learning_rate": 0.00012958061235012706,
"loss": 1.5172,
"step": 120
},
{
"epoch": 1.28,
"learning_rate": 0.0001284753549904423,
"loss": 1.5195,
"step": 121
},
{
"epoch": 1.29,
"learning_rate": 0.0001273662990072083,
"loss": 1.4635,
"step": 122
},
{
"epoch": 1.3,
"learning_rate": 0.00012625359234892907,
"loss": 1.5036,
"step": 123
},
{
"epoch": 1.31,
"learning_rate": 0.0001251373834511103,
"loss": 1.5203,
"step": 124
},
{
"epoch": 1.32,
"learning_rate": 0.00012401782121645766,
"loss": 1.5047,
"step": 125
},
{
"epoch": 1.33,
"learning_rate": 0.0001228950549950134,
"loss": 1.5047,
"step": 126
},
{
"epoch": 1.34,
"learning_rate": 0.00012176923456423284,
"loss": 1.4715,
"step": 127
},
{
"epoch": 1.35,
"learning_rate": 0.00012064051010900397,
"loss": 1.4884,
"step": 128
},
{
"epoch": 1.36,
"learning_rate": 0.00011950903220161285,
"loss": 1.4947,
"step": 129
},
{
"epoch": 1.37,
"learning_rate": 0.00011837495178165706,
"loss": 1.4665,
"step": 130
},
{
"epoch": 1.38,
"learning_rate": 0.00011723842013591044,
"loss": 1.4888,
"step": 131
},
{
"epoch": 1.39,
"learning_rate": 0.00011609958887814129,
"loss": 1.52,
"step": 132
},
{
"epoch": 1.4,
"learning_rate": 0.00011495860992888712,
"loss": 1.4305,
"step": 133
},
{
"epoch": 1.41,
"learning_rate": 0.00011381563549518823,
"loss": 1.4865,
"step": 134
},
{
"epoch": 1.42,
"learning_rate": 0.00011267081805028339,
"loss": 1.5155,
"step": 135
},
{
"epoch": 1.44,
"learning_rate": 0.00011152431031326978,
"loss": 1.4952,
"step": 136
},
{
"epoch": 1.45,
"learning_rate": 0.00011037626522873019,
"loss": 1.4765,
"step": 137
},
{
"epoch": 1.46,
"learning_rate": 0.00010922683594633021,
"loss": 1.4729,
"step": 138
},
{
"epoch": 1.47,
"learning_rate": 0.00010807617580038796,
"loss": 1.481,
"step": 139
},
{
"epoch": 1.48,
"learning_rate": 0.00010692443828941918,
"loss": 1.4877,
"step": 140
},
{
"epoch": 1.49,
"learning_rate": 0.00010577177705566061,
"loss": 1.4884,
"step": 141
},
{
"epoch": 1.5,
"learning_rate": 0.00010461834586457398,
"loss": 1.4786,
"step": 142
},
{
"epoch": 1.51,
"learning_rate": 0.00010346429858433352,
"loss": 1.4585,
"step": 143
},
{
"epoch": 1.52,
"learning_rate": 0.00010230978916530012,
"loss": 1.4867,
"step": 144
},
{
"epoch": 1.53,
"learning_rate": 0.00010115497161948409,
"loss": 1.487,
"step": 145
},
{
"epoch": 1.54,
"learning_rate": 0.0001,
"loss": 1.4693,
"step": 146
},
{
"epoch": 1.55,
"learning_rate": 9.884502838051595e-05,
"loss": 1.4391,
"step": 147
},
{
"epoch": 1.56,
"learning_rate": 9.76902108346999e-05,
"loss": 1.493,
"step": 148
},
{
"epoch": 1.57,
"learning_rate": 9.653570141566653e-05,
"loss": 1.5152,
"step": 149
},
{
"epoch": 1.58,
"learning_rate": 9.538165413542607e-05,
"loss": 1.4766,
"step": 150
},
{
"epoch": 1.58,
"eval_loss": 1.4968100786209106,
"eval_runtime": 199.9343,
"eval_samples_per_second": 6.682,
"eval_steps_per_second": 0.21,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 282,
"num_train_epochs": 3,
"save_steps": 50,
"total_flos": 1.546341313689944e+18,
"trial_name": null,
"trial_params": null
}