samantha-1.1-llama-7b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9702970297029703,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.6064,
"step": 1
},
{
"epoch": 0.04,
"learning_rate": 6.666666666666667e-06,
"loss": 1.6143,
"step": 2
},
{
"epoch": 0.06,
"learning_rate": 1e-05,
"loss": 1.6016,
"step": 3
},
{
"epoch": 0.08,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.5439,
"step": 4
},
{
"epoch": 0.1,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.418,
"step": 5
},
{
"epoch": 0.12,
"learning_rate": 2e-05,
"loss": 1.3711,
"step": 6
},
{
"epoch": 0.14,
"learning_rate": 1.999762027079909e-05,
"loss": 1.5273,
"step": 7
},
{
"epoch": 0.16,
"learning_rate": 1.999048221581858e-05,
"loss": 1.3262,
"step": 8
},
{
"epoch": 0.18,
"learning_rate": 1.9978589232386036e-05,
"loss": 1.3193,
"step": 9
},
{
"epoch": 0.2,
"learning_rate": 1.9961946980917457e-05,
"loss": 1.2812,
"step": 10
},
{
"epoch": 0.22,
"learning_rate": 1.9940563382223196e-05,
"loss": 1.2598,
"step": 11
},
{
"epoch": 0.24,
"learning_rate": 1.9914448613738107e-05,
"loss": 1.2188,
"step": 12
},
{
"epoch": 0.26,
"learning_rate": 1.988361510467761e-05,
"loss": 1.251,
"step": 13
},
{
"epoch": 0.28,
"learning_rate": 1.9848077530122083e-05,
"loss": 1.2041,
"step": 14
},
{
"epoch": 0.3,
"learning_rate": 1.9807852804032306e-05,
"loss": 1.2031,
"step": 15
},
{
"epoch": 0.32,
"learning_rate": 1.9762960071199334e-05,
"loss": 1.1963,
"step": 16
},
{
"epoch": 0.34,
"learning_rate": 1.9713420698132614e-05,
"loss": 1.1914,
"step": 17
},
{
"epoch": 0.36,
"learning_rate": 1.9659258262890683e-05,
"loss": 1.1611,
"step": 18
},
{
"epoch": 0.38,
"learning_rate": 1.960049854385929e-05,
"loss": 1.1426,
"step": 19
},
{
"epoch": 0.4,
"learning_rate": 1.953716950748227e-05,
"loss": 1.1846,
"step": 20
},
{
"epoch": 0.4,
"eval_loss": 1.1589336395263672,
"eval_runtime": 8.8572,
"eval_samples_per_second": 14.79,
"eval_steps_per_second": 0.339,
"step": 20
},
{
"epoch": 0.42,
"learning_rate": 1.946930129495106e-05,
"loss": 1.127,
"step": 21
},
{
"epoch": 0.44,
"learning_rate": 1.9396926207859085e-05,
"loss": 1.1592,
"step": 22
},
{
"epoch": 0.46,
"learning_rate": 1.932007869282799e-05,
"loss": 1.1309,
"step": 23
},
{
"epoch": 0.48,
"learning_rate": 1.9238795325112867e-05,
"loss": 1.1309,
"step": 24
},
{
"epoch": 0.5,
"learning_rate": 1.9153114791194475e-05,
"loss": 1.1318,
"step": 25
},
{
"epoch": 0.51,
"learning_rate": 1.9063077870366504e-05,
"loss": 1.123,
"step": 26
},
{
"epoch": 0.53,
"learning_rate": 1.8968727415326885e-05,
"loss": 1.1475,
"step": 27
},
{
"epoch": 0.55,
"learning_rate": 1.887010833178222e-05,
"loss": 1.123,
"step": 28
},
{
"epoch": 0.57,
"learning_rate": 1.876726755707508e-05,
"loss": 1.1143,
"step": 29
},
{
"epoch": 0.59,
"learning_rate": 1.866025403784439e-05,
"loss": 1.0967,
"step": 30
},
{
"epoch": 0.61,
"learning_rate": 1.854911870672947e-05,
"loss": 1.083,
"step": 31
},
{
"epoch": 0.63,
"learning_rate": 1.843391445812886e-05,
"loss": 1.0859,
"step": 32
},
{
"epoch": 0.65,
"learning_rate": 1.8314696123025456e-05,
"loss": 1.0771,
"step": 33
},
{
"epoch": 0.67,
"learning_rate": 1.819152044288992e-05,
"loss": 1.1133,
"step": 34
},
{
"epoch": 0.69,
"learning_rate": 1.806444604267483e-05,
"loss": 1.0762,
"step": 35
},
{
"epoch": 0.71,
"learning_rate": 1.7933533402912354e-05,
"loss": 1.0674,
"step": 36
},
{
"epoch": 0.73,
"learning_rate": 1.7798844830928818e-05,
"loss": 1.0791,
"step": 37
},
{
"epoch": 0.75,
"learning_rate": 1.766044443118978e-05,
"loss": 1.0854,
"step": 38
},
{
"epoch": 0.77,
"learning_rate": 1.7518398074789776e-05,
"loss": 1.083,
"step": 39
},
{
"epoch": 0.79,
"learning_rate": 1.737277336810124e-05,
"loss": 1.085,
"step": 40
},
{
"epoch": 0.79,
"eval_loss": 1.0905295610427856,
"eval_runtime": 8.5437,
"eval_samples_per_second": 15.333,
"eval_steps_per_second": 0.351,
"step": 40
},
{
"epoch": 0.81,
"learning_rate": 1.7223639620597556e-05,
"loss": 1.0605,
"step": 41
},
{
"epoch": 0.83,
"learning_rate": 1.7071067811865477e-05,
"loss": 1.0693,
"step": 42
},
{
"epoch": 0.85,
"learning_rate": 1.6915130557822698e-05,
"loss": 1.0859,
"step": 43
},
{
"epoch": 0.87,
"learning_rate": 1.6755902076156606e-05,
"loss": 1.083,
"step": 44
},
{
"epoch": 0.89,
"learning_rate": 1.659345815100069e-05,
"loss": 1.1016,
"step": 45
},
{
"epoch": 0.91,
"learning_rate": 1.6427876096865394e-05,
"loss": 1.0977,
"step": 46
},
{
"epoch": 0.93,
"learning_rate": 1.6259234721840595e-05,
"loss": 1.0693,
"step": 47
},
{
"epoch": 0.95,
"learning_rate": 1.608761429008721e-05,
"loss": 1.0879,
"step": 48
},
{
"epoch": 0.97,
"learning_rate": 1.5913096483635827e-05,
"loss": 1.0811,
"step": 49
},
{
"epoch": 0.99,
"learning_rate": 1.573576436351046e-05,
"loss": 1.0742,
"step": 50
},
{
"epoch": 1.01,
"learning_rate": 1.5555702330196024e-05,
"loss": 1.0176,
"step": 51
},
{
"epoch": 1.03,
"learning_rate": 1.5372996083468242e-05,
"loss": 0.9863,
"step": 52
},
{
"epoch": 1.05,
"learning_rate": 1.5187732581605217e-05,
"loss": 0.9824,
"step": 53
},
{
"epoch": 1.07,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.9653,
"step": 54
},
{
"epoch": 1.09,
"learning_rate": 1.4809887689193878e-05,
"loss": 0.9629,
"step": 55
},
{
"epoch": 1.11,
"learning_rate": 1.4617486132350343e-05,
"loss": 0.9341,
"step": 56
},
{
"epoch": 1.13,
"learning_rate": 1.4422886902190014e-05,
"loss": 0.9492,
"step": 57
},
{
"epoch": 1.15,
"learning_rate": 1.4226182617406996e-05,
"loss": 0.9668,
"step": 58
},
{
"epoch": 1.17,
"learning_rate": 1.4027466898587375e-05,
"loss": 0.9585,
"step": 59
},
{
"epoch": 1.19,
"learning_rate": 1.3826834323650899e-05,
"loss": 0.9399,
"step": 60
},
{
"epoch": 1.19,
"eval_loss": 1.0739504098892212,
"eval_runtime": 8.5451,
"eval_samples_per_second": 15.33,
"eval_steps_per_second": 0.351,
"step": 60
},
{
"epoch": 1.21,
"learning_rate": 1.3624380382837017e-05,
"loss": 0.9312,
"step": 61
},
{
"epoch": 1.23,
"learning_rate": 1.342020143325669e-05,
"loss": 0.9727,
"step": 62
},
{
"epoch": 1.25,
"learning_rate": 1.3214394653031616e-05,
"loss": 0.9629,
"step": 63
},
{
"epoch": 1.27,
"learning_rate": 1.300705799504273e-05,
"loss": 0.9502,
"step": 64
},
{
"epoch": 1.29,
"learning_rate": 1.2798290140309924e-05,
"loss": 0.9917,
"step": 65
},
{
"epoch": 1.31,
"learning_rate": 1.2588190451025209e-05,
"loss": 0.9331,
"step": 66
},
{
"epoch": 1.33,
"learning_rate": 1.2376858923261732e-05,
"loss": 0.9419,
"step": 67
},
{
"epoch": 1.35,
"learning_rate": 1.2164396139381029e-05,
"loss": 0.9551,
"step": 68
},
{
"epoch": 1.37,
"learning_rate": 1.1950903220161286e-05,
"loss": 0.9482,
"step": 69
},
{
"epoch": 1.39,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.9692,
"step": 70
},
{
"epoch": 1.41,
"learning_rate": 1.1521233861899168e-05,
"loss": 0.96,
"step": 71
},
{
"epoch": 1.43,
"learning_rate": 1.130526192220052e-05,
"loss": 0.9565,
"step": 72
},
{
"epoch": 1.45,
"learning_rate": 1.1088668748519646e-05,
"loss": 0.9575,
"step": 73
},
{
"epoch": 1.47,
"learning_rate": 1.0871557427476585e-05,
"loss": 0.957,
"step": 74
},
{
"epoch": 1.49,
"learning_rate": 1.0654031292301432e-05,
"loss": 0.9331,
"step": 75
},
{
"epoch": 1.5,
"learning_rate": 1.0436193873653362e-05,
"loss": 0.9536,
"step": 76
},
{
"epoch": 1.52,
"learning_rate": 1.0218148850345613e-05,
"loss": 0.9302,
"step": 77
},
{
"epoch": 1.54,
"learning_rate": 1e-05,
"loss": 0.9502,
"step": 78
},
{
"epoch": 1.56,
"learning_rate": 9.78185114965439e-06,
"loss": 0.937,
"step": 79
},
{
"epoch": 1.58,
"learning_rate": 9.563806126346643e-06,
"loss": 0.9287,
"step": 80
},
{
"epoch": 1.58,
"eval_loss": 1.0581464767456055,
"eval_runtime": 8.5408,
"eval_samples_per_second": 15.338,
"eval_steps_per_second": 0.351,
"step": 80
},
{
"epoch": 1.6,
"learning_rate": 9.34596870769857e-06,
"loss": 0.9463,
"step": 81
},
{
"epoch": 1.62,
"learning_rate": 9.128442572523418e-06,
"loss": 0.9336,
"step": 82
},
{
"epoch": 1.64,
"learning_rate": 8.911331251480357e-06,
"loss": 0.9463,
"step": 83
},
{
"epoch": 1.66,
"learning_rate": 8.694738077799487e-06,
"loss": 0.959,
"step": 84
},
{
"epoch": 1.68,
"learning_rate": 8.478766138100834e-06,
"loss": 0.9297,
"step": 85
},
{
"epoch": 1.7,
"learning_rate": 8.263518223330698e-06,
"loss": 0.9512,
"step": 86
},
{
"epoch": 1.72,
"learning_rate": 8.04909677983872e-06,
"loss": 0.9556,
"step": 87
},
{
"epoch": 1.74,
"learning_rate": 7.835603860618973e-06,
"loss": 0.9346,
"step": 88
},
{
"epoch": 1.76,
"learning_rate": 7.623141076738271e-06,
"loss": 0.9341,
"step": 89
},
{
"epoch": 1.78,
"learning_rate": 7.411809548974792e-06,
"loss": 0.9253,
"step": 90
},
{
"epoch": 1.8,
"learning_rate": 7.201709859690081e-06,
"loss": 0.9556,
"step": 91
},
{
"epoch": 1.82,
"learning_rate": 6.992942004957271e-06,
"loss": 0.9478,
"step": 92
},
{
"epoch": 1.84,
"learning_rate": 6.785605346968387e-06,
"loss": 0.9346,
"step": 93
},
{
"epoch": 1.86,
"learning_rate": 6.579798566743314e-06,
"loss": 0.9585,
"step": 94
},
{
"epoch": 1.88,
"learning_rate": 6.375619617162985e-06,
"loss": 0.939,
"step": 95
},
{
"epoch": 1.9,
"learning_rate": 6.173165676349103e-06,
"loss": 0.9351,
"step": 96
},
{
"epoch": 1.92,
"learning_rate": 5.97253310141263e-06,
"loss": 0.9312,
"step": 97
},
{
"epoch": 1.94,
"learning_rate": 5.773817382593008e-06,
"loss": 0.9307,
"step": 98
},
{
"epoch": 1.96,
"learning_rate": 5.5771130978099896e-06,
"loss": 0.9482,
"step": 99
},
{
"epoch": 1.98,
"learning_rate": 5.382513867649663e-06,
"loss": 0.9077,
"step": 100
},
{
"epoch": 1.98,
"eval_loss": 1.0503339767456055,
"eval_runtime": 8.5441,
"eval_samples_per_second": 15.332,
"eval_steps_per_second": 0.351,
"step": 100
},
{
"epoch": 2.0,
"learning_rate": 5.190112310806126e-06,
"loss": 0.9731,
"step": 101
},
{
"epoch": 2.02,
"learning_rate": 5.000000000000003e-06,
"loss": 0.8608,
"step": 102
},
{
"epoch": 2.04,
"learning_rate": 4.812267418394784e-06,
"loss": 0.8711,
"step": 103
},
{
"epoch": 2.06,
"learning_rate": 4.627003916531761e-06,
"loss": 0.8706,
"step": 104
},
{
"epoch": 2.08,
"learning_rate": 4.444297669803981e-06,
"loss": 0.8618,
"step": 105
},
{
"epoch": 2.1,
"learning_rate": 4.264235636489542e-06,
"loss": 0.8882,
"step": 106
},
{
"epoch": 2.12,
"learning_rate": 4.086903516364179e-06,
"loss": 0.8594,
"step": 107
},
{
"epoch": 2.14,
"learning_rate": 3.912385709912794e-06,
"loss": 0.8809,
"step": 108
},
{
"epoch": 2.16,
"learning_rate": 3.7407652781594094e-06,
"loss": 0.8423,
"step": 109
},
{
"epoch": 2.18,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.8242,
"step": 110
},
{
"epoch": 2.2,
"learning_rate": 3.4065418489993118e-06,
"loss": 0.8018,
"step": 111
},
{
"epoch": 2.22,
"learning_rate": 3.2440979238433977e-06,
"loss": 0.854,
"step": 112
},
{
"epoch": 2.24,
"learning_rate": 3.0848694421773075e-06,
"loss": 0.8452,
"step": 113
},
{
"epoch": 2.26,
"learning_rate": 2.9289321881345257e-06,
"loss": 0.8564,
"step": 114
},
{
"epoch": 2.28,
"learning_rate": 2.776360379402445e-06,
"loss": 0.855,
"step": 115
},
{
"epoch": 2.3,
"learning_rate": 2.6272266318987606e-06,
"loss": 0.8545,
"step": 116
},
{
"epoch": 2.32,
"learning_rate": 2.4816019252102274e-06,
"loss": 0.8452,
"step": 117
},
{
"epoch": 2.34,
"learning_rate": 2.339555568810221e-06,
"loss": 0.855,
"step": 118
},
{
"epoch": 2.36,
"learning_rate": 2.201155169071184e-06,
"loss": 0.8501,
"step": 119
},
{
"epoch": 2.38,
"learning_rate": 2.0664665970876496e-06,
"loss": 0.8481,
"step": 120
},
{
"epoch": 2.38,
"eval_loss": 1.067867398262024,
"eval_runtime": 8.5474,
"eval_samples_per_second": 15.326,
"eval_steps_per_second": 0.351,
"step": 120
},
{
"epoch": 2.4,
"learning_rate": 1.9355539573251737e-06,
"loss": 0.855,
"step": 121
},
{
"epoch": 2.42,
"learning_rate": 1.808479557110081e-06,
"loss": 0.8335,
"step": 122
},
{
"epoch": 2.44,
"learning_rate": 1.6853038769745466e-06,
"loss": 0.8462,
"step": 123
},
{
"epoch": 2.46,
"learning_rate": 1.566085541871145e-06,
"loss": 0.8374,
"step": 124
},
{
"epoch": 2.48,
"learning_rate": 1.4508812932705364e-06,
"loss": 0.8281,
"step": 125
},
{
"epoch": 2.5,
"learning_rate": 1.339745962155613e-06,
"loss": 0.8408,
"step": 126
},
{
"epoch": 2.51,
"learning_rate": 1.2327324429249232e-06,
"loss": 0.8574,
"step": 127
},
{
"epoch": 2.53,
"learning_rate": 1.129891668217783e-06,
"loss": 0.8486,
"step": 128
},
{
"epoch": 2.55,
"learning_rate": 1.0312725846731174e-06,
"loss": 0.8579,
"step": 129
},
{
"epoch": 2.57,
"learning_rate": 9.369221296335007e-07,
"loss": 0.8604,
"step": 130
},
{
"epoch": 2.59,
"learning_rate": 8.468852088055291e-07,
"loss": 0.8276,
"step": 131
},
{
"epoch": 2.61,
"learning_rate": 7.612046748871327e-07,
"loss": 0.8398,
"step": 132
},
{
"epoch": 2.63,
"learning_rate": 6.799213071720156e-07,
"loss": 0.8369,
"step": 133
},
{
"epoch": 2.65,
"learning_rate": 6.030737921409169e-07,
"loss": 0.8555,
"step": 134
},
{
"epoch": 2.67,
"learning_rate": 5.306987050489442e-07,
"loss": 0.8477,
"step": 135
},
{
"epoch": 2.69,
"learning_rate": 4.628304925177318e-07,
"loss": 0.8276,
"step": 136
},
{
"epoch": 2.71,
"learning_rate": 3.99501456140714e-07,
"loss": 0.8438,
"step": 137
},
{
"epoch": 2.73,
"learning_rate": 3.4074173710931804e-07,
"loss": 0.8442,
"step": 138
},
{
"epoch": 2.75,
"learning_rate": 2.865793018673857e-07,
"loss": 0.8159,
"step": 139
},
{
"epoch": 2.77,
"learning_rate": 2.370399288006664e-07,
"loss": 0.8691,
"step": 140
},
{
"epoch": 2.77,
"eval_loss": 1.067211389541626,
"eval_runtime": 8.5453,
"eval_samples_per_second": 15.33,
"eval_steps_per_second": 0.351,
"step": 140
},
{
"epoch": 2.79,
"learning_rate": 1.921471959676957e-07,
"loss": 0.8594,
"step": 141
},
{
"epoch": 2.81,
"learning_rate": 1.519224698779198e-07,
"loss": 0.8237,
"step": 142
},
{
"epoch": 2.83,
"learning_rate": 1.1638489532239339e-07,
"loss": 0.8335,
"step": 143
},
{
"epoch": 2.85,
"learning_rate": 8.555138626189619e-08,
"loss": 0.8511,
"step": 144
},
{
"epoch": 2.87,
"learning_rate": 5.943661777680354e-08,
"loss": 0.8413,
"step": 145
},
{
"epoch": 2.89,
"learning_rate": 3.805301908254455e-08,
"loss": 0.8564,
"step": 146
},
{
"epoch": 2.91,
"learning_rate": 2.1410767613965212e-08,
"loss": 0.8452,
"step": 147
},
{
"epoch": 2.93,
"learning_rate": 9.517784181422018e-09,
"loss": 0.8452,
"step": 148
},
{
"epoch": 2.95,
"learning_rate": 2.379729200908676e-09,
"loss": 0.8403,
"step": 149
},
{
"epoch": 2.97,
"learning_rate": 0.0,
"loss": 0.8643,
"step": 150
}
],
"max_steps": 150,
"num_train_epochs": 3,
"total_flos": 62211930193920.0,
"trial_name": null,
"trial_params": null
}
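
Note: the learning-rate values in the log above follow a cosine decay with a short linear warmup. The sketch below (not part of the original trainer_state.json) reproduces them for spot-checking; the peak learning rate of 2e-05, the 6 warmup steps, and the 150 total steps are inferred from the logged values rather than stated in this file.

# Minimal sketch: cosine schedule with linear warmup, parameters inferred from the log.
import math

PEAK_LR = 2e-05        # inferred from the value logged at step 6
WARMUP_STEPS = 6       # inferred: lr rises linearly through step 6
TOTAL_STEPS = 150      # matches "max_steps" above

def lr_at(step: int) -> float:
    """Learning rate after `step` optimizer updates."""
    if step < WARMUP_STEPS:
        # linear warmup from 0 to PEAK_LR
        return PEAK_LR * step / WARMUP_STEPS
    # cosine decay from PEAK_LR down to 0 at TOTAL_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot-check against the logged history:
print(lr_at(1))    # ~3.333e-06, matches step 1
print(lr_at(10))   # ~1.99619e-05, matches step 10
print(lr_at(150))  # 0.0, matches step 150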