{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 183,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08196721311475409,
      "grad_norm": 8.290995597839355,
      "learning_rate": 4.990795908619189e-05,
      "loss": 6.3211,
      "step": 5
    },
    {
      "epoch": 0.16393442622950818,
      "grad_norm": 10.25100040435791,
      "learning_rate": 4.9632514067152726e-05,
      "loss": 5.8414,
      "step": 10
    },
    {
      "epoch": 0.2459016393442623,
      "grad_norm": 9.911712646484375,
      "learning_rate": 4.928142498664579e-05,
      "loss": 6.2669,
      "step": 15
    },
    {
      "epoch": 0.32786885245901637,
      "grad_norm": 12.882040977478027,
      "learning_rate": 4.868186180746792e-05,
      "loss": 4.3475,
      "step": 20
    },
    {
      "epoch": 0.4098360655737705,
      "grad_norm": 9.486019134521484,
      "learning_rate": 4.790792261217512e-05,
      "loss": 3.6657,
      "step": 25
    },
    {
      "epoch": 0.4918032786885246,
      "grad_norm": 4.90289306640625,
      "learning_rate": 4.696530612642871e-05,
      "loss": 2.6512,
      "step": 30
    },
    {
      "epoch": 0.5737704918032787,
      "grad_norm": 2.928373336791992,
      "learning_rate": 4.586095309284618e-05,
      "loss": 2.1788,
      "step": 35
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 5.331892967224121,
      "learning_rate": 4.460299516441777e-05,
      "loss": 2.0556,
      "step": 40
    },
    {
      "epoch": 0.7377049180327869,
      "grad_norm": 3.0905227661132812,
      "learning_rate": 4.320069502892462e-05,
      "loss": 2.0827,
      "step": 45
    },
    {
      "epoch": 0.819672131147541,
      "grad_norm": 3.840454339981079,
      "learning_rate": 4.1664378205239085e-05,
      "loss": 1.9863,
      "step": 50
    },
    {
      "epoch": 0.9016393442622951,
      "grad_norm": 2.925448179244995,
      "learning_rate": 4.000535701370921e-05,
      "loss": 2.0797,
      "step": 55
    },
    {
      "epoch": 0.9836065573770492,
      "grad_norm": 4.124454498291016,
      "learning_rate": 3.823584728045463e-05,
      "loss": 1.9855,
      "step": 60
    },
    {
      "epoch": 1.0655737704918034,
      "grad_norm": 2.905304431915283,
      "learning_rate": 3.636887838890265e-05,
      "loss": 1.8565,
      "step": 65
    },
    {
      "epoch": 1.1475409836065573,
      "grad_norm": 2.937238931655884,
      "learning_rate": 3.4418197340879635e-05,
      "loss": 1.7846,
      "step": 70
    },
    {
      "epoch": 1.2295081967213115,
      "grad_norm": 2.9058425426483154,
      "learning_rate": 3.239816753368223e-05,
      "loss": 1.8977,
      "step": 75
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 3.247345447540283,
      "learning_rate": 3.0323662998460393e-05,
      "loss": 1.6528,
      "step": 80
    },
    {
      "epoch": 1.3934426229508197,
      "grad_norm": 3.5700137615203857,
      "learning_rate": 2.8209958878663778e-05,
      "loss": 1.7865,
      "step": 85
    },
    {
      "epoch": 1.4754098360655736,
      "grad_norm": 3.3404250144958496,
      "learning_rate": 2.6072618954988866e-05,
      "loss": 1.6595,
      "step": 90
    },
    {
      "epoch": 1.5573770491803278,
      "grad_norm": 2.9976389408111572,
      "learning_rate": 2.3927381045011136e-05,
      "loss": 1.795,
      "step": 95
    },
    {
      "epoch": 1.639344262295082,
      "grad_norm": 2.907928943634033,
      "learning_rate": 2.1790041121336225e-05,
      "loss": 1.6754,
      "step": 100
    },
    {
      "epoch": 1.721311475409836,
      "grad_norm": 2.8854143619537354,
      "learning_rate": 1.9676337001539612e-05,
      "loss": 1.6433,
      "step": 105
    },
    {
      "epoch": 1.8032786885245902,
      "grad_norm": 3.0124289989471436,
      "learning_rate": 1.760183246631777e-05,
      "loss": 1.5446,
      "step": 110
    },
    {
      "epoch": 1.8852459016393444,
      "grad_norm": 3.203061819076538,
      "learning_rate": 1.558180265912037e-05,
      "loss": 1.5668,
      "step": 115
    },
    {
      "epoch": 1.9672131147540983,
      "grad_norm": 2.9160172939300537,
      "learning_rate": 1.3631121611097364e-05,
      "loss": 1.5542,
      "step": 120
    },
    {
      "epoch": 2.0491803278688523,
      "grad_norm": 3.249051570892334,
      "learning_rate": 1.1764152719545372e-05,
      "loss": 1.4999,
      "step": 125
    },
    {
      "epoch": 2.1311475409836067,
      "grad_norm": 3.0132319927215576,
      "learning_rate": 9.994642986290797e-06,
      "loss": 1.6873,
      "step": 130
    },
    {
      "epoch": 2.2131147540983607,
      "grad_norm": 3.1967945098876953,
      "learning_rate": 8.33562179476092e-06,
      "loss": 1.5267,
      "step": 135
    },
    {
      "epoch": 2.2950819672131146,
      "grad_norm": 3.5244369506835938,
      "learning_rate": 6.799304971075382e-06,
      "loss": 1.6676,
      "step": 140
    },
    {
      "epoch": 2.3770491803278686,
      "grad_norm": 3.073401689529419,
      "learning_rate": 5.397004835582242e-06,
      "loss": 1.7066,
      "step": 145
    },
    {
      "epoch": 2.459016393442623,
      "grad_norm": 2.9026217460632324,
      "learning_rate": 4.139046907153818e-06,
      "loss": 1.4762,
      "step": 150
    },
    {
      "epoch": 2.540983606557377,
      "grad_norm": 2.8583261966705322,
      "learning_rate": 3.0346938735712954e-06,
      "loss": 1.5355,
      "step": 155
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 3.106534242630005,
      "learning_rate": 2.092077387824884e-06,
      "loss": 1.708,
      "step": 160
    },
    {
      "epoch": 2.7049180327868854,
      "grad_norm": 3.100788116455078,
      "learning_rate": 1.3181381925320785e-06,
      "loss": 1.54,
      "step": 165
    },
    {
      "epoch": 2.7868852459016393,
      "grad_norm": 3.149162530899048,
      "learning_rate": 7.185750133542169e-07,
      "loss": 1.5244,
      "step": 170
    },
    {
      "epoch": 2.8688524590163933,
      "grad_norm": 3.3356781005859375,
      "learning_rate": 2.978025977230736e-07,
      "loss": 1.5209,
      "step": 175
    },
    {
      "epoch": 2.9508196721311473,
      "grad_norm": 2.8666839599609375,
      "learning_rate": 5.891920784984184e-08,
      "loss": 1.5363,
      "step": 180
    },
    {
      "epoch": 3.0,
      "step": 183,
      "total_flos": 9540135882719232.0,
      "train_loss": 2.231679132075909,
      "train_runtime": 350.7334,
      "train_samples_per_second": 8.348,
      "train_steps_per_second": 0.522
    }
  ],
  "logging_steps": 5,
  "max_steps": 183,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "total_flos": 9540135882719232.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}