{
  "best_metric": 2.7149553298950195,
  "best_model_checkpoint": "/home/datta0/models/lora_final/gemma-2-9b_pct_ortho/checkpoint-8",
  "epoch": 0.18543259557344063,
  "eval_steps": 8,
  "global_step": 72,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002575452716297787,
      "grad_norm": 6.355404376983643,
      "learning_rate": 1.25e-05,
      "loss": 2.1779,
      "step": 1
    },
    {
      "epoch": 0.010301810865191148,
      "grad_norm": 7.009632587432861,
      "learning_rate": 5e-05,
      "loss": 2.2793,
      "step": 4
    },
    {
      "epoch": 0.020603621730382295,
      "grad_norm": 5.262685298919678,
      "learning_rate": 0.0001,
      "loss": 2.3111,
      "step": 8
    },
    {
      "epoch": 0.020603621730382295,
      "eval_loss": 2.7149553298950195,
      "eval_runtime": 519.9082,
      "eval_samples_per_second": 0.471,
      "eval_steps_per_second": 0.471,
      "step": 8
    },
    {
      "epoch": 0.03090543259557344,
      "grad_norm": 8.695486068725586,
      "learning_rate": 9.997266286704631e-05,
      "loss": 6.4602,
      "step": 12
    },
    {
      "epoch": 0.04120724346076459,
      "grad_norm": 3.0442869663238525,
      "learning_rate": 9.989068136093873e-05,
      "loss": 11.4782,
      "step": 16
    },
    {
      "epoch": 0.04120724346076459,
      "eval_loss": 11.982385635375977,
      "eval_runtime": 308.4334,
      "eval_samples_per_second": 0.794,
      "eval_steps_per_second": 0.794,
      "step": 16
    },
    {
      "epoch": 0.05150905432595573,
      "grad_norm": 1.3675501346588135,
      "learning_rate": 9.975414512725057e-05,
      "loss": 11.9837,
      "step": 20
    },
    {
      "epoch": 0.06181086519114688,
      "grad_norm": 1.629292607307434,
      "learning_rate": 9.956320346634876e-05,
      "loss": 11.9866,
      "step": 24
    },
    {
      "epoch": 0.06181086519114688,
      "eval_loss": 12.02308464050293,
      "eval_runtime": 203.4649,
      "eval_samples_per_second": 1.204,
      "eval_steps_per_second": 1.204,
      "step": 24
    },
    {
      "epoch": 0.07211267605633803,
      "grad_norm": 0.635460615158081,
      "learning_rate": 9.931806517013612e-05,
      "loss": 12.0407,
      "step": 28
    },
    {
      "epoch": 0.08241448692152918,
      "grad_norm": 0.50620037317276,
      "learning_rate": 9.901899829374047e-05,
      "loss": 12.0022,
      "step": 32
    },
    {
      "epoch": 0.08241448692152918,
      "eval_loss": 12.044427871704102,
      "eval_runtime": 327.7452,
      "eval_samples_per_second": 0.748,
      "eval_steps_per_second": 0.748,
      "step": 32
    },
    {
      "epoch": 0.09271629778672032,
      "grad_norm": 0.3963007628917694,
      "learning_rate": 9.86663298624003e-05,
      "loss": 12.0261,
      "step": 36
    },
    {
      "epoch": 0.10301810865191147,
      "grad_norm": 0.23691882193088531,
      "learning_rate": 9.826044551386744e-05,
      "loss": 11.967,
      "step": 40
    },
    {
      "epoch": 0.10301810865191147,
      "eval_loss": 11.981504440307617,
      "eval_runtime": 120.2154,
      "eval_samples_per_second": 2.038,
      "eval_steps_per_second": 2.038,
      "step": 40
    },
    {
      "epoch": 0.11331991951710262,
      "grad_norm": 0.20406781136989594,
      "learning_rate": 9.780178907671789e-05,
      "loss": 11.9641,
      "step": 44
    },
    {
      "epoch": 0.12362173038229377,
      "grad_norm": 0.16647957265377045,
      "learning_rate": 9.729086208503174e-05,
      "loss": 11.9231,
      "step": 48
    },
    {
      "epoch": 0.12362173038229377,
      "eval_loss": 11.89189624786377,
      "eval_runtime": 163.7077,
      "eval_samples_per_second": 1.497,
      "eval_steps_per_second": 1.497,
      "step": 48
    },
    {
      "epoch": 0.1339235412474849,
      "grad_norm": 0.15248265862464905,
      "learning_rate": 9.672822322997305e-05,
      "loss": 11.8599,
      "step": 52
    },
    {
      "epoch": 0.14422535211267606,
      "grad_norm": 0.09781660884618759,
      "learning_rate": 9.611448774886924e-05,
      "loss": 11.804,
      "step": 56
    },
    {
      "epoch": 0.14422535211267606,
      "eval_loss": 11.80902099609375,
      "eval_runtime": 118.3727,
      "eval_samples_per_second": 2.07,
      "eval_steps_per_second": 2.07,
      "step": 56
    },
    {
      "epoch": 0.1545271629778672,
      "grad_norm": 0.2582569718360901,
      "learning_rate": 9.545032675245813e-05,
      "loss": 11.7282,
      "step": 60
    },
    {
      "epoch": 0.16482897384305836,
      "grad_norm": 0.15904435515403748,
      "learning_rate": 9.473646649103818e-05,
      "loss": 11.9348,
      "step": 64
    },
    {
      "epoch": 0.16482897384305836,
      "eval_loss": 11.950884819030762,
      "eval_runtime": 118.9665,
      "eval_samples_per_second": 2.059,
      "eval_steps_per_second": 2.059,
      "step": 64
    },
    {
      "epoch": 0.1751307847082495,
      "grad_norm": 0.12957070767879486,
      "learning_rate": 9.397368756032445e-05,
      "loss": 11.8253,
      "step": 68
    },
    {
      "epoch": 0.18543259557344063,
      "grad_norm": 0.13200531899929047,
      "learning_rate": 9.316282404787871e-05,
      "loss": 11.7656,
      "step": 72
    },
    {
      "epoch": 0.18543259557344063,
      "eval_loss": 11.661236763000488,
      "eval_runtime": 325.413,
      "eval_samples_per_second": 0.753,
      "eval_steps_per_second": 0.753,
      "step": 72
    }
  ],
  "logging_steps": 4,
  "max_steps": 388,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0372092502440346e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}