Daoguang committed on
Commit e4c1cd7
1 Parent(s): 0c0acef

Upload 17 files

Files changed (3)
  1. latest +1 -1
  2. trainer_state.json +115 -85
  3. training_args.bin +1 -1
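
Most of the change is in trainer_state.json, whose log_history below now runs through step 90. The logged loss curve can be read back without touching any model weights; a minimal sketch, assuming the JSON file has been downloaded to the working directory:

import json

# Load the Trainer state written by transformers and walk its log_history list.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each entry mirrors the diff below: epoch, learning_rate, loss, step.
for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")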
latest CHANGED
@@ -1 +1 @@
-global_step80
+global_step90
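
The latest file is a one-line tag naming the newest checkpoint directory; a latest file next to global_stepNN folders is the DeepSpeed checkpoint convention, so this commit moves the pointer from global_step80 to global_step90. A minimal sketch of resolving it, assuming the repository has been downloaded to a hypothetical local checkpoint/ folder:

from pathlib import Path

# Hypothetical local path to this checkpoint; adjust to wherever the repo was downloaded.
checkpoint_root = Path("checkpoint")

# The latest file holds the tag of the newest saved step, e.g. "global_step90".
tag = (checkpoint_root / "latest").read_text().strip()
print("most recent checkpoint directory:", checkpoint_root / tag)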
trainer_state.json CHANGED
@@ -1,260 +1,290 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 4.776119402985074,
+  "epoch": 4.8979591836734695,
   "eval_steps": 500,
-  "global_step": 80,
+  "global_step": 90,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.12,
+      "epoch": 0.11,
       "learning_rate": 5e-05,
-      "loss": 0.8672,
+      "loss": 0.9113,
       "step": 2
     },
     {
-      "epoch": 0.24,
+      "epoch": 0.22,
       "learning_rate": 5e-05,
-      "loss": 0.8051,
+      "loss": 0.7869,
       "step": 4
     },
     {
-      "epoch": 0.36,
+      "epoch": 0.33,
       "learning_rate": 5e-05,
-      "loss": 0.6955,
+      "loss": 0.7093,
       "step": 6
     },
     {
-      "epoch": 0.48,
+      "epoch": 0.44,
       "learning_rate": 5e-05,
-      "loss": 0.6879,
+      "loss": 0.6995,
       "step": 8
     },
     {
-      "epoch": 0.6,
+      "epoch": 0.54,
       "learning_rate": 5e-05,
-      "loss": 0.6785,
+      "loss": 0.688,
       "step": 10
     },
     {
-      "epoch": 0.72,
+      "epoch": 0.65,
       "learning_rate": 5e-05,
-      "loss": 0.6584,
+      "loss": 0.6936,
       "step": 12
     },
     {
-      "epoch": 0.84,
+      "epoch": 0.76,
       "learning_rate": 5e-05,
-      "loss": 0.646,
+      "loss": 0.6948,
       "step": 14
     },
     {
-      "epoch": 0.96,
+      "epoch": 0.87,
       "learning_rate": 5e-05,
-      "loss": 0.6585,
+      "loss": 0.7126,
       "step": 16
     },
     {
-      "epoch": 1.07,
+      "epoch": 0.98,
       "learning_rate": 5e-05,
-      "loss": 0.5556,
+      "loss": 0.6552,
       "step": 18
     },
     {
-      "epoch": 1.19,
+      "epoch": 1.09,
       "learning_rate": 5e-05,
-      "loss": 0.4459,
+      "loss": 0.5185,
       "step": 20
     },
     {
-      "epoch": 1.31,
+      "epoch": 1.2,
       "learning_rate": 5e-05,
-      "loss": 0.4237,
+      "loss": 0.4545,
       "step": 22
     },
     {
-      "epoch": 1.43,
+      "epoch": 1.31,
       "learning_rate": 5e-05,
-      "loss": 0.4039,
+      "loss": 0.4339,
       "step": 24
     },
     {
-      "epoch": 1.55,
+      "epoch": 1.41,
       "learning_rate": 5e-05,
-      "loss": 0.3943,
+      "loss": 0.4015,
       "step": 26
     },
     {
-      "epoch": 1.67,
+      "epoch": 1.52,
       "learning_rate": 5e-05,
-      "loss": 0.3858,
+      "loss": 0.3998,
       "step": 28
     },
     {
-      "epoch": 1.79,
+      "epoch": 1.63,
       "learning_rate": 5e-05,
-      "loss": 0.402,
+      "loss": 0.4085,
       "step": 30
     },
     {
-      "epoch": 1.91,
+      "epoch": 1.74,
       "learning_rate": 5e-05,
-      "loss": 0.4071,
+      "loss": 0.3968,
       "step": 32
     },
     {
-      "epoch": 2.03,
+      "epoch": 1.85,
       "learning_rate": 5e-05,
-      "loss": 0.3372,
+      "loss": 0.3707,
       "step": 34
     },
     {
-      "epoch": 2.15,
+      "epoch": 1.96,
       "learning_rate": 5e-05,
-      "loss": 0.2406,
+      "loss": 0.3904,
       "step": 36
     },
     {
-      "epoch": 2.27,
+      "epoch": 2.07,
       "learning_rate": 5e-05,
-      "loss": 0.1945,
+      "loss": 0.2891,
       "step": 38
     },
     {
-      "epoch": 2.39,
+      "epoch": 2.18,
       "learning_rate": 5e-05,
-      "loss": 0.1935,
+      "loss": 0.2091,
       "step": 40
     },
     {
-      "epoch": 2.51,
+      "epoch": 2.29,
       "learning_rate": 5e-05,
-      "loss": 0.1866,
+      "loss": 0.1942,
       "step": 42
     },
     {
-      "epoch": 2.63,
+      "epoch": 2.39,
       "learning_rate": 5e-05,
-      "loss": 0.1739,
+      "loss": 0.188,
       "step": 44
     },
     {
-      "epoch": 2.75,
+      "epoch": 2.5,
       "learning_rate": 5e-05,
-      "loss": 0.1708,
+      "loss": 0.1921,
       "step": 46
     },
     {
-      "epoch": 2.87,
+      "epoch": 2.61,
       "learning_rate": 5e-05,
-      "loss": 0.1808,
+      "loss": 0.1812,
       "step": 48
     },
     {
-      "epoch": 2.99,
+      "epoch": 2.72,
       "learning_rate": 5e-05,
-      "loss": 0.1847,
+      "loss": 0.1746,
       "step": 50
     },
     {
-      "epoch": 3.1,
+      "epoch": 2.83,
       "learning_rate": 5e-05,
-      "loss": 0.1019,
+      "loss": 0.1867,
       "step": 52
     },
     {
-      "epoch": 3.22,
+      "epoch": 2.94,
       "learning_rate": 5e-05,
-      "loss": 0.0741,
+      "loss": 0.1831,
       "step": 54
     },
     {
-      "epoch": 3.34,
+      "epoch": 3.05,
       "learning_rate": 5e-05,
-      "loss": 0.073,
+      "loss": 0.1302,
       "step": 56
     },
     {
-      "epoch": 3.46,
+      "epoch": 3.16,
       "learning_rate": 5e-05,
-      "loss": 0.0748,
+      "loss": 0.0841,
       "step": 58
     },
     {
-      "epoch": 3.58,
+      "epoch": 3.27,
       "learning_rate": 5e-05,
-      "loss": 0.07,
+      "loss": 0.0747,
       "step": 60
     },
     {
-      "epoch": 3.7,
+      "epoch": 3.37,
       "learning_rate": 5e-05,
-      "loss": 0.0692,
+      "loss": 0.0754,
       "step": 62
     },
     {
-      "epoch": 3.82,
+      "epoch": 3.48,
       "learning_rate": 5e-05,
-      "loss": 0.079,
+      "loss": 0.075,
       "step": 64
     },
     {
-      "epoch": 3.94,
+      "epoch": 3.59,
       "learning_rate": 5e-05,
-      "loss": 0.0835,
+      "loss": 0.0763,
       "step": 66
     },
     {
-      "epoch": 4.06,
+      "epoch": 3.7,
       "learning_rate": 5e-05,
-      "loss": 0.0533,
+      "loss": 0.0761,
       "step": 68
     },
     {
-      "epoch": 4.18,
+      "epoch": 3.81,
       "learning_rate": 5e-05,
-      "loss": 0.0356,
+      "loss": 0.0789,
       "step": 70
     },
     {
-      "epoch": 4.3,
+      "epoch": 3.92,
       "learning_rate": 5e-05,
-      "loss": 0.0326,
+      "loss": 0.0757,
       "step": 72
     },
     {
-      "epoch": 4.42,
+      "epoch": 4.03,
       "learning_rate": 5e-05,
-      "loss": 0.0417,
+      "loss": 0.0757,
       "step": 74
     },
     {
-      "epoch": 4.54,
+      "epoch": 4.14,
       "learning_rate": 5e-05,
-      "loss": 0.0352,
+      "loss": 0.0337,
       "step": 76
     },
     {
-      "epoch": 4.66,
+      "epoch": 4.24,
       "learning_rate": 5e-05,
-      "loss": 0.042,
+      "loss": 0.0377,
       "step": 78
     },
     {
-      "epoch": 4.78,
+      "epoch": 4.35,
       "learning_rate": 5e-05,
-      "loss": 0.0359,
+      "loss": 0.0373,
       "step": 80
+    },
+    {
+      "epoch": 4.46,
+      "learning_rate": 5e-05,
+      "loss": 0.0388,
+      "step": 82
+    },
+    {
+      "epoch": 4.57,
+      "learning_rate": 5e-05,
+      "loss": 0.0433,
+      "step": 84
+    },
+    {
+      "epoch": 4.68,
+      "learning_rate": 5e-05,
+      "loss": 0.04,
+      "step": 86
+    },
+    {
+      "epoch": 4.79,
+      "learning_rate": 5e-05,
+      "loss": 0.0487,
+      "step": 88
+    },
+    {
+      "epoch": 4.9,
+      "learning_rate": 5e-05,
+      "loss": 0.0499,
+      "step": 90
     }
   ],
   "logging_steps": 2,
-  "max_steps": 80,
+  "max_steps": 90,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
-  "save_steps": 20,
-  "total_flos": 58433383956480.0,
+  "save_steps": 30,
+  "total_flos": 65847448043520.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:307a17178835f4dc291457cc32d36551ee702b49bbfcb565831d619d7743c969
+oid sha256:3b51f0b7428a26ea5c653b835d1af27e22f6718d9487f29295fe3e3d5922149f
 size 7224
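
training_args.bin is tracked with Git LFS, so the diff above only shows an updated pointer (new sha256, unchanged 7224-byte size). The underlying file is the pickled TrainingArguments object that transformers.Trainer saves next to each checkpoint; a minimal sketch of inspecting it once the real binary has been pulled, assuming a matching transformers install and a trusted source (it is an arbitrary pickle):

import torch

# Unpickle the saved TrainingArguments; weights_only=False is needed on newer torch
# releases because this is a full Python object, not a tensor-only checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)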