bansilp committed on
Commit d30f949
1 Parent(s): 13f3ca0

Model save

README.md CHANGED
@@ -22,7 +22,7 @@ model-index:
   metrics:
   - name: Accuracy
     type: accuracy
-    value: 0.9009259259259259
+    value: 0.9111111111111111
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,8 +32,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.5926
-- Accuracy: 0.9009
+- Loss: 0.5822
+- Accuracy: 0.9111
 
 ## Model description
 
@@ -52,7 +52,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 0.0003
+- learning_rate: 0.0001
 - train_batch_size: 48
 - eval_batch_size: 8
 - seed: 42
@@ -65,10 +65,10 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-| 0.128         | 1.11  | 100  | 0.7718          | 0.8685   |
-| 0.0236        | 2.22  | 200  | 0.6526          | 0.8852   |
-| 0.004         | 3.33  | 300  | 0.5478          | 0.9009   |
-| 0.0024        | 4.44  | 400  | 0.5926          | 0.9009   |
+| 0.0006        | 1.11  | 100  | 0.6744          | 0.9      |
+| 0.0048        | 2.22  | 200  | 0.6603          | 0.8991   |
+| 0.0015        | 3.33  | 300  | 0.6312          | 0.9      |
+| 0.0015        | 4.44  | 400  | 0.5822          | 0.9111   |
 
 
 ### Framework versions
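
The card above describes a ViT image classifier fine-tuned from `google/vit-base-patch16-224-in21k`. As a quick orientation for readers, here is a minimal inference sketch with 🤗 Transformers; the repository id `bansilp/vit-finetuned-imagefolder` and the image path are placeholders, since the commit view does not show the actual repo name.

```python
# Minimal inference sketch for a fine-tuned ViT classifier like the one described above.
# Assumptions: the repo id and image path below are placeholders, not taken from this commit.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="bansilp/vit-finetuned-imagefolder",  # hypothetical repo id
)

# Classify a local image and print the top predictions with their scores.
for pred in classifier("example.jpg"):
    print(f"{pred['label']}: {pred['score']:.4f}")
```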
all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
     "epoch": 5.0,
-    "eval_accuracy": 0.9092592592592592,
-    "eval_loss": 0.5364564061164856,
-    "eval_runtime": 14.0587,
-    "eval_samples_per_second": 76.821,
-    "eval_steps_per_second": 9.603,
+    "eval_accuracy": 0.9037037037037037,
+    "eval_loss": 0.5827512741088867,
+    "eval_runtime": 14.2129,
+    "eval_samples_per_second": 75.987,
+    "eval_steps_per_second": 9.498,
     "total_flos": 1.6739319895474176e+18,
-    "train_loss": 0.02156418908904824,
-    "train_runtime": 434.3833,
-    "train_samples_per_second": 49.726,
-    "train_steps_per_second": 1.036
+    "train_loss": 0.03626411052524216,
+    "train_runtime": 439.1764,
+    "train_samples_per_second": 49.183,
+    "train_steps_per_second": 1.025
 }
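
`all_results.json` is a flat JSON dictionary holding the final training and evaluation metrics, so the numbers above can be inspected programmatically. A minimal sketch, assuming a local copy of the file:

```python
# Load the aggregated metrics that Trainer writes at the end of training and evaluation.
import json

with open("all_results.json") as f:
    results = json.load(f)

print(f"eval accuracy: {results['eval_accuracy']:.4f}")
print(f"eval loss:     {results['eval_loss']:.4f}")

# Rough sanity check: samples/second x runtime should approximate the eval set size.
approx_eval_samples = results["eval_samples_per_second"] * results["eval_runtime"]
print(f"approx. eval samples: {approx_eval_samples:.0f}")  # ~1080 for the values above
```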
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 5.0,
-    "eval_accuracy": 0.9092592592592592,
-    "eval_loss": 0.5364564061164856,
-    "eval_runtime": 14.0587,
-    "eval_samples_per_second": 76.821,
-    "eval_steps_per_second": 9.603
+    "eval_accuracy": 0.9037037037037037,
+    "eval_loss": 0.5827512741088867,
+    "eval_runtime": 14.2129,
+    "eval_samples_per_second": 75.987,
+    "eval_steps_per_second": 9.498
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:195c22084a8777f5cc7b698a55f748752ec59dfc255766ae2fbf09e121c968ee
+oid sha256:2061c4261f2dbaf11d6238026d3e90b914cad651135f214ab41c9dcb20a25cf7
 size 343245508
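
Only the Git LFS pointer (the `oid` hash) changes here; the actual weights (~343 MB per the `size` field) live in LFS storage. A hedged sketch for fetching them with `huggingface_hub`; the repo id is again a placeholder:

```python
# Download the real model.safetensors behind the LFS pointer shown above.
# Assumption: the repo id is a placeholder; "d30f949" is the commit referenced at the top.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="bansilp/vit-finetuned-imagefolder",  # hypothetical repo id
    filename="model.safetensors",
    revision="d30f949",
)
print(local_path)  # path of the cached ~343 MB weights file
```

Cloning the repository and running `git lfs pull` achieves the same result.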
runs/Dec15_02-37-22_d2a76fcee09b/events.out.tfevents.1702608328.d2a76fcee09b.2614.10 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0b8ca25e06e467b6a17c50b068e3df07c995bdd121564f85540ae4bdf66ef11
+size 411
runs/Dec15_02-45-48_d2a76fcee09b/events.out.tfevents.1702608352.d2a76fcee09b.2614.11 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09d153778373c39ffb8242354ad510727e5330179e2eb370825621316f41cb93
+size 13189
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
     "epoch": 5.0,
     "total_flos": 1.6739319895474176e+18,
-    "train_loss": 0.02156418908904824,
-    "train_runtime": 434.3833,
-    "train_samples_per_second": 49.726,
-    "train_steps_per_second": 1.036
+    "train_loss": 0.03626411052524216,
+    "train_runtime": 439.1764,
+    "train_samples_per_second": 49.183,
+    "train_steps_per_second": 1.025
 }
trainer_state.json CHANGED
@@ -10,318 +10,318 @@
   "log_history": [
     {
       "epoch": 0.11,
-      "learning_rate": 0.00019555555555555556,
-      "loss": 0.0015,
+      "learning_rate": 0.00029333333333333327,
+      "loss": 0.0774,
       "step": 10
     },
     {
       "epoch": 0.22,
-      "learning_rate": 0.00019111111111111114,
-      "loss": 0.0402,
+      "learning_rate": 0.0002866666666666667,
+      "loss": 0.1816,
       "step": 20
     },
     {
       "epoch": 0.33,
-      "learning_rate": 0.0001866666666666667,
-      "loss": 0.0814,
+      "learning_rate": 0.00028,
+      "loss": 0.0859,
       "step": 30
     },
     {
       "epoch": 0.44,
-      "learning_rate": 0.00018222222222222224,
-      "loss": 0.0893,
+      "learning_rate": 0.00027333333333333333,
+      "loss": 0.1125,
       "step": 40
     },
     {
       "epoch": 0.56,
-      "learning_rate": 0.00017777777777777779,
-      "loss": 0.1203,
+      "learning_rate": 0.0002666666666666666,
+      "loss": 0.0928,
       "step": 50
     },
     {
       "epoch": 0.67,
-      "learning_rate": 0.00017333333333333334,
-      "loss": 0.0505,
+      "learning_rate": 0.00026,
+      "loss": 0.0845,
       "step": 60
     },
     {
       "epoch": 0.78,
-      "learning_rate": 0.00016888888888888889,
-      "loss": 0.0448,
+      "learning_rate": 0.00025333333333333333,
+      "loss": 0.0974,
       "step": 70
     },
     {
       "epoch": 0.89,
-      "learning_rate": 0.00016444444444444444,
-      "loss": 0.073,
+      "learning_rate": 0.0002466666666666666,
+      "loss": 0.1446,
       "step": 80
     },
     {
       "epoch": 1.0,
-      "learning_rate": 0.00016,
-      "loss": 0.0954,
+      "learning_rate": 0.00023999999999999998,
+      "loss": 0.1117,
       "step": 90
     },
     {
       "epoch": 1.11,
-      "learning_rate": 0.00015555555555555556,
-      "loss": 0.0273,
+      "learning_rate": 0.0002333333333333333,
+      "loss": 0.128,
       "step": 100
     },
     {
       "epoch": 1.11,
-      "eval_accuracy": 0.8740740740740741,
-      "eval_loss": 0.7636905312538147,
-      "eval_runtime": 12.8677,
-      "eval_samples_per_second": 83.931,
-      "eval_steps_per_second": 10.491,
+      "eval_accuracy": 0.8685185185185185,
+      "eval_loss": 0.7717533707618713,
+      "eval_runtime": 16.0389,
+      "eval_samples_per_second": 67.336,
+      "eval_steps_per_second": 8.417,
       "step": 100
     },
     {
       "epoch": 1.22,
-      "learning_rate": 0.0001511111111111111,
-      "loss": 0.0246,
+      "learning_rate": 0.00022666666666666663,
+      "loss": 0.0305,
       "step": 110
     },
     {
       "epoch": 1.33,
-      "learning_rate": 0.00014666666666666666,
+      "learning_rate": 0.00021999999999999995,
       "loss": 0.0223,
       "step": 120
     },
     {
       "epoch": 1.44,
-      "learning_rate": 0.00014222222222222224,
-      "loss": 0.0129,
+      "learning_rate": 0.00021333333333333333,
+      "loss": 0.0938,
       "step": 130
     },
     {
       "epoch": 1.56,
-      "learning_rate": 0.0001377777777777778,
-      "loss": 0.0401,
+      "learning_rate": 0.00020666666666666666,
+      "loss": 0.0403,
       "step": 140
     },
     {
       "epoch": 1.67,
-      "learning_rate": 0.00013333333333333334,
-      "loss": 0.0285,
+      "learning_rate": 0.00019999999999999998,
+      "loss": 0.0536,
       "step": 150
     },
     {
       "epoch": 1.78,
-      "learning_rate": 0.00012888888888888892,
-      "loss": 0.0261,
+      "learning_rate": 0.00019333333333333333,
+      "loss": 0.0223,
       "step": 160
     },
     {
       "epoch": 1.89,
-      "learning_rate": 0.00012444444444444444,
-      "loss": 0.0456,
+      "learning_rate": 0.00018666666666666666,
+      "loss": 0.0588,
       "step": 170
     },
     {
       "epoch": 2.0,
-      "learning_rate": 0.00012,
-      "loss": 0.0198,
+      "learning_rate": 0.00017999999999999998,
+      "loss": 0.0055,
       "step": 180
     },
     {
       "epoch": 2.11,
-      "learning_rate": 0.00011555555555555555,
-      "loss": 0.0051,
+      "learning_rate": 0.0001733333333333333,
+      "loss": 0.0251,
       "step": 190
     },
     {
       "epoch": 2.22,
-      "learning_rate": 0.00011111111111111112,
-      "loss": 0.0314,
+      "learning_rate": 0.00016666666666666666,
+      "loss": 0.0236,
       "step": 200
     },
     {
       "epoch": 2.22,
-      "eval_accuracy": 0.8824074074074074,
-      "eval_loss": 0.6027200818061829,
-      "eval_runtime": 13.4571,
-      "eval_samples_per_second": 80.255,
-      "eval_steps_per_second": 10.032,
+      "eval_accuracy": 0.8851851851851852,
+      "eval_loss": 0.6526010632514954,
+      "eval_runtime": 12.8913,
+      "eval_samples_per_second": 83.777,
+      "eval_steps_per_second": 10.472,
       "step": 200
     },
     {
       "epoch": 2.33,
-      "learning_rate": 0.00010666666666666667,
-      "loss": 0.0061,
+      "learning_rate": 0.00015999999999999999,
+      "loss": 0.0328,
       "step": 210
     },
     {
       "epoch": 2.44,
-      "learning_rate": 0.00010222222222222222,
-      "loss": 0.0103,
+      "learning_rate": 0.0001533333333333333,
+      "loss": 0.0177,
       "step": 220
     },
     {
       "epoch": 2.56,
-      "learning_rate": 9.777777777777778e-05,
-      "loss": 0.0119,
+      "learning_rate": 0.00014666666666666664,
+      "loss": 0.002,
       "step": 230
     },
     {
       "epoch": 2.67,
-      "learning_rate": 9.333333333333334e-05,
-      "loss": 0.0121,
+      "learning_rate": 0.00014,
+      "loss": 0.0109,
       "step": 240
     },
     {
       "epoch": 2.78,
-      "learning_rate": 8.888888888888889e-05,
-      "loss": 0.0109,
+      "learning_rate": 0.0001333333333333333,
+      "loss": 0.0237,
       "step": 250
     },
     {
       "epoch": 2.89,
-      "learning_rate": 8.444444444444444e-05,
-      "loss": 0.0017,
+      "learning_rate": 0.00012666666666666666,
+      "loss": 0.0021,
       "step": 260
     },
     {
       "epoch": 3.0,
-      "learning_rate": 8e-05,
-      "loss": 0.0043,
+      "learning_rate": 0.00011999999999999999,
+      "loss": 0.0024,
       "step": 270
     },
     {
       "epoch": 3.11,
-      "learning_rate": 7.555555555555556e-05,
-      "loss": 0.0005,
+      "learning_rate": 0.00011333333333333331,
+      "loss": 0.007,
       "step": 280
     },
     {
       "epoch": 3.22,
-      "learning_rate": 7.111111111111112e-05,
-      "loss": 0.0004,
+      "learning_rate": 0.00010666666666666667,
+      "loss": 0.0008,
       "step": 290
     },
     {
       "epoch": 3.33,
-      "learning_rate": 6.666666666666667e-05,
-      "loss": 0.0019,
+      "learning_rate": 9.999999999999999e-05,
+      "loss": 0.004,
       "step": 300
     },
     {
       "epoch": 3.33,
-      "eval_accuracy": 0.9037037037037037,
-      "eval_loss": 0.5983412861824036,
-      "eval_runtime": 12.7475,
-      "eval_samples_per_second": 84.723,
-      "eval_steps_per_second": 10.59,
+      "eval_accuracy": 0.9009259259259259,
+      "eval_loss": 0.5478248596191406,
+      "eval_runtime": 13.1334,
+      "eval_samples_per_second": 82.233,
+      "eval_steps_per_second": 10.279,
       "step": 300
     },
     {
       "epoch": 3.44,
-      "learning_rate": 6.222222222222222e-05,
-      "loss": 0.0037,
+      "learning_rate": 9.333333333333333e-05,
+      "loss": 0.0041,
       "step": 310
     },
     {
       "epoch": 3.56,
-      "learning_rate": 5.7777777777777776e-05,
-      "loss": 0.0025,
+      "learning_rate": 8.666666666666665e-05,
+      "loss": 0.0026,
       "step": 320
     },
     {
       "epoch": 3.67,
-      "learning_rate": 5.333333333333333e-05,
-      "loss": 0.0026,
+      "learning_rate": 7.999999999999999e-05,
+      "loss": 0.0035,
       "step": 330
     },
     {
       "epoch": 3.78,
-      "learning_rate": 4.888888888888889e-05,
+      "learning_rate": 7.333333333333332e-05,
       "loss": 0.0006,
       "step": 340
     },
     {
       "epoch": 3.89,
-      "learning_rate": 4.4444444444444447e-05,
-      "loss": 0.0053,
+      "learning_rate": 6.666666666666666e-05,
+      "loss": 0.0087,
       "step": 350
     },
     {
       "epoch": 4.0,
-      "learning_rate": 4e-05,
-      "loss": 0.0006,
+      "learning_rate": 5.9999999999999995e-05,
+      "loss": 0.0004,
       "step": 360
     },
     {
       "epoch": 4.11,
-      "learning_rate": 3.555555555555556e-05,
-      "loss": 0.0003,
+      "learning_rate": 5.333333333333333e-05,
+      "loss": 0.0004,
       "step": 370
     },
     {
       "epoch": 4.22,
-      "learning_rate": 3.111111111111111e-05,
-      "loss": 0.0019,
+      "learning_rate": 4.6666666666666665e-05,
+      "loss": 0.0017,
       "step": 380
     },
     {
       "epoch": 4.33,
-      "learning_rate": 2.6666666666666667e-05,
-      "loss": 0.0004,
+      "learning_rate": 3.9999999999999996e-05,
+      "loss": 0.0005,
       "step": 390
     },
     {
       "epoch": 4.44,
-      "learning_rate": 2.2222222222222223e-05,
-      "loss": 0.0019,
+      "learning_rate": 3.333333333333333e-05,
+      "loss": 0.0024,
       "step": 400
     },
     {
       "epoch": 4.44,
-      "eval_accuracy": 0.9092592592592592,
-      "eval_loss": 0.5358805060386658,
-      "eval_runtime": 12.6258,
-      "eval_samples_per_second": 85.539,
-      "eval_steps_per_second": 10.692,
+      "eval_accuracy": 0.9009259259259259,
+      "eval_loss": 0.5925586819648743,
+      "eval_runtime": 12.8084,
+      "eval_samples_per_second": 84.32,
+      "eval_steps_per_second": 10.54,
       "step": 400
     },
     {
       "epoch": 4.56,
-      "learning_rate": 1.777777777777778e-05,
-      "loss": 0.0003,
+      "learning_rate": 2.6666666666666667e-05,
+      "loss": 0.0004,
       "step": 410
     },
     {
       "epoch": 4.67,
-      "learning_rate": 1.3333333333333333e-05,
-      "loss": 0.002,
+      "learning_rate": 1.9999999999999998e-05,
+      "loss": 0.0019,
       "step": 420
     },
     {
       "epoch": 4.78,
-      "learning_rate": 8.88888888888889e-06,
-      "loss": 0.0046,
+      "learning_rate": 1.3333333333333333e-05,
+      "loss": 0.005,
       "step": 430
     },
     {
       "epoch": 4.89,
-      "learning_rate": 4.444444444444445e-06,
-      "loss": 0.0032,
+      "learning_rate": 6.666666666666667e-06,
+      "loss": 0.0035,
       "step": 440
     },
     {
       "epoch": 5.0,
       "learning_rate": 0.0,
-      "loss": 0.0003,
+      "loss": 0.0004,
       "step": 450
     },
     {
       "epoch": 5.0,
       "step": 450,
       "total_flos": 1.6739319895474176e+18,
-      "train_loss": 0.02156418908904824,
-      "train_runtime": 434.3833,
-      "train_samples_per_second": 49.726,
-      "train_steps_per_second": 1.036
+      "train_loss": 0.03626411052524216,
+      "train_runtime": 439.1764,
+      "train_samples_per_second": 49.183,
+      "train_steps_per_second": 1.025
     }
   ],
   "logging_steps": 10,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff0144e19c345d1cafd5108586fa9401bae1c146468e7f549fa127ec77535ac0
+oid sha256:86febfc51531354521c419f30c7abba7552245c1616e4e8eaaeaeca09b5c770e
 size 4536
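
The `trainer_state.json` diff above carries the full `log_history`, so the per-checkpoint evaluation results can be pulled out directly; `training_args.bin` is the Trainer's serialized `TrainingArguments` and appears here only as an LFS pointer. A minimal sketch for extracting the evaluation checkpoints from a local `trainer_state.json`:

```python
# Walk log_history and keep only the entries written at evaluation time.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

eval_logs = [entry for entry in state["log_history"] if "eval_accuracy" in entry]
for entry in eval_logs:
    print(f"step {entry['step']:>3}: "
          f"accuracy={entry['eval_accuracy']:.4f}, loss={entry['eval_loss']:.4f}")

# Report the checkpoint with the highest validation accuracy.
best = max(eval_logs, key=lambda entry: entry["eval_accuracy"])
print(f"best: step {best['step']} (accuracy {best['eval_accuracy']:.4f})")
```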