sdgroeve committed
Commit b030729
1 parent: 545747d

Training in progress, epoch 1

README.md CHANGED
@@ -4,7 +4,7 @@ base_model: microsoft/swin-tiny-patch4-window7-224
 tags:
 - generated_from_trainer
 datasets:
-- cifar10
+- imagefolder
 metrics:
 - accuracy
 model-index:
@@ -14,15 +14,15 @@ model-index:
       name: Image Classification
       type: image-classification
     dataset:
-      name: cifar10
-      type: cifar10
-      config: plain_text
+      name: imagefolder
+      type: imagefolder
+      config: default
       split: train
-      args: plain_text
+      args: default
     metrics:
     - name: Accuracy
      type: accuracy
-      value: 0.973
+      value: 0.9803703703703703
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # swin-tiny-patch4-window7-224-finetuned-eurosat
 
-This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the cifar10 dataset.
+This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0799
-- Accuracy: 0.973
+- Loss: 0.0592
+- Accuracy: 0.9804
 
 ## Model description
 
@@ -67,14 +67,14 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-| 0.5238        | 1.0   | 351  | 0.1417          | 0.9512   |
-| 0.3708        | 2.0   | 703  | 0.0975          | 0.9688   |
-| 0.3201        | 2.99  | 1053 | 0.0799          | 0.973    |
+| 0.2484        | 1.0   | 190  | 0.1036          | 0.9685   |
+| 0.219         | 2.0   | 380  | 0.0825          | 0.9730   |
+| 0.121         | 3.0   | 570  | 0.0592          | 0.9804   |
 
 
 ### Framework versions
 
-- Transformers 4.33.0
-- Pytorch 2.0.0
-- Datasets 2.1.0
+- Transformers 4.33.2
+- Pytorch 2.0.1+cu117
+- Datasets 2.14.5
 - Tokenizers 0.13.3
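The README change above swaps the `cifar10` metadata for a locally loaded `imagefolder` dataset whose ten classes (see config.json below) are EuroSAT land-use categories, and updates the reported metrics accordingly. As a minimal usage sketch, the fine-tuned checkpoint could be queried with the `transformers` image-classification pipeline; the repo id used here is an assumption inferred from the commit author and output directory name, not something stated in the diff.

```python
# Minimal inference sketch for the checkpoint described in the README above.
# Assumption: the model is published as "sdgroeve/swin-tiny-patch4-window7-224-finetuned-eurosat";
# substitute a local checkpoint path or the real Hub repo id as needed.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    task="image-classification",
    model="sdgroeve/swin-tiny-patch4-window7-224-finetuned-eurosat",  # assumed repo id
)

image = Image.open("example.jpg")  # any RGB image; the processor resizes it to 224x224
for pred in classifier(image, top_k=3):
    print(f"{pred['label']}: {pred['score']:.4f}")
```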
all_results.json CHANGED
@@ -1,13 +1,13 @@
 {
-    "epoch": 2.99,
-    "eval_accuracy": 0.973,
-    "eval_loss": 0.079922154545784,
-    "eval_runtime": 20.1587,
-    "eval_samples_per_second": 248.032,
-    "eval_steps_per_second": 7.788,
-    "total_flos": 3.3497451642252165e+18,
-    "train_loss": 0.5413832122217669,
-    "train_runtime": 1364.9822,
-    "train_samples_per_second": 98.902,
-    "train_steps_per_second": 0.771
+    "epoch": 3.0,
+    "eval_accuracy": 0.9803703703703703,
+    "eval_loss": 0.059159792959690094,
+    "eval_runtime": 4.8853,
+    "eval_samples_per_second": 552.68,
+    "eval_steps_per_second": 17.399,
+    "total_flos": 1.8124066505760768e+18,
+    "train_loss": 0.32973566536317794,
+    "train_runtime": 250.3967,
+    "train_samples_per_second": 291.138,
+    "train_steps_per_second": 2.276
 }
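These aggregate numbers are the kind of summary a `transformers` `Trainer` script writes out at the end of training and evaluation. A hedged sketch of that step is below, mirroring the standard example-script flow (`log_metrics`/`save_metrics`/`save_state`); the author's actual training script is not part of this commit.

```python
from transformers import Trainer

def train_and_dump_metrics(trainer: Trainer) -> None:
    """Run training + evaluation and write the metric files shown in this commit.

    Assumes `trainer` is an already configured image-classification Trainer;
    this mirrors the usual run_image_classification.py-style flow, not
    necessarily the author's exact script.
    """
    train_result = trainer.train()
    trainer.log_metrics("train", train_result.metrics)
    trainer.save_metrics("train", train_result.metrics)  # train_results.json / all_results.json
    trainer.save_state()                                 # trainer_state.json

    eval_metrics = trainer.evaluate()
    trainer.log_metrics("eval", eval_metrics)
    trainer.save_metrics("eval", eval_metrics)           # eval_results.json / all_results.json
```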
config.json CHANGED
@@ -17,30 +17,30 @@
   "hidden_dropout_prob": 0.0,
   "hidden_size": 768,
   "id2label": {
-    "0": "airplane",
-    "1": "automobile",
-    "2": "bird",
-    "3": "cat",
-    "4": "deer",
-    "5": "dog",
-    "6": "frog",
-    "7": "horse",
-    "8": "ship",
-    "9": "truck"
+    "0": "AnnualCrop",
+    "1": "Forest",
+    "2": "HerbaceousVegetation",
+    "3": "Highway",
+    "4": "Industrial",
+    "5": "Pasture",
+    "6": "PermanentCrop",
+    "7": "Residential",
+    "8": "River",
+    "9": "SeaLake"
   },
   "image_size": 224,
   "initializer_range": 0.02,
   "label2id": {
-    "airplane": 0,
-    "automobile": 1,
-    "bird": 2,
-    "cat": 3,
-    "deer": 4,
-    "dog": 5,
-    "frog": 6,
-    "horse": 7,
-    "ship": 8,
-    "truck": 9
+    "AnnualCrop": 0,
+    "Forest": 1,
+    "HerbaceousVegetation": 2,
+    "Highway": 3,
+    "Industrial": 4,
+    "Pasture": 5,
+    "PermanentCrop": 6,
+    "Residential": 7,
+    "River": 8,
+    "SeaLake": 9
   },
   "layer_norm_eps": 1e-05,
   "mlp_ratio": 4.0,
@@ -71,7 +71,7 @@
     "stage4"
   ],
   "torch_dtype": "float32",
-  "transformers_version": "4.33.0",
+  "transformers_version": "4.33.2",
   "use_absolute_embeddings": false,
   "window_size": 7
 }
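The only substantive config change is the label vocabulary: the CIFAR-10 class names are replaced with the ten EuroSAT categories, which is what maps logit indices to readable labels at inference time. A minimal sketch of that mapping in use, assuming the checkpoint directory from this commit is available locally (the path is hypothetical):

```python
# Sketch: how the id2label mapping in config.json turns logits into EuroSAT class names.
# The local checkpoint path below is an assumption.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

ckpt = "./swin-tiny-patch4-window7-224-finetuned-eurosat"  # assumed local path
processor = AutoImageProcessor.from_pretrained(ckpt)
model = AutoModelForImageClassification.from_pretrained(ckpt)

image = Image.open("example.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

pred_id = logits.argmax(dim=-1).item()
print(model.config.id2label[pred_id])  # e.g. "Forest" or "Residential"
```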
eval_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 2.99,
-    "eval_accuracy": 0.973,
-    "eval_loss": 0.079922154545784,
-    "eval_runtime": 20.1587,
-    "eval_samples_per_second": 248.032,
-    "eval_steps_per_second": 7.788
+    "epoch": 3.0,
+    "eval_accuracy": 0.9803703703703703,
+    "eval_loss": 0.059159792959690094,
+    "eval_runtime": 4.8853,
+    "eval_samples_per_second": 552.68,
+    "eval_steps_per_second": 17.399
 }
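The new `eval_accuracy` of 0.98037... is consistent with 2647/2700 correct predictions, i.e. an evaluation split of roughly 2,700 images (552.68 samples/s × 4.8853 s ≈ 2700). A hedged sketch of the `compute_metrics` hook that typically produces this number follows; only its outputs appear in this commit, so the exact implementation is an assumption.

```python
# Sketch of the compute_metrics hook that typically produces "eval_accuracy"
# (an assumption about the training script, which is not included in this commit).
import numpy as np
import evaluate

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return accuracy.compute(predictions=predictions, references=labels)
```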
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:58fdfac218c785be2841977efb5b9e0f6eaedb03a10d45f1bd617f9360b54400
+oid sha256:4b3f83e44dcabca5ca5389693ba84eb94e0cc0c17db2be57c34ba08b9afb98ce
 size 110419441
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 2.99,
-    "total_flos": 3.3497451642252165e+18,
-    "train_loss": 0.5413832122217669,
-    "train_runtime": 1364.9822,
-    "train_samples_per_second": 98.902,
-    "train_steps_per_second": 0.771
+    "epoch": 3.0,
+    "total_flos": 1.8124066505760768e+18,
+    "train_loss": 0.32973566536317794,
+    "train_runtime": 250.3967,
+    "train_samples_per_second": 291.138,
+    "train_steps_per_second": 2.276
 }
trainer_state.json CHANGED
@@ -1,685 +1,397 @@
1
  {
2
- "best_metric": 0.973,
3
- "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-1053",
4
- "epoch": 2.9936034115138592,
5
  "eval_steps": 500,
6
- "global_step": 1053,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
- "epoch": 0.03,
13
- "learning_rate": 4.716981132075472e-06,
14
- "loss": 2.3037,
15
  "step": 10
16
  },
17
  {
18
- "epoch": 0.06,
19
- "learning_rate": 9.433962264150944e-06,
20
- "loss": 2.2484,
21
  "step": 20
22
  },
23
  {
24
- "epoch": 0.09,
25
- "learning_rate": 1.4150943396226415e-05,
26
- "loss": 2.109,
27
  "step": 30
28
  },
29
  {
30
- "epoch": 0.11,
31
- "learning_rate": 1.8867924528301888e-05,
32
- "loss": 1.8715,
33
  "step": 40
34
  },
35
  {
36
- "epoch": 0.14,
37
- "learning_rate": 2.358490566037736e-05,
38
- "loss": 1.5984,
39
  "step": 50
40
  },
41
  {
42
- "epoch": 0.17,
43
- "learning_rate": 2.830188679245283e-05,
44
- "loss": 1.323,
45
  "step": 60
46
  },
47
  {
48
- "epoch": 0.2,
49
- "learning_rate": 3.30188679245283e-05,
50
- "loss": 1.1205,
51
  "step": 70
52
  },
53
  {
54
- "epoch": 0.23,
55
- "learning_rate": 3.7735849056603776e-05,
56
- "loss": 0.9739,
57
  "step": 80
58
  },
59
  {
60
- "epoch": 0.26,
61
- "learning_rate": 4.245283018867925e-05,
62
- "loss": 0.9502,
63
  "step": 90
64
  },
65
  {
66
- "epoch": 0.28,
67
- "learning_rate": 4.716981132075472e-05,
68
- "loss": 0.8486,
69
  "step": 100
70
  },
71
  {
72
- "epoch": 0.31,
73
- "learning_rate": 4.978880675818374e-05,
74
- "loss": 0.7495,
75
  "step": 110
76
  },
77
  {
78
- "epoch": 0.34,
79
- "learning_rate": 4.9260823653643085e-05,
80
- "loss": 0.7036,
81
  "step": 120
82
  },
83
  {
84
- "epoch": 0.37,
85
- "learning_rate": 4.8732840549102435e-05,
86
- "loss": 0.6417,
87
  "step": 130
88
  },
89
  {
90
- "epoch": 0.4,
91
- "learning_rate": 4.820485744456177e-05,
92
- "loss": 0.6558,
93
  "step": 140
94
  },
95
  {
96
- "epoch": 0.43,
97
- "learning_rate": 4.767687434002112e-05,
98
- "loss": 0.5976,
99
  "step": 150
100
  },
101
  {
102
- "epoch": 0.45,
103
- "learning_rate": 4.7148891235480466e-05,
104
- "loss": 0.6296,
105
  "step": 160
106
  },
107
  {
108
- "epoch": 0.48,
109
- "learning_rate": 4.662090813093981e-05,
110
- "loss": 0.6067,
111
  "step": 170
112
  },
113
  {
114
- "epoch": 0.51,
115
- "learning_rate": 4.609292502639916e-05,
116
- "loss": 0.5513,
117
  "step": 180
118
  },
119
  {
120
- "epoch": 0.54,
121
- "learning_rate": 4.55649419218585e-05,
122
- "loss": 0.5577,
123
  "step": 190
124
  },
125
  {
126
- "epoch": 0.57,
127
- "learning_rate": 4.503695881731785e-05,
128
- "loss": 0.5611,
129
  "step": 200
130
  },
131
  {
132
- "epoch": 0.6,
133
- "learning_rate": 4.45089757127772e-05,
134
- "loss": 0.5693,
135
  "step": 210
136
  },
137
  {
138
- "epoch": 0.63,
139
- "learning_rate": 4.398099260823654e-05,
140
- "loss": 0.5354,
141
  "step": 220
142
  },
143
  {
144
- "epoch": 0.65,
145
- "learning_rate": 4.3453009503695884e-05,
146
- "loss": 0.5329,
147
  "step": 230
148
  },
149
  {
150
- "epoch": 0.68,
151
- "learning_rate": 4.292502639915523e-05,
152
- "loss": 0.544,
153
  "step": 240
154
  },
155
  {
156
- "epoch": 0.71,
157
- "learning_rate": 4.239704329461457e-05,
158
- "loss": 0.5865,
159
  "step": 250
160
  },
161
  {
162
- "epoch": 0.74,
163
- "learning_rate": 4.186906019007392e-05,
164
- "loss": 0.4998,
165
  "step": 260
166
  },
167
  {
168
- "epoch": 0.77,
169
- "learning_rate": 4.1341077085533265e-05,
170
- "loss": 0.5228,
171
  "step": 270
172
  },
173
  {
174
- "epoch": 0.8,
175
- "learning_rate": 4.081309398099261e-05,
176
- "loss": 0.4614,
177
  "step": 280
178
  },
179
  {
180
- "epoch": 0.82,
181
- "learning_rate": 4.028511087645195e-05,
182
- "loss": 0.5211,
183
  "step": 290
184
  },
185
  {
186
- "epoch": 0.85,
187
- "learning_rate": 3.97571277719113e-05,
188
- "loss": 0.5343,
189
  "step": 300
190
  },
191
  {
192
- "epoch": 0.88,
193
- "learning_rate": 3.9229144667370646e-05,
194
- "loss": 0.5135,
195
  "step": 310
196
  },
197
  {
198
- "epoch": 0.91,
199
- "learning_rate": 3.870116156282999e-05,
200
- "loss": 0.4749,
201
  "step": 320
202
  },
203
  {
204
- "epoch": 0.94,
205
- "learning_rate": 3.817317845828934e-05,
206
- "loss": 0.4425,
207
  "step": 330
208
  },
209
  {
210
- "epoch": 0.97,
211
- "learning_rate": 3.764519535374868e-05,
212
- "loss": 0.512,
213
  "step": 340
214
  },
215
  {
216
- "epoch": 1.0,
217
- "learning_rate": 3.711721224920803e-05,
218
- "loss": 0.5238,
219
  "step": 350
220
  },
221
  {
222
- "epoch": 1.0,
223
- "eval_accuracy": 0.9512,
224
- "eval_loss": 0.14167343080043793,
225
- "eval_runtime": 20.0773,
226
- "eval_samples_per_second": 249.038,
227
- "eval_steps_per_second": 7.82,
228
- "step": 351
229
- },
230
- {
231
- "epoch": 1.02,
232
- "learning_rate": 3.658922914466738e-05,
233
- "loss": 0.4826,
234
  "step": 360
235
  },
236
  {
237
- "epoch": 1.05,
238
- "learning_rate": 3.6061246040126714e-05,
239
- "loss": 0.4862,
240
  "step": 370
241
  },
242
  {
243
- "epoch": 1.08,
244
- "learning_rate": 3.5533262935586064e-05,
245
- "loss": 0.4651,
246
  "step": 380
247
  },
248
  {
249
- "epoch": 1.11,
250
- "learning_rate": 3.500527983104541e-05,
251
- "loss": 0.4217,
252
  "step": 390
253
  },
254
  {
255
- "epoch": 1.14,
256
- "learning_rate": 3.447729672650475e-05,
257
- "loss": 0.4617,
258
  "step": 400
259
  },
260
  {
261
- "epoch": 1.17,
262
- "learning_rate": 3.3949313621964095e-05,
263
- "loss": 0.4409,
264
  "step": 410
265
  },
266
  {
267
- "epoch": 1.19,
268
- "learning_rate": 3.3421330517423445e-05,
269
- "loss": 0.4056,
270
  "step": 420
271
  },
272
  {
273
- "epoch": 1.22,
274
- "learning_rate": 3.289334741288279e-05,
275
- "loss": 0.4872,
276
  "step": 430
277
  },
278
  {
279
- "epoch": 1.25,
280
- "learning_rate": 3.236536430834213e-05,
281
- "loss": 0.4484,
282
  "step": 440
283
  },
284
  {
285
- "epoch": 1.28,
286
- "learning_rate": 3.183738120380148e-05,
287
- "loss": 0.4289,
288
  "step": 450
289
  },
290
  {
291
- "epoch": 1.31,
292
- "learning_rate": 3.130939809926082e-05,
293
- "loss": 0.4384,
294
  "step": 460
295
  },
296
  {
297
- "epoch": 1.34,
298
- "learning_rate": 3.078141499472017e-05,
299
- "loss": 0.4248,
300
  "step": 470
301
  },
302
  {
303
- "epoch": 1.36,
304
- "learning_rate": 3.0253431890179517e-05,
305
- "loss": 0.4518,
306
  "step": 480
307
  },
308
  {
309
- "epoch": 1.39,
310
- "learning_rate": 2.972544878563886e-05,
311
- "loss": 0.4412,
312
  "step": 490
313
  },
314
  {
315
- "epoch": 1.42,
316
- "learning_rate": 2.9197465681098207e-05,
317
- "loss": 0.4597,
318
  "step": 500
319
  },
320
  {
321
- "epoch": 1.45,
322
- "learning_rate": 2.8669482576557548e-05,
323
- "loss": 0.458,
324
  "step": 510
325
  },
326
  {
327
- "epoch": 1.48,
328
- "learning_rate": 2.8141499472016898e-05,
329
- "loss": 0.4359,
330
  "step": 520
331
  },
332
  {
333
- "epoch": 1.51,
334
- "learning_rate": 2.7613516367476245e-05,
335
- "loss": 0.4051,
336
  "step": 530
337
  },
338
  {
339
- "epoch": 1.54,
340
- "learning_rate": 2.7085533262935585e-05,
341
- "loss": 0.4527,
342
  "step": 540
343
  },
344
  {
345
- "epoch": 1.56,
346
- "learning_rate": 2.6557550158394935e-05,
347
- "loss": 0.4189,
348
  "step": 550
349
  },
350
  {
351
- "epoch": 1.59,
352
- "learning_rate": 2.6029567053854276e-05,
353
- "loss": 0.4014,
354
  "step": 560
355
  },
356
  {
357
- "epoch": 1.62,
358
- "learning_rate": 2.5501583949313622e-05,
359
- "loss": 0.4195,
360
  "step": 570
361
  },
362
  {
363
- "epoch": 1.65,
364
- "learning_rate": 2.497360084477297e-05,
365
- "loss": 0.4497,
366
- "step": 580
367
- },
368
- {
369
- "epoch": 1.68,
370
- "learning_rate": 2.4445617740232313e-05,
371
- "loss": 0.4156,
372
- "step": 590
373
- },
374
- {
375
- "epoch": 1.71,
376
- "learning_rate": 2.391763463569166e-05,
377
- "loss": 0.4268,
378
- "step": 600
379
- },
380
- {
381
- "epoch": 1.73,
382
- "learning_rate": 2.3389651531151003e-05,
383
- "loss": 0.436,
384
- "step": 610
385
- },
386
- {
387
- "epoch": 1.76,
388
- "learning_rate": 2.286166842661035e-05,
389
- "loss": 0.392,
390
- "step": 620
391
- },
392
- {
393
- "epoch": 1.79,
394
- "learning_rate": 2.2333685322069694e-05,
395
- "loss": 0.4059,
396
- "step": 630
397
- },
398
- {
399
- "epoch": 1.82,
400
- "learning_rate": 2.180570221752904e-05,
401
- "loss": 0.4014,
402
- "step": 640
403
- },
404
- {
405
- "epoch": 1.85,
406
- "learning_rate": 2.1277719112988384e-05,
407
- "loss": 0.4304,
408
- "step": 650
409
- },
410
- {
411
- "epoch": 1.88,
412
- "learning_rate": 2.074973600844773e-05,
413
- "loss": 0.3908,
414
- "step": 660
415
- },
416
- {
417
- "epoch": 1.9,
418
- "learning_rate": 2.0221752903907075e-05,
419
- "loss": 0.3963,
420
- "step": 670
421
- },
422
- {
423
- "epoch": 1.93,
424
- "learning_rate": 1.9693769799366422e-05,
425
- "loss": 0.3438,
426
- "step": 680
427
- },
428
- {
429
- "epoch": 1.96,
430
- "learning_rate": 1.9165786694825765e-05,
431
- "loss": 0.3941,
432
- "step": 690
433
- },
434
- {
435
- "epoch": 1.99,
436
- "learning_rate": 1.863780359028511e-05,
437
- "loss": 0.3708,
438
- "step": 700
439
- },
440
- {
441
- "epoch": 2.0,
442
- "eval_accuracy": 0.9688,
443
- "eval_loss": 0.09748782962560654,
444
- "eval_runtime": 20.0916,
445
- "eval_samples_per_second": 248.86,
446
- "eval_steps_per_second": 7.814,
447
- "step": 703
448
- },
449
- {
450
- "epoch": 2.02,
451
- "learning_rate": 1.810982048574446e-05,
452
- "loss": 0.3771,
453
- "step": 710
454
- },
455
- {
456
- "epoch": 2.05,
457
- "learning_rate": 1.7581837381203803e-05,
458
- "loss": 0.3559,
459
- "step": 720
460
- },
461
- {
462
- "epoch": 2.08,
463
- "learning_rate": 1.7053854276663146e-05,
464
- "loss": 0.379,
465
- "step": 730
466
- },
467
- {
468
- "epoch": 2.1,
469
- "learning_rate": 1.6525871172122493e-05,
470
- "loss": 0.3301,
471
- "step": 740
472
- },
473
- {
474
- "epoch": 2.13,
475
- "learning_rate": 1.5997888067581837e-05,
476
- "loss": 0.3691,
477
- "step": 750
478
- },
479
- {
480
- "epoch": 2.16,
481
- "learning_rate": 1.5469904963041184e-05,
482
- "loss": 0.3257,
483
- "step": 760
484
- },
485
- {
486
- "epoch": 2.19,
487
- "learning_rate": 1.4941921858500529e-05,
488
- "loss": 0.3673,
489
- "step": 770
490
- },
491
- {
492
- "epoch": 2.22,
493
- "learning_rate": 1.4413938753959874e-05,
494
- "loss": 0.3701,
495
- "step": 780
496
- },
497
- {
498
- "epoch": 2.25,
499
- "learning_rate": 1.388595564941922e-05,
500
- "loss": 0.3434,
501
- "step": 790
502
- },
503
- {
504
- "epoch": 2.27,
505
- "learning_rate": 1.3357972544878563e-05,
506
- "loss": 0.3811,
507
- "step": 800
508
- },
509
- {
510
- "epoch": 2.3,
511
- "learning_rate": 1.2829989440337912e-05,
512
- "loss": 0.3483,
513
- "step": 810
514
- },
515
- {
516
- "epoch": 2.33,
517
- "learning_rate": 1.2302006335797255e-05,
518
- "loss": 0.369,
519
- "step": 820
520
- },
521
- {
522
- "epoch": 2.36,
523
- "learning_rate": 1.17740232312566e-05,
524
- "loss": 0.4224,
525
- "step": 830
526
- },
527
- {
528
- "epoch": 2.39,
529
- "learning_rate": 1.1246040126715946e-05,
530
- "loss": 0.3386,
531
- "step": 840
532
- },
533
- {
534
- "epoch": 2.42,
535
- "learning_rate": 1.0718057022175291e-05,
536
- "loss": 0.3552,
537
- "step": 850
538
- },
539
- {
540
- "epoch": 2.44,
541
- "learning_rate": 1.0190073917634636e-05,
542
- "loss": 0.3818,
543
- "step": 860
544
- },
545
- {
546
- "epoch": 2.47,
547
- "learning_rate": 9.662090813093982e-06,
548
- "loss": 0.3485,
549
- "step": 870
550
- },
551
- {
552
- "epoch": 2.5,
553
- "learning_rate": 9.134107708553327e-06,
554
- "loss": 0.3713,
555
- "step": 880
556
- },
557
- {
558
- "epoch": 2.53,
559
- "learning_rate": 8.606124604012672e-06,
560
- "loss": 0.3233,
561
- "step": 890
562
- },
563
- {
564
- "epoch": 2.56,
565
- "learning_rate": 8.078141499472017e-06,
566
- "loss": 0.3931,
567
- "step": 900
568
- },
569
- {
570
- "epoch": 2.59,
571
- "learning_rate": 7.5501583949313625e-06,
572
- "loss": 0.3595,
573
- "step": 910
574
- },
575
- {
576
- "epoch": 2.62,
577
- "learning_rate": 7.022175290390708e-06,
578
- "loss": 0.368,
579
- "step": 920
580
- },
581
- {
582
- "epoch": 2.64,
583
- "learning_rate": 6.494192185850054e-06,
584
- "loss": 0.3494,
585
- "step": 930
586
- },
587
- {
588
- "epoch": 2.67,
589
- "learning_rate": 5.966209081309398e-06,
590
- "loss": 0.3388,
591
- "step": 940
592
- },
593
- {
594
- "epoch": 2.7,
595
- "learning_rate": 5.438225976768744e-06,
596
- "loss": 0.3311,
597
- "step": 950
598
- },
599
- {
600
- "epoch": 2.73,
601
- "learning_rate": 4.910242872228089e-06,
602
- "loss": 0.3335,
603
- "step": 960
604
- },
605
- {
606
- "epoch": 2.76,
607
- "learning_rate": 4.382259767687434e-06,
608
- "loss": 0.3763,
609
- "step": 970
610
- },
611
- {
612
- "epoch": 2.79,
613
- "learning_rate": 3.854276663146779e-06,
614
- "loss": 0.3199,
615
- "step": 980
616
- },
617
- {
618
- "epoch": 2.81,
619
- "learning_rate": 3.326293558606125e-06,
620
- "loss": 0.3904,
621
- "step": 990
622
- },
623
- {
624
- "epoch": 2.84,
625
- "learning_rate": 2.79831045406547e-06,
626
- "loss": 0.3893,
627
- "step": 1000
628
- },
629
- {
630
- "epoch": 2.87,
631
- "learning_rate": 2.2703273495248154e-06,
632
- "loss": 0.36,
633
- "step": 1010
634
- },
635
- {
636
- "epoch": 2.9,
637
- "learning_rate": 1.7423442449841606e-06,
638
- "loss": 0.3613,
639
- "step": 1020
640
- },
641
- {
642
- "epoch": 2.93,
643
- "learning_rate": 1.2143611404435059e-06,
644
- "loss": 0.3556,
645
- "step": 1030
646
- },
647
- {
648
- "epoch": 2.96,
649
- "learning_rate": 6.863780359028511e-07,
650
- "loss": 0.3358,
651
- "step": 1040
652
- },
653
- {
654
- "epoch": 2.99,
655
- "learning_rate": 1.5839493136219642e-07,
656
- "loss": 0.3201,
657
- "step": 1050
658
- },
659
- {
660
- "epoch": 2.99,
661
- "eval_accuracy": 0.973,
662
- "eval_loss": 0.079922154545784,
663
- "eval_runtime": 20.1022,
664
- "eval_samples_per_second": 248.73,
665
- "eval_steps_per_second": 7.81,
666
- "step": 1053
667
  },
668
  {
669
- "epoch": 2.99,
670
- "step": 1053,
671
- "total_flos": 3.3497451642252165e+18,
672
- "train_loss": 0.5413832122217669,
673
- "train_runtime": 1364.9822,
674
- "train_samples_per_second": 98.902,
675
- "train_steps_per_second": 0.771
676
  }
677
  ],
678
  "logging_steps": 10,
679
- "max_steps": 1053,
680
  "num_train_epochs": 3,
681
  "save_steps": 500,
682
- "total_flos": 3.3497451642252165e+18,
683
  "trial_name": null,
684
  "trial_params": null
685
  }
 
1
  {
2
+ "best_metric": 0.9803703703703703,
3
+ "best_model_checkpoint": "swin-tiny-patch4-window7-224-finetuned-eurosat/checkpoint-570",
4
+ "epoch": 3.0,
5
  "eval_steps": 500,
6
+ "global_step": 570,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
10
  "log_history": [
11
  {
12
+ "epoch": 0.05,
13
+ "learning_rate": 8.771929824561403e-06,
14
+ "loss": 2.2734,
15
  "step": 10
16
  },
17
  {
18
+ "epoch": 0.11,
19
+ "learning_rate": 1.7543859649122806e-05,
20
+ "loss": 2.0923,
21
  "step": 20
22
  },
23
  {
24
+ "epoch": 0.16,
25
+ "learning_rate": 2.6315789473684212e-05,
26
+ "loss": 1.6401,
27
  "step": 30
28
  },
29
  {
30
+ "epoch": 0.21,
31
+ "learning_rate": 3.508771929824561e-05,
32
+ "loss": 0.9479,
33
  "step": 40
34
  },
35
  {
36
+ "epoch": 0.26,
37
+ "learning_rate": 4.3859649122807014e-05,
38
+ "loss": 0.5664,
39
  "step": 50
40
  },
41
  {
42
+ "epoch": 0.32,
43
+ "learning_rate": 4.970760233918128e-05,
44
+ "loss": 0.476,
45
  "step": 60
46
  },
47
  {
48
+ "epoch": 0.37,
49
+ "learning_rate": 4.8732943469785574e-05,
50
+ "loss": 0.4555,
51
  "step": 70
52
  },
53
  {
54
+ "epoch": 0.42,
55
+ "learning_rate": 4.7758284600389865e-05,
56
+ "loss": 0.3879,
57
  "step": 80
58
  },
59
  {
60
+ "epoch": 0.47,
61
+ "learning_rate": 4.678362573099415e-05,
62
+ "loss": 0.3706,
63
  "step": 90
64
  },
65
  {
66
+ "epoch": 0.53,
67
+ "learning_rate": 4.580896686159844e-05,
68
+ "loss": 0.394,
69
  "step": 100
70
  },
71
  {
72
+ "epoch": 0.58,
73
+ "learning_rate": 4.483430799220273e-05,
74
+ "loss": 0.344,
75
  "step": 110
76
  },
77
  {
78
+ "epoch": 0.63,
79
+ "learning_rate": 4.3859649122807014e-05,
80
+ "loss": 0.2958,
81
  "step": 120
82
  },
83
  {
84
+ "epoch": 0.68,
85
+ "learning_rate": 4.2884990253411305e-05,
86
+ "loss": 0.2769,
87
  "step": 130
88
  },
89
  {
90
+ "epoch": 0.74,
91
+ "learning_rate": 4.1910331384015596e-05,
92
+ "loss": 0.3316,
93
  "step": 140
94
  },
95
  {
96
+ "epoch": 0.79,
97
+ "learning_rate": 4.093567251461988e-05,
98
+ "loss": 0.2917,
99
  "step": 150
100
  },
101
  {
102
+ "epoch": 0.84,
103
+ "learning_rate": 3.996101364522417e-05,
104
+ "loss": 0.2905,
105
  "step": 160
106
  },
107
  {
108
+ "epoch": 0.89,
109
+ "learning_rate": 3.898635477582846e-05,
110
+ "loss": 0.2667,
111
  "step": 170
112
  },
113
  {
114
+ "epoch": 0.95,
115
+ "learning_rate": 3.8011695906432746e-05,
116
+ "loss": 0.2338,
117
  "step": 180
118
  },
119
  {
120
+ "epoch": 1.0,
121
+ "learning_rate": 3.7037037037037037e-05,
122
+ "loss": 0.2484,
123
  "step": 190
124
  },
125
  {
126
+ "epoch": 1.0,
127
+ "eval_accuracy": 0.9685185185185186,
128
+ "eval_loss": 0.1036255806684494,
129
+ "eval_runtime": 5.1297,
130
+ "eval_samples_per_second": 526.346,
131
+ "eval_steps_per_second": 16.57,
132
+ "step": 190
133
+ },
134
+ {
135
+ "epoch": 1.05,
136
+ "learning_rate": 3.606237816764133e-05,
137
+ "loss": 0.2245,
138
  "step": 200
139
  },
140
  {
141
+ "epoch": 1.11,
142
+ "learning_rate": 3.508771929824561e-05,
143
+ "loss": 0.2082,
144
  "step": 210
145
  },
146
  {
147
+ "epoch": 1.16,
148
+ "learning_rate": 3.41130604288499e-05,
149
+ "loss": 0.2787,
150
  "step": 220
151
  },
152
  {
153
+ "epoch": 1.21,
154
+ "learning_rate": 3.313840155945419e-05,
155
+ "loss": 0.2265,
156
  "step": 230
157
  },
158
  {
159
+ "epoch": 1.26,
160
+ "learning_rate": 3.216374269005848e-05,
161
+ "loss": 0.2218,
162
  "step": 240
163
  },
164
  {
165
+ "epoch": 1.32,
166
+ "learning_rate": 3.118908382066277e-05,
167
+ "loss": 0.2426,
168
  "step": 250
169
  },
170
  {
171
+ "epoch": 1.37,
172
+ "learning_rate": 3.0214424951267055e-05,
173
+ "loss": 0.2044,
174
  "step": 260
175
  },
176
  {
177
+ "epoch": 1.42,
178
+ "learning_rate": 2.9239766081871346e-05,
179
+ "loss": 0.2025,
180
  "step": 270
181
  },
182
  {
183
+ "epoch": 1.47,
184
+ "learning_rate": 2.8265107212475634e-05,
185
+ "loss": 0.2021,
186
  "step": 280
187
  },
188
  {
189
+ "epoch": 1.53,
190
+ "learning_rate": 2.729044834307992e-05,
191
+ "loss": 0.1925,
192
  "step": 290
193
  },
194
  {
195
+ "epoch": 1.58,
196
+ "learning_rate": 2.6315789473684212e-05,
197
+ "loss": 0.2151,
198
  "step": 300
199
  },
200
  {
201
+ "epoch": 1.63,
202
+ "learning_rate": 2.53411306042885e-05,
203
+ "loss": 0.1906,
204
  "step": 310
205
  },
206
  {
207
+ "epoch": 1.68,
208
+ "learning_rate": 2.4366471734892787e-05,
209
+ "loss": 0.1787,
210
  "step": 320
211
  },
212
  {
213
+ "epoch": 1.74,
214
+ "learning_rate": 2.3391812865497074e-05,
215
+ "loss": 0.1393,
216
  "step": 330
217
  },
218
  {
219
+ "epoch": 1.79,
220
+ "learning_rate": 2.2417153996101365e-05,
221
+ "loss": 0.1516,
222
  "step": 340
223
  },
224
  {
225
+ "epoch": 1.84,
226
+ "learning_rate": 2.1442495126705653e-05,
227
+ "loss": 0.164,
228
  "step": 350
229
  },
230
  {
231
+ "epoch": 1.89,
232
+ "learning_rate": 2.046783625730994e-05,
233
+ "loss": 0.1747,
234
  "step": 360
235
  },
236
  {
237
+ "epoch": 1.95,
238
+ "learning_rate": 1.949317738791423e-05,
239
+ "loss": 0.1964,
240
  "step": 370
241
  },
242
  {
243
+ "epoch": 2.0,
244
+ "learning_rate": 1.8518518518518518e-05,
245
+ "loss": 0.219,
246
  "step": 380
247
  },
248
  {
249
+ "epoch": 2.0,
250
+ "eval_accuracy": 0.9729629629629629,
251
+ "eval_loss": 0.08252211660146713,
252
+ "eval_runtime": 4.4943,
253
+ "eval_samples_per_second": 600.762,
254
+ "eval_steps_per_second": 18.913,
255
+ "step": 380
256
+ },
257
+ {
258
+ "epoch": 2.05,
259
+ "learning_rate": 1.7543859649122806e-05,
260
+ "loss": 0.2044,
261
  "step": 390
262
  },
263
  {
264
+ "epoch": 2.11,
265
+ "learning_rate": 1.6569200779727097e-05,
266
+ "loss": 0.1499,
267
  "step": 400
268
  },
269
  {
270
+ "epoch": 2.16,
271
+ "learning_rate": 1.5594541910331384e-05,
272
+ "loss": 0.1401,
273
  "step": 410
274
  },
275
  {
276
+ "epoch": 2.21,
277
+ "learning_rate": 1.4619883040935673e-05,
278
+ "loss": 0.1596,
279
  "step": 420
280
  },
281
  {
282
+ "epoch": 2.26,
283
+ "learning_rate": 1.364522417153996e-05,
284
+ "loss": 0.163,
285
  "step": 430
286
  },
287
  {
288
+ "epoch": 2.32,
289
+ "learning_rate": 1.267056530214425e-05,
290
+ "loss": 0.1403,
291
  "step": 440
292
  },
293
  {
294
+ "epoch": 2.37,
295
+ "learning_rate": 1.1695906432748537e-05,
296
+ "loss": 0.1767,
297
  "step": 450
298
  },
299
  {
300
+ "epoch": 2.42,
301
+ "learning_rate": 1.0721247563352826e-05,
302
+ "loss": 0.134,
303
  "step": 460
304
  },
305
  {
306
+ "epoch": 2.47,
307
+ "learning_rate": 9.746588693957115e-06,
308
+ "loss": 0.1522,
309
  "step": 470
310
  },
311
  {
312
+ "epoch": 2.53,
313
+ "learning_rate": 8.771929824561403e-06,
314
+ "loss": 0.14,
315
  "step": 480
316
  },
317
  {
318
+ "epoch": 2.58,
319
+ "learning_rate": 7.797270955165692e-06,
320
+ "loss": 0.129,
321
  "step": 490
322
  },
323
  {
324
+ "epoch": 2.63,
325
+ "learning_rate": 6.82261208576998e-06,
326
+ "loss": 0.1437,
327
  "step": 500
328
  },
329
  {
330
+ "epoch": 2.68,
331
+ "learning_rate": 5.8479532163742686e-06,
332
+ "loss": 0.1594,
333
  "step": 510
334
  },
335
  {
336
+ "epoch": 2.74,
337
+ "learning_rate": 4.873294346978558e-06,
338
+ "loss": 0.1436,
339
  "step": 520
340
  },
341
  {
342
+ "epoch": 2.79,
343
+ "learning_rate": 3.898635477582846e-06,
344
+ "loss": 0.1386,
345
  "step": 530
346
  },
347
  {
348
+ "epoch": 2.84,
349
+ "learning_rate": 2.9239766081871343e-06,
350
+ "loss": 0.1363,
351
  "step": 540
352
  },
353
  {
354
+ "epoch": 2.89,
355
+ "learning_rate": 1.949317738791423e-06,
356
+ "loss": 0.105,
357
  "step": 550
358
  },
359
  {
360
+ "epoch": 2.95,
361
+ "learning_rate": 9.746588693957115e-07,
362
+ "loss": 0.1413,
363
  "step": 560
364
  },
365
  {
366
+ "epoch": 3.0,
367
+ "learning_rate": 0.0,
368
+ "loss": 0.121,
369
  "step": 570
370
  },
371
  {
372
+ "epoch": 3.0,
373
+ "eval_accuracy": 0.9803703703703703,
374
+ "eval_loss": 0.059159792959690094,
375
+ "eval_runtime": 4.6082,
376
+ "eval_samples_per_second": 585.917,
377
+ "eval_steps_per_second": 18.446,
378
+ "step": 570
379
  },
380
  {
381
+ "epoch": 3.0,
382
+ "step": 570,
383
+ "total_flos": 1.8124066505760768e+18,
384
+ "train_loss": 0.32973566536317794,
385
+ "train_runtime": 250.3967,
386
+ "train_samples_per_second": 291.138,
387
+ "train_steps_per_second": 2.276
388
  }
389
  ],
390
  "logging_steps": 10,
391
+ "max_steps": 570,
392
  "num_train_epochs": 3,
393
  "save_steps": 500,
394
+ "total_flos": 1.8124066505760768e+18,
395
  "trial_name": null,
396
  "trial_params": null
397
  }
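The `trainer_state.json` diff above mostly replaces one run's `log_history` (1053 steps over cifar10) with another (570 steps over the new data). Since the file keeps every logged point, the loss and accuracy curves can be recovered from it directly; a minimal sketch, assuming the file is read from the repository root:

```python
# Sketch: extracting loss/accuracy curves from the log_history in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_accuracy"]) for e in state["log_history"] if "eval_accuracy" in e]

print("best metric:", state["best_metric"])                # 0.9803703703703703
print("best checkpoint:", state["best_model_checkpoint"])  # .../checkpoint-570
print("final training loss:", train_points[-1])            # (570, 0.121)
print("eval accuracy by epoch:", eval_points)              # steps 190, 380, 570
```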
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:40c3316f98f195515c4a8443e601de9ed3c10d163cb3811e80f17e5b22321810
+oid sha256:970cd5a0062561b5253d54acfb6e6f947d5f5a6529c933acf60a03d0c624934c
 size 4091
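`training_args.bin` is a pickled `TrainingArguments` object, so only its serialized bytes change here (the size stays 4091 bytes). If needed, it can be inspected with `torch.load`; a hedged sketch, which requires a compatible `transformers` version to unpickle:

```python
# Sketch: inspecting the pickled TrainingArguments stored in training_args.bin.
# Unpickling requires a transformers version compatible with the one used for training.
import torch

training_args = torch.load("training_args.bin", weights_only=False)
print(training_args.num_train_epochs)  # 3, matching trainer_state.json
print(training_args.logging_steps)     # 10
print(training_args.save_steps)        # 500
```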