MaulikMadhavi committed on
Commit 1ad31c5
1 Parent(s): 156ae5a

trained oxford-flowers

README.md ADDED
@@ -0,0 +1,78 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - image-classification
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ model-index:
+ - name: vit-base-flowers102
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vit-base-flowers102
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the nelorth/oxford-flowers dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0770
+ - Accuracy: 0.9853
+
+ ## Model description
+
+ The model is a ViT-Base encoder (16×16 patches, 224×224 input) pre-trained on ImageNet-21k, fine-tuned with a 102-way classification head for flower recognition.
+
+ ## Intended uses & limitations
+
+ The model is intended for classifying photos into the 102 flower categories of the Oxford Flowers dataset; behaviour on images outside that domain is not covered by the reported results.
+
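+ As a minimal usage sketch (the repository id below is assumed from the model name and the committer's namespace; adjust it to wherever this checkpoint is hosted), the model can be loaded with the `transformers` image-classification pipeline:
+
+ ```python
+ from transformers import pipeline
+
+ # Assumed repository id; replace with the actual location of this checkpoint.
+ classifier = pipeline("image-classification", model="MaulikMadhavi/vit-base-flowers102")
+
+ # "flower.jpg" is a placeholder path to an input image.
+ predictions = classifier("flower.jpg", top_k=3)
+ print(predictions)  # list of {"label": ..., "score": ...} dictionaries
+ ```
+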
+ ## Training and evaluation data
+
+ Fine-tuning and evaluation use the [nelorth/oxford-flowers](https://huggingface.co/datasets/nelorth/oxford-flowers) dataset (Oxford 102 Flowers); the accuracy above is measured on its evaluation split.
+
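+ For reference, the dataset named above can be pulled from the Hub with the `datasets` library (a sketch; check the dataset card for the exact split names):
+
+ ```python
+ from datasets import load_dataset
+
+ # Dataset id taken from this card; inspect the returned DatasetDict for its splits.
+ dataset = load_dataset("nelorth/oxford-flowers")
+ print(dataset)
+ ```
+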
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 4
+ - mixed_precision_training: Native AMP
+
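+ These values map onto a `TrainingArguments` configuration roughly like the sketch below; the evaluation, logging and save intervals are taken from `trainer_state.json`, and every other argument is assumed to keep its default:
+
+ ```python
+ from transformers import TrainingArguments
+
+ # A sketch reproducing the listed hyperparameters (Transformers 4.35).
+ training_args = TrainingArguments(
+     output_dir="./vit-base-flowers102",
+     learning_rate=2e-4,
+     per_device_train_batch_size=16,
+     per_device_eval_batch_size=8,
+     seed=42,
+     lr_scheduler_type="linear",
+     num_train_epochs=4,
+     fp16=True,  # "Native AMP" mixed-precision training
+     evaluation_strategy="steps",
+     eval_steps=100,
+     logging_steps=10,
+     save_steps=100,
+ )
+ ```
+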
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 2.5779 | 0.22 | 100 | 2.8895 | 0.7775 |
+ | 1.2226 | 0.45 | 200 | 1.5942 | 0.9255 |
+ | 0.606 | 0.67 | 300 | 0.8012 | 0.9529 |
+ | 0.3413 | 0.89 | 400 | 0.4845 | 0.9706 |
+ | 0.1571 | 1.11 | 500 | 0.2611 | 0.9814 |
+ | 0.1237 | 1.34 | 600 | 0.1691 | 0.9784 |
+ | 0.049 | 1.56 | 700 | 0.1146 | 0.9892 |
+ | 0.0763 | 1.78 | 800 | 0.1209 | 0.9863 |
+ | 0.0864 | 2.0 | 900 | 0.1223 | 0.9804 |
+ | 0.0786 | 2.23 | 1000 | 0.1075 | 0.9833 |
+ | 0.0269 | 2.45 | 1100 | 0.0919 | 0.9843 |
+ | 0.0178 | 2.67 | 1200 | 0.0795 | 0.9873 |
+ | 0.0165 | 2.9 | 1300 | 0.0727 | 0.9873 |
+ | 0.0144 | 3.12 | 1400 | 0.0784 | 0.9853 |
+ | 0.0138 | 3.34 | 1500 | 0.0759 | 0.9853 |
+ | 0.0135 | 3.56 | 1600 | 0.0737 | 0.9863 |
+ | 0.0123 | 3.79 | 1700 | 0.0770 | 0.9853 |
+
+ ### Framework versions
+
+ - Transformers 4.35.2
+ - Pytorch 2.1.0+cu121
+ - Datasets 2.16.1
+ - Tokenizers 0.15.0
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "total_flos": 2.2241519461722194e+18,
+     "train_loss": 0.4137112680814314,
+     "train_runtime": 1023.9197,
+     "train_samples_per_second": 28.006,
+     "train_steps_per_second": 1.754
+ }
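The throughput figures above are internally consistent with the step count and runtime recorded in `trainer_state.json` (1,796 optimization steps over roughly 1,024 seconds); a quick check:

```python
# Values copied from all_results.json / trainer_state.json
train_runtime = 1023.9197   # seconds
total_steps = 1796          # max_steps in trainer_state.json

print(total_steps / train_runtime)  # ~1.754, matching "train_steps_per_second"
```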
config.json ADDED
@@ -0,0 +1,232 @@
1
+ {
2
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
3
+ "architectures": [
4
+ "ViTForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 768,
11
+ "id2label": {
12
+ "0": "1",
13
+ "1": "10",
14
+ "10": "16",
15
+ "100": "98",
16
+ "101": "99",
17
+ "11": "17",
18
+ "12": "18",
19
+ "13": "19",
20
+ "14": "2",
21
+ "15": "20",
22
+ "16": "21",
23
+ "17": "22",
24
+ "18": "23",
25
+ "19": "24",
26
+ "2": "100",
27
+ "20": "25",
28
+ "21": "26",
29
+ "22": "27",
30
+ "23": "28",
31
+ "24": "29",
32
+ "25": "3",
33
+ "26": "30",
34
+ "27": "31",
35
+ "28": "32",
36
+ "29": "33",
37
+ "3": "101",
38
+ "30": "34",
39
+ "31": "35",
40
+ "32": "36",
41
+ "33": "37",
42
+ "34": "38",
43
+ "35": "39",
44
+ "36": "4",
45
+ "37": "40",
46
+ "38": "41",
47
+ "39": "42",
48
+ "4": "102",
49
+ "40": "43",
50
+ "41": "44",
51
+ "42": "45",
52
+ "43": "46",
53
+ "44": "47",
54
+ "45": "48",
55
+ "46": "49",
56
+ "47": "5",
57
+ "48": "50",
58
+ "49": "51",
59
+ "5": "11",
60
+ "50": "52",
61
+ "51": "53",
62
+ "52": "54",
63
+ "53": "55",
64
+ "54": "56",
65
+ "55": "57",
66
+ "56": "58",
67
+ "57": "59",
68
+ "58": "6",
69
+ "59": "60",
70
+ "6": "12",
71
+ "60": "61",
72
+ "61": "62",
73
+ "62": "63",
74
+ "63": "64",
75
+ "64": "65",
76
+ "65": "66",
77
+ "66": "67",
78
+ "67": "68",
79
+ "68": "69",
80
+ "69": "7",
81
+ "7": "13",
82
+ "70": "70",
83
+ "71": "71",
84
+ "72": "72",
85
+ "73": "73",
86
+ "74": "74",
87
+ "75": "75",
88
+ "76": "76",
89
+ "77": "77",
90
+ "78": "78",
91
+ "79": "79",
92
+ "8": "14",
93
+ "80": "8",
94
+ "81": "80",
95
+ "82": "81",
96
+ "83": "82",
97
+ "84": "83",
98
+ "85": "84",
99
+ "86": "85",
100
+ "87": "86",
101
+ "88": "87",
102
+ "89": "88",
103
+ "9": "15",
104
+ "90": "89",
105
+ "91": "9",
106
+ "92": "90",
107
+ "93": "91",
108
+ "94": "92",
109
+ "95": "93",
110
+ "96": "94",
111
+ "97": "95",
112
+ "98": "96",
113
+ "99": "97"
114
+ },
115
+ "image_size": 224,
116
+ "initializer_range": 0.02,
117
+ "intermediate_size": 3072,
118
+ "label2id": {
119
+ "1": "0",
120
+ "10": "1",
121
+ "100": "2",
122
+ "101": "3",
123
+ "102": "4",
124
+ "11": "5",
125
+ "12": "6",
126
+ "13": "7",
127
+ "14": "8",
128
+ "15": "9",
129
+ "16": "10",
130
+ "17": "11",
131
+ "18": "12",
132
+ "19": "13",
133
+ "2": "14",
134
+ "20": "15",
135
+ "21": "16",
136
+ "22": "17",
137
+ "23": "18",
138
+ "24": "19",
139
+ "25": "20",
140
+ "26": "21",
141
+ "27": "22",
142
+ "28": "23",
143
+ "29": "24",
144
+ "3": "25",
145
+ "30": "26",
146
+ "31": "27",
147
+ "32": "28",
148
+ "33": "29",
149
+ "34": "30",
150
+ "35": "31",
151
+ "36": "32",
152
+ "37": "33",
153
+ "38": "34",
154
+ "39": "35",
155
+ "4": "36",
156
+ "40": "37",
157
+ "41": "38",
158
+ "42": "39",
159
+ "43": "40",
160
+ "44": "41",
161
+ "45": "42",
162
+ "46": "43",
163
+ "47": "44",
164
+ "48": "45",
165
+ "49": "46",
166
+ "5": "47",
167
+ "50": "48",
168
+ "51": "49",
169
+ "52": "50",
170
+ "53": "51",
171
+ "54": "52",
172
+ "55": "53",
173
+ "56": "54",
174
+ "57": "55",
175
+ "58": "56",
176
+ "59": "57",
177
+ "6": "58",
178
+ "60": "59",
179
+ "61": "60",
180
+ "62": "61",
181
+ "63": "62",
182
+ "64": "63",
183
+ "65": "64",
184
+ "66": "65",
185
+ "67": "66",
186
+ "68": "67",
187
+ "69": "68",
188
+ "7": "69",
189
+ "70": "70",
190
+ "71": "71",
191
+ "72": "72",
192
+ "73": "73",
193
+ "74": "74",
194
+ "75": "75",
195
+ "76": "76",
196
+ "77": "77",
197
+ "78": "78",
198
+ "79": "79",
199
+ "8": "80",
200
+ "80": "81",
201
+ "81": "82",
202
+ "82": "83",
203
+ "83": "84",
204
+ "84": "85",
205
+ "85": "86",
206
+ "86": "87",
207
+ "87": "88",
208
+ "88": "89",
209
+ "89": "90",
210
+ "9": "91",
211
+ "90": "92",
212
+ "91": "93",
213
+ "92": "94",
214
+ "93": "95",
215
+ "94": "96",
216
+ "95": "97",
217
+ "96": "98",
218
+ "97": "99",
219
+ "98": "100",
220
+ "99": "101"
221
+ },
222
+ "layer_norm_eps": 1e-12,
223
+ "model_type": "vit",
224
+ "num_attention_heads": 12,
225
+ "num_channels": 3,
226
+ "num_hidden_layers": 12,
227
+ "patch_size": 16,
228
+ "problem_type": "single_label_classification",
229
+ "qkv_bias": true,
230
+ "torch_dtype": "float32",
231
+ "transformers_version": "4.35.2"
232
+ }
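The `id2label` / `label2id` tables above map the classifier's output indices to the original nelorth/oxford-flowers label names, which are themselves numeric strings ("1" to "102"). A minimal sketch of mapping a prediction back to a dataset label, assuming the files in this commit are available locally under `./vit-base-flowers102`:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_dir = "./vit-base-flowers102"  # assumed local path to the files in this commit
processor = AutoImageProcessor.from_pretrained(model_dir)
model = AutoModelForImageClassification.from_pretrained(model_dir)

image = Image.open("flower.jpg")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_index = logits.argmax(-1).item()
print(model.config.id2label[predicted_index])  # original dataset label, e.g. "42"
```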
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76bee5ebdb3ee0625a0175f243125c94ee5cbf2b7f2dc1052a23c96c38f9d363
+ size 343531584
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+     "do_normalize": true,
+     "do_rescale": true,
+     "do_resize": true,
+     "image_mean": [
+         0.5,
+         0.5,
+         0.5
+     ],
+     "image_processor_type": "ViTFeatureExtractor",
+     "image_std": [
+         0.5,
+         0.5,
+         0.5
+     ],
+     "resample": 2,
+     "rescale_factor": 0.00392156862745098,
+     "size": {
+         "height": 224,
+         "width": 224
+     }
+ }
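For reference, the settings above describe a resize to 224×224 with bilinear resampling (`resample: 2`), a rescale by 1/255, and per-channel normalization with mean and standard deviation 0.5. A rough torchvision equivalent is sketched below; in practice the saved image processor should be loaded directly:

```python
from torchvision import transforms

# Approximate equivalent of the saved ViT image-processor settings.
preprocess = transforms.Compose([
    transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),  # scales pixel values by 1/255 (rescale_factor)
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
```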
runs/Jan01_02-35-15_a5b36194356f/events.out.tfevents.1704076520.a5b36194356f.3303.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ef94daf7dd39eb906248e7c9e8e5e53d11e49d68ee860fa7151c375f48f1659
+ size 41282
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 4.0,
+     "total_flos": 2.2241519461722194e+18,
+     "train_loss": 0.4137112680814314,
+     "train_runtime": 1023.9197,
+     "train_samples_per_second": 28.006,
+     "train_steps_per_second": 1.754
+ }
trainer_state.json ADDED
@@ -0,0 +1,1255 @@
1
+ {
2
+ "best_metric": 0.07266250252723694,
3
+ "best_model_checkpoint": "./vit-base-flowers102/checkpoint-1300",
4
+ "epoch": 4.0,
5
+ "eval_steps": 100,
6
+ "global_step": 1796,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.02,
13
+ "learning_rate": 0.00019888641425389755,
14
+ "loss": 4.5016,
15
+ "step": 10
16
+ },
17
+ {
18
+ "epoch": 0.04,
19
+ "learning_rate": 0.00019777282850779511,
20
+ "loss": 4.311,
21
+ "step": 20
22
+ },
23
+ {
24
+ "epoch": 0.07,
25
+ "learning_rate": 0.00019665924276169265,
26
+ "loss": 4.1383,
27
+ "step": 30
28
+ },
29
+ {
30
+ "epoch": 0.09,
31
+ "learning_rate": 0.0001955456570155902,
32
+ "loss": 3.8337,
33
+ "step": 40
34
+ },
35
+ {
36
+ "epoch": 0.11,
37
+ "learning_rate": 0.00019443207126948776,
38
+ "loss": 3.6639,
39
+ "step": 50
40
+ },
41
+ {
42
+ "epoch": 0.13,
43
+ "learning_rate": 0.00019331848552338533,
44
+ "loss": 3.4038,
45
+ "step": 60
46
+ },
47
+ {
48
+ "epoch": 0.16,
49
+ "learning_rate": 0.00019220489977728286,
50
+ "loss": 3.2495,
51
+ "step": 70
52
+ },
53
+ {
54
+ "epoch": 0.18,
55
+ "learning_rate": 0.0001910913140311804,
56
+ "loss": 2.9898,
57
+ "step": 80
58
+ },
59
+ {
60
+ "epoch": 0.2,
61
+ "learning_rate": 0.00018997772828507797,
62
+ "loss": 2.8136,
63
+ "step": 90
64
+ },
65
+ {
66
+ "epoch": 0.22,
67
+ "learning_rate": 0.0001888641425389755,
68
+ "loss": 2.5779,
69
+ "step": 100
70
+ },
71
+ {
72
+ "epoch": 0.22,
73
+ "eval_accuracy": 0.7774509803921569,
74
+ "eval_loss": 2.8894846439361572,
75
+ "eval_runtime": 16.9816,
76
+ "eval_samples_per_second": 60.065,
77
+ "eval_steps_per_second": 7.538,
78
+ "step": 100
79
+ },
80
+ {
81
+ "epoch": 0.24,
82
+ "learning_rate": 0.00018775055679287305,
83
+ "loss": 2.4361,
84
+ "step": 110
85
+ },
86
+ {
87
+ "epoch": 0.27,
88
+ "learning_rate": 0.00018663697104677061,
89
+ "loss": 2.2851,
90
+ "step": 120
91
+ },
92
+ {
93
+ "epoch": 0.29,
94
+ "learning_rate": 0.00018552338530066815,
95
+ "loss": 2.1522,
96
+ "step": 130
97
+ },
98
+ {
99
+ "epoch": 0.31,
100
+ "learning_rate": 0.00018440979955456572,
101
+ "loss": 2.0138,
102
+ "step": 140
103
+ },
104
+ {
105
+ "epoch": 0.33,
106
+ "learning_rate": 0.00018329621380846326,
107
+ "loss": 1.878,
108
+ "step": 150
109
+ },
110
+ {
111
+ "epoch": 0.36,
112
+ "learning_rate": 0.00018218262806236082,
113
+ "loss": 1.7814,
114
+ "step": 160
115
+ },
116
+ {
117
+ "epoch": 0.38,
118
+ "learning_rate": 0.00018106904231625836,
119
+ "loss": 1.5264,
120
+ "step": 170
121
+ },
122
+ {
123
+ "epoch": 0.4,
124
+ "learning_rate": 0.0001799554565701559,
125
+ "loss": 1.5172,
126
+ "step": 180
127
+ },
128
+ {
129
+ "epoch": 0.42,
130
+ "learning_rate": 0.00017884187082405347,
131
+ "loss": 1.4431,
132
+ "step": 190
133
+ },
134
+ {
135
+ "epoch": 0.45,
136
+ "learning_rate": 0.000177728285077951,
137
+ "loss": 1.2226,
138
+ "step": 200
139
+ },
140
+ {
141
+ "epoch": 0.45,
142
+ "eval_accuracy": 0.9254901960784314,
143
+ "eval_loss": 1.5941656827926636,
144
+ "eval_runtime": 16.7137,
145
+ "eval_samples_per_second": 61.028,
146
+ "eval_steps_per_second": 7.658,
147
+ "step": 200
148
+ },
149
+ {
150
+ "epoch": 0.47,
151
+ "learning_rate": 0.00017661469933184855,
152
+ "loss": 1.2047,
153
+ "step": 210
154
+ },
155
+ {
156
+ "epoch": 0.49,
157
+ "learning_rate": 0.00017550111358574611,
158
+ "loss": 1.2643,
159
+ "step": 220
160
+ },
161
+ {
162
+ "epoch": 0.51,
163
+ "learning_rate": 0.00017438752783964368,
164
+ "loss": 1.1202,
165
+ "step": 230
166
+ },
167
+ {
168
+ "epoch": 0.53,
169
+ "learning_rate": 0.00017327394209354122,
170
+ "loss": 0.9402,
171
+ "step": 240
172
+ },
173
+ {
174
+ "epoch": 0.56,
175
+ "learning_rate": 0.00017216035634743876,
176
+ "loss": 0.8425,
177
+ "step": 250
178
+ },
179
+ {
180
+ "epoch": 0.58,
181
+ "learning_rate": 0.00017104677060133632,
182
+ "loss": 0.7436,
183
+ "step": 260
184
+ },
185
+ {
186
+ "epoch": 0.6,
187
+ "learning_rate": 0.00016993318485523386,
188
+ "loss": 0.6685,
189
+ "step": 270
190
+ },
191
+ {
192
+ "epoch": 0.62,
193
+ "learning_rate": 0.0001688195991091314,
194
+ "loss": 0.6464,
195
+ "step": 280
196
+ },
197
+ {
198
+ "epoch": 0.65,
199
+ "learning_rate": 0.00016770601336302897,
200
+ "loss": 0.5846,
201
+ "step": 290
202
+ },
203
+ {
204
+ "epoch": 0.67,
205
+ "learning_rate": 0.0001665924276169265,
206
+ "loss": 0.606,
207
+ "step": 300
208
+ },
209
+ {
210
+ "epoch": 0.67,
211
+ "eval_accuracy": 0.9529411764705882,
212
+ "eval_loss": 0.8011782169342041,
213
+ "eval_runtime": 17.625,
214
+ "eval_samples_per_second": 57.872,
215
+ "eval_steps_per_second": 7.262,
216
+ "step": 300
217
+ },
218
+ {
219
+ "epoch": 0.69,
220
+ "learning_rate": 0.00016547884187082405,
221
+ "loss": 0.5889,
222
+ "step": 310
223
+ },
224
+ {
225
+ "epoch": 0.71,
226
+ "learning_rate": 0.0001643652561247216,
227
+ "loss": 0.5238,
228
+ "step": 320
229
+ },
230
+ {
231
+ "epoch": 0.73,
232
+ "learning_rate": 0.0001633630289532294,
233
+ "loss": 0.5481,
234
+ "step": 330
235
+ },
236
+ {
237
+ "epoch": 0.76,
238
+ "learning_rate": 0.00016224944320712695,
239
+ "loss": 0.5891,
240
+ "step": 340
241
+ },
242
+ {
243
+ "epoch": 0.78,
244
+ "learning_rate": 0.00016113585746102451,
245
+ "loss": 0.4343,
246
+ "step": 350
247
+ },
248
+ {
249
+ "epoch": 0.8,
250
+ "learning_rate": 0.00016002227171492205,
251
+ "loss": 0.4681,
252
+ "step": 360
253
+ },
254
+ {
255
+ "epoch": 0.82,
256
+ "learning_rate": 0.0001589086859688196,
257
+ "loss": 0.4484,
258
+ "step": 370
259
+ },
260
+ {
261
+ "epoch": 0.85,
262
+ "learning_rate": 0.00015779510022271716,
263
+ "loss": 0.3993,
264
+ "step": 380
265
+ },
266
+ {
267
+ "epoch": 0.87,
268
+ "learning_rate": 0.0001566815144766147,
269
+ "loss": 0.4087,
270
+ "step": 390
271
+ },
272
+ {
273
+ "epoch": 0.89,
274
+ "learning_rate": 0.00015556792873051224,
275
+ "loss": 0.3413,
276
+ "step": 400
277
+ },
278
+ {
279
+ "epoch": 0.89,
280
+ "eval_accuracy": 0.9705882352941176,
281
+ "eval_loss": 0.4845229685306549,
282
+ "eval_runtime": 16.4436,
283
+ "eval_samples_per_second": 62.03,
284
+ "eval_steps_per_second": 7.784,
285
+ "step": 400
286
+ },
287
+ {
288
+ "epoch": 0.91,
289
+ "learning_rate": 0.0001544543429844098,
290
+ "loss": 0.3052,
291
+ "step": 410
292
+ },
293
+ {
294
+ "epoch": 0.94,
295
+ "learning_rate": 0.00015334075723830737,
296
+ "loss": 0.2869,
297
+ "step": 420
298
+ },
299
+ {
300
+ "epoch": 0.96,
301
+ "learning_rate": 0.0001522271714922049,
302
+ "loss": 0.3613,
303
+ "step": 430
304
+ },
305
+ {
306
+ "epoch": 0.98,
307
+ "learning_rate": 0.00015111358574610245,
308
+ "loss": 0.3189,
309
+ "step": 440
310
+ },
311
+ {
312
+ "epoch": 1.0,
313
+ "learning_rate": 0.00015000000000000001,
314
+ "loss": 0.2199,
315
+ "step": 450
316
+ },
317
+ {
318
+ "epoch": 1.02,
319
+ "learning_rate": 0.00014888641425389755,
320
+ "loss": 0.1689,
321
+ "step": 460
322
+ },
323
+ {
324
+ "epoch": 1.05,
325
+ "learning_rate": 0.0001477728285077951,
326
+ "loss": 0.2321,
327
+ "step": 470
328
+ },
329
+ {
330
+ "epoch": 1.07,
331
+ "learning_rate": 0.00014665924276169266,
332
+ "loss": 0.1722,
333
+ "step": 480
334
+ },
335
+ {
336
+ "epoch": 1.09,
337
+ "learning_rate": 0.0001455456570155902,
338
+ "loss": 0.1581,
339
+ "step": 490
340
+ },
341
+ {
342
+ "epoch": 1.11,
343
+ "learning_rate": 0.00014443207126948776,
344
+ "loss": 0.1571,
345
+ "step": 500
346
+ },
347
+ {
348
+ "epoch": 1.11,
349
+ "eval_accuracy": 0.9813725490196078,
350
+ "eval_loss": 0.26114311814308167,
351
+ "eval_runtime": 18.4125,
352
+ "eval_samples_per_second": 55.397,
353
+ "eval_steps_per_second": 6.952,
354
+ "step": 500
355
+ },
356
+ {
357
+ "epoch": 1.14,
358
+ "learning_rate": 0.0001433184855233853,
359
+ "loss": 0.1276,
360
+ "step": 510
361
+ },
362
+ {
363
+ "epoch": 1.16,
364
+ "learning_rate": 0.00014220489977728287,
365
+ "loss": 0.1203,
366
+ "step": 520
367
+ },
368
+ {
369
+ "epoch": 1.18,
370
+ "learning_rate": 0.0001410913140311804,
371
+ "loss": 0.1005,
372
+ "step": 530
373
+ },
374
+ {
375
+ "epoch": 1.2,
376
+ "learning_rate": 0.00013997772828507795,
377
+ "loss": 0.1351,
378
+ "step": 540
379
+ },
380
+ {
381
+ "epoch": 1.22,
382
+ "learning_rate": 0.0001388641425389755,
383
+ "loss": 0.0928,
384
+ "step": 550
385
+ },
386
+ {
387
+ "epoch": 1.25,
388
+ "learning_rate": 0.00013775055679287305,
389
+ "loss": 0.0848,
390
+ "step": 560
391
+ },
392
+ {
393
+ "epoch": 1.27,
394
+ "learning_rate": 0.0001366369710467706,
395
+ "loss": 0.1251,
396
+ "step": 570
397
+ },
398
+ {
399
+ "epoch": 1.29,
400
+ "learning_rate": 0.00013552338530066816,
401
+ "loss": 0.0774,
402
+ "step": 580
403
+ },
404
+ {
405
+ "epoch": 1.31,
406
+ "learning_rate": 0.00013440979955456572,
407
+ "loss": 0.097,
408
+ "step": 590
409
+ },
410
+ {
411
+ "epoch": 1.34,
412
+ "learning_rate": 0.00013329621380846326,
413
+ "loss": 0.1237,
414
+ "step": 600
415
+ },
416
+ {
417
+ "epoch": 1.34,
418
+ "eval_accuracy": 0.9784313725490196,
419
+ "eval_loss": 0.16912123560905457,
420
+ "eval_runtime": 16.6514,
421
+ "eval_samples_per_second": 61.256,
422
+ "eval_steps_per_second": 7.687,
423
+ "step": 600
424
+ },
425
+ {
426
+ "epoch": 1.36,
427
+ "learning_rate": 0.0001321826280623608,
428
+ "loss": 0.0724,
429
+ "step": 610
430
+ },
431
+ {
432
+ "epoch": 1.38,
433
+ "learning_rate": 0.00013106904231625837,
434
+ "loss": 0.0754,
435
+ "step": 620
436
+ },
437
+ {
438
+ "epoch": 1.4,
439
+ "learning_rate": 0.0001299554565701559,
440
+ "loss": 0.065,
441
+ "step": 630
442
+ },
443
+ {
444
+ "epoch": 1.43,
445
+ "learning_rate": 0.00012884187082405345,
446
+ "loss": 0.0679,
447
+ "step": 640
448
+ },
449
+ {
450
+ "epoch": 1.45,
451
+ "learning_rate": 0.000127728285077951,
452
+ "loss": 0.0579,
453
+ "step": 650
454
+ },
455
+ {
456
+ "epoch": 1.47,
457
+ "learning_rate": 0.00012661469933184855,
458
+ "loss": 0.0524,
459
+ "step": 660
460
+ },
461
+ {
462
+ "epoch": 1.49,
463
+ "learning_rate": 0.0001255011135857461,
464
+ "loss": 0.0727,
465
+ "step": 670
466
+ },
467
+ {
468
+ "epoch": 1.51,
469
+ "learning_rate": 0.00012438752783964366,
470
+ "loss": 0.0721,
471
+ "step": 680
472
+ },
473
+ {
474
+ "epoch": 1.54,
475
+ "learning_rate": 0.00012327394209354122,
476
+ "loss": 0.0665,
477
+ "step": 690
478
+ },
479
+ {
480
+ "epoch": 1.56,
481
+ "learning_rate": 0.00012216035634743876,
482
+ "loss": 0.049,
483
+ "step": 700
484
+ },
485
+ {
486
+ "epoch": 1.56,
487
+ "eval_accuracy": 0.9892156862745098,
488
+ "eval_loss": 0.11457555741071701,
489
+ "eval_runtime": 16.6002,
490
+ "eval_samples_per_second": 61.445,
491
+ "eval_steps_per_second": 7.711,
492
+ "step": 700
493
+ },
494
+ {
495
+ "epoch": 1.58,
496
+ "learning_rate": 0.00012104677060133632,
497
+ "loss": 0.0695,
498
+ "step": 710
499
+ },
500
+ {
501
+ "epoch": 1.6,
502
+ "learning_rate": 0.00011993318485523385,
503
+ "loss": 0.0487,
504
+ "step": 720
505
+ },
506
+ {
507
+ "epoch": 1.63,
508
+ "learning_rate": 0.00011881959910913141,
509
+ "loss": 0.0432,
510
+ "step": 730
511
+ },
512
+ {
513
+ "epoch": 1.65,
514
+ "learning_rate": 0.00011770601336302896,
515
+ "loss": 0.0509,
516
+ "step": 740
517
+ },
518
+ {
519
+ "epoch": 1.67,
520
+ "learning_rate": 0.0001165924276169265,
521
+ "loss": 0.1105,
522
+ "step": 750
523
+ },
524
+ {
525
+ "epoch": 1.69,
526
+ "learning_rate": 0.00011547884187082405,
527
+ "loss": 0.0894,
528
+ "step": 760
529
+ },
530
+ {
531
+ "epoch": 1.71,
532
+ "learning_rate": 0.0001143652561247216,
533
+ "loss": 0.0668,
534
+ "step": 770
535
+ },
536
+ {
537
+ "epoch": 1.74,
538
+ "learning_rate": 0.00011325167037861917,
539
+ "loss": 0.0663,
540
+ "step": 780
541
+ },
542
+ {
543
+ "epoch": 1.76,
544
+ "learning_rate": 0.00011213808463251671,
545
+ "loss": 0.0423,
546
+ "step": 790
547
+ },
548
+ {
549
+ "epoch": 1.78,
550
+ "learning_rate": 0.00011102449888641426,
551
+ "loss": 0.0763,
552
+ "step": 800
553
+ },
554
+ {
555
+ "epoch": 1.78,
556
+ "eval_accuracy": 0.9862745098039216,
557
+ "eval_loss": 0.12085805088281631,
558
+ "eval_runtime": 17.6825,
559
+ "eval_samples_per_second": 57.684,
560
+ "eval_steps_per_second": 7.239,
561
+ "step": 800
562
+ },
563
+ {
564
+ "epoch": 1.8,
565
+ "learning_rate": 0.00010991091314031181,
566
+ "loss": 0.0545,
567
+ "step": 810
568
+ },
569
+ {
570
+ "epoch": 1.83,
571
+ "learning_rate": 0.00010879732739420935,
572
+ "loss": 0.0563,
573
+ "step": 820
574
+ },
575
+ {
576
+ "epoch": 1.85,
577
+ "learning_rate": 0.0001076837416481069,
578
+ "loss": 0.0743,
579
+ "step": 830
580
+ },
581
+ {
582
+ "epoch": 1.87,
583
+ "learning_rate": 0.00010657015590200446,
584
+ "loss": 0.04,
585
+ "step": 840
586
+ },
587
+ {
588
+ "epoch": 1.89,
589
+ "learning_rate": 0.000105456570155902,
590
+ "loss": 0.1064,
591
+ "step": 850
592
+ },
593
+ {
594
+ "epoch": 1.92,
595
+ "learning_rate": 0.00010434298440979955,
596
+ "loss": 0.0389,
597
+ "step": 860
598
+ },
599
+ {
600
+ "epoch": 1.94,
601
+ "learning_rate": 0.00010322939866369712,
602
+ "loss": 0.0372,
603
+ "step": 870
604
+ },
605
+ {
606
+ "epoch": 1.96,
607
+ "learning_rate": 0.00010211581291759467,
608
+ "loss": 0.0514,
609
+ "step": 880
610
+ },
611
+ {
612
+ "epoch": 1.98,
613
+ "learning_rate": 0.00010100222717149221,
614
+ "loss": 0.0406,
615
+ "step": 890
616
+ },
617
+ {
618
+ "epoch": 2.0,
619
+ "learning_rate": 9.988864142538976e-05,
620
+ "loss": 0.0864,
621
+ "step": 900
622
+ },
623
+ {
624
+ "epoch": 2.0,
625
+ "eval_accuracy": 0.9803921568627451,
626
+ "eval_loss": 0.12226687371730804,
627
+ "eval_runtime": 16.7815,
628
+ "eval_samples_per_second": 60.781,
629
+ "eval_steps_per_second": 7.627,
630
+ "step": 900
631
+ },
632
+ {
633
+ "epoch": 2.03,
634
+ "learning_rate": 9.877505567928731e-05,
635
+ "loss": 0.0337,
636
+ "step": 910
637
+ },
638
+ {
639
+ "epoch": 2.05,
640
+ "learning_rate": 9.766146993318485e-05,
641
+ "loss": 0.055,
642
+ "step": 920
643
+ },
644
+ {
645
+ "epoch": 2.07,
646
+ "learning_rate": 9.65478841870824e-05,
647
+ "loss": 0.0329,
648
+ "step": 930
649
+ },
650
+ {
651
+ "epoch": 2.09,
652
+ "learning_rate": 9.543429844097996e-05,
653
+ "loss": 0.028,
654
+ "step": 940
655
+ },
656
+ {
657
+ "epoch": 2.12,
658
+ "learning_rate": 9.432071269487751e-05,
659
+ "loss": 0.0314,
660
+ "step": 950
661
+ },
662
+ {
663
+ "epoch": 2.14,
664
+ "learning_rate": 9.320712694877506e-05,
665
+ "loss": 0.0515,
666
+ "step": 960
667
+ },
668
+ {
669
+ "epoch": 2.16,
670
+ "learning_rate": 9.20935412026726e-05,
671
+ "loss": 0.0294,
672
+ "step": 970
673
+ },
674
+ {
675
+ "epoch": 2.18,
676
+ "learning_rate": 9.097995545657017e-05,
677
+ "loss": 0.0318,
678
+ "step": 980
679
+ },
680
+ {
681
+ "epoch": 2.2,
682
+ "learning_rate": 8.986636971046771e-05,
683
+ "loss": 0.0332,
684
+ "step": 990
685
+ },
686
+ {
687
+ "epoch": 2.23,
688
+ "learning_rate": 8.875278396436526e-05,
689
+ "loss": 0.0786,
690
+ "step": 1000
691
+ },
692
+ {
693
+ "epoch": 2.23,
694
+ "eval_accuracy": 0.9833333333333333,
695
+ "eval_loss": 0.1075347512960434,
696
+ "eval_runtime": 17.0089,
697
+ "eval_samples_per_second": 59.969,
698
+ "eval_steps_per_second": 7.525,
699
+ "step": 1000
700
+ },
701
+ {
702
+ "epoch": 2.25,
703
+ "learning_rate": 8.763919821826281e-05,
704
+ "loss": 0.0269,
705
+ "step": 1010
706
+ },
707
+ {
708
+ "epoch": 2.27,
709
+ "learning_rate": 8.652561247216035e-05,
710
+ "loss": 0.0247,
711
+ "step": 1020
712
+ },
713
+ {
714
+ "epoch": 2.29,
715
+ "learning_rate": 8.541202672605792e-05,
716
+ "loss": 0.0234,
717
+ "step": 1030
718
+ },
719
+ {
720
+ "epoch": 2.32,
721
+ "learning_rate": 8.429844097995546e-05,
722
+ "loss": 0.0241,
723
+ "step": 1040
724
+ },
725
+ {
726
+ "epoch": 2.34,
727
+ "learning_rate": 8.318485523385301e-05,
728
+ "loss": 0.0227,
729
+ "step": 1050
730
+ },
731
+ {
732
+ "epoch": 2.36,
733
+ "learning_rate": 8.207126948775056e-05,
734
+ "loss": 0.0199,
735
+ "step": 1060
736
+ },
737
+ {
738
+ "epoch": 2.38,
739
+ "learning_rate": 8.095768374164812e-05,
740
+ "loss": 0.023,
741
+ "step": 1070
742
+ },
743
+ {
744
+ "epoch": 2.41,
745
+ "learning_rate": 7.984409799554567e-05,
746
+ "loss": 0.0254,
747
+ "step": 1080
748
+ },
749
+ {
750
+ "epoch": 2.43,
751
+ "learning_rate": 7.873051224944321e-05,
752
+ "loss": 0.0209,
753
+ "step": 1090
754
+ },
755
+ {
756
+ "epoch": 2.45,
757
+ "learning_rate": 7.761692650334076e-05,
758
+ "loss": 0.0269,
759
+ "step": 1100
760
+ },
761
+ {
762
+ "epoch": 2.45,
763
+ "eval_accuracy": 0.984313725490196,
764
+ "eval_loss": 0.09193924814462662,
765
+ "eval_runtime": 17.7915,
766
+ "eval_samples_per_second": 57.331,
767
+ "eval_steps_per_second": 7.194,
768
+ "step": 1100
769
+ },
770
+ {
771
+ "epoch": 2.47,
772
+ "learning_rate": 7.650334075723831e-05,
773
+ "loss": 0.0207,
774
+ "step": 1110
775
+ },
776
+ {
777
+ "epoch": 2.49,
778
+ "learning_rate": 7.538975501113587e-05,
779
+ "loss": 0.0189,
780
+ "step": 1120
781
+ },
782
+ {
783
+ "epoch": 2.52,
784
+ "learning_rate": 7.427616926503342e-05,
785
+ "loss": 0.0213,
786
+ "step": 1130
787
+ },
788
+ {
789
+ "epoch": 2.54,
790
+ "learning_rate": 7.316258351893096e-05,
791
+ "loss": 0.02,
792
+ "step": 1140
793
+ },
794
+ {
795
+ "epoch": 2.56,
796
+ "learning_rate": 7.204899777282851e-05,
797
+ "loss": 0.0683,
798
+ "step": 1150
799
+ },
800
+ {
801
+ "epoch": 2.58,
802
+ "learning_rate": 7.093541202672605e-05,
803
+ "loss": 0.0187,
804
+ "step": 1160
805
+ },
806
+ {
807
+ "epoch": 2.61,
808
+ "learning_rate": 6.982182628062362e-05,
809
+ "loss": 0.0209,
810
+ "step": 1170
811
+ },
812
+ {
813
+ "epoch": 2.63,
814
+ "learning_rate": 6.870824053452117e-05,
815
+ "loss": 0.0176,
816
+ "step": 1180
817
+ },
818
+ {
819
+ "epoch": 2.65,
820
+ "learning_rate": 6.759465478841871e-05,
821
+ "loss": 0.0199,
822
+ "step": 1190
823
+ },
824
+ {
825
+ "epoch": 2.67,
826
+ "learning_rate": 6.648106904231626e-05,
827
+ "loss": 0.0178,
828
+ "step": 1200
829
+ },
830
+ {
831
+ "epoch": 2.67,
832
+ "eval_accuracy": 0.9872549019607844,
833
+ "eval_loss": 0.07947444170713425,
834
+ "eval_runtime": 17.2701,
835
+ "eval_samples_per_second": 59.062,
836
+ "eval_steps_per_second": 7.412,
837
+ "step": 1200
838
+ },
839
+ {
840
+ "epoch": 2.69,
841
+ "learning_rate": 6.536748329621381e-05,
842
+ "loss": 0.0184,
843
+ "step": 1210
844
+ },
845
+ {
846
+ "epoch": 2.72,
847
+ "learning_rate": 6.425389755011137e-05,
848
+ "loss": 0.0182,
849
+ "step": 1220
850
+ },
851
+ {
852
+ "epoch": 2.74,
853
+ "learning_rate": 6.314031180400892e-05,
854
+ "loss": 0.0169,
855
+ "step": 1230
856
+ },
857
+ {
858
+ "epoch": 2.76,
859
+ "learning_rate": 6.202672605790646e-05,
860
+ "loss": 0.0169,
861
+ "step": 1240
862
+ },
863
+ {
864
+ "epoch": 2.78,
865
+ "learning_rate": 6.091314031180401e-05,
866
+ "loss": 0.0172,
867
+ "step": 1250
868
+ },
869
+ {
870
+ "epoch": 2.81,
871
+ "learning_rate": 5.979955456570156e-05,
872
+ "loss": 0.0189,
873
+ "step": 1260
874
+ },
875
+ {
876
+ "epoch": 2.83,
877
+ "learning_rate": 5.8685968819599115e-05,
878
+ "loss": 0.0194,
879
+ "step": 1270
880
+ },
881
+ {
882
+ "epoch": 2.85,
883
+ "learning_rate": 5.757238307349666e-05,
884
+ "loss": 0.0167,
885
+ "step": 1280
886
+ },
887
+ {
888
+ "epoch": 2.87,
889
+ "learning_rate": 5.6458797327394206e-05,
890
+ "loss": 0.0177,
891
+ "step": 1290
892
+ },
893
+ {
894
+ "epoch": 2.9,
895
+ "learning_rate": 5.5345211581291766e-05,
896
+ "loss": 0.0165,
897
+ "step": 1300
898
+ },
899
+ {
900
+ "epoch": 2.9,
901
+ "eval_accuracy": 0.9872549019607844,
902
+ "eval_loss": 0.07266250252723694,
903
+ "eval_runtime": 16.2341,
904
+ "eval_samples_per_second": 62.831,
905
+ "eval_steps_per_second": 7.885,
906
+ "step": 1300
907
+ },
908
+ {
909
+ "epoch": 2.92,
910
+ "learning_rate": 5.423162583518931e-05,
911
+ "loss": 0.0165,
912
+ "step": 1310
913
+ },
914
+ {
915
+ "epoch": 2.94,
916
+ "learning_rate": 5.3118040089086864e-05,
917
+ "loss": 0.0165,
918
+ "step": 1320
919
+ },
920
+ {
921
+ "epoch": 2.96,
922
+ "learning_rate": 5.200445434298441e-05,
923
+ "loss": 0.0158,
924
+ "step": 1330
925
+ },
926
+ {
927
+ "epoch": 2.98,
928
+ "learning_rate": 5.0890868596881956e-05,
929
+ "loss": 0.0155,
930
+ "step": 1340
931
+ },
932
+ {
933
+ "epoch": 3.01,
934
+ "learning_rate": 4.977728285077951e-05,
935
+ "loss": 0.0143,
936
+ "step": 1350
937
+ },
938
+ {
939
+ "epoch": 3.03,
940
+ "learning_rate": 4.866369710467706e-05,
941
+ "loss": 0.0142,
942
+ "step": 1360
943
+ },
944
+ {
945
+ "epoch": 3.05,
946
+ "learning_rate": 4.7550111358574614e-05,
947
+ "loss": 0.0144,
948
+ "step": 1370
949
+ },
950
+ {
951
+ "epoch": 3.07,
952
+ "learning_rate": 4.643652561247217e-05,
953
+ "loss": 0.0139,
954
+ "step": 1380
955
+ },
956
+ {
957
+ "epoch": 3.1,
958
+ "learning_rate": 4.532293986636971e-05,
959
+ "loss": 0.0148,
960
+ "step": 1390
961
+ },
962
+ {
963
+ "epoch": 3.12,
964
+ "learning_rate": 4.420935412026726e-05,
965
+ "loss": 0.0144,
966
+ "step": 1400
967
+ },
968
+ {
969
+ "epoch": 3.12,
970
+ "eval_accuracy": 0.9852941176470589,
971
+ "eval_loss": 0.07835763692855835,
972
+ "eval_runtime": 17.268,
973
+ "eval_samples_per_second": 59.069,
974
+ "eval_steps_per_second": 7.413,
975
+ "step": 1400
976
+ },
977
+ {
978
+ "epoch": 3.14,
979
+ "learning_rate": 4.309576837416481e-05,
980
+ "loss": 0.0138,
981
+ "step": 1410
982
+ },
983
+ {
984
+ "epoch": 3.16,
985
+ "learning_rate": 4.1982182628062364e-05,
986
+ "loss": 0.0146,
987
+ "step": 1420
988
+ },
989
+ {
990
+ "epoch": 3.18,
991
+ "learning_rate": 4.0868596881959917e-05,
992
+ "loss": 0.015,
993
+ "step": 1430
994
+ },
995
+ {
996
+ "epoch": 3.21,
997
+ "learning_rate": 3.975501113585746e-05,
998
+ "loss": 0.0148,
999
+ "step": 1440
1000
+ },
1001
+ {
1002
+ "epoch": 3.23,
1003
+ "learning_rate": 3.8641425389755015e-05,
1004
+ "loss": 0.0144,
1005
+ "step": 1450
1006
+ },
1007
+ {
1008
+ "epoch": 3.25,
1009
+ "learning_rate": 3.752783964365256e-05,
1010
+ "loss": 0.0149,
1011
+ "step": 1460
1012
+ },
1013
+ {
1014
+ "epoch": 3.27,
1015
+ "learning_rate": 3.6414253897550114e-05,
1016
+ "loss": 0.0134,
1017
+ "step": 1470
1018
+ },
1019
+ {
1020
+ "epoch": 3.3,
1021
+ "learning_rate": 3.5300668151447666e-05,
1022
+ "loss": 0.0141,
1023
+ "step": 1480
1024
+ },
1025
+ {
1026
+ "epoch": 3.32,
1027
+ "learning_rate": 3.418708240534521e-05,
1028
+ "loss": 0.0488,
1029
+ "step": 1490
1030
+ },
1031
+ {
1032
+ "epoch": 3.34,
1033
+ "learning_rate": 3.3073496659242765e-05,
1034
+ "loss": 0.0138,
1035
+ "step": 1500
1036
+ },
1037
+ {
1038
+ "epoch": 3.34,
1039
+ "eval_accuracy": 0.9852941176470589,
1040
+ "eval_loss": 0.07589561492204666,
1041
+ "eval_runtime": 16.5936,
1042
+ "eval_samples_per_second": 61.47,
1043
+ "eval_steps_per_second": 7.714,
1044
+ "step": 1500
1045
+ },
1046
+ {
1047
+ "epoch": 3.36,
1048
+ "learning_rate": 3.195991091314031e-05,
1049
+ "loss": 0.0142,
1050
+ "step": 1510
1051
+ },
1052
+ {
1053
+ "epoch": 3.39,
1054
+ "learning_rate": 3.084632516703786e-05,
1055
+ "loss": 0.013,
1056
+ "step": 1520
1057
+ },
1058
+ {
1059
+ "epoch": 3.41,
1060
+ "learning_rate": 2.9732739420935413e-05,
1061
+ "loss": 0.0138,
1062
+ "step": 1530
1063
+ },
1064
+ {
1065
+ "epoch": 3.43,
1066
+ "learning_rate": 2.8619153674832965e-05,
1067
+ "loss": 0.0141,
1068
+ "step": 1540
1069
+ },
1070
+ {
1071
+ "epoch": 3.45,
1072
+ "learning_rate": 2.7505567928730515e-05,
1073
+ "loss": 0.014,
1074
+ "step": 1550
1075
+ },
1076
+ {
1077
+ "epoch": 3.47,
1078
+ "learning_rate": 2.639198218262806e-05,
1079
+ "loss": 0.0136,
1080
+ "step": 1560
1081
+ },
1082
+ {
1083
+ "epoch": 3.5,
1084
+ "learning_rate": 2.5278396436525613e-05,
1085
+ "loss": 0.0133,
1086
+ "step": 1570
1087
+ },
1088
+ {
1089
+ "epoch": 3.52,
1090
+ "learning_rate": 2.4164810690423166e-05,
1091
+ "loss": 0.015,
1092
+ "step": 1580
1093
+ },
1094
+ {
1095
+ "epoch": 3.54,
1096
+ "learning_rate": 2.3051224944320715e-05,
1097
+ "loss": 0.013,
1098
+ "step": 1590
1099
+ },
1100
+ {
1101
+ "epoch": 3.56,
1102
+ "learning_rate": 2.1937639198218264e-05,
1103
+ "loss": 0.0135,
1104
+ "step": 1600
1105
+ },
1106
+ {
1107
+ "epoch": 3.56,
1108
+ "eval_accuracy": 0.9862745098039216,
1109
+ "eval_loss": 0.07374249398708344,
1110
+ "eval_runtime": 16.5728,
1111
+ "eval_samples_per_second": 61.546,
1112
+ "eval_steps_per_second": 7.723,
1113
+ "step": 1600
1114
+ },
1115
+ {
1116
+ "epoch": 3.59,
1117
+ "learning_rate": 2.0824053452115813e-05,
1118
+ "loss": 0.0134,
1119
+ "step": 1610
1120
+ },
1121
+ {
1122
+ "epoch": 3.61,
1123
+ "learning_rate": 1.9710467706013363e-05,
1124
+ "loss": 0.0132,
1125
+ "step": 1620
1126
+ },
1127
+ {
1128
+ "epoch": 3.63,
1129
+ "learning_rate": 1.8596881959910915e-05,
1130
+ "loss": 0.0121,
1131
+ "step": 1630
1132
+ },
1133
+ {
1134
+ "epoch": 3.65,
1135
+ "learning_rate": 1.7483296213808465e-05,
1136
+ "loss": 0.033,
1137
+ "step": 1640
1138
+ },
1139
+ {
1140
+ "epoch": 3.67,
1141
+ "learning_rate": 1.6369710467706014e-05,
1142
+ "loss": 0.0378,
1143
+ "step": 1650
1144
+ },
1145
+ {
1146
+ "epoch": 3.7,
1147
+ "learning_rate": 1.5256124721603565e-05,
1148
+ "loss": 0.0123,
1149
+ "step": 1660
1150
+ },
1151
+ {
1152
+ "epoch": 3.72,
1153
+ "learning_rate": 1.4142538975501116e-05,
1154
+ "loss": 0.0138,
1155
+ "step": 1670
1156
+ },
1157
+ {
1158
+ "epoch": 3.74,
1159
+ "learning_rate": 1.3028953229398663e-05,
1160
+ "loss": 0.0124,
1161
+ "step": 1680
1162
+ },
1163
+ {
1164
+ "epoch": 3.76,
1165
+ "learning_rate": 1.1915367483296214e-05,
1166
+ "loss": 0.0128,
1167
+ "step": 1690
1168
+ },
1169
+ {
1170
+ "epoch": 3.79,
1171
+ "learning_rate": 1.0801781737193764e-05,
1172
+ "loss": 0.0123,
1173
+ "step": 1700
1174
+ },
1175
+ {
1176
+ "epoch": 3.79,
1177
+ "eval_accuracy": 0.9852941176470589,
1178
+ "eval_loss": 0.07696886360645294,
1179
+ "eval_runtime": 17.0943,
1180
+ "eval_samples_per_second": 59.669,
1181
+ "eval_steps_per_second": 7.488,
1182
+ "step": 1700
1183
+ },
1184
+ {
1185
+ "epoch": 3.81,
1186
+ "learning_rate": 9.688195991091315e-06,
1187
+ "loss": 0.0133,
1188
+ "step": 1710
1189
+ },
1190
+ {
1191
+ "epoch": 3.83,
1192
+ "learning_rate": 8.574610244988866e-06,
1193
+ "loss": 0.013,
1194
+ "step": 1720
1195
+ },
1196
+ {
1197
+ "epoch": 3.85,
1198
+ "learning_rate": 7.461024498886416e-06,
1199
+ "loss": 0.0122,
1200
+ "step": 1730
1201
+ },
1202
+ {
1203
+ "epoch": 3.88,
1204
+ "learning_rate": 6.347438752783964e-06,
1205
+ "loss": 0.0124,
1206
+ "step": 1740
1207
+ },
1208
+ {
1209
+ "epoch": 3.9,
1210
+ "learning_rate": 5.233853006681515e-06,
1211
+ "loss": 0.0117,
1212
+ "step": 1750
1213
+ },
1214
+ {
1215
+ "epoch": 3.92,
1216
+ "learning_rate": 4.120267260579064e-06,
1217
+ "loss": 0.0116,
1218
+ "step": 1760
1219
+ },
1220
+ {
1221
+ "epoch": 3.94,
1222
+ "learning_rate": 3.006681514476615e-06,
1223
+ "loss": 0.0122,
1224
+ "step": 1770
1225
+ },
1226
+ {
1227
+ "epoch": 3.96,
1228
+ "learning_rate": 1.893095768374165e-06,
1229
+ "loss": 0.0123,
1230
+ "step": 1780
1231
+ },
1232
+ {
1233
+ "epoch": 3.99,
1234
+ "learning_rate": 7.79510022271715e-07,
1235
+ "loss": 0.0132,
1236
+ "step": 1790
1237
+ },
1238
+ {
1239
+ "epoch": 4.0,
1240
+ "step": 1796,
1241
+ "total_flos": 2.2241519461722194e+18,
1242
+ "train_loss": 0.4137112680814314,
1243
+ "train_runtime": 1023.9197,
1244
+ "train_samples_per_second": 28.006,
1245
+ "train_steps_per_second": 1.754
1246
+ }
1247
+ ],
1248
+ "logging_steps": 10,
1249
+ "max_steps": 1796,
1250
+ "num_train_epochs": 4,
1251
+ "save_steps": 100,
1252
+ "total_flos": 2.2241519461722194e+18,
1253
+ "trial_name": null,
1254
+ "trial_params": null
1255
+ }
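`trainer_state.json` stores the full `log_history`, so the evaluation curve summarized in the README table can be reconstructed from it. A small sketch, assuming a local copy of the file:

```python
import json

with open("trainer_state.json") as f:  # local copy of the file added in this commit
    state = json.load(f)

# Evaluation entries carry "eval_accuracy"; plain training-loss entries do not.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(entry["step"], round(entry["eval_loss"], 4), round(entry["eval_accuracy"], 4))

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
```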
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbc5a42612cc0512831f6c5854487b16c569cf613638852dc60841dc4a4dd3f6
+ size 4600