p-s committed
Commit b004d07
1 Parent(s): a1ab131

Initial release
README.md CHANGED
@@ -1,3 +1,113 @@
  ---
- license: apache-2.0
+ language:
+ - ja
+ license: cc-by-sa-4.0
+ tags:
+ - zero-shot-classification
+ - text-classification
+ - nli
+ - pytorch
+ metrics:
+ - accuracy
+ datasets:
+ - JSNLI
+ pipeline_tag: text-classification
+ widget:
+ - text: "あなた が 好きです 。   あなた を 愛して い ます 。"
+ model-index:
+ - name: roberta-base-japanese-jsnli
+   results:
+   - task:
+       type: text-classification
+       name: Natural Language Inference
+     dataset:
+       type: snli
+       name: JSNLI
+       split: dev
+     metrics:
+     - type: accuracy
+       value: 0.9328
+       verified: false
  ---
+ # roberta-base-japanese-jsnli
+
+ This model is a fine-tuned version of [nlp-waseda/roberta-base-japanese](https://huggingface.co/nlp-waseda/roberta-base-japanese) on the [JSNLI](https://nlp.ist.i.kyoto-u.ac.jp/?%E6%97%A5%E6%9C%AC%E8%AA%9ESNLI%28JSNLI%29%E3%83%87%E3%83%BC%E3%82%BF%E3%82%BB%E3%83%83%E3%83%88) dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.2039
+ - Accuracy: 0.9328
+
+ ## How to use the model
+
+ The input text must be segmented into words with [Juman++](https://github.com/ku-nlp/jumanpp) before it is passed to the model.
+
+ ### Simple zero-shot classification pipeline
+ ```python
+ from transformers import pipeline
+ from pyknp import Juman
+
+ juman = Juman()
+
+ classifier = pipeline("zero-shot-classification", model="Formzu/roberta-base-japanese-jsnli")
+
+ sequence_to_classify = " ".join([tok.midasi for tok in juman.analysis("いつか世界を見る。").mrph_list()])
+
+ candidate_labels = ['旅行', '料理', '踊り']
+ out = classifier(sequence_to_classify, candidate_labels, hypothesis_template="この 例 は {} です 。")
+ print(out)
+ #{'sequence': 'いつか 世界 を 見る 。',
+ # 'labels': ['旅行', '踊り', '料理'],
+ # 'scores': [0.8998081684112549, 0.06059670448303223, 0.03959512338042259]}
+ ```
+ ### NLI use-case
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+ from pyknp import Juman
+
+ juman = Juman()
+
+ device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+ model_name = "Formzu/roberta-base-japanese-jsnli"
+ model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ premise = " ".join([tok.midasi for tok in juman.analysis("いつか世界を見る。").mrph_list()])
+ label = '旅行'
+ hypothesis = f'この 例 は {label} です 。'
+
+ encoded = tokenizer.encode(premise, hypothesis, return_tensors='pt').to(device)
+ with torch.no_grad():
+     logits = model(encoded)["logits"][0]
+     probs = logits.softmax(dim=-1)
+     print(probs.cpu().numpy(), logits.cpu().numpy())
+ #[0.82168734 0.1744363 0.00387629] [ 2.3362164 0.78641605 -3.0202653 ]
+ ```
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|
+ | 0.4067        | 1.0   | 16657 | 0.2224          | 0.9201   |
+ | 0.3397        | 2.0   | 33314 | 0.2152          | 0.9208   |
+ | 0.2775        | 3.0   | 49971 | 0.2039          | 0.9328   |
+
+ ### Framework versions
+
+ - Transformers 4.21.2
+ - Pytorch 1.12.1+cu116
+ - Datasets 2.4.0
+ - Tokenizers 0.12.1
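
The "Training hyperparameters" section above maps directly onto a standard `transformers` `Trainer` run. The sketch below is illustrative only and is not part of this commit: `load_jsnli_dataset` is a hypothetical helper standing in for however the Juman++-segmented JSNLI pairs are tokenized and labelled, and the Adam betas/epsilon listed above are simply the `TrainingArguments` defaults.

```python
# Illustrative sketch (not part of this commit): a Trainer setup matching the
# hyperparameters listed in the README. `load_jsnli_dataset` is hypothetical.
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model_name = "nlp-waseda/roberta-base-japanese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)

# Hypothetical helper: must return tokenized premise/hypothesis pairs
# (pre-segmented with Juman++) with integer labels 0/1/2.
train_ds, eval_ds = load_jsnli_dataset(tokenizer)

args = TrainingArguments(
    output_dir="roberta-base-japanese-jsnli",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    num_train_epochs=3.0,
    lr_scheduler_type="linear",
    seed=42,                      # Adam betas/epsilon are left at their defaults
    evaluation_strategy="epoch",
)

trainer = Trainer(model=model, args=args, train_dataset=train_ds,
                  eval_dataset=eval_ds, tokenizer=tokenizer)
trainer.train()
```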
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "epoch": 3.0,
+   "eval_accuracy": 0.9328396320343018,
+   "eval_loss": 0.2038867175579071,
+   "eval_runtime": 27.4426,
+   "eval_samples": 3916,
+   "eval_samples_per_second": 142.698,
+   "eval_steps_per_second": 4.482,
+   "train_loss": 0.36741465170648585,
+   "train_runtime": 31812.3263,
+   "train_samples": 533005,
+   "train_samples_per_second": 50.264,
+   "train_steps_per_second": 1.571
+ }
config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "_name_or_path": "nlp-waseda/roberta-base-japanese",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 2,
+   "classifier_dropout": null,
+   "eos_token_id": 3,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "entailment": 0,
+     "neutral": 1,
+     "contradiction": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.21.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
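
The `id2label` / `label2id` mappings above tie the model's three output logits to NLI classes when the model is used directly, as in the README's NLI example. A minimal, illustrative lookup, not part of this commit, with the premise and hypothesis assumed to be pre-segmented with Juman++:

```python
# Illustrative only: resolve the argmax logit to its NLI label via config.json's
# id2label mapping (0 = entailment, 1 = neutral, 2 = contradiction).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "Formzu/roberta-base-japanese-jsnli"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

premise = "いつか 世界 を 見る 。"        # assumed already segmented with Juman++
hypothesis = "この 例 は 旅行 です 。"

inputs = tokenizer(premise, hypothesis, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits[0]
print(model.config.id2label[int(logits.argmax())])  # expected: "entailment"
```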
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.0,
+   "eval_accuracy": 0.9328396320343018,
+   "eval_loss": 0.2038867175579071,
+   "eval_runtime": 27.4426,
+   "eval_samples": 3916,
+   "eval_samples_per_second": 142.698,
+   "eval_steps_per_second": 4.482
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4c0dc9f05170a5f332bc21660acb751993db79533b7c311ffe255852d666e5a
+ size 442556397
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "eos_token": "[SEP]",
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7f87f538d8c73fb0a6a34efb7ba6e3488f920341119c02c208bce7965cf248e
+ size 810161
tokenizer_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": "[CLS]",
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "keep_accents": true,
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "[MASK]",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "name_or_path": "nlp-waseda/roberta-base-japanese",
+   "pad_token": "[PAD]",
+   "remove_space": true,
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": null,
+   "tokenizer_class": "AlbertTokenizer",
+   "unk_token": "[UNK]"
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 3.0,
+   "train_loss": 0.36741465170648585,
+   "train_runtime": 31812.3263,
+   "train_samples": 533005,
+   "train_samples_per_second": 50.264,
+   "train_steps_per_second": 1.571
+ }
trainer_state.json ADDED
@@ -0,0 +1,646 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 3.0,
+   "global_step": 49971,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.03, "learning_rate": 1.9799883932680958e-05, "loss": 0.7935, "step": 500},
+     {"epoch": 0.06, "learning_rate": 1.959976786536191e-05, "loss": 0.6149, "step": 1000},
+     {"epoch": 0.09, "learning_rate": 1.9399651798042868e-05, "loss": 0.5758, "step": 1500},
+     {"epoch": 0.12, "learning_rate": 1.919953573072382e-05, "loss": 0.5473, "step": 2000},
+     {"epoch": 0.15, "learning_rate": 1.8999419663404778e-05, "loss": 0.5186, "step": 2500},
+     {"epoch": 0.18, "learning_rate": 1.879930359608573e-05, "loss": 0.5147, "step": 3000},
+     {"epoch": 0.21, "learning_rate": 1.8599187528766688e-05, "loss": 0.5012, "step": 3500},
+     {"epoch": 0.24, "learning_rate": 1.839907146144764e-05, "loss": 0.49, "step": 4000},
+     {"epoch": 0.27, "learning_rate": 1.8198955394128598e-05, "loss": 0.4918, "step": 4500},
+     {"epoch": 0.3, "learning_rate": 1.799883932680955e-05, "loss": 0.4766, "step": 5000},
+     {"epoch": 0.33, "learning_rate": 1.7798723259490508e-05, "loss": 0.4886, "step": 5500},
+     {"epoch": 0.36, "learning_rate": 1.759860719217146e-05, "loss": 0.4762, "step": 6000},
+     {"epoch": 0.39, "learning_rate": 1.7398491124852417e-05, "loss": 0.4621, "step": 6500},
+     {"epoch": 0.42, "learning_rate": 1.719837505753337e-05, "loss": 0.46, "step": 7000},
+     {"epoch": 0.45, "learning_rate": 1.6998258990214324e-05, "loss": 0.457, "step": 7500},
+     {"epoch": 0.48, "learning_rate": 1.679814292289528e-05, "loss": 0.4413, "step": 8000},
+     {"epoch": 0.51, "learning_rate": 1.6598026855576234e-05, "loss": 0.4482, "step": 8500},
+     {"epoch": 0.54, "learning_rate": 1.639791078825719e-05, "loss": 0.4495, "step": 9000},
+     {"epoch": 0.57, "learning_rate": 1.6197794720938144e-05, "loss": 0.4391, "step": 9500},
+     {"epoch": 0.6, "learning_rate": 1.59976786536191e-05, "loss": 0.4369, "step": 10000},
+     {"epoch": 0.63, "learning_rate": 1.5797562586300054e-05, "loss": 0.4413, "step": 10500},
+     {"epoch": 0.66, "learning_rate": 1.559744651898101e-05, "loss": 0.4354, "step": 11000},
+     {"epoch": 0.69, "learning_rate": 1.5397330451661963e-05, "loss": 0.4218, "step": 11500},
+     {"epoch": 0.72, "learning_rate": 1.519721438434292e-05, "loss": 0.4267, "step": 12000},
+     {"epoch": 0.75, "learning_rate": 1.4997098317023875e-05, "loss": 0.424, "step": 12500},
+     {"epoch": 0.78, "learning_rate": 1.479698224970483e-05, "loss": 0.4234, "step": 13000},
+     {"epoch": 0.81, "learning_rate": 1.4596866182385785e-05, "loss": 0.423, "step": 13500},
+     {"epoch": 0.84, "learning_rate": 1.439675011506674e-05, "loss": 0.4188, "step": 14000},
+     {"epoch": 0.87, "learning_rate": 1.4196634047747695e-05, "loss": 0.4145, "step": 14500},
+     {"epoch": 0.9, "learning_rate": 1.399651798042865e-05, "loss": 0.4213, "step": 15000},
+     {"epoch": 0.93, "learning_rate": 1.3796401913109605e-05, "loss": 0.4177, "step": 15500},
+     {"epoch": 0.96, "learning_rate": 1.359628584579056e-05, "loss": 0.4071, "step": 16000},
+     {"epoch": 0.99, "learning_rate": 1.3396169778471514e-05, "loss": 0.4067, "step": 16500},
+     {"epoch": 1.0, "eval_accuracy": 0.9200714826583862, "eval_loss": 0.22239993512630463, "eval_runtime": 27.5138, "eval_samples_per_second": 142.329, "eval_steps_per_second": 4.47, "step": 16657},
+     {"epoch": 1.02, "learning_rate": 1.319605371115247e-05, "loss": 0.3698, "step": 17000},
+     {"epoch": 1.05, "learning_rate": 1.2995937643833424e-05, "loss": 0.3628, "step": 17500},
+     {"epoch": 1.08, "learning_rate": 1.279582157651438e-05, "loss": 0.3584, "step": 18000},
+     {"epoch": 1.11, "learning_rate": 1.2595705509195336e-05, "loss": 0.3546, "step": 18500},
+     {"epoch": 1.14, "learning_rate": 1.239558944187629e-05, "loss": 0.359, "step": 19000},
+     {"epoch": 1.17, "learning_rate": 1.2195473374557246e-05, "loss": 0.3564, "step": 19500},
+     {"epoch": 1.2, "learning_rate": 1.19953573072382e-05, "loss": 0.3621, "step": 20000},
+     {"epoch": 1.23, "learning_rate": 1.1795241239919156e-05, "loss": 0.353, "step": 20500},
+     {"epoch": 1.26, "learning_rate": 1.1595125172600107e-05, "loss": 0.3525, "step": 21000},
+     {"epoch": 1.29, "learning_rate": 1.1395009105281064e-05, "loss": 0.3564, "step": 21500},
+     {"epoch": 1.32, "learning_rate": 1.1194893037962019e-05, "loss": 0.3506, "step": 22000},
+     {"epoch": 1.35, "learning_rate": 1.0994776970642974e-05, "loss": 0.3454, "step": 22500},
+     {"epoch": 1.38, "learning_rate": 1.0794660903323929e-05, "loss": 0.3479, "step": 23000},
+     {"epoch": 1.41, "learning_rate": 1.0594544836004884e-05, "loss": 0.3604, "step": 23500},
+     {"epoch": 1.44, "learning_rate": 1.0394428768685838e-05, "loss": 0.36, "step": 24000},
+     {"epoch": 1.47, "learning_rate": 1.0194312701366793e-05, "loss": 0.356, "step": 24500},
+     {"epoch": 1.5, "learning_rate": 9.994196634047748e-06, "loss": 0.3515, "step": 25000},
+     {"epoch": 1.53, "learning_rate": 9.794080566728703e-06, "loss": 0.3447, "step": 25500},
+     {"epoch": 1.56, "learning_rate": 9.593964499409658e-06, "loss": 0.3548, "step": 26000},
+     {"epoch": 1.59, "learning_rate": 9.393848432090613e-06, "loss": 0.3558, "step": 26500},
+     {"epoch": 1.62, "learning_rate": 9.193732364771568e-06, "loss": 0.349, "step": 27000},
+     {"epoch": 1.65, "learning_rate": 8.993616297452523e-06, "loss": 0.3352, "step": 27500},
+     {"epoch": 1.68, "learning_rate": 8.793500230133478e-06, "loss": 0.3475, "step": 28000},
+     {"epoch": 1.71, "learning_rate": 8.593384162814433e-06, "loss": 0.3482, "step": 28500},
+     {"epoch": 1.74, "learning_rate": 8.393268095495388e-06, "loss": 0.3389, "step": 29000},
+     {"epoch": 1.77, "learning_rate": 8.193152028176343e-06, "loss": 0.3485, "step": 29500},
+     {"epoch": 1.8, "learning_rate": 7.993035960857298e-06, "loss": 0.3454, "step": 30000},
+     {"epoch": 1.83, "learning_rate": 7.792919893538253e-06, "loss": 0.3354, "step": 30500},
+     {"epoch": 1.86, "learning_rate": 7.592803826219208e-06, "loss": 0.3284, "step": 31000},
+     {"epoch": 1.89, "learning_rate": 7.3926877589001625e-06, "loss": 0.3361, "step": 31500},
+     {"epoch": 1.92, "learning_rate": 7.192571691581117e-06, "loss": 0.3418, "step": 32000},
+     {"epoch": 1.95, "learning_rate": 6.992455624262072e-06, "loss": 0.3373, "step": 32500},
+     {"epoch": 1.98, "learning_rate": 6.792339556943027e-06, "loss": 0.3397, "step": 33000},
+     {"epoch": 2.0, "eval_accuracy": 0.9208375811576843, "eval_loss": 0.21520225703716278, "eval_runtime": 27.5064, "eval_samples_per_second": 142.367, "eval_steps_per_second": 4.472, "step": 33314},
+     {"epoch": 2.01, "learning_rate": 6.592223489623982e-06, "loss": 0.32, "step": 33500},
+     {"epoch": 2.04, "learning_rate": 6.392107422304937e-06, "loss": 0.2961, "step": 34000},
+     {"epoch": 2.07, "learning_rate": 6.191991354985892e-06, "loss": 0.2895, "step": 34500},
+     {"epoch": 2.1, "learning_rate": 5.991875287666847e-06, "loss": 0.2965, "step": 35000},
+     {"epoch": 2.13, "learning_rate": 5.791759220347802e-06, "loss": 0.2922, "step": 35500},
+     {"epoch": 2.16, "learning_rate": 5.591643153028758e-06, "loss": 0.2837, "step": 36000},
+     {"epoch": 2.19, "learning_rate": 5.391527085709713e-06, "loss": 0.2812, "step": 36500},
+     {"epoch": 2.22, "learning_rate": 5.191411018390668e-06, "loss": 0.2819, "step": 37000},
+     {"epoch": 2.25, "learning_rate": 4.991294951071622e-06, "loss": 0.2874, "step": 37500},
+     {"epoch": 2.28, "learning_rate": 4.791178883752577e-06, "loss": 0.2727, "step": 38000},
+     {"epoch": 2.31, "learning_rate": 4.5910628164335316e-06, "loss": 0.2733, "step": 38500},
+     {"epoch": 2.34, "learning_rate": 4.390946749114487e-06, "loss": 0.2865, "step": 39000},
+     {"epoch": 2.37, "learning_rate": 4.190830681795442e-06, "loss": 0.2799, "step": 39500},
+     {"epoch": 2.4, "learning_rate": 3.990714614476396e-06, "loss": 0.2833, "step": 40000},
+     {"epoch": 2.43, "learning_rate": 3.7905985471573513e-06, "loss": 0.2874, "step": 40500},
+     {"epoch": 2.46, "learning_rate": 3.5904824798383066e-06, "loss": 0.2853, "step": 41000},
+     {"epoch": 2.49, "learning_rate": 3.3903664125192616e-06, "loss": 0.2862, "step": 41500},
+     {"epoch": 2.52, "learning_rate": 3.1902503452002165e-06, "loss": 0.2851, "step": 42000},
+     {"epoch": 2.55, "learning_rate": 2.9901342778811715e-06, "loss": 0.2744, "step": 42500},
+     {"epoch": 2.58, "learning_rate": 2.7900182105621264e-06, "loss": 0.2729, "step": 43000},
+     {"epoch": 2.61, "learning_rate": 2.589902143243081e-06, "loss": 0.2792, "step": 43500},
+     {"epoch": 2.64, "learning_rate": 2.3897860759240363e-06, "loss": 0.2828, "step": 44000},
+     {"epoch": 2.67, "learning_rate": 2.189670008604991e-06, "loss": 0.277, "step": 44500},
+     {"epoch": 2.7, "learning_rate": 1.989553941285946e-06, "loss": 0.2812, "step": 45000},
+     {"epoch": 2.73, "learning_rate": 1.789437873966901e-06, "loss": 0.2807, "step": 45500},
+     {"epoch": 2.76, "learning_rate": 1.5893218066478558e-06, "loss": 0.2852, "step": 46000},
+     {"epoch": 2.79, "learning_rate": 1.3892057393288107e-06, "loss": 0.2824, "step": 46500},
+     {"epoch": 2.82, "learning_rate": 1.1890896720097659e-06, "loss": 0.2784, "step": 47000},
+     {"epoch": 2.85, "learning_rate": 9.889736046907208e-07, "loss": 0.2736, "step": 47500},
+     {"epoch": 2.88, "learning_rate": 7.888575373716756e-07, "loss": 0.281, "step": 48000},
+     {"epoch": 2.91, "learning_rate": 5.887414700526306e-07, "loss": 0.2781, "step": 48500},
+     {"epoch": 2.94, "learning_rate": 3.886254027335855e-07, "loss": 0.2833, "step": 49000},
+     {"epoch": 2.97, "learning_rate": 1.8850933541454047e-07, "loss": 0.2775, "step": 49500},
+     {"epoch": 3.0, "eval_accuracy": 0.9328396320343018, "eval_loss": 0.2038867175579071, "eval_runtime": 27.4892, "eval_samples_per_second": 142.456, "eval_steps_per_second": 4.474, "step": 49971},
+     {"epoch": 3.0, "step": 49971, "total_flos": 4.207223016483379e+17, "train_loss": 0.36741465170648585, "train_runtime": 31812.3263, "train_samples_per_second": 50.264, "train_steps_per_second": 1.571}
+   ],
+   "max_steps": 49971,
+   "num_train_epochs": 3,
+   "total_flos": 4.207223016483379e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
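
Since `log_history` above records both the per-500-step training loss and the per-epoch evaluation metrics, the file can be read directly. A small sketch, not part of this commit, that prints the per-epoch accuracy:

```python
# Illustrative only: pull the per-epoch eval accuracy out of trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'epoch {entry["epoch"]}: accuracy {entry["eval_accuracy"]:.4f}')
# epoch 1.0: accuracy 0.9201
# epoch 2.0: accuracy 0.9208
# epoch 3.0: accuracy 0.9328
```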
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60ec9e05d62905148e152cb8ea6003042f18e94ab4eba950a4b1eb7c86a7ecaa
+ size 3375