Seid Muhie Yimam committed on
Commit
07fad2a
1 Parent(s): a86c4ee

ethio-xlmr base

Browse files
README.md ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: mit
3
+ base_model: xlm-roberta-base
4
+ tags:
5
+ - generated_from_trainer
6
+ model-index:
7
+ - name: ethio_xlmr_base
8
+ results: []
9
+ ---
10
+
11
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
12
+ should probably proofread and complete it, then remove this comment. -->
13
+
14
+ # ethio_xlmr_base
15
+
16
+ This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset.
17
+ It achieves the following results on the evaluation set:
18
+ - Loss: 3.1618
19
+
20
+ ## Model description
21
+
22
+ More information needed
23
+
24
+ ## Intended uses & limitations
25
+
26
+ More information needed
27
+
28
+ ## Training and evaluation data
29
+
30
+ More information needed
31
+
32
+ ## Training procedure
33
+
34
+ ### Training hyperparameters
35
+
36
+ The following hyperparameters were used during training:
37
+ - learning_rate: 5e-05
38
+ - train_batch_size: 48
39
+ - eval_batch_size: 24
40
+ - seed: 42
41
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
42
+ - lr_scheduler_type: linear
43
+ - num_epochs: 10.0
44
+
45
+ ### Training results
46
+
47
+
48
+
49
+ ### Framework versions
50
+
51
+ - Transformers 4.33.1
52
+ - Pytorch 2.0.1+cu117
53
+ - Datasets 2.14.4
54
+ - Tokenizers 0.13.3
all_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_loss": 3.161780834197998,
4
+ "eval_runtime": 2455.7469,
5
+ "eval_samples": 181215,
6
+ "eval_samples_per_second": 73.792,
7
+ "eval_steps_per_second": 3.075,
8
+ "perplexity": 23.612608650107834,
9
+ "train_loss": 3.866458753945259,
10
+ "train_runtime": 140181.055,
11
+ "train_samples": 724891,
12
+ "train_samples_per_second": 51.711,
13
+ "train_steps_per_second": 1.077
14
+ }
config.json ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "xlm-roberta-base",
3
+ "architectures": [
4
+ "XLMRobertaForMaskedLM"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "classifier_dropout": null,
9
+ "eos_token_id": 2,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 768,
13
+ "initializer_range": 0.02,
14
+ "intermediate_size": 3072,
15
+ "layer_norm_eps": 1e-05,
16
+ "max_position_embeddings": 514,
17
+ "model_type": "xlm-roberta",
18
+ "num_attention_heads": 12,
19
+ "num_hidden_layers": 12,
20
+ "output_past": true,
21
+ "pad_token_id": 1,
22
+ "position_embedding_type": "absolute",
23
+ "torch_dtype": "float32",
24
+ "transformers_version": "4.33.1",
25
+ "type_vocab_size": 1,
26
+ "use_cache": true,
27
+ "vocab_size": 70002
28
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 10.0,
3
+ "eval_loss": 3.161780834197998,
4
+ "eval_runtime": 2455.7469,
5
+ "eval_samples": 181215,
6
+ "eval_samples_per_second": 73.792,
7
+ "eval_steps_per_second": 3.075,
8
+ "perplexity": 23.612608650107834
9
+ }
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3832e592e5a98405330ae1bb8c89ffe30cdbdd8c80ec74b335c3c1a31a58fc64
3
+ size 1119169989
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77084c3681ca7e8c3ca12fe1023eca75344c2806ad9bf61bccfb948a1986e635
3
+ size 559570229
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0aac8a4b57325652a09cc5f0a740dc1b42028cd06d67b0437bf6c4d6df94a325
3
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55b61d99d2ed2001312d6b9e3c2f2b0f464d7204ceb9acba6d539ea534cc898f
3
+ size 627
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17222963fdc50cff64fd9b0ec00ef13b14d0b62c02bf3aa717fde09a9c784cfa
3
+ size 1630156
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "cls_token": "<s>",
4
+ "eos_token": "</s>",
5
+ "mask_token": {
6
+ "content": "<mask>",
7
+ "lstrip": true,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "pad_token": "<pad>",
13
+ "sep_token": "</s>",
14
+ "unk_token": "<unk>"
15
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<s>",
3
+ "clean_up_tokenization_spaces": true,
4
+ "cls_token": "<s>",
5
+ "eos_token": "</s>",
6
+ "mask_token": {
7
+ "__type": "AddedToken",
8
+ "content": "<mask>",
9
+ "lstrip": true,
10
+ "normalized": true,
11
+ "rstrip": false,
12
+ "single_word": false
13
+ },
14
+ "model_max_length": 1000000000000000019884624838656,
15
+ "pad_token": "<pad>",
16
+ "sep_token": "</s>",
17
+ "sp_model_kwargs": {},
18
+ "tokenizer_class": "XLMRobertaTokenizer",
19
+ "unk_token": "<unk>",
20
+ "use_fast": true
21
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 10.0,
3
+ "train_loss": 3.866458753945259,
4
+ "train_runtime": 140181.055,
5
+ "train_samples": 724891,
6
+ "train_samples_per_second": 51.711,
7
+ "train_steps_per_second": 1.077
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,1219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 6.621639517944643,
5
+ "eval_steps": 500,
6
+ "global_step": 100000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.03,
13
+ "learning_rate": 4.983445901205139e-05,
14
+ "loss": 7.9646,
15
+ "step": 500
16
+ },
17
+ {
18
+ "epoch": 0.07,
19
+ "learning_rate": 4.9668918024102774e-05,
20
+ "loss": 7.067,
21
+ "step": 1000
22
+ },
23
+ {
24
+ "epoch": 0.1,
25
+ "learning_rate": 4.950337703615415e-05,
26
+ "loss": 6.6489,
27
+ "step": 1500
28
+ },
29
+ {
30
+ "epoch": 0.13,
31
+ "learning_rate": 4.933783604820554e-05,
32
+ "loss": 6.4267,
33
+ "step": 2000
34
+ },
35
+ {
36
+ "epoch": 0.17,
37
+ "learning_rate": 4.9172295060256924e-05,
38
+ "loss": 6.2394,
39
+ "step": 2500
40
+ },
41
+ {
42
+ "epoch": 0.2,
43
+ "learning_rate": 4.900675407230831e-05,
44
+ "loss": 6.1117,
45
+ "step": 3000
46
+ },
47
+ {
48
+ "epoch": 0.23,
49
+ "learning_rate": 4.884121308435969e-05,
50
+ "loss": 5.9807,
51
+ "step": 3500
52
+ },
53
+ {
54
+ "epoch": 0.26,
55
+ "learning_rate": 4.8675672096411075e-05,
56
+ "loss": 5.8532,
57
+ "step": 4000
58
+ },
59
+ {
60
+ "epoch": 0.3,
61
+ "learning_rate": 4.851013110846246e-05,
62
+ "loss": 5.7589,
63
+ "step": 4500
64
+ },
65
+ {
66
+ "epoch": 0.33,
67
+ "learning_rate": 4.8344590120513846e-05,
68
+ "loss": 5.661,
69
+ "step": 5000
70
+ },
71
+ {
72
+ "epoch": 0.36,
73
+ "learning_rate": 4.8179049132565225e-05,
74
+ "loss": 5.5663,
75
+ "step": 5500
76
+ },
77
+ {
78
+ "epoch": 0.4,
79
+ "learning_rate": 4.801350814461661e-05,
80
+ "loss": 5.5025,
81
+ "step": 6000
82
+ },
83
+ {
84
+ "epoch": 0.43,
85
+ "learning_rate": 4.7847967156667996e-05,
86
+ "loss": 5.4251,
87
+ "step": 6500
88
+ },
89
+ {
90
+ "epoch": 0.46,
91
+ "learning_rate": 4.768242616871938e-05,
92
+ "loss": 5.35,
93
+ "step": 7000
94
+ },
95
+ {
96
+ "epoch": 0.5,
97
+ "learning_rate": 4.751688518077076e-05,
98
+ "loss": 5.2991,
99
+ "step": 7500
100
+ },
101
+ {
102
+ "epoch": 0.53,
103
+ "learning_rate": 4.735134419282215e-05,
104
+ "loss": 5.2476,
105
+ "step": 8000
106
+ },
107
+ {
108
+ "epoch": 0.56,
109
+ "learning_rate": 4.718580320487353e-05,
110
+ "loss": 5.1949,
111
+ "step": 8500
112
+ },
113
+ {
114
+ "epoch": 0.6,
115
+ "learning_rate": 4.702026221692492e-05,
116
+ "loss": 5.1361,
117
+ "step": 9000
118
+ },
119
+ {
120
+ "epoch": 0.63,
121
+ "learning_rate": 4.68547212289763e-05,
122
+ "loss": 5.0988,
123
+ "step": 9500
124
+ },
125
+ {
126
+ "epoch": 0.66,
127
+ "learning_rate": 4.668918024102768e-05,
128
+ "loss": 5.0639,
129
+ "step": 10000
130
+ },
131
+ {
132
+ "epoch": 0.7,
133
+ "learning_rate": 4.652363925307907e-05,
134
+ "loss": 5.037,
135
+ "step": 10500
136
+ },
137
+ {
138
+ "epoch": 0.73,
139
+ "learning_rate": 4.6358098265130454e-05,
140
+ "loss": 4.9896,
141
+ "step": 11000
142
+ },
143
+ {
144
+ "epoch": 0.76,
145
+ "learning_rate": 4.619255727718183e-05,
146
+ "loss": 4.9501,
147
+ "step": 11500
148
+ },
149
+ {
150
+ "epoch": 0.79,
151
+ "learning_rate": 4.602701628923322e-05,
152
+ "loss": 4.9465,
153
+ "step": 12000
154
+ },
155
+ {
156
+ "epoch": 0.83,
157
+ "learning_rate": 4.58614753012846e-05,
158
+ "loss": 4.9,
159
+ "step": 12500
160
+ },
161
+ {
162
+ "epoch": 0.86,
163
+ "learning_rate": 4.569593431333598e-05,
164
+ "loss": 4.8633,
165
+ "step": 13000
166
+ },
167
+ {
168
+ "epoch": 0.89,
169
+ "learning_rate": 4.553039332538737e-05,
170
+ "loss": 4.8352,
171
+ "step": 13500
172
+ },
173
+ {
174
+ "epoch": 0.93,
175
+ "learning_rate": 4.536485233743875e-05,
176
+ "loss": 4.7846,
177
+ "step": 14000
178
+ },
179
+ {
180
+ "epoch": 0.96,
181
+ "learning_rate": 4.5199311349490134e-05,
182
+ "loss": 4.775,
183
+ "step": 14500
184
+ },
185
+ {
186
+ "epoch": 0.99,
187
+ "learning_rate": 4.503377036154152e-05,
188
+ "loss": 4.7556,
189
+ "step": 15000
190
+ },
191
+ {
192
+ "epoch": 1.03,
193
+ "learning_rate": 4.48682293735929e-05,
194
+ "loss": 4.7332,
195
+ "step": 15500
196
+ },
197
+ {
198
+ "epoch": 1.06,
199
+ "learning_rate": 4.4702688385644284e-05,
200
+ "loss": 4.7188,
201
+ "step": 16000
202
+ },
203
+ {
204
+ "epoch": 1.09,
205
+ "learning_rate": 4.453714739769567e-05,
206
+ "loss": 4.6928,
207
+ "step": 16500
208
+ },
209
+ {
210
+ "epoch": 1.13,
211
+ "learning_rate": 4.4371606409747055e-05,
212
+ "loss": 4.6792,
213
+ "step": 17000
214
+ },
215
+ {
216
+ "epoch": 1.16,
217
+ "learning_rate": 4.4206065421798434e-05,
218
+ "loss": 4.6542,
219
+ "step": 17500
220
+ },
221
+ {
222
+ "epoch": 1.19,
223
+ "learning_rate": 4.404052443384982e-05,
224
+ "loss": 4.6433,
225
+ "step": 18000
226
+ },
227
+ {
228
+ "epoch": 1.23,
229
+ "learning_rate": 4.3874983445901206e-05,
230
+ "loss": 4.616,
231
+ "step": 18500
232
+ },
233
+ {
234
+ "epoch": 1.26,
235
+ "learning_rate": 4.370944245795259e-05,
236
+ "loss": 4.6071,
237
+ "step": 19000
238
+ },
239
+ {
240
+ "epoch": 1.29,
241
+ "learning_rate": 4.354390147000397e-05,
242
+ "loss": 4.5814,
243
+ "step": 19500
244
+ },
245
+ {
246
+ "epoch": 1.32,
247
+ "learning_rate": 4.3378360482055356e-05,
248
+ "loss": 4.5617,
249
+ "step": 20000
250
+ },
251
+ {
252
+ "epoch": 1.36,
253
+ "learning_rate": 4.321281949410674e-05,
254
+ "loss": 4.5472,
255
+ "step": 20500
256
+ },
257
+ {
258
+ "epoch": 1.39,
259
+ "learning_rate": 4.304727850615813e-05,
260
+ "loss": 4.5401,
261
+ "step": 21000
262
+ },
263
+ {
264
+ "epoch": 1.42,
265
+ "learning_rate": 4.2881737518209507e-05,
266
+ "loss": 4.5001,
267
+ "step": 21500
268
+ },
269
+ {
270
+ "epoch": 1.46,
271
+ "learning_rate": 4.271619653026089e-05,
272
+ "loss": 4.4979,
273
+ "step": 22000
274
+ },
275
+ {
276
+ "epoch": 1.49,
277
+ "learning_rate": 4.255065554231228e-05,
278
+ "loss": 4.4717,
279
+ "step": 22500
280
+ },
281
+ {
282
+ "epoch": 1.52,
283
+ "learning_rate": 4.2385114554363664e-05,
284
+ "loss": 4.4488,
285
+ "step": 23000
286
+ },
287
+ {
288
+ "epoch": 1.56,
289
+ "learning_rate": 4.221957356641504e-05,
290
+ "loss": 4.4499,
291
+ "step": 23500
292
+ },
293
+ {
294
+ "epoch": 1.59,
295
+ "learning_rate": 4.205403257846643e-05,
296
+ "loss": 4.4122,
297
+ "step": 24000
298
+ },
299
+ {
300
+ "epoch": 1.62,
301
+ "learning_rate": 4.1888491590517814e-05,
302
+ "loss": 4.4016,
303
+ "step": 24500
304
+ },
305
+ {
306
+ "epoch": 1.66,
307
+ "learning_rate": 4.17229506025692e-05,
308
+ "loss": 4.3926,
309
+ "step": 25000
310
+ },
311
+ {
312
+ "epoch": 1.69,
313
+ "learning_rate": 4.155740961462058e-05,
314
+ "loss": 4.3787,
315
+ "step": 25500
316
+ },
317
+ {
318
+ "epoch": 1.72,
319
+ "learning_rate": 4.1391868626671964e-05,
320
+ "loss": 4.379,
321
+ "step": 26000
322
+ },
323
+ {
324
+ "epoch": 1.75,
325
+ "learning_rate": 4.122632763872335e-05,
326
+ "loss": 4.3696,
327
+ "step": 26500
328
+ },
329
+ {
330
+ "epoch": 1.79,
331
+ "learning_rate": 4.1060786650774736e-05,
332
+ "loss": 4.3436,
333
+ "step": 27000
334
+ },
335
+ {
336
+ "epoch": 1.82,
337
+ "learning_rate": 4.089524566282612e-05,
338
+ "loss": 4.3236,
339
+ "step": 27500
340
+ },
341
+ {
342
+ "epoch": 1.85,
343
+ "learning_rate": 4.07297046748775e-05,
344
+ "loss": 4.3132,
345
+ "step": 28000
346
+ },
347
+ {
348
+ "epoch": 1.89,
349
+ "learning_rate": 4.0564163686928886e-05,
350
+ "loss": 4.3176,
351
+ "step": 28500
352
+ },
353
+ {
354
+ "epoch": 1.92,
355
+ "learning_rate": 4.039862269898027e-05,
356
+ "loss": 4.2956,
357
+ "step": 29000
358
+ },
359
+ {
360
+ "epoch": 1.95,
361
+ "learning_rate": 4.023308171103166e-05,
362
+ "loss": 4.2884,
363
+ "step": 29500
364
+ },
365
+ {
366
+ "epoch": 1.99,
367
+ "learning_rate": 4.0067540723083036e-05,
368
+ "loss": 4.2735,
369
+ "step": 30000
370
+ },
371
+ {
372
+ "epoch": 2.02,
373
+ "learning_rate": 3.990199973513442e-05,
374
+ "loss": 4.2596,
375
+ "step": 30500
376
+ },
377
+ {
378
+ "epoch": 2.05,
379
+ "learning_rate": 3.973645874718581e-05,
380
+ "loss": 4.2534,
381
+ "step": 31000
382
+ },
383
+ {
384
+ "epoch": 2.09,
385
+ "learning_rate": 3.9570917759237194e-05,
386
+ "loss": 4.2429,
387
+ "step": 31500
388
+ },
389
+ {
390
+ "epoch": 2.12,
391
+ "learning_rate": 3.940537677128857e-05,
392
+ "loss": 4.2193,
393
+ "step": 32000
394
+ },
395
+ {
396
+ "epoch": 2.15,
397
+ "learning_rate": 3.923983578333996e-05,
398
+ "loss": 4.2025,
399
+ "step": 32500
400
+ },
401
+ {
402
+ "epoch": 2.19,
403
+ "learning_rate": 3.9074294795391344e-05,
404
+ "loss": 4.1868,
405
+ "step": 33000
406
+ },
407
+ {
408
+ "epoch": 2.22,
409
+ "learning_rate": 3.890875380744273e-05,
410
+ "loss": 4.1895,
411
+ "step": 33500
412
+ },
413
+ {
414
+ "epoch": 2.25,
415
+ "learning_rate": 3.874321281949411e-05,
416
+ "loss": 4.1681,
417
+ "step": 34000
418
+ },
419
+ {
420
+ "epoch": 2.28,
421
+ "learning_rate": 3.8577671831545494e-05,
422
+ "loss": 4.18,
423
+ "step": 34500
424
+ },
425
+ {
426
+ "epoch": 2.32,
427
+ "learning_rate": 3.841213084359688e-05,
428
+ "loss": 4.1666,
429
+ "step": 35000
430
+ },
431
+ {
432
+ "epoch": 2.35,
433
+ "learning_rate": 3.8246589855648266e-05,
434
+ "loss": 4.136,
435
+ "step": 35500
436
+ },
437
+ {
438
+ "epoch": 2.38,
439
+ "learning_rate": 3.8081048867699645e-05,
440
+ "loss": 4.1456,
441
+ "step": 36000
442
+ },
443
+ {
444
+ "epoch": 2.42,
445
+ "learning_rate": 3.791550787975103e-05,
446
+ "loss": 4.1194,
447
+ "step": 36500
448
+ },
449
+ {
450
+ "epoch": 2.45,
451
+ "learning_rate": 3.7749966891802416e-05,
452
+ "loss": 4.1111,
453
+ "step": 37000
454
+ },
455
+ {
456
+ "epoch": 2.48,
457
+ "learning_rate": 3.7584425903853795e-05,
458
+ "loss": 4.092,
459
+ "step": 37500
460
+ },
461
+ {
462
+ "epoch": 2.52,
463
+ "learning_rate": 3.741888491590518e-05,
464
+ "loss": 4.0972,
465
+ "step": 38000
466
+ },
467
+ {
468
+ "epoch": 2.55,
469
+ "learning_rate": 3.7253343927956566e-05,
470
+ "loss": 4.0929,
471
+ "step": 38500
472
+ },
473
+ {
474
+ "epoch": 2.58,
475
+ "learning_rate": 3.7087802940007945e-05,
476
+ "loss": 4.0645,
477
+ "step": 39000
478
+ },
479
+ {
480
+ "epoch": 2.62,
481
+ "learning_rate": 3.692226195205933e-05,
482
+ "loss": 4.0618,
483
+ "step": 39500
484
+ },
485
+ {
486
+ "epoch": 2.65,
487
+ "learning_rate": 3.675672096411072e-05,
488
+ "loss": 4.0674,
489
+ "step": 40000
490
+ },
491
+ {
492
+ "epoch": 2.68,
493
+ "learning_rate": 3.6591179976162096e-05,
494
+ "loss": 4.0467,
495
+ "step": 40500
496
+ },
497
+ {
498
+ "epoch": 2.71,
499
+ "learning_rate": 3.642563898821348e-05,
500
+ "loss": 4.0345,
501
+ "step": 41000
502
+ },
503
+ {
504
+ "epoch": 2.75,
505
+ "learning_rate": 3.626009800026487e-05,
506
+ "loss": 4.01,
507
+ "step": 41500
508
+ },
509
+ {
510
+ "epoch": 2.78,
511
+ "learning_rate": 3.6094557012316246e-05,
512
+ "loss": 4.0087,
513
+ "step": 42000
514
+ },
515
+ {
516
+ "epoch": 2.81,
517
+ "learning_rate": 3.592901602436763e-05,
518
+ "loss": 4.0112,
519
+ "step": 42500
520
+ },
521
+ {
522
+ "epoch": 2.85,
523
+ "learning_rate": 3.576347503641902e-05,
524
+ "loss": 4.0137,
525
+ "step": 43000
526
+ },
527
+ {
528
+ "epoch": 2.88,
529
+ "learning_rate": 3.55979340484704e-05,
530
+ "loss": 4.005,
531
+ "step": 43500
532
+ },
533
+ {
534
+ "epoch": 2.91,
535
+ "learning_rate": 3.543239306052178e-05,
536
+ "loss": 4.0055,
537
+ "step": 44000
538
+ },
539
+ {
540
+ "epoch": 2.95,
541
+ "learning_rate": 3.526685207257317e-05,
542
+ "loss": 3.98,
543
+ "step": 44500
544
+ },
545
+ {
546
+ "epoch": 2.98,
547
+ "learning_rate": 3.5101311084624553e-05,
548
+ "loss": 3.9646,
549
+ "step": 45000
550
+ },
551
+ {
552
+ "epoch": 3.01,
553
+ "learning_rate": 3.493577009667594e-05,
554
+ "loss": 3.9678,
555
+ "step": 45500
556
+ },
557
+ {
558
+ "epoch": 3.05,
559
+ "learning_rate": 3.477022910872732e-05,
560
+ "loss": 3.9427,
561
+ "step": 46000
562
+ },
563
+ {
564
+ "epoch": 3.08,
565
+ "learning_rate": 3.4604688120778704e-05,
566
+ "loss": 3.9335,
567
+ "step": 46500
568
+ },
569
+ {
570
+ "epoch": 3.11,
571
+ "learning_rate": 3.443914713283009e-05,
572
+ "loss": 3.9361,
573
+ "step": 47000
574
+ },
575
+ {
576
+ "epoch": 3.15,
577
+ "learning_rate": 3.4273606144881475e-05,
578
+ "loss": 3.9476,
579
+ "step": 47500
580
+ },
581
+ {
582
+ "epoch": 3.18,
583
+ "learning_rate": 3.4108065156932854e-05,
584
+ "loss": 3.924,
585
+ "step": 48000
586
+ },
587
+ {
588
+ "epoch": 3.21,
589
+ "learning_rate": 3.394252416898424e-05,
590
+ "loss": 3.9141,
591
+ "step": 48500
592
+ },
593
+ {
594
+ "epoch": 3.24,
595
+ "learning_rate": 3.3776983181035626e-05,
596
+ "loss": 3.9092,
597
+ "step": 49000
598
+ },
599
+ {
600
+ "epoch": 3.28,
601
+ "learning_rate": 3.361144219308701e-05,
602
+ "loss": 3.8767,
603
+ "step": 49500
604
+ },
605
+ {
606
+ "epoch": 3.31,
607
+ "learning_rate": 3.344590120513839e-05,
608
+ "loss": 3.9022,
609
+ "step": 50000
610
+ },
611
+ {
612
+ "epoch": 3.34,
613
+ "learning_rate": 3.3280360217189776e-05,
614
+ "loss": 3.8732,
615
+ "step": 50500
616
+ },
617
+ {
618
+ "epoch": 3.38,
619
+ "learning_rate": 3.311481922924116e-05,
620
+ "loss": 3.8522,
621
+ "step": 51000
622
+ },
623
+ {
624
+ "epoch": 3.41,
625
+ "learning_rate": 3.294927824129255e-05,
626
+ "loss": 3.8773,
627
+ "step": 51500
628
+ },
629
+ {
630
+ "epoch": 3.44,
631
+ "learning_rate": 3.2783737253343926e-05,
632
+ "loss": 3.8715,
633
+ "step": 52000
634
+ },
635
+ {
636
+ "epoch": 3.48,
637
+ "learning_rate": 3.261819626539531e-05,
638
+ "loss": 3.8549,
639
+ "step": 52500
640
+ },
641
+ {
642
+ "epoch": 3.51,
643
+ "learning_rate": 3.24526552774467e-05,
644
+ "loss": 3.8547,
645
+ "step": 53000
646
+ },
647
+ {
648
+ "epoch": 3.54,
649
+ "learning_rate": 3.228711428949808e-05,
650
+ "loss": 3.8408,
651
+ "step": 53500
652
+ },
653
+ {
654
+ "epoch": 3.58,
655
+ "learning_rate": 3.212157330154946e-05,
656
+ "loss": 3.8309,
657
+ "step": 54000
658
+ },
659
+ {
660
+ "epoch": 3.61,
661
+ "learning_rate": 3.195603231360085e-05,
662
+ "loss": 3.8275,
663
+ "step": 54500
664
+ },
665
+ {
666
+ "epoch": 3.64,
667
+ "learning_rate": 3.1790491325652234e-05,
668
+ "loss": 3.8257,
669
+ "step": 55000
670
+ },
671
+ {
672
+ "epoch": 3.68,
673
+ "learning_rate": 3.162495033770362e-05,
674
+ "loss": 3.8233,
675
+ "step": 55500
676
+ },
677
+ {
678
+ "epoch": 3.71,
679
+ "learning_rate": 3.1459409349755005e-05,
680
+ "loss": 3.8184,
681
+ "step": 56000
682
+ },
683
+ {
684
+ "epoch": 3.74,
685
+ "learning_rate": 3.1293868361806384e-05,
686
+ "loss": 3.7942,
687
+ "step": 56500
688
+ },
689
+ {
690
+ "epoch": 3.77,
691
+ "learning_rate": 3.112832737385777e-05,
692
+ "loss": 3.8072,
693
+ "step": 57000
694
+ },
695
+ {
696
+ "epoch": 3.81,
697
+ "learning_rate": 3.0962786385909155e-05,
698
+ "loss": 3.787,
699
+ "step": 57500
700
+ },
701
+ {
702
+ "epoch": 3.84,
703
+ "learning_rate": 3.079724539796054e-05,
704
+ "loss": 3.7954,
705
+ "step": 58000
706
+ },
707
+ {
708
+ "epoch": 3.87,
709
+ "learning_rate": 3.063170441001192e-05,
710
+ "loss": 3.7816,
711
+ "step": 58500
712
+ },
713
+ {
714
+ "epoch": 3.91,
715
+ "learning_rate": 3.0466163422063302e-05,
716
+ "loss": 3.7723,
717
+ "step": 59000
718
+ },
719
+ {
720
+ "epoch": 3.94,
721
+ "learning_rate": 3.0300622434114688e-05,
722
+ "loss": 3.7713,
723
+ "step": 59500
724
+ },
725
+ {
726
+ "epoch": 3.97,
727
+ "learning_rate": 3.0135081446166074e-05,
728
+ "loss": 3.7619,
729
+ "step": 60000
730
+ },
731
+ {
732
+ "epoch": 4.01,
733
+ "learning_rate": 2.9969540458217453e-05,
734
+ "loss": 3.7479,
735
+ "step": 60500
736
+ },
737
+ {
738
+ "epoch": 4.04,
739
+ "learning_rate": 2.980399947026884e-05,
740
+ "loss": 3.7411,
741
+ "step": 61000
742
+ },
743
+ {
744
+ "epoch": 4.07,
745
+ "learning_rate": 2.9638458482320224e-05,
746
+ "loss": 3.7416,
747
+ "step": 61500
748
+ },
749
+ {
750
+ "epoch": 4.11,
751
+ "learning_rate": 2.947291749437161e-05,
752
+ "loss": 3.7423,
753
+ "step": 62000
754
+ },
755
+ {
756
+ "epoch": 4.14,
757
+ "learning_rate": 2.930737650642299e-05,
758
+ "loss": 3.7344,
759
+ "step": 62500
760
+ },
761
+ {
762
+ "epoch": 4.17,
763
+ "learning_rate": 2.9141835518474375e-05,
764
+ "loss": 3.7244,
765
+ "step": 63000
766
+ },
767
+ {
768
+ "epoch": 4.2,
769
+ "learning_rate": 2.897629453052576e-05,
770
+ "loss": 3.739,
771
+ "step": 63500
772
+ },
773
+ {
774
+ "epoch": 4.24,
775
+ "learning_rate": 2.8810753542577146e-05,
776
+ "loss": 3.7142,
777
+ "step": 64000
778
+ },
779
+ {
780
+ "epoch": 4.27,
781
+ "learning_rate": 2.8645212554628525e-05,
782
+ "loss": 3.7158,
783
+ "step": 64500
784
+ },
785
+ {
786
+ "epoch": 4.3,
787
+ "learning_rate": 2.847967156667991e-05,
788
+ "loss": 3.7159,
789
+ "step": 65000
790
+ },
791
+ {
792
+ "epoch": 4.34,
793
+ "learning_rate": 2.8314130578731296e-05,
794
+ "loss": 3.6938,
795
+ "step": 65500
796
+ },
797
+ {
798
+ "epoch": 4.37,
799
+ "learning_rate": 2.8148589590782682e-05,
800
+ "loss": 3.692,
801
+ "step": 66000
802
+ },
803
+ {
804
+ "epoch": 4.4,
805
+ "learning_rate": 2.798304860283406e-05,
806
+ "loss": 3.6779,
807
+ "step": 66500
808
+ },
809
+ {
810
+ "epoch": 4.44,
811
+ "learning_rate": 2.7817507614885447e-05,
812
+ "loss": 3.6856,
813
+ "step": 67000
814
+ },
815
+ {
816
+ "epoch": 4.47,
817
+ "learning_rate": 2.7651966626936832e-05,
818
+ "loss": 3.6771,
819
+ "step": 67500
820
+ },
821
+ {
822
+ "epoch": 4.5,
823
+ "learning_rate": 2.7486425638988218e-05,
824
+ "loss": 3.672,
825
+ "step": 68000
826
+ },
827
+ {
828
+ "epoch": 4.54,
829
+ "learning_rate": 2.7320884651039597e-05,
830
+ "loss": 3.6788,
831
+ "step": 68500
832
+ },
833
+ {
834
+ "epoch": 4.57,
835
+ "learning_rate": 2.7155343663090983e-05,
836
+ "loss": 3.6562,
837
+ "step": 69000
838
+ },
839
+ {
840
+ "epoch": 4.6,
841
+ "learning_rate": 2.698980267514237e-05,
842
+ "loss": 3.6582,
843
+ "step": 69500
844
+ },
845
+ {
846
+ "epoch": 4.64,
847
+ "learning_rate": 2.682426168719375e-05,
848
+ "loss": 3.6605,
849
+ "step": 70000
850
+ },
851
+ {
852
+ "epoch": 4.67,
853
+ "learning_rate": 2.6658720699245133e-05,
854
+ "loss": 3.6535,
855
+ "step": 70500
856
+ },
857
+ {
858
+ "epoch": 4.7,
859
+ "learning_rate": 2.649317971129652e-05,
860
+ "loss": 3.649,
861
+ "step": 71000
862
+ },
863
+ {
864
+ "epoch": 4.73,
865
+ "learning_rate": 2.63276387233479e-05,
866
+ "loss": 3.6455,
867
+ "step": 71500
868
+ },
869
+ {
870
+ "epoch": 4.77,
871
+ "learning_rate": 2.6162097735399287e-05,
872
+ "loss": 3.6437,
873
+ "step": 72000
874
+ },
875
+ {
876
+ "epoch": 4.8,
877
+ "learning_rate": 2.599655674745067e-05,
878
+ "loss": 3.6402,
879
+ "step": 72500
880
+ },
881
+ {
882
+ "epoch": 4.83,
883
+ "learning_rate": 2.583101575950205e-05,
884
+ "loss": 3.6257,
885
+ "step": 73000
886
+ },
887
+ {
888
+ "epoch": 4.87,
889
+ "learning_rate": 2.5665474771553437e-05,
890
+ "loss": 3.6125,
891
+ "step": 73500
892
+ },
893
+ {
894
+ "epoch": 4.9,
895
+ "learning_rate": 2.5499933783604823e-05,
896
+ "loss": 3.6132,
897
+ "step": 74000
898
+ },
899
+ {
900
+ "epoch": 4.93,
901
+ "learning_rate": 2.5334392795656202e-05,
902
+ "loss": 3.612,
903
+ "step": 74500
904
+ },
905
+ {
906
+ "epoch": 4.97,
907
+ "learning_rate": 2.5168851807707588e-05,
908
+ "loss": 3.6139,
909
+ "step": 75000
910
+ },
911
+ {
912
+ "epoch": 5.0,
913
+ "learning_rate": 2.5003310819758973e-05,
914
+ "loss": 3.5995,
915
+ "step": 75500
916
+ },
917
+ {
918
+ "epoch": 5.03,
919
+ "learning_rate": 2.4837769831810356e-05,
920
+ "loss": 3.6016,
921
+ "step": 76000
922
+ },
923
+ {
924
+ "epoch": 5.07,
925
+ "learning_rate": 2.467222884386174e-05,
926
+ "loss": 3.5882,
927
+ "step": 76500
928
+ },
929
+ {
930
+ "epoch": 5.1,
931
+ "learning_rate": 2.4506687855913124e-05,
932
+ "loss": 3.6023,
933
+ "step": 77000
934
+ },
935
+ {
936
+ "epoch": 5.13,
937
+ "learning_rate": 2.434114686796451e-05,
938
+ "loss": 3.6025,
939
+ "step": 77500
940
+ },
941
+ {
942
+ "epoch": 5.16,
943
+ "learning_rate": 2.417560588001589e-05,
944
+ "loss": 3.5897,
945
+ "step": 78000
946
+ },
947
+ {
948
+ "epoch": 5.2,
949
+ "learning_rate": 2.4010064892067277e-05,
950
+ "loss": 3.5767,
951
+ "step": 78500
952
+ },
953
+ {
954
+ "epoch": 5.23,
955
+ "learning_rate": 2.384452390411866e-05,
956
+ "loss": 3.5835,
957
+ "step": 79000
958
+ },
959
+ {
960
+ "epoch": 5.26,
961
+ "learning_rate": 2.3678982916170045e-05,
962
+ "loss": 3.5579,
963
+ "step": 79500
964
+ },
965
+ {
966
+ "epoch": 5.3,
967
+ "learning_rate": 2.3513441928221428e-05,
968
+ "loss": 3.5708,
969
+ "step": 80000
970
+ },
971
+ {
972
+ "epoch": 5.33,
973
+ "learning_rate": 2.3347900940272813e-05,
974
+ "loss": 3.5613,
975
+ "step": 80500
976
+ },
977
+ {
978
+ "epoch": 5.36,
979
+ "learning_rate": 2.3182359952324196e-05,
980
+ "loss": 3.5632,
981
+ "step": 81000
982
+ },
983
+ {
984
+ "epoch": 5.4,
985
+ "learning_rate": 2.301681896437558e-05,
986
+ "loss": 3.5688,
987
+ "step": 81500
988
+ },
989
+ {
990
+ "epoch": 5.43,
991
+ "learning_rate": 2.2851277976426967e-05,
992
+ "loss": 3.5563,
993
+ "step": 82000
994
+ },
995
+ {
996
+ "epoch": 5.46,
997
+ "learning_rate": 2.268573698847835e-05,
998
+ "loss": 3.5438,
999
+ "step": 82500
1000
+ },
1001
+ {
1002
+ "epoch": 5.5,
1003
+ "learning_rate": 2.2520196000529735e-05,
1004
+ "loss": 3.5384,
1005
+ "step": 83000
1006
+ },
1007
+ {
1008
+ "epoch": 5.53,
1009
+ "learning_rate": 2.2354655012581117e-05,
1010
+ "loss": 3.5491,
1011
+ "step": 83500
1012
+ },
1013
+ {
1014
+ "epoch": 5.56,
1015
+ "learning_rate": 2.21891140246325e-05,
1016
+ "loss": 3.5491,
1017
+ "step": 84000
1018
+ },
1019
+ {
1020
+ "epoch": 5.6,
1021
+ "learning_rate": 2.2023573036683885e-05,
1022
+ "loss": 3.5437,
1023
+ "step": 84500
1024
+ },
1025
+ {
1026
+ "epoch": 5.63,
1027
+ "learning_rate": 2.1858032048735268e-05,
1028
+ "loss": 3.5335,
1029
+ "step": 85000
1030
+ },
1031
+ {
1032
+ "epoch": 5.66,
1033
+ "learning_rate": 2.169249106078665e-05,
1034
+ "loss": 3.5302,
1035
+ "step": 85500
1036
+ },
1037
+ {
1038
+ "epoch": 5.69,
1039
+ "learning_rate": 2.1526950072838036e-05,
1040
+ "loss": 3.5227,
1041
+ "step": 86000
1042
+ },
1043
+ {
1044
+ "epoch": 5.73,
1045
+ "learning_rate": 2.1361409084889418e-05,
1046
+ "loss": 3.5239,
1047
+ "step": 86500
1048
+ },
1049
+ {
1050
+ "epoch": 5.76,
1051
+ "learning_rate": 2.1195868096940804e-05,
1052
+ "loss": 3.5148,
1053
+ "step": 87000
1054
+ },
1055
+ {
1056
+ "epoch": 5.79,
1057
+ "learning_rate": 2.1030327108992186e-05,
1058
+ "loss": 3.5282,
1059
+ "step": 87500
1060
+ },
1061
+ {
1062
+ "epoch": 5.83,
1063
+ "learning_rate": 2.0864786121043572e-05,
1064
+ "loss": 3.5222,
1065
+ "step": 88000
1066
+ },
1067
+ {
1068
+ "epoch": 5.86,
1069
+ "learning_rate": 2.0699245133094954e-05,
1070
+ "loss": 3.5179,
1071
+ "step": 88500
1072
+ },
1073
+ {
1074
+ "epoch": 5.89,
1075
+ "learning_rate": 2.053370414514634e-05,
1076
+ "loss": 3.4973,
1077
+ "step": 89000
1078
+ },
1079
+ {
1080
+ "epoch": 5.93,
1081
+ "learning_rate": 2.0368163157197722e-05,
1082
+ "loss": 3.5072,
1083
+ "step": 89500
1084
+ },
1085
+ {
1086
+ "epoch": 5.96,
1087
+ "learning_rate": 2.0202622169249108e-05,
1088
+ "loss": 3.5043,
1089
+ "step": 90000
1090
+ },
1091
+ {
1092
+ "epoch": 5.99,
1093
+ "learning_rate": 2.003708118130049e-05,
1094
+ "loss": 3.5099,
1095
+ "step": 90500
1096
+ },
1097
+ {
1098
+ "epoch": 6.03,
1099
+ "learning_rate": 1.9871540193351876e-05,
1100
+ "loss": 3.4962,
1101
+ "step": 91000
1102
+ },
1103
+ {
1104
+ "epoch": 6.06,
1105
+ "learning_rate": 1.9705999205403258e-05,
1106
+ "loss": 3.4934,
1107
+ "step": 91500
1108
+ },
1109
+ {
1110
+ "epoch": 6.09,
1111
+ "learning_rate": 1.9540458217454644e-05,
1112
+ "loss": 3.484,
1113
+ "step": 92000
1114
+ },
1115
+ {
1116
+ "epoch": 6.13,
1117
+ "learning_rate": 1.9374917229506026e-05,
1118
+ "loss": 3.4683,
1119
+ "step": 92500
1120
+ },
1121
+ {
1122
+ "epoch": 6.16,
1123
+ "learning_rate": 1.9209376241557412e-05,
1124
+ "loss": 3.4929,
1125
+ "step": 93000
1126
+ },
1127
+ {
1128
+ "epoch": 6.19,
1129
+ "learning_rate": 1.9043835253608794e-05,
1130
+ "loss": 3.4759,
1131
+ "step": 93500
1132
+ },
1133
+ {
1134
+ "epoch": 6.22,
1135
+ "learning_rate": 1.887829426566018e-05,
1136
+ "loss": 3.4733,
1137
+ "step": 94000
1138
+ },
1139
+ {
1140
+ "epoch": 6.26,
1141
+ "learning_rate": 1.8712753277711562e-05,
1142
+ "loss": 3.4746,
1143
+ "step": 94500
1144
+ },
1145
+ {
1146
+ "epoch": 6.29,
1147
+ "learning_rate": 1.8547212289762948e-05,
1148
+ "loss": 3.4734,
1149
+ "step": 95000
1150
+ },
1151
+ {
1152
+ "epoch": 6.32,
1153
+ "learning_rate": 1.838167130181433e-05,
1154
+ "loss": 3.4738,
1155
+ "step": 95500
1156
+ },
1157
+ {
1158
+ "epoch": 6.36,
1159
+ "learning_rate": 1.8216130313865716e-05,
1160
+ "loss": 3.4768,
1161
+ "step": 96000
1162
+ },
1163
+ {
1164
+ "epoch": 6.39,
1165
+ "learning_rate": 1.80505893259171e-05,
1166
+ "loss": 3.4731,
1167
+ "step": 96500
1168
+ },
1169
+ {
1170
+ "epoch": 6.42,
1171
+ "learning_rate": 1.7885048337968484e-05,
1172
+ "loss": 3.4659,
1173
+ "step": 97000
1174
+ },
1175
+ {
1176
+ "epoch": 6.46,
1177
+ "learning_rate": 1.7719507350019866e-05,
1178
+ "loss": 3.4642,
1179
+ "step": 97500
1180
+ },
1181
+ {
1182
+ "epoch": 6.49,
1183
+ "learning_rate": 1.755396636207125e-05,
1184
+ "loss": 3.4595,
1185
+ "step": 98000
1186
+ },
1187
+ {
1188
+ "epoch": 6.52,
1189
+ "learning_rate": 1.738842537412263e-05,
1190
+ "loss": 3.4715,
1191
+ "step": 98500
1192
+ },
1193
+ {
1194
+ "epoch": 6.56,
1195
+ "learning_rate": 1.7222884386174017e-05,
1196
+ "loss": 3.4457,
1197
+ "step": 99000
1198
+ },
1199
+ {
1200
+ "epoch": 6.59,
1201
+ "learning_rate": 1.70573433982254e-05,
1202
+ "loss": 3.4621,
1203
+ "step": 99500
1204
+ },
1205
+ {
1206
+ "epoch": 6.62,
1207
+ "learning_rate": 1.6891802410276785e-05,
1208
+ "loss": 3.446,
1209
+ "step": 100000
1210
+ }
1211
+ ],
1212
+ "logging_steps": 500,
1213
+ "max_steps": 151020,
1214
+ "num_train_epochs": 10,
1215
+ "save_steps": 100000,
1216
+ "total_flos": 1.2639573579451392e+18,
1217
+ "trial_name": null,
1218
+ "trial_params": null
1219
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2377555161a9203790a40b5d774fd4a5ca2696bf3a21b4c2a8aa945588b63a1e
3
+ size 4027