Kyle1668 committed
Commit 2128c98
1 parent: 1d02fb8

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "_name_or_path": "t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 4096,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0",
+   "use_cache": false,
+   "vocab_size": 32128
+ }
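
config.json shows this is a fine-tune of the t5-large backbone (24 encoder and 24 decoder layers, d_model 1024, 32128-token vocabulary), keeping the stock T5 task prefixes. A minimal sketch of loading the uploaded checkpoint with transformers follows; the local path is a placeholder, since the diff does not name the repository:

```python
# Minimal sketch of loading this uploaded checkpoint with transformers.
# "./checkpoint-24000" is a placeholder; substitute the actual repo id or local folder.
from transformers import T5ForConditionalGeneration, T5Tokenizer

checkpoint = "./checkpoint-24000"
tokenizer = T5Tokenizer.from_pretrained(checkpoint)
model = T5ForConditionalGeneration.from_pretrained(checkpoint)

# These values come straight from config.json: a t5-large backbone with 24 encoder
# and 24 decoder layers, d_model of 1024, and a 32128-token vocabulary.
print(model.config.num_layers, model.config.num_decoder_layers,
      model.config.d_model, model.config.vocab_size)
```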
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.31.0"
+ }
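
generation_config.json records the decoding defaults that model.generate() falls back on when no overrides are passed. A minimal, self-contained sketch of reading it back; the path is again a placeholder:

```python
# Minimal sketch: generation_config.json supplies the defaults used by model.generate().
# "./checkpoint-24000" is a placeholder path for this uploaded folder.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("./checkpoint-24000")
print(gen_config.decoder_start_token_id)  # 0, per this file
print(gen_config.eos_token_id)            # 1
print(gen_config.pad_token_id)            # 0
```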
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd6f784c50b4a5afbba6c73e38c741522db60fe4c56c8729e8e08bd6f231590
+ size 5901652619
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2f7ace74313dddaf0bb79e0527d3033e8b85ddac4961e91b04d753efe66d996
+ size 2950848513
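
optimizer.pt and pytorch_model.bin (like rng_state.pth, scheduler.pt, spiece.model, and training_args.bin below) are tracked with Git LFS, so the diff shows only the pointer file: the spec version, the SHA-256 of the payload, and its size in bytes (roughly 5.9 GB of optimizer state and 2.95 GB of model weights). A minimal sketch of fetching the real weights with huggingface_hub; the repo id is a placeholder:

```python
# Minimal sketch: the entries above are Git LFS pointer files, not the binaries themselves.
# hf_hub_download fetches the actual payload; the repo id below is a placeholder.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(repo_id="Kyle1668/your-repo-name", filename="pytorch_model.bin")
print(weights_path)  # local cache path of the ~2.95 GB weights file
```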
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb06fb21d9a87fd469ed01f64f182d8b46dee9fa7c26a6c2396c20528ee7bfe7
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4c3c3527015ca732cff266710a480f5914698d62ae42ae11bdf0a34dce020fb
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer_config.json ADDED
@@ -0,0 +1,111 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "extra_ids": 100,
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
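
tokenizer_config.json (together with special_tokens_map.json and the SentencePiece model in spiece.model) registers the stock T5Tokenizer setup: 100 <extra_id_*> sentinel tokens, a model_max_length of 512, and </s> / <pad> / <unk> as the special tokens. A minimal sketch of loading it; the path is a placeholder:

```python
# Minimal sketch of the tokenizer side of this checkpoint; the path is a placeholder.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("./checkpoint-24000")

print(tokenizer.model_max_length)                # 512, per tokenizer_config.json
print(tokenizer.eos_token, tokenizer.pad_token)  # "</s>", "<pad>"
# The 100 sentinel tokens <extra_id_0> ... <extra_id_99> sit at the top of the vocabulary;
# <extra_id_0> is typically id 32099 for the stock T5 SentencePiece vocab.
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))
```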
trainer_state.json ADDED
@@ -0,0 +1,344 @@
+ {
+   "best_metric": 0.4793087553644425,
+   "best_model_checkpoint": "trained_models/training_1691463035_ag_news_t5-large/model/checkpoint-24000",
+   "epoch": 4.0,
+   "global_step": 24000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.08,
+       "learning_rate": 4.166666666666667e-06,
+       "loss": 12.1134,
+       "step": 500
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 8.333333333333334e-06,
+       "loss": 0.3869,
+       "step": 1000
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 1.25e-05,
+       "loss": 0.1264,
+       "step": 1500
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.1039,
+       "step": 2000
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 2.0833333333333336e-05,
+       "loss": 0.0863,
+       "step": 2500
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 2.5e-05,
+       "loss": 0.0796,
+       "step": 3000
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 2.916666666666667e-05,
+       "loss": 0.0777,
+       "step": 3500
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.0688,
+       "step": 4000
+     },
+     {
+       "epoch": 0.75,
+       "learning_rate": 3.7500000000000003e-05,
+       "loss": 0.072,
+       "step": 4500
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 4.166666666666667e-05,
+       "loss": 0.0699,
+       "step": 5000
+     },
+     {
+       "epoch": 0.92,
+       "learning_rate": 4.5833333333333334e-05,
+       "loss": 0.0666,
+       "step": 5500
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 5e-05,
+       "loss": 0.061,
+       "step": 6000
+     },
+     {
+       "epoch": 1.0,
+       "eval_acc": 0.9465789473684211,
+       "eval_f1": 0.4774685948550837,
+       "eval_loss": 0.06575142592191696,
+       "eval_runtime": 297.0218,
+       "eval_samples_per_second": 25.587,
+       "eval_steps_per_second": 3.198,
+       "step": 6000
+     },
+     {
+       "epoch": 1.08,
+       "learning_rate": 5.4166666666666664e-05,
+       "loss": 0.0569,
+       "step": 6500
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 5.833333333333334e-05,
+       "loss": 0.0568,
+       "step": 7000
+     },
+     {
+       "epoch": 1.25,
+       "learning_rate": 6.25e-05,
+       "loss": 0.0566,
+       "step": 7500
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 6.666666666666667e-05,
+       "loss": 0.0559,
+       "step": 8000
+     },
+     {
+       "epoch": 1.42,
+       "learning_rate": 7.083333333333334e-05,
+       "loss": 0.0538,
+       "step": 8500
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 7.500000000000001e-05,
+       "loss": 0.0553,
+       "step": 9000
+     },
+     {
+       "epoch": 1.58,
+       "learning_rate": 7.916666666666666e-05,
+       "loss": 0.056,
+       "step": 9500
+     },
+     {
+       "epoch": 1.67,
+       "learning_rate": 8.333333333333334e-05,
+       "loss": 0.0524,
+       "step": 10000
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 8.75e-05,
+       "loss": 0.0538,
+       "step": 10500
+     },
+     {
+       "epoch": 1.83,
+       "learning_rate": 9.166666666666667e-05,
+       "loss": 0.0544,
+       "step": 11000
+     },
+     {
+       "epoch": 1.92,
+       "learning_rate": 9.583333333333334e-05,
+       "loss": 0.058,
+       "step": 11500
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.0001,
+       "loss": 0.0542,
+       "step": 12000
+     },
+     {
+       "epoch": 2.0,
+       "eval_acc": 0.9465789473684211,
+       "eval_f1": 0.47827238398782235,
+       "eval_loss": 0.061526209115982056,
+       "eval_runtime": 296.7796,
+       "eval_samples_per_second": 25.608,
+       "eval_steps_per_second": 3.201,
+       "step": 12000
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 9.953703703703704e-05,
+       "loss": 0.0371,
+       "step": 12500
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 9.907407407407407e-05,
+       "loss": 0.0396,
+       "step": 13000
+     },
+     {
+       "epoch": 2.25,
+       "learning_rate": 9.861111111111112e-05,
+       "loss": 0.0408,
+       "step": 13500
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 9.814814814814815e-05,
+       "loss": 0.0425,
+       "step": 14000
+     },
+     {
+       "epoch": 2.42,
+       "learning_rate": 9.768518518518519e-05,
+       "loss": 0.0429,
+       "step": 14500
+     },
+     {
+       "epoch": 2.5,
+       "learning_rate": 9.722222222222223e-05,
+       "loss": 0.0419,
+       "step": 15000
+     },
+     {
+       "epoch": 2.58,
+       "learning_rate": 9.675925925925926e-05,
+       "loss": 0.0411,
+       "step": 15500
+     },
+     {
+       "epoch": 2.67,
+       "learning_rate": 9.62962962962963e-05,
+       "loss": 0.0429,
+       "step": 16000
+     },
+     {
+       "epoch": 2.75,
+       "learning_rate": 9.583333333333334e-05,
+       "loss": 0.0402,
+       "step": 16500
+     },
+     {
+       "epoch": 2.83,
+       "learning_rate": 9.537037037037038e-05,
+       "loss": 0.047,
+       "step": 17000
+     },
+     {
+       "epoch": 2.92,
+       "learning_rate": 9.490740740740742e-05,
+       "loss": 0.042,
+       "step": 17500
+     },
+     {
+       "epoch": 3.0,
+       "learning_rate": 9.444444444444444e-05,
+       "loss": 0.0383,
+       "step": 18000
+     },
+     {
+       "epoch": 3.0,
+       "eval_acc": 0.945921052631579,
+       "eval_f1": 0.47849464202012254,
+       "eval_loss": 0.05987730622291565,
+       "eval_runtime": 303.3329,
+       "eval_samples_per_second": 25.055,
+       "eval_steps_per_second": 3.132,
+       "step": 18000
+     },
+     {
+       "epoch": 3.08,
+       "learning_rate": 9.398148148148148e-05,
+       "loss": 0.0237,
+       "step": 18500
+     },
+     {
+       "epoch": 3.17,
+       "learning_rate": 9.351851851851852e-05,
+       "loss": 0.0226,
+       "step": 19000
+     },
+     {
+       "epoch": 3.25,
+       "learning_rate": 9.305555555555556e-05,
+       "loss": 0.0262,
+       "step": 19500
+     },
+     {
+       "epoch": 3.33,
+       "learning_rate": 9.25925925925926e-05,
+       "loss": 0.0268,
+       "step": 20000
+     },
+     {
+       "epoch": 3.42,
+       "learning_rate": 9.212962962962963e-05,
+       "loss": 0.027,
+       "step": 20500
+     },
+     {
+       "epoch": 3.5,
+       "learning_rate": 9.166666666666667e-05,
+       "loss": 0.0271,
+       "step": 21000
+     },
+     {
+       "epoch": 3.58,
+       "learning_rate": 9.120370370370371e-05,
+       "loss": 0.0264,
+       "step": 21500
+     },
+     {
+       "epoch": 3.67,
+       "learning_rate": 9.074074074074075e-05,
+       "loss": 0.0279,
+       "step": 22000
+     },
+     {
+       "epoch": 3.75,
+       "learning_rate": 9.027777777777779e-05,
+       "loss": 0.0285,
+       "step": 22500
+     },
+     {
+       "epoch": 3.83,
+       "learning_rate": 8.981481481481481e-05,
+       "loss": 0.0314,
+       "step": 23000
+     },
+     {
+       "epoch": 3.92,
+       "learning_rate": 8.935185185185185e-05,
+       "loss": 0.032,
+       "step": 23500
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 8.888888888888889e-05,
+       "loss": 0.0302,
+       "step": 24000
+     },
+     {
+       "epoch": 4.0,
+       "eval_acc": 0.9489473684210527,
+       "eval_f1": 0.4793087553644425,
+       "eval_loss": 0.05936088785529137,
+       "eval_runtime": 301.2156,
+       "eval_samples_per_second": 25.231,
+       "eval_steps_per_second": 3.154,
+       "step": 24000
+     }
+   ],
+   "max_steps": 120000,
+   "num_train_epochs": 20,
+   "total_flos": 6.39508422721536e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
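
trainer_state.json captures the Trainer's progress at this upload point: 4 of 20 planned epochs (24,000 of 120,000 steps), with the best recorded metric (eval_f1 ≈ 0.479, eval_acc ≈ 0.949) at step 24000, and a checkpoint path that indicates an AG News fine-tune of t5-large. A minimal sketch of pulling the evaluation history back out of this file:

```python
# Minimal sketch: extract the evaluation history recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])
for entry in state["log_history"]:
    if "eval_acc" in entry:  # evaluation entries; the others are training-loss logs
        print(entry["epoch"], entry["eval_acc"], entry["eval_f1"], entry["eval_loss"])
```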
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab969a0672d44d89cf862920a4485fe8128d3ae13992b1755c7e1f50002d0ced
+ size 4283
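
training_args.bin is a small pickled TrainingArguments object, also stored through LFS. If needed, it can be reloaded to inspect the exact hyperparameters used; a minimal sketch, assuming a compatible transformers install (4.31.0 here):

```python
# Minimal sketch: training_args.bin is a pickled transformers TrainingArguments object,
# so unpickling it requires transformers to be importable in the current environment.
import torch

# weights_only=False is needed on recent torch versions to unpickle arbitrary objects;
# drop the keyword on very old torch releases that do not accept it.
training_args = torch.load("training_args.bin", weights_only=False)
print(training_args.learning_rate, training_args.num_train_epochs)
```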