edumunozsala committed
Commit 94189dd
1 Parent(s): 0d59578

commit files to HF hub

README.md ADDED
@@ -0,0 +1,75 @@
+ ---
+ language: es
+ tags:
+ - sagemaker
+ - roberta
+ - ruperta
+ - TextClassification
+ license: apache-2.0
+ datasets:
+ - IMDbreviews_es
+ model-index:
+ - name: RuPERTa_base_sentiment_analysis_es
+   results:
+   - task:
+       name: Sentiment Analysis
+       type: sentiment-analysis
+     dataset:
+       name: "IMDb Reviews in Spanish"
+       type: IMDbreviews_es
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.881866
+     - name: F1 Score
+       type: f1
+       value: 0.888272
+     - name: Precision
+       type: precision
+       value: 0.858605
+     - name: Recall
+       type: recall
+       value: 0.920062
+ ---
+ ## `RuPERTa_base_sentiment_analysis_es`
+
+ This model was trained using Amazon SageMaker and the Hugging Face Deep Learning Container.
+
+ The base model is RuPERTa-base (uncased), a RoBERTa model trained on an uncased version of a large Spanish corpus.
+ It was trained by Manuel Romero (mrm8488).
+
+ ## Hyperparameters
+
+ {
+   "epochs": "4",
+   "eval_batch_size": "8",
+   "fp16": "true",
+   "learning_rate": "3e-05",
+   "model_name": "\"mrm8488/RuPERTa-base\"",
+   "sagemaker_container_log_level": "20",
+   "sagemaker_job_name": "\"ruperta-sentiment-analysis-full-p2-2021-12-06-20-32-27\"",
+   "sagemaker_program": "\"train.py\"",
+   "sagemaker_region": "\"us-east-1\"",
+   "sagemaker_submit_directory": "\"s3://edumunozsala-ml-sagemaker/ruperta-sentiment/ruperta-sentiment-analysis-full-p2-2021-12-06-20-32-27/source/sourcedir.tar.gz\"",
+   "train_batch_size": "32",
+   "train_filename": "\"train_data.pt\"",
+   "val_filename": "\"val_data.pt\""
+ }
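+
+ These hyperparameters were passed to the training job through the SageMaker Hugging Face estimator. A minimal launcher sketch is shown below; the execution role, instance type, container versions, source directory, and S3 paths are illustrative assumptions, not the exact values used for this job.
+
+ ```python
+ # Illustrative launcher sketch -- role, instance type, versions and S3 paths are placeholders.
+ import sagemaker
+ from sagemaker.huggingface import HuggingFace
+
+ role = sagemaker.get_execution_role()
+
+ hyperparameters = {
+     "epochs": 4,
+     "train_batch_size": 32,
+     "eval_batch_size": 8,
+     "learning_rate": 3e-5,
+     "fp16": True,
+     "model_name": "mrm8488/RuPERTa-base",
+     "train_filename": "train_data.pt",
+     "val_filename": "val_data.pt",
+ }
+
+ huggingface_estimator = HuggingFace(
+     entry_point="train.py",          # training script named in the hyperparameters above
+     source_dir="./scripts",          # placeholder for the local directory holding train.py
+     instance_type="ml.p2.xlarge",    # assumption; the job name only suggests a p2 instance
+     instance_count=1,
+     role=role,
+     transformers_version="4.6",      # illustrative Hugging Face DLC versions
+     pytorch_version="1.7",
+     py_version="py36",
+     hyperparameters=hyperparameters,
+ )
+
+ # Channel names and S3 URIs are placeholders.
+ huggingface_estimator.fit({"train": "s3://<bucket>/ruperta-sentiment/train"})
+ ```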
+
+ ## Usage
+
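+ A minimal inference example using the Transformers pipeline is shown below; the hub id is assumed to be `edumunozsala/RuPERTa_base_sentiment_analysis_es`, matching the model name above.
+
+ ```python
+ # Minimal usage sketch; the checkpoint id below is assumed from the model name in this card.
+ from transformers import pipeline
+
+ classifier = pipeline(
+     "sentiment-analysis",
+     model="edumunozsala/RuPERTa_base_sentiment_analysis_es",
+ )
+
+ print(classifier("La película fue fantástica, la recomiendo totalmente."))
+ # The labels come from the id2label mapping in config.json: "Positivo" / "Negativo".
+ ```
+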
+ ## Results
+
+ epoch = 1.0
+ eval_accuracy = 0.8629333333333333
+ eval_f1 = 0.8648790746582545
+ eval_loss = 0.3160930573940277
+ eval_mem_cpu_alloc_delta = 0
+ eval_mem_cpu_peaked_delta = 0
+ eval_mem_gpu_alloc_delta = 0
+ eval_mem_gpu_peaked_delta = 94507520
+ eval_precision = 0.8479381443298969
+ eval_recall = 0.8825107296137339
+ eval_runtime = 114.4994
+ eval_samples_per_second = 32.751
+
README.txt ADDED
@@ -0,0 +1,49 @@
+ ---
+ language: es
+ tags:
+ - sagemaker
+ - roberta
+ - ruperta
+ - TextClassification
+ license: apache-2.0
+ datasets:
+ - IMDbreviews_es
+ model-index:
+ - name: {model_name}
+   results:
+   - task:
+       name: Sentiment Analysis
+       type: sentiment-analysis
+     dataset:
+       name: "IMDb Reviews in Spanish"
+       type: IMDbreviews_es
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.881866
+     - name: F1 Score
+       type: f1
+       value: 0.888272
+     - name: Precision
+       type: precision
+       value: 0.858605
+     - name: Recall
+       type: recall
+       value: 0.920062
+ ---
+ ## `{model_name}`
+
+ This model was trained using Amazon SageMaker and the Hugging Face Deep Learning Container.
+
+ The base model is RuPERTa-base (uncased), a RoBERTa model trained on an uncased version of a large Spanish corpus.
+ It was trained by Manuel Romero (mrm8488).
+
+ ## Hyperparameters
+
+ {hyperparameters}
+
+ ## Usage
+
+ ## Results
+
+ {eval_results}
checkpoint-1329/trainer_state.json DELETED
@@ -1,50 +0,0 @@
- {
-   "best_metric": 0.3160930573940277,
-   "best_model_checkpoint": "/opt/ml/model/checkpoint-1329",
-   "epoch": 1.0,
-   "global_step": 1329,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {"epoch": 1.0, "eval_accuracy": 0.7650666666666667, "eval_f1": 0.7940144961421556, "eval_loss": 0.48894673585891724, "eval_precision": 0.7039800995024875, "eval_recall": 0.910455764075067, "eval_runtime": 114.6163, "eval_samples_per_second": 32.718, "step": 391},
-     {"epoch": 0.38, "learning_rate": 2.994e-05, "loss": 0.4577, "step": 500},
-     {"epoch": 0.75, "learning_rate": 1.1942098914354645e-05, "loss": 0.3945, "step": 1000},
-     {"epoch": 1.0, "eval_accuracy": 0.8629333333333333, "eval_f1": 0.8648790746582545, "eval_loss": 0.3160930573940277, "eval_precision": 0.8479381443298969, "eval_recall": 0.8825107296137339, "eval_runtime": 114.2725, "eval_samples_per_second": 32.816, "step": 1329}
-   ],
-   "max_steps": 1329,
-   "num_train_epochs": 1,
-   "total_flos": 5802820215091200.0,
-   "trial_name": null,
-   "trial_params": null
- }
{checkpoint-1329 → checkpoint-5316}/config.json RENAMED
@@ -11,8 +11,16 @@
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
+  "id2label": {
+    "0": "Negativo",
+    "1": "Positivo"
+  },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
+  "label2id": {
+    "Negativo": 0,
+    "Positivo": 1
+  },
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
   "model_type": "roberta",
{checkpoint-1329 → checkpoint-5316}/merges.txt RENAMED
File without changes
{checkpoint-1329 → checkpoint-5316}/optimizer.pt RENAMED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:85426e7cccc1989fd380a398746c807cf7e309e5c510c512d75c80327edece25
+  oid sha256:b50926ca88f6ee9c6217302ef3adae677541a1c60347a6e4c5e1460ab9d802b3
   size 1007963345
{checkpoint-1329 → checkpoint-5316}/pytorch_model.bin RENAMED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:5bc05a9ad531de2438300efe50279758c1d5c2801d08fc85f2c57b9032847fd3
+  oid sha256:2db1137e2e5530537901d3506d0f41e0e31cba39ee222fda282c12179311b47f
   size 504009161
{checkpoint-1329 → checkpoint-5316}/rng_state.pth RENAMED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:79caf4b4d1ffafd1c1bdf54d218dd1db1cb7ff816574a3804cf02f9bd1a7cb0a
+  oid sha256:b209614552f0eb0c826d33644e2cd8ee657bf831e988da1f95e910205951fb17
   size 14657
{checkpoint-1329 → checkpoint-5316}/scaler.pt RENAMED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:0991f03871b8ca058cbfc8112957ccab3725257cd85d7a12251165521e2d9b60
+  oid sha256:2de02dc1387aff49543d27a56f9c2ff079c9190578ced45536b97f28290285a2
   size 559
{checkpoint-1329 → checkpoint-5316}/scheduler.pt RENAMED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:a0f9610481fadeea44390444cb542baa4c3cd3e3b19c04409d4b9e7b62e2e388
+  oid sha256:0efa315db1f6ea9a90491ff2097b03bd28b11b741f7a85a51de9abd70c51c92b
   size 623
{checkpoint-1329 → checkpoint-5316}/special_tokens_map.json RENAMED
File without changes
{checkpoint-1329 → checkpoint-5316}/tokenizer.json RENAMED
File without changes
{checkpoint-1329 → checkpoint-5316}/tokenizer_config.json RENAMED
File without changes
checkpoint-5316/trainer_state.json ADDED
@@ -0,0 +1,300 @@
+ {
+   "best_metric": 0.8818666666666667,
+   "best_model_checkpoint": "/opt/ml/model/checkpoint-3987",
+   "epoch": 4.0,
+   "global_step": 5316,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.1, "learning_rate": 7.92e-06, "loss": 0.6945, "step": 132},
+     {"epoch": 0.2, "learning_rate": 1.578e-05, "loss": 0.6107, "step": 264},
+     {"epoch": 0.3, "learning_rate": 2.37e-05, "loss": 0.5082, "step": 396},
+     {"epoch": 0.4, "learning_rate": 2.983181063122924e-05, "loss": 0.4632, "step": 528},
+     {"epoch": 0.5, "learning_rate": 2.9009551495016612e-05, "loss": 0.419, "step": 660},
+     {"epoch": 0.6, "learning_rate": 2.8187292358803985e-05, "loss": 0.4142, "step": 792},
+     {"epoch": 0.7, "learning_rate": 2.736503322259136e-05, "loss": 0.3748, "step": 924},
+     {"epoch": 0.79, "learning_rate": 2.6542774086378738e-05, "loss": 0.3793, "step": 1056},
+     {"epoch": 0.89, "learning_rate": 2.5720514950166114e-05, "loss": 0.3584, "step": 1188},
+     {"epoch": 0.99, "learning_rate": 2.489825581395349e-05, "loss": 0.3456, "step": 1320},
+     {"epoch": 1.0, "eval_accuracy": 0.8576, "eval_f1": 0.8572192513368984, "eval_loss": 0.32413816452026367, "eval_precision": 0.8778751369112815, "eval_recall": 0.8375130616509927, "eval_runtime": 107.1381, "eval_samples_per_second": 35.002, "step": 1329},
+     {"epoch": 1.09, "learning_rate": 2.4075996677740867e-05, "loss": 0.2832, "step": 1452},
+     {"epoch": 1.19, "learning_rate": 2.325373754152824e-05, "loss": 0.2625, "step": 1584},
+     {"epoch": 1.29, "learning_rate": 2.2431478405315613e-05, "loss": 0.2583, "step": 1716},
+     {"epoch": 1.39, "learning_rate": 2.160921926910299e-05, "loss": 0.264, "step": 1848},
+     {"epoch": 1.49, "learning_rate": 2.0786960132890365e-05, "loss": 0.2579, "step": 1980},
+     {"epoch": 1.59, "learning_rate": 1.9964700996677742e-05, "loss": 0.2426, "step": 2112},
+     {"epoch": 1.69, "learning_rate": 1.9142441860465118e-05, "loss": 0.243, "step": 2244},
+     {"epoch": 1.79, "learning_rate": 1.832641196013289e-05, "loss": 0.2642, "step": 2376},
+     {"epoch": 1.89, "learning_rate": 1.7504152823920267e-05, "loss": 0.2632, "step": 2508},
+     {"epoch": 1.99, "learning_rate": 1.6681893687707643e-05, "loss": 0.2564, "step": 2640},
+     {"epoch": 2.0, "eval_accuracy": 0.8696, "eval_f1": 0.8666484865012271, "eval_loss": 0.3016686737537384, "eval_precision": 0.9064460924130062, "eval_recall": 0.8301985370950888, "eval_runtime": 107.0545, "eval_samples_per_second": 35.029, "step": 2658},
+     {"epoch": 2.09, "learning_rate": 1.5859634551495016e-05, "loss": 0.1767, "step": 2772},
+     {"epoch": 2.19, "learning_rate": 1.504360465116279e-05, "loss": 0.1527, "step": 2904},
+     {"epoch": 2.28, "learning_rate": 1.4221345514950167e-05, "loss": 0.1381, "step": 3036},
+     {"epoch": 2.38, "learning_rate": 1.3399086378737542e-05, "loss": 0.1516, "step": 3168},
+     {"epoch": 2.48, "learning_rate": 1.2576827242524916e-05, "loss": 0.1508, "step": 3300},
+     {"epoch": 2.58, "learning_rate": 1.1754568106312293e-05, "loss": 0.1479, "step": 3432},
+     {"epoch": 2.68, "learning_rate": 1.093230897009967e-05, "loss": 0.1341, "step": 3564},
+     {"epoch": 2.78, "learning_rate": 1.0110049833887042e-05, "loss": 0.1474, "step": 3696},
+     {"epoch": 2.88, "learning_rate": 9.287790697674419e-06, "loss": 0.1462, "step": 3828},
+     {"epoch": 2.98, "learning_rate": 8.465531561461795e-06, "loss": 0.1444, "step": 3960},
+     {"epoch": 3.0, "eval_accuracy": 0.8818666666666667, "eval_f1": 0.8856774193548387, "eval_loss": 0.37261688709259033, "eval_precision": 0.8750637429882713, "eval_recall": 0.896551724137931, "eval_runtime": 107.1711, "eval_samples_per_second": 34.991, "step": 3987},
+     {"epoch": 3.08, "learning_rate": 7.64327242524917e-06, "loss": 0.0754, "step": 4092},
+     {"epoch": 3.18, "learning_rate": 6.821013289036545e-06, "loss": 0.0811, "step": 4224},
+     {"epoch": 3.28, "learning_rate": 5.9987541528239206e-06, "loss": 0.0808, "step": 4356},
+     {"epoch": 3.38, "learning_rate": 5.176495016611296e-06, "loss": 0.0752, "step": 4488},
+     {"epoch": 3.48, "learning_rate": 4.354235880398672e-06, "loss": 0.0803, "step": 4620},
+     {"epoch": 3.58, "learning_rate": 3.5319767441860467e-06, "loss": 0.0734, "step": 4752},
+     {"epoch": 3.67, "learning_rate": 2.7097176079734222e-06, "loss": 0.0615, "step": 4884},
+     {"epoch": 3.77, "learning_rate": 1.8874584717607975e-06, "loss": 0.0605, "step": 5016},
+     {"epoch": 3.87, "learning_rate": 1.0651993355481726e-06, "loss": 0.0781, "step": 5148},
+     {"epoch": 3.97, "learning_rate": 2.491694352159469e-07, "loss": 0.0713, "step": 5280},
+     {"epoch": 4.0, "eval_accuracy": 0.8818666666666667, "eval_f1": 0.8882723833543505, "eval_loss": 0.5432285666465759, "eval_precision": 0.8586055582642613, "eval_recall": 0.9200626959247649, "eval_runtime": 107.1677, "eval_samples_per_second": 34.992, "step": 5316}
+   ],
+   "max_steps": 5316,
+   "num_train_epochs": 4,
+   "total_flos": 8223951552000000.0,
+   "trial_name": null,
+   "trial_params": null
+ }
{checkpoint-1329 → checkpoint-5316}/training_args.bin RENAMED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:e96e4499e3d195f280a9a2fb6cc2e06b1146519df2f974d078adb55855aec71a
+  oid sha256:d3e2df9d5019e15fd17f73664ef9fabbf698f2c0068074c59f34b7383659bdbd
   size 2415
{checkpoint-1329 → checkpoint-5316}/vocab.json RENAMED
File without changes
config.json CHANGED
@@ -11,8 +11,16 @@
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
+  "id2label": {
+    "0": "Negativo",
+    "1": "Positivo"
+  },
   "initializer_range": 0.02,
   "intermediate_size": 3072,
+  "label2id": {
+    "Negativo": 0,
+    "Positivo": 1
+  },
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
   "model_type": "roberta",
eval_results.txt ADDED
@@ -0,0 +1,12 @@
+ epoch = 1.0
+ eval_accuracy = 0.8629333333333333
+ eval_f1 = 0.8648790746582545
+ eval_loss = 0.3160930573940277
+ eval_mem_cpu_alloc_delta = 0
+ eval_mem_cpu_peaked_delta = 0
+ eval_mem_gpu_alloc_delta = 0
+ eval_mem_gpu_peaked_delta = 94507520
+ eval_precision = 0.8479381443298969
+ eval_recall = 0.8825107296137339
+ eval_runtime = 114.4994
+ eval_samples_per_second = 32.751
output.tar.gz DELETED
@@ -1,3 +0,0 @@
-  version https://git-lfs.github.com/spec/v1
-  oid sha256:3fe99e23333d3d941623c20794e4ff3c4d5299dfe30477348551bea85e8771f1
-  size 302
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:5bc05a9ad531de2438300efe50279758c1d5c2801d08fc85f2c57b9032847fd3
+  oid sha256:41ce4881fd31109522c4ab5fe9495c902eeaa6f9f5905aff32734d20661ce9cc
   size 504009161
training_args.bin CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
-  oid sha256:e96e4499e3d195f280a9a2fb6cc2e06b1146519df2f974d078adb55855aec71a
+  oid sha256:d3e2df9d5019e15fd17f73664ef9fabbf698f2c0068074c59f34b7383659bdbd
   size 2415