arubenruben committed on
Commit 7511825
1 Parent(s): 0bfa5cc

N-Grams Benchmark working

__pycache__/autoencoder.cpython-311.pyc ADDED
Binary file (393 Bytes)

__pycache__/embeddings.cpython-311.pyc ADDED
Binary file (391 Bytes)

__pycache__/n_grams.cpython-311.pyc ADDED
Binary file (4.72 kB)
autoencoder.py ADDED
@@ -0,0 +1,5 @@
+ import logging
+
+
+ def test():
+     logging.info("Autoencoder Not Implemented")
embeddings.py ADDED
@@ -0,0 +1,4 @@
+ import logging
+
+ def test():
+     logging.info("Embeddings Not Implemented")
n_grams.py ADDED
@@ -0,0 +1,111 @@
+
+ from datasets import load_dataset
+ from pathlib import Path
+ import pandas as pd
+ import os
+ import pickle
+ import logging
+ import time
+ import evaluate
+ import nltk
+
+ CURRENT_PATH = Path(__file__).parent
+
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S', filename=os.path.join(CURRENT_PATH, 'out', 'debug.txt'), filemode='a')
+
+ nltk.download("stopwords")
+ nltk.download("punkt")
+
+
+ def tokenizer(text):
+     return nltk.tokenize.word_tokenize(text, language="portuguese")
+
+
+ def load_pipelines():
+     in_path = os.path.join(CURRENT_PATH, 'models', 'n_grams')
+
+     pipeline = []
+
+     for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         with open(os.path.join(in_path, f'{domain}.pickle'), 'rb') as f:
+             logging.info(f"Loading {domain} pipeline...")
+             pipeline.append({
+                 'pipeline': pickle.load(f),
+                 'train_domain': domain,
+             })
+
+     return pipeline
+
+
+ def benchmark(pipeline, debug=False):
+
+     accuracy_evaluator = evaluate.load('accuracy')
+     f1_evaluator = evaluate.load('f1')
+     precision_evaluator = evaluate.load('precision')
+     recall_evaluator = evaluate.load('recall')
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     train_domain = pipeline['train_domain']
+
+     pipeline = pipeline['pipeline']
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+
+         logging.info(f"Test Domain {test_domain}...")
+
+         dataset = load_dataset(
+             'arubenruben/Portuguese_Language_Identification', test_domain, split='test')
+
+         if debug:
+             logging.info("Debug mode: using only 100 samples")
+             dataset = dataset.shuffle().select(range(100))
+         else:
+             dataset = dataset.shuffle().select(range(min(50_000, len(dataset))))
+
+         y = pipeline.predict(dataset['text'])
+
+         accuracy = accuracy_evaluator.compute(
+             predictions=y, references=dataset['label'])['accuracy']
+         f1 = f1_evaluator.compute(
+             predictions=y, references=dataset['label'])['f1']
+         precision = precision_evaluator.compute(
+             predictions=y, references=dataset['label'])['precision']
+         recall = recall_evaluator.compute(
+             predictions=y, references=dataset['label'])['recall']
+
+         logging.info(
+             f"Accuracy: {accuracy} | F1: {f1} | Precision: {precision} | Recall: {recall}")
+
+         df_results = pd.concat([df_results, pd.DataFrame(
+             [[train_domain, test_domain, accuracy, f1, precision, recall]], columns=df_results.columns)], ignore_index=True)
+
+     return df_results
+
+
+ def test():
+
+     DEBUG = True
+
+     logging.info(f"Debug mode: {DEBUG}")
+
+     pipelines = load_pipelines()
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     for pipeline in pipelines:
+         logging.info(f"Train Domain {pipeline['train_domain']}...")
+
+         df_results = pd.concat(
+             [df_results, benchmark(pipeline, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving results...")
+
+     df_results.to_json(os.path.join(CURRENT_PATH, 'out', 'n_grams.json'),
+                        orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == "__main__":
+     test()
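The training code that produced the models/n_grams/{domain}.pickle files is not part of this commit. As a rough illustration only, a pipeline compatible with load_pipelines() and the module-level tokenizer could look like the sketch below; the TfidfVectorizer/LogisticRegression choice, the n-gram range, and the train_domain_pipeline helper are assumptions, not the repository's actual training setup.

import pickle
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from n_grams import tokenizer  # module-level tokenizer, so the pickled pipeline stays loadable


def train_domain_pipeline(texts, labels, out_path):
    # Hypothetical sketch: word n-gram features plus a linear classifier, pickled per domain.
    pipeline = Pipeline([
        ('tfidf', TfidfVectorizer(tokenizer=tokenizer, ngram_range=(1, 3))),
        ('clf', LogisticRegression(max_iter=1000)),
    ])
    pipeline.fit(texts, labels)
    with open(out_path, 'wb') as f:
        pickle.dump(pipeline, f)
    return pipeline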
out/debug.txt ADDED
File without changes
out/n_grams.json ADDED
@@ -0,0 +1,290 @@
+ [
+     {
+         "train_domain":"politics",
+         "test_domain":"politics",
+         "accuracy":0.99,
+         "f1":0.9887640449,
+         "precision":0.9777777778,
+         "recall":1.0
+     },
+     {
+         "train_domain":"politics",
+         "test_domain":"news",
+         "accuracy":0.34,
+         "f1":0.3653846154,
+         "precision":0.2261904762,
+         "recall":0.95
+     },
+     {
+         "train_domain":"politics",
+         "test_domain":"law",
+         "accuracy":0.56,
+         "f1":0.6206896552,
+         "precision":0.5454545455,
+         "recall":0.72
+     },
+     {
+         "train_domain":"politics",
+         "test_domain":"social_media",
+         "accuracy":0.49,
+         "f1":0.6222222222,
+         "precision":0.4516129032,
+         "recall":1.0
+     },
+     {
+         "train_domain":"politics",
+         "test_domain":"literature",
+         "accuracy":0.19,
+         "f1":0.2568807339,
+         "precision":0.1473684211,
+         "recall":1.0
+     },
+     {
+         "train_domain":"politics",
+         "test_domain":"web",
+         "accuracy":0.65,
+         "f1":0.7619047619,
+         "precision":0.7088607595,
+         "recall":0.8235294118
+     },
+     {
+         "train_domain":"news",
+         "test_domain":"politics",
+         "accuracy":0.87,
+         "f1":0.8828828829,
+         "precision":0.7903225806,
+         "recall":1.0
+     },
+     {
+         "train_domain":"news",
+         "test_domain":"news",
+         "accuracy":0.97,
+         "f1":0.88,
+         "precision":0.7857142857,
+         "recall":1.0
+     },
+     {
+         "train_domain":"news",
+         "test_domain":"law",
+         "accuracy":0.33,
+         "f1":0.2298850575,
+         "precision":0.2272727273,
+         "recall":0.2325581395
+     },
+     {
+         "train_domain":"news",
+         "test_domain":"social_media",
+         "accuracy":0.62,
+         "f1":0.7121212121,
+         "precision":0.5949367089,
+         "recall":0.8867924528
+     },
+     {
+         "train_domain":"news",
+         "test_domain":"literature",
+         "accuracy":0.38,
+         "f1":0.2790697674,
+         "precision":0.1666666667,
+         "recall":0.8571428571
+     },
+     {
+         "train_domain":"news",
+         "test_domain":"web",
+         "accuracy":0.74,
+         "f1":0.835443038,
+         "precision":0.7333333333,
+         "recall":0.9705882353
+     },
+     {
+         "train_domain":"law",
+         "test_domain":"politics",
+         "accuracy":0.43,
+         "f1":0.0952380952,
+         "precision":0.2142857143,
+         "recall":0.0612244898
+     },
+     {
+         "train_domain":"law",
+         "test_domain":"news",
+         "accuracy":0.6,
+         "f1":0.2,
+         "precision":0.2631578947,
+         "recall":0.1612903226
+     },
+     {
+         "train_domain":"law",
+         "test_domain":"law",
+         "accuracy":0.96,
+         "f1":0.9607843137,
+         "precision":1.0,
+         "recall":0.9245283019
+     },
+     {
+         "train_domain":"law",
+         "test_domain":"social_media",
+         "accuracy":0.57,
+         "f1":0.3384615385,
+         "precision":0.6875,
+         "recall":0.2244897959
+     },
+     {
+         "train_domain":"law",
+         "test_domain":"literature",
+         "accuracy":0.79,
+         "f1":0.275862069,
+         "precision":0.3333333333,
+         "recall":0.2352941176
+     },
+     {
+         "train_domain":"law",
+         "test_domain":"web",
+         "accuracy":0.4,
+         "f1":0.4545454545,
+         "precision":0.8928571429,
+         "recall":0.3048780488
+     },
+     {
+         "train_domain":"social_media",
+         "test_domain":"politics",
+         "accuracy":0.86,
+         "f1":0.8157894737,
+         "precision":0.8857142857,
+         "recall":0.756097561
+     },
+     {
+         "train_domain":"social_media",
+         "test_domain":"news",
+         "accuracy":0.83,
+         "f1":0.3703703704,
+         "precision":0.5,
+         "recall":0.2941176471
+     },
+     {
+         "train_domain":"social_media",
+         "test_domain":"law",
+         "accuracy":0.69,
+         "f1":0.6265060241,
+         "precision":0.8387096774,
+         "recall":0.5
+     },
+     {
+         "train_domain":"social_media",
+         "test_domain":"social_media",
+         "accuracy":0.94,
+         "f1":0.9333333333,
+         "precision":0.9130434783,
+         "recall":0.9545454545
+     },
+     {
+         "train_domain":"social_media",
+         "test_domain":"literature",
+         "accuracy":0.7,
+         "f1":0.3181818182,
+         "precision":0.2333333333,
+         "recall":0.5
+     },
+     {
+         "train_domain":"social_media",
+         "test_domain":"web",
+         "accuracy":0.33,
+         "f1":0.2298850575,
+         "precision":0.9090909091,
+         "recall":0.1315789474
+     },
+     {
+         "train_domain":"literature",
+         "test_domain":"politics",
+         "accuracy":0.6,
+         "f1":0.0,
+         "precision":0.0,
+         "recall":0.0
+     },
+     {
+         "train_domain":"literature",
+         "test_domain":"news",
+         "accuracy":0.79,
+         "f1":0.16,
+         "precision":0.6666666667,
+         "recall":0.0909090909
+     },
+     {
+         "train_domain":"literature",
+         "test_domain":"law",
+         "accuracy":0.61,
+         "f1":0.2909090909,
+         "precision":1.0,
+         "recall":0.170212766
+     },
+     {
+         "train_domain":"literature",
+         "test_domain":"social_media",
+         "accuracy":0.44,
+         "f1":0.4166666667,
+         "precision":0.4347826087,
+         "recall":0.4
+     },
+     {
+         "train_domain":"literature",
+         "test_domain":"literature",
+         "accuracy":0.96,
+         "f1":0.8333333333,
+         "precision":0.7142857143,
+         "recall":1.0
+     },
+     {
+         "train_domain":"literature",
+         "test_domain":"web",
+         "accuracy":0.27,
+         "f1":0.0266666667,
+         "precision":0.25,
+         "recall":0.014084507
+     },
+     {
+         "train_domain":"web",
+         "test_domain":"politics",
+         "accuracy":0.6,
+         "f1":0.4594594595,
+         "precision":0.7083333333,
+         "recall":0.34
+     },
+     {
+         "train_domain":"web",
+         "test_domain":"news",
+         "accuracy":0.89,
+         "f1":0.6666666667,
+         "precision":0.9166666667,
+         "recall":0.5238095238
+     },
+     {
+         "train_domain":"web",
+         "test_domain":"law",
+         "accuracy":0.71,
+         "f1":0.6947368421,
+         "precision":0.8048780488,
+         "recall":0.6111111111
+     },
+     {
+         "train_domain":"web",
+         "test_domain":"social_media",
+         "accuracy":0.46,
+         "f1":0.2894736842,
+         "precision":0.6111111111,
+         "recall":0.1896551724
+     },
+     {
+         "train_domain":"web",
+         "test_domain":"literature",
+         "accuracy":0.89,
+         "f1":0.0,
+         "precision":0.0,
+         "recall":0.0
+     },
+     {
+         "train_domain":"web",
+         "test_domain":"web",
+         "accuracy":0.9,
+         "f1":0.9285714286,
+         "precision":0.9848484848,
+         "recall":0.8783783784
+     }
+ ]
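Each record above pairs one training domain with one test domain, so the 36 entries form a 6x6 cross-domain matrix. A minimal sketch for reading the file back and pivoting it into an accuracy grid with pandas (the pivot is illustrative, not part of the repository):

import pandas as pd

# Load the benchmark records and arrange them as a train_domain x test_domain grid.
df = pd.read_json('out/n_grams.json')
accuracy_matrix = df.pivot(index='train_domain', columns='test_domain', values='accuracy')
print(accuracy_matrix.round(2))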
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ evaluate
+ datasets
+ transformers
+ torch
+ scikit-learn
+ python-dotenv
+ pandas
+ nltk
+ numpy