Random-Mary-Smith committed
Commit c528d63 · 1 parent: 2206113

Add Benchmarks and Models

Files changed (42)
  1. benchmark/ensemble/autoencoder.py +233 -0
  2. benchmark/ensemble/bert.py +263 -0
  3. benchmark/ensemble/n_grams.py +156 -0
  4. benchmark/isolated/autoencoder.py +210 -0
  5. benchmark/isolated/bert.py +205 -0
  6. benchmark/isolated/n_grams.py +108 -0
  7. benchmark/requirements.txt +9 -0
  8. benchmark/run.sh +19 -0
  9. results/autoencoder/ensemble/autoencoder_two_models_bert_ensemble.json +51 -0
  10. results/autoencoder/isolated/models/law_brazilian_model.pt +3 -0
  11. results/autoencoder/isolated/models/law_european_model.pt +3 -0
  12. results/autoencoder/isolated/models/literature_brazilian_model.pt +3 -0
  13. results/autoencoder/isolated/models/literature_european_model.pt +3 -0
  14. results/autoencoder/isolated/models/news_brazilian_model.pt +3 -0
  15. results/autoencoder/isolated/models/news_european_model.pt +3 -0
  16. results/autoencoder/isolated/models/politics_brazilian_model.pt +3 -0
  17. results/autoencoder/isolated/models/politics_european_model.pt +3 -0
  18. results/autoencoder/isolated/models/social_media_brazilian_model.pt +3 -0
  19. results/autoencoder/isolated/models/social_media_european_model.pt +3 -0
  20. results/autoencoder/isolated/models/web_brazilian_model.pt +3 -0
  21. results/autoencoder/isolated/models/web_european_model.pt +3 -0
  22. results/autoencoder/isolated/out/autoencoder_two_models_bert.json +298 -0
  23. results/bert/all_mixed/models/all_mixed.pt +3 -0
  24. results/bert/all_mixed/out/accuracy_chart.pdf +0 -0
  25. results/bert/all_mixed/out/loss_chart.pdf +0 -0
  26. results/bert/all_mixed/out/results.json +20 -0
  27. results/bert/ensemble/bert_ensemble.json +58 -0
  28. results/bert/isolated/models/law.pt +3 -0
  29. results/bert/isolated/models/literature.pt +3 -0
  30. results/bert/isolated/models/news.pt +3 -0
  31. results/bert/isolated/models/politics.pt +3 -0
  32. results/bert/isolated/models/social_media.pt +3 -0
  33. results/bert/isolated/models/web.pt +3 -0
  34. results/bert/isolated/out/bert_isolated.json +298 -0
  35. results/n_grams/ensemble/n_gram_ensemble.json +58 -0
  36. results/n_grams/isolated/models/law.pickle +3 -0
  37. results/n_grams/isolated/models/literature.pickle +3 -0
  38. results/n_grams/isolated/models/news.pickle +3 -0
  39. results/n_grams/isolated/models/politics.pickle +3 -0
  40. results/n_grams/isolated/models/social_media.pickle +3 -0
  41. results/n_grams/isolated/models/web.pickle +3 -0
  42. results/n_grams/isolated/out/n_gram_isolated.json +299 -0
benchmark/ensemble/autoencoder.py ADDED
@@ -0,0 +1,233 @@
+ import torch
+ import logging
+ from transformers import BertModel, BertTokenizerFast
+ import os
+ from pathlib import Path
+ import pandas as pd
+ import evaluate
+ from datasets import load_dataset
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+ import numpy as np
+
+ ROOT_PATH = Path(__file__).parent.parent.parent
+
+ PATH_AUTOENCODER = os.path.join(
+     ROOT_PATH, "results", "autoencoder", "isolated")
+
+
+ def tokenize(dataset):
+     BERT_MAX_LEN = 512
+
+     tokenizer = BertTokenizerFast.from_pretrained(
+         "neuralmind/bert-base-portuguese-cased", max_length=BERT_MAX_LEN)
+
+     dataset = dataset.map(lambda example: tokenizer(
+         example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))
+
+     return dataset
+
+
+ def create_dataloader(dataset, shuffle=True):
+     return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=True)
+
+
+ def process_results(results):
+     predictions = []
+
+     # Perform majority voting: each row holds one 0/1 vote per domain model pair.
+     for row in results:
+         number_of_ones = np.array(row).sum()
+         number_of_zeros = len(row) - number_of_ones
+
+         if number_of_ones > number_of_zeros:
+             predictions.append(1)
+         else:
+             predictions.append(0)
+
+     return predictions
+
+
+ class AutoEncoder(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+
+         self.device = torch.device(
+             'cuda' if torch.cuda.is_available() else 'cpu')
+
+         self.bert = BertModel.from_pretrained(
+             'neuralmind/bert-base-portuguese-cased').to(self.device)
+
+         # Freeze BERT
+         for param in self.bert.parameters():
+             param.requires_grad = False
+
+         self.encoder = torch.nn.Sequential(
+             torch.nn.Linear(self.bert.config.hidden_size,
+                             self.bert.config.hidden_size // 5),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 5,
+                             self.bert.config.hidden_size // 10),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 10,
+                             self.bert.config.hidden_size // 30),
+             torch.nn.ReLU(),
+         ).to(self.device)
+
+         self.decoder = torch.nn.Sequential(
+             torch.nn.Linear(self.bert.config.hidden_size // 30,
+                             self.bert.config.hidden_size // 10),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 10,
+                             self.bert.config.hidden_size // 5),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 5,
+                             self.bert.config.hidden_size),
+             torch.nn.Sigmoid()
+         ).to(self.device)
+
+     def forward(self, input_ids, attention_mask):
+         bert_output = self.bert(input_ids=input_ids,
+                                 attention_mask=attention_mask).last_hidden_state[:, 0, :]
+
+         encoded = self.encoder(bert_output)
+         decoded = self.decoder(encoded)
+
+         return bert_output, decoded
+
+
+ class EnsembleModel(torch.nn.Module):
+     def __init__(self, domains=['law', 'literature', 'news', 'politics', 'social_media', 'web']):
+         super().__init__()
+         self.models = []
+         self.domains = domains
+         self.device = torch.device(
+             'cuda' if torch.cuda.is_available() else 'cpu')
+
+         for domain in domains:
+             dict_model = {}
+
+             for language in ['brazilian', 'european']:
+                 # PATH_AUTOENCODER already ends in "isolated", so join straight to "models".
+                 model_state_dict = torch.load(os.path.join(
+                     PATH_AUTOENCODER, 'models', f"{domain}_{language}_model.pt"), map_location=self.device)
+
+                 model = AutoEncoder()
+                 model.load_state_dict(model_state_dict)
+                 model.to(self.device)
+                 model.eval()
+
+                 dict_model[language] = model
+
+             self.models.append(dict_model)
+
+     def forward(self, input_ids, attention_mask):
+         results = []
+
+         loss_fn = torch.nn.MSELoss(reduction='none')
+
+         for dict_model in self.models:
+             brazilian_bert_output, brazilian_decoded = dict_model['brazilian'](
+                 input_ids=input_ids, attention_mask=attention_mask)
+             european_bert_output, european_decoded = dict_model['european'](
+                 input_ids=input_ids, attention_mask=attention_mask)
+
+             brazilian_loss = loss_fn(brazilian_decoded, brazilian_bert_output)
+             european_loss = loss_fn(european_decoded, european_bert_output)
+
+             brazilian_loss = torch.mean(brazilian_loss, dim=1)
+             european_loss = torch.mean(european_loss, dim=1)
+
+             # 0 = the European autoencoder reconstructs better, 1 = the Brazilian one does.
+             aux_labels = torch.where(
+                 european_loss < brazilian_loss, 0, 1).tolist()
+
+             results.append(aux_labels)
+
+         return np.array(results).transpose().tolist()
+
+
+ def benchmark(model, debug=False):
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     # benchmark() receives the EnsembleModel built in test(), which manages its
+     # own device and per-domain models, so there is no single training domain.
+     train_domain = 'ensemble'
+
+     accuracy_metric = evaluate.load('accuracy')
+     f1_metric = evaluate.load('f1')
+     precision_metric = evaluate.load('precision')
+     recall_metric = evaluate.load('recall')
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         dataset = load_dataset(
+             'Random-Mary-Smith/port_data_random', test_domain, split='test')
+
+         if debug:
+             logging.info(f"Debugging {test_domain} dataset...")
+             dataset = dataset.select(range(100))
+
+         dataset = tokenize(dataset)
+
+         dataset.set_format(type='torch', columns=[
+             'input_ids', 'attention_mask', 'label'])
+
+         dataset = create_dataloader(dataset)
+
+         all_labels = []
+         predictions = []
+
+         with torch.no_grad():
+             for batch in tqdm(dataset, ascii=True, miniters=10):
+                 input_ids = batch['input_ids'].to(model.device)
+                 attention_mask = batch['attention_mask'].to(model.device)
+                 labels = batch['label'].to(model.device)
+
+                 results = model(input_ids, attention_mask)
+
+                 all_labels.extend(labels.flatten().int().cpu().tolist())
+                 predictions.extend(process_results(results))
+
+         accuracy = accuracy_metric.compute(
+             predictions=predictions, references=all_labels)['accuracy']
+         f1 = f1_metric.compute(predictions=predictions, references=all_labels)['f1']
+         precision = precision_metric.compute(
+             predictions=predictions, references=all_labels)['precision']
+         recall = recall_metric.compute(predictions=predictions,
+                                        references=all_labels)['recall']
+
+         df_results = pd.concat([df_results, pd.DataFrame(
+             [[train_domain, test_domain, accuracy, f1, precision, recall]], columns=df_results.columns)], ignore_index=True)
+
+     return df_results
+
+
+ def test():
+     DEBUG = True
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     model = EnsembleModel()
+
+     df_results = pd.concat([df_results, benchmark(
+         model, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving results...")
+
+     df_results.to_json(os.path.join(PATH_AUTOENCODER, 'out',
+                                     'autoencoder.json'), orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == '__main__':
+     test()
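
For reference, a standalone sketch of the voting step above, runnable on its own (the votes are illustrative, not real model output). EnsembleModel.forward emits one 0/1 vote per domain pair and process_results keeps the majority; note that a 3-3 tie falls to 0, since the vote only flips to 1 on a strict majority.

import numpy as np

# Isolated copy of the majority vote in process_results.
def majority_vote(results):
    predictions = []
    for row in results:  # one row of six domain votes per sample
        ones = int(np.array(row).sum())
        predictions.append(1 if ones > len(row) - ones else 0)
    return predictions

votes = [[0, 1, 1, 0, 1, 1],   # 4 of 6 domain pairs vote Brazilian -> 1
         [0, 0, 1, 0, 0, 0]]   # 5 of 6 vote European -> 0
print(majority_vote(votes))    # [1, 0]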
benchmark/ensemble/bert.py ADDED
@@ -0,0 +1,263 @@
+ import torch
+ import logging
+ from pathlib import Path
+ import os
+ from transformers import BertModel, BertForTokenClassification
+ import pandas as pd
+ import evaluate
+ from datasets import load_dataset
+ from transformers import BertTokenizerFast
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+ ROOT_PATH = Path(__file__).parent.parent.parent
+
+ BERT_PATH = os.path.join(ROOT_PATH, 'results', 'bert', 'isolated')
+
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S',
+                     filename=os.path.join(BERT_PATH, 'out', 'debug_embeddings.txt'), filemode='w')
+
+
+ def tokenize(dataset):
+     BERT_MAX_LEN = 512
+
+     tokenizer = BertTokenizerFast.from_pretrained(
+         "neuralmind/bert-base-portuguese-cased", max_length=BERT_MAX_LEN)
+
+     dataset = dataset.map(lambda example: tokenizer(
+         example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))
+
+     return dataset
+
+
+ def create_dataloader(dataset, shuffle=True):
+     return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=True)
+
+
+ def process_output(predictions, reduction):
+     final_predictions = []
+
+     for tensor in predictions:
+         if reduction == 'mean':
+             raw_label = torch.mean(tensor).item()
+         elif reduction == 'median':
+             raw_label = torch.median(tensor).item()
+         elif reduction == 'max':
+             # Keep whichever logit lies farther from zero.
+             max_value = torch.max(tensor)
+             min_value = torch.min(tensor)
+
+             raw_label = min_value.item() if abs(min_value) > max_value else max_value.item()
+         elif reduction == 'majority_vote':
+             number_of_positive = torch.sum(tensor > 0).item()
+             number_of_negative = len(tensor) - number_of_positive
+
+             raw_label = 1 if number_of_positive > number_of_negative else -1
+         else:
+             raise ValueError("Invalid reduction type")
+
+         final_predictions.append(raw_label)
+
+     final_predictions = torch.tensor(
+         final_predictions, dtype=torch.float32, device=predictions.device)
+
+     return (final_predictions > 0).flatten().int().cpu().tolist()
+
+
+ def benchmark(model, debug=False):
+     df_result = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     # benchmark() receives the EnsembleModel built in test(), so there is no
+     # single training domain to report.
+     train_domain = 'ensemble'
+
+     model.eval()
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         dataset = load_dataset(
+             'Random-Mary-Smith/port_data_random', test_domain, split='test')
+
+         if debug:
+             logging.info("Debug mode: using only 100 samples")
+             dataset = dataset.shuffle().select(range(100))
+
+         dataset = tokenize(dataset)
+
+         dataset.set_format(type='torch', columns=[
+             'input_ids', 'attention_mask', 'label'])
+
+         dataset = create_dataloader(dataset)
+
+         y = []
+         all_labels = []
+
+         with torch.no_grad():
+             for batch in tqdm(dataset):
+                 input_ids = batch['input_ids'].to(model.device)
+                 attention_mask = batch['attention_mask'].to(model.device)
+
+                 # Convert labels from 1D to 2D, e.g. [4] -> [4, 1].
+                 labels = batch['label'].unsqueeze(1).float().to(model.device)
+
+                 all_labels.extend(labels.flatten().int().cpu().tolist())
+
+                 outputs = model(input_ids=input_ids,
+                                 attention_mask=attention_mask)
+
+                 y.extend(process_output(outputs, reduction='mean'))
+
+         # The DataLoader has consumed the dataset (and drop_last=True trims it),
+         # so score against the labels collected batch by batch.
+         accuracy = evaluate.load('accuracy').compute(
+             predictions=y, references=all_labels)['accuracy']
+         f1 = evaluate.load('f1').compute(
+             predictions=y, references=all_labels)['f1']
+         precision = evaluate.load('precision').compute(
+             predictions=y, references=all_labels)['precision']
+         recall = evaluate.load('recall').compute(
+             predictions=y, references=all_labels)['recall']
+
+         df_result = pd.concat([df_result, pd.DataFrame({
+             'train_domain': [train_domain],
+             'test_domain': [test_domain],
+             'accuracy': [accuracy],
+             'f1': [f1],
+             'precision': [precision],
+             'recall': [recall],
+         })], ignore_index=True)
+
+     return df_result
+
+
+ class LanguageIdentifer(torch.nn.Module):
+     def __init__(self, mode='horizontal_stacking', pos_layers_to_freeze=0, bertimbau_layers_to_freeze=0):
+         super().__init__()
+
+         self.labels = ['pt-PT', 'pt-BR']
+
+         self.portuguese_model = BertModel.from_pretrained(
+             "neuralmind/bert-base-portuguese-cased")
+
+         self.portuguese_pos_tagging_model = BertForTokenClassification.from_pretrained(
+             "lisaterumi/postagger-portuguese")
+
+         for layer in range(bertimbau_layers_to_freeze):
+             for name, param in self.portuguese_model.named_parameters():
+                 if f".{layer}" in name:
+                     print(f"Freezing Layer {name} of Bertimbau")
+                     param.requires_grad = False
+
+         for layer in range(pos_layers_to_freeze):
+             for name, param in self.portuguese_pos_tagging_model.named_parameters():
+                 if f".{layer}" in name:
+                     print(f"Freezing Layer {name} of POS")
+                     param.requires_grad = False
+
+         self.portuguese_pos_tagging_model.classifier = torch.nn.Identity()
+         self.mode = mode
+
+         if self.mode == 'horizontal_stacking':
+             self.linear = self.common_network(torch.nn.Linear(
+                 self.portuguese_pos_tagging_model.config.hidden_size + self.portuguese_model.config.hidden_size, 512))
+         elif self.mode in ('bertimbau_only', 'pos_only', 'vertical_sum'):
+             self.linear = self.common_network(torch.nn.Linear(
+                 self.portuguese_model.config.hidden_size, 512))
+         else:
+             raise NotImplementedError
+
+     def common_network(self, custom_linear):
+         return torch.nn.Sequential(
+             custom_linear,
+             torch.nn.ReLU(),
+             torch.nn.Dropout(0.2),
+             torch.nn.Linear(512, 1),
+         )
+
+     def forward(self, input_ids, attention_mask):
+         # (batch_size, sequence_length, hidden_size) -> [CLS] hidden state
+         outputs_bert = self.portuguese_model(
+             input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
+
+         # (batch_size, sequence_length, hidden_size) -> [CLS] logits
+         outputs_pos = self.portuguese_pos_tagging_model(
+             input_ids=input_ids, attention_mask=attention_mask).logits[:, 0, :]
+
+         if self.mode == 'horizontal_stacking':
+             outputs = torch.cat((outputs_bert, outputs_pos), dim=1)
+         elif self.mode == 'bertimbau_only':
+             outputs = outputs_bert
+         elif self.mode == 'pos_only':
+             outputs = outputs_pos
+         elif self.mode == 'vertical_sum':
+             outputs = outputs_bert + outputs_pos
+             outputs = torch.nn.functional.normalize(outputs, p=2, dim=1)
+
+         return self.linear(outputs)
+
+
+ class EnsembleModel(torch.nn.Module):
+     def __init__(self, domain=['law', 'literature', 'news', 'politics', 'social_media', 'web']):
+         super().__init__()
+
+         self.domain = domain
+         self.models = []
+         self.device = torch.device(
+             'cuda' if torch.cuda.is_available() else 'cpu')
+
+         print(f'Ensemble Model running on {self.device}')
+
+         for domain in self.domain:
+             # BERT_PATH already ends in "isolated", so join straight to "models".
+             model_state_dict = torch.load(os.path.join(
+                 BERT_PATH, 'models', f'{domain}.pt'), map_location=self.device)
+
+             new_model = LanguageIdentifer(mode='pos_only')
+             new_model.load_state_dict(model_state_dict)
+             new_model = new_model.to(self.device)
+
+             self.models.append({
+                 'model': new_model,
+                 'domain': domain,
+             })
+
+     def forward(self, input_ids, attention_mask):
+         results = []
+
+         for model in self.models:
+             model = model['model']
+             output = model(input_ids=input_ids, attention_mask=attention_mask)
+             results.append(output)
+
+         return torch.stack(results, dim=1)
+
+
+ def test():
+     DEBUG = False
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     ensemble = EnsembleModel()
+
+     df_results = pd.concat(
+         [df_results, benchmark(ensemble, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving Results...")
+
+     df_results.to_json(os.path.join(BERT_PATH, 'out', 'bert_ensemble.json'),
+                        orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == '__main__':
+     test()
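
The pooled decision in process_output can be checked in isolation. A minimal sketch with made-up logits (six per sample, one from each domain model); a positive pooled value becomes class 1:

import torch

# Standalone copy of the pooling in process_output.
def reduce_logits(per_model_logits, reduction="mean"):
    pooled = []
    for row in per_model_logits:
        if reduction == "mean":
            value = torch.mean(row)
        elif reduction == "median":
            value = torch.median(row)
        elif reduction == "majority_vote":
            positives = torch.sum(row > 0)
            value = torch.tensor(1.0 if positives > len(row) - positives else -1.0)
        else:
            raise ValueError(reduction)
        pooled.append(value)
    return [int(v > 0) for v in pooled]

logits = torch.tensor([[1.2, -0.3, 0.8, 0.5, -0.1, 2.0],
                       [-0.9, -1.1, 0.2, -0.4, -2.0, -0.3]])
print(reduce_logits(logits, "mean"))           # [1, 0]
print(reduce_logits(logits, "majority_vote"))  # [1, 0]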
benchmark/ensemble/n_grams.py ADDED
@@ -0,0 +1,156 @@
+ from datasets import load_dataset
+ from pathlib import Path
+ import pandas as pd
+ import os
+ import pickle
+ import logging
+ import evaluate
+ import nltk
+ import numpy as np
+ from tqdm import tqdm
+
+ ROOT_PATH = Path(__file__).parent.parent.parent
+
+ N_GRAMS_PATH = os.path.join(ROOT_PATH, 'results', 'n_grams', 'isolated')
+
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S',
+                     filename=os.path.join(N_GRAMS_PATH, 'out', 'debug_ngrams.txt'), filemode='w')
+
+ nltk.download("stopwords")
+ nltk.download("punkt")
+
+
+ def tokenizer(text):
+     return nltk.tokenize.word_tokenize(text, language="portuguese")
+
+
+ def load_pipelines():
+     in_path = os.path.join(N_GRAMS_PATH, 'models')
+
+     pipeline = []
+
+     for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         with open(os.path.join(in_path, f'{domain}.pickle'), 'rb') as f:
+             logging.info(f"Loading {domain} pipeline...")
+             pipeline.append({
+                 'pipeline': pickle.load(f),
+                 'train_domain': domain,
+             })
+
+     return pipeline
+
+
+ def process_strategy(strategy, raw_predictions):
+     raw_predictions = np.array(raw_predictions)
+
+     if strategy == 'mean':
+         predictions = np.mean(raw_predictions, axis=0)
+     elif strategy == 'median':
+         predictions = np.median(raw_predictions, axis=0)
+     elif strategy == 'majority_voting':
+         # Rows are pipelines, columns are samples, so vote per column.
+         raw_predictions = raw_predictions.transpose()
+
+         predictions = [1 if np.sum(row > 0) > np.sum(
+             row < 0) else -1 for row in raw_predictions]
+     else:
+         raise Exception(f"Strategy {strategy} not implemented")
+
+     predictions = np.array(predictions)
+
+     # Convert predictions to 1 or 0 based on a threshold
+     return np.where(predictions > 0.5, 1, 0)
+
+
+ def process_batch(batch, models, strategy):
+     predictions = []
+
+     for model in models:
+         predictions.append(model.predict(batch).tolist())
+
+     return process_strategy(strategy, predictions)
+
+
+ def benchmark(pipelines, debug=False):
+     df_results = pd.DataFrame(
+         columns=['test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     accuracy_evaluate = evaluate.load('accuracy')
+     f1_evaluate = evaluate.load('f1')
+     precision_evaluate = evaluate.load('precision')
+     recall_evaluate = evaluate.load('recall')
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         logging.info(f"Test Domain {test_domain}...")
+
+         dataset = load_dataset(
+             'Random-Mary-Smith/port_data_random', test_domain, split='test')
+
+         if debug:
+             logging.info("Debug mode: using only 100 samples")
+             dataset = dataset.shuffle().select(range(100))
+
+         batch = []
+         predictions = []
+
+         # Predict in batches of 100 texts to keep memory bounded.
+         for row in tqdm(dataset):
+             batch.append(row['text'])
+
+             if len(batch) == 100:
+                 predictions.extend(process_batch(
+                     batch, pipelines, 'majority_voting'))
+                 batch = []
+
+         if len(batch) > 0:
+             predictions.extend(process_batch(
+                 batch, pipelines, 'majority_voting'))
+
+         accuracy = accuracy_evaluate.compute(
+             predictions=predictions, references=dataset['label'])['accuracy']
+         f1 = f1_evaluate.compute(
+             predictions=predictions, references=dataset['label'])['f1']
+         precision = precision_evaluate.compute(
+             predictions=predictions, references=dataset['label'])['precision']
+         recall = recall_evaluate.compute(
+             predictions=predictions, references=dataset['label'])['recall']
+
+         logging.info(
+             f"Accuracy: {accuracy} | F1: {f1} | Precision: {precision} | Recall: {recall}")
+
+         df_results = pd.concat([df_results, pd.DataFrame(
+             [[test_domain, accuracy, f1, precision, recall]], columns=df_results.columns)], ignore_index=True)
+
+     return df_results
+
+
+ def test():
+     DEBUG = False
+
+     pipelines = []
+
+     for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         with open(os.path.join(N_GRAMS_PATH, "models", f"{domain}.pickle"), "rb") as f:
+             pipelines.append(pickle.load(f))
+
+     logging.info(f"Debug mode: {DEBUG}")
+
+     df_results = pd.DataFrame(
+         columns=['test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     df_results = pd.concat(
+         [df_results, benchmark(pipelines, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving results...")
+
+     df_results.to_json(os.path.join(N_GRAMS_PATH, 'out', 'n_grams.json'),
+                        orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == "__main__":
+     test()
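
A standalone sketch of the majority_voting branch above, with made-up pipeline outputs. It assumes the pickled pipelines emit labels in {-1, 1}, which the sign-based vote and the final 0/1 threshold imply but this commit does not show:

import numpy as np

# Isolated copy of the majority_voting branch from process_strategy.
def majority_vote(raw_predictions):
    raw = np.array(raw_predictions).transpose()  # rows become samples
    votes = np.array([1 if np.sum(row > 0) > np.sum(row < 0) else -1
                      for row in raw])
    return np.where(votes > 0.5, 1, 0)

per_pipeline = [[1, -1],    # politics pipeline's predictions for two texts
                [1, -1],    # news pipeline
                [-1, -1]]   # law pipeline
print(majority_vote(per_pipeline))  # [1 0]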
benchmark/isolated/autoencoder.py ADDED
@@ -0,0 +1,210 @@
+ import torch
+ import logging
+ from transformers import BertModel, BertTokenizerFast
+ import os
+ from pathlib import Path
+ import pandas as pd
+ import evaluate
+ from datasets import load_dataset
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+ ROOT_PATH = Path(__file__).parent.parent.parent
+
+ PATH_AUTOENCODER = os.path.join(
+     ROOT_PATH, "results", "autoencoder", "isolated")
+
+
+ def tokenize(dataset):
+     BERT_MAX_LEN = 512
+
+     tokenizer = BertTokenizerFast.from_pretrained(
+         "neuralmind/bert-base-portuguese-cased", max_length=BERT_MAX_LEN)
+
+     dataset = dataset.map(lambda example: tokenizer(
+         example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))
+
+     return dataset
+
+
+ def create_dataloader(dataset, shuffle=True):
+     return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=True)
+
+
+ class AutoEncoder(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+
+         self.device = torch.device(
+             'cuda' if torch.cuda.is_available() else 'cpu')
+
+         self.bert = BertModel.from_pretrained(
+             'neuralmind/bert-base-portuguese-cased').to(self.device)
+
+         # Freeze BERT
+         for param in self.bert.parameters():
+             param.requires_grad = False
+
+         self.encoder = torch.nn.Sequential(
+             torch.nn.Linear(self.bert.config.hidden_size,
+                             self.bert.config.hidden_size // 5),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 5,
+                             self.bert.config.hidden_size // 10),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 10,
+                             self.bert.config.hidden_size // 30),
+             torch.nn.ReLU(),
+         ).to(self.device)
+
+         self.decoder = torch.nn.Sequential(
+             torch.nn.Linear(self.bert.config.hidden_size // 30,
+                             self.bert.config.hidden_size // 10),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 10,
+                             self.bert.config.hidden_size // 5),
+             torch.nn.ReLU(),
+             torch.nn.Linear(self.bert.config.hidden_size // 5,
+                             self.bert.config.hidden_size),
+             torch.nn.Sigmoid()
+         ).to(self.device)
+
+     def forward(self, input_ids, attention_mask):
+         bert_output = self.bert(input_ids=input_ids,
+                                 attention_mask=attention_mask).last_hidden_state[:, 0, :]
+
+         encoded = self.encoder(bert_output)
+         decoded = self.decoder(encoded)
+
+         return bert_output, decoded
+
+
+ def load_models():
+     models = []
+
+     for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         logging.info(f"Loading {domain} model...")
+
+         accumulator = []
+
+         for lang in ['brazilian', 'european']:
+             model = AutoEncoder()
+             model.load_state_dict(torch.load(os.path.join(
+                 PATH_AUTOENCODER, 'models', f'{domain}_{lang}_model.pt'), map_location=model.device))
+             accumulator.append(model)
+
+         models.append({
+             'models': accumulator,
+             'train_domain': domain,
+         })
+
+     return models
+
+
+ def benchmark(model, debug=False):
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     train_domain = model['train_domain']
+
+     brazilian_model = model['models'][0]
+     european_model = model['models'][1]
+
+     brazilian_model.eval()
+     european_model.eval()
+
+     brazilian_model.to(device)
+     european_model.to(device)
+
+     accuracy_metric = evaluate.load('accuracy')
+     f1_metric = evaluate.load('f1')
+     precision_metric = evaluate.load('precision')
+     recall_metric = evaluate.load('recall')
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         dataset = load_dataset(
+             'Random-Mary-Smith/port_data_random', test_domain, split='test')
+
+         if debug:
+             logging.info(f"Debugging {test_domain} dataset...")
+             dataset = dataset.select(range(100))
+
+         dataset = tokenize(dataset)
+
+         dataset.set_format(type='torch', columns=[
+             'input_ids', 'attention_mask', 'label'])
+
+         dataset = create_dataloader(dataset)
+
+         predictions = []
+         labels = []
+         total_loss = 0
+
+         reconstruction_loss = torch.nn.MSELoss(reduction='none')
+
+         with torch.no_grad():
+             for batch in tqdm(dataset):
+                 input_ids = batch['input_ids'].to(device)
+                 attention_mask = batch['attention_mask'].to(device)
+                 label = batch['label'].to(device)
+
+                 bert_european, reconstruction_european = european_model(
+                     input_ids=input_ids, attention_mask=attention_mask)
+
+                 bert_brazilian, reconstruction_brazilian = brazilian_model(
+                     input_ids=input_ids, attention_mask=attention_mask)
+
+                 test_loss_european = reconstruction_loss(
+                     reconstruction_european, bert_european)
+
+                 test_loss_brazilian = reconstruction_loss(
+                     reconstruction_brazilian, bert_brazilian)
+
+                 # Whichever autoencoder reconstructs the sample with lower MSE
+                 # claims it: 0 = European, 1 = Brazilian.
+                 for loss_european, loss_brazilian in zip(test_loss_european, test_loss_brazilian):
+                     if loss_european.mean().item() < loss_brazilian.mean().item():
+                         predictions.append(0)
+                         total_loss += loss_european.mean().item() / len(test_loss_european)
+                     else:
+                         predictions.append(1)
+                         total_loss += loss_brazilian.mean().item() / len(test_loss_brazilian)
+
+                 labels.extend(label.tolist())
+
+         accuracy = accuracy_metric.compute(
+             predictions=predictions, references=labels)['accuracy']
+         f1 = f1_metric.compute(predictions=predictions, references=labels)['f1']
+         precision = precision_metric.compute(
+             predictions=predictions, references=labels)['precision']
+         recall = recall_metric.compute(predictions=predictions,
+                                        references=labels)['recall']
+
+         df_results = pd.concat([df_results, pd.DataFrame(
+             [[train_domain, test_domain, accuracy, f1, precision, recall]], columns=df_results.columns)], ignore_index=True)
+
+     return df_results
+
+
+ def test():
+     DEBUG = True
+
+     models = load_models()
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     for model in models:
+         logging.info(f"Train Domain {model['train_domain']}...")
+
+         df_results = pd.concat([df_results, benchmark(
+             model, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving results...")
+
+     df_results.to_json(os.path.join(PATH_AUTOENCODER, 'out',
+                                     'autoencoder.json'), orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == '__main__':
+     test()
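
The decision rule inside benchmark() reduces to comparing two per-sample reconstruction errors. A minimal sketch with synthetic vectors standing in for the frozen-BERT [CLS] embeddings:

import torch

# Whichever autoencoder reconstructs the [CLS] vector with lower MSE claims
# the sample: 0 = European model wins, 1 = Brazilian model wins.
mse = torch.nn.MSELoss(reduction="none")

cls_vec = torch.randn(2, 768)                          # two samples
recon_european = cls_vec + 0.01 * torch.randn(2, 768)  # near-perfect fit
recon_brazilian = cls_vec + 0.10 * torch.randn(2, 768)  # noisier fit

loss_eu = mse(recon_european, cls_vec).mean(dim=1)
loss_br = mse(recon_brazilian, cls_vec).mean(dim=1)
labels = torch.where(loss_eu < loss_br, 0, 1)
print(labels)  # tensor([0, 0]) -- the European model fits better here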
benchmark/isolated/bert.py ADDED
@@ -0,0 +1,205 @@
+ import torch
+ import logging
+ from pathlib import Path
+ import os
+ from transformers import BertModel, BertForTokenClassification
+ import pandas as pd
+ import evaluate
+ from datasets import load_dataset
+ from transformers import BertTokenizerFast
+ from torch.utils.data import DataLoader
+ from tqdm import tqdm
+
+ ROOT_PATH = Path(__file__).parent.parent.parent
+
+ BERT_PATH = os.path.join(ROOT_PATH, 'results', 'bert', 'isolated')
+
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S',
+                     filename=os.path.join(BERT_PATH, 'out', 'debug_embeddings.txt'), filemode='w')
+
+
+ def tokenize(dataset):
+     BERT_MAX_LEN = 512
+
+     tokenizer = BertTokenizerFast.from_pretrained(
+         "neuralmind/bert-base-portuguese-cased", max_length=BERT_MAX_LEN)
+
+     dataset = dataset.map(lambda example: tokenizer(
+         example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))
+
+     return dataset
+
+
+ def create_dataloader(dataset, shuffle=True):
+     return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=True)
+
+
+ class LanguageIdentifer(torch.nn.Module):
+     def __init__(self, mode='horizontal_stacking', pos_layers_to_freeze=0, bertimbau_layers_to_freeze=0):
+         super().__init__()
+
+         self.labels = ['pt-PT', 'pt-BR']
+
+         self.portuguese_model = BertModel.from_pretrained(
+             "neuralmind/bert-base-portuguese-cased")
+
+         self.portuguese_pos_tagging_model = BertForTokenClassification.from_pretrained(
+             "lisaterumi/postagger-portuguese")
+
+         for layer in range(bertimbau_layers_to_freeze):
+             for name, param in self.portuguese_model.named_parameters():
+                 if f".{layer}" in name:
+                     print(f"Freezing Layer {name} of Bertimbau")
+                     param.requires_grad = False
+
+         for layer in range(pos_layers_to_freeze):
+             for name, param in self.portuguese_pos_tagging_model.named_parameters():
+                 if f".{layer}" in name:
+                     print(f"Freezing Layer {name} of POS")
+                     param.requires_grad = False
+
+         self.portuguese_pos_tagging_model.classifier = torch.nn.Identity()
+         self.mode = mode
+
+         if self.mode == 'horizontal_stacking':
+             self.linear = self.common_network(torch.nn.Linear(
+                 self.portuguese_pos_tagging_model.config.hidden_size + self.portuguese_model.config.hidden_size, 512))
+         elif self.mode in ('bertimbau_only', 'pos_only', 'vertical_sum'):
+             self.linear = self.common_network(torch.nn.Linear(
+                 self.portuguese_model.config.hidden_size, 512))
+         else:
+             raise NotImplementedError
+
+     def common_network(self, custom_linear):
+         return torch.nn.Sequential(
+             custom_linear,
+             torch.nn.ReLU(),
+             torch.nn.Dropout(0.2),
+             torch.nn.Linear(512, 1),
+         )
+
+     def forward(self, input_ids, attention_mask):
+         # (batch_size, sequence_length, hidden_size) -> [CLS] hidden state
+         outputs_bert = self.portuguese_model(
+             input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
+
+         # (batch_size, sequence_length, hidden_size) -> [CLS] logits
+         outputs_pos = self.portuguese_pos_tagging_model(
+             input_ids=input_ids, attention_mask=attention_mask).logits[:, 0, :]
+
+         if self.mode == 'horizontal_stacking':
+             outputs = torch.cat((outputs_bert, outputs_pos), dim=1)
+         elif self.mode == 'bertimbau_only':
+             outputs = outputs_bert
+         elif self.mode == 'pos_only':
+             outputs = outputs_pos
+         elif self.mode == 'vertical_sum':
+             outputs = outputs_bert + outputs_pos
+             outputs = torch.nn.functional.normalize(outputs, p=2, dim=1)
+
+         return self.linear(outputs)
+
+
+ def load_models():
+     models = []
+
+     for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         logging.info(f"Loading {domain} model...")
+
+         model = LanguageIdentifer(mode='pos_only')
+         model.load_state_dict(torch.load(os.path.join(
+             BERT_PATH, 'models', f'{domain}.pt'), map_location='cpu'))
+
+         models.append({
+             'model': model,
+             'train_domain': domain,
+         })
+
+     return models
+
+
+ def benchmark(model, debug=False):
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+     df_result = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     train_domain = model['train_domain']
+     model = model['model']
+
+     model.to(device)
+     model.eval()
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         dataset = load_dataset(
+             'Random-Mary-Smith/port_data_random', test_domain, split='test')
+
+         if debug:
+             logging.info("Debug mode: using only 100 samples")
+             dataset = dataset.shuffle().select(range(100))
+
+         dataset = tokenize(dataset)
+
+         dataset.set_format(type='torch', columns=[
+             'input_ids', 'attention_mask', 'label'])
+
+         dataset = create_dataloader(dataset)
+
+         y = []
+         all_labels = []
+
+         with torch.no_grad():
+             for batch in tqdm(dataset):
+                 input_ids = batch['input_ids'].to(device)
+                 attention_mask = batch['attention_mask'].to(device)
+
+                 all_labels.extend(batch['label'].flatten().int().tolist())
+
+                 y.extend(model(input_ids, attention_mask).cpu().detach().numpy())
+
+         y = [1 if y_ > 0.5 else 0 for y_ in y]
+
+         # The DataLoader has consumed the dataset (and drop_last=True trims it),
+         # so score against the labels collected batch by batch.
+         accuracy = evaluate.load('accuracy').compute(
+             predictions=y, references=all_labels)['accuracy']
+         f1 = evaluate.load('f1').compute(
+             predictions=y, references=all_labels)['f1']
+         precision = evaluate.load('precision').compute(
+             predictions=y, references=all_labels)['precision']
+         recall = evaluate.load('recall').compute(
+             predictions=y, references=all_labels)['recall']
+
+         df_result = pd.concat([df_result, pd.DataFrame({
+             'train_domain': [train_domain],
+             'test_domain': [test_domain],
+             'accuracy': [accuracy],
+             'f1': [f1],
+             'precision': [precision],
+             'recall': [recall],
+         })], ignore_index=True)
+
+     return df_result
+
+
+ def test():
+     DEBUG = False
+
+     models = load_models()
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     for model in models:
+         logging.info(f"Train Domain {model['train_domain']}...")
+
+         df_results = pd.concat(
+             [df_results, benchmark(model, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving Results...")
+
+     df_results.to_json(os.path.join(BERT_PATH, 'out', 'embeddings.json'),
+                        orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == '__main__':
+     test()
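
A minimal inference sketch for one isolated checkpoint; this is an assumption-laden illustration, not part of the commit. It presumes LanguageIdentifer from the script above is in scope, the news.pt checkpoint exists at the path shown, and the Hub models can be downloaded:

import torch
from transformers import BertTokenizerFast

# Assumes LanguageIdentifer is importable from the script above and the
# checkpoint path is valid relative to the repository root.
tokenizer = BertTokenizerFast.from_pretrained(
    "neuralmind/bert-base-portuguese-cased")

model = LanguageIdentifer(mode='pos_only')
model.load_state_dict(torch.load("results/bert/isolated/models/news.pt",
                                 map_location="cpu"))
model.eval()

batch = tokenizer(["O autocarro chegou atrasado à paragem."],
                  truncation=True, padding="max_length", max_length=512,
                  return_tensors="pt")
with torch.no_grad():
    logit = model(batch["input_ids"], batch["attention_mask"])

# The script thresholds the raw output at 0.5; 1 appears to map to pt-BR.
print(int(logit.item() > 0.5))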
benchmark/isolated/n_grams.py ADDED
@@ -0,0 +1,108 @@
+ from datasets import load_dataset
+ from pathlib import Path
+ import pandas as pd
+ import os
+ import pickle
+ import logging
+ import evaluate
+ import nltk
+
+ ROOT_PATH = Path(__file__).parent.parent.parent
+
+ N_GRAMS_PATH = os.path.join(ROOT_PATH, 'results', 'n_grams', 'isolated')
+
+ logging.basicConfig(level=logging.INFO,
+                     format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S',
+                     filename=os.path.join(N_GRAMS_PATH, 'out', 'debug_ngrams.txt'), filemode='w')
+
+ nltk.download("stopwords")
+ nltk.download("punkt")
+
+
+ def tokenizer(text):
+     return nltk.tokenize.word_tokenize(text, language="portuguese")
+
+
+ def load_pipelines():
+     in_path = os.path.join(N_GRAMS_PATH, 'models')
+
+     pipeline = []
+
+     for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         with open(os.path.join(in_path, f'{domain}.pickle'), 'rb') as f:
+             logging.info(f"Loading {domain} pipeline...")
+             pipeline.append({
+                 'pipeline': pickle.load(f),
+                 'train_domain': domain,
+             })
+
+     return pipeline
+
+
+ def benchmark(pipeline, debug=False):
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     train_domain = pipeline['train_domain']
+     pipeline = pipeline['pipeline']
+
+     for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+         logging.info(f"Test Domain {test_domain}...")
+
+         dataset = load_dataset(
+             'Random-Mary-Smith/port_data_random', test_domain, split='test')
+
+         if debug:
+             logging.info("Debug mode: using only 100 samples")
+             dataset = dataset.shuffle().select(range(100))
+
+         y = pipeline.predict(dataset['text'])
+
+         accuracy = evaluate.load('accuracy').compute(
+             predictions=y, references=dataset['label'])['accuracy']
+
+         f1 = evaluate.load('f1').compute(
+             predictions=y, references=dataset['label'])['f1']
+
+         precision = evaluate.load('precision').compute(
+             predictions=y, references=dataset['label'])['precision']
+
+         recall = evaluate.load('recall').compute(
+             predictions=y, references=dataset['label'])['recall']
+
+         logging.info(
+             f"Accuracy: {accuracy} | F1: {f1} | Precision: {precision} | Recall: {recall}")
+
+         df_results = pd.concat([df_results, pd.DataFrame(
+             [[train_domain, test_domain, accuracy, f1, precision, recall]], columns=df_results.columns)], ignore_index=True)
+
+     return df_results
+
+
+ def test():
+     DEBUG = False
+
+     logging.info(f"Debug mode: {DEBUG}")
+
+     pipelines = load_pipelines()
+
+     df_results = pd.DataFrame(
+         columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+     for pipeline in pipelines:
+         logging.info(f"Train Domain {pipeline['train_domain']}...")
+
+         df_results = pd.concat(
+             [df_results, benchmark(pipeline, debug=DEBUG)], ignore_index=True)
+
+     logging.info("Saving results...")
+
+     df_results.to_json(os.path.join(N_GRAMS_PATH, 'out', 'n_grams.json'),
+                        orient='records', indent=4, force_ascii=False)
+
+
+ if __name__ == "__main__":
+     test()
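
The commit ships only the pickled pipelines, not the script that trained them. As a hedged sketch, the loader above would accept any scikit-learn Pipeline built with the module-level tokenizer; the TF-IDF features, n-gram range, and classifier below are assumptions for illustration, not taken from this commit:

import pickle
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

nltk.download("punkt")

def tokenizer(text):
    # Must match the module-level tokenizer the pickles were built with.
    return nltk.tokenize.word_tokenize(text, language="portuguese")

# Hypothetical pipeline: word n-gram TF-IDF features + a linear classifier.
pipeline = Pipeline([
    ("tfidf", TfidfVectorizer(tokenizer=tokenizer, ngram_range=(1, 3))),
    ("clf", LogisticRegression(max_iter=1000)),
])

texts = ["O autocarro chegou à paragem.", "O ônibus chegou no ponto."]
labels = [0, 1]  # 0 = pt-PT, 1 = pt-BR (assumed label convention)
pipeline.fit(texts, labels)

with open("web.pickle", "wb") as f:
    pickle.dump(pipeline, f)

Because the vectorizer references the module-level tokenizer function, unpickling needs that same function to be importable, which is why the benchmark scripts define tokenizer at module scope.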
benchmark/requirements.txt ADDED
@@ -0,0 +1,9 @@
+ evaluate
+ datasets
+ transformers
+ torch
+ scikit-learn
+ python-dotenv
+ pandas
+ nltk
+ numpy
benchmark/run.sh ADDED
@@ -0,0 +1,19 @@
+ pip install -U -r requirements.txt
+
+ echo "N-Grams Isolated"
+ python isolated/n_grams.py
+
+ echo "Embeddings Isolated"
+ python isolated/bert.py
+
+ echo "Autoencoder Isolated"
+ python isolated/autoencoder.py
+
+ echo "N-Grams Ensemble"
+ python ensemble/n_grams.py
+
+ echo "Embeddings Ensemble"
+ python ensemble/bert.py
+
+ echo "Autoencoder Ensemble"
+ python ensemble/autoencoder.py
results/autoencoder/ensemble/autoencoder_two_models_bert_ensemble.json ADDED
@@ -0,0 +1,51 @@
+ [
+   {"domain": "literature", "accuracy": 0.811065051, "precision": 0.2609853529, "recall": 0.2372881356, "f1": 0.2485732403},
+   {"domain": "web", "accuracy": 0.9622961957, "precision": 0.9545522056, "recall": 0.9958165479, "f1": 0.9747478577},
+   {"domain": "politics", "accuracy": 0.9711538462, "precision": 0.9904552129, "recall": 0.9420391061, "f1": 0.9656406586},
+   {"domain": "social_media", "accuracy": 0.869140625, "precision": 0.884, "recall": 0.8532818533, "f1": 0.8683693517},
+   {"domain": "law", "accuracy": 0.9862580128, "precision": 0.5836298932, "recall": 0.2237380628, "f1": 0.3234714004},
+   {"domain": "news", "accuracy": 0.9621394231, "precision": 0.8615737204, "recall": 0.9611385717, "f1": 0.9086368031},
+   {"domain": "dslcc", "accuracy": 0.7217741935, "precision": 0.9311023622, "recall": 0.4777777778, "f1": 0.6315086782}
+ ]
results/autoencoder/isolated/models/law_brazilian_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:444001532ac9842ae633037e1d747b3143f1ee0dcf4c83a4651cc4bede9dd02d
+ size 436832599
results/autoencoder/isolated/models/law_european_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c7b88d69e3514c292d715a86e7d431e7e38de66945e8e37c4822ee9b2076eec
+ size 436832386
results/autoencoder/isolated/models/literature_brazilian_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52654c651543949e087d9fad18b5d05a2fc7b32ee87f3e5caf54fba039bd4a40
+ size 436834538
results/autoencoder/isolated/models/literature_european_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9ea3c1082c7736371a91df53986132e45e4ce498632f5f2030e96c6035bd2b8
+ size 436834325
results/autoencoder/isolated/models/news_brazilian_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad68858f7adbf2716f2625f3ac23616e7be798ad825765de9a2d215ba716770a
+ size 436832812
results/autoencoder/isolated/models/news_european_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab068cf0430b89fb2a7d0f5328eaee6b3d8eff5a45861eac1cfb86e674a5c426
+ size 436832599
results/autoencoder/isolated/models/politics_brazilian_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7269e21cda82de1812f3d04dc780e51b423ecd21406ff688c3c13a9f475c2880
+ size 436834112
results/autoencoder/isolated/models/politics_european_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab8fc3cdf5dcd2f68767a85d99a51aa4670c875299a33c39988d7dc0d9d7ba54
+ size 436833899
results/autoencoder/isolated/models/social_media_brazilian_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24eb156db710d19443aefbd0f99ab62434ae2c7fa4cc02220048b2be7fd3d945
+ size 436834964
results/autoencoder/isolated/models/social_media_european_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b00fa7475744c1f0ce3da688ef96615c40e62777329954d3220a281647fb782
+ size 436834751
results/autoencoder/isolated/models/web_brazilian_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18d4403dbe50ce55ee68db624dfa96b48caa75d913317b7e403197a2518471b7
+ size 436832599
results/autoencoder/isolated/models/web_european_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96d788e6a7ffbcae6537abab2318ee948276d095dcd8efe0211cb3b24bedf76f
+ size 436832386
results/autoencoder/isolated/out/autoencoder_two_models_bert.json ADDED
@@ -0,0 +1,298 @@
+ [
+   {"train_domain": "politics", "test_domain": "politics", "accuracy": 0.9964871194, "f1": 0.995898838, "precision": 0.9986291981, "recall": 0.9931833674},
+   {"train_domain": "politics", "test_domain": "news", "accuracy": 0.9326, "f1": 0.8001897308, "precision": 0.9323017408, "recall": 0.7008724553},
+   {"train_domain": "politics", "test_domain": "law", "accuracy": 0.733, "f1": 0.6927502877, "precision": 0.8157181572, "recall": 0.602},
+   {"train_domain": "politics", "test_domain": "social_media", "accuracy": 0.7583333333, "f1": 0.7826086957, "precision": 0.7111716621, "recall": 0.87},
+   {"train_domain": "politics", "test_domain": "literature", "accuracy": 0.7081749049, "f1": 0.2194915254, "precision": 0.1695026178, "recall": 0.3112980769},
+   {"train_domain": "politics", "test_domain": "web", "accuracy": 0.8920351986, "f1": 0.9221761405, "precision": 0.97405944, "recall": 0.8755404571},
+   {"train_domain": "news", "test_domain": "politics", "accuracy": 0.9411592506, "f1": 0.9358851675, "precision": 0.8800239952, "recall": 0.9993188011},
+   {"train_domain": "news", "test_domain": "news", "accuracy": 0.96158, "f1": 0.9057640422, "precision": 0.857275513, "recall": 0.9600665557},
+   {"train_domain": "news", "test_domain": "law", "accuracy": 0.783, "f1": 0.8181056161, "precision": 0.7041847042, "recall": 0.976},
+   {"train_domain": "news", "test_domain": "social_media", "accuracy": 0.6283333333, "f1": 0.7283800244, "precision": 0.5738963532, "recall": 0.9966666667},
+   {"train_domain": "news", "test_domain": "literature", "accuracy": 0.6192965779, "f1": 0.2373849572, "precision": 0.1612068966, "recall": 0.4500601685},
+   {"train_domain": "news", "test_domain": "web", "accuracy": 0.9623194946, "f1": 0.974758162, "precision": 0.9545589106, "recall": 0.9958307597},
+   {"train_domain": "law", "test_domain": "politics", "accuracy": 0.5825526932, "f1": 0.0606060606, "precision": 0.92, "recall": 0.0313351499},
+   {"train_domain": "law", "test_domain": "news", "accuracy": 0.80902, "f1": 0.0072772637, "precision": 0.9459459459, "recall": 0.0036526821},
+   {"train_domain": "law", "test_domain": "law", "accuracy": 0.627, "f1": 0.4069952305, "precision": 0.992248062, "recall": 0.256},
+   {"train_domain": "law", "test_domain": "social_media", "accuracy": 0.5, "f1": 0.0, "precision": 0.0, "recall": 0.0},
+   {"train_domain": "law", "test_domain": "literature", "accuracy": 0.8681875792, "f1": 0.0, "precision": 0.0, "recall": 0.0},
+   {"train_domain": "law", "test_domain": "web", "accuracy": 0.272450361, "f1": 0.0086087625, "precision": 1.0, "recall": 0.004322989},
+   {"train_domain": "social_media", "test_domain": "politics", "accuracy": 0.9464285714, "f1": 0.9376065462, "precision": 0.9385665529, "recall": 0.9366485014},
+   {"train_domain": "social_media", "test_domain": "news", "accuracy": 0.8606, "f1": 0.4624402283, "precision": 0.9065618385, "recall": 0.3103840977},
+   {"train_domain": "social_media", "test_domain": "law", "accuracy": 0.735, "f1": 0.6856465006, "precision": 0.8425655977, "recall": 0.578},
+   {"train_domain": "social_media", "test_domain": "social_media", "accuracy": 0.9133333333, "f1": 0.9155844156, "precision": 0.8924050633, "recall": 0.94},
+   {"train_domain": "social_media", "test_domain": "literature", "accuracy": 0.7621989861, "f1": 0.2337927514, "precision": 0.2030141844, "recall": 0.2755716005},
+   {"train_domain": "social_media", "test_domain": "web", "accuracy": 0.3959837545, "f1": 0.3026829904, "precision": 0.9675270608, "recall": 0.1794040451},
+   {"train_domain": "literature", "test_domain": "politics", "accuracy": 0.5714285714, "f1": 0.0040816327, "precision": 1.0, "recall": 0.0020449898},
+   {"train_domain": "literature", "test_domain": "news", "accuracy": 0.81004, "f1": 0.007523511, "precision": 0.4090909091, "recall": 0.0037966674},
+   {"train_domain": "literature", "test_domain": "law", "accuracy": 0.5, "f1": 0.0, "precision": 0.0, "recall": 0.0},
+   {"train_domain": "literature", "test_domain": "social_media", "accuracy": 0.5, "f1": 0.0, "precision": 0.0, "recall": 0.0},
+   {"train_domain": "literature", "test_domain": "literature", "accuracy": 0.9085868188, "f1": 0.6526189043, "precision": 0.6537997587, "recall": 0.6514423077},
+   {"train_domain": "literature", "test_domain": "web", "accuracy": 0.2691787004, "f1": 0.000308642, "precision": 0.3333333333, "recall": 0.0001543925},
+   {"train_domain": "web", "test_domain": "politics", "accuracy": 0.4294496487, "f1": 0.600860127, "precision": 0.4294496487, "recall": 1.0},
+   {"train_domain": "web", "test_domain": "news", "accuracy": 0.19406, "f1": 0.3250422927, "precision": 0.19406, "recall": 1.0},
+   {"train_domain": "web", "test_domain": "law", "accuracy": 0.5, "f1": 0.6666666667, "precision": 0.5, "recall": 1.0},
+   {"train_domain": "web", "test_domain": "social_media", "accuracy": 0.5, "f1": 0.6666666667, "precision": 0.5, "recall": 1.0},
+   {"train_domain": "web", "test_domain": "literature", "accuracy": 0.1316539924, "f1": 0.2326753465, "precision": 0.1316539924, "recall": 1.0},
+   {"train_domain": "web", "test_domain": "web", "accuracy": 0.7305956679, "f1": 0.8443285528, "precision": 0.7305956679, "recall": 1.0},
+   {"train_domain": "dslcc", "test_domain": "dslcc", "accuracy": 0.8385, "f1": 0.8532485234, "precision": 0.7818484596, "recall": 0.939}
+ ]
results/bert/all_mixed/models/all_mixed.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:beaadaa18288ba670f5462b6d64608141636430a8b8eab8d4a74bfbac0a3ff9c
+ size 870781645
results/bert/all_mixed/out/accuracy_chart.pdf ADDED
Binary file (12 kB).
results/bert/all_mixed/out/loss_chart.pdf ADDED
Binary file (11.7 kB).
results/bert/all_mixed/out/results.json ADDED
@@ -0,0 +1,20 @@
+ [
+   {"epoch": 0, "train_loss": 0.0914312218, "validation_loss": 0.0514053669, "validation_accuracy": 0.9849954874, "validation_f1": 0.9897747367, "validation_precision": 0.9857580398, "validation_recall": 0.9938243014},
+   {"epoch": 1, "train_loss": 0.0308213484, "validation_loss": 0.0557541026, "validation_accuracy": 0.9862364621, "validation_f1": 0.9906067139, "validation_precision": 0.988172043, "validation_recall": 0.9930534115}
+ ]
results/bert/ensemble/bert_ensemble.json ADDED
@@ -0,0 +1,58 @@
+ [
+ {
+ "domain": "law",
+ "accuracy": 0.9174107143,
+ "f1": 0.9160997732,
+ "precision": 0.9099099099,
+ "recall": 0.9223744292,
+ "reduction": "mean"
+ },
+ {
+ "domain": "literature",
+ "accuracy": 0.8989158163,
+ "f1": 0.5914948454,
+ "precision": 0.6296296296,
+ "recall": 0.5577156744,
+ "reduction": "mean"
+ },
+ {
+ "domain": "news",
+ "accuracy": 0.9659455128,
+ "f1": 0.9082816293,
+ "precision": 0.9614505997,
+ "recall": 0.8606850716,
+ "reduction": "mean"
+ },
+ {
+ "domain": "politics",
+ "accuracy": 0.9930889423,
+ "f1": 0.9918928446,
+ "precision": 0.9964589235,
+ "recall": 0.9873684211,
+ "reduction": "mean"
+ },
+ {
+ "domain": "social_media",
+ "accuracy": 0.87109375,
+ "f1": 0.875,
+ "precision": 0.8555555556,
+ "recall": 0.8953488372,
+ "reduction": "mean"
+ },
+ {
+ "domain": "web",
+ "accuracy": 0.9461050725,
+ "f1": 0.9621200064,
+ "precision": 0.9895236536,
+ "recall": 0.9361932786,
+ "reduction": "mean"
+ },
+ {
+ "domain": "dslcc",
+ "accuracy": 0.9417842742,
+ "precision": 0.927734375,
+ "recall": 0.9581442259,
+ "f1": 0.9426941206,
+ "reduction": "mean"
+ }
+ ]
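The "reduction": "mean" field indicates the ensemble output is produced by averaging the member models' class probabilities and taking the argmax. A hedged sketch of that reduction for PyTorch models with Hugging Face-style classification heads (the function and its signature are illustrative, not the repository's actual ensemble code):

import torch

def mean_reduction_predict(models, input_ids, attention_mask):
    # Average softmax probabilities across ensemble members ("mean" reduction),
    # then pick the most probable class per example.
    probs = []
    with torch.no_grad():
        for model in models:
            logits = model(input_ids=input_ids, attention_mask=attention_mask).logits
            probs.append(torch.softmax(logits, dim=-1))
    return torch.stack(probs).mean(dim=0).argmax(dim=-1)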
results/bert/isolated/models/law.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eea49d09c473d5e5f48cd13684f7ef8a2169b8cdef8c3f1b416098a6d2906af
+ size 870778721
results/bert/isolated/models/literature.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ed0b9eea2b8e952dc7de0b336df9ea9b7c711e2493335ad2e84f9cf6558160a
+ size 870781645
results/bert/isolated/models/news.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43919c77fb5b3c0b0c4f655db7aafe411e50288c7bcab0f6a2ca57b736669c8c
+ size 870781645
results/bert/isolated/models/politics.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb16dbe006cacded46926019d19444f594bd6837db5e4bf9b689c2cc121e3b31
+ size 870781645
results/bert/isolated/models/social_media.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea2013fd41113421e0bbc7c194c748b2938a5d87424e3b19180b696b7b5b9e1d
+ size 870781645
results/bert/isolated/models/web.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb13539ce7edba148aaee8689bf2ba64b57f52b9ddae5c3daf95b7bd9b1eb02f
+ size 870781645
results/bert/isolated/out/bert_isolated.json ADDED
@@ -0,0 +1,298 @@
+ [
+ {
+ "train_domain":"politics",
+ "test_domain":"politics",
+ "accuracy":1.0,
+ "f1":1.0,
+ "precision":1.0,
+ "recall":1.0
+ },
+ {
+ "train_domain":"politics",
+ "test_domain":"news",
+ "accuracy":0.79322,
+ "f1":0.6391148033,
+ "precision":0.4800985893,
+ "recall":0.9556367432
+ },
+ {
+ "train_domain":"politics",
+ "test_domain":"law",
+ "accuracy":0.537,
+ "f1":0.6537023186,
+ "precision":0.5221027479,
+ "recall":0.874
+ },
+ {
+ "train_domain":"politics",
+ "test_domain":"social_media",
+ "accuracy":0.745,
+ "f1":0.7779390421,
+ "precision":0.6889460154,
+ "recall":0.8933333333
+ },
+ {
+ "train_domain":"politics",
+ "test_domain":"literature",
+ "accuracy":0.2461977186,
+ "f1":0.2225490196,
+ "precision":0.1287821483,
+ "recall":0.8185096154
+ },
+ {
+ "train_domain":"politics",
+ "test_domain":"web",
+ "accuracy":0.7766245487,
+ "f1":0.8349449817,
+ "precision":0.9074107628,
+ "recall":0.773197468
+ },
+ {
+ "train_domain":"news",
+ "test_domain":"politics",
+ "accuracy":0.8711943794,
+ "f1":0.8433048433,
+ "precision":0.8829231916,
+ "recall":0.8070892979
+ },
+ {
+ "train_domain":"news",
+ "test_domain":"news",
+ "accuracy":0.9904,
+ "f1":0.9748348537,
+ "precision":0.9714733542,
+ "recall":0.978219697
+ },
+ {
+ "train_domain":"news",
+ "test_domain":"law",
+ "accuracy":0.867,
+ "f1":0.8618899273,
+ "precision":0.8963282937,
+ "recall":0.83
+ },
+ {
+ "train_domain":"news",
+ "test_domain":"social_media",
+ "accuracy":0.6516666667,
+ "f1":0.7052186178,
+ "precision":0.6112469438,
+ "recall":0.8333333333
+ },
+ {
+ "train_domain":"news",
+ "test_domain":"literature",
+ "accuracy":0.8113117871,
+ "f1":0.3252124646,
+ "precision":0.3076098607,
+ "recall":0.3449519231
+ },
+ {
+ "train_domain":"news",
+ "test_domain":"web",
+ "accuracy":0.9653655235,
+ "f1":0.9760735718,
+ "precision":0.9853658537,
+ "recall":0.9669549104
+ },
+ {
+ "train_domain":"law",
+ "test_domain":"politics",
+ "accuracy":0.7570257611,
+ "f1":0.759837963,
+ "precision":0.659798995,
+ "recall":0.8956343793
+ },
+ {
+ "train_domain":"law",
+ "test_domain":"news",
+ "accuracy":0.93408,
+ "f1":0.8249415764,
+ "precision":0.8448651001,
+ "recall":0.8059360731
+ },
+ {
+ "train_domain":"law",
+ "test_domain":"law",
+ "accuracy":0.991,
+ "f1":0.9909547739,
+ "precision":0.995959596,
+ "recall":0.986
+ },
+ {
+ "train_domain":"law",
+ "test_domain":"social_media",
+ "accuracy":0.5866666667,
+ "f1":0.6467236467,
+ "precision":0.5646766169,
+ "recall":0.7566666667
+ },
+ {
+ "train_domain":"law",
+ "test_domain":"literature",
+ "accuracy":0.8241444867,
+ "f1":0.2570281124,
+ "precision":0.2900302115,
+ "recall":0.2307692308
+ },
+ {
+ "train_domain":"law",
+ "test_domain":"web",
+ "accuracy":0.8910198556,
+ "f1":0.9210139002,
+ "precision":0.9787973584,
+ "recall":0.8696726374
+ },
+ {
+ "train_domain":"social_media",
+ "test_domain":"politics",
+ "accuracy":0.9871194379,
+ "f1":0.9851551957,
+ "precision":0.9759358289,
+ "recall":0.9945504087
+ },
+ {
+ "train_domain":"social_media",
+ "test_domain":"news",
+ "accuracy":0.92494,
+ "f1":0.7731366741,
+ "precision":0.9403028966,
+ "recall":0.6564360501
+ },
+ {
+ "train_domain":"social_media",
+ "test_domain":"law",
+ "accuracy":0.831,
+ "f1":0.8234064786,
+ "precision":0.8621444201,
+ "recall":0.788
+ },
+ {
+ "train_domain":"social_media",
+ "test_domain":"social_media",
+ "accuracy":0.9716666667,
+ "f1":0.9713322091,
+ "precision":0.9829351536,
+ "recall":0.96
+ },
+ {
+ "train_domain":"social_media",
+ "test_domain":"literature",
+ "accuracy":0.806400507,
+ "f1":0.285380117,
+ "precision":0.2775881684,
+ "recall":0.293622142
+ },
+ {
+ "train_domain":"social_media",
+ "test_domain":"web",
+ "accuracy":0.9073781588,
+ "f1":0.9333766128,
+ "precision":0.9837495724,
+ "recall":0.8879110699
+ },
+ {
+ "train_domain":"literature",
+ "test_domain":"politics",
+ "accuracy":0.6053864169,
+ "f1":0.1543287327,
+ "precision":0.9761904762,
+ "recall":0.0837874659
+ },
+ {
+ "train_domain":"literature",
+ "test_domain":"news",
+ "accuracy":0.82226,
+ "f1":0.1939229025,
+ "precision":0.7267165194,
+ "recall":0.1118903077
+ },
+ {
+ "train_domain":"literature",
+ "test_domain":"law",
+ "accuracy":0.534,
+ "f1":0.1527272727,
+ "precision":0.84,
+ "recall":0.084
+ },
+ {
+ "train_domain":"literature",
+ "test_domain":"social_media",
+ "accuracy":0.5566666667,
+ "f1":0.3813953488,
+ "precision":0.6307692308,
+ "recall":0.2733333333
+ },
+ {
+ "train_domain":"literature",
+ "test_domain":"literature",
+ "accuracy":0.9632446134,
+ "f1":0.8730853392,
+ "precision":0.8012048193,
+ "recall":0.9591346154
+ },
+ {
+ "train_domain":"literature",
+ "test_domain":"web",
+ "accuracy":0.3385604693,
+ "f1":0.1805730259,
+ "precision":0.9513991163,
+ "recall":0.0997529339
+ },
+ {
+ "train_domain":"web",
+ "test_domain":"politics",
+ "accuracy":0.9464285714,
+ "f1":0.9397431676,
+ "precision":0.9094964946,
+ "recall":0.9720708447
+ },
+ {
+ "train_domain":"web",
+ "test_domain":"news",
+ "accuracy":0.93934,
+ "f1":0.8187197418,
+ "precision":0.9561636186,
+ "recall":0.7158235786
+ },
+ {
+ "train_domain":"web",
+ "test_domain":"law",
+ "accuracy":0.848,
+ "f1":0.8512720157,
+ "precision":0.8333333333,
+ "recall":0.87
+ },
+ {
+ "train_domain":"web",
+ "test_domain":"social_media",
+ "accuracy":0.635,
+ "f1":0.5628742515,
+ "precision":0.7014925373,
+ "recall":0.47
+ },
+ {
+ "train_domain":"web",
+ "test_domain":"literature",
+ "accuracy":0.8678707224,
+ "f1":0.1146496815,
+ "precision":0.4821428571,
+ "recall":0.065060241
+ },
+ {
+ "train_domain":"web",
+ "test_domain":"web",
+ "accuracy":0.9877030686,
+ "f1":0.9915862601,
+ "precision":0.9913566908,
+ "recall":0.9918159358
+ },
+ {
+ "train_domain": "dslcc",
+ "test_domain": "dslcc",
+ "accuracy": 0.962,
+ "f1": 0.962962962962963,
+ "precision": 0.9391634980988594,
+ "recall": 0.988
+ }
+ ]
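A quick way to read any of these isolated grids is to compare the in-domain diagonal against the cross-domain off-diagonal. A minimal sketch, again assuming pandas:

import json

import pandas as pd

with open("results/bert/isolated/out/bert_isolated.json") as f:
    df = pd.DataFrame(json.load(f))

# Mean accuracy when testing on the training domain vs. on every other domain.
in_domain = df[df.train_domain == df.test_domain]["accuracy"].mean()
cross_domain = df[df.train_domain != df.test_domain]["accuracy"].mean()
print(f"in-domain {in_domain:.3f} vs. cross-domain {cross_domain:.3f}")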
results/n_grams/ensemble/n_gram_ensemble.json ADDED
@@ -0,0 +1,58 @@
+ [
+ {
+ "domain": "politics",
+ "strategy": "majority_voting",
+ "f1": 0.7564366632,
+ "precision": 0.6082815735,
+ "recall": 1.0,
+ "accuracy": 0.7233109096
+ },
+ {
+ "domain": "news",
+ "strategy": "majority_voting",
+ "f1": 0.356506143,
+ "precision": 0.2170128905,
+ "recall": 0.9980236038,
+ "accuracy": 0.3097422841
+ },
+ {
+ "domain": "law",
+ "strategy": "majority_voting",
+ "f1": 0.6983002833,
+ "precision": 0.5405701754,
+ "recall": 0.986,
+ "accuracy": 0.574
+ },
+ {
+ "domain": "social_media",
+ "strategy": "majority_voting",
+ "f1": 0.6674082314,
+ "precision": 0.5008347245,
+ "recall": 1.0,
+ "accuracy": 0.5016666667
+ },
+ {
+ "domain": "literature",
+ "strategy": "majority_voting",
+ "f1": 0.2360283688,
+ "precision": 0.133805082,
+ "recall": 1.0,
+ "accuracy": 0.1473800855
+ },
+ {
+ "domain": "web",
+ "strategy": "majority_voting",
+ "f1": 0.8529256721,
+ "precision": 0.7440790986,
+ "recall": 0.9990737882,
+ "accuracy": 0.7482517483
+ },
+ {
+ "domain": "dslcc",
+ "strategy": "majority_voting",
+ "accuracy": 0.5605,
+ "f1": 0.6936214708957825,
+ "precision": 0.5323702514713751,
+ "recall": 0.995
+ }
+ ]
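The "strategy": "majority_voting" field means each domain-specific n-gram model casts one vote per example and the most frequent label wins. A minimal sketch, assuming the unpickled models expose a scikit-learn-style predict:

from collections import Counter

def majority_vote(models, texts):
    # One prediction per model per text; ties resolve to the label that
    # Counter.most_common encounters first (arbitrary but deterministic).
    votes = [model.predict(texts) for model in models]
    return [Counter(column).most_common(1)[0][0] for column in zip(*votes)]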
results/n_grams/isolated/models/law.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd1926e566a8840d9656aadf955343fdf8a8841c3bc251058e37baf3e726b986
+ size 3870287
results/n_grams/isolated/models/literature.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fa782a2211bbbb110fa891014747cbcc1d08347562d12538a57eefef353942e
+ size 3643250
results/n_grams/isolated/models/news.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f9f1276713546b4ad7e866caf06ff6f8139f273b10353a6daf234197786e28c
+ size 11673593
results/n_grams/isolated/models/politics.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92d473047d3737c5670d96c1c54cd7bff379fb25b827bd122cc7b454450df3a0
+ size 6181398
results/n_grams/isolated/models/social_media.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3db018d5788a331601603653bbbf660a36849033d73f2db133f26695db5fe772
+ size 4416756
results/n_grams/isolated/models/web.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee28487012dbe05c85bf7ec19cf406f2cef194b654c8057207e08bca2af26f38
+ size 8580899
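The .pickle models can be restored directly with the standard library. A hedged sketch, assuming each pickle holds a fitted scikit-learn-style classifier with a predict method (the "analyzer": "word" field in the results below points at a word-level vectorizer, but the exact object type is an assumption):

import pickle

with open("results/n_grams/isolated/models/law.pickle", "rb") as f:
    model = pickle.load(f)

# Hypothetical usage: label one held-out sentence; the label encoding is an assumption.
print(model.predict(["O governo aprovou a nova proposta de lei."]))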
results/n_grams/isolated/out/n_gram_isolated.json ADDED
@@ -0,0 +1,299 @@
+ [
+ {
+ "train_domain": "politics",
+ "test_domain": "politics",
+ "accuracy": 0.9874232232,
+ "f1": 0.985565626,
+ "precision": 0.9721854305,
+ "recall": 0.9993192648
+ },
+ {
+ "train_domain": "politics",
+ "test_domain": "news",
+ "accuracy": 0.3608,
+ "f1": 0.3550730487,
+ "precision": 0.2192156276,
+ "recall": 0.9337720229
+ },
+ {
+ "train_domain": "politics",
+ "test_domain": "law",
+ "accuracy": 0.519,
+ "f1": 0.5842696629,
+ "precision": 0.5144596651,
+ "recall": 0.676
+ },
+ {
+ "train_domain": "politics",
+ "test_domain": "social_media",
+ "accuracy": 0.53,
+ "f1": 0.6766055046,
+ "precision": 0.5157342657,
+ "recall": 0.9833333333
+ },
+ {
+ "train_domain": "politics",
+ "test_domain": "literature",
+ "accuracy": 0.175716321,
+ "f1": 0.2392987582,
+ "precision": 0.1362048894,
+ "recall": 0.984375
+ },
+ {
+ "train_domain": "politics",
+ "test_domain": "web",
+ "accuracy": 0.6861042184,
+ "f1": 0.7966683714,
+ "precision": 0.7562768761,
+ "recall": 0.8416177833
+ },
+ {
+ "train_domain": "news",
+ "test_domain": "politics",
+ "accuracy": 0.831529687,
+ "f1": 0.8360842345,
+ "precision": 0.7183374083,
+ "recall": 1.0
+ },
+ {
+ "train_domain": "news",
+ "test_domain": "news",
+ "accuracy": 0.98862,
+ "f1": 0.9705196622,
+ "precision": 0.9575708005,
+ "recall": 0.9838235294
+ },
+ {
+ "train_domain": "news",
+ "test_domain": "law",
+ "accuracy": 0.325,
+ "f1": 0.2734122713,
+ "precision": 0.296037296,
+ "recall": 0.254
+ },
+ {
+ "train_domain": "news",
+ "test_domain": "social_media",
+ "accuracy": 0.5566666667,
+ "f1": 0.6366120219,
+ "precision": 0.5393518519,
+ "recall": 0.7766666667
+ },
+ {
+ "train_domain": "news",
+ "test_domain": "literature",
+ "accuracy": 0.3902168751,
+ "f1": 0.2372277228,
+ "precision": 0.1420104315,
+ "recall": 0.7199519231
+ },
+ {
+ "train_domain": "news",
+ "test_domain": "web",
+ "accuracy": 0.7957365215,
+ "f1": 0.8756437547,
+ "precision": 0.7886209029,
+ "recall": 0.9842543995
+ },
+ {
+ "train_domain": "law",
+ "test_domain": "politics",
+ "accuracy": 0.4474992688,
+ "f1": 0.077186126,
+ "precision": 0.1366782007,
+ "recall": 0.0537780803
+ },
+ {
+ "train_domain": "law",
+ "test_domain": "news",
+ "accuracy": 0.66152,
+ "f1": 0.2485569665,
+ "precision": 0.2167583056,
+ "recall": 0.2912894162
+ },
+ {
+ "train_domain": "law",
+ "test_domain": "law",
+ "accuracy": 0.944,
+ "f1": 0.94092827,
+ "precision": 0.9955357143,
+ "recall": 0.892
+ },
+ {
+ "train_domain": "law",
+ "test_domain": "social_media",
+ "accuracy": 0.5366666667,
+ "f1": 0.3015075377,
+ "precision": 0.612244898,
+ "recall": 0.2
+ },
+ {
+ "train_domain": "law",
+ "test_domain": "literature",
+ "accuracy": 0.8163685294,
+ "f1": 0.161849711,
+ "precision": 0.2028985507,
+ "recall": 0.1346153846
+ },
+ {
+ "train_domain": "law",
+ "test_domain": "web",
+ "accuracy": 0.466839612,
+ "f1": 0.4695320391,
+ "precision": 0.8598438142,
+ "recall": 0.3229391788
+ },
+ {
+ "train_domain": "social_media",
+ "test_domain": "politics",
+ "accuracy": 0.9128400117,
+ "f1": 0.8960223308,
+ "precision": 0.9191123837,
+ "recall": 0.8740639891
+ },
+ {
+ "train_domain": "social_media",
+ "test_domain": "news",
+ "accuracy": 0.79194,
+ "f1": 0.3744813902,
+ "precision": 0.4545985401,
+ "recall": 0.3183723546
+ },
+ {
+ "train_domain": "social_media",
+ "test_domain": "law",
+ "accuracy": 0.657,
+ "f1": 0.5619412516,
+ "precision": 0.777385159,
+ "recall": 0.44
+ },
+ {
+ "train_domain": "social_media",
+ "test_domain": "social_media",
+ "accuracy": 0.95,
+ "f1": 0.9503311258,
+ "precision": 0.9440789474,
+ "recall": 0.9566666667
+ },
+ {
+ "train_domain": "social_media",
+ "test_domain": "literature",
+ "accuracy": 0.7101472218,
+ "f1": 0.249897583,
+ "precision": 0.1895587321,
+ "recall": 0.3665865385
+ },
+ {
+ "train_domain": "social_media",
+ "test_domain": "web",
+ "accuracy": 0.3842770133,
+ "f1": 0.2850032744,
+ "precision": 0.9403630078,
+ "recall": 0.1679530719
+ },
+ {
+ "train_domain": "literature",
+ "test_domain": "politics",
+ "accuracy": 0.5691722726,
+ "f1": 0.0133958473,
+ "precision": 0.4166666667,
+ "recall": 0.0068073519
+ },
+ {
+ "train_domain": "literature",
+ "test_domain": "news",
+ "accuracy": 0.80216,
+ "f1": 0.1064137308,
+ "precision": 0.4006802721,
+ "recall": 0.0613541667
+ },
+ {
+ "train_domain": "literature",
+ "test_domain": "law",
+ "accuracy": 0.542,
+ "f1": 0.1821428571,
+ "precision": 0.85,
+ "recall": 0.102
+ },
+ {
+ "train_domain": "literature",
+ "test_domain": "social_media",
+ "accuracy": 0.465,
+ "f1": 0.3977485929,
+ "precision": 0.4549356223,
+ "recall": 0.3533333333
+ },
+ {
+ "train_domain": "literature",
+ "test_domain": "literature",
+ "accuracy": 0.9086591737,
+ "f1": 0.7235265932,
+ "precision": 0.6015936255,
+ "recall": 0.9074519231
+ },
+ {
+ "train_domain": "literature",
+ "test_domain": "web",
+ "accuracy": 0.2752086623,
+ "f1": 0.0365817091,
+ "precision": 0.6354166667,
+ "recall": 0.0188329731
+ },
+ {
+ "train_domain": "web",
+ "test_domain": "politics",
+ "accuracy": 0.6715413864,
+ "f1": 0.5491770373,
+ "precision": 0.6692759295,
+ "recall": 0.4656228727
+ },
+ {
+ "train_domain": "web",
+ "test_domain": "news",
+ "accuracy": 0.89038,
+ "f1": 0.653605511,
+ "precision": 0.8251156853,
+ "recall": 0.5411259941
+ },
+ {
+ "train_domain": "web",
+ "test_domain": "law",
+ "accuracy": 0.694,
+ "f1": 0.6408450704,
+ "precision": 0.7755681818,
+ "recall": 0.546
+ },
+ {
+ "train_domain": "web",
+ "test_domain": "social_media",
+ "accuracy": 0.525,
+ "f1": 0.2784810127,
+ "precision": 0.5789473684,
+ "recall": 0.1833333333
+ },
+ {
+ "train_domain": "web",
+ "test_domain": "literature",
+ "accuracy": 0.8576856103,
+ "f1": 0.0343716434,
+ "precision": 0.1616161616,
+ "recall": 0.0192307692
+ },
+ {
+ "train_domain": "web",
+ "test_domain": "web",
+ "accuracy": 0.8958944282,
+ "f1": 0.9253417455,
+ "precision": 0.9719626168,
+ "recall": 0.8829885767
+ },
+ {
+ "train_domain": "dslcc",
+ "test_domain": "dslcc",
+ "accuracy": 0.9225,
+ "f1": 0.9233811171527434,
+ "precision": 0.9130009775171065,
+ "recall": 0.934,
+ "analyzer": "word"
+ }
+ ]