arubenruben committed
Commit ed540d3
1 parent: 7511825

Embeddings Not yet tested

Files changed (2):
  1. embeddings.py +195 -1
  2. n_grams.py +2 -2
embeddings.py CHANGED
@@ -1,4 +1,198 @@
+import torch
 import logging
+from pathlib import Path
+import os
+from transformers import BertModel, BertForTokenClassification
+import pandas as pd
+import evaluate
+from datasets import load_dataset
+from transformers import BertTokenizerFast
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+
+def tokenize(dataset):
+    BERT_MAX_LEN = 512
+
+    tokenizer = BertTokenizerFast.from_pretrained(
+        "neuralmind/bert-base-portuguese-cased", max_length=BERT_MAX_LEN)
+
+    dataset = dataset.map(lambda example: tokenizer(
+        example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))
+
+    return dataset
+
+
+def create_dataloader(dataset, shuffle=True):
+    return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=True)
+
+
+CURRENT_PATH = Path(__file__).parent
+
+logging.basicConfig(level=logging.INFO,
+                    format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S', filename=os.path.join(CURRENT_PATH, 'out', 'debug_embeddings.txt'), filemode='w')
+
+
+class LanguageIdentifer(torch.nn.Module):
+    def __init__(self, mode='horizontal_stacking', pos_layers_to_freeze=0, bertimbau_layers_to_freeze=0):
+        super().__init__()
+
+        self.labels = ['pt-PT', 'pt-BR']
+
+        self.portuguese_model = BertModel.from_pretrained(
+            "neuralmind/bert-base-portuguese-cased")
+
+        self.portuguese_pos_tagging_model = BertForTokenClassification.from_pretrained(
+            "lisaterumi/postagger-portuguese")
+
+        for layer in range(bertimbau_layers_to_freeze):
+            for name, param in self.portuguese_model.named_parameters():
+                if f".{layer}" in name:
+                    print(f"Freezing Layer {name} of Bertimbau")
+                    param.requires_grad = False
+
+        for layer in range(pos_layers_to_freeze):
+            for name, param in self.portuguese_pos_tagging_model.named_parameters():
+                if f".{layer}" in name:
+                    print(f"Freezing Layer {name} of POS")
+                    param.requires_grad = False
+
+        self.portuguese_pos_tagging_model.classifier = torch.nn.Identity()
+        self.mode = mode
+
+        if self.mode == 'horizontal_stacking':
+            self.linear = self.common_network(torch.nn.Linear(
+                self.portuguese_pos_tagging_model.config.hidden_size + self.portuguese_model.config.hidden_size, 512))
+        elif self.mode == 'bertimbau_only' or self.mode == 'pos_only' or self.mode == 'vertical_sum':
+            self.linear = self.common_network(torch.nn.Linear(
+                self.portuguese_model.config.hidden_size, 512))
+        else:
+            raise NotImplementedError
+
+    def common_network(self, custom_linear):
+        return torch.nn.Sequential(
+            custom_linear,
+            torch.nn.ReLU(),
+            torch.nn.Dropout(0.2),
+            torch.nn.Linear(512, 1),
+        )
+
+    def forward(self, input_ids, attention_mask):
+
+        # [CLS] embedding from Bertimbau: (batch_size, hidden_size)
+        outputs_bert = self.portuguese_model(
+            input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
+
+        # [CLS] features from the POS tagger (classifier head replaced by Identity): (batch_size, hidden_size)
+        outputs_pos = self.portuguese_pos_tagging_model(
+            input_ids=input_ids, attention_mask=attention_mask).logits[:, 0, :]
+
+        if self.mode == 'horizontal_stacking':
+            outputs = torch.cat((outputs_bert, outputs_pos), dim=1)
+        elif self.mode == 'bertimbau_only':
+            outputs = outputs_bert
+        elif self.mode == 'pos_only':
+            outputs = outputs_pos
+        elif self.mode == 'vertical_sum':
+            outputs = outputs_bert + outputs_pos
+            outputs = torch.nn.functional.normalize(outputs, p=2, dim=1)
+
+        return self.linear(outputs)
+
+
+def load_models():
+    models = []
+
+    for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+        logging.info(f"Loading {domain} model...")
+
+        model = LanguageIdentifer(mode='pos_only')
+        model.load_state_dict(torch.load(os.path.join(
+            CURRENT_PATH, 'models', 'embeddings', f'{domain}.pt')))
+
+        models.append({
+            'model': model,
+            'train_domain': domain,
+        })
+
+    return models
+
+
+def benchmark(model, debug=False):
+
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    df_result = pd.DataFrame(
+        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+    train_domain = model['train_domain']
+
+    model = model['model']
+
+    model.to(device)
+
+    model.eval()
+
+    for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
+        dataset = load_dataset(
+            'arubenruben/Portuguese_Language_Identification', test_domain, split='test')
+
+        if debug:
+            logging.info("Debug mode: using only 100 samples")
+            dataset = dataset.shuffle().select(range(100))
+        else:
+            dataset = dataset.shuffle().select(range(min(50_000, len(dataset))))
+
+        dataset = tokenize(dataset)
+        dataset = create_dataloader(dataset)
+
+        y = []
+
+        with torch.no_grad():
+            for batch in tqdm(dataset):
+                input_ids = batch['input_ids'].to(device)
+                attention_mask = batch['attention_mask'].to(device)
+
+                y.extend(model(input_ids, attention_mask).cpu().detach().numpy())
+
+        y = [1 if y_ > 0.5 else 0 for y_ in y]
+
+        accuracy = evaluate.load('accuracy').compute(
+            predictions=y, references=dataset['label'])['accuracy']
+        f1 = evaluate.load('f1').compute(
+            predictions=y, references=dataset['label'])['f1']
+        precision = evaluate.load('precision').compute(
+            predictions=y, references=dataset['label'])['precision']
+        recall = evaluate.load('recall').compute(
+            predictions=y, references=dataset['label'])['recall']
+
+        df_result = pd.concat([df_result, pd.DataFrame({
+            'train_domain': [train_domain],
+            'test_domain': [test_domain],
+            'accuracy': [accuracy],
+            'f1': [f1],
+            'precision': [precision],
+            'recall': [recall],
+        })], ignore_index=True)
+
+    return df_result
+
 
 def test():
-    logging.info("Embeddings Not Implemented")
+    DEBUG = False
+
+    models = load_models()
+
+    df_results = pd.DataFrame(
+        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
+
+    for model in models:
+        logging.info(f"Train Domain {model['train_domain']}...")
+
+        df_results = pd.concat(
+            [df_results, benchmark(model, debug=DEBUG)], ignore_index=True)
+
+    logging.info("Saving Results...")
+
+    df_results.to_json(os.path.join(CURRENT_PATH, 'out', 'embeddings.json'),
+                       orient='records', indent=4, force_ascii=False)
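
As committed, `benchmark` will not run to completion, which bears out the "Not yet tested" commit message: `dataset` is rebound to a `DataLoader`, yet every metric call still indexes `dataset['label']`; the tokenized columns are plain Python lists, so `batch['input_ids'].to(device)` fails unless the dataset is put in torch format; and `drop_last=True` leaves fewer predictions than references. Below is a minimal sketch of an evaluation loop with those three issues addressed. It is not part of the commit: the helper name `benchmark_loop` is hypothetical, it assumes the tokenized dataset keeps an integer `label` column, and it preserves the commit's 0.5 cutoff.

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm


def benchmark_loop(model, dataset, device):
    # Tensor-format the tokenized columns so default collation yields tensors.
    dataset = dataset.with_format(
        'torch', columns=['input_ids', 'attention_mask', 'label'])

    # Keep the DataLoader in its own variable and avoid drop_last=True so
    # predictions and references stay the same length.
    dataloader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=8)

    y_pred, y_true = [], []

    with torch.no_grad():
        for batch in tqdm(dataloader):
            logits = model(batch['input_ids'].to(device),
                           batch['attention_mask'].to(device))
            y_pred.extend(logits.squeeze(-1).cpu().numpy())
            y_true.extend(batch['label'].numpy())

    # 0.5 cutoff as in the commit; note the head is an unbounded Linear(512, 1)
    # logit, so a sigmoid (or a 0.0 cutoff) may be what was intended.
    y_pred = [1 if score > 0.5 else 0 for score in y_pred]
    return y_pred, y_true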
n_grams.py CHANGED
@@ -12,7 +12,7 @@ import nltk
 CURRENT_PATH = Path(__file__).parent
 
 logging.basicConfig(level=logging.INFO,
-                    format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S', filename=os.path.join(CURRENT_PATH, 'out', 'debug.txt'), filemode='a')
+                    format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S', filename=os.path.join(CURRENT_PATH, 'out', 'debug_ngrams.txt'), filemode='w')
 
 nltk.download("stopwords")
 nltk.download("punkt")
@@ -87,7 +87,7 @@ def benchmark(pipeline, debug=False):
 
 def test():
 
-    DEBUG = True
+    DEBUG = False
 
     logging.info(f"Debug mode: {DEBUG}")
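
Since the commit flags the embeddings path as untested, the cheapest check is a debug-mode smoke run. A sketch, assuming embeddings.py is importable from the repo root and its hardcoded DEBUG constant is flipped to True so each test domain samples only 100 examples:

# Hypothetical smoke run; module and function names are as committed.
import embeddings

# Expects models/embeddings/{domain}.pt checkpoints and an out/ directory
# next to embeddings.py; results land in out/embeddings.json, with the log
# in out/debug_embeddings.txt (truncated each run by filemode='w').
embeddings.test()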