import logging
import os
from pathlib import Path

import evaluate  # Hugging Face `evaluate`, needed for the metric .compute() calls below
import pandas as pd
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertModel, BertTokenizerFast

CURRENT_PATH = Path(__file__).parent


def tokenize(dataset):
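    """Tokenize the dataset's `text` column with the Portuguese BERT tokenizer, padding/truncating to 512 tokens."""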
    BERT_MAX_LEN = 512

    tokenizer = BertTokenizerFast.from_pretrained(
        "neuralmind/bert-base-portuguese-cased", max_length=BERT_MAX_LEN)

    dataset = dataset.map(lambda example: tokenizer(
        example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))

    return dataset


def create_dataloader(dataset, shuffle=True):
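    """Wrap a tokenized dataset in a DataLoader; drop_last=True keeps every batch at exactly 8 examples."""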
    return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=True)


class AutoEncoder(torch.nn.Module):
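    """Autoencoder over frozen BERT [CLS] embeddings.

    The frozen Portuguese BERT yields a 768-dim sentence embedding; the
    encoder compresses it (768 -> 153 -> 76 -> 25) and the decoder tries to
    reconstruct it. Reconstruction error is later used to decide which
    Portuguese variant a text belongs to.
    """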

    def __init__(self):
        super().__init__()

        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        self.bert = BertModel.from_pretrained(
            'neuralmind/bert-base-portuguese-cased').to(self.device)

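        # BERT is used purely as a frozen feature extractor.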
        for param in self.bert.parameters():
            param.requires_grad = False

        self.encoder = torch.nn.Sequential(
            torch.nn.Linear(self.bert.config.hidden_size,
                            self.bert.config.hidden_size // 5),
            torch.nn.ReLU(),
            torch.nn.Linear(self.bert.config.hidden_size // 5,
                            self.bert.config.hidden_size // 10),
            torch.nn.ReLU(),
            torch.nn.Linear(self.bert.config.hidden_size // 10,
                            self.bert.config.hidden_size // 30),
            torch.nn.ReLU(),
        ).to(self.device)
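
        # The decoder mirrors the encoder, mapping the bottleneck
        # representation back up to the embedding dimension.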
        self.decoder = torch.nn.Sequential(
            torch.nn.Linear(self.bert.config.hidden_size // 30,
                            self.bert.config.hidden_size // 10),
            torch.nn.ReLU(),
            torch.nn.Linear(self.bert.config.hidden_size // 10,
                            self.bert.config.hidden_size // 5),
            torch.nn.ReLU(),
            torch.nn.Linear(self.bert.config.hidden_size // 5,
                            self.bert.config.hidden_size),
            torch.nn.Sigmoid()
        ).to(self.device)

    def forward(self, input_ids, attention_mask):
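        """Return the [CLS] embedding and its reconstruction for a batch."""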
        # Use the [CLS] token embedding as the sentence representation.
        bert_output = self.bert(input_ids=input_ids,
                                attention_mask=attention_mask).last_hidden_state[:, 0, :]

        encoded = self.encoder(bert_output)
        decoded = self.decoder(encoded)

        return bert_output, decoded


def load_models():
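    """Load the (Brazilian, European) autoencoder pair trained on each domain.

    Expects checkpoints at models/autoencoder/{domain}_{lang}_model.pt,
    relative to this file.
    """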
    models = []

    for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        logging.info(f"Loading {domain} model...")

        accumulator = []

        for lang in ['brazilian', 'european']:
            model = AutoEncoder()
            # map_location keeps loading working on CPU-only machines even if
            # the checkpoints were saved from a GPU.
            model.load_state_dict(torch.load(
                os.path.join(CURRENT_PATH, 'models', 'autoencoder',
                             f'{domain}_{lang}_model.pt'),
                map_location=model.device))
            accumulator.append(model)

        models.append({
            'models': accumulator,
            'train_domain': domain,
        })

    return models


def benchmark(model, debug=False):
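    """Evaluate one domain's autoencoder pair across every test domain.

    Each test example is scored by both autoencoders; the variant whose
    autoencoder reconstructs the frozen BERT embedding with the lower MSE
    is predicted.
    """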
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Metric objects from Hugging Face `evaluate`, loaded once per call.
    accuracy_metric = evaluate.load('accuracy')
    f1_metric = evaluate.load('f1')
    precision_metric = evaluate.load('precision')
    recall_metric = evaluate.load('recall')

    df_results = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])

    train_domain = model['train_domain']

    brazilian_model = model['models'][0]
    european_model = model['models'][1]

    brazilian_model.eval()
    european_model.eval()

    brazilian_model.to(device)
    european_model.to(device)

    for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        dataset = load_dataset(
            'arubenruben/Portuguese_Language_Identification', test_domain, split='test')

        if debug:
            logging.info(f"Debugging {test_domain} dataset...")
            dataset = dataset.select(range(100))
        else:
            dataset = dataset.shuffle().select(range(min(50_000, len(dataset))))

        dataset = tokenize(dataset)

        dataset.set_format(type='torch', columns=[
            'input_ids', 'attention_mask', 'label'])

        dataset = create_dataloader(dataset)

        predictions = []
        labels = []
        total_loss = 0.0  # running average of the winning model's loss (diagnostic only)

        # Per-element MSE, so every example keeps its own reconstruction error.
        reconstruction_loss = torch.nn.MSELoss(reduction='none')

        with torch.no_grad():
            for batch in tqdm(dataset):
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                label = batch['label'].to(device)

                bert_european, reconstruction_european = european_model(
                    input_ids=input_ids, attention_mask=attention_mask)

                bert_brazilian, reconstruction_brazilian = brazilian_model(
                    input_ids=input_ids, attention_mask=attention_mask)

                test_loss_european = reconstruction_loss(
                    reconstruction_european, bert_european)

                test_loss_brazilian = reconstruction_loss(
                    reconstruction_brazilian, bert_brazilian)

                # The autoencoder with the lower reconstruction error claims the
                # example: 0 for European, 1 for Brazilian (the label convention
                # this script assumes for the dataset).
                for loss_european, loss_brazilian in zip(test_loss_european, test_loss_brazilian):
                    if loss_european.mean().item() < loss_brazilian.mean().item():
                        predictions.append(0)
                        total_loss += loss_european.mean().item() / len(test_loss_european)
                    else:
                        predictions.append(1)
                        total_loss += loss_brazilian.mean().item() / len(test_loss_brazilian)

                labels.extend(label.tolist())

        accuracy = accuracy_metric.compute(
            predictions=predictions, references=labels)['accuracy']
        f1 = f1_metric.compute(predictions=predictions, references=labels)['f1']
        precision = precision_metric.compute(
            predictions=predictions, references=labels)['precision']
        recall = recall_metric.compute(
            predictions=predictions, references=labels)['recall']

        df_results = pd.concat([df_results, pd.DataFrame(
            [[train_domain, test_domain, accuracy, f1, precision, recall]],
            columns=df_results.columns)], ignore_index=True)

    return df_results


def test():
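    """Run the cross-domain benchmark for every model pair and save the results as JSON."""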
    DEBUG = True

    models = load_models()

    df_results = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])

    for model in models:
        logging.info(f"Train Domain {model['train_domain']}...")

        df_results = pd.concat([df_results, benchmark(
            model, debug=DEBUG)], ignore_index=True)

    logging.info("Saving results...")

    # Guard against the results/ directory not existing yet.
    os.makedirs(os.path.join(CURRENT_PATH, 'results'), exist_ok=True)
    df_results.to_json(os.path.join(CURRENT_PATH, 'results', 'autoencoder.json'),
                       orient='records', indent=4, force_ascii=False)


if __name__ == '__main__':
    # Configure logging so the progress messages are actually visible.
    logging.basicConfig(level=logging.INFO)
    test()