import torch
from transformers import BertModel, RobertaModel
from torch import nn
from sklearn.metrics import accuracy_score, classification_report
import numpy as np

class BertClassifier(nn.Module):
    """BERT encoder with a single-logit head for binary classification."""

    def __init__(self, model_name, dropout_rate=0.1):
        super().__init__()
        self.bert = BertModel.from_pretrained(model_name)
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(self.bert.config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        # Use the pooled [CLS] representation as the sequence embedding.
        output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        output = self.dropout(output.pooler_output)
        # Sigmoid squashes the logit to a probability in (0, 1) for BCELoss.
        return torch.sigmoid(self.fc(output))

class RobertaClassifier(nn.Module):
    """RoBERTa encoder with a single-logit head for binary classification."""

    def __init__(self, model_name, dropout_rate=0.1):
        super().__init__()
        self.roberta = RobertaModel.from_pretrained(model_name)
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(self.roberta.config.hidden_size, 1)

    def forward(self, input_ids, attention_mask):
        output = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
        output = self.dropout(output.pooler_output)
        return torch.sigmoid(self.fc(output))
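
# The two classifiers expose the same interface and are interchangeable; each
# expects batches produced by its matching tokenizer. A minimal sketch
# (checkpoint names are illustrative, not prescribed by this file):
#
#     model = BertClassifier('bert-base-uncased')
#     model = RobertaClassifier('roberta-base')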

def train(model, data_loader, optimizer, scheduler, device):
    """Run one epoch of training with binary cross-entropy loss."""
    model.to(device)
    model.train()
    loss_func = nn.BCELoss()
    for batch in data_loader:
        optimizer.zero_grad()

        # Move the batch to the target device. BCELoss requires float targets
        # shaped like the model output, hence the cast and reshape to (B, 1).
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        y_true = batch['label'].float().reshape(-1, 1).to(device)

        # Forward pass
        output = model(input_ids, attention_mask)

        # Compute loss
        loss = loss_func(output, y_true)

        # Backward pass, then parameter and learning-rate updates
        loss.backward()
        optimizer.step()
        scheduler.step()

def evaluate(model, data_loader, device):
    """Return (accuracy, classification report) over a labeled data loader."""
    model.eval()
    predictions = []
    val_labels = []
    torch.cuda.empty_cache()
    # Disable gradient tracking during evaluation to save memory and compute.
    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            y_true = batch['label'].tolist()

            output = model(input_ids, attention_mask)
            # Threshold the sigmoid probabilities at 0.5 to get hard labels.
            y_pred = (output.cpu().numpy() > 0.5).astype(np.int64).reshape(-1).tolist()

            predictions.extend(y_pred)
            val_labels.extend(y_true)

    return accuracy_score(val_labels, predictions), classification_report(val_labels, predictions)
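

if __name__ == '__main__':
    # End-to-end usage sketch, assuming batches of dicts with the keys
    # 'input_ids', 'attention_mask', and 'label' that train()/evaluate()
    # expect. The texts, labels, and hyperparameters below are placeholders,
    # not part of the original script.
    from torch.utils.data import DataLoader
    from transformers import BertTokenizer, get_linear_schedule_with_warmup

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    model = BertClassifier('bert-base-uncased')

    texts = ['an example of the positive class', 'an example of the negative class']
    labels = [1.0, 0.0]
    enc = tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
    examples = [
        {'input_ids': enc['input_ids'][i],
         'attention_mask': enc['attention_mask'][i],
         'label': torch.tensor(labels[i])}
        for i in range(len(texts))
    ]
    # The default collate_fn stacks same-shaped tensors into batched dicts.
    data_loader = DataLoader(examples, batch_size=2, shuffle=True)

    epochs = 3
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0,
        num_training_steps=epochs * len(data_loader))

    for _ in range(epochs):
        train(model, data_loader, optimizer, scheduler, device)

    # Evaluating on the training batch here only to show the call signature.
    accuracy, report = evaluate(model, data_loader, device)
    print(f'accuracy: {accuracy:.3f}')
    print(report)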