from transformers import BertModel, BertTokenizer
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import json
from torch.nn.utils.rnn import pad_sequence
import torch.optim as optim
from tqdm import tqdm

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MyDataSet(Dataset):
    """SNLI-style sentence-pair dataset producing BERT-ready tensors.

    Each line of *path* is a JSON object with keys "sentence1",
    "sentence2" and "gold_label".
    """

    def __init__(self, path):
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        self.label_to_id = {'contradiction': 0, 'entailment': 1, 'neutral': 2}
        self.data = []
        # Explicit encoding: JSON corpora are UTF-8; the platform default
        # (e.g. on Windows) may not be.
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:  # stream lines instead of readlines()
                example = json.loads(line)
                # SNLI marks examples without annotator consensus with
                # gold_label '-'; keeping them would raise KeyError in
                # __getitem__, so drop anything outside the label map.
                if example.get("gold_label") in self.label_to_id:
                    self.data.append(example)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return (input_ids, token_type_ids, attention_mask, label) for one pair."""
        example = self.data[idx]
        label = torch.tensor([self.label_to_id[example["gold_label"]]], dtype=torch.long)
        # Tokenize the pair jointly: [CLS] s1 [SEP] s2 [SEP].
        tokenized = self.tokenizer(example["sentence1"], example["sentence2"], return_tensors='pt')
        # squeeze() drops the batch dim the tokenizer adds for return_tensors='pt'.
        input_ids = tokenized['input_ids'].squeeze()
        attention_mask = tokenized['attention_mask'].squeeze()
        token_type_ids = tokenized['token_type_ids'].squeeze()
        return input_ids, token_type_ids, attention_mask, label


def my_collate_fn(batch):
    """Collate per-example tensors into padded batch tensors on `device`.

    *batch* is a list of (input_ids, token_type_ids, attention_mask, label)
    tuples; the three sequence tensors are right-padded with 0 and the
    per-example labels are concatenated into one 1-D tensor.
    """
    input_ids, token_type_ids, attention_mask, labels = zip(*batch)
    padded = [
        pad_sequence(seqs, batch_first=True).to(device)
        for seqs in (input_ids, token_type_ids, attention_mask)
    ]
    batch_labels = torch.cat(labels).to(device)
    return padded[0], padded[1], padded[2], batch_labels


class NLI(nn.Module):
    """Three-way NLI classifier: BERT encoder plus a linear head on [CLS]."""

    def __init__(self):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert-base-chinese')
        self.classifier = nn.Linear(768, 3)

    def forward(self, input_ids, attention_mask, token_type_ids):
        """Encode the token pair and return (batch, 3) classification logits."""
        outputs = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
        )
        # outputs[0] is the last hidden state; position 0 is the [CLS] token.
        cls_state = outputs[0][:, 0, :]
        return self.classifier(cls_state)


if __name__ == '__main__':
    # ---- training ----
    dataset = MyDataSet('snli-dev.json')
    dataloader = DataLoader(dataset=dataset, collate_fn=my_collate_fn, batch_size=32)
    model = NLI().to(device)
    optimizer = optim.AdamW(model.parameters(), lr=1e-5)
    criterion = nn.CrossEntropyLoss()
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    model.train()  # make the training mode explicit (enables dropout)
    for batch, (batch_input_ids, batch_token_type_ids, batch_attention_mask, label) in tqdm(enumerate(dataloader)):
        optimizer.zero_grad()
        logits = model(batch_input_ids, batch_attention_mask, batch_token_type_ids)
        loss = criterion(logits, label)
        if batch % 30 == 0:
            print(loss.item())
        loss.backward()
        optimizer.step()

    # ---- evaluation ----
    dataset = MyDataSet('snli-test.json')
    dataloader = DataLoader(dataset=dataset, collate_fn=my_collate_fn, batch_size=32)
    correct = 0
    model.eval()  # disable dropout so evaluation is deterministic
    with torch.no_grad():  # no gradients needed at eval time: saves memory/compute
        for batch, (batch_input_ids, batch_token_type_ids, batch_attention_mask, label) in tqdm(enumerate(dataloader)):
            logits = model(batch_input_ids, batch_attention_mask, batch_token_type_ids)
            preds = logits.max(-1)[1]
            # .item() keeps `correct` a plain Python int, so the final
            # accuracy prints as a float instead of a 0-d tensor.
            correct += preds.eq(label).sum().item()
            if batch % 50 == 0:
                # decode() expects a single sequence of ids; the original
                # passed the whole 2-D batch. Show the first example only.
                print(tokenizer.decode(batch_input_ids[0]))
                print(label)
                print(preds)
    print(correct / len(dataset))
