import torch
from datasets import load_dataset
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, BertModel
from torch.optim import AdamW
import time

# Module-level tokenizer loaded from a local checkpoint directory; shared by
# collate_fn below. Must match the BertModel checkpoint used in do().
tokenizer = BertTokenizer.from_pretrained('model/bert-base-chinese')


def load_all_dataset():
    """Load the train/test/validation CSV splits from the local data/ folder.

    Returns:
        tuple: (train_dataset, test_dataset, validation_dataset).
    """
    def _load_split(name):
        # Each CSV file is one split, so `split='train'` selects the whole file.
        ds = load_dataset(path="csv", data_files=f'data/{name}.csv', split='train')
        print(f'{name}_dataset={ds}')
        return ds

    train_dataset = _load_split('train')
    print(train_dataset[:3])  # peek at a few rows as a sanity check
    test_dataset = _load_split('test')
    validation_dataset = _load_split('validation')
    return train_dataset, test_dataset, validation_dataset


def collate_fn(batch):
    """Tokenize a batch of rows with 'text' and 'label' keys into BERT inputs.

    Returns:
        tuple: (input_ids, attention_mask, token_type_ids, labels); the first
        three are (batch, 300) tensors, labels is a LongTensor of class ids.
    """
    texts = [example['text'] for example in batch]
    targets = torch.LongTensor([example['label'] for example in batch])
    # Pad/truncate every sentence to exactly 300 tokens so batches stack.
    encoded = tokenizer.batch_encode_plus(
        texts,
        truncation=True,
        max_length=300,
        padding='max_length',
        return_tensors='pt',
        return_length=True,
    )
    return (encoded['input_ids'],
            encoded['attention_mask'],
            encoded['token_type_ids'],
            targets)


def get_dataloader(dataset):
    """Wrap *dataset* in a shuffling DataLoader that uses the BERT collate_fn."""
    return DataLoader(
        dataset,
        batch_size=16,
        shuffle=True,
        collate_fn=collate_fn,
        drop_last=True,  # keep every batch a full 16 examples
    )


class SentenceClassificationModel(nn.Module):
    def __init__(self, pretrained_model):
        super().__init__()
        self.pretrained_model = pretrained_model
        self.fc = nn.Linear(768, 2)

    def forward(self, input_ids, attention_mask, token_type_ids):
        with torch.no_grad():
            outputs = self.pretrained_model(input_ids=input_ids,
                                            attention_mask=attention_mask,
                                            token_type_ids=token_type_ids)
        outputs = self.fc(outputs.pooler_output)
        return outputs


def do():
    """Train the linear classification head on top of a frozen BERT encoder.

    Loads the local BERT checkpoint and CSV datasets, runs one training epoch
    over the train split while logging running loss/accuracy, then saves the
    model's state_dict to 'model/sentence_classify_on_bert'.
    """
    pretrained_model = BertModel.from_pretrained('model/bert-base-chinese')
    # Freeze the encoder: only the classifier head receives gradient updates.
    for param in pretrained_model.parameters():
        param.requires_grad = False

    train_dataset, test_dataset, validation_dataset = load_all_dataset()
    model = SentenceClassificationModel(pretrained_model)
    criterion = nn.CrossEntropyLoss()
    # AdamW only updates fc's parameters; the frozen encoder has no grads.
    optim = AdamW(model.parameters(), lr=5e-4)
    epoches = 1
    start = time.time()
    for i in range(epoches):
        epoch = i + 1
        dataloader = get_dataloader(train_dataset)
        total_items = 0
        total_correct = 0
        total_loss = 0.0
        for j, (input_ids, attention_mask, token_type_ids, labels) in enumerate(tqdm(dataloader), start=1):
            output: torch.Tensor = model(input_ids, attention_mask, token_type_ids)
            loss = criterion(output, labels)
            optim.zero_grad()
            loss.backward()
            optim.step()

            total_items += len(labels)
            # CrossEntropyLoss returns the batch *mean*; scale back to a sum
            # so total_loss / total_items is a true per-example average.
            total_loss += loss.item() * len(labels)
            # Fix: argmax over the class dimension. A bare argmax() flattens
            # the (batch, 2) logits and compares a single scalar index against
            # the whole label vector, yielding a meaningless accuracy.
            total_correct += (output.argmax(dim=1) == labels).sum().item()
            # `j` starts at 1, so this logs every 100th step (100, 200, ...);
            # the old `(j + 1) % 100` logged at 99, 199, ... by mistake.
            if j % 100 == 0:
                print(f'Epoch: {epoch}, Step: {j}, cost={time.time() - start:.3f}s '
                      f'avg_loss={total_loss / total_items:.5f}, avg_accuracy={total_correct / total_items:.5f}')

    torch.save(model.state_dict(), 'model/sentence_classify_on_bert')


if __name__ == '__main__':
    # Script entry point: run the full training procedure.
    do()
