import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.data.dataset import random_split
from torchtext.datasets import AG_NEWS
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import Vocab

from collections import Counter

from IPython.core.display import HTML, display
import torch
from captum.attr import visualization as viz
from captum.attr import Lime, LimeBase
from captum._utils.models.linear_model import SkLearnLinearRegression, SkLearnLasso
import pandas as pd
import os
import json
def pretreatment(comments):
    """Strip Chinese/ASCII punctuation and all whitespace from each comment.

    Args:
        comments: iterable of strings.

    Returns:
        list[str]: one cleaned string per input, order preserved.
    """
    # NOTE(review): defined here but never called in this file — presumably
    # used by an importer, so the public name is kept stable.
    punctuation = '。，？！：%&~（）、；“”&|,.?!:%&~();""'
    # Build the deletion table once; str.translate does the punctuation strip
    # in a single C-level pass instead of a per-character list comprehension.
    strip_punct = str.maketrans('', '', punctuation)
    result_comments = []
    for comment in comments:
        comment = comment.translate(strip_punct)
        # join(split()) drops every whitespace character, including \xa0 (NBSP)
        result_comments.append(''.join(comment.split()))
    return result_comments
# --- training split -----------------------------------------------------
# assumes train.tsv columns are (label, text) — TODO confirm against the file
train_path = "/mnt/COMP/baidu_lic_2022_interpretability/data/train.tsv"

train_df = pd.read_csv(train_path, sep='\t', header=0)
# Keep only examples whose text is 5-300 characters long; each kept row
# becomes a (label, text) tuple.
train_data =[]
for idx,item in train_df.iterrows():
    if 5<=len(str(item[1])) <=300:
        train_data.append((item[0],item[1]))

# --- validation split ---------------------------------------------------
# NOTE(review): columns here are item[1]/item[2], unlike the train file's
# item[0]/item[1] — presumably dev.tsv has a leading id column; verify.
dev_path = "/mnt/COMP/baidu_lic_2022_interpretability/data/dev.tsv"

dev_df = pd.read_csv(dev_path, sep='\t', header=0)

# Same 5-300 character length filter as the training split.
dev_data =[]
for idx,item in dev_df.iterrows():
    if 5 <= len(str(item[2])) <= 300:
        dev_data.append((item[1],item[2]))

def tokenizer_punc_mine(string):
    """Tokenize a string into single CJK characters, preserving '[MASK]'.

    Spaces are removed first; every character in the CJK Unified Ideographs
    range (U+4E00-U+9FFF) becomes its own token, everything else is dropped,
    and each literal '[MASK]' marker in the input is kept as one token.

    Returns:
        list[str]: the token sequence.
    """
    segments = string.replace(" ", "").split('[MASK]')
    last = len(segments) - 1
    tokens = []
    for i, segment in enumerate(segments):
        tokens.extend(ch for ch in segment if '\u4e00' <= ch <= '\u9fff')
        # A '[MASK]' sat between this segment and the next one.
        if i != last:
            tokens.append('[MASK]')
    return tokens

# Build the character-level vocabulary from the training texts.
word_counter = Counter()
for (label, line) in train_data:
    word_counter.update(tokenizer_punc_mine(line))
# NOTE(review): Counter-based Vocab(...) is the legacy torchtext API
# (removed in torchtext >= 0.12) — this file is pinned to an old torchtext.
voc = Vocab(word_counter, min_freq=5)  # drop characters seen < 5 times

print('Vocabulary size:', len(voc))

# Number of distinct labels in the training set.
num_class = len(set(label for label, _ in train_data))
print('Num of classes:', num_class)

class EmbeddingBagModel(nn.Module):
    """Bag-of-characters text classifier.

    An ``nn.EmbeddingBag`` pools the token embeddings of each example
    (flattened batch + offsets layout) and a single linear layer maps the
    pooled vector to per-class logits.
    """

    def __init__(self, vocab_size, embed_dim, num_class):
        super().__init__()
        # Attribute names are kept so saved state_dicts remain loadable.
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim)
        self.linear = nn.Linear(embed_dim, num_class)

    def forward(self, inputs, offsets):
        """Return (num_examples, num_class) logits.

        Args:
            inputs: 1-D tensor of token ids for the whole batch, concatenated.
            offsets: 1-D tensor with the start index of each example in
                ``inputs``.
        """
        pooled = self.embedding(inputs, offsets)
        logits = self.linear(pooled)
        return logits


BATCH_SIZE = 64


def collate_batch(batch):
    """Collate (label, text) pairs into EmbeddingBag's flattened layout.

    Returns:
        labels: 1-D tensor of labels.
        text: 1-D tensor of all token ids in the batch, concatenated.
        offsets: 1-D tensor with each example's start index into ``text``.
    """
    label_list, token_lists = [], []
    for label, line in batch:
        label_list.append(label)
        token_lists.append(tokenizer_punc_mine(line))

    labels = torch.tensor(label_list)

    # Map tokens to vocabulary ids, flattened across the whole batch.
    flat_ids = [voc[tok] for tokens in token_lists for tok in tokens]
    text = torch.tensor(flat_ids)

    # Cumulative lengths (shifted by one) give each example's start offset.
    lengths = [len(tokens) for tokens in token_lists]
    offsets = torch.tensor([0] + lengths[:-1]).cumsum(dim=0)

    return labels, text, offsets


# DataLoaders over the raw (label, text) tuples; collate_batch performs
# tokenization and numericalization on the fly.
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE,
                          shuffle=True, collate_fn=collate_batch)
val_loader = DataLoader(dev_data, batch_size=BATCH_SIZE,
                        shuffle=False, collate_fn=collate_batch)

if __name__=="__main__":

    EPOCHS = 20
    EMB_SIZE = 128
    CHECKPOINT = '/mnt/COMP/baidu_lic_2022_interpretability/saved/embedding_bag_ag_news.pt'
    USE_PRETRAINED = False  # change to False if you want to retrain your own model


    def train_model(train_loader, val_loader):
        model = EmbeddingBagModel(len(voc), EMB_SIZE, num_class)

        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters())

        for epoch in range(1, EPOCHS + 1):
            # training
            model.train()
            total_acc, total_count = 0, 0

            for idx, (label, text, offsets) in enumerate(train_loader):

                optimizer.zero_grad()
                predited_label = model(text, offsets)
                loss(predited_label, label).backward()
                optimizer.step()

                total_acc += (predited_label.argmax(1) == label).sum().item()
                total_count += label.size(0)

                if (idx + 1) % 50 == 0:
                    print('epoch {:3d} | {:5d}/{:5d} batches | accuracy {:8.3f}'.format(
                        epoch, idx + 1, len(train_loader), total_acc / (total_count + 1)
                    ))
                    total_acc, total_count = 0, 0

                    # evaluation
            model.eval()
            total_acc, total_count = 0, 0

            with torch.no_grad():
                for label, text, offsets in val_loader:
                    predited_label = model(text, offsets)
                    total_acc += (predited_label.argmax(1) == label).sum().item()

                    total_count += label.size(0)
                print('-' * 59)
                print('end of epoch {:3d} | valid accuracy {:8.3f} '.format(epoch, total_acc / (total_count + 1)))
                print('-' * 59)

        torch.save(model, CHECKPOINT)
        return model


    eb_model = torch.load(CHECKPOINT) if USE_PRETRAINED else train_model(train_loader, val_loader)

