import os
from tqdm import tqdm
from path import Path
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertModel
from torch import cuda
from tensorboardX import SummaryWriter
import argparse


# Select the compute device once at import time; fall back to CPU when
# no CUDA-capable GPU is available.
if cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'
print(cuda.is_available())



"""
@ author: tianpeng wu
@ concat: tianpengwu@outlook.com
"""


class SCZ_DATA():
    """Load a labelled CSV and encode its string categories as integers.

    Expects a CSV with at least the columns ``content`` (text) and
    ``category`` (label).  Labels are mapped to consecutive integers in
    first-seen order; the mapping lives in ``self.encode_dict`` so it is
    shared across successive calls to :meth:`data` (e.g. train then dev).
    """

    def __init__(self):
        # label string -> integer id, built lazily in first-seen order
        self.encode_dict = {}

    def encode_cat(self, x):
        """Return the integer id for label ``x``, assigning a new id if unseen."""
        # setdefault evaluates len() before inserting, so ids are 0, 1, 2, ...
        return self.encode_dict.setdefault(x, len(self.encode_dict))

    def data(self, path):
        """Read ``path`` and return a DataFrame with TITLE/CATEGORY/ENCODE_CAT.

        :param path: anything ``pd.read_csv`` accepts (path or file-like).
        :return: DataFrame with columns TITLE, CATEGORY, ENCODE_CAT.
        """
        df = pd.read_csv(path)
        # Keep only the text and label columns, renamed to the names the
        # Triage dataset expects.
        df = df[['content', 'category']].rename(
            columns={'content': 'TITLE', 'category': 'CATEGORY'})
        df['ENCODE_CAT'] = df['CATEGORY'].apply(self.encode_cat)
        self.df = df  # kept as an attribute for backward compatibility
        return self.df


class Triage(Dataset):
    """Torch Dataset over a DataFrame with ``TITLE`` text and ``ENCODE_CAT`` labels.

    Each item is tokenized on the fly, padded/truncated to ``max_len``,
    and returned as long tensors ready for a BERT-style model.
    """

    def __init__(self, dataframe, tokenizer, max_len):
        self.len = len(dataframe)
        self.data = dataframe
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __getitem__(self, index):
        # Normalise all whitespace runs to single spaces before tokenizing.
        title = " ".join(str(self.data.TITLE[index]).split())
        inputs = self.tokenizer.encode_plus(
            title,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            # 'padding' replaces the deprecated pad_to_max_length=True flag
            # (same behavior: pad every sequence out to max_length).
            padding='max_length',
            return_token_type_ids=True,
            truncation=True
        )
        ids = inputs['input_ids']
        mask = inputs['attention_mask']

        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(mask, dtype=torch.long),
            'targets': torch.tensor(self.data.ENCODE_CAT[index], dtype=torch.long)
        }

    def __len__(self):
        return self.len


# Creating the dataset and dataloader for the neural network

# train_size = 0.8
# train_dataset=df.sample(frac=train_size,random_state=200)
# test_dataset=df.drop(train_dataset.index).reset_index(drop=True)
# train_dataset = train_dataset.reset_index(drop=True)
# Defining some key variables that will be used later on in the training

# Creating the customized model, by adding a drop out and a dense layer on top of distil bert to get the final output for the model.


class DistillBERTClass(torch.nn.Module):
    """BERT encoder with a small classification head.

    Despite the legacy name, this wraps a full ``BertModel`` (not DistilBERT).
    The [CLS] position of the last hidden state is passed through
    Linear -> ReLU -> Dropout -> Linear to produce raw class logits.

    :param pretrained_model: path or hub name for ``BertModel.from_pretrained``
    :param num_classes: size of the output layer (default 3, matching the
        previous hard-coded value, so existing callers are unaffected)
    """

    def __init__(self, pretrained_model, num_classes=3):
        super(DistillBERTClass, self).__init__()
        self.l1 = BertModel.from_pretrained(pretrained_model)
        # 768 is BERT-base's hidden size — adjust if a different encoder is used.
        self.pre_classifier = torch.nn.Linear(768, 768)
        self.dropout = torch.nn.Dropout(0.3)
        self.classifier = torch.nn.Linear(768, num_classes)

    def forward(self, input_ids, attention_mask):
        output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = output_1[0]   # last hidden state, indexed [:, 0] below
        pooler = hidden_state[:, 0]  # representation of the first ([CLS]) token
        pooler = self.pre_classifier(pooler)
        pooler = torch.nn.ReLU()(pooler)
        pooler = self.dropout(pooler)
        output = self.classifier(pooler)
        return output                # raw logits: (batch, num_classes)


# Function to calcuate the accuracy of the model

def calcuate_accu(big_idx, targets):
    """Count how many predicted class indices match their targets."""
    matches = big_idx == targets
    return matches.sum().item()


# Defining the training function on the 80% of the dataset for tuning the distilbert model

def train_model(epoch, model, training_loader, loss_function, optimizer, writer):
    """Train ``model`` for one epoch over ``training_loader``.

    Logs running loss/accuracy every 100 batches to ``writer`` and prints
    epoch totals at the end.

    :param epoch: zero-based epoch index (used for logging only)
    :param model: classifier returning logits of shape (batch, num_classes)
    :param training_loader: yields dicts with 'ids', 'mask', 'targets'
    :param loss_function: e.g. ``torch.nn.CrossEntropyLoss`` over the logits
    :param optimizer: torch optimizer over ``model.parameters()``
    :param writer: tensorboardX ``SummaryWriter``
    """
    tr_loss = 0
    n_correct = 0
    nb_tr_steps = 0
    nb_tr_examples = 0
    model.train()
    steps_per_epoch = len(training_loader)
    for step, data in tqdm(enumerate(training_loader, 0)):
        ids = data['ids'].to(device, dtype=torch.long)
        mask = data['mask'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.long)

        outputs = model(ids, mask)
        loss = loss_function(outputs, targets)
        tr_loss += loss.item()
        big_val, big_idx = torch.max(outputs.data, dim=1)
        n_correct += calcuate_accu(big_idx, targets)

        nb_tr_steps += 1
        nb_tr_examples += targets.size(0)
        if step % 100 == 0:
            # NOTE: these are running averages over the whole epoch so far,
            # not window averages of just the last 100 steps.
            loss_step = tr_loss / nb_tr_steps
            accu_step = (n_correct * 100) / nb_tr_examples
            print(f"Training Loss per 100 steps: {loss_step}")
            print(f"Training Accuracy per 100 steps: {accu_step}")
            # Bug fix: supply a monotonically increasing global_step —
            # without it every add_scalar point lands on the same x-value.
            global_step = epoch * steps_per_epoch + step
            writer.add_scalar('loss', loss_step, global_step)
            writer.add_scalar('accuracy', accu_step, global_step)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print(f'The Total Accuracy for Epoch {epoch}: {(n_correct * 100) / nb_tr_examples}')
    epoch_loss = tr_loss / nb_tr_steps
    epoch_accu = (n_correct * 100) / nb_tr_examples
    print(f"Training Loss Epoch: {epoch_loss}")
    print(f"Training Accuracy Epoch: {epoch_accu}")
    return


def valid_model(model, testing_loader, loss_function):
    """Evaluate ``model`` on ``testing_loader`` and return accuracy (percent).

    Runs in eval mode with gradients disabled; prints the epoch-level
    validation loss and accuracy.

    :param model: classifier returning logits of shape (batch, num_classes)
    :param testing_loader: yields dicts with 'ids', 'mask', 'targets'
    :param loss_function: e.g. ``torch.nn.CrossEntropyLoss``
    :return: accuracy over the whole loader, as a percentage
    """
    tr_loss = 0
    n_correct = 0
    nb_tr_steps = 0
    nb_tr_examples = 0
    model.eval()
    with torch.no_grad():
        for _, data in tqdm(enumerate(testing_loader, 0)):
            ids = data['ids'].to(device, dtype=torch.long)
            mask = data['mask'].to(device, dtype=torch.long)
            targets = data['targets'].to(device, dtype=torch.long)
            # Bug fix: the previous .squeeze() collapsed the batch dimension
            # whenever a batch held a single example, which broke both
            # torch.max(..., dim=1) and the loss computation.
            outputs = model(ids, mask)
            loss = loss_function(outputs, targets)
            tr_loss += loss.item()
            big_val, big_idx = torch.max(outputs.data, dim=1)
            n_correct += calcuate_accu(big_idx, targets)

            nb_tr_steps += 1
            nb_tr_examples += targets.size(0)
    epoch_loss = tr_loss / nb_tr_steps
    epoch_accu = (n_correct * 100) / nb_tr_examples
    print(f"Validation Loss Epoch: {epoch_loss}")
    print(f"Validation Accuracy Epoch: {epoch_accu}")
    return epoch_accu


def train(args):
    """End-to-end training entry point: data, model, training loop, checkpoints.

    Reads the train/dev CSVs, fine-tunes a BERT classifier for ``args.epochs``
    epochs, validates after each epoch, and saves the model and vocabulary
    under ``../logs/{task}/{epoch}/``.

    :param args: namespace produced by :func:`argument_parser`
    """
    MAX_LEN = args.max_len
    TRAIN_BATCH_SIZE = args.train_batch_size
    VALID_BATCH_SIZE = args.valid_batch_size
    EPOCHS = args.epochs
    LEARNING_RATE = args.learning_rate
    task = args.task

    # One SCZ_DATA instance so train and test share the same label encoding.
    scz_data = SCZ_DATA()

    # Path (or hub name) of the downloaded pretrained model.
    pretrained_model = args.pretrained_model
    tokenizer = BertTokenizer.from_pretrained(pretrained_model)

    train_dataset = scz_data.data(args.train_data)
    test_dataset = scz_data.data(args.test_data)

    print("TRAIN Dataset: {}".format(train_dataset.shape))
    print("TEST Dataset: {}".format(test_dataset.shape))

    training_set = Triage(train_dataset, tokenizer, MAX_LEN)
    testing_set = Triage(test_dataset, tokenizer, MAX_LEN)

    train_params = {'batch_size': TRAIN_BATCH_SIZE,
                    'shuffle': True,
                    'num_workers': 0
                    }

    test_params = {'batch_size': VALID_BATCH_SIZE,
                   'shuffle': True,
                   'num_workers': 0
                   }

    training_loader = DataLoader(training_set, **train_params)
    testing_loader = DataLoader(testing_set, **test_params)

    model = DistillBERTClass(pretrained_model)
    model.to(device)

    # Loss and optimizer for fine-tuning.
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=LEARNING_RATE)
    # NOTE(review): TensorBoard logs go to logs/{task} while checkpoints go
    # to ../logs/{task} below — confirm whether that split is intentional.
    writer = SummaryWriter(F'logs/{task}')
    for epoch in range(EPOCHS):
        train_model(epoch, model, training_loader, loss_function, optimizer, writer)
        print('This is the validation section to print the accuracy and see how it performs')
        print(
            'Here we are leveraging on the dataloader crearted for the validation dataset, the approcah is using more of pytorch')
        acc = valid_model(model, testing_loader, loss_function)
        print("Accuracy on test data = %0.2f%%" % acc)

        # Save the model and vocabulary for re-use, one directory per epoch.
        # Both files live in the same directory, so one makedirs suffices.
        output_model_file = F'../logs/{args.task}/{epoch}/pytorch_model.bin'
        output_vocab_file = F'../logs/{args.task}/{epoch}/vocab.bin'
        os.makedirs(os.path.dirname(output_model_file), exist_ok=True)
        torch.save(model, output_model_file)
        tokenizer.save_vocabulary(output_vocab_file)

        print(F'All files saved epoch:{epoch}')


def argument_parser():
    """Build the CLI parser for the SCZ_DIGDATA training script.

    Bug fix: the numeric options now carry ``type=int``/``type=float``.
    Without them, any value passed on the command line arrived as a string
    (e.g. ``--epochs 5`` gave ``'5'``), crashing ``range(EPOCHS)`` and the
    optimizer's learning rate.  Defaults are unchanged.
    """
    parser = argparse.ArgumentParser(description='SCZ_DIGDATA')
    parser.add_argument("--train_data", default='../data/train.csv')
    parser.add_argument("--test_data", default='../data/dev.csv')
    parser.add_argument("--max_len", type=int, default=512)
    parser.add_argument("--task", default='test1')
    parser.add_argument("--train_batch_size", type=int, default=4)
    parser.add_argument("--valid_batch_size", type=int, default=2)
    parser.add_argument("--pretrained_model", default="../models/bert-base-chinese")
    parser.add_argument("--epochs", type=int, default=1000)
    parser.add_argument("--learning_rate", type=float, default=1e-05)
    return parser


if __name__ == '__main__':
    # Script entry point: parse CLI options and run the full training loop.
    args = argument_parser().parse_args()
    train(args)

