import math
import os
import random
import numpy
import time
import transformers

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, random_split
from utils import BertDataset, Tokenizer
# from lcf_bert import LCF_BERT
# Time-derived fallback seed; replaced by args["seed"] in __main__ when that is not None.
seed = int(time.time()) % 1000000
# Hyper-parameters and runtime configuration for fine-tuning BERT_SPC.
args = {
    "lr": 2e-5,  # Adam learning rate
    "n_epoch": 15,  # number of training epochs
    "batch_size": 16,  # DataLoader batch size for training and validation
    "max_len": 85,  # maximum token length, passed to Tokenizer
    "n_output": 3,  # number of output classes (classifier head out-features)
    "pretrained_bert_name": "bert-base-uncased",  # HF checkpoint for BertModel/Tokenizer
    "device": "cuda:0",  # device all tensors and the model are moved to
    "seed": 628980,  # fixed RNG seed; set to None to use the time-derived seed above
    "valset_ratio": 0,  # fraction of trainset held out for validation (0 disables)
    "l2_coef": 0.0004,  # L2 regularization, used as Adam weight_decay
    "bert_dim": 768,  # BERT hidden size (classifier head in-features)
    "dropout": 0.1,  # dropout probability before the classifier head
}

class BERT_SPC(nn.Module):
    """Sentence-pair classifier head on top of BERT.

    Pipeline: BERT pooled output -> dropout -> linear projection to
    ``args["n_output"]`` logits.
    """

    def __init__(self, bert, args):
        super().__init__()
        self.bert = bert
        self.dropout = nn.Dropout(args["dropout"])
        self.dense = nn.Linear(args["bert_dim"], args["n_output"])

    def forward(self, inputs):
        # inputs[0] are the token indices, inputs[1] the segment (token type) ids.
        token_ids = inputs[0]
        segment_ids = inputs[1]
        # NOTE(review): assumes self.bert returns a (sequence_output,
        # pooled_output) tuple, i.e. the pre-4.x transformers calling
        # convention — confirm against the installed transformers version.
        _, pooled = self.bert(token_ids, token_type_ids=segment_ids)
        return self.dense(self.dropout(pooled))


class Model(object):
    """Wraps a BERT_SPC classifier with seeding, training, validation,
    checkpoint loading, and test-set prediction helpers.

    Fix vs. original: ``train`` and ``validate`` previously read the
    module-level ``args`` global instead of ``self.args``, which silently
    ignored any config passed to the constructor.
    """

    def __init__(self, args):
        self.args = args
        bert = transformers.BertModel.from_pretrained(args["pretrained_bert_name"])
        self.model = BERT_SPC(bert, args).to(args["device"])
        self.do_validation = False

    def seed(self, seed):
        """Seed every RNG source so experiment results are reproducible.

        NOTE(review): setting PYTHONHASHSEED after interpreter start does not
        change the current process's hash randomization; it only affects
        subprocesses.
        """
        random.seed(seed)
        numpy.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        os.environ["PYTHONHASHSEED"] = str(seed)

    def train(self, dataset):
        """Fine-tune the model on ``dataset``.

        When ``self.args["valset_ratio"] > 0``, holds out that fraction for
        validation and checkpoints the best validation accuracy under
        ``state_dict/``.  The fully-trained weights are always saved under
        ``state_dict/fully_trained/``.

        Returns:
            Path of the fully-trained checkpoint.
        """
        # Adam with L2 regularization expressed as weight_decay; standard
        # cross-entropy loss over the class logits.
        optimizer = torch.optim.Adam(
            self.get_trainable_parameters(),
            lr=self.args["lr"],
            weight_decay=self.args["l2_coef"],
        )
        loss_fn = torch.nn.CrossEntropyLoss()

        do_validation = False

        trainset = dataset
        # Carve out a validation split when requested.
        if self.args["valset_ratio"] > 0:
            size = int(len(trainset) * self.args["valset_ratio"])
            trainset, valset = random_split(trainset, (len(trainset) - size, size))
            do_validation = True
            self.do_validation = True

        train_data = DataLoader(trainset, batch_size=self.args["batch_size"], shuffle=True)
        if do_validation:
            val_data = DataLoader(valset, batch_size=self.args["batch_size"], shuffle=False)
            max_val_acc = 0
            max_val_epoch = 0

        # Re-initialize all non-BERT parameters before training.
        self.reset()

        path = None
        global_step = 0
        for i_epoch in range(self.args["n_epoch"]):
            print("=========== Epoch {} ===========".format(i_epoch))
            epoch_correct = 0
            epoch_total = 0
            epoch_loss = 0

            self.model.train()
            for i_batch, batch in enumerate(train_data):
                global_step += 1
                optimizer.zero_grad()

                # Batch convention: every element except the last is a model
                # input; the last element is the label tensor.
                inputs = [batch[i].to(self.args["device"]) for i in range(len(batch) - 1)]
                out = self.model(inputs)
                label = batch[-1].to(self.args["device"])

                loss = loss_fn(out, label)

                loss.backward()
                optimizer.step()

                # Running epoch-level loss and accuracy, logged every 10 steps.
                epoch_correct += (torch.argmax(out, -1) == label).sum().item()
                epoch_total += len(out)
                epoch_loss += loss.item() * len(out)
                if global_step % 10 == 0:
                    print("batch {}: training loss {:.4f}, training acc {:.4f}".format(
                        global_step, epoch_loss / epoch_total, epoch_correct / epoch_total))

            # At the end of each epoch, evaluate on the validation split and
            # checkpoint the best-performing weights so far.
            if do_validation:
                val_acc = self.validate(val_data)
                print(">>> Validation acc: {:.4f}".format(val_acc))
                if val_acc > max_val_acc:
                    max_val_acc = val_acc
                    max_val_epoch = i_epoch
                    if not os.path.exists("state_dict"):
                        os.mkdir("state_dict")
                    cur_time = "_".join(time.ctime().split(" ")[-2:])
                    # NOTE: `seed` here is the module-level seed variable.
                    path = "state_dict/seed_{}_time_{}_acc_{}".format(seed, cur_time, round(val_acc, 4))
                    torch.save(self.model.state_dict(), path)

        # Always save the final (fully trained) weights as well; this path is
        # what gets returned, even when a validation checkpoint exists.
        os.makedirs("./state_dict/fully_trained", exist_ok=True)
        path = "./state_dict/fully_trained/seed_{}_time_{}".format(
            seed, "_".join(time.ctime().split(" ")[-2:]))
        torch.save(self.model.state_dict(), path)

        return path

    def validate(self, val_data: DataLoader):
        """Return the accuracy of the current model over ``val_data``."""
        correct, total = 0, 0
        self.model.eval()
        with torch.no_grad():
            for i_batch, batch in enumerate(val_data):
                inputs = [batch[i].to(self.args["device"]) for i in range(len(batch) - 1)]
                out = self.model(inputs)
                label = batch[-1].to(self.args["device"])

                correct += (torch.argmax(out, -1) == label).sum().item()
                total += len(out)

        return correct / total

    def get_trainable_parameters(self):
        """Yield the parameters of ``self.model`` that require gradients
        (i.e. the ones fine-tuning should update)."""
        return filter(lambda x: x.requires_grad, self.model.parameters())

    def reset(self):
        """Re-initialize every trainable non-BERT parameter: Xavier-uniform
        for matrices, uniform in [-1/sqrt(n), 1/sqrt(n)] for vectors."""
        for child in self.model.children():
            if not isinstance(child, transformers.BertModel):  # skip bert params
                for p in child.parameters():
                    if p.requires_grad:
                        if len(p.shape) > 1:
                            torch.nn.init.xavier_uniform_(p)
                        else:
                            stdv = 1. / math.sqrt(p.shape[0])
                            torch.nn.init.uniform_(p, a=-stdv, b=stdv)

    def load(self, path, from_validation=False):
        """Load model weights.

        When ``from_validation`` is True, ``path`` is ignored and the
        checkpoint with the highest accuracy (parsed from the trailing
        ``_acc_<value>`` filename suffix) under ``./state_dict/`` is loaded.
        """
        if from_validation:
            best_acc = 0
            best_path = None
            for name in os.listdir("./state_dict/"):
                path = os.path.join("./state_dict/", name)
                if os.path.isfile(path):
                    acc = float(path.split("_")[-1])
                    if acc > best_acc:
                        best_path = path
                        best_acc = acc
            print("Loading from {}...".format(best_path))
            self.model.load_state_dict(torch.load(best_path))
        else:
            print("Loading from {}...".format(path))
            self.model.load_state_dict(torch.load(path))

    def test(self, test_dataset):
        """Predict labels for ``test_dataset`` and write one prediction per
        line to ./assets/181220014.txt.

        NOTE(review): predictions are shifted by -1 (argmax - 1), presumably
        mapping class indices {0,1,2} to labels {-1,0,1} — confirm against the
        dataset's label encoding.  Unlike training, ALL batch elements are
        treated as inputs (the test set carries no label).
        """
        test_data = DataLoader(test_dataset, batch_size=16, shuffle=False)
        with open("./assets/181220014.txt", "w+") as fp:
            for i_batch, batch in enumerate(test_data):
                inputs = [batch[i].to(self.args["device"]) for i in range(len(batch))]
                out = self.model(inputs)

                label = (torch.argmax(out, -1) - 1).cpu().numpy().tolist()
                label = [str(i) + "\n" for i in label]
                fp.writelines(label)





if __name__ == "__main__":

    print(">>>>>>>>>>>>>>>>>>>>> Loading and converting datasets <<<<<<<<<<<<<<<<<<<<")
    datasets = {
        "train": "./assets/train.txt", 
        "test": "./assets/test.txt", 
    }
    bert_datasets = {}
    tokenizer = Tokenizer(args["max_len"], args["pretrained_bert_name"])
    # Renamed loop variable from `type` (shadowed the builtin) to `split`.
    # The third BertDataset argument flags the unlabeled test split.
    for split, path in datasets.items():
        bert_datasets[split] = BertDataset(path, tokenizer, split == "test")
    print("Datasets loaded.")
    model = Model(args)
    # Prefer the fixed seed from the config; otherwise fall back to the
    # time-derived module-level seed.  (Fixed `!= None` -> `is not None`.)
    if args["seed"] is not None:
        seed = args["seed"]
        model.seed(args["seed"])
    else:
        model.seed(seed)

    print(">>>>>>>>>>>>>>>>>>>>> Training model <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    model.train(bert_datasets["train"])
    print("Training completed.")

    print(">>>>>>>>>>>>>>>>>>>>> Testing <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    # NOTE(review): hard-coded checkpoint name — it will not match the file
    # just produced by model.train() above; consider using train()'s return
    # value instead.
    model_name = "seed_628980_time_20:05:55_2020"
    path_to_model = os.path.join("./state_dict/fully_trained/", model_name)
    model.load(path_to_model)
    model.test(bert_datasets["test"])
    print("predictions generated in ./assets/181220014.txt")