import torch
from torch.cuda.random import manual_seed_all
import torch.optim as optim
import torch.nn.functional as F
from torch.serialization import load
from torch.utils.data import DataLoader
import numpy as np
import os
import random
import copy

import sys
sys.path.append(".")
from utils import WordDictionary, EntityDictionary, NNERDataset, NNERTestDataset, load_data
from models import ModelArgs, BiLSTM

class TrainArg:
    """Static training configuration; read as class attributes, never instantiated."""
    device = "cuda"        # torch device string used for the model and all tensors
    epochs = 60            # number of full passes over the training set
    val_interval = 100     # run validation every N optimizer steps
    lr = 0.0005            # Adam learning rate
    batch_size = 32

class BiLSTMModel(object):
    """
    Trains and predicts nested NER using a BiLSTM with a multi-label
    classification head.

    Targets are per-token label distributions; training minimizes the KL
    divergence between the predicted distribution and the target distribution
    (see cal_loss).
    """
    def __init__(self, d1: "WordDictionary", d2: "EntityDictionary"):
        """
        d1 -- word dictionary (token -> id; must provide "[PAD]")
        d2 -- entity-tag dictionary (tag -> id; must provide "[PAD]")
        """
        self.d1 = d1
        self.d2 = d2
        self.device = TrainArg.device
        self.model = BiLSTM(d1, d2).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=TrainArg.lr)

        # Best validation loss seen so far and the matching model snapshot.
        self._best_loss = 1e18
        self._best_model = None

        self.global_step = 0

    def train(self, train_dataset, dev_dataset):
        """
        Run the full training loop, validating every TrainArg.val_interval steps.
        """
        self.model.train()

        # train_order is the visiting order over the training set; it is
        # reshuffled at the start of every epoch.
        train_order = np.arange(0, len(train_dataset), dtype=int)
        data_num = len(train_dataset)
        # Ceiling division: the partial final batch is visited exactly once
        # and no empty batch is produced when data_num divides evenly.
        batch_num = (data_num + TrainArg.batch_size - 1) // TrainArg.batch_size

        for i_epoch in range(TrainArg.epochs):
            print("========== Epoch {} ==========".format(i_epoch))
            np.random.shuffle(train_order)

            for B in range(batch_num):
                # BUGFIX: the slice must advance by batch_size per batch.
                # The original sliced train_order[B:B+batch_size], which
                # produced heavily overlapping batches and never reached the
                # tail of the shuffled order.
                start = B * TrainArg.batch_size
                end = min(start + TrainArg.batch_size, data_num)

                # Fetch id sequences and label sequences, then pad them.
                sent_batch, label_batch = zip(*[train_dataset[i] for i in train_order[start:end]])
                sent_tensor, sent_length = self.pad_and_tensorize(sent_batch, self.d1, "int")
                label_tensor, label_length = self.pad_and_tensorize(label_batch, self.d2, "array")

                scores = self.model(sent_tensor, sent_length)

                self.optimizer.zero_grad()
                loss = self.cal_loss(scores, label_tensor, label_length)
                loss.backward()

                self.optimizer.step()

                if self.global_step % TrainArg.val_interval == 0:
                    val_loss = self.validate(dev_dataset)
                    print("Global step {}, val loss {:.6f}".format(self.global_step, val_loss))

                self.global_step += 1

    def validate(self, dev_dataset):
        """
        Evaluate on the dev set; snapshot (and after warm-up, save) the model
        whenever the loss improves.  Returns the mean per-sample loss.
        """
        data_num = len(dev_dataset)
        batch_num = (data_num + TrainArg.batch_size - 1) // TrainArg.batch_size
        total_loss = 0
        self.model.eval()
        with torch.no_grad():
            for B in range(batch_num):
                # BUGFIX: advance by batch_size per batch (see train()).
                start = B * TrainArg.batch_size
                end = min(start + TrainArg.batch_size, data_num)
                sent_batch, label_batch = zip(*[dev_dataset[i] for i in range(start, end)])
                sent_tensor, sent_length = self.pad_and_tensorize(sent_batch, self.d1, "int")
                label_tensor, label_length = self.pad_and_tensorize(label_batch, self.d2, "array")

                scores = self.model(sent_tensor, sent_length)
                loss = self.cal_loss(scores, label_tensor, label_length)
                # BUGFIX: weight by the actual batch size so a partial final
                # batch does not skew the average.
                total_loss += loss.item() * len(sent_batch)
        val_loss = total_loss / data_num
        if val_loss < self._best_loss:
            self._best_loss = val_loss
            self._best_model = copy.deepcopy(self.model)
            if not os.path.exists("./state_dicts"):
                os.mkdir("./state_dicts")
            # Only persist after a warm-up period, so noisy early losses do
            # not churn checkpoint files.
            if self.global_step > 100 * TrainArg.val_interval:
                torch.save(self._best_model.state_dict(), "./state_dicts/val_loss_{:.6f}".format(val_loss))

        self.model.train()
        return val_loss

    def cal_loss(self, score, label, label_length):
        """
        Loss definition: KL divergence between the predicted label
        distribution and the target distribution, computed only over the
        non-padded positions of each sequence.
        """
        # mask[i, t, :] is True for the first label_length[i] timesteps of
        # sample i, i.e. the real (non-padding) tokens.
        mask = torch.zeros_like(label)
        for i in range(len(label_length)):
            mask[i, :label_length[i], :] = 1
        mask = mask == 1
        flatten_label = label.masked_select(mask).view(-1, len(self.d2))
        flatten_logit = score.masked_select(mask).view(-1, len(self.d2))
        assert flatten_logit.size(0) == flatten_label.size(0)

        # kl_div expects log-probabilities as input and probabilities as target.
        flatten_logit = F.log_softmax(flatten_logit, dim=1)
        flatten_label = F.softmax(flatten_label, dim=1)
        klloss = F.kl_div(flatten_logit, flatten_label, reduction="batchmean")

        return klloss

    def test(self, test_dataset, model_path=None):
        """
        Predict entities on the test set and write the final results to the
        181220014.txt file (one line per sentence, entities joined by "|").
        """
        print(">>>>>>>>>> Testing <<<<<<<<<<")

        # Choose the model to evaluate: the in-memory best snapshot if one
        # exists, otherwise a state dict loaded from model_path.
        toeval = self.model
        if self._best_model is not None:
            toeval = self._best_model
        else:
            if model_path is None:
                raise ValueError("no best model in memory and no model_path given")
            toeval.load_state_dict(torch.load(model_path))

        with torch.no_grad():
            with open("./assets/181220014.txt", "w") as fp:
                for i_data, sent in enumerate(test_dataset):
                    padded, lengths = self.pad_and_tensorize([sent], self.d1, "int")
                    score = toeval(padded, lengths)[0]
                    score = torch.softmax(score, dim=1)

                    rank = torch.argsort(score, descending=True, dim=1).tolist()

                    # For each token, collect candidate tags: always the
                    # top-1 tag, plus the top-2 tag when its probability
                    # reaches the 0.044 threshold (enables nested entities).
                    candidate = []
                    for i in range(len(score)):
                        candidate.append([])
                        candidate[i].append(rank[i][0])
                        if score[i][rank[i][1]] >= 0.044:
                            candidate[i].append(rank[i][1])

                    candidate_tag = [[self.d2[j] for j in candidate[i]] for i in range(len(candidate))]

                    # Decode entities from the candidate tags:
                    # "S#T" is a single-token entity; "B#T" starts a span that
                    # must be continued by "M#T" and closed by "E#T".
                    buffer = []
                    for i in range(len(candidate_tag)):
                        for j in range(len(candidate_tag[i])):
                            if candidate_tag[i][j].startswith("S#"):
                                buffer.append("%d,%d %s"%(i, i+1, candidate_tag[i][j][2:]))
                            elif candidate_tag[i][j].startswith("B#"):
                                target_tag = candidate_tag[i][j][2:]
                                ok = False
                                l = 1
                                k = i+1
                                while k < len(candidate):
                                    if "M#"+target_tag in candidate_tag[k]:
                                        l += 1
                                        k += 1
                                    elif "E#"+target_tag in candidate_tag[k]:
                                        l += 1
                                        k += 1
                                        ok = True
                                        break
                                    else:
                                        break
                                # Only emit spans that were properly closed.
                                if ok and l > 1:
                                    buffer.append("%d,%d %s"%(i, i+l, target_tag))
                    fp.write("|".join(buffer)+"\n")

    def pad_and_tensorize(self, seq, d, dtype):
        """
        Pad a batch of variable-length sequences to the batch maximum and
        convert to a tensor on self.device.

        dtype "int"/"float": sequences of scalar ids, padded with d["[PAD]"].
        dtype "array": sequences of per-token distributions, padded with a
        one-hot vector on the PAD tag.  Returns (tensor, original lengths).
        """
        length = [len(i) for i in seq]
        longest = max(length)
        PAD = d["[PAD]"]
        if dtype=="float":
            seq = [i+[PAD]*(longest - len(i)) for i in seq]
            seq = torch.FloatTensor(seq).to(self.device)
        elif dtype=="int":
            seq = [i+[PAD]*(longest - len(i)) for i in seq]
            seq = torch.LongTensor(seq).to(self.device)
        elif dtype=="array":
            # Deep-copy so the caller's nested lists are not mutated.
            seq = copy.deepcopy(seq)
            PAD_ARRAY = [0 for _ in range(len(d))]
            PAD_ARRAY[PAD] = 1
            for i in range(len(seq)):
                seq[i].extend([PAD_ARRAY[:] for _ in range(longest-length[i])])
            seq = torch.FloatTensor(seq).to(self.device)
        return seq, length


def setup_seed(seed):
    """
    Seed every RNG the pipeline touches so runs are reproducible.

    NOTE: setting PYTHONHASHSEED at runtime only affects child processes;
    the current interpreter's hash randomization was fixed at startup.
    """
    torch.manual_seed(seed)
    # Seeds the current and all other visible CUDA devices, so the extra
    # per-device torch.cuda.manual_seed(seed) call was redundant and is removed.
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)
    # Trade cuDNN autotuning speed for deterministic kernel selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    os.environ["PYTHONHASHSEED"] = str(seed)
                
if __name__ == "__main__":
    setup_seed(20000531)

    # Read the corpora (train/dev carry entity labels, test does not).
    train_sentences, train_entities = load_data("./assets/train.txt", True)
    dev_sentences, dev_entities = load_data("./assets/dev.txt", True)
    test_sentences, _ = load_data("./assets/test.txt", False)

    # Fit the dictionaries on every split we will need to encode.
    vocab = WordDictionary()
    tag_vocab = EntityDictionary()
    vocab.populate(train_sentences + dev_sentences + test_sentences)
    tag_vocab.populate(train_entities + dev_entities)

    # Wrap each split as a dataset.
    training_data = NNERDataset(train_sentences, train_entities, vocab, tag_vocab)
    validation_data = NNERDataset(dev_sentences, dev_entities, vocab, tag_vocab)
    testing_data = NNERTestDataset(test_sentences, vocab)

    # Train, then write predictions for the test split.
    model = BiLSTMModel(vocab, tag_vocab)
    model.train(training_data, validation_data)
    model.test(testing_data)

