import torch
import torch.optim as optim
from torch.serialization import load
from torch.utils.data import DataLoader
import numpy as np
import os
import random
import copy
import sys

sys.path.append(".")
from utils import NNERTestDataset, WordDictionary, EntityDictionary, NNERDataset, load_data
from models import BiLSTM_CRF

class TrainArg:
    """Hyper-parameters shared by training, validation and testing."""
    device = "cuda"      # device string passed to .to(); switch to "cpu" when no GPU
    epochs = 70          # number of passes over the training set
    val_interval = 100   # validate every this many optimizer steps
    lr = 0.0001          # Adam learning rate
    batch_size = 32      # sequences per mini-batch

class BiLSTM_CRFModel(object):
    """Wrapper around a BiLSTM-CRF network: training loop, validation,
    best-model checkpointing and test-time decoding to a result file."""

    def __init__(self, d1: "WordDictionary", d2: "EntityDictionary"):
        """
        Args:
            d1: word dictionary (token <-> id, provides "[PAD]").
            d2: entity/tag dictionary (label <-> id, provides "[PAD]"/"[BEG]"/"[END]").
        """
        self.d1 = d1
        self.d2 = d2
        self.device = TrainArg.device
        self.model = BiLSTM_CRF(d1, d2).to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=TrainArg.lr)

        self._best_loss = 1e18   # best validation loss seen so far
        self._best_model = None  # deep copy of the model at the best val loss

        self.global_step = 0

    def train(self, train_dataset, dev_dataset):
        """Train for TrainArg.epochs epochs, validating every TrainArg.val_interval steps."""
        self.model.train()

        # Traversal order over the training set; reshuffled at every epoch.
        train_order = np.arange(0, len(train_dataset), dtype=int)
        data_num = len(train_dataset)
        # Ceiling division: includes the final partial batch exactly once and
        # never produces an empty batch (the old `// bs + 1` could).
        batch_num = (data_num + TrainArg.batch_size - 1) // TrainArg.batch_size

        for i_epoch in range(TrainArg.epochs):
            print("========== Epoch {} ==========".format(i_epoch))
            np.random.shuffle(train_order)

            for B in range(batch_num):
                self.global_step += 1

                # BUGFIX: batches previously started at index B instead of
                # B * batch_size, so batches overlapped and most of the data
                # was never visited within an epoch.
                start = B * TrainArg.batch_size
                end = min(start + TrainArg.batch_size, data_num)

                # Fetch the (sentence-ids, label-ids) pairs and pad them.
                sent_batch, label_batch = zip(*[train_dataset[i] for i in train_order[start:end]])
                sent_tensor, sent_length = self.pad_and_tensorize(sent_batch, self.d1, "int")
                label_tensor, label_length = self.pad_and_tensorize(label_batch, self.d2, "int")

                scores = self.model(sent_tensor, sent_length)

                self.optimizer.zero_grad()
                loss = self.cal_loss(scores, label_tensor, label_length).to(self.device)
                loss.backward()
                self.optimizer.step()

                if self.global_step % TrainArg.val_interval == 0:
                    val_loss = self.validate(dev_dataset)
                    print("Global step {}, val loss {:.4f}".format(self.global_step, val_loss))

    def validate(self, dev_dataset):
        """Compute mean loss over the dev set; snapshot/persist the best model.

        Returns:
            Average per-sample validation loss (float).
        """
        data_num = len(dev_dataset)
        batch_num = (data_num + TrainArg.batch_size - 1) // TrainArg.batch_size
        total_loss = 0
        self.model.eval()
        with torch.no_grad():
            for B in range(batch_num):
                # BUGFIX: same off-by-batch slicing bug as in train() —
                # batches must start at B * batch_size, not at B.
                start = B * TrainArg.batch_size
                end = min(start + TrainArg.batch_size, data_num)
                sent_batch, label_batch = zip(*[dev_dataset[i] for i in range(start, end)])
                sent_tensor, sent_length = self.pad_and_tensorize(sent_batch, self.d1, "int")
                label_tensor, label_length = self.pad_and_tensorize(label_batch, self.d2, "int")

                scores = self.model(sent_tensor, sent_length)
                loss = self.cal_loss(scores, label_tensor, label_length)
                # Weight by the true batch size — the last batch may be smaller.
                total_loss += loss.item() * len(sent_batch)
        val_loss = total_loss / data_num

        if val_loss < self._best_loss:
            self._best_loss = val_loss
            self._best_model = copy.deepcopy(self.model)
            if not os.path.exists("./state_dicts"):
                os.mkdir("./state_dicts")
            # Only persist checkpoints after a warm-up period, to avoid
            # flooding the disk with early (rapidly-improving) snapshots.
            if self.global_step > 100 * TrainArg.val_interval:
                torch.save(self._best_model.state_dict(), "./state_dicts/val_loss_{:.4f}".format(val_loss))

        self.model.train()
        return val_loss

    def test(self, test_dataset, model_path=None):
        """
        Decode entities for the test set and write them to ./assets/181220014.txt,
        one sentence per line, entities formatted as "begin,end TYPE" joined by "|".

        Uses the in-memory best model when available; otherwise loads weights
        from `model_path`.
        """
        print(">>>>>>>>>> Testing <<<<<<<<<<")

        # Select the model to evaluate with.
        toeval = self.model
        if self._best_model is not None:
            toeval = self._best_model
        else:
            # BUGFIX: previously torch.load(None) would crash with an opaque
            # error when neither a best model nor a path was available.
            if model_path is None:
                raise ValueError("no best model in memory and no model_path given")
            toeval.load_state_dict(torch.load(model_path))
        toeval.eval()

        sentences = []
        tags = []
        with torch.no_grad():
            for sent in test_dataset:
                sent_tensor, _ = self.pad_and_tensorize([sent], self.d1, "int")
                label = toeval.test(sent_tensor)
                tag = [self.d2[i] for i in label]
                sentence = [self.d1[i] for i in sent_tensor[0].cpu().tolist()][:-1]
                # BUGFIX: was `sentences.append(sentences)` (list appended to itself).
                sentences.append(sentence)
                tags.append(tag)

        with open("./assets/181220014.txt", "w") as fp:
            for tag in tags:
                buffer = []
                i = 0
                while i < len(tag):
                    if tag[i] in ("O", "[UNK]", "[PAD]"):
                        i += 1
                    else:
                        begin = i
                        # Extend the span while consecutive tags share the same
                        # suffix after the B-/I- prefix (same entity type).
                        while i + 1 < len(tag) and tag[i][1:] == tag[i + 1][1:]:
                            i += 1
                        buffer.append("%d,%d %s" % (begin, i + 1, tag[i][2:]))
                        i += 1
                fp.write("|".join(buffer) + "\n")

    def pad_and_tensorize(self, seq, d, dtype):
        """
        Pad a batch of variable-length id sequences with d["[PAD]"] to a common
        length and convert to a tensor on self.device.

        Args:
            seq: iterable of lists of ids.
            d: dictionary providing the "[PAD]" id.
            dtype: "int" for LongTensor, "float" for FloatTensor.

        Returns:
            (padded tensor of shape [B, max_len], list of original lengths)
        """
        length = [len(i) for i in seq]
        longest = max(length)
        PAD = d["[PAD]"]
        seq = [i + [PAD] * (longest - len(i)) for i in seq]
        if dtype == "float":
            seq = torch.FloatTensor(seq).to(self.device)
        elif dtype == "int":
            seq = torch.LongTensor(seq).to(self.device)
        return seq, length

    def cal_loss(self, crf_score, label, label_length):
        """
        BiLSTM-CRF negative log-likelihood:
        (log-partition over all tag paths - gold-path score) / batch_size.
        See the BiLSTM-CRF paper for the forward-algorithm derivation.

        Args:
            crf_score: per-position transition+emission scores,
                shape [B, max_len, target_size, target_size] (prev_tag, cur_tag).
            label: gold tag ids, shape [B, max_len] (mutated in place by indexed()).
            label_length: list of true sequence lengths per sample.
        """
        PAD = self.d2["[PAD]"]
        BEG = self.d2["[BEG]"]
        END = self.d2["[END]"]

        mask = (label != PAD)
        batch_size, max_len = label.size()
        target_size = len(self.d2)
        # After indexed(), each element encodes (prev_tag, cur_tag) as
        # prev * target_size + cur — matching the flattened score layout below.
        label = self.indexed(label)

        # Gold-path score: pick the score of each gold transition, skipping padding.
        flatten_label = label.masked_select(mask)  # 1-D [B*L'] of non-pad positions
        flatten_score = crf_score.masked_select(
            mask.view(batch_size, max_len, 1, 1).expand_as(crf_score)  # drop padded steps
        ).view(-1, target_size**2).contiguous()
        label_score = flatten_score.gather(dim=1, index=flatten_label.unsqueeze(1)).sum()

        # Log-partition: forward algorithm accumulating log-sum-exp over all paths.
        tmp = torch.zeros(batch_size, target_size).to(self.device)
        for t in range(max_len):
            # Only update samples whose true length still covers step t.
            legal_sample_mask = [i > t for i in label_length]
            if t == 0:
                tmp[legal_sample_mask] = crf_score[legal_sample_mask, t, BEG, :]
            else:
                tmp[legal_sample_mask] = torch.logsumexp(
                    crf_score[legal_sample_mask, t, :, :] + tmp[legal_sample_mask].unsqueeze(2), dim=1
                )
        total_score = tmp[:, END].sum()

        loss = (-label_score + total_score) / batch_size

        return loss

    def indexed(self, label):
        """In place, fold each tag with its predecessor into a single index
        prev * len(d2) + cur; the first column is paired with [BEG]."""
        for col in range(label.size()[1] - 1, 0, -1):
            label[:, col] += label[:, col - 1] * len(self.d2)
        label[:, 0] += len(self.d2) * self.d2["[BEG]"]
        return label



def setup_seed(seed):
    """Seed every RNG in use (random, numpy, torch CPU/CUDA) and force
    deterministic cuDNN kernels, for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True

def main():
    """End-to-end pipeline: seed RNGs, load data, build dictionaries and
    datasets, then train the BiLSTM-CRF model and write test predictions."""
    setup_seed(181220014)

    # Load raw sentences/entities (the test split has no gold entities).
    train_sent, train_ent = load_data("./assets/train.txt", True)
    dev_sent, dev_ent = load_data("./assets/dev.txt", True)
    test_sent, _ = load_data("./assets/test.txt", False)

    # Build vocabularies over every split so test tokens are in-vocabulary.
    word_dict = WordDictionary()
    entity_dict = EntityDictionary()
    word_dict.populate(train_sent + dev_sent + test_sent)
    entity_dict.populate(train_ent + dev_ent)

    # Wrap the raw data into datasets.
    train_dataset = NNERDataset(train_sent, train_ent, word_dict, entity_dict)
    dev_dataset = NNERDataset(dev_sent, dev_ent, word_dict, entity_dict)
    test_dataset = NNERTestDataset(test_sent, word_dict)

    # Train, then write predictions for the test set.
    bilstm_crf = BiLSTM_CRFModel(word_dict, entity_dict)
    bilstm_crf.train(train_dataset, dev_dataset)
    bilstm_crf.test(test_dataset)


if __name__ == "__main__":
    main()

