import numpy as np
import matplotlib.pyplot as plt
import os
from src import util
import datetime
import math
from tqdm import tqdm
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torchtext.data import Dataset, Field, Example, BucketIterator
from sklearn.model_selection import train_test_split
from src.net.policy_selector import policy_selector
from src.DSNER.trainer_dsner import Trainer_PA_SL_DSNER
from bilstmner import BI_LSTMNER

from src.net.bilstm_crf import BiLSTM_CRF

# Absolute path to the project root, used below to resolve data file paths.
PROJECT_ROOT = util.get_project_root()

class DSNER_P_R_SL(BI_LSTMNER):
    """Distantly-supervised NER trainer (Partial annotation + Reinforcement
    selector, "P_R_SL") built on a BiLSTM-CRF tagger.

    Training mixes two corpora: an expert-labelled set (``train_p``) and a
    distantly/partially annotated set (``train_r``).  A policy-selector
    network decides, per sentence, whether a distantly-labelled (PA)
    instance is trusted enough to be added to the tagger's training data;
    the selector itself is optimized with a reward derived from the
    tagger's log-likelihood (see ``Trainer_PA_SL_DSNER``).
    """

    def __init__(self,model_name, EMBEDDING_DIM, EMBEDDING_filename,optim_sl,):
        """Initialize the trainer.

        :param model_name: identifier used in checkpoint file names.
        :param EMBEDDING_DIM: dimensionality of the word embeddings.
        :param EMBEDDING_filename: pretrained-embedding file (handled by
            the ``BI_LSTMNER`` base class).
        :param optim_sl: optimizer name for the selector network; one of
            'adam', 'adagrad' or 'sgd' (see ``create_initial_model``).
        """
        BI_LSTMNER.__init__(self,model_name, EMBEDDING_DIM, EMBEDDING_filename)
        self.optim_sl = optim_sl
        self.HIDDEN_DIM = 256  # LSTM hidden size of the tagger
        self.lr = 1e-3         # learning rate for the selector optimizer
        self.weight_decay = 1e-4

    def build_data(self):
        '''
        Build the training data from the two corpus files.

        Both files are CHAR<TAB>...<TAB>TAG, one character per line; a
        sentence ends when the character is a terminal punctuation mark.

        :return: (datas, datas_p, word_dict) where
            ``datas``   -- all sentences as [chars, tags, sign]; sign is a
                           one-element list: [0] for expert (P) data,
                           [1] for distantly-supervised (R) data;
            ``datas_p`` -- expert sentences only, as [chars, tags];
            ``word_dict`` -- char -> integer-index vocabulary.
        '''
        datas = []
        datas_p = []
        sample_x = []
        sample_y = []
        sign = []
        vocabs = {'UNK'}
        for line in open(self.train_p_path, encoding="UTF-8"):
            line = line.rstrip().split('\t')
            if not line:
                continue
            char = line[0]
            if not char:
                continue
            cate = line[-1]
            sample_x.append(char)
            sample_y.append(cate)
            vocabs.add(char)
            # Sentence boundary: Chinese/ASCII terminal punctuation.
            if char in ['。', '?', '!', '！', '？']:
                sign.append(0)  # 0 marks an expert-annotated sentence
                datas.append([sample_x, sample_y, sign])
                datas_p.append([sample_x, sample_y])
                sample_x = []
                sample_y = []
                sign = []

        # NOTE(review): if the P file does not end with terminal
        # punctuation, the leftover sample_x/sample_y carry over into the
        # R loop below and the partial sentence is merged into R data --
        # confirm whether input files always end on a sentence boundary.
        for line in open(self.train_r_path, encoding="UTF-8"):
            line = line.rstrip().split('\t')
            if not line:
                continue
            char = line[0]
            if not char:
                continue
            cate = line[-1]
            sample_x.append(char)
            sample_y.append(cate)
            vocabs.add(char)
            if char in ['。', '?', '!', '！', '？']:
                sign.append(1)  # 1 marks a distantly-supervised (PA) sentence
                datas.append([sample_x, sample_y, sign])
                sample_x = []
                sample_y = []
                sign = []

        if os.path.exists(self.vocab_path):
            word_dict = self.load_worddict()
        else:
            # NOTE(review): set iteration order is nondeterministic across
            # runs, so the first-run index assignment is not reproducible;
            # subsequent runs reload the persisted vocab file instead.
            word_dict = {wd: index for index, wd in enumerate(list(vocabs))}
            self.write_file(list(vocabs), self.vocab_path)
        return datas,datas_p, word_dict

    def create_initial_model(self):
        """Construct the tagger, the selector, their optimizers and wrap
        them in a ``Trainer_PA_SL_DSNER``.

        :return: a ``Trainer_PA_SL_DSNER`` instance (NOT a bare nn.Module).
        """
        tagger_model = BiLSTM_CRF(word_size=self.VOCAB_SIZE,
                           label_size= self.NUM_CLASSES,
                           word_ebd_dim=self.EMBEDDING_DIM,
                           lstm_layers=2,
                           dropout=0.5,
                           lstm_hsz = self.HIDDEN_DIM,
                           batch_size = self.batch_size,
                           embedding_matrix=self.embedding_matrix,
                           use_cuda=self.use_GPU)

        # Selector input is 4*HIDDEN_DIM -- presumably the concatenated
        # bidirectional sentence representation; confirm against
        # policy_selector / get_representation.
        sl_model = policy_selector(4 * self.HIDDEN_DIM, self.HIDDEN_DIM)

        # Optimizer used by the original (reference) implementation:
        # optimizer_tagger = optim.RMSprop(filter(lambda p: p.requires_grad, tagger_model.parameters()), lr=self.lr,
        #                                  weight_decay=self.weight_decay)

        # NOTE(review): tagger LR is hard-coded to 0.01 here and ignores
        # self.lr, unlike the selector optimizers below.
        optimizer_tagger = optim.SGD(tagger_model.parameters(), lr=0.01, weight_decay=1e-4)

        # Per-element BCE so the trainer can weight each selection decision
        # by its own reward.
        criterion_sl = torch.nn.BCELoss(reduction='none')

        if self.optim_sl == 'adam':
            optimizer_sl = optim.Adam(filter(lambda p: p.requires_grad, sl_model.parameters()), lr=self.lr)
        elif self.optim_sl == 'adagrad':
            optimizer_sl = optim.Adagrad(filter(lambda p: p.requires_grad, sl_model.parameters()), lr=self.lr)
        elif self.optim_sl == 'sgd':
            optimizer_sl = optim.SGD(filter(lambda p: p.requires_grad, sl_model.parameters()), lr=self.lr,
                                     weight_decay=self.weight_decay)
        # NOTE(review): any other optim_sl value leaves optimizer_sl
        # unbound and raises NameError on the next line -- consider an
        # explicit else/raise.

        # if partial = False it will apply only selection with normal crf
        trainer = Trainer_PA_SL_DSNER(tagger_model, sl_model, optimizer_tagger, optimizer_sl, criterion_sl,
                                      partial=True)


        return trainer


    def load_model(self, model_file):
        '''
        Load a model, falling back to the latest checkpoint or a freshly
        initialized trainer.

        :param model_file: path to a checkpoint file, or None to pick the
            newest checkpoint under ``self.model_root`` (creating the
            directory if needed).
        :return: a ``Trainer_PA_SL_DSNER`` (checkpoints store either a
            tagger state_dict or a pickled trainer object).
        '''
        if model_file is None:
            checkpoint_dir = self.model_root
            if (not os.path.exists(checkpoint_dir)):
                os.makedirs(checkpoint_dir)

            model_file = util.latest_checkpoint(checkpoint_dir)

        if model_file is not None:
            print("loading >>> ", model_file, " ...")
            load_object = torch.load(model_file)
            if isinstance(load_object, dict):
                # Checkpoint is a state_dict: rebuild the trainer and load
                # the tagger weights into it.
                # NOTE(review): torch.load is called a second time here;
                # load_state_dict(load_object) would avoid re-reading the file.
                model = self.create_initial_model()
                model.tagger_model.load_state_dict(torch.load(model_file))
            else:
                model = load_object
        else:
            model = self.create_initial_model()
        return model

    def create_single_dataset(self, datas, batch_size):
        """
        Build a single torchtext BucketIterator over ``datas`` without a
        train/test split.

        :param datas: list of [text, label] or [text, label, sign] samples
            (the ``sign`` element, when present, is ignored).
        :param batch_size: mini-batch size.
        :return: (iterator, number_of_examples); batches expose
            ``batch.text`` as (padded_ids, lengths) and ``batch.label``.
        """
        # Tokens are already numericalized by x_tokenize/y_tokenize, hence
        # use_vocab=False; pad with index 0 on both fields.
        TEXT = Field(sequential=True, use_vocab=False, batch_first=True, fix_length=None,
                     eos_token=None, init_token=None, include_lengths=True, pad_token=0)
        LABEL = Field(sequential=True, use_vocab=False, batch_first=True, fix_length=None,
                      eos_token=None, init_token=None, include_lengths=False, pad_token=0, is_target=True)

        fields = [
            ("label", LABEL), ("text", TEXT)]

        examples = []
        # NOTE(review): the two branches below are identical except for
        # unpacking the unused ``sign``; they could be collapsed.
        if len(datas[0]) == 2:
            for text, label in datas:
                x_token = self.x_tokenize(text)
                y_token = self.y_tokenize(label)
                new_example = Example.fromlist([y_token, x_token], fields)
                examples.append(new_example)
        elif len(datas[0]) == 3:
            for text, label, sign in datas:
                x_token = self.x_tokenize(text)
                y_token = self.y_tokenize(label)
                new_example = Example.fromlist([y_token, x_token], fields)
                examples.append(new_example)
        else:
            print('error')

        data = Dataset(examples, fields)

        data_iter = BucketIterator(data,
                                   batch_size=batch_size,
                                   device=self.device,
                                   sort_key=lambda x: len(x.text),  # field sorted by len
                                   sort_within_batch=True,
                                   repeat=False)

        return data_iter, len(examples)

    def list_difference(self,list_1,list_2):
        """
        Set-style difference of two lists: elements of list_1 not in list_2.

        Items here are (unhashable) lists, so membership is O(len(list_2))
        and the whole operation is O(n*m); element order of list_1 is kept.

        :param list_1: source list.
        :param list_2: items to exclude.
        :return: new list with the excluded items removed.
        """
        dif_list = []
        for i in list_1:
            if i in list_2:
                continue
            else:
                dif_list.append(i)
        return dif_list

    def train_DSNER_P_R_model(self, train_p_file, train_r_file, batch_size, epochs=10):
        '''
        Train the tagger + selector.

        Phase 1 (epoch < 10): warm up on expert (P) data only, labelling
        every P sentence as "selected" (1) to pre-train the selector.
        Phase 2 (epoch >= 10): the selector filters PA sentences; chosen
        PA sentences join the expert ones as tagger training data.

        NOTE(review): the phase switch is hard-coded at 10 and the tqdm
        labels show "/10" and "/20" regardless of ``epochs``; with the
        default epochs=10 phase 2 never runs -- confirm intended usage.

        :param train_p_file: expert-annotated corpus file name under data/.
        :param train_r_file: distantly-supervised corpus file name under data/.
        :param batch_size: mini-batch size for all iterators.
        :param epochs: number of training epochs.
        :return: None (checkpoints are written each epoch).
        '''
        self.batch_size = batch_size
        # NOTE(review): '//' in these paths is harmless on most OSes but
        # os.path.join would be cleaner.
        self.train_p_path = PROJECT_ROOT + '//data//' + train_p_file
        self.train_r_path = PROJECT_ROOT + '//data//' + train_r_file

        dataset, dataset_p, self.word_dict = self.build_data()
        self.VOCAB_SIZE = len(self.word_dict)
        # Hold out 10% of the expert data for evaluation; remove those
        # held-out sentences from the combined P+R training pool.
        data_p_train, data_p_test = train_test_split(dataset_p, test_size=0.1, random_state=12)
        data_train = self.list_difference(dataset,data_p_test)
        test_p_iter, test_p_len = self.create_single_dataset(data_p_test,self.batch_size)

        # word_to_ix
        self.embedding_matrix = self.build_embedding_matrix()


        # ``model`` is a Trainer_PA_SL_DSNER wrapper, not a bare module.
        model = self.load_model(model_file=None)
        self.epoch = epochs
        if self.use_GPU:
            model.tagger_model.to(self.device)
            model.sl_model.to(self.device)
        for epoch in range(epochs):

            # Per-epoch selector bookkeeping (reset every epoch).
            PA_num_in_batch = 0  # select_pa_sample_num in this sample (number of PAs)
            y_label_in_batch = []  # all_pa_data action (0/1)
            PA_sentense_representation = []  # representation of every PA instance

            datas = []  # sentences the tagger will be trained on this epoch

            # Shuffle both pools each epoch.
            indices = torch.randperm(len(data_train))
            indices_p = torch.randperm(len(data_p_train))

            if epoch < 10:
                # --- Phase 1: expert data only; every sentence counts as
                # a positive selection example for the selector. ---
                # NOTE(review): math.floor(len(...)) is a no-op -- len()
                # is already an int.
                for start_index in tqdm(range(math.floor(len(data_p_train))),
                                    desc="Data_P Training Epoch {}/{}".format(epoch + 1, 10)):
                    s_length = []
                    sent = data_p_train[indices_p[start_index]][0]
                    token_sent = self.x_tokenize(sent)
                    token_sent = np.array([token_sent])
                    # NOTE(review): torch.cuda.LongTensor hard-requires a
                    # CUDA device and ignores self.use_GPU/self.device --
                    # this path crashes on CPU-only machines.
                    token_sent = torch.cuda.LongTensor(token_sent)
                    tags = data_p_train[indices_p[start_index]][1]
                    token_tags = self.y_tokenize(tags)
                    token_tags = torch.cuda.LongTensor(token_tags)
                    s_length.append(len(sent))
                    s_length = torch.cuda.LongTensor(s_length)
                    datas.append([sent, tags])

                    y_label_in_batch.append(1)
                    this_representation = model.get_representation(token_sent, token_tags, s_length)
                    PA_sentense_representation.append(this_representation)

                train_iter, train_len = self.create_single_dataset(datas, self.batch_size)

                with tqdm(train_iter, desc="SL training") as tq:
                    for step, batch in enumerate(tq):
                        word = batch.text[0]
                        label = batch.label
                        seq_lengths = batch.text[1]

                        '''
                        optimize the selector:
                        1. count reward: add all p(y|x) of dev_dataset and average
                        2. input1: average_reward
                        3. input2: all PA_sample in this step(0 or 1)
                        '''

                        # calculate reward as r=1/(|A_i| +|H_i|) *(sum(log p(z|x)) + sum(log p(y|x)) just for EXperts and PA that selector choose
                        # NOTE(review): the same batch reward is broadcast
                        # to every accumulated selection decision.
                        reward = model.get_reward(word, seq_lengths, label)
                        reward_list = [reward for i in range(len(y_label_in_batch))]
                        # @todo convert to cuda:
                        model.optimize_selector(PA_sentense_representation, y_label_in_batch, reward_list)

            else:

                # --- Phase 2: mixed pool; the selector filters PA data. ---
                # NOTE(review): math.ceil(len(...)) is a no-op as well.
                for start_index in tqdm(range(math.ceil(len(data_train))),
                                        desc="PA select Epoch {}/{}".format(epoch + 1, 20)):

                    s_length = []
                    sent = data_train[indices[start_index]][0]
                    token_sent = self.x_tokenize(sent)
                    token_sent = np.array([token_sent])
                    token_sent = torch.cuda.LongTensor(token_sent)
                    tags = data_train[indices[start_index]][1]
                    token_tags = self.y_tokenize(tags)
                    token_tags = torch.cuda.LongTensor(token_tags)
                    s_length.append(len(sent))
                    s_length = torch.cuda.LongTensor(s_length)
                    sign = data_train[indices[start_index]][2]

                    if sign[0] == 0:  # if it is the expert instance
                        datas.append([sent, tags])

                    elif sign[0] == 1:  # the PA instance
                        this_representation = model.get_representation(token_sent, token_tags, s_length)
                        PA_sentense_representation.append(this_representation)
                        action_point = model.select_action(
                            this_representation)  # Get the probablity fro selector for the sentence
                        if action_point > 0.5:
                            # Selector accepts this PA sentence for training.
                            datas.append([sent, tags])
                            PA_num_in_batch += 1
                            y_label_in_batch.append(1)
                        else:
                            y_label_in_batch.append(0)

                train_iter, train_len = self.create_single_dataset(datas, self.batch_size)

                if len(y_label_in_batch) > 0:
                    with tqdm(train_iter, desc="SL training") as tq:
                        for step, batch in enumerate(tq):
                            word = batch.text[0]
                            label = batch.label
                            seq_lengths = batch.text[1]

                            '''
                            optimize the selector:
                            1. count reward: add all p(y|x) of dev_dataset and average
                            2. input1: average_reward
                            3. input2: all PA_sample in this step(0 or 1)
                            '''

                            # calculate reward as r=1/(|A_i| +|H_i|) *(sum(log p(z|x)) + sum(log p(y|x)) just for EXperts and PA that selector choose
                            reward = model.get_reward(word, seq_lengths, label)
                            reward_list = [reward for i in range(len(y_label_in_batch))]
                            # @todo convert to cuda:
                            model.optimize_selector(PA_sentense_representation, y_label_in_batch, reward_list)


            # NOTE(review): the scheduler is re-created every epoch, so
            # ReduceLROnPlateau never accumulates patience across epochs
            # and effectively never reduces the LR -- move this above the
            # epoch loop to make it functional.
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.optimizer_tagger, mode='min',
                                                                   factor=0.1)

            # print('Epoch {}/{}'.format(epoch + 1, epochs))
            # print('-' * 80)
            model.tagger_model.train()
            # Train the tagger on the selected sentences for this epoch.
            total_loss = 0
            with tqdm(train_iter, desc="tagger_model training in Epoch {}/{}".format(epoch + 1, epochs)) as tq:
                for step, batch in enumerate(tq):
                    word = batch.text[0]
                    label = batch.label
                    # label_one_hot = torch.zeros(batch_size,self.NUM_CLASSES).scatter_(1,label,1)
                    seq_lengths = batch.text[1]

                    model.optimizer_tagger.zero_grad()
                    # use_decode=False: training only needs the CRF loss,
                    # not the decoded tag sequence.
                    loss, _ = model.tagger_model(word, label, seq_lengths, use_decode=False)
                    loss.backward()
                    model.optimizer_tagger.step()

                    running_loss = loss.item()
                    total_loss += running_loss

                    if step % 3 == 0:
                        tq.set_postfix(Loss=running_loss)

            scheduler.step(total_loss)

            running_loss = 0.0
            running_corrects = 0
            epoch_count = 0
            model.tagger_model.eval()
            # Evaluate on the held-out expert test split.
            # NOTE(review): evaluation runs without torch.no_grad(), so
            # gradients are still tracked -- wasteful though harmless.
            for step, batch in enumerate(test_p_iter):
                word = batch.text[0]
                label = batch.label
                seq_lengths = batch.text[1]
                """
                在训练时未实现源代码的损失函数
                """
                # (The bare string above is a leftover inline note: "the
                # original code's loss function was not implemented during
                # training." Kept verbatim; it has no runtime effect.)
                loss, preds = model.tagger_model(word, label, seq_lengths, use_decode=True)

                running_loss += loss.item() * word.size(0)
                running_corrects += torch.sum(preds == label)
                # Padding positions are included in the denominator, so
                # the reported accuracy is inflated on padded batches.
                epoch_count += label.size(0) * label.size(1)

            epoch_loss = running_loss / test_p_len
            epoch_acc = running_corrects.double() / epoch_count
            print('Epoch {}/{}, on test set loss = {:.4f}, accu = {:.4f} \n'.format(epoch + 1, epochs, epoch_loss,
                                                                                    epoch_acc))

            # Checkpoint the tagger weights (state_dict only -- the
            # selector is not persisted here).
            torch.save(model.tagger_model.state_dict(),
                       self.model_root + "/{}_{}_cp-{:04d}-{:.4f}-{:.4f}.pth".format(
                           self.model_name, self.HIDDEN_DIM, epoch + 1, epoch_loss, epoch_acc),
                       )


