from tqdm import tqdm
import torch
import math
import numpy as np

# Prefer the first CUDA device but fall back to CPU when CUDA is unavailable,
# so the module can still be imported/run on CPU-only machines.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class Trainer_PA_SL_DSNER(object):
    """Joint trainer for a CRF tagger and an instance selector (RL policy).

    The selector (``sl_model``) decides which distantly-supervised sentences
    to keep; the tagger's negative loss on a sentence serves as the
    selector's reward (REINFORCE-style policy-gradient training).

    Two optimizers are needed:
        1. ``optimizer_sl``     - for the selector network
        2. ``optimizer_tagger`` - for the encoder + CRF tagger

    Per step: compute the reward from the tagger (``get_reward``), compute
    the selector loss weighted by the reward, and update the selector
    parameters (``optimize_selector``).
    """

    def __init__(self, tagger_mdl, sl_mdl, optimizer_tagger, optimizer_sl, criterion_sl, partial):
        """
        :param tagger_mdl: BiLSTM(+CRF) sequence tagger; must expose
            ``bilstm`` and ``logistic`` submodules (see get_representation).
        :param sl_mdl: selector/policy network returning a keep-probability.
        :param optimizer_tagger: optimizer over the tagger parameters.
        :param optimizer_sl: optimizer over the selector parameters.
        :param criterion_sl: per-element selector loss (e.g. BCE) between
            predicted keep-probabilities and taken actions.
        :param partial: when True, 'O' tags are treated as unknown/partial
            annotations in get_representation.
        """
        self.tagger_model = tagger_mdl
        self.sl_model = sl_mdl
        self.criterion_sl = criterion_sl
        self.batch_size = 50  # normalizes the tagger loss into a per-sentence reward

        # Tag vocabulary (BIO scheme) plus CRF START/STOP transition tags.
        self.class_dict = {
            'O': 0,
            'TREATMENT-I': 1,
            'TREATMENT-B': 2,
            'BODY-B': 3,
            'BODY-I': 4,
            'SIGNS-I': 5,
            'SIGNS-B': 6,
            'CHECK-B': 7,
            'CHECK-I': 8,
            'DISEASE-I': 9,
            'DISEASE-B': 10,
            'INFER-B': 11,
            'INFER-I': 12,
            'SECTION-B': 13,
            'SECTION-I': 14,
            'RESULT-B': 15,
            'RESULT-I': 16,
            'START': 17,
            'STOP': 18
        }
        self.tagset_size = len(self.class_dict)
        self.o_tag = self.class_dict['O']

        self.optimizer_tagger = optimizer_tagger
        self.optimizer_sl = optimizer_sl
        self.partial = partial

    def get_reward(self, x_words, lengths, y_tags):
        """Return the selector reward: the negated, batch-normalized tagger loss.

        A lower tagger loss on the sentence means a higher reward for
        having selected it. The tagger is run in eval mode with gradients
        disabled, so this never updates the tagger.
        """
        self.tagger_model.eval()
        with torch.no_grad():
            batch_loss, _ = self.tagger_model(x_words, y_tags, lengths)
            reward = -1 * (batch_loss / self.batch_size)
        return reward

    def select_action(self, state):
        """Return the selector's keep-probability for one sentence as a float."""
        self.sl_model.eval()
        prob = self.sl_model(state)
        return prob.item()

    def optimize_selector(self, x_representations, y_select, rewards):
        """Run one policy-gradient update of the selector.

        :param x_representations: list of per-sentence feature tensors
            (outputs of ``get_representation``).
        :param y_select: list of actions taken (0/1 keep decisions).
        :param rewards: list of rewards, one per sentence.
        """
        self.sl_model.train()
        self.optimizer_sl.zero_grad()

        x_representations = torch.stack(x_representations)
        # torch.cuda.FloatTensor(...) is deprecated; build on the module device.
        y_select = torch.tensor(y_select, dtype=torch.float32, device=device)

        y_preds = self.sl_model(x_representations)
        y_preds = y_preds.squeeze(2).squeeze(1)

        # REINFORCE: weight each sentence's negative log-probability by its reward.
        neg_log_prob = self.criterion_sl(y_preds, y_select)
        rewards = torch.tensor(rewards, dtype=torch.float32, device=device)
        policy_loss = torch.sum(neg_log_prob * rewards)

        # L1/L2 regularization over the selector's two linear layers.
        # BUGFIX: the L2 term previously re-used affine1's parameters; it is
        # now computed over affine2. NOTE(review): assumes sl_model has an
        # `affine2` layer (standard two-layer policy network) - confirm.
        lambda1, lambda2 = 0.003, 0.003
        all_linear1_params = torch.cat([p.view(-1) for p in self.sl_model.affine1.parameters()])
        all_linear2_params = torch.cat([p.view(-1) for p in self.sl_model.affine2.parameters()])
        l1_regularization = lambda1 * torch.norm(all_linear1_params, 1)
        l2_regularization = lambda2 * torch.norm(all_linear2_params, 2)
        policy_loss += l1_regularization + l2_regularization

        policy_loss.backward()
        self.optimizer_sl.step()

    def get_representation(self, X_char, y_char, length):
        """Build the selector's input feature for one sentence.

        Concatenates the BiLSTM's last-timestep hidden state with the
        per-token score of each token's (distant) label, then zero-pads the
        result to the selector's fixed input size (1024). Runs under
        ``no_grad`` - the tagger is only read, never updated here.
        """
        with torch.no_grad():
            # BiLSTM encoding of the sentence.
            output, _ = self.tagger_model.bilstm(X_char, length)
            w_last_node = output[:, length - 1, :]

            # Per-token scores over the tag set.
            this_representation_o_t = self.tagger_model.logistic(output)

            # Pick each token's label score; under partial annotation an 'O'
            # label is treated as unknown, so use the mean over all label
            # scores instead.
            y_char = y_char % self.tagset_size
            tag_scores = []
            # TODO: vectorize this loop on-device instead of iterating in Python.
            for i, y in enumerate(y_char):
                if y == self.o_tag and self.partial:
                    tag_scores.append(torch.mean(this_representation_o_t[:, i, :]))
                else:
                    tag_scores.append(this_representation_o_t[:, i, y].squeeze(0))
            tag_scores = torch.stack(tag_scores).unsqueeze(0)
            w_last_node = w_last_node.squeeze(0)

            rep = torch.cat([w_last_node, tag_scores], dim=-1)

            # Sentences vary in length, so pad the feature up to the
            # selector's fixed input size (1024). (Differs from the original
            # reference implementation, which used fixed-length inputs.)
            rep = np.pad(rep.cpu(), ((0, 0), (0, 1024 - len(rep[0]))), 'constant')
            rep = torch.from_numpy(rep).cuda()

        return rep