import torch
import torch.nn as nn
from torch.nn import functional
from torch.autograd import Variable
from kgdlg.Trainer import Statistics,PriorStatistics
import kgdlg.IO
import torch.nn.functional as F


class NMTLossCompute(nn.Module):
    """
    Standard NMT loss computation.

    Wraps a generator (projection from decoder hidden states to vocabulary
    log-probabilities) and a summed NLL criterion whose weights zero out the
    padding and unknown-word tokens so they contribute nothing to the loss.
    """
    def __init__(self, generator, tgt_vocab):
        super(NMTLossCompute, self).__init__()
        self.generator = generator
        self.tgt_vocab = tgt_vocab
        self.padding_idx = tgt_vocab.stoi[kgdlg.IO.PAD_WORD]
        # Zero-weight PAD and UNK entries so they are ignored by NLLLoss.
        weight = torch.ones(len(tgt_vocab))
        weight[self.padding_idx] = 0
        weight[kgdlg.IO.UNK] = 0
        # reduction='sum' is the modern, equivalent spelling of the
        # deprecated size_average=False.
        self.criterion = nn.NLLLoss(weight, reduction='sum')

    def make_shard_state(self, batch, output):
        """Collect the tensors compute_loss needs; target drops the BOS row."""
        return {
            "output": output,
            "target": batch.tgt[1:],
            }

    def compute_loss(self, batch, output, target):
        """
        Project decoder output to vocabulary scores and score the targets.

        Returns:
            (loss, stats): the summed NLL loss tensor and a Statistics
            object built from its detached value.
        """
        scores = self.generator(self.bottle(output))
        target = target.view(-1)
        loss = self.criterion(scores, target)

        loss_data = loss.item()
        stats = self.stats(loss_data, scores, target)
        return loss, stats

    def compute_train_loss(self, batch, output):
        """
        Compute the training loss and backpropagate it, normalized by
        batch size. Returns a Statistics object for reporting.
        """
        batch_stats = Statistics()
        shard_state = self.make_shard_state(batch, output)
        loss, stats = self.compute_loss(batch, **shard_state)
        loss.div(batch.batch_size).backward()
        batch_stats.update(stats)

        return batch_stats

    def compute_valid_loss(self, batch, output):
        """
        Compute the validation loss monolithically; no backward() call.
        """
        shard_state = self.make_shard_state(batch, output)
        _, batch_stats = self.compute_loss(batch, **shard_state)

        return batch_stats

    def stats(self, loss, scores, target):
        """
        Compute and return a Statistics object.

        Args:
            loss (float): detached loss value from the criterion.
            scores (Tensor): (num_tokens, vocab) prediction scores.
            target (Tensor): flattened gold token indices.

        Accuracy is counted over non-padding positions only.
        """
        pred = scores.max(1)[1]
        non_padding = target.ne(self.padding_idx)
        num_correct = pred.eq(target) \
                          .masked_select(non_padding) \
                          .sum()
        return Statistics(loss, non_padding.sum().item(), num_correct.item())

    def bottle(self, v):
        # (seq_len, batch, dim) -> (seq_len * batch, dim)
        return v.view(-1, v.size(2))

    def unbottle(self, v, batch_size):
        # (seq_len * batch, dim) -> (seq_len, batch, dim)
        return v.view(-1, batch_size, v.size(1))


class PriorLossCompute(nn.Module):
    """
    Loss for the prior networks: a cluster-classification NLL plus a
    bag-of-words keyword NLL, both averaged over elements.
    """
    def __init__(self, num_clusters, tgt_vocab):
        super(PriorLossCompute, self).__init__()
        self.tgt_vocab = tgt_vocab
        self.padding_idx = tgt_vocab.stoi[kgdlg.IO.PAD_WORD]
        # Zero-weight PAD and UNK so they contribute nothing to the word loss.
        weight = torch.ones(len(tgt_vocab))
        weight[self.padding_idx] = 0
        weight[kgdlg.IO.UNK] = 0
        # reduction='mean' is the modern spelling of size_average=True.
        self.word_crt = nn.NLLLoss(weight, reduction='mean')
        # Cluster 0 is masked out of the cluster loss. BUGFIX: the original
        # code built this weight vector but never passed it to the criterion;
        # it is now passed, mirroring word_crt above.
        weight = torch.ones(num_clusters)
        weight[0] = 0
        self.cluster_crt = nn.NLLLoss(weight, reduction='mean')

    def compute_loss(self, batch, cluster_logits, word_logits):
        """
        Compute the combined cluster + keyword loss and backpropagate it.

        Both logits tensors are broadcast across the width of their target
        tensors before being flattened for NLLLoss. log_softmax is applied
        here, so raw (unnormalized) logits are expected as input.

        Returns:
            PriorStatistics with the two detached loss values.
        """
        cluster_targets = batch.cluster.transpose(0, 1)
        keyword_targets = batch.bow[0].transpose(0, 1)

        # Repeat each example's single logit row across all of its targets.
        cluster_logits = cluster_logits.transpose(0, 1)
        cluster_logits = cluster_logits.expand(
            cluster_logits.size(0), cluster_targets.size(1), cluster_logits.size(2))
        word_logits = word_logits.transpose(0, 1)
        word_logits = word_logits.expand(
            (word_logits.size(0), keyword_targets.size(1), word_logits.size(2)))

        cluster_logits = self.bottle(cluster_logits.contiguous())
        cluster_logits = F.log_softmax(cluster_logits, -1)
        cluster_targets = cluster_targets.contiguous().view(-1)
        cluster_loss = self.cluster_crt(cluster_logits, cluster_targets)

        word_logits = self.bottle(word_logits.contiguous())
        word_logits = F.log_softmax(word_logits, -1)
        keyword_targets = keyword_targets.contiguous().view(-1)
        word_loss = self.word_crt(word_logits, keyword_targets)

        # backward() runs here; the caller is only expected to step the
        # optimizer afterwards.
        loss = cluster_loss + word_loss
        loss.backward()

        return PriorStatistics(cluster_loss.item(), word_loss.item())

    def bottle(self, v):
        # (a, b, dim) -> (a * b, dim)
        return v.view(-1, v.size(2))

    def unbottle(self, v, batch_size):
        # (a * b, dim) -> (a, b, dim)
        return v.view(-1, batch_size, v.size(1))


class JointLossCompute(nn.Module):
    """
    Joint loss for the full model: per-response sequence NLL, a bag-of-words
    keyword loss, and a KL-divergence term, combined and backpropagated
    inside compute_loss / neg_compute_loss.
    """
    def __init__(self, generator, tgt_vocab):
        # generator projects bottled decoder states to vocabulary scores.
        super(JointLossCompute, self).__init__()
        self.generator = generator
        self.tgt_vocab = tgt_vocab
        self.padding_idx = tgt_vocab.stoi[kgdlg.IO.PAD_WORD]
        # Zero-weight PAD and UNK so they are ignored by the criterion.
        weight = torch.ones(len(tgt_vocab))
        weight[self.padding_idx] = 0
        weight[kgdlg.IO.UNK] = 0
        self.criterion = nn.NLLLoss(weight, size_average=False)
        # self.kld = nn.KLDivLoss(size_average=False)
    def compute_loss(self, batch, output_list, kld_loss, bow_logits):
        """
        Compute the joint loss, call backward(), and return a statistics
        object. kld_loss is either a tensor or — in training mode 202 —
        a (kld_loss, diversity_loss, similarity_error) tuple.
        """


        # Collect the gold target sequence for every 'tgt*' field of the
        # batch, dropping each sequence's leading BOS row.
        tgt_outputs_list = []
        bacth_dict = vars(batch)
        for name in bacth_dict:
            if 'tgt' in name:
                tgt_inputs = bacth_dict[name][0][1:]
                tgt_outputs_list.append(tgt_inputs)

        # Currently each src is assumed to map to exactly one bag of words.

        # bow_targets' original shape was observed as [65, 38] — TODO confirm axes.
        # batch.bow (same object as batch_dict['bow']) is a tuple: [0] appears
        # to be the keyword index tensors and [1] their lengths — verify
        # against the data loader.
        bow_targets = batch.bow[0]

        # NOTE(review): the assignment above is immediately overwritten here;
        # the previous line looks like dead code.
        bow_targets = batch.bow[0].transpose(0, 1)
        # After the transpose bow_targets should be [38, 65].

        # Bag-of-words loss: broadcast the single bow logit row over every
        # keyword target position, then score with the NLL criterion.
        bow_logits = bow_logits.transpose(0, 1)
        bow_logits = bow_logits.expand((bow_logits.size(0), bow_targets.size(1), bow_logits.size(2)))

        bow_logits = self.bottle(bow_logits.contiguous())
        bow_logits = F.log_softmax(bow_logits, -1)
        bow_targets = bow_targets.contiguous()
        bow_targets = bow_targets.view(-1)

        bow_loss = self.criterion(bow_logits, bow_targets)

        # Sequence NLL for each decoder-output / target pair.
        seq_loss_list = []
        for y_i in range(len(output_list)):
            output = output_list[y_i]
            target = tgt_outputs_list[y_i]

            bottle_out = self.bottle(output)
            scores = self.generator(bottle_out)
            target = target.view(-1)
            seq_loss = self.criterion(scores,target)
            seq_loss_list.append(seq_loss)

        if isinstance(kld_loss, tuple):
            # In training mode 202 kld_loss arrives as a tuple; the
            # auxiliary terms are rescaled toward the sequence loss.
            (kld_loss,diveristy_loss,similarity_error) = kld_loss
            seq_loss = sum(seq_loss_list)

            # NOTE(review): softmax over a single scalar is identically 1,
            # so these lines appear to reduce each term to exactly
            # seq_loss — confirm the intended scaling.
            diveristy_loss = functional.softmax(diveristy_loss)*seq_loss
            similarity_error = functional.softmax(similarity_error)*seq_loss
            bow_loss = functional.softmax(bow_loss)*seq_loss

            # NOTE(review): backward() is called on each term here AND again
            # on the combined loss below; the second pass re-traverses the
            # same graph and would normally raise unless retain_graph=True
            # is used somewhere — verify this branch actually runs.
            seq_loss.backward()
            diveristy_loss.backward()
            similarity_error.backward()
            bow_loss.backward()

            loss = seq_loss+diveristy_loss+similarity_error+bow_loss


        else:
            # Plain-tensor case: sum KL over dim 1, then total everything.
            kld_loss = torch.sum(kld_loss,dim = 1)
            loss = sum(seq_loss_list)+bow_loss+sum(kld_loss)


        # print("what is the current loss", loss)

        # print(type(batch.batch_size))
        batch_size = batch.batch_size

        loss.div(batch.batch_size).backward()
        # NOTE(review): seq_loss/scores/target here refer to the LAST loop
        # iteration (or the tuple branch's sum) — confirm reporting only the
        # final response is intended. Also, after the tuple branch kld_loss
        # is 0-dim, and sum() over a 0-dim tensor fails — verify this path.
        seq_loss_data = seq_loss.item()
        kld_loss = sum(kld_loss).item()
        kld_loss_data = kld_loss/batch.batch_size
        stats = self.stats(seq_loss_data, kld_loss_data, scores, target)

        return  stats

    def neg_compute_loss(self, batch, output_list, kld_loss, bow_logits):
        """
        Same computation as compute_loss but with the total loss negated
        (gradient ascent on these terms); kld_loss must be a plain tensor
        here, not a tuple.
        """
        # Collect the gold target sequence for every 'tgt*' batch field,
        # dropping each sequence's leading BOS row.
        tgt_outputs_list = []
        bacth_dict = vars(batch)
        for name in bacth_dict:
            if 'tgt' in name:
                tgt_inputs = bacth_dict[name][0][1:]
                tgt_outputs_list.append(tgt_inputs)

        # Currently each src is assumed to map to exactly one bag of words.

        # bow_targets' original shape was observed as [65, 38] — TODO confirm axes.
        # batch.bow is a tuple: [0] appears to be the keyword index tensors
        # and [1] their lengths — verify against the data loader.

        bow_targets = batch.bow[0].transpose(0, 1)
        # After the transpose bow_targets should be [38, 65].


        # Bag-of-words loss: broadcast the single bow logit row over every
        # keyword target position, then score with the NLL criterion.
        bow_logits = bow_logits.transpose(0, 1)
        bow_logits = bow_logits.expand((bow_logits.size(0), bow_targets.size(1), bow_logits.size(2)))

        bow_logits = self.bottle(bow_logits.contiguous())
        bow_logits = F.log_softmax(bow_logits, -1)
        bow_targets = bow_targets.contiguous()
        bow_targets = bow_targets.view(-1)

        bow_loss = self.criterion(bow_logits, bow_targets)

        # Sequence NLL for each decoder-output / target pair.
        seq_loss_list = []
        for y_i in range(len(output_list)):
            output = output_list[y_i]
            target = tgt_outputs_list[y_i]

            bottle_out = self.bottle(output)
            scores = self.generator(bottle_out)
            target = target.view(-1)
            seq_loss = self.criterion(scores,target)
            seq_loss_list.append(seq_loss)


        kld_loss = torch.sum(kld_loss,dim = 1)
        # Negated total: this pushes the model AWAY from these targets.
        loss = -(sum(seq_loss_list)+bow_loss+sum(kld_loss))


        # print("what is the current loss", loss)

        # print(type(batch.batch_size))
        batch_size = batch.batch_size
        # print("what is batch.batch_size?:", batch_size)
        # NOTE(review): div_loss is computed but never used (leftover from a
        # debug print); the normalized backward happens on the next line.
        div_loss = loss.div(batch_size)
        # print("what is it after div", div_loss)
        loss.div(batch.batch_size).backward()
        # NOTE(review): seq_loss/scores/target refer to the LAST loop
        # iteration only — confirm that is the intended reporting.
        seq_loss_data = seq_loss.item()
        kld_loss = sum(kld_loss).item()
        kld_loss_data = kld_loss/batch.batch_size
        stats = self.stats(seq_loss_data, kld_loss_data, scores, target)

        return  stats


    def stats(self, seq_loss, kld_loss, scores, target):
        """
        Compute and return a Statistics object.

        Args:
            seq_loss (float): detached sequence-loss value.
            kld_loss (float): detached, batch-normalized KL value.
            scores (Tensor): (num_tokens, vocab) prediction scores.
            target (Tensor): flattened gold token indices.

        Accuracy is counted over non-padding positions only.
        """
        pred = scores.max(1)[1]
        non_padding = target.ne(self.padding_idx)
        num_correct = pred.eq(target) \
                          .masked_select(non_padding) \
                          .sum()
        return Statistics(seq_loss, kld_loss, non_padding.sum().item(), num_correct.item())
    def bottle(self, v):
        # (seq_len, batch, dim) -> (seq_len * batch, dim)
        return v.view(-1, v.size(2))
    def unbottle(self, v, batch_size):
        # (seq_len * batch, dim) -> (seq_len, batch, dim)
        return v.view(-1, batch_size, v.size(1)) 