import heapq
import random

import torch
import torch.nn as nn
import torch.nn.functional as f
import chatbot.config as config
from chatbot.chatbot_core.attention import Attention


class Beam:
    """Bounded candidate pool for beam search.

    Keeps at most ``beam_width`` hypotheses in a min-heap; when the pool
    overflows, the lowest-scoring hypothesis is evicted.
    """

    def __init__(self, beam_width=None):
        """
        :param beam_width: maximum number of hypotheses to retain; defaults to
            ``config.beam_width`` so existing no-argument callers are unaffected.
        """
        self.heap = list()  # min-heap of (score, tie-breaker, payload)
        self.beam_width = config.beam_width if beam_width is None else beam_width
        # Monotonic counter breaks score ties so heap comparisons never fall
        # through to the payload, which may hold tensors that are not orderable
        # (comparing them would raise or give ambiguous results).
        self._counter = 0

    def add(self, probability, complete, seq, decoder_input, decoder_hidden):
        """Insert a hypothesis; evict the worst one if the pool is over capacity.

        :param probability: accumulated score of the hypothesis
        :param complete: True once the hypothesis has emitted its end token
        :param seq: list of all tokens generated so far
        :param decoder_input: input for the next decoding step
        :param decoder_hidden: hidden state for the next decoding step
        """
        self._counter += 1
        heapq.heappush(self.heap,
                       (probability, self._counter,
                        [probability, complete, seq, decoder_input, decoder_hidden]))
        # Keep at most beam_width entries: pop the lowest-scoring hypothesis.
        if len(self.heap) > self.beam_width:
            heapq.heappop(self.heap)

    def __iter__(self):
        # Yield only the payload lists, exactly as callers unpack them.
        return iter([entry[-1] for entry in self.heap])


class Decoder(nn.Module):
    """GRU decoder with attention for the seq2seq chatbot.

    Consumes the encoder's final hidden state (as the initial decoder hidden)
    and the per-step encoder outputs (attended over at every step), producing a
    log-probability distribution over the target vocabulary at each time step.
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # Target-side embedding; PAD positions map to the padding vector.
        self.embedding = nn.Embedding(num_embeddings=len(config.chatbot_ws_target),
                                      embedding_dim=config.chatbot_embedding_dim,
                                      padding_idx=config.chatbot_ws_target.PAD)
        self.gru = nn.GRU(input_size=config.chatbot_embedding_dim, batch_first=True,
                          num_layers=config.chatbot_decoder_num_layers,
                          hidden_size=config.chatbot_decoder_hidden_size)
        # Projects the attention-combined state onto the vocabulary.
        self.fc = nn.Linear(config.chatbot_decoder_hidden_size, len(config.chatbot_ws_target))
        self.attn = Attention()
        # Luong-style combiner: [gru output ; context vector] -> hidden_size.
        self.wa = nn.Linear(config.chatbot_encoder_hidden_size + config.chatbot_decoder_hidden_size,
                            config.chatbot_decoder_hidden_size, bias=False)
        self.va = nn.Linear(config.chatbot_decoder_hidden_size, 1)

    def forward(self, target, encoder_hidden, encoder_outputs):
        """Decode a full target sequence (training mode).

        :param target: [batch_size, target_len] ground-truth token ids
        :param encoder_hidden: encoder final hidden, used as the initial decoder hidden
        :param encoder_outputs: per-step encoder outputs for attention
        :return: (decoder_outputs [batch, max_len + 1, vocab_size], decoder_hidden)
        """
        decoder_hidden = encoder_hidden
        batch_size = target.size(0)
        # First time-step input is SOS for every sequence: [batch_size, 1].
        decoder_input = torch.full((batch_size, 1), config.chatbot_ws_target.SOS,
                                   dtype=torch.int64, device=config.device)
        # Collects the per-step vocabulary distributions.
        decoder_outputs = torch.zeros(
            [batch_size, config.chatbot_target_max_len + 1, len(config.chatbot_ws_target)]).to(config.device)

        # BUGFIX: teacher forcing should be applied with probability
        # chatbot_teacher_forcing_ratio; the old code inverted the comparison
        # (random() > ratio selected the teacher-forced branch).
        use_teacher_forcing = random.random() < config.chatbot_teacher_forcing_ratio
        for t in range(config.chatbot_target_max_len + 1):
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input, decoder_hidden, encoder_outputs)
            decoder_outputs[:, t, :] = decoder_output_t  # second dim of decoder_outputs is exactly t
            if use_teacher_forcing:
                # Feed the ground-truth token as the next input.
                decoder_input = target[:, t].unsqueeze(-1)
            else:
                # Feed the model's own most likely token as the next input.
                value, index = torch.topk(decoder_output_t, 1)
                decoder_input = index
        return decoder_outputs, decoder_hidden

    def forward_step(self, decoder_input, decoder_hidden, encoder_outputs):
        """Run a single decoding time step.

        :param decoder_input: [batch_size, 1] previous token ids
        :param decoder_hidden: [num_layers, batch_size, hidden_size]
        :param encoder_outputs: [batch_size, seq_len, encoder_hidden_size]
        :return: (log-probabilities [batch_size, vocab_size], new hidden state)
        """
        decoder_input_embedded = self.embedding(decoder_input)  # [batch_size,1] -> [batch_size,1,embedding_dim]
        out, decoder_hidden = self.gru(decoder_input_embedded,
                                       decoder_hidden)  # [batch_size,1,hidden_size], [num_layers,batch_size,hidden_size]
        # Attention over the encoder outputs.
        attention_weight = self.attn(decoder_hidden, encoder_outputs).unsqueeze(1)  # [batch_size,1,seq_len]
        context_vector = attention_weight.bmm(encoder_outputs)  # [batch_size,1,encoder_hidden_size]
        # Concatenate GRU output and context, then drop the length-1 time dim.
        concated = torch.cat([out, context_vector], dim=-1).squeeze(
            1)  # [batch_size, encoder_hidden_size + decoder_hidden_size]
        out = torch.tanh(self.wa(concated))  # [batch_size, hidden_size]
        output = f.log_softmax(self.fc(out), dim=-1)  # [batch_size, vocab_size]
        return output, decoder_hidden

    def evaluate(self, encoder_hidden, encoder_outputs):
        """Greedy decoding for evaluation.

        :param encoder_hidden: encoder final hidden state
        :param encoder_outputs: per-step encoder outputs for attention
        :return: list of numpy index arrays, one per decoded time step
        """
        decoder_hidden = encoder_hidden
        batch_size = encoder_hidden.size(1)
        decoder_input = torch.full((batch_size, 1), config.chatbot_ws_target.SOS,
                                   dtype=torch.int64, device=config.device)
        indices = []
        # NOTE(review): decodes a fixed number of steps with no early stop on
        # EOS; callers are expected to truncate at EOS themselves.
        for i in range(config.chatbot_target_max_len + 5):
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input, decoder_hidden, encoder_outputs)
            # Greedy choice: the single most likely token at this step.
            value, index = torch.topk(decoder_output_t, 1)
            decoder_input = index
            indices.append(index.squeeze(-1).cpu().detach().numpy())
        return indices

    def prepar_seq(self, seq):
        """Strip a leading SOS / trailing EOS and convert tokens to plain ints."""
        if seq[0].item() == config.chatbot_ws_target.SOS:
            seq = seq[1:]
        if seq[-1].item() == config.chatbot_ws_target.EOS:
            seq = seq[:-1]
        seq = [i.item() for i in seq]
        return seq

    # Beam search backed by the heap-based Beam pool (a priority queue that
    # keeps only the beam_width best-scoring hypotheses).
    def evaluate_beamsearch_heapq(self, encoder_hidden, encoder_outputs):
        """Beam-search decoding; returns the best token-id sequence.

        NOTE(review): the per-hypothesis bookkeeping (`.item()` calls in
        prepar_seq, `value[0]`/`index[0]` indexing) assumes batch_size == 1 —
        confirm against callers before batching.
        """
        batch_size = encoder_hidden.size(1)
        # Seed hypothesis: SOS token(s) plus the encoder's final hidden state.
        decoder_input = torch.LongTensor([[config.chatbot_ws_target.SOS] * batch_size]).to(config.device)
        decoder_hidden = encoder_hidden

        prev_beam = Beam()
        # BUGFIX: forward_step emits log-probabilities, so hypothesis scores
        # must be SUMMED starting from 0. The old code multiplied them starting
        # from 1 — a product of negative log-probs flips sign every step and
        # destroys the ranking.
        prev_beam.add(0.0, False, [decoder_input], decoder_input, decoder_hidden)
        while True:
            cur_beam = Beam()
            # Expand every surviving hypothesis by one forward_step.
            for prev_probability, prev_complete, prev_seq, prev_decoder_input, prev_decoder_hidden in prev_beam:
                if prev_complete:
                    # Finished hypotheses carry over unchanged.
                    cur_beam.add(prev_probability, prev_complete, prev_seq, prev_decoder_input, prev_decoder_hidden)
                else:
                    decoder_output_t, decoder_hidden = self.forward_step(prev_decoder_input, prev_decoder_hidden,
                                                                         encoder_outputs)
                    # Take the beam_width best continuations of this hypothesis.
                    value, index = torch.topk(decoder_output_t, config.beam_width)
                    for m, n in zip(value[0], index[0]):
                        decoder_input = torch.LongTensor([[n.item()]]).to(config.device)
                        seq = prev_seq + [n]
                        # Keep the score a plain float so heap ordering never
                        # falls through to tensor comparisons.
                        probability = prev_probability + m.item()
                        # BUGFIX: a hypothesis is complete when it emits EOS;
                        # the old code tested against SOS, so hypotheses could
                        # never legitimately finish.
                        complete = n.item() == config.chatbot_ws_target.EOS
                        cur_beam.add(probability, complete, seq, decoder_input, decoder_hidden)
            # Compare on score only — the stored tensors are not orderable.
            best_probability, best_complete, best_seq, best_decoder_input, best_decoder_hidden = max(
                cur_beam, key=lambda candidate: candidate[0])
            if best_complete or len(best_seq) - 1 == config.chatbot_target_max_len + 1:
                return self.prepar_seq(best_seq)
            else:
                # Keep expanding until the best hypothesis finishes or the
                # length limit is reached.
                prev_beam = cur_beam
