"""
解码器
"""
import torch.nn as nn
import torch
import random
import heapq

import config
from chatbot.attention import Attention


class Decoder(nn.Module):
    """Attention-based GRU decoder for the chatbot seq2seq model.

    Decoding strategies:
      * ``forward``                -- training pass with optional teacher forcing
      * ``evaluate``               -- greedy decoding
      * ``evaluate_by_beamsearch`` -- beam-search decoding (assumes batch_size == 1)
    """

    def __init__(self):
        super(Decoder, self).__init__()
        # Target-side embedding; PAD rows are kept at zero.
        self.embedding = nn.Embedding(num_embeddings=len(config.chatbot_ws_target_by_word_model),
                                      embedding_dim=config.chatbot_embedding_dim,
                                      padding_idx=config.chatbot_ws_target_by_word_model.PAD)
        self.gru = nn.GRU(input_size=config.chatbot_embedding_dim,
                          hidden_size=config.chatbot_decoder_hidden_size,
                          num_layers=config.chatbot_decoder_num_layers,
                          batch_first=True)
        self.attention = Attention(method="general")
        # Luong-style projection of [context; gru_output] back to hidden_size.
        self.Wc = nn.Linear(config.chatbot_decoder_hidden_size * 2,
                            config.chatbot_decoder_hidden_size)
        self.fc = nn.Linear(in_features=config.chatbot_decoder_hidden_size,
                            out_features=len(config.chatbot_ws_target_by_word_model))

    def forward(self, target, encoder_hidden, encoder_ouptput):
        """Run a full decoding pass for training.

        Args:
            target: [batch_size, max_len+1] gold token ids (consumed when
                teacher forcing is active).
            encoder_hidden: [num_layers, batch_size, hidden_size] final
                encoder hidden state.
            encoder_ouptput: [batch_size, seq_len, hidden_size] encoder
                outputs used by attention.  (Parameter name, typo included,
                is kept for backward compatibility with keyword callers.)

        Returns:
            decoder_outputs: [batch_size, max_len+1, vocab_size]
                per-step log-probabilities.
        """
        decoder_hidden = encoder_hidden
        batch_size = target.size(0)

        # Every sequence in the batch starts from the SOS token.
        # decoder_input: [batch_size, 1]
        decoder_input = torch.LongTensor([[config.chatbot_ws_target_by_word_model.SOS]] * batch_size)
        decoder_input = decoder_input.to(config.device)

        # decoder_outputs: [batch_size, max_len+1, vocab_size]
        decoder_outputs = torch.zeros(size=[batch_size, config.chatbot_max_len + 1,
                                            len(config.chatbot_ws_target_by_word_model)])
        decoder_outputs = decoder_outputs.to(config.device)

        # BUGFIX: teacher forcing must be applied with probability
        # chatbot_teacher_forcing_rate; the original compared with ``>``,
        # which inverted the configured rate.  Sampled once per batch,
        # as in the original.
        use_teacher_forcing = random.random() < config.chatbot_teacher_forcing_rate

        for t in range(config.chatbot_max_len + 1):
            # decoder_output_t: [batch_size, vocab_size]
            # decoder_hidden: [num_layers, batch_size, hidden_size]
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input,
                                                                 decoder_hidden,
                                                                 encoder_ouptput)
            decoder_outputs[:, t, :] = decoder_output_t
            if use_teacher_forcing:
                # Feed the gold token as the next input.
                decoder_input = target[:, t].unsqueeze(-1)
            else:
                # Feed the model's own argmax prediction.
                decoder_input = torch.topk(decoder_output_t, 1)[1]

        return decoder_outputs

    def forward_step(self, decoder_input, decoder_hidden, encoder_output):
        """Decode a single time step with attention.

        Args:
            decoder_input: [batch_size, 1] current input token ids.
            decoder_hidden: [num_layers, batch_size, hidden_size].
            encoder_output: [batch_size, seq_len, hidden_size].

        Returns:
            (log_probs [batch_size, vocab_size], new decoder_hidden)
        """
        # [batch_size, 1] -> [batch_size, 1, embedding_dim]
        decoder_input = self.embedding(decoder_input)

        # decoder_output: [batch_size, 1, hidden_size]
        decoder_output, decoder_hidden = self.gru(decoder_input, decoder_hidden)

        # attention_weights: [batch_size, seq_len]
        attention_weights = self.attention(encoder_output, decoder_hidden)
        # Weighted sum of encoder outputs.
        # context_vector: [batch_size, 1, hidden_size]
        context_vector = torch.bmm(attention_weights.unsqueeze(1), encoder_output)
        # concat_data: [batch_size, 1, hidden_size*2]
        concat_data = torch.cat([context_vector, decoder_output], dim=-1)
        # Luong attentional vector: tanh(Wc [context; output]).
        decoder_output = torch.tanh(self.Wc(concat_data))

        # Project to vocabulary and normalize to log-probabilities
        # (consumers rely on these being log-probs, e.g. NLLLoss and
        # the additive scoring in beam search).
        decoder_output = self.fc(decoder_output)
        decoder_output = nn.functional.log_softmax(decoder_output, dim=-1)

        # [batch_size, 1, vocab_size] -> [batch_size, vocab_size]
        return decoder_output.squeeze(1), decoder_hidden

    def evaluate(self, encoder_hidden, encoder_output):
        """Greedy decoding: always follow the argmax token.

        Args:
            encoder_hidden: [num_layers, batch_size, hidden_size].
            encoder_output: [batch_size, max_len, hidden_size].

        Returns:
            list of length max_len+1, each item a numpy array of
            [batch_size] predicted token ids (i.e. [max_len+1, batch_size]).
        """
        decoder_hidden = encoder_hidden
        batch_size = encoder_hidden.size(1)

        decoder_input = torch.LongTensor([[config.chatbot_ws_target_by_word_model.SOS]] * batch_size)
        decoder_input = decoder_input.to(config.device)

        pre = []
        for t in range(config.chatbot_max_len + 1):
            decoder_output_t, decoder_hidden = self.forward_step(decoder_input,
                                                                 decoder_hidden,
                                                                 encoder_output)
            # Argmax token becomes the next input.
            # index: [batch_size, 1]
            index = torch.topk(decoder_output_t, 1)[1]
            decoder_input = index

            pre.append(torch.max(decoder_output_t, 1)[1].cpu().detach().numpy())

        return pre

    def evaluate_by_beamsearch(self, encoder_hidden, encoder_output):
        """Beam-search decoding (intended for batch_size == 1).

        Args:
            encoder_hidden: [num_layers, 1, hidden_size].
            encoder_output: [1, max_len, hidden_size].

        Returns:
            The best-scoring token sequence; its first element is the
            initial SOS input tensor, the rest are token ids.
        """
        decoder_hidden = encoder_hidden
        batch_size = encoder_hidden.size(1)

        decoder_input = torch.LongTensor([[config.chatbot_ws_target_by_word_model.SOS]]
                                         * batch_size).to(config.device)

        # Scores are accumulated LOG-probabilities, so the seed score is 0.
        prev_beam = BeamSearch()
        prev_beam.add(0.0, False, [decoder_input], decoder_input, decoder_hidden)

        # Level-order expansion of the beam.
        while True:
            cur_beam = BeamSearch()
            for _prob, _complete, _seq, _decoder_input, _decoder_hidden in prev_beam:
                if _complete:
                    # Finished hypotheses are carried over unchanged.
                    cur_beam.add(_prob, _complete, _seq, _decoder_input, _decoder_hidden)
                    continue

                decoder_output_t, decoder_hidden = self.forward_step(_decoder_input,
                                                                     _decoder_hidden,
                                                                     encoder_output)
                value, index = torch.topk(decoder_output_t, config.chatbot_beam_width)
                value = value.squeeze(0).cpu().detach().numpy()
                index = index.squeeze(0).cpu().detach().numpy()
                for log_p, token in zip(value, index):
                    # BUGFIX: forward_step returns log-probabilities, so
                    # hypothesis scores must be ADDED.  The original
                    # multiplied them, which flips the score's sign on
                    # every other step and corrupts the ranking.
                    prob = _prob + log_p
                    seq = _seq + [token]
                    decoder_input = torch.LongTensor([[token]]).to(config.device)
                    complete = bool(token == config.chatbot_ws_target_by_word_model.EOS)
                    cur_beam.add(prob, complete, seq, decoder_input, decoder_hidden)

            # Select by score only: the original ``max(cur_beam)`` compared
            # whole tuples and could end up ordering tensors on score ties.
            best_prob, best_complete, best_seq, _, _ = max(cur_beam,
                                                           key=lambda item: item[0])
            if best_complete or len(best_seq) == config.chatbot_max_len + 1:
                return best_seq
            prev_beam = cur_beam


class BeamSearch:
    """Fixed-width beam kept as a min-heap keyed on hypothesis score.

    The heap always retains the ``beam_width`` highest-scoring entries:
    pushing past the width evicts the worst (smallest score) entry.
    """

    def __init__(self):
        self.beam = []
        # Monotonic tie-breaker.  Without it, heapq falls through to
        # comparing the payload (bools, lists of tensors, ...) when two
        # entries have exactly equal scores, which can raise at runtime.
        self._counter = 0
        self.beam_width = config.chatbot_beam_width

    def add(self, prob, complete, seq, decoder_input, decoder_hidden):
        """Insert a hypothesis; evict the worst one if the beam overflows."""
        heapq.heappush(self.beam,
                       (prob, self._counter, complete, seq, decoder_input, decoder_hidden))
        self._counter += 1
        if len(self.beam) > self.beam_width:
            heapq.heappop(self.beam)

    def __iter__(self):
        # Yield the public 5-tuple; the internal tie-breaker stays hidden
        # so callers keep unpacking (prob, complete, seq, input, hidden).
        for prob, _count, complete, seq, decoder_input, decoder_hidden in self.beam:
            yield prob, complete, seq, decoder_input, decoder_hidden
