from torch import nn
from config.Config import PAD_TOKEN, BOS_TOKEN
from config.Config import BATCH_SIZE, DEVICE
import torch
from modules.attention import RnnAttention, Attention
import torch.nn.functional as F


class DecoderCellState:
    """Container for the decoder's per-step state.

    Attributes:
        context: [batch_size, hidden_size] attention context vector.
        hidden_state: [num_layers, batch_size, hidden_size] LSTM hidden state.
        cell_state: [num_layers, batch_size, hidden_size] LSTM cell state.
        alignments: [batch_size, max_src_len] attention weights over source.
    """

    def __init__(self, context=None, hidden_state=None, cell_state=None, alignments=None):
        self.context = context
        self.hidden_state = hidden_state
        self.cell_state = cell_state
        self.alignments = alignments

    @staticmethod
    def select(x, indices, dim=0):
        """Select entries of ``x`` along ``dim`` given an index tensor."""
        return x.index_select(dim, indices)

    def batch_select(self, indices):
        """Return a new state keeping only the batch entries in ``indices``.

        hidden/cell states carry the batch on dim 1 ([num_layers, batch, ...]);
        context and alignments carry it on dim 0.
        """
        return DecoderCellState(
            context=self.select(self.context, indices),
            hidden_state=self.select(self.hidden_state, indices, dim=1),
            cell_state=self.select(self.cell_state, indices, dim=1),
            alignments=self.select(self.alignments, indices),
        )


class BiLstmCell(nn.Module):
    """One decoder step: a unidirectional LSTM update followed by attention.

    NOTE(review): despite the name, the LSTM is created with
    ``bidirectional=False``; only the attention is configured with
    ``is_bi_encoder=True`` (presumably the *encoder* is bidirectional —
    confirm against the encoder module).

    Args:
        embedding_size : size of the decoder embedding vectors
        hidden_size : number of decoder hidden units
        num_layers : number of decoder LSTM layers
        dropout : dropout probability
    Input: pre_embedding, pre_state: DecoderCellState, encoder_outputs, src_lengths
        pre_embedding : [batch_size, embedding_size] previous step's output
            embedding, used as the current step's input
        pre_state : the previous step's cell state
            - context : [batch_size, hidden_size]
            - hidden_state : [num_layers, batch_size, hidden_size]
            - cell_state : [num_layers, batch_size, hidden_size]
            - alignments : [batch_size, max_src_len]
        encoder_outputs : [batch_size, max_src_len, hidden] encoder outputs
        src_lengths : [batch_size, ] valid lengths of the encoder input
    Output:
        decoder_cell_state :
            - context : [batch_size, hidden_size]
            - hidden_state : [num_layers, batch_size, hidden_size]
            - cell_state : [num_layers, batch_size, hidden_size]
            - alignments : [batch_size, max_src_len]
    """

    def __init__(self, vocab, embedding_size, hidden_size, num_layers, dropout):
        super(BiLstmCell, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad_id = self.vocab[PAD_TOKEN]
        self.dropout = nn.Dropout(dropout)
        self.cell_input_dim = embedding_size + hidden_size  # LSTM input is [previous output embedding; context vector]
        self.cell = nn.LSTM(self.cell_input_dim,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True,
                            dropout=dropout,
                            bidirectional=False)
        self.attention = Attention(encoder_hidden_size=hidden_size, decoder_hidden_size=hidden_size, is_bi_encoder=True)

    def forward(self, pre_embedding, pre_state: DecoderCellState, encoder_outputs, src_lengths):
        """Run one LSTM step, then attend over the encoder outputs."""
        embedding = self.dropout(pre_embedding)  # [batch, embedding_size]
        # No-op for the documented 2-D input; only drops a stray singleton
        # time dim if a caller passes [batch, 1, embedding_size].
        # (The original comment claimed the result is 3-D, which was wrong.)
        embedding = embedding.squeeze(1)
        cell_input = torch.cat([embedding, pre_state.context], 1).unsqueeze(1)  # [batch, 1, embedding_size + context_size]
        if pre_state.hidden_state is None and pre_state.cell_state is None:
            # First step with no initial state: let the LSTM zero-initialize.
            output, (hidden_state, cell_state) = self.cell(cell_input)
        else:
            state = (
                pre_state.hidden_state,
                pre_state.cell_state
            )
            output, (hidden_state, cell_state) = self.cell(cell_input, state)
        output = output.squeeze(1)  # [batch, hidden_size]
        context, alignments = self.attention(output, encoder_outputs, src_lengths)
        decoder_cell_state = DecoderCellState(
            context=context,
            hidden_state=hidden_state,
            cell_state=cell_state,
            alignments=alignments
        )
        return decoder_cell_state


class LstmDecoder(nn.Module):
    """Pointer-generator LSTM decoder.

    Each target step mixes a generation distribution over the vocabulary with
    a copy distribution over source positions (the attention alignments),
    weighted by a learned gate ``p_gen``.

    Args:
        vocab : token-to-id mapping; must contain PAD_TOKEN and BOS_TOKEN.
        embedding_size : target embedding dimensionality.
        hidden_size : decoder hidden dimensionality.
        num_layers : number of LSTM layers.
        dropout : dropout probability.
    Input:
        src_batch : [batch_size, max_src_len] source token ids (copy targets).
        src_lengths : [batch_size, ] valid source lengths.
        tgt_batch : [batch_size, max_tgt_len] teacher-forced target ids.
        final_encoder_hidden_state : encoder final hidden state; its batch dim
            is read as ``size(-2)`` (works for both [num_layers, batch, hidden]
            and [batch, hidden] layouts).
        final_encoder_cell_state : encoder final cell state, same layout.
        encoder_outputs : [batch_size, max_src_len, hidden_size].
    Output:
        dist : [batch_size, max_tgt_len, vocab_size] final distributions.
        attention : [batch_size, max_tgt_len, max_src_len] attention history.
    """

    def __init__(self,
                 vocab,
                 embedding_size,
                 hidden_size,
                 num_layers,
                 dropout):
        super(LstmDecoder, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad_id = self.vocab[PAD_TOKEN]
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.embedding = nn.Embedding(self.vocab_size, self.embedding_size, padding_idx=self.pad_id)
        self.out_layer = nn.Linear(hidden_size, self.vocab_size, bias=False)
        self.attn_layer = nn.Linear(hidden_size, hidden_size)
        self.cell = BiLstmCell(vocab=vocab,
                               embedding_size=embedding_size,
                               hidden_size=hidden_size,
                               num_layers=num_layers,
                               dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        # p_gen gate input = [context; input embedding; top hidden; top cell]
        # -> 3 * hidden_size + embedding_size features.
        self.p_gen_linear = nn.Linear(3 * self.hidden_size + self.embedding_size, 1)

    def generate_init_cell_state(self, final_encoder_hidden_state, final_encoder_cell_state):
        """Build the step-0 decoder state from the encoder's final states.

        Batch size and device for the zero context are derived from the
        encoder state itself (``size(-2)`` is the batch dim for both
        [num_layers, batch, hidden] and [batch, hidden] layouts). The previous
        version hard-coded the config BATCH_SIZE/DEVICE, which broke whenever
        the final batch of an epoch was smaller than BATCH_SIZE.
        """
        batch_size = final_encoder_hidden_state.size(-2)
        init_context = torch.zeros(batch_size, self.hidden_size,
                                   device=final_encoder_hidden_state.device)
        return DecoderCellState(context=init_context,
                                hidden_state=final_encoder_hidden_state,
                                cell_state=final_encoder_cell_state)

    def init_pre_output(self, batch_size=None, device=None):
        """Return a [batch_size, 1] long tensor of BOS ids to start decoding.

        Defaults preserve the old behavior (config BATCH_SIZE / DEVICE), but
        callers may now pass the actual batch size and device explicitly.
        """
        if batch_size is None:
            batch_size = BATCH_SIZE
        if device is None:
            device = DEVICE
        return torch.full((batch_size, 1), self.vocab[BOS_TOKEN],
                          dtype=torch.long, device=device)

    def one_step(self, cur_embed, prev_cell_state, encoder_outputs, src_batch, src_lengths):
        """Run one decoder step.

        cur_embed : [batch, embedding_size] current input embedding.
        Returns (new_state, final_dist) where final_dist is
        [batch, vocab_size]: p_gen * generation softmax plus
        (1 - p_gen) * copy distribution scattered onto source token ids.
        """
        cur_decoder_cell_state = self.cell(cur_embed, prev_cell_state, encoder_outputs, src_lengths)
        cur_output = torch.tanh(self.attn_layer(cur_decoder_cell_state.context))
        cur_output = self.dropout(cur_output)

        # Generation/copy gate from context, input embedding, and the top
        # LSTM layer's hidden and cell states.
        p_gen_input = torch.cat([
            cur_decoder_cell_state.context,
            cur_embed,
            cur_decoder_cell_state.hidden_state[-1],
            cur_decoder_cell_state.cell_state[-1],
        ], dim=1)
        p_gen = torch.sigmoid(self.p_gen_linear(p_gen_input))  # [batch, 1]

        gen_dist = torch.softmax(self.out_layer(cur_output), dim=1)  # [batch, vocab_size]
        copy_dist = (1 - p_gen) * cur_decoder_cell_state.alignments  # [batch, max_src_len]

        # Add copy mass onto the generated-vocab slots of the source tokens.
        # Out-of-place scatter_add avoids mutating an autograd intermediate.
        final_dist = (p_gen * gen_dist).scatter_add(1, src_batch, copy_dist)

        return cur_decoder_cell_state, final_dist

    def forward(self,
                src_batch,
                src_lengths,
                tgt_batch,
                final_encoder_hidden_state,
                final_encoder_cell_state,
                encoder_outputs):
        """Teacher-forced decoding over the whole target sequence."""
        tgt_embeddings = self.embedding(tgt_batch)  # [batch, max_tgt_len, embedding_size]
        prev_cell_state = self.generate_init_cell_state(final_encoder_hidden_state, final_encoder_cell_state)

        attention_history = []
        dist_history = []

        # Iterate target time steps; the first input is <bos>.
        for cur_embed in tgt_embeddings.unbind(dim=1):  # each [batch, embedding_size]
            cur_decoder_cell_state, final_dist = self.one_step(cur_embed,
                                                               prev_cell_state,
                                                               encoder_outputs,
                                                               src_batch,
                                                               src_lengths)
            attention_history.append(cur_decoder_cell_state.alignments)
            dist_history.append(final_dist)
            prev_cell_state = cur_decoder_cell_state

        attention = torch.stack(attention_history, 1)  # [batch, max_tgt_len, max_src_len]
        dist = torch.stack(dist_history, 1)  # [batch, max_tgt_len, vocab_size]

        return dist, attention
