import torch
import torch.nn as nn
import torch.nn.functional as F
from config.Config import PAD_TOKEN, BATCH_SIZE, BOS_TOKEN, DEVICE
from modules.attention import RnnAttention, Attention


class ConsumCellState:
    """Plain container for the state produced by one decoder step.

    All attributes default to ``None`` until populated. Expected shapes
    (from usage in ``ConsumDecoderCell`` / ``ConsumDecoder``):
        output:             [batch, hidden_size] — decoder cell output
        method_context:     [batch, hidden_size] — attention context over the method encoder
        method_alignments:  [batch, method_len]  — attention weights over method tokens
        context_context:    [batch, hidden_size] — attention context over the context encoder
        context_alignments: [batch, context_len] — attention weights over context tokens
        hidden_state:       [num_layers, batch, hidden_size] — LSTM hidden state
        cell_state:         [num_layers, batch, hidden_size] — LSTM cell state
    (The original comments claimed [batch, hidden_size] for the LSTM states,
    but callers index ``hidden_state[-1]`` and concatenate on dim=2, so the
    states carry a leading num_layers dimension.)
    """

    def __init__(self, output=None,
                 method_context=None,
                 method_alignments=None,
                 context_context=None,
                 context_alignments=None,
                 hidden_state=None,
                 cell_state=None):
        self.output = output
        self.method_context = method_context
        self.method_alignments = method_alignments
        self.context_context = context_context
        self.context_alignments = context_alignments
        self.hidden_state = hidden_state
        self.cell_state = cell_state


class ConsumDecoderCell(nn.Module):
    """Single decoder step: one LSTM advance plus attention over both encoders.

    Args:
        vocab: vocabulary (token -> id mapping, supports ``len()``).
        embedding_size: size of the input token embedding.
        method_encoder_hidden_size: hidden size of the method encoder.
        context_encoder_hidden_size: hidden size of the context encoder.
        hidden_size: decoder hidden size.
        num_layers: number of LSTM layers (same as the encoders).
        dropout: dropout probability.

    Inputs (forward):
        pre_embedding: embedding of the previous output token, [batch, embedding_size].
        pre_state: ``ConsumCellState`` from the previous step.
        m_encoder_outputs: method encoder outputs, [batch, m_src_len, m_hidden_size].
        m_src_lengths: valid lengths of the method source, [batch].
        c_encoder_outputs: context encoder outputs, [batch, c_src_len, c_hidden_size].
        c_src_lengths: valid lengths of the context source, [batch].

    Output (forward):
        A ``ConsumCellState`` holding this step's output, both attention
        contexts/alignments, and the new LSTM hidden/cell states.
    """

    def __init__(self,
                 vocab,
                 embedding_size,
                 method_encoder_hidden_size,
                 context_encoder_hidden_size,
                 hidden_size,
                 num_layers,
                 dropout):
        super(ConsumDecoderCell, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad_id = self.vocab[PAD_TOKEN]
        self.dropout = nn.Dropout(dropout)
        self.hidden_size = hidden_size
        # The decoder input is [previous output embedding ; context vector 1 ; context vector 2].
        self.cell_input_size = embedding_size + 2 * self.hidden_size
        self.cell = nn.LSTM(self.cell_input_size,
                            hidden_size=self.hidden_size,
                            num_layers=num_layers,
                            batch_first=True,
                            dropout=dropout,
                            bidirectional=False)
        self.method_attention = Attention(encoder_hidden_size=method_encoder_hidden_size,
                                          decoder_hidden_size=hidden_size,
                                          is_bi_encoder=True)
        self.context_attention = Attention(encoder_hidden_size=context_encoder_hidden_size,
                                           decoder_hidden_size=hidden_size,
                                           is_bi_encoder=True)

    def forward(self, pre_embedding, pre_state: ConsumCellState,
                m_encoder_outputs, m_src_lengths,
                c_encoder_outputs, c_src_lengths):
        embedding = self.dropout(pre_embedding)
        # [batch, 1, embedding_size + 2 * hidden_size]
        cell_input = torch.cat([embedding, pre_state.method_context, pre_state.context_context], 1).unsqueeze(1)
        if pre_state.hidden_state is None and pre_state.cell_state is None:
            # First step: let the LSTM initialize its own zero state.
            output, (hidden_state, cell_state) = self.cell(cell_input)
        else:
            state = (
                pre_state.hidden_state,
                pre_state.cell_state
            )
            output, (hidden_state, cell_state) = self.cell(cell_input, state)
        output = output.squeeze(1)  # [batch, hidden_size]
        method_context, m_alignments = self.method_attention(output, m_encoder_outputs, m_src_lengths)
        # BUGFIX: this previously called self.method_attention again, so the
        # dedicated context_attention module was never used and the context
        # encoder was attended with the method encoder's (wrongly-sized) weights.
        context_context, c_alignments = self.context_attention(output, c_encoder_outputs, c_src_lengths)
        decoder_cell_state = ConsumCellState(
            output=output,
            method_context=method_context,
            method_alignments=m_alignments,
            context_context=context_context,
            context_alignments=c_alignments,
            hidden_state=hidden_state,
            cell_state=cell_state
        )
        return decoder_cell_state


class ConsumDecoder(nn.Module):
    """Pointer-generator decoder attending over two encoders (method + context).

    At each target step it runs ``ConsumDecoderCell``, mixes the generation
    distribution with two copy distributions (one per source) gated by a
    3-way softmax, and scatter-adds the copy mass back onto the vocabulary.

    Args:
        vocab: vocabulary (token -> id mapping, supports ``len()``).
        embedding_size: target embedding size.
        method_encoder_hidden_size: hidden size of the method encoder.
        context_encoder_hidden_size: hidden size of the context encoder.
        num_layers: number of LSTM layers.
        dropout: dropout probability.

    Inputs (forward):
        m_src_batch: [batch, m_src_len]
        m_src_lengths: [batch]
        c_src_batch: [batch, c_src_len]
        c_src_lengths: [batch]
        tgt_batch: [batch, tgt_len] (first token is <bos>)
        final_m_encoder_hidden_state / final_m_encoder_cell_state:
            [num_layers, batch, m_hidden_size]
        final_c_encoder_hidden_state / final_c_encoder_cell_state:
            [num_layers, batch, c_hidden_size]
        m_encoder_outputs: [batch, m_src_len, m_hidden_size]
        c_encoder_outputs: [batch, c_src_len, c_hidden_size]

    Outputs (forward):
        final_dist: per-step word distribution with copy mass folded in,
            [batch, tgt_len, vocab_size]
        m_attention_history: [batch, tgt_len, m_src_len]
        c_attention_history: [batch, tgt_len, c_src_len]
    """

    def __init__(self,
                 vocab,
                 embedding_size,
                 method_encoder_hidden_size,
                 context_encoder_hidden_size,
                 num_layers,
                 dropout):
        super(ConsumDecoder, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.pad_id = self.vocab[PAD_TOKEN]
        # Decoder hidden size = concatenation of both encoders' hidden sizes.
        self.hidden_size = method_encoder_hidden_size + context_encoder_hidden_size
        self.embedding = nn.Embedding(self.vocab_size, embedding_size, padding_idx=self.pad_id)
        self.out_layer = nn.Linear(self.hidden_size, self.vocab_size, bias=False)
        self.attn_layer = nn.Linear(3 * self.hidden_size, self.hidden_size)
        self.cell = ConsumDecoderCell(vocab=vocab,
                                      embedding_size=embedding_size,
                                      method_encoder_hidden_size=method_encoder_hidden_size,
                                      context_encoder_hidden_size=context_encoder_hidden_size,
                                      hidden_size=self.hidden_size,
                                      num_layers=num_layers,
                                      dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        # Copy gate over [c_context, m_context, decoder_input, decoder_hidden, decoder_cell]
        # -> 3 logits: generate / copy-from-method / copy-from-context.
        input_size = self.hidden_size + self.hidden_size + self.hidden_size + self.hidden_size + embedding_size
        self.copy_linear = nn.Linear(input_size, 3)

    def generate_init_cell_state(self,
                                 final_m_encoder_hidden_state,
                                 final_m_encoder_cell_state,
                                 final_c_encoder_hidden_state,
                                 final_c_encoder_cell_state,
                                 ):
        """Build the step-0 decoder state from the two encoders' final states.

        Context vectors start as zeros; the LSTM hidden/cell states are the
        two encoders' final states concatenated along the feature dimension.
        """
        # BUGFIX: previously used the global BATCH_SIZE and DEVICE, which
        # breaks on a partial final batch. Derive batch size, dtype and
        # device from the encoder state tensors instead.
        batch_size = final_m_encoder_hidden_state.size(1)
        init_m_context = final_m_encoder_hidden_state.new_zeros(batch_size, self.hidden_size)
        init_c_context = final_m_encoder_hidden_state.new_zeros(batch_size, self.hidden_size)
        return ConsumCellState(
            method_context=init_m_context,
            context_context=init_c_context,
            hidden_state=torch.cat([final_m_encoder_hidden_state, final_c_encoder_hidden_state], dim=2),
            cell_state=torch.cat([final_m_encoder_cell_state, final_c_encoder_cell_state], dim=2))

    def init_pre_output(self, batch_size=BATCH_SIZE):
        """Return a [batch_size, 1] LongTensor of <bos> ids (first decoder input).

        ``batch_size`` defaults to the global BATCH_SIZE for backward
        compatibility, but callers with a partial batch can pass the real size.
        NOTE(review): the tensor is created on CPU, as before — the caller is
        presumably responsible for moving it to DEVICE; confirm at call sites.
        """
        return torch.full((batch_size, 1), self.vocab[BOS_TOKEN], dtype=torch.long)

    def forward(self,
                m_src_batch,
                m_src_lengths,
                c_src_batch,
                c_src_lengths,
                tgt_batch,
                final_m_encoder_hidden_state,
                final_m_encoder_cell_state,
                final_c_encoder_hidden_state,
                final_c_encoder_cell_state,
                m_encoder_outputs,
                c_encoder_outputs):
        tgt_embeddings = self.embedding(tgt_batch)  # [batch, max_tgt_len, embedding_size]

        prev_cell_state = self.generate_init_cell_state(
            final_m_encoder_hidden_state,  # [num_layers, batch, hidden_size]
            final_m_encoder_cell_state,
            final_c_encoder_hidden_state,
            final_c_encoder_cell_state
        )

        output_seqs = []
        m_attention_history = []
        c_attention_history = []

        gen_seqs = []
        m_copy_seqs = []
        c_copy_seqs = []

        # max_tgt_len slices of [batch, 1, embedding_size]; the first is <bos>.
        steps_tgt = torch.split(tgt_embeddings, 1, dim=1)
        for cur_embed in steps_tgt:
            embed = cur_embed.squeeze(1)  # [batch, embedding_size]
            decoder_cell_state = self.cell(embed, prev_cell_state,
                                           m_encoder_outputs, m_src_lengths,
                                           c_encoder_outputs, c_src_lengths)

            # Attentional output: tanh(W [m_context ; c_context ; cell_output]).
            output = torch.cat([
                decoder_cell_state.method_context,
                decoder_cell_state.context_context,
                decoder_cell_state.output], 1)
            output = torch.tanh(self.attn_layer(output))
            output = self.dropout(output)

            output_seqs.append(output)
            m_attention_history.append(decoder_cell_state.method_alignments)
            c_attention_history.append(decoder_cell_state.context_alignments)

            # 3-way gate: generate vs copy-from-method vs copy-from-context.
            copy_input = torch.cat([
                decoder_cell_state.context_context,
                decoder_cell_state.method_context,
                embed,
                decoder_cell_state.hidden_state[-1],
                decoder_cell_state.cell_state[-1],
            ], dim=1)

            p_copy = torch.softmax(self.copy_linear(copy_input), dim=1)
            gen_seqs.append(p_copy[:, 0].unsqueeze(-1))
            m_copy_seqs.append(p_copy[:, 1].unsqueeze(-1))
            c_copy_seqs.append(p_copy[:, 2].unsqueeze(-1))

            prev_cell_state = decoder_cell_state

        p_gen_seqs = torch.stack(gen_seqs, 1)  # [batch, step, 1]
        p_m_copy_seqs = torch.stack(m_copy_seqs, 1)
        p_c_copy_seqs = torch.stack(c_copy_seqs, 1)

        output_seqs = torch.stack(output_seqs, 1)
        m_attention_history = torch.stack(m_attention_history, 1)  # [batch, step, m_src_len]
        c_attention_history = torch.stack(c_attention_history, 1)  # [batch, step, c_src_len]

        # Broadcast source ids across steps so they index scatter_add_ below.
        m_src_batch = m_src_batch.unsqueeze(1).expand_as(m_attention_history)
        c_src_batch = c_src_batch.unsqueeze(1).expand_as(c_attention_history)

        vocab_dist = F.softmax(self.out_layer(output_seqs), dim=2)
        vocab_dist = p_gen_seqs * vocab_dist

        m_copy_dist = p_m_copy_seqs * m_attention_history
        c_copy_dist = p_c_copy_seqs * c_attention_history

        # Fold the copy probability mass back onto the vocabulary distribution.
        final_dist = vocab_dist
        final_dist = final_dist.scatter_add_(2, m_src_batch, m_copy_dist)
        final_dist = final_dist.scatter_add_(2, c_src_batch, c_copy_dist)

        return final_dist, m_attention_history, c_attention_history
