import torch
import torch.nn as nn
from modules.global_attn import DotAttention
from utils.common import tuple_map


class CellState:
    """Decoder state carried between time steps.

    Attributes:
        output: [batch, hidden] LSTM output of the last step, or None.
        contexts: list (one entry per encoder) of [batch, hidden] attention
            context vectors, or None.
        alignments: list (one entry per encoder) of attention weight tensors,
            or None.
        hidden: [num_layers, batch, hidden] LSTM hidden state, or None before
            the first step.
        cell: [num_layers, batch, hidden] LSTM cell state, or None before the
            first step.

    Fix vs. original: the rest of this file constructs CellState with
    ``hidden=``/``cell=`` keywords and reads ``.hidden``/``.cell``
    (Cell.forward, JointLstmDecoder.generate_init_cell_state), but the old
    __init__ only accepted a single ``state`` argument — every decoding step
    raised TypeError. The constructor now matches its callers.
    """

    def __init__(self,
                 output=None,
                 contexts=None,
                 alignments=None,
                 hidden=None,
                 cell=None):
        self.output = output
        self.contexts = contexts
        self.alignments = alignments
        self.hidden = hidden
        self.cell = cell

    def batch_select(self, indices):
        """Reorder the batch dimension by `indices` (used by beam search).

        Fixes vs. original: ``contexts``/``alignments`` are Python *lists* of
        per-encoder tensors, so index_select must be mapped over them; LSTM
        hidden/cell states are [num_layers, batch, hidden], so the batch axis
        is dim 1, not dim 0. ``output`` is intentionally not carried over,
        matching the original behavior.
        """
        def select(x, dim=0):
            return x.index_select(dim, indices)

        contexts = None if self.contexts is None else [select(c) for c in self.contexts]
        alignments = None if self.alignments is None else [select(a) for a in self.alignments]
        # LSTM states: batch lives on dim 1.
        hidden = None if self.hidden is None else select(self.hidden, 1)
        cell = None if self.cell is None else select(self.cell, 1)
        return CellState(
            contexts=contexts,
            alignments=alignments,
            hidden=hidden,
            cell=cell
        )


class Cell(nn.Module):
    """One decoding step: an LSTM over [previous embedding; per-encoder
    contexts], followed by one attention module per encoder.

    Fixes vs. original:
    - __init__ parameters renamed to ``embedding_size`` and ``encoders`` —
      the old names (``embedding``, ``bidrectional_encoder``) matched neither
      the body (which used the new names, raising NameError) nor the caller,
      JointLstmDecoder, which passes ``embedding_size=``/``encoders=``.
    - ``Attention`` was an unresolved name; the class imported at the top of
      the file is ``DotAttention``.
    """

    def __init__(self,
                 embedding_size,
                 encoders,
                 hidden_size,
                 num_layers,
                 dropout):
        super(Cell, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.hidden_size = hidden_size
        # The decoder input at each step is [previous embedding, one context
        # vector per encoder], so the LSTM input grows by hidden_size per
        # encoder.
        self.input_size = embedding_size
        self.encoders = encoders
        for _ in encoders:
            self.input_size += hidden_size
        self.cell = nn.LSTM(self.input_size,
                            hidden_size=self.hidden_size,
                            num_layers=num_layers,
                            batch_first=True,
                            dropout=dropout,
                            bidirectional=False)
        self.attentions = nn.ModuleList([
            DotAttention(enc.hidden_size, self.hidden_size, True) for enc in self.encoders
        ])

    def forward(self,
                pre_embedding,
                pre_state: CellState,
                encoders_outputs,
                encoders_lengths):
        """Run a single decoder step.

        Args:
            pre_embedding: [batch, embedding_size] embedding of the previous
                target token.
            pre_state: CellState of the previous step; its hidden/cell may be
                None on the very first step, in which case the LSTM starts
                from its default zero state.
            encoders_outputs: per-encoder list of encoder output tensors.
            encoders_lengths: per-encoder list of source-length tensors.

        Returns:
            A new CellState holding the LSTM output, the per-encoder attention
            contexts and alignments, and the updated hidden/cell states.
        """
        embedding = pre_embedding
        inputs = [embedding] + [each for each in pre_state.contexts]
        # [batch, 1, embedding_size + num_encoders * hidden_size]
        inputs = torch.cat(inputs, 1).unsqueeze(1)
        if pre_state.hidden is None and pre_state.cell is None:
            output, (hidden, cell_state) = self.cell(inputs)
        else:
            state = (
                pre_state.hidden,
                pre_state.cell
            )
            output, (hidden, cell_state) = self.cell(inputs, state)
        output = output.squeeze(1)  # [batch, hidden_size]
        # Attend over each encoder's outputs with the fresh decoder output.
        # Each attention is assumed to return (context, alignment) —
        # TODO confirm against DotAttention's definition.
        attn_outputs = [
            attn(output, encoders_outputs[idx], encoders_lengths[idx])
            for idx, attn in enumerate(self.attentions)
        ]
        contexts = [each[0] for each in attn_outputs]
        alignments = [each[1] for each in attn_outputs]
        decoder_cell_state = CellState(
            output=output,
            contexts=contexts,
            alignments=alignments,
            hidden=hidden,
            cell=cell_state
        )
        return decoder_cell_state


class JointLstmDecoder(nn.Module):
    """Attentional LSTM decoder conditioned on several encoders at once.

    Every step feeds the previous token embedding plus one attention context
    per encoder through `Cell`, then projects the concatenated contexts to a
    distribution over the target vocabulary.

    NOTE(review): `Field`, `PAD_TOKEN`, `BOS_TOKEN` and `Config` are not
    imported in the visible portion of this file — presumably supplied by
    torchtext / project-level modules; confirm the imports exist.
    """

    def __init__(self,
                 field: Field,
                 use_embedding: bool,
                 embedding_size,
                 encoders,
                 num_layers,
                 dropout,
                 transform: bool,
                 hidden_size: int
                 ):
        """
        Args:
            field: target-side vocabulary holder (torchtext-style `Field`).
            use_embedding: if True, build an internal nn.Embedding; otherwise
                `forward` expects pre-embedded targets.
            embedding_size: size of target token embeddings.
            encoders: list of encoder modules, each exposing `hidden_size`.
            num_layers: number of LSTM layers in the decoder cell.
            dropout: dropout probability for the decoder LSTM.
            transform: if True, project concatenated encoder states down to
                `hidden_size`; ignored (forced False) with fewer than 2 encoders.
            hidden_size: decoder hidden size used only when `transform` is True.
        """
        super(JointLstmDecoder, self).__init__()
        self.field = field
        self.vocab_size = len(self.field.vocab)
        self.pad_id = self.field.vocab.stoi[PAD_TOKEN]
        self.encoders = encoders
        # Projecting concatenated encoder states is only meaningful when
        # there is more than one encoder.
        if len(self.encoders) < 2:
            transform = False
        self.transform = transform
        if not transform:
            # Decoder hidden size = sum of encoder hidden sizes, so the
            # concatenated encoder final states can seed the LSTM directly.
            self.hidden_size = 0
            for encoder in encoders:
                self.hidden_size += encoder.hidden_size
        else:
            # Learn a projection from the concatenated encoder states
            # (transform_size) down to the requested hidden_size.
            transform_size = 0
            for encoder in encoders:
                transform_size += encoder.hidden_size
            self.hidden_size = hidden_size
            self.h_transform_layer = nn.Linear(transform_size, hidden_size, bias=False)
            self.c_transform_layer = nn.Linear(transform_size, hidden_size, bias=False)

        self.use_embedding = use_embedding
        if use_embedding:
            self.embedding = nn.Embedding(self.vocab_size, embedding_size, padding_idx=self.pad_id)
        else:
            # Caller must pass already-embedded targets to forward().
            self.embedding = None

        # Input is the concatenation of one context vector per encoder.
        self.out_layer = nn.Linear(len(self.encoders) * self.hidden_size, self.vocab_size, bias=False)
        self.cell = Cell(embedding_size=embedding_size,
                         encoders=encoders,
                         hidden_size=self.hidden_size,
                         num_layers=num_layers,
                         dropout=dropout)

    def generate_init_cell_state(self,
                                 encoders_hidden,
                                 encoders_cell):
        """Build the step-0 decoder state from the encoders' final states.

        Args:
            encoders_hidden: per-encoder list of [num_layers, batch, enc_hidden]
                hidden-state tensors.
            encoders_cell: per-encoder list of [num_layers, batch, enc_hidden]
                cell-state tensors.

        Returns:
            CellState with zero attention contexts and the (optionally
            transformed) concatenated encoder states as hidden/cell.
        """
        batch_size = encoders_hidden[0].size(1)
        device = encoders_hidden[0].device
        # No attention has run yet: start from zero context vectors.
        init_contexts = [torch.zeros(batch_size, self.hidden_size, device=device) for _ in self.encoders]
        hidden = torch.cat(encoders_hidden, dim=2)
        cell = torch.cat(encoders_cell, dim=2)
        if self.transform:
            hidden = torch.sigmoid(self.h_transform_layer(hidden))
            cell = torch.sigmoid(self.c_transform_layer(cell))

        return CellState(
            contexts=init_contexts,
            hidden=hidden,
            cell=cell)

    def init_pre_output(self, batch_size, device=Config.DEVICE):
        """Return a [batch] long tensor filled with the <bos> token id."""
        return torch.zeros(batch_size).to(torch.long).to(device) + self.field.vocab.stoi[BOS_TOKEN]

    def one_step(self,
                 cur_embed,
                 prev_cell_state,
                 encoders_outputs,
                 src_lengths):
        """Run one decoding step and project the contexts to a vocab distribution.

        Returns:
            (new CellState, [batch, vocab_size] softmax distribution).
        """
        cur_decoder_cell_state = self.cell(pre_embedding=cur_embed,
                                           pre_state=prev_cell_state,
                                           encoders_outputs=encoders_outputs,
                                           encoders_lengths=src_lengths)
        # Concatenate the per-encoder context vectors before projecting.
        out = torch.cat([each for each in cur_decoder_cell_state.contexts], dim=1)
        gen_dist = self.out_layer(out)
        gen_dist = torch.softmax(gen_dist, dim=1)  # [batch, vocab_size]
        return cur_decoder_cell_state, gen_dist

    def forward(self,
                src_list,
                src_lengths,
                tgt,
                encoders_hidden,
                encoders_cell,
                encoders_outputs):
        """Teacher-forced decoding over the whole target sequence.

        Args:
            src_list: source batches (unused here; kept for the caller's API —
                TODO confirm).
            src_lengths: per-encoder list of source-length tensors.
            tgt: [batch, max_tgt_len] token ids, or pre-embedded
                [batch, max_tgt_len, embedding_size] when no embedding is set.
            encoders_hidden / encoders_cell: per-encoder final LSTM states.
            encoders_outputs: per-encoder output sequences to attend over.

        Returns:
            (dist_history [batch, max_tgt_len, vocab_size],
             attention_histories: per-encoder list of stacked alignments).
        """
        if self.embedding is None:
            tgt_embeddings = tgt  # [batch,max_tgt_len,embedding_size]
        else:
            tgt_embeddings = self.embedding(tgt)  # [batch, max_tgt_len ,embedding_size]

        prev_cell_state = self.generate_init_cell_state(
            encoders_hidden=encoders_hidden,
            encoders_cell=encoders_cell
        )

        attention_histories = [[] for _ in self.encoders]
        dist_history = []

        steps_tgt = torch.split(tgt_embeddings, 1, dim=1)  # max_tgt_len tensors of [batch, 1, embedding_size]
        for cur_embed in steps_tgt:
            # cur_embed is [batch, 1, embedding_size]; the first step is <bos>
            cur_embed = cur_embed.squeeze(1)
            decoder_cell_state, dist = self.one_step(cur_embed,
                                                     prev_cell_state,
                                                     encoders_outputs,
                                                     src_lengths)

            dist_history.append(dist)
            # Record each encoder's alignment weights for this step.
            for idx, _ in enumerate(self.encoders):
                attention_histories[idx].append(decoder_cell_state.alignments[idx])
            prev_cell_state = decoder_cell_state

        # Stack per-step results along the time dimension.
        dist_history = torch.stack(dist_history, 1)
        attention_histories = [torch.stack(each, 1) for each in attention_histories]
        return dist_history, attention_histories
