from typing import Dict, List

import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from ginnm.tuple_map import tuple_map
from torchtext.data import Field

from .modules.parallel_rnn_decoder import ParallelRNNDecoder
from .modules.parallel_rnn_encoder import ParallelRNNEncoder
from .modules.rnn_encoder import RNNEncoder
from .utils.beam import BeamUtil
from .utils.metrics import batch_bleu, batch_meteor


class MulEncNet(pl.LightningModule):
    """Multi-encoder sequence-to-sequence model.

    Builds one bidirectional LSTM encoder per source text field (every
    name in ``text_names`` except ``target_name``); a single LSTM decoder
    attends over all encoder memories and generates the target sequence.
    Training uses teacher forcing with NLL loss; validation/test also run
    beam search and report BLEU-4 and METEOR.
    """

    def __init__(self,
                 target_name: str,
                 text_names: List[str],
                 filed_dict: Dict[str, Field],
                 max_out=20,
                 num_layers=1,
                 embedding_size=128,
                 hidden_size=256,
                 dropout=0.1,
                 lr=0.001
                 ):
        """
        Args:
            target_name: field name of the sequence to generate.
            text_names: all field names; each non-target name gets its own
                encoder (in ``text_names`` order).
            filed_dict: mapping field name -> torchtext ``Field``
                (parameter name kept as-is, "filed" [sic], for
                checkpoint/hparams compatibility).
            max_out: maximum number of beam-search decoding steps.
            num_layers: RNN layers in each encoder and in the decoder.
            embedding_size: token embedding dimension.
            hidden_size: RNN hidden dimension.
            dropout: dropout rate inside encoders and decoder.
            lr: Adam learning rate.
        """
        super(MulEncNet, self).__init__()

        self.text_names = text_names
        self.target_name = target_name

        # Hyper-parameters
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lr = lr
        # NOTE(review): assumes target_name appears in text_names; otherwise
        # this disagrees with the number of encoders actually built below.
        self.num_encoders = len(text_names) - 1
        self.max_out = max_out

        # Network: one encoder (with its own embedding) per source field.
        encoders = []
        for name in text_names:
            if name == target_name:
                continue
            field = filed_dict[name]
            pad_id = field.vocab[field.pad_token]
            embedding = nn.Embedding(len(field.vocab), embedding_size, padding_idx=pad_id)
            encoders.append(RNNEncoder(
                rnn_type="LSTM",
                bidirectional=True,
                num_layers=self.num_layers,
                hidden_size=self.hidden_size,
                embedding=embedding,
                dropout=dropout
            ))

        self.encoder = ParallelRNNEncoder(encoders)

        # Special-token ids of the target vocabulary.
        target_field = filed_dict[target_name]
        self.target_pad_token = target_field.pad_token
        self.target_pad_id = target_field.vocab[target_field.pad_token]
        self.target_bos_token = target_field.init_token
        self.target_bos_id = target_field.vocab[target_field.init_token]
        self.target_eos_token = target_field.eos_token
        self.target_eos_id = target_field.vocab[target_field.eos_token]

        self.target_vocab = target_field.vocab
        target_embedding = nn.Embedding(len(self.target_vocab), embedding_size,
                                        padding_idx=self.target_pad_id)
        self.decoder = ParallelRNNDecoder(
            rnn_type="LSTM",
            num_encoders=len(encoders),
            bidirectional_encoders=True,
            num_layers=self.num_layers,
            hidden_size=self.hidden_size,
            init_reduce="mlp",
            context_reduce="cat",
            dropout=dropout,
            embedding=target_embedding
        )
        # context_reduce="cat" concatenates one context per encoder, hence
        # the factor len(encoders) on the projection input width.
        self.out_layer = nn.Linear(
            self.decoder.hidden_size * len(encoders),
            len(self.target_vocab)
        )
        self.softmax = nn.Softmax(dim=-1)
        self.save_hyperparameters()

    def forward(self, src_list, lengths, tgt):
        """Teacher-forced forward pass.

        Args:
            src_list: list of source token-id tensors, one per encoder.
            lengths: list of matching source-length tensors.
            tgt: target token ids fed to the decoder.

        Returns:
            (probs, attn): probs is [batch_size, tgt_len, target_vocab]
            after softmax; attn is whatever the decoder returns.
        """
        final_states, memory_banks, memory_lengths = self.encoder(src_list, lengths)
        self.decoder.init_state(final_states)
        dec_outs, attn = self.decoder(tgt, memory_banks, memory_lengths)
        logits = self.out_layer(dec_outs)  # [batch_size, tgt_len, target_vocab]
        return self.softmax(logits), attn

    def configure_optimizers(self):
        """Plain Adam over all parameters."""
        return torch.optim.Adam(params=self.parameters(), lr=self.lr)

    def _unpack_batch(self, batch):
        """Split a batch dict into (src_list, lengths, tgt), sources in
        text_names order with the target field excluded."""
        src_names = [name for name in self.text_names if name != self.target_name]
        src_list = [batch[name]['input'] for name in src_names]
        lengths = [batch[name]['length'] for name in src_names]
        return src_list, lengths, batch[self.target_name]['input']

    def _nll_loss(self, src_list, lengths, tgt):
        """Teacher-forced NLL loss: feed tgt[:, :-1], predict tgt[:, 1:]."""
        dist, _ = self.forward(src_list, lengths, tgt[:, :-1])
        dist = torch.clamp_min(dist, 1e-12)  # guard log(0)
        dist = dist.view(-1, len(self.target_vocab))
        return F.nll_loss(dist.log(), tgt[:, 1:].reshape(-1),
                          ignore_index=self.target_pad_id)

    def _decode_metrics(self, src_list, lengths, batch_ref):
        """Beam-decode, detokenize, and score against raw references.

        Returns:
            (bleu, meteor) corpus scores for the batch.
        """
        batch_hyp, _, _ = self.beam_decode(src_list, lengths)
        # Strip a trailing EOS; the length guard avoids an IndexError on an
        # empty hypothesis (previously s[-1] could raise).
        batch_hyp = [s[:-1] if len(s) > 0 and s[-1] == self.target_eos_id else s
                     for s in batch_hyp]
        batch_hyp = [[self.target_vocab.itos[token] for token in item] for item in batch_hyp]
        return batch_bleu(batch_hyp, batch_ref), batch_meteor(batch_hyp, batch_ref)

    def _shared_eval(self, batch, prefix: str):
        """Loss + beam-search metrics shared by validation and test steps.

        Logs ``{prefix}_loss``, ``{prefix}_bleu4``, ``{prefix}_meteor`` and
        returns the batch BLEU-4 score.
        """
        src_list, lengths, tgt = self._unpack_batch(batch)
        loss = self._nll_loss(src_list, lengths, tgt)
        self.log(prefix + "_loss", loss.item(), on_step=True, on_epoch=True, prog_bar=True)
        bleu, meteor = self._decode_metrics(src_list, lengths, batch[self.target_name]['raw'])
        self.log(prefix + "_bleu4", bleu, on_epoch=True, prog_bar=True)
        self.log(prefix + "_meteor", meteor, on_epoch=True, prog_bar=True)
        return bleu

    def training_step(self, batch, batch_idx: int):
        """Return the teacher-forced NLL loss for one training batch."""
        src_list, lengths, tgt = self._unpack_batch(batch)
        return self._nll_loss(src_list, lengths, tgt)

    def validation_step(self, batch, batch_idx: int):
        """Log val_loss / val_bleu4 / val_meteor; return BLEU-4."""
        return self._shared_eval(batch, "val")

    def validation_epoch_end(self, outputs) -> None:
        # Emit a newline so each epoch's progress-bar line is kept readable.
        print("", end="\n")

    def test_step(self, batch, batch_idx: int):
        """Log test_loss / test_bleu4 / test_meteor; return BLEU-4."""
        return self._shared_eval(batch, "test")

    def beam_decode(self, src_list, lengths):
        """Batch beam search (width 4, length penalty 0.4) over the
        multi-encoder outputs.

        Returns:
            (predictions, scores, None). With n_best == 1 each entry is the
            single best hypothesis / score per example.
        """
        # Field whose batch size, device, and lengths drive the beam.
        primary_id = 0
        bu = BeamUtil(
            bos_id=self.target_bos_id,
            pad_id=self.target_pad_id,
            eos_id=self.target_eos_id,
            vocab_size=len(self.target_vocab),
            beam_width=4,
            with_attention=False,
            n_best=1,
            max_iter=self.max_out,
            length_penalty=0.4,
            coverage_penalty=0.
        )

        batch_beam = bu.gen_batch_beam(
            batch_size=src_list[primary_id].shape[0],
            device=src_list[primary_id].device,
            src_length=lengths[primary_id])

        final_states, memory_banks, memory_lengths = self.encoder(src_list, lengths)

        # Tile encoder memories beam_width times along the batch axis (dim 0)
        # and the encoder final states along their batch axis (dim 1).
        memory_banks = [bu.repeat(each, dim=0) for each in memory_banks]
        memory_lengths = [bu.repeat(each, dim=0) for each in memory_lengths]
        for idx, enc_state in enumerate(final_states):
            final_states[idx] = tuple_map(lambda t: bu.repeat(t, 1), enc_state)

        self.decoder.init_state(final_states)

        for _ in range(bu.max_iter):
            beam_tgt = batch_beam.current_predictions.unsqueeze(1)
            dec_outs, attn = self.decoder(beam_tgt, memory_banks, memory_lengths)
            dist = self.softmax(self.out_layer(dec_outs.squeeze(1)))
            dist = torch.clamp_min(dist, 1e-12)  # guard log(0)
            batch_beam.advance(dist.log(), None)

            if batch_beam.is_finished.any():
                batch_beam.update_finished()
                if batch_beam.done:
                    break
                # Drop finished examples from the tiled encoder memories.
                memory_banks = [each.index_select(0, batch_beam.current_origin)
                                for each in memory_banks]
                memory_lengths = [each.index_select(0, batch_beam.current_origin)
                                  for each in memory_lengths]

            # Re-align the decoder state with the surviving beams.
            self.decoder.state.batch_select(batch_beam.current_origin)

        if bu.n_best == 1:
            best = lambda entries: [each[0] for each in entries]
            return best(batch_beam.predictions), best(batch_beam.scores), None
        return batch_beam.predictions, batch_beam.scores, None
