import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from modules.encoders.rnn_encoder import RNNEncoder
from modules.decoders.rnn_decoder import InputFeedRNNDecoder
from utils.beam import BeamUtil
from torch import Tensor
from typing import List
import torch.nn.functional as F
from utils.metrics import batch_bleu
from utils.jsonl import JsonLWriter


# noinspection DuplicatedCode
class BiLstm(pl.LightningModule):
    """Seq2seq translation model (code -> natural language).

    Bidirectional LSTM encoder feeding an input-feed LSTM decoder with dot
    attention. Trained with teacher forcing and NLL loss; validated/tested
    by beam-search decoding scored with batch BLEU.
    """

    def __init__(self,
                 encoder_vocab,
                 decoder_vocab,
                 num_layers=2,
                 embedding_size=128,
                 hidden=256,
                 dropout=0.1,
                 lr=0.001,
                 translate_path=None):
        """
        Args:
            encoder_vocab: source vocabulary; must support ``len()`` and
                ``["<pad>"]`` lookup.
            decoder_vocab: target vocabulary; additionally must provide
                ``"<bos>"``/``"<eos>"`` ids and an ``itos`` id->token mapping.
            num_layers: stacked LSTM layers in both encoder and decoder.
            embedding_size: token embedding dimension (shared by both sides).
            hidden: per-direction encoder hidden size; the decoder runs at
                ``2 * hidden`` to absorb the bidirectional encoder state.
            dropout: dropout rate inside the RNN stacks.
            lr: Adam learning rate.
            translate_path: optional JSONL path; when set, test-time
                hypothesis/reference pairs are appended there.
        """
        super().__init__()

        # Vocabularies (excluded from hparams below — they are not scalars).
        self.encoder_vocab = encoder_vocab
        self.decoder_vocab = decoder_vocab

        # Hyper-parameters
        self.e_embedding_size = embedding_size
        self.d_embedding_size = embedding_size
        self.hidden_size = hidden
        self.num_layers = num_layers

        # Network
        enc_embedding = nn.Embedding(len(encoder_vocab), embedding_size, padding_idx=self.encoder_vocab["<pad>"])
        dec_embedding = nn.Embedding(len(decoder_vocab), embedding_size, padding_idx=self.decoder_vocab["<pad>"])

        self.encoder = RNNEncoder(
            rnn_type="LSTM",
            bidirectional=True,
            num_layers=self.num_layers,
            hidden_size=self.hidden_size,
            embedding=enc_embedding,
            dropout=dropout
        )
        self.decoder = InputFeedRNNDecoder(
            rnn_type="LSTM",
            bidirectional_encoder=True,
            num_layers=self.num_layers,
            # Doubled: the bidirectional encoder emits 2*hidden features.
            hidden_size=2 * self.hidden_size,
            attn_type="dot",
            dropout=dropout,
            embedding=dec_embedding,
        )
        self.out_layer = nn.Linear(self.decoder.hidden_size, len(decoder_vocab))
        self.softmax = nn.Softmax(dim=-1)

        self.lr = lr
        self.save_hyperparameters(ignore=["encoder_vocab", "decoder_vocab", "translate_path"])
        self.translate_path = translate_path
        self.saver = JsonLWriter(self.translate_path) if translate_path is not None else None

    def forward(self, src, lengths, tgt):
        """Teacher-forced forward pass.

        Returns:
            (probs, attn): softmax-normalized per-step distributions over the
            target vocabulary, and the decoder attention weights.
        """
        final_state, memory_bank, memory_lengths = self.encoder(src, lengths)
        self.decoder.init_state(final_state)
        dec_outs, attn = self.decoder(tgt, memory_bank, memory_lengths)
        outs = self.out_layer(dec_outs)
        return self.softmax(outs), attn

    def training_step(self, batch, batch_idx: int):
        """NLL loss with teacher forcing: predict tgt[1:] from tgt[:-1]."""
        src, lengths, tgt = batch['code'], batch['code_len'], batch['nl']
        dist, attn = self(src, lengths, tgt[:, :-1])
        # Clamp before log() so zero probabilities cannot produce -inf / NaN.
        dist = torch.clamp_min(dist, 1e-12)
        dist = dist.view(-1, len(self.decoder_vocab))
        loss = F.nll_loss(dist.log(), tgt[:, 1:].reshape(-1), ignore_index=self.decoder_vocab["<pad>"])
        self.log("train_loss", loss, on_epoch=True, prog_bar=True)
        return loss

    def _generate_hypotheses(self, src, lengths):
        """Beam-decode a batch and return hypotheses as token-string lists.

        Shared by validation_step and test_step. Strips a trailing <eos>;
        the ``len(s) > 0`` guard protects against empty predictions, which
        would otherwise raise on ``s[-1]``.
        """
        eos_id = self.decoder_vocab['<eos>']
        batch_hyp, _, _ = self.beam_decode(src, lengths)
        batch_hyp = [s[:-1] if len(s) > 0 and s[-1] == eos_id else s for s in batch_hyp]
        return [[self.decoder_vocab.itos[token] for token in item] for item in batch_hyp]

    def validation_step(self, batch, batch_idx: int):
        """Log batch BLEU of beam-search hypotheses against raw references."""
        src, lengths, raw_tgt = batch['code'], batch['code_len'], batch['raw_nl']
        batch_hyp = self._generate_hypotheses(src, lengths)
        bleu = batch_bleu(batch_hyp, raw_tgt)
        self.log("bleu", bleu, on_epoch=True, prog_bar=True)
        return bleu

    def test_step(self, batch, batch_idx: int):
        """Like validation, but optionally persists hyp/ref pairs as JSONL."""
        src, lengths, raw_tgt = batch['code'], batch['code_len'], batch['raw_nl']
        batch_hyp = self._generate_hypotheses(src, lengths)
        if self.saver:
            for hyp, ref in zip(batch_hyp, raw_tgt):
                self.saver.save_one({"hyp": hyp, "ref": ref})
        bleu = batch_bleu(batch_hyp, raw_tgt)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def configure_optimizers(self):
        """Plain Adam over all model parameters."""
        return torch.optim.Adam(self.parameters(), lr=self.lr)

    def beam_decode(self, src, lengths):
        """Beam-search decode a batch.

        Returns:
            (predictions, scores, attn). With ``n_best == 1`` (the value
            hard-coded below) predictions/scores are flat lists holding the
            single best hypothesis per example; attn is always None because
            attention tracking is disabled in BeamUtil.
        """
        bu = BeamUtil(
            bos_id=self.decoder_vocab["<bos>"],
            pad_id=self.decoder_vocab["<pad>"],
            eos_id=self.decoder_vocab["<eos>"],
            vocab_size=len(self.decoder_vocab),
            beam_width=4,
            with_attention=False,
            n_best=1,
            max_iter=30,
            length_penalty=0.4,
            coverage_penalty=0.
        )

        batch_beam = bu.gen_batch_beam(
            batch_size=src.shape[0],
            device=src.device,
            src_length=lengths)

        final_state, memory_bank, memory_lengths = self.encoder(src, lengths)

        # Tile encoder outputs beam_width times so each beam has its own copy.
        # NOTE(review): dim=1 presumes the state layout keeps batch on axis 1
        # (layers, batch, hidden) — confirm against RNNEncoder.
        final_state = tuple(bu.repeat(each, dim=1) for each in final_state)
        memory_bank = bu.repeat(memory_bank, dim=0)
        memory_lengths = bu.repeat(memory_lengths, dim=0)

        self.decoder.init_state(final_state)

        for step in range(bu.max_iter):
            beam_tgt = batch_beam.current_predictions.unsqueeze(1)
            dec_outs, _ = self.decoder(beam_tgt, memory_bank, memory_lengths)
            dec_outs = dec_outs.squeeze(1)
            dist = self.softmax(self.out_layer(dec_outs))
            # Clamp before log() to keep beam scores finite.
            dist = torch.clamp_min(dist, 1e-12)
            batch_beam.advance(dist.log(), None)

            if batch_beam.is_finished.any():
                batch_beam.update_finished()
                if batch_beam.done:
                    break

            # Reorder all per-beam state to follow the surviving hypotheses.
            memory_bank = memory_bank.index_select(0, batch_beam.current_origin)
            memory_lengths = memory_lengths.index_select(0, batch_beam.current_origin)
            self.decoder.state.hidden = (
                self.decoder.state.hidden[0].index_select(1, batch_beam.current_origin),
                self.decoder.state.hidden[1].index_select(1, batch_beam.current_origin),
            )
            self.decoder.state.input_feed = self.decoder.state.input_feed.index_select(0, batch_beam.current_origin)

        if bu.n_best == 1:
            # Unwrap the n_best dimension: keep only the top hypothesis each.
            best = lambda xs: [each[0] for each in xs]
            return best(batch_beam.predictions), best(batch_beam.scores), None
        return batch_beam.predictions, batch_beam.scores, None
