import torch
import torch.nn as nn
import pytorch_lightning as pl
from modules.encoders.none_encoder import NoneEncoder
from modules.decoders.rnn_decoder import StdRNNDecoder
import torch.nn.functional as F
from utils.beam import BeamUtil
from utils.jsonl import JsonLWriter
from utils.metrics import batch_bleu


class CodeNN(pl.LightningModule):
    """CODE-NN: attention-only seq2seq for source-code summarization.

    The encoder is a plain embedding lookup (no recurrence); an LSTM decoder
    with dot-product attention over the token embeddings generates the
    natural-language summary.

    Reference:
    https://github.com/sriniiyer/codenn/blob/master/summarizing_source_code.pdf

    Hyper-parameters reported in the paper:
        batch_size : 100
        embedding_size, hidden_size : 400
        uniform init : [-0.35, 0.35]
        start learning rate : 0.5, decayed by 0.8 when validation accuracy improves
        early stop after 60 epochs if validation accuracy degrades
        stop training when the learning rate drops below 0.001
        gradient cap : 5, dropout : 0.5

    NOTE(review): this implementation deliberately deviates from the paper:
    it optimizes with Adam at a fixed ``lr`` (default 0.001), ``dropout``
    defaults to 0.1, and the uniform initialization is commented out below.
    """

    def __init__(self,
                 encoder_vocab,
                 decoder_vocab,
                 pad_token="<pad>",
                 bos_token="<bos>",
                 eos_token="<eos>",
                 e_embedding_size=400,
                 num_layers=1,
                 d_hidden=400,
                 d_embedding_size=400,
                 dropout=0.1,
                 lr=0.001,
                 translate_path=None
                 ):
        """
        :param encoder_vocab: source-side vocab; must support ``len()``,
            ``vocab[token] -> id`` and ``vocab.itos[id] -> token``.
        :param decoder_vocab: target-side vocab with the same interface.
        :param pad_token: padding token string; must map to the same id in
            both vocabularies (asserted below).
        :param bos_token: beginning-of-sequence token string.
        :param eos_token: end-of-sequence token string.
        :param e_embedding_size: encoder embedding size; must equal
            ``d_hidden`` because the decoder's zero initial state is sized
            from it (asserted below).
        :param num_layers: number of decoder LSTM layers.
        :param d_hidden: decoder hidden size.
        :param d_embedding_size: decoder embedding size.
        :param dropout: decoder dropout rate.
        :param lr: Adam learning rate.
        :param translate_path: optional JSONL path; when set, test-time
            hypothesis/reference pairs are written there.
        """
        super().__init__()
        assert e_embedding_size == d_hidden
        assert encoder_vocab[pad_token] == decoder_vocab[pad_token]

        self.encoder_vocab = encoder_vocab
        self.decoder_vocab = decoder_vocab

        self.pad_token = pad_token
        self.bos_token = bos_token
        self.eos_token = eos_token

        self.e_embedding_size = e_embedding_size
        self.d_embedding_size = d_embedding_size

        self.num_layers = num_layers
        self.enc_embedding = nn.Embedding(len(encoder_vocab), e_embedding_size, padding_idx=encoder_vocab[pad_token])
        self.dec_embedding = nn.Embedding(len(decoder_vocab), d_embedding_size, padding_idx=decoder_vocab[pad_token])

        self.encoder = NoneEncoder(embedding=self.enc_embedding)
        self.decoder = StdRNNDecoder(
            rnn_type="LSTM",
            bidirectional_encoder=False,
            num_layers=self.num_layers,
            hidden_size=d_hidden,
            attn_type="dot",
            dropout=dropout,
            embedding=self.dec_embedding)
        self.out_layer = nn.Linear(d_hidden, len(decoder_vocab))
        self.softmax = nn.Softmax(dim=-1)

        self.lr = lr

        self.save_hyperparameters(ignore=["encoder_vocab", "decoder_vocab", "translate_path"])

        self.translate_path = translate_path
        if translate_path is not None:
            self.saver = JsonLWriter(self.translate_path)
        else:
            self.saver = None

        # Paper's uniform init, intentionally disabled:
        # for p in self.parameters():
        #     if p.dim() > 1:
        #         nn.init.uniform_(p, -0.35, 0.35)

    def _zero_hidden(self, batch_size, like):
        """Build the decoder's all-zero initial (h, c) LSTM state.

        CODE-NN has no recurrent encoder, so the decoder starts from zeros
        and relies entirely on attention over the token embeddings.
        ``like`` fixes the device/dtype of the state tensors.
        """
        shape = (self.num_layers, batch_size, self.e_embedding_size)
        return (torch.zeros(*shape).type_as(like),
                torch.zeros(*shape).type_as(like))

    def forward(self, src, lengths, tgt):
        """Teacher-forced decode.

        :param src: (batch, src_len) source token ids — TODO confirm layout.
        :param lengths: (batch,) source lengths.
        :param tgt: target token ids fed to the decoder.
        :return: (probabilities over the decoder vocab, attention weights).
        """
        _, memory_bank, memory_lengths = self.encoder(src, lengths)
        batch_size = memory_lengths.shape[0]
        self.decoder.init_state(self._zero_hidden(batch_size, memory_bank))
        dec_outs, attn = self.decoder(tgt, memory_bank, memory_lengths)
        outs = self.softmax(self.out_layer(dec_outs))
        return outs, attn

    def training_step(self, batch, batch_idx: int):
        """Compute NLL loss with teacher forcing (shifted targets)."""
        src, lengths, tgt = batch['code'], batch['code_len'], batch['nl']
        # Feed tgt[:, :-1]; predict tgt[:, 1:].
        dist, _ = self(src, lengths, tgt[:, :-1])
        # forward() returns probabilities; clamp before log so that
        # zero-probability entries cannot produce -inf/NaN in the loss.
        dist = torch.clamp_min(dist, 1e-12)
        dist = dist.view(-1, len(self.decoder_vocab))
        # Use the configured pad token (was hard-coded "<pad>").
        loss = F.nll_loss(dist.log(), tgt[:, 1:].reshape(-1),
                          ignore_index=self.decoder_vocab[self.pad_token])
        self.log("train_loss", loss, on_epoch=True, prog_bar=True)
        return loss

    def _generate(self, src, lengths):
        """Beam-decode a batch and map id sequences to token strings.

        Strips a single trailing EOS from each hypothesis. Uses the
        configured ``eos_token`` (was hard-coded "<eos>") and guards
        against empty hypotheses, which would raise IndexError on s[-1].
        """
        batch_hyp, _, _ = self.beam_decode(src, lengths)
        eos_id = self.decoder_vocab[self.eos_token]
        batch_hyp = [s[:-1] if len(s) > 0 and s[-1] == eos_id else s for s in batch_hyp]
        return [[self.decoder_vocab.itos[token] for token in item] for item in batch_hyp]

    def validation_step(self, batch, batch_idx: int):
        """Beam-decode the batch and log corpus BLEU against raw references."""
        src, lengths, raw_tgt = batch['code'], batch['code_len'], batch['raw_nl']
        batch_hyp = self._generate(src, lengths)
        bleu = batch_bleu(batch_hyp, raw_tgt)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def test_step(self, batch, batch_idx: int):
        """Same as validation, plus optional JSONL dump of hyp/ref pairs."""
        src, lengths, raw_tgt = batch['code'], batch['code_len'], batch['raw_nl']
        batch_hyp = self._generate(src, lengths)
        if self.saver:
            for hyp, ref in zip(batch_hyp, raw_tgt):
                self.saver.save_one({"hyp": hyp, "ref": ref})
        bleu = batch_bleu(batch_hyp, raw_tgt)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def configure_optimizers(self):
        """Adam with a fixed learning rate (deviates from the paper's SGD)."""
        return torch.optim.Adam(self.parameters(), lr=self.lr)

    def beam_decode(self, src, lengths):
        """Beam search over the decoder.

        :return: (predictions, scores, attention); with n_best == 1 each is
            a list with the single best entry per batch element.
        """
        bu = BeamUtil(
            # Use the configured special tokens (were hard-coded literals).
            bos_id=self.decoder_vocab[self.bos_token],
            pad_id=self.decoder_vocab[self.pad_token],
            eos_id=self.decoder_vocab[self.eos_token],
            vocab_size=len(self.decoder_vocab),
            beam_width=4,
            with_attention=True,
            n_best=1,
            max_iter=30,
            length_penalty=0.4,
            coverage_penalty=0.
        )
        batch_beam = bu.gen_batch_beam(
            batch_size=src.shape[0],
            device=self.device,
            src_length=lengths)

        # The encoder's state output is unused; the decoder starts from zeros.
        _, memory_bank, memory_lengths = self.encoder(src, lengths)
        batch_size = memory_lengths.shape[0]
        final_state = self._zero_hidden(batch_size, memory_bank)

        # Tile everything beam_width times along the batch dimension.
        memory_bank = bu.repeat(memory_bank, dim=0)
        memory_lengths = bu.repeat(memory_lengths, dim=0)
        final_state = tuple([bu.repeat(each, dim=1) for each in final_state])

        self.decoder.init_state(final_state)

        for step in range(bu.max_iter):
            # One decode step per surviving beam.
            beam_tgt = batch_beam.current_predictions.unsqueeze(1)
            dec_outs, attn = self.decoder(beam_tgt, memory_bank, memory_lengths)
            dec_outs = dec_outs.squeeze(1)
            attn = attn.squeeze(1)
            dist = self.softmax(self.out_layer(dec_outs))
            # Clamp before log, as in training.
            dist = torch.clamp_min(dist, 1e-12)
            batch_beam.advance(dist.log(), attn.unsqueeze(0))

            any_beam_finished = batch_beam.is_finished.any()
            if any_beam_finished:
                batch_beam.update_finished()
                if batch_beam.done:
                    break

            # NOTE(review): memory reordering only when a beam finished AND
            # attention is enabled — presumably update_finished() shrinks the
            # live batch only in that configuration; verify against BeamUtil.
            if any_beam_finished and bu.with_attention:
                memory_bank = memory_bank.index_select(0, batch_beam.current_origin)
                memory_lengths = memory_lengths.index_select(0, batch_beam.current_origin)

            # Reorder the LSTM (h, c) state to follow the surviving beams.
            self.decoder.state.hidden = (self.decoder.state.hidden[0].index_select(1, batch_beam.current_origin),
                                         self.decoder.state.hidden[1].index_select(1, batch_beam.current_origin))

        f = lambda x: [each[0] for each in x]
        if bu.n_best == 1:
            # Unwrap the single best hypothesis per batch element.
            return f(batch_beam.predictions), f(batch_beam.scores), f(batch_beam.attention)
        else:
            return batch_beam.predictions, batch_beam.scores, batch_beam.attention