import pytorch_lightning as pl
import torch.nn as nn
import torch
from modules.encoders.rnn_encoder import RNNEncoder
from modules.encoders.parallel_encoder import ParallelRnnEncoder
from modules.decoders.parallel_decoder import ParallelRNNDecoder
from utils.beam import BeamUtil
import torch.nn.functional as F
from utils.metrics import batch_bleu
from argparse import ArgumentParser
from utils.jsonl import JsonLWriter
import gc
from utils.common import tuple_map


class CsGen(pl.LightningModule):
    """Pointer-generator seq2seq model for code summarization.

    Two bidirectional LSTM encoders (raw code tokens and code "units",
    sharing one embedding table) run in parallel; a parallel attention
    decoder produces a generation distribution over the NL vocabulary,
    which is mixed with a copy distribution over source-code tokens via a
    learned gate ``p_gen`` (pointer network).
    """

    @staticmethod
    def add_model_args(parser: ArgumentParser):
        """Register the model's hyperparameter flags on *parser*."""
        parser.add_argument("--layers", type=int, default=2)
        parser.add_argument("--embedding", type=int, default=128)
        parser.add_argument("--hidden", type=int, default=256)
        parser.add_argument("--dropout", type=float, default=0.1)

    def __init__(self,
                 code_vocab,
                 nl_vocab,
                 vocab_map,
                 num_layers,
                 embedding_size,
                 hidden_size,
                 dropout,
                 lr,
                 translate_path=None):
        """Build the encoders, decoder, pointer gate and output projection.

        Args:
            code_vocab: source-code token vocabulary; must support ``len()``,
                ``["<pad>"]`` lookup (and ``.itos`` is used on ``nl_vocab``).
            nl_vocab: natural-language vocabulary with the same interface,
                plus ``<bos>``/``<eos>`` entries used by beam search.
            vocab_map: matrix mapping the copy distribution from
                code-vocab space into NL-vocab space — presumably shaped
                [len(code_vocab), len(nl_vocab)]; used as the right operand
                of ``torch.matmul`` in :meth:`forward`.
            num_layers: RNN depth for both encoders and the decoder.
            embedding_size: token embedding dimension (shared embeddings).
            hidden_size: per-direction encoder hidden size; the decoder is
                built with ``2 * hidden_size``.
            dropout: dropout rate passed to the RNN modules.
            lr: Adam learning rate (see :meth:`configure_optimizers`).
            translate_path: optional JSONL output path; when given,
                test-time hypotheses and references are appended there.
        """
        super(CsGen, self).__init__()

        # Vocabularies
        self.code_vocab = code_vocab
        self.nl_vocab = nl_vocab
        self.vocab_map = vocab_map

        # Hyperparameters
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lr = lr

        # Network modules; both encoders share the code embedding table.
        code_embedding = nn.Embedding(len(code_vocab), embedding_size, padding_idx=self.code_vocab["<pad>"])
        nl_embedding = nn.Embedding(len(nl_vocab), embedding_size, padding_idx=self.nl_vocab["<pad>"])

        self.code_encoder = RNNEncoder(
            rnn_type="LSTM",
            bidirectional=True,
            num_layers=self.num_layers,
            hidden_size=self.hidden_size,
            embedding=code_embedding,
            dropout=dropout
        )

        self.unit_encoder = RNNEncoder(
            rnn_type="LSTM",
            bidirectional=True,
            num_layers=self.num_layers,
            hidden_size=self.hidden_size,
            embedding=code_embedding,
            dropout=dropout
        )

        self.encoder = ParallelRnnEncoder(
            encoders_list=[self.code_encoder, self.unit_encoder]
        )

        self.decoder = ParallelRNNDecoder(
            rnn_type="LSTM",
            num_encoders=2,
            bidirectional_encoders=True,
            num_layers=self.num_layers,
            hidden_size=2 * self.hidden_size,
            reduce="mlp",
            dropout=dropout,
            embedding=nl_embedding
        )

        # Generation head and copy/generate gate, both fed by decoder outputs.
        self.out_layer = nn.Linear(self.decoder.hidden_size * 2, len(nl_vocab))
        self.p_gen_linear = nn.Sequential(
            nn.Linear(self.decoder.hidden_size * 2, 1),
            nn.Sigmoid()
        )
        self.softmax = nn.Softmax(dim=-1)

        self.save_hyperparameters(ignore=["code_vocab", "nl_vocab", "translate_path", "vocab_map"])

        self.translate_path = translate_path
        if translate_path is not None:
            self.saver = JsonLWriter(self.translate_path)
        else:
            self.saver = None

    def forward(self, src_list, lengths, tgt):
        """Teacher-forced forward pass.

        Args:
            src_list: [code_tokens, unit_tokens]; the first entry is the
                copy source — assumed [batch, src_len] token ids (TODO
                confirm against the data module).
            lengths: per-encoder length tensors matching ``src_list``.
            tgt: decoder input tokens [batch, tgt_len].

        Returns:
            (dist, attn): mixed generate/copy distribution
            [batch, tgt_len, len(nl_vocab)] and the decoder's per-encoder
            attention weights.
        """
        final_states, memory_banks, memory_lengths = self.encoder(src_list, lengths)
        self.decoder.init_state(final_states)
        dec_outs, attn = self.decoder(tgt, memory_banks, memory_lengths)

        # Pointer network: p_gen gates generation vs. copying.
        p_gen = self.p_gen_linear(dec_outs)  # [batch, tgt_len, 1]

        # Copy probability: scatter attention over the primary (code) source
        # into code-vocab space, then project into NL-vocab space.
        copy_dist = attn[0]  # [batch, tgt_len, src_len]
        copy_token = src_list[0]  # [batch, src_len]
        copy_token = copy_token.unsqueeze(1)  # [batch, 1, src_len]
        copy_token = copy_token.expand_as(copy_dist)  # [batch_size, tgt_len, src_len]
        dist = torch.zeros(tgt.shape[0], tgt.shape[1], len(self.code_vocab), device=self.device)
        # scatter_add_ accumulates attention mass of repeated tokens.
        dist = dist.scatter_add_(2, copy_token, copy_dist)  # [batch_size, tgt_len, code_vocab]
        dist = torch.matmul(dist, self.vocab_map)
        # vm = self.vocab_map
        # dist = [torch.sparse.mm(vm.to(torch.float), each.transpose(0, 1)).transpose(0, 1) for each in dist]
        # dist = torch.stack(dist, dim=0).contiguous()
        outs = self.out_layer(dec_outs)  # [batch_size, tgt_len, nl_vocab]
        outs = self.softmax(outs)  # [batch_size, tgt_len, nl_vocab]
        dist = p_gen * outs + (1 - p_gen) * dist
        dist = dist.squeeze(1)  # no-op when tgt_len > 1
        return dist, attn

    def configure_optimizers(self):
        """Plain Adam over all parameters at the configured learning rate."""
        return torch.optim.Adam(params=self.parameters(), lr=self.lr)

    def training_step(self, batch, batch_idx: int):
        """NLL loss on the mixed distribution, shifted for teacher forcing.

        Decoder input is ``tgt[:, :-1]``; the target is ``tgt[:, 1:]``.
        The distribution is clamped before ``log`` for numerical stability.
        """
        code, code_len, units, units_len, tgt = batch['code'], batch['code_len'], batch['units'], batch['units_len'], \
                                                batch['nl']
        dist, attn = self([code, units], [code_len, units_len], tgt[:, :-1])
        dist = torch.clamp_min(dist, 1e-12)
        dist = dist.view(-1, len(self.nl_vocab))
        loss = F.nll_loss(dist.log(), tgt[:, 1:].reshape(-1), ignore_index=self.nl_vocab["<pad>"])
        self.log("train_loss", loss, on_epoch=True, prog_bar=True)
        return loss

    def validation_step(self, batch, batch_idx: int):
        """Beam-decode the batch and log corpus BLEU against raw references."""
        code, code_len, units, units_len, tgt, raw_tgt = batch['code'], batch['code_len'], batch['units'], \
                                                         batch['units_len'], batch['nl'], batch['raw_nl']
        batch_hyp, _, _ = self.beam_decode([code, units], [code_len, units_len])
        # Strip a trailing <eos>; NOTE(review): s[-1] assumes every
        # hypothesis is non-empty — confirm BeamUtil guarantees this.
        batch_hyp = [s[:-1] if s[-1] == self.nl_vocab['<eos>'] else s for s in batch_hyp]
        batch_hyp = [[self.nl_vocab.itos[token] for token in item] for item in batch_hyp]
        batch_ref = raw_tgt
        bleu = batch_bleu(batch_hyp, batch_ref)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def test_step(self, batch, batch_idx: int):
        """Like :meth:`validation_step`, optionally saving hyp/ref pairs to JSONL."""
        code, code_len, units, units_len, tgt, raw_tgt = batch['code'], batch['code_len'], batch['units'], \
                                                         batch['units_len'], batch['nl'], batch['raw_nl']
        batch_hyp, _, _ = self.beam_decode([code, units], [code_len, units_len])
        # Strip a trailing <eos>; NOTE(review): assumes non-empty hypotheses.
        batch_hyp = [s[:-1] if s[-1] == self.nl_vocab['<eos>'] else s for s in batch_hyp]
        batch_hyp = [[self.nl_vocab.itos[token] for token in item] for item in batch_hyp]
        batch_ref = raw_tgt
        if self.saver:
            for hyp, ref in zip(batch_hyp, batch_ref):
                self.saver.save_one({"hyp": hyp, "ref": ref})
        bleu = batch_bleu(batch_hyp, batch_ref)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def beam_decode(self, src_list, lengths):
        """Beam-search decoding with the same pointer mixing as :meth:`forward`.

        Beam hyperparameters (width 4, max 30 steps, length penalty 0.4)
        are fixed here rather than configurable.

        Args:
            src_list: [code_tokens, unit_tokens]; index 0 is the copy source.
            lengths: matching per-encoder length tensors.

        Returns:
            (predictions, scores, None); with ``n_best == 1`` the per-batch
            best hypothesis/score is unwrapped from its n-best list.
        """
        primary_id = 0
        bu = BeamUtil(
            bos_id=self.nl_vocab["<bos>"],
            pad_id=self.nl_vocab["<pad>"],
            eos_id=self.nl_vocab["<eos>"],
            vocab_size=len(self.nl_vocab),
            beam_width=4,
            with_attention=False,
            n_best=1,
            max_iter=30,
            length_penalty=0.4,
            coverage_penalty=0.
        )

        batch_beam = bu.gen_batch_beam(
            batch_size=src_list[primary_id].shape[0],
            device=src_list[primary_id].device,
            src_length=lengths[primary_id])

        final_states, memory_banks, memory_lengths = self.encoder(src_list, lengths)

        # Tile encoder outputs beam_width times along the batch dim.
        memory_banks = [bu.repeat(each, dim=0) for each in memory_banks]
        memory_lengths = [bu.repeat(each, dim=0) for each in memory_lengths]

        # Hidden states are tiled along dim 1 (batch dim of RNN states);
        # final_states is mutated in place before seeding the decoder.
        for idx, enc_state in enumerate(final_states):
            def repeat(t):
                return bu.repeat(t, 1)

            final_states[idx] = tuple_map(repeat, enc_state)

        self.decoder.init_state(final_states)

        copy_token = src_list[primary_id]  # [batch, src_len]
        copy_token = copy_token.unsqueeze(1)  # [batch, 1, src_len]
        copy_token = bu.repeat(copy_token, dim=0)

        for step in range(bu.max_iter):
            # One decoder step per iteration: feed last predictions only.
            beam_tgt = batch_beam.current_predictions.unsqueeze(1)
            dec_outs, attn = self.decoder(beam_tgt, memory_banks, memory_lengths)

            # Pointer network gate
            p_gen = self.p_gen_linear(dec_outs)  # [batch, 1, 1]

            # Copy probability (same scheme as forward())
            copy_dist = attn[primary_id]  # [batch, 1, src_len]
            # copy_token = copy_token.expand_as(copy_dist)  # [batch_size, 1, src_len]
            dist = torch.zeros(beam_tgt.shape[0], beam_tgt.shape[1], len(self.code_vocab), device=self.device)
            dist = dist.scatter_add_(2, copy_token, copy_dist)  # [batch_size, 1, code_vocab]
            # dist = [each.squeeze(0) for each in dist.split(1, 0)]
            dist = torch.matmul(dist, self.vocab_map)  # [batch_size, tgt_len, nl_vocab]
            # vm = self.vocab_map
            # dist = [torch.sparse.mm(vm.to(torch.float), each.transpose(0, 1)).transpose(0, 1) for each in dist]
            # dist = torch.stack(dist, dim=0).contiguous()
            outs = self.out_layer(dec_outs)  # [batch_size, tgt_len, nl_vocab]
            outs = self.softmax(outs)  # [batch_size, tgt_len, nl_vocab]
            dist = p_gen * outs + (1 - p_gen) * dist
            dist = dist.squeeze(1)
            # NOTE(review): this re-applies softmax to what is already a
            # mixed probability distribution, unlike forward() — confirm
            # this renormalization is intended for beam scoring.
            dist = F.softmax(dist, dim=-1)
            dist = dist.contiguous()
            dist = torch.clamp_min(dist, 1e-12)
            batch_beam.advance(dist.log(), None)

            any_beam_finished = batch_beam.is_finished.any()
            if any_beam_finished:
                batch_beam.update_finished()
                if batch_beam.done:
                    break

            # Reorder surviving beams; memory banks shrink only when some
            # batch entries finished, but decoder state and copy tokens are
            # reindexed every step to follow beam origins.
            if any_beam_finished:
                memory_banks = [each.index_select(0, batch_beam.current_origin) for each in memory_banks]
                memory_lengths = [each.index_select(0, batch_beam.current_origin) for each in memory_lengths]

            self.decoder.state.batch_select(batch_beam.current_origin)
            copy_token = copy_token.index_select(0, batch_beam.current_origin)
            # self.decoder.state.hidden = (self.decoder.state.hidden[0].index_select(1, batch_beam.current_origin),)
            # self.decoder.input_feed = self.decoder.input_feed.

        # With n_best == 1, unwrap each example's single best candidate.
        f = lambda x: [each[0] for each in x]
        if bu.n_best == 1:
            return f(batch_beam.predictions), f(batch_beam.scores), None
        else:
            return batch_beam.predictions, batch_beam.scores, None

# if __name__ == '__main__':
#     t_embedding = torch.nn.Embedding(12, 7, 0)
#     t_src_list = [
#         torch.tensor([
#             [1, 2, 3, 4, 5],
#             [2, 3, 4, 5, 0],
#             [3, 4, 5, 0, 0],
#             [4, 5, 0, 0, 0]
#         ]), torch.tensor([
#             [1, 2, 3, 4, 5],
#             [2, 3, 4, 5, 0],
#             [3, 4, 5, 0, 0],
#             [4, 5, 0, 0, 0]
#         ])
#     ]
#     t_src_len = [torch.tensor([5, 4, 3, 2]), torch.tensor([5, 4, 3, 2])]
#     ttgt = torch.tensor([
#         [1, 2, 3, 4, 5],
#         [2, 3, 4, 5, 0],
#         [3, 4, 5, 0, 0],
#         [4, 5, 0, 0, 0]
#     ])
#     vb = {"pad": 0, "bos": 1, "eos": 2}
#     for i, each in enumerate(list("abcderghijklmnopqrstuvwxuz")):
#         vb[each] = i
#     model = CsGen(
#         code_vocab=vb,
#         nl_vocab=vb,
#         vocab_map=torch.ones(len(vb), len(vb)),
#         pad_token="pad",
#         bos_token="bos",
#         eos_token="eos",
#         num_layers=2,
#         embedding_size=16,
#         hidden_size=16,
#         dropout=0.2
#     )
#     with torch.no_grad():
#         s = model.beam_decode(t_src_list, t_src_len, )
#         print(s)
