import torch
import torch.nn as nn
import pytorch_lightning as pl
from utils.beam import BeamUtil
from modules.embeddings.position_embedding import PositionalEncoding
from torch.nn import Transformer
from torch import Tensor
import torch.nn.functional as F
from utils.metrics import batch_bleu
from utils.jsonl import JsonLWriter
from argparse import ArgumentParser
import math


class TokenEmbedding(nn.Module):
    """Lookup embedding scaled by sqrt(emb_size), per the Transformer convention.

    The scaling keeps embedding magnitudes comparable to the positional
    encodings that are added downstream. `pad` rows embed to the zero vector
    (standard `nn.Embedding` padding_idx behavior).
    """

    def __init__(self, vocab_size: int, emb_size, pad):
        super().__init__()
        self.emb_size = emb_size
        self.embedding = nn.Embedding(vocab_size, emb_size, padding_idx=pad)

    def forward(self, tokens: Tensor):
        # Cast to long so integer tensors of any dtype index the table.
        scale = math.sqrt(self.emb_size)
        return self.embedding(tokens.long()) * scale


class mTransformer(pl.LightningModule):
    """Sequence-to-sequence Transformer for code -> natural-language translation.

    Training runs teacher-forced decoding with an NLL loss over the softmax
    output distribution; validation/test decode with beam search and log
    batch-level BLEU against the raw reference sentences.

    Args:
        code_vocab: source-side vocabulary; supports ``vocab["<pad>"]`` lookup,
            ``len(vocab)``, and an ``itos`` id->token sequence (torchtext-style).
        nl_vocab: target-side vocabulary, same interface as ``code_vocab``.
        embedding_size: model dimension (d_model) shared by embeddings,
            attention, and the output projection.
        num_encoder_layers: depth of the encoder stack.
        num_decoder_layers: depth of the decoder stack.
        nhead: number of attention heads.
        dim_feedforward: hidden size of the position-wise feed-forward nets.
        dropout: dropout rate for embeddings and Transformer sublayers.
        lr: Adam learning rate.
        translate_path: optional JSONL path; when given, test-time
            hypothesis/reference pairs are appended to it.
    """

    def __init__(self,
                 code_vocab,
                 nl_vocab,
                 embedding_size: int,
                 num_encoder_layers: int,
                 num_decoder_layers: int,
                 nhead: int,
                 dim_feedforward: int,
                 dropout: float,
                 lr: float,
                 translate_path=None):
        super(mTransformer, self).__init__()

        # Vocabularies (token -> id; `.itos` gives the reverse mapping).
        self.code_vocab = code_vocab
        self.nl_vocab = nl_vocab

        # Pad ids, cached for building padding masks.
        self.code_pad = self.code_vocab["<pad>"]
        self.nl_pad = self.nl_vocab["<pad>"]

        # Hyper-parameters.
        self.embedding_size = embedding_size
        self.dim_feedforward = dim_feedforward

        # Network.
        self.code_embedding = TokenEmbedding(len(code_vocab), embedding_size, pad=self.code_pad)
        self.nl_embedding = TokenEmbedding(len(nl_vocab), embedding_size, pad=self.nl_pad)
        self.positional_encoding = PositionalEncoding(dropout=dropout, dim=embedding_size)

        self.transformer = Transformer(d_model=embedding_size,
                                       nhead=nhead,
                                       num_encoder_layers=num_encoder_layers,
                                       num_decoder_layers=num_decoder_layers,
                                       dim_feedforward=dim_feedforward,
                                       dropout=dropout,
                                       batch_first=True)
        self.out_layer = nn.Linear(self.embedding_size, len(nl_vocab))
        self.softmax = nn.Softmax(dim=-1)
        self.lr = lr

        # Optional sink for test-time translations (one JSON object per line).
        self.translate_path = translate_path
        self.saver = JsonLWriter(self.translate_path) if translate_path is not None else None

        # Xavier init for every weight matrix; 1-D params (biases, norms)
        # keep their default initialization.
        for p in self.transformer.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

        self.save_hyperparameters(ignore=["code_vocab", "nl_vocab", "translate_path"])

    @staticmethod
    def add_model_args(parser: ArgumentParser):
        """Register this model's hyper-parameter CLI flags on `parser`."""
        parser.add_argument("--embedding", help="embedding size", type=int, default=512)
        parser.add_argument("--dffn", help="dim_feedforward", type=int, default=2048)
        parser.add_argument("--layers", help="layers", type=int, default=6)
        parser.add_argument("--nhead", help="num of head", type=int, default=8)
        parser.add_argument("--dropout", help="dropout", type=float, default=0.1)

    def configure_optimizers(self):
        # betas=(0.9, 0.98) follows the original "Attention Is All You Need" setup.
        return torch.optim.Adam(self.parameters(), lr=self.lr, betas=(0.9, 0.98))

    def forward(self,
                src: Tensor,
                tgt: Tensor,
                src_mask: Tensor,
                tgt_mask: Tensor,
                src_padding_mask: Tensor,
                tgt_padding_mask: Tensor,
                memory_key_padding_mask: Tensor):
        """Teacher-forced forward pass.

        Returns the per-position probability distribution over the target
        vocabulary, shape (batch, tgt_len, len(nl_vocab)). Note: softmax is
        applied here, so downstream losses must use log(), not log_softmax.
        """
        src_emb = self.code_embedding(src)
        src_emb = self.positional_encoding(src_emb)

        tgt_emb = self.nl_embedding(tgt)
        tgt_emb = self.positional_encoding(tgt_emb)

        # Positional args follow nn.Transformer.forward:
        # (src, tgt, src_mask, tgt_mask, memory_mask, src_key_padding_mask,
        #  tgt_key_padding_mask, memory_key_padding_mask).
        outs = self.transformer(
            src_emb,
            tgt_emb,
            src_mask,
            tgt_mask,
            None,
            src_padding_mask,
            tgt_padding_mask,
            memory_key_padding_mask)

        outs = self.out_layer(outs)
        outs = self.softmax(outs)
        return outs

    def training_step(self, batch, batch_idx):
        src, lengths, tgt = batch['code'], batch['code_len'], batch['nl']
        tgt_input = tgt[:, :-1]  # decoder input: drop the last token (eos)
        src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = self.create_mask(src, tgt_input)
        dist = self(src=src,
                    tgt=tgt_input,
                    src_mask=src_mask,
                    tgt_mask=tgt_mask,
                    src_padding_mask=src_padding_mask,
                    tgt_padding_mask=tgt_padding_mask,
                    memory_key_padding_mask=src_padding_mask)
        # Guard against softmax underflow: a target token whose probability
        # collapses to exactly 0 would yield log(0) = -inf and NaN gradients.
        dist = torch.clamp_min(dist, 1e-12)
        dist = dist.view(-1, len(self.nl_vocab))
        tgt_out = tgt[:, 1:]  # targets shifted one step left (predict next token)
        loss = F.nll_loss(dist.log(), tgt_out.reshape(-1), ignore_index=self.nl_pad)
        self.log("train_loss", loss, on_epoch=True, prog_bar=True)
        return loss

    def validation_step(self, batch, batch_idx):
        src, lengths, raw_tgt = batch['code'], batch['code_len'], batch['raw_nl']
        batch_hyp, _, _ = self.beam_decode(src, lengths)
        # Strip a trailing <eos>; guard against empty hypotheses.
        batch_hyp = [s[:-1] if len(s) > 0 and s[-1] == self.nl_vocab['<eos>'] else s
                     for s in batch_hyp]
        batch_hyp = [[self.nl_vocab.itos[token] for token in item] for item in batch_hyp]
        batch_ref = raw_tgt
        bleu = batch_bleu(batch_hyp, batch_ref)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def test_step(self, batch, batch_idx):
        src, lengths, raw_tgt = batch['code'], batch['code_len'], batch['raw_nl']
        batch_hyp, _, _ = self.beam_decode(src, lengths)
        # Strip a trailing <eos>; guard against empty hypotheses.
        batch_hyp = [s[:-1] if len(s) > 0 and s[-1] == self.nl_vocab['<eos>'] else s
                     for s in batch_hyp]
        batch_hyp = [[self.nl_vocab.itos[token] for token in item] for item in batch_hyp]
        batch_ref = raw_tgt
        if self.saver:
            # Persist each hypothesis/reference pair for offline inspection.
            for hyp, ref in zip(batch_hyp, batch_ref):
                self.saver.save_one({"hyp": hyp, "ref": ref})
        bleu = batch_bleu(batch_hyp, batch_ref)
        self.log("bleu", bleu, on_step=True, on_epoch=True, prog_bar=True)
        return bleu

    def generate_square_subsequent_mask(self, sz):
        """Build a causal (look-ahead) attention mask with -inf above the diagonal.

        generate_square_subsequent_mask(3)
        tensor([[0., -inf, -inf],
        [0., 0., -inf],
        [0., 0., 0.]])
        """
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask.to(self.device)

    def create_mask(self, src, tgt):
        """Build attention + key-padding masks for a (src, tgt) batch.

        The encoder self-attention mask is all-False (no masking); the decoder
        gets the causal mask. Padding masks are True at <pad> positions, per
        nn.Transformer's key_padding_mask convention.
        """
        src_seq_len = src.shape[1]
        tgt_seq_len = tgt.shape[1]
        tgt_mask = self.generate_square_subsequent_mask(tgt_seq_len)
        src_mask = torch.zeros(src_seq_len, src_seq_len, device=self.device).to(torch.bool)
        src_padding_mask = (src == self.code_pad)
        tgt_padding_mask = (tgt == self.nl_pad)
        return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask

    def encode(self, src: Tensor, src_mask: Tensor, src_padding_mask: Tensor):
        """Embed + position-encode `src` and run the encoder stack."""
        src_emb = self.code_embedding(src)
        src_emb = self.positional_encoding(src_emb)
        return self.transformer.encoder(src_emb, src_mask, src_padding_mask)

    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        """Embed + position-encode `tgt` and run the decoder over `memory`.

        NOTE(review): no memory_key_padding_mask is passed, so cross-attention
        can attend to source <pad> positions during beam search — confirm this
        is intended (training does mask them via memory_key_padding_mask).
        """
        tgt_emb = self.nl_embedding(tgt)
        tgt_emb = self.positional_encoding(tgt_emb)
        return self.transformer.decoder(tgt_emb, memory, tgt_mask)

    def beam_decode(self, src, lengths):
        """Beam-search decode a batch.

        Returns (predictions, scores, None); with n_best == 1 each entry is the
        single best hypothesis (a token-id sequence) per source example.
        """
        src_mask = torch.zeros(src.shape[1], src.shape[1], device=self.device).to(torch.bool)
        src_padding_mask = (src == self.code_pad)

        bu = BeamUtil(
            bos_id=self.nl_vocab['<bos>'],
            pad_id=self.nl_pad,
            eos_id=self.nl_vocab['<eos>'],
            vocab_size=len(self.nl_vocab),
            beam_width=4,
            with_attention=False,
            n_best=1,
            max_iter=30,
            length_penalty=0.4,
            coverage_penalty=0.
        )

        batch_beam = bu.gen_batch_beam(
            batch_size=src.shape[0],
            device=src.device,
            src_length=lengths)

        # Encode once, then tile memory across beams (beam dim folded into batch).
        memory = self.encode(src, src_mask, src_padding_mask)
        memory = bu.repeat(memory, dim=0)

        for step in range(bu.max_iter):
            beam_tgt = batch_beam.alive_seq[:, :]
            tgt_mask = self.generate_square_subsequent_mask(beam_tgt.shape[1]).to(torch.bool)
            outs = self.decode(beam_tgt, memory, tgt_mask)
            outs = self.out_layer(outs)
            outs = self.softmax(outs)

            # Only the newest position's distribution matters; log-probs feed
            # the beam (log(0) = -inf simply makes a token unselectable).
            batch_beam.advance(outs[:, -1, :].log(), None)

            if batch_beam.is_finished.any():
                batch_beam.update_finished()
                if batch_beam.done:
                    break
                # Keep memory rows aligned with the surviving beams.
                memory = memory.index_select(0, batch_beam.current_origin)

        top1 = lambda x: [each[0] for each in x]
        if bu.n_best == 1:
            return top1(batch_beam.predictions), top1(batch_beam.scores), None
        return batch_beam.predictions, batch_beam.scores, None
