import random

import torch
from torch.nn import functional as F

__all__ = ["Seq2Seq", "PointerGeneratorSeq2seq"]


class Seq2Seq(torch.nn.Module):
    """Encoder-decoder sequence-to-sequence model with step-wise greedy decoding.

    Expected collaborator contracts (inferred from use below — confirm against
    the concrete encoder/decoder implementations):

    * ``encoder(src)`` returns ``(hidden, cell, encoder_outputs)``.
    * ``decoder(input, hidden, cell, encoder_outputs, encoder_mask)`` returns
      ``(output, hidden, cell)`` where ``output`` is ``[batch, vocab]`` scores.
    * ``decoder.embedding.num_embeddings`` is the target vocabulary size.
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder

    @staticmethod
    def create_mask(src):
        """Return a boolean mask of non-padding positions in ``src``.

        Assumes the padding token index is 0. Shape: [batch size, src len].
        (The original ``.permute(0, 1)`` was the identity permutation on a
        2-D tensor and has been removed — values are unchanged.)
        """
        return src != 0

    def forward(self, src, trg, teacher_forcing_ratio=1):
        """Decode ``trg`` one step at a time with optional teacher forcing.

        Args:
            src: source token ids, ``[batch, src len]``.
            trg: target token ids, ``[batch, trg len]``; ``trg[:, 0]`` is the
                start token and is never predicted.
            teacher_forcing_ratio: probability (per step) of feeding the
                ground-truth token back instead of the model's own argmax.

        Returns:
            Decoder scores, ``[batch, trg len, vocab]``; position 0 stays zero.
        """
        batch_size, trg_len = trg.shape
        trg_vocab_size = self.decoder.embedding.num_embeddings

        # Allocate directly on the source's device instead of allocate-then-move.
        outputs = torch.zeros(
            batch_size, trg_len, trg_vocab_size, device=src.device
        )

        hidden, cell, encoder_outputs = self.encoder(src)
        encoder_mask = self.create_mask(src)

        # First decoder input is the start-of-sequence token.
        input_token = trg[:, 0]

        for t in range(1, trg_len):
            output, hidden, cell = self.decoder(
                input_token, hidden, cell, encoder_outputs, encoder_mask
            )
            outputs[:, t] = output
            # With probability `teacher_forcing_ratio`, feed the ground truth
            # back in; otherwise feed the model's own best guess.
            teacher_force = random.random() < teacher_forcing_ratio
            input_token = trg[:, t] if teacher_force else output.argmax(1)

        return outputs

    def generate(self, src, max_len=150, eos=2):
        """Greedily decode up to ``max_len`` tokens for each source sequence.

        Args:
            src: source token ids, ``[batch, src len]``. ``src[:, 0]`` is used
                as the first decoder input — presumably a shared BOS token;
                TODO confirm against the tokenizer.
            max_len: maximum number of decoding steps.
            eos: end-of-sequence token id; everything after the first EOS in a
                row is zeroed out.

        Returns:
            Generated token ids, ``[batch, max_len]``. Kept as a float tensor
            with position 0 left at 0 for backward compatibility with callers.
        """
        hidden, cell, encoder_outputs = self.encoder(src)
        encoder_mask = self.create_mask(src)

        batch_size = src.shape[0]
        trg = torch.zeros(batch_size, max_len, device=src.device)

        input_token = src[:, 0]

        for t in range(1, max_len):
            output, hidden, cell = self.decoder(
                input_token, hidden, cell, encoder_outputs, encoder_mask
            )
            input_token = output.argmax(1)
            trg[:, t] = input_token

        # Zero out every token after the first EOS in each sequence.
        for i in range(batch_size):
            eos_positions = torch.where(trg[i] == eos)[0]
            if eos_positions.shape[0] > 0:
                trg[i, eos_positions[0] + 1 :] = 0

        return trg


class PointerGeneratorSeq2seq(Seq2Seq):
    """Seq2seq whose decoder mixes a generator distribution over the
    vocabulary with a copy distribution over source positions.

    The decoder is expected to return
    ``(output, hidden, cell, attention_weight, pointer)`` where
    ``attention_weight`` is ``[batch, src len]`` and ``pointer`` weights the
    generator vs. copy mixture (broadcastable against both).
    """

    # Additive smoothing so torch.log never sees an exact zero.
    eps = 1e-31

    def forward(self, src, trg, teacher_forcing_ratio=1):
        """Decode ``trg`` step by step and return log-probabilities.

        Args:
            src: source token ids, ``[batch, src len]``.
            trg: target token ids, ``[batch, trg len]``; position 0 is the
                start token and is never predicted.
            teacher_forcing_ratio: per-step probability of feeding the ground
                truth back instead of the model's own prediction.

        Returns:
            Log of the mixed distributions, ``[batch, trg len, vocab]``;
            position 0 holds ``log(eps)``.
        """
        batch_size, trg_len = trg.shape
        trg_vocab_size = self.decoder.embedding.num_embeddings

        outputs = torch.zeros(
            batch_size, trg_len, trg_vocab_size, device=src.device
        )

        hidden, cell, encoder_outputs = self.encoder(src)
        encoder_mask = self.create_mask(src)

        input_token = trg[:, 0]

        for t in range(1, trg_len):
            output, hidden, cell, attention_weight, pointer = self.decoder(
                input_token, hidden, cell, encoder_outputs, encoder_mask
            )
            # Generator part weighted by `pointer`, copy part by (1 - pointer);
            # scatter_add_ folds the attention mass onto the source token ids.
            output_distribution = F.softmax(output, dim=1) * pointer
            attention_distribution = attention_weight * (1 - pointer)
            output_distribution.scatter_add_(1, src, attention_distribution)

            outputs[:, t] = output_distribution
            teacher_force = random.random() < teacher_forcing_ratio
            # FIX: feed back the argmax of the combined generator+copy
            # distribution, matching generate(); previously only the raw
            # generator logits were used, ignoring the copy mechanism.
            # (No effect under the default teacher_forcing_ratio=1.)
            top1 = output_distribution.argmax(1)
            input_token = trg[:, t] if teacher_force else top1

        return torch.log(outputs + self.eps)

    def generate(self, src, max_len=150, eos=2):
        """Greedily decode up to ``max_len`` tokens from the mixture
        distribution; tokens after the first ``eos`` are zeroed.

        Returns ``[batch, max_len]`` token ids as a float tensor with
        position 0 left at 0 (kept for backward compatibility).
        """
        hidden, cell, encoder_outputs = self.encoder(src)
        encoder_mask = self.create_mask(src)

        batch_size = src.shape[0]
        trg = torch.zeros(batch_size, max_len, device=src.device)

        # presumably src[:, 0] is a shared BOS token — TODO confirm
        input_token = src[:, 0]

        for t in range(1, max_len):
            output, hidden, cell, attention_weight, pointer = self.decoder(
                input_token, hidden, cell, encoder_outputs, encoder_mask
            )
            output_distribution = F.softmax(output, dim=1) * pointer
            attention_distribution = attention_weight * (1 - pointer)
            output_distribution.scatter_add_(1, src, attention_distribution)

            input_token = output_distribution.argmax(1)
            trg[:, t] = input_token

        # Zero out every token after the first EOS in each sequence.
        for i in range(batch_size):
            eos_positions = torch.where(trg[i] == eos)[0]
            if eos_positions.shape[0] > 0:
                trg[i, eos_positions[0] + 1 :] = 0

        return trg
