from functools import partial
from typing import Optional, Tuple

import math
import torch.nn.functional as F
import torch.nn as nn
import torch


class BaseQuantizer(nn.Module):
    """Shared machinery for codebook quantizers.

    Subclasses must implement ``quantize(z) -> (z_q, encoding_indices, loss)``.
    Supports multi-stage residual quantization (``use_residual``) and an
    optional straight-through estimator so encoder gradients flow through
    the quantized output.
    """

    def __init__(self, embed_dim, num_tokens, straight_through, use_norm, use_residual=False, num_quantizer=1):
        super().__init__()
        # L2-normalize along the feature dim when requested, otherwise identity.
        self.norm = lambda x: F.normalize(x, dim=-1) if use_norm else x
        self.straight_through = straight_through
        self.num_quantizer = num_quantizer

        self.use_residual = use_residual
        self.num_tokens = num_tokens
        self.embed_dim = embed_dim

        # Codebook: num_tokens vectors of size embed_dim, standard-normal init.
        self.embedding = nn.Embedding(self.num_tokens, self.embed_dim)
        self.embedding.weight.data.normal_()

    def forward(self, z):
        """Quantize ``z``; returns (z_q, encoding_indices, loss)."""
        if not self.use_residual:
            z_q, encoding_indices, loss = self.quantize(z)
        else:
            z_q = torch.zeros_like(z)
            residual = z.detach().clone()

            losses = []
            encoding_indices = []

            # BUG FIX: the attribute is `num_quantizer`; the original read a
            # non-existent `num_quantizers`, raising AttributeError whenever
            # use_residual=True.
            for _ in range(self.num_quantizer):
                # BUG FIX: quantize() returns (z_q, indices, loss); the original
                # unpacked (z_qi, loss, indices), swapping loss and indices.
                z_qi, indices, loss = self.quantize(residual.clone())
                residual.sub_(z_qi)
                z_q.add_(z_qi)

                encoding_indices.append(indices)
                losses.append(loss)

            # Stack per-stage indices/losses along a trailing dimension.
            losses, encoding_indices = map(partial(torch.stack, dim=-1), (losses, encoding_indices))
            loss = losses.mean()

        # Straight-through estimator: forward uses z_q's values, backward
        # passes gradients straight to z.
        if self.straight_through:
            z_q = z + (z_q - z).detach()

        return z_q, encoding_indices, loss


class VectorQuantizer(BaseQuantizer):
    """Nearest-neighbour codebook quantizer with a VQ-VAE style loss."""

    def __init__(self, embed_dim: int, num_tokens: int, beta: float = 0.25, use_norm=True,
                 use_residual=False, straight_through=True, num_quantizer=1):
        super().__init__(embed_dim, num_tokens, straight_through, use_norm, use_residual, num_quantizer)

        # Weight of the commitment term (pulls encoder outputs towards codes).
        self.beta = beta

    def quantize(self, z):
        """Snap every vector of ``z`` to its closest codebook entry.

        Returns ``(z_q, encoding_indices, loss)`` where ``loss`` combines the
        codebook objective with the beta-weighted commitment objective.
        """
        flat = self.norm(z.view(-1, self.embed_dim))
        codebook = self.norm(self.embedding.weight)

        # Squared Euclidean distances via ||a - b||^2 = |a|^2 + |b|^2 - 2 a·b.
        distances = (flat.pow(2).sum(dim=1, keepdim=True)
                     + codebook.pow(2).sum(dim=1)
                     - 2 * flat @ codebook.t())

        # Hard assignment: index of the nearest code, reshaped to z's batch dims.
        encoding_indices = distances.argmin(dim=1).view(*z.shape[:-1])
        z_q = self.embedding(encoding_indices).view(z.shape)

        # Commitment term (gradient to z) plus codebook term (gradient to codes).
        commitment = torch.mean((z_q.detach() - z) ** 2)
        codebook_term = torch.mean((z_q - z.detach()) ** 2)
        loss = self.beta * commitment + codebook_term

        return z_q, encoding_indices, loss


class GumbelQuantizer(BaseQuantizer):
    """Codebook quantizer using a Gumbel-softmax relaxation of the assignment."""

    def __init__(self, embed_dim: int, num_tokens: int, temp_init: float = 1.0,
                 use_norm: bool = True, use_residual: bool = False, straight_through=True,
                 num_quantizer: Optional[int] = 1) -> None:
        super().__init__(embed_dim, num_tokens, straight_through, use_norm, use_residual, num_quantizer)

        # Softmax temperature; typically annealed by the caller during training.
        self.temperature = temp_init

    def quantize(self, z: torch.FloatTensor, temp: Optional[float] = None) -> Tuple[
        torch.FloatTensor, torch.FloatTensor, torch.LongTensor]:
        """Soft-assign ``z`` to codebook entries via Gumbel-softmax.

        Returns ``(z_qnorm, encoding_indices, loss)`` where ``loss`` is the
        KL divergence of the assignment distribution to a uniform prior.
        """
        # Outside training we must commit to a single code, so force hard one-hots.
        hard = not self.training
        if temp is None:
            temp = self.temperature

        flat = self.norm(z.view(-1, self.embed_dim))
        codebook = self.norm(self.embedding.weight)

        # Negative squared distances act as logits: closer codes score higher.
        logits = (2 * flat @ codebook.t()
                  - flat.pow(2).sum(dim=1, keepdim=True)
                  - codebook.pow(2).sum(dim=1))
        logits = logits.view(*z.shape[:-1], -1)

        # Differentiable (soft or straight-through hard) one-hot sample.
        soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=-1, hard=hard)
        z_qnorm = torch.matmul(soft_one_hot, codebook)

        # KL(q || uniform prior); log_softmax keeps this numerically stable.
        log_q = F.log_softmax(logits, dim=-1)
        loss = torch.sum(log_q.exp() * (log_q + math.log(self.num_tokens)), dim=-1).mean()

        # Hard token indices for downstream consumers.
        encoding_indices = soft_one_hot.argmax(dim=-1)

        return z_qnorm, encoding_indices, loss
