import math
import torch
from torch import nn


class LearnablePosEmbd(nn.Module):
    """Learnable positional embedding looked up from a fixed table of N rows.

    Positions along a chosen axis are normalized to [0, 1] and mapped onto
    the N table rows, so sequences of different lengths share one table.
    """

    def __init__(self, d, N, add_noise=True):
        """
        :param d: embedding dimension; must equal x.shape[-1] in forward
        :param N: number of rows in the embedding table
        :param add_noise: if True, jitter positions slightly during training
            so neighbouring table rows are also exercised
        """
        super().__init__()
        self.d = d
        self.N = N
        self.add_noise = add_noise

        self.embd_weights = nn.Embedding(N, d)

    def forward(self, x, dim):
        """Return a position-embedding tensor broadcastable against x.

        :param x: tensor whose last axis has size d
        :param dim: axis of x holding the sequence; may be negative
        :return: tensor of shape (1, ..., l, ..., 1, d) with l at ``dim``
        """
        assert x.shape[-1] == self.d
        # Normalize negative axes so the reshape below indexes correctly.
        dim = dim % x.ndim
        assert x.shape[dim] > 1

        with torch.no_grad():
            l = x.shape[dim]
            # Spread positions over the full [0, 1] range (l-1 intervals).
            # Dividing by l (as before) left the top of the table unreachable
            # and was inconsistent with LearnableTimeEmbd.
            p = torch.arange(l, dtype=torch.float, device=x.device) / (l - 1)
            if self.training and self.add_noise:
                # Jitter by strictly less than half an inter-position gap,
                # so a noisy position cannot cross its neighbour.
                interval = 1. / (l - 1) / 2.1
                n = torch.clamp(torch.randn_like(p) * interval, min=-interval, max=interval)
                p = torch.clamp(p + n, min=0., max=1.)
            p = torch.round(p * (self.N - 1)).long()

        pe = self.embd_weights(p)
        # Shape (1, ..., l, ..., 1, d) so the result broadcasts against x.
        shape = [l if a == dim else 1 for a in range(x.ndim - 1)]
        shape.append(self.d)
        return pe.reshape(shape)


class LearnableTimeEmbd(nn.Module):
    """Continuous learnable positional embedding.

    Each position in [0, 1] is a linear interpolation between two learned
    endpoint vectors, expanded to the full dimension by a small MLP.
    """

    def __init__(self, d):
        """
        :param d: output embedding dimension; must equal x.shape[-1] in forward
        """
        super().__init__()
        self.d = d

        # Learned endpoints: pos_a is blended in at position 1, pos_b at 0.
        self.pos_a = nn.Parameter(torch.randn(1, d // 4))
        self.pos_b = nn.Parameter(torch.randn(1, d // 4))
        self.mlp = nn.Sequential(
            nn.GELU(), nn.Linear(d // 4, d // 2),
            nn.GELU(), nn.Linear(d // 2, d)
        )

    def forward(self, x, dim):
        """Return a position-embedding tensor broadcastable against x.

        :param x: tensor whose last axis has size d
        :param dim: axis of x holding the sequence; may be negative
        :return: tensor of shape (1, ..., l, ..., 1, d) with l at ``dim``
        """
        assert x.shape[-1] == self.d
        # Normalize negative axes so the reshape below indexes correctly.
        dim = dim % x.ndim
        assert x.shape[dim] > 1

        with torch.no_grad():
            l = x.shape[dim]
            p = torch.arange(l, dtype=torch.float, device=x.device) / (l - 1)
            if self.training:
                # Jitter by strictly less than half an inter-position gap.
                interval = 1. / (l - 1) / 2.1
                n = torch.clamp(torch.randn_like(p) * interval, min=-interval, max=interval)
                p = torch.clamp(p + n, min=0., max=1.)
            p = p.unsqueeze(-1)

        # Linear interpolation between the two endpoint vectors.
        pe = p * self.pos_a.float() + (1. - p) * self.pos_b.float()
        pe = self.mlp(pe.to(x.dtype))
        shape = [l if a == dim else 1 for a in range(x.ndim - 1)]
        shape.append(self.d)
        return pe.reshape(shape)


class AbsolutePosEmbd:
    """Fixed (non-learnable) sinusoidal position encoding, added onto x."""

    def __init__(self, d):
        """
        :param d: model dimension; must be even and equal x.shape[-1]
        """
        super().__init__()
        self.d = d

    @staticmethod
    def sinusoidal(d_model, positions):
        """
        Ref: https://github.com/wzlxjtu/PositionalEncoding2D/blob/master/positionalembedding2d.py
        :param d_model: dimension of the model (must be even)
        :param positions: 1-D tensor of positions, one per embedding
        :return: length x d_model position matrix
        :raises ValueError: if d_model is odd
        """
        if d_model % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                            "odd dim (got dim={:d})".format(d_model))
        device = positions.device

        pe = torch.zeros(len(positions), d_model, dtype=torch.float, device=device)
        # Geometric frequency progression, as in "Attention Is All You Need".
        div_term = torch.exp((torch.arange(0, d_model, 2, dtype=torch.float, device=device) *
                            -(math.log(10000.0) / d_model)))
        pe[:, 0::2] = torch.sin(positions.unsqueeze(1) * div_term)
        pe[:, 1::2] = torch.cos(positions.unsqueeze(1) * div_term)

        return pe

    def __call__(self, x: torch.Tensor, t: torch.Tensor, dim: int):
        """Add sinusoidal encodings for timestamps t along axis ``dim``.

        :param x: tensor whose last axis has size d
        :param t: timestamps (tensor or sequence), len(t) == x.shape[dim]
        :param dim: axis of x holding the sequence; may be negative
        :return: x plus the broadcast position encoding
        """
        assert x.shape[-1] == self.d
        # Normalize negative axes so the reshape below indexes correctly.
        dim = dim % x.ndim
        assert x.shape[dim] > 1
        assert len(t) == x.shape[dim]

        # as_tensor avoids the extra copy and the UserWarning that
        # torch.tensor() emits when t is already a tensor.
        t = torch.as_tensor(t, dtype=torch.float, device=x.device)
        p = self.sinusoidal(self.d, t).to(dtype=x.dtype)

        shape = [x.shape[dim] if a == dim else 1 for a in range(x.ndim - 1)]
        shape.append(self.d)
        p = p.reshape(shape)

        return x + p