# We reference the code in https://github.com/nerfstudio-project/nerfstudio/blob/a8e6f8fa3fd6c0ad2f3e681dcf1519e74ad2230f/nerfstudio/field_components/embedding.py
# Thanks to their great work!

import torch
from abc import abstractmethod
from typing import Optional
from jaxtyping import Shaped
from torch import Tensor, nn
import torch.nn.functional as F

class FieldComponent(nn.Module):
    """Composable field module used to store and compute field quantities.

    Args:
        in_dim: Input dimension to module.
        out_dim: Output dimension to module.
    """

    def __init__(self, in_dim: Optional[int] = None, out_dim: Optional[int] = None):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

    def build_nn_modules(self):
        """Instantiate any torch.nn members of the module; no-op by default."""

    def set_in_dim(self, in_dim: int):
        """Set the input dimension of the encoding.

        Args:
            in_dim: input dimension (must be positive)

        Raises:
            ValueError: if ``in_dim`` is not strictly positive.
        """
        if not in_dim > 0:
            raise ValueError("Input dimension should be greater than zero")
        self.in_dim = in_dim

    def get_out_dim(self):
        """Return the output dimension of the encoding.

        Raises:
            ValueError: if the output dimension was never configured.
        """
        if self.out_dim is None:
            raise ValueError("Output dimension has not been set")
        return self.out_dim

    @abstractmethod
    def forward(self, in_tensor: Shaped[Tensor, "*bs input_dim"]):
        """Process the input tensor and return the result.

        Args:
            in_tensor: Input tensor to process
        """
        raise NotImplementedError
  
class Embedding(FieldComponent):
    """Index into embeddings.
    # TODO: add different types of initializations

    Args:
        in_dim: Number of embeddings
        out_dim: Dimension of the embedding vectors
    """

    def __init__(self, in_dim: int, out_dim: int):
        # Let the parent store the dimensions, then build the table.
        super().__init__(in_dim=in_dim, out_dim=out_dim)
        self.build_nn_modules()

    def build_nn_modules(self):
        """Create the underlying torch embedding table."""
        self.embedding = torch.nn.Embedding(self.in_dim, self.out_dim)

    def mean(self, dim=0):
        """Return the mean of the embedding weights along ``dim``."""
        return self.embedding.weight.mean(dim)

    def forward(self, in_tensor: Shaped[Tensor, "*batch input_dim"]):
        """Look up the embedding rows for the given index tensor.

        Args:
            in_tensor: input tensor of indices to process
        """
        return self.embedding(in_tensor)

 
class PosEmbedding(nn.Module):
    """Sinusoidal positional embedding with interleaved sin/cos bands.

    Maps x -> [x, sin(2^0 x), cos(2^0 x), ..., sin(2^(N-1) x), cos(2^(N-1) x)]
    along the last dimension.

    Args:
        N_freqs: number of frequency bands (powers of two).
    """

    def __init__(self, N_freqs):
        super(PosEmbedding, self).__init__()
        self.N_freqs = N_freqs
        self.funcs = [torch.sin, torch.cos]
        # Register as a (non-persistent) buffer so the bands follow the
        # module across devices/dtypes via model.to(...); the previous plain
        # tensor attribute stayed on CPU and broke CUDA inputs.
        # persistent=False keeps old checkpoints loadable (no new state_dict key).
        freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)
        self.register_buffer('freq_bands', freq_bands, persistent=False)
        # kept for backward compatibility with external readers (unused internally)
        self.freqs = list(range(N_freqs))

    def forward(self, x):
        """Encode x of shape (..., D) into (..., D * (1 + 2 * N_freqs))."""
        out = [x]
        for freq in self.freq_bands:
            for func in self.funcs:
                out.append(func(freq * x))
        return torch.cat(out, dim=-1)
    
    
class PosEmbedding1(nn.Module):
    """Sinusoidal positional embedding grouped as [x | all sines | all cosines].

    Args:
        N_freqs: number of frequency bands (powers of two); 0 disables encoding.
    """

    def __init__(self, N_freqs):
        super(PosEmbedding1, self).__init__()
        self.N_freqs = N_freqs
        # Register as buffer so it's moved with model.to(device)
        freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)
        self.register_buffer('freq_bands', freq_bands)

    def forward(self, x):
        """
        Input:  x, shape [..., D]
        Output: [..., D * (1 + 2 * N_freqs)]
        """
        if self.N_freqs < 1:
            return x
        # Shape: [..., D] → [..., D, 1] → [..., D, N_freqs]
        x_expanded = x.unsqueeze(-1) * self.freq_bands   # [..., D, N_freqs]

        # Flatten only the last two dims so arbitrary leading batch dims are
        # preserved. The previous reshape(x_expanded.shape[0], -1) collapsed
        # everything after dim 0, which only worked for 2-D inputs and made
        # the final cat fail for the [..., D] inputs documented above.
        sin = torch.sin(x_expanded).flatten(start_dim=-2)
        cos = torch.cos(x_expanded).flatten(start_dim=-2)
        # Concatenate original x, sin, and cos along the last dimension
        return torch.cat([x, sin, cos], dim=-1)          # [..., D * (1 + 2 * N_freqs)]
    

class PosEmbedding3(nn.Module):
    def __init__(self, N_freqs):
        super(PosEmbedding3, self).__init__()
        self.N_freqs = N_freqs
        freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)
        self.register_buffer('freq_bands', freq_bands)
        
    def forward(self, x):
        """
        Args:
            x: (..., D)
        Returns:
            out: (..., D * (2 * N_freqs + 1))
        """
        x = x.unsqueeze(-2)  # (..., 1, D)
        orig_x = x
        # (N_freqs,) -> (N_freqs, 1)
        freq_bands = self.freq_bands[:, None]  # (N_freqs, 1)

        # Multiply: (..., N_freqs, D)
        x_freq = x * freq_bands  # broadcast

        # Apply sin and cos: (..., N_freqs, D)
        sin_enc = torch.sin(x_freq)
        cos_enc = torch.cos(x_freq)
        # Concatenate original input and embeddings: (..., D + 2 * N_freqs * D)
        out = torch.cat([orig_x, sin_enc, cos_enc], dim=-2)
        out = out.flatten(start_dim=-2)  # flatten last two dims

        return out

class Sine(nn.Module):
    """Sine activation sin(omega_0 * x), as used in SIREN-style layers.

    Args:
        omega_0: frequency scale applied to the input before the sine.
    """

    def __init__(self, omega_0=30.0):
        super().__init__()
        self.omega_0 = omega_0

    def forward(self, x):
        # scale the input frequency, then apply the sine nonlinearity
        return (self.omega_0 * x).sin()
 
    
class MLP(nn.Module):
    """Small MLP: Linear+ReLU head, optional Linear+Sine hidden body,
    and a Linear (+optional activation) tail.

    Args:
        in_dim: input feature dimension.
        hidden_dim: hidden layer width.
        out_dim: output dimension.
        n_layers: total number of linear layers; values <= 2 yield no body.
        out_act: output activation module, or None for raw linear output.
    """

    def __init__(self, in_dim=32, hidden_dim=32, out_dim=3, n_layers=2, out_act=nn.Sigmoid()):
        super(MLP, self).__init__()

        self.head = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU(True))

        # Always an nn.Sequential: the previous code left `self.body` as a
        # plain Python list whenever n_layers <= 2, giving the attribute an
        # inconsistent type. An empty Sequential acts as the identity.
        body_layers = []
        for _ in range(n_layers - 2):
            body_layers.append(nn.Linear(hidden_dim, hidden_dim))
            body_layers.append(Sine())
        self.body = nn.Sequential(*body_layers)

        # Explicit None check instead of truthiness on an nn.Module.
        if out_act is not None:
            self.tail = nn.Sequential(nn.Linear(hidden_dim, out_dim), out_act)
        else:
            self.tail = nn.Sequential(nn.Linear(hidden_dim, out_dim))

    def forward(self, x):
        """Apply head, body (identity when empty), and tail in order."""
        out = self.head(x)
        out = self.body(out)
        out = self.tail(out)
        return out
    
class AnchorFeatureVQ(nn.Module):
    """Vector quantizer: snap anchor features to their nearest codebook rows.

    Args:
        num_embeddings: number of code vectors in the codebook.
        embedding_dim: dimensionality of each code vector.
    """

    def __init__(self, num_embeddings=256, embedding_dim=32):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings
        # learnable codebook, shape [num_embeddings, embedding_dim]
        self.codebook = nn.Parameter(torch.randn(num_embeddings, embedding_dim))

    def encode(self, anchor_feats):
        """Return the index of the nearest code (squared L2) per row of [N, C]."""
        sq_feats = anchor_feats.pow(2).sum(dim=1, keepdim=True)   # [N, 1]
        sq_codes = self.codebook.pow(2).sum(dim=1, keepdim=True).T  # [1, K]
        inner = anchor_feats @ self.codebook.T                    # [N, K]
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a·b
        distances = sq_feats + sq_codes - 2 * inner               # [N, K]
        return distances.argmin(dim=1)                            # [N]

    def decode(self, indices):
        """Gather code vectors for the given indices: [N] -> [N, C]."""
        return self.codebook[indices]

    def forward(self, anchor_feats=None):
        """Quantize features; return (quantized [N, C], indices [N])."""
        indices = self.encode(anchor_feats)
        return self.decode(indices), indices
    
class HierarchicalQuantizer(nn.Module):
    """Two-stage soft quantizer over a [K1, K2, D] codebook.

    A coarse Gumbel-softmax distribution (softly) selects among K1 groups,
    then a fine Gumbel-softmax — conditioned on the anchor feature and a
    coarse context vector — selects among the K2 codes of the chosen group.
    Both stages use hard=False, so the output is a differentiable soft
    mixture of codes rather than a single selected entry.

    Args:
        anchor_feat_dim: dimension of the input anchor feature.
        K1: number of coarse groups in the codebook.
        K2: number of fine codes per coarse group.
        codebook_dim: dimension D of each code vector.
        temp: Gumbel-softmax temperature (tau) shared by both stages.
    """

    def __init__(self, anchor_feat_dim, K1, K2, codebook_dim, temp=1.0):
        super().__init__()
        
        # learnable hierarchical codebook
        self.codebook = nn.Parameter(torch.randn(K1, K2, codebook_dim))  # [K1, K2, D]

        # predicts coarse-group logits from the anchor feature
        self.coarse_proj = nn.Sequential(
            nn.Linear(anchor_feat_dim, 64),
            nn.ReLU(),
            nn.Linear(64, K1)
        )

        # predicts fine-code logits from anchor + coarse context
        self.fine_proj = nn.Sequential(
            nn.Linear(anchor_feat_dim + codebook_dim, 64),
            nn.ReLU(),
            nn.Linear(64, K2)
        )

        self.temp = temp

    def forward(self, anchor):
        """Softly quantize ``anchor`` of shape [N, anchor_feat_dim] to [N, D].

        NOTE: gumbel_softmax draws random noise, so repeated calls on the
        same input produce different outputs (stochastic by design).
        """
        # Step 1: coarse prediction
        coarse_logits = self.coarse_proj(anchor)  # [N, K1]
        coarse_prob = F.gumbel_softmax(coarse_logits, tau=self.temp, hard=False)  # [N, K1]
        
        # Step 2: get coarse vector (soft combination)
        coarse_vec = torch.einsum('nk,kvd->nvd', coarse_prob, self.codebook)  # [N, K2, D]
        # mean over K2 yields one representative vector per group before mixing
        coarse_context = torch.einsum('nk,kd->nd', coarse_prob, self.codebook.mean(dim=1))  # [N, D]

        # Step 3: predict fine
        fine_input = torch.cat([anchor, coarse_context], dim=-1)  # [N, D+D]
        fine_logits = self.fine_proj(fine_input)  # [N, K2]
        fine_prob = F.gumbel_softmax(fine_logits, tau=self.temp, hard=False)  # [N, K2]

        # Step 4: get final feature
        final_feat = torch.einsum('nk,nkd->nd', fine_prob, coarse_vec)  # [N, D]
        return final_feat
    

class GaussianMLP(nn.Module):
    """Predict per-anchor Gaussian attributes from a shared feature trunk.

    Args:
        xyz_in_dim: dimension of the position/anchor feature input.
        dir_in_dim: dimension of the view-direction feature input.
        hid_dim: hidden width of the shared trunk.
        feat_dim: trunk output width (also the heads' hidden width).
        n_offsets: number of Gaussians predicted per anchor.
        n_layer: number of linear layers in the shared trunk.
    """

    def __init__(self, xyz_in_dim, dir_in_dim, hid_dim=128, feat_dim=32, n_offsets=16, n_layer=4):
        super(GaussianMLP, self).__init__()
        # n_layer was previously ignored (trunk depth hard-coded to 4);
        # honor it — the default of 4 preserves the old behavior.
        self.share_net = MLP(xyz_in_dim, hid_dim, feat_dim, n_layers=n_layer, out_act=None)
        self.opacity_mlp = MLP(feat_dim + dir_in_dim, feat_dim, n_offsets * 1, 2, out_act=nn.Tanh())
        self.color_mlp = MLP(feat_dim + dir_in_dim, feat_dim, n_offsets * 3, 2, out_act=nn.Sigmoid())
        self.cov_mlp = MLP(feat_dim + dir_in_dim, feat_dim, n_offsets * 7, 2, out_act=None)
        self.offset_mlp = MLP(feat_dim + dir_in_dim, feat_dim, n_offsets * 3, 2, out_act=nn.Tanh())

    def forward(self, xyz_feat, dir_xyz):
        """Return (opacity [N*k, 1], color [N*k, 3], scale_rot [N*k, 7],
        offsets [N*k, 3]) where k = n_offsets."""
        feat = self.share_net(xyz_feat)
        # build the shared head input once instead of four times
        head_in = torch.cat([feat, dir_xyz], -1)
        neural_opacity = self.opacity_mlp(head_in).reshape(-1, 1)
        color = self.color_mlp(head_in).reshape(-1, 3)
        scale_rot = self.cov_mlp(head_in).reshape(-1, 7)
        offsets = self.offset_mlp(head_in).reshape(-1, 3)

        return neural_opacity, color, scale_rot, offsets
        
        
        
class GaussianRBFEmbedding(nn.Module):
    """Embed points as Gaussian RBF responses to learnable kernel centers.

    Args:
        in_dim: dimensionality of the input points.
        num_kernels: number of RBF centers.
        sigma: initial bandwidth; defaults to 1.0 when None. Learnable.
    """

    def __init__(self, in_dim=3, num_kernels=64, sigma=None):
        super(GaussianRBFEmbedding, self).__init__()
        self.centers = nn.Parameter(torch.randn(num_kernels, in_dim))
        # default bandwidth of 1.0 when none is supplied
        self.sigma = nn.Parameter(torch.tensor(sigma if sigma is not None else 1.0))

    def forward(self, x):
        """
        x: Tensor, shape (N, in_dim)
        return: Tensor, shape (N, num_kernels)
        """
        # pairwise differences: (N, 1, D) - (1, M, D) -> (N, M, D)
        delta = x[:, None, :] - self.centers[None, :, :]
        sq_dist = delta.pow(2).sum(dim=-1)                 # (N, M)
        return torch.exp(-sq_dist / (2 * self.sigma ** 2))

class FastGaussianRBFEmbedding(nn.Module):
    """Gaussian RBF embedding via the expanded squared-distance identity.

    Mathematically the same mapping as GaussianRBFEmbedding, but avoids the
    (N, M, D) intermediate by computing
    ||x - mu||^2 = ||x||^2 + ||mu||^2 - 2 x·mu with matrix products.

    Args:
        in_dim: dimensionality of the input points.
        num_kernels: number of RBF centers.
        sigma: initial bandwidth; defaults to 1.0 when None. Learnable.
    """

    def __init__(self, in_dim=3, num_kernels=64, sigma=None):
        super(FastGaussianRBFEmbedding, self).__init__()
        self.centers = nn.Parameter(torch.randn(num_kernels, in_dim))
        if sigma is None:
            sigma = 1.0
        self.sigma = nn.Parameter(torch.tensor(sigma))

    def forward(self, x):
        """
        x: (N, in_dim)
        return: (N, num_kernels)
        """
        # ||x||^2, shape (N, 1)
        x_norm = (x ** 2).sum(dim=1, keepdim=True)

        # ||μ||^2, shape (1, M)
        c_norm = (self.centers ** 2).sum(dim=1).unsqueeze(0)

        # x·μ^T, shape (N, M)
        cross_term = x @ self.centers.t()

        # dist^2 = ||x||^2 + ||μ||^2 - 2x·μ; clamp at 0 because floating-point
        # cancellation can produce tiny negative values, which would yield
        # RBF responses greater than 1.
        dist_sq = (x_norm + c_norm - 2 * cross_term).clamp_min(0)

        # Gaussian RBF
        return torch.exp(-dist_sq / (2 * self.sigma ** 2))
