# Adapted from https://github.com/nerfstudio-project/nerfstudio/blob/a8e6f8fa3fd6c0ad2f3e681dcf1519e74ad2230f/nerfstudio/field_components/embedding.py
# Thanks for their great work!

import torch
from abc import abstractmethod
from typing import Optional
from torch import Tensor, nn
from utils.sh_utils import C0
import torch.nn.functional as F
from torchvision.models import vgg16



class PosEmbedding(nn.Module):
    """NeRF-style positional encoding.

    Concatenates the input with sin/cos of the input scaled by power-of-two
    frequencies: out = [x, sin(2^0 x), cos(2^0 x), ..., sin(2^(N-1) x), cos(2^(N-1) x)].

    Args:
        N_freqs: number of frequency bands (2^0 .. 2^(N_freqs-1)).
    """

    def __init__(self, N_freqs):
        super().__init__()
        self.N_freqs = N_freqs
        self.funcs = [torch.sin, torch.cos]
        # Register as a non-persistent buffer so the tensor follows the module
        # across `.to(device)` / `.cuda()` calls; persistent=False keeps the
        # state_dict identical to the previous plain-attribute version.
        self.register_buffer(
            "freq_bands", 2 ** torch.linspace(0, N_freqs - 1, N_freqs), persistent=False
        )
        # Kept for backward compatibility with any external code reading it.
        self.freqs = list(range(N_freqs))

    def forward(self, x):
        """Encode x of shape (..., C) into shape (..., C * (1 + 2 * N_freqs))."""
        out = [x]
        for freq in self.freq_bands:
            for func in self.funcs:
                out.append(func(freq * x))
        return torch.cat(out, dim=-1)


class FieldComponent(nn.Module):
    """Field modules that can be combined to store and compute the fields.

    Args:
        in_dim: Input dimension to module.
        out_dim: Output dimension to module.
    """

    def __init__(self, in_dim: Optional[int] = None, out_dim: Optional[int] = None) -> None:
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

    def build_nn_modules(self) -> None:
        """Instantiate any torch.nn members of this module.

        No-op by default; subclasses override when they own learnable layers.
        """

    def set_in_dim(self, in_dim: int) -> None:
        """Set the encoding's input dimension.

        Args:
            in_dim: input dimension; must be strictly positive.

        Raises:
            ValueError: if in_dim is not greater than zero.
        """
        if not in_dim > 0:
            raise ValueError("Input dimension should be greater than zero")
        self.in_dim = in_dim

    def get_out_dim(self) -> int:
        """Return the configured output dimension.

        Raises:
            ValueError: if the output dimension was never set.
        """
        if self.out_dim is None:
            raise ValueError("Output dimension has not been set")
        return self.out_dim

    @abstractmethod
    def forward(self, in_tensor: Tensor) -> Tensor:
        """Process the input tensor and return the result.

        Args:
            in_tensor: Input tensor to process
        """
        raise NotImplementedError
  
class Embedding(FieldComponent):
    """Learnable lookup table mapping integer indices to embedding vectors.
    # TODO: add different types of initializations

    Args:
        in_dim: Number of embeddings
        out_dim: Dimension of the embedding vectors
    """

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.build_nn_modules()

    def build_nn_modules(self):
        # The single learnable component: an (in_dim x out_dim) embedding table.
        self.embedding = nn.Embedding(self.in_dim, self.out_dim)

    def mean(self, dim=0):
        """Return the mean of the embedding weights along `dim`."""
        return self.embedding.weight.mean(dim)

    def forward(self, in_tensor: Tensor):
        """Look up the embedding rows for the given integer indices."""
        return self.embedding(in_tensor)
            
    
class MLP(nn.Module):
    """Small fully-connected network.

    Structure: Linear+ReLU head, (n_layers - 2) hidden Linear+ReLU pairs,
    then a Linear tail optionally followed by `out_act`.
    """

    def __init__(self, in_dim=32, hidden_dim=32, out_dim=3, n_layers=2, out_act=nn.Sigmoid()):
        super().__init__()

        # First projection into the hidden width.
        self.head = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU(True))

        # Hidden stack; stays an empty list when n_layers <= 2 so that the
        # forward pass can skip it cheaply.
        hidden = []
        for _ in range(n_layers - 2):
            hidden += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(True)]
        self.body = nn.Sequential(*hidden) if hidden else hidden

        # Final projection, with the optional output activation appended.
        tail = [nn.Linear(hidden_dim, out_dim)]
        if out_act:
            tail.append(out_act)
        self.tail = nn.Sequential(*tail)

    def forward(self, x):
        h = self.head(x)
        if self.body:
            h = self.body(h)
        return self.tail(h)
    
class CNN(nn.Module):
    """Small convolutional network.

    Structure: 3x3 conv + LeakyReLU head, (n_layers - 2) hidden 3x3 conv +
    LeakyReLU pairs, then a 1x1 conv tail optionally followed by `out_act`.
    All 3x3 convs use 'same' padding so spatial size is preserved.
    """

    def __init__(self, in_dim=3, hidden_dim=64, out_dim=3, n_layers=2, out_act=nn.Sigmoid()):
        super().__init__()

        self.head = nn.Sequential(
            nn.Conv2d(in_dim, hidden_dim, 3, stride=1, padding='same'),
            nn.LeakyReLU(),
        )

        # Hidden stack; stays an empty list when n_layers <= 2.
        hidden = []
        for _ in range(n_layers - 2):
            hidden += [
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride=1, padding='same'),
                nn.LeakyReLU(),
            ]
        self.body = nn.Sequential(*hidden) if hidden else hidden

        # 1x1 projection to the output channels, plus optional activation.
        tail = [nn.Conv2d(hidden_dim, out_dim, 1)]
        if out_act:
            tail.append(out_act)
        self.tail = nn.Sequential(*tail)

    def forward(self, x):
        h = self.head(x)
        if self.body:
            h = self.body(h)
        return self.tail(h)
    
    
class Base_MLP(nn.Module):
    """MLP predicting a per-channel affine correction for input colors.

    The tail output is scaled by 0.01 and split into two 3-channel halves
    (gamma, beta); the corrected color is clamp(gamma * color + beta / C0, 0, 1).
    """

    def __init__(self, in_dim=32, hidden_dim=32, out_dim=3, n_layers=2, out_act=nn.Sigmoid()):
        super().__init__()

        self.head = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU())

        # Hidden stack; stays an empty list when n_layers <= 2.
        hidden = []
        for _ in range(n_layers - 2):
            hidden += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU()]
        self.body = nn.Sequential(*hidden) if hidden else hidden

        # Final projection with optional output activation.
        tail = [nn.Linear(hidden_dim, out_dim)]
        if out_act is not None:
            tail.append(out_act)
        self.tail = nn.Sequential(*tail)

    def forward(self, color, gembedding, aembedding, viewdir=None):
        # viewdir is accepted for interface compatibility but unused here.
        features = torch.cat((color, gembedding, aembedding), dim=-1)  # (N, 3 + Cg + Ca)
        h = self.head(features)
        if self.body:
            h = self.body(h)
        # Scale down so the predicted correction starts near zero.
        affine = self.tail(h) * 0.01
        gamma, beta = torch.split(affine, [3, 3], dim=-1)
        # C0 comes from utils.sh_utils — presumably the SH degree-0 constant; verify there.
        corrected = gamma * color + (beta / C0)
        return torch.clamp(corrected, 0.0, 1.0)
    
    
class UpsampleBlock(nn.Module):
    """2x spatial upsample via PixelShuffle, followed by a 3x3 conv + ReLU.

    PixelShuffle(2) trades channels for resolution, so the conv input has
    num_input_channels // 4 channels.
    """

    def __init__(self, num_input_channels, num_output_channels):
        super().__init__()
        self.pixel_shuffle = nn.PixelShuffle(2)
        self.conv = nn.Conv2d(num_input_channels // 4, num_output_channels, 3, stride=1, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        return self.relu(self.conv(self.pixel_shuffle(x)))
    
class Refiner(nn.Module):
    """Upsampling refinement head that predicts a residual image.

    Projects the input features, upsamples 16x via four UpsampleBlocks,
    resizes to (H, W), and predicts a residual in [0, 1) that is added to
    `init_x` and clamped to [0, 1].
    """

    def __init__(self, num_input_channels, num_output_channels):
        super().__init__()

        self.conv1 = nn.Conv2d(num_input_channels, 256, 3, stride=1, padding=1)
        self.up1 = UpsampleBlock(256, 128)
        self.up2 = UpsampleBlock(128, 64)
        self.up3 = UpsampleBlock(64, 32)
        self.up4 = UpsampleBlock(32, 16)

        self.conv2 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(16, num_output_channels, 3, stride=1, padding=1)
        self.relu = nn.ReLU()

    def forward(self, init_x, x, H, W):
        h = self.relu(self.conv1(x))
        for up in (self.up1, self.up2, self.up3, self.up4):
            h = up(h)
        # Resize to the exact target resolution.
        h = F.interpolate(h, size=(H, W), mode='bilinear', align_corners=True)
        h = self.relu(self.conv2(h))
        # tanh(relu(.)) keeps the residual in [0, 1).
        residual = torch.tanh(self.relu(self.conv3(h)))
        return torch.clip(residual + init_x, 0.0, 1.0), residual

import torch
import torch.nn as nn
from torchvision.models import vgg16, VGG16_Weights

class VGGFeatureExtractor(nn.Module):
    """Pooled VGG16 feature extractor with a linear projection.

    Runs images through the first 30 layers of a pretrained VGG16 `features`
    stack, average-pools to 1x1, and projects the 512-dim vector to `output_dim`.
    """

    def __init__(self, output_dim=512):
        super().__init__()
        # Pretrained backbone via the modern `weights=` API (downloads on first use).
        backbone = vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features

        # Keep layers up to index 30 of the feature stack.
        self.feature_extractor = nn.Sequential(*list(backbone.children())[:30])

        # Collapse spatial dims to 1x1 regardless of input resolution.
        self.adaptive_pool = nn.AdaptiveAvgPool2d((1, 1))

        # Project VGG's 512 channels down (or up) to the requested dimension.
        self.projection = nn.Linear(512, output_dim)

    def forward(self, x):
        feats = self.feature_extractor(x)
        pooled = self.adaptive_pool(feats)
        flat = torch.flatten(pooled, 1)
        return self.projection(flat)