# Adapted from the nerfstudio embedding module:
# https://github.com/nerfstudio-project/nerfstudio/blob/a8e6f8fa3fd6c0ad2f3e681dcf1519e74ad2230f/nerfstudio/field_components/embedding.py
# Thanks for their great work!

import torch, math
from abc import abstractmethod
from typing import Optional
from torch import Tensor, nn
from utils.sh_utils import C0
import torch.nn.functional as F
from torchvision.models import vgg16



class PosEmbedding(nn.Module):
    """NeRF-style positional encoding.

    Maps an input x of shape (..., D) to
    [x, sin(2^0 x), cos(2^0 x), ..., sin(2^(N-1) x), cos(2^(N-1) x)]
    concatenated on the last dimension, giving (..., D * (1 + 2*N_freqs)).
    """

    def __init__(self, N_freqs):
        super(PosEmbedding, self).__init__()
        self.N_freqs = N_freqs
        self.funcs = [torch.sin, torch.cos]
        # Frequencies 2^0 .. 2^(N_freqs-1).
        self.freq_bands = 2 ** torch.linspace(0, N_freqs - 1, N_freqs)
        # Kept for backward compatibility with callers that read this attribute;
        # the forward pass no longer needs it (it was only zipped in and unused).
        self.freqs = list(range(N_freqs))

    def forward(self, x):
        """Encode x; output shape is (..., x.shape[-1] * (1 + 2 * N_freqs))."""
        out = [x]
        for freq in self.freq_bands:
            for func in self.funcs:
                out.append(func(freq * x))
        return torch.cat(out, dim=-1)


class FieldComponent(nn.Module):
    """Base class for field modules that can be combined to store and compute fields.

    Args:
        in_dim: Input dimension to module.
        out_dim: Output dimension to module.
    """

    def __init__(self, in_dim: Optional[int] = None, out_dim: Optional[int] = None) -> None:
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

    def build_nn_modules(self):
        """Instantiate any torch.nn members of the module; no-op by default."""

    def set_in_dim(self, in_dim: int):
        """Set the encoding's input dimension.

        Args:
            in_dim: input dimension; must be strictly positive.
        """
        if not in_dim > 0:
            raise ValueError("Input dimension should be greater than zero")
        self.in_dim = in_dim

    def get_out_dim(self):
        """Return the encoding's output dimension; raises if it was never set."""
        if self.out_dim is None:
            raise ValueError("Output dimension has not been set")
        return self.out_dim

    @abstractmethod
    def forward(self, in_tensor: Tensor):
        """Process the input tensor and return the result.

        Args:
            in_tensor: Input tensor to process
        """
        raise NotImplementedError
  
class Embedding(FieldComponent):
    """Learnable lookup table of embedding vectors, indexed by integer tensors.
    # TODO: add different types of initializations

    Args:
        in_dim: Number of embeddings
        out_dim: Dimension of the embedding vectors
    """

    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.build_nn_modules()

    def build_nn_modules(self):
        """Create the underlying torch embedding table."""
        self.embedding = torch.nn.Embedding(self.in_dim, self.out_dim)

    def mean(self, dim=0):
        """Return the mean of the embedding weights along `dim`."""
        return torch.mean(self.embedding.weight, dim=dim)

    def forward(self, in_tensor: Tensor):
        """Look up the embedding vectors for the given index tensor."""
        return self.embedding(in_tensor)
            
    
class MLP(nn.Module):
    """Fully-connected network: Linear+ReLU head, (n_layers - 2) hidden
    Linear+ReLU layers, and a Linear tail with an optional output activation.
    """

    def __init__(self, in_dim=32, hidden_dim=32, out_dim=3, n_layers=2, out_act=nn.Sigmoid()):
        super(MLP, self).__init__()

        self.head = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.ReLU(True))

        hidden = []
        for _ in range(n_layers - 2):
            hidden += [nn.Linear(hidden_dim, hidden_dim), nn.ReLU(True)]
        # Stays an empty (falsy) list when there are no hidden layers,
        # matching the `if self.body` check in forward.
        self.body = nn.Sequential(*hidden) if hidden else hidden

        tail = [nn.Linear(hidden_dim, out_dim)]
        if out_act:
            tail.append(out_act)
        self.tail = nn.Sequential(*tail)

    def forward(self, x):
        h = self.head(x)
        if self.body:
            h = self.body(h)
        return self.tail(h)
    
class CNN(nn.Module):
    """Convolutional network: 3x3 conv + LeakyReLU head, (n_layers - 2) hidden
    3x3 conv + LeakyReLU layers, and a 1x1 conv tail with optional activation.
    Spatial size is preserved ('same' padding, stride 1).
    """

    def __init__(self, in_dim=3, hidden_dim=64, out_dim=3, n_layers=2, out_act=nn.Sigmoid()):
        super(CNN, self).__init__()
        self.head = nn.Sequential(
            nn.Conv2d(in_dim, hidden_dim, 3, stride=1, padding='same'),
            nn.LeakyReLU(),
        )

        hidden = []
        for _ in range(n_layers - 2):
            hidden += [
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride=1, padding='same'),
                nn.LeakyReLU(),
            ]
        # Stays an empty (falsy) list when there are no hidden layers.
        self.body = nn.Sequential(*hidden) if hidden else hidden

        tail = [nn.Conv2d(hidden_dim, out_dim, 1)]
        if out_act:
            tail.append(out_act)
        self.tail = nn.Sequential(*tail)

    def forward(self, x):
        h = self.head(x)
        if self.body:
            h = self.body(h)
        return self.tail(h)
    
class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention: global average pool, a
    bottleneck of two 1x1 convs, then per-channel sigmoid gating of the input.
    """

    def __init__(self, channels, reduction=8):
        super(SEBlock, self).__init__()
        squeezed = channels // reduction
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, squeezed, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(squeezed, channels, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Gate each channel of x by its learned (0, 1) attention weight.
        return x * self.se(x)
    
    
class UpsampleBlock(nn.Module):
    """2x upsampling block: 3x3 conv -> ReLU -> stride-2 transposed conv ->
    ReLU -> 1x1 projection, with an optional output activation.
    """

    def __init__(self, num_input_channels, num_output_channels, hid_dim=64, out_act=None):
        super(UpsampleBlock, self).__init__()
        layers = [
            nn.Conv2d(num_input_channels, hid_dim, 3, stride=1, padding=1),
            nn.ReLU(True),
            # kernel 2 / stride 2 doubles the spatial resolution.
            nn.ConvTranspose2d(hid_dim, hid_dim, kernel_size=2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(hid_dim, num_output_channels, 1, stride=1),
        ]
        if out_act:
            layers.append(out_act)
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
    
    
class UpSampler(nn.Module):
    """Stack of UpsampleBlocks that upsamples a feature map by `up_ratio`
    (a power of two) and maps it to a single 3-channel image in [0, 1].
    """

    def __init__(self, num_input_channels, up_ratio=4, hid_dim=32):
        super(UpSampler, self).__init__()

        # Bug fix: the old check `up_ratio == 2**(math.log2(up_ratio))` merely
        # round-trips the value through float log/exp and accepts almost any
        # input. Require an actual power of two explicitly.
        assert up_ratio > 1 and math.log2(up_ratio).is_integer(), \
            'up_ratio must be a power of two (>= 2)'

        self.head = UpsampleBlock(num_input_channels, hid_dim, hid_dim, out_act=nn.ReLU(True))

        n_fold = int(math.log2(up_ratio))
        # head + (n_fold - 2) body blocks + tail, each upsampling 2x.
        # NOTE(review): for up_ratio == 2 this still builds head + tail (4x
        # total) — callers appear to rely on up_ratio >= 4; confirm.
        self.has_body = (n_fold - 2) > 0
        bodys = [
            UpsampleBlock(hid_dim, hid_dim, hid_dim, out_act=nn.ReLU(True))
            for _ in range(n_fold - 2)
        ]
        self.bodys = nn.Sequential(*bodys)

        self.tail = UpsampleBlock(hid_dim, 3, hid_dim, nn.Sigmoid())

    def forward(self, feats_map):
        """Upsample `feats_map` (1, C, h, w) and return a (3, H, W) image in [0, 1]."""
        x = self.head(feats_map)
        if self.has_body:
            x = self.bodys(x)
        x = self.tail(x)
        # Drop the batch dimension: callers expect a single image.
        return x[0]

    
    
class Model1(nn.Module):
    """Decoder that fuses a low-res image with an embedding map, upsamples 4x
    via two stride-2 transposed convs, then resizes to (H, W) and projects to
    `num_output_channels` channels in [0, 1].
    """

    def __init__(self, num_input_channels, num_output_channels=3):
        super(Model1, self).__init__()

        self.hid_dim = 64
        # Bug fix: `self.act` was referenced below before ever being assigned,
        # so constructing Model1 raised AttributeError. Define it first.
        self.act = nn.ReLU()
        self.conv1 = nn.Sequential(
            nn.Conv2d(num_input_channels, self.hid_dim, 1, stride=1), self.act,
            nn.Conv2d(self.hid_dim, self.hid_dim, 1, stride=1), self.act,
            nn.ConvTranspose2d(self.hid_dim, self.hid_dim, kernel_size=2, stride=2), self.act
            )

        self.conv2 = nn.Sequential(
            nn.Conv2d(self.hid_dim, self.hid_dim, 1, stride=1), self.act,
            nn.Conv2d(self.hid_dim, self.hid_dim, 1, stride=1), self.act,
            nn.ConvTranspose2d(self.hid_dim, self.hid_dim, kernel_size=2, stride=2), self.act,
            )

        self.conv3 = nn.Conv2d(self.hid_dim, num_output_channels, 1, stride=1)
        self.conv4 = nn.Conv2d(num_output_channels, num_output_channels, 1, stride=1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, image, embedding, H, W):
        '''
            image: (3, H//s, W//s)
            embedding: (C, D, D); num_input_channels must equal C + 3.
            Returns a (num_output_channels, H, W) tensor in [0, 1].
        '''
        h, w = image.shape[-2:]
        # Resize the embedding map to the image resolution, then stack along channels.
        embedding = F.interpolate(embedding[None], size=(h, w), mode='bilinear', align_corners=True)
        x = torch.cat([image[None], embedding], 1)   # (1, C+3, h, w)

        x = self.conv1(x)  # 2x upsample
        x = self.conv2(x)  # 2x upsample

        x = self.conv3(x)
        x = self.relu(x)
        # Final resize to the requested output resolution.
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        x = self.conv4(x)
        x = self.sigmoid(x)

        return x[0]  # (num_output_channels, H, W)



class ResnetBlock(nn.Module):
    """
        Define a Resnet block.
        Refer to "https://github.com/ermongroup/ncsn/blob/master/models/pix2pix.py"
    """

    def __init__(
        self,
        dim,
        kernel_size=1,
        padding_type='zero',
        norm_layer=nn.BatchNorm2d,
        use_dropout=False,
        use_bias=True,
        act=None
    ):
        """Initialize the Resnet block.

        A resnet block is a conv block with a skip connection: the conv block is
        assembled by build_conv_block, and forward adds the input back to its output.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(
            dim, kernel_size, padding_type,
            norm_layer, use_dropout, use_bias, act
        )

    def build_conv_block(
        self,
        dim,
        kernel_size,
        padding_type,
        norm_layer,
        use_dropout,
        use_bias,
        act=nn.GELU()
    ):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            kernel_size (int)   -- conv kernel size.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer (or falsy to skip).
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not
            act                 -- activation module inserted after the first half (or falsy).
        Returns a conv block (two conv halves, each optionally padded and normalized).
        """

        def half():
            # One pad/conv/norm unit. Conv padding stays 0: 'reflect' and
            # 'replicate' use explicit pad layers, 'zero' adds no padding at all.
            layers = []
            if padding_type == 'reflect':
                layers.append(nn.ReflectionPad2d(1))
            elif padding_type == 'replicate':
                layers.append(nn.ReplicationPad2d(1))
            elif padding_type != 'zero':
                raise NotImplementedError('padding [%s] is not implemented' % padding_type)
            layers.append(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=0, bias=use_bias))
            if norm_layer:
                layers.append(norm_layer(dim, momentum=0.1))
            return layers

        conv_block = half()
        if act:
            conv_block.append(act)
        if use_dropout:
            conv_block.append(nn.Dropout(0.5))
        conv_block.extend(half())

        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Forward function (with skip connections)"""
        return x + self.conv_block(x)  # add skip connections
    
    

def get_activation(act):
    """Map an activation name to a module.

    Args:
        act: one of 'relu', 'gelu', 'none' (case-insensitive).

    Returns:
        The corresponding nn module, or None for 'none'.

    Raises:
        NotImplementedError: for any other name (now with the offending
        name in the message instead of a bare raise).
    """
    name = act.lower()
    if name == 'relu':
        return nn.ReLU(inplace=True)
    if name == 'gelu':
        return nn.GELU()
    if name == 'none':
        return None
    raise NotImplementedError(f'activation [{act}] is not implemented')


class R2L(nn.Module):
    """R2L-style rendering network: a 1x1-conv head, a residual body with a
    long skip connection, and a tail that is either a plain 1x1 projection or
    a super-resolution up-sampling stack.

    Args:
        args: config namespace; this class reads netdepth, netwidth,
            activation_fn, use_sr_module, num_conv_layers, num_sr_blocks,
            sr_kernel and dataset_type from it.
        input_dim: number of input channels.
        output_dim: number of output channels (fed through a final Sigmoid).
    """
    def __init__(
        self,
        args,
        input_dim, 
        output_dim
    ):
        super(R2L, self).__init__()
        self.args =  args
        self.input_dim = input_dim
        D, W = args.netdepth, args.netwidth
        Ws = [W] * (D-1) + [3]
        act = get_activation(args.activation_fn)
        # Head: 1x1 conv, followed by the activation only when one is configured.
        self.head = nn.Sequential(
            *[nn.Conv2d(input_dim, Ws[0], 1), act]) if act else nn.Sequential(*[nn.Conv2d(input_dim, Ws[0], 1)]
        )

        n_block = (D - 2) // 2 # 2 layers per resblock
        body = [ResnetBlock(W, act=act) for _ in range(n_block)]
        self.body = nn.Sequential(*body)
            
        if args.use_sr_module:
            n_conv = args.num_conv_layers # conv layers in each sr block
            n_up_block = args.num_sr_blocks # number of sr blocks
            kernels = args.sr_kernel

            up_blocks = []
            # Each iteration doubles spatial resolution then refines with resblocks.
            for i in range(n_up_block - 1):
                # First block takes the body's width; later ones chain kernels[i-1] -> kernels[i].
                in_dim = Ws[-2] if not i else kernels[i]
                up_blocks += [nn.ConvTranspose2d(in_dim, kernels[i], 4, stride=2, padding=1)]
                up_blocks += [ResnetBlock(kernels[i], act=act) for _ in range(n_conv)]

            # hard-coded up-sampling factors
            # 12x for colmap
            if args.dataset_type == 'Colmap':
                k, s, p = 3, 3, 0
            elif args.dataset_type == 'Blender': # 8x for blender
                k, s, p = 4, 2, 1
            else:
                raise ValueError(f'Undefined dataset type: {args.dataset_type}.')            
        
            # NOTE(review): `kernels[1]` assumes args.num_sr_blocks == 3, so that
            # kernels[1] is the last channel count produced by the loop above —
            # confirm before using a different num_sr_blocks.
            up_blocks += [nn.ConvTranspose2d(kernels[1], kernels[-1], k, stride=s, padding=p)]
            up_blocks += [ResnetBlock(kernels[-1], act=act)  for _ in range(n_conv)]
            up_blocks += [nn.Conv2d(kernels[-1], output_dim, 1), nn.Sigmoid()]
            self.tail = nn.Sequential(*up_blocks )
        else:
            self.tail = nn.Sequential(*[nn.Conv2d(W, output_dim, 1), nn.Sigmoid()])
    
    def forward(self, x):
        """Run head -> residual body (with long skip) -> tail; output in [0, 1]."""
        x = self.head(x)
        x = self.body(x) + x
        return self.tail(x)
    
    
class ResidualBlock1x1(nn.Module):
    """1x1 convolutional residual block with BatchNorm and GELU.

    conv1 -> bn1 -> GELU -> conv2 -> bn2, added to the input, then GELU.
    """

    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=1, bias=False)
        self.bn1   = nn.BatchNorm2d(channels)
        self.act   = nn.GELU()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=1, bias=False)
        self.bn2   = nn.BatchNorm2d(channels)

    def forward(self, x):
        branch = self.act(self.bn1(self.conv1(x)))
        branch = self.bn2(self.conv2(branch))
        # Activation applied after the skip connection.
        return self.act(branch + x)

class SRModule(nn.Module):
    """Super-resolution module.

    Transposed-conv upsampling (2x by default, 3x when `is_sr2` is set),
    then BatchNorm + GELU, then two 1x1 residual blocks.
    """

    def __init__(self, in_ch, mid_ch, is_sr2=False):
        super().__init__()
        if is_sr2:
            # 3x upsampling
            self.up = nn.ConvTranspose2d(in_ch, mid_ch, kernel_size=3, stride=3, padding=0, bias=False)
        else:
            # 2x upsampling
            self.up = nn.ConvTranspose2d(in_ch, mid_ch, kernel_size=4, stride=2, padding=1, bias=False)
        self.bn_up = nn.BatchNorm2d(mid_ch)
        self.act   = nn.GELU()
        # Two 1x1 residual refinement blocks.
        self.res1 = ResidualBlock1x1(mid_ch)
        self.res2 = ResidualBlock1x1(mid_ch)

    def forward(self, x):
        out = self.act(self.bn_up(self.up(x)))
        return self.res2(self.res1(out))
    
class DepthwiseConv(nn.Module):
    """Depthwise-separable convolution: per-channel spatial conv, 1x1 pointwise
    channel mixing, then BatchNorm + ReLU.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super().__init__()
        # groups=in_channels makes the first conv purely per-channel (depthwise).
        self.depthwise = nn.Conv2d(
            in_channels, in_channels, kernel_size,
            stride, padding, groups=in_channels, bias=False,
        )
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.depthwise(x)
        out = self.pointwise(out)
        return self.act(self.bn(out))

class MinimalUpsampleBlock(nn.Module):
    """PixelShuffle upsampling block.

    A depthwise-separable conv expands channels by upscale_factor^2, then
    PixelShuffle trades those channels for an upscale_factor-times larger
    spatial grid, keeping the channel count unchanged overall.
    """

    def __init__(self, in_channels, upscale_factor=2):
        super().__init__()
        expanded = in_channels * upscale_factor ** 2
        self.conv = DepthwiseConv(in_channels, expanded)
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)

    def forward(self, x):
        return self.pixel_shuffle(self.conv(x))

class MinimalUpsampleNet(nn.Module):
    """Small upsampling network: 3x3 head conv, `num_blocks` PixelShuffle
    blocks (2x each), and a sigmoid-bounded 3x3 tail conv.
    """

    def __init__(self, in_channels=3, out_channels=3, num_blocks=2):
        super().__init__()
        self.head = nn.Conv2d(in_channels, 16, 3, padding=1)
        blocks = [MinimalUpsampleBlock(16) for _ in range(num_blocks)]
        self.body = nn.Sequential(*blocks)
        self.tail = nn.Conv2d(16, out_channels, 3, padding=1)

    def forward(self, x):
        out = self.body(self.head(x))
        out = torch.sigmoid(self.tail(out))
        # Drop the batch dimension: callers expect a single image.
        return out[0]
    
class WeightModel(nn.Module):
    """Predicts a softmax weight vector from an image.

    The input is resized to `in_shape`, passed through `n_layers` stride-2
    convs (head + body + tail), flattened, and mapped by a small MLP to
    `out_dim` softmax weights.
    """

    def __init__(self, in_dim=3, hid_dim=32, out_dim=64, n_layers=4, in_shape=(224, 224)):
        super(WeightModel, self).__init__()
        self.in_dim = in_dim
        self.hid_dim = hid_dim
        self.out_dim = out_dim
        self.n_layers = n_layers
        self.in_shape = in_shape

        self.conv_head = nn.Sequential(nn.Conv2d(3, hid_dim, 3, stride=2, padding=1), nn.ReLU(True))

        body = []
        for _ in range(self.n_layers - 2):
            body += [nn.Conv2d(hid_dim, hid_dim, 3, stride=2, padding=1), nn.ReLU(True)]
        # Stays an empty (falsy) list when n_layers <= 2.
        self.conv_body = nn.Sequential(*body) if body else body

        self.conv_tail = nn.Sequential(nn.Conv2d(hid_dim, hid_dim, 3, stride=2, padding=1), nn.ReLU(True))

        # n_layers stride-2 convs shrink each spatial side by 2**n_layers.
        self.flatten_dim = hid_dim * (in_shape[0] // (2 ** n_layers)) * (in_shape[1] // (2 ** n_layers))
        self.weight_layer = nn.Sequential(
            nn.Linear(self.flatten_dim, out_dim),
            nn.ReLU(True),
            nn.Linear(out_dim, out_dim),
        )

    def forward(self, x):
        '''
            x: (B, C, H, W); resized to in_shape when (H, W) differs.
            Returns (B, out_dim) softmax weights.
        '''
        B, C, H, W = x.shape
        if (H, W) != self.in_shape:
            x = F.interpolate(x, self.in_shape, mode='bilinear', align_corners=True)

        x = self.conv_head(x)
        if self.conv_body:
            x = self.conv_body(x)
        x = self.conv_tail(x)

        flat = x.reshape(B, -1)
        logits = self.weight_layer(flat)
        return torch.softmax(logits, dim=-1)


class ResidualBlock(nn.Module):
    """Plain 3x3 residual block without normalization: x + conv2(relu(conv1(x)))."""

    def __init__(self, channels=64):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, 1, 1)
        self.conv2 = nn.Conv2d(channels, channels, 3, 1, 1)

    def forward(self, x):
        branch = F.relu(self.conv1(x))
        branch = self.conv2(branch)
        return branch + x
    
    
class Encoder(nn.Module):
    """Feature encoder: one 3x3 conv followed by `num_blocks` residual blocks.

    Spatial resolution is preserved throughout.
    """

    def __init__(self, in_ch=3, channels=64, num_blocks=8):
        super().__init__()
        blocks = [ResidualBlock(channels) for _ in range(num_blocks)]
        self.body = nn.Sequential(nn.Conv2d(in_ch, channels, 3, 1, 1), *blocks)

    def forward(self, x):
        return self.body(x)

def get_coord(height, width, device):
    """Return a (height*width, 2) grid of (x, y) coordinates in [-1, 1].

    Rows are ordered row-major (y varies slowest), and each row holds
    (x, y) — note the x-first ordering.
    """
    ys = torch.linspace(-1, 1, height, device=device)
    xs = torch.linspace(-1, 1, width, device=device)
    grid_y, grid_x = torch.meshgrid(ys, xs, indexing="ij")
    return torch.stack([grid_x, grid_y], dim=-1).reshape(-1, 2)


class GaussianGenerator(nn.Module):
    """Predicts per-pixel 2D Gaussian parameters (color, opacity, position,
    covariance Cholesky factors) from a low-resolution image.

    A residual Encoder extracts features; optional PixelShuffle expands the
    spatial grid by `expand_factor`; four linear heads then predict the
    Gaussian attributes for every expanded pixel.
    """
    def __init__(self, in_ch=3, channels=64, num_blocks=8, expand_factor=1.0):
        super(GaussianGenerator, self).__init__()
        self.encoder = Encoder(in_ch=in_ch, channels=channels, num_blocks=num_blocks)
        
        # PixelShuffle by expand_factor divides channels by expand_factor**2.
        self.expand_factor = expand_factor
        assert channels % (expand_factor**2) == 0
        self.flat_dim =  int(channels // (expand_factor**2))
        
        self.mlp_color = nn.Linear(self.flat_dim, 3)     # color
        self.mlp_offset = nn.Linear(self.flat_dim, 2)         # offset
        self.mlp_cov = nn.Linear(self.flat_dim, 3)        # cov: (σ_x, σ_y, σ_xy)
        self.mlp_opa = nn.Linear(self.flat_dim, 1)        # opacity
        # NOTE(review): expand_factor defaults to a float (1.0) although
        # nn.PixelShuffle expects an int; the shuffle is only applied when
        # expand_factor > 1, so pass an int in that case — confirm callers do.
        self.ps = nn.PixelShuffle(expand_factor)
        
        # Interpolation endpoints for the covariance Cholesky entries.
        # Plain tensor attribute, NOT a registered buffer: it is excluded from
        # state_dict and moved to the input's device manually in forward.
        self.cov_range = torch.tensor([[0.0000, -0.8600,  0.0000], [2.3600, 1.5400, 2.2300]])
    
    def forward(self, lr_img, scale_factor=1.0):
        """Return (color, opacity, xy, chol) for every expanded pixel of lr_img.

        lr_img: (B, C, H, W). Each returned tensor is flattened over
        batch and spatial dims to (B * H*e * W*e, k) with e = expand_factor.
        """
        B, C, H, W = lr_img.shape
        # Target high-res size used only for the per-pixel centering terms below.
        hH, hW = int(H*scale_factor), int(W*scale_factor)
        cov_range = self.cov_range.to(lr_img.device).float()
        
        feat = self.encoder(lr_img)
        if self.expand_factor > 1:
            feat = self.ps(feat)
        # (B, C', H', W') -> (B*H'*W', C'): one feature row per Gaussian.
        feat = feat.permute(0, 2, 3, 1).reshape(-1, self.flat_dim)
        
        opacity = torch.sigmoid(self.mlp_opa(feat))
        color = torch.sigmoid(self.mlp_color(feat))
        
        # Offsets in (-1, 1), added to the base grid in normalized coordinates.
        offset = torch.tanh(self.mlp_offset(feat)) 
        coords = get_coord(int(H*self.expand_factor), int(W*self.expand_factor), lr_img.device) 
        
        # NOTE(review): assumes B == 1 — coords covers one H'xW' grid while
        # feat/offset are flattened over the batch; confirm callers pass B == 1.
        xyz1 = coords[:,0:1] + 2 * offset[:,0:1] / (W*self.expand_factor) - 1/hW # -  1/lr_w
        xyz2 = coords[:,1:2] + 2 * offset[:,1:2] / (H*self.expand_factor) - 1/hH # -  1/lr_h
        get_xyz = torch.cat((xyz1, xyz2), dim=1)
        
        cov_weights = torch.sigmoid(self.mlp_cov(feat)) # (N, 3)
        
        # Blend between the two cov_range rows, then scale with the output ratio.
        chol = cov_range[0:1] * cov_weights + cov_range[1:2] * (1 - cov_weights)
        chol = chol * scale_factor
        return color, opacity, get_xyz, chol