"""
Depth Filler Network.

Author: Hongjie Fang.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from .dense import DenseBlock
from .duc import DenseUpsamplingConvolution
from mamba.PointMamba import *
from utils.depth_to_pointcloud import *
from mamba.FusionMamba import *

class DFNet(nn.Module):
    """
    Depth Filler Network (DFNet) with Mamba-refined skip connections.

    Takes an RGB image and a raw (possibly incomplete) depth map and predicts
    a completed single-channel depth map at the input resolution. The encoder
    is a stack of DenseBlocks with strided downsampling; each skip branch is
    augmented with the matching-scale RGB image and refined by a Mamba
    sequence model before being fused in the DUC-based decoder. A final VSS
    (visual state space) stage fuses the decoder output with the full-scale
    RGB image and a point cloud back-projected from the raw depth.
    """

    def __init__(self, in_channels = 4, hidden_channels = 64, L = 5, k = 12, use_DUC = True, **kwargs):
        """
        Args:
            in_channels: channels of the concatenated (rgb, depth) input.
            hidden_channels: width of all intermediate feature maps.
            L: number of layers inside each DenseBlock.
            k: growth rate (output channels) of each DenseBlock.
            use_DUC: if True, upsample with DenseUpsamplingConvolution;
                otherwise with ConvTranspose2d.
            camera_intrinsics_path (optional kwarg): path to the .npy camera
                intrinsics file used to back-project depth to a point cloud.
                Defaults to the original hard-coded path for backward
                compatibility.
        """
        super(DFNet, self).__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.L = L
        self.k = k
        self.use_DUC = use_DUC
        # First: (rgb, depth) -> hidden features at half resolution.
        self.first = self._conv_bn_relu(self.in_channels, self.hidden_channels, stride = 2)
        # Dense1: skip branch (keeps resolution).
        self.dense1s_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense1s = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense1s_conv2 = self._conv_bn_relu(self.k, self.hidden_channels)
        # Dense1: normal branch (downsamples by 2).
        self.dense1_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense1 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense1_conv2 = self._conv_bn_relu(self.k, self.hidden_channels, stride = 2)
        # Dense2: skip branch.
        self.dense2s_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense2s = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense2s_conv2 = self._conv_bn_relu(self.k, self.hidden_channels)
        # Dense2: normal branch (downsamples by 2).
        self.dense2_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense2 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense2_conv2 = self._conv_bn_relu(self.k, self.hidden_channels, stride = 2)
        # Dense3: skip branch.
        self.dense3s_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense3s = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense3s_conv2 = self._conv_bn_relu(self.k, self.hidden_channels)
        # Dense3: normal branch (downsamples by 2).
        self.dense3_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense3 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense3_conv2 = self._conv_bn_relu(self.k, self.hidden_channels, stride = 2)
        # Dense4: bottleneck (no further downsampling).
        self.dense4_conv1 = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.dense4 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.dense4_conv2 = self._conv_bn_relu(self.k, self.hidden_channels)
        # Upsample stage 1 (no skip input yet).
        self.updense1_conv = self._conv_bn_relu(self.hidden_channels, self.hidden_channels)
        self.updense1 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.updense1_duc = self._make_upconv(self.k, self.hidden_channels, upscale_factor = 2)
        # Upsample stages 2-4: input is (decoder || skip) -> 2x channels.
        self.updense2_conv = self._conv_bn_relu(self.hidden_channels * 2, self.hidden_channels)
        self.updense2 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.updense2_duc = self._make_upconv(self.k, self.hidden_channels, upscale_factor = 2)
        self.updense3_conv = self._conv_bn_relu(self.hidden_channels * 2, self.hidden_channels)
        self.updense3 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.updense3_duc = self._make_upconv(self.k, self.hidden_channels, upscale_factor = 2)
        self.updense4_conv = self._conv_bn_relu(self.hidden_channels * 2, self.hidden_channels)
        self.updense4 = DenseBlock(self.hidden_channels, self.L, self.k, with_bn = True)
        self.updense4_duc = self._make_upconv(self.k, self.hidden_channels, upscale_factor = 2)
        # Final: project hidden features to a single depth channel.
        self.final = nn.Sequential(
            nn.Conv2d(self.hidden_channels, self.hidden_channels, kernel_size = 3, stride = 1, padding = 1),
            nn.BatchNorm2d(self.hidden_channels),
            nn.ReLU(True),
            nn.Conv2d(self.hidden_channels, 1, kernel_size = 1, stride = 1)
        )

        # Mamba sequence models that refine the three skip branches.
        self.mamba1 = MixerModel(d_model = self.hidden_channels, n_layer = 2)
        self.mamba2 = MixerModel(d_model = self.hidden_channels, n_layer = 2)
        self.mamba3 = MixerModel(d_model = self.hidden_channels, n_layer = 2)

        # Depth -> point-cloud back-projection. The intrinsics path is now a
        # kwarg; the default keeps the original hard-coded value so existing
        # callers are unaffected.
        self.depthToPointCloud = DepthToPointCloud(kwargs.get(
            'camera_intrinsics_path',
            "/home/ail504-2/Tang/TransCG-main/data/camera_intrinsics/1-camIntrinsics-D435.npy"
        ))

        # 1x1 convs that fold the skip-level RGB (3 extra channels) back down
        # to hidden_channels.
        # FIX: these layers were previously constructed inside forward() on
        # every call, so they carried fresh random weights on each pass and
        # were invisible to the optimizer at construction time; registering
        # them here makes them proper trainable submodules. The attribute
        # names are unchanged, so state_dict keys stay the same.
        self.h_d1s_compress = nn.Conv2d(self.hidden_channels + 3, self.hidden_channels, kernel_size = 1, stride = 1, padding = 0)
        self.h_d2s_compress = nn.Conv2d(self.hidden_channels + 3, self.hidden_channels, kernel_size = 1, stride = 1, padding = 0)
        self.h_d3s_compress = nn.Conv2d(self.hidden_channels + 3, self.hidden_channels, kernel_size = 1, stride = 1, padding = 0)

        # VSS fusion stage: PatchMerging2D halves spatial size and doubles the
        # channel count, VSSBlock mixes at that scale, PatchExpand2D restores
        # the original resolution.
        self.downsample = PatchMerging2D(dim = self.hidden_channels)
        self.vss_block = VSSBlock(
            hidden_dim = 2 * self.hidden_channels,  # channels double after patch merging
            drop_path = 0.1,
            norm_layer = nn.LayerNorm,
            attn_drop_rate = 0.0,
            d_state = 16
        )
        self.upsample = PatchExpand2D(dim = 2 * self.hidden_channels)

        # Fuses the decoder output with full-scale RGB (3 ch) and the
        # back-projected point cloud (3 ch).
        self.feature_compress = nn.Sequential(
            nn.Conv2d(self.hidden_channels + 3 + 3, self.hidden_channels, kernel_size = 3, stride = 1, padding = 1),
            nn.BatchNorm2d(self.hidden_channels),
            nn.ReLU(True),
            nn.Conv2d(self.hidden_channels, self.hidden_channels, kernel_size = 3, stride = 1, padding = 1),
            nn.BatchNorm2d(self.hidden_channels),
            nn.ReLU(True)
        )

    def _conv_bn_relu(self, in_channels, out_channels, stride = 1):
        """3x3 conv -> BatchNorm -> ReLU, the basic unit used throughout.

        Produces the same Sequential(Conv2d, BatchNorm2d, ReLU) structure as
        the previous inline stanzas, so checkpoint keys are unchanged.
        """
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size = 3, stride = stride, padding = 1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )

    def _make_upconv(self, in_channels, out_channels, upscale_factor = 2):
        """Build one upsampling stage: DUC when use_DUC, else transposed conv."""
        if self.use_DUC:
            return DenseUpsamplingConvolution(in_channels, out_channels, upscale_factor = upscale_factor)
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channels, kernel_size = upscale_factor, stride = upscale_factor, padding = 0, output_padding = 0),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )

    def _apply_mamba(self, x, mamba_block):
        """Run a (B, C, H, W) feature map through a Mamba block.

        The map is flattened to a (B, H*W, C) sequence, processed, and
        reshaped back to (B, C, H, W).
        """
        B, C, H, W = x.shape
        seq = x.permute(0, 2, 3, 1).reshape(B, H * W, C)
        seq = mamba_block(seq)
        return seq.reshape(B, H, W, C).permute(0, 3, 1, 2)

    def forward(self, rgb, depth):
        """
        Args:
            rgb: (N, 3, H, W) color image.
            depth: (N, H, W) raw depth map, same spatial size as rgb.

        Returns:
            (N, H, W) completed depth map.
        """
        num, height, width = depth.shape
        depth = depth.view(num, 1, height, width)
        # Stem: features at 1/2 resolution.
        h = self.first(torch.cat((rgb, depth), dim = 1))
        # Back-project the raw depth to a point cloud for the final fusion.
        pointCloud = self.depthToPointCloud(depth)

        # --- dense1 (1/2 scale) ---
        depth1 = F.interpolate(depth, scale_factor = 0.5, mode = "nearest")
        rgb1 = F.interpolate(rgb, scale_factor = 0.5, mode = "nearest")
        # skip branch: append the matching-scale RGB, compress back to
        # hidden_channels, then refine with a Mamba block.
        h_d1s = self.dense1s_conv1(h)
        h_d1s = self.dense1s(torch.cat((h_d1s, depth1), dim = 1))
        h_d1s = self.dense1s_conv2(h_d1s)
        h_d1s = torch.cat((h_d1s, rgb1), dim = 1)
        h_d1s = self.h_d1s_compress(h_d1s)
        h_d1s = self._apply_mamba(h_d1s, self.mamba1)
        # normal branch (downsamples).
        h = self.dense1_conv1(h)
        h = self.dense1(torch.cat((h, depth1), dim = 1))
        h = self.dense1_conv2(h)

        # --- dense2 (1/4 scale) ---
        depth2 = F.interpolate(depth1, scale_factor = 0.5, mode = "nearest")
        rgb2 = F.interpolate(rgb1, scale_factor = 0.5, mode = "nearest")
        h_d2s = self.dense2s_conv1(h)
        h_d2s = self.dense2s(torch.cat((h_d2s, depth2), dim = 1))
        h_d2s = self.dense2s_conv2(h_d2s)
        h_d2s = torch.cat((h_d2s, rgb2), dim = 1)
        h_d2s = self.h_d2s_compress(h_d2s)
        h_d2s = self._apply_mamba(h_d2s, self.mamba2)
        h = self.dense2_conv1(h)
        h = self.dense2(torch.cat((h, depth2), dim = 1))
        h = self.dense2_conv2(h)

        # --- dense3 (1/8 scale) ---
        depth3 = F.interpolate(depth2, scale_factor = 0.5, mode = "nearest")
        rgb3 = F.interpolate(rgb2, scale_factor = 0.5, mode = "nearest")
        h_d3s = self.dense3s_conv1(h)
        h_d3s = self.dense3s(torch.cat((h_d3s, depth3), dim = 1))
        h_d3s = self.dense3s_conv2(h_d3s)
        h_d3s = torch.cat((h_d3s, rgb3), dim = 1)
        h_d3s = self.h_d3s_compress(h_d3s)
        h_d3s = self._apply_mamba(h_d3s, self.mamba3)
        h = self.dense3_conv1(h)
        h = self.dense3(torch.cat((h, depth3), dim = 1))
        h = self.dense3_conv2(h)

        # --- dense4 (1/16 scale bottleneck) ---
        depth4 = F.interpolate(depth3, scale_factor = 0.5, mode = "nearest")
        h = self.dense4_conv1(h)
        h = self.dense4(torch.cat((h, depth4), dim = 1))
        h = self.dense4_conv2(h)

        # --- decoder: each stage doubles resolution, stages 2-4 take a skip ---
        h = self.updense1_conv(h)
        h = self.updense1(torch.cat((h, depth4), dim = 1))
        h = self.updense1_duc(h)

        h = torch.cat((h, h_d3s), dim = 1)
        h = self.updense2_conv(h)
        h = self.updense2(torch.cat((h, depth3), dim = 1))
        h = self.updense2_duc(h)

        h = torch.cat((h, h_d2s), dim = 1)
        h = self.updense3_conv(h)
        h = self.updense3(torch.cat((h, depth2), dim = 1))
        h = self.updense3_duc(h)

        h = torch.cat((h, h_d1s), dim = 1)
        h = self.updense4_conv(h)
        h = self.updense4(torch.cat((h, depth1), dim = 1))
        h = self.updense4_duc(h)

        # --- fusion: decoder output || rgb || point cloud ---
        # NOTE(review): this cat requires h, rgb and pointCloud to share one
        # spatial size, i.e. the decoder output is back at input resolution —
        # confirm against the actual input sizes used in training.
        fused_h = torch.cat([h, rgb, pointCloud], dim = 1)
        fused_h = self.feature_compress(fused_h)
        # VSS stage operates channels-last: (B, C, H, W) -> (B, H, W, C).
        fused_h = fused_h.permute(0, 2, 3, 1)
        fused_h = self.downsample(fused_h)
        fused_h = self.vss_block(fused_h)
        fused_h = self.upsample(fused_h)
        fused_h = fused_h.permute(0, 3, 1, 2)  # back to (B, C, H, W)

        # Final projection to a single-channel depth map.
        h = self.final(fused_h)
        return rearrange(h, 'n 1 h w -> n h w')

