import torch.nn as nn
import torch
import torch.nn.functional as F
from .t2t_vit import T2t_vit_t_14
from .Transformer import Transformer
from .Transformer import token_Transformer
from .Decoder import Decoder

class MultiScaleFeatureFusion(nn.Module):
    """Refine a feature map with two stacked conv-BN-ReLU stages.

    The first stage maps ``in_channels`` -> ``out_channels``; the second
    keeps the channel count. Both use 3x3 convolutions with padding 1, so
    the spatial resolution is preserved.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Stage 1: change channel count.
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Stage 2: refine at the same channel count.
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply both conv-BN-ReLU stages to ``x`` (B, C_in, H, W)."""
        out = self.conv1(x)
        out = self.relu(self.bn1(out))
        out = self.conv2(out)
        out = self.relu(self.bn2(out))
        return out

class UNetConnection(nn.Module):
    """U-Net-style skip connection that fuses two feature maps.

    Both inputs are projected to ``out_channels`` with 1x1 convolutions;
    the second input is bilinearly upsampled to the first input's spatial
    size, then the pair is concatenated and fused back to ``out_channels``.
    """

    def __init__(self, in_channels1, in_channels2, out_channels):
        super().__init__()
        # 1x1 projections bring both inputs to a common channel count.
        self.pre_conv1 = nn.Conv2d(in_channels1, out_channels, 1)
        self.pre_conv2 = nn.Conv2d(in_channels2, out_channels, 1)
        # Fuse the concatenated pair (2 * out_channels) back down.
        self.fuse_conv = nn.Sequential(
            nn.Conv2d(out_channels * 2, out_channels, 1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x1, x2):
        """Fuse ``x2`` (typically coarser) into ``x1``'s resolution."""
        fine = self.pre_conv1(x1)
        coarse = self.pre_conv2(x2)
        # Match x1's spatial size before concatenation.
        coarse = F.interpolate(
            coarse, size=fine.shape[2:], mode='bilinear', align_corners=False
        )
        fused = torch.cat([fine, coarse], dim=1)
        return self.fuse_conv(fused)

class ImageDepthNet(nn.Module):
    """VST-style saliency network: T2T-ViT encoder, token transformer,
    and a decoder, with two optional ablation modules.

    Ablation flags (read from ``args``):
        use_msf  (default False): per-scale MultiScaleFeatureFusion refinement.
        use_unet (default True):  U-Net connections that fuse coarser scales
                                  into finer ones; when off, plain 1x1
                                  projections are used instead.
    """

    def __init__(self, args):
        super().__init__()
        # Ablation configuration (missing attributes fall back to defaults).
        self.use_msf = getattr(args, 'use_msf', False)
        self.use_unet = getattr(args, 'use_unet', True)

        # VST Encoder
        self.rgb_backbone = T2t_vit_t_14(pretrained=True, args=args)

        # Optional multi-scale feature refinement, one module per scale.
        if self.use_msf:
            self.msf_1_4 = MultiScaleFeatureFusion(64, 64)
            self.msf_1_8 = MultiScaleFeatureFusion(64, 64)
            self.msf_1_16 = MultiScaleFeatureFusion(384, 384)

        # Optional U-Net connections (coarse scale fused into the next
        # finer one); channel counts follow the backbone's outputs.
        if self.use_unet:
            self.unet_1_8_to_1_4 = UNetConnection(
                in_channels1=64,
                in_channels2=64,
                out_channels=64
            )
            self.unet_1_16_to_1_8 = UNetConnection(
                in_channels1=64,
                in_channels2=384,
                out_channels=64
            )

        # Without U-Net fusion, 1x1 projections keep the channel layout
        # consistent for the decoder.
        if not self.use_unet:
            self.proj_1_8 = nn.Conv2d(64, 64, 1)
            self.proj_1_4 = nn.Conv2d(64, 64, 1)

        # VST Convertor
        self.transformer = Transformer(embed_dim=384, depth=4, num_heads=6, mlp_ratio=3.)

        # VST Decoder
        self.token_trans = token_Transformer(embed_dim=384, depth=4, num_heads=6, mlp_ratio=3.)
        self.decoder = Decoder(embed_dim=384, token_dim=64, depth=2, img_size=args.img_size)

    def reshape_sequence_to_2d(self, x, H, W, scale):
        """Convert a (B, L, C) token sequence to a (B, C, H//scale, W//scale) map.

        Assumes L == (H // scale) * (W // scale) — TODO confirm against the
        backbone's tokenization.
        """
        B, _, C = x.shape
        h, w = H // scale, W // scale
        return x.transpose(1, 2).reshape(B, C, h, w)

    def reshape_2d_to_sequence(self, x):
        """Convert a (B, C, h, w) map back to a (B, h*w, C) token sequence."""
        return x.flatten(2).transpose(1, 2)

    def forward(self, image_Input):
        """Run the full pipeline and return the decoder's outputs."""
        _, _, H, W = image_Input.shape

        # VST Encoder: token sequences at 1/16, 1/8 and 1/4 scales.
        rgb_fea_1_16, rgb_fea_1_8, rgb_fea_1_4 = self.rgb_backbone(image_Input)

        # Convert sequence features to 2D maps for the conv modules.
        rgb_fea_1_16_2d = self.reshape_sequence_to_2d(rgb_fea_1_16, H, W, 16)
        rgb_fea_1_8_2d = self.reshape_sequence_to_2d(rgb_fea_1_8, H, W, 8)
        rgb_fea_1_4_2d = self.reshape_sequence_to_2d(rgb_fea_1_4, H, W, 4)

        # Optional per-scale refinement.
        if self.use_msf:
            rgb_fea_1_16_processed = self.msf_1_16(rgb_fea_1_16_2d)
            rgb_fea_1_8_processed = self.msf_1_8(rgb_fea_1_8_2d)
            rgb_fea_1_4_processed = self.msf_1_4(rgb_fea_1_4_2d)
        else:
            rgb_fea_1_16_processed = rgb_fea_1_16_2d
            rgb_fea_1_8_processed = rgb_fea_1_8_2d
            rgb_fea_1_4_processed = rgb_fea_1_4_2d

        # Optional U-Net fusion: 1/16 -> 1/8, then 1/8 -> 1/4.
        if self.use_unet:
            rgb_fea_1_8_fused = self.unet_1_16_to_1_8(rgb_fea_1_8_processed, rgb_fea_1_16_processed)
            rgb_fea_1_4_fused = self.unet_1_8_to_1_4(rgb_fea_1_4_processed, rgb_fea_1_8_fused)
        else:
            # Plain projections when U-Net fusion is ablated.
            rgb_fea_1_8_fused = self.proj_1_8(rgb_fea_1_8_processed)
            rgb_fea_1_4_fused = self.proj_1_4(rgb_fea_1_4_processed)

        # Back to sequence format for the transformer/decoder stages.
        rgb_fea_1_16 = self.reshape_2d_to_sequence(rgb_fea_1_16_processed)
        rgb_fea_1_8 = self.reshape_2d_to_sequence(rgb_fea_1_8_fused)
        rgb_fea_1_4 = self.reshape_2d_to_sequence(rgb_fea_1_4_fused)

        # VST Convertor
        rgb_fea_1_16 = self.transformer(rgb_fea_1_16)

        # VST Decoder
        saliency_fea_1_16, fea_1_16, saliency_tokens, contour_fea_1_16, contour_tokens = self.token_trans(rgb_fea_1_16)

        outputs = self.decoder(saliency_fea_1_16, fea_1_16, saliency_tokens,
                             contour_fea_1_16, contour_tokens, rgb_fea_1_8, rgb_fea_1_4)

        return outputs


