import torch
import torch.nn as nn
import torch.nn.functional as F


class DoubleConv(nn.Module):
    """Two successive (Conv3x3 -> BatchNorm -> ReLU) stages.

    padding=1 keeps the spatial size unchanged; the convolutions run without
    bias because each one is immediately followed by BatchNorm.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        # Hidden width defaults to the output width when not supplied.
        mid_channels = mid_channels or out_channels
        stages = [
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both conv stages: (N, C_in, H, W) -> (N, C_out, H, W)."""
        return self.double_conv(x)


class Down(nn.Module):
    """Encoder downscaling stage: 2x2 max-pool followed by a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Halve the spatial resolution first, then transform the channels.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        """(N, C_in, H, W) -> (N, C_out, H/2, W/2)."""
        return self.maxpool_conv(x)


class Up(nn.Module):
    """Decoder upscaling stage: upsample x1, pad to match x2, concat, DoubleConv.

    Args:
        in_channels: channel count of the concatenated tensor fed to the
            DoubleConv (upsampled decoder channels + skip channels).
        out_channels: channels produced by the DoubleConv.
        bilinear: use parameter-free bilinear upsampling when True, otherwise
            a learned 2x transposed convolution.
    """
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()

        if bilinear:
            # Bilinear upsampling keeps the channel count; the DoubleConv then
            # reduces the concatenated channels down to out_channels.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels)
        else:
            # FIX: the transposed conv receives the decoder tensor with its
            # full channel count and must halve it, so that after concatenation
            # with the skip tensor the DoubleConv again sees in_channels.
            # The previous ConvTranspose2d(in_channels // 2, in_channels // 2)
            # signature crashed at runtime with a channel mismatch.
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample x1, align it to x2's spatial size, and fuse the two.

        Args:
            x1: decoder tensor to be upsampled.
            x2: encoder skip tensor providing the target spatial size.
        """
        x1 = self.up(x1)

        # Inputs need not be powers of two: pad x1 so it matches x2 exactly.
        diff_y = x2.size()[2] - x1.size()[2]
        diff_x = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diff_x // 2, diff_x - diff_x // 2,
                        diff_y // 2, diff_y - diff_y // 2])

        # Concatenate along the channel dimension (skip tensor first).
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


class OutConv(nn.Module):
    """Final projection: 1x1 convolution to the target channels, then BatchNorm."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        # bias is omitted since BatchNorm supplies a learnable shift.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    def forward(self, x):
        """(N, C_in, H, W) -> (N, C_out, H, W); spatial size is unchanged."""
        return self.conv(x)


class InputAdapter(nn.Module):
    """Map an input of a known shape into a fixed target feature space.

    Pipeline: 1x1 channel projection -> two strided 3x3 convs (4x spatial
    reduction) -> adaptive average pooling to the target H x W -> 1x1
    projection to the target channel count.

    Args:
        input_shape: expected input shape as (N, C, H, W); only C is read.
        target_shape: desired output shape as (N, C, H, W); C, H, W are read.
    """

    def __init__(self, input_shape, target_shape):
        super(InputAdapter, self).__init__()
        self.input_shape = input_shape
        self.target_shape = target_shape

        # Stage 1: project the raw channels to a small working width.
        self.channel_adapt = nn.Conv2d(input_shape[1], 16, kernel_size=1)

        # Stage 2: extract features while shrinking H and W by 4x, keeping
        # the subsequent adaptive pooling cheap for very large inputs.
        self.feature_extract = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        # Stage 3: force the exact target spatial size.
        self.spatial_adapt = nn.AdaptiveAvgPool2d((target_shape[2], target_shape[3]))

        # Stage 4: project to the target channel count.
        self.final_adapt = nn.Sequential(
            nn.Conv2d(32, target_shape[1], kernel_size=1),
            nn.BatchNorm2d(target_shape[1]),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Run the four adaptation stages in order and return the result."""
        x = self.channel_adapt(x)
        x = self.feature_extract(x)
        x = self.spatial_adapt(x)
        x = self.final_adapt(x)
        return x


class EnhancedUNet(nn.Module):
    """Enhanced U-Net that fuses two inputs into one high-precision image.

    Both inputs are first mapped into a shared (32, 256, 128) feature space
    by InputAdapter modules, concatenated to 64 channels, then passed through
    a standard U-Net encoder/decoder with skip connections.

    Args:
        bilinear: use bilinear upsampling in the decoder when True.
        output_activation: name of the final activation squashing the output
            into [-1, 1]; one of 'tanh', 'hardtanh', 'sigmoid_mapped',
            'clamp', 'scalable_tanh'.

    Raises:
        ValueError: if output_activation is not a supported name.
    """
    def __init__(self, bilinear=True, output_activation='tanh'):
        super(EnhancedUNet, self).__init__()

        # Base number of feature channels.
        self.n_channels = 64
        factor = 2 if bilinear else 1

        # Adapter layers: bring the two differently-shaped inputs to the same
        # size and channel count.
        # NOTE: the first input's expected shape is (1, 1, 2560, 64).
        self.adapter1 = InputAdapter((1, 1, 2560, 64), (1, 32, 256, 128))
        self.adapter2 = InputAdapter((1, 1, 256, 128), (1, 32, 256, 128))

        # Encoder: input is the concatenation of the two 32-channel adapter
        # outputs, hence 64 channels.
        self.inc = DoubleConv(64, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024 // factor)

        # Decoder: channel counts account for concatenated skip connections.
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)

        # Output layer.
        self.outc = OutConv(64, 1)

        # Final activation keeping the output within [-1, 1].
        if output_activation == 'tanh':
            self.final_act = nn.Tanh()
        elif output_activation == 'hardtanh':
            self.final_act = nn.Hardtanh(min_val=-1, max_val=1)
        elif output_activation == 'sigmoid_mapped':
            # torch.sigmoid computes the same values as nn.Sigmoid()(x)
            # without allocating a new module on every call.
            self.final_act = lambda x: 2 * torch.sigmoid(x) - 1
        elif output_activation == 'clamp':
            self.final_act = lambda x: torch.clamp(x, -1, 1)
        elif output_activation == 'scalable_tanh':
            self.final_act = lambda x: 0.999 * torch.tanh(x)
        else:
            # FIX: previously an unrecognized name silently left final_act
            # unset and forward() crashed later with AttributeError.
            raise ValueError(f"Unknown output_activation: {output_activation!r}")

    def forward(self, x1, x2):
        """Fuse the two inputs and produce the enhanced image.

        Args:
            x1: (B, 1, 2560, 64) low-resolution feature map (updated size).
            x2: (B, 1, 256, 128) target-size reference image.

        Returns:
            (B, 1, 256, 128) tensor with values in [-1, 1].

        Raises:
            ValueError: if either input has an unexpected shape.
        """
        # Validate input shapes early so failures give a clear message.
        if x1.size()[1:] != (1, 2560, 64):
            raise ValueError(f"Expected x1 shape (B, 1, 2560, 64), got {x1.size()}")
        if x2.size()[1:] != (1, 256, 128):
            raise ValueError(f"Expected x2 shape (B, 1, 256, 128), got {x2.size()}")

        # Adapt both inputs to the shared feature space.
        x1_adapted = self.adapter1(x1)
        x2_adapted = self.adapter2(x2)

        # Merge the two 32-channel feature maps into 64 channels.
        x = torch.cat([x1_adapted, x2_adapted], dim=1)

        # Encoder path.
        x1_d = self.inc(x)
        x2_d = self.down1(x1_d)
        x3_d = self.down2(x2_d)
        x4_d = self.down3(x3_d)
        x5_d = self.down4(x4_d)

        # Decoder path with skip connections.
        x = self.up1(x5_d, x4_d)
        x = self.up2(x, x3_d)
        x = self.up3(x, x2_d)
        x = self.up4(x, x1_d)

        # Project to a single output channel.
        logits = self.outc(x)

        # Squash the output into [-1, 1].
        out = self.final_act(logits)

        # Safety net: guarantee the advertised output resolution.
        if out.size()[2:] != (256, 128):
            out = F.interpolate(out, size=(256, 128), mode='bilinear', align_corners=True)

        return out


class HighResolutionModel(nn.Module):
    """Complete high-resolution image-enhancement model wrapping EnhancedUNet."""

    def __init__(self):
        super(HighResolutionModel, self).__init__()
        self.unet = EnhancedUNet(bilinear=True, output_activation='tanh')
        self.init_weights(self.unet)

    def forward(self, input1, input2):
        """Enhance input1 guided by input2.

        Args:
            input1: (B, 1, 2560, 64) low-resolution feature map (updated size).
            input2: (B, 1, 256, 128) target-size reference image.

        Returns:
            (B, 1, 256, 128) high-precision image.
        """
        return self.unet(input1, input2)

    @staticmethod
    def init_weights(model):
        """Kaiming-init conv weights; unit/zero-init norm affine parameters."""
        for module in model.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)


# Usage example and smoke test.
if __name__ == "__main__":
    # Build the full model.
    model = HighResolutionModel()

    # Test inputs; note input1 uses the updated 2560x64 size.
    input1 = torch.randn(1, 1, 2560, 64)
    input2 = torch.randn(1, 1, 256, 128)

    # Toggle tracing of intermediate feature-map sizes.
    debug = True

    if debug:
        activation = {}

        def get_activation(name):
            # Record only the output shape of the hooked layer.
            def hook(model, input, output):
                activation[name] = output.shape
            return hook

        # Register one shape-recording hook per tracked submodule.
        for layer_name in ('adapter1', 'adapter2', 'inc',
                           'down1', 'down2', 'down3', 'down4',
                           'up1', 'up2', 'up3', 'up4'):
            layer = getattr(model.unet, layer_name)
            layer.register_forward_hook(get_activation(layer_name))

    # Forward pass.
    output = model(input1, input2)

    # Report every traced layer's output size.
    if debug:
        print("==== 特征图尺寸跟踪 ====")
        for name, shape in activation.items():
            print(f"{name}: {shape}")

    # Summarize the model's inputs and output.
    print("\n==== 模型输入输出信息 ====")
    print(f"输入1形状: {input1.shape} - 更新为2560x64")
    print(f"输入2形状: {input2.shape}")
    print(f"输出形状: {output.shape}")
    print(f"输出值范围: [{output.min().item():.4f}, {output.max().item():.4f}]")

    # Total trainable-state size.
    total_params = sum(p.numel() for p in model.parameters())
    print(f"模型总参数量: {total_params:,}")
