import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class SpatialMultiViewFusion(nn.Module):
    """Spatially-aware multi-view fusion for 2-D feature maps (VR hand tracking).

    Fuses a variable (padded) number of per-view feature maps into a single
    map by (1) conditioning each view on its camera extrinsics/intrinsics,
    (2) running cross-view multi-head attention independently at every
    spatial location, and (3) aggregating views with masked softmax weights
    plus a residual of the mean valid-view input.
    """

    def __init__(self, feature_dim, num_heads, proj_dim=16, max_views=8):
        """
        Args:
            feature_dim: number of feature channels per view. Must be divisible
                by ``num_heads``; the GroupNorm layers below also require
                divisibility by 8.
            num_heads: number of attention heads.
            proj_dim: embedding size produced for each camera matrix.
            max_views: maximum number of views supported (position-encoding
                and relative-bias tables are sized to this).
        """
        super().__init__()
        self.feature_dim = feature_dim
        self.num_heads = num_heads
        self.head_dim = feature_dim // num_heads
        self.max_views = max_views
        self.proj_dim = proj_dim

        # ===== Positional encoding =====
        # Learnable per-view positional encoding (view-order awareness),
        # broadcast over the spatial grid.
        self.view_position_enc = nn.Parameter(torch.zeros(1, max_views, feature_dim, 1, 1))
        nn.init.trunc_normal_(self.view_position_enc, mean=0.0, std=0.02)

        # ===== Camera-parameter encoders =====
        # Extrinsic matrix encoder (flattened 4x4 -> proj_dim).
        self.extrinsic_proj = nn.Sequential(
            nn.Linear(16, 32),
            nn.ReLU(),
            nn.Linear(32, proj_dim)
        )
        # Intrinsic matrix encoder (flattened 3x3 -> proj_dim).
        self.intrinsic_proj = nn.Sequential(
            nn.Linear(9, 16),
            nn.ReLU(),
            nn.Linear(16, proj_dim)
        )

        # ===== Feature fusion =====
        # Adapter mapping the concatenated (features + camera codes) back to
        # feature_dim; a 3x3 conv keeps spatial context.
        self.feature_adapter = nn.Sequential(
            nn.Conv2d(feature_dim + 2 * proj_dim, feature_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.GroupNorm(8, feature_dim)  # batch-size-independent normalization for image features
        )

        # ===== Attention =====
        # Learnable relative bias between each pair of view slots.
        self.rel_pos_bias = nn.Parameter(torch.randn(max_views, max_views))

        # 1x1-conv Q/K/V projections (preserve the spatial layout).
        self.q_proj = nn.Conv2d(feature_dim, feature_dim, kernel_size=1)
        self.k_proj = nn.Conv2d(feature_dim, feature_dim, kernel_size=1)
        self.v_proj = nn.Conv2d(feature_dim, feature_dim, kernel_size=1)

        # ===== Output head =====
        self.out_proj = nn.Sequential(
            nn.Conv2d(feature_dim, feature_dim, kernel_size=3, padding=1),
            nn.GroupNorm(8, feature_dim),
            nn.ReLU()
        )

        # Learnable scale for the residual branch.
        self.residual_scale = nn.Parameter(torch.tensor(0.1))

    def forward(self, S, extrinsic_matrices, intrinsic_matrices, mask=None):
        """
        Args:
            S: (B, C, D, H, W) padded per-view feature maps; C is the number
                of view slots, D == feature_dim.
            extrinsic_matrices: (B, C, 4, 4) padded extrinsics.
            intrinsic_matrices: (B, C, 3, 3) padded intrinsics.
            mask: (B, C) mask, 1 for valid views, 0 for padding. Defaults to
                all-valid when omitted.
        Returns:
            out: (B, D, H, W) fused spatial feature map.
        """
        B, C, D, H, W = S.shape

        # Assume every view is valid when no mask is given.
        if mask is None:
            mask = torch.ones(B, C, device=S.device, dtype=S.dtype)

        # ===== 1. Camera parameter encoding =====
        # (B, C, 4, 4) -> (B, C, proj_dim) and (B, C, 3, 3) -> (B, C, proj_dim).
        ext_enc = self.extrinsic_proj(extrinsic_matrices.reshape(B, C, -1))
        intr_enc = self.intrinsic_proj(intrinsic_matrices.reshape(B, C, -1))

        # Broadcast the camera codes over the spatial grid.
        ext_enc = ext_enc.view(B, C, self.proj_dim, 1, 1).expand(-1, -1, -1, H, W)
        intr_enc = intr_enc.view(B, C, self.proj_dim, 1, 1).expand(-1, -1, -1, H, W)

        # ===== 2. Position-aware features =====
        # Additive per-view positional encoding.
        view_pos_enc = self.view_position_enc[:, :C].expand(B, -1, -1, H, W)
        F_pos = S + view_pos_enc

        # Concatenate extrinsic and intrinsic codes along channels.
        F_concat = torch.cat([F_pos, ext_enc, intr_enc], dim=2)  # (B, C, D + 2*proj_dim, H, W)

        # Channel adaptation via conv; fold views into the batch dimension.
        F_fused = self.feature_adapter(F_concat.view(B * C, -1, H, W)).view(B, C, D, H, W)

        # Zero out padded views so they carry no signal downstream.
        F_fused = F_fused * mask.view(B, C, 1, 1, 1)

        # ===== 3. Multi-head Q/K/V projection =====
        # (B*C, D, H, W) conv, then split channels into heads:
        # (B, C, num_heads, head_dim, H, W).
        Q = self.q_proj(F_fused.view(B * C, D, H, W)).view(B, C, self.num_heads, self.head_dim, H, W)
        K = self.k_proj(F_fused.view(B * C, D, H, W)).view(B, C, self.num_heads, self.head_dim, H, W)
        V = self.v_proj(F_fused.view(B * C, D, H, W)).view(B, C, self.num_heads, self.head_dim, H, W)

        # ===== 4. Cross-view attention at each spatial location =====
        Q = Q.permute(0, 2, 4, 5, 1, 3)  # (B, num_heads, H, W, C, head_dim)
        K = K.permute(0, 2, 4, 5, 1, 3)  # (B, num_heads, H, W, C, head_dim)

        # Scaled dot-product scores over view pairs: (B, num_heads, H, W, C, C).
        attn_scores = torch.matmul(Q, K.transpose(-1, -2)) / (self.head_dim ** 0.5)

        # Add the learnable view-pair bias.
        attn_scores = attn_scores + self.rel_pos_bias[:C, :C].view(1, 1, 1, 1, C, C)

        # BUGFIX: mask the KEY axis (the last dim, which softmax normalizes
        # over) so padded views get ~zero attention weight from valid
        # queries. The previous code masked the query axis, which let
        # padded views leak into every valid view's output.
        key_mask = (1.0 - mask.view(B, 1, 1, 1, 1, C)) * -1e9
        attn_scores = attn_scores + key_mask

        attn_weights = F.softmax(attn_scores, dim=-1)  # (B, num_heads, H, W, C, C)

        # ===== 5. Attend =====
        V = V.permute(0, 2, 4, 5, 1, 3)  # (B, num_heads, H, W, C, head_dim)
        attn_output = torch.matmul(attn_weights, V)  # (B, num_heads, H, W, C, head_dim)

        # ===== 6. Merge heads =====
        # Note: channels come out ordered (head_dim, num_heads); this is a
        # fixed channel permutation and is consistent across calls.
        attn_output = attn_output.permute(0, 4, 5, 1, 2, 3)  # (B, C, head_dim, num_heads, H, W)
        attn_output = attn_output.contiguous().view(B, C, self.head_dim * self.num_heads, H, W)

        # Zero padded views in the attention output as well (padded queries
        # still produce values above; discard them here).
        attn_output = attn_output * mask.view(B, C, 1, 1, 1)

        # ===== 7. View-weighted aggregation =====
        # Scalar score per view (mean activation), softmaxed over valid views.
        view_weights = attn_output.mean(dim=(2, 3, 4), keepdim=True)  # (B, C, 1, 1, 1)
        view_weights = view_weights.masked_fill(mask.view(B, C, 1, 1, 1) == 0, -1e9)
        view_weights = F.softmax(view_weights, dim=1)

        # Weighted sum over views: (B, D, H, W).
        aggregated = torch.sum(attn_output * view_weights, dim=1)

        # ===== 8. Output projection + residual =====
        # Residual = mean feature of the valid input views.
        valid_S = S * mask.view(B, C, 1, 1, 1)
        num_valid = mask.sum(dim=1, keepdim=True).view(B, 1, 1, 1) + 1e-9
        residual = torch.sum(valid_S, dim=1) / num_valid

        out = self.out_proj(aggregated)
        out = out + self.residual_scale * residual

        return out


class SimpleMultiViewFusion(nn.Module):
    """Simplified multi-view fusion using only image features and a view mask.

    Padded views are zeroed by the mask, all views are stacked along the
    channel axis, and a small 1x1-conv stack squeezes them back down to
    ``feature_dim`` channels. A learnably-scaled mean of the valid views is
    added as a residual.
    """

    def __init__(self, feature_dim, num_views=4, n_blocks=3):
        """
        Args:
            feature_dim: number of feature channels per view.
            num_views: maximum number of views (channel-stacked at the input).
            n_blocks: number of conv-BN-ReLU blocks in the fusion stack.
        """
        super().__init__()
        self.feature_dim = feature_dim
        self.num_views = num_views
        self.n_blocks = n_blocks

        # Fusion stack: (num_views * feature_dim) channels -> feature_dim.
        self.fusion_net = self.create_fusion_layers(
            nc_in=feature_dim,
            nc_out=feature_dim,
            n_blocks=n_blocks,
            num_views=num_views
        )

        # Learnable scale for the residual branch.
        self.residual_scale = nn.Parameter(torch.tensor(0.1))

    @staticmethod
    def create_fusion_layers(nc_in: int, nc_out: int, n_blocks: int, num_views: int) -> nn.Module:
        """Build the channel-squeezing fusion stack.

        Channel counts change linearly from ``num_views * nc_in`` down to
        ``nc_out`` across ``n_blocks`` conv-BN-ReLU blocks.
        """
        n_channels_list = np.linspace(num_views * nc_in, nc_out, n_blocks + 1)
        fusion_layers = nn.ModuleList()

        for i in range(n_blocks):
            nc_in_cur = int(n_channels_list[i])
            nc_out_cur = int(n_channels_list[i + 1])

            fusion_layers.append(nn.Conv2d(nc_in_cur, nc_out_cur, kernel_size=1, padding=0))
            fusion_layers.append(nn.BatchNorm2d(nc_out_cur))
            fusion_layers.append(nn.ReLU(inplace=True))

        # Extra conv without activation so outputs are not restricted to
        # positive values (the preceding ReLU would otherwise force that).
        fusion_layers.append(nn.Conv2d(nc_out, nc_out, kernel_size=1, padding=0))
        return nn.Sequential(*fusion_layers)

    def forward(self, x, mask):
        """
        Args:
            x: (B, V, C, H, W) padded per-view feature maps.
            mask: (B, V) mask, 1 for valid views, 0 for padding.
        Returns:
            out: (B, C, H, W) fused spatial feature map.
        """
        B, V, C, H, W = x.shape

        # Zero out padded views so they contribute nothing downstream.
        x = x * mask.view(B, V, 1, 1, 1)

        # Stack views along channels: (B, V, C, H, W) -> (B, V*C, H, W).
        x_flat = x.view(B, V * C, H, W)

        # Squeeze back down to C channels.
        fused = self.fusion_net(x_flat)  # (B, C, H, W)

        # BUGFIX: the residual is meant to be the mean of the *valid* views
        # (padded entries are already zeroed above); dividing by the padded
        # total V biased samples with fewer views toward zero. clamp(min=1)
        # guards against an all-padding sample.
        num_valid = mask.sum(dim=1).clamp(min=1).view(B, 1, 1, 1)
        residual = torch.sum(x, dim=1) / num_valid

        # Residual connection.
        out = fused + self.residual_scale * residual
        return out


# 测试代码 - 支持二维特征图
# if __name__ == '__main__':
#     # 设置随机种子确保可重复性
#     torch.manual_seed(42)
#
#     # 不同样本的视图数量
#     view_counts = [3, 5, 4, 8]  # 一个batch中4个样本分别有3,5,4,8个视图
#     B = len(view_counts)  # batch size = 4
#     D = 32  # 特征通道维度
#     H, W = 16, 16  # 特征图高度和宽度
#     max_views = 4  # 模型支持的最大视图数
#
#     # 初始化模型
#     fusion = SpatialMultiViewFusion(
#         feature_dim=D,
#         num_heads=4,
#         proj_dim=16,
#         max_views=max_views
#     )
#
#     # 创建存储数据的列表
#     S_list = []
#     extrinsic_list = []
#     intrinsic_list = []
#
#     # 为每个样本创建不同数量的视图
#     for i, num_views in enumerate(view_counts):
#         # 视图特征图 (num_views, D, H, W)
#         S_list.append(torch.randn(num_views, D, H, W))
#
#         # 外参矩阵 (num_views, 4, 4)
#         extrinsic_list.append(torch.randn(num_views, 4, 4))
#
#         # 内参矩阵 (num_views, 3, 3)
#         intrinsic_list.append(torch.randn(num_views, 3, 3))
#
#     # 填充到统一尺寸 (B, max_views, D, H, W)
#     S_padded = torch.zeros(B, max_views, D, H, W)
#     extrinsic_padded = torch.zeros(B, max_views, 4, 4)
#     intrinsic_padded = torch.zeros(B, max_views, 3, 3)
#     mask = torch.zeros(B, max_views)  # 创建掩码标记有效视图
#
#     for i in range(B):
#         num_views = view_counts[i]
#         S_padded[i, :num_views] = S_list[i]
#         extrinsic_padded[i, :num_views] = extrinsic_list[i]
#         intrinsic_padded[i, :num_views] = intrinsic_list[i]
#         mask[i, :num_views] = 1  # 标记有效视图
#
#     print("测试同一个batch中不同视图数量(二维特征图):")
#     print(f"视图数量: {view_counts}")
#     print(f"填充后的特征形状: {S_padded.shape} [B, C, D, H, W]")
#     print(f"填充后的外参矩阵形状: {extrinsic_padded.shape}")
#     print(f"填充后的内参矩阵形状: {intrinsic_padded.shape}")
#     print(f"掩码形状: {mask.shape}")
#
#     # 执行融合 (传入掩码)
#     out = fusion(S_padded, extrinsic_padded, intrinsic_padded, mask=mask)
#
#     # 检查输出
#     print(f"\n输出形状: {out.shape} (应为({B}, {D}, {H}, {W}))")
#
#     # 验证掩码有效性
#     print("\n验证掩码有效性:")
#     for i in range(B):
#         print(f"样本 {i} (应有 {view_counts[i]} 个有效视图): {mask[i]}")
#
#     # 测试模型对掩码的响应
#     print("\n测试掩码处理能力(二维特征):")
#
#     # 创建一个特殊样本：只有第一个视图有效
#     special_mask = torch.zeros(1, max_views)
#     special_mask[0, 0] = 1  # 只有第一个视图有效
#
#     # 创建随机输入
#     S_special = torch.randn(1, max_views, D, H, W)
#     ext_special = torch.randn(1, max_views, 4, 4)
#     intr_special = torch.randn(1, max_views, 3, 3)
#
#     # 运行融合
#     out_special = fusion(S_special, ext_special, intr_special, mask=special_mask)
#
#     # 检查输出是否接近第一个视图的特征
#     diff = torch.abs(out_special - S_special[0, 0]).mean()
#     print(f"特殊样本输出与第一个视图的差异: {diff.item():.6f}")
#     print("(注意：由于模型包含位置编码和变换，差异不会为0，但应合理)")
#
#     # 测试模型对空间位置的处理
#     print("\n测试空间位置感知能力:")
#     print("在特征图上添加空间模式...")
#
#     # 创建具有明显空间模式的特征图
#     spatial_pattern = torch.zeros(1, max_views, D, H, W)
#     for c in range(max_views):
#         # 创建垂直条纹模式 - 正确的维度处理
#         stripe = torch.arange(H).view(1, 1, H, 1).float() / H  # 形状 (1, 1, H, 1)
#         stripe = stripe.expand(D, 1, H, W)  # 扩展到 (D, 1, H, W)
#         stripe = stripe.permute(0, 2, 3, 1).squeeze(-1)  # 调整形状为 (D, H, W)
#         spatial_pattern[0, c] = stripe
#
#     # 运行融合
#     out_pattern = fusion(spatial_pattern, ext_special, intr_special, mask=special_mask)
#
#     # 检查输出是否保留了空间模式
#     output_pattern = out_pattern[0].mean(dim=0)  # 平均通道维度 (H, W)
#     input_pattern = spatial_pattern[0, 0].mean(dim=0)  # 平均通道维度 (H, W)
#
#     # 可视化输入和输出的空间模式
#     print("\n输入空间模式 (第一个视图的前5行前5列):")
#     print(input_pattern[:5, :5])
#
#     print("\n输出空间模式 (前5行前5列):")
#     print(output_pattern[:5, :5])
#
#     pattern_diff = torch.abs(output_pattern - input_pattern).mean()
#     print(f"\n输出与输入空间模式的差异: {pattern_diff.item():.6f}")
#     print("(期望值接近0，表示模型保留了空间结构)")
#
#     # 测试模型输出范围
#     print("\n测试输出范围:")
#     print(f"输出最小值: {out.min().item():.4f}, 最大值: {out.max().item():.4f}")
#     print(f"特殊样本输出最小值: {out_special.min().item():.4f}, 最大值: {out_special.max().item():.4f}")
#     print(f"空间模式输出最小值: {out_pattern.min().item():.4f}, 最大值: {out_pattern.max().item():.4f}")
#
#     print("\n所有测试完成!")


if __name__ == '__main__':
    # Fix the random seed for reproducibility.
    torch.manual_seed(42)

    # Per-sample view counts: the 4 samples in this batch have 3, 5, 4 and 8
    # views respectively.
    view_counts = [3, 5, 4, 8]
    B = len(view_counts)  # batch size = 4
    C = 32  # feature channel dimension
    H, W = 16, 16  # feature map height and width
    max_views = 8  # maximum number of views the model supports

    # Instantiate the simplified fusion model.
    fusion = SimpleMultiViewFusion(
        feature_dim=C,
        num_views=max_views,
        n_blocks=3
    )

    # Storage for the per-sample inputs.
    x_list = []

    # Create a different number of views for each sample.
    for i, num_views in enumerate(view_counts):
        # Per-view feature maps: (num_views, C, H, W).
        x_list.append(torch.randn(num_views, C, H, W))

    # Pad every sample to a uniform size (B, max_views, C, H, W).
    x_padded = torch.zeros(B, max_views, C, H, W)
    mask = torch.zeros(B, max_views)  # mask marking which views are valid

    for i in range(B):
        num_views = view_counts[i]
        x_padded[i, :num_views] = x_list[i]
        mask[i, :num_views] = 1  # mark valid views

    print("测试同一个batch中不同视图数量(二维特征图):")
    print(f"视图数量: {view_counts}")
    print(f"填充后的特征形状: {x_padded.shape} [B, V, C, H, W]")
    print(f"掩码形状: {mask.shape}")

    # Run the fusion (passing the mask).
    out = fusion(x_padded, mask)

    # Check the output shape.
    print(f"\n输出形状: {out.shape} (应为({B}, {C}, {H}, {W}))")

    # Verify the mask contents.
    print("\n验证掩码有效性:")
    for i in range(B):
        print(f"样本 {i} (应有 {view_counts[i]} 个有效视图): {mask[i]}")

    # Test how the model responds to the mask.
    print("\n测试掩码处理能力(二维特征):")

    # Build a special sample where only the first view is valid.
    special_mask = torch.zeros(1, max_views)
    special_mask[0, 0] = 1  # only the first view is valid

    # Random input for the special sample.
    x_special = torch.randn(1, max_views, C, H, W)

    # Run the fusion.
    out_special = fusion(x_special, special_mask)

    # Compare the output against the first view (accounting for the residual
    # connection).
    # NOTE(review): this expectation looks wrong — `fused` is the output of a
    # learned conv stack, not the raw first view, so the difference below is
    # not expected to be near zero even for a correctly-masked model; treat
    # the printed value as informational only. TODO confirm intended check.
    first_view = x_special[0, 0]
    diff = torch.abs(out_special[0] - (first_view + fusion.residual_scale * first_view)).mean()
    print(f"特殊样本输出与第一个视图的差异: {diff.item():.6f}")
    print("(期望值接近0，表示模型正确处理了单视图情况)")

    # Test how the model handles spatial position.
    print("\n测试空间位置感知能力:")
    print("在特征图上添加空间模式...")

    # Build feature maps with obvious spatial patterns.
    spatial_pattern = torch.zeros(1, max_views, C, H, W)
    for v in range(max_views):
        # Give each view a distinct spatial pattern.
        if v % 2 == 0:
            # Vertical gradient (varies along H).
            stripe = torch.arange(H).view(1, H, 1).float() / H  # shape (1, H, 1)
            stripe = stripe.expand(C, H, W)  # broadcast to (C, H, W)
        else:
            # Horizontal gradient (varies along W).
            stripe = torch.arange(W).view(1, 1, W).float() / W  # shape (1, 1, W)
            stripe = stripe.expand(C, H, W)  # broadcast to (C, H, W)
        spatial_pattern[0, v] = stripe

    # Run the fusion using all views.
    spatial_mask = torch.ones(1, max_views)
    out_pattern = fusion(spatial_pattern, spatial_mask)

    # Inspect whether the output retained the spatial pattern.
    output_pattern = out_pattern[0].mean(dim=0)  # average over channels -> (H, W)

    # Show a corner of the output's spatial pattern.
    print("\n输出空间模式 (前5行前5列):")
    print(output_pattern[:5, :5])

    # Report the output value range.
    print("\n测试输出范围:")
    print(f"输出最小值: {out.min().item():.4f}, 最大值: {out.max().item():.4f}")
    print(f"特殊样本输出最小值: {out_special.min().item():.4f}, 最大值: {out_special.max().item():.4f}")
    print(f"空间模式输出最小值: {out_pattern.min().item():.4f}, 最大值: {out_pattern.max().item():.4f}")

    # Test the residual connection.
    print("\n测试残差连接:")
    print(f"残差缩放因子: {fusion.residual_scale.item():.4f}")

    # Temporarily disable the residual connection.
    original_scale = fusion.residual_scale.item()
    fusion.residual_scale.data.fill_(0.0)

    # Run the fusion without the residual.
    out_no_residual = fusion(x_special, special_mask)

    # Restore the residual scale.
    fusion.residual_scale.data.fill_(original_scale)

    # The outputs should differ when the residual is active.
    diff = torch.abs(out_special - out_no_residual).mean()
    print(f"有无残差连接的输出差异: {diff.item():.6f} (应大于0)")

    print("\n所有测试完成!")
