import torch
import torch.nn as nn


class SimpleDYDModel(nn.Module):
    """Multi-view model fusing per-view image features with camera parameters.

    Each sample consists of V single-channel views plus the corresponding
    4x4 extrinsic and 3x3 intrinsic camera matrices. Per-view features are
    masked, averaged over the valid views, and decoded by four output heads.
    """

    def __init__(self, input_size=(96, 96)):
        """Build the feature extractors, fusion MLP, and output heads.

        Args:
            input_size: Expected (H, W) of the input images. Stored for
                reference only — AdaptiveAvgPool2d makes the CNN
                size-agnostic.
        """
        super().__init__()
        self.input_size = input_size

        # Shared CNN backbone applied to every view. AdaptiveAvgPool2d(1)
        # collapses the spatial dims so any input size yields a 64-d vector.
        self.image_feature_extractor = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
        )

        # Camera-parameter encoder: flattened extrinsics (16) + intrinsics (9).
        self.camera_feature_extractor = nn.Sequential(
            nn.Linear(16 + 9, 32),
            nn.ReLU(),
        )

        # Per-view fusion of image features (64) and camera features (32).
        self.feature_fusion = nn.Sequential(
            nn.Linear(64 + 32, 128),
            nn.ReLU(),
        )

        # Four output heads.
        self.branch1 = nn.Linear(128, 22)  # shape [B, 22]
        self.branch2 = nn.Linear(128, 16)  # shape [B, 16] -> reshaped to [B, 4, 4]
        self.branch3 = nn.Linear(128, 1)   # shape [B, 1]  -> squeezed to [B]
        self.branch4 = nn.Linear(128, 2)   # shape [B, 2]

    def forward(self, images, extrinsic_matrices, intrinsic_matrices, masks):
        """Run the model on a batch of multi-view inputs.

        Args:
            images: [B, V, 1, H, W] single-channel view images.
            extrinsic_matrices: [B, V, 4, 4] camera extrinsics.
            intrinsic_matrices: [B, V, 3, 3] camera intrinsics.
            masks: [B, V] view-validity mask (1 = valid, 0 = ignore);
                float, int, or bool.

        Returns:
            Tuple of (out1 [B, 22], out2 [B, 4, 4], out3 [B], out4 [B, 2]).
        """
        B, V, C, H, W = images.shape

        # Extract per-view image features by folding views into the batch dim.
        image_features = self.image_feature_extractor(
            images.view(B * V, C, H, W)
        ).view(B, V, -1)  # [B, V, 64]

        # Encode flattened camera parameters.
        camera_features = torch.cat(
            [
                extrinsic_matrices.view(B, V, -1),  # [B, V, 16]
                intrinsic_matrices.view(B, V, -1),  # [B, V, 9]
            ],
            dim=-1,
        )
        camera_features = self.camera_feature_extractor(camera_features)  # [B, V, 32]

        # Fuse image and camera features per view.
        fused_features = self.feature_fusion(
            torch.cat([image_features, camera_features], dim=-1)
        )  # [B, V, 128]

        # Zero out invalid views; cast so bool/int masks also divide correctly.
        masks = masks.to(fused_features.dtype)
        masked_features = fused_features * masks.unsqueeze(-1)  # [B, V, 128]

        # Average over valid views; clamp guards against all-invalid samples.
        valid_counts = masks.sum(dim=1, keepdim=True).clamp(min=1)  # [B, 1]
        global_features = masked_features.sum(dim=1) / valid_counts  # [B, 128]

        # Decode the four outputs.
        out1 = self.branch1(global_features)                # [B, 22]
        out2 = self.branch2(global_features).view(B, 4, 4)  # [B, 4, 4]
        out3 = self.branch3(global_features).squeeze(-1)    # [B]
        out4 = self.branch4(global_features)                # [B, 2]

        return out1, out2, out3, out4


# Usage example
if __name__ == "__main__":
    # Build the model and switch it to inference mode.
    model = SimpleDYDModel(input_size=(96, 96))
    model.eval()

    # Construct test inputs: 1 sample with 4 camera views.
    B, V = 1, 4
    images = torch.randn(B, V, 1, 96, 96)
    extrinsic_matrices = torch.randn(B, V, 4, 4)
    intrinsic_matrices = torch.randn(B, V, 3, 3)
    masks = torch.ones(B, V)
    masks[0, 1] = 0  # mark the second view as invalid

    # Forward pass; no_grad avoids building an autograd graph for an
    # inference-only demo run.
    with torch.no_grad():
        outputs = model(images, extrinsic_matrices, intrinsic_matrices, masks)

    # Print the output shapes.
    for i, out in enumerate(outputs):
        print(f"Output {i + 1} shape: {out.shape}")

    # Trace and save the model for deployment.
    example_inputs = (images, extrinsic_matrices, intrinsic_matrices, masks)
    traced_model = torch.jit.trace(model, example_inputs)
    traced_model.save("simple_dyd_model.pt")
    print("Model saved as simple_dyd_model.pt")