import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from spherical_defense_upgrade import convert_grid_to_pc, convert_pc_to_grid, low_pass_filter, duplicate_randomly
import torch
import numpy as np
import torch.nn as nn
from data_utils.ModelNetDataLoader import ModelNetDataLoader

# Ball Query: fixed-size neighbourhood grouping within a radius
def ball_query(xyz, radius, nsample):
    """Return, for every point, the indices of up to ``nsample`` points
    lying within ``radius`` of it (the point itself included, distance 0).

    If a point has fewer than ``nsample`` neighbours, the found indices are
    repeated cyclically until the slot count is filled.

    :param xyz: point cloud coordinates, shape [B, N, 3]
    :param radius: Euclidean search radius
    :param nsample: number of neighbour indices returned per point
    :return: neighbour indices, shape [B, N, nsample] (long tensor)
    """
    B, N, _ = xyz.shape
    idx = torch.zeros(B, N, nsample, dtype=torch.long, device=xyz.device)
    for b in range(B):
        # Hoisted out of the inner loop: one cdist call computes the full
        # [N, N] pairwise distance matrix instead of N separate norm calls.
        dist = torch.cdist(xyz[b], xyz[b])
        for i in range(N):
            near_idx = torch.where(dist[i] <= radius)[0]
            if len(near_idx) == 0:
                # Degenerate case (radius < 0 or NaN coordinates): fall back
                # to the query point itself rather than dividing by zero in
                # the repeat-count computation below.
                near_idx = torch.tensor([i], dtype=torch.long, device=xyz.device)
            if len(near_idx) < nsample:
                # Pad by cyclically repeating the found indices up to nsample.
                num_repeats = (nsample + len(near_idx) - 1) // len(near_idx)
                near_idx = near_idx.repeat(num_repeats)[:nsample]
            idx[b, i, :] = near_idx[:nsample]
    return idx


class LocalTransformerBlock(nn.Module):
    """Local self-attention over ball-query neighbourhoods.

    For every point, the (up to) k points within ``radius`` are gathered,
    a learned positional encoding of the relative offsets is added to Q/K,
    attention is computed inside each neighbourhood, and the result is
    max-pooled over the k neighbours before an FFN + residual connection.

    NOTE(review): the residual ``+ features`` at the end requires
    ``in_dim == out_dim`` — confirm all callers satisfy this.
    """
    def __init__(self, in_dim, out_dim, num_heads=4, k_neighbors=16, radius=0.1):
        super().__init__()
        self.k = k_neighbors
        self.radius = radius
        # batch_first defaults to False, hence the [L, B, C] transposes in forward().
        self.attention = nn.MultiheadAttention(embed_dim=out_dim, num_heads=num_heads)
        # Maps relative xyz offsets (3-d) to an out_dim positional embedding.
        self.pos_encoder = nn.Sequential(
            nn.Linear(3, 64),
            nn.ReLU(),
            nn.Linear(64, out_dim)
        )
        self.ffn = nn.Sequential(
            nn.Linear(out_dim, out_dim * 2),
            nn.ReLU(),
            nn.Linear(out_dim * 2, out_dim)
        )
        # Single projection producing Q, K and V stacked along the last axis.
        self.qkv_proj = nn.Linear(in_dim, out_dim * 3)
        self.norm = nn.LayerNorm(out_dim)

    def forward(self, xyz, features):
        """
        :param xyz: point coordinates [B, N, 3]
        :param features: per-point features [B, N, in_dim]
        :return: attention-enhanced features [B, N, out_dim]
        """
        B, N, _ = features.shape

        # 1. Build the local neighbourhoods (ball query).
        idx = ball_query(xyz, self.radius, self.k)  # [B, N, k]

        # 2. Flatten (batch, point) index pairs for advanced indexing.
        batch_indices = torch.arange(B, device=xyz.device)[:, None, None].expand(B, N, self.k).reshape(-1)
        point_indices = idx.reshape(-1)

        # 3. Clamp indices into the valid range as a safety net.
        max_index = xyz.shape[1] - 1
        point_indices = torch.clamp(point_indices, 0, max_index)

        # 4. Gather the local coordinate / feature patches.
        grouped_xyz = xyz[batch_indices, point_indices].view(B, N, self.k, 3)  # [B, N, k, 3]
        grouped_features = features[batch_indices, point_indices].view(B, N, self.k, features.shape[-1])  # [B, N, k, C]

        # 5. Positional encoding of relative offsets, plus the QKV projection.
        pos_emb = self.pos_encoder(grouped_xyz - xyz.unsqueeze(2))  # [B, N, k, C]
        qkv = self.qkv_proj(grouped_features).view(B, N, self.k, 3, -1)  # [B, N, k, 3, C]
        q, k, v = qkv[..., 0, :], qkv[..., 1, :], qkv[..., 2, :]  # per-point Q / K / V

        # 6. Local attention: each point attends over its k neighbours.
        #    Sequence length is k and "batch" is B*N; MultiheadAttention
        #    expects [L, B, C], hence the view + transpose.
        attn_output, _ = self.attention(
            (q + pos_emb).view(B * N, self.k, -1).transpose(0, 1),
            (k + pos_emb).view(B * N, self.k, -1).transpose(0, 1),
            v.view(B * N, self.k, -1).transpose(0, 1)
        )
        attn_output = attn_output.transpose(0, 1).view(B, N, self.k, -1).max(dim=2)[0]  # [B, N, C]

        # 7. Feed-forward network + LayerNorm, then the residual connection.
        output = self.norm(self.ffn(attn_output)) + features
        return output


class MultiScaleFeaturePyramid(nn.Module):
    """U-shaped multi-scale feature pyramid over per-point features.

    The down path halves the point count twice (Conv1d + MaxPool1d); the up
    path restores it (Conv1d + Upsample), concatenating skip features from
    ``features_list`` at each level.

    Channel contract of the final fuse (input fixed at 1024 channels):
    256 (up-path output) + C(features_list[2]) + C(features_list[0]) == 1024.
    With the usage in this file that is 256 + 512 + 256 = 1024.
    """
    def __init__(self, in_dims=(256, 512, 1024), out_dim=256):
        # Tuple default instead of a mutable list default; only in_dims[0]
        # and in_dims[1] are actually consumed below.
        super().__init__()
        # Down-sampling path: channels in_dims[0] -> 256 -> 512, length / 4.
        self.down_modules = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(in_dims[0], 256, 1),
                nn.BatchNorm1d(256),
                nn.ReLU(),
                nn.MaxPool1d(2)),
            nn.Sequential(
                nn.Conv1d(256, 512, 1),
                nn.BatchNorm1d(512),
                nn.ReLU(),
                nn.MaxPool1d(2))
        ])

        # Up-sampling path; the second stage also consumes the skip features
        # concatenated after the first stage (hence 256 + in_dims[1] inputs).
        self.up_modules = nn.ModuleList([
            nn.Sequential(
                nn.Conv1d(512, 256, 1),
                nn.BatchNorm1d(256),
                nn.ReLU(),
                nn.Upsample(scale_factor=2)),
            nn.Sequential(
                nn.Conv1d(256 + in_dims[1], 256, 1),
                nn.BatchNorm1d(256),
                nn.ReLU(),
                nn.Upsample(scale_factor=2))
        ])

        # Final fusion; input channel count fixed at 1024 (see class docstring).
        self.final_fuse = nn.Sequential(
            nn.Conv1d(1024, out_dim, 1),
            nn.BatchNorm1d(out_dim),
            nn.ReLU())

    @staticmethod
    def _match_length(feat, target_size):
        """Nearest-neighbour interpolate ``feat`` [B, C, L] to length ``target_size`` if needed."""
        if feat.shape[2] != target_size:
            feat = nn.functional.interpolate(feat, size=target_size, mode='nearest')
        return feat

    def forward(self, xyz, features_list):
        """
        :param xyz: point coordinates (unused here, kept for interface symmetry)
        :param features_list: [low_freq_feat, feat1, feat2, feat3], each [B, N_i, C_i]
        :return: fused features [B, N, out_dim]
        """
        # Down-sampling path. (The per-stage outputs were previously collected
        # into an unused list; that dead accumulation has been removed.)
        x = features_list[0].transpose(1, 2)
        for module in self.down_modules:
            x = module(x)

        # Up-sampling path with length-matched skip connections.
        for i, module in enumerate(self.up_modules):
            x = module(x)
            if i < len(features_list) - 1:
                skip = self._match_length(features_list[i + 1].transpose(1, 2), x.shape[2])
                x = torch.cat([x, skip], dim=1)

        # Final fusion with the length-matched input features.
        first_feat = self._match_length(features_list[0].transpose(1, 2), x.shape[2])
        x = torch.cat([x, first_feat], dim=1)
        return self.final_fuse(x).transpose(1, 2)


class LowFreqExtractor(nn.Module):
    """Produces a low-pass-filtered ("smooth") copy of a point cloud plus a
    projected low-frequency feature per point, via the spherical-grid helpers
    from ``spherical_defense_upgrade``.

    NOTE(review): the grid/filter helpers run on CPU numpy per sample, so
    this forward pass is not differentiable w.r.t. the input points.
    """
    def __init__(self, lmax, sigma, pc_size=1024, device="cuda"):
        super().__init__()
        self.lmax = lmax            # max spherical-harmonic degree for the grid
        self.sigma = sigma          # low-pass filter strength
        self.pc_size = pc_size      # number of points after random duplication
        self.device = device
        self.feature_proj = nn.Linear(1, 256)
        # Created lazily on the first forward pass because the flattened grid
        # size (nlat * nlon) is only known once a grid has been built.
        self.align_layer = None

    def forward(self, pc):
        pc_np = pc.detach().cpu().numpy().astype(np.float32)

        batch_smooth_pc = []
        batch_low_freq_feat = []
        for i in range(pc_np.shape[0]):
            # Per-sample: rasterize to a spherical grid, low-pass filter,
            # convert back to points, then duplicate up to pc_size points.
            grid, flag, origin = convert_pc_to_grid(pc_np[i], self.lmax, "cpu")
            smooth_grid = low_pass_filter(grid, self.sigma)
            smooth_pc = convert_grid_to_pc(smooth_grid, flag, origin)
            smooth_pc = duplicate_randomly(smooth_pc, self.pc_size)

            batch_smooth_pc.append(torch.from_numpy(smooth_pc).float().to(self.device))
            batch_low_freq_feat.append(torch.from_numpy(smooth_grid.data).float().to(self.device))

        smooth_pc = torch.stack(batch_smooth_pc, dim=0)  # [B, pc_size, 3]
        low_freq_feat = torch.stack(batch_low_freq_feat, dim=0)  # [B, nlat, nlon]

        # Align the flattened grid features to pc_size entries per sample.
        low_freq_feat = low_freq_feat.view(low_freq_feat.shape[0], -1)  # [B, nlat*nlon]
        if self.align_layer is None:
            # BUG FIX: previously a fresh nn.Linear was instantiated on EVERY
            # forward call, so its random weights changed between calls
            # (non-deterministic outputs) and were never registered on the
            # module (untrainable, missing from state_dict / .to()).
            # Create it once here, where the input dimension is known, and
            # register it as a proper submodule.
            self.align_layer = nn.Linear(low_freq_feat.shape[1], self.pc_size).to(self.device)
        low_freq_feat = self.align_layer(low_freq_feat)  # [B, pc_size]
        low_freq_feat = low_freq_feat.unsqueeze(-1)  # [B, pc_size, 1]
        low_freq_feat = self.feature_proj(low_freq_feat)  # [B, pc_size, 256]

        return smooth_pc, low_freq_feat


class LowFreqAttention(nn.Module):
    """Thin wrapper applying a LocalTransformerBlock to low-frequency features."""

    def __init__(self, in_dim, out_dim, num_heads=4, k_neighbors=16):
        super().__init__()
        # All attention work is delegated to the shared local transformer block
        # (its default radius is used, since none is passed here).
        self.local_transformer = LocalTransformerBlock(
            in_dim, out_dim, num_heads, k_neighbors
        )

    def forward(self, xyz, features):
        """Enhance low-frequency features via local attention.

        :param xyz: low-frequency point coordinates [B, N, 3]
        :param features: low-frequency features [B, N, in_dim]
        :return: enhanced features [B, N, out_dim]
        """
        enhanced = self.local_transformer(xyz, features)
        return enhanced


class LowFreqEnhancedModel(nn.Module):
    """End-to-end classifier: low-frequency extraction -> local attention ->
    multi-scale fusion with externally supplied features -> 40-way head.

    NOTE(review): the 40-class output suggests ModelNet40 — confirm.
    """
    def __init__(self, lmax, sigma, pc_size=1024, device="cuda"):
        super().__init__()
        # Dimension definitions.
        self.low_freq_dim = 256
        self.pyramid_dims = [256, 512, 1024]
        self.pc_size = pc_size  # kept for the shape assertions in forward()

        # Low-frequency feature extraction (spherical-grid smoothing).
        self.low_freq_extractor = LowFreqExtractor(lmax, sigma, pc_size, device)

        # Attention over the low-frequency features
        # (in_dim == out_dim, as required by the block's residual connection).
        self.low_freq_attention = LocalTransformerBlock(
            in_dim=self.low_freq_dim,
            out_dim=self.low_freq_dim,
            num_heads=4,
            k_neighbors=16
        )

        # Multi-scale feature fusion across the enhanced + original features.
        self.multi_scale_fusion = MultiScaleFeaturePyramid(
            in_dims=[self.low_freq_dim] + self.pyramid_dims,
            out_dim=512
        )

        # Classification head (40 classes).
        self.classifier = nn.Sequential(
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 40))

    def forward(self, xyz, original_feats):
        """
        :param xyz: input point cloud [B, N, 3]
        :param original_feats: list of three feature tensors; the fusion
            module's channel contract expects [B,*,256], [B,*,512], [B,*,1024]
        :return: class logits [B, 40]
        """
        # Extract the smoothed point cloud and its low-frequency features.
        smooth_pc, low_freq_feat = self.low_freq_extractor(xyz)

        # Shape validation (asserts are stripped under -O; messages in Chinese
        # are kept as-is since they are runtime strings).
        assert smooth_pc.shape[1] == self.pc_size, f"点云数量应为{self.pc_size}, 实际得到{smooth_pc.shape[1]}"
        assert low_freq_feat.shape[1] == self.pc_size, f"特征数量应为{self.pc_size}, 实际得到{low_freq_feat.shape[1]}"

        # Attention enhancement (smooth_pc / low_freq_feat are already batched).
        enhanced_feat = self.low_freq_attention(
            smooth_pc,
            low_freq_feat)

        # Multi-scale fusion of enhanced + original features.
        fused_feat = self.multi_scale_fusion(
            xyz,
            [enhanced_feat] + original_feats
        )

        # Global max-pool over points, then classify.
        global_feat = torch.max(fused_feat, dim=1)[0]
        return self.classifier(global_feat)


# Smoke test
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Build the model; lmax=16 gives an nlat*nlon = 33*33 = 1089 grid,
    # close to pc_size = 1024.
    model = LowFreqEnhancedModel(
        lmax=16,
        sigma=0.1,
        pc_size=1024,
        device=device
    ).to(device).float()

    # Load one batch of ModelNet data.
    root = '/media/shangli211/4TB_SSD/program_file/Data/'
    dataset = ModelNetDataLoader(root, npoint=1024, uniform=False, normal_channel=True)
    loader = torch.utils.data.DataLoader(dataset, batch_size=6, shuffle=True)

    points, labels = next(iter(loader))
    xyz = points[:, :, :3].to(device).float()  # coordinates [B, N, 3]
    labels = labels.to(device).float()

    # Placeholder "original" features (random here; in practice these would
    # come from a feature-extraction backbone).
    original_feats = [
        torch.randn(6, 1024, 256).to(device).float(),
        torch.randn(6, 512, 512).to(device).float(),
        torch.randn(6, 256, 1024).to(device).float()
    ]

    # Forward smoke test; expected output: torch.Size([6, 40]).
    output = model(xyz, original_feats)
    print(output.shape)
