#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import torch
import numpy as np
import h5py
from tqdm import tqdm
import glob
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import matplotlib

# Configure matplotlib so CJK (Chinese) labels render correctly.
matplotlib.rcParams['font.family'] = ['SimHei', 'Microsoft YaHei', 'sans-serif']  # prefer SimHei / Microsoft YaHei
matplotlib.rcParams['axes.unicode_minus'] = False  # CJK fonts often lack U+2212; use ASCII '-' for minus signs
# Best-effort fallback if no Chinese font is installed.
try:
    plt.rcParams['font.sans-serif'] = ['SimHei']  # make Chinese tick/axis labels display
except Exception:
    # Narrowed from a bare `except:` which would also swallow KeyboardInterrupt/SystemExit.
    # NOTE(review): assigning rcParams does not actually probe font availability,
    # so this handler is unlikely to fire — confirm whether the fallback is still wanted.
    print("警告: 无法设置中文字体，可能导致中文显示为乱码")

class DenoisingDatasetPreparer:
    """Build noisy/clean point-cloud pairs for denoising experiments.

    Loads OFF meshes from a ModelNet-style directory layout
    (``<root>/<category>/{train,test}/*.off``), samples fixed-size point
    clouds normalized into the unit sphere, corrupts them with configurable
    noise, and writes paired (clean, noisy, label) HDF5 datasets.
    """

    def __init__(self, root_dir='./data', sample_points=1024):
        """
        Initialize the denoising dataset preparer.

        Args:
            root_dir: root directory where datasets are saved
            sample_points: number of points sampled per point cloud
        """
        self.root_dir = root_dir
        self.sample_points = sample_points
        self.noise_dir = os.path.join(root_dir, 'modelnet_noisy')
        os.makedirs(self.noise_dir, exist_ok=True)

    def load_modelnet_from_files(self, modelnet_dir):
        """
        Load ModelNet directly from the file system, bypassing PyTorch
        Geometric's data loaders.

        Args:
            modelnet_dir: ModelNet dataset directory

        Returns:
            (train_points, train_labels, test_points, test_labels, categories)
            where point tensors are float [N, sample_points, 3] and labels
            are long tensors of category indices.
        """
        print(f"从 {modelnet_dir} 直接加载点云数据...")

        # A category is any non-hidden subdirectory of the dataset root.
        # Sorted so label indices are deterministic across runs/machines
        # (os.listdir order is filesystem-dependent).
        categories = sorted(
            entry for entry in os.listdir(modelnet_dir)
            if os.path.isdir(os.path.join(modelnet_dir, entry)) and not entry.startswith('.')
        )

        print(f"找到 {len(categories)} 个类别: {categories}")

        train_points, train_labels = self._load_split(modelnet_dir, categories, 'train', "加载训练数据")
        test_points, test_labels = self._load_split(modelnet_dir, categories, 'test', "加载测试数据")

        print(f"训练集大小: {len(train_points)} 个样本")
        print(f"测试集大小: {len(test_points)} 个样本")
        print(f"类别数量: {len(categories)}")

        return train_points, train_labels, test_points, test_labels, categories

    def _load_split(self, modelnet_dir, categories, split, desc):
        """Load and sample every OFF file of one split ('train' or 'test').

        Returns (points, labels) as float/long tensors; unparseable files
        are skipped (``_load_off_file`` returns None for them).
        """
        points_list = []
        labels_list = []
        for label, category in enumerate(tqdm(categories, desc=desc)):
            pattern = os.path.join(modelnet_dir, category, split, '*.off')
            for off_file in glob.glob(pattern):
                points = self._load_off_file(off_file, self.sample_points)
                if points is not None:
                    points_list.append(points)
                    labels_list.append(label)  # category index serves as the label
        points = torch.tensor(np.array(points_list), dtype=torch.float)
        labels = torch.tensor(np.array(labels_list), dtype=torch.long)
        return points, labels

    def _load_off_file(self, off_file, num_points):
        """
        Load an OFF-format 3D model and sample a point cloud from its vertices.

        Args:
            off_file: path to the OFF file
            num_points: number of points to sample

        Returns:
            numpy array of shape [num_points, 3], centered and scaled into the
            unit sphere, or None if the file cannot be parsed.
        """
        try:
            with open(off_file, 'r') as f:
                lines = f.readlines()

            first_line = lines[0].strip()
            if first_line == "OFF":
                # Standard OFF: counts are on the second line.
                n_verts, n_faces, _ = map(int, lines[1].strip().split())
                lines = lines[2:]
            elif first_line.startswith("OFF"):
                # Some ModelNet files fuse "OFF" with the counts on one line.
                parts = first_line[3:].strip().split()
                if len(parts) == 3:
                    n_verts, n_faces, _ = map(int, parts)
                    # BUGFIX: the header line must be skipped, otherwise vertex
                    # parsing starts on "OFF n f e" and float() raises.
                    lines = lines[1:]
                else:
                    n_verts, n_faces, _ = map(int, lines[1].strip().split())
                    # BUGFIX: skip BOTH the header and the count line (the old
                    # code kept the header at position 0).
                    lines = lines[2:]
            else:
                # Not an OFF file at all.
                return None

            # Read vertices; keep only XYZ (OFF may append color components).
            vertices = [list(map(float, lines[i].strip().split()))[:3]
                        for i in range(n_verts)]

            if not vertices:
                return None  # guard: avoids ZeroDivisionError in the tiling below

            # Too few vertices: tile them so sampling without replacement works.
            if len(vertices) < num_points:
                vertices = vertices * (num_points // len(vertices) + 1)

            vertices = np.array(vertices)
            indices = np.random.choice(len(vertices), num_points, replace=False)
            sampled_points = vertices[indices]

            # Normalize into the unit sphere (zero-center, scale by max radius).
            sampled_points = sampled_points - np.mean(sampled_points, axis=0)
            scale = np.max(np.linalg.norm(sampled_points, axis=1))
            if scale > 0:  # degenerate cloud (all points identical) stays at the origin
                sampled_points = sampled_points / scale

            return sampled_points

        except Exception as e:
            print(f"加载文件 {off_file} 时出错: {e}")
            return None

    def add_noise(self, points, noise_type, noise_level):
        """
        Corrupt a point cloud with a given type of noise.

        Args:
            points: point cloud tensor of shape [N, 3]
            noise_type: one of 'gaussian', 'outlier', 'density', 'mixed'
            noise_level: noise magnitude; for 'outlier'/'density' it is the
                fraction of points affected, for 'gaussian' the std-dev

        Returns:
            noisy point cloud tensor of shape [N, 3]

        Raises:
            ValueError: if noise_type is not recognized
        """
        noisy_points = points.clone()

        if noise_type == 'gaussian':
            # Per-coordinate Gaussian jitter.
            noise = torch.randn_like(points) * noise_level
            noisy_points = points + noise

        elif noise_type == 'outlier':
            # Replace a fraction of points with random outliers.
            num_outliers = int(points.shape[0] * noise_level)
            if num_outliers > 0:
                # Sample outliers uniformly inside the bounding box, then push
                # them slightly outside it with a random offset.
                min_bound = torch.min(points, dim=0)[0]
                max_bound = torch.max(points, dim=0)[0]
                outliers = torch.rand(num_outliers, 3) * (max_bound - min_bound) + min_bound
                outliers = outliers + (torch.rand(num_outliers, 3) - 0.5) * 0.4
                indices = torch.randperm(points.shape[0])[:num_outliers]
                noisy_points[indices] = outliers

        elif noise_type == 'density':
            # Simulate non-uniform density: drop a fraction of points, then
            # duplicate kept points so the total count stays constant.
            num_to_keep = int(points.shape[0] * (1 - noise_level))
            if num_to_keep < points.shape[0]:
                indices = torch.randperm(points.shape[0])[:num_to_keep]
                kept_points = points[indices]
                extra_indices = torch.randint(0, num_to_keep, (points.shape[0] - num_to_keep,))
                extra_points = kept_points[extra_indices]
                noisy_points = torch.cat([kept_points, extra_points], dim=0)

        elif noise_type == 'mixed':
            # Combination: 70% of the level as Gaussian jitter, 30% as outliers.
            noise = torch.randn_like(points) * (noise_level * 0.7)
            noisy_points = points + noise

            num_outliers = int(points.shape[0] * (noise_level * 0.3))
            if num_outliers > 0:
                min_bound = torch.min(points, dim=0)[0]
                max_bound = torch.max(points, dim=0)[0]
                outliers = torch.rand(num_outliers, 3) * (max_bound - min_bound) + min_bound
                outliers = outliers + (torch.rand(num_outliers, 3) - 0.5) * 0.4
                indices = torch.randperm(points.shape[0])[:num_outliers]
                noisy_points[indices] = outliers

        else:
            raise ValueError(f"未知的噪声类型: {noise_type}")

        return noisy_points

    def prepare_denoising_dataset(self, modelnet_dir,
                                  noise_types=('gaussian', 'outlier', 'density', 'mixed'),
                                  noise_levels=(0.01, 0.02, 0.05, 0.1)):
        """
        Prepare denoising datasets for every (noise_type, noise_level) pair.

        Defaults are tuples rather than lists to avoid the mutable-default
        pitfall; callers passing lists are unaffected.

        Args:
            modelnet_dir: ModelNet dataset directory
            noise_types: iterable of noise type names
            noise_levels: iterable of noise levels
        """
        # Load everything once; reuse across all noise configurations.
        train_points, train_labels, test_points, test_labels, categories = self.load_modelnet_from_files(modelnet_dir)

        for noise_type in noise_types:
            for noise_level in noise_levels:
                print(f"\n准备 {noise_type} 噪声 (级别: {noise_level}) 的去噪数据集...")

                # One output directory per (type, level) configuration.
                output_dir = os.path.join(self.noise_dir, f"{noise_type}_{noise_level}")
                os.makedirs(output_dir, exist_ok=True)

                train_output_file = os.path.join(output_dir, "train_denoising.h5")
                self._create_noisy_dataset(train_points, train_labels, train_output_file, noise_type, noise_level, 'train')

                test_output_file = os.path.join(output_dir, "test_denoising.h5")
                self._create_noisy_dataset(test_points, test_labels, test_output_file, noise_type, noise_level, 'test')

                # Record the label-index -> category-name mapping alongside the data.
                with open(os.path.join(output_dir, "categories.txt"), 'w') as f:
                    for i, category in enumerate(categories):
                        f.write(f"{i}: {category}\n")

    def _create_noisy_dataset(self, points, labels, output_file, noise_type, noise_level, split):
        """
        Write a paired clean/noisy dataset to an HDF5 file.

        Args:
            points: point cloud tensor of shape [batch_size, N, 3]
            labels: class label tensor of shape [batch_size]
            output_file: output HDF5 file path
            noise_type: noise type name
            noise_level: noise level
            split: dataset split ('train' or 'test'), used only for logging
        """
        clean_points_list = []
        noisy_points_list = []
        labels_list = []

        print(f"处理 {split} 数据集...")

        for i in tqdm(range(len(points)), desc=f"添加 {noise_type} 噪声"):
            clean_point_cloud = points[i]
            noisy_point_cloud = self.add_noise(clean_point_cloud, noise_type, noise_level)

            clean_points_list.append(clean_point_cloud.numpy())
            noisy_points_list.append(noisy_point_cloud.numpy())
            labels_list.append(labels[i].item())

        clean_points_array = np.array(clean_points_list)
        noisy_points_array = np.array(noisy_points_list)
        labels_array = np.array(labels_list).reshape(-1, 1)

        # Three aligned datasets: clean targets, noisy inputs, class labels.
        with h5py.File(output_file, 'w') as f:
            f.create_dataset('clean_points', data=clean_points_array)
            f.create_dataset('noisy_points', data=noisy_points_array)
            f.create_dataset('labels', data=labels_array)

        print(f"{split} 数据集保存至 {output_file}")
        print(f"数据集大小: {len(clean_points_list)} 个样本")

    def visualize_denoising_pairs(self, h5_file, num_samples=3):
        """
        Visualize clean/noisy point-cloud pairs side by side.

        Saves the figure as a PNG next to the HDF5 file, then shows it.

        Args:
            h5_file: path to a denoising HDF5 file
            num_samples: number of random samples to visualize
        """
        with h5py.File(h5_file, 'r') as f:
            clean_points = f['clean_points'][:]
            noisy_points = f['noisy_points'][:]
            labels = f['labels'][:]

        # Optional label-index -> category-name mapping written by
        # prepare_denoising_dataset.
        categories = {}
        category_file = os.path.join(os.path.dirname(h5_file), "categories.txt")
        if os.path.exists(category_file):
            with open(category_file, 'r') as f:
                for line in f:
                    parts = line.strip().split(': ')
                    if len(parts) == 2:
                        idx, name = parts
                        categories[int(idx)] = name

        # Pick random samples (without replacement, capped at dataset size).
        indices = np.random.choice(len(clean_points), min(num_samples, len(clean_points)), replace=False)

        fig = plt.figure(figsize=(15, 5 * len(indices)))

        for i, idx in enumerate(indices):
            clean = clean_points[idx]
            noisy = noisy_points[idx]
            label = labels[idx][0]

            category_name = categories.get(int(label), f"类别 {int(label)}")

            # One row per sample: clean on the left, noisy on the right.
            ax1 = fig.add_subplot(len(indices), 2, i*2+1, projection='3d')
            ax2 = fig.add_subplot(len(indices), 2, i*2+2, projection='3d')

            ax1.scatter(clean[:, 0], clean[:, 1], clean[:, 2], s=2, c='blue', alpha=0.8)
            ax1.set_title(f'样本 {idx} ({category_name}) - 干净点云', fontsize=12)
            ax1.set_xlabel('X', fontsize=10)
            ax1.set_ylabel('Y', fontsize=10)
            ax1.set_zlabel('Z', fontsize=10)
            ax1.view_init(elev=30, azim=45)

            ax2.scatter(noisy[:, 0], noisy[:, 1], noisy[:, 2], s=2, c='red', alpha=0.8)
            ax2.set_title(f'样本 {idx} ({category_name}) - 噪声点云', fontsize=12)
            ax2.set_xlabel('X', fontsize=10)
            ax2.set_ylabel('Y', fontsize=10)
            ax2.set_zlabel('Z', fontsize=10)
            ax2.view_init(elev=30, azim=45)

        plt.tight_layout()

        # Save to disk first so the figure survives headless environments.
        noise_type = os.path.basename(os.path.dirname(h5_file))
        output_dir = os.path.dirname(h5_file)
        fig_file = os.path.join(output_dir, f"{noise_type}_visualization.png")
        plt.savefig(fig_file, dpi=150, bbox_inches='tight')
        print(f"可视化结果已保存到: {fig_file}")

        plt.show()
def main():
    """Build noisy ModelNet denoising datasets and visualize a few sample pairs."""
    # Create the denoising dataset preparer.
    denoiser = DenoisingDatasetPreparer(root_dir='./data', sample_points=1024)

    # Prepare denoising datasets with several noise types and levels.
    modelnet_dir = './data/ModelNet40'  # adjust to your ModelNet40 directory
    denoiser.prepare_denoising_dataset(
        modelnet_dir=modelnet_dir,
        noise_types=['gaussian', 'outlier', 'mixed'],
        noise_levels=[0.01, 0.05, 0.1],
    )

    # Visualize sample pairs for each noise configuration that was produced.
    print("\n可视化噪声数据样本...")
    for config in ("gaussian_0.05", "outlier_0.05"):
        h5_path = os.path.join(denoiser.noise_dir, config, "test_denoising.h5")
        if os.path.exists(h5_path):
            denoiser.visualize_denoising_pairs(h5_path, num_samples=3)

    print("完成!")


if __name__ == "__main__":
    main()