import os
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch
from scipy.spatial.distance import cdist
from concurrent.futures import ProcessPoolExecutor
import re,time,trimesh
import pandas as pd
from scipy.spatial import Delaunay
from sklearn.neighbors import NearestNeighbors
from scipy.spatial import cKDTree


def pc_normalize(pc):
    """Center a point cloud and scale it into the unit sphere.

    :param pc: (N, C) torch tensor of coordinates.
    :return: tensor of the same shape, zero-mean with max radius 1.
    """
    # Use the torch-native ``dim=`` keyword instead of the numpy-style
    # ``axis=`` alias the original relied on.
    centroid = torch.mean(pc, dim=0)
    pc = pc - centroid
    # Largest distance to the centroid; epsilon guards a degenerate cloud
    # where every point coincides with the centroid.
    m = torch.max(torch.sqrt(torch.sum(pc ** 2, dim=1)))
    return pc / (m + 1e-10)
 
#     return downsampled_points
def min_max_normalize(sample, min_val=-2.5, max_val=2.5):
    """Min-max normalize ``sample`` into [0, 1] for a known value range.

    Generalized: the source range can now be overridden; the defaults keep
    the original hard-coded [-2.5, 2.5] behavior, so existing callers are
    unaffected.

    :param sample: scalar or array-like values.
    :param min_val: lower bound of the source range.
    :param max_val: upper bound of the source range.
    :return: normalized values as a numpy array; if the range is degenerate
             the input is returned unscaled (matching the original guard).
    """
    if max_val == min_val:
        return np.array(sample)
    return np.array((sample - min_val) / (max_val - min_val))

def voxel_downsampling_np(point_cloud, target_num_points, voxel_size):
    """Voxel-grid downsampling of a numpy point cloud.

    Buckets points by the integer voxel containing their xyz coordinates,
    keeps one uniformly random point per occupied voxel, then pads by
    resampling existing rows if fewer than ``target_num_points`` voxels
    are occupied.

    :param point_cloud: (N, C) array; the first three columns are xyz.
    :param target_num_points: number of points to return.
    :param voxel_size: edge length of the cubic voxels.
    :return: (target_num_points, C) array.
    """
    # Integer voxel index per point (xyz columns only).
    voxel_grid = np.floor(point_cloud[:, :3] / voxel_size).astype(np.int32)

    # Bucket points by voxel key — dict.setdefault replaces the original
    # membership-check-then-insert pattern; insertion order is preserved.
    voxel_dict = {}
    for idx, voxel in enumerate(voxel_grid):
        voxel_dict.setdefault(tuple(voxel), []).append(point_cloud[idx])

    # One uniformly random representative per occupied voxel.
    downsampled_points = np.array([
        points[np.random.choice(len(points))]
        for points in voxel_dict.values()
    ])

    # Pad by resampling existing rows when there are too few voxels.
    n_kept = downsampled_points.shape[0]
    if n_kept < target_num_points:
        extra = np.random.choice(n_kept, target_num_points - n_kept, replace=True)
        downsampled_points = np.vstack((downsampled_points, downsampled_points[extra]))

    return downsampled_points[:target_num_points]

def voxel_downsampling(point_cloud, target_num_points, voxel_size):
    """Voxel-grid downsampling for a torch point cloud.

    Buckets points by the integer voxel containing their xyz coordinates,
    keeps one uniformly random point per occupied voxel, and pads by
    resampling existing rows if fewer than ``target_num_points`` voxels
    are occupied.

    :param point_cloud: (N, C) torch tensor; the first three columns are xyz.
    :param target_num_points: number of points to return.
    :param voxel_size: edge length of the cubic voxels.
    :return: (target_num_points, C) torch tensor.
    """
    # Integer voxel index for every point (xyz only).
    voxel_ids = torch.floor(point_cloud[:, :3] / voxel_size).to(torch.int32)

    # Bucket points by voxel key; dict preserves first-seen voxel order.
    buckets = {}
    for row, vid in enumerate(voxel_ids):
        buckets.setdefault(tuple(vid.tolist()), []).append(point_cloud[row])

    # One uniformly random representative per voxel.
    kept = []
    for members in buckets.values():
        pick = torch.randint(0, len(members), (1,)).item()
        kept.append(members[pick])
    kept = torch.stack(kept)

    # Pad with resampled rows when there are fewer voxels than requested.
    shortfall = target_num_points - kept.shape[0]
    if shortfall > 0:
        extra = torch.randint(0, kept.shape[0], (shortfall,))
        kept = torch.cat((kept, kept[extra]))

    # Trim to exactly the requested count.
    return kept[:target_num_points]


def min_max_normalize_coord(arr):
    """Column-wise min-max normalization of a 2-D array into [0, 1]."""
    lo = arr.min(axis=0)
    hi = arr.max(axis=0)
    # Epsilon keeps constant columns from dividing by zero.
    span = hi - lo + 1e-10
    return (arr - lo) / span

def calculate_normals_with_angle(point_cloud):
    """Append a pseudo-normal derived from the angle-of-attack column.

    The last column of ``point_cloud`` is interpreted as an angle in
    degrees; the appended normal is (cos(a), sin(a), 0) — the z component
    is assumed flat, as in the original implementation.  Vectorized: the
    original per-point Python loop is replaced with one numpy expression.

    :param point_cloud: (N, C) array whose last column is the angle (deg).
    :return: (N, C + 3) array with the three normal columns appended.
    """
    angles_rad = np.radians(point_cloud[:, -1])
    normals = np.column_stack([
        np.cos(angles_rad),         # normal x component
        np.sin(angles_rad),         # normal y component
        np.zeros_like(angles_rad),  # z assumed 0, matching the original
    ])
    return np.hstack((point_cloud, normals))


def compute_normals(points, k=10):
    """Estimate a unit normal for every point via local PCA.

    For each point, the k nearest neighbours are gathered and the
    eigenvector of their (centered) covariance with the smallest
    eigenvalue — the direction of least variance — is taken as the normal.

    Fixes a NameError in the original: sklearn's ``PCA`` was used without
    being imported anywhere in this file.  ``np.linalg.eigh`` computes the
    same least-variance direction without a new dependency.

    :param points: (N, 3) array of point coordinates.
    :param k: number of nearest neighbours considered per point.
    :return: (N, 3) array of unit normals.
    """
    n_points = points.shape[0]
    normals = np.zeros_like(points)  # output buffer

    # Full pairwise distance matrix, shape (N, N).
    distances = cdist(points, points)

    for i in range(n_points):
        # k nearest neighbours, skipping the point itself (index 0 of sort).
        neighbor_indices = np.argsort(distances[i])[1:k + 1]
        neighbors = points[neighbor_indices]

        # PCA by hand: eigen-decompose the scatter matrix of the centered
        # neighbourhood.  eigh returns eigenvalues in ascending order, so
        # column 0 is the least-variance direction (the surface normal).
        centered = neighbors - neighbors.mean(axis=0)
        _, eigvecs = np.linalg.eigh(centered.T @ centered)
        normal = eigvecs[:, 0]

        # Orient the normal away from the local centroid (same heuristic
        # as the original code; sign of an eigenvector is arbitrary).
        if np.dot(normal, points[i] - np.mean(neighbors, axis=0)) < 0:
            normal = -normal

        normals[i] = normal

    return normals

def standardize(y, y_mean, y_std):
    """Z-score ``y`` using the supplied mean and standard deviation."""
    centered = y - y_mean
    return centered / y_std


def pc_normalize1(xyz, global_mean=None, global_std=None):
    """
    Z-score-normalize coordinate data.

    Uses the supplied global mean/std when both are provided; otherwise
    falls back to per-array (local) statistics.
    """
    use_global = global_mean is not None and global_std is not None
    if use_global:
        # Normalize with the caller-supplied global statistics.
        return (xyz - global_mean) / global_std
    # No global statistics available: use this array's own mean/std.
    return (xyz - xyz.mean(axis=0)) / xyz.std(axis=0)



### initial for txt files
class DrivAerDataset(Dataset):
    """DrivAer shapes + drag-coefficient labels read from .txt point clouds.

    Each row of the CSV at ``data_list_file`` is expected to look like
    [sample_name, cd, p0, p1, p2, ...]; ``__getitem__`` loads the matching
    ``<sample_name>.txt`` point cloud from ``shape_path``.
    """
    def __init__(self, config,subset):
        # config: dict-like with config['paths']['csv_paths'] and
        # config['paths']['shape_path']; subset: CSV file name listing samples.
        self.config = config
        self.data_root = config['paths']['csv_paths']
        self.subset = subset
        self.shape_path = config['paths']['shape_path']
        self.data_list_file = os.path.join(self.data_root, self.subset)
        
        # self.sample_points_num = config.npoints
        # self.permutation = np.arange(self.npoints)
        self.epoch = 0
        
        # Raw numpy rows of the CSV (header consumed by pandas).
        self.sample_list = pd.read_csv(self.data_list_file).values
        
        self.shape_path = os.path.join(self.shape_path)
        

    
    def pc_norm(self, pc):
        """ pc: NxC, return NxC — centered and scaled into the unit sphere """
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
        pc = pc / m
        return pc
        
    def random_sample(self, pc, num):
        # NOTE(review): self.permutation is never initialized (its assignment
        # in __init__ is commented out) — calling this raises AttributeError.
        np.random.shuffle(self.permutation)
        pc = pc[self.permutation[:num]]
        return pc
    
    def set_epoch(self, epoch):
        # Record the current epoch (hook for epoch-dependent sampling).
        self.epoch = epoch
        
    def sample_points_random(self, vertices, num_sample=5000):
        """Randomly pick ``num_sample`` vertices without replacement."""
        # Coerce vertices to a numpy array.
        vertices = np.array(vertices)
        # vert_normal = np.array(vert_normal)

        # Total number of points available.
        total_points = len(vertices)

        # Indices of num_sample randomly selected points.
        random_indices = np.random.choice(total_points, size=num_sample, replace=False)

        # Coordinates at the selected indices.
        sampled_points = vertices[random_indices]
        # sampled_normals = vert_normal[random_indices]

        # (Concatenating coordinates with normals is disabled.)
        # sampled_data = np.hstack([sampled_points, sampled_normals])

        return sampled_points
    
        
    def __getitem__(self, idx):
        """Return (points [N,3], cd label, sample name, per-point size [N,3])."""
        sample = self.sample_list[idx]
        sample_name = sample[0]  # column 0 holds the design/sample name
        shape_data = np.loadtxt(os.path.join(self.shape_path, f'{sample_name}.txt'))
        # shape_data = self.sample_points_random(shape_data)
        shape_data = self.pc_norm(shape_data[:,:3]) # keep xyz only, unit-sphere normalized
        shape_data = torch.tensor(shape_data, dtype=torch.float32)
        parameter_data = sample[2:5].astype(np.float32)  
        # cd = sample[1].astype(np.float32)     
        cd = np.array(sample[1]).astype(np.float32) 
        # input_data = torch.tensor(input_data)        
        label_data = torch.tensor(cd)         
        cubesize = parameter_data[:3] ### first three parameters: L/W/H
        # Fixed reference ranges for the three size parameters.
        xyzsize = np.array([
            [4, 1.8, 1],
            [6, 2.6, 2]
        ], dtype=np.float32)

        # Per-dimension min and max of the reference range.
        min_vals = xyzsize.min(axis=0)
        max_vals = xyzsize.max(axis=0)
        # Min-max normalization into [0, 1] (disabled):
        # cubesize = (cubesize - min_vals) / (max_vals - min_vals)
        # Min-max normalize into [-1, 1].
        cubesize = 2 * (cubesize - min_vals) / (max_vals - min_vals) - 1
        #print("shape_data initial shape:", cubasize.shape)
        N, C = shape_data.shape[0], shape_data.shape[1]
             # Broadcast the 3-vector to one copy per point: [N, 3].
        cubesize = torch.tensor(cubesize)
        cubesize = cubesize.unsqueeze(0)  # -> [1, 3]
        cubesize = cubesize.repeat(N, 1)  # -> [N, 3]
        # cubasize = torch.tensor(cubasize)  ### B,N,3
        #print("cubasize processed shape:", cubasize.shape)
        return shape_data, label_data, sample_name,cubesize

    def __len__(self):
        # One dataset item per CSV row.
        return len(self.sample_list)


### read stl to train 1.4/2024
class DrivAerDataset_stl(Dataset):
    """DrivAer dataset that samples point clouds straight from STL meshes.

    CSV rows are expected to be [sample_name, cd, ...]; ``__getitem__``
    loads ``<sample_name>.stl``, surface-samples it, and returns
    (points, cd label, name, per-point normalized bbox size).
    """
    def __init__(self, config,subset):
        # config: dict-like with config['paths']['csv_paths'] and
        # config['paths']['shape_path']; subset: CSV file name listing samples.
        self.config = config
        self.data_root = config['paths']['csv_paths']
        self.subset = subset
        self.shape_path = config['paths']['shape_path']
        # self.data_list_file = os.path.join(self.data_root, self.subset)
        self.data_list_file = os.path.join(self.data_root, self.subset)
        
        # self.sample_points_num = config.npoints
        # self.permutation = np.arange(self.npoints)
        self.epoch = 0
        
        # Raw numpy rows of the sample CSV.
        self.sample_list = pd.read_csv(self.data_list_file).values
        
        self.shape_path = os.path.join(self.shape_path)
        
    
    def norm_data(self,):
        # NOTE(review): broken/dead as written — self.sample_list is a numpy
        # array (pandas .values), which has no .drop or label indexing, and
        # normalizaAllData, self.norm_val1, self.norm_val2, self.norm_method
        # are never defined anywhere in this class.
        columns_to_scale = self.sample_list.drop(['Design'])
        names = self.sample_list['Design']
        data_scale = self.sample_list[columns_to_scale].values
        standard_data = normalizaAllData(data_scale, self.norm_val1, self.norm_val2, method=self.norm_method)
       
        standardized_df = pd.DataFrame(standard_data, columns=columns_to_scale)
        standardized_df.insert(0, 'Design', names)
        
        return standardized_df
            
    def get_norm_info(self,):
        # NOTE(review): self.norm_val1/self.norm_val2 are never set in this
        # class — calling this raises AttributeError.
        return self.config, self.norm_val1, self.norm_val2
    
    def pc_norm(self, pc):
        """ pc: NxC, return NxC — centered and scaled into the unit sphere """
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
        pc = pc / m
        return pc
        
    def random_sample(self, pc, num):
        # NOTE(review): self.permutation is never initialized (commented out
        # in __init__) — calling this raises AttributeError.
        np.random.shuffle(self.permutation)
        pc = pc[self.permutation[:num]]
        return pc
    
    def set_epoch(self, epoch):
        # Record the current epoch (hook for epoch-dependent behavior).
        self.epoch = epoch
        
    def transfomer_position(self, position):
        # Remap coordinate axes: (x, y, z) -> (-z, y, x).
        x = position[:,0]
        y = position[:,1]
        z = position[:,2]
        new_position = torch.stack([-z,y,x], dim=1)
        return new_position
    
    def sample_points_random(self, vertices, num_sample=5000):
        """Randomly pick ``num_sample`` vertices without replacement."""
        # Coerce vertices to a numpy array.
        vertices = np.array(vertices)
        # vert_normal = np.array(vert_normal)

        # Total number of points available.
        total_points = len(vertices)

        # Indices of num_sample randomly selected points.
        random_indices = np.random.choice(total_points, size=num_sample, replace=False)

        # Coordinates at the selected indices.
        sampled_points = vertices[random_indices]
        # sampled_normals = vert_normal[random_indices]

        # (Concatenating coordinates with normals is disabled.)
        # sampled_data = np.hstack([sampled_points, sampled_normals])

        return sampled_points
    
    def load_stl_with_normals(self, file_path, num_samples=10000):
        """Load an STL mesh and surface-sample ``num_samples`` points.

        :return: (sampled points, np.array([length, width, height])) where
                 the extents come from the mesh's axis-aligned bounding box.
        """
        # Time the STL read.
        start_time = time.time()
        
        # Load the STL file as a mesh.
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        
        # Elapsed time for loading.
        load_time = time.time() - start_time
        # print(f"Time to load STL mesh: {load_time:.6f} seconds")
        
        # # Mesh vertices and per-vertex normals (unused here):
        # vertices = stl_mesh.vertices
        # normals = stl_mesh.vertex_normals
        
        # Global mesh descriptors: bounding-box extents.
        # volume = stl_mesh.volume
        bounding_box = stl_mesh.bounding_box.bounds  # Bounding box bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]  # extent along x
        width = bounding_box[1, 1] - bounding_box[0, 1]   # extent along y
        height = bounding_box[1, 2] - bounding_box[0, 2]  # extent along z
        # area = stl_mesh.area
        # interg_mean_convx = stl_mesh.integral_mean_curvature
        # Pack length/width/height into one numpy array.
        lwh = np.array([length, width, height])
        # Time the sampling step.
        start_time_sampling = time.time()
        
        # Uniform surface sampling (area-weighted over faces).
        sampled_points = stl_mesh.sample(num_samples)  # sampled point cloud
        
        # Per-sample normals (disabled):
        # closest_faces = stl_mesh.nearest.face(sampled_points)
        # sampled_normals = stl_mesh.face_normals[closest_faces]
        
        # Elapsed time for sampling.
        sampling_time = time.time() - start_time_sampling
        # print(f"Time to sample {num_samples} points: {sampling_time:.6f} seconds")
        
        # Optional dump of the sampled data to a .txt file:
        # output_file = file_path.replace('.stl', '_sampled_points_normals.txt')
        # np.savetxt(output_file, sampled_points, fmt='%.6f', delimiter=' ')
        # print(f"Saved sampled data to {output_file}")
        
        return sampled_points, lwh

        
    def __getitem__(self, idx):
        """Return (points [N,3], cd label, name, per-point bbox size [N,3])."""
        sample = self.sample_list[idx]
        sample_name = sample[0]  # column 0 holds the design/sample name
        # shape_data = np.loadtxt(os.path.join(self.shape_path, f'{sample_name}.txt'))
        shape_data, cubesize = self.load_stl_with_normals(os.path.join(self.shape_path, f'{sample_name}.stl'))
        # shape_data = self.sample_points_random(shape_data)
        shape_data = self.pc_norm(shape_data) # unit-sphere normalized
        shape_data = torch.tensor(shape_data, dtype=torch.float32)
        # parameter_data = sample[3:6].astype(np.float32)  
        # cd = sample[1].astype(np.float32)     
        cd = np.array(sample[1]).astype(np.float32) 
        # input_data = torch.tensor(input_data)        
        label_data = torch.tensor(cd)         
        # cubesize = parameter_data[:3] ### 
        # Fixed reference ranges for the bbox extents (dataset-wide min/max).
        xyzsize = np.array([
            [4.3195825, 1.8442736, 1.3227063],
            [5.2373676, 2.3877249, 1.761713]
        ], dtype=np.float32)

        # Per-dimension min and max of the reference range.
        min_vals = xyzsize.min(axis=0)
        max_vals = xyzsize.max(axis=0)
        # Min-max normalize into [0, 1].
        cubesize = (cubesize - min_vals) / (max_vals - min_vals)
        
        #print("shape_data initial shape:", cubasize.shape)
        N, C = shape_data.shape[0], shape_data.shape[1]
             # Broadcast the 3-vector to one copy per point: [N, 3].
        cubesize = torch.tensor(cubesize, dtype=torch.float32)
        cubesize = cubesize.unsqueeze(0)  # -> [1, 3]
        cubesize = cubesize.repeat(N, 1)  # -> [N, 3]
        # cubasize = torch.tensor(cubasize)  ### B,N,3
        #print("cubasize processed shape:", cubasize.shape)
        return shape_data, label_data, sample_name, cubesize

    def __len__(self):
        # One dataset item per CSV row.
        return len(self.sample_list)


### Single-STL testing: uniform and adaptive sampling (2024-01-08)
class DrivAerDataset_stl4pre_random(Dataset):
    """Single-STL inference dataset with several point-sampling strategies.

    Scans ``data_dir`` for STL meshes and, per item, returns an
    importance-sampled point cloud plus a per-point normalized bounding-box
    size vector.  Several alternative samplers (random, FPS, uniform-grid,
    k-means) are kept for experimentation; only ``sample_points`` is used
    in ``__getitem__``.
    """
    def __init__(self, data_dir, transform=None):
        """
        Initialize the dataset: scan ``data_dir`` for STL files.

        :param data_dir: directory containing the STL files and other data
        :param transform: optional augmentation / preprocessing hook (unused)
        """
        # NOTE(review): hard-coded machine-specific paths below;
        # data_list_file is built but never read (pd.read_csv is commented out).
        self.data_root = "/home/zhpe/ldz/driver/data/drivenet4test-txt"
        self.subset = "int-norm8shape-valide.csv"
        self.data_list_file = os.path.join(self.data_root, self.subset)
        self.data_dir = data_dir
        self.transform = transform
        self.stl_files = [f for f in os.listdir(data_dir) if f.endswith('.stl')]
        # self.sample_list = pd.read_csv(self.data_list_file).values
        
    def __len__(self):
        """
        Return the dataset size (number of STL files found).
        """
        return len(self.stl_files)

    def load_stl_with_normals(self,file_path):
        # Load a mesh and return its vertices, vertex normals and global
        # geometric descriptors (bbox extents, volume, surface area,
        # integral mean curvature).
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        vertices = stl_mesh.vertices
        normals = stl_mesh.vertex_normals  # per-vertex normals
        volume = stl_mesh.volume
        bounding_box = stl_mesh.bounding_box.bounds  # Bounding box bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]  # extent along x
        width = bounding_box[1, 1] - bounding_box[0, 1]   # extent along y
        height = bounding_box[1, 2] - bounding_box[0, 2]  # extent along z
        area = stl_mesh.area
        interg_mean_convx = stl_mesh.integral_mean_curvature
        return vertices, normals, length,width,height,volume,area,interg_mean_convx

    def compute_entropy(self, values):
        """
        Shannon entropy (nats) of a 20-bin histogram of ``values``.

        :param values: input values
        :return: entropy value
        """
        hist, bin_edges = np.histogram(values, bins=20, density=True)
        probabilities = hist / hist.sum()
        entropy = -np.sum(probabilities * np.log(probabilities + 1e-9))
        return entropy

    def compute_normal_entropy(self, normals):
        """
        Entropy of the normal-vector orientation distribution.

        :param normals: (N, 3) normal vectors
        :return: entropy value
        """
        # Normalize the normals onto the unit sphere.
        normal_magnitudes = np.linalg.norm(normals, axis=1)
        normalized_normals = normals / (normal_magnitudes[:, np.newaxis] + 1e-9)
        
        # Spherical binning of the orientations.
        theta = np.arctan2(normalized_normals[:, 1], normalized_normals[:, 0])  # azimuth in the XY plane
        phi = np.arccos(normalized_normals[:, 2])  # polar angle from Z
        hist, _ = np.histogramdd((theta, phi), bins=20)
        
        probabilities = hist / hist.sum()
        entropy = -np.sum(probabilities * np.log(probabilities + 1e-9))
        return entropy

    def compute_entropy_from_cubes(self, points, cube_size=(8, 1)):
        """
        Entropy of the point distribution over an (x, y) grid.

        :param points: downsampled point cloud
        :param cube_size: grid resolution, default 8x1
        :return: entropy value
        """
        # Spread the points over a cube_size grid covering their bounds.
        x_min, y_min, z_min = points.min(axis=0)
        x_max, y_max, z_max = points.max(axis=0)
        
        # Bin edges per dimension.
        x_bins = np.linspace(x_min, x_max, cube_size[0])
        y_bins = np.linspace(y_min, y_max, cube_size[1])
        
        # Assign points to grid cells.
        grid, _, _ = np.histogram2d(points[:, 0], points[:, 1], bins=[x_bins, y_bins])
        
        # Turn cell counts into a probability distribution.
        grid = grid / grid.sum()
        
        # Entropy of the cell occupancy.
        entropy = -np.sum(grid * np.log(grid + 1e-9))
        return entropy

    ## Random sampling
    def sample_points_random(self, vertices, vert_normal, num_sample=1000):
        # Coerce vertices and vert_normal to numpy arrays.
        vertices = np.array(vertices)
        vert_normal = np.array(vert_normal)

        # Total number of points available.
        total_points = len(vertices)

        # Indices of num_sample randomly chosen points (no replacement).
        random_indices = np.random.choice(total_points, size=num_sample, replace=False)

        # Coordinates and normals at those indices.
        sampled_points = vertices[random_indices]
        sampled_normals = vert_normal[random_indices]

        # Concatenate coordinates and normals into one array.
        sampled_data = np.hstack([sampled_points, sampled_normals])

        return sampled_data
    
    ## FPS sampling
    def sample_points_random_FPS(self, vertices, vert_normal, num_sample=1000, num_random=10000):
        # Coerce inputs to numpy arrays.
        vertices = np.array(vertices)
        vert_normal = np.array(vert_normal)

        # Step 1: randomly pre-select num_random points.
        total_points = len(vertices)
        random_indices = np.random.choice(total_points, size=num_random, replace=False)

        # Coordinates and normals of the pre-selection.
        random_points = vertices[random_indices]
        random_normals = vert_normal[random_indices]

        # Step 2: Farthest Point Sampling (FPS) over the pre-selected points.
        # A cKDTree is built to speed up the nearest-neighbour search.
        # NOTE(review): the tree is only queried once below; each FPS
        # iteration recomputes dense distances, so this is
        # O(num_sample * num_random).
        tree = cKDTree(random_points)
        
        # Step 3: FPS initialization — a random starting point.
        fps_indices = [np.random.randint(num_random)]  # random start among the pre-selected points
        fps_points = [random_points[fps_indices[0]]]
        fps_normals = [random_normals[fps_indices[0]]]
        
        # Step 4: minimum distance from every point to the selected set.
        dist, _ = tree.query(fps_points, k=1)
        dist = dist.flatten()  # initial distance array

        # Step 5: iteratively pick the farthest point.
        for _ in range(1, num_sample):
            # Vectorized (broadcast) distances from every candidate to every
            # already-selected point.
            dists_matrix = np.linalg.norm(random_points[:, np.newaxis] - np.array(fps_points), axis=2)  # distance to each selected point
            min_distances = np.min(dists_matrix, axis=1)  # minimum distance to the selected set

            # The candidate farthest from the current selection.
            farthest_index = np.argmax(min_distances)
            
            # Append the chosen point to the FPS result.
            fps_indices.append(random_indices[farthest_index])
            fps_points.append(random_points[farthest_index])
            fps_normals.append(random_normals[farthest_index])

        # Concatenate the chosen points and normals.
        sampled_data = np.hstack([np.array(fps_points), np.array(fps_normals)])

        return sampled_data
    
    ### Uniform grid sampling
    def sample_points_uniform(self, vertices, vert_normal, num_sample=100000):
        # Coerce inputs to numpy arrays.
        vertices = np.array(vertices)
        vert_normal = np.array(vert_normal)

        # Bounding box of the point cloud.
        min_coords = np.min(vertices, axis=0)
        max_coords = np.max(vertices, axis=0)

        # Grid cell size chosen so roughly num_sample cells cover the cloud.
        grid_size = np.ceil(np.cbrt(len(vertices) / num_sample))  # edge length of one cell
        grid_steps = np.ceil((max_coords - min_coords) / grid_size).astype(int)

        # All grid cell indices.
        grid_indices = np.mgrid[0:grid_steps[0], 0:grid_steps[1], 0:grid_steps[2]].reshape(3, -1).T

        # Convert cell indices to cell bounds.
        grid_bounds = min_coords + grid_indices * grid_size
        grid_bounds_max = np.minimum(min_coords + (grid_indices + 1) * grid_size, max_coords)

        # Which cell each point falls in.
        # NOTE(review): grid_ids is computed but never used below.
        grid_ids = np.floor((vertices - min_coords) / grid_size).astype(int)

        # Dictionary of the points contained in each occupied cell.
        grid_dict = {}

        # Boolean masks select the points belonging to each cell.
        # NOTE(review): this scans all vertices once per cell —
        # O(cells * points); grid_ids above could bucket in one pass.
        for i in range(len(grid_bounds)):
            lower_bound = grid_bounds[i]
            upper_bound = grid_bounds_max[i]

            # Boolean mask of points inside the current cell.
            mask = np.all((vertices >= lower_bound) & (vertices <= upper_bound), axis=1)

            grid_points = vertices[mask]
            grid_normals = vert_normal[mask]

            # Keep the cell only if it is occupied.
            if len(grid_points) > 0:
                grid_dict[i] = (grid_points, grid_normals)

        # Randomly draw one point per cell until enough samples are collected.
        sampled_points = []
        sampled_normals = []
        grid_keys = list(grid_dict.keys())

        # Draw from random cells until the requested sample count is reached.
        num_sampled = 0
        while num_sampled < num_sample and grid_keys:
            key = np.random.choice(grid_keys)
            grid_points, grid_normals = grid_dict[key]

            if len(grid_points) > 0:
                random_index = np.random.choice(len(grid_points))
                sampled_points.append(grid_points[random_index])
                sampled_normals.append(grid_normals[random_index])
                num_sampled += 1

            # Remove the processed cell from the pool.
            grid_keys.remove(key)

            # Stop once the requested number of samples is reached.
            if num_sampled >= num_sample:
                break

        # Concatenate sampled coordinates and normals into one array.
        sampled_data = np.hstack([np.array(sampled_points), np.array(sampled_normals)])

        return sampled_data

        
        # (importance-sampled points)
    
    ### Importance sampling
    def sample_points(self,vertices, curvatures, densities, vertical_axis=2, upper_samples=5000, lower_samples=5000):
        """Split the cloud at its vertical midpoint; sample the upper half
        with curvature-based weights and the lower half with inverse-density
        weights.  Returns (upper_samples + lower_samples, 4) rows of
        [x, y, z, curvature]."""
        # Determine the vertical range
        pointmax = np.max(vertices[:, vertical_axis])
        pointmin = np.min(vertices[:, vertical_axis])
        midpoint = np.mean([pointmax, pointmin])
        # midpoint = (pointmax-pointmin)*
        # Create masks for upper and lower half based on vertical axis
        upper_mask = vertices[:, vertical_axis] > midpoint
        lower_mask = ~upper_mask

        # Separate points, normals, and curvatures based on the masks
        upper_points = vertices[upper_mask]
        lower_points = vertices[lower_mask]

        # upper_normals = vert_normal[upper_mask]
        # lower_normals = vert_normal[lower_mask]

        upper_curvatures = curvatures[upper_mask]
        lower_curvatures = curvatures[lower_mask]
        
        # upper_vert_degree = vert_degree[upper_mask]
        # lower_vert_degree = vert_degree[lower_mask]

        upper_densities = densities[upper_mask]
        lower_densities = densities[lower_mask]

        # Compute weights for sampling
        # NOTE(review): upper_densities is computed but unused; sampling uses
        # np.random.choice's default replace=True, so duplicates can occur.
        upper_weight = np.exp(upper_curvatures)
        upper_weight /= upper_weight.sum()
        lower_weight = 1 / (lower_densities + 1e-6)
        lower_weight /= lower_weight.sum()

        # Perform the sampling with probability distribution based on weights
        upper_indices = np.random.choice(len(upper_points), size=upper_samples, p=upper_weight)
        lower_indices = np.random.choice(len(lower_points), size=lower_samples, p=lower_weight)

        # Get sampled points, normals, and curvatures
        upper_sampled_points = upper_points[upper_indices]
        # upper_sampled_normals = upper_normals[upper_indices]
        upper_sampled_curvatures = upper_curvatures[upper_indices]
        # upper_sampled_degree = upper_vert_degree[upper_indices]

        lower_sampled_points = lower_points[lower_indices]
        # lower_sampled_normals = lower_normals[lower_indices]
        lower_sampled_curvatures = lower_curvatures[lower_indices]
        # lower_sampled_degree = lower_vert_degree[lower_indices]

        # Stack the results together (points, normals, and curvatures)
        upper_sampled_data = np.hstack([upper_sampled_points,  upper_sampled_curvatures.reshape(-1, 1)])
        lower_sampled_data = np.hstack([lower_sampled_points, lower_sampled_curvatures.reshape(-1, 1)])

        # Combine the upper and lower sampled data
        sampled_data = np.vstack([upper_sampled_data, lower_sampled_data])

        # Return the combined array with shape [N, 4]
        return sampled_data

        # (per-point curvature)
    
    def compute_curvature(self,vertices, k=10):
        # Curvature proxy: smallest eigenvalue of each point's k-NN
        # neighbourhood covariance matrix.
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, indices = knn.kneighbors(vertices)
        neighbors = vertices[indices]
        covariance_matrices = np.array([np.cov(neigh.T) for neigh in neighbors])
        eigvals = np.linalg.eigvalsh(covariance_matrices)
        curvatures = eigvals[:, 0]  # smallest eigenvalue as the curvature proxy
        return curvatures

    def compute_density(self,vertices, k=10):
        # Local density: inverse of the mean distance to the k-1 nearest
        # neighbours (index 0 is the point itself, so it is skipped).
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, _ = knn.kneighbors(vertices)
        density = 1 / (distances[:, 1:].mean(axis=1) + 1e-6)
        return density

    def pc_norm(self, pc):
        """ pc: NxC, return NxC — centered and scaled into the unit sphere """
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
        pc = pc / m
        return pc
        

    # Local density of each point (mean neighbour distance).
        # NOTE(review): this nested def sits inside pc_norm's suite *after*
        # its return statement, so it is unreachable dead code that shadows
        # nothing — candidate for removal.
        def compute_density(self,vertices, k=10):
            knn = NearestNeighbors(n_neighbors=k)
            knn.fit(vertices)
            distances, _ = knn.kneighbors(vertices)
            density = 1 / (distances[:, 1:].mean(axis=1) + 1e-6)
            return density

    def load_stl_with_normals_new(self, file_path, num_samples=10000):
        # Time the STL read.
        start_time = time.time()
        
        # Load the STL file as a mesh.
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        
        # Elapsed time for loading.
        load_time = time.time() - start_time
        print(f"Time to load STL mesh: {load_time:.6f} seconds")
        

        # Global mesh descriptors: volume and bounding-box extents.
        volume = stl_mesh.volume
        bounding_box = stl_mesh.bounding_box.bounds  # Bounding box bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]  # extent along x
        width = bounding_box[1, 1] - bounding_box[0, 1]   # extent along y
        height = bounding_box[1, 2] - bounding_box[0, 2]  # extent along z
        # area = stl_mesh.area
        # interg_mean_convx = stl_mesh.integral_mean_curvature
        # Pack length/width/height into one numpy array.
        lwh = np.array([length, width, height])
        # Mesh vertices and per-vertex normals.
        # NOTE(review): vertices/normals/faces are fetched but unused here.
        vertices = stl_mesh.vertices
        normals = stl_mesh.vertex_normals  # 
        start_time_sampling = time.time()
        
        # Uniform surface sampling (area-weighted over faces).
        # sampled_points = self.kmeans_sampling(vertices)
        sampled_points,face_indices = stl_mesh.sample(num_samples,return_index=True)  # sampled point cloud
        # Triangle vertex indices per face.
        faces = stl_mesh.faces  # each face is a triple of vertex indices
        # face_indices maps each sampled point to the face it came from; the
        # commented-out code below would look up per-face vertex normals.
        # sampled_vertices_indices = faces[face_indices]

        # sampled_normals = normals[sampled_vertices_indices]
        # Per-sample normals via nearest faces (disabled):
        # closest_faces = stl_mesh.nearest.face(sampled_points)
        # sampled_normals = stl_mesh.face_normals[closest_faces]
        
        # Elapsed time for sampling.
        sampling_time = time.time() - start_time_sampling
        print(f"Time to sample {num_samples} points: {sampling_time:.6f} seconds")
        
        # Optional dump of the sampled data to a .txt file:
        # output_file = file_path.replace('.stl', '_sampled_points_normals.txt')
        # np.savetxt(output_file, sampled_points, fmt='%.6f', delimiter=' ')
        # print(f"Saved sampled data to {output_file}")
        
        return sampled_points, lwh

    def importance_sampling(self,vertices, normals, curvatures, num_samples=10000):
        # Weight points by local geometry (currently curvature only) and
        # draw num_samples indices with that probability.
        print("Shape of normals:", normals.shape)
    
        normal_weights = np.linalg.norm(normals, axis=1)  # normal magnitude as a weight (tunable)
        
        # Curvature as the sampling weight.
        curvature_weights = curvatures
        curvature_weights = np.maximum(curvature_weights, 0)  # clamp negative values
        # Combined normal + curvature weighting (disabled):
        # total_weights = normal_weights + curvature_weights
        total_weights = curvature_weights
        # Normalize the weights, guarding against a zero sum.
        total_weight_sum = np.sum(total_weights)
        if total_weight_sum == 0:
            total_weights = np.ones_like(total_weights)  # degenerate case: uniform weights
        else:
            total_weights /= total_weight_sum  # normalize to a probability distribution
        
        # NOTE(review): in the zero-sum branch the weights are all ones —
        # not a distribution — and np.random.choice rejects p that does not
        # sum to 1.
        # Importance-sample indices according to the weights.
        sampled_indices = np.random.choice(len(vertices), size=num_samples, p=total_weights)
        
        # Return the sampled point cloud.
        sampled_points = vertices[sampled_indices]
        return sampled_points

    def compute_normals_and_curvatures(self,vertices, k_neighbors=10):
        # Estimate per-point normals and curvatures via k-NN PCA.
        nbrs = NearestNeighbors(n_neighbors=k_neighbors, algorithm='auto').fit(vertices)
        distances, indices = nbrs.kneighbors(vertices)
        
        # Normals from a PCA-style eigen-decomposition per neighbourhood.
        normals = np.zeros_like(vertices)
        curvatures = np.zeros(len(vertices))
        
        for i in range(len(vertices)):
            # Covariance matrix of the local neighbourhood.
            points_nearby = vertices[indices[i]]
            covariance_matrix = np.cov(points_nearby.T)
            
            # Eigen-decomposition; eigh returns eigenvalues ascending.
            eigvals, eigvecs = np.linalg.eigh(covariance_matrix)
            
            # The normal is the eigenvector of the smallest eigenvalue.
            normals[i] = eigvecs[:, 0]
            
            # Simple curvature proxy: smallest eigenvalue / sum of eigenvalues.
            curvatures[i] = eigvals[0] / np.sum(eigvals)
        
        return normals, curvatures
    
    def kmeans_sampling(self,vertices, num_clusters=100000):
        # Downsample the cloud by K-means clustering.
        # NOTE(review): KMeans (sklearn.cluster.KMeans) is never imported in
        # this file — calling this raises NameError.
        kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(vertices)
        
        # The cluster centroids are the downsampled points.
        sampled_points = kmeans.cluster_centers_
        return sampled_points
    
    # vertices is assumed to be the raw (~8M-point) cloud.
    def process_point_cloud(self,vertices, num_clusters=100000, num_samples=10000, k_neighbors=10):
        # Step 1: K-means clustering down to num_clusters points.
        # NOTE(review): self.fkmeans_sampling does not exist (the method is
        # named kmeans_sampling) — this raises AttributeError as written.
        sampled_points_kmeans = self.fkmeans_sampling(vertices, num_clusters=num_clusters)
        
        # Step 2: normals and curvatures on the reduced cloud.
        normals, curvatures = self.compute_normals_and_curvatures(sampled_points_kmeans, k_neighbors)
        
        # Step 3: curvature/normal-based importance sampling to num_samples.
        final_sampled_points = self.importance_sampling(sampled_points_kmeans, normals, curvatures, num_samples=num_samples)
        
        return final_sampled_points
    
    def __getitem__(self, idx):
        """
        Build one sample: load an STL, importance-sample 10k points, and
        return (points [N, 3], per-point normalized bbox size [N, 3]).

        :param idx: index into the discovered STL files
        """
        stl_filename = os.path.join(self.data_dir, self.stl_files[idx])
        time1 = time.time()
        ### Load the mesh and surface-sample 200k raw points.
        vertices, cubesize= self.load_stl_with_normals_new(stl_filename,num_samples=200000)        
        
        time2 = time.time()
        # Per-point curvature and density for the importance sampler.
        curvatures = self.compute_curvature(vertices) 
        time3 = time.time()
        densities = self.compute_density(vertices)
        time4 = time.time()
        ### Alternative samplers (disabled):
        # sampled_data = self.importance_sampling(vertices, normals, curvatures, num_samples=10000)
        # sampled_data = self.sample_points_random(vertices, normals)
        # sampled_data = self.sample_points_random_FPS(vertices, normals)
        # sampled_data = self.sample_points_uniform(vertices, normals)
        ### Importance sampling (curvature-weighted top, density-weighted bottom).
        # sampled_data = vertices
        sampled_data = self.sample_points(vertices,curvatures, densities,vertical_axis=2, upper_samples=5000, lower_samples=5000)

        # Reference size range used to normalize the bbox extents.
        xyzsize = np.array([
            [4, 1.8, 1],
            [6, 2.6, 2]
        ], dtype=np.float32)

        # Per-dimension min and max of the reference range.
        min_vals = xyzsize.min(axis=0)
        max_vals = xyzsize.max(axis=0)
        # Min-max normalization.
        print("Cubesize before division:", cubesize)
        # cubesize = (cubesize - min_vals) / (max_vals - min_vals) ### [0, 1] normalization
        cubesize = 2 * (cubesize - min_vals) / (max_vals - min_vals) - 1  ## into [-1, 1]
        print("Cubesize after division:", cubesize)
        cubesize = np.array(cubesize)
        cubesize = cubesize.astype(np.float32)
        ### Keep only L, W, H.
        cubesize = cubesize[:3]
        N,C = sampled_data.shape
        
        # curvature_entropy = self.compute_entropy(sampled_points[:, 2])
        cubesize = torch.tensor(cubesize)
        cubesize = cubesize.unsqueeze(0)  # -> [1, 3]
        cubesize = cubesize.repeat(N, 1)  # broadcast to one copy per point: [N, 3]
        
        # Normalize xyz into the unit sphere; the curvature column is untouched.
        sampled_data[:, :3] = self.pc_norm(sampled_data[:, :3])
        sampled_data = torch.tensor(sampled_data, dtype=torch.float32)
        
        # print(f"curvature time: {time3-time2:.6f} s")
        # print(f"density time: {time4-time3:.6f} s")
        # print(f"downsampling time: {time5-time4:.6f} s")
        # print(f"normal-entropy time: {time7-time6:.6f} s")
        # print(f"curvature-entropy time: {time8-time7:.6f} s")
        # Return the xyz point cloud and the per-point size vector.
        return sampled_data[:,:3],cubesize

    def get_label_from_filename(self, filename):
        """
        Extract a numeric label from a file name like 'model_123.stl'.

        :param filename: file name
        :return: integer label
        """
        label_str = filename.split('_')[1].split('.')[0]
        return int(label_str)

#### Adaptive sampling
class DrivAerDataset_stl4pre_random_adapt(Dataset):
    def __init__(self, data_dir, transform=None):
        """
        数据集初始化，支持STL文件读取和数据预处理。
        
        :param data_dir: 存储STL文件和其他数据的目录
        :param transform: 数据增强或预处理方法
        """
        self.data_root = "/home/zhpe/ldz/driver/data/drivenet4test-txt"
        self.subset = "int-norm8shape-valide.csv"
        self.data_list_file = os.path.join(self.data_root, self.subset)
        self.data_dir = data_dir
        self.transform = transform
        self.stl_files = [f for f in os.listdir(data_dir) if f.endswith('.stl')]
        # self.sample_list = pd.read_csv(self.data_list_file).values
        
    def __len__(self):
        """
        返回数据集大小。
        """
        return len(self.stl_files)

    
    def load_stl_with_normals(self,file_path):
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        vertices = stl_mesh.vertices
        normals = stl_mesh.vertex_normals  # 面的法向量
        volume = stl_mesh.volume
        bounding_box = stl_mesh.bounding_box.bounds  # Bounding box bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]  # x 轴的范围
        width = bounding_box[1, 1] - bounding_box[0, 1]   # y 轴的范围
        height = bounding_box[1, 2] - bounding_box[0, 2]  # z 轴的范围
        area = stl_mesh.area
        interg_mean_convx = stl_mesh.integral_mean_curvature
        return vertices, normals, length,width,height,volume,area,interg_mean_convx

    def compute_entropy(self, values):
        """
        计算给定值的熵。
        
        :param values: 输入的值
        :return: 熵值
        """
        hist, bin_edges = np.histogram(values, bins=20, density=True)
        probabilities = hist / hist.sum()
        entropy = -np.sum(probabilities * np.log(probabilities + 1e-9))
        return entropy

    def compute_normal_entropy(self, normals):
        """
        计算基于法向量的熵。
        
        :param normals: 法向量
        :return: 熵值
        """
        # 将法向量归一化到球面投影
        normal_magnitudes = np.linalg.norm(normals, axis=1)
        normalized_normals = normals / (normal_magnitudes[:, np.newaxis] + 1e-9)
        
        # 球面分区 (bins 分区)
        theta = np.arctan2(normalized_normals[:, 1], normalized_normals[:, 0])  # 投影到XY平面
        phi = np.arccos(normalized_normals[:, 2])  # Z方向角
        hist, _ = np.histogramdd((theta, phi), bins=20)
        
        probabilities = hist / hist.sum()
        entropy = -np.sum(probabilities * np.log(probabilities + 1e-9))
        return entropy

    def compute_entropy_from_cubes(self, points, cube_size=(8, 1)):
        """
        根据指定的立方体尺寸 (cube_size)，计算点云的熵。
        
        :param points: 下采样后的点云
        :param cube_size: 立方体大小，默认是8x1
        :return: 熵值
        """
        # 假设cube_size是 (8, 1)，我们将点云分布到此网格中
        x_min, y_min, z_min = points.min(axis=0)
        x_max, y_max, z_max = points.max(axis=0)
        
        # 计算每个维度的分割
        x_bins = np.linspace(x_min, x_max, cube_size[0])
        y_bins = np.linspace(y_min, y_max, cube_size[1])
        
        # 将点云分配到网格中
        grid, _, _ = np.histogram2d(points[:, 0], points[:, 1], bins=[x_bins, y_bins])
        
        # 将点云分布计算成概率分布
        grid = grid / grid.sum()
        
        # 计算熵
        entropy = -np.sum(grid * np.log(grid + 1e-9))
        return entropy

    def sample_points_random(self, vertices, vert_normal, num_sample=100000):
        # 确保输入的 vertices 和 vert_normal 是 numpy 数组
        vertices = np.array(vertices)
        vert_normal = np.array(vert_normal)

        # 获取总点数
        total_points = len(vertices)

        # 随机选择 num_samples 个点的索引
        random_indices = np.random.choice(total_points, size=num_sample, replace=False)

        # 获取对应的坐标和法向量
        sampled_points = vertices[random_indices]
        sampled_normals = vert_normal[random_indices]

        # 将坐标和法向量拼接为一个数据
        sampled_data = np.hstack([sampled_points, sampled_normals])

        return sampled_data
    
    # 重要性采样点
    def sample_points(self,vertices, vert_normal, curvatures, densities, vertical_axis=2, upper_samples=14000, lower_samples=6000):
        # Determine the vertical range
        pointmax = np.max(vertices[:, vertical_axis])
        pointmin = np.min(vertices[:, vertical_axis])
        midpoint = np.mean([pointmax, pointmin])
        # midpoint = (pointmax-pointmin)*
        # Create masks for upper and lower half based on vertical axis
        upper_mask = vertices[:, vertical_axis] > midpoint
        lower_mask = ~upper_mask

        # Separate points, normals, and curvatures based on the masks
        upper_points = vertices[upper_mask]
        lower_points = vertices[lower_mask]

        # upper_normals = vert_normal[upper_mask]
        # lower_normals = vert_normal[lower_mask]

        upper_curvatures = curvatures[upper_mask]
        lower_curvatures = curvatures[lower_mask]
        
        # upper_vert_degree = vert_degree[upper_mask]
        # lower_vert_degree = vert_degree[lower_mask]

        upper_densities = densities[upper_mask]
        lower_densities = densities[lower_mask]

        # Compute weights for sampling
        upper_weight = np.exp(upper_curvatures)
        upper_weight /= upper_weight.sum()
        lower_weight = 1 / (lower_densities + 1e-6)
        lower_weight /= lower_weight.sum()

        # Perform the sampling with probability distribution based on weights
        upper_indices = np.random.choice(len(upper_points), size=upper_samples, p=upper_weight)
        lower_indices = np.random.choice(len(lower_points), size=lower_samples, p=lower_weight)

        # Get sampled points, normals, and curvatures
        upper_sampled_points = upper_points[upper_indices]
        # upper_sampled_normals = upper_normals[upper_indices]
        upper_sampled_curvatures = upper_curvatures[upper_indices]
        # upper_sampled_degree = upper_vert_degree[upper_indices]

        lower_sampled_points = lower_points[lower_indices]
        # lower_sampled_normals = lower_normals[lower_indices]
        lower_sampled_curvatures = lower_curvatures[lower_indices]
        # lower_sampled_degree = lower_vert_degree[lower_indices]

        # Stack the results together (points, normals, and curvatures)
        # upper_sampled_data = np.hstack([upper_sampled_points, upper_sampled_normals, upper_sampled_curvatures.reshape(-1, 1)])
        # lower_sampled_data = np.hstack([lower_sampled_points, lower_sampled_normals, lower_sampled_curvatures.reshape(-1, 1)])
        upper_sampled_data = np.hstack([upper_sampled_points,  upper_sampled_curvatures.reshape(-1, 1)])
        lower_sampled_data = np.hstack([lower_sampled_points,  lower_sampled_curvatures.reshape(-1, 1)])

        # Combine the upper and lower sampled data
        sampled_data = np.vstack([upper_sampled_data, lower_sampled_data])

        # Return the combined array with shape [N, 7]
        return sampled_data

# 重要性采样点
    def sample_points_norm_adapt(self, vertices, vert_normal,densities, vertical_axis=2, upper_samples=700, lower_samples=300):
        # Determine the vertical range
        pointmax = np.max(vertices[:, vertical_axis])
        pointmin = np.min(vertices[:, vertical_axis])
        midpoint = np.mean([pointmax, pointmin])

        # Create masks for upper and lower half based on vertical axis
        upper_mask = vertices[:, vertical_axis] > midpoint
        lower_mask = ~upper_mask

        # Separate points, normals, and curvatures based on the masks
        upper_points = vertices[upper_mask]
        lower_points = vertices[lower_mask]

        upper_normals = vert_normal[upper_mask]
        lower_normals = vert_normal[lower_mask]

        # Separate densities for lower half
        upper_densities = densities[upper_mask]
        lower_densities = densities[lower_mask]

        # --- 上半部分：基于法向量采样 ---
        # 计算法向量相似度（余弦相似度）来定义采样权重
        def calculate_normal_weights(normals):
            # 计算法向量间的余弦相似度
            similarity_matrix = np.dot(normals, normals.T)
            np.fill_diagonal(similarity_matrix, -1)  # 排除自相似度
            
            # 将相似度转换为权重（相似的法向量权重更高）
            weights = np.exp(similarity_matrix)  # 进行指数变换以放大差异
            np.fill_diagonal(weights, 0)  # 排除自采样
            weights /= np.sum(weights, axis=1, keepdims=True)  # 归一化权重
            return np.mean(weights, axis=1)  # 对每行取平均权重

        # 计算上半部分的法向量权重
        upper_weight = calculate_normal_weights(upper_normals)

        # 归一化权重
        upper_weight /= np.sum(upper_weight)

        # 执行基于法向量的上半部分采样
        upper_indices = np.random.choice(len(upper_points), size=upper_samples, p=upper_weight)

        # 获取采样点
        upper_sampled_points = upper_points[upper_indices]
        upper_sampled_normals = upper_normals[upper_indices]

        # --- 下半部分：基于密度采样 ---
        # 对下半部分的密度进行归一化
        lower_weight = 1 / (lower_densities + 1e-6)  # 防止除零错误
        lower_weight /= np.sum(lower_weight)

        # 执行基于密度的下半部分采样
        lower_indices = np.random.choice(len(lower_points), size=lower_samples, p=lower_weight)

        # 获取采样点
        lower_sampled_points = lower_points[lower_indices]
        lower_sampled_normals = lower_normals[lower_indices]

        # 将上半部分和下半部分的采样结果合并
        upper_sampled_data = np.hstack([upper_sampled_points, upper_sampled_normals])
        lower_sampled_data = np.hstack([lower_sampled_points, lower_sampled_normals])

        # 合并最终的采样数据
        sampled_data = np.vstack([upper_sampled_data, lower_sampled_data])

        # 返回采样后的数据，形状为 [N, 6]（每个采样点的 3D 坐标和法向量）
        return sampled_data

        # 计算每个点的曲率
    
    def compute_curvature(self,vertices, k=10):
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, indices = knn.kneighbors(vertices)
        neighbors = vertices[indices]
        covariance_matrices = np.array([np.cov(neigh.T) for neigh in neighbors])
        eigvals = np.linalg.eigvalsh(covariance_matrices)
        curvatures = eigvals[:, 0]  # 最小特征值表示曲率
        return curvatures

    def pc_norm(self, pc):
        """ pc: NxC, return NxC """
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
        pc = pc / m
        return pc
        
# 计算每个点的局部密度（邻居的平均距离）
    def compute_density(self,vertices, k=10):
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, _ = knn.kneighbors(vertices)
        density = 1 / (distances[:, 1:].mean(axis=1) + 1e-6)
        return density

    def load_stl_with_normals_new(self, file_path, num_samples=1000000):
            # 记录读取 STL 文件的时间
        start_time = time.time()
        
        # 加载 STL 文件
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        
        # 记录加载 STL 文件后的时间
        load_time = time.time() - start_time
        print(f"Time to load STL mesh: {load_time:.6f} seconds")
        
        # # 获取网格的顶点和法向量
        vertices = stl_mesh.vertices
        normals = stl_mesh.vertex_normals  # 面的法向量
        
        # 计算网格的体积、边界框、面积等属性
        volume = stl_mesh.volume
        bounding_box = stl_mesh.bounding_box.bounds  # Bounding box bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]  # x 轴的范围
        width = bounding_box[1, 1] - bounding_box[0, 1]   # y 轴的范围
        height = bounding_box[1, 2] - bounding_box[0, 2]  # z 轴的范围
        area = stl_mesh.area
        interg_mean_convx = stl_mesh.integral_mean_curvature
        # 将 length, width, height 合并成一个 NumPy 数组
        lwh = np.array([length, width, height])
        # 记录下采样的时间
        start_time_sampling = time.time()
        
        # 均匀采样：基于面的采样
        sampled_points,face_indices = stl_mesh.sample(num_samples,return_index=True)  # 获取点云数据
        sampled_vertex_indices = stl_mesh.faces[face_indices]
        normals = normals[sampled_vertex_indices]
        # sampled_points = self.sample_points_random(vertices)
        # 计算采样点的法向量
        # closest_faces = stl_mesh.nearest.face(sampled_points)
        # sampled_normals = stl_mesh.face_normals[closest_faces]
        
        # 记录下采样后的时间
        sampling_time = time.time() - start_time_sampling
        print(f"Time to sample {num_samples} points: {sampling_time:.6f} seconds")
        
        # 保存采样的数据到 .txt 文件
        # output_file = file_path.replace('.stl', '_sampled_points_normals.txt')
        # np.savetxt(output_file, sampled_points, fmt='%.6f', delimiter=' ')
        # print(f"Saved sampled data to {output_file}")
        
        return sampled_points,normals, lwh

    def __getitem__(self, idx):
        """
        获取数据集中的一个样本，包括STL数据及相关标签。
        
        :param idx: 索引
        :return: 输入数据和标签
        """
        stl_filename = os.path.join(self.data_dir, self.stl_files[idx])
        time1 = time.time()
        # vertices, normals, length,width,height,volume,area,interg_mean_convx= self.load_stl_with_normals(stl_filename)
        sampled_data, normals, cubesize = self.load_stl_with_normals_new(stl_filename,num_samples=100000)
        # 打印 sampled_data 的 shape
        print("sampled_data shape:", sampled_data.shape)
        print("normals:", normals.shape)
        time2 = time.time()
        # cubesize = [length,width,height]
        # 随机采样到100000点
        # sampled_data = self.sample_points_random(vertices, normals)
        time22 = time.time()
        smp_vert = sampled_data[:,:3]
        # smp_norm = sampled_data[:,3:6]
        smp_norm = normals[:,:3]
        ### 10w点计算曲率
        curvatures = self.compute_curvature(smp_vert) 
        time3 = time.time()
        densities = self.compute_density(smp_vert)
        time4 = time.time()
        # sampled_data = self.sample_points_random(vertices, normals)
        ### 自适应采样
        # sampled_data = self.sample_points(smp_vert, smp_norm, curvatures, densities,vertical_axis=2, upper_samples=8000, lower_samples=2000)
        
        ### 基于法向量的采样8
        # sampled_data = self.sample_points_norm_adapt(smp_vert, smp_norm, densities,vertical_axis=2, upper_samples=7000, lower_samples=3000)
        
        ### 均匀采样 1.6
        sampled_data, cubesize = self.load_stl_with_normals_new(stl_filename)
        time5 = time.time()
        # sampled_curvatures = sampled_data[:, -1]  # 曲率在最后一列
        # sampled_normals = sampled_data[:, 3:6]
        # 计算基于法向量的熵
        time6 = time.time()
        # normal_entropy = self.compute_normal_entropy(sampled_normals)
        time7 = time.time()
        # 计算基于曲率的熵（假设使用点的Z坐标作为曲率代理）
        sampled_curvature_entropy = 0.2
        # sampled_curvature_entropy = self.compute_entropy(sampled_curvatures)
        time8 = time.time()
        # cubasize = [length,width,height,volume,area,interg_mean_convx,sampled_curvature_entropy,normal_entropy]
        # cubasize = [length,width,height,volume,area,sampled_curvature_entropy,normal_entropy]
        # max_vals = [5.130126953, 2.028320313, 1.760761201, 7.439345682, 40.72243571,500, 0.318805436, 5.407586516]
        # min_vals = [4.760537982, 2.025634766, 1.363978565, 6.968431614, 35.42832927,200, 0.124140312, 5.307547326]
        
        xyzsize = np.array([
            [4, 1.8, 1],
            [6, 2.6, 2]
        ], dtype=np.float32)

        # 计算最小值和最大值
        min_vals = xyzsize.min(axis=0)
        max_vals = xyzsize.max(axis=0)

        cubesize = np.array(cubesize)
        # min_vals = np.array(min_vals)
        # max_vals = np.array(max_vals)
        # int_vals = np.array(int_vals)
        # cubasize = (cubasize-min_vals)/(max_vals-min_vals)
        print("Cubesize before division:", cubesize)
                # 进行最大最小归一化
        cubesize = (cubesize - min_vals) / (max_vals - min_vals)
        print("Cubesize after division:", cubesize)
        # int_vals = [10, 10, 10, 10, 50, 500, 50, 1, 10]
        cubesize = cubesize.astype(np.float32)
        ### 支取L,W,H
        # cubasize = cubasize[:3]
        N,C = sampled_data.shape
        
        # curvature_entropy = self.compute_entropy(sampled_points[:, 2])
        cubesize = torch.tensor(cubesize)
        cubesize = cubesize.unsqueeze(0)  # 变为  [1, 3]
        cubesize = cubesize.repeat(N, 1)  # 扩展为 [C, 8]
        
        sampled_data[:, :3] = self.pc_norm(sampled_data[:, :3])
        sampled_data = torch.tensor(sampled_data, dtype=torch.float32)
        print(f"加载stl time: {time2-time1:.6f} 秒")
        print(f"随机采样到10000点 time: {time22-time2:.6f} 秒")
        print(f"计算曲率 time: {time3-time22:.6f} 秒")
        print(f"计算密度 time: {time4-time3:.6f} 秒")
        print(f"下采样 time: {time5-time4:.6f} 秒")
        print(f"计算法向熵 time: {time7-time6:.6f} 秒")
        print(f"计算曲率熵 time: {time8-time7:.6f} 秒")
        # 返回点云及熵信息
        return sampled_data[:,:3],cubesize

    def get_label_from_filename(self, filename):
        """
        从文件名中提取标签，假设文件名格式为‘model_123.stl’，标签为数字部分。
        
        :param filename: 文件名
        :return: 标签
        """
        label_str = filename.split('_')[1].split('.')[0]
        return int(label_str)

### With Cd labels; uniform surface sampling followed by importance sampling
class DrivAerDataset_stl4pre_random_cd(Dataset):
    def __init__(self, data_dir, config, transform=None):
        """
        Dataset pairing STL meshes with drag-coefficient (Cd) labels read from
        a CSV referenced by ``config``.

        :param data_dir: directory containing the STL files
        :param config: dict with config['paths']['csv_paths'] and config['paths']['subset']
        :param transform: optional augmentation/preprocessing callable (currently unused)
        """
        self.data_root = config['paths']['csv_paths']
        self.subset = config['paths']['subset']
        self.data_list_file = os.path.join(self.data_root, self.subset)
        self.data_dir = data_dir
        self.transform = transform
        self.stl_files = [f for f in os.listdir(data_dir) if f.endswith('.stl')]
        # CSV rows: [sample_name, label (Cd), ...] -- assumed layout; see __getitem__.
        self.sample_list = pd.read_csv(self.data_list_file).values

    def __len__(self):
        """Return the dataset size (number of STL files)."""
        return len(self.stl_files)

    def load_stl_with_normals(self, file_path, num_samples=10000):
        """
        Load an STL mesh and uniformly sample ``num_samples`` surface points.

        :param file_path: path to the STL file
        :param num_samples: number of surface points to draw
        :return: (num_samples, 3) array of sampled points
        """
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        return stl_mesh.sample(num_samples)

    ### 1-13 new
    def load_stl_with_normals_new(self, file_path, num_samples=200000):
        """
        Load an STL mesh, sample its surface and return the bounding-box extents.

        :param file_path: path to the STL file
        :param num_samples: number of surface points to draw
        :return: (sampled_points, lwh) or (None, None) when loading fails
        """
        start_time = time.time()
        try:
            stl_mesh = trimesh.load_mesh(file_path, file_type='stl')
        except Exception as e:
            # Best-effort: signal failure to the caller instead of crashing the loader.
            print(f"Error loading {file_path}: {e}")
            return None, None
        load_time = time.time() - start_time
        print(f"Time to load STL mesh: {load_time:.6f} seconds")

        bounding_box = stl_mesh.bounding_box.bounds  # [[min_xyz], [max_xyz]]
        length = bounding_box[1, 0] - bounding_box[0, 0]  # extent along x
        width = bounding_box[1, 1] - bounding_box[0, 1]   # extent along y
        height = bounding_box[1, 2] - bounding_box[0, 2]  # extent along z
        lwh = np.array([length, width, height])

        start_time_sampling = time.time()
        # Area-weighted surface sampling via trimesh.
        sampled_points = stl_mesh.sample(num_samples)
        sampling_time = time.time() - start_time_sampling
        print(f"Time to sample {num_samples} points: {sampling_time:.6f} seconds")

        return sampled_points, lwh

    def compute_entropy(self, values):
        """
        Shannon entropy of a 20-bin histogram of ``values``.

        :param values: 1-D array of scalars
        :return: entropy value (float)
        """
        hist, _ = np.histogram(values, bins=20, density=True)
        probabilities = hist / hist.sum()
        # 1e-9 guards log(0) for empty bins.
        return -np.sum(probabilities * np.log(probabilities + 1e-9))

    def compute_normal_entropy(self, normals):
        """
        Entropy of the normal directions, binned on the unit sphere.

        :param normals: (N, 3) array of (not necessarily unit) normals
        :return: entropy value (float)
        """
        normal_magnitudes = np.linalg.norm(normals, axis=1)
        normalized_normals = normals / (normal_magnitudes[:, np.newaxis] + 1e-9)
        # Spherical coordinates: azimuth in the XY plane, polar angle from +Z.
        theta = np.arctan2(normalized_normals[:, 1], normalized_normals[:, 0])
        phi = np.arccos(normalized_normals[:, 2])
        hist, _ = np.histogramdd((theta, phi), bins=20)
        probabilities = hist / hist.sum()
        return -np.sum(probabilities * np.log(probabilities + 1e-9))

    def compute_entropy_from_cubes(self, points, cube_size=(8, 1)):
        """
        Entropy of the point distribution over an XY grid.

        NOTE(review): np.histogram2d treats these linspace arrays as bin *edges*,
        so cube_size=(8, 1) gives 7 x-bins and zero y-bins (which would raise);
        no caller in this file uses this method -- confirm intended resolution.

        :param points: (N, 3) point cloud
        :param cube_size: number of bin edges along x and y
        :return: entropy value (float)
        """
        x_min, y_min, z_min = points.min(axis=0)
        x_max, y_max, z_max = points.max(axis=0)
        x_bins = np.linspace(x_min, x_max, cube_size[0])
        y_bins = np.linspace(y_min, y_max, cube_size[1])
        grid, _, _ = np.histogram2d(points[:, 0], points[:, 1], bins=[x_bins, y_bins])
        grid = grid / grid.sum()
        return -np.sum(grid * np.log(grid + 1e-9))

    def sample_points_random(self, vertices, vert_normal, num_sample=1000):
        """
        Uniform random subsample (without replacement) of points and normals.

        :param vertices: (N, 3) point coordinates
        :param vert_normal: (N, 3) per-point normals
        :param num_sample: number of points to keep (must be <= N)
        :return: (num_sample, 6) array [xyz | normal]
        """
        vertices = np.array(vertices)
        vert_normal = np.array(vert_normal)
        random_indices = np.random.choice(len(vertices), size=num_sample, replace=False)
        return np.hstack([vertices[random_indices], vert_normal[random_indices]])

    # Importance sampling
    def sample_points(self, vertices, curvatures, densities, vertical_axis=2, upper_samples=7000, lower_samples=3000):
        """
        Importance sampling: curvature-weighted above the vertical midpoint,
        inverse-density-weighted below it.

        :param vertices: (N, 3) point coordinates
        :param curvatures: (N,) per-point curvature
        :param densities: (N,) per-point local density
        :param vertical_axis: axis index separating upper/lower halves
        :param upper_samples: points drawn from the upper half
        :param lower_samples: points drawn from the lower half
        :return: (upper_samples + lower_samples, 4) array [xyz | curvature]
        """
        pointmax = np.max(vertices[:, vertical_axis])
        pointmin = np.min(vertices[:, vertical_axis])
        midpoint = np.mean([pointmax, pointmin])
        upper_mask = vertices[:, vertical_axis] > midpoint
        lower_mask = ~upper_mask

        upper_points = vertices[upper_mask]
        lower_points = vertices[lower_mask]
        upper_curvatures = curvatures[upper_mask]
        lower_curvatures = curvatures[lower_mask]
        lower_densities = densities[lower_mask]

        # Upper half: higher curvature -> higher sampling probability.
        upper_weight = np.exp(upper_curvatures)
        upper_weight /= upper_weight.sum()
        # Lower half: sparser regions -> higher sampling probability.
        lower_weight = 1 / (lower_densities + 1e-6)
        lower_weight /= lower_weight.sum()

        # Weighted draws (with replacement, np.random.choice default).
        upper_indices = np.random.choice(len(upper_points), size=upper_samples, p=upper_weight)
        lower_indices = np.random.choice(len(lower_points), size=lower_samples, p=lower_weight)

        upper_sampled_data = np.hstack([upper_points[upper_indices], upper_curvatures[upper_indices].reshape(-1, 1)])
        lower_sampled_data = np.hstack([lower_points[lower_indices], lower_curvatures[lower_indices].reshape(-1, 1)])
        return np.vstack([upper_sampled_data, lower_sampled_data])

    def compute_curvature(self, vertices, k=10):
        """
        PCA-based curvature proxy: smallest eigenvalue of each point's
        k-nearest-neighbour covariance matrix.

        :param vertices: (N, 3) point coordinates
        :param k: neighbourhood size
        :return: (N,) curvature values
        """
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        _, indices = knn.kneighbors(vertices)
        neighbors = vertices[indices]
        covariance_matrices = np.array([np.cov(neigh.T) for neigh in neighbors])
        eigvals = np.linalg.eigvalsh(covariance_matrices)
        return eigvals[:, 0]  # smallest eigenvalue ~ local surface variation

    def pc_norm(self, pc):
        """Centre the cloud and scale it into the unit sphere. pc: NxC -> NxC."""
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
        pc = pc / m
        return pc

    def compute_density(self, vertices, k=10):
        """
        Local density: inverse of the mean distance to the k-1 nearest neighbours.

        :param vertices: (N, 3) point coordinates
        :param k: neighbourhood size
        :return: (N,) density values
        """
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, _ = knn.kneighbors(vertices)
        # Column 0 is each point's distance to itself (0), hence distances[:, 1:].
        return 1 / (distances[:, 1:].mean(axis=1) + 1e-6)

    def __getitem__(self, idx):
        """
        Load one sample: surface-sample the STL, importance-resample it, and
        return normalized points, the repeated box size, the Cd label and name.

        Missing or unreadable STL files are skipped by advancing to the next
        index (note: recursion never terminates if *no* sample is loadable).

        :param idx: sample index
        :return: (points [N, 3], cubesize [N, 3], label tensor, sample name)
        """
        sample = self.sample_list[idx]
        parameter_data = np.array([sample[1]], dtype=np.float32)  # Cd label
        label_data = torch.tensor(parameter_data)
        sample_name = sample[0]

        stl_file_path = os.path.join(self.data_dir, f'{sample_name}.stl')
        if not os.path.exists(stl_file_path):
            print(f"Warning: STL file '{sample_name}.stl' not found in {self.data_dir}")
            return self.__getitem__((idx + 1) % len(self))  # skip to the next valid sample

        vertices, cubesize = self.load_stl_with_normals_new(stl_file_path, num_samples=200000)
        # Fix: the loader returns (None, None) on a failed load; previously this
        # fell through and crashed in compute_curvature(None).
        if vertices is None:
            return self.__getitem__((idx + 1) % len(self))

        curvatures = self.compute_curvature(vertices)
        densities = self.compute_density(vertices)
        ### Importance sampling
        sampled_data = self.sample_points(vertices, curvatures, densities, vertical_axis=2, upper_samples=5000, lower_samples=5000)

        # Reference L/W/H bounds used for min-max normalization of the box size.
        xyzsize = np.array([
            [4, 1.8, 1],
            [6, 2.6, 2]
        ], dtype=np.float32)
        min_vals = xyzsize.min(axis=0)
        max_vals = xyzsize.max(axis=0)
        # Min-max normalization into [-1, 1].
        cubesize = 2 * (cubesize - min_vals) / (max_vals - min_vals) - 1
        cubesize = cubesize.astype(np.float32)

        N, C = sampled_data.shape
        # Repeat the 3-vector so every point carries the box size: [N, 3].
        cubesize = torch.tensor(cubesize)
        cubesize = cubesize.unsqueeze(0)
        cubesize = cubesize.repeat(N, 1)

        sampled_data[:, :3] = self.pc_norm(sampled_data[:, :3])
        sampled_data = torch.tensor(sampled_data, dtype=torch.float32)
        return sampled_data[:, :3], cubesize, label_data, sample_name

    def get_label_from_filename(self, filename):
        """
        Extract the numeric label from a filename like 'model_123.stl'.

        :param filename: file name to parse
        :return: integer label
        """
        label_str = filename.split('_')[1].split('.')[0]
        return int(label_str)

### With Cd label; "950 test 1.6" split, uniform random surface sampling
class DrivAerDataset_stl4pre_random_cd_new(Dataset):
    """DrivAer STL dataset for Cd prediction (test split, random surface sampling).

    Each item pairs a surface point cloud sampled from an STL mesh with a
    per-point copy of the scaled bounding-box size (L, W, H) and the scalar
    Cd label read from a CSV file.
    """

    def __init__(self, data_dir, transform=None):
        """
        :param data_dir: directory holding the STL files
        :param transform: optional augmentation/preprocessing callable
                          (stored but currently unused)
        """
        # NOTE(review): hard-coded CSV location — consider promoting to a parameter.
        self.data_root = "/home/bingxing2/home/scx8ajl/ldz/Dirve/csvfiles/"
        self.subset = "DrivAer_test36.csv"
        self.data_list_file = os.path.join(self.data_root, self.subset)
        self.data_dir = data_dir
        self.transform = transform
        self.stl_files = [f for f in os.listdir(data_dir) if f.endswith('.stl')]
        # Rows assumed to be [sample_name, Cd, ...] and aligned with the order
        # of self.stl_files — TODO confirm alignment against the CSV.
        self.sample_list = pd.read_csv(self.data_list_file).values

    def __len__(self):
        """Dataset size = number of STL files found in data_dir."""
        return len(self.stl_files)

    def load_stl_with_normals(self, file_path, num_samples=10000):
        """Load an STL mesh and draw a uniform surface point sample.

        :param file_path: path to the .stl file
        :param num_samples: number of surface points to draw
        :return: sampled points, shape [num_samples, 3]
        """
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')
        sampled_points = stl_mesh.sample(num_samples)
        return sampled_points

    def load_stl_with_normals_new(self, file_path, num_samples=10000):
        """Load an STL mesh, sample its surface, and return box extents.

        :param file_path: path to the .stl file
        :param num_samples: number of surface points to draw
        :return: (sampled_points [num_samples, 3], lwh [3] bounding-box extents)
        """
        stl_mesh = trimesh.load_mesh(file_path, force='mesh')

        # Axis-aligned bounding-box extents along x / y / z.
        bounding_box = stl_mesh.bounding_box.bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]
        width = bounding_box[1, 1] - bounding_box[0, 1]
        height = bounding_box[1, 2] - bounding_box[0, 2]
        lwh = np.array([length, width, height])

        # Area-weighted uniform surface sampling. Fix: the original unpacked
        # `sample(num_samples)` into two values without return_index=True
        # (ValueError) and then *called* the normals array like a function
        # (TypeError). return_index=True yields the face index of each sample,
        # which indexes face_normals.
        sampled_points, face_indices = stl_mesh.sample(num_samples, return_index=True)
        normals = stl_mesh.face_normals[face_indices]  # computed but not returned (kept for parity)

        return sampled_points, lwh

    def compute_entropy(self, values):
        """Shannon entropy (natural log) of a 1-D value distribution.

        Uses a fixed 20-bin histogram; 1e-9 avoids log(0).

        :param values: array of scalar values
        :return: entropy as a float
        """
        hist, bin_edges = np.histogram(values, bins=20, density=True)
        probabilities = hist / hist.sum()
        entropy = -np.sum(probabilities * np.log(probabilities + 1e-9))
        return entropy

    def compute_normal_entropy(self, normals):
        """Entropy of the normal-direction distribution on the unit sphere.

        Normals are normalized, converted to spherical angles (theta, phi)
        and binned with a 20x20 histogram before computing entropy.

        :param normals: [N, 3] array of (possibly unnormalized) normals
        :return: entropy as a float
        """
        normal_magnitudes = np.linalg.norm(normals, axis=1)
        normalized_normals = normals / (normal_magnitudes[:, np.newaxis] + 1e-9)

        theta = np.arctan2(normalized_normals[:, 1], normalized_normals[:, 0])  # azimuth in XY plane
        phi = np.arccos(normalized_normals[:, 2])  # polar angle from +Z
        hist, _ = np.histogramdd((theta, phi), bins=20)

        probabilities = hist / hist.sum()
        entropy = -np.sum(probabilities * np.log(probabilities + 1e-9))
        return entropy

    def compute_entropy_from_cubes(self, points, cube_size=(8, 1)):
        """Entropy of the point distribution over an XY grid.

        NOTE(review): np.linspace(..., cube_size[i]) produces cube_size[i]
        *edges*, i.e. cube_size[i]-1 bins; with the default (8, 1) the y axis
        gets zero bins and np.histogram2d will fail. This method is not called
        by __getitem__ — confirm the intended cube_size before using it.

        :param points: [N, 3] point cloud
        :param cube_size: grid resolution (edge counts) for x and y
        :return: entropy as a float
        """
        x_min, y_min, z_min = points.min(axis=0)
        x_max, y_max, z_max = points.max(axis=0)

        x_bins = np.linspace(x_min, x_max, cube_size[0])
        y_bins = np.linspace(y_min, y_max, cube_size[1])

        grid, _, _ = np.histogram2d(points[:, 0], points[:, 1], bins=[x_bins, y_bins])

        # Convert counts to a probability distribution.
        grid = grid / grid.sum()

        entropy = -np.sum(grid * np.log(grid + 1e-9))
        return entropy

    def sample_points_random(self, vertices, num_sample=10000):
        """Draw num_sample vertices uniformly at random, without replacement.

        :param vertices: [N, 3] array-like of points (N must be >= num_sample)
        :param num_sample: number of points to keep
        :return: [num_sample, 3] array of sampled points
        """
        vertices = np.array(vertices)
        total_points = len(vertices)
        random_indices = np.random.choice(total_points, size=num_sample, replace=False)
        return vertices[random_indices]

    # Importance sampling of points
    def sample_points(self, vertices, vert_normal, curvatures, densities, vertical_axis=2, upper_samples=7000, lower_samples=3000):
        """Importance-sample the cloud: curvature-weighted above the vertical
        midpoint, inverse-density-weighted below it.

        Sampling uses np.random.choice with replacement (its default).

        :param vertices: [N, 3] points
        :param vert_normal: [N, 3] per-point normals
        :param curvatures: [N] per-point curvature values
        :param densities: [N] per-point local densities
        :param vertical_axis: axis index used to split upper/lower halves
        :param upper_samples: samples drawn from the upper half
        :param lower_samples: samples drawn from the lower half
        :return: [upper_samples + lower_samples, 7] array of
                 (xyz, normal, curvature) rows
        """
        # Split at the midpoint of the vertical extent.
        pointmax = np.max(vertices[:, vertical_axis])
        pointmin = np.min(vertices[:, vertical_axis])
        midpoint = np.mean([pointmax, pointmin])
        upper_mask = vertices[:, vertical_axis] > midpoint
        lower_mask = ~upper_mask

        upper_points = vertices[upper_mask]
        lower_points = vertices[lower_mask]

        upper_normals = vert_normal[upper_mask]
        lower_normals = vert_normal[lower_mask]

        upper_curvatures = curvatures[upper_mask]
        lower_curvatures = curvatures[lower_mask]

        upper_densities = densities[upper_mask]
        lower_densities = densities[lower_mask]

        # Upper half favors high curvature; lower half favors sparse regions.
        upper_weight = np.exp(upper_curvatures)
        upper_weight /= upper_weight.sum()
        lower_weight = 1 / (lower_densities + 1e-6)
        lower_weight /= lower_weight.sum()

        upper_indices = np.random.choice(len(upper_points), size=upper_samples, p=upper_weight)
        lower_indices = np.random.choice(len(lower_points), size=lower_samples, p=lower_weight)

        upper_sampled_points = upper_points[upper_indices]
        upper_sampled_normals = upper_normals[upper_indices]
        upper_sampled_curvatures = upper_curvatures[upper_indices]

        lower_sampled_points = lower_points[lower_indices]
        lower_sampled_normals = lower_normals[lower_indices]
        lower_sampled_curvatures = lower_curvatures[lower_indices]

        # Rows are (xyz, normal, curvature) -> 3 + 3 + 1 = 7 columns.
        upper_sampled_data = np.hstack([upper_sampled_points, upper_sampled_normals, upper_sampled_curvatures.reshape(-1, 1)])
        lower_sampled_data = np.hstack([lower_sampled_points, lower_sampled_normals, lower_sampled_curvatures.reshape(-1, 1)])

        return np.vstack([upper_sampled_data, lower_sampled_data])

    # Per-point curvature estimate
    def compute_curvature(self, vertices, k=10):
        """Estimate per-point curvature from the local PCA of k neighbors.

        The smallest eigenvalue of each neighborhood's covariance matrix is
        used as a curvature proxy (flat patches have a near-zero smallest
        eigenvalue).

        :param vertices: [N, 3] points
        :param k: neighborhood size
        :return: [N] curvature proxies
        """
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, indices = knn.kneighbors(vertices)
        neighbors = vertices[indices]
        covariance_matrices = np.array([np.cov(neigh.T) for neigh in neighbors])
        eigvals = np.linalg.eigvalsh(covariance_matrices)
        curvatures = eigvals[:, 0]  # smallest eigenvalue ~ curvature
        return curvatures

    def pc_norm(self, pc):
        """Center the cloud and scale it into the unit sphere. pc: NxC -> NxC."""
        centroid = np.mean(pc, axis=0)
        pc = pc - centroid
        m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
        # Epsilon guards a degenerate all-identical cloud (m == 0), matching
        # the module-level pc_normalize helper.
        pc = pc / (m + 1e-10)
        return pc

    # Per-point local density (inverse mean distance to k-1 nearest neighbors)
    def compute_density(self, vertices, k=10):
        """Estimate local density as 1 / mean distance to the k-1 nearest
        neighbors (column 0 is the point itself, so it is skipped).

        :param vertices: [N, 3] points
        :param k: neighborhood size
        :return: [N] density estimates
        """
        knn = NearestNeighbors(n_neighbors=k)
        knn.fit(vertices)
        distances, _ = knn.kneighbors(vertices)
        density = 1 / (distances[:, 1:].mean(axis=1) + 1e-6)
        return density

    def __getitem__(self, idx):
        """Fetch one sample.

        :param idx: dataset index
        :return: (points [N, 3] float32 tensor normalized to the unit sphere,
                  per-point size vector [N, 3] float32 tensor,
                  Cd label [1] float32 tensor,
                  sample name)
        """
        stl_filename = os.path.join(self.data_dir, self.stl_files[idx])
        sample = self.sample_list[idx]
        parameter_data = np.array([sample[1]], dtype=np.float32)  # Cd value
        label_data = torch.tensor(parameter_data)
        sample_name = sample[0]  # train CSVs keep the name in column 5, test CSVs in column 0

        sampled_data, cubesize = self.load_stl_with_normals_new(stl_filename)

        # Scale L/W/H into roughly [0, 1] by dividing by 10 (meters, presumably).
        int_vals = [10, 10, 10]
        cubesize = np.array(cubesize)
        # Fix: np.ndarray has no .abs() method — the original
        # `(cubesize / int_vals).abs()` raised AttributeError.
        cubesize = np.abs(cubesize / int_vals)
        print("Cubesize after division:", cubesize)
        cubesize = cubesize.astype(np.float32)

        N = sampled_data.shape[0]
        # Broadcast the 3-vector to one copy per sampled point: [N, 3].
        cubesize = torch.tensor(cubesize)
        cubesize = cubesize.unsqueeze(0)
        cubesize = cubesize.repeat(N, 1)

        sampled_data = self.pc_norm(sampled_data)
        sampled_data = torch.tensor(sampled_data, dtype=torch.float32)
        return sampled_data, cubesize, label_data, sample_name

    def get_label_from_filename(self, filename):
        """Extract the integer label from a name like 'model_123.stl'.

        :param filename: STL file name
        :return: integer label
        """
        label_str = filename.split('_')[1].split('.')[0]
        return int(label_str)


# class newDrivAerDataset(Dataset):
#     def __init__(self, config,subset):
#         self.config = config
#         self.data_root = config['paths']['csv_paths']
#         self.subset = subset
#         self.shape_path = config['paths']['shape_path']
#         # self.npoints = config.N_POINTS
#         # self.norm_method = config.NORM_METHOD

#         # self.data_list_file = os.path.join(self.data_root, self.subset)
#         self.data_list_file = os.path.join(self.data_root, self.subset)
        
#         # self.sample_points_num = config.npoints
#         # self.permutation = np.arange(self.npoints)
#         self.epoch = 0
        
#         self.sample_list = pd.read_csv(self.data_list_file).values
        
#         # 保存外形坐标字典
#         self.shape_path = os.path.join(self.shape_path)

    
#     def norm_data(self,):
#         columns_to_scale = self.sample_list.drop(['Design'])
#         names = self.sample_list['Design']
#         data_scale = self.sample_list[columns_to_scale].values
#         standard_data = normalizaAllData(data_scale, self.norm_val1, self.norm_val2, method=self.norm_method)
       
#         standardized_df = pd.DataFrame(standard_data, columns=columns_to_scale)
#         standardized_df.insert(0, 'Design', names)
        
#         return standardized_df
            
#     def get_norm_info(self,):
#         return self.config, self.norm_val1, self.norm_val2
    
#     def pc_norm(self, pc):
#         """ pc: NxC, return NxC """
#         centroid = np.mean(pc, axis=0)
#         pc = pc - centroid
#         m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
#         pc = pc / m
#         return pc
        

#     def random_sample(self, pc, num):
#         np.random.shuffle(self.permutation)
#         pc = pc[self.permutation[:num]]
#         return pc
    
#     def set_epoch(self, epoch):
#         self.epoch = epoch
        
#     def transfomer_position(self, position):
#         x = position[:,0]
#         y = position[:,1]
#         z = position[:,2]
#         new_position = torch.stack([-z,y,x], dim=1)
#         return new_position
    
        
#     def __getitem__(self, idx):
#         sample = self.sample_list[idx]
#         sample_name = sample[5] ##tain 5, test 0
        
#         shape_data = np.txtload(os.path.join(self.shape_path, f'{sample_name}.txt'))  ## N*8
#         # 对前三列进行 pc_norm 归一化
#         shape_data[:, :3] = self.pc_norm(shape_data[:, :3])
#         shape_data = torch.tensor(shape_data, dtype=torch.float32)
#         # indices = [1, 3, 4, 5, 6, 7, 8, 10, 11]
#         indices = [1, 3, 4, 5]
#         parameter_data = sample[indices].astype(np.float32)

#         # input_data = torch.tensor(input_data)        
#         label_data = torch.tensor(parameter_data[0])         
#         cubasize = parameter_data[1:] ### 
#         # 原始数据
#         xyzsize = np.array([
#             [4.3195825, 1.8442736, 1.3227063],
#             [5.2373676, 2.3877249, 1.761713]
#         ], dtype=np.float32)

#         # 计算最小值和最大值
#         min_vals = xyzsize.min(axis=0)
#         max_vals = xyzsize.max(axis=0)
#         # 进行最大最小归一化
#         # cubasize = (cubasize - min_vals) / (max_vals - min_vals)
#         ###
#         #print("shape_data 初始形状:", cubasize.shape)
#         N, C = shape_data.shape[0], shape_data.shape[1]
#              # 使用 unsqueeze 和 repeat 扩展 cubasize 为 [B, N, 3]
#         cubasize = torch.tensor(cubasize)
#         cubasize = cubasize.unsqueeze(0)  # 变为  [1, 3]
#         cubasize = cubasize.repeat(N, 1)  # 扩展为 [B, N, 3]
#         # cubasize = torch.tensor(cubasize)  ### B,N,3
#         #print("cubasize 处理后形状:", cubasize.shape)
#         return shape_data[:, :3], label_data, sample_name,cubasize

#     def __len__(self):
#         return len(self.sample_list)


  
if __name__ == '__main__':
    # Directory where the training data lives.
    data_dir = "./data/train/"

    # Quick smoke test: build the dataset, wrap it in a loader, and
    # inspect a single batch.
    loader = DataLoader(PointCloudDataset(data_dir), batch_size=1, shuffle=True)
    for point, label in loader:
        print(point[0])
        print(label.shape)
        break
