import os
import time, csv
import trimesh
import numpy as np
from sklearn.neighbors import NearestNeighbors
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import pandas as pd
# from sklearn.neighbors import NearestNeighbors

# Load an STL file and extract its points and normals.
def load_stl_with_normals(file_path):
    """Load an STL mesh and return (vertices, face_normals, mesh)."""
    mesh = trimesh.load_mesh(file_path, force='mesh')
    # Per-face normals; per-vertex normals are read off the mesh by callers.
    return mesh.vertices, mesh.face_normals, mesh


# Estimate a curvature value for every point.
def compute_curvature(vertices, k=10):
    """Per-vertex curvature proxy from the local neighbourhood shape.

    For each vertex, the covariance of its k nearest neighbours is built
    and the smallest eigenvalue taken: near-zero for locally flat patches,
    larger where the neighbourhood bends away from a plane.  Values are
    rounded to 10 decimal places.
    """
    nn = NearestNeighbors(n_neighbors=k).fit(vertices)
    _, neighbor_idx = nn.kneighbors(vertices)
    # One covariance matrix per neighbourhood, stacked into [N, 3, 3].
    covs = np.array([np.cov(vertices[idx].T) for idx in neighbor_idx])
    # eigvalsh returns eigenvalues in ascending order; take the smallest.
    smallest_eig = np.linalg.eigvalsh(covs)[:, 0]
    return np.round(smallest_eig, 10)


# Local point density (inverse of the mean neighbour distance).
def compute_density(vertices, k=10):
    """Per-vertex density: reciprocal of the mean distance to the k-1
    nearest neighbours.

    Column 0 of the KNN distances is the zero self-distance, so it is
    skipped; the epsilon guards against division by zero for duplicates.
    """
    nn = NearestNeighbors(n_neighbors=k).fit(vertices)
    dists, _ = nn.kneighbors(vertices)
    mean_gap = dists[:, 1:].mean(axis=1)
    return 1 / (mean_gap + 1e-6)


# Count the mesh faces on the windward side.
def compute_windward_faces(normals, direction=(1, 0, 0)):
    """Count faces whose normal opposes the wind direction.

    With the wind blowing along `direction`, a face is windward when its
    outward normal points into the oncoming flow, i.e. when the dot
    product of normal and wind direction is NEGATIVE.  (The original
    inline comment claimed "dot > 0 means windward", contradicting the
    code; the `< 0` test is the correct one and is kept.)

    Parameters:
    normals   -- array of face normals, shape [F, 3]
    direction -- wind direction vector (any sequence of 3 numbers)

    Returns the number of windward faces as an int.
    """
    wind = np.asarray(direction, dtype=float)
    dots = normals @ wind  # dot product of each normal with the wind vector
    return int(np.sum(dots < 0))


# Entropy of a scalar feature (e.g. curvature) distribution.
def compute_entropy(values):
    """Shannon entropy (natural log) of a 20-bin histogram of `values`.

    The histogram is renormalised into probabilities; a small epsilon
    inside the log keeps empty bins from producing -inf.
    """
    hist = np.histogram(values, bins=20, density=True)[0]
    probs = hist / hist.sum()
    return -(probs * np.log(probs + 1e-9)).sum()


# Entropy of the normal-vector orientation distribution.
def compute_normal_entropy(normals):
    """Shannon entropy (natural log) of normal orientations on the sphere.

    Each normal is scaled to unit length, converted to spherical angles
    (azimuth from the XY projection, polar angle from the Z component),
    binned on a 20x20 angular grid, and the entropy of the bin occupancy
    probabilities is returned.
    """
    # Unit-normalise; epsilon guards against zero-length normals.
    lengths = np.linalg.norm(normals, axis=1)
    unit = normals / (lengths[:, np.newaxis] + 1e-9)

    # Spherical coordinates of each unit normal.
    azimuth = np.arctan2(unit[:, 1], unit[:, 0])
    polar = np.arccos(unit[:, 2])
    counts = np.histogramdd((azimuth, polar), bins=20)[0]

    probs = counts / counts.sum()
    return -(probs * np.log(probs + 1e-9)).sum()


# Weighted sampling of the point cloud, split at the vertical midpoint.
def sample_points(vertices, vert_normal, curvatures, densities, vertical_axis=2, upper_samples=5000, lower_samples=5000):
    """Sample the cloud with different strategies above and below its
    vertical midpoint.

    The upper half is sampled (with replacement) proportionally to
    exp(curvature), favouring high-curvature detail; the lower half is
    sampled proportionally to 1/density, favouring sparse regions.

    Returns an array of shape [upper_samples + lower_samples, 7] whose
    columns are x, y, z, nx, ny, nz, curvature (upper rows first).
    """
    # Split the cloud at the mid-height along the chosen axis.
    axis_vals = vertices[:, vertical_axis]
    midpoint = np.mean([np.max(axis_vals), np.min(axis_vals)])
    in_upper = axis_vals > midpoint

    def _select(mask):
        # Slice every per-vertex attribute with the same mask.
        return vertices[mask], vert_normal[mask], curvatures[mask], densities[mask]

    up_pts, up_nrm, up_crv, _ = _select(in_upper)
    lo_pts, lo_nrm, lo_crv, lo_dens = _select(~in_upper)

    # Curvature-driven weights on top, sparsity-driven weights on the bottom.
    w_up = np.exp(up_crv)
    w_up = w_up / w_up.sum()
    w_lo = 1 / (lo_dens + 1e-6)
    w_lo = w_lo / w_lo.sum()

    # Weighted draws (with replacement), upper half first.
    pick_up = np.random.choice(len(up_pts), size=upper_samples, p=w_up)
    pick_lo = np.random.choice(len(lo_pts), size=lower_samples, p=w_lo)

    # Assemble [x, y, z, nx, ny, nz, curvature] rows for each half.
    top = np.hstack([up_pts[pick_up], up_nrm[pick_up], up_crv[pick_up].reshape(-1, 1)])
    bottom = np.hstack([lo_pts[pick_lo], lo_nrm[pick_lo], lo_crv[pick_lo].reshape(-1, 1)])

    return np.vstack([top, bottom])


### Sampling based on farthest-point sampling (FPS) plus KNN
def sample_points_fps_knn_all(vertices, vert_normal, vert_degree, curvatures, densities, num_samples=10000):
    """Sample the cloud with FPS, then densify with a KNN-guided pass.

    Parameters:
    vertices: vertex coordinates, shape [N, 3]
    vert_normal: per-vertex normals, shape [N, 3]
    vert_degree: per-vertex degree feature, shape [N,]
    curvatures: per-vertex curvature, shape [N,]
    densities: per-vertex density, shape [N,] (currently unused)
    num_samples: number of FPS points; the KNN pass adds the same number
        again, so the output has 2 * num_samples rows

    Returns:
    sampled_data: array of shape [2 * num_samples, 8] with columns
        x, y, z, nx, ny, nz, degree, curvature

    Bug fixes versus the original version:
    - the KNN pass returned indices INTO the FPS subset but applied them
      to `vertices`, selecting unrelated points; the indices are now
      mapped back through `fps_indices`;
    - the `num_additional_samples` argument was ignored (the loop ran
      over every point, yielding N extra rows); the requested count is
      now honoured.
    """

    def _farthest_point_sampling(points, count):
        """Greedy FPS: repeatedly take the point farthest from the chosen set.

        Returns an index array of shape [count,] into `points`.
        """
        num_points = points.shape[0]
        # Seed with a random start point.
        chosen = [np.random.randint(num_points)]
        min_dist = np.full(num_points, np.inf)
        for _ in range(count - 1):
            last = points[chosen[-1]].reshape(1, -1)
            # Distance of every point to the nearest already-chosen point.
            min_dist = np.minimum(min_dist, np.linalg.norm(points - last, axis=1))
            chosen.append(int(np.argmax(min_dist)))
        return np.array(chosen)

    # FPS over the whole cloud.
    fps_indices = _farthest_point_sampling(vertices, num_samples)

    def _knn_based_sampling(points, anchor_indices, count):
        """Pick `count` extra original-cloud indices near the FPS anchors.

        For each of `count` randomly chosen query points, find its 5
        nearest FPS anchors and keep one at random; positions into the
        FPS subset are mapped back to indices into `points`.
        """
        nbrs = NearestNeighbors(n_neighbors=5).fit(points[anchor_indices])
        queries = np.random.choice(len(points), size=count)
        _, neighbor_rows = nbrs.kneighbors(points[queries])
        picked = np.array([np.random.choice(row) for row in neighbor_rows])
        # Map subset positions back to original vertex indices.
        return anchor_indices[picked]

    # KNN-based densification pass.
    additional_indices = _knn_based_sampling(vertices, fps_indices, num_samples)
    all_indices = np.concatenate([fps_indices, additional_indices])

    # Stack the results together (points, normals, degree, curvature).
    sampled_data = np.hstack([
        vertices[all_indices],
        vert_normal[all_indices],
        vert_degree[all_indices].reshape(-1, 1),
        curvatures[all_indices].reshape(-1, 1),
    ])

    return sampled_data

# Append one feature row to a CSV file.
def append_to_csv(csv_path, row):
    """Append dict `row` to the CSV at `csv_path`, writing a header line
    first if (and only if) the file does not already exist."""
    needs_header = not os.path.isfile(csv_path)
    with open(csv_path, mode='a', newline='') as handle:
        writer = csv.DictWriter(handle, fieldnames=row.keys())
        if needs_header:
            writer.writeheader()
        writer.writerow(row)

### Look up a Cd value
def get_cd_from_csv(csv_path, input_name):
    """Fetch the Cd value recorded for `input_name` in a two-column CSV.

    The first column is treated as the file name and the second as the
    Cd value (whatever the actual column headers are).

    Parameters:
    - csv_path: path of the CSV file
    - input_name: file name to look up (string)

    Returns:
    - the Cd value, or None when the CSV cannot be read, has fewer than
      two columns, or does not contain the name (a message is printed in
      each failure case)
    """
    try:
        table = pd.read_csv(csv_path)

        # Need at least two columns: file name and Cd value.
        if table.shape[1] < 2:
            print("CSV文件格式错误，应包含至少两列：文件名和Cd值。")
            return None

        # Positional column access: first = name, second = Cd.
        name_col = table.columns[0]
        cd_col = table.columns[1]

        match = table[table[name_col] == input_name]
        if match.empty:
            print(f"文件名 '{input_name}' 不存在于CSV文件中。")
            return None

        return match[cd_col].values[0]
    except Exception as e:
        print(f"读取CSV文件时出错: {e}")
        return None


def process_single_stl(stl_file, input_dir, output_dir, csv_path, cd_path, wind_direction=(1, 0, 0)):
    """Process a single STL file end to end.

    Loads the mesh, computes per-vertex curvature/density and global shape
    descriptors, writes a sampled point cloud to
    `<output_dir>/<name>.txt`, and appends one row of scalar features -
    including the Cd value looked up in `cd_path` - to the CSV at
    `csv_path`.  Any exception is caught and printed so one bad file does
    not abort a batch run.
    """
    try:
        start_time = time.time()
        stl_path = os.path.join(input_dir, stl_file)

        # Load vertices, face normals, and mesh
        vertices, face_normals, mesh = load_stl_with_normals(stl_path)
        vert_normal = mesh.vertex_normals
        # vert_degree = mesh.vertex_degree

        # Per-vertex curvature and local density (both KNN-based)
        timeC1 = time.time()
        curvatures = compute_curvature(vertices)
        timeC2 = time.time()
        densities = compute_density(vertices)

        # Entropies over ALL points.
        # NOTE(review): these two values and `windward_count` below are
        # computed but never written into the CSV row - confirm intended.
        curvature_entropy = compute_entropy(curvatures)
        normal_entropy = compute_normal_entropy(vert_normal)

        # Number of faces facing the oncoming wind
        windward_count = compute_windward_faces(face_normals, direction=wind_direction)

        # Global shape descriptors: volume, bounding-box extents, area,
        # and integral mean curvature
        volume = mesh.volume
        bounding_box = mesh.bounding_box.bounds  # Bounding box bounds
        length = bounding_box[1, 0] - bounding_box[0, 0]  # extent along the x axis
        width = bounding_box[1, 1] - bounding_box[0, 1]   # extent along the y axis
        height = bounding_box[1, 2] - bounding_box[0, 2]  # extent along the z axis
        boundary_area = mesh.area
        interg_mean_convx = mesh.integral_mean_curvature

        # Sample points based on curvature and density
        time1 = time.time()
        sampled_data = sample_points(
            vertices, vert_normal, curvatures, densities,
            vertical_axis=2, upper_samples=5000, lower_samples=5000
        )
        time2 = time.time()
        
        # output_path = os.path.join(output_dir, os.path.splitext(stl_file)[0] + '.txt')
        # np.save(output_path, sampled_data)
        output_path = os.path.join(output_dir, os.path.splitext(stl_file)[0] + '.txt')
        # np.savetxt(output_path, sampled_data, fmt='%.10f', delimiter=',')
        np.savetxt(output_path, sampled_data, fmt='%.10f', delimiter=' ')

        #### sample with fps and knn
        # sampled_data = sample_points_fps_knn_all(
        #     vertices, vert_normal, vert_degree, curvatures, densities
        # )
        # Extract sampled curvature and normals
        sampled_curvatures = sampled_data[:, -1]  # curvature is the last column
        sampled_normals = sampled_data[:, 3:6]    # normals occupy columns 4-6

        # Compute entropy based on sampled points
        timeD0 = time.time()
        sampled_curvature_entropy = compute_entropy(sampled_curvatures)
        timeD1 = time.time()
        sampled_normal_entropy = compute_normal_entropy(sampled_normals)
        timeD2 = time.time()
        file_name = os.path.splitext(stl_file)[0]
        cd = get_cd_from_csv(cd_path, file_name)
        # Prepare the row to append to the CSV file
        row = {
            "file_name": file_name,
            "Cd": cd,
            "length": length,
            "width": width,
            "height": height,
            "volume": volume,
            "area": boundary_area,
            "interg_mean_convx": interg_mean_convx,
            "smp_curvature_entropy": sampled_curvature_entropy,
            "smp_normal_entropy": sampled_normal_entropy
        }

        # Append scalar data to CSV
        append_to_csv(csv_path, row)

        elapsed_time = time.time() - start_time
        print(f"convx entro in {timeD1-timeD0:.2f} seconds")
        print(f"normal entro in {timeD2-timeD1:.2f} seconds")
        print(f"compute {stl_file} conv in {timeC2-timeC1:.2f} seconds")
        print(f"sampling {stl_file} in {time2-time1:.2f} seconds")
        print(f"Processed {stl_file} in {elapsed_time:.2f} seconds")
    except Exception as e:
        print(f"Failed to process {stl_file}: {e}")


# Process every STL file in a directory (parallel version).
def process_stl_directory_parallel(input_dir, output_dir, csv_path, cd_path, max_workers=10):
    """Process all STL files in `input_dir` using worker processes.

    Each file is handled by `process_single_stl`, which writes a sampled
    point cloud into `output_dir` and appends one row of scalar features
    to `csv_path` (looking the Cd value up in `cd_path`).

    Fixes versus the original: the extension match is now
    case-insensitive (so `.STL` files are no longer skipped) and results
    are collected inside the `with` block before the executor is torn
    down.

    NOTE(review): workers append to the same CSV concurrently via
    `append_to_csv`; confirm interleaved rows are acceptable.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Case-insensitive extension match.
    stl_files = [f for f in os.listdir(input_dir) if f.lower().endswith(".stl")]

    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(process_single_stl, stl_file, input_dir, output_dir, csv_path, cd_path)
            for stl_file in stl_files
        ]
        # Surface any error that escaped the worker (e.g. pickling
        # failures); process_single_stl already swallows its own errors.
        for future in futures:
            future.result()


# Script entry point
if __name__ == "__main__":
    # Directory of input STL meshes.
    input_dir = "/home/bingxing2/home/scx8ajl/ldz/Dirve/data/DrivAerNetPlusSTL-Test"
    # Directory that receives the sampled point-cloud .txt files.
    output_dir = "/home/bingxing2/home/scx8ajl/ldz/Dirve/data/5-5-950-test"
    # CSV collecting per-mesh scalar features.
    csv_path = "/home/bingxing2/home/scx8ajl/ldz/Dirve/csvfiles/5-5-950-test.csv"
    # CSV mapping mesh names to drag coefficients (Cd).
    cd_path =  "/home/bingxing2/home/scx8ajl/ldz/Dirve/csvfiles/原始DrivAerNetPlusPlus_Drag_8k.csv" 

    process_stl_directory_parallel(input_dir, output_dir, csv_path, cd_path, max_workers=8)
