import os
import sys
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
import numpy as np
import multiprocessing
import threading
import torch
import matplotlib

matplotlib.use("Agg")

import h5py
import math
import vtk
import pymeshlab
import subprocess
import sys
import utils.DS_utils as DS_utils
from utils.utilities import (
    calc_cell_centered_with_node_attr,
    calc_node_centered_with_cell_attr,
)

import numpy as np
import csv
import h5py
import torch
from utils import get_param
from scipy.spatial import cKDTree
import torch
from torch_geometric.nn import knn_interpolate

# Flush any pending stdout output once at import time (note: this does not
# disable buffering for subsequent writes).
sys.stdout.flush()

# Initialize lock for file writing synchronization
lock = threading.Lock()


class Basemanager:
    
    def extract_vertices_and_faces(self, polydata):
        """Extract vertex coordinates and per-cell vertex indices from a VTK dataset.

        Parameters:
        - polydata: VTK dataset exposing GetPoints()/GetCell() accessors.

        Returns:
        - (vertices, faces, polydata): vertices is an (N, 3) array of point
          coordinates, faces is an array of per-cell point-id lists (assumes a
          uniform cell size so np.array yields a rectangular array), and the
          input dataset is passed through unchanged.
        """
        # Vertex coordinates.
        point_container = polydata.GetPoints()
        num_points = point_container.GetNumberOfPoints()
        vertices = np.array([point_container.GetPoint(idx) for idx in range(num_points)])

        # Per-cell connectivity (point ids).
        connectivity = []
        for cell_id in range(polydata.GetNumberOfCells()):
            current_cell = polydata.GetCell(cell_id)
            point_ids = [
                current_cell.GetPointId(local_id)
                for local_id in range(current_cell.GetNumberOfPoints())
            ]
            connectivity.append(point_ids)

        return vertices, np.array(connectivity), polydata
    
    def polygon_area(self, vertices):
        """Compute the area of a simple polygon via the shoelace formula.

        Parameters:
        - vertices: (N, 2) array of polygon vertex coordinates, in order.

        Returns:
        - The enclosed area as a non-negative float.
        """
        xs = vertices[:, 0]
        ys = vertices[:, 1]
        shifted_xs = np.roll(xs, 1)
        shifted_ys = np.roll(ys, 1)
        return 0.5 * np.abs(np.dot(xs, shifted_ys) - np.dot(ys, shifted_xs))

    def elements_to_faces(self, polydata):
        """Compute the per-cell edge (face-node) list of a mesh.

        Parameters:
        - polydata: VTK dataset exposing GetNumberOfCells()/GetCell().

        Returns:
        - cells_face_node: (total_edges, 2) int array; each row is the pair of
          point ids bounding one cell edge.
        - cells_index: (total_edges, 1) int array mapping every edge row back
          to the index of the cell it came from.
        """
        per_cell_edges = []
        num_cells = polydata.GetNumberOfCells()

        for i in range(num_cells):
            cell = polydata.GetCell(i)
            edge_indices = []
            for edge_id in range(cell.GetNumberOfEdges()):
                edge = cell.GetEdge(edge_id)
                edge_indices.append([int(edge.GetPointId(0)), int(edge.GetPointId(1))])
            per_cell_edges.append(edge_indices)

        # Flatten to one (total_edges, 2) array.  Building it cell-by-cell also
        # supports meshes whose cells have differing edge counts.
        cells_face_node = np.concatenate(
            [np.asarray(edge_list) for edge_list in per_cell_edges], axis=0
        )

        # One owning-cell index per edge row.  BUG FIX: the original sized this
        # with the *last* cell's GetNumberOfPoints(), which is wrong for meshes
        # with mixed cell sizes and for 3D cells where #edges != #points.
        cells_index = np.concatenate(
            [
                np.full((len(edge_list), 1), cell_id, dtype=np.int64)
                for cell_id, edge_list in enumerate(per_cell_edges)
            ],
            axis=0,
        )

        return cells_face_node, cells_index

    def position_relative_to_line_pytorch(A, B, angle_c):
        # A是点的坐标，表示为(x, y)的元组
        # B是一个数组，shape为[nums, 2]，其中nums为参与判断的点数量，2为xy坐标
        # angle_c是与X轴的夹角，以角度为单位

        # 将输入转换为张量
        A = torch.tensor(A, dtype=torch.float64)
        B = torch.tensor(B, dtype=torch.float64)
        angle_c = torch.tensor(angle_c, dtype=torch.float64)

        # 计算直线的方向向量
        direction_vector = torch.tensor(
            [
                torch.cos(angle_c * math.pi / 180.0),
                torch.sin(angle_c * math.pi / 180.0),
            ],
            dtype=torch.float64,
        )

        # 计算向量AB
        vector_AB = B - A

        # 计算两个向量的叉积，注意这里使用广播
        cross_product = (
            direction_vector[0] * vector_AB[:, 1]
            - direction_vector[1] * vector_AB[:, 0]
        )

        # 判断每个点相对于直线的位置，返回一个mask
        mask = cross_product > 0
        return mask.view(-1, 1)  # 调整shape为[nums, 1]

    def is_convex(self, polygon):
        """Check whether a polygon is convex.

        Walks consecutive vertex triplets (a, b, c) and evaluates the 2D cross
        product of (a - b) and (c - b); a negative value at any corner marks
        the polygon as non-convex.  Zero cross products (collinear points) are
        accepted.

        Parameters:
        - polygon: (N, 2) array of vertex coordinates, in order.

        Returns:
        - True when no corner yields a negative cross product, else False.
        """
        count = len(polygon)
        for idx in range(count):
            a = polygon[idx]
            b = polygon[(idx + 1) % count]
            c = polygon[(idx + 2) % count]
            ba = a - b
            bc = c - b
            # Scalar 2D cross product, computed explicitly.
            turn = ba[0] * bc[1] - ba[1] * bc[0]
            if turn < 0:
                return False
        return True

    def reorder_polygon(self, polygon):
        """Sort polygon vertices counter-clockwise around their centroid.

        Parameters:
        - polygon: (N, 2) array of vertex coordinates in arbitrary order.

        Returns:
        - (N, 2) array with rows ordered by polar angle about the centroid.
        """
        center = np.mean(polygon, axis=0)
        angles = np.arctan2(polygon[:, 1] - center[1], polygon[:, 0] - center[0])
        # Stable sort so vertices with identical angles keep their input
        # order, matching the behaviour of Python's sorted().
        order = np.argsort(angles, kind="stable")
        return np.array(polygon[order])

    def ensure_counterclockwise(self, cells, mesh_pos):
        """
        确保每个单元的顶点是按逆时针顺序排列的，并且是凸的。
        :param cells: 单元的索引数组。
        :param mesh_pos: 顶点的坐标数组。
        :return: 调整后的cells数组。
        """
        for i, cell in enumerate(cells):
            vertices = mesh_pos[cell]
            if not self.is_convex(vertices):
                vertices = self.reorder_polygon(vertices)
                sorted_indices = sorted(
                    range(len(cell)),
                    key=lambda k: list(map(list, vertices)).index(
                        list(mesh_pos[cell][k])
                    ),
                )
                cells[i] = np.array(cell)[sorted_indices]
        return cell

    def is_equal(self, x, pivot):
        """Test whether |x| matches |pivot| within a fixed tolerance of 1e-8.

        Parameters:
        - x (float or int): value to compare.
        - pivot (float or int): reference value.

        Returns:
        - (bool): True when abs(x) lies in
          [abs(pivot) - 1e-8, abs(pivot) + 1e-8], inclusive.
        """
        lower = abs(pivot) - float(1e-8)
        upper = abs(pivot) + float(1e-8)
        return lower <= abs(x) <= upper

    def make_edges_unique(self, cells_face_node, cells_node, cells_index):
        """Deduplicate cell edges into a single undirected edge list.

        Each edge's endpoints are sorted so that (a, b) and (b, a) collapse to
        the same row, then duplicate rows are removed.

        Parameters:
        - cells_face_node: (E, 2) integer tensor of edge endpoint node ids.
        - cells_node, cells_index: accepted for interface compatibility; not
          used by this routine.

        Returns a dict with:
        - "edge_index": (E_unique, 2) int64 tensor of unique undirected edges.
        - "cells_face_node_unbiased": the input edge tensor, untouched.
        - "cells_face_node_biased": the input edges with sorted endpoints.
        """
        sorted_endpoints = torch.sort(cells_face_node, dim=1)[0]
        low = sorted_endpoints[:, 0]
        high = sorted_endpoints[:, 1]
        canonical_edges = torch.stack((low, high), dim=1)
        unique_edges = torch.unique(
            canonical_edges, return_inverse=False, return_counts=False, dim=0
        ).to(torch.int64)

        return {
            "edge_index": unique_edges,
            "cells_face_node_unbiased": cells_face_node,
            "cells_face_node_biased": canonical_edges,
        }

    def create_neighbor_matrix(self, vertex_coords, edges):
        """
        Build a padded adjacency table for an undirected graph.

        Parameters:
        vertex_coords (Tensor): [n, 2] vertex coordinates (only n is used).
        edges (Tensor): [m, 2] pairs of vertex indices.

        Returns:
        (Tensor, Tensor): an [n, max_degree] matrix where row i lists the
        neighbors of vertex i (padded with -1), and the maximum degree.
        """
        num_vertices = vertex_coords.shape[0]
        # Wrap indices into range; out-of-range ids alias onto valid vertices.
        wrapped_edges = edges % num_vertices

        # Degree of every vertex: each endpoint occurrence counts once.
        degree = torch.zeros(num_vertices, dtype=torch.int64)
        degree.scatter_add_(
            0, wrapped_edges.view(-1), torch.ones_like(wrapped_edges.view(-1))
        )

        # Widest row needed for the padded table.
        max_neighbors = degree.max()

        # -1 marks "no neighbor" padding.
        neighbor_matrix = torch.full(
            (num_vertices, max_neighbors), -1, dtype=torch.int64
        )

        # Next free slot per vertex while filling.
        fill_cursor = torch.zeros(num_vertices, dtype=torch.int64)

        for start, end in wrapped_edges:
            # Record the edge in both directions (undirected graph).
            neighbor_matrix[start, fill_cursor[start]] = end
            fill_cursor[start] += 1
            neighbor_matrix[end, fill_cursor[end]] = start
            fill_cursor[end] += 1

        return neighbor_matrix, max_neighbors

    def generate_directed_edges(self, cells_node):
        """Enumerate all unordered node pairs of one cell, one direction each.

        Parameters:
        - cells_node: sequence of node ids belonging to a single cell.

        Returns:
        - list of [a, b] pairs; a pair is skipped when its reverse was already
          emitted (only relevant when node ids repeat within the cell).
        """
        kept_edges = []
        total = len(cells_node)
        for first in range(total):
            for second in range(first + 1, total):
                candidate = [cells_node[first], cells_node[second]]
                mirrored = [cells_node[second], cells_node[first]]
                # Keep only one orientation of each pair.
                if mirrored not in kept_edges:
                    kept_edges.append(candidate)
        return kept_edges

    def compose_edge_index_x(
        self, face_node, cells_face_node_biased, cells_node, cells_index
    ):
        """Extend an edge list with every missing intra-cell node pair.

        For each cell, all possible (endpoint-sorted) node pairs are generated;
        pairs that do not already appear among the cell's own face edges are
        appended to a copy of ``face_node``.

        Parameters:
        - face_node: (E, 2) tensor of existing edges to extend.
        - cells_face_node_biased: per-cell face edges with sorted endpoints.
        - cells_node: flat tensor of node ids, grouped per cell.
        - cells_index: flat tensor assigning each entry to a cell id; assumed
          to index both cells_node and cells_face_node_biased consistently —
          TODO confirm against callers.

        Returns:
        - Tensor of shape (E + extras, 2) with the appended edges.

        NOTE(review): quadratic per cell (Python loops plus a broadcast scan
        per candidate) — acceptable for small cells only.
        """
        face_node_x = face_node.clone()

        for i in range(cells_index.max() + 1):
            # Select this cell's entries.
            mask_cell = (cells_index == i).view(-1)
            current_cells_face_node_biased = cells_face_node_biased[mask_cell]
            current_cells_node = cells_node[mask_cell]
            # All candidate pairs within the cell, endpoints sorted ascending.
            all_possible_edges, _ = torch.tensor(
                self.generate_directed_edges(current_cells_node)
            ).sort(dim=-1)

            for edge in all_possible_edges:
                edge = edge.unsqueeze(0)
                # Append the candidate only when it matches none of the cell's
                # existing face edges (broadcast row-wise equality).
                if (edge.unsqueeze(0) == current_cells_face_node_biased).all(
                    dim=-1
                ).sum() < 1:
                    face_node_x = torch.cat((face_node_x, edge), dim=0)

        return face_node_x

    def convert_to_tensors(self, input_dict):
        """Convert the values of a dict (or items of a list) to torch tensors.

        numpy arrays go through ``torch.from_numpy`` (zero-copy); anything
        that is not already a tensor goes through ``torch.tensor``; existing
        tensors are left untouched.  The container is modified in place.

        Parameters:
        - input_dict: dict or list whose values/items should become tensors.

        Returns:
        - The same container, with converted values.
        """
        if isinstance(input_dict, dict):
            for key, value in list(input_dict.items()):
                if isinstance(value, np.ndarray):
                    input_dict[key] = torch.from_numpy(value)
                elif not isinstance(value, torch.Tensor):
                    input_dict[key] = torch.tensor(value)
        elif isinstance(input_dict, list):
            for position, value in enumerate(input_dict):
                if isinstance(value, np.ndarray):
                    input_dict[position] = torch.from_numpy(value)
                elif not isinstance(value, torch.Tensor):
                    input_dict[position] = torch.tensor(value)
        return input_dict

    def convert_to_numpy(self, input_dict):
        """Convert the values of a dict (or items of a list) to numpy arrays.

        torch tensors are converted with ``.numpy()``; everything else is
        round-tripped through ``torch.tensor(...).numpy()``.  The container is
        modified in place.

        Parameters:
        - input_dict: dict or list whose values/items should become arrays.

        Returns:
        - The same container, with converted values.
        """
        if isinstance(input_dict, dict):
            for key in input_dict.keys():
                value = input_dict[key]
                if isinstance(value, torch.Tensor):
                    input_dict[key] = value.numpy()
                else:
                    input_dict[key] = torch.tensor(value).numpy()
        elif isinstance(input_dict, list):
            # BUG FIX: the original iterated ``range(len(input))`` over the
            # ``input`` builtin, raising TypeError for every list argument.
            for i in range(len(input_dict)):
                value = input_dict[i]
                if isinstance(value, torch.Tensor):
                    input_dict[i] = value.numpy()
                else:
                    input_dict[i] = torch.tensor(value).numpy()
        return input_dict

    def compute_node_normals(self, polydata):
        """Compute per-point surface normals for a VTK surface mesh.

        vtkUnstructuredGrid inputs are first converted to vtkPolyData via a
        geometry filter; normals are then generated with auto-orientation and
        without sharp-edge splitting.

        Parameters:
        - polydata: vtkPolyData or vtkUnstructuredGrid surface mesh.

        Returns:
        - (normals_array, polydata_with_normals): an (N, 3) numpy array of
          point normals and the vtkPolyData carrying the computed normals.
        """
        # Unstructured grids must become polygonal data first.
        if isinstance(polydata, vtk.vtkUnstructuredGrid):
            to_surface = vtk.vtkGeometryFilter()
            to_surface.SetInputData(polydata)
            to_surface.Update()
            polydata = to_surface.GetOutput()

        normal_filter = vtk.vtkPolyDataNormals()
        normal_filter.SetInputData(polydata)
        normal_filter.ComputePointNormalsOn()   # per-vertex normals
        normal_filter.ComputeCellNormalsOn()    # per-cell normals
        normal_filter.AutoOrientNormalsOn()     # make orientations consistent
        normal_filter.SplittingOff()            # do not split sharp edges
        normal_filter.Update()

        with_normals = normal_filter.GetOutput()

        # Pull the point normals into a numpy array.
        point_normals = with_normals.GetPointData().GetNormals()
        normals_array = np.array(
            [point_normals.GetTuple(i) for i in range(point_normals.GetNumberOfTuples())]
        )

        return normals_array, with_normals

    def compute_ao(self, mesh_pos=None, cells=None, track="trackA", rays=64):
        """Compute a per-vertex ambient-occlusion scalar with pymeshlab.

        Parameters:
        - mesh_pos: (N, 3) vertex coordinates.
        - cells: (M, 3) triangle vertex indices.
        - track: "trackA" uses scalar ambient occlusion; any other value uses
          volumetric obscurance.
        - rays: number of sampling rays.

        Returns:
        - (N, 1) numpy array of AO values, min-max normalized to [0, 1].
        """
        meshset = pymeshlab.MeshSet()
        meshset.add_mesh(pymeshlab.Mesh(vertex_matrix=mesh_pos, face_matrix=cells))

        # Two scoring back-ends, selected by track.
        if track == "trackA":
            meshset.compute_scalar_ambient_occlusion(rays=rays)
        else:
            meshset.compute_scalar_by_volumetric_obscurance(rays=rays)

        ao_numpy = np.array(meshset.current_mesh().vertex_scalar_array())

        # Min-max normalization to [0, 1].
        ao_numpy = (ao_numpy - ao_numpy.min()) / (ao_numpy.max() - ao_numpy.min())

        return np.expand_dims(ao_numpy, axis=-1)

    def compute_distances_to_sphere_surface(self, dataset):
        """Attach each node's distance to a fixed sphere surface to the dataset.

        Positions are centered on their mean, so the sphere is assumed to sit
        at the centroid of the point cloud with a hard-coded diameter of 10.

        Parameters:
        - dataset: dict containing "node|pos" as an (N, D) tensor.

        Returns:
        - The same dict with "node|distances_to_surface" added ((N,) tensor).
        """
        positions = dataset["node|pos"]
        centered = positions - positions.mean(dim=0)

        # Radial distance of every node from the centroid.
        radial = torch.norm(centered, dim=1)

        # Sphere radius (diameter 10).
        sphere_radius = 10 / 2

        # Unsigned distance to the spherical shell.
        dataset["node|distances_to_surface"] = torch.abs(radial - sphere_radius)

        return dataset

    @staticmethod
    def get_hexahedral_edges(polydata):
        """Collect the edges of every hexahedral cell in a VTK dataset.

        Declared @staticmethod because the body never uses instance state; the
        original definition lacked ``self``, so instance calls would silently
        bind the instance as ``polydata``.

        Parameters:
        - polydata: VTK dataset exposing GetNumberOfCells()/GetCell().

        Returns:
        - (num_hexes, 12, 2) int array of point-id pairs, one group of edge
          pairs per hexahedral cell (non-hex cells are skipped).
        """
        edges = []
        num_cells = polydata.GetNumberOfCells()

        for i in range(num_cells):
            cell = polydata.GetCell(i)
            if cell.GetCellType() == vtk.VTK_HEXAHEDRON:
                edge_indices = []
                for edge_id in range(cell.GetNumberOfEdges()):
                    edge = cell.GetEdge(edge_id)
                    edge_indices.append([int(edge.GetPointId(0)), int(edge.GetPointId(1))])
                edges.append(edge_indices)

        return np.array(edges)
    
    def knn_interpolate_wrapper(self, phi, mesh_pos_A, mesh_pos_B, k=3):
        """Interpolate features from one point cloud onto another via KNN.

        Parameters:
        - phi: source features, shape [N, C] (numpy array or torch tensor).
        - mesh_pos_A: source point coordinates, shape [N, 3].
        - mesh_pos_B: target point coordinates, shape [M, 3].
        - k: number of nearest neighbours used by the interpolation.

        Returns:
        - numpy array of interpolated features, shape [M, C].
        """

        def as_float_tensor(value):
            # Accept numpy input; always work in float32.
            if isinstance(value, torch.Tensor):
                return value.float()
            return torch.from_numpy(value).float()

        features = as_float_tensor(phi)
        source_pos = as_float_tensor(mesh_pos_A)
        target_pos = as_float_tensor(mesh_pos_B)

        # Delegate to torch_geometric's KNN interpolation.
        interpolated = knn_interpolate(features, source_pos, target_pos, k=k)

        return interpolated.numpy()
    
    def create_mesh_mask(self, ply_pos, mesh_pos, tolerance=1e-6):
        """Mark which points of ``mesh_pos`` also occur in ``ply_pos``.

        A mesh point counts as present when some reference point lies within
        ``tolerance`` of it in every coordinate (Chebyshev distance).

        BUG FIX: the original used ``np.isin`` on raw coordinate values, which
        matched individual scalars anywhere in the reference cloud (not whole
        points) and ignored ``tolerance`` entirely.

        Parameters:
        - ply_pos: (N1, 3) numpy array or torch tensor of reference points.
        - mesh_pos: (N2, 3) numpy array or torch tensor of points to test.
        - tolerance (float): per-coordinate match threshold.

        Returns:
        - np.ndarray: (N2, 1) boolean mask.
        """

        def to_numpy(tensor):
            if isinstance(tensor, np.ndarray):
                return tensor
            elif 'torch' in str(type(tensor)):
                return tensor.cpu().numpy()
            else:
                raise TypeError("Input must be a NumPy array or PyTorch tensor")

        # Normalize inputs to float32 numpy arrays.
        ply_pos = to_numpy(ply_pos).astype(np.float32)
        mesh_pos = to_numpy(mesh_pos).astype(np.float32)

        mask = np.zeros((mesh_pos.shape[0], 1), dtype=bool)

        for i, point in enumerate(mesh_pos):
            # A match requires every coordinate to agree within tolerance.
            per_point_match = np.all(np.abs(ply_pos - point) <= tolerance, axis=1)
            mask[i] = np.any(per_point_match)

        return mask

class VtkMesh(Basemanager):
    """
    Tecplot .dat file is only supported with Tobias`s airfoil dataset ,No more data file supported
    """

    def __init__(self, path=None):
        """Load a VTK mesh and collect its basic connectivity information.

        Parameters:
        - path: dict with at least "mesh_file_path" pointing at a .vtk file.

        Side effects: stores the raw unstructured grid, a triangulated
        surface, and a ``mesh_info`` dict (positions, cell connectivity,
        edges, velocity target).
        """
        self.path = path
        # Read the mesh from the VTK file.
        vtk_filename = path["mesh_file_path"]

        unstructureddata, _, tri_polydata = self.read_vtk(vtk_filename)

        self.unstructureddata = unstructureddata
        self.tri_polydata = tri_polydata

        # Extract coordinates and per-cell vertex indices.
        mesh_pos, cells_node, _ = self.extract_vertices_and_faces(unstructureddata)

        cells_face_node, cells_index = self.elements_to_faces(unstructureddata)

        try:
            # Training split: the target velocity field is stored in the file.
            velocity = DS_utils.get_velocity_data(unstructureddata)
        except Exception:
            # Test split: no target available, fall back to zeros.  (Narrowed
            # from a bare ``except:`` so KeyboardInterrupt/SystemExit still
            # propagate.)
            velocity = np.zeros((mesh_pos.shape[0], 3))

        self.mesh_info = {
            "node|pos": mesh_pos,
            "cell|cells_node": cells_node,
            "cells_node": cells_node.reshape(-1, 1),
            "cells_index": cells_index.reshape(-1, 1),
            "cells_face_node": cells_face_node,
            'node|velocity': velocity,
        }
    
    def read_vtk(self, file_path):
        """Read a legacy .vtk unstructured grid and derive surface variants.

        Parameters:
        - file_path: path to the .vtk file.

        Returns:
        - (grid_backup, surface_polydata, triangulated): a deep copy of the
          raw grid, its extracted polygonal surface, and a triangulated copy
          of that surface (quads are split into triangles when present).
        """
        vtk_reader = vtk.vtkUnstructuredGridReader()
        vtk_reader.SetFileName(file_path)
        vtk_reader.Update()
        raw_grid = vtk_reader.GetOutput()

        # Preserve an untouched deep copy of the raw grid.
        grid_backup = vtk.vtkUnstructuredGrid()
        grid_backup.DeepCopy(raw_grid)

        # Convert the unstructured grid into polygonal surface data.
        surface_filter = vtk.vtkGeometryFilter()
        surface_filter.SetInputData(raw_grid)
        surface_filter.Update()
        surface_polydata = surface_filter.GetOutput()

        # Work on a copy so the extracted surface stays intact.
        surface_copy = vtk.vtkPolyData()
        surface_copy.DeepCopy(surface_polydata)

        # Triangulate quad meshes; triangle meshes pass through unchanged.
        if self.is_quad_mesh(surface_copy):
            triangulated = self.quad_to_triangle(surface_copy)
        else:
            triangulated = surface_copy

        return grid_backup, surface_polydata, triangulated

    def is_quad_mesh(self, polydata):
        """Return True when the dataset contains at least one VTK_QUAD cell."""
        seen_types = {
            polydata.GetCellType(i) for i in range(polydata.GetNumberOfCells())
        }
        # VTK_QUAD has cell-type id 9.
        return vtk.VTK_QUAD in seen_types

    def quad_to_triangle(self, polydata):
        """Split every polygonal cell into triangles via vtkTriangleFilter."""
        to_triangles = vtk.vtkTriangleFilter()
        to_triangles.SetInputData(polydata)
        to_triangles.Update()
        return to_triangles.GetOutput()
    
    def find_velocity_field(self, unstructured_grid):
        """Locate the first 3-component point-data array and return it.

        Parameters:
        - unstructured_grid: VTK dataset with point-data arrays.

        Returns:
        - (num_points, 3) numpy array of the first 3-component field found.

        Raises:
        - ValueError when no 3-component point array exists.
        """
        point_data = unstructured_grid.GetPointData()

        for array_id in range(point_data.GetNumberOfArrays()):
            array = point_data.GetArray(array_id)
            # Any 3-component array is treated as the velocity field.
            if array.GetNumberOfComponents() == 3:
                velocity_field = np.array(
                    [array.GetTuple(j) for j in range(array.GetNumberOfTuples())]
                )
                print(f"Found velocity field with array name: {array.GetName()}")
                return velocity_field

        raise ValueError("No 3D velocity field found in the VTK file.")

    def extract_mesh(self):
        """
        all input dataset values should be pytorch tensor object

        Assemble the sample dict for this velocity mesh: cell centroids,
        unique faces (with centers and lengths), unit normals interpolated
        from the companion surface mesh, and SDF values on a 64^3 voxel grid
        as well as at the mesh nodes.
        """
        dataset = self.convert_to_tensors(self.mesh_info)
        cells_node = dataset["cells_node"][:, 0]
        cells_index = dataset["cells_index"][:, 0]

        """>>>compute centroid crds>>>"""
        mesh_pos = dataset["node|pos"]
        # Cell centroid = mean of the cell's node positions.
        centroid = calc_cell_centered_with_node_attr(
            node_attr=dataset["node|pos"],
            cells_node=cells_node,
            cells_index=cells_index,
            reduce="mean",
        )
        dataset["centroid"] = centroid
        """<<<compute centroid crds<<<"""

        """ >>>   compose face  and face_center_pos >>> """
        decomposed_cells = self.make_edges_unique(
            dataset["cells_face_node"],
            cells_node.view(-1, 1),
            cells_index.view(-1, 1),
        )

        face_node = decomposed_cells["edge_index"].T
        dataset["face_node"] = face_node

        # Face midpoint = average of its two endpoint positions.
        face_center_pos = (mesh_pos[face_node[0]] + mesh_pos[face_node[1]]) / 2.0
        dataset["face_center_pos"] = face_center_pos
        """ <<<   compose face   <<< """

        """ >>>   compute face length   >>>"""
        face_length = torch.norm(
            (mesh_pos[face_node[0]] - mesh_pos[face_node[1]]), dim=1, keepdim=True
        )
        dataset["face_length"] = face_length
        """ <<<   compute face length   <<<"""

        """ >>> unit normal vector >>> """
        # ply_path = {"mesh_file_path":self.path["mesh_file_path"].replace("_vel","_press").replace("vel_","mesh_").replace(".vtk",".ply"),
        #             "data_file_path":self.path["mesh_file_path"].replace("_vel","_press").replace("vel_","press_").replace(".vtk",".npy")}

        # plydata = PlyMesh(ply_path)
        # ply_pos = plydata.mesh_info["node|pos"]
        # ply_cells_node = plydata.mesh_info["cells_node"]
        # Locate the companion surface ("mesh_") file; the test split stores
        # it under a "_no_pressure" suffix.
        if self.path["split"]=="train":
            vtk_path = {"mesh_file_path":self.path["mesh_file_path"].replace("vel_","mesh_"),
                        "data_file_path":self.path["mesh_file_path"].replace("vel_","mesh_")}
        else:
            vtk_path = {"mesh_file_path":self.path["mesh_file_path"].replace("vel_","mesh_").replace(".vtk","_no_pressure.vtk"),
                        "data_file_path":self.path["mesh_file_path"].replace("vel_","mesh_").replace(".vtk","_no_pressure.vtk")}

        vtkdata = VtkMesh(vtk_path)
        vtk_pos = vtkdata.mesh_info["node|pos"]
        # Normals live on the surface mesh; transfer them to this mesh's
        # nodes with 8-nearest-neighbour interpolation.
        node_unv,_ = self.compute_node_normals(polydata=vtkdata.tri_polydata)
        dataset["unit_norm_v"] = self.knn_interpolate_wrapper(node_unv, vtk_pos, mesh_pos, k=8)
        """ <<< unit normal vector <<< """

        """ <<< surface mesh pos mask <<< """
        # vtk_path = {"mesh_file_path":self.path["mesh_file_path"].replace("vel_","mesh_"),
        #             "data_file_path":self.path["mesh_file_path"].replace("vel_","mesh_")}

        # vtkdata = VtkMesh(vtk_path)
        # vtk_pos = vtkdata.mesh_info["node|pos"]
        # boundary_node_mask = self.create_mesh_mask(vtk_pos, mesh_pos, tolerance=1e-4)

        # if boundary_node_mask.sum()!=ply_pos.shape[0]:
        #     raise ValueError("Boundary node mask is not equal to ply_pos, maybe ply number is not equal to vtk number")
        """ <<< surface mesh pos mask <<< """

        """ >>> sdf voxel precomputing >>> """
        _, vtk_cells_node, _ = self.extract_vertices_and_faces(vtkdata.tri_polydata)
        # Global bounding box shared by the whole dataset split.
        bounds = np.loadtxt(
            os.path.join(os.path.dirname(self.path["mesh_file_path"]), "watertight_global_bounds.txt")
        )
        # Signed distance field sampled on a regular 64^3 grid ...
        grid, sdf = DS_utils.compute_sdf_grid(
            vtk_pos, vtk_cells_node.reshape(-1, 3), bounds, [64, 64, 64]
        )
        # ... and at the (irregular) mesh node positions.
        sdf_irrgular = DS_utils.compute_sdf_query_points(vtk_pos, vtk_cells_node.reshape(-1, 3), mesh_pos).reshape(-1,1)
        """ <<< sdf voxel precomputing <<< """

        output_dict = {
            "node|pos": dataset["node|pos"],
            "node|velocity": dataset["node|velocity"],
            "node|unit_norm_v":dataset["unit_norm_v"],
            # "node|boundary_node_mask":boundary_node_mask,
            "face|face_node": dataset["face_node"],
            "face|face_center_pos": dataset["face_center_pos"],
            "face|face_length": dataset["face_length"],
            "cell|centroid": dataset["centroid"],
            "cells_node": dataset["cells_node"],
            "cells_index": dataset["cells_index"],
            "sdf_irrgular":sdf_irrgular,
            "voxel|grid": grid,
            "voxel|sdf": sdf,
        }

        # write to vtk file
        # point_data_dict = {
        #     "node|pos": dataset["node|pos"],
        #     "node|velocity": dataset["node|velocity"],
        #     "node|unit_norm_v":dataset["unit_norm_v"],
        #     "node|sdf_irrgular":sdf_irrgular,
        # }
        # file_path = "/lvm_data/litianyu/mycode-new/CIKM_car_race/datasets/conveted_dataset/converttest/vel_sdf_unv.vtu"
        # DS_utils.add_point_data_and_save_vtu(self.unstructureddata, self.convert_to_numpy(point_data_dict.copy()), file_path)

        return output_dict

class PlyMesh(Basemanager):
    """
    Tecplot .dat file is only supported with Tobias`s airfoil dataset ,No more data file supported
    """

    def __init__(self, path=None):
        """Load a .ply surface mesh and its optional pressure targets.

        Parameters:
        - path: dict with "mesh_file_path" (.ply) and "data_file_path" (.npy
          pressure values; absent at test time).
        """
        self.path = path

        _, polydata = self.read_ply(path["mesh_file_path"])

        self.polydata = polydata

        mesh_pos, cells_node, _ = self.extract_vertices_and_faces(polydata)

        cells_face_node, cells_index = self.elements_to_faces(polydata)

        try:
            # Training split: load per-node pressure plus normalization stats.
            pressuredata = np.expand_dims(np.load(path["data_file_path"]), axis=1)
            press_mean_std = np.loadtxt(f"{os.path.dirname(path['data_file_path'])}/train_pressure_min_std.txt")

        except Exception:
            # Test split: no target .npy file — fall back to zeros and an
            # identity normalization.  (Narrowed from a bare ``except:`` so
            # KeyboardInterrupt/SystemExit still propagate.)
            pressuredata = np.zeros((mesh_pos.shape[0], 1))
            press_mean_std = np.array([0 , 1])

        self.mesh_info = {
            "node|pos": mesh_pos,
            "cell|cells_node": cells_node,
            "cells_node": cells_node.reshape(-1, 1),
            "cells_index": cells_index.reshape(-1, 1),
            "cells_face_node": cells_face_node,
            # NOTE(review): pressure rows 16..111 are deliberately dropped —
            # presumably padding/sensor rows; confirm against the dataset spec.
            "node|pressure": np.concatenate(
                (pressuredata[0:16], pressuredata[112:]), axis=0
            ),
            "press_mean_std": press_mean_std,
        }
    
    def read_ply(self, file_path):
        """Read a .ply file; return the reader and its vtkPolyData output."""
        ply_reader = vtk.vtkPLYReader()
        ply_reader.SetFileName(file_path)
        ply_reader.Update()
        return ply_reader, ply_reader.GetOutput()
    
    def extract_mesh(self):
        """
        all input dataset values should be pytorch tensor object

        Assemble the sample dict for this surface mesh: cell centroids, unique
        faces (with centers and lengths), per-node normals, a 64^3 SDF voxel
        grid, ambient occlusion, and the pressure normalization statistics.
        """
        dataset = self.convert_to_tensors(self.mesh_info)
        cells_node = dataset["cells_node"][:, 0]
        cells_index = dataset["cells_index"][:, 0]

        """>>>compute centroid crds>>>"""
        mesh_pos = dataset["node|pos"]
        # Cell centroid = mean of the cell's node positions.
        centroid = calc_cell_centered_with_node_attr(
            node_attr=dataset["node|pos"],
            cells_node=cells_node,
            cells_index=cells_index,
            reduce="mean",
        )
        dataset["centroid"] = centroid
        """<<<compute centroid crds<<<"""

        """ >>>   compose face  and face_center_pos >>> """
        decomposed_cells = self.make_edges_unique(
            dataset["cells_face_node"],
            cells_node.view(-1, 1),
            cells_index.view(-1, 1),
        )

        face_node = decomposed_cells["edge_index"].T
        dataset["face_node"] = face_node

        # Face midpoint = average of its two endpoint positions.
        face_center_pos = (mesh_pos[face_node[0]] + mesh_pos[face_node[1]]) / 2.0
        dataset["face_center_pos"] = face_center_pos
        """ <<<   compose face   <<< """

        """ >>>   compute face length   >>>"""
        face_length = torch.norm(
            (mesh_pos[face_node[0]] - mesh_pos[face_node[1]]), dim=1, keepdim=True
        )
        dataset["face_length"] = face_length
        """ <<<   compute face length   <<<"""

        """ >>> unit normal vector >>> """
        # Normals are computed directly on this surface mesh.
        node_unv,_ = self.compute_node_normals(polydata=self.polydata)

        dataset["unit_norm_v"] = node_unv
        """ <<< unit normal vector <<< """

        # sdf voxel precomputing
        # Global bounding box shared by the whole dataset split.
        bounds = np.loadtxt(
            os.path.join(os.path.dirname(self.path["mesh_file_path"]), "watertight_global_bounds.txt")
        )
        pos = dataset["node|pos"]
        grid, sdf = DS_utils.compute_sdf_grid(
            pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64]
        )

        # ambient occlusion computing
        ao = self.compute_ao(mesh_pos=mesh_pos, cells=dataset["cells_node"].reshape(-1, 3), track="trackA", rays=1000)
        dataset['node|ao'] = ao

        output_dict = {
            "node|pos": dataset["node|pos"],
            "node|pressure": dataset["node|pressure"],
            "node|unit_norm_v": dataset["unit_norm_v"],
            "face|face_node": dataset["face_node"],
            "face|face_center_pos": dataset["face_center_pos"],
            "face|face_length": dataset["face_length"],
            "cell|centroid": dataset["centroid"],
            "cells_node": dataset["cells_node"],
            "cells_index": dataset["cells_index"],
            "voxel|grid": grid,
            "voxel|sdf": sdf,
            'node|ao': dataset['node|ao'] ,
            "press_mean_std":dataset["press_mean_std"],
        }

        # write to vtk file
        # DS_utils.write_to_vtk(self.convert_to_numpy(output_dict.copy()), f"/home/doomduke2/3D-ShapeNet-car/src/Dataset/Converted_datset_for_testing/{data_index}.vtu")

        return output_dict

class ObjMesh(Basemanager):
    """Load a Wavefront .obj surface mesh and build the tensor dictionary
    consumed by the HDF5 writer, including an optional drag-coefficient label
    read from a CSV file."""

    def __init__(self, path=None):
        """
        Parameters
        ----------
        path : dict
            Expects "mesh_file_path" (the .obj file), "file_name" (mesh
            basename) and "data_file_path" (CSV mapping mesh ids to drag
            coefficients; absent for the test split).
        """
        self.path = path

        _, polydata = self.read_obj(path["mesh_file_path"])
        self.polydata = polydata

        mesh_pos, cells_node, _ = self.extract_vertices_and_faces(polydata)

        # A surface with fewer than 100 cells is suspicious; warn but continue.
        if cells_node.shape[0] < 100:
            print(f"{path['mesh_file_path']} cells_node shape is less than 100")

        cells_face_node, cells_index = self.elements_to_faces(polydata)

        # Default drag coefficient. Covers the test split (no label CSV) AND
        # the case where the mesh id is missing from the CSV — the previous
        # code left cd_data unbound in that case, raising NameError below.
        cd_data = 0.0
        try:
            # training: find this mesh's row in the CSV, take the last column
            print(f"getting {path['file_name'].split('.')[0]} cd data")
            mesh_id = path['file_name'].split('.')[0]
            with open(path['data_file_path'], mode='r', encoding='utf-8') as file:
                reader = csv.reader(file)
                for row in reader:
                    # length guard so blank/short CSV lines don't raise IndexError
                    if len(row) > 1 and row[1] == mesh_id:
                        cd_data = float(row[-1])  # drag coefficient is the last column
                        break
        except FileNotFoundError:
            # test split: no label CSV exists; keep the 0.0 default
            pass

        self.mesh_info = {
            "node|pos": mesh_pos,
            "cell|cells_node": cells_node,
            "cells_node": cells_node.reshape(-1, 1),
            "cells_index": cells_index.reshape(-1, 1),
            "cells_face_node": cells_face_node,
            "coff_drag": cd_data,
        }

    def read_obj(self, file_path):
        """Read an .obj file with VTK and return (reader, polydata)."""
        reader = vtk.vtkOBJReader()
        reader.SetFileName(file_path)
        reader.Update()
        polydata = reader.GetOutput()
        return reader, polydata

    def extract_mesh(self):
        """
        Assemble the per-mesh output dictionary.

        All input dataset values are converted to pytorch tensor objects
        first; geometric quantities (centroids, faces, normals, SDF voxels,
        ambient occlusion) are derived from them.
        """
        dataset = self.convert_to_tensors(self.mesh_info)
        cells_node = dataset["cells_node"][:, 0]
        cells_index = dataset["cells_index"][:, 0]

        """>>>compute centroid crds>>>"""
        mesh_pos = dataset["node|pos"]
        centroid = calc_cell_centered_with_node_attr(
            node_attr=dataset["node|pos"],
            cells_node=cells_node,
            cells_index=cells_index,
            reduce="mean",
        )
        dataset["centroid"] = centroid
        """<<<compute centroid crds<<<"""

        """ >>>   compose face and face_center_pos >>> """
        decomposed_cells = self.make_edges_unique(
            dataset["cells_face_node"],
            cells_node.view(-1, 1),
            cells_index.view(-1, 1),
        )

        face_node = decomposed_cells["edge_index"].T
        dataset["face_node"] = face_node

        # Midpoint of the two endpoints of every unique edge.
        face_center_pos = (mesh_pos[face_node[0]] + mesh_pos[face_node[1]]) / 2.0
        dataset["face_center_pos"] = face_center_pos
        """ <<<   compose face   <<< """

        """ >>>   compute face length   >>>"""
        face_length = torch.norm(
            (mesh_pos[face_node[0]] - mesh_pos[face_node[1]]), dim=1, keepdim=True
        )
        dataset["face_length"] = face_length
        """ <<<   compute face length   <<<"""

        """ >>> unit normal vector >>> """
        node_unv, _ = self.compute_node_normals(polydata=self.polydata)
        dataset["unit_norm_v"] = node_unv
        """ <<< unit normal vector <<< """

        # sdf voxel precomputing: bounds are shared across the dataset and
        # stored next to the mesh files.
        bounds = np.loadtxt(
            os.path.join(os.path.dirname(self.path["mesh_file_path"]), "watertight_global_bounds.txt")
        )
        pos = dataset["node|pos"]
        grid, sdf = DS_utils.compute_sdf_grid(
            pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64]
        )

        # ambient occlusion computing (per node)
        ao = self.compute_ao(mesh_pos=mesh_pos, cells=dataset["cells_node"].reshape(-1, 3), track="trackA", rays=1000)
        dataset['node|ao'] = ao

        output_dict = {
            "node|pos": dataset["node|pos"],
            "node|unit_norm_v": dataset["unit_norm_v"],
            "face|face_node": dataset["face_node"],
            "face|face_center_pos": dataset["face_center_pos"],
            "face|face_length": dataset["face_length"],
            "cell|centroid": dataset["centroid"],
            "cells_node": dataset["cells_node"],
            "cells_index": dataset["cells_index"],
            "voxel|grid": grid,
            "voxel|sdf": sdf,
            'node|ao': dataset['node|ao'],
            "coff_drag": dataset["coff_drag"],
        }

        # write to vtk file (debugging aid)
        # DS_utils.write_to_vtk(self.convert_to_numpy(output_dict.copy()), f"/home/doomduke2/3D-ShapeNet-car/src/Dataset/Converted_datset_for_testing/{data_index}.vtu")

        return output_dict

def random_samples_no_replacement(arr, num_samples, num_iterations):
    """Draw `num_iterations` mutually disjoint random batches of size
    `num_samples` from `arr` (without replacement across batches).

    Returns (batches, leftover): a list of sampled arrays and the array of
    elements that were never drawn. Raises ValueError when `arr` is too
    small to supply every batch.
    """
    if num_samples * num_iterations > len(arr):
        raise ValueError(
            "Number of samples multiplied by iterations cannot be greater than the length of the array."
        )

    remaining = arr.copy()
    batches = []

    for _ in range(num_iterations):
        picked = np.random.choice(len(remaining), num_samples, replace=False)
        batches.append(remaining[picked])
        # drop the drawn elements so later batches cannot repeat them
        remaining = np.delete(remaining, picked)

    return batches, remaining


# Define the processing function
def process_file(file_index, file_path, path, queue):
    """Convert one mesh file (.vtk / .ply / .obj) and push the result to `queue`.

    Parameters
    ----------
    file_index : int
        Index of the file in the work list (kept for interface compatibility).
    file_path : str
        Absolute path of the mesh file.
    path : dict
        Conversion configuration; mutated in place with per-file entries
        ("file_name", "mesh_file_path", "data_file_path").
    queue : multiprocessing queue
        Receives (h5_data, file_basename) tuples for the writer process.

    Raises
    ------
    ValueError if the file extension is not one of the supported mesh types.
    """
    file_name = os.path.basename(file_path)
    subdir = os.path.dirname(file_path)
    path['file_name'] = file_name
    path["mesh_file_path"] = file_path
    path["data_file_path"] = file_path  # default; overridden per format below

    # start convert func
    if path["mesh_file_path"].endswith("vtk"):

        data = VtkMesh(
            path=path,
        )
        h5_data = data.extract_mesh()

    elif path["mesh_file_path"].endswith("ply"):

        # Derive the label file name press_<digits>.npy from the mesh name.
        mesh_index = "".join(char for char in file_name if char.isdigit())
        data_name = f"press_{mesh_index}.npy"

        # Build the label path by swapping 'Feature' for 'Label' in the dir.
        data_dir = subdir.replace("Feature", "Label")
        path["data_file_path"] = f"{data_dir}/{data_name}"

        data = PlyMesh(
            path=path,
        )
        h5_data = data.extract_mesh()

    elif path["mesh_file_path"].endswith("obj"):

        data_dir = subdir.replace("Feature", "Label")
        path['data_file_path'] = f"{data_dir}/dataset2_train_label.csv"

        data = ObjMesh(
            path=path,
        )
        h5_data = data.extract_mesh()

    else:
        raise ValueError(f"wrong mesh file at {path['mesh_file_path']}")

    # Put the results in the queue
    queue.put((h5_data, file_name))


def string_to_floats(s):
    """Encode a string as a float array, one Unicode codepoint per element."""
    return np.fromiter(map(ord, s), dtype=float, count=len(s))


def floats_to_string(floats):
    """Decode an iterable of codepoint-valued floats back into a string."""
    chars = [chr(int(value)) for value in floats]
    return "".join(chars)

def calculate_and_write_h5_statistics(h5_writer):
    """
    Compute the mean and std of the 'node|pressure', 'node|velocity' and
    'coff_drag' keys over every group in the H5 file, then write the
    statistics into each group.

    Parameters:
    h5_writer: h5py.File - open handle of an H5 file that already holds data
    """
    pressures, velocities, drags = [], [], []

    # First pass: gather the raw values from every trajectory group.
    for group_name in h5_writer.keys():
        grp = h5_writer[group_name]
        if 'node|pressure' in grp:
            pressures.append(grp['node|pressure'][:])
        if 'node|velocity' in grp:
            velocities.append(grp['node|velocity'][:])
        if 'coff_drag' in grp:
            drags.append(grp['coff_drag'][()])

    # Reduce each collection; None signals "key absent from every group".
    pressure_mean = pressure_std = None
    if pressures:
        stacked = np.vstack(pressures)
        pressure_mean = np.mean(stacked)
        pressure_std = np.std(stacked)

    velocity_mean = velocity_std = None
    if velocities:
        stacked = np.vstack(velocities)
        # per-component statistics for the velocity vector
        velocity_mean = np.mean(stacked, axis=0)
        velocity_std = np.std(stacked, axis=0)

    drag_mean = drag_std = None
    if drags:
        drag_arr = np.array(drags)
        drag_mean = np.mean(drag_arr)
        drag_std = np.std(drag_arr)

    # Second pass: write the statistics back into every group.
    for group_name in h5_writer.keys():
        grp = h5_writer[group_name]

        if pressure_mean is not None:
            grp.create_dataset('node|pressure_mean', data=pressure_mean)
            grp.create_dataset('node|pressure_std', data=pressure_std)

        if velocity_mean is not None:
            grp.create_dataset('node|velocity_mean', data=velocity_mean)
            grp.create_dataset('node|velocity_std', data=velocity_std)

        if drag_mean is not None:
            grp.create_dataset('coff_drag_mean', data=drag_mean)
            grp.create_dataset('coff_drag_std', data=drag_std)

    print("Statistics have been written to each group in the H5 file.")

# Writer process function
def writer_process(queue, split, path):
    """Consumer process that serializes all HDF5 writes.

    Pulls (h5_data, file_name) tuples from `queue` and writes each as a
    group in <h5_save_path>/<split>.h5. A (None, None) sentinel terminates
    the loop; dataset-wide statistics are then appended to every group.

    The file is opened with a context manager so the handle is closed even
    if a write raises (the previous version leaked it on error).
    """
    os.makedirs(path["h5_save_path"], exist_ok=True)

    with h5py.File(f"{path['h5_save_path']}/{split}.h5", "w") as h5_writer:
        while True:
            # Get data from queue (blocks until a producer sends something)
            h5_data, file_name = queue.get()

            # Break if None is received (sentinel value)
            if h5_data is None:
                break

            # Write dataset key value
            current_traj = h5_writer.create_group(file_name)
            for key, value in h5_data.items():
                current_traj.create_dataset(key, data=value)

            print("{0}th mesh has been writed".format(file_name))

        calculate_and_write_h5_statistics(h5_writer)


def run_command(tfrecord_file, idx_file):
    """Build a .idx index file for `tfrecord_file` via the tfrecord CLI tool.

    Uses sys.executable instead of a bare "python" so the subprocess runs
    under the same interpreter/environment as this script (a PATH "python"
    may point at a different installation without tfrecord).
    Raises subprocess.CalledProcessError on a non-zero exit (check=True).
    """
    subprocess.run(
        [sys.executable, "-m", "tfrecord.tools.tfrecord2idx", tfrecord_file, idx_file],
        check=True,
    )


if __name__ == "__main__":

    main_path = os.path.dirname(__file__)
    for _ in range(2):
        main_path = os.path.dirname(main_path)

    # for debugging
    debug_file_path = None
    # debug_file_path = "/lvm_data/litianyu/mycode-new/CIKM_car_race/datasets/original_datasets/train/train_vel/Feature_File/vel_007.vtk"
    # debug_file_path = f"/lvm_data/litianyu/mycode-new/CIKM_car_race/datasets/original_datasets/train/train_cd_move_to_center/Feature_File/17ac544cdfbf74b999c8924280047dd9.obj"

    params = get_param.params()

    path = {
        "mesh_type": 1,
        "message_passing_num": params.message_passing_num,
        "origin_dataset_path": f"/lvm_data/litianyu/mycode-new/CIKM_car_race/datasets/original_datasets/test",
        "h5_save_path": f"/lvm_data/litianyu/mycode-new/CIKM_car_race/datasets/conveted_dataset/converttest",
        "plot": False,
        "split":"test"
    }

    os.makedirs(path["h5_save_path"], exist_ok=True)

    # stastic total number of data samples
    total_samples = 0
    file_paths = []
    for subdir, _, files in os.walk(path["origin_dataset_path"]):
        for data_name in files:
            if data_name.endswith(".ply") or (
                 data_name.endswith(".vtk")
                ) or ( data_name.endswith(".obj")
                ):
                file_paths.append(os.path.join(subdir, data_name))
                
    # 过滤vtk的mesh：
    file_paths = [
            x for x in file_paths 
            if not (x.split('/')[-1].startswith('mesh') and x.split('/')[-1].endswith('vtk'))
        ]
        
    print(f"Total samples: {len(file_paths)}")

    

    # Determine the number of processes to use
    if debug_file_path is not None:
        multi_process = 1
    elif len(file_paths) < multiprocessing.cpu_count():
        multi_process = len(file_paths)
    else:
        multi_process = int(multiprocessing.cpu_count() / 2)

    # Start to convert data using multiprocessing
    global_data_index = 0
    with multiprocessing.Pool(multi_process) as pool:
        manager = multiprocessing.Manager()
        queue = manager.Queue()

        # Start writer process
        writer_proc = multiprocessing.Process(
            target=writer_process, args=(queue, path['split'], path)
        )
        writer_proc.start()

        if debug_file_path is not None:
            # for debuging
            file_path = debug_file_path
            results = [
                pool.apply_async(
                    process_file,
                    args=(
                        0,
                        file_path,
                        path,
                        queue,
                    ),
                )
            ]
        else:
            # Process files in parallel
            results = [
                pool.apply_async(
                    process_file,
                    args=(
                        file_index,
                        file_path,
                        path,
                        queue,
                    ),
                )
                for file_index, file_path in enumerate(file_paths)
            ]

        # Wait for all processing processes to finish
        for res in results:
            res.get()

        # Send sentinel value to terminate writer process
        queue.put((None, None))
        writer_proc.join()

    print("Fininsh parsing train dataset calc mean and std")
