import os
import sys
import argparse

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
import numpy as np
import multiprocessing
import threading
import torch
import matplotlib

matplotlib.use("Agg")

import h5py
import math
import subprocess
import sys
import utils.DS_utils as DS_utils
from utils.utilities import (
    calc_cell_centered_with_node_attr,
    calc_node_centered_with_cell_attr,
)

# for BSMS mesh coarsening
import os
import numpy as np
import h5py
import torch
import shutil

# Flush any pending stdout output (note: this flushes once; it does not disable buffering)
sys.stdout.flush()

# Initialize lock for file writing synchronization
lock = threading.Lock()


class Basemanager:

    def polygon_area(self, vertices):
        """
        使用shoelace formula（鞋带公式）来计算多边形的面积。
        :param vertices: 多边形的顶点坐标，一个二维numpy数组。
        :return: 多边形的面积。
        """
        x = vertices[:, 0]
        y = vertices[:, 1]
        return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

    def triangles_to_faces(self, faces):
        """Computes mesh edges from triangles."""

        cells_face_node = torch.cat(
            (
                faces[:, 0:2],
                faces[:, 1:3],
                torch.stack((faces[:, 2], faces[:, 0]), dim=1),
            ),
            dim=0,
        )

        return cells_face_node.numpy()

    def position_relative_to_line_pytorch(A, B, angle_c):
        # A是点的坐标，表示为(x, y)的元组
        # B是一个数组，shape为[nums, 2]，其中nums为参与判断的点数量，2为xy坐标
        # angle_c是与X轴的夹角，以角度为单位

        # 将输入转换为张量
        A = torch.tensor(A, dtype=torch.float64)
        B = torch.tensor(B, dtype=torch.float64)
        angle_c = torch.tensor(angle_c, dtype=torch.float64)

        # 计算直线的方向向量
        direction_vector = torch.tensor(
            [
                torch.cos(angle_c * math.pi / 180.0),
                torch.sin(angle_c * math.pi / 180.0),
            ],
            dtype=torch.float64,
        )

        # 计算向量AB
        vector_AB = B - A

        # 计算两个向量的叉积，注意这里使用广播
        cross_product = (
            direction_vector[0] * vector_AB[:, 1]
            - direction_vector[1] * vector_AB[:, 0]
        )

        # 判断每个点相对于直线的位置，返回一个mask
        mask = cross_product > 0
        return mask.view(-1, 1)  # 调整shape为[nums, 1]

    def is_convex(self, polygon):
        """
        检查一个多边形是否是凸的。
        :param polygon: 多边形的顶点坐标，一个二维numpy数组。
        :return: 如果是凸的返回True，否则返回False。
        """
        n = len(polygon)
        for i in range(n):
            a = polygon[i]
            b = polygon[(i + 1) % n]
            c = polygon[(i + 2) % n]
            ba = a - b
            bc = c - b
            cross_product = np.cross(ba, bc)
            if cross_product < 0:
                return False
        return True

    def reorder_polygon(self, polygon):
        """
        重新排序多边形的顶点使其成为一个凸多边形。
        :param polygon: 多边形的顶点坐标，一个二维numpy数组。
        :return: 重新排序后的多边形的顶点坐标。
        """
        centroid = np.mean(polygon, axis=0)
        sorted_polygon = sorted(
            polygon, key=lambda p: np.arctan2(p[1] - centroid[1], p[0] - centroid[0])
        )
        return np.array(sorted_polygon)

    def ensure_counterclockwise(self, cells, mesh_pos):
        """
        确保每个单元的顶点是按逆时针顺序排列的，并且是凸的。
        :param cells: 单元的索引数组。
        :param mesh_pos: 顶点的坐标数组。
        :return: 调整后的cells数组。
        """
        for i, cell in enumerate(cells):
            vertices = mesh_pos[cell]
            if not self.is_convex(vertices):
                vertices = self.reorder_polygon(vertices)
                sorted_indices = sorted(
                    range(len(cell)),
                    key=lambda k: list(map(list, vertices)).index(
                        list(mesh_pos[cell][k])
                    ),
                )
                cells[i] = np.array(cell)[sorted_indices]
        return cell

    def is_equal(self, x, pivot):
        """
        Determine if a value x is between two other values a and b.

        Parameters:
        - a (float or int): The lower bound.
        - b (float or int): The upper bound.
        - x (float or int): The value to check.

        Returns:
        - (bool): True if x is between a and b (inclusive), False otherwise.
        """
        a = abs(pivot) - float(1e-8)
        b = abs(pivot) + float(1e-8)
        # Check if x is between a and b, inclusive
        if a <= abs(x) <= b:
            return True
        else:
            return False

    def make_edges_unique(self, cells_face_node, cells_node, cells_index):
        """Computes mesh edges from triangles."""
        # collect edges from triangles
        cells_face_node_biased = torch.sort(cells_face_node, dim=1)[0]
        senders, receivers = cells_face_node_biased[:, 0], cells_face_node_biased[:, 1]
        packed_edges = torch.stack((senders, receivers), dim=1)
        singleway_edge_index = torch.unique(
            packed_edges, return_inverse=False, return_counts=False, dim=0
        ).to(torch.int64)

        cells_face = []
        edge_indice = torch.arange(singleway_edge_index.shape[0])

        for i_edge in range(cells_face_node.shape[0]):

            current_edge = cells_face_node[i_edge : i_edge + 1, :].sort(dim=-1)[0]
            mask = (singleway_edge_index == current_edge).all(dim=-1)
            cells_face.append(edge_indice[mask])

        cells_face = torch.cat(cells_face).view(-1, 1)

        if cells_face.shape[0] != cells_face_node.shape[0]:
            raise ValueError("cells_face shape is not equal to cells_face_node shape")

        return {
            "edge_index": singleway_edge_index,
            "cells_face": cells_face,
            "cells_face_node_unbiased": cells_face_node,
            "cells_face_node_biased": packed_edges,
        }

    def create_neighbor_matrix(self, vertex_coords, edges):
        """
        Create a matrix representing the neighbors for each vertex in a graph.

        Parameters:
        vertex_coords (Tensor): A tensor of shape [n, 2] representing n vertex coordinates.
        edges (Tensor): A tensor of shape [m, 2] representing m edges, where each edge is a pair of vertex indices.

        Returns:
        Tensor: A matrix where each row corresponds to a vertex and contains the indices of its neighbors.
        """
        # Adjust edges to ensure all indices are within the range of vertex_coords' first dimension
        edges_mod = edges % vertex_coords.shape[0]

        # Create a tensor to hold the counts of neighbors for each vertex
        counts = torch.zeros(vertex_coords.shape[0], dtype=torch.int64)

        # Count the occurrence of each index in edges_mod to determine the number of neighbors
        counts.scatter_add_(0, edges_mod.view(-1), torch.ones_like(edges_mod.view(-1)))

        # Find the maximum number of neighbors to define the second dimension of the neighbor matrix
        max_neighbors = counts.max()

        # Create a tensor to hold the neighbors, initialized with -1 (indicating no neighbor)
        neighbor_matrix = torch.full(
            (vertex_coords.shape[0], max_neighbors), -1, dtype=torch.int64
        )

        # Create an array to keep track of the current count of neighbors for each vertex
        current_count = torch.zeros(vertex_coords.shape[0], dtype=torch.int64)

        # Iterate through each edge and populate the neighbor matrix
        for edge in edges_mod:
            # Unpack the edge
            start, end = edge
            # Place the end vertex in the next available spot for the start vertex in neighbor_matrix
            neighbor_matrix[start, current_count[start]] = end
            # Increment the count for the start vertex
            current_count[start] += 1
            # Do the same for the end vertex, assuming undirected edges
            neighbor_matrix[end, current_count[end]] = start
            current_count[end] += 1

        return neighbor_matrix, max_neighbors

    def generate_directed_edges(self, cells_node):
        # 生成给定单元的所有可能边组合，但只保留一个方向的边
        edges = []
        for i in range(len(cells_node)):
            for j in range(i + 1, len(cells_node)):
                edge = [cells_node[i], cells_node[j]]
                reversed_edge = [cells_node[j], cells_node[i]]

                # 只添加一个方向的边
                if reversed_edge not in edges:
                    edges.append(edge)
        return edges

    def compose_edge_index_x(
        self, face_node, cells_face_node_biased, cells_node, cells_index
    ):
        face_node_x = face_node.clone()

        for i in range(cells_index.max() + 1):
            mask_cell = (cells_index == i).view(-1)
            current_cells_face_node_biased = cells_face_node_biased[mask_cell]
            current_cells_node = cells_node[mask_cell]
            all_possible_edges, _ = torch.tensor(
                self.generate_directed_edges(current_cells_node)
            ).sort(dim=-1)

            for edge in all_possible_edges:
                edge = edge.unsqueeze(0)
                if (edge.unsqueeze(0) == current_cells_face_node_biased).all(
                    dim=-1
                ).sum() < 1:
                    face_node_x = torch.cat((face_node_x, edge), dim=0)

        return face_node_x

    def convert_to_tensors(self, input_dict):
        # 如果输入是字典
        if isinstance(input_dict, dict):
            # 遍历字典中的所有键
            for key in input_dict.keys():
                # 检查值的类型
                value = input_dict[key]
                if isinstance(value, np.ndarray):
                    # 如果值是一个Numpy数组，使用torch.from_numpy进行转换
                    input_dict[key] = torch.from_numpy(value)
                elif not isinstance(value, torch.Tensor):
                    # 如果值不是一个PyTorch张量，使用torch.tensor进行转换
                    input_dict[key] = torch.tensor(value)
                # 如果值已经是一个PyTorch张量，不进行任何操作

        # 如果输入是列表
        elif isinstance(input_dict, list):
            for i in range(len(input_dict)):
                value = input_dict[i]
                if isinstance(value, np.ndarray):
                    # 如果值是一个Numpy数组，使用torch.from_numpy进行转换
                    input_dict[i] = torch.from_numpy(value)
                elif not isinstance(value, torch.Tensor):
                    # 如果值不是一个PyTorch张量，使用torch.tensor进行转换
                    input_dict[i] = torch.tensor(value)
                # 如果值已经是一个PyTorch张量，不进行任何操作

        return input_dict

    def convert_to_numpy(self, input_dict):
        # 如果输入是字典
        if isinstance(input_dict, dict):
            # 遍历字典中的所有键
            for key in input_dict.keys():
                # 检查值的类型
                value = input_dict[key]
                if isinstance(value, torch.Tensor):
                    # 如果值是一个Numpy数组，使用torch.from_numpy进行转换
                    input_dict[key] = value.numpy()
                elif not isinstance(value, torch.Tensor):
                    # 如果值不是一个PyTorch张量，使用torch.tensor进行转换
                    input_dict[key] = torch.tensor(value).numpy()
                # 如果值已经是一个PyTorch张量，不进行任何操作

        # 如果输入是列表
        elif isinstance(input_dict, list):
            for i in range(len(input)):
                value = input_dict[i]
                if isinstance(value, torch.Tensor):
                    # 如果值是一个Numpy数组，使用torch.from_numpy进行转换
                    input_dict[i] = value.numpy()
                elif not isinstance(value, torch.Tensor):
                    # 如果值不是一个PyTorch张量，使用torch.tensor进行转换
                    input_dict[i] = torch.tensor(value).numpy()
                # 如果值已经是一个PyTorch张量，不进行任何操作
        return input_dict

    # 计算法向量
    def compute_unit_normals(
        self,
        mesh_pos: torch.Tensor,
        cells_node: torch.Tensor,
        centroid: torch.Tensor = None,
    ):
        cells_node = cells_node.reshape(-1, 3)

        # 获取顶点的坐标
        A = mesh_pos[cells_node[:, 0]]
        B = mesh_pos[cells_node[:, 1]]
        C = mesh_pos[cells_node[:, 2]]

        # 计算边向量
        AB = B - A
        AC = C - A

        # 计算法向量（叉积）
        N = torch.cross(AB, AC, dim=-1)

        # 归一化法向量
        norm = torch.norm(N, dim=-1, keepdim=True)
        unit_N = N / norm

        # make sure the normal vector is outward
        geo_center = torch.mean(centroid, dim=0, keepdim=True)
        outward = centroid - geo_center
        mask_outward = ((unit_N * outward).sum(dim=-1, keepdim=True)) > 0
        unit_N = torch.where(mask_outward.repeat(1, 3), unit_N, -unit_N)

        return unit_N


class PlyMesh(Basemanager):
    """
    Loads a .ply surface mesh and extracts the graph quantities needed by
    the h5 writer.  (Tecplot .dat is only supported with Tobias's airfoil
    dataset; no other data file format is supported.)
    """

    def __init__(self, path=None):
        """Load the mesh and (optionally) its pressure label.

        :param path: dict with at least "mesh_file_path", "data_file_path"
            and "aux_dir" entries.
        """
        mesh_pos, cells_node = DS_utils.load_mesh_ply_vtk(path["mesh_file_path"])

        self.mesh_pos = mesh_pos
        self.cells_node = cells_node

        cells_face_node = self.triangles_to_faces(torch.from_numpy(cells_node))
        # All elements are triangles, so each cell id repeats three times.
        cells_index = (
            torch.arange(cells_node.shape[0]).view(-1, 1).repeat(1, 3)
        ).numpy()

        try:
            pressuredata = np.expand_dims(np.load(path["data_file_path"]), axis=1)
        except Exception:
            # No label file (e.g. inference split): fall back to zeros.
            # (Was a bare ``except:``, which also swallowed KeyboardInterrupt.)
            pressuredata = np.zeros((mesh_pos.shape[0], 1), dtype=np.float32)

        if mesh_pos.shape[0] < 10000:
            # Small meshes: pressure is node-centered.
            # NOTE(review): the [0:16] + [112:] slicing looks dataset
            # specific (skipping nodes 16..111) — confirm against the raw
            # track-A label layout.
            self.mesh_info = {
                "node|pos": mesh_pos,
                "cell|cells_node": cells_node,
                "cells_node": cells_node.reshape(-1, 1),
                "cells_index": cells_index.reshape(-1, 1),
                "cells_face_node": cells_face_node,
                "node|pressure": np.concatenate(
                    (pressuredata[0:16], pressuredata[112:]), axis=0
                ),
            }

        else:
            # Large meshes: pressure is stored under the cell-centered key.
            self.mesh_info = {
                "node|pos": mesh_pos,
                "cell|cells_node": cells_node,
                "cells_node": cells_node.reshape(-1, 1),
                "cells_index": cells_index.reshape(-1, 1),
                "cells_face_node": cells_face_node,
                "cell|pressure": pressuredata,
            }

        self.path = path

    def _extract_mesh(self, data_index=None, expand_sdf_dim=False):
        """Shared implementation behind extract_mesh_A / extract_mesh_B.

        All values of the returned dict are torch tensors (or numpy arrays
        for the sdf/ao fields produced by DS_utils).

        :param data_index: integer id, used only for logging.
        :param expand_sdf_dim: if True, store the sdf as [n, 1] (track-A
            convention); otherwise keep the raw array.
        :return: dict ready to be written into the h5 file.
        """
        dataset = self.convert_to_tensors(self.mesh_info)
        cells_node = dataset["cells_node"][:, 0]
        cells_index = dataset["cells_index"][:, 0]

        # >>> compute centroid coordinates >>>
        mesh_pos = dataset["node|pos"]
        centroid = calc_cell_centered_with_node_attr(
            node_attr=dataset["node|pos"],
            cells_node=cells_node,
            cells_index=cells_index,
            reduce="mean",
        )
        dataset["centroid"] = centroid
        # <<< compute centroid coordinates <<<

        # >>> compose unique faces and face_center_pos >>>
        decomposed_cells = self.make_edges_unique(
            dataset["cells_face_node"],
            cells_node.view(-1, 1),
            cells_index.view(-1, 1),
        )

        cells_face = decomposed_cells["cells_face"]
        dataset["cells_face"] = cells_face

        face_node = decomposed_cells["edge_index"].T
        dataset["face_node"] = face_node

        face_center_pos = (mesh_pos[face_node[0]] + mesh_pos[face_node[1]]) / 2.0
        dataset["face_center_pos"] = face_center_pos
        # <<< compose faces <<<

        # >>> face length >>>
        face_length = torch.norm(
            (mesh_pos[face_node[0]] - mesh_pos[face_node[1]]), dim=1, keepdim=True
        )
        dataset["face_length"] = face_length
        # <<< face length <<<

        # >>> neighbour cells of every face >>>
        # The two cells adjacent to a face are recovered as the max and min
        # cell index touching that face; boundary faces get the same cell twice.
        senders_cell = calc_node_centered_with_cell_attr(
            cell_attr=cells_index.view(-1),
            cells_node=cells_face.view(-1),
            cells_index=cells_index.view(-1),
            reduce="max",
            map=False,
        )

        recivers_cell = calc_node_centered_with_cell_attr(
            cell_attr=cells_index.view(-1),
            cells_node=cells_face.view(-1),
            cells_index=cells_index.view(-1),
            reduce="min",
            map=False,
        )
        neighbour_cell = torch.stack((recivers_cell, senders_cell), dim=0)
        dataset["neighbour_cell"] = neighbour_cell.to(torch.int64)
        # <<< neighbour cells <<<

        # >>> cell areas via Heron's formula >>>
        cells_node_reshape = cells_node.reshape(-1, 3)
        cells_face_node = torch.stack(
            (
                cells_node_reshape[:, 0:2],
                cells_node_reshape[:, 1:3],
                torch.stack(
                    (cells_node_reshape[:, 2], cells_node_reshape[:, 0]), dim=1
                ),
            ),
            dim=1,
        )
        cells_length = torch.norm(
            mesh_pos[cells_face_node[:, :, 0]] - mesh_pos[cells_face_node[:, :, 1]],
            dim=-1,
            keepdim=True,
        )
        # Semi-perimeter s, then area = sqrt(s(s-a)(s-b)(s-c)).
        circum = cells_length.sum(dim=1, keepdim=True) * 0.5
        mul = (
            circum[:, 0]
            * ((circum - cells_length)[:, 0])
            * ((circum - cells_length)[:, 1])
            * ((circum - cells_length)[:, 2])
        )
        valid_cells_area = torch.sqrt(mul)

        dataset["cells_area"] = valid_cells_area
        # <<< cell areas <<<

        # >>> unit normal vectors (cell normals averaged onto nodes) >>>
        unv = self.compute_unit_normals(mesh_pos, cells_node, centroid=centroid)
        node_unv = calc_node_centered_with_cell_attr(
            cell_attr=unv[cells_index],
            cells_node=cells_node.view(-1, 1),
            cells_index=cells_index.view(-1, 1),
            reduce="mean",
            map=False,
        )

        dataset["unit_norm_v"] = node_unv
        # <<< unit normal vectors <<<

        # sdf voxel precomputing.
        # BUGFIX: extract_mesh_B referenced the *global* ``path`` here,
        # which raises NameError when not run through __main__.
        bounds = np.loadtxt(
            os.path.join(self.path["aux_dir"], "watertight_global_bounds.txt")
        )
        pos = dataset["node|pos"]
        grid, sdf = DS_utils.compute_sdf_grid(
            pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64]
        )

        # ambient occlusion computing
        ply_file = self.path["mesh_file_path"]
        ao = DS_utils.compute_ao(ply_file)
        dataset["node|ao"] = ao

        # BUGFIX: large meshes store their label under "cell|pressure"; the
        # original extract_mesh_B unconditionally read "node|pressure" and
        # would raise KeyError.
        pressure = (
            dataset["node|pressure"]
            if "node|pressure" in dataset
            else dataset["cell|pressure"]
        )

        output_dict = {
            "node|pos": dataset["node|pos"],
            "node|pressure": pressure,
            "node|unit_norm_v": dataset["unit_norm_v"],
            "node|ao": dataset["node|ao"],
            "face|face_node": dataset["face_node"],
            "face|face_center_pos": dataset["face_center_pos"],
            "face|face_length": dataset["face_length"],
            "face|neighbour_cell": dataset["neighbour_cell"],
            "cell|cells_area": dataset["cells_area"],
            "cell|centroid": dataset["centroid"],
            "cells_node": dataset["cells_node"],
            "cells_index": dataset["cells_index"],
            "cells_face": dataset["cells_face"],
            "voxel|grid": grid,
            "voxel|sdf": sdf[:, None] if expand_sdf_dim else sdf,
        }

        print("{0}th mesh has been extracted".format(data_index))

        return output_dict

    def extract_mesh_A(self, data_index=None):
        """Extraction for small meshes (< 10000 nodes); sdf stored as [n, 1]."""
        return self._extract_mesh(data_index=data_index, expand_sdf_dim=True)

    def extract_mesh_B(self, data_index=None):
        """Extraction for large meshes (>= 10000 nodes); sdf stored as [n]."""
        return self._extract_mesh(data_index=data_index, expand_sdf_dim=False)


def random_samples_no_replacement(arr, num_samples, num_iterations):
    """Draw `num_iterations` disjoint random batches of `num_samples` each.

    :param arr: 1D numpy array to sample from (not modified).
    :param num_samples: number of elements per batch.
    :param num_iterations: number of batches to draw.
    :return: (samples, remaining) — the list of drawn batches and the
        elements of ``arr`` that were never selected.
    :raises ValueError: if the requested total exceeds ``len(arr)``.
    """
    if num_samples * num_iterations > len(arr):
        raise ValueError(
            "Number of samples multiplied by iterations cannot be greater than the length of the array."
        )

    remaining = arr.copy()
    drawn_batches = []

    for _ in range(num_iterations):
        # Pick without replacement from what is still available, then
        # remove the picked elements so later batches stay disjoint.
        picked = np.random.choice(len(remaining), num_samples, replace=False)
        drawn_batches.append(remaining[picked])
        remaining = np.delete(remaining, picked)

    return drawn_batches, remaining


# Define the processing function
def process_file(file_index, file_path, path, queue):
    """Worker: convert one .ply mesh into an h5-ready dict and enqueue it.

    :param file_index: position of the file in the global file list (kept
        for interface compatibility; the h5 key uses the mesh's own index).
    :param file_path: absolute path of the mesh file.
    :param path: shared config dict; "mesh_file_path"/"data_file_path" are
        filled in here before constructing PlyMesh.
    :param queue: multiprocessing queue consumed by writer_process.
    :raises ValueError: if the file is not a .ply mesh.
    """
    mesh_name = os.path.basename(file_path)  # mesh file name
    path["mesh_file_path"] = file_path

    if path["mesh_file_path"].endswith("ply"):

        # Derive the label file name from the digits of the mesh file name,
        # e.g. "mesh_123.ply" -> "press_123.npy".
        digits = "".join(char for char in mesh_name if char.isdigit())
        mesh_index = int(digits)
        data_name = f"press_{digits}.npy"

        path["data_file_path"] = f"{path['label_dir']}/{data_name}"

        data = PlyMesh(
            path=path,
        )

        # Small meshes go through the track-A extraction, large ones track-B.
        if data.mesh_pos.shape[0] < 10000:
            h5_data = data.extract_mesh_A(data_index=mesh_index)
        else:
            h5_data = data.extract_mesh_B(data_index=mesh_index)

    else:
        raise ValueError(f"wrong mesh file at {path['mesh_file_path']}")

    # Hand the result to the single writer process.
    queue.put((h5_data, mesh_index))


def string_to_floats(s):
    """Encode a string as a numpy array of its character code points."""
    codes = [ord(ch) for ch in s]
    return np.asarray(codes, dtype=np.float64)


def floats_to_string(floats):
    """Decode an iterable of float code points back into a string."""
    chars = (chr(int(code)) for code in floats)
    return "".join(chars)


# Writer process function
def writer_process(queue, split, path):
    """Single writer: drain the queue and write every mesh into one h5 file.

    Runs in its own process so that only one process ever touches the h5
    file.  Terminates when it receives the (None, None) sentinel.  For the
    "train" split it additionally saves the sdf mean/std and copies the
    auxiliary normalization files next to the h5 output.

    :param queue: multiprocessing queue of (h5_data_dict, mesh_index) pairs.
    :param split: dataset split name; also the h5 file's base name.
    :param path: config dict with "h5_save_path", "aux_dir" and "track".
    """

    os.makedirs(path["h5_save_path"], exist_ok=True)
    h5_writer = h5py.File(f"{path['h5_save_path']}/{split}.h5", "w")
    # Collected sdf grids of labeled meshes, used for mean/std (train only).
    sdf_list = []
    while True:
        # Get data from queue
        h5_data, file_index = queue.get()

        # Break if None is received (sentinel value)
        if h5_data is None:
            break

        # Skip duplicates: each mesh index becomes exactly one h5 group.
        if str(file_index) in h5_writer:
            continue

        current_traj = h5_writer.create_group(str(file_index))
        for key, value in h5_data.items():
            current_traj.create_dataset(key, data=value)
            # Only labeled meshes (non-zero pressure) contribute to the
            # sdf statistics.  NOTE(review): assumes the "voxel|sdf" value
            # is a numpy array here — confirm against the extractor.
            if key == "voxel|sdf" and (not (h5_data["node|pressure"] == 0).all()):
                sdf_list.append(torch.from_numpy(value))

        print("{0}th mesh has been writed".format(file_index))

    # Statistics/aux files are produced for the train split only.
    if split == "train":
        voxel_mean, voxel_std = DS_utils.compute_mean_std(sdf_list)

        np.savetxt(
            f"{path['h5_save_path']}/voxel_mean_std.txt",
            np.array([voxel_mean.item(), voxel_std.item()]),
        )
        # Track A and track B ship differently named normalization files.
        if path["track"] == "A":
            shutil.copy(
                f"{path['aux_dir']}/train_pressure_min_std.txt",
                f"{path['h5_save_path']}/train_pressure_min_std.txt",
            )
            shutil.copy(
                f"{path['aux_dir']}/watertight_global_bounds.txt",
                f"{path['h5_save_path']}/watertight_global_bounds.txt",
            )
        else:
            shutil.copy(
                f"{path['aux_dir']}/train_pressure_mean_std.txt",
                f"{path['h5_save_path']}/train_pressure_mean_std.txt",
            )
            shutil.copy(
                f"{path['aux_dir']}/global_bounds.txt",
                f"{path['h5_save_path']}/global_bounds.txt",
            )

    # Close the h5 writer.
    h5_writer.close()


def run_command(tfrecord_file, idx_file):
    """Generate a tfrecord index file by shelling out to tfrecord2idx.

    Uses ``sys.executable`` instead of a bare "python" so the child runs
    under the same interpreter/virtualenv as this script (a bare "python"
    can resolve to a different installation on PATH).

    :param tfrecord_file: path of the input .tfrecord file.
    :param idx_file: path where the index file is written.
    :raises subprocess.CalledProcessError: if the tool exits non-zero
        (check=True).
    """
    subprocess.run(
        [sys.executable, "-m", "tfrecord.tools.tfrecord2idx", tfrecord_file, idx_file],
        check=True,
    )


if __name__ == "__main__":

    # Entry point: walk msh_dir for .ply meshes, convert them in parallel
    # worker processes, and stream the results to a single h5 writer process.
    parser = argparse.ArgumentParser(
        description="train / test a pytorch model to predict frames"
    )

    # Training parameters
    parser.add_argument(
        "--msh_dir",
        default="/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/rawDataset/trackA/Train/Feature",
        type=str,
        help="",
    )
    parser.add_argument(
        "--label_dir",
        default="/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/rawDataset/trackA/Train/Label",
        type=str,
        help="",
    )
    parser.add_argument(
        "--aux_dir",
        default="/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/rawDataset/trackA/Train",
        type=str,
        help="",
    )
    parser.add_argument(
        "--h5_save_path",
        default="/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/converted_dataset_test/trackA",
        type=str,
        help="",
    )
    parser.add_argument(
        "--split",
        default="test",
        type=str,
        help="",
    )
    parser.add_argument(
        "--track",
        default="A",
        type=str,
        help="",
    )
    params = parser.parse_args()

    # For debugging: set debug_file_path to a single mesh to process only it.
    debug_file_path = None
    # debug_file_path = f"/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/rawDataset/trackA/Test/Inference/mesh_658.ply"

    # Shared configuration dict passed to the workers and the writer.
    path = {
        "msh_dir": params.msh_dir,
        "label_dir": params.label_dir,
        "aux_dir": params.aux_dir,
        "h5_save_path": params.h5_save_path,
        "split": params.split,
        "track": params.track,
        "plot": False,
    }

    os.makedirs(path["h5_save_path"], exist_ok=True)

    # Collect all .ply mesh paths under msh_dir (recursively).
    # NOTE(review): total_samples is never updated and appears unused.
    total_samples = 0
    file_paths_list = []
    for subdir, _, files in os.walk(path["msh_dir"]):
        for data_name in files:
            if data_name.endswith(".ply"):
                file_paths_list.append(os.path.join(subdir, data_name))

    # Shuffle file paths (in place)
    np.random.shuffle(file_paths_list)
    print(f"Total samples: {len(file_paths_list)}")

    # Determine the number of worker processes: one for the debug path, one
    # per file for small sets, otherwise half the available CPU cores.
    if debug_file_path is not None:
        multi_process = 1
    elif len(file_paths_list) < multiprocessing.cpu_count():
        multi_process = len(file_paths_list)
    else:
        multi_process = int(multiprocessing.cpu_count() / 2)

    # Start to convert data using multiprocessing
    global_data_index = 0
    with multiprocessing.Pool(multi_process) as pool:
        manager = multiprocessing.Manager()
        queue = manager.Queue()

        # Start the single writer process that owns the h5 file.
        writer_proc = multiprocessing.Process(
            target=writer_process, args=(queue, params.split, path)
        )
        writer_proc.start()

        if debug_file_path is not None:
            # Debug mode: process just the one hard-coded mesh.
            file_path = debug_file_path
            results = [
                pool.apply_async(
                    process_file,
                    args=(
                        0,
                        file_path,
                        path,
                        queue,
                    ),
                )
            ]
        else:
            # Process files in parallel
            results = [
                pool.apply_async(
                    process_file,
                    args=(
                        file_index,
                        file_path,
                        path,
                        queue,
                    ),
                )
                for file_index, file_path in enumerate(file_paths_list)
            ]

        # Wait for all processing processes to finish (re-raises worker errors).
        for res in results:
            res.get()

        # Send sentinel value to terminate writer process
        queue.put((None, None))
        writer_proc.join()

    print("Fininsh parsing train dataset calc mean and std")
