"""Utility functions for reading the datasets."""

import sys
import os

from torch.utils.data import IterableDataset, Dataset
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data

import torch_geometric.nn as nng
from torch_geometric.loader import NeighborSampler as PyG_NeighborSampler
from torch_geometric.utils import k_hop_subgraph

import matplotlib

matplotlib.use("Agg")
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch_geometric.loader import DataLoader as torch_geometric_DataLoader
import h5py
import re


def sort_key_list(in_list: list, every_set_valid_num=10, train_cd=False):
    """Select the training subset from a list of dataset keys.

    Keys matching ``mesh_<n>.ply`` / ``vel_<n>.vtk`` are grouped and sorted
    by their numeric suffix; all remaining keys keep their original order.
    The last ``every_set_valid_num`` entries of each group are held out for
    validation.  When ``train_cd`` is True only the "other" group is used.
    """
    mesh_re = re.compile(r"mesh_(\d+)\.ply")
    vel_re = re.compile(r"vel_(\d+)\.vtk")

    # Bucket the incoming keys by filename pattern.
    meshes, vels, others = [], [], []
    for name in in_list:
        if mesh_re.match(name):
            meshes.append(name)
        elif vel_re.match(name):
            vels.append(name)
        else:
            others.append(name)

    def numeric_suffix(name, pattern):
        # Non-matching names sort last.
        hit = pattern.match(name)
        return int(hit.group(1)) if hit else float("inf")

    # Order each matched group by the number embedded in the filename.
    meshes.sort(key=lambda name: numeric_suffix(name, mesh_re))
    vels.sort(key=lambda name: numeric_suffix(name, vel_re))

    if train_cd:
        return others[: len(others) - every_set_valid_num]

    return (
        meshes[: len(meshes) - every_set_valid_num]
        + vels[: len(vels) - every_set_valid_num]
        + others[: len(others) - every_set_valid_num]
    )


class Data_Pool:
    """Thin wrapper around an HDF5 dataset file plus graph preprocessing."""

    def __init__(
        self,
        params=None,
        is_training=True,
        dataset_dir=None,
        every_set_valid_num=0,
        train_cd=False,
        device=None,
    ):
        self.params = params
        self.is_training = is_training
        self.device = device
        self.epoch = 0
        self.load_mesh_to_cpu(
            dataset_dir=dataset_dir,
            every_set_valid_num=every_set_valid_num,
            train_cd=train_cd,
        )

    def load_mesh_to_cpu(self, dataset_dir, every_set_valid_num=10, train_cd=False):
        """Open the HDF5 file and build the sorted training key list."""
        self.pool = h5py.File(dataset_dir, "r")
        self.key_list = sort_key_list(
            list(self.pool.keys()),
            every_set_valid_num=every_set_valid_num,
            train_cd=train_cd,
        )
        return dataset_dir

    @staticmethod
    def datapreprocessing(graph_node, is_training=False):
        """Attach relative edge attributes to ``graph_node``.

        During training each edge's direction is flipped with probability
        0.5 as augmentation; otherwise the stored direction is kept.
        """

        def coin_flip(*size, device="cuda"):
            # Two independent uniform bits agree with probability 0.5.
            first = torch.randint(2, size, device=device)
            second = torch.randint(2, size, device=device)
            return first == second

        senders, receivers = graph_node.edge_index
        forward = torch.stack((senders, receivers), dim=0)
        if is_training:
            # Per-edge random swap of sender/receiver.
            backward = torch.stack((receivers, senders), dim=0)
            mask = coin_flip(1, senders.shape[0], device=senders.device).repeat(2, 1)
            edges = torch.where(mask, forward, backward)
        else:
            edges = forward

        # Edge features: difference of endpoint node features and positions.
        delta_attr = graph_node.x[edges[0]] - graph_node.x[edges[1]]
        delta_pos = graph_node.pos[edges[0]] - graph_node.pos[edges[1]]

        graph_node.edge_index = edges
        graph_node.edge_attr = torch.cat((delta_attr, delta_pos), dim=-1)

        return graph_node


class CustomGraphData(Data):
    """PyG ``Data`` subclass with custom batching rules.

    ``__inc__`` decides the per-graph offset added to index-valued tensors
    when samples are collated into a batch; ``__cat_dim__`` decides which
    dimension attributes are concatenated along.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __inc__(self, key, value, *args, **kwargs):
        # Node-index attributes are shifted by num_nodes per graph; offset-0
        # entries are plain payloads concatenated without shifting.
        # NOTE: the original dict listed "mask_cd" twice; the duplicate
        # (dead) entry has been removed.
        offset_rules = {
            "edge_index": self.num_nodes,
            "face": self.num_nodes,
            "cells_node": self.num_nodes,
            "face_node": self.num_nodes,
            "cells_face": self.num_nodes,
            "neighbour_cell": self.num_nodes,
            "face_node_x": self.num_nodes,
            # Block ids restart per graph, so offset by the block count.
            "graph_block_id": self.graph_block_id.max().item() + 1,
            "grid_block_id": 0,
            "pos": 0,
            "cell_area": 0,
            "node_type": 0,
            "graph_index": 0,
            "mask_vel": 0,
            "mask_press": 0,
            "mask_cd": 0,
            "dt_graph": 0,
            "x": 0,
            "y": 0,
            "cd_data": 0,
            "case_global_index": 0,
        }
        return offset_rules.get(key, super().__inc__(key, value, *args, **kwargs))

    def __cat_dim__(self, key, value, *args, **kwargs):
        cat_dim_rules = {
            "x": 0,
            "pos": 0,
            "y": 0,
            "norm_y": 0,
            "query": 0,  # concatenate queries along the node dimension
            "edge_index": 1,  # edge_index concatenates along dim 1 (COO)
            "voxel": 0,
            "graph_index": 0,
        }
        return cat_dim_rules.get(key, super().__cat_dim__(key, value, *args, **kwargs))


class GraphNodeDataset(InMemoryDataset):
    """In-memory view over the HDF5 pool held by a ``Data_Pool``.

    Each entry of ``key_list`` names one HDF5 group; the key's file
    extension (``.ply`` / ``.vtk`` / ``.obj``) selects the loader that
    builds a ``CustomGraphData`` sample.
    """

    def __init__(
        self,
        base_dataset,
        key_list=None,
        subsampling=False,
    ):
        super().__init__()
        self.base_dataset = base_dataset
        # Defensive copy; ``None`` previously crashed with AttributeError
        # (``None.copy()``) -- treat it as an empty dataset instead.
        self.key_list = list(key_list) if key_list is not None else []
        self.subsampling = subsampling

    @property
    def pool(self):
        # The already-opened h5py file owned by the base dataset.
        return self.base_dataset.pool

    @property
    def params(self):
        return self.base_dataset.params

    def len(self):
        return len(self.key_list)

    def assign_blocks(self, points, axis=0, num_blocks=8):
        """Partition a point cloud into equal-width slabs along one axis.

        Args:
            points (torch.Tensor): point coordinates of shape [N, 3].
            axis (int): axis to split along (0 = x, 1 = y, 2 = z).
            num_blocks (int): number of slabs, default 8.

        Returns:
            torch.Tensor: block index per point, shape [N, 1].
        """
        min_value = torch.min(points[:, axis])
        max_value = torch.max(points[:, axis])

        # Width of one slab along the chosen axis.
        block_width = (max_value - min_value) / num_blocks

        block_indices = torch.floor((points[:, axis] - min_value) / block_width).long()

        # Clamp so the max-coordinate point lands in the last slab.
        block_indices = torch.clamp(block_indices, 0, num_blocks - 1)

        return block_indices.view(-1, 1)

    def _voxel_attrs(self, minibatch_data, mesh_pos):
        """Shared voxel/SDF loading used by all three format loaders.

        Returns:
            (voxel, grids, canonical_query): the voxel SDF tensor of shape
            [B, C, W, H, D], the flattened grid coordinates [M, 3], and the
            mesh positions normalized into the grid's canonical [-1, 1] box.
        """
        voxel = (
            torch.from_numpy(minibatch_data["voxel|sdf"][:])
            .to(torch.float32)
            .permute(3, 0, 1, 2)
            .unsqueeze(0)
        )  # B C W H D
        grids = torch.from_numpy(minibatch_data["voxel|grid"][:].reshape(-1, 3))
        max_bound, _ = torch.max(grids, dim=0)
        min_bound, _ = torch.min(grids, dim=0)
        mid = (min_bound + max_bound) / 2
        scale = (max_bound - min_bound) / 2
        canonical_query = ((mesh_pos - mid) / scale).to(torch.float32)
        return voxel, grids, canonical_query

    def load_ply_data(self, idx, key, minibatch_data):
        """Build a pressure-supervised sample from a ``*.ply`` HDF5 group.

        Expected keys include: 'node|pos', 'node|unit_norm_v',
        'face|face_node', 'node|ao', 'node|pressure', 'press_mean_std',
        'voxel|grid', 'voxel|sdf'.
        """

        # --> graph attr -->
        mesh_pos = torch.from_numpy(minibatch_data["node|pos"][:]).to(torch.float32)
        unit_norm_v = torch.from_numpy(minibatch_data["node|unit_norm_v"][:]).to(
            torch.float32
        )
        edge_index = torch.from_numpy(minibatch_data["face|face_node"][:]).to(
            torch.long
        )
        ao = torch.from_numpy(minibatch_data["node|ao"][:]).to(torch.float32)
        node_pressure = (
            torch.from_numpy(minibatch_data["node|pressure"][:])
            .to(torch.float32)
            .view(-1, 1)
        )
        # Normalize pressure with the per-case mean/std stored in the file.
        norm_pressure = (
            (node_pressure - minibatch_data["press_mean_std"][:][0])
            / (minibatch_data["press_mean_std"][:][1])
        ).view(-1, 1)
        norm_velocity = torch.zeros_like(mesh_pos).to(torch.float32)
        cd_data = torch.zeros(1).view(-1, 1)
        x = torch.cat((unit_norm_v, torch.zeros_like(ao)), dim=-1)
        # <-- graph attr <--

        # --> voxel attr -->
        voxel, grids, canonical_query = self._voxel_attrs(minibatch_data, mesh_pos)
        # <-- voxel attr <--

        # .ply samples are not block-partitioned: every node/grid point is
        # assigned to the last block id.
        graph_block_id = torch.full(
            (mesh_pos.shape[0],), self.params.pool_cd_num_blocks - 1, dtype=torch.long
        )
        grid_block_id = torch.full(
            (grids.shape[0],), self.params.pool_cd_num_blocks - 1, dtype=torch.long
        ).unsqueeze(0)

        graph_node = CustomGraphData(
            x=x,
            edge_index=edge_index,
            pos=mesh_pos,
            norm_pressure=norm_pressure,
            norm_velocity=norm_velocity,
            cd_data=cd_data,
            voxel=voxel,
            query=canonical_query,
            graph_block_id=graph_block_id,
            grid_block_id=grid_block_id,
            phi_mean=torch.tensor(minibatch_data["press_mean_std"][:][0])
            .to(torch.float32)
            .view(-1, 1)
            .repeat(1, 3),
            phi_std=torch.tensor(minibatch_data["press_mean_std"][:][1])
            .to(torch.float32)
            .view(-1, 1)
            .repeat(1, 3),
            # Encode the key as character codes so it survives PyG collation.
            origin_id=torch.as_tensor([ord(char) for char in (key)], dtype=torch.long),
            idx=torch.tensor([idx], dtype=torch.long),
            mask_vel=torch.tensor([False]).to(torch.bool),
            mask_press=torch.tensor([True]).to(torch.bool),
            mask_pos=torch.tensor([False]).to(torch.bool),
            mask_cd=torch.tensor([False]).to(torch.bool),
        )

        return graph_node

    def load_vtk_data(self, idx, key, minibatch_data):
        """Build a velocity-supervised sample from a ``*.vtk`` HDF5 group.

        Expected keys include: 'node|pos', 'node|unit_norm_v',
        'face|face_node', 'sdf_irrgular', 'node|velocity',
        'voxel|grid', 'voxel|sdf'.
        """

        # --> graph attr -->
        mesh_pos = torch.from_numpy(minibatch_data["node|pos"][:]).to(torch.float32)
        unit_norm_v = torch.from_numpy(minibatch_data["node|unit_norm_v"][:]).to(
            torch.float32
        )
        edge_index = torch.from_numpy(minibatch_data["face|face_node"][:]).to(
            torch.long
        )
        sdf_irr = torch.from_numpy(minibatch_data["sdf_irrgular"][:]).to(torch.float32)
        norm_pressure = torch.zeros_like(sdf_irr).to(torch.float32)
        norm_velocity = torch.from_numpy(minibatch_data["node|velocity"][:]).to(
            torch.float32
        )
        cd_data = torch.zeros(1).view(-1, 1)
        x = torch.cat((unit_norm_v, sdf_irr), dim=-1)
        # <-- graph attr <--

        # --> voxel attr -->
        voxel, grids, canonical_query = self._voxel_attrs(minibatch_data, mesh_pos)
        # <-- voxel attr <--

        if self.subsampling:
            # Take a random k-hop subgraph around sampled seed nodes and
            # restrict all per-node tensors to it.
            sampled_nodes = torch.randint(
                0, mesh_pos.shape[0], [self.params.num_samples]
            )
            subgraph_nodes, subgraph_edge_index, _, _ = k_hop_subgraph(
                sampled_nodes, self.params.k_hop, edge_index, relabel_nodes=True
            )
            x = x[subgraph_nodes]
            mesh_pos = mesh_pos[subgraph_nodes]
            norm_pressure = norm_pressure[subgraph_nodes]
            norm_velocity = norm_velocity[subgraph_nodes]
            canonical_query = canonical_query[subgraph_nodes]
            edge_index = subgraph_edge_index

        # .vtk samples are not block-partitioned: every node/grid point is
        # assigned to the last block id.
        graph_block_id = torch.full(
            (mesh_pos.shape[0],), self.params.pool_cd_num_blocks - 1, dtype=torch.long
        )
        grid_block_id = torch.full(
            (grids.shape[0],), self.params.pool_cd_num_blocks - 1, dtype=torch.long
        ).unsqueeze(0)

        graph_node = CustomGraphData(
            x=x,
            edge_index=edge_index,
            pos=mesh_pos,
            norm_pressure=norm_pressure,
            norm_velocity=norm_velocity,
            cd_data=cd_data,
            query=canonical_query,
            voxel=voxel,
            graph_block_id=graph_block_id,
            grid_block_id=grid_block_id,
            # Velocity targets are stored unnormalized: identity mean/std.
            phi_mean=torch.tensor([0.0]).view(-1, 1).repeat(1, 3),
            phi_std=torch.tensor([1.0]).view(-1, 1).repeat(1, 3),
            origin_id=torch.as_tensor([ord(char) for char in (key)], dtype=torch.long),
            idx=torch.tensor([idx], dtype=torch.long),
            mask_vel=torch.tensor([True]).to(torch.bool),
            mask_press=torch.tensor([False]).to(torch.bool),
            mask_pos=torch.tensor([False]).to(torch.bool),
            mask_cd=torch.tensor([False]).to(torch.bool),
        )

        return graph_node

    def load_obj_data(self, idx, key, minibatch_data):
        """Build a drag-coefficient sample from a ``*.obj`` HDF5 group.

        Expected keys include: 'node|pos', 'node|unit_norm_v',
        'face|face_node', 'node|ao', 'coff_drag', 'voxel|grid', 'voxel|sdf'.
        """

        # --> graph attr -->
        mesh_pos = torch.from_numpy(minibatch_data["node|pos"][:]).to(torch.float32)
        unit_norm_v = torch.from_numpy(minibatch_data["node|unit_norm_v"][:]).to(
            torch.float32
        )
        edge_index = torch.from_numpy(minibatch_data["face|face_node"][:]).to(
            torch.long
        )
        ao = torch.from_numpy(minibatch_data["node|ao"][:]).to(torch.float32)
        norm_pressure = torch.zeros_like(ao).to(torch.float32).view(-1, 1)
        norm_velocity = torch.zeros_like(mesh_pos).to(torch.float32)
        cd_data = (
            torch.tensor(minibatch_data["coff_drag"][()]).to(torch.float32).view(-1, 1)
        )
        x = torch.cat((unit_norm_v, torch.zeros_like(ao)), dim=-1)
        # <-- graph attr <--

        # --> voxel attr -->
        voxel, grids, canonical_query = self._voxel_attrs(minibatch_data, mesh_pos)
        # <-- voxel attr <--

        if self.subsampling:
            # Take a random k-hop subgraph around sampled seed nodes and
            # restrict all per-node tensors to it.
            sampled_nodes = torch.randint(
                0, mesh_pos.shape[0], [self.params.num_samples]
            )
            try:
                subgraph_nodes, subgraph_edge_index, _, _ = k_hop_subgraph(
                    sampled_nodes, self.params.k_hop, edge_index, relabel_nodes=True
                )
            except Exception as exc:
                # Was a bare ``except:`` that also caught SystemExit /
                # KeyboardInterrupt and dropped the original traceback.
                raise ValueError(f"subgraph_nodes failed for {key}") from exc

            x = x[subgraph_nodes]
            mesh_pos = mesh_pos[subgraph_nodes]
            norm_pressure = norm_pressure[subgraph_nodes]
            norm_velocity = norm_velocity[subgraph_nodes]
            canonical_query = canonical_query[subgraph_nodes]
            edge_index = subgraph_edge_index

        # .obj samples ARE block-partitioned along the x-axis for pooling.
        graph_block_id = self.assign_blocks(
            mesh_pos, axis=0, num_blocks=self.params.pool_cd_num_blocks
        ).squeeze()
        grid_block_id = (
            self.assign_blocks(grids, axis=0, num_blocks=self.params.pool_cd_num_blocks)
            .squeeze()
            .unsqueeze(0)
        )

        graph_node = CustomGraphData(
            x=x,
            edge_index=edge_index,
            pos=mesh_pos,
            norm_pressure=norm_pressure,
            norm_velocity=norm_velocity,
            cd_data=cd_data,
            voxel=voxel,
            query=canonical_query,
            graph_block_id=graph_block_id,
            grid_block_id=grid_block_id,
            phi_mean=torch.tensor([0.0]).view(-1, 1).repeat(1, 3),
            phi_std=torch.tensor([1.0]).view(-1, 1).repeat(1, 3),
            origin_id=torch.as_tensor([ord(char) for char in (key)], dtype=torch.long),
            idx=torch.tensor([idx], dtype=torch.long),
            mask_vel=torch.tensor([False]).to(torch.bool),
            mask_press=torch.tensor([False]).to(torch.bool),
            mask_pos=torch.tensor([True]).to(torch.bool),
            mask_cd=torch.tensor([True]).to(torch.bool),
        )

        return graph_node

    def get(self, idx):
        """Load sample ``idx``; the key's file extension picks the loader."""
        key = self.key_list[idx]
        minibatch_data = self.pool[key]

        if key.endswith(".ply"):
            graph_node = self.load_ply_data(idx, key, minibatch_data)

        elif key.endswith(".vtk"):
            graph_node = self.load_vtk_data(idx, key, minibatch_data)

        elif key.endswith(".obj"):
            graph_node = self.load_obj_data(idx, key, minibatch_data)

        else:
            raise ValueError("invalid data format")

        return graph_node


class DatasetFactory:
    """Builds train/test ``Data_Pool`` datasets and their PyG dataloaders."""

    def __init__(
        self,
        params=None,
        train_cd=False,
        device=None,
    ):
        self.params = params
        self.device = device
        self.train_dataset = []
        self.valid_dataset = []
        self.test_dataset = []
        self.train_cd = train_cd

    def create_trainset(
        self,
        batch_size=100,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True,
        subsampling=False,
    ):
        """Create the training ``Data_Pool`` and a shuffling DataLoader.

        Returns:
            tuple: (Data_Pool, DataLoader)
        """
        self.train_dataset = Data_Pool(
            params=self.params,
            is_training=True,
            dataset_dir=self.params.trainset,
            every_set_valid_num=self.params.every_set_valid_num,
            train_cd=self.train_cd,
            # BUG FIX: previously passed ``self.params`` as the device;
            # use the actual device, matching ``create_testset``.
            device=self.device,
        )

        graph_node_dataset = GraphNodeDataset(
            base_dataset=self.train_dataset,
            key_list=self.train_dataset.key_list,
            subsampling=subsampling,
        )

        loader = torch_geometric_DataLoader(
            graph_node_dataset,
            batch_size,
            num_workers=num_workers,
            pin_memory=pin_memory,
            persistent_workers=persistent_workers,
            shuffle=True,
        )

        return self.train_dataset, loader

    def create_testset(
        self,
        batch_size=1,
        num_workers=0,
        pin_memory=False,
        persistent_workers=False,
        subsampling=False,
    ):
        """Create the test ``Data_Pool`` and a non-shuffling DataLoader.

        No keys are held out for validation (``every_set_valid_num=0``).

        Returns:
            tuple: (Data_Pool, DataLoader)
        """
        self.test_dataset = Data_Pool(
            params=self.params,
            is_training=False,
            dataset_dir=self.params.testset,
            every_set_valid_num=0,
            train_cd=self.train_cd,
            device=self.device,
        )

        graph_node_dataset = GraphNodeDataset(
            base_dataset=self.test_dataset,
            key_list=self.test_dataset.key_list,
            subsampling=subsampling,
        )

        loader = torch_geometric_DataLoader(
            graph_node_dataset,
            batch_size,
            num_workers=num_workers,
            pin_memory=pin_memory,
            persistent_workers=persistent_workers,
            shuffle=False,
        )

        return self.test_dataset, loader
