"""Utility functions for reading the datasets."""

import sys
import os
from torch.utils.data import IterableDataset, Dataset
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Data
import matplotlib
matplotlib.use("Agg")
import torch
import numpy as np
from torch_geometric.loader import DataLoader as torch_geometric_DataLoader
from torch.utils.data import Sampler
import h5py


def GetCarDatasetInfoList(params, path, split: list):
    """Collect metadata for the car CFD dataset rooted at *path*.

    Loads the pressure normalization statistics, the global bounding box
    and the list of watertight mesh indices, then partitions the mesh
    indices according to the consecutive boundaries in *split*.

    Args:
        params: unused here; kept for interface compatibility with callers.
        path: directory containing the dataset text files and train.h5.
        split: boundary positions, e.g. [0, 100, 120] -> two index chunks.

    Returns:
        Tuple of (dataset_dir, pressure_min_std, bounds, splited_mesh_indices).
    """
    dataset_dir = os.path.join(path, "train.h5")
    pressure_min_std = np.loadtxt(os.path.join(path, "train_pressure_min_std.txt"))
    bounds = np.loadtxt(os.path.join(path, "watertight_global_bounds.txt"))
    mesh_indices = np.loadtxt(os.path.join(path, "watertight_meshes.txt")).reshape(-1)

    # Consecutive [lo, hi) windows over the full index list.
    splited_mesh_indices = [
        mesh_indices[lo:hi] for lo, hi in zip(split[:-1], split[1:])
    ]
    return dataset_dir, pressure_min_std, bounds, splited_mesh_indices


class CFDdatasetmap(Dataset):
    """Map-style dataset that reads whole CFD trajectories from an HDF5 file.

    Each top-level HDF5 group (keyed by the stringified sample index) is
    returned as a dict mapping dataset names to torch tensors.
    """

    def __init__(
        self, params, path, split="train", dataset_type="h5", is_training=False
    ):
        super().__init__()

        self.path = path
        self.split = split
        self.dataset_dir = path
        self.params = params
        self.is_training = is_training
        # Only the HDF5 backend is supported; fail fast otherwise.
        if dataset_type != "h5":
            raise ValueError("invalid data format")
        self.file_handle = h5py.File(self.dataset_dir + f"/{split}.h5", "r")

    def __getitem__(self, index):
        """Return trajectory *index* as a dict of tensors."""
        record = self.file_handle[str(index)]
        return {name: torch.from_numpy(record[name][:]) for name in record.keys()}

    def __len__(self):
        return len(self.file_handle)


class Data_Pool:
    """Owns the open HDF5 split handle plus dataset-level physics statistics.

    After construction, ``self.pool`` is the open h5py file, ``self.key_list``
    holds its sample keys in numeric order, and ``self.physics_info`` caches
    the normalization constants used by the graph datasets.
    """

    def __init__(self, params=None, is_training=True, split="train", device=None):
        self.params = params
        self.is_training = is_training
        self.device = device
        self.epoch = 0
        self.load_mesh_to_cpu(split=split, dataset_dir=params.dataset_dir)

    def load_mesh_to_cpu(self, split="train", dataset_dir=None):
        """Open the HDF5 split and cache normalization statistics.

        Returns the dataset directory from ``self.params``.
        """
        self.valid_pool = []

        # Prefer the explicit directory; fall back to the configured one.
        root = dataset_dir if dataset_dir is not None else self.params.dataset_dir
        self.pool = h5py.File(root + f"/{split}.h5", "r")

        # HDF5 keys are strings; order them numerically, then back to str.
        self.key_list = [str(k) for k in np.sort([int(k) for k in self.pool.keys()])]

        _, pressure_min_std, bounds, _ = GetCarDatasetInfoList(
            self.params, self.params.dataset_dir, [0, 1]
        )
        voxel_mean_std = torch.from_numpy(
            np.loadtxt(self.params.dataset_dir + "/voxel_mean_std.txt")
        ).to(torch.float32)

        # Half the diagonal length of the global bounding box.
        pos_scale = np.linalg.norm(bounds[0] - bounds[1]) * 0.5

        self.physics_info = {
            "pressure_min_std": pressure_min_std,
            "bounds": bounds,
            "pos_scale": pos_scale,
            "voxel_mean_std": voxel_mean_std,
        }

        return self.params.dataset_dir

    @staticmethod
    def datapreprocessing(graph_node, graph_edge, graph_cell, is_training=False):
        """Assemble node/edge features; randomize edge direction in training.

        Concatenates (x, pos, ao) into a 7-column node feature matrix, builds
        relative edge features from it, then keeps only columns 0:3 and 6:7
        of the node features. Returns [graph_node, graph_edge, graph_cell].
        """

        def _coin_flip(*shape, device="cuda"):
            # Two independent fair bits agree with probability 0.5.
            first = torch.randint(2, shape, device=device)
            second = torch.randint(2, shape, device=device)
            return first == second

        graph_node.x = torch.cat((graph_node.x, graph_node.pos, graph_node.ao), dim=-1)

        # Optionally flip each edge's direction with 50% probability.
        senders, receivers = graph_node.edge_index
        forward = torch.stack((senders, receivers), dim=0)
        if is_training:
            mask = _coin_flip(1, senders.shape[0], device=senders.device).repeat(2, 1)
            backward = torch.stack((receivers, senders), dim=0)
            edge_index = torch.where(mask, forward, backward)
        else:
            edge_index = forward

        graph_node.edge_index = edge_index

        # Relative node features along each (possibly flipped) edge.
        graph_node.edge_attr = (
            graph_node.x[edge_index[0]] - graph_node.x[edge_index[1]]
        )
        # Keep columns 0:3 (original x) and 6:7 (ao) of the concatenated features.
        graph_node.x = torch.cat((graph_node.x[:, 0:3], graph_node.x[:, 6:7]), dim=-1)

        return [graph_node, graph_edge, graph_cell]


class CustomGraphData(Data):
    """PyG ``Data`` subclass with custom batching increments.

    Keys listed in ``_NODE_OFFSET_KEYS`` hold node indices and are shifted
    by ``num_nodes`` when samples are collated into a batch; keys in
    ``_ZERO_OFFSET_KEYS`` are concatenated without any offset. All other
    keys fall back to the parent class rule.
    """

    # Keys whose values index into the node tensor.
    _NODE_OFFSET_KEYS = frozenset({
        "edge_index",
        "face",
        "cells_node",
        "face_node",
        "cells_face",
        "neighbour_cell",
        "face_node_x",
    })
    # Keys concatenated verbatim (no index offset).
    _ZERO_OFFSET_KEYS = frozenset({
        "pos",
        "A_node_to_node",
        "A_node_to_node_x",
        "B_node_to_node",
        "B_node_to_node_x",
        "cell_area",
        "node_type",
        "graph_index",
        "pde_theta",
        "neural_network_output_mask",
        "uvp_dim",
        "dt_graph",
        "x",
        "y",
        "m_ids",
        "m_gs",
        "case_global_index",
    })

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __inc__(self, key, value, *args, **kwargs):
        if key in self._NODE_OFFSET_KEYS:
            return self.num_nodes
        if key in self._ZERO_OFFSET_KEYS:
            return 0
        # Lazy fallback. Bug fix: the original used
        # offset_rules.get(key, super().__inc__(...)), which evaluated the
        # parent rule unconditionally — even when the key was in the dict —
        # and rebuilt the whole dict on every call.
        return super().__inc__(key, value, *args, **kwargs)


class GraphNodeDataset(InMemoryDataset):
    """Node-level graphs: mesh positions, normals, SDF voxels and pressure.

    Reads one sample per index from the shared Data_Pool HDF5 handle and
    packages it as a CustomGraphData. ``indices`` optionally remaps the
    visible samples to a subset of the pool.
    """

    def __init__(self, base_dataset, len_ds=None, indices=None):
        super().__init__()
        self.base_dataset = base_dataset
        self._len = len_ds
        self.i_idx = None
        if indices is not None:
            # An explicit index list both fixes the length and remaps get().
            self._len = len(indices)
            self.i_idx = indices

    @property
    def pool(self):
        # Shared HDF5 handle owned by the base Data_Pool.
        return self.base_dataset.pool

    @property
    def key_list(self):
        return self.base_dataset.key_list

    @property
    def physics_info(self):
        # Normalization statistics (pressure min/std, bounds, voxel mean/std).
        return self.base_dataset.physics_info

    def len(self):
        return self._len

    def get(self, idx):
        """Assemble the node graph for sample *idx*.

        Available HDF5 keys per sample:
        <KeysViewHDF5 ['cells_face', 'cells_index',
        'cells_node', 'cell|cells_area', 'cell|centroid',
        'face|face_center_pos', 'face|face_length',
        'face|face_node', 'face|neighbour_cell',
        'node|pos', 'node|pressure',
        'node|unit_norm_v', 'voxel|grid',
        'voxel|sdf']>
        """
        if self.i_idx is not None:
            idx = self.i_idx[idx]
        minibatch_data = self.pool[self.key_list[idx]]

        mesh_pos = torch.from_numpy(minibatch_data["node|pos"][:]).to(torch.float32)
        unit_norm_v = torch.from_numpy(minibatch_data["node|unit_norm_v"][:]).to(
            torch.float32
        )
        face_node = torch.from_numpy(minibatch_data["face|face_node"][:]).to(torch.long)
        cells_node = torch.from_numpy(minibatch_data["cells_node"][:]).to(torch.long)
        ao = torch.from_numpy(minibatch_data["node|ao"][:]).to(torch.float32)

        # SDF voxels reshaped to (B, C, W, H, D) and normalized.
        voxel = (
            torch.from_numpy(minibatch_data["voxel|sdf"][:])
            .to(torch.float32)
            .reshape(1, 1, *minibatch_data["voxel|grid"][:].shape[:-1])
        )  # B C W H D
        voxel = (voxel - self.physics_info["voxel_mean_std"][0]) / self.physics_info[
            "voxel_mean_std"
        ][1]

        # First 32 slices along the 4th axis ("left" half of the volume).
        voxel_left = voxel[:, :, :, :32, :]

        # Map mesh positions into the canonical bounding box coordinates.
        bounds = self.physics_info["bounds"]
        mid = (bounds[0] + bounds[1]) / 2
        scale = (bounds[1] - bounds[0]) / 2
        canonical_query = (mesh_pos - mid) / scale
        # Axis reorder (z, x, y) — presumably to match the voxel grid layout;
        # TODO(review): confirm against the voxel generation code.
        canonical_query = canonical_query[:, [2, 0, 1]][None,]  # B, N, 3
        canonical_query = canonical_query.unsqueeze(2).unsqueeze(2)  # B, N(D), H, W, 3
        canonical_query = canonical_query.to(torch.float32)
        canonical_pos = ((mesh_pos - mid) / scale).to(torch.float32)

        grid = minibatch_data["voxel|grid"][:][None,]  # B X Y Z 3
        grid = (grid - mid) / scale
        grid = torch.from_numpy(grid).to(torch.float32)

        # Left half of the grid, mirroring voxel_left.
        grid_left = grid[:, :, :, :32, :]

        # Pressure targets are absent in some splits (e.g. test); fall back
        # to zeros. Bug fix: catch only KeyError (raised by h5py for a
        # missing dataset) instead of a bare `except:` that silently hid
        # every other failure, including programming errors.
        try:
            target_on_node = torch.from_numpy(minibatch_data["node|pressure"][:]).to(
                torch.float32
            )
            norm_y = (
                target_on_node - self.physics_info["pressure_min_std"][0]
            ) / self.physics_info["pressure_min_std"][1]
        except KeyError:
            norm_y = torch.zeros(mesh_pos.shape[0], 1)
            target_on_node = torch.zeros(mesh_pos.shape[0], 1)

        graph_node = CustomGraphData(
            x=unit_norm_v,
            edge_index=face_node,
            face=cells_node.T,
            pos=mesh_pos,
            y=target_on_node,
            norm_y=norm_y,
            ao=ao,
            voxel=voxel,
            voxel_left=voxel_left,
            query=canonical_query,
            grid=grid,
            grid_left=grid_left,
            cpos=canonical_pos,
            graph_index=torch.as_tensor([idx], dtype=torch.long),
            # Encode the HDF5 key string as character codes so it survives
            # tensor collation.
            origin_id=torch.as_tensor(
                [ord(char) for char in (self.key_list[idx])], dtype=torch.long
            ),
        )

        return graph_node


class GraphEdgeDataset(InMemoryDataset):
    """Per-sample face/edge graphs (face length and face center position)."""

    def __init__(self, base_dataset, len=None):
        # NOTE: the parameter name `len` shadows the builtin, but callers
        # pass it by keyword, so it is kept for compatibility.
        super().__init__()
        self.base_dataset = base_dataset
        self._len = len

    @property
    def pool(self):
        # Delegate to the shared base dataset's HDF5 pool.
        return self.base_dataset.pool

    @property
    def key_list(self):
        return self.base_dataset.key_list

    @property
    def physics_info(self):
        return self.base_dataset.physics_info

    def len(self):
        return self._len

    def get(self, idx):
        """Build a CustomGraphData holding face attributes for sample *idx*.

        Available HDF5 keys per sample: 'cells_face', 'cells_index',
        'cells_node', 'cell|cells_area', 'cell|centroid',
        'face|face_center_pos', 'face|face_length', 'face|face_node',
        'face|neighbour_cell', 'node|pos', 'node|pressure',
        'node|unit_norm_v', 'voxel|grid', 'voxel|sdf'.
        """
        sample = self.pool[self.key_list[idx]]

        # Edge attributes: per-face length and center position.
        edge_len = torch.from_numpy(sample["face|face_length"][:]).to(torch.float32)
        edge_pos = torch.from_numpy(sample["face|face_center_pos"][:]).to(
            torch.float32
        )
        faces_of_cells = torch.from_numpy(sample["cells_face"][:]).to(torch.long)

        return CustomGraphData(
            x=edge_len,
            face=faces_of_cells.T,
            pos=edge_pos,
            graph_index=torch.as_tensor([idx], dtype=torch.long),
        )


class GraphCellDataset(InMemoryDataset):
    """Per-sample cell graphs (neighbour connectivity, area, centroid)."""

    def __init__(self, base_dataset, len=None):
        # NOTE: the parameter name `len` shadows the builtin, but callers
        # pass it by keyword, so it is kept for compatibility.
        super().__init__()
        self.base_dataset = base_dataset
        self._len = len

    @property
    def pool(self):
        # Delegate to the shared base dataset's HDF5 pool.
        return self.base_dataset.pool

    @property
    def key_list(self):
        return self.base_dataset.key_list

    def len(self):
        return self._len

    def get(self, idx):
        """Build a CustomGraphData holding cell attributes for sample *idx*.

        Available HDF5 keys per sample: 'cells_face', 'cells_index',
        'cells_node', 'cell|cells_area', 'cell|centroid',
        'face|face_center_pos', 'face|face_length', 'face|face_node',
        'face|neighbour_cell', 'node|pos', 'node|pressure',
        'node|unit_norm_v', 'voxel|grid', 'voxel|sdf'.
        """
        sample = self.pool[self.key_list[idx]]

        # Cell attributes: neighbour connectivity, area and centroid.
        neighbour_cells = torch.from_numpy(sample["face|neighbour_cell"][:]).to(
            torch.long
        )
        areas = torch.from_numpy(sample["cell|cells_area"][:]).to(torch.float32)
        centroids = torch.from_numpy(sample["cell|centroid"][:]).to(torch.float32)
        cell_ids = torch.from_numpy(sample["cells_index"][:]).to(torch.long)

        return CustomGraphData(
            edge_index=neighbour_cells,
            cell_area=areas,
            pos=centroids,
            face=cell_ids.T,
            graph_index=torch.as_tensor([idx], dtype=torch.long),
        )


# Shared sampler so the node/edge/cell loaders iterate indices in lockstep.
class SharedSampler(Sampler):
    """Sampler yielding an epoch-seeded random permutation of the dataset.

    The same instance is shared by several DataLoaders so they all visit
    indices in the same order. When ``set_specific_indices`` has been
    called, iteration yields exactly those indices instead of a random
    permutation.
    """

    def __init__(self, data_source):
        self.data_source = data_source
        self.epoch = 0
        self.specific_indices = None  # overrides random order when set

    def __iter__(self):
        if self.specific_indices is not None:
            return iter(self.specific_indices)
        # Seed with the epoch so every loader sharing this sampler gets
        # the same permutation for the same epoch.
        g = torch.Generator()
        g.manual_seed(self.epoch)
        return iter(torch.randperm(len(self.data_source), generator=g).tolist())

    def __len__(self):
        # Bug fix: report the actual number of indices yielded when a
        # specific index list is active (the original always returned the
        # full dataset length, so DataLoader lengths over-reported).
        if self.specific_indices is not None:
            return len(self.specific_indices)
        return len(self.data_source)

    def set_epoch(self, epoch):
        """Set the seed used for the next epoch's permutation."""
        self.epoch = epoch

    def set_specific_indices(self, indices):
        """Restrict iteration to *indices* (pass None to restore shuffling)."""
        self.specific_indices = indices


# CustomDataLoader drives three PyG DataLoaders with one SharedSampler so
# the node / edge / cell batches stay index-aligned.
class CustomDataLoader:
    """Zips three torch_geometric DataLoaders that share one sampler."""

    def __init__(
        self,
        graph_node_dataset,
        graph_edge_dataset,
        graph_cell_dataset,
        batch_size,
        sampler,
        num_workers=4,
        persistent_workers=True,
        pin_memory=True,
    ):
        # Keep the constructor arguments so loaders can be rebuilt later.
        self.graph_node_dataset = graph_node_dataset
        self.graph_edge_dataset = graph_edge_dataset
        self.graph_cell_dataset = graph_cell_dataset
        self.batch_size = batch_size
        self.sampler = sampler
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.persistent_workers = persistent_workers

        # Build the three loaders (decomposed into _make_loader; the
        # original repeated this construction six times).
        self.loader_A = self._make_loader(graph_node_dataset, persistent_workers)
        self.loader_B = self._make_loader(graph_edge_dataset, persistent_workers)
        self.loader_C = self._make_loader(graph_cell_dataset, persistent_workers)

    def _make_loader(self, dataset, persistent_workers=False):
        """Build one DataLoader wired to the shared sampler."""
        return torch_geometric_DataLoader(
            dataset,
            self.batch_size,
            sampler=self.sampler,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
            persistent_workers=persistent_workers,
        )

    def __iter__(self):
        # Shared sampler => all three loaders emit indices in the same order.
        return zip(self.loader_A, self.loader_B, self.loader_C)

    def __len__(self):
        return min(
            len(self.loader_A),
            len(self.loader_B),
            len(self.loader_C),
        )

    def get_specific_data(self, indices):
        """Fetch one (node, edge, cell) batch covering exactly *indices*.

        NOTE(review): the sampler's specific_indices remain set after this
        call; callers that resume normal iteration should reset them.
        """
        self.sampler.set_specific_indices(indices)

        # Rebuild the loaders so the updated sampler takes effect. As in
        # the original, these throwaway loaders keep persistent_workers at
        # its default (False).
        self.loader_A = self._make_loader(self.graph_node_dataset)
        self.loader_B = self._make_loader(self.graph_edge_dataset)
        self.loader_C = self._make_loader(self.graph_cell_dataset)

        graph_node, graph_edge, graph_cell = next(iter(self))

        return graph_node, graph_edge, graph_cell


# DatasetFactory creates the SharedSampler and passes it to CustomDataLoader.
class DatasetFactory:
    """Builds the train/test Data_Pools and their shared-sampler loaders."""

    def __init__(
        self,
        params=None,
        device=None,
    ):
        self.train_dataset = Data_Pool(
            params=params,
            is_training=True,
            split="train",
            device=device,
        )

        self.test_dataset = Data_Pool(
            params=params,
            is_training=False,
            split="test",
            device=device,
        )

    def create_trainset(
        self,
        batch_size=100,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True,
        indices=None,
    ):
        """Build the training datasets, loader and shared sampler.

        Args:
            indices: optional subset of pool indices to train on; when
                None the whole pool is used.

        Returns:
            (train Data_Pool, CustomDataLoader, SharedSampler)
        """
        pool_size = len(self.train_dataset.pool)
        # Bug fix: the original unconditionally called len(indices), which
        # raised TypeError for the default indices=None.
        len_ds = pool_size if indices is None else len(indices)

        graph_node_dataset = GraphNodeDataset(
            base_dataset=self.train_dataset,
            len_ds=pool_size,
            indices=indices,
        )
        graph_edge_dataset = GraphEdgeDataset(
            base_dataset=self.train_dataset, len=len_ds
        )
        graph_cell_dataset = GraphCellDataset(
            base_dataset=self.train_dataset, len=len_ds
        )

        # One sampler shared by all three loaders keeps batches aligned.
        sampler = SharedSampler(graph_node_dataset)

        loader = CustomDataLoader(
            graph_node_dataset,
            graph_edge_dataset,
            graph_cell_dataset,
            batch_size=batch_size,
            sampler=sampler,
            num_workers=num_workers,
            pin_memory=pin_memory,
            persistent_workers=persistent_workers,
        )

        return self.train_dataset, loader, sampler

    def create_testset(
        self,
        batch_size=1,
        num_workers=0,
        pin_memory=False,
        persistent_workers=False,
        valid_num=10,
    ):
        """Build the test datasets, loader and shared sampler.

        Args:
            valid_num: number of validation samples exposed by the datasets.

        Returns:
            (test Data_Pool, CustomDataLoader, SharedSampler)
        """
        graph_node_dataset = GraphNodeDataset(
            base_dataset=self.test_dataset, len_ds=valid_num
        )
        graph_edge_dataset = GraphEdgeDataset(
            base_dataset=self.test_dataset, len=valid_num
        )
        graph_cell_dataset = GraphCellDataset(
            base_dataset=self.test_dataset, len=valid_num
        )

        sampler = SharedSampler(graph_node_dataset)

        loader = CustomDataLoader(
            graph_node_dataset,
            graph_edge_dataset,
            graph_cell_dataset,
            batch_size=batch_size,
            sampler=sampler,
            num_workers=num_workers,
            pin_memory=pin_memory,
            persistent_workers=persistent_workers,
        )

        return self.test_dataset, loader, sampler
