import torch

# from torch_geometric.datasets import TUDataset
# dataset = TUDataset('./', name="PROTEINS_full", use_node_attr=True)
# print(next(iter(dataset)))
#
# print(dataset.len())

from torch_geometric.data import Dataset, Data
import os
from typing import List
from torch_geometric.io import read_txt_array
import torch.nn.functional as F
from torch_sparse import coalesce
from torch_geometric.utils import remove_self_loops
from torch_geometric.data.separate import separate
import copy
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
from collections.abc import Mapping
import numpy as np


class CustomDatset(Dataset):

    def __init__(self, root='./PROTEINS_full', filepath='./PROTEINS_full/raw', name='custom',
                 use_edge_attr=True, transform=None,
                 pre_transform=None, pre_filter=None):
        """
        root: 数据集保存的地方。
        会产生两个文件夹：
          raw_dir(downloaded dataset) 和 processed_dir(processed data)。
        """

        self.name = name
        self.root = root
        self.filepath = filepath
        self.filenames = os.listdir(filepath)
        self.use_edge_attr = use_edge_attr
        self.pre_transform = pre_transform
        self.pre_filter = pre_filter

        super().__init__(root, transform, pre_transform, pre_filter)

        self.data, self.slices = torch.load(self.processed_paths[0])
        # self.slices：一个切片字典，用于从该对象重构单个示例

    @property
    def raw_dir(self):
        """原始文件的文件夹"""
        return self.filepath

    @property
    def processed_dir(self):
        """处理后文件的文件夹"""
        return os.path.join(self.root, self.name)

    @property
    def raw_file_names(self) -> List[str]:
        """"原始文件的文件名，如果存在则不会触发download"""
        return self.filenames

    @property
    def processed_file_names(self) -> str:
        """处理后的文件名，如果在 processed_dir 中找到则跳过 process"""
        return ['data.pt']

    def download(self):
        """这里不需要下载"""
        pass

    # -----------------------------------------------------
    # 添加一些特征处理，如果不用处理，这部分可以不要
    # -----------------------------------------------------
    def _get_node_features(self):
        """获取结点特征"""
        print(self.filepath)
        path = os.path.join(self.filepath, 'PROTEINS_full_node_attributes.txt')
        node_attributes = read_txt_array(path, sep=',')
        return node_attributes

    def _get_node_labels(self):
        """获取结点标签"""
        path = os.path.join(self.filepath, 'PROTEINS_full_node_labels.txt')
        node_labels = read_txt_array(path, sep=',', dtype=torch.long)
        if node_labels.dim() == 1:
            node_labels = node_labels.unsqueeze(-1)
        node_labels = node_labels - node_labels.min(dim=0)[0]
        node_labels = node_labels.unbind(dim=-1)
        node_labels = [F.one_hot(x, num_classes=-1) for x in node_labels]
        node_labels = torch.cat(node_labels, dim=-1).to(torch.float)
        return node_labels

    def _get_adjacent(self):
        """获取边索引"""
        path = os.path.join(self.filepath, 'PROTEINS_full_A.txt')
        edge_index = read_txt_array(path, sep=',', dtype=torch.long).t() - 1
        return edge_index

    def _get_batch(self):
        """获取 batch 索引"""
        path = os.path.join(self.filepath, 'PROTEINS_full_graph_indicator.txt')
        batch = read_txt_array(path, sep=',', dtype=torch.long) - 1
        return batch

    def _get_graph_labels(self):
        """获取图标签"""
        path = os.path.join(self.filepath, 'PROTEINS_full_graph_labels.txt')
        y = read_txt_array(path, sep=',', dtype=torch.long)
        _, y = y.unique(sorted=True, return_inverse=True)
        return y

    # -----------------------------------------------------
    # 数据处理主程序
    # -----------------------------------------------------

    def process(self):
        """主程序，对原始数据进行处理"""

        edge_index = self._get_adjacent()
        batch = self._get_batch()
        node_features = self._get_node_features()
        node_labels = self._get_node_labels()
        x = cat([node_features, node_labels])
        y = self._get_graph_labels()
        # 总共结点数
        num_nodes = edge_index.max().item() + 1 if x is None else x.size(0)
        edge_attr = None
        # 去掉环
        edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)
        edge_index, edge_attr = coalesce(edge_index, edge_attr, num_nodes, num_nodes)
        data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, y=y)
        self.data, self.slices = split(data, batch)

        if self.pre_filter is not None:
            data_list = [self.get(idx) for idx in range(len(self))]
            data_list = [data for data in data_list if self.pre_filter(data)]
            self.data = data_list

        if self.pre_transform is not None:
            data_list = [self.get(idx) for idx in range(len(self))]
            data_list = [self.pre_transform(data) for data in data_list]
            self.data = data_list

        torch.save((self.data, self.slices), self.processed_paths[0])

    # -----------------------------------------------------
    # 获取单个图，第idx图
    # -----------------------------------------------------
    def get(self, idx):
        """根据索引获取数据"""
        if self.len() == 1:
            return copy.copy(self.data)

        if not hasattr(self, '_data_list') or self._data_list is None:
            self._data_list = self.len() * [None]
        elif self._data_list[idx] is not None:
            return copy.copy(self._data_list[idx])

        """
        separate:
        将单个元素与索引“idx”处的“批处理”分隔开。
        “separate”可以通过单独分离所有的存储来处理同构和异质数据对象。
        此外，“separate”可以处理嵌套的数据结构，如字典和列表。
        """
        data = separate(cls=self.data.__class__, batch=self.data, idx=idx, slice_dict=self.slices, decrement=False, )

        self._data_list[idx] = copy.copy(data)
        return data

    def len(self) -> int:
        if self.slices is None:
            return 1
        for _, value in nested_iter(self.slices):
            return len(value) - 1
        return 0

    def __repr__(self) -> str:
        return f'{self.name}({len(self)})'


# -----------------------------------------------------
# 辅助函数
# -----------------------------------------------------
def cat(seq):
    """Concatenate tensors along the last dimension.

    ``None`` entries are skipped and 1-D tensors are promoted to column
    vectors first. Returns ``None`` when nothing is left to concatenate.
    """
    columns = []
    for item in seq:
        if item is None:
            continue
        columns.append(item.unsqueeze(-1) if item.dim() == 1 else item)
    if not columns:
        return None
    return torch.cat(columns, dim=-1)


def split(data, batch):
    """Build per-graph slice indices for a collated data object.

    Given a big ``data`` object holding every graph and a ``batch`` vector
    mapping each node to its graph id, compute cumulative node/edge offsets,
    re-base ``edge_index`` so every graph's nodes start at zero (in place),
    and return ``(data, slices)`` where ``slices`` maps attribute names to
    their boundary tensors.
    """
    nodes_per_graph = torch.from_numpy(np.bincount(batch))
    node_slice = torch.cat([torch.tensor([0]), nodes_per_graph.cumsum(0)])

    src = data.edge_index[0]
    edges_per_graph = torch.from_numpy(np.bincount(batch[src]))
    edge_slice = torch.cat([torch.tensor([0]), edges_per_graph.cumsum(0)])

    # Edge indices should start at zero for every graph.
    data.edge_index -= node_slice[batch[src]].unsqueeze(0)

    slices = {'edge_index': edge_slice}
    if data.x is not None:
        slices['x'] = node_slice
    else:
        # Imitate `collate` functionality:
        data._num_nodes = torch.bincount(batch).tolist()
        data.num_nodes = batch.numel()
    if data.edge_attr is not None:
        slices['edge_attr'] = edge_slice
    if data.y is not None:
        # Node-level targets slice like nodes; graph-level targets slice 1:1.
        if data.y.size(0) == batch.size(0):
            slices['y'] = node_slice
        else:
            slices['y'] = torch.arange(0, batch[-1] + 2, dtype=torch.long)
    return data, slices


def nested_iter(mapping: Mapping) -> Iterable:
    """Yield ``(key, value)`` leaf pairs from a possibly nested mapping.

    Depth-first traversal: any value that is itself a mapping is recursed
    into, so only non-mapping leaves are ever yielded.
    """
    for key, value in mapping.items():
        if not isinstance(value, Mapping):
            yield key, value
        else:
            yield from nested_iter(value)

# Guard the smoke test so that importing this module does not trigger
# dataset processing (file I/O and torch.save) as a side effect.
if __name__ == '__main__':
    dataset = CustomDatset()
    next(iter(dataset))  # sanity check: fetch the first graph