import numpy as np
import metis
import scipy.sparse as sp
import torch

import lzyutil
from config import args
from data_loader import GraphDataset
import pandas as pd
import file_cache


class ClusteringMachine:
    """Partition a graph into clusters with METIS and expose per-cluster views.

    The partition is computed once at construction time and cached on disk via
    ``file_cache``, keyed by dataset name and cluster count, so repeated runs on
    the same dataset/cluster-count pair reuse the stored result.
    """

    def __init__(self, graph: GraphDataset, cluster_num: int):
        """
        :param graph: dataset wrapper providing the nx graph, its size and its
            adjacency matrix in CSR form
        :param cluster_num: number of partitions to create (1 = no partitioning)
        """
        self.graph = graph
        self.cluster_num = cluster_num
        self._cluster_result = None            # node id -> cluster id (flat list)
        self.cluster_size_map = None           # cluster id -> node count
        self._cluster_member_list_map = None   # cluster id -> list of node ids
        with file_cache.CustomizedCachePath("clusterResult"):
            self._do_cluster()

    def _do_cluster(self):
        """Run (or load from cache) the METIS partition and build the indexes.

        :raises RuntimeError: if a cached result exists but fails to load
        :raises ValueError: if the partition left some requested cluster empty
        """
        cluster_result_id = f"{self.graph.dataset_name}-{self.cluster_num}"
        if file_cache.check(cluster_result_id):
            ok, data = file_cache.load(cluster_result_id)
            if not ok:
                # don't use assert for validation: it vanishes under `python -O`
                raise RuntimeError(
                    f"failed to load cached cluster result '{cluster_result_id}'")
            cuts, parts = data
        elif self.cluster_num != 1:
            (cuts, parts) = metis.part_graph(self.graph.get_nx_graph(), self.cluster_num)
        else:
            # Degenerate case: a single cluster holding every node.
            # NOTE(review): metis returns `cuts` as an int edge-cut count; the
            # empty list here is type-inconsistent with that, but it is kept
            # as-is so previously cached entries stay compatible.
            cuts = []
            parts = [0] * self.graph.get_size()

        # Every requested cluster must be non-empty, or the partition is unusable.
        if len(set(parts)) != self.cluster_num:
            raise ValueError(
                f"illegal clustering: expected {self.cluster_num} non-empty "
                f"clusters, got {len(set(parts))}")

        self._cluster_result = parts
        self.cluster_size_map = [0] * self.cluster_num
        self._cluster_member_list_map = {cluster: [] for cluster in range(self.cluster_num)}
        for node, cluster in enumerate(parts):
            self.cluster_size_map[cluster] += 1
            self._cluster_member_list_map[cluster].append(node)

        print("cluster result stats:")
        for cluster in range(self.cluster_num):
            print(f"cluster {cluster} : {self.cluster_size_map[cluster]} nodes")
        # No-op when the id is already registered (overwrite=False).
        file_cache.register(cluster_result_id, (cuts, parts), overwrite=False)

    def get_cluster_nodes(self, clusters: tuple[int, ...]) -> list:
        """Return the node ids of the given clusters, concatenated in cluster order."""
        return [node
                for cluster in clusters
                for node in self._cluster_member_list_map[cluster]]

    def get_sparse_subgraph(self, clusters: tuple[int, ...]) -> sp.coo_matrix:
        """Return the adjacency submatrix induced by the given clusters, in COO format."""
        sg_node_list = self.get_cluster_nodes(clusters)
        return self.graph.get_adjacency_csr()[np.ix_(sg_node_list, sg_node_list)].tocoo()

    def get_numpy_subgraph(self, clusters: tuple[int, ...]) -> np.ndarray:
        """Return the induced subgraph's adjacency as a dense ndarray."""
        return self.get_sparse_subgraph(clusters).toarray()

    def get_sparse_nl_subgraph(self,
                               cluster: tuple[int, ...]) -> sp.coo_matrix:
        """Return the normalized-Laplacian matrix of the induced subgraph (sparse)."""
        return lzyutil.get_norm_laplacian_sparse(self.get_sparse_subgraph(cluster))

    def get_subgraph_edge_index(self, clusters: tuple[int, ...]) -> torch.Tensor:
        """Return the induced subgraph's edges as a 2 x E (row; col) index tensor."""
        coo = self.get_sparse_subgraph(clusters)
        return torch.from_numpy(np.vstack((coo.row, coo.col)))

    def get_cluster_size(self, clusters: tuple[int, ...]) -> int:
        """Return the total node count across the given clusters."""
        return sum(self.cluster_size_map[cluster] for cluster in clusters)

    def get_cluster_num(self):
        """Return the number of clusters the graph was partitioned into."""
        return self.cluster_num

    def get_all_node_num(self):
        """Return the number of nodes in the full (unpartitioned) graph."""
        return self.graph.get_size()


if __name__ == '__main__':
    # Smoke test: partition the 'jazz' dataset into two clusters.
    cm = ClusteringMachine(GraphDataset('jazz'), 2)
