#!/usr/bin/env python3

import os
import os.path as osp
import sys
sys.path.append('')
import math
import random
import time
import argparse
import subprocess as subp
from itertools import chain
from typing import Optional

import matplotlib.pyplot as plt

import torch
import dgl
import dgl.function as fn
import numpy as np
import networkx as nx
from torch.nn import Parameter
import scipy.sparse as sp
from scipy.sparse.linalg import norm as sparse_norm
from torch.utils.data import DataLoader
from torch_geometric.data import Batch
from torch_geometric.loader import LinkNeighborLoader, ClusterLoader, ClusterData,\
   GraphSAINTSampler, GraphSAINTRandomWalkSampler, GraphSAINTEdgeSampler, GraphSAINTNodeSampler
from torch_sparse import SparseTensor

from GNNSwitch.dataset import *
from GNNSwitch.graph import COOGraph, CSRGraph, compute_GI, compute_Hen, plot_freq


def data2scipy(data):
    """Convert a PyG ``Data``-like object's edge_index to a scipy CSR adjacency.

    Every edge gets weight 1.0.  The matrix shape is pinned to
    ``(data.num_nodes, data.num_nodes)``: without an explicit shape,
    ``csr_matrix`` infers it from the largest index present in
    ``edge_index`` and would silently drop trailing isolated nodes.
    """
    row = data.edge_index[0].numpy()
    col = data.edge_index[1].numpy()
    vals = np.ones(data.edge_index.size(1))
    return sp.csr_matrix((vals, (row, col)),
                         shape=(data.num_nodes, data.num_nodes))


def scipy2custom(scipy_mx, FMT):
    """Convert a scipy sparse matrix into a project graph object.

    Parameters
    ----------
    scipy_mx : scipy sparse matrix
        Adjacency matrix to convert.
    FMT : str
        Target format, "COO" or "CSR" (selects COOGraph / CSRGraph).

    Raises
    ------
    KeyError
        If ``FMT`` is not a supported format name.
    """
    # Explicit dispatch instead of eval(): safer and easier to trace.
    graph_classes = {"COO": COOGraph, "CSR": CSRGraph}
    graph_class = graph_classes[FMT]
    coomx = scipy_mx.tocoo()
    edge_list = np.stack([coomx.row, coomx.col])
    return graph_class(torch.Tensor(edge_list))

def dglgraph2custom(dgl_blocks, FMT):
    """Wrap DGL block(s) into project graph objects of the given format.

    Accepts either a single DGL block or a list of blocks and always
    returns a list of ``FMT``-format graph objects.
    """
    graph_class = eval(FMT + "Graph")
    blocks = dgl_blocks if isinstance(dgl_blocks, list) else [dgl_blocks]
    return [graph_class(torch.stack(b.edges())) for b in blocks]


def batch_gen(train_mask, batch_size, shuffle=False):
    """Yield ``(batch_indices, sub_mask)`` pairs over ``train_mask``.

    Parameters
    ----------
    train_mask : torch.Tensor
        1-D mask tensor; batches are slices of it.
    batch_size : int
        Number of indices per batch (the last batch may be smaller).
    shuffle : bool, optional
        If True, iterate indices in a random order.

    Yields
    ------
    (numpy.ndarray, torch.Tensor)
        Node indices of the batch and the corresponding mask entries.
    """
    n = train_mask.size(0)
    if shuffle:
        idx = np.arange(n)
        np.random.shuffle(idx)

    i = 0
    while i < n:
        # Bug fix: clamp the upper bound so the final partial batch does
        # not emit out-of-range indices (np.arange(i, i + batch_size)
        # previously overshot n in the non-shuffle path).
        end = min(i + batch_size, n)
        if shuffle:
            batch = idx[i:end]
            sub_mask = train_mask[batch]
        else:
            sub_mask = train_mask[i:end]
            batch = np.arange(i, end)

        yield batch, sub_mask

        i += batch_size

"""
Copied FastGCN & AS-GCN sampler
"""
    
class Sampler(object):
    """Base class for layer-wise graph samplers (FastGCN / AS-GCN style).

    Parameters
    ----------
    features : array-like or torch.Tensor
        Node feature matrix.
    adj : scipy sparse matrix
        Adjacency matrix of the full graph.
    **kwargs
        input_dim (int), layer_sizes (list[int]), device (torch.device),
        scope (str).
    """

    def __init__(self, features, adj, **kwargs):
        # 'scope' is read below and 'device' is read by subclasses, so both
        # must be accepted here (previously passing scope= tripped the
        # assertion, and device was validated but never stored).
        allowed_kwargs = {'input_dim', 'layer_sizes', 'device', 'scope'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, \
                'Invalid keyword argument: ' + kwarg

        self.input_dim = kwargs.get('input_dim', 1)
        self.layer_sizes = kwargs.get('layer_sizes', [1])
        self.scope = kwargs.get('scope', 'test_graph')
        # Bug fix: self.device was never assigned although
        # Sampler_ASGCN._one_layer_sampling relies on it.
        self.device = kwargs.get('device', torch.device('cpu'))

        self.num_layers = len(self.layer_sizes)

        self.adj = adj
        self.features = features

        self.train_nodes_number = self.adj.shape[0]

    def sampling(self, v_indices):
        """Sample supports for the given output nodes; subclasses implement this."""
        raise NotImplementedError("sampling is not implimented")

    def _change_sparse_to_tensor(self, adjs):
        """Convert a list of scipy sparse matrices to CSRGraph objects on CPU."""
        new_adjs = []
        for adj in adjs:
            new_adjs.append(
                scipy2custom(adj, "CSR").to(torch.device("cpu")))
        return new_adjs


class Sampler_FastGCN(Sampler):
    """FastGCN importance sampler.

    Layers are sampled top-down; nodes are drawn with probability
    proportional to the column norms of the adjacency matrix.
    """

    def __init__(self, pre_probs, features, adj, **kwargs):
        super().__init__(features, adj, **kwargs)
        # NOTE: uniform sampling can reach the same performance; to try it,
        # replace the line below with: col_norm = np.ones(features.shape[0])
        col_norm = sparse_norm(adj, axis=0)
        self.probs = col_norm / np.sum(col_norm)

    def sampling(self, v):
        """Sample per-layer supports for batch nodes ``v``.

        Returns (sampled_X0, all_support, 0); the trailing 0 is a
        placeholder variance loss (unused by FastGCN).
        """
        supports = [None] * self.num_layers

        out_nodes = v
        for layer in reversed(range(self.num_layers)):
            picked, layer_support = self._one_layer_sampling(
                out_nodes, self.layer_sizes[layer])
            supports[layer] = layer_support
            out_nodes = picked

        supports = self._change_sparse_to_tensor(supports)
        x0 = self.features[out_nodes]
        return x0, supports, 0

    def _one_layer_sampling(self, v_indices, output_size):
        # NOTE: the FastGCN paper samples neighbors without reference to
        # v_indices, but the official TF implementation uses v_indices to
        # filter out disconnected nodes; we do the same here.
        valid = v_indices[v_indices < self.adj.shape[0]]
        support = self.adj[valid, :]
        neighbors = np.nonzero(np.sum(support, axis=0))[1]
        probs = self.probs[neighbors]
        probs = probs / np.sum(probs)
        picks = np.random.choice(np.arange(np.size(neighbors)),
                                 output_size, True, probs)

        u_sampled = neighbors[picks]
        support = support[:, u_sampled]
        picked_probs = probs[picks]

        # Importance-weight each sampled column by 1 / (q(u) * output_size).
        support = support.dot(sp.diags(1.0 / (picked_probs * output_size)))
        return u_sampled, support


class Sampler_ASGCN(Sampler, torch.nn.Module):
    """Adaptive sampler (AS-GCN): learns attention weights w1/w2 that shape
    the per-layer sampling distribution, and returns a variance loss term."""

    def __init__(self, pre_probs, features, adj, **kwargs):
        # pre_probs is unused; kept for signature parity with Sampler_FastGCN.
        super().__init__(features, adj, **kwargs)
        torch.nn.Module.__init__(self)
        self.feats_dim = features.shape[1]

        # attention weights w1 is also wg
        self.w1 = Parameter(torch.FloatTensor(self.feats_dim, 1))
        self.w2 = Parameter(torch.FloatTensor(self.feats_dim, 1))
        self._reset_parameters()

    def _reset_parameters(self):
        # Uniform init in [-1/sqrt(d), 1/sqrt(d)], d = feature dimension.
        stdv = 1.0 / math.sqrt(self.w1.size(0))
        self.w1.data.uniform_(-stdv, stdv)
        self.w2.data.uniform_(-stdv, stdv)

    def sampling(self, v):
        """
        Inputs:
            v: batch nodes list
        Returns:
            (sampled_X0, all_support, loss) -- bottom-layer sampled node
            features, per-layer support matrices, and the variance loss.
        """
        v = torch.LongTensor(v)
        all_support = [[]] * self.num_layers
        all_p_u = [[]] * self.num_layers

        # sample top-1 layer
        # all_x_u[self.num_layers - 1] = self.features[v]
        cur_out_nodes = v
        # Sample layers from the output layer down to the input layer.
        for i in range(self.num_layers-1, -1, -1):
            cur_u_sampled, cur_support, cur_var_need = \
                self._one_layer_sampling(cur_out_nodes,
                                         output_size=self.layer_sizes[i])

            all_support[i] = cur_support
            all_p_u[i] = cur_var_need

            cur_out_nodes = cur_u_sampled

        loss = self._calc_variance(all_p_u)
        sampled_X0 = self.features[cur_out_nodes]
        return sampled_X0, all_support, loss

    def _calc_variance(self, var_need):
        # NOTE: it's useless in this implementation for the three datasets
        # only calc the variane of the last layer
        u_nodes, p_u = var_need[-1][0], var_need[-1][1]
        p_u = p_u.reshape(-1, 1)
        feature = self.features[u_nodes]
        means = torch.sum(feature, 0)
        feature = feature - means
        var = torch.mean(torch.sum(torch.mul(feature, feature) * p_u, 0))
        return var

    def _one_layer_sampling(self, v_indices, output_size):
        # Restrict the adjacency to rows of the current output nodes and to
        # columns of their (connected) neighbors.
        support = self.adj[v_indices, :]
        neis = np.nonzero(np.sum(support, axis=0))[1]
        support = support[:, neis]
        # NOTE: change the sparse support to dense, mind the matrix size
        # NOTE(review): relies on self.device being set externally -- the
        # Sampler base class shown in this file does not assign it; confirm.
        support = support.todense()
        support = torch.FloatTensor(support).to(self.device)
        h_v = self.features[v_indices]
        h_u = self.features[neis]

        # Attention score g(v, u) = relu(h_v w1 + h_u w2 + 1) / |neis|.
        attention = torch.mm(h_v, self.w1) + \
            torch.mm(h_u, self.w2).reshape(1, -1) + 1
        attention = (1.0 / np.size(neis)) * torch.relu(attention)

        p1 = torch.sum(support * attention, 0)
        # sampling only done in CPU
        numpy_p1 = p1.to('cpu').data.numpy()
        numpy_p1 = numpy_p1 / np.sum(numpy_p1)
        sampled = np.random.choice(np.array(np.arange(np.size(neis))),
                                   size=output_size,
                                   replace=True,
                                   p=numpy_p1)

        u_sampled = neis[sampled]
        support = support[:, sampled]
        sampled_p1 = p1[sampled]

        # Importance-weight each sampled column by 1 / (p(u) * output_size).
        t_diag = torch.diag(1.0 / (sampled_p1 * output_size))
        support = torch.mm(support, t_diag)

        return u_sampled, support, (neis, p1 / torch.sum(p1))


class VRGCNSampler(object):
    """Neighbor sampler for VR-GCN: for each layer it pairs a fanout-limited
    sampled block with the full in-neighborhood ("history") block."""

    def __init__(self, g, fanouts):
        self.g = g
        self.fanouts = fanouts

    def sample_blocks(self, seeds):
        """Build per-layer (sampled, history) DGL blocks for ``seeds``."""
        seeds = torch.LongTensor(seeds)
        blocks, hist_blocks = [], []
        for fanout in self.fanouts:
            # Sample up to `fanout` in-neighbors per seed; the history
            # frontier keeps every in-edge.
            frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout)
            hist_frontier = dgl.in_subgraph(self.g, seeds)
            # Compact each frontier into a bipartite block for message passing.
            block = dgl.to_block(frontier, seeds)
            hist_block = dgl.to_block(hist_frontier, seeds)
            # The source nodes of this block become the next layer's seeds.
            seeds = block.srcdata[dgl.NID]

            blocks.insert(0, block)
            hist_blocks.insert(0, hist_block)
        # In principle both the sampled graph and the history graph are
        # needed for the computation.
        return blocks, hist_blocks



class SAINTSampler:
    """
    Description
    -----------
    SAINTSampler implements the sampler described in GraphSAINT. This sampler implements offline sampling in
    pre-sampling phase as well as fully offline sampling, fully online sampling in training phase.
    Users can conveniently set param 'online' of the sampler to choose different modes.
    Parameters
    ----------
    node_budget : int
        the expected number of nodes in each subgraph, which is specifically explained in the paper. Actually this
        param specifies the times of sampling nodes from the original graph with replacement. The meaning of edge_budget
        is similar to the node_budget.
    dn : str
        name of dataset.
    g : DGLGraph
        the full graph.
    train_nid : list
        ids of training nodes.
    num_workers_sampler : int
        number of processes to sample subgraphs in pre-sampling procedure using torch.dataloader.
    num_subg_sampler : int, optional
        the max number of subgraphs sampled in pre-sampling phase for computing normalization coefficients in the beginning.
        Actually this param is used as ``__len__`` of sampler in pre-sampling phase.
        Please make sure that num_subg_sampler is greater than batch_size_sampler so that we can sample enough subgraphs.
        Defaults: 10000
    batch_size_sampler : int, optional
        the number of subgraphs sampled by each process concurrently in pre-sampling phase.
        Defaults: 200
    online : bool, optional
        If `True`, we employ online sampling in training phase. Otherwise employing offline sampling.
        Defaults: True
    num_subg : int, optional
        the expected number of sampled subgraphs in pre-sampling phase.
        It is actually the 'N' in the original paper. Note that this param is different from the num_subg_sampler.
        This param is just used to control the number of pre-sampled subgraphs.
        Defaults: 50
    full : bool, optional
        True if the number of subgraphs used in the training phase equals to that of pre-sampled subgraphs, or
        ``math.ceil(self.train_g.num_nodes() / self.node_budget)``. This formula takes the result of A divided by B as
        the number of subgraphs used in the training phase, where A is the number of training nodes in the original
        graph, B is the expected number of nodes in each pre-sampled subgraph. Please refer to the paper to check the
        details.
        Defaults: True
    Notes
    -----
    For parallelism of pre-sampling, we utilize `torch.DataLoader` to concurrently speed up sampling.
    The `num_subg_sampler` is the return value of `__len__` in pre-sampling phase. Moreover, the param `batch_size_sampler`
    determines the batch_size of `torch.DataLoader` in internal pre-sampling part. But note that if we wanna pass the
    SAINTSampler to `torch.DataLoader` for concurrently sampling subgraphs in training phase, we need to specify
    `batch_size` of `DataLoader`, that is, `batch_size_sampler` is not related to how sampler works in training procedure.
    """

    def __init__(
        self,
        node_budget,
        dn,
        g,
        train_nid,
        num_workers_sampler,
        num_subg_sampler=10000,
        batch_size_sampler=200,
        online=True,
        num_subg=50,
        full=True,
    ):
        self.g = g.cpu()
        self.node_budget = node_budget
        self.train_g: dgl.graph = g.subgraph(train_nid)
        self.dn, self.num_subg = dn, num_subg
        # Visit counters accumulated during pre-sampling; they feed the
        # GraphSAINT aggregation/loss normalization coefficients.
        self.node_counter = torch.zeros((self.train_g.num_nodes(),))
        self.edge_counter = torch.zeros((self.train_g.num_edges(),))
        self.prob = None
        self.num_subg_sampler = num_subg_sampler
        self.batch_size_sampler = batch_size_sampler
        self.num_workers_sampler = num_workers_sampler
        # While False, __len__/__getitem__/__collate_fn__ run in
        # pre-sampling mode; flipped to True at the end of __init__.
        self.train = False
        self.online = online
        self.full = full

        assert (
            self.num_subg_sampler >= self.batch_size_sampler
        ), "num_subg_sampler should be greater than batch_size_sampler"
        graph_fn, norm_fn = self.__generate_fn__()

        if os.path.exists(graph_fn):
            # Reuse cached pre-sampled subgraphs and normalization terms.
            self.subgraphs = np.load(graph_fn, allow_pickle=True)
            aggr_norm, loss_norm = np.load(norm_fn, allow_pickle=True)
        else:
            os.makedirs("./subgraphs/", exist_ok=True)

            self.subgraphs = []
            self.N, sampled_nodes = 0, 0
            # N: the number of pre-sampled subgraphs

            # Employ parallelism to speed up the sampling procedure
            # (self acts as its own Dataset here: in pre-sampling mode
            # __getitem__ returns a freshly sampled subgraph).
            loader = DataLoader(
                self,
                batch_size=self.batch_size_sampler,
                shuffle=True,
                num_workers=self.num_workers_sampler,
                collate_fn=self.__collate_fn__,
                drop_last=False,
            )

            t = time.perf_counter()
            for num_nodes, subgraphs_nids, subgraphs_eids in loader:

                self.subgraphs.extend(subgraphs_nids)
                sampled_nodes += num_nodes

                # Count how often each node appears across subgraphs.
                _subgraphs, _node_counts = np.unique(
                    np.concatenate(subgraphs_nids), return_counts=True
                )
                sampled_nodes_idx = torch.from_numpy(_subgraphs)
                _node_counts = torch.from_numpy(_node_counts)
                self.node_counter[sampled_nodes_idx] += _node_counts

                # Count how often each edge appears across subgraphs.
                _subgraphs_eids, _edge_counts = np.unique(
                    np.concatenate(subgraphs_eids), return_counts=True
                )
                sampled_edges_idx = torch.from_numpy(_subgraphs_eids)
                _edge_counts = torch.from_numpy(_edge_counts)
                self.edge_counter[sampled_edges_idx] += _edge_counts

                self.N += len(subgraphs_nids)  # number of subgraphs
                # Stop once enough nodes (num_subg graph-fulls) were sampled.
                if sampled_nodes > self.train_g.num_nodes() * num_subg:
                    break

            print(f"Sampling time: [{time.perf_counter() - t:.2f}s]")
            np.save(graph_fn, self.subgraphs)

            t = time.perf_counter()
            aggr_norm, loss_norm = self.__compute_norm__()
            print(f"Normalization time: [{time.perf_counter() - t:.2f}s]")
            np.save(norm_fn, (aggr_norm, loss_norm))

        self.train_g.ndata["l_n"] = torch.Tensor(loss_norm)
        self.train_g.edata["w"] = torch.Tensor(aggr_norm)
        self.__compute_degree_norm()  # basically normalizing adjacent matrix

        random.shuffle(self.subgraphs)
        self.__clear__()
        print("The number of subgraphs is: ", len(self.subgraphs))

        self.train = True

    def __len__(self):
        # Pre-sampling mode exposes a fixed virtual length; training mode
        # exposes either all pre-sampled subgraphs or the paper's estimate.
        if self.train is False:
            return self.num_subg_sampler
        else:
            if self.full:
                return len(self.subgraphs)
            else:
                return math.ceil(self.train_g.num_nodes() / self.node_budget)

    def __getitem__(self, idx):
        # Only when sampling subgraphs in training procedure and need to utilize sampled subgraphs and we still
        # have sampled subgraphs we can fetch a subgraph from sampled subgraphs
        if self.train:
            if self.online:
                subgraph = self.__sample__()
                return dgl.node_subgraph(self.train_g, subgraph)
            else:
                return dgl.node_subgraph(self.train_g, self.subgraphs[idx])
        else:
            # Pre-sampling mode: return raw node/edge id lists for counting.
            subgraph_nids = self.__sample__()
            num_nodes = len(subgraph_nids)
            subgraph_eids = dgl.node_subgraph(
                self.train_g, subgraph_nids
            ).edata[dgl.EID]
            return num_nodes, subgraph_nids, subgraph_eids

    def __collate_fn__(self, batch):
        if (
            self.train
        ):  # sample only one graph each epoch; batch_size in training phase is 1
            return batch[0]
        else:
            # Pre-sampling mode: merge the per-item (count, nids, eids)
            # triples into one batch-level triple.
            sum_num_nodes = 0
            subgraphs_nids_list = []
            subgraphs_eids_list = []
            for num_nodes, subgraph_nids, subgraph_eids in batch:
                sum_num_nodes += num_nodes
                subgraphs_nids_list.append(subgraph_nids)
                subgraphs_eids_list.append(subgraph_eids)
            return sum_num_nodes, subgraphs_nids_list, subgraphs_eids_list

    def __clear__(self):
        # Drop pre-sampling bookkeeping to free memory.
        self.prob = None
        self.node_counter = None
        self.edge_counter = None
        self.g = None

    def __generate_fn__(self):
        # Subclasses return the (subgraph cache, norm cache) file paths.
        raise NotImplementedError

    def __compute_norm__(self):
        # Avoid division by zero for nodes/edges never sampled.
        self.node_counter[self.node_counter == 0] = 1
        self.edge_counter[self.edge_counter == 0] = 1

        loss_norm = self.N / self.node_counter / self.train_g.num_nodes()

        # Aggregation norm per edge: node count of its destination divided
        # by its own edge count (computed with DGL's v_div_e built-in).
        self.train_g.ndata["n_c"] = self.node_counter
        self.train_g.edata["e_c"] = self.edge_counter
        self.train_g.apply_edges(fn.v_div_e("n_c", "e_c", "a_n"))
        aggr_norm = self.train_g.edata.pop("a_n")

        self.train_g.ndata.pop("n_c")
        self.train_g.edata.pop("e_c")

        return aggr_norm.numpy(), loss_norm.numpy()

    def __compute_degree_norm(self):
        # 1/in-degree normalization (clamped to avoid division by zero) for
        # both the training subgraph and the full graph.
        self.train_g.ndata[
            "train_D_norm"
        ] = 1.0 / self.train_g.in_degrees().float().clamp(min=1).unsqueeze(1)
        self.g.ndata["full_D_norm"] = 1.0 / self.g.in_degrees().float().clamp(
            min=1
        ).unsqueeze(1)

    def __sample__(self):
        # Subclasses return an array of sampled node ids.
        raise NotImplementedError


class SAINTNodeSampler(SAINTSampler):
    """
    Description
    -----------
    GraphSAINT with node sampler.
    Parameters
    ----------
    node_budget : int
        the expected number of nodes in each subgraph, which is specifically explained in the paper.
    """

    def __init__(self, node_budget, **kwargs):
        self.node_budget = node_budget
        super().__init__(node_budget=node_budget, **kwargs)

    def __generate_fn__(self):
        # Cache file names encode dataset name, node budget and subgraph count.
        graph_fn = "./subgraphs/{}_Node_{}_{}.npy".format(
            self.dn, self.node_budget, self.num_subg
        )
        norm_fn = "./subgraphs/{}_Node_{}_{}_norm.npy".format(
            self.dn, self.node_budget, self.num_subg
        )
        return graph_fn, norm_fn

    def __sample__(self):
        # Node probability proportional to (clamped) in-degree; built lazily.
        if self.prob is None:
            self.prob = self.train_g.in_degrees().float().clamp(min=1)

        picked = torch.multinomial(
            self.prob, num_samples=self.node_budget, replacement=True
        )
        return picked.unique().numpy()


class SAINTEdgeSampler(SAINTSampler):
    """
    Description
    -----------
    GraphSAINT with edge sampler.
    Parameters
    ----------
    edge_budget : int
        the expected number of edges in each subgraph, which is specifically explained in the paper.
    """

    def __init__(self, edge_budget, **kwargs):
        self.edge_budget = edge_budget
        self.rng = np.random.default_rng()

        # Each sampled edge contributes (up to) two nodes.
        super(SAINTEdgeSampler, self).__init__(
            node_budget=edge_budget * 2, **kwargs
        )

    def __generate_fn__(self):
        """Return (subgraph cache path, norm cache path) for this config."""
        graph_fn = os.path.join(
            "./subgraphs/{}_Edge_{}_{}.npy".format(
                self.dn, self.edge_budget, self.num_subg
            )
        )
        norm_fn = os.path.join(
            "./subgraphs/{}_Edge_{}_{}_norm.npy".format(
                self.dn, self.edge_budget, self.num_subg
            )
        )
        return graph_fn, norm_fn

    # TODO: only sample half edges, then add another half edges
    # TODO: use numpy to implement cython sampling method
    def __sample__(self):
        """Sample edges with prob ~ 1/deg(u) + 1/deg(v); return their endpoints."""
        if self.prob is None:
            src, dst = self.train_g.edges()
            src_degrees, dst_degrees = self.train_g.in_degrees(
                src
            ).float().clamp(min=1), self.train_g.in_degrees(dst).float().clamp(
                min=1
            )
            prob_mat = 1.0 / src_degrees + 1.0 / dst_degrees
            prob_mat = sp.csr_matrix(
                (prob_mat.numpy(), (src.numpy(), dst.numpy()))
            )
            # The edge probability here only contains that of edges in upper triangle adjacency matrix
            # Because we assume the graph is undirected, that is, the adjacency matrix is symmetric. We only need
            # to consider half of edges in the graph.
            self.prob = torch.tensor(sp.triu(prob_mat).data)
            self.prob /= self.prob.sum()
            # Bug fix: the edge-id -> endpoints table must be built from the
            # SAME upper-triangular edge set that self.prob indexes.  Using
            # the full matrix's nonzero() (as before) misaligns sampled edge
            # ids with their endpoints.
            self.adj_nodes = np.stack(sp.triu(prob_mat).nonzero(), axis=1)

        sampled_edges = np.unique(
            dgl.random.choice(
                len(self.prob),
                size=self.edge_budget,
                prob=self.prob,
                replace=False,
            )
        )
        sampled_nodes = np.unique(
            self.adj_nodes[sampled_edges].flatten()
        ).astype("long")
        return sampled_nodes


class GraphSAINTWeightedNodeSampler(GraphSAINTSampler):
    """GraphSAINT node sampler whose node probability is proportional to the
    squared row count (out-degree) of the adjacency -- a degree-weighted
    multinomial over nodes."""

    def __init__(self, data,
                 batch_size: int, num_steps: int = 1,
                 sample_coverage: int = 0, save_dir: Optional[str] = None,
                 log: bool = True, **kwargs):
        # Lazily-computed sampling distribution (see __sample_nodes__).
        self.prob = None
        super().__init__(data, batch_size, num_steps, sample_coverage,
                         save_dir, log, **kwargs)

    def __getitem__(self, idx):
        # Resample until the induced subgraph contains at least one edge.
        while True:
            node_idx = self.__sample_nodes__(self.__batch_size__).unique()
            adj, _ = self.adj.saint_subgraph(node_idx)
            if adj.nnz() != 0:
                break
        return node_idx, adj

    def __sample_nodes__(self, batch_size):
        if self.prob is None:
            deg_sq = self.adj.storage.rowcount() ** 2
            self.prob = deg_sq / deg_sq.sum()

        return torch.multinomial(
            self.prob, num_samples=batch_size, replacement=True
        ).unique()

class GraphSAINTEdgeSamplerM(GraphSAINTSampler):
    r"""The GraphSAINT edge sampler class (see
    :class:`~torch_geometric.loader.GraphSAINTSampler`), modified to draw
    edges without replacement via ``dgl.random.choice``.
    """
    def __sample_nodes__(self, batch_size):
        # Sample ``batch_size`` edges, then return their endpoint nodes.
        row, col, _ = self.adj.coo()

        deg_in = 1. / self.adj.storage.colcount()
        deg_out = 1. / self.adj.storage.rowcount()
        # NOTE(review): (1. / deg_in) undoes the inversion above, making
        # `prob` proportional to colcount[row] + rowcount[col] (i.e. node
        # degrees), whereas the GraphSAINT paper samples edges with
        # probability ~ 1/deg(u) + 1/deg(v).  Confirm whether this double
        # inversion is intentional.
        prob = (1. / deg_in[row]) + (1. / deg_out[col])

        # Parallel multinomial sampling (without replacement)
        # https://github.com/pytorch/pytorch/issues/11931#issuecomment-625882503
        # nonsense if original graph's self.E is too large
        
        edge_sample = torch.unique(dgl.random.choice(self.E, batch_size, replace=False, prob=prob))
                
        source_node_sample = col[edge_sample]
        target_node_sample = row[edge_sample]

        return torch.cat([source_node_sample, target_node_sample], -1)

"""
实验目标：尝试不同工作的采样算法在不同数据集上生成的子图是否有独立的特点
 - 多个数据集的接入
 - 不同采样算法的dataloader的接入
 - 采样结果导出为矩阵格式，profile
"""

def FastGCN_sampler(data, batch_size, layer_sizes=(128, 128)):
    """Run FastGCN layer-wise sampling over ``data``.

    Parameters
    ----------
    data : torch_geometric.data.Data
        Graph whose adjacency matrix is sampled.
    batch_size : int
        Output-node batch size per sampling step.
    layer_sizes : sequence of int, optional
        Number of nodes sampled per layer (tuple default avoids the
        mutable-default-argument pitfall).

    Returns
    -------
    list
        CSRGraph supports collected from at most 21 sampled batches.
    """
    sp_mat = data2scipy(data)
    # Bug fix: torch.cpu is a module, not a device object; use
    # torch.device('cpu') instead.
    sampler = Sampler_FastGCN(None, torch.ones(data.num_nodes), sp_mat,
                              layer_sizes=list(layer_sizes),
                              device=torch.device('cpu'))

    sub_adjs = []
    for i, (batch, train_mask) in enumerate(
            batch_gen(torch.ones(sp_mat.shape[0]), batch_size, shuffle=True)):
        sub_feat, sub_adj, _ = sampler.sampling(batch)
        sub_adjs += sub_adj
        if i == 20:
            break

    return sub_adjs


def ASGCN_sampler():
    """Placeholder for AS-GCN sampling (see Sampler_ASGCN); not implemented yet."""
    pass
    

def Sage_sampler(data, batch_size=512, layer_sizes=[25, 10]):
    """Sample neighborhoods GraphSAGE-style via LinkNeighborLoader and
    return CSRGraph subgraphs for at most 21 batches."""
    loader = LinkNeighborLoader(data,
                                num_neighbors=layer_sizes,
                                batch_size=batch_size,
                                shuffle=True)

    subgraphs = []
    for step, batch in enumerate(loader):
        subgraphs.append(CSRGraph(batch.edge_index))
        if step == 20:
            break

    return subgraphs


def VRGCN_sampler(data, batch_size=1000, fanout=[1,1]):
    """Sample VR-GCN blocks over the whole node set and return both the
    sampled and the history graphs as CSRGraph objects (at most 21 batches
    of each, sampled graphs first)."""
    g = dgl.graph((data.edge_index[1], data.edge_index[0]), num_nodes=data.num_nodes)

    # Full-graph sampling; switch to data.train_mask.nonzero().squeeze()
    # for train-set-only sampling.
    train_nid = np.arange(data.num_nodes)

    sampler = VRGCNSampler(g, fanout)

    loader = DataLoader(
        dataset=train_nid,
        batch_size=batch_size,
        collate_fn=sampler.sample_blocks,
        shuffle=True,
        drop_last=False,
    )

    sampled_graph, hist_graph = [], []
    # Only the sampled blocks are used in the forward pass; the history
    # blocks are collected but currently unused.
    for step, (blocks, hist_blocks) in enumerate(loader):
        sampled_graph += dglgraph2custom(blocks, "CSR")
        hist_graph += dglgraph2custom(hist_blocks, "CSR")
        if step == 20:
            break

    return sampled_graph + hist_graph

def ClusterGCN_sampler(data, num_parts, sdir):
    """Partition ``data`` into ``num_parts`` clusters (Cluster-GCN style)
    and return each cluster as a CSRGraph."""
    parts = ClusterData(data, num_parts=num_parts, recursive=False, save_dir=sdir)
    loader = ClusterLoader(parts, batch_size=1, shuffle=True, num_workers=1)
    return [CSRGraph(batch.edge_index) for batch in loader]

def GraphSAINT_V_sampler_dgl(data,
                         dn,
                         node_budget=500,
                         num_workers=8):
    """GraphSAINT node sampling via the DGL-based SAINTNodeSampler;
    returns the sampled subgraphs as CSRGraph objects."""
    g = dgl.graph((data.edge_index[1], data.edge_index[0]), num_nodes=data.num_nodes)
    # Sample over every node: some datasets (e.g. Planetoid) are
    # semi-supervised and their published train split is very small, so
    # train_nid = data.train_mask.nonzero().squeeze() would be tiny.
    train_nid = np.arange(data.num_nodes)
    saint_sampler = SAINTNodeSampler(node_budget,
                                     dn=dn,
                                     g=g,
                                     train_nid=train_nid,
                                     num_workers_sampler=0,
                                     batch_size_sampler=200)
    loader = DataLoader(
        saint_sampler,
        collate_fn=saint_sampler.__collate_fn__,
        batch_size=1,
        shuffle=True,
        num_workers=num_workers,
        drop_last=False,
    )

    subgraphs = []
    for subg in loader:
        subgraphs += dglgraph2custom(subg, "CSR")

    return subgraphs

def GraphSAINT_V_sampler(data, batch_size, save_dir, num_steps=20, sample_coverage=0):
    """Weighted-node GraphSAINT sampling (PyG-based); returns the sampled
    subgraphs as CSRGraph objects."""
    loader = GraphSAINTWeightedNodeSampler(data,
                                           batch_size=batch_size,
                                           num_steps=num_steps,
                                           sample_coverage=sample_coverage,
                                           save_dir=save_dir,
                                           num_workers=4)
    return [CSRGraph(batch.edge_index) for batch in loader]

def GraphSAINT_E_sampler(data, batch_size, save_dir, num_steps=20, sample_coverage=0):
    """Edge-based GraphSAINT sampling (PyG-based); returns the sampled
    subgraphs as CSRGraph objects."""
    loader = GraphSAINTEdgeSamplerM(data,
                                    batch_size=batch_size,
                                    num_steps=num_steps,
                                    sample_coverage=sample_coverage,
                                    save_dir=save_dir,
                                    num_workers=4)
    return [CSRGraph(batch.edge_index) for batch in loader]

def GraphSAINT_RW_sampler(data, batch_budget, save_dir, num_steps=20, sample_coverage=0):
    """Random-walk GraphSAINT sampling; ``batch_budget`` is a
    (num_roots, walk_length) pair.  Returns CSRGraph subgraphs."""
    num_roots, walk_length = batch_budget
    loader = GraphSAINTRandomWalkSampler(data,
                                         batch_size=num_roots,
                                         walk_length=walk_length,
                                         num_steps=num_steps,
                                         sample_coverage=sample_coverage,
                                         save_dir=save_dir,
                                         num_workers=4)
    return [CSRGraph(batch.edge_index) for batch in loader]



# Module-level CLI definition; parse_args() is expected to be invoked by the
# entry point (not visible in this chunk).
parser=argparse.ArgumentParser()
parser.add_argument('--dataset', default='Planetoid.PubMed', help='dataset to use.')
parser.add_argument('--scale', help='sample size to original')

def save_freq(fig, pic_path, pic_name):
    """Save ``fig`` as ``<pic_path>/<pic_name>.png``, creating the directory.

    Bug fix: ``os.mkdir`` fails for nested paths (callers pass paths like
    "./workspace/pics/...") and races when two processes hit the check at
    once; ``os.makedirs(..., exist_ok=True)`` handles both.
    """
    os.makedirs(pic_path, exist_ok=True)
    fig.savefig(osp.join(pic_path, f"{pic_name}.png"))
    

def benchmark4SAINT(method):
    """Profile GraphSAINT variant ``method`` ("V", "E" or "RW") on several
    datasets: print a markdown table of degree statistics (Gini index,
    entropy, variance) and save degree-frequency plots."""
    # Per-dataset sampling budgets, positionally aligned with Data below.
    config = {
        "V" : [6000, 8000, 8000, 5000, 4500],
        "E" : [4000, 60000, 6000, 2500, 2000],
        "RW" : [ (3000, 2), (6000, 2), (2000,4), (1250,2), (1500,2) ]
    }

    Data = ["PPI", "Flickr", "Reddit", "Yelp", "AmazonProducts"]
    fn = eval(f"GraphSAINT_{method}_sampler")

    for i, sname in enumerate(Data):
        dataset = warp_dataset(sname)
        if len(dataset) > 1:
            data = Batch.from_data_list(dataset)
        else:
            data = dataset[0]
        res = []
        # res[0] is the original graph; sampled subgraphs are appended after.
        res.append(CSRGraph(data.edge_index))
        
        res += fn(data, config[method][i], dataset.processed_dir)

        x_max = y_max = 0.
        fig, axes = plt.subplots(ncols=2)
        fig.set_size_inches(10,4)
        # NOTE(review): this inner loop reuses `i`, shadowing the dataset
        # index; harmless only because enumerate re-binds `i` before its
        # next outer use -- worth renaming.
        for i, g in enumerate(res):
            hist, var = g.histogram()
            hist = hist[hist.nonzero()]
            hist.sort()
            GI = compute_GI(hist)
            Hen = compute_Hen(hist, g.nedge)

            if i == 0:
                # Original graph: left subplot + markdown table header row.
                x, y = plot_freq(axes[0], hist)
                print(f"| {sname} | N | Navg | Deg_var | GI | Hen |\n"
                        "|:-------:|:--:|:---:|:-------:|:--:|:---:|\n" 
                        f"|original| {g.nvertex} | {g.avg_deg:.4f} | {var:.4f} | {GI:.4f} | {Hen:.4f} |")
            else:
                # Sampled subgraphs share the right subplot.
                x, y = plot_freq(axes[1], hist)
                x_max = max(x, x_max)
                y_max = max(y, y_max)
                print(f"|sample {i} | {hist.size} | {(g.nedge / hist.size):.4f} | {var:.4f}"
                        f"| {GI:.4f} | {Hen:.4f} |")
        
        save_freq(fig, f"./workspace/pics/GraphSAINT_{method}_sampler", sname)
        print(f"\n![](./pics/GraphSAINT_{method}_sampler/{sname}.png)\n")


def benchmark(fn_name):
    """Benchmark one named sampler over every dataset found on disk.

    For each dataset, prints a markdown table of degree statistics for the
    original graph and each sampled subgraph, and saves the degree-frequency
    plots under ``./workspace/pics/<fn_name>/``.

    Parameters
    ----------
    fn_name : str
        Name of a sampler function visible in this module
        (e.g. ``"FastGCN_sampler"``); resolved with ``eval``.
    """
    masked = ["PPI", "Reddit", "AmazonProducts", "Flickr", "FacebookPagePage"]
    # Auto-discover per-collection datasets from the on-disk layout.
    masked += ["Amazon."+x for x in os.listdir("data/Amazon")]
    masked += ["GNNAdvData."+x for x in os.listdir("data/GNNAdvData/processed")]
    masked += ["Twitch."+x for x in os.listdir("data/Twitch")]
    masked += ["Planetoid."+x for x in os.listdir("data/Planetoid")]
    masked += ["SNAPDataset."+x for x in os.listdir("data/SNAPDataset")]
    masked += ["Coauthor."+x for x in os.listdir("data/Coauthor")]

    # Distinct name on purpose: the original `fn = ...` shadowed the
    # module-level `dgl.function as fn` import.
    sampler_fn = eval(fn_name)
    for sname in masked:
        dataset = warp_dataset(sname)
        # Multi-graph datasets are merged into one big graph.
        if len(dataset) > 1:
            data = Batch.from_data_list(dataset)
        else:
            data = dataset[0]
        res = [CSRGraph(data.edge_index)]  # index 0 == the original graph

        # Per-sampler hyper-parameters (batch size / fan-out).
        if "Cluster" in fn_name:
            res += sampler_fn(data, 20, dataset.processed_dir)
        elif "FastGCN" in fn_name:
            res += sampler_fn(data, min(1024, data.num_nodes//20), [200, 200])
        elif "Sage" in fn_name:
            res += sampler_fn(data, min(512, data.num_nodes//20), [25, 10]) # offical sampling param
        elif "VRGCN" in fn_name:
            res += sampler_fn(data, min(1000, data.num_nodes//20), [2, 2]) 

        fig, axes = plt.subplots(ncols=2)
        fig.set_size_inches(10, 4)
        for i, g in enumerate(res):
            hist, var = g.histogram()
            hist = hist[hist.nonzero()]  # drop zero-degree vertices
            hist.sort()
            GI = compute_GI(hist)
            Hen = compute_Hen(hist, g.nedge)

            if i == 0:
                # Original graph: left subplot + markdown table header row.
                plot_freq(axes[0], hist)
                print(f"| {sname} | N | Navg | Deg_var | GI | Hen |\n"
                        "|:-------:|:--:|:---:|:-------:|:--:|:---:|\n" 
                        f"|original| {g.nvertex} | {g.avg_deg:.4f} | {var:.4f} | {GI:.4f} | {Hen:.4f} |")
            else:
                # All samples share the right subplot.
                plot_freq(axes[1], hist)
                print(f"|sample {i} | {hist.size} | {(g.nedge / hist.size):.4f} | {var:.4f}"
                        f"| {GI:.4f} | {Hen:.4f} |")

        save_freq(fig, f"./workspace/pics/{fn_name}", sname)
        print(f"\n![](./pics/{fn_name}/{sname}.png)\n")

def draw_sampler_on_one_fig(subs):
    """Plot degree-frequency curves of every sampler on one figure per dataset.

    One row per sampler, up to *subs* randomly chosen subgraphs per row.
    Every generated subgraph is also dumped to ``./workspace/samples`` in
    MatrixMarket format.

    Parameters
    ----------
    subs : int
        Number of subplot columns, i.e. the maximum number of sampled
        subgraphs shown per sampler.
    """
    # Sampler invocations as source strings; `data`/`dataset` in each string
    # are resolved against the locals at the eval() call site below.
    samplers = [
        "ClusterGCN_sampler(data, 30, dataset.processed_dir)",
        "FastGCN_sampler(data, min(1024, data.num_nodes//20), [100, 100])",
        "Sage_sampler(data, min(256, data.num_nodes//20), [25, 10])",
        "VRGCN_sampler(data, min(1000, data.num_nodes//20), [2, 2]) ",
        "GraphSAINT_V_sampler(data, min(2000, data.num_nodes//30), dataset.processed_dir)",
        "GraphSAINT_E_sampler(data, min(2000, data.num_nodes//30), dataset.processed_dir)",
        "GraphSAINT_RW_sampler(data, (min(2000, data.num_nodes//30),2), dataset.processed_dir)"]

    data_names = ["PPI", "Flickr", "Reddit", "Yelp", "AmazonProducts"]
    for sname in data_names:
        fig, axes = plt.subplots(nrows=len(samplers), ncols=subs, layout="tight")
        fig.set_size_inches([20,15])
        for i, call_str in enumerate(samplers):
            dataset = warp_dataset(sname)
            # Multi-graph datasets are merged into one big graph.
            if len(dataset) > 1:
                data = Batch.from_data_list(dataset)
            else:
                data = dataset[0]

            all_samples = eval(call_str)
            sampler_name = call_str[:call_str.find("(")]
            # Dump every sample for the external format benchmark.
            for idx, g in enumerate(all_samples):
                g.toMtx(f"./workspace/samples/{sname}-{sampler_name}-s{idx}.mtx", True)

            # Fix: the original random.sample(range(len(all_samples)), subs)
            # raised ValueError when a sampler produced fewer than `subs`
            # subgraphs; cap the draw at the available count.
            samples = random.sample(all_samples, min(subs, len(all_samples)))

            axes[i][0].set_ylabel(sampler_name)
            for j, g in enumerate(samples):
                hist, _ = g.histogram()
                plot_freq(axes[i][j], hist)

        fig.suptitle(f"{sname} sampling")
        save_freq(fig, f"./workspace/pics/", f"{sname}_all")

def benchmark_samples_on_mformats(feature_size):
    """Run the external ``mat_benchmark`` binary on every sample matrix.

    For each ``.mtx`` file in ``./workspace/samples``, invokes the benchmark
    executable with the given feature width and prints the last five fields
    of its final output line as one markdown table row.
    """
    sample_dir = './workspace/samples'
    for fname in os.listdir(sample_dir):
        cmd = ["./build/torch_grad_test/mat_benchmark",
               osp.join(sample_dir, fname), "1", str(feature_size), "50"]
        try:
            raw = subp.check_output(cmd)
        except subp.CalledProcessError:
            # Best-effort: report the failure and keep benchmarking the rest.
            print(f'benchmarking {fname} failed', file=sys.stderr)
            continue

        # The benchmark's summary is the last line of its stdout.
        fields = str(raw, encoding='utf-8').strip().split("\n")[-1].split()
        row = "".join(f"{fields[k]}|" for k in range(5))
        print(f"|{fname}-{feature_size}|{row}", flush=True)
        

def test():
    """CLI entry point: sample the --dataset graph with Sage_sampler and
    plot/print degree statistics for every sampled subgraph.

    Saves the resulting figure as ``./<dataset>.png``.
    """
    args = parser.parse_args()
    dataset = warp_dataset(args.dataset)
    res = []
    # Multi-graph datasets are merged into one big graph.
    if len(dataset) > 1:
        data = Batch.from_data_list(dataset)
    else:
        data = dataset[0]
    # res += GraphSAINT_E_sampler(data, 500, dataset.processed_dir)
    res += Sage_sampler(data, batch_size=128)

    # Near-square subplot grid, one axis per sample.
    cols = math.ceil(math.sqrt(len(res)))
    rows = math.ceil(len(res) / cols)
    fig, axes = plt.subplots(nrows=rows, ncols=cols, figsize=(30,20))
    # Fix: plt.subplots returns a bare Axes, a 1-D or a 2-D array depending
    # on rows/cols; the old chain(*axes) crashed for the 1x1 and single-row
    # cases.  asarray(...).reshape(-1) flattens all three uniformly.
    axes = np.asarray(axes).reshape(-1)
    for ax, g in zip(axes, res):
        hist, var = g.histogram()
        hist = hist[hist.nonzero()]  # drop zero-degree vertices
        hist.sort()
        GI = compute_GI(hist)
        Hen = compute_Hen(hist, g.nedge)
        plot_freq(ax, hist)
        print(f"({hist.size}, {g.nedge / hist.size}, {var}, {GI}, {Hen})")

    save_freq(fig, ".", args.dataset)

if __name__ == "__main__":
    # Alternative entry points kept for manual benchmarking runs;
    # uncomment exactly one of the sections below instead of test().
    # for sampler in [
    #     "FastGCN_sampler",
    #     # "ClusterGCN_sampler",
    #     # "VRGCN_sampler",
    #     # "Sage_sampler", 
    # ]:
    #     print(f"# {sampler}\n")
    #     benchmark(sampler)

    # for sampler in [
    #     "V", 
    #     "E",
    #     "RW", 
    # ]:
    #     print(f"# GraphSAINT_{sampler}_sampler")
    #     benchmark4SAINT(sampler)
    
    # Default action: single-dataset sampling driven by the CLI flags.
    test()
    # draw_sampler_on_one_fig(7)
    # Sweep feature sizes 8, 16, ..., 256 over the dumped sample matrices.
    # i = 8
    # print("|sample name|coo|csr|bsr(2)|bsr(4)|bsr(8)|\n",
    #     "|:---------:|:--:|:--:|:--:|:--:|:--:|")
    # while i < 512:
    #     benchmark_samples_on_mformats(i)
    #     i *= 2

        