import inspect
import os
import os.path as osp
from typing import Callable, Optional, List, Tuple

import torch
import node2vec as n2v
import networkx as nx
import numpy as np
from torch_geometric.data.in_memory_dataset import InMemoryDataset
from torch_geometric.data import Data, Batch

from torch_geometric.datasets import *
import dgl
from dgl.data.rdf import *
from ogb.nodeproppred import PygNodePropPredDataset, DglNodePropPredDataset
from ogb.linkproppred import PygLinkPropPredDataset, DglLinkPropPredDataset

class CustomDataset(InMemoryDataset):
   """In-memory dataset base class that can synthesize missing graph
   attributes: node features, labels, train/val/test splits and
   self-loops for otherwise-isolated nodes.

   Subclasses are expected to set ``self.nvertex`` (node count) before
   calling the ``_generate_*`` helpers, typically inside ``process()``.
   """

   def _generate_label(self, data):
      """Attach a synthetic label tensor ``data.y``.

      The layout depends on ``self.label_type``:
      * 'single-class' – one integer class id per node
      * 'multi-class'  – multi-hot float matrix for multi-label tasks
      * 'regression'   – ``cls_num``-dimensional real-valued targets

      Raises:
         ValueError: if ``self.label_type`` is none of the above.
      """
      label_type = self.label_type
      cls_num = self.cls_num
      nvertex = self.nvertex

      if label_type == 'single-class':
         # traditional one-hot (class-index) representation
         data.y = torch.randint(0, cls_num, (nvertex,))
      elif label_type == 'multi-class':
         # multi-hot label for multiclassification tasks (~16% of standard
         # normal draws exceed 1 in absolute value)
         data.y = (torch.randn((nvertex, cls_num)).abs() > 1).to(dtype=torch.float32)
      elif label_type == 'regression':
         # regression job, generate `cls_num` dimensions of label
         data.y = torch.randn((nvertex, cls_num))
      else:
         # BUG FIX: the message used to interpolate `cls_num` instead of the
         # offending `label_type`, and the ValueError carried no message
         raise ValueError(f'unsupported label strategy: {label_type}')

      data.num_classes = cls_num

   def _generate_feature(self, data, method: str):
      """Attach a node feature matrix ``data.x`` of width ``self.fet_dim``.

      ``method == 'node2vec'`` learns structural embeddings on a networkx
      copy of the graph; any other value falls back to random features.
      """
      nvertex = self.nvertex
      fet_dim = self.fet_dim
      # build a networkx graph to feed node2vec
      nxg = nx.Graph()
      nxg.add_nodes_from(range(nvertex))
      el = data.edge_index.t().tolist()
      nxg.add_edges_from([tuple(e) for e in el])

      def __calc_embed():
         session = n2v.Node2Vec(nxg, dimensions=fet_dim, num_walks=20, workers=20)
         model = session.fit(window=10, min_count=1, batch_words=4)
         # NOTE(review): this returns gensim KeyedVectors, not a Tensor —
         # confirm downstream code converts `data.x` before training
         return model.wv

      if method == 'node2vec':
         data.x = __calc_embed()
      else:
         data.x = torch.randn(nvertex, fet_dim)
      assert isinstance(data, Data)

   def _random_split(self, data):
      """Create boolean ``train/val/test`` node masks on ``data``.

      ``self.set_config`` holds [train, val, test] fractions (default
      [0.7, 0.2, 0.1]).  The special value [1., 1., 1.] puts every node
      in all three splits.  Existing masks are left untouched.
      """
      nvertex = self.nvertex
      if self.set_config is None:
         config = [0.7, 0.2, 0.1]
      else:
         config = self.set_config

      if 'train_mask' not in dir(data):
         if config == [1., 1., 1.]:
            # degenerate config: all nodes belong to every split
            data.train_mask = torch.ones(nvertex, dtype=torch.bool)
            data.val_mask = torch.ones(nvertex, dtype=torch.bool)
            data.test_mask = torch.ones(nvertex, dtype=torch.bool)
            return
         else:
            data.train_mask = torch.zeros(nvertex, dtype=torch.bool)
            data.val_mask = torch.zeros(nvertex, dtype=torch.bool)
            data.test_mask = torch.zeros(nvertex, dtype=torch.bool)
      else:
         print("dataset already contains data splits, skip split generation")
         return

      # slice a random permutation: [:b0] -> train, [b0:b1] -> val, rest -> test
      config = [int(config[0] * nvertex), int(sum(config[:-1]) * nvertex)]
      seq = torch.randperm(nvertex)
      data.train_mask[seq[:config[0]]] = True
      data.val_mask[seq[config[0]:config[1]]] = True
      data.test_mask[seq[config[1]:]] = True

   def _add_self_loop(self, data):
      """Append a self-loop for every node with no incident edge, so
      isolated nodes still participate in message passing."""
      nvertex = self.nvertex
      # set membership is O(1); the previous per-node list scan was O(n*m)
      appeared = set(torch.unique(data.edge_index).tolist())
      self_loop = [n for n in range(nvertex) if n not in appeared]

      # BUG FIX: match edge_index's dtype (usually int64) instead of the
      # hard-coded int32, which torch.cat may reject or silently promote
      self_loop = torch.tensor([self_loop, self_loop], dtype=data.edge_index.dtype)
      data.edge_index = torch.cat((data.edge_index, self_loop), dim=1)

   def __init__(self, root: str,
                fet_dim: Optional[int],
                cls_num: Optional[int],
                label_type: Optional[str],
                set_config: Optional[List[int]] = None,
                transform: Optional[Callable] = None,
                pre_transform: Optional[Callable] = None,
                pre_filter: Optional[Callable] = None):
      """Record the synthesis parameters, then defer to InMemoryDataset
      (which may immediately trigger ``process()`` in subclasses)."""
      self.fet_dim = fet_dim
      self.cls_num = cls_num
      self.label_type = label_type
      self.set_config = set_config
      super().__init__(root, transform, pre_transform, pre_filter)
   

class DGLDataset(CustomDataset):
   """Wrap a DGL graph as a single-graph in-memory PyG dataset.

   Heterogeneous graphs are flattened with ``dgl.to_homogeneous`` and
   their node/edge type metadata is kept on the resulting ``Data``.
   """

   def __init__(self, dgl_graph,
                transform: Optional[Callable] = None,
                pre_transform: Optional[Callable] = None,
                pre_filter: Optional[Callable] = None):
      # BUG FIX: the callables were previously passed positionally into
      # CustomDataset.__init__'s fet_dim/cls_num/label_type slots, so the
      # transforms never reached InMemoryDataset
      super().__init__(".", None, None, None, None,
                       transform, pre_transform, pre_filter)

      if not dgl_graph.is_homogeneous:
         # flatten the hetero-graph but keep its type bookkeeping around
         self._g = dgl.to_homogeneous(dgl_graph)
         self.data = Data(edge_index=torch.stack(self._g.edges()))
         self.data.ntype_name = list(dgl_graph.ndata['_ID'].keys())
         self.data.etype_name = list(dgl_graph.edata['_ID'].keys())
         self.data.edge_types = self._g.edata['_TYPE']
         self.data.node_types = self._g.ndata['_TYPE']
      else:
         self._g = dgl_graph
         self.data = Data(edge_index=torch.stack(self._g.edges()))

      # single-graph dataset: every 'n...' key spans all nodes and every
      # 'e...' key spans all edges
      self.slices = {}
      for k in self.data.keys:
         if k[0] == 'n':
            self.slices[k] = torch.tensor([0, self.data.num_nodes])
         if k[0] == 'e':
            self.slices[k] = torch.tensor([0, self.data.num_edges])
         

def pygOgbWrapper(sname,
                  transform: Optional[Callable] = None, 
                  pre_transform: Optional[Callable] = None):
   """Load an OGB dataset through its PyG loader and attach the official
   splits to the first graph as ``train_idx`` / ``valid_idx`` / ``test_idx``.

   Raises:
      ValueError: if ``sname`` is not an ``ogbn-*`` or ``ogbl-*`` name.
   """
   set_name = sname[:sname.find('-')]

   if set_name == 'ogbn':
      dataset = PygNodePropPredDataset(sname, 'data', transform, pre_transform)
      splits = dataset.get_idx_split()
   elif set_name == 'ogbl':
      dataset = PygLinkPropPredDataset(sname, 'data', transform, pre_transform)
      splits = dataset.get_edge_split()
   else:
      # BUG FIX: previously fell through with `dataset`/`splits` unbound,
      # crashing with a confusing NameError
      raise ValueError(f"unsupported OGB dataset prefix: {sname}")

   # NOTE(review): PyG's `dataset[0]` may build a fresh Data object per
   # access, in which case these attributes do not persist — confirm
   # against the installed torch_geometric version
   for k, v in splits.items():
      setattr(dataset[0], f"{k}_idx", v)
   return dataset

def dglOgbWrapper(sname):
   """Load an OGB dataset through its DGL loader, wrapped in a one-element
   list so callers can iterate uniformly.

   Raises:
      ValueError: if ``sname`` is not an ``ogbn-*`` or ``ogbl-*`` name.
   """
   set_name = sname[:sname.find('-')]
   if set_name == 'ogbn':
      dataset = DglNodePropPredDataset(sname, 'data')
   elif set_name == 'ogbl':
      dataset = DglLinkPropPredDataset(sname, 'data')
   else:
      # BUG FIX: previously fell through with `dataset` unbound (NameError)
      raise ValueError(f"unsupported OGB dataset prefix: {sname}")
   return [dataset]


class DglRdfDataset(DGLDataset):
   """Load a DGL RDF dataset (AIFB/MUTAG/BGS/AM) by name and expose it
   through the ``DGLDataset`` PyG wrapper."""

   def __init__(self, sname,
                transform: Optional[Callable] = None, 
                pre_transform: Optional[Callable] = None, 
                pre_filter: Optional[Callable] = None):
      # different dataset name between old & new version of dgl;
      # resolve e.g. "AIFB" -> AIFBDataset via the star-imported
      # dgl.data.rdf names instead of the previous eval() call
      try:
         dataset_cls = globals()[sname + "Dataset"]
      except KeyError:
         raise ValueError(f"unknown RDF dataset: {sname}") from None
      dataset = dataset_cls()
      super().__init__(dataset[0], transform, pre_transform, pre_filter)

   
def toSeastar(data, dir_path):
   if data.__module__.find('ogb') >= 0 and \
         type(data).__name__.lower().find('dgl') >= 0:# hetero-graph
      os.system(f"cp -r {data.root}/* {dir_path}")
   else:         
      np.save(osp.join(dir_path, "edges.npy"), data.edge_index)
      # np.save(osp.join(dir_path, "features.npy"), data.x)
      # np.save(osp.join(dir_path, "labels.npy"), data.y)
      # np.save(osp.join(dir_path, "train_mask.npy"), data.train_mask)

def toDGLDataset(data, dir_path):
   """Serialise *data* to ``graphs.bin`` in DGL's binary graph format.

   Accepts a converted DGL dataset (has ``_g``), an original DGL dataset
   (has ``graph``), or a plain PyG ``Data`` object converted in place.
   """
   if hasattr(data, '_g'): # converted dgl dataset
      g = [data._g]
   elif hasattr(data, 'graph'): # original dgl dataset
      g = data.graph
   else: # build DGLGraph inplace
      # NOTE(review): sources come from edge_index[1] and destinations
      # from edge_index[0] — presumably a deliberate direction flip for
      # DGL's message-passing convention; confirm
      g = dgl.graph(
         (data.edge_index[1], data.edge_index[0]), 
         num_nodes=data.num_nodes)
      # BUG FIX: dgl.add_self_loop returns a NEW graph; the result was
      # previously discarded, so no self-loops were ever added
      g = dgl.add_self_loop(g)
      g = [g]

   dgl.save_graphs(osp.join(dir_path, 'graphs.bin'), g)


class MtxDataset(CustomDataset):
   """Dataset backed by a MatrixMarket (.mtx) edge-list file.

   Vertex ids in the file are shifted so the smallest becomes 0 and
   duplicate edges are dropped.  Features/labels are synthesized when
   ``fet_dim`` / ``cls_num`` are given.
   """

   @property
   def processed_dir(self):
      return osp.join(self.root, 'processed', self.name)

   @property
   def processed_file_names(self):
      return f"{self.name}.pt"

   @property
   def raw_file_names(self):
      return f"{self.name}.mtx"
   
   def __init__(self, root, name, 
                fet_dim: Optional[int] = None, 
                cls_num: Optional[int] = None, 
                label_type = "single-class",
                with_header: Optional[bool] = False, with_weight: Optional[bool] = False, 
                set_config: Optional[List[int]] = None,
                transform: Optional[Callable] = None, 
                pre_transform: Optional[Callable] = None):
      """``with_header`` skips the MatrixMarket size line after the '%'
      comments; ``with_weight`` attaches edge weights (from the file when
      consistent, random otherwise)."""
      self.name = name
      self.with_header = with_header
      self.with_weight = with_weight
      super().__init__(root, fet_dim, cls_num, label_type, set_config,
                       transform, pre_transform)
      self.data, self.slices = torch.load(self.processed_paths[0])

   def process(self):
      """Parse the .mtx file and cache a collated ``Data`` object."""
      edges = []
      val = []

      vmax = 0
      vmin = 1 << 64  # sentinel above any realistic vertex id
      with open(self.raw_paths[0]) as f:
         line = f.readline()
         # skip '%' comment lines (including the %%MatrixMarket banner)
         while line.strip()[0] == '%':
            line = f.readline() 
         if self.with_header:
            # skip the "<rows> <cols> <nnz>" size line
            line = f.readline()
         while line:
            edge = line.split()
            s = int(edge[0])
            d = int(edge[1])
            
            vmin = min(vmin, s, d)
            vmax = max(vmax, s, d)
            
            edges.append((s, d))
            
            if len(edge) > 2:
               val.append(float(edge[2]))

            line = f.readline()
         # the `with` block closes the file; an explicit f.close() here
         # was redundant

      # de-duplicate edges; ids are remapped to start at 0
      edges = set(edges)
      src = [x[0] - vmin for x in edges]
      dst = [x[1] - vmin for x in edges]

      data = Data(edge_index=torch.tensor([src, dst]))

      if self.with_weight:
         # NOTE(review): the de-duplication above breaks the edge<->weight
         # correspondence, so file weights are only used when nothing was
         # deduplicated; otherwise weights are drawn at random
         if len(val) == len(src):
            data.edge_attr = torch.tensor(val)
         else:
            data.edge_attr = torch.randn(len(src))

      # BUG FIX: ids were remapped by -vmin, so the node count is
      # vmax - vmin + 1; the previous `vmax + 1` created a phantom extra
      # node for 1-indexed MatrixMarket files
      self.nvertex = vmax - vmin + 1
      self.nedge = len(src)

      data.num_nodes = self.nvertex
      data.num_edges = self.nedge
      if self.fet_dim is not None:
         self._generate_feature(data, 'random')
      if self.cls_num is not None:
         self._generate_label(data)
      self._add_self_loop(data)

      data = data if self.pre_transform is None else self.pre_transform(data)
      torch.save(self.collate([data]), self.processed_paths[0])
      

class DebugWrapperData(CustomDataset):
   # NOTE(review): this class appears broken/unused.  CustomDataset.__init__
   # expects (root, fet_dim, cls_num, label_type, set_config, ...), yet it is
   # called below with (edge_list, num_edges, num_distinct_nodes, weight_list).
   # Likewise _generate_feature expects (data, method) but receives
   # (feat_dim, feature_key).  Confirm the intent before relying on it.
   def __init__(self, edge_list : torch.Tensor, weight_list : Optional[torch.Tensor] = None, feat_dim = None, cls_num = None):
      """Debug-only wrapper around an in-memory edge list — see class note."""
      # edge_list.size(1) = edge count; the set() counts distinct node ids
      super(DebugWrapperData, self).__init__(edge_list, 
                                             edge_list.size(1), 
                                             len(set(edge_list.view(-1).tolist())), 
                                             weight_list)
      
      feature_key = "dummy_feature"
      # NOTE(review): arguments are in the wrong order for _generate_feature
      self._generate_feature(feat_dim, feature_key)


class DataFiller(CustomDataset):
   """Fill in whichever of features / labels / splits are absent from a
   PyG ``Data`` object, using CustomDataset's synthesis helpers."""

   def __init__(self, data: Data,
                fet_dim: int, 
                cls_num: int,
                set_config: Optional[List[float]] = None,
                gen_method: str = 'random'):
      def _missing(attr: str) -> bool:
         # an attribute counts as missing when it is absent or set to None
         return attr not in dir(data) or getattr(data, attr) is None

      # decide up front which attributes fill() must synthesize
      self.do_fet = _missing('x')
      self.do_cls = _missing('y')
      self.do_split = _missing('train_mask')
      self.method = gen_method
      self.nvertex = data.num_nodes
      self.nedge = data.num_edges
      super().__init__("", fet_dim, cls_num, "single-class", set_config)

   def fill(self, data):
      """Generate only the attributes that were missing at construction."""
      if self.do_fet:
         self._generate_feature(data, self.method)
      if self.do_cls:
         self._generate_label(data)
      if self.do_split:
         self._random_split(data)
      

class GNNAdvData(CustomDataset):
   """Dataset backed by a GNNAdvisor-style ``.npz`` dump containing
   ``src_li`` / ``dst_li`` edge arrays and a ``num_nodes`` scalar."""

   def __init__(self, root, name,
                fet_dim: Optional[int] = None, 
                cls_num: Optional[int] = None, 
                label_type = "single-class",
                with_weight: Optional[bool] = False, 
                set_config: Optional[List[int]] = None,
                transform: Optional[Callable] = None, 
                pre_transform: Optional[Callable] = None):
      self.name = name
      self.with_weight = with_weight
      super().__init__(root, fet_dim, cls_num, label_type, set_config,
                       transform, pre_transform)
      self.data, self.slices = torch.load(self.processed_paths[0])
   
   @property
   def processed_dir(self) -> str:
      return osp.join(self.root, 'processed', self.name)

   @property
   def processed_file_names(self):
       return f"{self.name}.pt"

   @property
   def raw_file_names(self):
      # NOTE(review): PyG convention is to return a relative file name
      # here; this returns an absolute path, which process() relies on
      return osp.join(self.raw_dir, f"{self.name}.npz")

   def process(self):
      """Load the .npz edge arrays, synthesize features/labels/splits and
      cache the collated ``Data`` object.

      Raises:
         ValueError: when fet_dim/cls_num are unset or the raw file is
         not a ``.npz``.
      """
      # `assert` is stripped under `python -O`; validate explicitly instead
      if self.fet_dim is None or self.cls_num is None:
         raise ValueError("fet_dim and cls_num are required to process GNNAdvData")
      if not self.raw_file_names.endswith('.npz'):
         raise ValueError("graph file must be a .npz file")

      graph_obj = np.load(self.raw_file_names)
      src_li = graph_obj['src_li']
      dst_li = graph_obj['dst_li']
      self.nvertex = int(graph_obj['num_nodes'])
      self.nedge = len(src_li)
      data = Data(edge_index=torch.tensor(np.array([src_li, dst_li])))

      if self.with_weight:
         # the .npz carries no weights, so draw random ones
         data.edge_attr = torch.randn(data.edge_index.size(1))

      self._generate_feature(data, 'random')
      self._generate_label(data)
      self._random_split(data)

      data = data if self.pre_transform is None else self.pre_transform(data)
      torch.save(self.collate([data]), self.processed_paths[0])


def _generate_rmat_dataset(node_range: List[str], 
                           sparse_range: List[float], 
                           params_range: List[Tuple[float]]):
   """Generate RMAT graphs with the external PaRMAT tool, prepend the
   MatrixMarket header PaRMAT omits, and materialise each graph as an
   MtxDataset.

   node_range: node counts as strings; a 'k' suffix means thousands ("80k")
   sparse_range: average-degree multipliers (edges = nodes * sparsity)
   params_range: (a, b, c) RMAT partition probabilities
   """
   import os
   mtx_root = "/home/limingyi/gnn-workspace/data/Mtx"
   output_file = os.path.join(mtx_root, "raw/{}.mtx")
   parmat_command = "/home/limingyi/PaRMAT/Release/PaRMAT "\
                    "-nVertices {} -nEdges {} -a {} -b {} -c {} "\
                    "-threads 4 -undirected -sorted -noDuplicateEdges -output {}"
   
   def __parse_num(num_str: str):
      # BUG FIX: the previous `while not isdigit(): replace('k','000')`
      # spun forever on any non-digit other than 'k'; expand once and
      # validate instead
      num_str = num_str.replace('k', '000')
      if not num_str.isdigit():
         raise ValueError(f"cannot parse node count: {num_str}")
      return int(num_str)

   for node in node_range:
      for sparsity in sparse_range:
         for params in params_range:
            node_v = __parse_num(node)
            edge_v = int(node_v * sparsity)
            file_name = f"rmat_v{node}_s{sparsity}_{int(params[0]*100)}_{int(params[1]*100)}_{int(params[2]*100)}"
            abs_path = output_file.format(file_name)
            print(f"running :{parmat_command.format(node_v, edge_v, params[0], params[1], params[2], abs_path)}...")
            os.system(parmat_command.format(node_v, edge_v, params[0], params[1], params[2], abs_path))

            # prepend the banner and "<rows> <cols> <nnz>" size line
            # (`with` closes the files; explicit f.close() was redundant)
            with open(abs_path, "r") as f:
               lines = f.readlines()
            # BUG FIX: the banner was misspelt "%%atrixMarket"
            lines = ["%%MatrixMarket matrix coordinate pattern general\n",
                     f"{node_v} {node_v} {edge_v}\n"] + lines

            with open(abs_path, "w") as f:
               f.writelines(lines)

            dataset = MtxDataset(mtx_root, file_name, 128, 2, with_header=True, with_weight=True)
            print(dataset[0])

def warp_dataset(sname):
   """Resolve a dataset spec string to a loaded dataset object.

   Accepted forms: "KarateClub", "Mtx.<name>", "GNNAdvData.<name>",
   RDF names (AIFB/MUTAG/BGS/AM), OGB names ("ogbn-*"/"ogbl-*"), and
   "<PyGClass>.<sub_name>" pairs resolved by constructor introspection.
   """
   parts = sname.split('.')

   if sname == 'KarateClub':
      return KarateClub()
   if "Mtx" in sname:
      return MtxDataset("data/Mtx", parts[1], with_header=True, with_weight=False)
   if "GNNAdvData" in sname:
      return GNNAdvData("data/GNNAdvData", parts[1])
   if sname in ["AIFB", "MUTAG", "BGS", "AM"]:
      return DglRdfDataset(sname)
   if sname in ["ogbl-biokg","ogbn-mag"]:
      return dglOgbWrapper(sname)
   if "ogb" in sname:
      return pygOgbWrapper(sname)

   if len(parts) > 1:
      cls_name, sub_name = parts[0], parts[1]
      if cls_name in ["SHREC2016", "TOSCA"]:
         # NOTE: eval() on an internal spec string — never feed user input
         return eval(cls_name + f"('data/{cls_name}', partiality='holes', category='{sub_name}')")
      # pick the keyword the dataset constructor actually understands
      ctor_args = inspect.getfullargspec(eval(cls_name + ".__init__")).args
      if "homophily" in ctor_args:
         return eval(cls_name + "('data/{}', homophily={})".format(cls_name, sub_name))
      if "category" in ctor_args:
         return eval(cls_name + "('data/{}', category='{}')".format(cls_name, sub_name))
      if "name" in ctor_args:
         return eval(cls_name + "('data/{}', name='{}')".format(cls_name, sub_name))
      raise ValueError(f"Dataset {cls_name} does not support `name' parameter")

   return eval(sname + "('data/{}')".format(sname))

if __name__ == '__main__':
   # Materialise every "flat*" MatrixMarket graph under data/Mtx, then
   # export each one into the target framework's on-disk layout.
   dst_frmwk = 'DGLDataset'
   path = "data/Mtx"

   dss = [x[:-4] for x in os.listdir(path + "/raw") if x.find('flat') == 0]
   for sname in dss:
      # constructing the dataset triggers download/process caching
      dataset = MtxDataset(path, sname, with_header=True, with_weight=False)
      print(dataset[0])

   # export the processed datasets
   dss = ["Mtx." + x for x in dss]

   out_root = dst_frmwk + '_data'
   if not osp.exists(out_root):
      os.mkdir(out_root)

   for ds in dss:
      dataset = warp_dataset(ds)
      print(f"dataset {ds} done...")
      parts = ds.split('.')
      if len(dataset) > 1:
         # collapse multi-graph datasets into a single batched graph
         dataset = [Batch.from_data_list(dataset)]
      for i, data in enumerate(dataset):
         # mirror the spec string as a nested directory hierarchy
         dst_dir = out_root
         for folder in parts:
            dst_dir = osp.join(dst_dir, folder)
            if not osp.exists(dst_dir):
               os.mkdir(dst_dir)

         # BUG FIX: the old `ds + f".{i}" if ... else ""` bound as
         # `(ds + suffix) if ... else ""`, printing an empty name for
         # single-graph datasets
         save_name = ds + (f".{i}" if len(dataset) > 1 else "")
         print(f"saving {save_name} to {dst_dir}")
         if isinstance(data, Data):
            # synthesize any missing features/labels/splits before export
            DataFiller(data, 256, 3).fill(data)
         to = eval('to' + dst_frmwk)
         to(data, dst_dir)
