import torch
from typing import Callable, Optional
import matplotlib.pyplot as plt
from torch_geometric.data import HeteroData, Batch
import networkx as nx
import numpy as np

from .dataset import warp_dataset
torch.ops.load_library("build/libAgradTest.so")
graph_lib = torch.ops.graphdata

def _count_vertex(edge_list, vmin, vmax):
   """
   this function counts all vertex appeared in the edge list to prune singleton vertices
    - this kind of vertex do exsists in real data (e.g. CiteSeer)
    - excessive additional effort is required for the pruning procedure
    - it's better to build a standalone pruning
   """
   def __popc(n:int):
      ret = 0
      while n > 0:
         ret += n & 0x1
         n >>= 1
      return ret

   def __bits(n : int): 
      return n // 64, 1<<(n % 64)

   eview = edge_list.view(-1)
   num_word = (vmax - vmin + 63) // 64
   # last_word = (1 << ((vmax - vmin) % 64)) -1
   bm = [0x0]*num_word
   for i in range(eview.size(0)):
      nw, mask = __bits(int(eview[i]))
      bm[nw] |= mask
   nb = 0
   for i in bm:
      nb += __popc(i)

   return nb
   

class MetaGraph(object):
   """
   Base class for all graph storage formats.

   Holds shared metadata (edge/vertex counts, average degree) plus
   dgl-style feature dictionaries ``ndata`` / ``edata``.  Subclasses must
   implement ``edges()`` returning parallel (src, dst) sequences.
   """
   def __init__(self, edge_list: torch.Tensor, edge_idx: Optional[torch.Tensor] = None):
      # edge_list: integer tensor shaped (2, E) -- one column per edge
      self.nedge = edge_list.size(1)
      self.vmin = int(edge_list.min())
      self.vmax = int(edge_list.max())
      self.nvertex = edge_list.unique().size(0)
      self.avg_deg = self.nedge / self.nvertex
      # using dgl style graph data storage
      self.ndata = {}
      self.edata = {}

   def memory(self):
      """Print the total bytes held by tensor attributes and features."""
      def _tensor_size_in_byte(tn: torch.Tensor):
         return tn.storage().nbytes()

      total = 0
      # BUG FIX: the original iterated ``__dict__.items()`` and
      # isinstance-checked the (name, value) tuples, so tensor attributes
      # were never counted.
      for attr in self.__dict__.values():
         if isinstance(attr, torch.Tensor):
            total += _tensor_size_in_byte(attr)
      for feat in self.edata.values():
         total += _tensor_size_in_byte(feat)
      for feat in self.ndata.values():
         total += _tensor_size_in_byte(feat)
      print("graph taking up {} GBs in total".format(total*1e-9))

   def to(self, dev_name):
      """Move every tensor attribute and feature to ``dev_name``; returns self."""
      for name, val in self.__dict__.items():
         if isinstance(val, torch.Tensor):
            self.__dict__[name] = val.to(dev_name)
      # BUG FIX: Tensor.to() is out-of-place; the original discarded the
      # moved feature tensors, leaving ndata/edata on the old device.
      for key in self.ndata:
         self.ndata[key] = self.ndata[key].to(dev_name)
      for key in self.edata:
         self.edata[key] = self.edata[key].to(dev_name)

      return self

   def cuda(self):
      """Shortcut for ``to('cuda')``."""
      return self.to('cuda')

   def cpu(self):
      """Shortcut for ``to('cpu')``."""
      return self.to('cpu')

   def edges(self):
      """Return parallel (src, dst) sequences; implemented by subclasses."""
      raise NotImplementedError

   def appendNode(self, num_nodes):
      """Grow the vertex count to ``num_nodes`` (never shrinks)."""
      self.nvertex = max(self.nvertex, num_nodes)

   def plot(self, graph_name):
      """Save a sparsity-pattern scatter plot to ``graph_name``.png."""
      src, dst = self.edges()
      dpi = 360
      plt.rcParams['savefig.dpi'] = dpi
      plt.rcParams['figure.figsize'] = (10,10)
      plt.scatter(src, dst, marker='.', s=2)
      plt.savefig(graph_name+".png")

   def histogram(self):
      """
      Return (per-source-vertex degree histogram, its variance).

      PERF FIX: the original called ``list.count`` once per vertex, which is
      O(V*E); a single Counter pass over the source list is O(E + V).
      """
      from collections import Counter
      ss, _ = self.edges()
      hist = np.zeros(self.nvertex, dtype=int)
      for v, c in Counter(ss).items():
         # guard keeps behavior identical to the original range(nvertex) scan
         if 0 <= v < self.nvertex:
            hist[v] = c

      bar = hist.sum() / self.nvertex
      var = ((hist-bar)**2).sum() / self.nvertex

      return hist, var

   def toMtx(self, path, with_header: bool = False):
      """Dump edges in MatrixMarket coordinate format to ``path``."""
      with open(path, "w") as f:
         # additional message for gespmm-benchmark
         f.write("%%MatrixMarket matrix coordinate pattern general\n")
         if with_header:
            f.write("{} {} {}\n".format(self.nvertex, self.nvertex, self.nedge))
         s, d = self.edges()
         for e in zip(s,d):
            f.write("{} {}\n".format(e[0], e[1]))
         # NOTE: the redundant f.close() was dropped; the with-block closes f

   def toGraphViz(self, path):
      """Write the graph as a GraphViz .dot file via networkx."""
      nxg = nx.Graph()
      nxg.add_nodes_from(range(self.nvertex))
      s, d = self.edges()
      nxg.add_edges_from([e for e in zip(s,d)])
      nx.drawing.nx_agraph.write_dot(nxg, path)


class COOGraph(MetaGraph):
   """
   Plain COO (coordinate) edge-list storage: parallel src/dst tensors plus
   per-edge ids.
   """
   def __init__(self, edge_list, edge_idx):
      super(COOGraph, self).__init__(edge_list, edge_idx)

      self.src = edge_list[0,:]
      self.dst = edge_list[1,:]
      self.directed = graph_lib.check_directed(self.src, self.dst)
      if edge_idx is not None:
         self.eids = edge_idx
      else:
         # BUG FIX: ``Tensor.dtype`` is an attribute, not a callable; the
         # original ``torch.arange(...).dtype(torch.int32)`` raised
         # TypeError at runtime.  Pass dtype to arange directly.
         self.eids = torch.arange(self.src.shape[0], dtype=torch.int32)

   def edges(self):
      """Return (sources, destinations) as parallel Python lists."""
      return self.src.tolist(), self.dst.tolist()


class GCOOGraph(MetaGraph):
   """
   Slightly modified chunked ("grouped") COO format produced by the native
   ``coo_to_gcoo`` op.
   """
   def __init__(self, edge_list, edge_idx: Optional[torch.Tensor] = None):
      super(GCOOGraph, self).__init__(edge_list, edge_idx)
      self.directed = graph_lib.check_directed(edge_list[0], edge_list[1])

      # the native op expects int32 vertex ids
      if edge_list.dtype != torch.int32:
         edge_list = edge_list.to(dtype=torch.int32)

      # CONSISTENCY: use the file's ``graph_lib`` alias instead of spelling
      # out torch.ops.graphdata again (same object, one naming convention).
      self.edge_list, self.grp_offset, self.weight_list\
         = graph_lib.coo_to_gcoo(edge_list, edge_idx)

   def edges(self):
      """Return (sources, destinations) as parallel Python lists."""
      return self.edge_list[0,:].tolist(), self.edge_list[1,:].tolist()

      
class CSRGraph(MetaGraph):
   """
   Compressed sparse row graph.

   This class not only implements CSR but also CSC: for directed graphs it
   carries both the forward (row_offset / adj_list) and reverse
   (r_row_offset / r_adj_list) arrays; for undirected graphs the reverse
   views alias the forward ones.
   """

   # BUG FIX: the two legacy helpers below were decorated with
   # ``@DeprecationWarning``, which *replaces* the method with an exception
   # instance and makes it uncallable.  They now emit a real runtime
   # warning and remain callable.
   def _scan_deg(self, edge_list):
      """(deprecated) Pure-python scan of degrees and CSR/CSC offset arrays."""
      import warnings
      warnings.warn("CSRGraph._scan_deg is deprecated; graph_lib.coo_to_csr "
                    "computes the offsets natively",
                    DeprecationWarning, stacklevel=2)
      nv = self.vmax+1
      odeg = [0] * nv
      r_odeg = [0] * nv
      row_offset = [0]
      col_offset = [0]
      for e in edge_list.t():
         v0 = int(e[0]) - self.vmin
         v1 = int(e[1]) - self.vmin
         odeg[v0] += 1
         r_odeg[v1] += 1

      # exclusive prefix sums of the degree arrays give the offsets
      for i in range(1, nv+1):
         row_offset.append(row_offset[i-1] + odeg[i-1])

      for i in range(1, nv+1):
         col_offset.append(col_offset[i-1] + r_odeg[i-1])

      assert row_offset[nv] == self.nedge and col_offset[nv] == self.nedge
      return row_offset, col_offset, odeg, r_odeg

   def _compress_edge(self, edge_list, row_offset, col_offset, edge_data):
      """(deprecated) Pure-python CSR/CSC adjacency construction."""
      import warnings
      warnings.warn("CSRGraph._compress_edge is deprecated; "
                    "graph_lib.coo_to_csr builds the adjacency natively",
                    DeprecationWarning, stacklevel=2)
      adj_list = [-1] * self.nedge if self.directed else [-1]*(2*self.nedge)
      loffset = [0] * self.nvertex
      e_val = ([0.0] * self.nedge) if edge_data else None

      if self.directed:
         r_adj_list = [-1] * self.nedge
         r_loffset = [0] * self.nvertex
         r_e_val = [0.0] * self.nedge if edge_data else None
      else:
         r_adj_list, r_loffset, r_e_val = None, None, None

      el_view = edge_list.t()
      for ei in range(el_view.size(0)):
         v0 = int(el_view[ei, 0]) - self.vmin
         v1 = int(el_view[ei, 1]) - self.vmin
         # update forward graph: v0 -> v1
         adj_list[row_offset[v0]+loffset[v0]] = v1
         if edge_data:
            e_val[row_offset[v0]+loffset[v0]] = edge_data[ei]
         loffset[v0] += 1

         # update backward graph
         if self.directed:
            r_adj_list[col_offset[v1]+r_loffset[v1]] = v0
            if edge_data:
               r_e_val[col_offset[v1]+r_loffset[v1]] = edge_data[ei]
            r_loffset[v1] += 1
         else:
            # undirected: mirror the edge inside the forward arrays
            adj_list[row_offset[v1]+loffset[v1]] = v0
            if edge_data:
               e_val[row_offset[v1]+loffset[v1]] = edge_data[ei]
            loffset[v1] += 1

      return adj_list, e_val, r_adj_list, r_e_val

   def __init__(self, 
     edge_list: torch.Tensor, 
     edge_idx: Optional[torch.Tensor] = None, 
     directed: bool = False):
      """
      Build CSR (and CSC when directed) arrays from a COO edge list.

      Parameters
      ----------
      edge_list : (2, E) or (E, 2) integer tensor of edges (transposed to
         (2, E) if needed and cast to int32 for the native op).
      edge_idx : optional per-edge payload forwarded to coo_to_csr.
      directed : when False the reverse arrays alias the forward ones.
      """
      super(CSRGraph, self).__init__(edge_list, edge_idx)
      self.directed = directed
      self.sorted_cidx = self.sorted_ridx = None

      if edge_list.size(0) != 2:
         edge_list = edge_list.t().contiguous()
         assert edge_list.size(0) == 2
      edge_list = edge_list.to(dtype=torch.int32)

      row_offset, adj_list, edge_val, col_offset, r_adj_list, r_edge_val = graph_lib.coo_to_csr(edge_list, edge_idx, directed)

      self.row_offset = row_offset
      self.adj_list = adj_list
      self.edge_val = edge_val
      # out-degree of vertex i is row_offset[i+1] - row_offset[i]
      self.odegree = row_offset[1:] - row_offset[:-1]

      if directed:
         self.r_row_offset = col_offset
         self.r_adj_list = r_adj_list
         self.r_edge_val = r_edge_val
         self.idegree = col_offset[1:] - col_offset[:-1]
      else:
         # undirected: CSC equals CSR, in-degree equals out-degree
         self.r_row_offset = row_offset
         self.r_adj_list = adj_list
         self.r_edge_val = edge_val
         self.idegree = self.odegree

   def sortVertexIndex(self):
      """
      Populate sorted_ridx / sorted_cidx vertex orderings.

      NOTE(review): the degree sorts are commented out, so both index
      tensors are currently identity permutations -- presumably intentional
      while the reorder path is disabled; confirm before relying on an
      actual degree ordering.
      """
      odeg_tuple = [ (i,int(v)) for i,v in enumerate(self.odegree) ]
      # odeg_tuple.sort(key=lambda t: t[1], reverse=True)
      self.sorted_ridx = torch.tensor([t[0] for t in odeg_tuple], dtype=torch.int32)
      if self.directed:
         ideg_tuple = [ (i,int(v)) for i,v in enumerate(self.idegree) ]
         # ideg_tuple.sort(key=lambda t: t[1], reverse=True)
         self.sorted_cidx = torch.tensor([t[0] for t in ideg_tuple], dtype=torch.int32)
      else:
         self.sorted_cidx = self.sorted_ridx

   def edges(self):
      """Expand CSR back to parallel (src list, dst adjacency) form."""
      src = []
      for i in range(self.nvertex):
         src += [i] * (self.row_offset[i+1] - self.row_offset[i])
      return src, self.adj_list

   def appendNode(self, num_nodes):
      """Grow the offset arrays so the graph holds ``num_nodes`` vertices."""
      if num_nodes > self.nvertex:
         append_nums = num_nodes - self.row_offset.size(0) + 1
         dummy_val = self.row_offset[-1]
         # appended vertices have no edges: repeat the terminal offset
         self.row_offset = torch.concat((self.row_offset, torch.IntTensor([dummy_val]*append_nums)))
         self.r_row_offset = torch.concat((self.r_row_offset, torch.IntTensor([dummy_val]*append_nums)))
         super().appendNode(num_nodes)

   def histogram(self):
      """Return (in-degree histogram, variance) computed from idegree."""
      hist = np.array(self.idegree)
      bar = self.avg_deg
      var = ((hist-bar)**2).sum() / self.nvertex

      return hist, var


class ChunkGraphAlv1(MetaGraph):
   """
   Chunked adjacency layout (version 1) produced by graph_lib.coo_to_chunk.

   The native op partitions edges into three chunk classes -- dense, heavy
   and lite -- plus a destination aggregation buffer; any class may be
   absent (None) for a given graph, in which case its chunk count is 0.
   """
   def __init__(self, 
     edge_list: torch.Tensor, 
     edge_data: Optional[torch.Tensor] = None,
     directed: bool = False, 
     weighted: bool = False,
     reorder: bool = False,
     normalizer: Optional[Callable] = None):
      super(ChunkGraphAlv1, self).__init__(edge_list, edge_data)
      self.weighted = weighted
      self.directed = directed

      # normalize the layout to (2, E) before calling the native op
      if edge_list.size(0) != 2:
         edge_list = edge_list.t().contiguous()
      
      edge_list = edge_list.to(dtype=torch.int32)

      if weighted:
         # default to unit weights when none are supplied
         if edge_data is None:
            edge_data = torch.ones(edge_list.size(1))
         # optional caller-supplied normalization of the edge weights
         if normalizer:
            edge_data = normalizer(edge_list, edge_data)
      
      # native chunking kernel: returns the dense / heavy / lite chunk
      # arrays, destination aggregation buffers and (optionally) the
      # src/dst vertex reorder permutations
      bos, boe, bosb, hte, htw,\
      bosbd, dmd,\
      boel, lsl, ldps, lwl,\
      bodl, ad, sreord, dreord = graph_lib.coo_to_chunk(edge_list, edge_data, self.nvertex, reorder)
      
      
      # dense chunks (sub-block offsets + dense matrix payload)
      if bosbd is None:
         self.blockOffsetSbDn = None
         self.denseMatDat     = None
         self.denseChunks = 0
      else:
         self.blockOffsetSbDn = bosbd
         self.denseMatDat     = dmd
         self.denseChunks = bosbd.size(0)
         
      # heavy chunks (tiled edge lists with per-tile offsets)
      if bos is None:
         self.blockOffsetSdat  = None
         self.blockOffsetEdat  = None 
         self.blockOffsetSb    = None
         self.heavyTiledEdge   = None
         self.heavyTiledWeight = None 
         self.heavyChunks = 0
      else:
         self.blockOffsetSdat  = bos
         self.blockOffsetEdat  = boe
         self.blockOffsetSb    = bosb
         self.heavyTiledEdge   = hte
         self.heavyTiledWeight = htw
         self.heavyChunks = bos.size(0)
      
      # lite chunks (plain src/dst lists)
      if boel is None:
         self.blockOffsetElst = None
         self.liteSrcLst      = None
         self.liteDstPosSb    = None
         self.liteWeightLst   = None
         self.liteChunks = 0
      else:
         self.blockOffsetElst = boel
         self.liteSrcLst      = lsl
         self.liteDstPosSb    = ldps
         self.liteWeightLst   = lwl
         # NOTE(review): lite counts use size(0)-1 (offset-array convention)
         # while dense/heavy use size(0) -- presumably the native op returns
         # them in different conventions; confirm against coo_to_chunk.
         self.liteChunks  = boel.size(0)-1

      # destination aggregation stream buffer
      self.blockOffsetDlst = bodl
      self.aggDst          = ad
      self.strmBufSize     = ad.size(0)

      # vertex reorder permutations (identity-equivalent when reorder=False)
      self.srcReorder = sreord
      self.dstReorder = dreord

   def propagate_params(self):
      """Return every chunk tensor and count in the kernel-argument order."""
      return (self.blockOffsetSdat, self.blockOffsetEdat, self.blockOffsetSb, self.heavyTiledEdge, self.heavyTiledWeight,
            self.blockOffsetSbDn, self.denseMatDat, 
            self.blockOffsetElst, self.liteSrcLst, self.liteDstPosSb, self.liteWeightLst,
            self.blockOffsetDlst, self.aggDst,
            self.denseChunks, self.heavyChunks, self.liteChunks, self.strmBufSize)


"""
   Graph profiling utitlities
"""
def check_directed(edge_index):
   """
   Classify a (2, E) edge_index as "directed" or "undirected".

   Heuristic: if every vertex has equal in- and out-degree the graph is
   reported as undirected.

   BUG FIX: the original sized the degree arrays by the number of *edges*
   (edge_index.size(1)), which raises IndexError for any vertex id >= E.
   They are now sized by the maximum vertex id.  The src/dst rows were also
   label-swapped; harmless for this symmetric check, but fixed for clarity.
   """
   nv = int(edge_index.max()) + 1
   ideg = [0] * nv
   odeg = [0] * nv

   for i in range(edge_index.size(1)):
      src = int(edge_index[0, i])
      dst = int(edge_index[1, i])
      odeg[src] += 1
      ideg[dst] += 1

   undir = all(i == o for i, o in zip(ideg, odeg))
   return "undirected" if undir else "directed"


def compute_GI(hist):
    """Gini index of a (sorted) degree histogram via its cumulative curve."""
    cumulative = np.cumsum(hist)
    lorenz = cumulative / cumulative[-1]
    n = lorenz.size
    gaps = np.arange(n) / n - lorenz
    return 2 * gaps.sum() / n


def compute_Hen(hist, E):
    """Degree entropy of the histogram, scaled by log of the vertex count."""
    probs = hist / (2 * E)
    entropy = np.sum(-probs * np.log(probs))
    return entropy * np.log(hist.size)


def plot_freq(ax, hist):
    """
    Scatter the degree-frequency distribution of ``hist`` on ``ax`` using
    log-log axes, skipping the first (smallest) degree bin.

    Returns (max degree, max frequency).
    """
    degrees, counts = np.unique(hist, return_counts=True)
    ax.scatter(degrees[1:], counts[1:], marker=".", linewidths=0.2)
    ax.set_xscale("log")
    ax.set_yscale("log")
    return degrees.max(), counts.max()


def sweep_dataset(sname, print_headline=False):
   """
   Profile every graph in dataset ``sname`` and print one markdown table
   row per graph (vertices, edges, degree stats, Gini index, entropy).

   Parameters
   ----------
   sname : dataset name understood by ``warp_dataset``; "PPI" is batched
      into a single graph first.
   print_headline : when True, print the markdown table header first.
   """
   import os.path as osp

   def gen_file_path(mtx_folder, fname):
      # map a dotted dataset name onto its on-disk .mtx path
      parts = fname.split('.')
      if parts[0] in ["Mtx", "GNNAdvData"]:
         return osp.join(mtx_folder, parts[0], parts[-1]+'.mtx')
      else:
         return osp.join(mtx_folder, *parts, parts[-1]+'.mtx')

   # FIX: the original probed attributes with ``eval``; getattr is safer
   # and behaves identically (missing attribute -> None -> False).
   is_def = lambda obj, field: getattr(obj, field, None) is not None

   dataset = warp_dataset(sname)
   if sname == "PPI":
      dl = [Batch.from_data_list(dataset)]
   else:
      dl = dataset

   if print_headline:
      print("=== Graph profile ===")

      print("| graph number | vertices | edges | Avg Deg | classes | features |  Deg_var | GI | Hen |\n"
            "| ------------ | -------- | ----- |-------- | ------- | -------- | -------- | -- | --- |", )

   for i, data in enumerate(dl):
      # heterograph entries arrive as (index, data) tuples
      if isinstance(data, tuple):
         i = data[0]
         data = data[1]

      edge_index = data.edge_index.to(dtype=torch.int32)
      g = CSRGraph(edge_index, None, directed=True)
      N = g.nvertex
      E = g.nedge
      avgD = g.avg_deg
      n_cls = dataset.num_classes if is_def(dataset, 'num_classes') else "(NA)"
      n_feats = data.x.size(1) if is_def(data, 'x') else "(NA)"
      hist, var = g.histogram()
      # drop zero-degree bins and sort before the Gini/entropy summaries
      hist = hist[hist.nonzero()]
      hist.sort()
      GI = compute_GI(hist)
      Hen = compute_Hen(hist, g.nedge)

      g_name = sname + f"_{i}" if len(dl) > 1 else sname
      print(f"| {g_name} | {N} | {E} | {avgD} | {n_cls} | {n_feats} | {var} | {GI} | {Hen}")
      # BUG FIX: the original returned here, inside the loop, so only the
      # first graph of a multi-graph dataset was ever profiled.

   