import sys
sys.path.append('')
import time
import torch as th

from torch.nn.modules.module import \
    register_module_forward_pre_hook, register_module_forward_hook

import dgl
from dgl.nn import GraphConv as GCNRef, GATConv as GATRef

import GNNSwitch.nn.functional as mF
from GNNSwitch.helpers import ChunkGraphGCNNormalizer
from GNNSwitch.myGCNConv import *
from GNNSwitch.myGATConv import *
from GNNSwitch.graph import ChunkGraphAlv1, check_directed, CSRGraph
from GNNSwitch.dataset import warp_dataset

from torch_geometric.nn import GCNConv as RefConv
from torch_geometric.transforms import NormalizeFeatures

from functorch.compile import aot_module, aot_module_simplified, \
      make_boxed_func
from torch.fx import GraphModule

def gcn_layer_test(sname):
   """Compare the custom GCNConv layer against DGL's GraphConv (GCNRef).

   Loads dataset `sname`, runs one forward/backward pass through both
   layers with identical (all-ones) weights, and prints the maximum
   absolute difference of outputs and of weight gradients.
   """
   mF.perf_init()

   # Per-module timing data, filled by the forward hooks below.
   # NOTE(review): the hooks are currently not registered, so the timing
   # loop at the bottom prints nothing; uncomment the register_* lines
   # to enable per-layer timing.
   start_times = {}
   end_times = {}

   def _timer_start_hook(model, *extras) -> None:
      start_times[model] = time.perf_counter_ns()

   def _timer_end_hook(model, *extras) -> None:
      end_times[model] = time.perf_counter_ns()

   # shook_handle = register_module_forward_pre_hook(_timer_start_hook)
   # ehook_handle = register_module_forward_hook(_timer_end_hook)

   dataset = warp_dataset(sname)
   data = dataset[0]

   x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr

   # Custom CSR graph with unit edge weights (graph is treated as directed).
   graph = CSRGraph(edge_index, th.ones(edge_index.size(1), dtype=th.float32),
                    directed=True, weighted=True)
   graph.sortVertexIndex()
   graph.to('cuda')

   # DGL takes (src, dst); the edge index is flipped so message flow
   # matches the custom layer's convention.
   dgl_graph = dgl.graph((edge_index[1], edge_index[0]),
                         device='cuda').formats(['csc', 'csr'])

   my = GCNConv(dataset.num_features, 16, normalize=True, use_cache=True,
                with_bias=True).to('cuda')
   ref = GCNRef(dataset.num_features, 16, norm='both').to('cuda')

   # Seed both layers with identical all-ones weights so the outputs are
   # directly comparable. The custom layer stores the transposed layout.
   test_weight = th.ones_like(ref.weight.data)
   my.linear_weight = Parameter(test_weight.t().clone().detach().requires_grad_(True))
   ref.weight = Parameter(test_weight.clone().detach().requires_grad_(True))

   assert edge_weight is None
   my_x = x.clone().detach().requires_grad_(True).cuda()
   ref_x = x.clone().detach().requires_grad_(True).cuda()
   my_out = my.forward(my_x, graph)
   ref_out = ref.forward(dgl_graph, ref_x, edge_weight=data.edge_attr)

   my_out.mean().backward()
   ref_out.mean().backward()

   print("overall value difference : {}".format((my_out - ref_out).abs().max()))
   # abs() before max() — a plain max() would hide large *negative*
   # deviations (bug fix: was .max() on the signed difference).
   print("overall grad difference : {}".format(
      (my.linear_weight.grad - ref.weight.grad.t()).abs().max()))
   for layer in start_times.keys():
      print("%s : %.8f ms" % (str(layer),
                              (end_times[layer] - start_times[layer]) * 1e-6))



def gat_layer_test(sname):
   """Compare the custom GATConv layer against DGL's GATConv (GATRef).

   Loads dataset `sname`, runs one forward/backward pass through both
   layers with identical (all-ones) fc weights, and prints the maximum
   absolute difference of outputs and of fc-weight gradients. The DGL
   reference is additionally run through functorch's AOT compiler so its
   traced forward graph is printed.
   """
   mF.perf_init()

   # Per-module timing data, filled by the forward hooks below.
   # NOTE(review): the hooks are currently not registered, so the timing
   # loop at the bottom prints nothing; uncomment the register_* lines
   # to enable per-layer timing.
   start_times = {}
   end_times = {}

   def _timer_start_hook(model, *extras) -> None:
      start_times[model] = time.perf_counter_ns()

   def _timer_end_hook(model, *extras) -> None:
      end_times[model] = time.perf_counter_ns()

   # shook_handle = register_module_forward_pre_hook(_timer_start_hook)
   # ehook_handle = register_module_forward_hook(_timer_end_hook)

   def print_fn(fx_model: GraphModule, _):
      # AOT compiler callback: dump the traced FX graph, run it as-is.
      print(fx_model.graph)
      return make_boxed_func(fx_model.forward)

   dataset = warp_dataset(sname)
   data = dataset[0]

   x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr

   graph = CSRGraph(edge_index)
   graph.sortVertexIndex()
   graph.to('cuda')

   # DGL takes (src, dst); the edge index is flipped so message flow
   # matches the custom layer's convention.
   dgl_graph = dgl.graph((edge_index[1], edge_index[0]), device='cuda')
   dgl_graph.formats(['csc', 'csr'])

   my = GATConv(dataset.num_features, 32, 1, residual=True).to('cuda')
   ref = GATRef(dataset.num_features, 32, 1, allow_zero_in_degree=True,
                residual=True).to('cuda')

   # NOTE(review): compiled_my is built only to print the custom layer's
   # traced graph; the forward below still calls `my` directly.
   compiled_my = aot_module(my, fw_compiler=print_fn)
   compiled_ref = aot_module(ref, fw_compiler=print_fn)

   # Seed both layers with identical all-ones fc weights so the outputs
   # are directly comparable. Assign a fresh Parameter to the attribute
   # itself (bug fix: was assigned to `.data`, which discards the
   # Parameter wrapper and matches neither layer's registration).
   test_weight = th.ones_like(ref.fc.weight.data)
   my.fc.weight = Parameter(test_weight.clone().detach().requires_grad_(True))
   ref.fc.weight = Parameter(test_weight.clone().detach().requires_grad_(True))

   assert edge_weight is None
   my_x = x.clone().detach().requires_grad_(True).cuda()
   ref_x = x.clone().detach().requires_grad_(True).cuda()
   my_out = my.forward(graph, my_x)
   ref_out = compiled_ref.forward(dgl_graph, ref_x)

   my_out.mean().backward()
   ref_out.mean().backward()

   print("overall value difference : {}".format((my_out - ref_out).abs().max()))
   # Compare the *gradients*, not the weights (bug fix: the original
   # compared `.data`, which is always ~0 since both layers were seeded
   # from the same test_weight, making the grad check a no-op).
   print("overall grad difference : {}".format(
      (my.fc.weight.grad - ref.fc.weight.grad).abs().max()))
   for layer in start_times.keys():
      print("%s : %.8f ms" % (str(layer),
                              (end_times[layer] - start_times[layer]) * 1e-6))


if __name__ == '__main__':
    # `sys` is already imported at the top of the file; the previous
    # redundant local `import sys` has been removed.
    gat_layer_test(sys.argv[1])