#!/usr/bin/env python
import sys
sys.path.append('')

import argparse

import torch
import torch.nn.functional as F
from torch_geometric.datasets import *
import torch_geometric.transforms as T
from torch_geometric.nn.inits import normal
from torch_geometric.nn import GCNConv, ChebConv  # noqa
from torch_sparse import SparseTensor

from torch.autograd.grad_mode import enable_grad

import GNNSwitch.nn.functional as mF
from GNNSwitch.myGCNConv import GCNConv as myConv, ChGCNConv as cConv, GCOOConv
from GNNSwitch.graph import CSRGraph, ChunkGraphAlv1, GCOOGraph
from GNNSwitch.dataset import DataFiller, warp_dataset
from GNNSwitch.helpers.normalizers import ChunkGraphGCNNormalizer

import dgl
from dgl.nn import GraphConv

from sklearn.metrics import f1_score

from torch.fx import symbolic_trace

# Command-line interface.
# Fix: the help strings for --run_mode and --iterations were copy-pasted
# from --hidden_size ("hidden layer size"); corrected to describe the flags.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='Planetoid.PubMed', help='dataset to use.')
parser.add_argument('--run_type', default='chunk', help='do sample run')
parser.add_argument('--use_gdc', action='store_true', help='use gdc.')
parser.add_argument('--num_layer', type=int, default=2, help='number of conv layers')
parser.add_argument('--hidden_size', type=int, default=16, help='hidden layer size')
parser.add_argument('--run_mode', choices=['train', 'test', 'both'], default='both',
                    help='which phase(s) to run and time: train, test, or both')
parser.add_argument('--iterations', type=int, default=100,
                    help='number of epochs / timed iterations to run')
args = parser.parse_args()

# Resolve the dataset spec: a trailing ".K" (K an integer) selects graph K from
# a multi-graph dataset; otherwise graph 0 is used.
sname = args.dataset
base, dot, suffix = sname.rpartition(".")
if suffix.isdigit():
   # Fix: the previous str.replace("."+suffix, "") removed EVERY occurrence of
   # the substring (e.g. "A.3.3" -> "A"); rpartition strips only the trailing
   # index. When there is no dot, `base` is empty and the whole name is kept,
   # matching the old behavior for an all-digit dataset name.
   dataset = warp_dataset(base if dot else sname)
   data = dataset[int(suffix)]
else:
   dataset = warp_dataset(sname)
   data = dataset[0]

# Fill in missing masks/labels so training can run on any dataset.
filler = DataFiller(data, 500, 2, [1., 1., 1.], 'random')
filler.fill(data)
input_features = data.num_features
num_classes = max(dataset.num_classes, 2)  # guard against degenerate 1-class data

# Performance counters are disabled for this run (mF.perf_init() would enable them).
mF.no_perf()

if args.use_gdc:
   # Re-weight/sparsify the graph with Graph Diffusion Convolution (PPR-based).
   gdc = T.GDC(
      self_loop_weight=1,
      normalization_in='sym',
      normalization_out='col',
      diffusion_kwargs=dict(method='ppr', alpha=0.05),
      sparsification_kwargs=dict(method='topk', k=128, dim=0),
      exact=True,
   )
   data = gdc(data)

# Build the sparse-graph representation selected by --run_type. 'pyg' needs no
# prebuilt graph (layers consume edge_index directly); 'sppyg' only builds a
# SparseTensor. Unknown run types are rejected up front.
if args.run_type not in ('chunk', 'gcoo', 'dgl', 'my', 'sppyg', 'pyg'):
   raise ValueError(f"unknown run type \"{args.run_type}\"")

if args.run_type == 'chunk':
   normalizer = ChunkGraphGCNNormalizer(data.x.size(0))
   graph = ChunkGraphAlv1(data.edge_index, data.edge_attr,
                          directed=True, weighted=True,
                          normalizer=normalizer).to('cuda')
elif args.run_type == 'gcoo':
   graph = GCOOGraph(data.edge_index, data.edge_attr,
                     directed=True, weighted=True).to('cuda')
elif args.run_type == 'dgl':
   # edge_index rows are swapped here — presumably to match DGL's (src, dst)
   # edge ordering; verify against the GNNSwitch graph conventions.
   graph = dgl.graph((data.edge_index[1], data.edge_index[0]),
                     num_nodes=data.num_nodes, device='cuda')
   graph.formats(['csc', 'csr'])
elif args.run_type == 'my':
   graph = CSRGraph(data.edge_index, data.edge_attr, directed=True, weighted=True)
   graph.sortVertexIndex()
   graph.to('cuda')
elif args.run_type == 'sppyg':
   sp_tensor = SparseTensor.from_edge_index(data.edge_index, data.edge_attr)

# layer builders from different implementations
def put_layer_pyg(input_size, output_size):
   """Build a PyG GCNConv; normalization is skipped when GDC pre-normalized the graph."""
   use_norm = not args.use_gdc
   return GCNConv(input_size, output_size, cached=True, normalize=use_norm)

def put_layer_dgl(input_size, output_size):
   """Build a DGL GraphConv ('both' norm unless GDC already normalized the graph)."""
   if args.use_gdc:
      norm_mode = 'none'
   else:
      norm_mode = 'both'
   return GraphConv(input_size, output_size,
                    norm=norm_mode, allow_zero_in_degree=True)

def put_layer_my(input_size, output_size):
   """Build a GNNSwitch CSR-based GCN layer, mirroring the PyG normalize flag."""
   return myConv(input_size, output_size, normalize=not args.use_gdc)

def put_layer_gcoo(input_size, output_size):
   """Build a GNNSwitch GCOO convolution layer."""
   layer = GCOOConv(input_size, output_size)
   return layer

def put_layer_chunk(input_size, output_size):
   """Build a GNNSwitch chunk-graph convolution layer."""
   layer = cConv(input_size, output_size)
   return layer


class LNet(torch.nn.Module):
   """GCN stack for PyG-style layers invoked as conv(h, edge_index, edge_attr).

   Builds `num_layers` convolutions: input -> hidden, (num_layers - 2) x
   hidden -> hidden, then hidden -> classes. ReLU + dropout(0.5) follow every
   layer except the last. Reads the module-level `data`, `args`,
   `input_features` and `num_classes` globals.
   """

   def __init__(self, layer_fn, num_layers=5):
      super().__init__()
      self.num_layers = num_layers
      self.dropout = torch.nn.Dropout(p=0.5)

      convs = [layer_fn(input_features, args.hidden_size)]
      convs.extend(layer_fn(args.hidden_size, args.hidden_size)
                   for _ in range(max(0, num_layers - 2)))
      convs.append(layer_fn(args.hidden_size, num_classes))
      self.layers = torch.nn.ModuleList(convs)

   def forward(self):
      h = data.x
      last = self.num_layers - 1
      for i, conv in enumerate(self.layers):
         h = conv(h, data.edge_index, data.edge_attr)
         if i != last:
            h = self.dropout(F.relu(h))
      return h

class GNet(LNet):
   """LNet variant for DGL-style layers invoked as conv(graph, features).

   Construction is inherited unchanged from LNet; only the per-layer call
   convention differs (uses the module-level `graph` global).
   """

   def forward(self):
      h = data.x
      final = self.num_layers - 1
      for idx, conv in enumerate(self.layers):
         h = conv(graph, h)
         if idx != final:
            h = self.dropout(F.relu(h))
      return h

class MyNet(LNet):
   """LNet variant for GNNSwitch-style layers invoked as conv(features, graph).

   Construction is inherited unchanged from LNet; only the per-layer call
   convention differs (uses the module-level `graph` global).
   """

   def forward(self):
      h = data.x
      final = self.num_layers - 1
      for idx, conv in enumerate(self.layers):
         h = conv(h, graph)
         if idx != final:
            h = self.dropout(F.relu(h))
      return h


# Pick loss + metric by label shape: 2-D labels mean multi-label
# classification (BCE loss, macro-F1 metric); 1-D labels mean single-label
# (NLL over log-softmax, plain accuracy).
if data.y.dim() > 1:
   loss_fn = torch.nn.BCEWithLogitsLoss()

   def f1(z, y, mask):
      """Macro F1 over the masked nodes; 0 when nothing is predicted positive."""
      pred = z[mask] > 0
      if pred.sum() > 0:
         return f1_score(y[mask].cpu(), pred.cpu(), average='macro')
      return 0
   eval_fn = f1
else:
   def loss_fn(x, y):
      return F.nll_loss(F.log_softmax(x, dim=-1), y)

   def accuracy(z, y, mask):
      """Fraction of masked nodes whose argmax class matches the label."""
      pred = z[mask].max(1)[1]
      return pred.eq(y[mask]).sum().item() / mask.sum().item()
   eval_fn = accuracy
   


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Instantiate the network matching the chosen backend; each pairs a layer
# builder with the Net class using that backend's call convention.
if args.run_type in ('pyg', 'sppyg'):
   model = LNet(put_layer_pyg, args.num_layer)
elif args.run_type == 'chunk':
   torch.ops.tuner.init_stream()
   model = MyNet(put_layer_chunk, args.num_layer)
elif args.run_type == 'dgl':
   model = GNet(put_layer_dgl, args.num_layer)
else:
   model = MyNet(put_layer_my, args.num_layer)
model = model.to(device)
data = data.to(device)

# NOTE(review): weight_decay applies to every parameter here, not only the
# first convolution as an earlier comment claimed.
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)

# Dump the FX-traced computation graph for inspection.
traced_cg = symbolic_trace(model)
print(traced_cg.graph)

# defining hooks
import time
# hook data
start_times = {}
end_times = {}
sum_times = {}
test_timer = 0
test_extra_timer = 0
train_timer = 0

def _timer_start_hook(model, *extras) -> None:
   start_times[model] = time.perf_counter_ns()


def _timer_end_hook(model, *extras) -> None:
   end_times[model] = time.perf_counter_ns()

# register hooks
from torch.nn.modules.module import \
   register_module_forward_pre_hook, register_module_forward_hook
# shook_handle = register_module_forward_pre_hook(_timer_start_hook)
# ehook_handle = register_module_forward_hook(_timer_end_hook)


def train():
   """Run one optimization step of the full-batch model over the train mask.

   Uses the module-level `model`, `optimizer`, `loss_fn` and `data` globals.
   (Per-layer hook-timing bookkeeping that used to live here was commented
   out; re-enable the hooks above to collect it again.)
   """
   model.train()
   optimizer.zero_grad()
   logits = model()
   loss = loss_fn(logits[data.train_mask], data.y[data.train_mask])
   loss.backward()
   optimizer.step()



@torch.no_grad()
def test():
   """Evaluate the model.

   Returns [train, val, test] metric values when --run_mode is 'both';
   otherwise returns an empty list (the forward pass still runs, which is
   what the timing-only modes measure).
   """
   model.eval()
   logits = model()
   accs = []
   if args.run_mode == 'both':
      for _, mask in data('train_mask', 'val_mask', 'test_mask'):
         accs.append(eval_fn(logits, data.y, mask))
   return accs

def _cuda_timed(step):
   """Run `step(epoch)` for args.iterations epochs between CUDA events.

   Wraps the loop in torch.cuda.profiler start/stop and returns the average
   elapsed milliseconds per iteration. Factored out because the identical
   event/profiler boilerplate was previously duplicated across all three
   run modes.
   """
   t1 = torch.cuda.Event(enable_timing=True)
   t2 = torch.cuda.Event(enable_timing=True)
   torch.cuda.profiler.start()
   t1.record()
   for epoch in range(1, args.iterations + 1):
      step(epoch)
   t2.record()
   torch.cuda.synchronize()
   torch.cuda.profiler.stop()
   return t1.elapsed_time(t2) / args.iterations


if args.run_mode == 'both':
   best_val_acc = test_acc = 0

   def _epoch(epoch):
      """Train one epoch, track best validation accuracy, log progress."""
      global best_val_acc, test_acc
      train()
      train_acc, val_acc, tmp_test_acc = test()
      if val_acc > best_val_acc:
         best_val_acc = val_acc
         test_acc = tmp_test_acc
      log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test {:.4f}'
      print(log.format(epoch, train_acc, best_val_acc, test_acc))

   print("elapsed time = {:.7f}".format(_cuda_timed(_epoch)))

elif args.run_mode == 'train':
   # Timing-only training loop; prints the bare per-iteration average (ms).
   print("{:.7f}".format(_cuda_timed(lambda epoch: train())))

else:
   # Timing-only inference loop; prints the bare per-iteration average (ms).
   print("{:.7f}".format(_cuda_timed(lambda epoch: test())))
# print(prof.key_averages().table(sort_by='self_cpu_time_total'))

# print("\ntotal_time = {:.7f} ms".format((t2-t1)/1e6))
# print("train_time = {:.7f} ms".format(train_timer))

# print("  conv forward time: {:.7f} ms ({} runs)".format(*mF.forward_timer_cnt()))
# print("  conv backward time: {:.7f} ms ({} runs)".format(*mF.backward_timer_cnt()))
# print("test time: {:.7f} ms ".format(test_timer))
# print("acc calculation : {:.7f} ms".format(test_extra_timer))
# NOTE(review): dead code — the `and False` guard disables this block, and the
# Net classes store their convolutions in `self.layers` (a ModuleList), not as
# `conv1`/`conv2` attributes, so these reads would raise AttributeError if the
# guard were removed. Update the attribute access or delete the block.
if isinstance(model, GNet) and False: 
   print("conv1 forward time consumption: {:.7f} ms".format(model.conv1.timer))
   print("conv2 forward time consumption: {:.7f} ms".format(model.conv2.timer))
