import sys
sys.path.append('')


import os.path as osp
import argparse

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GATConv
from sklearn.metrics import f1_score

import dgl
from dgl.nn import GATConv as dglGAT

from GNNSwitch.graph import CSRGraph
from GNNSwitch.myGATConv import GATConv as myGAT
from GNNSwitch.dataset import DataFiller, warp_dataset

# Command-line configuration for the benchmark run.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='Planetoid.PubMed', help='dataset to use.')
parser.add_argument('--run_type', choices=['pyg', 'dgl', 'my'], default='pyg', help='GNN backend to benchmark')
parser.add_argument('--num_layer', type=int, default=3, help='number of conv layers')
parser.add_argument('--layer_heads', type=str, default='4,4,6', help="attention heads for each layer")
parser.add_argument('--hidden_size', type=int, default=16, help='hidden layer size')
parser.add_argument('--run_mode', choices=['train', 'test', 'both'], default='both', help='which phase(s) to run and time')
parser.add_argument('--iterations', type=int, default=100, help='number of timed iterations (epochs)')
args = parser.parse_args()

# Resolve the dataset name.  A trailing numeric component selects one graph
# out of a multi-graph dataset, e.g. "PPI.3" -> graph index 3 of "PPI".
# Use rpartition instead of str.replace: replace("."+suffix, "") would strip
# EVERY occurrence of the suffix (e.g. "Foo.2.2" -> "Foo"), not just the last.
sname = args.dataset
base, _, last = sname.rpartition(".")
if last.isdigit() and base:
   dataset = warp_dataset(base)
   data = dataset[int(last)]
else:
   dataset = warp_dataset(sname)
   data = dataset[0]

# Fill the dataset with synthetic masks/labels so training can run.
# NOTE(review): DataFiller argument semantics (500, 2, weights, 'random')
# are project-defined — confirm against GNNSwitch.dataset.
filler = DataFiller(data, 500, 2, [1., 1., 1.], 'random')
filler.fill(data)
input_features = data.num_features
num_classes = max(dataset.num_classes, 2)   # clamp: at least binary

# Build the backend-specific graph representation on the GPU.
if args.run_type == 'pyg':
   # PyG layers consume the raw edge_index tensor directly.
   graph = data.edge_index.cuda()
elif args.run_type == 'dgl':
   # NOTE(review): source/destination rows are swapped relative to PyG's
   # (row0=src, row1=dst) convention — presumably to match DGL's message
   # direction; confirm against the DGL GATConv expectations.
   src, dst = data.edge_index[1], data.edge_index[0]
   graph = dgl.graph((src, dst), num_nodes=data.num_nodes, device='cuda')
   graph.formats(['csc', 'csr'])
elif args.run_type == 'my':
   # Custom CSR graph: register nodes, sort indices, then move to GPU.
   graph = CSRGraph(data.edge_index, weighted=True)
   graph.appendNode(data.num_nodes)
   graph.sortVertexIndex()
   graph.cuda()

class GATConvPacked_pyg(nn.Module):
   """PyG GATConv wrapped with an optional residual connection and activation.

   The output is reshaped to (N, head, out_channel) so the caller can choose
   between concatenating heads (flatten) and averaging them, mirroring the
   return convention of DGL's GATConv.
   """

   def __init__(self, in_channel, out_channel, head, residual, activation=None):
      super().__init__()
      self.conv = GATConv(in_channel, out_channel, heads=head, add_self_loops=False)
      self.head = head
      self.out_channel = out_channel
      self._activation = activation
      if residual:
         # Project the input only when its width differs from the conv output.
         if in_channel != out_channel * head:
            self.res = nn.Linear(in_channel, out_channel * head, bias=False)
         else:
            self.res = nn.Identity()
      else:
         self.res = None

   def reset_parameter(self):
      # NOTE(review): only re-initializes the residual projection; the inner
      # GATConv keeps whatever initialization it already has.
      gain = nn.init.calculate_gain('relu')
      if isinstance(self.res, nn.Linear):
         nn.init.xavier_normal_(self.res.weight, gain=gain)

   def forward(self, edge_index, x):
      """Apply attention conv (+ residual, + activation); shape (N, head, out_channel)."""
      h = self.conv(x, edge_index)
      if self.res is not None:
         h = h + self.res(x)
      if self._activation is not None:
         h = self._activation(h)
      return h.view(-1, self.head, self.out_channel)
      

def put_layer_pyg(input_size, output_size, head, residual=True, activation=None):
   """Factory for a packed PyG GAT layer; signature matches the dgl/my factories."""
   layer = GATConvPacked_pyg(input_size, output_size, head, residual, activation)
   return layer

def put_layer_dgl(input_size, output_size, head, residual=True, activation=None):
   """Factory for a DGL GAT layer; signature matches the pyg/my factories."""
   return dglGAT(
      input_size,
      output_size,
      num_heads=head,
      allow_zero_in_degree=True,
      residual=residual,
      activation=activation,
   )

def put_layer_my(input_size, output_size, head, residual=True, activation=None):
   """Factory for the custom GNNSwitch GAT layer; signature matches the other factories."""
   return myGAT(
      input_size,
      output_size,
      num_heads=head,
      residual=residual,
      activation=activation,
   )

class Net(nn.Module):
   """Stack of GAT layers built by a backend-specific `layer_fn` factory.

   Hidden layers concatenate their attention heads (flatten); the final
   layer averages heads to produce `num_classes` logits per node.
   Reads module-level `args`, `input_features` and `num_classes`.
   """

   def __init__(self, layer_fn, num_layer, heads):
      super().__init__()
      # The indexing below (heads[i-1], heads[-2], heads[-1]) is only
      # consistent when there is exactly one head count per layer and at
      # least an input and an output layer — fail loudly otherwise.
      if num_layer < 2:
         raise ValueError("Net requires at least 2 layers (input + output)")
      if len(heads) != num_layer:
         raise ValueError(
            "expected one head count per layer: got {} heads for {} layers".format(
               len(heads), num_layer))
      self.num_layer = num_layer
      self.layers = nn.ModuleList()
      # Input layer: dataset features -> hidden, ELU activation, no residual.
      self.layers.append(layer_fn(input_features, args.hidden_size, heads[0], False, F.elu))
      # Hidden layers: concatenated heads of the previous layer feed the next.
      for i in range(1, num_layer - 1):
         self.layers.append(layer_fn(args.hidden_size * heads[i - 1],
                                     args.hidden_size,
                                     heads[i], activation=F.elu))
      # Output layer: class logits; heads are averaged in forward().
      self.layers.append(layer_fn(args.hidden_size * heads[-2],
                                  num_classes, heads[-1]))

   def forward(self, g, x):
      h = x
      for i, conv in enumerate(self.layers):
         h = conv(g, h)
         if i == self.num_layer - 1:
            h = h.mean(1)      # average heads on the output layer
         else:
            h = h.flatten(1)   # concatenate heads for hidden layers
      return h


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# NOTE(review): loss_op is never used below (loss_fn is selected instead);
# kept to avoid breaking anything out of view — candidate for removal.
loss_op = torch.nn.BCEWithLogitsLoss()

# One attention-head count per layer, e.g. "4,4,6".
heads = [int(x) for x in args.layer_heads.split(',')]
if args.run_type == 'pyg':
   model = Net(put_layer_pyg, args.num_layer, heads).to(device)
elif args.run_type == 'dgl':
   model = Net(put_layer_dgl, args.num_layer, heads).to(device)
elif args.run_type == 'my':
   model = Net(put_layer_my, args.num_layer, heads).to(device)

data.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)

# Pick loss/metric by label shape: multi-label (2-D y) uses BCE-with-logits
# and macro-F1; single-label uses softmax cross-entropy and accuracy.
if data.y.dim() > 1:
   loss_fn = torch.nn.BCEWithLogitsLoss()

   def f1(z, y, mask):
      """Macro F1 over masked nodes; 0 when nothing is predicted positive."""
      pred = z[mask] > 0
      return f1_score(y[mask].cpu(), pred.cpu(), average='macro') if pred.sum() > 0 else 0

   eval_fn = f1
else:
   def loss_fn(x, y):
      # log_softmax + nll_loss == cross-entropy on raw logits.
      return F.nll_loss(F.log_softmax(x, dim=-1), y)

   def accuracy(z, y, mask):
      """Fraction of masked nodes whose argmax prediction matches y."""
      pred = z[mask].max(1)[1]
      return pred.eq(y[mask]).sum().item() / mask.sum().item()

   eval_fn = accuracy

def train():
   """Run one optimization step over the nodes in data.train_mask."""
   model.train()
   optimizer.zero_grad()
   out = model(graph, data.x)
   loss = loss_fn(out[data.train_mask], data.y[data.train_mask])
   loss.backward()
   optimizer.step()


@torch.no_grad()
def test():
   """Evaluate the model.

   Returns [train, val, test] metrics when run_mode is 'both'; otherwise an
   empty list (pure-inference timing runs skip metric computation).
   """
   model.eval()
   logits = model(graph, data.x)
   accs = []
   if args.run_mode == 'both':
      accs = [eval_fn(logits, data.y, mask)
              for _, mask in data('train_mask', 'val_mask', 'test_mask')]
   return accs


def _timed_loop(step_fn):
   """Run step_fn args.iterations times under CUDA-event timing.

   Returns the mean per-iteration elapsed time in milliseconds.  The
   profiler start/stop and the final synchronize mirror each other so the
   events have completed before elapsed_time is read.
   """
   start = torch.cuda.Event(enable_timing=True)
   end = torch.cuda.Event(enable_timing=True)
   torch.cuda.profiler.start()
   start.record()
   for _ in range(args.iterations):
      step_fn()
   end.record()
   torch.cuda.synchronize()
   torch.cuda.profiler.stop()
   return start.elapsed_time(end) / args.iterations


if args.run_mode == 'train':
   print("{:.7f}".format(_timed_loop(train)))
elif args.run_mode == 'test':
   print("{:.7f}".format(_timed_loop(test)))
else:
   best_val_acc = test_acc = 0
   epoch = 0

   def _epoch():
      # One train+eval epoch; tracks the best validation accuracy and the
      # test accuracy achieved at that point, printing progress each epoch.
      global best_val_acc, test_acc, epoch
      epoch += 1
      train()
      train_acc, val_acc, tmp_test_acc = test()
      if val_acc > best_val_acc:
         best_val_acc = val_acc
         test_acc = tmp_test_acc
      log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test {:.4f}'
      print(log.format(epoch, train_acc, best_val_acc, test_acc))

   print("elapsed time = {:.7f}".format(_timed_loop(_epoch)))
