
import sys
import argparse
from time import perf_counter as nwtime
from typing import Optional

import torch
from torch.nn import parameter
from torch_geometric.datasets import *
from torch_geometric.transforms import NormalizeFeatures

import dgl
import dgl.function as dglF

sys.path.append("GNNSwitch")
from graph import *
from dataset import MtxDataset
import nn.functional as mF


# Command-line interface for the kernel profiler.
# Numeric options carry type=int so CLI-supplied values are parsed as
# integers instead of strings (the defaults were already ints, which made
# string/int behavior inconsistent between default and CLI use).
parser = argparse.ArgumentParser(description="Profile SpMM-like propagation kernels.")
parser.add_argument("--dataset", default="Reddit", help="dataset to profile")
parser.add_argument("--kernel", default="single,dgl", help="kernel to profile")
parser.add_argument("--tune", default='128,512,1000000', help="kernel parameters")
parser.add_argument("--feature_dim", default=32, type=int, help="custom feature size")
parser.add_argument("--iteration", default=1, type=int, help="iterations to run")


"""
    pyg propagation remastered (copied from gap)
"""

@torch.jit.script
def broadcast(src: torch.Tensor, other: torch.Tensor, dim: int):
    """Reshape and expand ``src`` so it matches the shape of ``other``.

    A 1-D ``src`` is first aligned to position ``dim`` (negative values are
    normalized against ``other``'s rank), trailing singleton dimensions are
    appended until the ranks agree, and the result is expanded (view only,
    no copy) to ``other``'s full shape.
    """
    if dim < 0:
        dim = other.dim() + dim
    # Place a 1-D src at the requested dimension by prepending axes.
    if src.dim() == 1:
        for _ in range(dim):
            src = src.unsqueeze(0)
    # Pad trailing axes until ranks match, then expand to the target shape.
    while src.dim() < other.dim():
        src = src.unsqueeze(-1)
    return src.expand_as(other)


@torch.jit.script
def scatter_sum(src: torch.Tensor, index: torch.Tensor, dim: int = -1,
                out: Optional[torch.Tensor] = None,
                dim_size: Optional[int] = None) -> torch.Tensor:
    """Scatter-add ``src`` rows into ``out`` along ``dim`` at ``index``.

    Mirrors torch_scatter's ``scatter_sum``: ``index`` is broadcast to
    ``src``'s shape, and when ``out`` is not given a zero tensor is
    allocated whose extent along ``dim`` is ``dim_size`` if provided,
    otherwise inferred from the data.
    """
    index = broadcast(index, src, dim)
    if out is None:
        # size must be a mutable list; torch.Size is immutable and the
        # original `size[dim] = ...` assignment raised at runtime.
        size = list(src.size())
        if dim_size is not None:
            size[dim] = dim_size
        elif index.numel() == 0:
            size[dim] = 0
        else:
            # Infer the output extent from the largest target index
            # (the original wrote 0 here, yielding an empty output).
            size[dim] = int(index.max()) + 1
        out = torch.zeros(size, dtype=src.dtype, device=src.device)
    return out.scatter_add_(dim, index, src)


def pyg_propagate_sum(src, edge_index):
    """Sum-aggregate node features over edges, PyG gather/scatter style.

    Rows of ``src`` at the source indices ``edge_index[1]`` are gathered
    and scatter-added into the destination rows ``edge_index[0]``.
    """
    gathered = src.index_select(0, edge_index[1])
    accum = torch.zeros_like(src)
    return scatter_sum(gathered, edge_index[0], 0, accum)


if __name__ == '__main__':
    mF.no_perf()
    validate = False

    args = parser.parse_args()
    ds = args.dataset

    # Forward up to four tuning parameters to the custom-op tuner,
    # padding missing slots with None.
    if args.tune is not None:
        params = [int(x) for x in args.tune.split(',')]
        if len(params) < 4:
            params += [None]*(4-len(params))
        torch.ops.tuner.set_tune_param(*params)

    # Dataset selection: Planetoid citation graphs, a local .mtx file,
    # or any torch_geometric dataset class matching the given name.
    if ds in ["Cora", "CiteSeer", "PubMed"]:
        path = '/home/limingyi/gnn-workspace/data/Planetoid/'
        dataset = Planetoid(path, ds, transform=NormalizeFeatures())
    elif ds == "mtx":
        dataset = MtxDataset("data/Mtx/unified_5000_125.mtx", with_header=True)
    else:
        # SECURITY: eval() on a CLI argument executes arbitrary code; this is
        # acceptable only because this is a local profiling script run with
        # trusted input.
        dataset = eval(ds+"('data/%s')" % ds)

    if ds != "mtx":
        data = dataset[0]
        data.nvertex = data.x.size(0)
    else:
        data = dataset

    # One slot per requested kernel; each benchmark section below stores its
    # output tensor here so results can be cross-checked at the end.
    kernels = {k: None for k in args.kernel.split(",")}

    x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr


    # x = torch.zeros((chg.nvertex,16), dtype=torch.float32, device='cuda').fill_(2.3080e-05)
    # csr_x = x.clone().detach().requires_grad_(False).cuda()

    iters = int(args.iteration)

    # Feature width for the benchmark inputs.
    # NOTE(review): --feature_dim defaults to 32, so it is never None and the
    # x.size(1) branch below is dead code; a default of None (meaning "use the
    # dataset's own feature width") was likely intended — confirm.
    if x is None:
        fdim = int(args.feature_dim)
    else:
        fdim = x.size(1) if args.feature_dim is None else int(args.feature_dim)

    # One fresh random feature matrix per iteration, shared by every kernel
    # section below (each clones it) so all kernels see identical inputs.
    common_in = torch.randn((iters, data.nvertex, fdim)) * 100
    # Work per propagation in units of 1e6 flop: one multiply + one add per
    # edge per feature; mflop / milliseconds later yields Gflop/s.
    mflop = fdim*data.edge_index.size(1)*2*1e-6


    if 'chunk' in kernels:
        torch.ops.tuner.init_stream()
        chg = ChunkGraphAlv1(edge_index, torch.ones(edge_index.size(1), dtype=torch.float32), directed=False, weighted=True).to('cuda')
        ch_x = common_in.clone().detach_().cuda()

        # warm-up (untimed): first launch pays JIT/allocation costs
        dm = mF.PyChunkGCN(ch_x[0,:,:], *chg.propagate_params())
        torch.cuda.synchronize()

        # with torch.autograd.profiler.profile(use_cuda=True) as prof:
        t1 = nwtime()
        for i in range(iters):
            dm = mF.PyChunkGCN(ch_x[i,:,:], *chg.propagate_params())
            torch.cuda.synchronize()

        # mflop (1e6 flop) / milliseconds-per-iteration == Gflop/s
        print("chunk: {} Gflops".format(mflop/((nwtime()-t1)*1e3/iters)))
        kernels['chunk'] = dm
        # print(prof.key_averages().table(sort_by='self_cpu_time_total'))

    if 'reverse' in kernels:
        csr_g = CSRGraph(edge_index, torch.ones(edge_index.size(1), dtype=torch.float32), directed=True, weighted=True).to('cuda')
        ch_x = common_in.clone().detach_().cuda()

        # warm-up (untimed); synchronize so the warm-up launch cannot bleed
        # into the timed region (the original started the timer immediately).
        dm = torch.ops.custom.spmm_like_bw(ch_x[0,:,:], csr_g.r_row_offset, csr_g.r_adj_list, csr_g.r_edge_val)
        torch.cuda.synchronize()

        t1 = nwtime()
        for i in range(iters):
            dm = torch.ops.custom.spmm_like_bw(ch_x[i,:,:], csr_g.r_row_offset, csr_g.r_adj_list, csr_g.r_edge_val)
            torch.cuda.synchronize()

        print("reverse: {} Gflops".format(mflop/((nwtime()-t1)*1e3/iters)))
        kernels['reverse'] = dm

    if 'single' in kernels:
        csr_g = CSRGraph(edge_index, torch.ones(edge_index.size(1), dtype=torch.float32), directed=True, weighted=True).to('cuda')
        ch_x = common_in.clone().detach_().cuda()

        # warm-up (untimed); synchronize before starting the timer so the
        # warm-up launch cannot overlap the timed region.
        dm = mF.PyOldGCN(ch_x[0,:,:], csr_g.row_offset, csr_g.adj_list, csr_g.edge_val, csr_g.r_row_offset, csr_g.r_adj_list, csr_g.r_edge_val)
        torch.cuda.synchronize()

        t1 = nwtime()
        for i in range(iters):
            dm = mF.PyOldGCN(ch_x[i,:,:], csr_g.row_offset, csr_g.adj_list, csr_g.edge_val, csr_g.r_row_offset, csr_g.r_adj_list, csr_g.r_edge_val)
            torch.cuda.synchronize()

        print("single: {} Gflops".format(mflop/((nwtime()-t1)*1e3/iters)))
        kernels['single'] = dm

    if 'gcoo' in kernels:
        coo_g = GCOOGraph(edge_index, torch.ones(edge_index.size(1), dtype=torch.float32), directed=True, weighted=True).to('cuda')
        ch_x = common_in.clone().detach_().cuda()

        # warm-up (untimed)
        dm = mF.PyGCOOGCN(ch_x[0,:,:], coo_g.edge_list, coo_g.weight_list, coo_g.grp_offset)
        torch.cuda.synchronize()

        t1 = nwtime()
        for i in range(iters):
            dm = mF.PyGCOOGCN(ch_x[i,:,:], coo_g.edge_list, coo_g.weight_list, coo_g.grp_offset)
            torch.cuda.synchronize()

        print("gcoo: {} Gflops".format(mflop/((nwtime()-t1)*1e3/iters)))
        kernels['gcoo'] = dm
    
    if 'pyg' in kernels:
        ch_x = common_in.clone().detach_().cuda()
        # Hoist the host-to-device copy of edge_index out of the timed loop so
        # the benchmark measures propagation only, not a per-iteration H2D
        # transfer (the original called .cuda() inside the loop).
        cu_edge_index = edge_index.cuda()

        # warm-up (untimed). BUGFIX: the original warm-up indexed ch_x with a
        # stale loop variable `_` instead of 0.
        dm = pyg_propagate_sum(ch_x[0,:,:], cu_edge_index)
        torch.cuda.synchronize()

        t1 = nwtime()
        for i in range(iters):
            dm = pyg_propagate_sum(ch_x[i,:,:], cu_edge_index)
            torch.cuda.synchronize()

        print("pyg: {} Gflops".format(mflop/((nwtime()-t1)*1e3/iters)))
        kernels['pyg'] = dm


    if 'dgl' in kernels:
        # Edges are given as (src=edge_index[1], dst=edge_index[0]) so dgl
        # aggregates into edge_index[0], matching the kernels above; CSC is
        # dgl's preferred layout for copy-reduce SpMM.
        dgl_g = dgl.graph((edge_index[1].tolist(), edge_index[0].tolist())).formats('csc').to('cuda')
        ch_x = common_in.clone().detach_().cuda()

        # warm-up (untimed)
        # NOTE(review): dglF.copy_src is deprecated in newer dgl releases in
        # favour of dglF.copy_u — kept here for the pinned dgl version.
        dgl_g.ndata['ft'] = ch_x[0,:,:]
        dgl_g.update_all(dglF.copy_src(src='ft', out='m'), dglF.sum(msg='m', out='ft'))
        dm = dgl_g.dstdata['ft']
        torch.cuda.synchronize()

        t1 = nwtime()
        for i in range(iters):
            dgl_g.ndata['ft'] = ch_x[i,:,:]
            dgl_g.update_all(dglF.copy_src(src='ft', out='m'), dglF.sum(msg='m', out='ft'))
            dm = dgl_g.dstdata['ft']
            torch.cuda.synchronize()

        print("dgl: {} Gflops".format(mflop/((nwtime()-t1)*1e3/iters)))
        kernels['dgl'] = dm

    # Cross-check: compare every kernel's output against the first kernel that
    # actually produced a result; a large max-abs difference flags a broken
    # kernel. Entries still None (unrecognized kernel names) are skipped —
    # the original crashed with AttributeError on them.
    produced = [k for k in kernels if kernels[k] is not None]
    if len(produced) > 1:
        base = produced[0]
        for k in produced[1:]:
            print("{} diff {} = {}".format(base, k, (kernels[base] - kernels[k]).abs().max()))