import enum
import sys
import torch as th
import torch.nn.functional as F
import torch.autograd as ag

SIZE = 10

def load_lib(path, libname):
    """Load a custom torch op library once and return its op namespace.

    Parameters
    ----------
    path : str
        Filesystem path to the shared library (.so) registering the ops.
    libname : str
        Namespace name the library registers its operators under.

    Returns
    -------
    The ``torch.ops.<libname>`` namespace object.
    """
    # Load the shared object only the first time: torch caches each op
    # namespace in torch.ops.__dict__ once it has been created/loaded.
    if libname not in th.ops.__dict__:
        th.ops.load_library(path)
    return getattr(th.ops, libname)


def zero_grad(*args):
    """Drop the accumulated ``.grad`` of every tensor passed in."""
    for tensor in args:
        tensor.grad = None


def rank2grad(loss, *input_args):
    """Backpropagate *loss* plus a gradient-norm penalty.

    Takes d(loss)/d(input_args[0]) with the graph kept alive, penalizes
    its 2-norm (over dim 0) for deviating from 1, runs ``backward()`` on
    the combined objective, and returns that combined scalar tensor.
    """
    first_grad = ag.grad([loss], input_args, create_graph=True)[0]
    penalty = (first_grad.norm(2, 0) - 1) ** 2
    combined = loss + penalty
    combined.backward()
    return combined


def testA():
    """Compare F.linear gradients against the custom_linear op.

    Runs the same linear + MSE + rank2grad pipeline twice (reference
    torch vs. the custom library) and prints the summed per-tensor
    gradient differences, which should all be ~0.
    """
    vec = th.randn(SIZE, requires_grad=True)
    bias = th.randn(SIZE, requires_grad=True)
    mat = th.randn([SIZE]*2, requires_grad=True)

    # reference
    res1 = F.linear(vec, mat, bias)
    loss1 = th.nn.MSELoss()(res1, th.ones_like(res1))
    rank2grad(loss1, vec, bias)

    # NOTE: res1/res2 are non-leaf tensors, so without retain_grad()
    # their .grad stays None; the original code included them here and
    # crashed on `None - None` below. Compare leaf gradients only.
    saved_grad = [t.grad for t in [vec, mat, bias]]
    # reset accumulated grads before the custom run
    zero_grad(vec, bias, mat)

    # load custom function
    lib = load_lib('./build/libAgradTest.so', 'custom')

    # test
    res2 = lib.custom_linear(vec, mat, bias)
    loss2 = th.nn.MSELoss()(res2, th.ones_like(res2))
    rank2grad(loss2, vec, bias)

    tested_grad = [t.grad for t in [vec, mat, bias]]

    for r, t in zip(saved_grad, tested_grad):
        print(sum(r - t))



def testB():
    """Compare gradients of two stacked F.linear layers against the
    fused custom_dual_linear op; prints per-tensor grad differences."""
    vec = th.randn(15, requires_grad=True)
    bias1 = th.randn(SIZE, requires_grad=True)
    w1 = th.randn(SIZE, 15, requires_grad=True)
    bias2 = th.randn(SIZE, requires_grad=True)
    w2 = th.randn(SIZE, SIZE, requires_grad=True)

    leaves = [vec, w1, bias1, w2, bias2]

    # reference: two chained linear layers
    hidden = F.linear(vec, w1, bias1)
    res1 = F.linear(hidden, w2, bias2)
    loss1 = th.nn.MSELoss()(res1, th.ones_like(res1))
    rank2grad(loss1, vec, w1, w2, bias1, bias2)

    saved_grad = [t.grad for t in leaves]
    lib = load_lib('./build/libAgradTest.so', "custom")
    zero_grad(vec, w1, bias1, w2, bias2)

    # test: fused custom implementation
    res2 = lib.custom_dual_linear(vec, w1, w2, bias1, bias2)
    loss2 = th.nn.MSELoss()(res2, th.ones_like(res2))
    rank2grad(loss2, vec, w1, w2, bias1, bias2)

    tested_grad = [t.grad for t in leaves]

    for ref, got in zip(saved_grad, tested_grad):
        print(ref - got)
 

def testC():
    """
      this function tests whether the implemented gcn functional works in real system

      Loads a graph through the cgcn context ops, pushes random CUDA
      features through custom_gcn, backpropagates the mean, and prints
      the gradients of the input and the output.
    """
    lib = load_lib('/home/limingyi/gnn_switch/build/libAgradTest.so', 'custom')
    ctx = load_lib('/home/limingyi/gnn_switch/build/libAgradTest.so', 'cgcn')

    ctx.init_layer("GSWITCH/script/dataset/web-uk-2005/web-uk-2005.mtx")
    nvertex = ctx.get_num_vertex()
    src = th.randn(nvertex, 64, requires_grad=True, device='cuda')
    print(src)
    out = lib.custom_gcn(src)
    # out is a non-leaf tensor, so ask autograd to keep its gradient.
    # (backward() has no `retain_grad` keyword: the original call
    # `backward(retain_grad=True)` raised a TypeError.)
    out.retain_grad()
    out.mean().backward()

    tested_grad = [t.grad for t in [src, out]]

    for r in tested_grad:
        print(r)


def testD():
    """
      this test reveals how autograd deals with mm:
      for out = weight @ src, d(out)/d(weight) = grad_out @ src^T and
      d(out)/d(src) = weight^T @ grad_out.
    """
    src = th.randn(10, 9, requires_grad=True)
    weight = th.randn(5, 10, requires_grad=True)

    out = weight.mm(src)          # (5, 9)
    grad = th.randn_like(out)
    out.backward(gradient=grad)

    # grad @ src^T is the gradient w.r.t. weight. The original printed
    # it next to src.grad, whose shape (10, 9) cannot even match (5, 10).
    expect_w = grad.mm(src.detach().t())
    print(weight.grad)
    print(expect_w)

    # gradient w.r.t. src, for completeness
    expect_src = weight.detach().t().mm(grad)
    print(src.grad)
    print(expect_src)


def test_sddmm():
    """
    this test sees whether sddmm extension is ready:
    compares the custom gat_apply_edge against dgl's gsddmm on Cora.
    """
    sys.path.append('/home/limingyi/gnn-workspace')
    from GNNSwitch.dataset import warp_dataset
    from GNNSwitch.graph import CSRGraph
    from GNNSwitch.nn.functional import gat_apply_edge
    import dgl

    th.ops.load_library("build/libAgradTest.so")
    data = warp_dataset("Planetoid.Cora")[0]

    src_dst = tuple(x.squeeze() for x in data.edge_index.split(1))
    dgl_g = dgl.graph(src_dst, device='cuda')
    dgl_g.formats(['csr'])

    my_g = CSRGraph(data.edge_index, directed=False, weighted=False).cuda()
    edge_idx = data.edge_index[1].cuda()

    edge_val = th.randn(edge_idx.size(0)).cuda()
    node_val = th.ones((data.num_nodes, 1)).cuda() * 0.3

    dgl_res = dgl.ops.gsddmm(dgl_g, 'mul', edge_val, node_val, 'e', 'v')
    my_res = gat_apply_edge(my_g, edge_val, node_val, None)

    print(dgl_res)
    print(my_res)
    print((dgl_res.squeeze() - my_res).abs().sum())


def test_spmm():
    """
    this test sees whether spmm extension is ready:
    compares custom spmm_like against dgl's gspmm on Cora for a few
    (op, reduce) combinations.
    """
    sys.path.append('/home/limingyi/gnn-workspace')
    from GNNSwitch.dataset import warp_dataset
    from GNNSwitch.graph import CSRGraph
    import dgl

    th.ops.load_library("build/libAgradTest.so")
    data = warp_dataset("Planetoid.Cora")[0]

    my_g = CSRGraph(data.edge_index, weighted=True)
    my_g.sortVertexIndex()
    my_g.cuda()

    dgl_g = dgl.graph((data.edge_index[1], data.edge_index[0]), device='cuda', col_sorted=True)

    dgl_keys = [
        ('mul', 'sum'),
        ('copy_rhs', 'max'),
        ('copy_rhs', 'sum'),
    ]

    for op_id, key in enumerate(dgl_keys):
        edge_val = th.randn(data.num_edges).cuda()
        node_val = th.ones((data.num_nodes, 1)).cuda()

        dgl_res = dgl.ops.gspmm(dgl_g, *key, node_val, edge_val)
        my_res = th.ops.custom.spmm_like(op_id, my_g.row_offset, my_g.adj_list, my_g.sorted_ridx, edge_val, node_val)

        print(f"{key} test - diff:{(dgl_res.squeeze() - my_res.squeeze()).abs().sum()}")
        # use a distinct name for the per-row index so the outer loop
        # index is not shadowed
        for i, (d, m) in enumerate(zip(dgl_res.squeeze(), my_res.squeeze())):
            if abs(d - m) > 1e-5:
                print(f"output {i}: dgl={d} vs my={m} diff = {abs(d-m)}")


def test_softmax():
    """
    this function checks the difference between pyg & dgl on softmax op:
    runs dgl's edge_softmax and the custom edge_softmax on the same
    input, backpropagates an MSE loss through both, and prints the
    summed absolute gradient difference (should be ~0).
    """
    from torch_geometric.utils import softmax as py_softmax
    import dgl
    from dgl.ops import edge_softmax as dgl_softmax

    sys.path.append('/home/limingyi/gnn-workspace')
    from GNNSwitch.dataset import warp_dataset
    from GNNSwitch.graph import CSRGraph
    from GNNSwitch.nn.functional import edge_softmax as my_softmax

    th.ops.load_library("build/libAgradTest.so")
    data = warp_dataset("Planetoid.Cora")[0]

    my_g = CSRGraph(data.edge_index, weighted=True)
    my_g.sortVertexIndex()
    my_g.cuda()

    dgl_g = dgl.graph((data.edge_index[1], data.edge_index[0]), device='cuda')

    # Create the input directly on the GPU so it is a leaf tensor.
    # The original `th.randn(..., requires_grad=True).cuda()` produced a
    # NON-leaf whose .grad only survived because of retain_grad().
    edge_val_dgl = th.randn(data.num_edges, requires_grad=True, device='cuda')
    edge_val_my = edge_val_dgl.clone().detach().requires_grad_(True)

    # harmless on leaves; kept so the test also works if the inputs are
    # ever turned back into non-leaf tensors
    edge_val_dgl.retain_grad()
    edge_val_my.retain_grad()

    dgl_out = dgl_softmax(dgl_g, edge_val_dgl)
    my_out = my_softmax(my_g, edge_val_my)

    loss_dgl = th.nn.MSELoss()(dgl_out, th.ones_like(dgl_out))
    loss_my = th.nn.MSELoss()(my_out, th.ones_like(my_out))

    loss_dgl.backward()
    loss_my.backward()

    print(f"diff:{(edge_val_dgl.grad.squeeze() - edge_val_my.grad.squeeze()).abs().sum()}")

if __name__ == '__main__':
    # Entry point: only the sddmm check runs by default; the other tests
    # (testA..testD, test_spmm, test_softmax) are invoked by editing this.
    test_sddmm()