#!/usr/bin/env python

import torch as th
import time

def perf_init():
    """Reset the module-level perf counters and enable timing."""
    global fw_timer
    global bw_timer
    global fw_cnt
    global bw_cnt
    global perf
    global t1
    fw_timer = 0
    bw_timer = 0
    fw_cnt = 0
    bw_cnt = 0
    perf = True
    t1 = 0.


def no_perf():
    """Disable perf timing for subsequent forward/backward calls."""
    global perf
    perf = False

_spmm_op = { 'std':0, 'max_e':1, 'add_e':2 }
_sddmm_op = { 'std':0, 'add':1, 'sub':2, 'mul':3, 'div':4 }

class PyGCNAggregateOld(th.autograd.Function):
    """GCN aggregation (spmm) with an explicit reverse graph for backward."""

    @staticmethod
    def forward(ctx, input, sorted_ridx, row_offset, adj_list, edge_weight,
                sorted_cidx, r_ro, r_al, r_ew):
        """Aggregate `input` over the graph; stash the reverse CSR for backward."""
        ctx.save_for_backward(sorted_cidx, r_ro, r_al, r_ew)
        return th.ops.custom.spmm_like(
            _spmm_op['std'], row_offset, adj_list, sorted_ridx, edge_weight, input)

    @staticmethod
    def backward(ctx, dZ):
        """Propagate dZ through the reverse graph; only `input` gets a gradient."""
        cidx, rev_ro, rev_al, rev_ew = ctx.saved_tensors
        grad_input = th.ops.custom.spmm_like(
            _spmm_op['std'], rev_ro, rev_al, cidx, rev_ew, dZ)
        # One gradient slot per forward argument: input + 8 graph tensors.
        return (grad_input,) + (None,) * 8


class PyFusedLinearAggregate(th.autograd.Function):
    """Fused linear transform + graph propagation.

    forward returns (out, interval); backward consumes gradients for both
    outputs and produces gradients only for (input, weight, bias).
    """

    @staticmethod
    def forward(ctx, row_offset, adj_list, edge_weight,
                     r_row_offset, r_adj_list, r_edge_weight,
                     input, weight, bias):
        global fw_timer, fw_cnt, perf, t1

        if perf:
            t1 = time.perf_counter_ns()

        ctx.save_for_backward(r_row_offset, r_adj_list, r_edge_weight,
                              input, weight, bias)
        out, interval = th.ops.custom.fused_linear_propagate(
            input, weight, bias, row_offset, adj_list, edge_weight)

        # NOTE(review): bias is both passed to the fused kernel and added
        # here; verify the kernel ignores its bias argument, otherwise the
        # bias is applied twice.
        if bias is not None:
            out += bias

        return out, interval

    @staticmethod
    def backward(ctx, dOut, dInter):
        global bw_timer, bw_cnt, perf, t1
        if perf:
            t1 = time.perf_counter_ns()

        r_ro, r_aj, r_ew, input, weight, bias = ctx.saved_tensors

        dIn, dInter_ = th.ops.custom.fused_linear_propagate_reversed(
            dOut, weight.t(), r_ro, r_aj, r_ew)
        # Fix: combine out-of-place. The original did `dInter += dInter_`,
        # mutating the incoming gradient tensor in place, which can corrupt
        # gradients still referenced by other consumers of the same tensor.
        dInter = dInter + dInter_
        dWeight = dInter.t().mm(input)
        # NOTE(review): dBias = dOut keeps the batch dimension; a per-feature
        # bias would normally need dOut.sum(0) — confirm against the kernel.
        dBias = dOut if bias is not None else None

        return (None,)*6 + (dIn, dWeight, dBias)


class PyGCNAggregateChunked(th.autograd.Function):
    """Chunked GCN aggregation; the same chunk layout drives forward and backward."""

    @staticmethod
    def forward(ctx, bos, boe, bosb, hte, htw, bosbd, dmd, boel, lsl, ldps, lwl, bodl, ad, dcnt, hcnt, lcnt, sbsize, src_tensor):
        chunk_tensors = (bos, boe, bosb, hte, htw, bosbd, dmd, boel, lsl,
                         ldps, lwl, bodl, ad)
        ctx.save_for_backward(*chunk_tensors)
        ctx.size_params = dcnt, hcnt, lcnt, sbsize
        return th.ops.custom.spmm_like_chunked(
            *chunk_tensors, dcnt, hcnt, lcnt, sbsize, src_tensor)

    @staticmethod
    def backward(ctx, dZ):
        # Apply the same chunked op to dZ to get the source gradient.
        # NOTE(review): this reuses the forward chunk layout for backward,
        # which is only correct for a symmetric graph — confirm.
        dSrc = th.ops.custom.spmm_like_chunked(
            *ctx.saved_tensors, *ctx.size_params, dZ)
        return (None,)*17 + (dSrc,)


class PyGCNAggregateGCOO(th.autograd.Function):
    """Grouped-COO GCN aggregation.

    NOTE: this operator is not ready for directed graphs — backward reuses
    the forward edge list, which is only valid for a symmetric graph.
    """

    @staticmethod
    def forward(ctx, src, el, wl, grp_offset):
        ctx.save_for_backward(el, wl, grp_offset)
        ret = th.ops.custom.spmm_like_gcoo(src, el, grp_offset, wl)
        return ret

    @staticmethod
    def backward(ctx, dZ):
        # Fix: was `ctx.saved_tenosrs` (typo) — raised AttributeError.
        el, wl, go = ctx.saved_tensors
        dSrc = th.ops.custom.spmm_like_gcoo(dZ, el, go, wl)
        # Fix: forward takes 4 inputs, so backward must return 4 gradients
        # (only `src` receives one); the original returned a bare tensor.
        return dSrc, None, None, None
        

class GCNPropagation(th.autograd.Function):
    """Factored GCN propagation; the graph object is cached on ctx so its
    CSR buffers are reused rather than re-saved every call."""

    @staticmethod
    def forward(ctx, graph, input):
        ctx.cache = graph
        return th.ops.custom.spmm_like(
            _spmm_op['std'],
            graph.row_offset,
            graph.adj_list,
            graph.sorted_ridx,
            graph.edge_val,
            input)

    @staticmethod
    def backward(ctx, dZ):
        g = ctx.cache
        # Backward runs the same spmm over the reverse (transposed) graph.
        dInput = th.ops.custom.spmm_like(
            _spmm_op['std'],
            g.r_row_offset,
            g.r_adj_list,
            g.sorted_cidx,
            g.r_edge_val,
            dZ)
        return None, dInput

class GATApplyEdge(th.autograd.Function):
    """sddmm-style per-edge computation for GAT.

    forward produces one value per edge from the endpoint features; backward
    scatters the edge gradient back to both endpoints.
    """

    @staticmethod
    def forward(ctx, graph, edge_val, input_l, input_r):
        out = th.ops.custom.sddmm_like(
            _sddmm_op['std'], graph.row_offset, graph.adj_list,
            graph.sorted_ridx, edge_val, input_l, input_r)

        ctx.cache = graph
        # Fix: the original also called ctx.save_for_backward(edge_val,
        # input_l, input_r), but backward never reads ctx.saved_tensors —
        # the save only kept those tensors alive for no benefit.
        return out

    @staticmethod
    def backward(ctx, dZe):
        graph = ctx.cache

        # Accumulate the per-edge gradient onto source nodes via the forward
        # graph and onto destination nodes via the reverse graph.
        dlhs = th.ops.custom.spmm_like(
            _spmm_op['add_e'], graph.row_offset, graph.adj_list,
            graph.sorted_ridx, dZe, None)
        drhs = th.ops.custom.spmm_like(
            _spmm_op['add_e'], graph.r_row_offset, graph.r_adj_list,
            graph.sorted_cidx, dZe, None)

        return None, None, dlhs, drhs


class GATPropagation(th.autograd.Function):
    """spmm with edge derivatives for GAT on undirected graphs."""

    @staticmethod
    def forward(ctx, graph, edge_val, input):
        out = th.ops.custom.spmm_like(
            _spmm_op['std'], graph.row_offset, graph.adj_list,
            graph.sorted_ridx, edge_val, input)
        ctx.cache = graph
        ctx.save_for_backward(edge_val, input)
        return out

    @staticmethod
    def backward(ctx, dZ):
        g = ctx.cache
        efeat, nfeat = ctx.saved_tensors

        # Node gradient flows through the reverse (transposed) graph...
        dInput = th.ops.custom.spmm_like(
            _spmm_op['std'], g.r_row_offset, g.r_adj_list,
            g.sorted_cidx, efeat, dZ)
        # ...while the per-edge gradient is an sddmm on the forward graph.
        dEdge = th.ops.custom.sddmm_like(
            _sddmm_op['std'], g.row_offset, g.adj_list,
            g.sorted_ridx, efeat, nfeat, dZ)

        return None, dEdge, dInput


class EdgeSoftMax(th.autograd.Function):
    """Numerically stable per-destination edge softmax (scheme copied from DGL)."""

    @staticmethod
    def forward(ctx, graph, score):
        ro, al, ridx = graph.row_offset, graph.adj_list, graph.sorted_ridx

        # Stabilize: subtract each destination's max score before exp().
        smax = th.ops.custom.spmm_like(_spmm_op['max_e'], ro, al, ridx, score, None)
        shifted = th.ops.custom.sddmm_like(_sddmm_op['sub'], ro, al, ridx, score, smax, None)
        escore = th.exp(shifted)
        total = th.ops.custom.spmm_like(_spmm_op['add_e'], ro, al, ridx, escore, None)
        out = th.ops.custom.sddmm_like(_sddmm_op['div'], ro, al, ridx, escore, total, None)

        ctx.cache = graph
        ctx.save_for_backward(out)
        return out

    @staticmethod
    def backward(ctx, dZ):
        g = ctx.cache
        out, = ctx.saved_tensors
        ro, al, ridx = g.r_row_offset, g.r_adj_list, g.sorted_cidx

        # Standard softmax backward: grad = out*dZ - out * sum_dst(out*dZ).
        sds = out * dZ
        accum = th.ops.custom.spmm_like(_spmm_op['add_e'], ro, al, ridx, sds, None)
        grad_score = sds - th.ops.custom.sddmm_like(
            _sddmm_op['mul'], ro, al, ridx, out, accum, None)
        return None, grad_score


def PyOldGCN(input, sridx, ro, aj, ew, scidx, r_ro, r_aj, r_ew):
    """Functional wrapper around PyGCNAggregateOld.apply."""
    out = PyGCNAggregateOld.apply(
        input, sridx, ro, aj, ew, scidx, r_ro, r_aj, r_ew)
    return out

def PyFusedLinearGCN(input, weight, bias, ro, aj, ew, r_ro, r_aj, r_ew):
    """Fused linear + propagate; returns only `out`, discarding `interval`.

    Fix: the original passed (input, weight, bias, ro, ...) positionally, but
    PyFusedLinearAggregate.forward expects the six graph tensors first and
    (input, weight, bias) last — the arguments were shifted into the wrong
    slots. Reordered to match the Function's signature.
    """
    out, _interval = PyFusedLinearAggregate.apply(
        ro, aj, ew, r_ro, r_aj, r_ew, input, weight, bias)
    return out

def PyChunkGCN(input, bos, boe, bosb, hte, htw, bosbd, dmd, boel, lsl, ldps, lwl, bodl, ad, dcnt, hcnt, lcnt, sbsize):
    """Functional wrapper around PyGCNAggregateChunked (input is routed last)."""
    args = (bos, boe, bosb, hte, htw, bosbd, dmd, boel, lsl, ldps,
            lwl, bodl, ad, dcnt, hcnt, lcnt, sbsize, input)
    return PyGCNAggregateChunked.apply(*args)

def PyGCOOGCN(input, el, wl, go):
    """Functional wrapper around PyGCNAggregateGCOO.apply."""
    result = PyGCNAggregateGCOO.apply(input, el, wl, go)
    return result

def gat_apply_edge(graph, edge_feat, lhs_feat, rhs_feat):
    """Compute per-edge values for GAT (sddmm) via GATApplyEdge."""
    result = GATApplyEdge.apply(graph, edge_feat, lhs_feat, rhs_feat)
    return result

def gat_propagate(graph, edge_feat, node_feat):
    """Edge-weighted propagation for GAT via GATPropagation."""
    result = GATPropagation.apply(graph, edge_feat, node_feat)
    return result

def gcn_propagate(graph, node_feat):
    """GCN propagation over a cached graph via GCNPropagation."""
    result = GCNPropagation.apply(graph, node_feat)
    return result

def edge_softmax(graph, score):
    """Per-destination softmax over edge scores via EdgeSoftMax."""
    result = EdgeSoftMax.apply(graph, score)
    return result

def backward_timer_cnt():
    """Return (accumulated backward time in ms, backward call count).

    Requires perf_init() to have been called first; `global` is unnecessary
    for read-only access to module-level names.
    """
    return bw_timer, bw_cnt

def forward_timer_cnt():
    """Return (accumulated forward time in ms, forward call count).

    Requires perf_init() to have been called first; `global` is unnecessary
    for read-only access to module-level names.
    """
    return fw_timer, fw_cnt