#pragma once

#include <vector_types.h>

#include "cuda/cuda_utils.cuh"
#include "../dense.cuh"
#include "../graph_info.cuh"

template <typename T, int SW, int EW = 1, int WARP_SIZE = 32>
static inline __device__ T warp_reduce(T(*op)(T,T), T val) {
    // Tree-reduce `val` across warp lanes with `op`: the active width is
    // halved each step (offsets SW/2, SW/4, ...) until it reaches EW.
    // The reduced result lands in the lowest EW lanes of each group.
    // SW and EW are expected to be powers of two with SW >= EW.
    #pragma unroll
    for (int width = SW; width > EW; width >>= 1)
        val = op(val, __shfl_down_sync(uint(-1), val, width >> 1, WARP_SIZE));
    return val;
}

template <typename T, int SW, int EW = 1, int WARP_SIZE = 32>
static inline __device__ T warp_all_reduce(T(*op)(T,T), T val) {
    // Butterfly (XOR) reduction: like warp_reduce, but every participating
    // lane ends up holding the reduced value instead of only the low lanes.
    // SW and EW are expected to be powers of two with SW >= EW.
    #pragma unroll
    for (int width = SW; width > EW; width >>= 1)
        val = op(val, __shfl_xor_sync(uint(-1), val, width >> 1, WARP_SIZE));
    return val;
}

template <typename DType>
__device__ DType LeakyReluExp(DType val, DType slope) {
    // exp(LeakyReLU(val)): scale only the negative side by `slope`,
    // then exponentiate once.
    const DType pre_act = (val > 0) ? val : slope * val;
    return exp(pre_act);
}

template <typename DType>
__device__ DType gradLeaky(DType val, DType slope) {
    // Derivative of LeakyReLU: 1 on the positive side, `slope` otherwise.
    if (val > 0) return DType(1);
    return slope;
}

template <typename Idx, typename DType, int HEADS=1, int TILE=32, int UNROLL=1>
__global__ void gatExpLeakyReluSumKernel_vect(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Fused GAT edge-softmax numerator: for every in-edge e of a destination
    // row, exp[e] = LeakyReluExp(el[src] + er[dst]) and sum[dst] accumulates
    // the per-head total. UNROLL heads are packed into one vector load/store,
    // so vHEADS = HEADS/UNROLL vector lanes cover all heads.
    constexpr Idx vHEADS = HEADS/UNROLL;
    constexpr Idx Warp = TILE*vHEADS;
    using co = llvlib::CoopLite<Warp>;
    using vd = llvlib::VectTypes<DType, UNROLL>;
    using vd_t = typename vd::type;

    // Vectorized views of the flat DType arrays.
    vd_t* vel = vd::cast(gdata.el);
    vd_t* ver = vd::cast(gdata.er);
    vd_t* vexp = vd::cast(gdata.exp);
    vd_t* vsum = vd::cast(gdata.sum);

    Idx tx = co::xid();
    Idx ty = co::ty();
    if (ty >= csr.rows) return;

    // Optional row remapping (row-reordered / partitioned CSR).
    Idx dst_id = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    Idx start_off = __ldg(csr.row_offset + dst_id);
    Idx end_off = __ldg(csr.row_offset + dst_id + 1);

    const Idx stride_x = TILE;
    Idx hid = tx % vHEADS;   // vector-head lane handled by this thread
    Idx hoff = tx / vHEADS;  // edge offset handled by this thread
    DType _vsum[UNROLL] = {};

    for (Idx eid = start_off + hoff; eid < end_off; eid += stride_x) {
        // per-edge register buffers
        DType _vel[UNROLL];
        DType _ver[UNROLL];
        DType _vexp[UNROLL];
        // offset calc
        Idx src_id = __ldg(csr.col_ind + eid);
        Idx feat_off_src = src_id * vHEADS + hid;
        Idx feat_off_dst = dst_id * vHEADS + hid;
        Idx feat_off_out = gdata.eids[eid] * vHEADS + hid;
        // data load
        *vd::cast(&_vel) = vel[feat_off_src];
        // BUG FIX: the destination attention term must come from `er`, not
        // `el` (the `ver` pointer was previously computed but never used).
        *vd::cast(&_ver) = ver[feat_off_dst];
        // calculation
#pragma unroll
        for (int i=0; i<UNROLL; i++) {
            _vexp[i] = LeakyReluExp(_vel[i] + _ver[i], gdata.leaky_relu_slope);
            _vsum[i] += _vexp[i];
        }
        // data write (scattered through eids to edge-id order)
        vexp[feat_off_out] = *vd::cast(&_vexp);
    }

    // Collapse the TILE per-lane partials down to one partial per vector-head.
#pragma unroll
    for (int i=0; i<UNROLL; i++)
        _vsum[i]= warp_reduce<DType, Warp, vHEADS>([]__device__(DType a, DType b) { 
                return a+b; }, _vsum[i]);

    // The low vHEADS lanes now hold the reduced sums for this row.
    if ( tx < vHEADS )
        vsum[dst_id * vHEADS + tx] = *vd::cast(&_vsum);
}


template <typename Idx, typename DType, int TILE>
__global__ void gatSumProdZipDivKernel(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Baseline GAT aggregation:
    //   ret[row, h, f] = sum_e exp[e, h] / sum[row, h] * feat_src[src(e), h, f]
    // One cooperative group per destination row; thread tx owns feature column tx.
    // NOTE(review): no remap through csr.row_ind here (unlike the vect/v31
    // kernels) and no guard on tx < feat_dim — confirm the launch config
    // matches (group width == feat_dim, non-reordered CSR).
    using co = llvlib::CoopLite<TILE>;

    const Idx num_heads = gdata.heads;
    const Idx dense_col = gdata.feat_src_xlen;
    const Idx feat_dim = dense_col / num_heads;

    Idx tx = co::tx();
    Idx ty = co::ty();
    if ( ty >= csr.rows ) return;

    Idx start_off = __ldg(csr.row_offset + ty);
    Idx end_off = __ldg(csr.row_offset + ty + 1);

    for (Idx h = 0; h < num_heads; ++h) {
        // Hoisted loop invariant: the softmax denominator depends only on
        // (row, head), so load it once instead of once per edge.
        const DType dst_sum = gdata.sum[ty * num_heads + h];
        DType s = 0.;
        for (Idx e = start_off; e < end_off; e++) {
            Idx src_vid = csr.col_ind[e];
            s += gdata.exp[gdata.eids[e] * num_heads + h]
                / dst_sum
                * gdata.feat_src[src_vid * dense_col + h * feat_dim + tx];
        }
        gdata.ret[ty * dense_col + h * feat_dim + tx] = s;
    }
}


template <typename Idx, typename DType, int HEADS, int TILE, int UNROLL>
__global__ void gatSumProdZipDivKernelv2(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // GAT aggregation, v2: stages TILE (column base, normalized attention)
    // pairs in shared memory per row-group, then all lanes stream the dense
    // source features with vectorized (UNROLL-wide) loads.
    // Dynamic shared memory layout: blockDim.x*blockDim.y Idx slots followed
    // by blockDim.x*blockDim.y DType slots.
    // NOTE(review): the DType region's alignment relies on the size of the
    // Idx region — confirm when sizeof(Idx) < sizeof(DType).
    using co = llvlib::CoopLite<TILE>;
    using dv = llvlib::VectTypes<DType, UNROLL>;
    using dv_t = typename dv::type;

    extern __shared__ char sh[];

    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width
    const Idx feat_dim = dense_col / HEADS;              // vectorized per-head width

    dv_t *v_feat_src = dv::cast(gdata.feat_src);
    dv_t *v_ret = dv::cast(gdata.ret);

    // Per-row-group windows into the two shared regions.
    Idx *sh_col = reinterpret_cast<Idx*>(&sh) + co::ybase();
    DType *sh_exp = reinterpret_cast<DType*>(&sh[blockDim.x * blockDim.y * sizeof(Idx)]) + co::ybase();

    Idx lane_id = co::xid();
    Idx tx = co::tx();
    Idx ty = co::ty();

    if ( ty >= csr.rows ) return;

    Idx dst_id = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    Idx start_off = __ldg(csr.row_offset + dst_id);
    Idx end_off = __ldg(csr.row_offset + dst_id + 1);

    for ( Idx h = 0; h < HEADS; ++h ) {

        DType s[UNROLL] = {};
        // BUG FIX: `sum` is stored per destination node, so index it with the
        // remapped dst_id (was `ty`, which diverges whenever csr.row_ind is
        // set; matches v31 and the head-parallel kernels).
        DType dst_sum = gdata.sum[dst_id * HEADS + h];

        for (Idx eoff = start_off; eoff < end_off; eoff+=TILE ) {
            Idx eind = eoff + lane_id;
            // Stage this tile: pre-scaled source row base and pre-divided
            // softmax weight; -1 / 0 act as trailing sentinels past row end.
            sh_col[lane_id] = eind < end_off ? csr.col_ind[eind] * dense_col : -1;
            sh_exp[lane_id] = eind < end_off ? gdata.exp[gdata.eids[eind]*HEADS + h] / dst_sum : 0.;
            // BUG FIX: publish the staged tile before any lane reads it.
            // Under Volta+ independent thread scheduling there is no implicit
            // warp-synchronous ordering between the writes above and the
            // reads below.
            __syncwarp();

            for (Idx e=0; e < TILE; ++e) {
                Idx src_off = sh_col[e];
                if ( src_off < 0 ) break;  // sentinels are trailing, so stop
                DType sum_exp = sh_exp[e];

                DType feat[UNROLL];
                *dv::cast(&feat) = v_feat_src[src_off + h*feat_dim + tx];
                
                #pragma unroll
                for (int i=0; i<UNROLL; i++) s[i] += sum_exp * feat[i];
            }
            __syncwarp();  // tile fully consumed before the next overwrite
        }

        v_ret[dst_id*dense_col + h*feat_dim + tx] = *dv::cast(&s);
    }
}

/**
 * @fn This function only uses \b register_caching
 * 
 * @tparam Idx 
 * @tparam DType 
 * @tparam HEADS 
 * @tparam TILE cooperative group size
 * @tparam UNROLL unrolling / vectorization factor
 * @param gdata 
 * @param csr 
 */
template <typename Idx, typename DType, int HEADS, int TILE, int UNROLL>
__global__ void gatSumProdZipDivKernelv3(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Register-caching GAT aggregation: each lane caches one
    // (column base offset, normalized attention weight) pair per TILE-sized
    // chunk of edges and broadcasts entries to the whole group via warp
    // shuffles — no shared memory required.
    // NOTE(review): unlike v31, this variant indexes rows by `ty` directly and
    // ignores csr.row_ind — confirm it is only launched on non-reordered CSR.
    using namespace llvlib;
    using co = CoopLite<TILE>;
    using feat_t = typename VectTypes<DType, UNROLL>::type;
    using DVect = llvlib::Vect<DType, UNROLL>;  // NOTE(review): unused in the active path

    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width
    const Idx feat_dim = dense_col / HEADS;              // vectorized per-head width

    const feat_t* v_feat_src = reinterpret_cast<feat_t*>(gdata.feat_src);
    feat_t* v_ret = reinterpret_cast<feat_t*>(gdata.ret);

    // Per-lane register cache of one staged edge (see the tile loop below).
    Idx reg_col;//, reg_col_next;
    DType reg_exp;//, reg_exp_next;

    Idx lane_id = co::xid();
    Idx tx = co::tx();
    Idx ty = co::ty();

    if ( ty >= csr.rows ) return;

    Idx start_off = __ldg(csr.row_offset + ty);
    Idx end_off = __ldg(csr.row_offset + ty + 1);

    for ( Idx h = 0; h < HEADS; ++h ) {

        DType s[UNROLL] = {};  // per-thread accumulator for UNROLL features
        DType dst_sum = gdata.sum[ty * HEADS + h];

        // prefix-compute

        // if ( start_off & 3 ) {
        //     // eoff += TILE;
        //     Idx eoff = eid + threadIdx.x;
        //     reg_col = eoff < end_off ? csr.col_ind[eoff] : -1;
        //     reg_exp = eoff < end_off ? gdata.exp[eoff*HEADS + h] : 0.;

        //     for (Idx _eid=start_off & 3; _eid < TILE; ++_eid) {
        //         Idx src_vid = __shfl_sync(-1, reg_col, _eid, TILE);
        //         if ( src_vid < 0 ) continue;
        //         DType _exp = __shfl_sync(-1, reg_exp, _eid, TILE) * dst_sum;

        //         __align__(UNROLL*sizeof(DType)) Vect<DType, UNROLL> feat_reg;
        //         feat_reg.template load_thread<feat_t>(gdata.feat_src + src_vid*dense_col + h*feat_dim + tx*UNROLL);
        //         thread_scalar_fma_inline<DType, Vect<DType, UNROLL>, Vect<DType, UNROLL>&, UNROLL>(_exp, feat_reg, s, 
        //             []__device__(DType a, DType b){ return a*b; },
        //             []__device__(DType* acc, DType b){ return *acc += b; });
        //     }
        //     __syncwarp();
        //     // reg_col = reg_col_next;
        //     // reg_exp = reg_exp_next;
        //     eid += TILE;
        // }

        for ( Idx eoff = start_off; eoff < end_off; eoff+=TILE ) {
            // Each lane stages one edge: pre-scaled source row base and
            // pre-divided softmax weight; -1 / 0 are trailing sentinels.
            Idx eind = eoff + lane_id;
            reg_col = eind < end_off ? csr.col_ind[eind] * dense_col : -1;
            reg_exp = eind < end_off ? gdata.exp[gdata.eids[eind]*HEADS + h] / dst_sum : 0.;

            for (Idx e=0; e < TILE; ++e) {
                // NOTE(review): mask -1 assumes all 32 warp lanes participate —
                // unsafe if TILE < 32 and sub-groups diverge; confirm launch shape.
                Idx src_off = __shfl_sync(-1, reg_col, e, TILE);
                if ( src_off < 0 ) break;  // sentinels are trailing, so stop
                DType sum_exp = __shfl_sync(-1, reg_exp, e, TILE);

                // NOTE(review): writing through feat_t* assumes feat_reg is
                // aligned to sizeof(feat_t) — TODO confirm (consider alignas).
                DType feat_reg[UNROLL];
                *reinterpret_cast<feat_t*>(&feat_reg) = v_feat_src[src_off + h*feat_dim + tx];

                #pragma unroll
                for (int i=0; i<UNROLL; i++) s[i] += sum_exp * feat_reg[i];
            }
        }

        v_ret[ty*dense_col + h*feat_dim + tx] = *reinterpret_cast<feat_t*>(s);
    }
}

/**
 * @fn This kernel uses \b preload and \b register_caching
*/
template <typename Idx, typename DType, int HEADS, int TILE, int UNROLL>
__global__ void gatSumProdZipDivKernelv31(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Software-pipelined variant of v3: while the current TILE of
    // (column, attention) pairs is being consumed, the next tile is loaded
    // into *_next registers; a tail loop drains the final preloaded tile.
    using namespace llvlib;
    using co = CoopLite<TILE>;
    using feat_t = typename VectTypes<DType, UNROLL>::type;

    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width
    const Idx feat_dim = dense_col / HEADS;              // vectorized per-head width

    const feat_t* v_feat_src = reinterpret_cast<feat_t*>(gdata.feat_src);
    feat_t* v_ret = reinterpret_cast<feat_t*>(gdata.ret);

    // Double-buffered per-lane register cache (current tile / next tile).
    Idx reg_col, reg_col_next;
    DType reg_exp, reg_exp_next;

    Idx lane_id = co::xid();
    Idx tx = co::tx();
    Idx ty = co::ty();

    if ( ty >= csr.rows ) return;

    // Optional row remapping (row-reordered CSR).
    Idx dst_id = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    Idx start_off = __ldg(csr.row_offset + dst_id);
    Idx end_off = __ldg(csr.row_offset + dst_id + 1);

    for ( Idx h = 0; h < HEADS; ++h ) {
        DType s[UNROLL] = {};  // per-thread accumulator for UNROLL features
        DType dst_sum = gdata.sum[dst_id * HEADS + h];

        Idx eoff = start_off;
        // pre-load: stage the first tile; -1 / 0 act as trailing sentinels
        // past the end of the row (an empty row leaves only sentinels and
        // therefore writes zeros below).
        Idx eind = eoff + lane_id;
        reg_col = eind < end_off ? csr.col_ind[eind] * dense_col : -1;
        reg_exp = eind < end_off ? gdata.exp[gdata.eids[eind]*HEADS + h] / dst_sum : 0.;
        eoff += TILE;

        // Steady state: issue next tile's loads, then consume current tile.
        for ( ;eoff < end_off; eoff+=TILE ) {
            eind = eoff + lane_id;
            reg_col_next = eind < end_off ? csr.col_ind[eind] * dense_col : -1;
            reg_exp_next = eind < end_off ? gdata.exp[gdata.eids[eind]*HEADS + h] / dst_sum : 0.;

            for (Idx e=0; e < TILE; ++e) {
                // NOTE(review): mask -1 assumes all 32 warp lanes participate —
                // unsafe if TILE < 32 and sub-groups diverge; confirm launch shape.
                Idx src_off = __shfl_sync(-1, reg_col, e, TILE);
                if ( src_off < 0 ) break;  // sentinels are trailing, so stop
                DType sum_exp = __shfl_sync(-1, reg_exp, e, TILE);

                // NOTE(review): writing through feat_t* assumes feat_reg is
                // aligned to sizeof(feat_t) — TODO confirm (consider alignas).
                DType feat_reg[UNROLL];
                *reinterpret_cast<feat_t*>(&feat_reg) = v_feat_src[src_off + h*feat_dim + tx];

                #pragma unroll
                for (int i=0; i<UNROLL; i++) s[i] += sum_exp * feat_reg[i];
            }
            reg_col = reg_col_next;
            reg_exp = reg_exp_next;
        }

        // Tail: drain the last preloaded tile.
        for (Idx e=0; e < TILE; ++e) {
            Idx src_off = __shfl_sync(-1, reg_col, e, TILE);
            if ( src_off < 0 ) break;
            DType sum_exp = __shfl_sync(-1, reg_exp, e, TILE);

            DType feat_reg[UNROLL];
            *reinterpret_cast<feat_t*>(&feat_reg) = v_feat_src[src_off + h*feat_dim + tx];

            #pragma unroll
            for (int i=0; i<UNROLL; i++) s[i] += sum_exp * feat_reg[i];
        }

        // s.template dump_thread_offset<feat_t>(gdata.ret, (ty*dense_col + h*feat_dim + tx)*UNROLL);
        v_ret[dst_id*dense_col + h*feat_dim + tx] = *reinterpret_cast<feat_t*>(s);
        // if ( threadIdx.x % TILE == 0) 
        //     printf("row : %d = %f\n", ty, v_ret[ty*dense_col + h*feat_dim + tx].x);
    }
}

/**
 * @fn Reduce kernel for head parallelization, uses \b register_caching
 * @tparam HEADS Attention head of the model
 *   @note must be less than 4
 * @tparam TILE thread group size to process a sparse row
 *   @note less than 32 to avoid explicit synchronize
 * @tparam UNROLL vector size to load dense matrix
 *   @note must be less than 4
 * 
 * @note threads in the same head must map to the same head idx (TILE * UNROLL is assumed to line up evenly with feat_dim)
*/
template <typename Idx, typename DType, int HEADS=1, int TILE=32, int UNROLL=1>
__global__ void gatSumProdZipDivKernel_hp(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Head-parallel GAT aggregation: within a row's thread group, lanes are
    // split across heads (th = tx / feat_dim) and feature columns, with
    // UNROLL features per vectorized access (see the header comment for the
    // layout restriction this variant assumes).
    using co = llvlib::CoopLite<TILE>;
    using feat = llvlib::VectTypes<DType, UNROLL>;
    using feat_t = typename feat::type;

    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width
    const Idx feat_dim = dense_col / HEADS;              // vectorized per-head width

    Idx lane_id = co::xid();
    Idx tx = co::tx();
    Idx ty = co::ty();
    Idx th = tx / feat_dim;  // head served by this lane
    if ( ty >= csr.rows ) return;

    // Optional row remapping (row-reordered CSR).
    Idx dst_id = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    Idx start_off = __ldg(csr.row_offset + dst_id);
    Idx end_off = __ldg(csr.row_offset + dst_id + 1);

    feat_t* v_feat_src = feat::cast(gdata.feat_src);
    feat_t* v_ret = feat::cast(gdata.ret);

    // BUG FIX: `sum` is stored per destination node, so index it with the
    // remapped dst_id (was `ty`, which diverges whenever csr.row_ind is set;
    // matches v31 and the crosshead kernel).
    DType dst_sum = gdata.sum[dst_id * HEADS + th];
    DType vacc[UNROLL] = {};

    for ( Idx eoff = start_off; eoff < end_off; eoff += TILE ) {
        // Each lane stages one edge: pre-scaled source row base and
        // pre-divided softmax weight; -1 / 0 are trailing sentinels.
        Idx eind = eoff + lane_id;
        Idx reg_col = eind < end_off ? csr.col_ind[eind] * dense_col : -1;
        DType reg_exp = eind < end_off ? gdata.exp[gdata.eids[eind]*HEADS + th] / dst_sum : 0.;

        for (Idx e=0; e<TILE; ++e) {
            DType vfeat[UNROLL];
            // NOTE(review): mask -1 assumes all 32 warp lanes participate —
            // unsafe if TILE < 32 and sub-groups diverge; confirm launch shape.
            Idx col_off = __shfl_sync(-1, reg_col, e, TILE);
            if ( col_off < 0 ) break;  // sentinels are trailing, so stop
            DType softmax = __shfl_sync(-1, reg_exp, e, TILE);
            *feat::cast(&vfeat) = v_feat_src[col_off + tx];
            
            #pragma unroll
            for (int i=0; i<UNROLL; i++) {
                vacc[i] += softmax * vfeat[i];
            }
        }
    }

    v_ret[dst_id * dense_col + tx] = *feat::cast(&vacc);
}

/**
 * @brief general head-parallelism kernel; eliminates the restriction that
 *        TILE * UNROLL must line up evenly with feat_dim
*/
template <typename Idx, typename DType, int LHD=1, int TILE=32, int UNROLL=1>
__global__ void gatSumProdZipDivKernel_hp_crosshead(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // General head-parallel GAT aggregation: each tile stages LHD heads' edge
    // weights in shared memory (ebuf) so that lanes serving different heads
    // within one TILE group can each pick up their own softmax weight.
    using co = llvlib::CoopLite<TILE>;
    using nfeat = llvlib::VectTypes<DType, UNROLL>;
    using efeat = llvlib::VectTypes<DType, LHD>;
    using nfeat_t = typename nfeat::type;
    using efeat_t = typename efeat::type;

    extern __shared__ DType sh[];
    // Per-row-group staging buffer, TILE * LHD normalized weights.
    DType *ebuf = &sh[co::ybase() * LHD];

    const Idx heads = gdata.heads;
    const Idx vH = heads / LHD;
    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width
    const Idx feat_dim = dense_col / heads;              // vectorized per-head width

    Idx lane_id = co::xid();
    Idx tx = co::tx();
    Idx ty = co::ty();
    Idx th = tx / feat_dim;   // global head index of this lane
    Idx lhoff = th & (-LHD);  // base head of this lane's LHD group
    Idx lth = th % LHD;       // head index within the group

    if ( ty >= csr.rows ) return;

    nfeat_t *v_feat_src = nfeat::cast(gdata.feat_src);
    nfeat_t *v_ret = nfeat::cast(gdata.ret);

    Idx dst_id = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    Idx start_off = __ldg(csr.row_offset + dst_id);
    Idx end_off = __ldg(csr.row_offset + dst_id + 1);

    Idx eoff = start_off;
    DType vacc[UNROLL] = {};
    DType vexpsum[LHD];
    *efeat::cast(&vexpsum) = *efeat::cast(gdata.sum + dst_id*heads + lhoff);

    for ( ; eoff < end_off; eoff += TILE ) {
        Idx eind = eoff + lane_id;

        // BUG FIX: guard the col_ind load — lanes with eind >= end_off on the
        // last tile previously read past the row and later fed a garbage
        // offset into the feature load below. -1 is a trailing sentinel,
        // mirroring the other kernels in this file.
        Idx reg_col = eind < end_off ? csr.col_ind[eind] * dense_col : -1;
        DType vedge[LHD];
        // NOTE(review): exp is documented as [edge, head]; the eids[.]*vH
        // stride only matches that layout when LHD == 1 — confirm for LHD > 1.
        *efeat::cast(&vedge) = eind < end_off ? 
                               *efeat::cast(gdata.exp + gdata.eids[eind]*vH + lhoff) : efeat_t{};
        #pragma unroll
        for (int i=0; i<LHD; i++) {
            ebuf[lane_id + TILE * i] = vedge[i] / vexpsum[i];
        }
        // NOTE(review): no warp sync between the ebuf writes above and the
        // reads below — relies on warp-synchronous behavior; confirm on
        // Volta+ independent thread scheduling (cf. the __syncwarp in v2).

        for (Idx e=0; e<TILE; ++e) {
            DType vfeat[UNROLL];
            Idx   col_off = __shfl_sync(-1, reg_col, e, TILE);
            // BUG FIX: stop at the sentinel instead of loading from a junk
            // offset (the staged weights past row end are zero anyway).
            if ( col_off < 0 ) break;
            DType softmax = ebuf[e + TILE * lth];
            *nfeat::cast(&vfeat) = v_feat_src[col_off + tx];
            
            #pragma unroll
            for (int i=0; i<UNROLL; i++) {
                vacc[i] += softmax * vfeat[i];
            }
        }
    }
    v_ret[dst_id * dense_col + tx] = *nfeat::cast(&vacc);
}

/** 
 * @fn gatAllFused
 * @brief fully fused gat kernel
 * @note loop over head dimension
*/
template <typename Idx, typename DType, int HEADS=1, int TILE=32, int UNROLL=1>
__global__ void gatAllFused(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Fully fused GAT: (1) per-edge exp(LeakyReLU(el[src] + er[dst])),
    // (2) per-(row, head) softmax denominator, (3) weighted aggregation of
    // source features — all in one kernel, looping over heads.
    // NOTE(review): no running-max subtraction before exp(), so large logits
    // can overflow (a stable-softmax path was sketched here previously).
    using namespace llvlib;
    using feat_t = typename VectTypes<DType, UNROLL>::type;
    using co = CoopLite<TILE>;
    
    Idx tx = co::tx();
    Idx ty = co::ty();
    if ( ty >= csr.rows ) return;
    Idx stride_x = TILE;

    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width
    const Idx feat_dim = dense_col / HEADS;              // vectorized per-head width
    const feat_t *v_feat_src = reinterpret_cast<feat_t*>(gdata.feat_src);
    feat_t* v_ret = reinterpret_cast<feat_t*>(gdata.ret);

    Idx dst_id = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    // Consistency: use the read-only cache like the sibling kernels.
    Idx start_off = __ldg(csr.row_offset + dst_id);
    Idx end_off = __ldg(csr.row_offset + dst_id + 1);
    Idx feat_off_dst = dst_id * HEADS;

    DType vsumexp[HEADS] = {};

    // Phase 1: attention numerators, scattered to edge-id order via eids,
    // plus per-head partial sums across this lane's edges.
    for (Idx eid=start_off+tx; eid<end_off; eid+=stride_x) {
        Idx src_id = __ldg(csr.col_ind + eid);
        #pragma unroll
        for (Idx h=0; h < HEADS; ++h) {
            Idx feat_idx_src = src_id * HEADS + h;
            Idx feat_idx_out = gdata.eids[eid] * HEADS + h;
            
            DType tmp = LeakyReluExp(gdata.el[feat_idx_src] + gdata.er[feat_off_dst + h], gdata.leaky_relu_slope);
            gdata.exp[feat_idx_out] = tmp;
            vsumexp[h] += tmp;
        }
    }

    // Phase 2: all-reduce so every lane holds the full denominator;
    // lane 0 publishes it for downstream kernels.
    for (Idx h=0; h < HEADS; h++) {
        vsumexp[h] = warp_all_reduce<DType, TILE>([]__device__(DType a, DType b) {
                return a + b; }, vsumexp[h]);
        if (!tx) gdata.sum[feat_off_dst + h] = vsumexp[h];
    }

    // Phase 3: register-cached weighted aggregation (cf. v3).
    for (Idx h=0; h < HEADS; ++h) {
        DType vacc[UNROLL] = {};
        Idx eoff = start_off;
        for ( ;eoff < end_off; eoff+=TILE ) {
            // Each lane stages one edge; -1 / 0 are trailing sentinels.
            Idx ld_eid = eoff + (threadIdx.x % TILE);
            Idx reg_col = ld_eid < end_off ? csr.col_ind[ld_eid] * dense_col : -1;
            DType reg_exp = ld_eid < end_off ? gdata.exp[gdata.eids[ld_eid]*HEADS + h] / vsumexp[h] : 0.;

            for (Idx e=0; e < TILE; ++e) {
                Idx src_off = __shfl_sync(-1, reg_col, e, TILE);
                if ( src_off < 0 ) break;
                DType sumexp = __shfl_sync(-1, reg_exp, e, TILE);

                DType feat_reg[UNROLL];
                *(reinterpret_cast<feat_t*>(&feat_reg)) = v_feat_src[src_off + h*feat_dim + tx];

                #pragma unroll
                for (int i=0; i<UNROLL; ++i) {
                    vacc[i] += sumexp * feat_reg[i];
                }
            }
        }
        // BUG FIX: the output column base must be h*feat_dim + tx, mirroring
        // the feature load above; it was HEADS*h + tx, which is only correct
        // when feat_dim happens to equal HEADS.
        v_ret[dst_id*dense_col + h*feat_dim + tx] = *(reinterpret_cast<feat_t*>(&vacc));
    }
}


/**
 * @fn Head parallelism is mixed with feature parallelism
 * @attention limitations: 
 *    gridDim.y == 1 is advised 
 *    hidden_dim == warp(32) / heads * unroll
 *    feature dim is limited to 128 (w/ vectorization w/o unrolling)
 * @attention data format requirement: 
 *   feature: [node, head, hidden]
 *   el\er: [edge, head]
 *   exp: [edge, head]
 *   sum: [node, head]
 *   @todo this is not the best format for current thread mapping
 * @note Iteration space: [node,          head, edge(r)]
 *                        [node, edge(r), head, feat]
 * @note too much unrolling damages L2 locality
*/
template <typename Idx, typename DType, int HEADS=1, int TILE=32, int UNROLL=1>
__global__ void gatAllFusedv2(GATData<Idx, DType> gdata, CSRInfo<Idx> csr) {
    // Fully fused GAT with head x feature parallelism: within a row's group,
    // lanes form HEADS sub-tiles of TILE lanes; tf walks edges, th picks the
    // head. See the header comment above for the layout requirements.
    using co = llvlib::CoopLite<TILE>;
    using feat = llvlib::VectTypes<DType, UNROLL>;
    using feat_t = typename feat::type;
    
    const Idx dense_col = gdata.feat_src_xlen / UNROLL;  // vectorized feature width

    Idx ty = co::ty();
    if (ty >= csr.rows) return;
    Idx tx = co::tx();
    Idx th = co::xid() / TILE;  // head handled by this lane
    Idx tf = co::xid() % TILE;  // lane index within the head's tile

    Idx stride_x = TILE;

    feat_t *v_feat_src = feat::cast(gdata.feat_src);
    feat_t* v_ret = feat::cast(gdata.ret);

    Idx dst_vid = csr.row_ind != nullptr ? csr.row_ind[ty] : ty;
    Idx start_off = *(csr.row_offset + dst_vid);
    Idx end_off = *(csr.row_offset + dst_vid + 1);
    Idx feat_off_dst = dst_vid * HEADS;

    // Phase 1: per-edge attention numerator and this lane's partial sum
    // for its head.
    DType vsumexp = 0.;
    for (Idx eid=start_off+tf; eid<end_off; eid+=stride_x) {
        Idx src_id = __ldg(csr.col_ind + eid);
        Idx feat_idx_src = src_id * HEADS + th;
        Idx feat_idx_out = gdata.eids[eid] * HEADS + th;
        
        DType tmp = LeakyReluExp(gdata.el[feat_idx_src] + gdata.er[feat_off_dst + th], gdata.leaky_relu_slope);
        gdata.exp[feat_idx_out] = tmp;
        vsumexp += tmp;
    }

    // Phase 2: all-reduce within each TILE sub-group (width TILE), so every
    // lane of the group holds its head's denominator; lane 0 publishes it.
    vsumexp = warp_all_reduce<DType, TILE, 1, TILE>([]__device__(DType a, DType b) {
            return a + b; }, vsumexp);
    if ( tf == 0 ) gdata.sum[feat_off_dst + th] = vsumexp;

    // Phase 3: register-cached weighted aggregation; -1 / 0 are trailing
    // sentinels past the row end.
    DType vacc[UNROLL] = {};
    for ( Idx eoff = start_off; eoff < end_off; eoff+=TILE ) {
        Idx ld_eid = eoff + tf;
        Idx reg_col = ld_eid < end_off ? csr.col_ind[ld_eid] * dense_col : -1;
        DType reg_exp = ld_eid < end_off ? gdata.exp[gdata.eids[ld_eid] * HEADS + th] / vsumexp : 0.;

        for (Idx e=0; e < TILE; ++e) {
            Idx src_off = __shfl_sync(-1, reg_col, e, TILE);
            if ( src_off < 0 ) break;
            DType softmax = __shfl_sync(-1, reg_exp, e, TILE);

            DType feat_reg[UNROLL];
            *feat::cast(&feat_reg) = v_feat_src[src_off + tx];

            #pragma unroll
            for (int i=0; i<UNROLL; ++i) {
                vacc[i] += softmax * feat_reg[i];
            }
        }
    }
    v_ret[dst_vid*dense_col + tx] = *feat::cast(&vacc);
}