#ifndef _SPMM_CUH__ 
#define _SPMM_CUH__ 

#include <cuda.h> 
#include <vector_types.h>
// #include <cooperative_groups.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>

#include "cuda_utils.cuh"
#include "utils/intrinsics.cuh"
#include "dense.cuh"
#include "config.hxx"

#include "specification.hxx"

namespace acd = at::cuda::detail;

/**
 * template parameters
 *  - G : the graph template class (device_graph_t<VC>)
 *  - F : the functor template class, expected to have 2 uinterface
 *      - binary : how the multiplication happen between source and edge weight
 *      - reduction : how the weighted elements aggregate to update the target vertex
 */ 
namespace Base {

// Split `thd` threads into a power-of-two number of groups so that each
// group ends up with at most... just above `len` threads: `thd` is halved
// until it no longer exceeds `len`, and the number of halvings determines
// the (power-of-two) group count that is returned. Returns 1 when
// thd <= len already.
static inline __host__ __device__ uint warp_friendly_partition(uint thd, uint len)
{
  uint groups = 1;
  for (; thd > len; thd >>= 1)
    groups <<= 1;
  return groups;
}

// 最naive的二维网格实现，说实话在稠密矩阵行住的情况下没有任何优势
// 然而性能好像还可以，，，x,y轴滑动先后不影响性能
// this implimentation anaMYLOGues TM load balance
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel(
  // graph param (CSR: row_offset has nvertex+1 entries, col_indx has nedge)
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted graph
  // dense param
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  // dim param
  const uint nedge)                            // unused; kept for uniform signature
{
  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];    // rows of dst == vertex count
  const uint vlen = dst_tensor.sizes[1];       // feature-vector length

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  // 2D mapping: x-grid walks rows (vertices), y-grid walks feature columns.
  const uint RowId = wid + blockDim.y*blockIdx.x;
  const uint dstCol = lid + blockDim.x*blockIdx.y;

  // Guard BEFORE touching row_offset: the grid commonly overshoots the
  // vertex count, and row_offset[RowId+1] would read past the array.
  if (RowId >= nvertex || dstCol >= vlen) return;

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];
  scalar_t local_sum = (scalar_t) 0;

  // Hoist the weighted test out of the edge loop; each thread privately
  // accumulates one (row, feature) output element.
  if (weighted)
    for (uint k = es; k < ee; ++k)
    {
      uint srcInd = src_strides[0] * col_indx[k] + src_strides[1] * dstCol;
      F::reduce(&local_sum, F::binary(src_vecs[srcInd], edge_weight[k]));
    }
  else
    for (uint k = es; k < ee; ++k)
    {
      uint srcInd = src_strides[0] * col_indx[k] + src_strides[1] * dstCol;
      F::reduce(&local_sum, src_vecs[srcInd]);
    }

  uint dstInd = dst_strides[0] * RowId + dst_strides[1] * dstCol;
  dst_vecs[dstInd] = local_sum;
}

/**
 * this kernel views graph as CSC, saving time & space for reversed graph
 */ 
// Backward SpMM over the CSC view of the graph. Each (column, feature-dim)
// pair reads src[colInd, fdim] once, then scatters it (optionally combined
// with the edge weight) into dst[rowInd, fdim] for every incident edge via
// an atomic reduction. x-dimension grid-strides over vertices (columns),
// y-dimension grid-strides over feature dims.
// NOTE(review): colInd runs up to dst_tensor.sizes[0]; this assumes src and
// dst share the same vertex count — confirm for bipartite graphs.
template <typename F, typename scalar_t>
__global__ void _spmm_backward_kernel(
  // graph param (CSC: col_offset has nvertex+1 entries, row_indx has nedge)
  const uint* __restrict__ col_offset,
  const uint* __restrict__ row_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted graph
  // dense param
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,  // must be pre-initialized (atomics accumulate)
  // dim param
  const uint nedge)                            // unused; kept for uniform signature
{
  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const uint Tx = threadIdx.x + blockIdx.x*blockDim.x;
  const uint Ty = threadIdx.y + blockIdx.y*blockDim.y;
  const uint Stride_x = blockDim.x*gridDim.x;
  const uint Stride_y = blockDim.y*gridDim.y;

  // feature dimension be paralelled in segments
  // const uint feat_seg_size = (vlen+Stride_y-1) / Stride_y;
  
  for (uint fdim = Ty; fdim < vlen; fdim += Stride_y)
  {
    for (uint colInd = Tx; colInd < nvertex; colInd += Stride_x)
    {
      // One load of the source element, reused for all outgoing edges.
      uint srcBaseInd = colInd*src_tensor.strides[0] + fdim*src_tensor.strides[1];
      scalar_t src_ele = src_tensor.data[srcBaseInd];

      for (uint k = col_offset[colInd]; k < col_offset[colInd+1]; ++k)
      {
        uint rowInd = row_indx[k];
        uint dstBaseInd = rowInd*dst_tensor.strides[0] + fdim*dst_tensor.strides[1];
        scalar_t agg = (edge_weight == nullptr)? src_ele : F::binary(src_ele, edge_weight[k]);
        
        // Atomic: many columns may target the same destination row.
        F::reduceAtomic(dst_tensor.data + dstBaseInd, agg);
      } // for k
    } // for colInd
  } // for feat_seg
}


// Fused Linear + SpMM: for each destination vertex tx and output feature ty,
// recomputes the linear projection (src @ weight^T) of every neighbour k on
// the fly, stores it into inter_tensor, aggregates the projected values over
// the row's edges, and finally adds an optional bias.
// x-dimension grid-strides over vertices, y-dimension (block-granular) over
// output features.
// NOTE(review): inter_tensor[k, ty] is rewritten once per in-edge of k from
// possibly multiple threads — same value each time, but redundant global
// traffic; confirm this duplication is intended.
template <typename F, typename scalar_t>
__global__ void _fused_linear_spmm_kernel(
  const uint* __restrict__ row_offset,        // CSR row pointers (nvertex+1)
  const uint* __restrict__ col_indx,          // CSR column indices (nedge)
  const scalar_t* __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,     // [nvertex, in_feat]
  const acd::TensorInfo<scalar_t, uint> weight_tensor,  // [out_feat, in_feat]
  const scalar_t* __restrict__ bias,          // nullptr => no bias
  acd::TensorInfo<scalar_t, uint> inter_tensor,  // cache of projected src
  acd::TensorInfo<scalar_t, uint> dst_tensor,    // [nvertex, out_feat]
  const uint nedge)                           // unused; kept for uniform signature
{
  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const uint Bx = blockIdx.x;
  const uint By = blockIdx.y;
  const uint Tid = threadIdx.x+Bx*blockDim.x;

  const uint StrideX = blockDim.x*gridDim.x;
  const uint StrideY = gridDim.y;

  for ( uint ty = By; ty < vlen; ty += StrideY)
  {
    for ( uint tx = Tid; tx < nvertex; tx += StrideX)
    {
      uint dstInd = tx*dst_tensor.strides[0] + ty*dst_tensor.strides[1];
      scalar_t local_reduction = (scalar_t) 0;

      for (uint indptr=row_offset[tx]; indptr<row_offset[tx+1]; ++indptr)
      {
        uint k = col_indx[indptr];                       // neighbour vertex
        uint inInd = k*inter_tensor.strides[0] + ty*inter_tensor.strides[1];
        scalar_t src_ele = (scalar_t) 0;

        // Dense dot product: projected feature ty of neighbour k.
        for ( uint i=0; i<src_tensor.sizes[1]; ++i )
        {
          uint srcInd = k*src_tensor.strides[0] + i*src_tensor.strides[1];
          uint wInd = ty*weight_tensor.strides[0] + i*weight_tensor.strides[1];
          // printf("src[%d, %d](%d) * w[%d, %d](%d)\n", k, i, srcInd, ty, i, wInd);
          src_ele += src_tensor.data[srcInd] * weight_tensor.data[wInd];
        }

        // Cache the projection for the backward pass.
        inter_tensor.data[inInd] = src_ele;

        scalar_t agg;
        agg = (edge_weight == nullptr) ? src_ele : F::binary(src_ele, edge_weight[indptr]);
        F::reduce(&local_reduction, agg);
        // printf("reduction update = %f\n", local_reduction);
      }
      // NOTE(review): bias is indexed by vertex (tx) rather than by output
      // feature (ty); a Linear-layer bias is normally per-feature — confirm.
      if (bias == nullptr) dst_tensor.data[dstInd] = local_reduction;
      else dst_tensor.data[dstInd] = local_reduction + bias[tx];

      // printf("dst[%d,%d] = %f\n", ty, tx, local_reduction);
    }
  }
}

// Fused SpMM + Linear in the reversed order: first aggregate the raw source
// features over the row's edges (result cached in inter_tensor), then expand
// the aggregate through the weight matrix into all output features of the
// same destination row using atomicAdd.
// x-dimension grid-strides over vertices, y-dimension (block-granular) over
// INPUT features (vlen = src_tensor.sizes[1] here, unlike the forward fused
// kernel which iterates output features).
template <typename F, typename scalar_t>
__global__ void _fused_linear_spmm_kernel_reversed(
  const uint* __restrict__ row_offset,        // CSR row pointers (nvertex+1)
  const uint* __restrict__ col_indx,          // CSR column indices (nedge)
  const scalar_t* __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,     // [*, in_feat]
  const acd::TensorInfo<scalar_t, uint> weight_tensor,  // [out_feat, in_feat]
  acd::TensorInfo<scalar_t, uint> inter_tensor,  // cache of aggregated src
  acd::TensorInfo<scalar_t, uint> dst_tensor,    // must be zero-initialized (atomics accumulate)
  const uint nedge)                           // unused; kept for uniform signature
{
  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = src_tensor.sizes[1];
  const uint Bx = blockIdx.x;
  const uint By = blockIdx.y;
  const uint Tid = threadIdx.x+Bx*blockDim.x;

  const uint StrideX = blockDim.x*gridDim.x;
  const uint StrideY = gridDim.y;

  for ( uint ty = By; ty < vlen; ty += StrideY )
  {
    for ( uint tx = Tid; tx < nvertex; tx += StrideX )
    {
      uint inInd = tx*inter_tensor.strides[0] + ty*inter_tensor.strides[1];
      // uint dstInd = ty*dst_tensor.sizes[1] + tx;
      scalar_t local_reduction = (scalar_t) 0;
      // Phase 1: sparse aggregation of input feature ty over row tx's edges.
      for (uint indptr=row_offset[tx]; indptr<row_offset[tx+1]; ++indptr)
      {
        uint k = col_indx[indptr];
        uint srcInd = k*src_tensor.strides[0] + ty*src_tensor.strides[1];
        scalar_t src_ele = src_tensor.data[srcInd];

        scalar_t agg;
        agg = (edge_weight == nullptr) ? src_ele : F::binary(src_ele, edge_weight[indptr]);
        F::reduce(&local_reduction, agg);
        // printf("reduction update = %f\n", local_reduction);
      }
      // Cache the aggregate for the backward pass.
      inter_tensor.data[inInd] = local_reduction;

      // Phase 2: rank-1 update — input feature ty contributes to every
      // output feature i of row tx.
      for (uint i=0; i<dst_tensor.sizes[1]; ++i)
      {
        uint dstInd = tx*dst_tensor.strides[0] + i*dst_tensor.strides[1];
        uint wInd = i*weight_tensor.strides[0] + ty*weight_tensor.strides[1];
        scalar_t update = local_reduction * weight_tensor.data[wInd];
          // this is part of Linear kernel so DO use atomicAdd
        atomicAdd(dst_tensor.data+dstInd, update);
      }
      // printf("dst[%d,%d] = %f\n", ty, tx, local_reduction);
    }
  }
}

};

/**
 * 这个地方尝试动态调度带来的效果
 *  - 在warp mapping的大背景下这个策略起不到什么作用，因为warp mapping的最大优势是灵活
 *    即使在每个warp负载不均衡的情况下，warp调度器也可以高效地对warp进行initialize和retire
 *  - 这种优化最适合的情况应该是调度不那么灵活的场景，即block mapping
 */ 
namespace Alternative1 {

/**
 * try dynamic scheduling
 *  - 这个kernel是优先按照特征维度进行线程映射的，
 */ 

/**
 * Warp-mapped SpMM with shared-memory staging: each warp-slot (threadIdx.y)
 * grid-strides over rows; lanes cooperatively stage CoopSz column indices /
 * edge weights into the warp's private shared slice, then each lane
 * accumulates its own feature column. Assumes blockDim.x == CoopSz == 32
 * (one warp per y-slot).
 */
template <typename F, typename scalar_t, uint CoopSz>
__global__ void _spmm_kernel(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // Sentinel marking the unused tail of the last CoopSz-sized chunk.
  // smCidx is unsigned, so the former "< 0" test was always false and let
  // the inner loop consume the padded slots (out-of-bounds src reads);
  // compare against this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;   // lane within the cooperating group
  const uint wid = threadIdx.y;   // warp slot within the block

  const uint RowId = wid+blockDim.y*blockIdx.x;

  const uint SpStride = blockDim.y*gridDim.x;  // rows per grid sweep
  const uint DnStride = CoopSz;                // edges staged per iteration

  // Dynamic shared memory: [uint cidx per thread][scalar ew per thread].
  SharedMem<char> _sm;
  scalar_t *smEw;
  uint * smCidx = (uint*) _sm.ptr();
  if (weighted)
    smEw = (scalar_t*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);
  else
    smEw = nullptr;

  uint dstCol = lid + blockIdx.y*CoopSz;
  uint sm_base = wid*CoopSz;   // this warp's private shared-memory slice

  for ( uint row = RowId; row < nvertex; row += SpStride )
  {
    uint es = row_offset[row];
    uint ee = row_offset[row+1];
    scalar_t local_sum = (scalar_t) 0;

    for (uint k_base = es; k_base < ee; k_base+=DnStride)
    {
      uint k = k_base + lid;
      // Each warp reads/writes only its own slice [sm_base, sm_base+CoopSz),
      // so warp-level barriers suffice. A block-wide __syncthreads() here
      // would be unsafe: warps may run different row/chunk trip counts.
      // The first barrier prevents overwriting slots a lane is still
      // reading from the previous chunk; the second publishes the stores.
      __syncwarp();
      smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : kPad;
      if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
      __syncwarp();

      if (dstCol < vlen)
      {
        for (uint i = 0; i < CoopSz; ++i)
        {
          if (smCidx[sm_base + i] == kPad) break;  // padded tail reached
          uint srcInd = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol;
          if (weighted) F::reduce(&local_sum, F::binary(src_vecs[srcInd], smEw[sm_base + i]));
          else F::reduce(&local_sum, src_vecs[srcInd]);
        }
      }
    }

    if (dstCol < vlen)
    {
      uint dstInd = dst_strides[0] * row + dst_strides[1] * dstCol;
      dst_vecs[dstInd] = local_sum;
    }
  }
}

/**
 * try out feature dimension parallelism
 * def y as vertex index
 * def x as feature dim
 */ 
// Fused SpMM + Linear (reversed order) with block-level cooperation on the
// feature dimension: threads of a block split into feature groups (for
// short feature vectors), each group aggregates a segment of input features
// for its vertex, shares the partial aggregates via shared memory, then
// expands them through the weight matrix into dst.
// NOTE(review): __syncthreads() below sits inside loops whose trip counts
// depend on per-thread featGrp / featId; if threads of one block diverge in
// iteration count this is undefined behavior — confirm launch config keeps
// them uniform.
template <typename F, typename scalar_t>
__global__ void _fused_linear_spmm_kernel_reversed(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t* __restrict__ edge_weight,    // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  const acd::TensorInfo<scalar_t, uint> weight_tensor,
  acd::TensorInfo<scalar_t, uint> inter_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,  // accumulated with += (pre-initialize)
  const uint nedge)                            // unused; kept for uniform signature
{
  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = src_tensor.sizes[1]; 
  const uint Tid = threadIdx.x;
  const uint Bid = blockIdx.x;

  SharedMem<scalar_t> shared;      // per-thread partial aggregates
  __shared__ uint seg_start;       // first feature index of current segment
  
  uint featPerBlock, featId, featGrp, stride_x, stride_y;
  
  if (vlen < blockDim.x)
  {
    // Short feature vectors: split the block into featPerBlock groups of
    // stride_x threads each; each group handles one vertex.
    featPerBlock = Base::warp_friendly_partition(blockDim.x, vlen);
    stride_x = blockDim.x / featPerBlock;
    stride_y = featPerBlock * gridDim.x;
    featId = Tid % stride_x;
    featGrp = Tid / stride_x;
  } else {
    // Long feature vectors: the whole block works on one vertex at a time.
    featPerBlock = 1;
    stride_y = gridDim.x;
    stride_x = blockDim.x;
    featId = Tid;
    featGrp = 0;
  }
  
  uint v_dispatch = Bid*featPerBlock+featGrp;
  for ( uint v = v_dispatch; v < nvertex; v+=stride_y )
  {
    for ( uint fid = featId; fid < vlen; fid += stride_x)
    {
      uint inInd = v*inter_tensor.strides[0] + fid*inter_tensor.strides[1];
      // Thread 0 publishes the segment's starting feature index.
      // NOTE(review): when featPerBlock > 1 only group 0's fid is
      // published, yet all groups read it — confirm intended.
      if (Tid == 0) seg_start = fid;
      scalar_t agg = (scalar_t) 0;
      // Phase 1: aggregate input feature fid over v's edges.
      for ( uint k = row_offset[v]; k < row_offset[v+1]; ++k )
      {
        uint srcInd = col_indx[k]*src_tensor.strides[0] + fid*src_tensor.strides[1];
        scalar_t src_ele = src_tensor.data[srcInd];
        scalar_t src_out = (edge_weight == nullptr)? src_ele : F::binary(src_ele, edge_weight[k]);
        F::reduce(&agg, src_out);
      }
      shared[Tid] = agg;
      inter_tensor.data[inInd] = agg;   // cache for backward
      
      __syncthreads();
      // printf("ok [%d, %d]\n", v, fid);
      uint local_seg_start = seg_start;

      // Phase 2: expand this feature segment through the weight matrix
      // into the output features of vertex v.
      for (uint i=featId; i<nvertex; i+=stride_x)
      {
        uint dstInd = v*dst_tensor.strides[0] + i*dst_tensor.strides[1]; 
        uint boundary = min(local_seg_start+stride_x, vlen);
        scalar_t local_reduce = (scalar_t) 0;
        for (uint j=local_seg_start; j<boundary; ++j)
        {
          uint wInd = i*weight_tensor.strides[0] + j*weight_tensor.strides[1];
          uint shInd = stride_x * featGrp + j - local_seg_start;
          local_reduce += shared[shInd] * weight_tensor.data[wInd];
        }
        dst_tensor.data[dstInd] += local_reduce;
      }
    }
  }
}

}; // namespace Alternative1

namespace Alternative2 {
// this is inheritly the WM model
// try to use shared mem to commmance local reduction
/**
 * 经过实验证明按照顶点去进行WM并不是一个很好的选择，至少现在这个实现不行——与其用滑稽的cross-lane op
 * 为什么不用shared？
 */ 

// Partition a CSR offset array `arr` (len+1 entries, arr[len] == nnz) into
// `nparts` roughly equal-work segments: pos[Tid] is the upper-bound position
// of Tid's work boundary, and coop[Tid] becomes the size of the run of
// partitions that landed on the same row (so they can cooperate on it).
// NOTE(review): several hazards to confirm before relying on this kernel:
//  - pos[Tid-1] / pos[Tid+1] read out of bounds at Tid == 0 and
//    Tid == nparts-1;
//  - __syncthreads() is inside the divergent `if (Tid < nparts)` branch —
//    undefined behavior when the block overshoots nparts;
//  - the look-left/look-right scans over coop[] are unbounded and race with
//    the final `coop[Tid] = coop_size` store from other threads;
//  - "look left" actually scans increasing indices (Tid+k) and vice versa,
//    and the element's own flag is counted twice.
static __global__ void balanced_partition(const uint* arr, uint* pos, uint* coop, const uint nparts, const uint len)
{
  const uint Tid = threadIdx.x+blockIdx.x*blockDim.x;
  // Ideal number of nonzeros per partition (ceiling division).
  const uint interval = CEIL(arr[len], nparts);
  if (Tid < nparts)
  {
    uint key = Tid*interval;
    // First row whose offset exceeds this partition's nnz boundary.
    pos[Tid] = (uint)__upper_bound(reinterpret_cast<int*>(const_cast<uint*>(arr)), len+1, key);
    __syncthreads();
    // Flag partitions that share a boundary row with a neighbour.
    coop[Tid] = (pos[Tid] == pos[Tid-1] || pos[Tid] == pos[Tid+1]) ? 1:0;
    __syncthreads();
    // look left
    uint k=0;
    uint coop_size = 0;
    while (coop[Tid+k]>0)
    {
      coop_size++, k++;
    }
    // look right
    k=0;
    while (coop[Tid-k]>0)
    {
      coop_size++, k++;
    }
    coop[Tid] = coop_size;
  }

}


/**
 * Warp-mapped SpMM that broadcasts staged column indices / edge weights via
 * warp shuffles instead of shared memory. Each warp-slot grid-strides over
 * rows; lane i stages edge (k_base+i), and the values are rotated through
 * the warp one at a time with __shfl_sync. Requires a full warp
 * (blockDim.x == CoopSz == 32) so the 0xffffffff masks are valid.
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // End-of-row sentinel for the padded tail of the last chunk. cur_ci is
  // unsigned, so the former "cur_ci < 0" test was always false and the loop
  // processed the padded slots (out-of-bounds src reads); compare against
  // this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = wid+blockDim.y*blockIdx.x;

  const uint SpStride = gridDim.x*blockDim.y;
  const uint DnStride = CoopSz;

  uint local_ci, next_ci, cur_ci;
  scalar_t local_ew, next_ew, cur_ew;

  uint dstCol = lid + blockIdx.y*CoopSz;

  for (uint row = RowId; row < nvertex; row+=SpStride)
  {
    uint es = row_offset[row];
    uint ee = row_offset[row+1];
    scalar_t local_sum = (scalar_t) 0;

    if (weighted)
    {
      for (uint k_base = es; k_base < ee; k_base+=DnStride)
      {
        uint k = k_base + lid;
        local_ci = (k < ee) ? col_indx[k] : kPad;
        next_ci = __shfl_sync(0xffffffff, local_ci, 0);
        local_ew = (k < ee) ? edge_weight[k] : 0;
        next_ew = __shfl_sync(0xffffffff, local_ew, 0);

        for (uint i = 0; i < CoopSz; ++i)
        {
          // Double-buffered broadcast: consume lane i's value while
          // prefetching lane i+1's. All lanes execute the shuffles; the
          // break below is warp-uniform because cur_ci came from a shuffle.
          cur_ci = next_ci;
          cur_ew = next_ew;
          next_ci = __shfl_sync(0xffffffff, local_ci, i+1);
          next_ew = __shfl_sync(0xffffffff, local_ew, i+1);

          if (cur_ci == kPad) break;  // padded tail reached

          // Guard the gather: lanes past the feature boundary still take
          // part in the shuffles but must not read src out of bounds.
          if (dstCol < vlen)
          {
            uint srcInd = src_strides[0] * cur_ci + src_strides[1] * dstCol;
            F::reduce(&local_sum, F::binary(src_vecs[srcInd], cur_ew));
          }
        }
      }
    }
    else
    {
      for (uint k_base = es; k_base < ee; k_base+=DnStride)
      {
        uint k = k_base + lid;
        local_ci = (k < ee) ? col_indx[k] : kPad;
        next_ci = __shfl_sync(0xffffffff, local_ci, 0);

        for (uint i = 0; i < CoopSz; ++i)
        {
          cur_ci = next_ci;
          next_ci = __shfl_sync(0xffffffff, local_ci, i+1);

          if (cur_ci == kPad) break;  // padded tail reached

          if (dstCol < vlen)
          {
            uint srcInd = src_strides[0] * cur_ci + src_strides[1] * dstCol;
            F::reduce(&local_sum, src_vecs[srcInd]);
          }
        }
      }
    }

    if (dstCol < vlen)
    {
      uint dstInd = dst_strides[0] * row + dst_strides[1] * dstCol;
      dst_vecs[dstInd] = local_sum;
    }
  }
}

}; // namespace Alternative2


namespace Alternative3 {
// 尝试一下一维任务划分
// 直接抄袭rocsparse
/**
 * 1D task partition (rocsparse-style): one warp-slot per row, no grid-stride
 * loop. Lanes stage CoopSz edges in the warp's private shared slice; full
 * chunks (>= DnThresh edges) use a branch-free `continue` loop, partial
 * chunks stop at the padded tail. Assumes blockDim.x == CoopSz == 32.
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // Sentinel for the padded tail. smCidx is unsigned, so the former "< 0"
  // test was always false; compare against this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = wid+blockDim.y*blockIdx.x;

  // Whole-warp early exit (RowId is uniform per warp), so the __syncwarp()
  // calls below are reached by every remaining lane. Prevents OOB reads of
  // row_offset when the grid overshoots the vertex count.
  if (RowId >= nvertex) return;

  const uint DnStride = CoopSz;
  constexpr uint DnThresh = CoopSz * 3 / 4;

  SharedMem<char> _sm;
  char *buf1 = _sm.ptr();
  char *buf2 = _sm.ptr() + (sizeof(uint))*blockDim.x*blockDim.y;

  uint * smCidx = (uint*) buf1;
  scalar_t * smEw = (scalar_t*) buf2;

  uint dstCol = lid + blockIdx.y*CoopSz;
  uint sm_base = wid*CoopSz;   // this warp's private shared-memory slice

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];
  scalar_t local_sum = (scalar_t) 0;
  
  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;

    smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : kPad;
    if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
    // Publish the stores to the rest of the warp; implicit warp synchrony
    // cannot be assumed under independent thread scheduling (Volta+).
    __syncwarp();

    if (k_base + DnThresh <= ee)
    {
      // Mostly-full chunk: skip pads individually so the loop stays
      // unrollable without an early exit.
      if (dstCol < vlen)
      {
        #pragma unroll
        for(uint i = 0; i < CoopSz; ++i)
        {
          if (smCidx[sm_base + i] == kPad) continue;
          uint srcInd = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol;
          if (weighted) F::reduce(&local_sum, F::binary(src_vecs[srcInd], smEw[sm_base + i]));
          else F::reduce(&local_sum, src_vecs[srcInd]);
        }
      }
    } else {
      // Sparse tail chunk: pads are contiguous at the end, stop early.
      if (dstCol < vlen)
      {
        #pragma unroll
        for(uint i = 0; i < CoopSz; ++i)
        {
          if (smCidx[sm_base + i] == kPad) break;
          uint srcInd = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol;
          if (weighted) F::reduce(&local_sum, F::binary(src_vecs[srcInd], smEw[sm_base + i]));
          else F::reduce(&local_sum, src_vecs[srcInd]);
        }
      }
    }
    // Keep lanes from overwriting slots that slower lanes still read.
    __syncwarp();
  }

  if (dstCol < vlen)
  {
    uint dstInd = dst_strides[0] * RowId + dst_strides[1] * dstCol;
    dst_vecs[dstInd] = local_sum;
  }
}

/**
 * Row-sorted variant of the 1D-partition SpMM: row_index maps the dense
 * thread slot to the actual (degree-sorted) row, and shared-memory indices
 * go through SH_SKEW() padding to avoid bank conflicts.
 * Assumes blockDim.x == CoopSz == 32 (one warp per y-slot).
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_sorted(
  const uint* __restrict__ row_index,          // permutation: slot -> row id
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // Sentinel for the padded tail. smCidx is unsigned, so the former "< 0"
  // test was always false; compare against this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;
  constexpr uint DnStride = CoopSz;

  // Whole-warp early exit (the slot index is uniform per warp), so the
  // __syncwarp() calls below are reached by every remaining lane.
  if (wid+blockDim.y*blockIdx.x >= nvertex) return;
  const uint RowId = row_index[wid+blockDim.y*blockIdx.x];

  SharedMem<char> _sm;
  char *buf1 = _sm.ptr();
  char *buf2 = _sm.ptr() + (sizeof(uint))*SH_SKEW(blockDim.x*blockDim.y);

  uint * smCidx = (uint*) buf1;
  scalar_t * smEw = (scalar_t*) buf2;

  uint dstCol = lid + blockIdx.y*CoopSz;
  uint sm_base = wid*CoopSz;   // this warp's private shared-memory slice

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];
  scalar_t local_sum = (scalar_t) 0;
  
  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;

    smCidx[SH_SKEW(lid+sm_base)] = (k < ee) ? col_indx[k] : kPad;
    if (weighted) smEw[SH_SKEW(lid+sm_base)] = (k < ee) ? edge_weight[k] : 0;
    __syncwarp();   // publish stores to the rest of the warp

    if (dstCol < vlen)
    {
      #pragma unroll
      for(uint i = 0; i < CoopSz; ++i)
      {
        if (smCidx[SH_SKEW(sm_base + i)] == kPad) break;  // padded tail
        uint srcInd = src_strides[0] * smCidx[SH_SKEW(sm_base + i)] + src_strides[1] * dstCol;
        if (weighted) F::reduce(&local_sum, F::binary(src_vecs[srcInd], smEw[SH_SKEW(sm_base + i)]));
        else F::reduce(&local_sum, src_vecs[srcInd]);
      }
    }
    // Keep lanes from overwriting slots that slower lanes still read.
    __syncwarp();
  }

  if (dstCol < vlen)
  {
    uint dstInd = dst_strides[0] * RowId + dst_strides[1] * dstCol;
    dst_vecs[dstInd] = local_sum;
  }
}

/**
 * Merged variant of the 1D-partition SpMM: single inner loop that skips
 * padded slots with `continue` (no DnThresh split, no early break).
 * Assumes blockDim.x == CoopSz == 32 (one warp per y-slot).
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_merged(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // Sentinel for the padded tail. smCidx is unsigned, so the former "< 0"
  // test was always false and padded slots were consumed (OOB src reads);
  // compare against this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = wid+blockDim.y*blockIdx.x;

  // Whole-warp early exit (RowId is uniform per warp); prevents OOB reads
  // of row_offset when the grid overshoots the vertex count.
  if (RowId >= nvertex) return;

  const uint DnStride = CoopSz;

  SharedMem<char> _sm;
  char *buf1 = _sm.ptr();
  char *buf2 = _sm.ptr() + (sizeof(uint))*blockDim.x*blockDim.y;

  uint * smCidx = (uint*) buf1;
  scalar_t * smEw = (scalar_t*) buf2;

  uint dstCol = lid + blockIdx.y*CoopSz;
  uint sm_base = wid*CoopSz;   // this warp's private shared-memory slice

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];
  scalar_t local_sum = (scalar_t) 0;
  
  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;

    smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : kPad;
    if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
    __syncwarp();   // publish stores to the rest of the warp

    if (dstCol < vlen)
    {
      for(uint i = 0; i < CoopSz; ++i)
      {
        if (smCidx[sm_base + i] == kPad) continue;  // skip padded slot
        uint srcInd = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol;
        if (weighted) F::reduce(&local_sum, F::binary(src_vecs[srcInd], smEw[sm_base + i]));
        else F::reduce(&local_sum, src_vecs[srcInd]);
      }
    }
    // Keep lanes from overwriting slots that slower lanes still read.
    __syncwarp();
  }

  if (dstCol < vlen)
  {
    uint dstInd = dst_strides[0] * RowId + dst_strides[1] * dstCol;
    dst_vecs[dstInd] = local_sum;
  }
}

/**
 * 2x feature-unrolled warp-mapped SpMM: each lane accumulates two feature
 * columns (dstCol1, dstCol2 = dstCol1 + CoopSz) per row to increase ILP.
 * Grid-strides over rows; padded slots are skipped with `continue`.
 * Assumes blockDim.x == CoopSz == 32 (one warp per y-slot).
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // Sentinel for the padded tail. smCidx is unsigned, so the former "< 0"
  // test was always false and padded slots were consumed (OOB src reads);
  // compare against this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;

  const uint SpStride = gridDim.x*blockDim.y;
  const uint DnStride = CoopSz;

  SharedMem<char> _sm;
  uint * smCidx = (uint*) _sm.ptr();
  scalar_t * smEw = (scalar_t*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);

  // Two feature columns per lane, CoopSz apart.
  uint dstCol1 = lid + blockIdx.y*CoopSz*2;
  uint dstCol2 = CoopSz+lid + blockIdx.y*CoopSz*2;

  for (uint row = RowId; row < nvertex; row+=SpStride)
  {
    uint es = row_offset[row];
    uint ee = row_offset[row+1];

    scalar_t local_sum_1 = (scalar_t) 0;
    scalar_t local_sum_2 = (scalar_t) 0;
    
    for (uint k_base = es; k_base < ee; k_base+=DnStride)
    {
      uint k = k_base + lid;
      uint sm_base = wid*CoopSz;   // this warp's private shared-memory slice

      // Warp-level barriers: the first prevents overwriting slots a lane is
      // still reading from the previous chunk, the second publishes the new
      // stores. Implicit warp synchrony cannot be assumed on Volta+.
      __syncwarp();
      smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : kPad;
      if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
      __syncwarp();

      if (dstCol2 < vlen){
        // Both columns in range: accumulate the pair.
        if (weighted)
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            uint srcInd2 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol2;
            F::reduce(&local_sum_1, F::binary(src_vecs[srcInd1], smEw[sm_base + i]));
            F::reduce(&local_sum_2, F::binary(src_vecs[srcInd2], smEw[sm_base + i]));
          }
        else
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            uint srcInd2 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol2;
            F::reduce(&local_sum_1, src_vecs[srcInd1]);
            F::reduce(&local_sum_2, src_vecs[srcInd2]);
          }
      }
      else if (dstCol1 < vlen)
      {
        // Only the first column is in range (tail of the feature dim).
        if (weighted)
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            F::reduce(&local_sum_1, F::binary(src_vecs[srcInd1], smEw[sm_base + i]));
          }
        else
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            F::reduce(&local_sum_1, src_vecs[srcInd1]);
          }
      }
    }

    if (dstCol1 < vlen)
    {
      uint dstInd1 = dst_strides[0] * row + dst_strides[1] * dstCol1;
      dst_vecs[dstInd1] = local_sum_1;
    }
    
    if (dstCol2 < vlen)
    {
      uint dstInd2 = dst_strides[0] * row + dst_strides[1] * dstCol2;
      dst_vecs[dstInd2] = local_sum_2;
    }
  }
}


/**
 * 2x feature-unrolled warp-mapped SpMM, "flat" variant: identical to
 * _spmm_kernel_unroll except the inner loops stop with `break` at the first
 * padded slot (pads are contiguous at the chunk tail).
 * Assumes blockDim.x == CoopSz == 32 (one warp per y-slot).
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll_flat(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,   // nullptr => unweighted
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)                        // unused; kept for uniform signature
{
  // Sentinel for the padded tail. smCidx is unsigned, so the former "< 0"
  // test was always false and the loop ran past the padded tail (OOB src
  // reads); compare against this explicit value instead.
  constexpr uint kPad = static_cast<uint>(-1);

  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;

  const uint SpStride = gridDim.x*blockDim.y;
  const uint DnStride = CoopSz;

  SharedMem<char> _sm;
  uint * smCidx = (uint*) _sm.ptr();
  scalar_t * smEw = (scalar_t*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);

  // Two feature columns per lane, CoopSz apart.
  uint dstCol1 = lid + blockIdx.y*CoopSz*2;
  uint dstCol2 = CoopSz+lid + blockIdx.y*CoopSz*2;

  for (uint row = RowId; row < nvertex; row+=SpStride)
  {
    uint es = row_offset[row];
    uint ee = row_offset[row+1];

    scalar_t local_sum_1 = (scalar_t) 0;
    scalar_t local_sum_2 = (scalar_t) 0;
    
    for (uint k_base = es; k_base < ee; k_base+=DnStride)
    {
      uint k = k_base + lid;
      uint sm_base = wid*CoopSz;   // this warp's private shared-memory slice

      // Warp-level barriers: the first prevents overwriting slots a lane is
      // still reading from the previous chunk, the second publishes the new
      // stores. Implicit warp synchrony cannot be assumed on Volta+.
      __syncwarp();
      smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : kPad;
      if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
      __syncwarp();

      if (dstCol2 < vlen){
        // Both columns in range: accumulate the pair.
        if (weighted)
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) break;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            uint srcInd2 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol2;
            F::reduce(&local_sum_1, F::binary(src_vecs[srcInd1], smEw[sm_base + i]));
            F::reduce(&local_sum_2, F::binary(src_vecs[srcInd2], smEw[sm_base + i]));
          }
        else
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) break;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            uint srcInd2 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol2;
            F::reduce(&local_sum_1, src_vecs[srcInd1]);
            F::reduce(&local_sum_2, src_vecs[srcInd2]);
          }
      }
      else if (dstCol1 < vlen)
      {
        // Only the first column is in range (tail of the feature dim).
        if (weighted)
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) break;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            F::reduce(&local_sum_1, F::binary(src_vecs[srcInd1], smEw[sm_base + i]));
          }
        else
          #pragma unroll
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] == kPad) break;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            F::reduce(&local_sum_1, src_vecs[srcInd1]);
          }
      }
    }

    if (dstCol1 < vlen)
    {
      uint dstInd1 = dst_strides[0] * row + dst_strides[1] * dstCol1;
      dst_vecs[dstInd1] = local_sum_1;
    }
    
    if (dstCol2 < vlen)
    {
      uint dstInd2 = dst_strides[0] * row + dst_strides[1] * dstCol2;
      dst_vecs[dstInd2] = local_sum_2;
    }
  }
}

/**
 * SpMM kernel (CSR x row-major dense): each y-lane of a block (one warp when
 * blockDim.x == CoopSz == 32) owns one sparse row at a time and walks rows
 * with a grid stride; each thread accumulates TWO output columns
 * (dstCol1 / dstCol2), so a block covers a 2*CoopSz-wide column tile.
 *
 * Dynamic shared memory layout per block:
 *   [ int cidx[blockDim.x*blockDim.y] ][ scalar_t ew[blockDim.x*blockDim.y] ]
 * CoopSz nonzeros are staged per step; a -1 column index marks the tail.
 *
 * @tparam F       functor: F::binary (multiply) and F::reduce (accumulate)
 * @tparam CoopSz  cooperative width; assumed == blockDim.x == 32 (warp)
 */
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll_merged(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)
{
  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;

  const uint SpStride = gridDim.x*blockDim.y;
  const uint DnStride = CoopSz;

  // FIX: the staging buffer must be signed -- the tail sentinel below is -1
  // and an unsigned `smCidx[..] < 0` test is always false, so the sentinel
  // was consumed as column index 0xffffffff (out-of-bounds read).
  // (Assumes vertex ids fit in a signed 32-bit int.)
  SharedMem<char> _sm;
  int * smCidx = (int*) _sm.ptr();
  scalar_t * smEw = (scalar_t*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);

  uint dstCol1 = lid + blockIdx.y*CoopSz*2;
  uint dstCol2 = CoopSz+lid + blockIdx.y*CoopSz*2;

  for (uint row = RowId; row < nvertex; row+=SpStride)
  {
    uint es = row_offset[row];
    uint ee = row_offset[row+1];

    scalar_t local_sum_1 = (scalar_t) 0;
    scalar_t local_sum_2 = (scalar_t) 0;

    for (uint k_base = es; k_base < ee; k_base+=DnStride)
    {
      uint k = k_base + lid;
      uint sm_base = wid*CoopSz;

      smCidx[lid+sm_base] = (k < ee) ? (int)col_indx[k] : -1;
      if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
      // The staging slot is private to this warp (blockDim.x == CoopSz == 32
      // assumed), so a warp-level barrier suffices -- but it IS needed:
      // implicit warp-synchronous execution is not guaranteed on Volta+.
      __syncwarp();

      if (dstCol2 < vlen){ // both column tiles in range
        if (weighted)
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] < 0) continue; // tail sentinel
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            uint srcInd2 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol2;
            F::reduce(&local_sum_1, F::binary(src_vecs[srcInd1], smEw[sm_base + i]));
            F::reduce(&local_sum_2, F::binary(src_vecs[srcInd2], smEw[sm_base + i]));
          }
        else
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] < 0) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            uint srcInd2 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol2;
            F::reduce(&local_sum_1, src_vecs[srcInd1]);
            F::reduce(&local_sum_2, src_vecs[srcInd2]);
          }
      }
      else if (dstCol1 < vlen) // only the first tile in range
      {
        if (weighted)
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] < 0) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            F::reduce(&local_sum_1, F::binary(src_vecs[srcInd1], smEw[sm_base + i]));
          }
        else
          for(uint i = 0; i < CoopSz; ++i)
          {
            if (smCidx[sm_base + i] < 0) continue;
            uint srcInd1 = src_strides[0] * smCidx[sm_base + i] + src_strides[1] * dstCol1;
            F::reduce(&local_sum_1, src_vecs[srcInd1]);
          }
      }
      // keep the warp together before the staging buffer is overwritten
      __syncwarp();
    }

    if (dstCol1 < vlen)
    {
      uint dstInd1 = dst_strides[0] * row + dst_strides[1] * dstCol1;
      dst_vecs[dstInd1] = local_sum_1;
    }

    if (dstCol2 < vlen)
    {
      uint dstInd2 = dst_strides[0] * row + dst_strides[1] * dstCol2;
      dst_vecs[dstInd2] = local_sum_2;
    }
  }
}

}; // namespace Alternative3

namespace Alternative4 {
// Recursively halve the cooperative width while the dense row length stays
// below 3/4 of it (CoopSz - CoopSz/4); returns the resulting split factor,
// always a power of two.
template<uint CoopSz>
inline uint _MYLOG_split(uint vlen)
{
  constexpr uint threshold = CoopSz - (CoopSz >> 2); // == 3*CoopSz/4 for CoopSz % 4 == 0
  return (vlen < threshold) ? 2 * _MYLOG_split<CoopSz / 2>(vlen) : 1;
}

// Recursion anchor: a width of 2 is never split further.
template<>
inline uint _MYLOG_split<2>(uint vlen)
{
  return 1;
}

// Attempt to widen global-memory traffic to 128-bit accesses: each thread
// keeps _128_align (= 16/sizeof(scalar_t)) accumulators, one per CoopSz-wide
// column slice. One warp (y-lane) processes exactly one sparse row.
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)
{
  // number of scalar_t elements in one 128-bit (16-byte) access
  constexpr uint _128_align = 16/sizeof(scalar_t);
  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;
  // FIX: tail guard -- threads with RowId >= nvertex previously read past
  // the end of row_offset. The whole warp shares RowId (one warp per
  // y-lane), so this early return keeps the __syncwarp() calls below safe.
  if (RowId >= nvertex) return;

  const uint DnStride = CoopSz;

  // FIX: the staging buffer must be signed -- the -1 tail sentinel can never
  // satisfy an unsigned `< 0` test, so it was consumed as a huge column
  // index (out-of-bounds read).
  SharedMem<char> _sm;
  int * smCidx = (int*) _sm.ptr();
  scalar_t * smEw = (scalar_t*) (_sm.ptr() + sizeof(uint)*SH_SKEW(blockDim.x*blockDim.y));

  uint dstCol = lid + blockIdx.y*CoopSz*_128_align;

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];

  scalar_t local_sum[_128_align] = {};

  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;
    uint sm_base = wid*CoopSz;

    smCidx[SH_SKEW(lid+sm_base)] = (k < ee) ? (int)col_indx[k] : -1;
    if (weighted) smEw[SH_SKEW(lid+sm_base)] = (k < ee) ? edge_weight[k] : 0;
    // warp-private staging (assumes blockDim.x == CoopSz == 32); an explicit
    // warp barrier is required on Volta+ before lanes read each other's stores.
    __syncwarp();

    if (weighted)
    {
      #pragma unroll 6
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[SH_SKEW(sm_base+i)] < 0 ) continue; // tail sentinel
        uint srcBase = src_strides[0] * smCidx[SH_SKEW(sm_base+i)] + src_strides[1] * dstCol;
        scalar_t ew = smEw[SH_SKEW(sm_base+i)];
        #pragma unroll
        for (uint j = 0; j < _128_align; ++j)
        {
          // FIX: the interior/boundary test must use the grid extent
          // (gridDim.y), not blockDim.y; slice offset is j*CoopSz
          // (identical to the old j<<5 when CoopSz == 32).
          if (blockIdx.y != gridDim.y - 1 || dstCol+j*CoopSz < vlen) F::reduce(local_sum+j, F::binary(ew, src_vecs[srcBase+j*CoopSz]));
        }
      }
    }
    else
    {
      #pragma unroll 6
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[SH_SKEW(sm_base+i)] < 0 ) continue;
        uint srcBase = src_strides[0] * smCidx[SH_SKEW(sm_base+i)] + src_strides[1] * dstCol;
        #pragma unroll
        for (uint j=0; j < _128_align; ++j)
        {
          if (blockIdx.y != gridDim.y - 1 || dstCol+j*CoopSz < vlen) F::reduce(local_sum+j, src_vecs[srcBase+j*CoopSz]);
        }
      }
    }
    // keep the warp together before the staging buffer is overwritten
    __syncwarp();
  }

  uint dstBase = dst_strides[0] * RowId + dst_strides[1] * dstCol;

  for (uint j=0; j<_128_align; ++j)
  {
    // FIX: guard every slice (dstCol + j*CoopSz), not only the first one --
    // the old test let the last y-block write past the end of the row.
    if (blockIdx.y != gridDim.y - 1 || dstCol+j*CoopSz < vlen) dst_vecs[dstBase+j*CoopSz] = local_sum[j];
  }
}

// Flat variant of the 128-bit unrolled kernel: each thread owns _128_align
// *consecutive* columns (dstCol .. dstCol+_128_align-1), so the per-thread
// accesses form one contiguous 128-bit segment.
// NOTE(review): the bounds checks were deliberately commented out in the
// original, which implies vlen is assumed to be a multiple of
// CoopSz*_128_align -- confirm at the call site. The `vec` template
// parameter is currently unused but kept for interface compatibility.
template <typename F, typename scalar_t, uint CoopSz = 32, uint vec = 1>
__global__ void _spmm_kernel_unroll_flat(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge)
{
  constexpr uint _128_align = 16/sizeof(scalar_t);
  const bool weighted = (edge_weight != nullptr);

  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  const scalar_t * src_vecs = src_tensor.data;
  scalar_t * dst_vecs = dst_tensor.data;
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;
  // FIX: tail guard -- RowId beyond the last row read past row_offset.
  // A whole warp shares RowId, so returning here keeps __syncwarp() safe.
  if (RowId >= nvertex) return;

  const uint DnStride = CoopSz;

  // FIX: signed staging buffer so the -1 tail sentinel is detectable
  // (an unsigned `< 0` comparison is always false).
  SharedMem<char> _sm;
  int * smCidx = (int*) _sm.ptr();
  scalar_t * smEw = (scalar_t*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);

  uint dstCol = lid*_128_align + blockIdx.y*CoopSz*_128_align;

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];

  scalar_t local_sum[_128_align] = {};

  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;
    uint sm_base = wid*CoopSz;

    smCidx[lid+sm_base] = (k < ee) ? (int)col_indx[k] : -1;
    if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
    // warp-private staging (assumes blockDim.x == CoopSz == 32); explicit
    // warp sync is required on Volta+ -- lockstep execution is not implied.
    __syncwarp();

    if (weighted)
    {
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[i+sm_base] < 0 ) break; // sentinel: only at the tail
        uint srcBase = src_strides[0] * smCidx[sm_base+i] + src_strides[1] * dstCol;
        scalar_t ew = smEw[sm_base+i];
        F::reduce(local_sum,   F::binary(ew, src_vecs[srcBase]));
        F::reduce(local_sum+1, F::binary(ew, src_vecs[srcBase+1]));
        if (_128_align > 2)
        {
          F::reduce(local_sum+2, F::binary(ew, src_vecs[srcBase+2]));
          F::reduce(local_sum+3, F::binary(ew, src_vecs[srcBase+3]));
        }
      }
    }
    else
    {
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[i+sm_base] < 0 ) break;
        uint srcBase = src_strides[0] * smCidx[sm_base+i] + src_strides[1] * dstCol;
        F::reduce(local_sum,   src_vecs[srcBase]);
        F::reduce(local_sum+1, src_vecs[srcBase+1]);
        if (_128_align > 2)
        {
          F::reduce(local_sum+2, src_vecs[srcBase+2]);
          F::reduce(local_sum+3, src_vecs[srcBase+3]);
        }
      }
    }
    // keep the warp together before the staging buffer is overwritten
    __syncwarp();
  }

  uint dstBase = dst_strides[0] * RowId + dst_strides[1] * dstCol;

  dst_vecs[dstBase]   = local_sum[0];
  dst_vecs[dstBase+1] = local_sum[1];
  if (_128_align > 2)
  {
    dst_vecs[dstBase+2] = local_sum[2];
    dst_vecs[dstBase+3] = local_sum[3];
  }
}

// due to vector datatype there's no other simpler way than restructuring
// Primary template: deliberately empty fallback for scalar types without a
// vectorized implementation.
// NOTE(review): the float/double versions below are written as *partial*
// specializations of this function template (F and CoopSz remain open),
// which C++ does not permit for function templates -- they will not compile
// as written. Consider dispatching on the scalar type inside one template,
// or wrapping the kernels in a class template that can be partially
// specialized.
template <typename F, typename scalar_t, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll_merged(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const acd::TensorInfo<scalar_t, uint> src_tensor,
  acd::TensorInfo<scalar_t, uint> dst_tensor,
  const uint64_t nedge) {}

// float variant: four columns per thread via float4 loads/stores.
// NOTE(review): this is a partial specialization of a function template
// (F and CoopSz stay open) -- ill-formed C++; a default template argument on
// a specialization is also not allowed. Needs restructuring to compile.
template <typename F, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll_merged<F, float, CoopSz>(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const float * __restrict__ edge_weight,
  const acd::TensorInfo<float, uint> src_tensor,
  acd::TensorInfo<float, uint> dst_tensor,
  const uint64_t nedge)
{
  // elements of float per 128-bit access (== 4)
  constexpr uint _128_align = 16/sizeof(float);
  bool weighted = (edge_weight != nullptr);
  
  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  // reinterpreting assumes the dense data is 16-byte aligned and the row
  // stride is a multiple of 4 floats -- TODO confirm at call site
  const float4 * src_vecs = reinterpret_cast<const float4*>(src_tensor.data);
  float4 * dst_vecs = reinterpret_cast<float4*>(dst_tensor.data);
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;

  const uint SpStride = gridDim.x*blockDim.y;
  const uint DnStride = CoopSz;

  // NOTE(review): smCidx is unsigned but stores a -1 sentinel below; the
  // `< 0` tests in the inner loops can never fire on an unsigned type.
  SharedMem<char> _sm;
  uint * smCidx = (uint*) _sm.ptr();
  float * smEw = (float*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);

  uint dstCol = lid*_128_align + blockIdx.y*CoopSz*_128_align;

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];

  // NOTE(review): local_sum is never zero-initialized before accumulation;
  // the sibling kernels start from 0 -- likely a bug.
  float4 local_sum;
  
  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;
    uint sm_base = wid*CoopSz;

    // __syncthreads(); // this is not needed as long as coopSz = 32
    smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : -1;
    if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
    // __syncthreads();

    if (weighted)
    {
      #pragma unroll 
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[i+sm_base] < 0 ) continue;
        uint srcBase = src_strides[0] * smCidx[sm_base+i] + src_strides[1] * dstCol;
        // convert the element index to a float4 index
        srcBase /= _128_align;
        float ew = smEw[sm_base+i];
        // NOTE(review): blockDim.y here looks like it should be gridDim.y
        // (last-block-in-grid test) -- confirm.
        if (blockIdx.y != (blockDim.y - 1) || dstCol < vlen)   F::reduce(&local_sum.x, F::binary(ew, src_vecs[srcBase].x));
        if (blockIdx.y != (blockDim.y - 1) || dstCol+1 < vlen) F::reduce(&local_sum.y, F::binary(ew, src_vecs[srcBase].y));
        if (blockIdx.y != (blockDim.y - 1) || dstCol+2 < vlen) F::reduce(&local_sum.z, F::binary(ew, src_vecs[srcBase].z));
        if (blockIdx.y != (blockDim.y - 1) || dstCol+3 < vlen) F::reduce(&local_sum.w, F::binary(ew, src_vecs[srcBase].w));
      }
      
    }
    else
    {
      #pragma unroll 
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[i+sm_base] < 0 ) continue;
        uint srcBase = src_strides[0] * smCidx[sm_base+i] + src_strides[1] * dstCol;
        srcBase /= _128_align;
        if (blockIdx.y != (blockDim.y  - 1) || dstCol < vlen)   F::reduce(&local_sum.x, src_vecs[srcBase].x);
        if (blockIdx.y != (blockDim.y  - 1) || dstCol+1 < vlen) F::reduce(&local_sum.y, src_vecs[srcBase].y);
        if (blockIdx.y != (blockDim.y  - 1) || dstCol+2 < vlen) F::reduce(&local_sum.z, src_vecs[srcBase].z);
        if (blockIdx.y != (blockDim.y  - 1) || dstCol+3 < vlen) F::reduce(&local_sum.w, src_vecs[srcBase].w);
      }
    }
  }

  uint dstBase = dst_strides[0] * RowId + dst_strides[1] * dstCol;
  dstBase /= _128_align;
  // NOTE(review): same blockDim.y-vs-gridDim.y question as above.
  if (blockIdx.y != (blockDim.y - 1))
  {
    dst_vecs[dstBase] = local_sum;
  } else {
    // partial tile at the right edge: scalar writes with bounds checks
    if (dstCol   < vlen) dst_vecs[dstBase].x = local_sum.x;
    if (dstCol+1 < vlen) dst_vecs[dstBase].y = local_sum.y;
    if (dstCol+2 < vlen) dst_vecs[dstBase].z = local_sum.z;
    if (dstCol+3 < vlen) dst_vecs[dstBase].w = local_sum.w;
  }
}



// double variant: two columns per thread via double2 loads/stores.
// NOTE(review): like the float version, this is an ill-formed partial
// specialization of a function template (F and CoopSz stay open) and will
// not compile as written.
template <typename F, uint CoopSz = 32>
__global__ void _spmm_kernel_unroll_merged<F, double, CoopSz>(
  const uint* __restrict__ row_offset,
  const uint* __restrict__ col_indx,
  const double * __restrict__ edge_weight,
  const acd::TensorInfo<double, uint> src_tensor,
  acd::TensorInfo<double, uint> dst_tensor,
  const uint64_t nedge)
{
  // elements of double per 128-bit access (== 2)
  constexpr uint _128_align = 16/sizeof(double);
  bool weighted = (edge_weight != nullptr);
  
  const uint nvertex = dst_tensor.sizes[0];
  const uint vlen = dst_tensor.sizes[1];

  // assumes 16-byte alignment and a row stride divisible by 2 doubles --
  // TODO confirm at call site
  const double2 * src_vecs = reinterpret_cast<const double2*>(src_tensor.data);
  double2 * dst_vecs = reinterpret_cast<double2*>(dst_tensor.data);
  const uint * src_strides = src_tensor.strides;
  const uint * dst_strides = dst_tensor.strides;

  const uint lid = threadIdx.x;
  const uint wid = threadIdx.y;

  const uint RowId = threadIdx.y+blockDim.y*blockIdx.x;

  const uint SpStride = gridDim.x*blockDim.y;
  const uint DnStride = CoopSz;

  // NOTE(review): smCidx is unsigned but stores a -1 sentinel; the `< 0`
  // tests below can never fire on an unsigned type.
  SharedMem<char> _sm;
  uint * smCidx = (uint*) _sm.ptr();
  double * smEw = (double*) (_sm.ptr() + sizeof(uint)*blockDim.x*blockDim.y);

  uint dstCol = lid*_128_align + blockIdx.y*CoopSz*_128_align;

  uint es = row_offset[RowId];
  uint ee = row_offset[RowId+1];

  // NOTE(review): never zero-initialized before accumulation -- likely a bug
  // (the non-vectorized kernels start from 0).
  double2 local_sum;
  
  for (uint k_base = es; k_base < ee; k_base+=DnStride)
  {
    uint k = k_base + lid;
    uint sm_base = wid*CoopSz;

    // __syncthreads(); // this is not needed as long as coopSz = 32
    smCidx[lid+sm_base] = (k < ee) ? col_indx[k] : -1;
    if (weighted) smEw[lid+sm_base] = (k < ee) ? edge_weight[k] : 0;
    // __syncthreads();

    if (weighted)
    {
      #pragma unroll 
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[i+sm_base] < 0 ) continue;
        uint srcBase = src_strides[0] * smCidx[sm_base+i] + src_strides[1] * dstCol;
        // convert the element index to a double2 index
        srcBase /= _128_align;
        double ew = smEw[sm_base+i];
        // NOTE(review): blockDim.y here looks like it should be gridDim.y
        // (last-block-in-grid test) -- confirm.
        if (blockIdx.y != (blockDim.y - 1) || dstCol < vlen)   F::reduce(&local_sum.x, F::binary(ew, src_vecs[srcBase].x));
        if (blockIdx.y != (blockDim.y - 1) || dstCol+1 < vlen) F::reduce(&local_sum.y, F::binary(ew, src_vecs[srcBase].y));
      }
      
    }
    else
    {
      #pragma unroll 
      for(uint i = 0; i < CoopSz; ++i)
      {
        if ( smCidx[i+sm_base] < 0 ) continue;
        uint srcBase = src_strides[0] * smCidx[sm_base+i] + src_strides[1] * dstCol;
        srcBase /= _128_align;
        if (blockIdx.y != (blockDim.y - 1) || dstCol < vlen)   F::reduce(&local_sum.x, src_vecs[srcBase].x);
        if (blockIdx.y != (blockDim.y - 1) || dstCol+1 < vlen) F::reduce(&local_sum.y, src_vecs[srcBase].y);
      }
    }
  }

  uint dstBase = dst_strides[0] * RowId + dst_strides[1] * dstCol;
  dstBase /= _128_align;
  // NOTE(review): same blockDim.y-vs-gridDim.y question as above.
  if (blockIdx.y != (blockDim.y - 1))
  {
    dst_vecs[dstBase] = local_sum;
  } else {
    // partial tile at the right edge: scalar writes with bounds checks
    if (dstCol   < vlen) dst_vecs[dstBase].x = local_sum.x;
    if (dstCol+1 < vlen) dst_vecs[dstBase].y = local_sum.y;
  }
}

}; // namespace Alternative4

namespace Alternative5 {

namespace {

using llvlib::ItemPerVec_t;
using llvlib::CoopCoordinator;

// Dense-side compute variant: `trivial` assumes a full tile (no bounds
// checks), `border` handles a partial tile at the matrix edge.
enum DenseCompute {
  trivial, border
};

// Edge/value configuration of the sparse matrix: weighted (`w`) or
// unweighted (`uw`).
enum EdgeType {
  w, uw
};

// Which portion of a row's nonzeros an iteration step consumes: the whole
// tile (`full`), an aligned prefix, or the trailing remainder (`suffix`).
enum SparseCompute {
  full, prefix, suffix
};

// Backing-storage strategy for staging buffers / accumulators; selects one
// of the buffer_t specializations below.
enum BufferType {
  none, shared_ptr_g, shared_buffer_g, shared_buffer_t, local
};

// Primary template: an H x W staging buffer of T, specialized on the
// BufferType strategy. CONF supplies the thread-block configuration.
template <typename CONF, typename T, uint H, uint W, BufferType bt>
struct buffer_t;

// BufferType::none -- a non-owning view (llvlib::TileView/VectView) over
// storage provided elsewhere; getTile()/getVect() expose the same bytes as
// a 2-D tile or a flat vector.
template <typename CONF, typename T, uint H, uint W>
struct buffer_t<CONF, T, H, W, BufferType::none>
{
  typedef llvlib::TileView<T, H, W> Tile;
  typedef llvlib::VectView<T, H*W> Vect;

  Tile impl;
  __device__ __forceinline__ Tile& getTile() { return impl; }
  __device__ __forceinline__ Vect& getVect() { return *(reinterpret_cast<Vect*>(&impl)); }
  // `tid` is unused here; the parameter mirrors the interface of the other
  // specializations so callers can be generic over BufferType.
  __device__ __forceinline__ T* getSharedPtr(uint tid) { return impl.data[0]; }
};

// BufferType::local -- owning storage (llvlib::Tile/Vect hold their own
// data), so each instance carries a private H x W buffer.
template <typename CONF, typename T, uint H, uint W>
struct buffer_t<CONF, T, H, W, BufferType::local>
{
  typedef llvlib::Tile<T, H, W> Tile;
  typedef llvlib::Vect<T, H*W> Vect;

  Tile impl;
  __device__ __forceinline__ Tile& getTile() { return impl; }
  __device__ __forceinline__ Vect& getVect() { return *(reinterpret_cast<Vect*>(&impl)); }
  // `tid` is unused; kept for interface parity with the other specializations.
  __device__ __forceinline__ T* getSharedPtr(uint tid) { return reinterpret_cast<T*>(&impl.data); }
};

/**
 * BufferType::shared_ptr_g -- one W-wide vector view per y-group
 * (CONF::ydim groups); getVect() selects the calling group's slot.
 *
 * @note this design does not work with H > 1
 */ 
template <typename CONF, typename T, uint H, uint W>
struct buffer_t<CONF, T, H, W, BufferType::shared_ptr_g>
{
  typedef llvlib::TileView<T, CONF::ydim, W> Tile;
  typedef llvlib::VectView<T, W> Vect;

  // BUG: this __shared__ is ignored by compiler
  Vect impl[CONF::ydim];
  __device__ __forceinline__ Tile& getTile() { return *(reinterpret_cast<Tile*>(&impl)); }
  __device__ __forceinline__ Vect& getVect() { return impl[CONF::yid()]; }
  // remember to pass rowid to this function
  __device__ __forceinline__ T* getSharedPtr(uint gid) { return impl[gid].data; }
};

// BufferType::shared_buffer_g -- owning per-group buffers: one H x W Vect
// per y-group; getTile()/getVect() address the calling group's slot, while
// getSharedPtr(gid) lets a caller reach any group's buffer.
template <typename CONF, typename T, uint H, uint W>
struct buffer_t<CONF, T, H, W, BufferType::shared_buffer_g>
{
  typedef llvlib::Tile<T, H, W> Tile;
  typedef llvlib::Vect<T, H*W> Vect;

  // BUG: this __shared__ is ignored by compiler
  Vect impl[CONF::ydim];
  __device__ __forceinline__ Tile& getTile() { return *(reinterpret_cast<Tile*>(&impl[CONF::yid()])); }
  __device__ __forceinline__ Vect& getVect() { return impl[CONF::yid()]; }
  __device__ __forceinline__ T* getSharedPtr(uint gid) { return reinterpret_cast<T*>(&impl[gid].data); }
};

/**
 * @brief view type over a vector-typed element
 * 
 * @tparam T scalar data type
 * @tparam vector_t vector data type (e.g. float4); ItemPerVec_t yields the
 *         number of T lanes it packs
 */
// template <typename T, typename vector_t>
// using VecTy = llvlib::Vect<T, ItemPerVec_t<vector_t,T>::value>;

template <typename T, typename vector_t>
using ViewTy = llvlib::VectView<T, ItemPerVec_t<vector_t,T>::value>;

/**
 * @brief entrance for the dense streaming part of SpMM
 * 
 * @tparam F functor providing binary (multiply) and reduce (accumulate)
 * @tparam T scalar data type
 * @tparam DnConf thread mapping for row/column in dense computation
 * @tparam DnVlen scalar lanes per vectorized access
 * @tparam DnIters number of vector tiles each thread streams per row
 * @tparam DnBt backing-storage strategy of the accumulator
 * @tparam W Edge type configuration: weighted or not
 * @tparam DN Computation type configuration: trivial or border handling
 */
template <typename F, typename T, typename DnConf, uint DnVlen, uint DnIters, BufferType DnBt, EdgeType W, DenseCompute DN>
struct DenseComputer {
  
  typedef typename llvlib::VectTypes<T, DnVlen>::type vect_t;
  typedef llvlib::Vect<T, DnVlen> Vect;
  typedef typename buffer_t<DnConf, T, 1, DnVlen, DnBt>::Vect accumulator_t;
  typedef accumulator_t Accumulator[DnIters];
  typedef F Fn;

  static constexpr uint IPV = DnVlen;
  // columns covered by one cooperative x-row per iteration
  static constexpr uint Stride = DnConf::xdim*IPV;

  /**
   * @brief stream one dense source row into the per-thread accumulators
   *
   * @param src_base  base pointer into the dense source row (already offset
   *                  for this thread's columns)
   * @param spval     sparse value to multiply with; ignored for uw paths
   * @param local_sum DnIters accumulators, updated in place
   * @param residue   remaining valid columns; only consulted by the
   *                  `border` branches
   */
  static __device__ __forceinline__ void
  compute_dense_tile(T* src_base, const T spval, accumulator_t* local_sum, uint residue) 
  {
    // NOTE(review): `register` is deprecated (removed in C++17); harmless
    // under older dialects but a candidate for removal.
    register Vect dense_reg;
    // all four branches resolve at compile time (W and DN are template args)
    if (W == EdgeType::uw && DN == trivial)
    {
      #pragma unroll
      for ( uint i=0; i<DnIters; ++i)
      {
        dense_reg.template load_thread<vect_t>(src_base);
        llvlib::thread_binary_elementwise_inline<T, Vect, accumulator_t&, IPV>(dense_reg, local_sum[i], F::reduce);
        // ---debug---
        // if (blockIdx.x == 0 && blockIdx.y == 0)
        // printf("Thread %d,%d: local_sum = [%.2f, %.2f, %.2f, %.2f]\n", DnConf::yid(), DnConf::xid(),
        //     (*local_sum)[i*IPV], (*local_sum)[i*IPV+1], (*local_sum)[i*IPV+2], (*local_sum)[i*IPV+3]);
        src_base += Stride;
      }

    } 
    else if (W == EdgeType::w && DN == trivial)
    {
      #pragma unroll
      for ( uint i=0; i<DnIters; ++i)
      {
        dense_reg.template load_thread<vect_t>(src_base);
        llvlib::thread_scalar_fma_inline<T, Vect, accumulator_t&, IPV>(spval, dense_reg, local_sum[i], F::binary, F::reduce);
        // ---debug---
        // if (blockIdx.x == 0 && blockIdx.y == 0)
        // printf("Thread %d,%d: local_sum = [%.2f, %.2f, %.2f, %.2f]\n", DnConf::yid(), DnConf::xid(),
        //     dense_reg[0], dense_reg[1], dense_reg[2], dense_reg[3]);
        src_base += Stride;
      }
    } 
    // modified to use local_sum in shared mem
    else if (W == EdgeType::uw && DN == border)
    {
      // uint cnt=0;
      // for ( uint i=DnConf::xid(); i<residue; i+=DnConf::xdim )
      // {
      //   F::reduce(&(*local_sum)[cnt++], src_base[i]);
      // }
      // NOTE(review): if residue is not a multiple of Stride the unsigned
      // subtraction wraps and this loop over-iterates -- confirm callers
      // pass Stride-aligned residues.
      uint iter = 0;
      while(residue > 0)
      {
        dense_reg.template load_thread<vect_t>(src_base);
        llvlib::thread_binary_elementwise_inline<T, Vect, accumulator_t&, IPV>(dense_reg, local_sum[iter++], F::reduce);
        src_base += Stride;
        residue  -= Stride;
      }
    }
    else 
    {
      // uint cnt = 0; 
      // for (uint i=DnConf::xid(); i<residue; i+=DnConf::xdim)
      // {
      //   F::reduce(&(*local_sum)[cnt++], F::binary(spval, src_base[i]));
      // }
      // NOTE(review): same unsigned-wrap caveat as the branch above.
      uint iter = 0;
      while(residue > 0)
      {
        dense_reg.template load_thread<vect_t>(src_base);
        llvlib::thread_scalar_fma_inline<T, Vect, accumulator_t&, IPV>(spval, dense_reg, local_sum[iter++], F::binary, F::reduce);
        src_base += Stride;
        residue  -= Stride;
      }
    }
  }

  /**
   * @brief write the accumulators back to the dense destination row
   *
   * @param dst_base  destination pointer, already offset for this thread
   * @param local_sum DnIters accumulators to flush
   * @param residue   valid columns remaining; only consulted when DN==border
   *                  (full vector stores while possible, then a scalar-typed
   *                  partial store for the tail)
   */
  static __device__ __forceinline__ void 
  write_dense(T* dst_base, accumulator_t* local_sum, uint residue)
  {
    if (DN == border)
    {
      // uint cnt = 0;
      // for (uint i=DnConf::xid(); i<residue; i+=DnConf::xdim)
      // {
      //   dst_base[i] = reinterpret_cast<T*>(&local_sum->data)[cnt++];
      // }
        // local_sum[i].template dump_thread<T>(dst_base, residue);
      uint iter = 0;
      while ( residue >= DnVlen )
      {
        local_sum[iter++].template dump_thread<vect_t>(dst_base);
        dst_base += Stride;
        residue -= Stride;
      }

      if ( residue > 0 )
        local_sum[iter].template dump_thread<typename llvlib::VectTypes<T, 1>::type>(dst_base, residue);
    
    } else {
      for ( uint i=0; i<DnIters; ++i)
      {
        local_sum[i].template dump_thread<vect_t>(dst_base);
        dst_base += Stride;
      }
    }
  }
};



/**
 * @brief stages a tile of sparse-row nonzeros and drives DenseF for each
 *        staged column index, coordinating sparse-loading groups (SpConf)
 *        against dense-computing groups (DnConf).
 *
 * @tparam DenseF DenseComputer instantiation doing the dense streaming
 * @tparam T scalar value type, IT index type
 * @tparam SpBt  buffer strategy for the staged indices/values
 * @tparam SP    which part of a row is processed (full/prefix/suffix)
 * @tparam ET    weighted or unweighted edges
 */
template <typename DenseF, typename T, typename IT, typename SpConf, typename DnConf, BufferType SpBt, uint SpVlen, uint SpIters, SparseCompute SP, EdgeType ET>
class SparseIterator{
  
  typedef llvlib::CoopCoordinator<SpConf, DnConf> CoopCoordinator;
  typedef typename llvlib::VectTypes<IT, SpVlen>::type IVT;
  typedef typename llvlib::VectTypes<T, SpVlen>::type VT;
  typedef llvlib::Vect<IT, SpVlen> IVect;
  typedef llvlib::Vect<T, SpVlen> VVect;

  static constexpr uint SpTileWidth = SpConf::xdim*SpVlen;
  static constexpr uint TileRows = SpConf::ydim;
  static constexpr uint SpPerDn = CoopCoordinator::SpPerDn;
  static constexpr uint DnPerSp = CoopCoordinator::DnPerSp;

  // Publish a per-row value from the sparse groups into a buffer the dense
  // groups can read (only needed when the two configurations differ).
  template <typename message_t>
  static __device__ __forceinline__ void cache_for_dense(message_t* buffer, message_t val)
  {
    if (SpConf::xdim != DnConf::xdim && !SpConf::xid())
    {
      buffer[SpConf::yid()] = val;
    }
  }

  // Counterpart of cache_for_dense: read back the published value on the
  // dense side; a no-op when the configurations coincide.
  template <typename message_t>
  static __device__ __forceinline__ void retrive_for_dense(message_t* buffer, message_t& val, uint idx)
  {
    if (SpConf::xdim != DnConf::xdim)
    {
      val = buffer[idx];
    }
  }

  // template <CoopCoordinator::CoopMode mode>
  // struct Sparseloader;

  // template <CoopCoordinator::CoopMode mode>
  // struct CoordinationAdaptor;

  // template <CoopCoordinator::CoopMode mode>
  // struct SparseWalker;

public:
  // typedef buffer_t<SpConf, IT, 1, SpTileWidth, SpBt> IdxBuffer;
  // typedef buffer_t<SpConf, T, 1, SpTileWidth, SpBt> ValBuffer;
  typedef typename buffer_t<SpConf, IT, 1, SpTileWidth, SpBt>::Vect IdxBuffer;
  typedef typename buffer_t<SpConf, T, 1, SpTileWidth, SpBt>::Vect ValBuffer;

  // Returns whether any thread of the relevant cooperative group voted
  // `pred` true. The branches resolve at compile time (group dims are
  // template constants), so control flow is uniform across the block.
  // NOTE(review): the ballot results are funneled through a bool ("any lane
  // set"), which matches the slow path's semantics -- but worth confirming
  // that callers never expect the raw ballot mask.
  static __device__ __forceinline__ bool vote(const bool pred)
  {
    bool ret = false;
    if ( CoopCoordinator::Mutual || CoopCoordinator::DenseMajor ) { // multi-group against one row
      ret = pred;
    } else if (DnConf::xdim == 32) {
      ret = __ballot_sync(0xffffffff, pred);
    } else if (DnConf::xdim < 32) {  // one group against multi-row
      // (31-DnConf::xdim+1) == 32-xdim masks out the in-group lane bits for
      // power-of-two xdim, yielding this group's base lane as the shift.
      const uint32_t mask = ((uint32_t)1<<DnConf::xdim)-1;
      const uint32_t shift = threadIdx.x & (31-DnConf::xdim+1);
      ret = __ballot_sync(0xffffffff, pred) & (mask << shift);
    } else {
      // groups wider than a warp: exchange votes through shared memory
      __shared__ bool trans_buffer[SpConf::ydim];
      if ( !SpConf::xid() )
      {
        trans_buffer[SpConf::yid()] = pred;
      }
      __syncthreads();
      for ( uint i=0; i<SpPerDn; ++i )
      {
        ret |= trans_buffer[SpPerDn*DnConf::yid()+i];
      }
    }
    return ret;
  }


  // Load one tile of nonzeros (indices + optional weights) into the staging
  // buffers, then iterate the staged column indices and stream the matching
  // dense rows into the accumulators via DenseF.
  static __device__ __forceinline__ void proceed(
      // SpConf sp_conf, DnConf dn_conf, uint subid,
      IT* es_ptr, T* vs_ptr, T* src_data, 
      const uint block_rows, const uint block_cols, uint row_nz,
      const uint ld_src, uint align_prefix,
      // llvlib::Tile<int, TileRows, SpTileWidth>& sparse_indx, llvlib::Tile<T, TileRows, SpTileWidth>& sparse_val,
      IdxBuffer& sparse_indx, ValBuffer& sparse_val,
      typename DenseF::Accumulator *local_sum, uint* cache_row_nz, uint* cache_align_prefix)
  {
    // __shared__ uint cache_row_nz[SpConf::ydim];
    // __shared__ uint cache_align_prefix[SpConf::ydim];
    // __shared__ IT* loc_es_ptr[SpConf::ydim];
    // __shared__ T*  loc_vs_ptr[SpConf::ydim];

    if (SP == SparseCompute::full || SP == SparseCompute::prefix)
    {
      if (row_nz > SpTileWidth)
      {
        sparse_indx.template load_warp<IVT, SpConf>(es_ptr);
        if (ET == EdgeType::w) 
          sparse_val.template load_warp<VT, SpConf>(vs_ptr);

        if ( SP == SparseCompute::prefix ) cache_for_dense(cache_align_prefix, align_prefix);
      } else {
        // row too short for a full tile: mark it skipped (-1 wraps to
        // UINT_MAX on the unsigned row_nz; the `== -1` test below matches)
        row_nz = -1;
        // NOTE(review): template deduction conflict here -- `cache_row_nz`
        // is uint* but the literal -1 deduces message_t as int; this will
        // not compile when instantiated. Pass (uint)-1 instead.
        cache_for_dense(cache_row_nz, -1);
      }
    }
    else // suffix
    {
      // sparse_indx[SpConf::yid()][i] = es_ptr[i];
      sparse_indx.template load_warp<IVT, SpConf>(es_ptr, row_nz);
      if (ET == EdgeType::w) //sparse_val[SpConf::yid()][i] = vs_ptr[i];
        sparse_val.template load_warp<VT, SpConf>(vs_ptr, row_nz);
      // if (blockIdx.x == 2707 && SpConf::xid() < row_nz) 
      // printf("loading %d:%f from %lx to buffer[%d,%d]@%lx\n", 
      //     sparse_indx.getVect()[SpConf::xid()], sparse_val.getVect()[SpConf::xid()], es_ptr+SpConf::xid(), SpConf::yid(), SpConf::xid(), &sparse_indx.getVect().data);
        
      cache_for_dense(cache_row_nz, row_nz);
      cache_for_dense(cache_align_prefix, align_prefix); 
    
      // sparse_indx.template gather_warp<int, SpConf>(es_ptr, block_rows, nz_row);
      // if (ET == EdgeType::w) sparse_val.template gather_warp<T, SpConf>(vs_ptr, block_rows, nz_row);
    }
    // hand the staged tile from the sparse groups to the dense groups
    if ( CoopCoordinator::SparseMajor || SpBt != BufferType::none ) SpConf::sync();

    const uint subid = CoopCoordinator::coop_id_dn2sp();
    if ( block_cols > 0 ) for (uint r=0; r<SpPerDn; ++r)
    {
      const uint local_row = SpPerDn * DnConf::yid() + r;
      if ( local_row >= block_rows ) break;
      
      if (SP == SparseCompute::suffix ) retrive_for_dense(cache_row_nz, row_nz, local_row);
      if ( row_nz == -1 ) continue; // row was skipped above
      
      if (SP != SparseCompute::full ) retrive_for_dense(cache_align_prefix, align_prefix, local_row);
    
      const uint tile_s = (SP == SparseCompute::full) ? 0 : align_prefix;
      const uint tile_e = (SP == SparseCompute::suffix) ? row_nz : SpTileWidth;
      // const IT *loc_rows  = (SP == SparseCompute::suffix) ? loc_es_ptr[local_row] : sparse_indx[local_row];
      // const T  *loc_vals  = (SP == SparseCompute::suffix) ? loc_vs_ptr[local_row] : sparse_val[local_row];
      
      // NOTE(review): .data is taken from the caller's buffer object
      // irrespective of local_row; presumably the buffer_t specialization
      // resolves the per-group slot internally -- confirm for SpBt variants.
      IT *loc_cols = sparse_indx.data;
      T  *loc_vals = sparse_val.data;

      if (CoopCoordinator::DenseMajor) for (uint i=tile_s+subid; i<tile_e; i+=DnPerSp)
      {
        const IT dense_row = loc_cols[i];
        const T  spval     = (ET == EdgeType::w) ? loc_vals[i] : 0;

        // if (SpConf::ydim*blockIdx.x+local_row == 2707 && DnConf::xid() == 0)
        // printf("process %d Thread %d,%d: loc_vals[%d~%d~%d]=%f\n", SP, DnConf::yid(), DnConf::xid(), tile_s, i, tile_e, loc_vals[i]);
        // if (RowOffset+SpConf::yid() == 1007 && SpConf::xid() == 0) printf("local_group %d loading %d from buffer[%d,%d]\n", SpConf::yid(), dense_row, local_row, i);
        T* src_row = src_data + ld_src*dense_row;
        DenseF::compute_dense_tile(src_row, spval, local_sum[r], block_cols);
      }

      else for (uint i=tile_s; i<tile_e; ++i)
      {
        const IT dense_row = loc_cols[i];
        const T      spval = (ET == EdgeType::w) ? loc_vals[i] : 0;

        // if (SpConf::ydim*blockIdx.x+local_row == 2707 && DnConf::xid() == 0)
        // printf("process %d Thread %d,%d: loc_vals[%d~%d~%d]=%f\n", SP, DnConf::yid(), DnConf::xid(), tile_s, i, tile_e, loc_vals[i]);
        // if (RowOffset+SpConf::yid() == 1007 && SpConf::xid() == 0) printf("local_group %d loading %d from buffer[%d,%d]\n", SpConf::yid(), dense_row, local_row, i);
        T* src_row = src_data + ld_src*dense_row;
        DenseF::compute_dense_tile(src_row, spval, local_sum[r], block_cols);
      }

    }
    // release the staging buffer for the next tile
    if ( CoopCoordinator::SparseMajor || SpBt != BufferType::none ) DnConf::sync();
  }

  // Simplified path: walk the nonzeros straight out of global memory
  // (no staging) -- used when buffering isn't worthwhile.
  static __device__ __forceinline__ void
  proceed_simple(
      // SpConf sp_conf, DnConf dn_conf, uint subid,
      IT* es_ptr, T* vs_ptr, T* src_data, 
      const uint block_rows, const uint block_cols, uint row_nz,
      const uint ld_src, uint align_prefix,
      // llvlib::Tile<int, TileRows, SpTileWidth>& sparse_indx, llvlib::Tile<T, TileRows, SpTileWidth>& sparse_val,
      IdxBuffer& sparse_indx, ValBuffer& sparse_val,
      typename DenseF::Accumulator *local_sum, uint* cache_row_nz, uint* cache_align_prefix)
  {
    if ( block_cols > 0 )
    {
      IT *loc_cols = es_ptr;
      T  *loc_vals = vs_ptr;

      for (uint i=align_prefix; i<row_nz; ++i)
      {
        IT dense_row = loc_cols[i];
        T  spval     = (ET == EdgeType::w) ? loc_vals[i] : 0;

        T* src_row = src_data + ld_src*dense_row;
        DenseF::compute_dense_tile(src_row, spval, local_sum[0], block_cols);
      }
    }
    if ( CoopCoordinator::SparseMajor || SpBt != BufferType::none ) DnConf::sync();
  }

  // Flush each cooperative row's accumulators to its destination row
  // (local_rows maps the r-th handled row to a global row index).
  static __device__ __forceinline__ void 
  writeback(/*T* local_rows[],*/ IT* local_rows, T* dst_data, uint ld_dst,
            const uint block_rows, const uint block_cols,
            typename DenseF::Accumulator *local_sum)
  {
    for (uint r=0; r<SpPerDn; ++r)
    {
      /*
      const uint local_row = SpPerDn*dn_conf.y + r;
      if ( local_row >= block_rows ) break;

      const uint wb_row = SpConf::ydim*blockIdx.x + local_row;
      size_t sp_row    = static_cast<size_t>(row_indx[wb_row]); //TODO: redundant for mutral
      */
      // uint sp_row = local_rows[r];
      // uint sp_row = SpConf::ydim*blockIdx.x + SpPerDn*dn_conf.y + r;
      // size_t sp_row = SpConf::ydim*blockIdx.y + r*DnConf::ydim + DnConf::yid();
      // T* dst_base = dst_data + ld_dst*sp_row;
      // if (SpConf::xid() == 0) printf("row=%d dst_ptr=%lx\n", sp_row, dst_base);
      DenseF::write_dense(dst_data+ld_dst*local_rows[r], local_sum[r], block_cols);
      // DenseF::write_dense(local_rows[r], local_sum[r], block_cols);
    }
  }
};
 
}; // anonymous namespace
/**
 * @brief kernel to calculate spmm with llvlib
 * 
 * @tparam F operator used for mm (mul, add)
 * @tparam SpConf block configuration for loading sparse matrix
 * @tparam DnConf block configuration for loading dense matrix, dnconf.ydim < spconf.ydim
 */
template <typename F, typename index_t, typename scalar_t, 
          typename SpConf, typename DnConf, BufferType SpBt, BufferType DnBt,
          uint SpIPV, uint DnIPV, uint SpIters, uint DnIters>
__global__ void spmm_kernel_veclib(
  const index_t* __restrict__ row_indx,
  const index_t* __restrict__ row_offset,
  const index_t* __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const scalar_t * __restrict__ src_vecs,
  const index_t ld_src, 
  scalar_t * __restrict__ dst_vecs,
  const index_t ld_dst, 
  const index_t nvertex,
  const index_t nedge,
  const index_t vlen)
{
  using namespace llvlib;

  // Sentinel for a row slot past the matrix border. dense_row[] is unsigned,
  // so the original guards `dense_row[i] >= 0` (always true) and
  // `dense_row[i] < 0` (always false) never fired, and border rows
  // initialised their accumulators from out-of-bounds addresses. Compare
  // against this sentinel instead.
  constexpr uint INVALID_ROW = static_cast<uint>(-1);

  // dense computer variants: {trivial | border} column handling x {weighted | unweighted}
  using DenseComputer_TW = DenseComputer<F, scalar_t, DnConf, DnIPV, DnIters, DnBt, EdgeType::w, trivial>;
  using DenseComputer_TU = DenseComputer<F, scalar_t, DnConf, DnIPV, DnIters, DnBt, EdgeType::uw, trivial>;
  using DenseComputer_BW = DenseComputer<F, scalar_t, DnConf, DnIPV, DnIters, DnBt, EdgeType::w, border>;
  using DenseComputer_BU = DenseComputer<F, scalar_t, DnConf, DnIPV, DnIters, DnBt, EdgeType::uw, border>;

  // sparse iteration variants for the three row phases (prefix / full tiles / suffix)
  using SparseIter_PreTW = SparseIterator<DenseComputer_TW, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::prefix, EdgeType::w>;
  using SparseIter_PreTU = SparseIterator<DenseComputer_TU, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::prefix, EdgeType::uw>;
  using SparseIter_PreBW = SparseIterator<DenseComputer_BW, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::prefix, EdgeType::w>;
  using SparseIter_PreBU = SparseIterator<DenseComputer_BU, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::prefix, EdgeType::uw>;
  
  using SparseIter_FullTW = SparseIterator<DenseComputer_TW, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::full, EdgeType::w>;
  using SparseIter_FullTU = SparseIterator<DenseComputer_TU, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::full, EdgeType::uw>;
  using SparseIter_FullBW = SparseIterator<DenseComputer_BW, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::full, EdgeType::w>;
  using SparseIter_FullBU = SparseIterator<DenseComputer_BU, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::full, EdgeType::uw>;

  using SparseIter_SufTW = SparseIterator<DenseComputer_TW, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::suffix, EdgeType::w>;
  using SparseIter_SufTU = SparseIterator<DenseComputer_TU, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::suffix, EdgeType::uw>;
  using SparseIter_SufBW = SparseIterator<DenseComputer_BW, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::suffix, EdgeType::w>;
  using SparseIter_SufBU = SparseIterator<DenseComputer_BU, scalar_t, index_t, SpConf, DnConf, SpBt, SpIPV, SpIters, SparseCompute::suffix, EdgeType::uw>;

  // vector types
  typedef typename llvlib::VectTypes<index_t, SpIPV>::type sp_ivec_t;
  typedef typename llvlib::VectTypes<scalar_t, SpIPV>::type sp_svec_t;
  typedef typename llvlib::VectTypes<scalar_t, DnIPV>::type dn_svec_t;
  typedef llvlib::CoopCoordinator<SpConf, DnConf> CoopCoordinator;

  // tile geometry
  constexpr uint SpTileWidth = SpIPV*SpIters*SpConf::xdim; // nonzeros consumed per sparse tile
  constexpr uint DnTileWidth = DnIPV*DnIters;              // dense columns per thread
  constexpr uint SpPerDn = CoopCoordinator::SpPerDn;
  constexpr uint DnPerSp = CoopCoordinator::DnPerSp;

  // sparse staging buffers (register/local storage for `local` or `none` SpBt;
  // a shared-memory variant existed as commented-out code in earlier revisions)
  typename SparseIter_FullTW::IdxBuffer sparse_indx;
  typename SparseIter_FullTW::ValBuffer sparse_val;

  // per-row metadata shared across the cooperating dense threads
  __shared__ uint cache_row_nz[SpConf::ydim];
  __shared__ uint cache_align_prefix[SpConf::ydim];

  // workload assignment: block.x covers SpConf::ydim consecutive rows
  const uint RowOffset = SpConf::ydim*blockIdx.x;
  if (RowOffset + SpConf::warp_ybase() >= nvertex) return; // the whole warp is useless, quit

  // per-row sparse cursor
  index_t *es_ptr;   // column indices, backed up to an SpIPV boundary
  scalar_t *vs_ptr;  // matching edge weights
  uint align_prefix, row_nz;
  
  uint dense_offset[DnIters]; // absolute dense column handled per dense iteration
  uint dense_row[SpPerDn];    // absolute row handled per accumulator slot
  
  if ( RowOffset + SpConf::yid() >= nvertex )
  {
    es_ptr = nullptr;
    vs_ptr = nullptr;
    align_prefix = row_nz = 0;
  } 
  else 
  {
    uint RowId = RowOffset + SpConf::yid(); // non-sorted row layout
    uint es    = row_offset[RowId];
    uint ee    = row_offset[RowId+1];
    
    // back the cursor up to the previous SpIPV boundary so vector loads stay
    // aligned; the first `align_prefix` entries belong to the previous row
    // and are masked out by the prefix-phase iterator
    align_prefix = alignDownResidue<index_t, SpIPV>(col_indx+es);
    es_ptr = const_cast<index_t*>(col_indx+es-align_prefix);
    vs_ptr = const_cast<scalar_t*>(edge_weight+es-align_prefix);
    row_nz = ee-es+align_prefix;
  }

  // execution selector: border blocks need masked dense loads/stores
  bool dn_border = (vlen < DnTileWidth*DnConf::xdim*(blockIdx.y+1));
  bool weighted = (edge_weight != nullptr);
  
  // dense column offset for this block, and the rows it actually covers
  const uint block_col_base = DnTileWidth*DnConf::xdim*blockIdx.y;
  const uint block_rows = MIN(SpConf::ydim, nvertex-RowOffset);

#pragma unroll
  for (uint i=0; i<SpPerDn; ++i) 
  {
    uint local_row_dn = SpConf::warp_ybase() + DnConf::warp_ydim*i + DnConf::warp_yid();
    // BUGFIX: tag border rows with INVALID_ROW and test for it explicitly
    // below (dense_row is unsigned, so sign tests cannot work)
    dense_row[i] = (local_row_dn < block_rows) ? local_row_dn+RowOffset : INVALID_ROW;
  }

#pragma unroll
  for ( uint i=0; i<DnIters; ++i)
  {
    dense_offset[i] = block_col_base + DnIPV * (SpConf::xid() + i*DnConf::xdim);
  }
  const uint col_base = dense_offset[0];
  const uint block_cols = vlen - col_base; // valid columns right of this thread's base

  // -- accumulators stored in registers / local memory
  typename DenseComputer_TW::Accumulator local_sum[SpPerDn];

  #pragma unroll 
  for ( uint i=0; i<SpPerDn; ++i) 
  {
    // rows are assigned in increasing order, so the first invalid slot ends the list
    if ( dense_row[i] == INVALID_ROW ) break;
    for ( uint j=0; j<DnIters; ++j )
      if ( dense_offset[j] < vlen )
      {
        local_sum[i][j].init(dst_vecs + ld_dst*dense_row[i] + dense_offset[j]); 
      }
  }

  // tiled prefix/full phases only pay off with a shared staging buffer
  if ( SpBt == BufferType::shared_buffer_g )
  {  
    // prefix phase: first (possibly unaligned) tile of every long row
    if ( SparseIter_PreTW::vote(row_nz > SpTileWidth) )
    {
      if (weighted && !dn_border) 
        SparseIter_PreTW::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (!weighted && !dn_border)
        SparseIter_PreTU::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (weighted && dn_border)
        SparseIter_PreBW::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (!weighted && dn_border)
        SparseIter_PreBU::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      
      if (row_nz > SpTileWidth)
      {
        es_ptr += SpTileWidth;
        vs_ptr += SpTileWidth;
        row_nz -= SpTileWidth;
        align_prefix = 0; // cursor is tile-aligned from now on
      }
    }

    // full phase: aligned interior tiles while any cooperating row still has one
    while ( SparseIter_FullTW::vote(row_nz > SpTileWidth) )
    {
      if (weighted && !dn_border) 
        SparseIter_FullTW::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (!weighted && !dn_border)
        SparseIter_FullTU::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (weighted && dn_border)
        SparseIter_FullBW::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (!weighted && dn_border)
        SparseIter_FullBU::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                                  block_rows, block_cols, row_nz, 
                                  ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                                  cache_row_nz, cache_align_prefix);
      if (row_nz > SpTileWidth)
      {
        es_ptr += SpTileWidth;
        vs_ptr += SpTileWidth;
        row_nz -= SpTileWidth;
      }
    }
  }

  // suffix phase: remaining (< one tile) nonzeros and extra-short rows.
  // NOTE(review): only the trivial/weighted variant is dispatched here — the
  // weighted/border selection used in the phases above was commented out in
  // the original; confirm SparseIter_SufTW is intended to cover all four cases.
  SparseIter_SufTW::proceed(es_ptr, vs_ptr, const_cast<scalar_t*>(src_vecs+col_base), 
                            block_rows, block_cols, row_nz, 
                            ld_src, align_prefix, sparse_indx, sparse_val, local_sum,
                            cache_row_nz, cache_align_prefix);

  // write back
  if (dn_border)
    SparseIter_SufBU::writeback(dense_row, dst_vecs+col_base, ld_dst, block_rows, block_cols, local_sum);
  else
    SparseIter_SufTU::writeback(dense_row, dst_vecs+col_base, ld_dst, block_rows, block_cols, local_sum);
}

}; // namespace alter5

namespace manscript {

/**
 * u_e_v dense walk: for each of the DnIters tiles, load the dense source
 * tile at `offset` and fold F::binary(val, tile) into the matching
 * accumulator with F::reduce.
 */
template <typename F, typename IT, typename T, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_full(const T val, const T* src_ptr,  
             IT offset, llvlib::Vect<T,DnIPV> local_sum[])
{
  using namespace llvlib;
  using dnv_t = typename VectTypes<T, DnIPV>::type;

  Vect<T, DnIPV> loaded;
  // tiles are DnConf::xdim*DnIPV elements apart (one vector per thread column)
  for (uint it = 0; it < DnIters; ++it, offset += DnConf::xdim * DnIPV)
  {
    loaded.template load_thread_offset<dnv_t>(const_cast<T*>(src_ptr), offset);
    thread_scalar_fma_inline<T, Vect<T, DnIPV>, Vect<T, DnIPV>&, DnIPV>(
      val, loaded, local_sum[it], F::binary, F::reduce);
  }
}


/**
 * u_v dense walk: fold each dense source tile directly into the accumulator
 * with F::reduce. The edge value `val` is ignored; the parameter exists only
 * so all walkers share one signature.
 */
template <typename F, typename IT, typename T, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_dense_only(const T val, const T* src_ptr,  
                      IT offset, llvlib::Vect<T,DnIPV> local_sum[])
{
  using namespace llvlib;
  using dnv_t = typename VectTypes<T, DnIPV>::type;

  Vect<T, DnIPV> loaded;
  // tiles are DnConf::xdim*DnIPV elements apart (one vector per thread column)
  for (uint it = 0; it < DnIters; ++it, offset += DnConf::xdim * DnIPV)
  {
    loaded.template load_thread_offset<dnv_t>(const_cast<T*>(src_ptr), offset);
    thread_binary_elementwise_inline<T, Vect<T, DnIPV>, Vect<T, DnIPV>&, DnIPV>(
      loaded, local_sum[it], F::reduce);
  }
}


/**
 * e_v dense walk: only the edge value contributes — the dense source row is
 * never read (nullptr is passed as the source operand). `src_ptr` and
 * `offset` are accepted solely to match the common walker signature.
 *
 * Cleanup: the original kept a dead `offset += stride` update, an unused
 * `stride` constant and an unused vector typedef; none affected behavior.
 */
template <typename F, typename IT, typename T, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_sparse_only(const T val, const T* src_ptr,  
                      IT offset, llvlib::Vect<T,DnIPV> local_sum[])
{
  using namespace llvlib;

  for (uint i=0; i<DnIters; ++i)
  {
    // nullptr source: fma collapses to folding `val` into the accumulator
    thread_scalar_fma_inline<T, T*, Vect<T, DnIPV>&, DnIPV>(
     val, nullptr, local_sum[i], F::binary, F::reduce);
  }
}


/**
 * Compile-time selector mapping a DnCompute mode to the matching dense
 * walker function.
 *
 * BUGFIX: the original returned nothing when none of the three `if`s matched
 * (undefined behavior for a constexpr function reaching the end without a
 * return). The last mode is now the unconditional fallthrough, so every
 * control path yields a value.
 */
template <typename F, typename IT, typename T, DnCompute DnCom, typename DnConf, uint DnIPV, uint DnIters>
struct DenseWalkSwitch
{
  // common walker signature: (edge value, src base, element offset, accumulators)
  using walker_func_t = void (*) (const T, const T*, IT, llvlib::Vect<T,DnIPV>[]);

  static constexpr walker_func_t get_walker()
  {
    if ( DnCom == u_e_v )
    {
      return dense_walk_full<F, IT, T, DnConf, DnIPV, DnIters>;
    }

    if ( DnCom == u_v )
    {
      return dense_walk_dense_only<F, IT, T, DnConf, DnIPV, DnIters>;
    }

    // DnCom == e_v (remaining mode) — unconditional so a value is always returned
    return dense_walk_sparse_only<F, IT, T, DnConf, DnIPV, DnIters>;
  }
};

/**
 * Push-mode writeback: segmented reduction across the x-group via shared
 * memory, then the segment leader merges its tile into dst atomically.
 *
 * @param dst_base  destination base pointer
 * @param row_base  element offset of the destination row (ld_dst * row)
 * @param col_base  first dense column handled by this thread
 * @param vlen      dense row length (border guard)
 * @param local_sum one accumulator per dense iteration
 * @param _k_buf    shared key buffer used by the segmented reduce
 * @param _v_buf    shared value buffer used by the segmented reduce
 */
template <typename F, typename IT, typename T, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_segreduce_shared(T* dst_base, const IT row_base,
                         uint col_base, const uint vlen, 
                         llvlib::Vect<T,DnIPV> local_sum[], IT _k_buf[], T _v_buf[])
{
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;
  
  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    // `lead` is true for the thread owning the head of a segment; only it
    // writes, so each destination row is stored exactly once per segment
    bool lead = warp_segmented_reduce_shared<IT, T, Vect<T,DnIPV>&, DnIPV, SpConf::xdim, SpConf>(
      local_sum[i], F::reduce, _k_buf, _v_buf);
    if ( lead && col_base < vlen )
    {
      T *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      // atomic merge: other blocks may target the same destination row
      thread_binary_elementwise_inline<T, Vect<T,DnIPV>, T*, DnIPV>(local_sum[i], dst_ptr, F::reduceAtomic);
    }
    col_base += stride;
  }
}

/**
 * Push-mode writeback: segmented reduction via warp shuffles, then the
 * segment leader merges its tile into dst atomically.
 *
 * BUGFIX: the body referenced `_v_buf` but the parameter was named `_dum2`,
 * so this function could not compile; the parameter is renamed to match.
 * NOTE(review): a shuffle-based reduce taking a value buffer looks
 * suspicious — confirm warp_segmented_reduce_shuffle's expected arguments.
 */
template <typename F, typename IT, typename T, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_segreduce_shuffle(T* dst_base, const IT row_base,
                            uint col_base, const uint vlen, 
                            llvlib::Vect<T,DnIPV> local_sum[], IT _k_buf[], T* _v_buf)
{
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;
  
  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    bool lead = warp_segmented_reduce_shuffle<IT, T, Vect<T,DnIPV>&, DnIPV, SpConf::xdim, SpConf>(
      local_sum[i], F::reduce, _k_buf, _v_buf);
    if ( lead && col_base < vlen )
    {
      T *dst_ptr = dst_base + (row_base + col_base);
      // atomic merge: other blocks may target the same destination row
      thread_binary_elementwise_inline<T, Vect<T,DnIPV>, T*, DnIPV>(local_sum[i], dst_ptr, F::reduceAtomic);
    }
    col_base += stride;
  }
}

/**
 * Pull-mode writeback: reduce cooperating partial sums through a shared
 * buffer, then the surviving threads (SpConf::xid() < DnConf::xdim) store
 * their tiles directly (no atomics — this path owns the destination row).
 *
 * @param dst_base  destination base pointer
 * @param row_base  element offset of the destination row (ld_dst * row)
 * @param col_base  first dense column handled by this thread
 * @param vlen      dense row length (border guard)
 * @param local_sum one accumulator per dense iteration
 * @param _dum1     unused (kept so all writebacks share one signature)
 * @param _buffer   shared scratch used by warp_reduce_shared
 */
template <typename F, typename IT, typename T, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_sreduce(T* dst_base, const IT row_base, uint col_base, const uint vlen, 
                 llvlib::Vect<T,DnIPV> local_sum[], IT* _dum1, T _buffer[])
{
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;

  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    warp_reduce_shared<T, Vect<T,DnIPV>&, DnIPV, DnConf::xdim, SpConf>(local_sum[i], F::reduce, _buffer);
    // after the reduce the first DnConf::xdim lanes hold the totals
    if ( SpConf::xid() < DnConf::xdim && col_base < vlen )
    {
      // T *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      local_sum[i].template dump_thread_offset<dnv_t>(dst_base, row_base+col_base);
    }
    col_base += stride;
  }
}

/**
 * TODO: add shared memory based block reduce for Sp group > 32
 */ 
/**
 * Pull-mode writeback: reduce cooperating partial sums with warp shuffles,
 * then store tiles directly (no atomics — this path owns the destination
 * row). Shuffle variant of writeback_sreduce; usable while the sparse group
 * fits in a warp (see the SpConf::xdim < 32 dispatch in WritebackSwitch).
 *
 * @param dst_base  destination base pointer
 * @param row_base  element offset of the destination row (ld_dst * row)
 * @param col_base  first dense column handled by this thread
 * @param vlen      dense row length (border guard)
 * @param local_sum one accumulator per dense iteration
 * @param _dum1,_dum2 unused (kept so all writebacks share one signature)
 */
template <typename F, typename IT, typename T, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_wreduce(T* dst_base, const IT row_base, uint col_base, const uint vlen, 
                 llvlib::Vect<T,DnIPV> local_sum[], IT* _dum1, T* _dum2)
{
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;

  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    warp_reduce_shuffle<T, Vect<T,DnIPV>&, DnIPV, DnConf::xdim, SpConf>(local_sum[i], F::reduce);
    if ( col_base < vlen )
    {
      // T *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      local_sum[i].template dump_thread_offset<dnv_t>(dst_base, row_base+col_base);
    }
    col_base += stride;
  }
}



/**
 * Direct writeback with no reduction: each thread dumps its tiles to dst.
 *
 * BUGFIX: the original advanced `offset` but never `col_base`, so for
 * DnIters > 1 the `col_base < vlen` border guard tested the first column
 * for every iteration and tiles past `vlen` were still stored. Both now
 * advance by `stride` each iteration (and `offset` advances unconditionally,
 * keeping offset/col_base in lock-step).
 */
template <typename IT, typename T, typename Conf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_naive(T* dst_base, const IT row_base, uint col_base, const uint vlen, 
                 llvlib::Vect<T,DnIPV> local_sum[], IT* _dum1, T* _dum2)
{
  typedef typename llvlib::VectTypes<T, DnIPV>::type dnv_t;
  constexpr uint stride = Conf::xdim * DnIPV;

  IT offset = row_base + col_base;
  for (uint i=0; i<DnIters; ++i)
  {
    if (col_base < vlen)
      local_sum[i].template dump_thread_offset<dnv_t>(dst_base, offset);
    offset   += stride;
    col_base += stride;
  }
}

/**
 * Direct atomic writeback: each thread merges its tiles into dst with
 * F::reduceAtomic; tiles past the `vlen` border are skipped.
 */
template <typename F, typename IT, typename T, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_atomic_direct(T* dst_base, const IT row_base, uint col_base, const uint vlen, 
                        llvlib::Vect<T,DnIPV> local_sum[], IT* _dum1, T* _dum2)
{
  using namespace llvlib;

  // successive tiles are DnConf::xdim*DnIPV columns apart
  for (uint it = 0; it < DnIters; ++it, col_base += DnConf::xdim * DnIPV)
  {
    if ( col_base >= vlen ) continue;
    thread_binary_elementwise_inline<T, Vect<T,DnIPV>, T*, DnIPV>(
      local_sum[it], dst_base + (row_base + col_base), F::reduceAtomic);
  }
}


/**
 * Compile-time selector mapping (sparse-walk mode, reduction strategy) to
 * the matching writeback routine.
 *
 * Fixes over the original:
 *  - `using llvlib;` was ill-formed (and `using namespace` is not allowed at
 *    class scope), so llvlib names are now qualified explicitly;
 *  - get_writeback() could fall off the end without returning — a defensive
 *    atomic fallback (always correct, if slower) closes every path;
 *  - reduce_buffer_init contained the unfinished statement
 *    `if ( spwalk == push_para && )`, which cannot compile.
 */
template <typename F, typename IT, typename T, 
          SpWalk spwalk, Reduction reduce, typename SpConf, typename DnConf,
          uint DnIPV, uint DnIters>
struct WritebackSwitch {
  typedef llvlib::CoopCoordinator<SpConf, DnConf> Coord;
  using writeback_func_t = void (*) (T*, const IT, uint, const uint, llvlib::Vect<T,DnIPV>*, IT*, T*);

  static constexpr writeback_func_t get_writeback()
  {
    if ( spwalk <= pull_dyn )
    {
      // mutual layouts own their destination rows — plain stores suffice
      if ( Coord::Mutual ) return writeback_naive<IT, T, DnConf, DnIPV, DnIters>;

      if ( reduce == atomic ) return writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>;
      if ( reduce == shfl_red && SpConf::xdim < 32)
        return writeback_wreduce<F, IT, T, SpConf, DnConf, DnIPV, DnIters>;
      else return writeback_sreduce<F, IT, T, SpConf, DnConf, DnIPV, DnIters>;
    }

    // NOTE(review): enum spelling "psuh_seq" kept as declared elsewhere
    if ( spwalk == psuh_seq ) return writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>;

    if ( spwalk == push_para )
    {
      if ( SpConf::xdim < 32 && reduce == shfl_red) return writeback_segreduce_shuffle<F, IT, T, SpConf, DnConf, DnIPV, DnIters>;
      else return writeback_segreduce_shared<F, IT, T, SpConf, DnConf, DnIPV, DnIters>;
    }

    // defensive default so every control path returns a value
    return writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>;
  }

  // TODO(review): the original body was the unfinished statement
  // `if ( spwalk == push_para && )` and could not compile. Kept as a no-op
  // until the intended key/value buffer initialisation is restored.
  static void __device__ __forceinline__ reduce_buffer_init(IT key, T val, IT* _k_buf, T* _v_buf)
  {
  }
};


/**
 * Pull-mode row walk without any nonzero caching: the dn_coop y-subgroups
 * of the dense configuration stride jointly through the row's nonzeros,
 * feeding each (column, value) pair to the selected dense walker.
 */
template <typename F, typename IT, typename T, DnCompute DnCom, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_non_cached(const IT start, const IT end, const uint col_base, 
                   const IT *col_ptr, const T *val_ptr, const T *src_ptr, 
                   const uint ld_src, llvlib::Vect<T,DnIPV> local_sum[])
{
  static_assert(SpConf::ydim <= DnConf::ydim && "Sparse major is deprecated!");

  // number of dense y-subgroups cooperating on one sparse row
  constexpr uint dn_coop = DnConf::ydim / SpConf::ydim;
  constexpr auto walker = DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker();

  for (uint eid = start + DnConf::yid() % dn_coop; eid < end; eid += dn_coop)
  {
    walker(val_ptr[eid], src_ptr, ld_src*col_ptr[eid] + col_base, local_sum);
  }
}


/**
 * Pull-mode row walk with register-cached nonzeros: each cooperating thread
 * vector-loads SpIPV (column, value) pairs at a time into registers, with
 * scalar prologue/epilogue loops covering the unaligned head and tail.
 *
 * @param start,end  half-open nonzero range [start, end) of this row
 * @param col_base   dense column offset of this thread's first tile
 * @param col_ptr    column indices, val_ptr matching edge weights
 * @param src_ptr    dense source base, ld_src its leading dimension
 * @param local_sum  per-dense-iteration accumulators, updated in place
 */
template <typename F, typename IT, typename T, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_local_cached(const IT start, const IT end, const uint col_base, 
                     const IT *col_ptr, const T *val_ptr, const T *src_ptr, 
                     const uint ld_src, llvlib::Vect<T,DnIPV> local_sum[])
{
  using namespace llvlib;
  typedef typename VectTypes<IT, SpIPV>::type spiv_t;
  typedef typename VectTypes<T, SpIPV>::type spvv_t;
  
  // dn_coop dense y-subgroups share one sparse row; each takes every
  // dn_coop-th element (scalar loops) or every dn_coop-th vector (main loop)
  constexpr uint dn_coop = DnConf::ydim / SpConf::ydim;
  const uint dn_yid = DnConf::yid() % dn_coop;

  uint i;
  if (SpIPV > 1 && start + SpIPV <= end)
  {
    Vect<IT, SpIPV> spidx_reg;
    Vect<T, SpIPV> spval_reg;
    
    // NOTE(review): when `start` is already SpIPV-aligned this yields
    // prefix == SpIPV (not 0), so one full vector's worth of elements is
    // processed scalar — correct, but presumably
    // `(SpIPV - start % SpIPV) % SpIPV` was intended; confirm.
    uint prefix = SpIPV - start % SpIPV;
    uint suffix = end - end % SpIPV;
    
    // scalar prologue: unaligned head elements
    for (uint j=dn_yid; j<prefix; j+=dn_coop)
    {
      IT col = col_ptr[start+j];
      T  val = val_ptr[start+j];

      DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }

    // vectorized main loop over SpIPV-aligned full vectors
    i = start + prefix + dn_yid*SpIPV;
    for (; i < suffix; i+=SpIPV*dn_coop)
    {
      spidx_reg.template load_thread_offset<spiv_t>(const_cast<IT*>(col_ptr),i);
      spval_reg.template load_thread_offset<spvv_t>(const_cast<T*>(val_ptr),i);
      
      for (uint j=0; j<SpIPV; ++j)
      {
        IT col = spidx_reg[j];
        T  val = spval_reg[j];
        
        DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      }
    }
    i = suffix + dn_yid;
  } 
  else
  {
    // row too short to vectorize: everything goes through the scalar tail
    i = start + dn_yid;
  }

  // scalar epilogue: unaligned tail elements
  for (; i<end; i+=dn_coop)
  {
    IT col = col_ptr[i];
    T  val = val_ptr[i];

    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
  }
}

/**
 * Pull-mode row walk with warp-cached nonzeros: every lane of the x-group
 * vector-loads SpIPV (column, value) pairs into registers, and the whole
 * SpConf::xdim*SpIPV tile is then broadcast lane-by-lane via warp shuffles
 * so all lanes walk identical elements. Scalar prologue/epilogue loops
 * cover the unaligned head and the ragged tail.
 *
 * BUGFIX: the broadcast edge *value* was declared `IT` (the integer index
 * type) instead of `T`, silently converting floating-point weights through
 * the index type. Shuffle masks are now the explicit full-warp constant
 * instead of `-1`.
 */
template <typename F, typename IT, typename T, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_warp_cached(const IT start, const IT end, const uint col_base, 
                     const IT *col_ptr, const T *val_ptr, const T *src_ptr, 
                     const uint ld_src, llvlib::Vect<T,DnIPV> local_sum[])
{
  // static_assert(SpConf::xdim <= 32 && "only warp-level cooperation is allowed");

  using namespace llvlib;
  typedef typename VectTypes<IT, SpIPV>::type spiv_t;
  typedef typename VectTypes<T, SpIPV>::type spvv_t;

  Vect<IT, SpIPV> spidx_reg;
  Vect<T, SpIPV> spval_reg;
  
  constexpr uint SpTileSize = SpConf::xdim * SpIPV;
  constexpr uint benefit_thresh = 0; // 0: tiled path taken whenever a full tile fits
  
  // scalar prologue up to the first SpIPV-aligned element
  uint i = start;
  for (; i%SpIPV!=0; ++i)
  {
    IT col = col_ptr[i];
    T  val = val_ptr[i];
    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
  }

  if (start + benefit_thresh <= end)
  { 
    for (; i+SpTileSize<=end; i+=SpTileSize)
    {
      spidx_reg.template load_thread_offset<spiv_t>(const_cast<IT*>(col_ptr),i+SpIPV*SpConf::xid());
      spval_reg.template load_thread_offset<spvv_t>(const_cast<T*>(val_ptr),i+SpIPV*SpConf::xid());
      
      for (uint j=0; j<SpTileSize; ++j)
      {
        uint thd = j / SpIPV; // lane holding element j
        uint off = j % SpIPV; // slot within that lane's vector
        // all lanes execute this loop uniformly -> full-warp participation
        IT col = __shfl_sync(0xffffffffu, spidx_reg[off], thd, SpConf::xdim);
        T  val = __shfl_sync(0xffffffffu, spval_reg[off], thd, SpConf::xdim);

        DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      }
    }
  }

  // scalar epilogue for the ragged tail
  for (; i<end; ++i)
  {
    IT col = col_ptr[i];
    T  val = val_ptr[i];
    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
  }
}

/**
 * Pull-mode row walk with shared-memory-cached nonzeros: each sparse
 * y-group stages one SpConf::xdim*SpIPV tile of (column, value) pairs in
 * shared memory, then all cooperating threads walk the staged tile.
 * Scalar prologue/epilogue loops cover the unaligned head and ragged tail;
 * the tiled path is only entered for rows longer than `benefit_thresh`.
 *
 * NOTE(review): there is no barrier between consuming tile j and loading
 * tile j+1 (the SpConf::sync() at the loop top only separates load from
 * consume) — if SpConf::sync() spans more than one warp this could race;
 * confirm with racecheck.
 */
template <typename F, typename IT, typename T, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_shared_cached(const IT start, const IT end, const uint col_base, 
                     const IT *col_ptr, const T *val_ptr, const T *src_ptr, 
                     const uint ld_src, llvlib::Vect<T,DnIPV> local_sum[])
{
  using namespace llvlib;
  typedef typename VectTypes<IT, SpIPV>::type spiv_t;
  typedef typename VectTypes<T, SpIPV>::type spvv_t;

  // one staging tile per sparse row handled by this block
  constexpr uint SpTileSize = SpConf::xdim * SpIPV;
  __shared__ Vect<IT, SpTileSize> spidx_buffer[SpConf::ydim];
  __shared__ Vect<T, SpTileSize> spval_buffer[SpConf::ydim];

  // rows shorter than this don't amortize the staging cost
  constexpr uint benefit_thresh = 6;
  
  // scalar prologue up to the first SpIPV-aligned element
  uint i = start;
  for (; i%SpIPV!=0; ++i)
  {
    IT col = col_ptr[i];
    T  val = val_ptr[i];
    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
  }
  SpConf::sync();

  if (start + benefit_thresh <= end)
  { 
    auto& spidx_local = spidx_buffer[SpConf::yid()];
    auto& spval_local = spval_buffer[SpConf::yid()];
    
    // full tiles only (i+SpTileSize<=end), so padded loads never read past `end`
    for (; i+SpTileSize<=end; i+=SpTileSize)
    {
      uint border = end - i;
      spidx_local.template load_warp_padded_offset<spiv_t, SpConf>(const_cast<IT*>(col_ptr),i,border);
      spval_local.template load_warp_padded_offset<spvv_t, SpConf>(const_cast<T*>(val_ptr),i,border);
      SpConf::sync(); // staged tile must be complete before any lane consumes it
      
      for (uint j=0; j<SpTileSize; ++j)
      {
        IT col = spidx_local[j];
        T  val = spval_local[j];
        
        DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      }
    }
    SpConf::sync();
  }

  // scalar epilogue for the ragged tail
  for (; i<end; ++i)
  {
    IT col = col_ptr[i];
    T  val = val_ptr[i];
    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
  }
}


/**
 * Push-mode walk over an edge range [start, end): locate the owning row of
 * the first edge by binary search on the CSR offsets, accumulate edges row
 * by row, and atomically flush the accumulator whenever a row boundary is
 * crossed (and once more at the end).
 *
 * Fixes over the original:
 *  - the writeback calls passed `F::reduce, F::reduceAtomic` as the first
 *    two runtime arguments and omitted `F` from the template list, which
 *    does not match writeback_atomic_direct's declaration (compare the
 *    template list used by proceed_push_shared_cached); the calls now match
 *    the declared signature, with nullptr for the unused buffer slots;
 *  - row advance is a `while`, not an `if`, so empty CSR rows
 *    (row_offset[v] == row_offset[v+1]) are skipped instead of absorbing
 *    the following row's edges.
 */
template <typename F, typename IT, typename T, 
          DnCompute DnCom, Reduction reduce, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_simple(const IT start, const IT end, const uint col_base,
                    const IT *row_offset, const IT *col_ptr, const T *val_ptr,
                    const T *src_ptr, const IT ld_src, T *dst_ptr, const IT ld_dst,
                    const IT nv, const IT vlen, 
                    llvlib::Vect<T,DnIPV> local_sum[])
{
  uint eid = start;
  uint vid = llvlib::binary_search(eid, row_offset, 0, nv);
  uint bound = row_offset[vid+1];
  bool updated = false;
  while ( eid < end )
  {
    // cross row boundaries (possibly several, for empty rows) before
    // attributing edge eid
    while ( eid == bound )
    {
      if ( updated )
        writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>(
            dst_ptr, ld_dst*vid, col_base, vlen, local_sum, nullptr, nullptr);
      for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val);
      vid += 1;
      bound = row_offset[vid+1];
      updated = false;
    }
    IT col = col_ptr[eid];
    T val = val_ptr[eid];
    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    updated = true;
    eid += 1;
  }
  
  // flush the last (partial) row
  if ( updated )
    writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>(
        dst_ptr, ld_dst*vid, col_base, vlen, local_sum, nullptr, nullptr);
}


/**
 * Push-style SpMM worker with shared-memory staging: like proceed_push_simple,
 * but loads the sparse columns/values in tiles of SpTileSize elements into the
 * caller-provided shared buffers before accumulating, so the inner loop reads
 * shared memory instead of global.
 *
 * NOTE(review): the tile loop always iterates the full SpTileSize even for the
 * final partial tile (eid + i may exceed `end`); this relies on
 * load_warp_padded_offset padding out-of-range slots with neutral values —
 * confirm against its implementation.
 */
template <typename F, typename IT, typename T, 
          DnCompute DnCom, Reduction reduce, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_shared_cached(const IT start, const IT end, const uint col_base,
                           const IT *row_offset, const IT *col_ptr, const T *val_ptr,
                           const T *src_ptr, const IT ld_src, T *dst_ptr, const IT ld_dst,
                           const IT nv, const IT vlen, 
                           llvlib::Vect<T,DnIPV> local_sum[],
                           llvlib::Vect<IT, SpIPV*SpConf::xdim> spcol_buf[],
                           llvlib::Vect<T, SpIPV*SpConf::xdim>  spval_buf[])
{
  using namespace llvlib;
  // Vector types used for the cooperative (padded) tile loads.
  typedef typename VectTypes<IT, SpIPV>::type spiv_t;
  typedef typename VectTypes<T, SpIPV>::type spvv_t;

  constexpr uint SpTileSize = SpIPV * SpConf::xdim;

  uint eid = start;
  // Find the CSR row that owns edge `start`.
  uint vid = llvlib::binary_search(eid, row_offset, 0u, nv);
  uint bound = row_offset[vid+1];  // first edge index of the next row
  bool updated = false;

  // Each y-group stages into its own tile slot.
  auto& spcol = spcol_buf[SpConf::yid()];
  auto& spval = spval_buf[SpConf::yid()];

  for ( ; eid < end; eid += SpTileSize)
  {
    // Cooperative, padded load of the next tile of columns and weights.
    spcol.template load_warp_padded_offset<spiv_t, SpConf>(const_cast<IT*>(col_ptr), eid, end-eid);
    spval.template load_warp_padded_offset<spvv_t, SpConf>(const_cast<T*>(val_ptr),  eid, end-eid);

    for (uint i=0; i<SpTileSize; ++i)
    { 
      if ( eid+i == bound )  // crossed a row boundary: flush partials
      {
        if ( updated )
        writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>(
            dst_ptr, ld_dst*vid, col_base, vlen, local_sum
            // _key_buf, _val_buf
            );
        // Reset accumulators (this `i` shadows the tile-loop `i` — intentional
        // but fragile; the outer loop resumes with its own counter).
        for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val);
        vid += 1;
        bound = row_offset[vid+1];
        updated = false;
      }
      IT col = spcol[i];
      T  val = spval[i];
      DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      updated = true;
    } 
  }
  
  // Flush partials of the final row.
  if ( updated )
  writeback_atomic_direct<F, IT, T, DnConf, DnIPV, DnIters>(
      dst_ptr, ld_dst*vid, col_base, vlen, local_sum
      // _key_buf, _val_buf
      );
}

/**
 * Push-style SpMM worker where `dn_coop` dense sub-groups cooperatively walk
 * one edge slice: each sub-group takes every dn_coop-th edge, locates its
 * owning row via binary search, records the row id in a shared segment buffer,
 * and writeback_atomic_sreduce combines/flushes partial sums using that
 * segment information.  Statement order (publish seg id -> accumulate ->
 * shared reduce -> reset -> sync -> invalidate) is load-bearing; do not
 * reorder.
 */
template <typename F, typename IT, typename T, 
          DnCompute DnCom, Reduction reduce, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_reduced(const IT start, const IT end, const uint col_base,
                    const IT *row_offset, const IT *col_ptr, const T *val_ptr,
                    const T *src_ptr, const IT ld_src, T *dst_ptr, const IT ld_dst,
                    const IT nv, const IT vlen, 
                    llvlib::Vect<T,DnIPV> local_sum[])
{
  // How many dense groups share one sparse group's edge slice.
  constexpr uint dn_coop = SpConf::xdim / DnConf::xdim;
  __shared__ IT sh_seg_buffer[SpConf::ydim][dn_coop];
  __shared__ T  sh_val_buffer[SpConf::ydim][SpConf::xdim];

  const uint dn_id = DnConf::yid() % dn_coop; 

  uint eid = start + dn_id;  // strided edge assignment across sub-groups
  uint vid = 0;
  auto _seg_buf = sh_seg_buffer[SpConf::yid()];
  auto _val_buf = sh_val_buffer[SpConf::yid()];

  for ( ;eid < end; eid += dn_coop )
  {
    IT col = col_ptr[eid];
    T val  = val_ptr[eid];
    // Row owning this edge; published so the shared reduce can segment by row.
    vid = llvlib::binary_search(eid, row_offset, 0, nv);
    _seg_buf[dn_id] = vid;
    DenseWalkSwitch<F, IT, T, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    writeback_atomic_sreduce<IT, T, SpConf, DnConf, DnIPV, DnIters>(
      F::reduce,
      F::reduceAtomic,
      dst_ptr, ld_dst*vid, col_base, vlen, local_sum
      ,_seg_buf ,_val_buf
    );
    for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val);
    SpConf::sync();
    _seg_buf[dn_id] = -1;  // invalidate the slot for the next iteration
  }
}

// Software-managed row scheduling: its performance proved insufficient, hence deprecated.
/**
 * DEPRECATED work scheduler: hands the next row offset to a sparse group.
 * - pull_fix: static round-robin (grid-strided) assignment.
 * - otherwise: dynamic assignment via an atomic global counter, with the
 *   leader's ticket broadcast to its group (shared-memory bcast when the
 *   group spans more than a warp, warp shuffle otherwise).
 *
 * NOTE(review): the first_call zeroing of *global_cnt by (thread 0, block 0)
 * is only fenced, not synchronized grid-wide — other blocks may atomicAdd
 * before the reset lands; confirm the counter is pre-zeroed by the host.
 */
template <SpWalk wk, typename SpConf, typename index_t, bool first_call>
[[__deprecated__]] __device__ __forceinline__ index_t
sparse_row_scheduler(index_t last_offset, index_t *global_cnt, index_t* bcast)
{
  if (wk == pull_fix) 
  {
    if (first_call) return blockIdx.x * SpConf::ydim + SpConf::yid();
    else return last_offset += gridDim.x * SpConf::ydim;
  }
  else {
    if (first_call)
    {
      if(threadIdx.x==0 && blockIdx.x==0) *global_cnt = 0;
      __threadfence();
    }
    if (SpConf::xdim > 32)
    {
      // Group wider than a warp: broadcast the ticket through shared memory.
      if (!SpConf::xid()) *bcast = atomicAdd(global_cnt, 1);
      SpConf::sync();
      return *bcast;
    }
    else
    {
      uint lead;
      if (!SpConf::xid()) lead = atomicAdd(global_cnt, 1);
      // -1 converts to the full 0xffffffff participation mask.
      return __shfl_sync(-1, lead, 0, SpConf::xdim);
    }
  }
}


/**
 * Push-style (edge-parallel) SpMM kernel: the edge list is statically split
 * into contiguous slices, one per sparse y-group, and each slice is processed
 * by the proceed_push_* variant selected by the compile-time knobs
 * (spwalk, spload).  Destination updates use atomics since a row's edges may
 * straddle slice boundaries.
 *
 * Grid: (edge-slice blocks, dense column blocks).  Shared memory: two
 * SpTileSize tiles per y-group for the shared-cached variant.
 *
 * Fixes vs. previous revision: all three helper calls were missing the
 * Reduction template argument (their template lists take 10 parameters), and
 * proceed_push_simple / proceed_push_reduced were passed spcol_buf/spval_buf
 * arguments their signatures do not accept.  Because these runtime `if`s
 * instantiate every branch, each mismatch was a hard compile error.
 */
template <typename F, typename index_t, typename scalar_t, 
          SpWalk spwalk, SpLoad spload, DnCompute dncomp, Reduction red,
          typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__global__ void spmm_kernel_push(
  const index_t  * __restrict__ row_offset,
  const index_t  * __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const scalar_t * __restrict__ src_ptr,
  const index_t  ld_src,
  scalar_t * __restrict__ dst_ptr,
  const index_t  ld_dst,
  const index_t  nv, 
  const index_t  ne, 
  const index_t  vlen)
{
  constexpr uint SpTileSize = SpIPV * SpConf::xdim;
  // Staging tiles for the shared-memory cached variant (one per y-group).
  __shared__ llvlib::Vect<index_t, SpTileSize> spcol_buf[SpConf::ydim];
  __shared__ llvlib::Vect<scalar_t, SpTileSize> spval_buf[SpConf::ydim];

  // First dense column handled by this thread.
  const uint col_base = (DnConf::xid() + DnConf::xdim*blockIdx.y)*DnIPV*DnIters;

  // Static partition of the edge list into contiguous per-group slices.
  const index_t stride = CEIL(ne, gridDim.x*SpConf::ydim);
  const index_t start = (SpConf::yid() + blockIdx.x * SpConf::ydim)*stride;
  const index_t end = MIN(start + stride, ne);

  llvlib::Vect<scalar_t, DnIPV> local_sum[DnIters];
  for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val); 

  if (spwalk == push_seq)
  {
    if (spload == shared)
      proceed_push_shared_cached<F, index_t, scalar_t, 
                                dncomp, red, SpConf, DnConf, 
                                SpIPV, DnIPV, DnIters>(
          start, end, col_base, row_offset, col_indx,
          edge_weight, src_ptr, ld_src, dst_ptr, ld_dst, nv, vlen, local_sum,
          spcol_buf, spval_buf
          );
    if (spload == none) 
      // The simple variant takes no staging buffers.
      proceed_push_simple<F, index_t, scalar_t, 
                          dncomp, red, SpConf, DnConf, 
                          SpIPV, DnIPV, DnIters>(
          start, end, col_base, row_offset, col_indx,
          edge_weight, src_ptr, ld_src, dst_ptr, ld_dst, nv, vlen, local_sum
          );
  }
  if (spwalk == push_para)
  {
    // The reduced variant allocates its own shared segment buffers.
    proceed_push_reduced<F, index_t, scalar_t, 
                         dncomp, red, SpConf, DnConf, 
                         SpIPV, DnIPV, DnIters>(
        start, end, col_base, row_offset, col_indx,
        edge_weight, src_ptr, ld_src, dst_ptr, ld_dst, nv, vlen, local_sum
    );
  }
}

/**
 * Pull-style (row-parallel) SpMM kernel: each sparse y-group owns one
 * destination row (optionally indirected through row_indx when spwk ==
 * pull_dyn, e.g. a degree-sorted ordering), accumulates its neighbors via the
 * proceed_pull_* variant selected by `spload`, and writes the result back with
 * the strategy selected by `reduction` / the coop coordinator.
 *
 * Fix vs. previous revision: the WritebackSwitch instantiation used `spwalk`,
 * which names nothing in this template (the SpWalk parameter here is `spwk`);
 * that was an unresolved identifier on instantiation.
 *
 * NOTE(review): spidx_buffer / spval_buffer / sh_row_bcaster are declared but
 * never passed to the helpers in this revision — confirm whether they are
 * vestigial (they still consume shared memory).
 * NOTE(review): the WritebackSwitch call is followed by the explicit
 * Coord-conditional writebacks below; if get_writeback() already stores the
 * result this double-writes — confirm the switch is a no-op for these Coord
 * configurations.
 */
template <typename F, typename index_t, typename scalar_t, 
          SpWalk spwk, SpLoad spload, 
          DnCompute dncomp, Reduction reduction, 
          typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__global__ void spmm_kernel_pull(
  const index_t  * __restrict__ row_indx,
  const index_t  * __restrict__ row_offset,
  const index_t  * __restrict__ col_indx,
  const scalar_t * __restrict__ edge_weight,
  const scalar_t * __restrict__ src_vecs,
  const index_t  ld_src,
  scalar_t * __restrict__ dst_vecs,
  const index_t  ld_dst,
  const index_t  nv, 
  const index_t  ne, 
  const index_t  vlen)
{
  typedef llvlib::CoopCoordinator<SpConf, DnConf> Coord;
  static_assert(!Coord::SparseMajor && "not handled currently");

  constexpr uint SpTileSize = SpIPV * SpConf::xdim;
  __shared__ llvlib::Vect<index_t, SpTileSize> spidx_buffer[SpConf::ydim];
  __shared__ llvlib::Vect<scalar_t, SpTileSize> spval_buffer[SpConf::ydim];
  __shared__ scalar_t sh_reduce_buffer[SpConf::ydim][SpConf::xdim];
  __shared__ index_t sh_row_bcaster[SpConf::ydim];

  // First dense column handled by this thread.
  const uint col_base = (DnConf::xid() + DnConf::xdim*blockIdx.y)*DnIPV*DnIters;
  const index_t RowOffset = blockIdx.x * SpConf::ydim + SpConf::yid();
  if ( RowOffset >= nv ) return;
  // pull_dyn walks rows through an indirection array (e.g. sorted order).
  const index_t RowId = spwk == pull_dyn ? row_indx[RowOffset] : RowOffset;

  llvlib::Vect<scalar_t, DnIPV> local_sum[DnIters];
  for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val); 

  if (spload == none) 
  {
    proceed_pull_non_cached<F, index_t, scalar_t, dncomp, SpConf, DnConf, DnIPV, DnIters>(
        row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  } 
  if (spload == local) {
    proceed_pull_local_cached<F, index_t, scalar_t, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>(
        row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  } 
  if (spload == shfl) {
    proceed_pull_warp_cached<F, index_t, scalar_t, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>(
        row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  }
  if (spload == shared) {
    proceed_pull_shared_cached<F, index_t, scalar_t, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>
        (row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  }

  WritebackSwitch<F, index_t, scalar_t, 
                  spwk, reduction, 
                  SpConf, DnConf, DnIPV, DnIters>::get_writeback()
    (dst_vecs, ld_dst*RowId, col_base, vlen, local_sum, sh_reduce_buffer[SpConf::yid()]);

  if (Coord::Mutual)
  {
    writeback_naive<index_t, scalar_t, DnConf, DnIPV, DnIters>(
      dst_vecs, ld_dst*RowId, col_base, vlen, local_sum);
  }
  if (Coord::DenseMajor)
  {
    if (reduction == atomic)
    {
      writeback_atomic_direct<F, index_t, scalar_t, DnConf, DnIPV, DnIters>(
          dst_vecs, ld_dst*RowId, col_base, vlen, local_sum);
    }
    
    // Warp shuffle reduce only works when the sparse group fits in a warp.
    if (reduction == shfl_red && SpConf::xdim <= 32)
    {
      writeback_wreduce<F, index_t, scalar_t, SpConf, DnConf, DnIPV, DnIters>(
          dst_vecs, ld_dst*RowId, col_base, vlen, local_sum);
    }

    // Shared-memory reduce: explicit choice, or forced when xdim > warp size.
    if (reduction == shared_red || SpConf::xdim > 32)
    {
      writeback_sreduce<F, index_t, scalar_t, SpConf, DnConf, DnIPV, DnIters>(
      dst_vecs, ld_dst*RowId, col_base, vlen, local_sum, sh_reduce_buffer[SpConf::yid()]);
    }
  }
}


// this one does not work well
// template <typename F, typename index_t, typename scalar_t, 
//           SpWalk spwk, SpLoad spload, 
//           DnCompute dncomp, Reduction reduction,
//           typename SpConf, typename DnConf, 
//           uint SpIPV, uint DnIPV, uint DnIters>
// __global__ void spmm_kernel_pull(
//   const index_t  * __restrict__ row_indx,
//   const index_t  * __restrict__ row_offset,
//   const index_t  * __restrict__ col_indx,
//   const scalar_t * __restrict__ edge_weight,
//   const scalar_t * __restrict__ src_vecs,
//   const index_t  ld_src,
//   scalar_t * __restrict__ dst_vecs,
//   const index_t  ld_dst,
//   const index_t  nv, 
//   const index_t  ne, 
//   const index_t  vlen,
//   index_t *gl_cnt)
// {
//   typedef llvlib::CoopCoordinator<SpConf, DnConf> Coord;
//   static_assert(!Coord::SparseMajor && "not handled currently");

//   constexpr uint SpTileSize = SpIPV * SpConf::xdim;
//   __shared__ llvlib::Vect<index_t, SpTileSize> spidx_buffer[SpConf::ydim];
//   __shared__ llvlib::Vect<scalar_t, SpTileSize> spval_buffer[SpConf::ydim];
//   __shared__ scalar_t sh_reduce_buffer[SpConf::ydim][SpConf::xdim];
//   __shared__ index_t sh_row_bcaster[SpConf::ydim];

//   bool weighted = (edge_weight != nullptr);
//   bool sorted = (row_indx != nullptr);

//   const uint col_base = (DnConf::xid() + DnConf::xdim*blockIdx.y)*DnIPV*DnIters;
//   index_t RowOffset = sparse_row_scheduler<spwk, SpConf, index_t, true>
//                       (0, gl_cnt+blockIdx.y, &sh_row_bcaster[SpConf::yid()]);
//   while (RowOffset <= nv)
//   {
//     index_t RowId = sorted ? row_indx[RowOffset] : RowOffset;
//     llvlib::Vect<scalar_t, DnIPV> local_sum[DnIters];
//     for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val); 
//     
//     ... ...
// 
//     RowOffset = sparse_row_scheduler<spwk, 
//                                      SpConf, 
//                                      index_t, 
//                                      false>(RowOffset, gl_cnt+blockIdx.y, &sh_row_bcaster[SpConf::yid()]);
//   }
// }

/**
 * Naive warp-cooperative SpMM (float4 columns): each warp handles one row of
 * A; each lane covers 4 consecutive dense columns of B/C.  The warp buffers
 * 32 nonzeros of A in registers (one per lane) and broadcasts them one at a
 * time via shuffle.
 * Assumes blockDim.x == 32 and num_col divisible by 4 (float4 accesses);
 * rows are not bounds-checked — the launcher must size the grid exactly.
 */
__global__ void spmm_naive(
    const uint32_t* __restrict__ A_row,
    const uint32_t* __restrict__ A_col,
    const float* __restrict__ A_val,
    const float* __restrict__ B,
    float* __restrict__ C,
    uint32_t num_col)
{
    uint32_t rowA = blockIdx.x * blockDim.y + threadIdx.y;
    // Uniformization trick: rowA is identical across the warp (it depends
    // only on threadIdx.y), so lane i voting on bit i of its own copy lets
    // ballot reassemble the same value as a provably warp-uniform register.
    rowA = __ballot_sync(0xffffffff, (rowA & (1 << threadIdx.x)) != 0);
    uint32_t offA_begin = A_row[rowA];
    uint32_t offA_end = A_row[rowA + 1];
    uint32_t colB = (blockIdx.y * blockDim.x + threadIdx.x) * 4;
    float4 sum = { 0.0f, 0.0f, 0.0f, 0.0f };
    const float4* B_gptr = reinterpret_cast<const float4*>(&B[colB]);

    // One A nonzero staged per lane; refilled every 32 edges.
    uint32_t colA_reg_buf;
    float valA_reg_buf;
    if (offA_begin % 32 != 0)
    {
        // Pre-fill for a row starting mid-way through a 32-aligned chunk.
        bool load_flag = (offA_begin & -32) + threadIdx.x >= offA_begin && (offA_begin & -32) + threadIdx.x < offA_end;
        if (load_flag)
        {
            colA_reg_buf = A_col[(offA_begin & -32) + threadIdx.x];
            valA_reg_buf = A_val[(offA_begin & -32) + threadIdx.x];
        }
    }
    for (uint32_t offA = offA_begin; offA < offA_end; offA++)
    {
        // Refill the 32-wide register buffer at each aligned boundary.
        bool load_flag = offA % 32 == 0 && offA + threadIdx.x < offA_end;
        if (load_flag)
        {
            colA_reg_buf = A_col[offA + threadIdx.x];
            valA_reg_buf = A_val[offA + threadIdx.x];
        }
        // Broadcast the current nonzero from its owning lane.
        uint32_t colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
        float4 valB = B_gptr[colA * num_col >> 2];
        float valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
        sum.x += valA * valB.x;
        sum.y += valA * valB.y;
        sum.z += valA * valB.z;
        sum.w += valA * valB.w;
    }
    float4* C_gptr = reinterpret_cast<float4*>(&C[rowA * num_col + colB]);
    C_gptr[0] = sum;
}


/**
 * Software-pipelined SpMM (L2-oriented): one block per row of A; a row's
 * nonzeros are split into TDY equal slices, one per threadIdx.y.  Nonzeros
 * are double-buffered 32-at-a-time in warp registers and broadcast by
 * shuffle; the B-row load for nonzero k overlaps the FMA of nonzero k-1,
 * with a final drain after the loops.
 * TDY > 1 combines the per-slice partials in shared memory via atomicAdd,
 * with threadIdx.y == 0 storing the result.  has_add accumulates into the
 * existing contents of C.
 * Assumes blockDim.x == 32, blockDim.y == TDY, num_col divisible by 4.
 *
 * NOTE(review): with TDY > 1 there is no __syncthreads() between the
 * sum_buf zero-init (every warp rewrites the whole buffer) and the
 * atomicAdd accumulation — a fast warp's adds can be clobbered by a slow
 * warp's zeroing; confirm.
 * NOTE(review): the early `return` on empty slices means not all threads
 * reach the later __syncthreads() — block-barrier divergence; confirm rows
 * are never empty under the intended launch.
 */
template<bool has_add, uint32_t TDY>
__global__ void spmm_l2(
    const uint32_t* __restrict__ A_row_begin,
    const uint32_t* __restrict__ A_row_end,
    const uint32_t* __restrict__ A_col,
    const float* __restrict__ A_val,
    const float* __restrict__ B,
    float* __restrict__ C,
    uint32_t num_col)
{
    __shared__ float sum_buf[4][32];

    uint32_t rowA = blockIdx.x;
    uint32_t offA_begin_tmp = A_row_begin[rowA];
    uint32_t offA_end_tmp = A_row_end[rowA];
    if (TDY > 1)
    {
        // Zero the whole 4x32 partial-sum buffer (32 lanes x float4 covers it).
        reinterpret_cast<float4*>(sum_buf)[threadIdx.x] = float4{0.0f, 0.0f, 0.0f, 0.0f};
    }
    // This warp's slice of the row's nonzeros: slice threadIdx.y of TDY.
    uint32_t offA_begin = offA_begin_tmp + (offA_end_tmp - offA_begin_tmp) * threadIdx.y / TDY;
    uint32_t offA_end = offA_begin_tmp + (offA_end_tmp - offA_begin_tmp) * (threadIdx.y + 1) / TDY;
    // Ballot trick: rebuild the warp-uniform bounds as uniform registers.
    offA_begin = __ballot_sync(0xffffffff, (offA_begin & (1 << threadIdx.x)) != 0);
    offA_end = __ballot_sync(0xffffffff, (offA_end & (1 << threadIdx.x)) != 0);

    uint32_t colB = (blockIdx.y * blockDim.x + threadIdx.x) * 4;
    float4 sum = has_add ? *reinterpret_cast<const float4*>(&C[rowA * num_col + colB]) : float4{ 0.0f, 0.0f, 0.0f, 0.0f };
    const float4* B_gptr = reinterpret_cast<const float4*>(&B[colB]);
    const float4* B_tptr;
    if (offA_begin == offA_end) return;

    // Double-buffered per-lane staging of A nonzeros (current + next chunk).
    uint32_t colA_reg_buf;
    float valA_reg_buf;
    uint32_t colA_reg_buf_next;
    float valA_reg_buf_next;
    uint32_t offA = offA_begin & -32;   // align down to the 32-chunk start
    if (offA + threadIdx.x >= offA_begin && offA + threadIdx.x < offA_end)
    {
        colA_reg_buf = A_col[offA + threadIdx.x];
        valA_reg_buf = A_val[offA + threadIdx.x];
    }
    if (offA + 32 + threadIdx.x < offA_end)
    {
        colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
        valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
    }
    offA = offA_begin;
    float valA = 0.0f;
    float4 valB = { 0.0f, 0.0f, 0.0f, 0.0f };
    uint32_t colA;
    // Prologue: drain the (possibly partial) first aligned chunk.
    if (offA < (offA_begin & -32) + 32 && offA < offA_end)
    {
        colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
        valB = B_gptr[colA * (num_col / 4)];
        valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
        offA++;
#pragma nounroll
        while (offA < (offA_begin & -32) + 32 && offA < offA_end)
        {   
            // FMA for the previous nonzero overlaps the next B-row load.
            colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
            sum.x += valA * valB.x;
            sum.y += valA * valB.y;
            sum.z += valA * valB.z;
            sum.w += valA * valB.w;
            valB = B_gptr[colA * (num_col / 4)];
            valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
            offA++;
        }
    }
    // Steady state: rotate in the prefetched chunk, prefetch the one after.
    offA = (offA_begin & -32) + 32;
    while (offA < offA_end)
    {
        colA_reg_buf = colA_reg_buf_next;
        valA_reg_buf = valA_reg_buf_next;
        if (offA + 32 + threadIdx.x < offA_end)
        {
            colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
            valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
        }
        #pragma unroll
        for (uint32_t i = 0; i < 32; i++)
        {
            if (offA >= offA_end)
            {
                break;
            }
            colA = __shfl_sync(0xffffffff, colA_reg_buf, i, 32);
            B_tptr = &B_gptr[colA * (num_col / 4)];
            sum.x += valA * valB.x;
            sum.y += valA * valB.y;
            sum.z += valA * valB.z;
            sum.w += valA * valB.w;
            valB = *B_tptr;
            valA = __shfl_sync(0xffffffff, valA_reg_buf, i, 32);
            offA++;
        }
    }
    // Epilogue: the pipeline holds one pending multiply — drain it.
    sum.x += valA * valB.x;
    sum.y += valA * valB.y;
    sum.z += valA * valB.z;
    sum.w += valA * valB.w;
    if (TDY == 1)
    {
        *reinterpret_cast<float4*>(&C[rowA * num_col + colB]) = sum;
    }
    else
    {
        // Combine the TDY per-warp partial sums in shared memory.
        atomicAdd(&sum_buf[0][threadIdx.x], sum.x);
        atomicAdd(&sum_buf[1][threadIdx.x], sum.y);
        atomicAdd(&sum_buf[2][threadIdx.x], sum.z);
        atomicAdd(&sum_buf[3][threadIdx.x], sum.w);
        __syncthreads();
        if (threadIdx.y == 0)
        {
            sum.x = sum_buf[0][threadIdx.x];
            sum.y = sum_buf[1][threadIdx.x];
            sum.z = sum_buf[2][threadIdx.x];
            sum.w = sum_buf[3][threadIdx.x];
            *reinterpret_cast<float4*>(&C[rowA * num_col + colB]) = sum;
        }
    }
}

/**
 * Software-pipelined SpMM (L1-oriented): same register double-buffering and
 * shuffle-broadcast pipeline as spmm_l2, but with one warp per row
 * (rowA = blockIdx.x * blockDim.y + threadIdx.y) and a block-wide pacing
 * mechanism: whenever any lane's next column index crosses the current
 * sync_colA window, the whole block barriers and advances the window by
 * sync_colA_step — presumably to keep warps reading nearby rows of B
 * together for cache reuse (TODO confirm intent).
 * Assumes blockDim.x == 32, num_col divisible by 4.
 *
 * NOTE(review): the early `return` on empty rows plus __syncthreads()
 * inside data-dependent loops is a barrier-divergence hazard — all warps in
 * the block must execute the same number of window advances; confirm the
 * inputs guarantee this.
 */
template<bool has_add>
__global__ void spmm_l1(
    const uint32_t* __restrict__ A_row_begin,
    const uint32_t* __restrict__ A_row_end,
    const uint32_t* __restrict__ A_col,
    const float* __restrict__ A_val,
    const float* __restrict__ B,
    float* __restrict__ C,
    uint32_t num_col,
    uint32_t sync_colA_step = 48)
{
    uint32_t rowA = blockIdx.x * blockDim.y + threadIdx.y;
    // Ballot trick: rebuild rowA as a provably warp-uniform register.
    rowA = __ballot_sync(0xffffffff, (rowA & (1 << threadIdx.x)) != 0);
    uint32_t offA_begin = A_row_begin[rowA];
    uint32_t offA_end = A_row_end[rowA];
    uint32_t colB = (blockIdx.y * blockDim.x + threadIdx.x) * 4;
    float4 sum = has_add ? *reinterpret_cast<const float4*>(&C[rowA * num_col + colB]) : float4{ 0.0f, 0.0f, 0.0f, 0.0f };
    const float4* B_gptr = reinterpret_cast<const float4*>(&B[colB]);
    const float4* B_tptr;
    if (offA_begin == offA_end) return;

    // Double-buffered per-lane staging of A nonzeros (current + next chunk).
    uint32_t colA_reg_buf;
    float valA_reg_buf;
    uint32_t colA_reg_buf_next;
    float valA_reg_buf_next;
    uint32_t offA = offA_begin & -32;   // align down to the 32-chunk start
    if (offA + threadIdx.x >= offA_begin && offA + threadIdx.x < offA_end)
    {
        colA_reg_buf = A_col[offA + threadIdx.x];
        valA_reg_buf = A_val[offA + threadIdx.x];
    }
    if (offA + 32 + threadIdx.x < offA_end)
    {
        colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
        valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
    }
    offA = offA_begin;
    float valA = 0.0f;
    float4 valB = { 0.0f, 0.0f, 0.0f, 0.0f };
    uint32_t colA;
    uint32_t sync_colA = sync_colA_step;   // current pacing window bound
    // Prologue: drain the (possibly partial) first aligned chunk.
    if (offA < (offA_begin & -32) + 32 && offA < offA_end)
    {
        colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
        valB = B_gptr[colA * (num_col / 4)];
        valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
        offA++;
        while (offA < (offA_begin & -32) + 32 && offA < offA_end)
        {
            colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
            sum.x += valA * valB.x;
            sum.y += valA * valB.y;
            sum.z += valA * valB.z;
            sum.w += valA * valB.w;
            // Block-wide pacing: advance the window until colA fits.
            while (__any_sync(0xffffffff, colA >= sync_colA))
            {
                sync_colA += sync_colA_step;
                __syncthreads();
            }
            valB = B_gptr[colA * (num_col / 4)];
            valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
            offA++;
        }
    }
    // Steady state: rotate in the prefetched chunk, prefetch the one after.
    offA = (offA_begin & -32) + 32;
    while (offA < offA_end)
    {
        colA_reg_buf = colA_reg_buf_next;
        valA_reg_buf = valA_reg_buf_next;
        if (offA + 32 + threadIdx.x < offA_end)
        {
            colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
            valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
        }
        #pragma unroll
        for (uint32_t i = 0; i < 32; i++)
        {
            if (offA >= offA_end)
            {
                break;
            }
            colA = __shfl_sync(0xffffffff, colA_reg_buf, i, 32);
            B_tptr = &B_gptr[colA * (num_col / 4)];
            sum.x += valA * valB.x;
            sum.y += valA * valB.y;
            sum.z += valA * valB.z;
            sum.w += valA * valB.w;
            // Block-wide pacing: advance the window until colA fits.
            while (__any_sync(0xffffffff, colA >= sync_colA))
            {
                sync_colA += sync_colA_step;
                __syncthreads();
            }
            valB = *B_tptr;
            valA = __shfl_sync(0xffffffff, valA_reg_buf, i, 32);
            offA++;
        }
    }
    // Epilogue: the pipeline holds one pending multiply — drain it.
    sum.x += valA * valB.x;
    sum.y += valA * valB.y;
    sum.z += valA * valB.z;
    sum.w += valA * valB.w;
    *reinterpret_cast<float4*>(&C[rowA * num_col + colB]) = sum;
}

/**
 * Host launcher for spmm_l2 (no accumulate): one block per sparse row,
 * DENSE_COL/128 blocks along grid.y (32 lanes x float4 = 128 columns each).
 *
 * Fix: the kernel is instantiated with TDY = 2 (each row's nonzeros split
 * into two threadIdx.y slices), but the previous launch used thread dims
 * (32, 1, 1) — threadIdx.y was always 0, so only the first half of every
 * row's nonzeros was ever processed.  The thread dims must match TDY.
 * SPARSE_COL is unused here; kept for interface compatibility.
 */
void lanuch_spmm_l2(uint32_t* A_row, uint32_t* A_col, const float* A_val, const float* B, float* C,
    uint32_t SPARSE_ROW, uint32_t SPARSE_COL, uint32_t DENSE_COL)
{
    constexpr uint32_t TDY = 2;              // must equal the kernel's TDY
    dim3 gridDims(SPARSE_ROW, DENSE_COL / 128, 1);
    dim3 threadDims(32, TDY, 1);             // was (32, 1, 1): dropped half of each row
    spmm_l2<false, TDY><<<gridDims, threadDims>>>(A_row, A_row + 1, A_col, A_val, B, C, DENSE_COL);
}

/**
 * Host launcher for spmm_l1 (no accumulate): 16 rows of A per block (one
 * warp each), DENSE_COL/128 blocks along grid.y (32 lanes x float4 = 128
 * columns each).  SPARSE_COL is unused; kept for interface compatibility.
 */
void lanuch_spmm_l1(uint32_t* A_row, uint32_t* A_col, const float* A_val, const float* B, float* C,
    uint32_t SPARSE_ROW, uint32_t SPARSE_COL, uint32_t DENSE_COL)
{
    const dim3 grid_dims(SPARSE_ROW / 16, DENSE_COL / 128, 1);
    const dim3 block_dims(32, 16, 1);   // one warp per row, 16 rows per block
    spmm_l1<false><<<grid_dims, block_dims>>>(A_row, A_row + 1, A_col, A_val, B, C, DENSE_COL);
}
};

/**
 * Host dispatcher: picks an Alternative3/Alternative4 SpMM kernel based on
 * the dense feature width (dst.sizes[1]) and launches it.  Dynamic shared
 * memory is reserved for edge weights only when they are present.
 * row_index is only forwarded to the sorted variant (narrow features).
 */
template <typename F, typename scalar_t>
void _spmm(const uint *row_index, const uint* row_offset, const uint* col_indx, const scalar_t* edge_weight, 
           const acd::TensorInfo<scalar_t, uint> src, acd::TensorInfo<scalar_t, uint> dst, const uint nedge)
{
  const uint _THD = THD;
  const uint coop_size = 128;  // threads cooperating on one row (template arg)

  uint by = _THD / coop_size;//1;
  uint col_blocks = CEIL(dst.sizes[1], coop_size);
  uint row_blocks = CEIL(dst.sizes[0], by);

  // Dynamic shared memory: one weight slot per thread when weighted.
  uint sm_size = 0;//SH_SKEW(bx*by)*sizeof(uint);
  if (edge_weight != nullptr) sm_size += coop_size*by*sizeof(scalar_t);
  if (dst.sizes[1] < 64)
  {
    // Narrow features: sorted-row kernel, 1-D thread block.
    CUDA_LAUNCH_CHECK(Alternative3::_spmm_kernel_sorted<F, scalar_t, coop_size>
                    // <<<dim3(col_blocks, row_blocks), dim3(bx,by)>>>
                    <<<dim3(row_blocks, col_blocks), _THD, sm_size>>>
                    // (row_offset, col_indx, edge_weight, src, dst, nedge, row_per_block));
                    (row_index,row_offset, col_indx, edge_weight, src, dst, nedge));//, Alternative1::scheduler));
  } else if(dst.sizes[1] < 128) {
    // Medium features: 2x-unrolled kernel, half the column blocks.
    CUDA_LAUNCH_CHECK(Alternative3::_spmm_kernel_unroll<F, scalar_t, coop_size>
                // <<<dim3(col_blocks, row_blocks), dim3(bx,by)>>>
                <<<dim3(row_blocks, CEIL(col_blocks, 2)), dim3(coop_size,by), sm_size>>>
                // (row_offset, col_indx, edge_weight, src, dst, nedge, row_per_block));
                (row_offset, col_indx, edge_weight, src, dst, nedge));//, Alternative1::scheduler)); 
  } else {
    // Wide features: 4x-unrolled kernel, a quarter of the column blocks.
    CUDA_LAUNCH_CHECK(Alternative4::_spmm_kernel_unroll<F, scalar_t, coop_size>
                    // <<<dim3(col_blocks, row_blocks), dim3(bx,by)>>>
                    <<<dim3(row_blocks, CEIL(col_blocks, 4)), dim3(coop_size, by), sm_size>>>
                    // (row_offset, col_indx, edge_weight, src, dst, nedge, row_per_block));
                    (row_offset, col_indx, edge_weight, src, dst, nedge)); 
  }
}

/**
 * Host launcher for the Alternative5 veclib SpMM: slices the dense feature
 * dimension into dn_tile_width-column tiles and issues one kernel launch per
 * tile (the last tile may be narrower; `vlen` carries its true width).
 *
 * Cleanups vs. previous revision: removed the unused `sp_tile_width`
 * constant and fixed the loop-body indentation.
 */
template <typename F, typename index_t, typename scalar_t, 
          uint sp_grp, uint dn_grp, 
          Alternative5::BufferType spbufT, Alternative5::BufferType dnbufT, 
          uint sp_vlen, uint dn_vlen, uint sp_iters, uint dn_iters>
void _spmm_llvlib(const index_t *row_index, 
                  const index_t* row_offset, 
                  const index_t* col_indx, 
                  const scalar_t* edge_weight, 
                  acd::TensorInfo<scalar_t, index_t> src, 
                  acd::TensorInfo<scalar_t, index_t> dst, 
                  const uint nedge)
{ 
  // Fewer threads when wide vector loads increase per-thread register use.
  constexpr uint thd = dn_vlen >= 2 ? 128 : 256;
  constexpr uint dn_tile_width = dn_grp*dn_vlen*dn_iters;

  typedef llvlib::CoopConfig<thd/sp_grp, sp_grp> SpConf;
  typedef llvlib::CoopConfig<thd/dn_grp, dn_grp> DnConf;

  uint row_blocks = CEIL(dst.sizes[0], SpConf::ydim);
  uint col_blocks = CEIL(dst.sizes[1], dn_tile_width);
  scalar_t * src_base = src.data;
  scalar_t * dst_base = dst.data;
  index_t ld_src = src.strides[0];
  index_t ld_dst = dst.strides[0];
  index_t nv = src.sizes[0];
  index_t vlen_total = src.sizes[1];

  // One launch per dense-column tile.
  for (uint i=0; i<col_blocks; i++){
    scalar_t *src_v = src_base + i * dn_tile_width;
    scalar_t *dst_v = dst_base + i * dn_tile_width;
    index_t vlen = MIN(dn_tile_width, vlen_total - i * dn_tile_width);
    CUDA_LAUNCH_CHECK(Alternative5::spmm_kernel_veclib<F, index_t, scalar_t, SpConf, DnConf, spbufT, dnbufT, sp_vlen, dn_vlen, sp_iters, dn_iters>
                      <<<dim3(row_blocks, 1), thd>>>
                      (row_index, row_offset, col_indx, edge_weight, src_v, ld_src, dst_v, ld_dst, nv, nedge, vlen));
  }
}

template <typename F, typename index_t, typename scalar_t,
          SpWalk spwalk, SpLoad spload, DnCompute dn_compute,
          Reduction reduction, uint sp_warp, uint dn_warp, 
          uint spipv, uint dnipv, uint dn_unroll>
uint _spmm_manscript_buffer(const index_t *row_index, 
                           const index_t* row_offset, 
                           const index_t* col_indx, 
                           const scalar_t* edge_weight, 
                           const index_t sizes[],
                           const index_t strides[],
                           const index_t nedge,
                           index_t **cntrs)
{
  uint col_blocks = CEIL(sizes[1], dn_warp*dnipv*dn_unroll);
  H_ERR(cudaMalloc(cntrs, sizeof(index_t)*col_blocks));
  // do all the kernel configure in this preprocess function 
  // cudaFuncSetCacheConfig(manscript::spmm_l1, cudaFuncCachePreferL1);
  return sizeof(index_t)*col_blocks;
}

/**
 * Host dispatcher for the manscript SpMM kernels: selects pull / push /
 * hand-optimized (l1/l2) paths from the compile-time strategy knobs and
 * launches with 256-thread blocks partitioned by sp_warp / dn_warp.
 *
 * Fixes vs. previous revision: the push branch referenced `dncomp`, which
 * names nothing here — the DnCompute parameter is `dn_compute` (used
 * correctly in the pull branch); removed the unused local `_THD`.
 */
template <typename F, typename index_t, typename scalar_t,
          SpWalk spwalk, SpLoad spload, DnCompute dn_compute,
          Reduction reduction, uint sp_warp, uint dn_warp, 
          uint spipv, uint dnipv, uint dn_unroll>
void _spmm_manscript(const index_t *row_index, 
                     const index_t* row_offset, 
                     const index_t* col_indx, 
                     const scalar_t* edge_weight, 
                     const scalar_t *src,
                     const uint ld_src,
                     scalar_t *dst,
                     const uint ld_dst,
                     const index_t nv, const index_t ne, const index_t vlen,
                     index_t *cntrs)
{
  typedef llvlib::CoopConfig<256/sp_warp, sp_warp> SpConf;
  typedef llvlib::CoopConfig<256/dn_warp, dn_warp> DnConf;

  if (spwalk <= pull_dyn)
  {
    // Row-parallel (pull) family: one sparse y-group per destination row.
    uint col_blocks = CEIL(vlen, DnConf::xdim*dnipv*dn_unroll);
    uint row_blocks = CEIL(nv, SpConf::ydim);

    auto kernel = manscript::spmm_kernel_pull<F, index_t, scalar_t, 
                                              spwalk, spload, dn_compute, reduction,
                                              SpConf, DnConf, spipv, dnipv, dn_unroll>;

    CUDA_LAUNCH_CHECK(kernel<<<dim3(row_blocks, col_blocks), THD>>>(
                      row_index,
                      row_offset, col_indx, edge_weight, 
                      src, ld_src, dst, ld_dst,
                      nv, ne, vlen));

  }
  else if (spwalk == push_seq)
  {
    uint col_blocks = CEIL(vlen, DnConf::xdim*dnipv*dn_unroll);
    // NOTE(review): edges partitioned by SpConf::xdim here, but the kernel
    // strides by gridDim.x*SpConf::ydim — confirm xdim (not ydim) is intended.
    uint row_blocks = CEIL(ne, SpConf::xdim);

    CUDA_LAUNCH_CHECK(manscript::spmm_kernel_push<F, index_t, scalar_t, 
                                                  spwalk, spload, dn_compute, reduction, 
                                                  SpConf, DnConf, spipv, dnipv, dn_unroll>
                      <<<dim3(row_blocks, col_blocks), THD>>>(
                      row_offset, col_indx, edge_weight, 
                      src, ld_src, dst, ld_dst,
                      nv, ne, vlen));
  } 
  else if ( spwalk == l1_opt ) 
  {
    // NOTE(review): the l1/l2 launchers are float-only — this branch only
    // type-checks when scalar_t == float.
    manscript::lanuch_spmm_l1(reinterpret_cast<uint32_t*>(const_cast<index_t*>(row_offset)), reinterpret_cast<uint32_t*>(const_cast<index_t*>(col_indx)), edge_weight, src, dst, ld_src, ld_dst, vlen);
  } 
  else if ( spwalk == l2_opt ) 
  {
    manscript::lanuch_spmm_l2(reinterpret_cast<uint32_t*>(const_cast<index_t*>(row_offset)), reinterpret_cast<uint32_t*>(const_cast<index_t*>(col_indx)), edge_weight, src, dst, ld_src, ld_dst, vlen);
  }
}

/**
 * Forward declaration of the top-level launch entry point (defined in the
 * implementing translation unit): selects and launches a concrete SpMM
 * kernel for the given CSR graph and dense operands.
 */
template <typename F, typename index_t, typename scalar_t>
void spmm_kernel_launch(const index_t *row_index, 
                        const index_t* row_offset, 
                        const index_t* col_indx, 
                        const scalar_t* edge_weight, 
                        const acd::TensorInfo<scalar_t, index_t> src, 
                        acd::TensorInfo<scalar_t, index_t> dst, 
                        const index_t nedge);

/**
 * Launches the backward SpMM kernel over a 16x16 thread-block tiling of the
 * destination (grid.x covers dst rows, grid.y covers dst columns).
 */
template <typename F, typename scalar_t>
void _spmm_backward(const uint* row_offset, const uint* col_indx, const scalar_t* edge_weight, 
           const acd::TensorInfo<scalar_t, uint> src, acd::TensorInfo<scalar_t, uint> dst, const uint nedge)
{
  const uint tile_x = 16;   // threads along dst rows
  const uint tile_y = 16;   // threads along dst columns
  const uint grid_rows = (dst.sizes[0] + tile_x - 1) / tile_x;
  const uint grid_cols = (dst.sizes[1] + tile_y - 1) / tile_y;

  CUDA_SOLE_KERNEL_CHECK(Base::_spmm_backward_kernel<F, scalar_t>
                         <<<dim3(grid_rows, grid_cols), dim3(tile_x, tile_y)>>>
                         (row_offset, col_indx, edge_weight, src, dst, nedge));
}

/**
 * "Fused" linear + SpMM forward: despite the name, currently issues two
 * back-to-back kernels —
 *   1. gemm_kernel:        inter = feature transform of src by weight
 *   2. Base::_spmm_kernel: dst   = sparse aggregation of inter
 * `bias` is not consumed by this two-kernel path (it was only used by the
 * commented-out fused kernel); kept for interface compatibility.
 *
 * Cleanups vs. previous revision: removed the unused `rows_per_block`
 * computation (its result was never read) and the dead fused-kernel launch.
 */
template <typename F, typename scalar_t>
void _spmm_linear_fused(const uint* row_offset, const uint* col_indx, const scalar_t* edge_weight,
                        const acd::TensorInfo<scalar_t, uint> src,
                        const acd::TensorInfo<scalar_t, uint> weight,
                        const scalar_t* bias,
                        acd::TensorInfo<scalar_t, uint> inter,
                        acd::TensorInfo<scalar_t, uint> dst,
                        const uint nedge)
{
  uint bx = 256;
  uint by = 1;
  // Each block covers bx*2 columns; grids sized from the tensor extents.
  uint dn_col = (inter.sizes[1]+bx*2-1) / (bx*2);
  uint sp_col = (dst.sizes[1]+bx*2-1) / (bx*2);
  uint dn_row = 120 / dn_col;   // NOTE(review): 120 looks like an SM-count heuristic — confirm
  uint sp_row = (dst.sizes[0]+by-1) / by;

  CUDA_LAUNCH_CHECK(gemm_kernel<<<dim3(dn_col, dn_row), dim3(bx*2,by*2)>>>(weight, src, inter));
  CUDA_LAUNCH_CHECK(Base::_spmm_kernel<F, scalar_t>
                    <<<dim3(sp_col, sp_row), dim3(bx,by)>>>
                    (row_offset, col_indx, edge_weight, inter, dst, nedge));
}

/**
 * Reverse-order fused linear + SpMM: launches the single fused kernel from
 * Alternative1 (aggregate first, transform second) on a fixed grid of 240
 * blocks x 256 threads, with one scalar per thread of dynamic shared memory.
 *
 * Cleanup vs. previous revision: removed the unused locals `bx` and `by`.
 */
template <typename F, typename scalar_t>
void _spmm_linear_fused_reverse(const uint* row_offset, const uint* col_indx, const scalar_t* edge_weight,
                        const acd::TensorInfo<scalar_t, uint> src, 
                        const acd::TensorInfo<scalar_t, uint> weight, 
                        acd::TensorInfo<scalar_t, uint> inter,
                        acd::TensorInfo<scalar_t, uint> dst,
                        const uint nedge)
{
  uint nt = 256;     // threads per block
  uint one_b = 240;  // fixed block count for the persistent-style launch
  CUDA_SOLE_KERNEL_CHECK(Alternative1::_fused_linear_spmm_kernel_reversed<F, scalar_t>\
                         <<<one_b, nt, nt*sizeof(scalar_t)>>>\
                         (row_offset, col_indx, edge_weight,\
                          src, weight, inter, dst, nedge));
}



/**
 * Interface specification for the functor template parameter F used by the
 * SpMM kernels above.  Concrete functors must provide:
 *  - binary(src, weight):       combine a source element with an edge weight
 *  - reduce(dst, val):          non-atomic aggregation into *dst
 *  - reduceAtomic(dst, val):    atomic aggregation into *dst
 *  - init_val:                  identity element used to reset accumulators
 *
 * Fix: the stubs previously had empty bodies while declaring a scalar_t
 * return — flowing off the end of a non-void function is undefined behavior
 * if ever called; they now return a defined placeholder value.
 */
template<typename scalar_t>
struct Func{
  static __device__ __forceinline__ scalar_t binary(scalar_t, scalar_t){ return (scalar_t)0; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t*, scalar_t){ return (scalar_t)0; }
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t*, scalar_t){ return (scalar_t)0; }
  static constexpr scalar_t init_val = (scalar_t)0;
};

#endif