#ifndef _2D_PARTITIONED_CUH__
#define _2D_PARTITIONED_CUH__
#include <cuda.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include "cuda/cuda_utils.cuh"
#include "config.hxx"
// TODO test required on stride {chunk_offset*SM_VLEN, chunk_size, SM_VLEN} 

namespace acd = at::cuda::detail;

namespace Base{

template<class F, class scalar_t>
__global__ void _dense_kernel(
  const int *__restrict__ block_offset_sb,
  const scalar_t* __restrict__ dense_mat_data,
  const acd::TensorInfo<scalar_t, int> src_tensor,
  scalar_t * __restrict__ stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim, 
  const int64_t _bsz)
{
  // Dense chunk product: each chunk holds a BSZ x BSZ dense weight tile that
  // is multiplied against source rows [0, BSZ) of src_tensor and accumulated
  // into the chunk's region of the stream buffer.  Several CTAs cooperate on
  // one chunk; a CTA's (x, y) slot inside the chunk tiles the
  // (feature-column, output-row) plane, so every thread owns a disjoint set
  // of (row, column) cells and the `+=` below needs no atomics.
  const int ctasPerChunk = BSZ*BSZ / (blockDim.x*blockDim.y);
  const int ctasPerRow   = BSZ / blockDim.x;
  const int ctasPerCol   = ctasPerChunk / ctasPerRow;

  const int chunk    = blockIdx.x / ctasPerChunk;   // which dense chunk this CTA serves
  const int ctaInChk = blockIdx.x % ctasPerChunk;   // CTA slot inside the chunk

  const int ctaX = ctaInChk % ctasPerRow;
  const int ctaY = ctaInChk / ctasPerRow;

  const int strideX = ctasPerRow*blockDim.x;   // combined feature-dim stride of all CTAs
  const int strideY = ctasPerCol*blockDim.y;   // combined row-dim stride of all CTAs
  const int gx = ctaX*blockDim.x + threadIdx.x;
  const int gy = ctaY*blockDim.y + threadIdx.y;

  const int sbBase = block_offset_sb[chunk];   // stream-buffer row base of this chunk
  const int wtBase = BSZ*BSZ*chunk;            // flat offset of this chunk's weight tile

  for (int k = 0; k < BSZ; ++k)                // reduction dimension of the tile product
  {
    for (int y = gy; y < BSZ; y += strideY)
    {
      for (int x = gx + vlen_base; x < vlen_lim; x += strideX)
      {
        const int srcInd = k*src_tensor.strides[0] + x*src_tensor.strides[1];
        const int wtInd  = wtBase + y + BSZ*k;               // y is contiguous inside the tile
        const int dstInd = SBIDX(sbBase, y, x - vlen_base);  // sb columns are vlen_base-relative

        stream_buffer[dstInd] += dense_mat_data[wtInd] * src_tensor.data[srcInd];
      }
    }
  }
}

// Heavy-block SpMM.  One CTA per heavy block (blockIdx.x); blockIdx.y tiles
// the feature dimension in SM_VLEN-wide slices.  BSZ source rows are staged
// in shared memory, then the tiled edge stream is consumed; its encoding is
//   vtx == -1         : padding, skip
//   vtx <  0 (not -1) : run separator -- flush, then HASH(vtx) gives the new dst
//   vtx >= 0          : local source row contributing to the current dst
// Per-thread partial sums live in registers and are flushed to the global
// stream buffer with atomic reductions at run boundaries.
template<class F, class scalar_t>
__global__ void _heavy_kernel(
  const int* __restrict__ block_offset_sdat, 
  const int* __restrict__ block_offset_edat, 
  const int* __restrict__ block_offset_sb,
  const int* __restrict__ tiled_edat,
  const scalar_t* __restrict__ tiled_wdat,
  const acd::TensorInfo<scalar_t,int> src_tensor,
  scalar_t * stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const int nvertex = src_tensor.sizes[0];
  const int vlen    = src_tensor.sizes[1];

  const int Tx     = threadIdx.x;
  const int Bx     = blockIdx.x;
  const int By     = blockIdx.y;
  const int Stride = blockDim.x;
  
  const bool weighted = (tiled_wdat != nullptr);

  SharedMem<scalar_t> sm_src; // staged source tile, stride {SM_VLEN, BSZ}

  int src_base = block_offset_sdat[Bx]*BSZ;   // first global source row of this block
  int sb_base  = block_offset_sb[Bx];         // stream-buffer row base of this block

  for (int _it=vlen_base+By*SM_VLEN; _it<vlen_lim; _it+=gridDim.y*SM_VLEN)
  {
    const int vlim = MIN(SM_VLEN, vlen-_it);  // valid columns in this tile
    // Barrier before restaging: threads still reading sm_src in the previous
    // iteration's edge loop must arrive here before it is overwritten.
    __syncthreads();
    for (int i=Tx; i<BSZ; i+=Stride)
    {
      if ( src_base+i<nvertex ) {
        for (int j=0; j<vlim; ++j)
        {
          int srcInd = (src_base+i)*src_tensor.strides[0] + (j+_it)*src_tensor.strides[1];
          sm_src[SMIDX(i,j)] = src_tensor.data[srcInd];
        }
      }
    }
    __syncthreads();

    int cur_dst = -1;
    scalar_t local_reduce[SM_VLEN];
    CLEAN_REG(local_reduce, SM_VLEN);
    for (int i=block_offset_edat[Bx]+Tx; i<block_offset_edat[Bx+1]; i+=Stride)
    {
      int vtx = tiled_edat[i];
      if (vtx == -1) continue;   // padding entry
      if (vtx < 0) {
        // Run separator: flush the finished destination, start the next one.
        if (cur_dst != -1) {
          for (int j=0; j<vlim; ++j)
          {
            F::reduceAtomic(stream_buffer+SBIDX(sb_base, cur_dst, j+_it-vlen_base), local_reduce[j]);
          }
        }
        cur_dst = HASH(vtx);
        CLEAN_REG(local_reduce, SM_VLEN);
      } else {
        for (int j=0; j<vlim; ++j)
        {
          scalar_t wsrc = (weighted)? F::binary(tiled_wdat[i], sm_src[SMIDX(vtx,j)]) : sm_src[SMIDX(vtx,j)];
          F::reduce(&local_reduce[j], wsrc);
        }
      }
      // BUGFIX: removed a __syncthreads() that used to end each iteration.
      // The loop's trip count differs per thread (it starts at offset+Tx)
      // and the `continue` above skipped the barrier, so it was executed by
      // divergent subsets of the block -- undefined behavior and a deadlock
      // hazard.  No barrier is needed here: the loop only touches registers,
      // global atomics, and read-only sm_src, which is re-staged only after
      // the block-wide barrier at the top of the tile loop.
    }
    // Flush the last open destination run of this tile.
    if (cur_dst != -1) 
      for (int j=0; j<vlim; ++j)
        F::reduceAtomic(stream_buffer+SBIDX(sb_base, cur_dst, j+_it-vlen_base), local_reduce[j]);
  }
  
}

// Scatter "light" edges into the stream buffer.
// Grid: blockIdx.x = light block id, blockIdx.y = slice of the feature range.
// The block stages its BSZ source rows into shared memory SM_VLEN columns at
// a time, then writes each edge's (optionally weighted) source values with a
// plain store (no reduction) -- presumably every edge owns an exclusive
// stream-buffer slot; verify against the host-side packing of dst_offset_sb.
template <class F, class scalar_t>
__global__ void _light_kernel(
  const int* __restrict__ block_offset_elst,   // [nblocks+1] edge range per light block
  const short* __restrict__ src_list,          // per-edge local source row, -1 = padding
  const scalar_t* __restrict__ weight_list,    // per-edge weights, may be nullptr
  const int* __restrict__ dst_offset_sb,       // stream-buffer slot base per group of 8 edges
  const acd::TensorInfo<scalar_t,int> src_tensor,
  scalar_t* stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const int nvertex = src_tensor.sizes[0];
  const int vlen    = src_tensor.sizes[1];

  const int Tx     = threadIdx.x;
  const int Bx     = blockIdx.x;
  const int By     = blockIdx.y;
  const int Stride = blockDim.x;

  const bool weighted = (weight_list != nullptr);

  SharedMem<scalar_t> sm_src;   // staged source tile, stride {SM_VLEN, BSZ}

  int src_base = Bx*BSZ; 
  // if (Tx == 0) printf("l[%d] %d~%d\n",Bx, block_offset_elst[Bx], block_offset_elst[Bx+1]);

  for (int _it=vlen_base+By*SM_VLEN; _it<vlen_lim; _it+=SM_VLEN*gridDim.y)
  {
    const int vlim = MIN(SM_VLEN, vlen-_it);   // valid columns in this tile
    for (int i=Tx; i<BSZ; i+=Stride)
    {
      if (src_base+i<nvertex) {
        for (int j=0; j<vlim; ++j)
        {
          int srcInd = (src_base+i)*src_tensor.strides[0] + (j+_it)*src_tensor.strides[1];
          sm_src[SMIDX(i,j)] = src_tensor.data[srcInd];
        }
      }
    }
    __syncthreads();

    for (int i=block_offset_elst[Bx]+Tx; i<block_offset_elst[Bx+1]; i+=Stride)
    {
      int local_src = src_list[i];
      // NOTE(review): the slot sub-index uses (Tx&7), which equals (i&7)
      // only if block_offset_elst[Bx] and blockDim.x are both multiples of 8
      // (the Alter2 variant indexes by the in-segment edge id instead).
      // TODO confirm that invariant holds in the host-side packing.
      int dst_sb    = dst_offset_sb[i>>3] + (Tx&7);

      if (local_src != -1) 
      {
        for (int j=0; j<vlim; ++j)
        {
          scalar_t wsrc = (weighted) ? F::binary(weight_list[i], sm_src[SMIDX(local_src,j)]) : sm_src[SMIDX(local_src,j)];
          // if (Tx==0 && Bx==0)printf("t[%d, %d] dump edge %d->%d to sb[%d] i=%d,j=%d,_it=%d,vlen_base=%d\n", Bx, Tx, local_src, dst_sb, SBIDX(0,dst_sb,j+_it-vlen_base), i, j, _it, vlen_base);
          stream_buffer[SBIDX(0,dst_sb,j+_it-vlen_base)] = wsrc;
        }
      }
    }
    __syncthreads();
  }
}

// Gather stream-buffer rows back into the destination tensor.
// Grid: blockIdx.x = destination block (BSZ output rows), blockIdx.y = slice
// of the feature range.  Each stream-buffer row listed for this block is
// reduced into a shared BSZ x SM_VLEN accumulation tile at position
// agg_pos[row], then the tile is stored (overwriting, not accumulating) into
// dst_tensor.
template <class F, class scalar_t>
__global__ void _aggregate_kernel(
  const int* __restrict__ block_offset_dlst,   // [nblocks+1] sb-row range per dst block
  const short* __restrict__ agg_pos,           // local output row per sb row, <0 = inactive
  const scalar_t* stream_buffer,
  acd::TensorInfo<scalar_t,int> dst_tensor,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  // attention! dst_tensor is transposed!
  const int nvertex = dst_tensor.sizes[0];
  const int vlen    = dst_tensor.sizes[1];

  const int Tx     = threadIdx.x;
  const int Bx     = blockIdx.x;
  const int By     = blockIdx.y;
  const int Stride = blockDim.x;

  const int base = Bx*BSZ;
  SharedMem<scalar_t> sm_dst;   // BSZ x SM_VLEN accumulation tile

  for (int _it=vlen_base+By*SM_VLEN; _it<vlen_lim; _it+=gridDim.y*SM_VLEN)
  {
    const int vlim = MIN(SM_VLEN, vlen-_it);
    __syncthreads();
    // BUGFIX: clear the entire BSZ*SM_VLEN tile.  Previously only the first
    // BSZ entries were zeroed while sm_dst[SMIDX(i,j)] over the full tile is
    // read and written below, leaving most of the tile uninitialized.
    for (int i=Tx; i<BSZ*SM_VLEN; i+=Stride)
    {
      sm_dst[i] = (scalar_t) 0;
    }
    __syncthreads();

    for (int i=block_offset_dlst[Bx]+Tx; i<block_offset_dlst[Bx+1]; i+=Stride)
    {
      for (int j=0; j<vlim; ++j)
      {
        // Skip untouched (still-zero) slots and inactive rows.
        if(stream_buffer[SBIDX(0,i,j+_it-vlen_base)] != 0 && agg_pos[i]>=0)
        {
          F::reduceAtomic(&sm_dst[SMIDX(agg_pos[i], j)], stream_buffer[SBIDX(0,i,j+_it-vlen_base)]);
        }
      }
    }
    __syncthreads();

    for (int i=Tx; i<BSZ; i+=Stride)
    {
      if (base+i < nvertex)
        // BUGFIX: bound the store by vlim (valid columns of this tile), not
        // SM_VLEN; the tail tile previously stored columns past vlen of
        // dst_tensor (out-of-range dstInd / garbage values).
        for (int j=0; j<vlim; ++j)
        {
          int dstInd = (base+i)*dst_tensor.strides[0] + (j+_it)*dst_tensor.strides[1];
          dst_tensor.data[dstInd] = sm_dst[SMIDX(i,j)];
        } 
    }
  }
}



}; // namespace base

namespace Alter1 {
  // this alternative disables shared mem for better reuse of index access and better concurrency
// Shared-memory-free heavy kernel.  A warp cooperates on one edge run: the
// lane id (Tx) selects a feature column inside 32-wide slabs, the warp id
// (Wx) strides over edge rows, and source values are read straight from
// global memory for better index reuse and concurrency (see namespace note).
// Edge stream encoding is the same as Base::_heavy_kernel:
// -1 = padding, other negatives = run separator (HASH gives the new dst),
// non-negative = local source row.
template<class F, class scalar_t>
__global__ void _heavy_kernel(
  const int* __restrict__ block_offset_sdat, 
  const int* __restrict__ block_offset_edat, 
  const int* __restrict__ block_offset_sb,
  const int* __restrict__ tiled_edat,
  const scalar_t* __restrict__ tiled_wdat,
  const acd::TensorInfo<scalar_t,int> src_tensor,
  scalar_t * stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const int nvertex = src_tensor.sizes[0];
  const int vlen    = src_tensor.sizes[1];

  const int Tx     = threadIdx.x & 31;        // lane: feature column within a slab
  const int Wx     = threadIdx.x >> 5;        // warp id: edge-row walker
  const int Bx     = blockIdx.x;
  const int Stride_plane = blockDim.x;
  const int Stride_n = blockDim.x >> 5;       // number of warps
  const int Stride_v = 32;                    // columns per slab
  
  const bool weighted = (tiled_wdat != nullptr);

  int src_base = block_offset_sdat[Bx]*BSZ;   // first global source row of this block
  int sb_base  = block_offset_sb[Bx];         // stream-buffer row base of this block

  for (int _it=vlen_base; _it<vlen_lim; _it+=SM_VLEN*32)
  {
    // Number of 32-wide slabs this pass covers (clamped per-column below).
    const int vlim = MIN(SM_VLEN, CEIL(vlen_lim-_it, 32));

    int cur_dst = -1;
    scalar_t local_reduce[SM_VLEN];
    CLEAN_REG(local_reduce, SM_VLEN);

    for (int lane=Wx; lane<Stride_plane; lane+=Stride_n)
    for (int i=block_offset_edat[Bx]+lane; i<block_offset_edat[Bx+1]; i+=Stride_plane)
    {
      int vtx = tiled_edat[i];
      if (vtx == -1) continue;   // padding entry
      if (vtx < 0) {
        // Run separator: flush the finished destination, start the next one.
        if (cur_dst != -1) {
          for (int j=0; j<vlim; ++j)
          {
            if (j*Stride_v+Tx+_it >= vlen_lim) continue;
            F::reduceAtomic(stream_buffer+SBIDX(sb_base, cur_dst, j*Stride_v+Tx+_it-vlen_base), local_reduce[j]);
          }
        }
        cur_dst = HASH(vtx);
        CLEAN_REG(local_reduce, SM_VLEN);
      } else {
        for (int j=0; j<vlim; ++j)
        {
          if (j*Stride_v+Tx+_it >= vlen_lim) continue;
          int srcInd = (src_base+vtx)*src_tensor.strides[0] + (j*Stride_v+Tx+_it)*src_tensor.strides[1];
          scalar_t wsrc = (weighted)? F::binary(tiled_wdat[i], src_tensor.data[srcInd]) : src_tensor.data[srcInd];
          F::reduce(&local_reduce[j], wsrc);
        }
      }
      // BUGFIX: removed a __syncthreads() that used to end each iteration.
      // This kernel uses no shared memory, and the barrier sat in divergent
      // control flow (per-thread trip counts differ and `continue` skipped
      // it) -- undefined behavior and a deadlock hazard.
    }
    if (cur_dst != -1) 
      for (int j=0; j<vlim; ++j)
      {
        // BUGFIX: apply the same column bound as the in-loop flush; the tail
        // flush previously wrote columns at and past vlen_lim.
        if (j*Stride_v+Tx+_it >= vlen_lim) continue;
        F::reduceAtomic(stream_buffer+SBIDX(sb_base, cur_dst, j*Stride_v+Tx+_it-vlen_base), local_reduce[j]);
      }
  }
  
}

// Shared-memory-free light kernel: a whole warp processes one edge at a time
// (all 32 lanes share Wx and hence the same edge index i), with each lane
// writing a different feature column (Tx within 32-wide slabs) by a plain
// store -- presumably each edge owns an exclusive stream-buffer slot.
template <class F, class scalar_t>
__global__ void _light_kernel(
  const int* __restrict__ block_offset_elst,   // [nblocks+1] edge range per light block
  const short* __restrict__ src_list,          // per-edge local source row, -1 = padding
  const scalar_t* __restrict__ weight_list,    // per-edge weights, may be nullptr
  const int* __restrict__ dst_offset_sb,       // stream-buffer slot base per group of 8 edges
  const acd::TensorInfo<scalar_t,int> src_tensor,
  scalar_t* stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const int nvertex = src_tensor.sizes[0];
  const int vlen    = src_tensor.sizes[1];

  const int Tx     = threadIdx.x & 31;   // lane: feature column within a slab
  const int Wx     = threadIdx.x >> 5;   // warp id: edge walker
  const int Bx     = blockIdx.x;
  const int Stride_n = blockDim.x >> 5;  // number of warps
  const int Stride_v = 32;               // columns per slab

  const bool weighted = (weight_list != nullptr);

  // SharedMem<scalar_t> sm_src;

  int src_base = Bx*BSZ; 
  // if (Tx == 0) printf("l[%d] %d~%d\n",Bx, block_offset_elst[Bx], block_offset_elst[Bx+1]);

  for (int _it=vlen_base; _it<vlen_lim; _it+=SM_VLEN*32)
  {
    // Number of 32-wide slabs this pass covers (clamped per-column below).
    const int vlim = MIN(SM_VLEN, CEIL(vlen_lim-_it, 32));

    for (int i=block_offset_elst[Bx]+Wx; i<block_offset_elst[Bx+1]; i+=Stride_n)
    {
      int local_src = src_list[i];
      // NOTE(review): (Wx&7) is used as the slot sub-index, which equals
      // (i&7) only if block_offset_elst[Bx] and Stride_n are both multiples
      // of 8 (the Alter2 variant indexes by the in-segment edge id instead).
      // TODO confirm that invariant holds in the host-side packing.
      int dst_sb    = dst_offset_sb[i>>3] + (Wx&7);

      if (local_src != -1) 
      {
        // # pragma unroll 
        for (int j=0; j<vlim; ++j)
        {
          // Columns grow monotonically with j, so break (not continue) is safe.
          if (j*Stride_v+Tx+_it >= vlen_lim) break;
          int srcInd = src_tensor.strides[0]*(src_base+local_src) + src_tensor.strides[1]*(j*Stride_v+Tx+_it);
          scalar_t wsrc = (weighted) ? F::binary(weight_list[i], src_tensor.data[srcInd]) : src_tensor.data[srcInd];
          // if (Tx==0 && Bx==0)printf("t[%d, %d] dump edge %d->%d to sb[%d] i=%d,j=%d,_it=%d,vlen_base=%d\n", Bx, Tx, local_src, dst_sb, SBIDX(0,dst_sb,j+_it-vlen_base), i, j, _it, vlen_base);
          stream_buffer[SBIDX(0, dst_sb, j*Stride_v+Tx+_it-vlen_base)] = wsrc;
        }
      }
    }
    // __syncthreads();
  }
}


// Shared-memory-free aggregation: the lane id walks feature columns inside
// 32-wide slabs, the warp id walks the stream-buffer rows owned by this
// block, and contributions are merged into dst_tensor with atomic
// reductions.  Note: dst_tensor is transposed (same as the Base variant).
template <class F, class scalar_t>
__global__ void _aggregate_kernel(
  const int* __restrict__ block_offset_dlst,
  const short* __restrict__ agg_pos,
  const scalar_t* stream_buffer,
  acd::TensorInfo<scalar_t,int> dst_tensor,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const int lane    = threadIdx.x & 31;   // feature-column offset inside a slab
  const int warp    = threadIdx.x >> 5;   // row walker
  const int nWarps  = blockDim.x >> 5;
  const int colStep = 32;                 // columns per slab

  const int rowBase  = blockIdx.x * BSZ;                 // first output row of this block
  const int rowBegin = block_offset_dlst[blockIdx.x];    // sb-row range assigned here
  const int rowEnd   = block_offset_dlst[blockIdx.x + 1];

  for (int colBase = vlen_base; colBase < vlen_lim; colBase += SM_VLEN * 32)
  {
    const int nSlabs = MIN(SM_VLEN, CEIL(vlen_lim - colBase, 32));

    for (int row = rowBegin + warp; row < rowEnd; row += nWarps)
    {
      for (int s = 0; s < nSlabs; ++s)
      {
        const int col = s * colStep + lane + colBase;    // absolute feature column
        if (col >= vlen_lim || agg_pos[row] < 0)
          continue;                                      // out of range or inactive row
        const scalar_t v = stream_buffer[SBIDX(0, row, col - vlen_base)];
        if (v == 0)
          continue;                                      // untouched slots stay zero
        const int dstInd = (rowBase + agg_pos[row]) * dst_tensor.strides[0]
                         + col * dst_tensor.strides[1];
        F::reduceAtomic(dst_tensor.data + dstInd, v);
      }
    }
  }
}

};

namespace Alter2{
/**
 * The main modification adds another block dimension to parallelize the
 * feature processing in a coarse-grained pattern.
 *  - According to the original Multigraph work, heavy blocks are far fewer
 *    than light blocks, so occupancy would otherwise be insufficient.
 */ 
// Heavy-block SpMM (Alter2): same algorithm as Base::_heavy_kernel -- a CTA
// per heavy block with blockIdx.y tiling the feature dimension, BSZ source
// rows staged in shared memory, and the tiled edge stream encoded as
//   vtx == -1         : padding, skip
//   vtx <  0 (not -1) : run separator -- flush, then HASH(vtx) gives the new dst
//   vtx >= 0          : local source row contributing to the current dst
template<class F, class scalar_t>
__global__ void _heavy_kernel(
  const int* __restrict__ block_offset_sdat, 
  const int* __restrict__ block_offset_edat, 
  const int* __restrict__ block_offset_sb,
  const int* __restrict__ tiled_edat,
  const scalar_t* __restrict__ tiled_wdat,
  const acd::TensorInfo<scalar_t,int> src_tensor,
  scalar_t * stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const int nvertex = src_tensor.sizes[0];
  const int vlen    = src_tensor.sizes[1];

  const int Tx     = threadIdx.x;
  const int Bx     = blockIdx.x;
  const int By     = blockIdx.y;
  const int Stride = blockDim.x;
  
  const bool weighted = (tiled_wdat != nullptr);

  SharedMem<scalar_t> sm_src; // staged source tile, stride {SM_VLEN, BSZ}

  int src_base = block_offset_sdat[Bx]*BSZ;   // first global source row of this block
  int sb_base  = block_offset_sb[Bx];         // stream-buffer row base of this block

  for (int _it=vlen_base+By*SM_VLEN; _it<vlen_lim; _it+=SM_VLEN*gridDim.y)
  {
    const int vlim = MIN(SM_VLEN, vlen-_it);  // valid columns in this tile
    // Barrier before restaging: threads still reading sm_src in the previous
    // iteration's edge loop must arrive here before it is overwritten.
    __syncthreads();
    for (int i=Tx; i<BSZ; i+=Stride)
    {
      if ( src_base+i<nvertex ) {
        for (int j=0; j<vlim; ++j)
        {
          int srcInd = (src_base+i)*src_tensor.strides[0] + (j+_it)*src_tensor.strides[1];
          sm_src[SMIDX(i,j)] = src_tensor.data[srcInd];
        }
      }
    }
    __syncthreads();

    int cur_dst = -1;
    scalar_t local_reduce[SM_VLEN];
    CLEAN_REG(local_reduce, SM_VLEN);
    for (int i=block_offset_edat[Bx]+Tx; i<block_offset_edat[Bx+1]; i+=Stride)
    {
      int vtx = tiled_edat[i];
      if (vtx == -1) continue;   // padding entry
      if (vtx < 0) {
        // Run separator: flush the finished destination, start the next one.
        if (cur_dst != -1) {
          for (int j=0; j<vlim; ++j)
          {
            F::reduceAtomic(stream_buffer+SBIDX(sb_base, cur_dst, j+_it-vlen_base), local_reduce[j]);
          }
        }
        cur_dst = HASH(vtx);
        CLEAN_REG(local_reduce, SM_VLEN);
      } else {
        for (int j=0; j<vlim; ++j)
        {
          scalar_t wsrc = (weighted)? F::binary(tiled_wdat[i], sm_src[SMIDX(vtx,j)]) : sm_src[SMIDX(vtx,j)];
          F::reduce(&local_reduce[j], wsrc);
        }
      }
      // BUGFIX: removed a __syncthreads() that used to end each iteration.
      // Per-thread trip counts differ (the loop starts at offset+Tx) and the
      // `continue` above skipped the barrier, so divergent subsets of the
      // block reached it -- undefined behavior / deadlock hazard.  The
      // barrier at the top of the tile loop already separates sm_src reads
      // from the next restaging.
    }
    if (cur_dst != -1) 
      for (int j=0; j<vlim; ++j)
        F::reduceAtomic(stream_buffer+SBIDX(sb_base, cur_dst, j+_it-vlen_base), local_reduce[j]);
  }
  
}

// Light-edge scatter, staged variant.  Edge metadata (local source id,
// packed destination slot base, optional weight) is copied into dynamic
// shared memory one blockDim.x-sized segment at a time; every thread then
// walks the whole segment sequentially for its own feature column.
// Grid: blockIdx.x = light block, blockIdx.y extends column coverage
// (Tx spans blockDim.x*gridDim.y columns relative to vlen_base).
// Dynamic shared memory layout: [short src x Stride | int dst x Stride/8 |
// T weight x Stride] -- must match sm_sz computed by the host launcher.
template <class F, class T> __global__ void
_light_kernel(
  const int* __restrict__ block_offset_elst,
  const short* __restrict__ src_list,
  const T* __restrict__ weight_list,
  const int* __restrict__ dst_offset_sb,
  const acd::TensorInfo<T,int> src_tensor,
  T* stream_buffer,
  const int sbsize, const int vlen_base, const int vlen_lim,
  const int64_t _bsz)
{
  const bool weighted = (weight_list != nullptr);

  const int Stride = blockDim.x;
  const int Tx = threadIdx.x+blockIdx.y*Stride;   // feature column, relative to vlen_base
  const int Bx = blockIdx.x;

  const int edge_start = block_offset_elst[Bx];
  const int edge_end = block_offset_elst[Bx+1];
  const int nnz = edge_end - edge_start;
  const int steps = CEIL(nnz, Stride);

  const short *lsrc = src_list + edge_start;
  const int *ldst = dst_offset_sb + edge_start/8;   // one slot base per 8 edges
  const T *lew = weighted ? weight_list + edge_start : nullptr;

  const int SrcBase = BSZ*Bx;   // first global source row of this block

  SharedMem<char> _sm;

  short* sm_src = (short*)_sm.ptr();
  int* sm_dst = (int*)(_sm.ptr()+sizeof(short)*Stride);
  T* sm_ew = weighted ? (T*) (_sm.ptr() + sizeof(int)*Stride/8 + sizeof(short)*Stride) : nullptr;

  for (int iter = 0; iter < steps; ++iter)
  {
    int eseg_start = iter*Stride;
    int eseg_end = eseg_start + MIN(Stride, nnz - eseg_start);
    int i = eseg_start;
    // Previous segment must be fully consumed before restaging.
    __syncthreads();

    if (threadIdx.x+eseg_start < eseg_end){
      sm_src[threadIdx.x] = lsrc[i+threadIdx.x];
      if ((threadIdx.x&7) == 0) {
        sm_dst[threadIdx.x>>3] = ldst[(i+threadIdx.x)>>3];
      }
      if (weighted) sm_ew[threadIdx.x] = lew[eseg_start+threadIdx.x];
    }
    __syncthreads();

    if ( Tx+vlen_base < vlen_lim )
    {
      // BUGFIX: srcVal/last_src are hoisted out of the while body so the
      // source value is actually memoized across consecutive edges sharing
      // a source row.  Previously last_src was never updated, leaving the
      // cache dead, and srcVal was re-declared each iteration so a cache hit
      // would have read an uninitialized value.
      int last_src = -1;
      T srcVal = (T)0;
      while (i < eseg_end)
      {
        int src = sm_src[i-eseg_start];
        if (src < 0) break;   // negative padding terminates the segment
        if (src != last_src)
        {
          int srcInd = src_tensor.strides[0] * (src+SrcBase) + src_tensor.strides[1] * (Tx+vlen_base);
          srcVal = src_tensor.data[srcInd];
          last_src = src;
        }

        // Slot = per-8-edge base + edge position within its group of 8.
        int dst   = sm_dst[(i-eseg_start)>>3] + ((i-eseg_start)&7);
        int sbInd = SBIDX(0, dst, Tx);

        if (weighted) F::reduce(stream_buffer+sbInd, F::binary(srcVal, sm_ew[i-eseg_start]));
        else F::reduce(stream_buffer+sbInd, srcVal);

        i++;
      }
    }
    __syncthreads();
  }
}

}; // namespace Alter2


// Build a bitmap of non-zero stream-buffer entries: bit k of bitmap[w] is
// set iff sb[w*32+k] != 0.  Requires blockDim.x to be a multiple of 32 so
// warps align with bitmap words.
template<typename T> void __global__
stream_buffer_scan(const T* __restrict__ sb, uint32_t *bitmap, const int sbsize)
{
  const int Tx = threadIdx.x+blockIdx.x*blockDim.x;
  const int Stride = gridDim.x*blockDim.x;
  // BUGFIX: iterate to sbsize rounded up to a warp multiple and fold the
  // bounds check into the ballot predicate.  Previously lanes past sbsize
  // exited the loop while the rest of their warp still called
  // __ballot_sync(0xffffffff, ...) -- missing participants is undefined
  // behavior (hang/garbage).  Also removed a __syncthreads() that sat in
  // this divergent loop; no shared memory is used, so it was both unsafe
  // and unnecessary.
  const int lim = (sbsize + 31) & ~31;
  for (int i=Tx; i<lim; i+=Stride)
  {
    bool changed = (i < sbsize) && (sb[i] != 0);
    uint32_t ball = __ballot_sync(0xffffffff, changed);
    if ((threadIdx.x&31) == 0){
      bitmap[i>>5] = ball;
    } 
  }
}


// Host-side driver: runs the partitioned SpMM over the feature dimension in
// SB_VLEN-wide chunks, double-buffering the stream buffer so the next
// chunk's buffer is zeroed (async on stream[0]) while the current chunk is
// being produced/consumed.  Producer kernels (dense/heavy/light) run on
// streams 3/1/2; the aggregate kernel is launched on the legacy default
// stream, which synchronizes with all other streams and therefore orders it
// after the producers and orders the next iteration after the async memset.
// NOTE(review): this ordering breaks under --default-stream=per-thread;
// `_bsz` is referenced but not declared here -- presumably a file-scope
// global/macro, confirm.  The static buffer is allocated once and
// intentionally never freed (cached across calls); it is not thread-safe.
template<class F, class scalar_t> void
_spmm_partitioned(const int* block_offset_sdat, const int* block_offset_edat, const int* block_offset_sb,
                  const int* tiled_edat, const scalar_t* tiled_wdat, 
                  const int* block_offset_sb_dense, const scalar_t * dense_mat_data,
                  const int* block_offset_elst, const short* src_list, const scalar_t* weight_list,
                  const int* dst_offset_sb, const int* block_offset_dlst, const short* agg_pos,
                  acd::TensorInfo<scalar_t,int> src_tensor, acd::TensorInfo<scalar_t,int> dst_tensor,
                  const int dcnt, const int hcnt, const int lcnt, const int sbsize)
{
  static scalar_t *preallocated_sb = nullptr;   // cached double buffer (per template instantiation)
  static int buffer_flag = 0;                   // which half is the current chunk's buffer

  const int nvertex = src_tensor.sizes[0];
  const int vlen    = src_tensor.sizes[1];

  // How many feature columns fit in one stream-buffer chunk.
  const int SB_VLEN     = SB_MAX / (sizeof(scalar_t)*sbsize);
  const int sb_vlen_cap = MIN(SB_VLEN, vlen);

  const int by_base = CEIL(sb_vlen_cap, SM_VLEN);   // gridDim.y for the tiled kernels

  if (preallocated_sb == nullptr) H_ERR(cudaMalloc(&preallocated_sb, sizeof(scalar_t)*SB_MAX*2)); // better approach: an explicit double buffer

  // printf("param check: sb_vlen_cap=%d\n", sb_vlen_cap);
  // Synchronous zero of the first chunk's buffer before the loop starts.
  H_ERR(cudaMemset(preallocated_sb+buffer_flag*SB_MAX, 0, sizeof(scalar_t)*sb_vlen_cap*sbsize));

  for (int vlen_base = 0; vlen_base<vlen; vlen_base += SB_VLEN)
  {
    int vlen_lim    = MIN(vlen_base+SB_VLEN, vlen);
    scalar_t *stbuf = preallocated_sb + buffer_flag*SB_MAX;
    buffer_flag     = (buffer_flag+1) & 1;

    // Zero the *other* buffer for the next iteration while this one is in use.
    H_ERR(cudaMemsetAsync(preallocated_sb + buffer_flag*SB_MAX, 0, sizeof(scalar_t)*sb_vlen_cap*sbsize, global_helper.stream[0]));
    if (dcnt > 0)
    {
      Base::_dense_kernel<F, scalar_t><<<dcnt*BSZ*BSZ/1024, dim3(32,32), 0, global_helper.stream[3]>>>
      (block_offset_sb_dense, dense_mat_data, src_tensor, stbuf, sbsize, vlen_base, vlen_lim, _bsz);
    }
    
    if (hcnt > 0)
    {
      Base::_heavy_kernel<F, scalar_t><<<dim3(hcnt, by_base), THD, BSZ*SM_VLEN*sizeof(scalar_t), global_helper.stream[1]>>>
      (block_offset_sdat, block_offset_edat, block_offset_sb, 
      tiled_edat, tiled_wdat, src_tensor, stbuf, sbsize, vlen_base, vlen_lim, _bsz);
    }

    if (lcnt > 0)
    {
      // alter 2 dedicated variables (kept for the commented-out launch below)
      int by = CEIL(sb_vlen_cap, THD);
      int sm_sz = sizeof(short)*THD + sizeof(int)*THD/8;
      if (weight_list != nullptr) sm_sz += sizeof(scalar_t)*THD;

      // Alter2::_light_kernel<F, scalar_t><<<dim3(lcnt, by), THD, sm_sz, global_helper.stream[2]>>>
      Base::_light_kernel<F, scalar_t><<<dim3(lcnt, by_base), THD, BSZ*SM_VLEN*sizeof(scalar_t), global_helper.stream[2]>>>
      (block_offset_elst, src_list, weight_list, dst_offset_sb, src_tensor, stbuf, sbsize, vlen_base, vlen_lim, _bsz);
    }
    // CUDA_SOLE_KERNEL_CHECK(stream_buffer_scan<scalar_t><<<1,32>>>(stbuf, map, sbsize*vlen));
    // int base = 0;
    // for (int i=0; i<mapsz; i++)
    // {
    //   if (i%16 == 0) {printf("\n %7d~%7d : ", base, base+511); base += 512;}
    //   printf("%x ", map[i]);
    // }

    // Launched on the legacy default stream: implicitly waits for the three
    // producer streams (and the memset) before consuming stbuf.
    CUDA_LAUNCH_CHECK(Base::_aggregate_kernel<F, scalar_t><<<dim3(CEIL(nvertex,BSZ), by_base), THD, BSZ*SM_VLEN*sizeof(scalar_t)>>>
    (block_offset_dlst, agg_pos, stbuf, dst_tensor, sbsize, vlen_base, vlen_lim, _bsz));
  }
}

#endif

