#ifndef _SPMM_CUH__ 
#define _SPMM_CUH__ 

#include <cuda.h> 
#include <vector_types.h>
// #include <cooperative_groups.h>

#include "cuda/cuda_utils.cuh"

#include "../dense.cuh"
#include "../specification.hxx"
#include "config.hxx"

namespace manscript {

template <typename F, typename Idx, typename DType, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_full(const DType val, const DType* src_ptr,
             Idx offset, llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Weighted dense walk (u_e_v): for each dense iteration, load one vector
  // of the source row and accumulate binary(val, loaded) into local_sum.
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;
  // Element distance between two consecutive dense iterations of one thread.
  constexpr uint step = DnConf::xdim * DnIPV;

  Vect<DType, DnIPV> fetched;
  for (uint it = 0; it < DnIters; ++it, offset += step)
  {
    fetched.template load_thread_offset<dnv_t>(const_cast<DType*>(src_ptr), offset);

    // local_sum[it] = reduce(local_sum[it], binary(val, fetched)) element-wise.
    thread_scalar_fma_inline<DType, Vect<DType, DnIPV>, Vect<DType, DnIPV>&, DnIPV>(
      val, fetched, local_sum[it], F::binary, F::reduce);
  }
}


template <typename F, typename Idx, typename DType, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_dense_only(const DType val, const DType* src_ptr,
                      Idx offset, llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Dense-only walk (u_v): fold the dense source vectors into the
  // accumulators without applying the edge weight. `val` is ignored; it is
  // kept so this matches the common walker signature (see DenseWalkSwitch).
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;
  // Element distance between two consecutive dense iterations of one thread.
  constexpr uint step = DnConf::xdim * DnIPV;

  Vect<DType, DnIPV> fetched;
  for (uint it = 0; it < DnIters; ++it, offset += step)
  {
    fetched.template load_thread_offset<dnv_t>(const_cast<DType*>(src_ptr), offset);

    // local_sum[it] = reduce(local_sum[it], fetched) element-wise.
    thread_binary_elementwise_inline<DType, Vect<DType, DnIPV>, Vect<DType, DnIPV>&, DnIPV>(
      fetched, local_sum[it], F::reduce);
  }
}


template <typename F, typename Idx, typename DType, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_sparse_only(const DType val, const DType* src_ptr,
                      Idx offset, llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Sparse-only walk (e_v): accumulate the edge weight `val` into every
  // element of the local accumulators; the dense source is never read.
  // `src_ptr` and `offset` are unused but kept so this matches the common
  // walker function-pointer signature (see DenseWalkSwitch).
  //
  // Fix: the original nested a second `for (i < DnIters)` loop (with a
  // shadowed index) inside the outer DnIters loop, so each accumulator
  // element was reduced DnIters times per call instead of once.
  (void)src_ptr;
  (void)offset;

  #pragma unroll
  for ( uint i=0; i<DnIters; ++i ) {
    #pragma unroll
    for ( uint j=0; j<DnIPV; ++j ) {
      F::reduce(&local_sum[i][j], val);
    }
  }
}


template <typename F, typename Idx, typename DType, DnCompute DnCom, typename DnConf, uint DnIPV, uint DnIters>
struct DenseWalkSwitch
{
  // Common signature of all dense walkers:
  //   (edge value, dense source base, element offset, per-thread accumulators).
  using walker_func_t = void (*) (const DType, const DType*, Idx, llvlib::Vect<DType,DnIPV>[]);

  // Compile-time dispatch on the dense compute pattern.
  // Fix: the original could fall off the end of this non-void function when
  // DnCom matched none of the three cases (undefined behavior); the e_v
  // walker is now the trailing return so every path yields a value.
  static constexpr walker_func_t get_walker()
  {
    if ( DnCom == u_e_v )   // weighted: sum += binary(val, src)
    {
      return dense_walk_full<F, Idx, DType, DnConf, DnIPV, DnIters>;
    }

    if ( DnCom == u_v )     // unweighted: sum += src
    {
      return dense_walk_dense_only<F, Idx, DType, DnConf, DnIPV, DnIters>;
    }

    // e_v (sparse values only): sum += val.
    return dense_walk_sparse_only<F, Idx, DType, DnConf, DnIPV, DnIters>;
  }
};

template <typename F, typename Idx, typename DType, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_segreduce_shared(DType* dst_base, const Idx row_base,
                         uint col_base, const uint vlen, 
                         llvlib::Vect<DType,DnIPV> local_sum[], Idx _k_buf[], DType _v_buf[])
{
  // Writeback for push_para walks using a shared-memory segmented reduction:
  // partial sums that may target different destination rows are combined
  // across the sparse group through _k_buf (segment keys) and _v_buf
  // (values); only each segment's leader commits the result, atomically,
  // since other groups/blocks can still hit the same row.
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;
  
  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    // All threads must enter the segmented reduce each iteration (it is a
    // group-cooperative operation); `lead` marks the segment leader.
    bool lead = warp_segmented_reduce_shared<Idx, DType, Vect<DType,DnIPV>&, DnIPV, DnConf::xdim, SpConf>(
      local_sum[i], F::reduce, _k_buf, _v_buf);
    if ( lead && col_base < vlen )
    {
      DType *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      thread_binary_elementwise_inline<DType, Vect<DType,DnIPV>, DType*, DnIPV>(local_sum[i], dst_ptr, F::reduceAtomic);
    }
    col_base += stride;
  }
}

template <typename F, typename Idx, typename DType, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_segreduce_shuffle(DType* dst_base, const Idx row_base,
                            uint col_base, const uint vlen, 
                            llvlib::Vect<DType,DnIPV> local_sum[], Idx _k_buf[], DType* _dum2)
{
  // Writeback for push_para walks using a warp-shuffle segmented reduction
  // (valid only when the sparse group fits in a warp; see WritebackSwitch).
  // _k_buf carries the segment key; _dum2 is an unused placeholder kept for
  // the common writeback signature. Segment leaders commit atomically.
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;
  
  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    // Group-cooperative: every lane must participate in the reduce.
    bool lead = warp_segmented_reduce_shuffle<Idx, DType, Vect<DType,DnIPV>&, DnIPV, DnConf::xdim, SpConf>(
      local_sum[i], F::reduce, _k_buf);
    if ( lead && col_base < vlen )
    {
      DType *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      thread_binary_elementwise_inline<DType, Vect<DType,DnIPV>, DType*, DnIPV>(local_sum[i], dst_ptr, F::reduceAtomic);
    }
    col_base += stride;
  }
}

template <typename F, typename Idx, typename DType, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_sreduce(DType* dst_base, const Idx row_base, uint col_base, const uint vlen, 
                 llvlib::Vect<DType,DnIPV> local_sum[], Idx* _dum1, DType _buffer[])
{
  // Writeback with a shared-memory reduction across the sparse group:
  // cooperating threads combine their partials through `_buffer`, then the
  // result is stored without atomics (the row is owned by this group).
  // `_dum1` is an unused placeholder for the common writeback signature.
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;

  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    // Group-cooperative reduce: all threads must enter every iteration.
    warp_reduce_shared<DType, Vect<DType,DnIPV>&, DnIPV, DnConf::xdim, SpConf>(local_sum[i], F::reduce, _buffer);
    // Only the first DnConf::xdim lanes of the group write the reduced
    // value back (presumably where warp_reduce_shared leaves the result —
    // TODO confirm against its implementation).
    if ( SpConf::xid() < DnConf::xdim && col_base < vlen )
    {
      // DType *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      local_sum[i].template dump_thread_offset<dnv_t>(dst_base, row_base+col_base);
    }
    col_base += stride;
  }
}

/**
 * Writeback with a warp-shuffle reduction across the sparse group, followed
 * by a plain (non-atomic) store: the destination row is owned by this group.
 * `_dum1`/`_dum2` are unused placeholders for the common writeback signature.
 * TODO: add shared memory based block reduce for Sp group > 32
 */ 
template <typename F, typename Idx, typename DType, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_wreduce(DType* dst_base, const Idx row_base, uint col_base, const uint vlen, 
                 llvlib::Vect<DType,DnIPV> local_sum[], Idx* _dum1, DType* _dum2)
{
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;

  const uint stride = DnConf::xdim * DnIPV;
  for (uint i=0; i<DnIters; ++i)
  {
    // Warp-cooperative shuffle reduce; every lane must participate.
    warp_reduce_shuffle<DType, Vect<DType,DnIPV>&, DnIPV, DnConf::xdim, SpConf>(local_sum[i], F::reduce);
    // NOTE(review): unlike writeback_sreduce there is no lane guard here —
    // presumably warp_reduce_shuffle leaves each lane holding its own
    // column's reduced value; confirm against its implementation.
    if ( col_base < vlen )
    {
      // DType *dst_ptr = dst_base + (row_base + col_base);
      // local_sum[i].template dump_thread<dnv_t>(dst_ptr);
      local_sum[i].template dump_thread_offset<dnv_t>(dst_base, row_base+col_base);
    }
    col_base += stride;
  }
}



template <typename Idx, typename DType, typename Conf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_naive(DType* dst_base, const Idx row_base, uint col_base, const uint vlen, 
                 llvlib::Vect<DType,DnIPV> local_sum[], Idx* _dum1, DType* _dum2)
{
  // Direct (non-reducing) writeback: each thread owns its output columns, so
  // the accumulators are dumped straight to dst with no atomics/reduction.
  // `_dum1`/`_dum2` are unused placeholders for the common writeback signature.
  using dnv_t = typename llvlib::VectTypes<DType, DnIPV>::type;
  constexpr uint stride = Conf::xdim * DnIPV;

  // Fix: the original never advanced `col_base`, so the `col_base < vlen`
  // bounds check was constant across iterations and tail iterations could
  // write past the valid column range. Advance and re-check per iteration,
  // matching every other writeback_* helper.
  Idx offset = row_base + col_base;
  for (uint i=0; i<DnIters; ++i)
  {
    if (col_base < vlen)
      local_sum[i].template dump_thread_offset<dnv_t>(dst_base, offset);
    col_base += stride;
    offset += stride;
  }
}

template <typename F, typename Idx, typename DType, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_atomic_direct(DType* dst_base, const Idx row_base, uint col_base, const uint vlen, 
                        llvlib::Vect<DType,DnIPV> local_sum[], Idx* _dum1, DType* _dum2)
{
  // Commit every accumulator straight to global memory using the functor's
  // atomic reduction; used when several groups may target the same row.
  // `_dum1`/`_dum2` are unused placeholders for the common writeback signature.
  using namespace llvlib;
  using dnv_t = typename VectTypes<DType, DnIPV>::type;

  const uint step = DnConf::xdim * DnIPV;
  for (uint it = 0; it < DnIters; ++it, col_base += step)
  {
    if ( col_base >= vlen )
      continue;  // this iteration's columns fall past the valid range
    DType *out = dst_base + (row_base + col_base);
    thread_binary_elementwise_inline<DType, Vect<DType,DnIPV>, DType*, DnIPV>(local_sum[it], out, F::reduceAtomic);
  }
}


template <typename F, typename Idx, typename DType, 
          SpWalk spwalk, Reduction reduce, typename SpConf, typename DnConf,
          uint DnIPV, uint DnIters>
struct WritebackSwitch {
  using Coord = llvlib::CoopCoordinator<SpConf, DnConf>;
  // Common writeback signature: (dst base, row offset, start column,
  // valid columns, accumulators, key buffer, value buffer).
  using writeback_func_t = void (*) (DType*, const Idx, uint, const uint, llvlib::Vect<DType,DnIPV>*, Idx*, DType*);

  // Compile-time selection of the writeback strategy from the sparse-walk
  // mode and reduction flavor.
  // Fix: the original could fall off the end of this non-void function for
  // enum values past push_para (undefined behavior); the push_para case is
  // now the trailing path so every branch returns.
  static constexpr writeback_func_t get_writeback()
  {
    if ( spwalk <= pull_dyn )  // pull-style walks: one group owns one row
    {
      if ( Coord::Mutual ) return writeback_naive<Idx, DType, DnConf, DnIPV, DnIters>;
      if ( reduce == atomic ) return writeback_atomic_direct<F, Idx, DType, DnConf, DnIPV, DnIters>;
      if ( reduce == shfl_red && SpConf::xdim <= 32 )
        return writeback_wreduce<F, Idx, DType, SpConf, DnConf, DnIPV, DnIters>;
      return writeback_sreduce<F, Idx, DType, SpConf, DnConf, DnIPV, DnIters>;
    }

    if ( spwalk == push_seq ) return writeback_atomic_direct<F, Idx, DType, DnConf, DnIPV, DnIters>;

    // push_para (and default): segmented reduction across the group; shuffle
    // variant only when the group fits in a warp.
    if ( SpConf::xdim <= 32 && reduce == shfl_red )
      return writeback_segreduce_shuffle<F, Idx, DType, SpConf, DnConf, DnIPV, DnIters>;
    return writeback_segreduce_shared<F, Idx, DType, SpConf, DnConf, DnIPV, DnIters>;
  }

  // Publish this thread's segment key before a push_para segmented reduce.
  // Shared-memory flavor stores per-group; shuffle flavor broadcasts via
  // slot 0. No-op for all other walk/reduction combinations.
  static __device__ __forceinline__ void
  reduce_buffer_set(Idx key, DType val, Idx* _k_buf, DType* _v_buf)
  {
    if ( spwalk == push_para )
    {
      if ( reduce == shared_red ) {
        _k_buf[SpConf::yid()] = key;
      }

      if ( reduce == shfl_red ) {
        _k_buf[0] = key;
      }
    }
  }

  // Invalidate the published segment key (-1 sentinel) after the reduce;
  // only the shared-memory flavor needs an explicit reset.
  static __device__ __forceinline__ void
  reduce_buffer_reset(Idx* _k_buf, DType* _v_buf)
  {
    if ( spwalk == push_para )
    {
      if ( reduce == shared_red ) {
        _k_buf[SpConf::yid()] = -1;
      }
    }
  }
};


template <typename F, typename Idx, typename DType, DnCompute DnCom, typename SpConf, typename DnConf, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_non_cached(const Idx start, const Idx end, const uint col_base, 
                   const Idx *col_ptr, const DType *val_ptr, const DType *src_ptr, 
                   const uint ld_src, llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Pull walk over one row's edge range [start, end) with no caching: each
  // cooperating dense sub-group reads its edges straight from global memory.
  static_assert(SpConf::ydim <= DnConf::ydim && "Sparse major is deprecated!");

  // Number of dense sub-groups cooperating on one sparse row; edges are
  // interleaved across them.
  constexpr uint dn_coop = DnConf::ydim / SpConf::ydim;
  const auto walker = DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>::get_walker();

  for (uint e = start + DnConf::yid()%dn_coop; e < end; e += dn_coop)
  {
    const Idx   column = col_ptr[e];
    const DType weight = val_ptr[e];

    walker(weight, src_ptr, ld_src*column + col_base, local_sum);
  }
}


template <typename F, typename Idx, typename DType, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_local_cached(const Idx start, const Idx end, const uint col_base, 
                     const Idx *col_ptr, const DType *val_ptr, const DType *src_ptr, 
                     const uint ld_src, llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Pull walk over one row's edge range [start, end) with register caching:
  // the tile-aligned middle is loaded SpIPV edges at a time into per-thread
  // vector registers; the unaligned head and tail fall back to scalar loads.
  // Work is interleaved across the dn_coop dense sub-groups of this row.
  using namespace llvlib;
  using spiv_t = typename VectTypes<Idx, SpIPV>::type;
  using spvv_t = typename VectTypes<DType, SpIPV>::type;
  using Coord = CoopCoordinator<SpConf, DnConf>;
  using DenseWalkSwitch = DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>;
  
  constexpr uint dn_coop = Coord::DnPerSp;     // dense sub-groups per sparse row
  constexpr uint SpTileSize = SpIPV*dn_coop;   // edges covered by one full tile
  const uint dn_yid = Coord::dn_yid_in_sp();   // this thread's sub-group slot

  Vect<Idx, SpIPV> spidx_reg;    // register cache: column indices
  Vect<DType, SpIPV> spval_reg;  // register cache: edge weights

  uint off = ALIGN_DOWN(start, SpTileSize);
  uint pre_end = ALIGN_DOWN(end, SpTileSize);
  // Head: scalar loop from `start` up to the first tile boundary (or `end`).
  if ( off < start )
  {
    uint border = MIN(off+SpTileSize, end);
    for (uint i=start+dn_yid; i<border; i+=dn_coop)
    {
      Idx col = col_ptr[i];
      DType val = val_ptr[i];

      DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }

    off += SpTileSize;
  }

  // Middle: full tiles only; each thread vector-loads its own SpIPV edges
  // and walks them from registers.
  if ( off + SpTileSize <= pre_end ) {
    for ( ; off < pre_end; off+=SpTileSize)
    {
      uint i = off + dn_yid * SpIPV;
      spidx_reg.template load_thread_offset<spiv_t>(const_cast<Idx*>(col_ptr),i);
      spval_reg.template load_thread_offset<spvv_t>(const_cast<DType*>(val_ptr),i);
      
      for (uint j=0; j<SpIPV; ++j)
      {
        Idx col = spidx_reg[j];
        DType val = spval_reg[j];
        
        DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      }
    }
  } 

  // Tail: scalar loop over the remaining (< SpTileSize) edges before `end`.
  if ( off < end ) {
    for (uint i=off+dn_yid; i<end; i+=dn_coop)
    {
      Idx col = col_ptr[i];
      DType val = val_ptr[i];

      DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }
  }
}

template <typename F, typename Idx, typename DType, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_warp_cached(const Idx start, const Idx end, const uint col_base, 
                     const Idx *col_ptr, const DType *val_ptr, const DType *src_ptr, 
                     const uint ld_src, llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Pull walk over [start, end) with warp-shuffle caching: each lane
  // vector-loads SpIPV edges into registers, then every cooperating thread
  // fetches the (col, val) pairs it needs from the owning lane via
  // __shfl_sync. Three phases: unaligned head, full tiles, unaligned tail.
  // NOTE: the mask argument -1 converts to 0xffffffff (full warp); all
  // lanes of the warp must reach these shuffles together.
  // static_assert(SpConf::xdim <= 32 && "only warp-level cooperation is allowed");

  using namespace llvlib;
  using spiv_t = typename VectTypes<Idx, SpIPV>::type;
  using spvv_t = typename VectTypes<DType, SpIPV>::type;
  using Coord = CoopCoordinator<SpConf, DnConf>;

  Vect<Idx, SpIPV> spidx_reg;    // lane-local cache: column indices
  Vect<DType, SpIPV> spval_reg;  // lane-local cache: edge weights

  constexpr uint SpTileSize = SpConf::xdim * SpIPV;  // edges per warp tile
  constexpr uint benefit_thresh = 0;                 // currently unused

  uint i = ALIGN_DOWN(start, SpTileSize);
  // prefix deal
  if ( i < start ) {
    uint pre_e = MIN(SpTileSize, end - i);
    spidx_reg.template load_thread_offset<spiv_t>(const_cast<Idx*>(col_ptr),i+SpIPV*SpConf::xid());
    // Weights are only loaded when the compute pattern uses them; otherwise
    // spval_reg is shuffled uninitialized below but the selected walker
    // ignores `val` — assumed benign, TODO confirm.
    if ( DnCom == u_e_v )
      spval_reg.template load_thread_offset<spvv_t>(const_cast<DType*>(val_ptr),i+SpIPV*SpConf::xid());
    // NOTE(review): an intra-warp __shfl_sync synchronization failure may
    // occur here (translated from original comment) — lanes whose `j` start
    // differs could diverge at the shuffle; verify all lanes iterate alike.
    uint j = (start + Coord::dn_yid_in_sp()) % SpTileSize;
    for ( ; j<pre_e; j+=Coord::DnPerSp) {
      uint thd = j / SpIPV;  // lane owning this edge
      uint off = j % SpIPV;  // slot within that lane's vector
      Idx col = __shfl_sync(-1, spidx_reg[off], thd, SpConf::xdim);
      DType val = __shfl_sync(-1, spval_reg[off], thd, SpConf::xdim);

      DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }
    SpConf::sync();
    i += SpTileSize;
  }

  // middle deal
  uint suf_s = ALIGN_DOWN(end, SpTileSize);
  if (i <= suf_s) { 
    for (; i+SpTileSize<=end; i+=SpTileSize)
    {
      spidx_reg.template load_thread_offset<spiv_t>(const_cast<Idx*>(col_ptr),i+SpIPV*SpConf::xid());
      spval_reg.template load_thread_offset<spvv_t>(const_cast<DType*>(val_ptr),i+SpIPV*SpConf::xid());
      
      // Every cooperating thread walks its interleaved share of the tile,
      // broadcasting each (col, val) pair from the lane that cached it.
      for (uint j=Coord::dn_yid_in_sp(); j<SpTileSize; j+=Coord::DnPerSp)
      {
        uint thd = j / SpIPV;
        uint off = j % SpIPV;
        Idx col = __shfl_sync(-1, spidx_reg[off], thd, SpConf::xdim);
        DType val = __shfl_sync(-1, spval_reg[off], thd, SpConf::xdim);

        DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      }
    }
  }

  // Suffix: remaining (< SpTileSize) edges before `end`.
  if ( i < end ) {
    spidx_reg.template load_thread_offset<spiv_t>(const_cast<Idx*>(col_ptr),i+SpIPV*SpConf::xid());
    if ( DnCom == u_e_v )
      spval_reg.template load_thread_offset<spvv_t>(const_cast<DType*>(val_ptr),i+SpIPV*SpConf::xid());
    // NOTE(review): an intra-warp __shfl_sync synchronization failure may
    // occur here (translated from original comment).
    uint pre_e = MIN(SpTileSize, end - i);
    for (uint j = Coord::dn_yid_in_sp(); j<pre_e; j+=Coord::DnPerSp) {
      uint thd = j / SpIPV;
      uint off = j % SpIPV;
      Idx col = __shfl_sync(-1, spidx_reg[off], thd, SpConf::xdim);
      DType val = __shfl_sync(-1, spval_reg[off], thd, SpConf::xdim);

      DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }
    SpConf::sync();
  }
}


template <typename F, typename Idx, typename DType, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
proceed_pull_shared_cached(const Idx start, const Idx end, const uint col_base, 
                     const Idx *col_ptr, const DType *val_ptr, const DType *src_ptr, 
                     const uint ld_src, llvlib::Vect<DType,DnIPV> local_sum[], 
                     char* __shared)
{
  // Pull walk over [start, end) staging edge tiles in shared memory: the
  // sparse group cooperatively loads SpTileSize (col, val) pairs into the
  // shared-memory views, then each thread walks its interleaved share.
  // `__shared` is the caller-provided dynamic shared-memory cursor.
  using namespace llvlib;
  using spiv_t = typename VectTypes<Idx, SpIPV>::type;
  using spvv_t = typename VectTypes<DType, SpIPV>::type;
  using Coord = CoopCoordinator<SpConf, DnConf>;
  using DenseWalkSwitch = DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>;

  constexpr uint SpTileSize = SpConf::xdim * SpIPV;
  // Per-group slices of the shared arena: all index tiles first, then all
  // value tiles, each indexed by the group's ybase.
  uint sh_idx_offset = SpIPV * sizeof(Idx) * SpConf::ybase();
  uint sh_val_offset = SpIPV * ( sizeof(Idx) * SpConf::coop_size() 
                               + sizeof(DType) * SpConf::ybase() );
  VectView<Idx, SpTileSize> spidx_local(&__shared[sh_idx_offset]);
  VectView<DType, SpTileSize> spval_local(&__shared[sh_val_offset]);

  constexpr uint benefit_thresh = 6;  // currently unused
  
  uint i = ALIGN_DOWN(start, SpTileSize);
  // prefix deal: unaligned head up to the first tile boundary (or `end`).
  if ( i < start ) {
    uint pre_e = MIN(SpTileSize, end - i);
    spidx_local.template load_warp_padded_offset<spiv_t, SpConf>(const_cast<Idx*>(col_ptr),i,pre_e);
    // Weights only staged when the compute pattern uses them; otherwise
    // spval_local is read uninitialized below but the selected walker
    // ignores `val` — assumed benign, TODO confirm.
    if ( DnCom == u_e_v )
      spval_local.template load_warp_padded_offset<spvv_t, SpConf>(const_cast<DType*>(val_ptr),i,pre_e);
    uint j = (start + Coord::dn_yid_in_sp()) % SpTileSize;
    for ( ; j<pre_e; j+=Coord::DnPerSp) {
      Idx col = spidx_local[j];
      DType val = spval_local[j];
      DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }
    SpConf::sync();
    i += SpTileSize;
  }

  // Middle: full tiles; load cooperatively, sync, then consume.
  uint suf_s = ALIGN_DOWN(end, SpTileSize);
  if (i < suf_s)
  { 
    // auto& spidx_local = spidx_buffer[SpConf::yid()];
    // auto& spval_local = spval_buffer[SpConf::yid()];
    
    for (; i<suf_s; i+=SpTileSize)
    {
      spidx_local.template load_warp_offset<spiv_t, SpConf>(const_cast<Idx*>(col_ptr),i);
      spval_local.template load_warp_offset<spvv_t, SpConf>(const_cast<DType*>(val_ptr),i);
      SpConf::sync();  // staging must complete before any thread reads it
      
      for (uint j=Coord::dn_yid_in_sp(); j<SpTileSize; j+=Coord::DnPerSp)
      {
        Idx col = spidx_local[j];
        DType val = spval_local[j];
        
        DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      }
    }
    SpConf::sync();
  }

  // Suffix: remaining (< SpTileSize) edges before `end`.
  if ( i < end ) {
    uint border = end - i;
    spidx_local.template load_warp_padded_offset<spiv_t, SpConf>(const_cast<Idx*>(col_ptr),i,border);
    if ( DnCom == u_e_v )
      spval_local.template load_warp_padded_offset<spvv_t, SpConf>(const_cast<DType*>(val_ptr),i,border);
    // NOTE(review): an intra-warp __shfl_sync synchronization failure may
    // occur here (translated from original comment).
    for (uint j = Coord::dn_yid_in_sp(); j<border; j+=Coord::DnPerSp) {
      Idx col = spidx_local[j];
      DType val = spval_local[j];

      DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    }
    SpConf::sync();
  }
}


template <typename F, typename Idx, typename DType, 
          SpWalk spwk, DnCompute DnCom, Reduction reduce,
          typename SpConf, typename DnConf, uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_simple(const Idx start, const Idx end, const uint col_base,
                    const Idx *row_offset, const Idx *col_ptr, const DType *val_ptr,
                    const DType *src_ptr, const Idx ld_src, DType *dst_ptr, const Idx ld_dst,
                    const Idx nv, const Idx vlen, 
                    llvlib::Vect<DType,DnIPV> local_sum[])
{
  // Push walk over the edge slice [start, end): edges are consumed
  // sequentially; whenever the walk crosses a CSR row boundary the
  // accumulated partial row is flushed via the selected writeback and the
  // accumulators are re-initialized. `updated` avoids flushing empty rows.
  using DenseWalkSwitch = DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>;
  using WritebackSwitch = WritebackSwitch<F, Idx, DType, spwk, reduce, SpConf, DnConf, DnIPV, DnIters>;

  uint eid = start;
  // Locate the row containing the first edge of this slice.
  uint vid = llvlib::binary_search(eid, row_offset, 0u, nv);
  uint bound = row_offset[vid+1];  // first edge of the next row
  bool updated = false;
  while ( eid < end )
  {
    if ( eid == bound )
    {
      // Row boundary: flush the finished row (if it received any edge),
      // then reset the accumulators for the next row.
      if ( updated )
      // writeback_atomic_direct<Idx, DType, DnConf, DnIPV, DnIters>(
      WritebackSwitch::get_writeback() (
          dst_ptr, ld_dst*vid, col_base, vlen, local_sum,
          nullptr, nullptr
          );
      for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val);
      vid += 1;
      bound = row_offset[vid+1];
      updated = false;
    }
    Idx col = col_ptr[eid];
    DType val = val_ptr[eid];
    DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    updated = true;
    eid += 1;
  }
  
  // Flush the last (possibly partial) row of the slice.
  if ( updated )
  // writeback_atomic_direct<Idx, DType, DnConf, DnIPV, DnIters>(
  WritebackSwitch::get_writeback() (
      dst_ptr, ld_dst*vid, col_base, vlen, local_sum,
      nullptr, nullptr
      );
}


template <typename F, typename Idx, typename DType, 
          SpWalk spwk, DnCompute DnCom, Reduction reduce, 
          typename SpConf, typename DnConf, uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_shared_cached(const Idx start, const Idx end, const uint col_base,
                           const Idx *row_offset, const Idx *col_ptr, const DType *val_ptr,
                           const DType *src_ptr, const Idx ld_src, DType *dst_ptr, const Idx ld_dst,
                           const Idx nv, const Idx vlen, 
                           llvlib::Vect<DType,DnIPV> local_sum[],
                           char *__shared)
{
  // Push walk over [start, end) staging SpTileSize (col, val) pairs per
  // group in shared memory, then consuming the tile sequentially with the
  // same row-boundary flush logic as proceed_push_simple.
  using namespace llvlib;
  using spiv_t = typename VectTypes<Idx, SpIPV>::type;
  using spvv_t = typename VectTypes<DType, SpIPV>::type;
  using DenseWalkSwitch = DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>;
  using WritebackSwitch = WritebackSwitch<F, Idx, DType, spwk, reduce, SpConf, DnConf, DnIPV, DnIters>;

  constexpr uint SpTileSize = SpIPV * SpConf::xdim;
  // Shared arena layout: all groups' column tiles first, then value tiles.
  uint sh_col_offset = SpConf::yid() * SpTileSize * sizeof(Idx);
  uint sh_val_offset = SpConf::coop_size() * SpIPV * sizeof(Idx)
                     + SpConf::yid() * SpTileSize * sizeof(DType);
  llvlib::VectView<Idx, SpTileSize> spcol(&__shared[sh_col_offset]);
  llvlib::VectView<DType, SpTileSize> spval(&__shared[sh_val_offset]);

  uint eid = start;
  // Locate the row containing the first edge of this slice.
  uint vid = llvlib::binary_search(eid, row_offset, 0u, nv);
  uint bound = row_offset[vid+1];  // first edge of the next row
  bool updated = false;

  for ( ; eid < end; eid += SpTileSize)
  {
    spcol.template load_warp_padded_offset<spiv_t, SpConf>(const_cast<Idx*>(col_ptr), eid, end-eid);
    spval.template load_warp_padded_offset<spvv_t, SpConf>(const_cast<DType*>(val_ptr),  eid, end-eid);

    // NOTE(review): this loop always runs the full SpTileSize even on the
    // last partial tile, so edges past `end` are processed with padded
    // data; correctness relies on load_warp_padded_offset's padding being
    // neutral w.r.t. F — verify.
    for (uint i=0; i<SpTileSize; ++i)
    { 
      if ( eid+i == bound )
      {
        // Row boundary: flush the finished row, reset accumulators.
        if ( updated )
        // writeback_atomic_direct<F, Idx, DType, DnConf, DnIPV, DnIters>(
        WritebackSwitch::get_writeback() (
            dst_ptr, ld_dst*vid, col_base, vlen, local_sum,
            nullptr, nullptr
            // _key_buf, _val_buf
            );
        // Note: this `i` intentionally shadows the tile-loop index; the
        // outer loop's counter is unaffected.
        for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val);
        vid += 1;
        bound = row_offset[vid+1];
        updated = false;
      }
      Idx col = spcol[i];
      DType val = spval[i];
      DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
      updated = true;
    } 
  }
  
  // Flush the last (possibly partial) row of the slice.
  if ( updated )
  // writeback_atomic_direct<F, Idx, DType, DnConf, DnIPV, DnIters>(
  WritebackSwitch::get_writeback() (
      dst_ptr, ld_dst*vid, col_base, vlen, local_sum,
      nullptr, nullptr
      // _key_buf, _val_buf
      );
}

template <typename F, typename Idx, typename DType, 
          SpWalk spwk, DnCompute DnCom, Reduction reduce, 
          typename SpConf, typename DnConf, uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_reduced(const Idx start, const Idx end, const uint col_base,
                    const Idx *row_offset, const Idx *col_ptr, const DType *val_ptr,
                    const DType *src_ptr, const Idx ld_src, DType *dst_ptr, const Idx ld_dst,
                    const Idx nv, const Idx vlen, 
                    llvlib::Vect<DType,DnIPV> local_sum[],
                    char *__shared)
{
  // Parallel push walk (push_para): dn_coop threads of the sparse group each
  // take one edge per step, locate its destination row by binary search,
  // publish the row id as a segment key, walk the edge, and immediately
  // write back through a segmented reduction. Accumulators are reset after
  // every step since consecutive edges may belong to different rows.
  // static_assert((reduce != shfl_red || SpConf::xdim < 32) 
  //               && "Shuffle reduce only works within warp");

  using DenseWalkSwitch = DenseWalkSwitch<F, Idx, DType, DnCom, DnConf, DnIPV, DnIters>;
  using WritebackSwitch = WritebackSwitch<F, Idx, DType, spwk, reduce, SpConf, DnConf, DnIPV, DnIters>;

  constexpr uint dn_coop = SpConf::xdim / DnConf::xdim;

  Idx *_seg_buf;
  DType * _val_buf;
  Idx local_seg_buffer;

  // Carve the segment-key buffer from shared memory for the shared-memory
  // reduction; the shuffle flavor only needs one key, kept in a local.
  if (reduce == shared_red) {
    uint sh_seg_offset = SpConf::yid() * dn_coop * sizeof(Idx);
    _seg_buf = (Idx*) &__shared[sh_seg_offset];
    __shared += SpConf::ydim * dn_coop * sizeof(Idx);  // advance past key region
  } else {
    _seg_buf = &local_seg_buffer;
  }

  // Per-group slice of the value buffer, after the key region (if any).
  uint sh_val_offset = SpConf::ybase() * sizeof(DType);
  _val_buf = (DType*) &__shared[sh_val_offset];

  const uint dn_id = DnConf::yid() % dn_coop;  // this thread's edge slot

  uint eid = start + dn_id;
  uint vid = 0;
                
  for ( ;eid < end; eid += dn_coop )
  {
    Idx   col = col_ptr[eid];
    DType val = val_ptr[eid];
    // Per-edge row lookup — O(log nv) per step.
    vid = llvlib::binary_search(eid, row_offset, 0u, nv);
    // _seg_buf[dn_id] = vid;
    WritebackSwitch::reduce_buffer_set(vid, val, _seg_buf, _val_buf);
    DenseWalkSwitch::get_walker()(val, src_ptr, ld_src*col+col_base, local_sum);
    WritebackSwitch::get_writeback() (
      dst_ptr, ld_dst*vid, col_base, vlen, local_sum
      ,_seg_buf ,_val_buf
    );
    for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val);
    SpConf::sync();
    // _seg_buf[dn_id] = -1;
    WritebackSwitch::reduce_buffer_reset(_seg_buf, _val_buf);
  }
}

// Software-based row scheduling: measured performance was insufficient, hence deprecated.
template <SpWalk wk, typename SpConf, typename Idx, bool first_call>
[[__deprecated__]] __device__ __forceinline__ Idx
sparse_row_scheduler(Idx last_offset, Idx *global_cnt, Idx* bcast)
{
  // Deprecated: returns the next row offset for this sparse group, either by
  // a fixed grid-stride schedule (pull_fix) or by a dynamic global atomic
  // counter whose result is broadcast to the whole group.
  if (wk == pull_fix) 
  {
    // Static schedule: first call seeds from blockIdx, later calls stride.
    if (first_call) return blockIdx.x * SpConf::ydim + SpConf::yid();
    else return last_offset += gridDim.x * SpConf::ydim;
  }
  else {
    if (first_call)
    {
      // One thread zeroes the shared counter before anyone increments it.
      // NOTE(review): only fenced, not grid-synchronized — other blocks may
      // increment before the reset lands; verify intended semantics.
      if(threadIdx.x==0 && blockIdx.x==0) *global_cnt = 0;
      __threadfence();
    }
    if (SpConf::xdim > 32)
    {
      // Group wider than a warp: broadcast the ticket via shared memory.
      if (!SpConf::xid()) *bcast = atomicAdd(global_cnt, 1);
      SpConf::sync();
      return *bcast;
    }
    else
    {
      // Warp-sized group: leader takes a ticket, lanes read it via shuffle
      // (lead is uninitialized on non-leader lanes; only lane 0's value is
      // consumed).
      uint lead;
      if (!SpConf::xid()) lead = atomicAdd(global_cnt, 1);
      return __shfl_sync(-1, lead, 0, SpConf::xdim);
    }
  }
}


template <typename F, typename Idx, typename DType, 
          SpWalk spwalk, SpLoad spload, DnCompute dncomp, Reduction red,
          typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__global__ void spmm_kernel_push(
  const Idx   * __restrict__ row_offset,
  const Idx   * __restrict__ col_indx,
  const DType * __restrict__ edge_weight,
  const DType * __restrict__ src_ptr, const Idx ld_src,
  DType * __restrict__ dst_ptr,       const Idx  ld_dst,
  const Idx nv, 
  const Idx ne, 
  const Idx vlen)
{
  // Push-style SpMM kernel: the edge array is split evenly across the
  // grid's sparse groups (blockIdx.x tiles the edge space, blockIdx.y tiles
  // dense columns); each group walks its edge slice and pushes partial rows
  // into dst. Dynamic shared memory is dispatched linearly from `__shared`.
  constexpr uint SpTileSize = SpIPV * SpConf::xdim;
  extern __shared__ char __shared[];
  char *__shared_end = &__shared[0];

  // First dense output column owned by this thread.
  const uint col_base = (DnConf::xid() + DnConf::xdim*blockIdx.y)*DnIPV*DnIters;

  // Even static split of the ne edges over all sparse groups in the grid.
  const Idx stride = CEIL(ne, gridDim.x*SpConf::ydim);
  const Idx start = (SpConf::yid() + blockIdx.x * SpConf::ydim)*stride;
  const Idx end = MIN(start + stride, ne);

  llvlib::Vect<DType, DnIPV> local_sum[DnIters];
  for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val); 

  if (spwalk == push_seq)
  {
    if (spload == shared)
    {
      proceed_push_shared_cached<F, Idx, DType, 
                                spwalk, dncomp, red, 
                                SpConf, DnConf, SpIPV, DnIPV, DnIters>(
          start, end, col_base, row_offset, col_indx,
          edge_weight, src_ptr, ld_src, dst_ptr, ld_dst, nv, vlen, local_sum,
          __shared_end);
      // Account for the staging buffers consumed by the cached walker.
      // Fix: this bump was indented as if it belonged to the brace-less
      // `spload == shared` branch but executed unconditionally; it is now
      // scoped as intended (nothing reads __shared_end later on this path,
      // so observable behavior is unchanged).
      __shared_end += (sizeof(Idx) + sizeof(DType)) * SpConf::coop_size() * SpIPV;
    }
    if (spload == none) 
      proceed_push_simple<F, Idx, DType, 
                          spwalk, dncomp, red, 
                          SpConf, DnConf, SpIPV, DnIPV, DnIters>(
          start, end, col_base, row_offset, col_indx,
          edge_weight, src_ptr, ld_src, dst_ptr, ld_dst, nv, vlen, local_sum
          );
  }
  if (spwalk == push_para)
  {
    proceed_push_reduced<F, Idx, DType, 
                         spwalk, dncomp, red, 
                         SpConf, DnConf, SpIPV, DnIPV, DnIters>(
        start, end, col_base, row_offset, col_indx,
        edge_weight, src_ptr, ld_src, dst_ptr, ld_dst, nv, vlen, local_sum, __shared_end
    );
  }
}

template <typename F, typename Idx, typename DType, 
          SpWalk spwk, SpLoad spload, 
          DnCompute dncomp, Reduction reduction, 
          typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__global__ void spmm_kernel_pull(
  const Idx  * __restrict__ row_indx,
  const Idx  * __restrict__ row_offset,
  const Idx  * __restrict__ col_indx,
  const DType * __restrict__ edge_weight,
  const DType * __restrict__ src_vecs,
  const Idx  ld_src,
  DType * __restrict__ dst_vecs,
  const Idx  ld_dst,
  const Idx  nv, 
  const Idx  ne, 
  const Idx  vlen)
{
  // Pull-style SpMM kernel: each sparse group owns one destination row
  // (blockIdx.x tiles rows, blockIdx.y tiles dense columns), walks that
  // row's CSR edge range with the compile-time-selected loader, then writes
  // the accumulated result back via WritebackSwitch.
  using Coord =  llvlib::CoopCoordinator<SpConf, DnConf>;
  static_assert(!Coord::SparseMajor && "not handled currently");

  // dispatch shared memory base pointers
  extern __shared__ char __shared[];
  char *__shared_end = &__shared[0];

  // First dense output column owned by this thread.
  const uint col_base = (DnConf::xid() + DnConf::xdim*blockIdx.y)*DnIPV*DnIters;
  const Idx RowOffset = blockIdx.x * SpConf::ydim + SpConf::yid();
  // NOTE(review): whole sparse groups past nv return here before the
  // SpConf::sync() calls inside the cached loaders / shared writebacks; if
  // SpConf::sync() is block-wide this is a divergent-barrier hazard when
  // groups of one block disagree — verify SpConf::sync scope.
  if ( RowOffset >= nv ) return;
  // pull_dyn walks an index array of (pre-sorted) row ids; otherwise the
  // offset is the row id itself.
  const Idx RowId = spwk == pull_dyn ? row_indx[RowOffset] : RowOffset;

  llvlib::Vect<DType, DnIPV> local_sum[DnIters];
  for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val); 

  // Dispatch on the (compile-time) sparse-load strategy; exactly one of
  // these branches survives constant folding.
  if (spload == none) 
  {
    proceed_pull_non_cached<F, Idx, DType, dncomp, SpConf, DnConf, DnIPV, DnIters>(
        row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  } 
  if (spload == local) {
    proceed_pull_local_cached<F, Idx, DType, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>(
        row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  } 
  if (spload == shfl) {
    proceed_pull_warp_cached<F, Idx, DType, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>(
        row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum);
  }
  if (spload == shared) {
    proceed_pull_shared_cached<F, Idx, DType, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>
        (row_offset[RowId], row_offset[RowId+1], col_base,
        col_indx, edge_weight, src_vecs, ld_src, local_sum, __shared_end);
    
    // __shared_end += (sizeof(Idx) + sizeof(DType)) * SpConf::coop_size();
  }
  
  // NOTE(review): for shared_red the cursor is advanced by ybase() elements
  // and the staging region above is not accounted for (the bump is
  // commented out), so the reduce buffer may alias the load staging area —
  // presumably safe because the loaders have finished; verify.
  if ( reduction == shared_red ) {
    __shared_end += sizeof(DType) * SpConf::ybase();
  }

  WritebackSwitch<F, Idx, DType, 
                  spwk, reduction, 
                  SpConf, DnConf, DnIPV, DnIters>::get_writeback()
    (dst_vecs, ld_dst*RowId, col_base, vlen, local_sum, nullptr, (DType*)__shared_end);

  // if (Coord::Mutual)
  // {
  //   writeback_naive<Idx, DType, DnConf, DnIPV, DnIters>(
  //     dst_vecs, ld_dst*RowId, col_base, vlen, local_sum);
  // }
  // if (Coord::DenseMajor)
  // {
  //   if (reduction == atomic)
  //   {
  //     writeback_atomic_direct<F, Idx, DType, DnConf, DnIPV, DnIters>(
  //         dst_vecs, ld_dst*RowId, col_base, vlen, local_sum);
  //   }
    
  //   if (reduction == shfl_red && SpConf::xdim <= 32)
  //   {
  //     writeback_wreduce<F, Idx, DType, SpConf, DnConf, DnIPV, DnIters>(
  //         dst_vecs, ld_dst*RowId, col_base, vlen, local_sum);
  //   }

  //   if (reduction == shared_red || SpConf::xdim > 32)
  //   {
  //     writeback_sreduce<F, Idx, DType, SpConf, DnConf, DnIPV, DnIters>(
  //     dst_vecs, ld_dst*RowId, col_base, vlen, local_sum, sh_reduce_buffer[SpConf::yid()]);
  //   }
  // }
}


// this one does not work well
// template <typename F, typename Idx, typename DType, 
//           SpWalk spwk, SpLoad spload, 
//           DnCompute dncomp, Reduction reduction,
//           typename SpConf, typename DnConf, 
//           uint SpIPV, uint DnIPV, uint DnIters>
// __global__ void spmm_kernel_pull(
//   const Idx  * __restrict__ row_indx,
//   const Idx  * __restrict__ row_offset,
//   const Idx  * __restrict__ col_indx,
//   const DType * __restrict__ edge_weight,
//   const DType * __restrict__ src_vecs,
//   const Idx  ld_src,
//   DType * __restrict__ dst_vecs,
//   const Idx  ld_dst,
//   const Idx  nv, 
//   const Idx  ne, 
//   const Idx  vlen,
//   Idx *gl_cnt)
// {
//   typedef llvlib::CoopCoordinator<SpConf, DnConf> Coord;
//   static_assert(!Coord::SparseMajor && "not handled currently");

//   constexpr uint SpTileSize = SpIPV * SpConf::xdim;
//   __shared__ llvlib::Vect<Idx, SpTileSize> spidx_buffer[SpConf::ydim];
//   __shared__ llvlib::Vect<DType, SpTileSize> spval_buffer[SpConf::ydim];
//   __shared__ DType sh_reduce_buffer[SpConf::ydim][SpConf::xdim];
//   __shared__ Idx sh_row_bcaster[SpConf::ydim];

//   bool weighted = (edge_weight != nullptr);
//   bool sorted = (row_indx != nullptr);

//   const uint col_base = (DnConf::xid() + DnConf::xdim*blockIdx.y)*DnIPV*DnIters;
//   Idx RowOffset = sparse_row_scheduler<spwk, SpConf, Idx, true>
//                       (0, gl_cnt+blockIdx.y, &sh_row_bcaster[SpConf::yid()]);
//   while (RowOffset <= nv)
//   {
//     Idx RowId = sorted ? row_indx[RowOffset] : RowOffset;
//     llvlib::Vect<DType, DnIPV> local_sum[DnIters];
//     for (uint i=0; i<DnIters; ++i) local_sum[i].set_thread(F::init_val); 
//     
//     ... ...
// 
//     RowOffset = sparse_row_scheduler<spwk, 
//                                      SpConf, 
//                                      Idx, 
//                                      false>(RowOffset, gl_cnt+blockIdx.y, &sh_row_bcaster[SpConf::yid()]);
//   }
// }

// Naive SpMM: C(row, :) = sum_k A(row, k) * B(k, :).
// One warp computes one row of C; each lane owns 4 consecutive columns of B/C,
// accessed as a float4. Assumes blockDim.x == 32 and num_col % 4 == 0.
__global__ void spmm_naive(
    const uint32_t* __restrict__ A_row,
    const uint32_t* __restrict__ A_col,
    const float* __restrict__ A_val,
    const float* __restrict__ B,
    float* __restrict__ C,
    uint32_t num_col)
{
    uint32_t rowA = blockIdx.x * blockDim.y + threadIdx.y;
    // rowA is identical across the warp's lanes; reconstructing it bit-by-bit
    // through a ballot lets the compiler treat it as a warp-uniform value.
    rowA = __ballot_sync(0xffffffff, (rowA & (1 << threadIdx.x)) != 0);

    const uint32_t nnz_begin = A_row[rowA];
    const uint32_t nnz_end = A_row[rowA + 1];
    const uint32_t colB = (blockIdx.y * blockDim.x + threadIdx.x) * 4;
    const float4* B_vec = reinterpret_cast<const float4*>(&B[colB]);
    float4 acc = { 0.0f, 0.0f, 0.0f, 0.0f };

    // Each lane stages one (col, val) pair of the current 32-wide chunk of the
    // row; the pair for nonzero `k` lives in lane k % 32 and is broadcast to the
    // whole warp via shuffle.
    uint32_t col_buf;
    float val_buf;
    const uint32_t chunk0 = nnz_begin & ~31u;  // first chunk, aligned down to 32
    if (nnz_begin != chunk0)
    {
        // Pre-load the partial leading chunk.
        const uint32_t idx = chunk0 + threadIdx.x;
        if (idx >= nnz_begin && idx < nnz_end)
        {
            col_buf = A_col[idx];
            val_buf = A_val[idx];
        }
    }
    for (uint32_t k = nnz_begin; k < nnz_end; ++k)
    {
        // Refill the per-lane staging registers at every 32-aligned boundary.
        if ((k & 31u) == 0 && k + threadIdx.x < nnz_end)
        {
            col_buf = A_col[k + threadIdx.x];
            val_buf = A_val[k + threadIdx.x];
        }
        const uint32_t colA = __shfl_sync(0xffffffff, col_buf, k % 32, 32);
        const float4 b = B_vec[colA * num_col / 4];
        const float a = __shfl_sync(0xffffffff, val_buf, k % 32, 32);
        acc.x += a * b.x;
        acc.y += a * b.y;
        acc.z += a * b.z;
        acc.w += a * b.w;
    }
    reinterpret_cast<float4*>(&C[rowA * num_col + colB])[0] = acc;
}


// SpMM over one row of A per thread block, split across TDY warps:
// warp y processes the y-th 1/TDY slice of the row's nonzeros; per-warp float4
// partial sums are combined through shared memory when TDY > 1. Each lane owns
// 4 consecutive columns of B/C (float4). Assumes blockDim.x == 32 and
// num_col % 4 == 0.
//
// Fixes vs. previous revision:
//  * __syncthreads() after zeroing sum_buf — without it a fast warp could
//    atomicAdd before a slow warp's zero-store, losing its contribution.
//  * empty per-warp slices no longer `return` past the barriers. Barrier
//    divergence is UB, and with 0 < nnz < TDY the slice split gives warp 0 an
//    empty slice, so the writing warp exited and the row's output was dropped.
//    Such warps now skip the compute but still join the barriers.
//  * with has_add and TDY > 1, the old C value was folded into every warp's
//    partial sum and therefore counted TDY times; it is now added exactly once
//    at writeback.
template<bool has_add, uint32_t TDY>
__global__ void spmm_l2(
    const uint32_t* __restrict__ A_row_begin,
    const uint32_t* __restrict__ A_row_end,
    const uint32_t* __restrict__ A_col,
    const float* __restrict__ A_val,
    const float* __restrict__ B,
    float* __restrict__ C,
    uint32_t num_col)
{
    __shared__ float sum_buf[4][32];

    uint32_t rowA = blockIdx.x;
    uint32_t offA_begin_tmp = A_row_begin[rowA];
    uint32_t offA_end_tmp = A_row_end[rowA];
    // Row emptiness depends only on blockIdx.x, so this return is taken by all
    // threads of the block or by none — no barrier divergence. C is left
    // untouched for empty rows (same as the previous revision).
    if (offA_begin_tmp == offA_end_tmp) return;
    if (TDY > 1)
    {
        // sum_buf is 32 float4s; lanes 0..31 of every warp zero it redundantly.
        reinterpret_cast<float4*>(sum_buf)[threadIdx.x] = float4{0.0f, 0.0f, 0.0f, 0.0f};
        __syncthreads();  // publish the zeroed buffer before any warp atomicAdds
    }
    // Warp y takes nonzeros [begin + nnz*y/TDY, begin + nnz*(y+1)/TDY).
    uint32_t offA_begin = offA_begin_tmp + (offA_end_tmp - offA_begin_tmp) * threadIdx.y / TDY;
    uint32_t offA_end = offA_begin_tmp + (offA_end_tmp - offA_begin_tmp) * (threadIdx.y + 1) / TDY;
    // Both bounds are warp-uniform; rebuilding them bit-by-bit through ballots
    // lets the compiler treat them as uniform values.
    offA_begin = __ballot_sync(0xffffffff, (offA_begin & (1 << threadIdx.x)) != 0);
    offA_end = __ballot_sync(0xffffffff, (offA_end & (1 << threadIdx.x)) != 0);

    uint32_t colB = (blockIdx.y * blockDim.x + threadIdx.x) * 4;
    // With TDY > 1 the old C value is added once at writeback instead of here.
    float4 sum = (has_add && TDY == 1)
        ? *reinterpret_cast<const float4*>(&C[rowA * num_col + colB])
        : float4{ 0.0f, 0.0f, 0.0f, 0.0f };
    const float4* B_gptr = reinterpret_cast<const float4*>(&B[colB]);

    // `active` is warp-uniform (bounds came from ballots), so whole warps take
    // the branch together and the full-mask shuffles inside are safe.
    const bool active = (offA_begin != offA_end);
    if (active)
    {
        // Double-buffered register staging of A's (col, val) pairs: each lane
        // holds one pair of the current 32-wide chunk and one of the next;
        // pairs are broadcast lane-by-lane via shuffle. The FMA is software-
        // pipelined one step behind the load of valB.
        const float4* B_tptr;
        uint32_t colA_reg_buf;
        float valA_reg_buf;
        uint32_t colA_reg_buf_next;
        float valA_reg_buf_next;
        uint32_t offA = offA_begin & -32;  // align the first chunk down to 32
        if (offA + threadIdx.x >= offA_begin && offA + threadIdx.x < offA_end)
        {
            colA_reg_buf = A_col[offA + threadIdx.x];
            valA_reg_buf = A_val[offA + threadIdx.x];
        }
        if (offA + 32 + threadIdx.x < offA_end)
        {
            colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
            valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
        }
        offA = offA_begin;
        float valA = 0.0f;
        float4 valB = { 0.0f, 0.0f, 0.0f, 0.0f };
        uint32_t colA;
        // Prologue: the (possibly partial) first chunk.
        if (offA < (offA_begin & -32) + 32 && offA < offA_end)
        {
            colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
            valB = B_gptr[colA * (num_col / 4)];
            valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
            offA++;
#pragma nounroll
            while (offA < (offA_begin & -32) + 32 && offA < offA_end)
            {
                colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
                sum.x += valA * valB.x;
                sum.y += valA * valB.y;
                sum.z += valA * valB.z;
                sum.w += valA * valB.w;
                valB = B_gptr[colA * (num_col / 4)];
                valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
                offA++;
            }
        }
        // Main loop: whole 32-wide chunks, refilling the "next" buffer each round.
        offA = (offA_begin & -32) + 32;
        while (offA < offA_end)
        {
            colA_reg_buf = colA_reg_buf_next;
            valA_reg_buf = valA_reg_buf_next;
            if (offA + 32 + threadIdx.x < offA_end)
            {
                colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
                valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
            }
            #pragma unroll
            for (uint32_t i = 0; i < 32; i++)
            {
                if (offA >= offA_end)
                {
                    break;
                }
                colA = __shfl_sync(0xffffffff, colA_reg_buf, i, 32);
                B_tptr = &B_gptr[colA * (num_col / 4)];
                sum.x += valA * valB.x;
                sum.y += valA * valB.y;
                sum.z += valA * valB.z;
                sum.w += valA * valB.w;
                valB = *B_tptr;
                valA = __shfl_sync(0xffffffff, valA_reg_buf, i, 32);
                offA++;
            }
        }
        // Drain the last pipelined product.
        sum.x += valA * valB.x;
        sum.y += valA * valB.y;
        sum.z += valA * valB.z;
        sum.w += valA * valB.w;
    }
    if (TDY == 1)
    {
        *reinterpret_cast<float4*>(&C[rowA * num_col + colB]) = sum;
    }
    else
    {
        if (active)
        {
            atomicAdd(&sum_buf[0][threadIdx.x], sum.x);
            atomicAdd(&sum_buf[1][threadIdx.x], sum.y);
            atomicAdd(&sum_buf[2][threadIdx.x], sum.z);
            atomicAdd(&sum_buf[3][threadIdx.x], sum.w);
        }
        __syncthreads();  // every warp, active or not, reaches this barrier
        if (threadIdx.y == 0)
        {
            sum.x = sum_buf[0][threadIdx.x];
            sum.y = sum_buf[1][threadIdx.x];
            sum.z = sum_buf[2][threadIdx.x];
            sum.w = sum_buf[3][threadIdx.x];
            if (has_add)
            {
                // Fold the previous C value in exactly once.
                const float4 c_old = *reinterpret_cast<const float4*>(&C[rowA * num_col + colB]);
                sum.x += c_old.x;
                sum.y += c_old.y;
                sum.z += c_old.z;
                sum.w += c_old.w;
            }
            *reinterpret_cast<float4*>(&C[rowA * num_col + colB]) = sum;
        }
    }
}

// SpMM (C = A*B, or C += A*B when has_add) with software pacing of B accesses:
// one warp per row of A; each lane accumulates 4 consecutive columns of C as a
// float4. Warps throttle each other with __syncthreads() whenever any lane's
// next column index passes the current `sync_colA` threshold, so concurrent
// warps stream through B in loosely aligned column windows (presumably to
// improve L1 reuse, with sync_colA_step tuned to the cache — confirm).
// Assumes blockDim.x == 32 and num_col % 4 == 0.
//
// NOTE(review): the __syncthreads() calls below execute inside data-dependent
// control flow. Warps of one block process different rows (different nonzero
// counts and column ranges), and empty rows return before any barrier, so warps
// can reach different barrier counts — undefined behavior per the CUDA model
// unless the caller guarantees a matching schedule across the block. Verify the
// launch-side invariant.
template<bool has_add>
__global__ void spmm_l1(
    const uint32_t* __restrict__ A_row_begin,
    const uint32_t* __restrict__ A_row_end,
    const uint32_t* __restrict__ A_col,
    const float* __restrict__ A_val,
    const float* __restrict__ B,
    float* __restrict__ C,
    uint32_t num_col,
    uint32_t sync_colA_step = 48)
{
    uint32_t rowA = blockIdx.x * blockDim.y + threadIdx.y;
    // rowA is warp-uniform (one warp per row); rebuilding it bit-by-bit through
    // a ballot lets the compiler treat it as a uniform value.
    rowA = __ballot_sync(0xffffffff, (rowA & (1 << threadIdx.x)) != 0);
    uint32_t offA_begin = A_row_begin[rowA];
    uint32_t offA_end = A_row_end[rowA];
    // Each lane owns 4 consecutive columns of B/C starting at colB.
    uint32_t colB = (blockIdx.y * blockDim.x + threadIdx.x) * 4;
    float4 sum = has_add ? *reinterpret_cast<const float4*>(&C[rowA * num_col + colB]) : float4{ 0.0f, 0.0f, 0.0f, 0.0f };
    const float4* B_gptr = reinterpret_cast<const float4*>(&B[colB]);
    const float4* B_tptr;
    // Empty row: nothing to accumulate. NOTE(review): this return also skips
    // every barrier below — see the header note on barrier divergence.
    if (offA_begin == offA_end) return;

    // Double-buffered register staging of A's (col, val) pairs: each lane holds
    // one pair of the current 32-wide chunk (`*_reg_buf`) and one of the next
    // chunk (`*_reg_buf_next`); pairs are broadcast lane-by-lane via shuffle.
    uint32_t colA_reg_buf;
    float valA_reg_buf;
    uint32_t colA_reg_buf_next;
    float valA_reg_buf_next;
    uint32_t offA = offA_begin & -32;  // align the first chunk down to 32
    if (offA + threadIdx.x >= offA_begin && offA + threadIdx.x < offA_end)
    {
        colA_reg_buf = A_col[offA + threadIdx.x];
        valA_reg_buf = A_val[offA + threadIdx.x];
    }
    if (offA + 32 + threadIdx.x < offA_end)
    {
        colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
        valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
    }
    offA = offA_begin;
    float valA = 0.0f;
    float4 valB = { 0.0f, 0.0f, 0.0f, 0.0f };
    uint32_t colA;
    uint32_t sync_colA = sync_colA_step;
    // Prologue: consume the (possibly partial) first chunk. The FMA is software-
    // pipelined one step behind the load of valB, so the first iteration only
    // primes valA/valB.
    if (offA < (offA_begin & -32) + 32 && offA < offA_end)
    {
        colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
        valB = B_gptr[colA * (num_col / 4)];
        valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
        offA++;
        while (offA < (offA_begin & -32) + 32 && offA < offA_end)
        {
            colA = __shfl_sync(0xffffffff, colA_reg_buf, offA % 32, 32);
            sum.x += valA * valB.x;
            sum.y += valA * valB.y;
            sum.z += valA * valB.z;
            sum.w += valA * valB.w;
            // Pacing: once any lane's next column crosses the current window
            // boundary, advance the window and barrier with the other warps.
            while (__any_sync(0xffffffff, colA >= sync_colA))
            {
                sync_colA += sync_colA_step;
                __syncthreads();
            }
            valB = B_gptr[colA * (num_col / 4)];
            valA = __shfl_sync(0xffffffff, valA_reg_buf, offA % 32, 32);
            offA++;
        }
    }
    // Main loop: whole 32-wide chunks, refilling the "next" buffer each round.
    offA = (offA_begin & -32) + 32;
    while (offA < offA_end)
    {
        colA_reg_buf = colA_reg_buf_next;
        valA_reg_buf = valA_reg_buf_next;
        if (offA + 32 + threadIdx.x < offA_end)
        {
            colA_reg_buf_next = A_col[offA + 32 + threadIdx.x];
            valA_reg_buf_next = A_val[offA + 32 + threadIdx.x];
        }
        #pragma unroll
        for (uint32_t i = 0; i < 32; i++)
        {
            if (offA >= offA_end)
            {
                break;
            }
            colA = __shfl_sync(0xffffffff, colA_reg_buf, i, 32);
            B_tptr = &B_gptr[colA * (num_col / 4)];
            sum.x += valA * valB.x;
            sum.y += valA * valB.y;
            sum.z += valA * valB.z;
            sum.w += valA * valB.w;
            // Same pacing as in the prologue (see header NOTE on divergence).
            while (__any_sync(0xffffffff, colA >= sync_colA))
            {
                sync_colA += sync_colA_step;
                __syncthreads();
            }
            valB = *B_tptr;
            valA = __shfl_sync(0xffffffff, valA_reg_buf, i, 32);
            offA++;
        }
    }
    // Drain the last pipelined product, then write the lane's 4 columns.
    sum.x += valA * valB.x;
    sum.y += valA * valB.y;
    sum.z += valA * valB.z;
    sum.w += valA * valB.w;
    *reinterpret_cast<float4*>(&C[rowA * num_col + colB]) = sum;
}

};

// Default functor bundle consumed by the SpMM machinery above (passed as `F`):
// `binary` combines two operands, `reduce`/`reduceAtomic` fold a value into an
// accumulator through a pointer, and `init_val` seeds the accumulator.
//
// NOTE(review): these bodies look like placeholders meant to be replaced or
// specialized. The previous revision let control flow off the end of non-void
// functions (undefined behavior in C++ if instantiated and called); they now
// return a value-initialized DType. Signatures and init_val are unchanged.
template<typename DType>
struct Func{
  // Placeholder combine of two operands — TODO supply the real operation.
  static __device__ __forceinline__ DType binary(DType, DType){ return DType{}; }
  // Placeholder fold into *accumulator — TODO supply the real operation.
  static __device__ __forceinline__ DType reduce(DType*, DType){ return DType{}; }
  // Placeholder atomic fold into *accumulator — TODO supply the real operation.
  static __device__ __forceinline__ DType reduceAtomic(DType*, DType){ return DType{}; }
  // Identity element used to initialize per-thread partial sums.
  static constexpr DType init_val = (DType)0;
};

#endif