#include "impl/spmm_kernels.cuh"

namespace manscript{

// Host wrapper for the L2-optimized SpMM kernel.
// Launch shape: one block per sparse row (grid x) and per 128-column tile of
// the dense operand (grid y), 32 threads per block.
// NOTE(review): DENSE_COL / 128 truncates — this appears to assume DENSE_COL
// is a multiple of 128; confirm with callers. SPARSE_COL is accepted only for
// signature parity with lanuch_spmm_l1 and is not used here.
static inline void lanuch_spmm_l2(uint32_t* A_row, uint32_t* A_col, const float* A_val, const float* B, float* C,
    uint32_t SPARSE_ROW, uint32_t SPARSE_COL, uint32_t DENSE_COL, cudaStream_t stream)
{
    const dim3 grid(SPARSE_ROW, DENSE_COL / 128);
    const dim3 block(32);
    // A_row and A_row + 1 are passed as separate row-begin / row-end arrays —
    // presumably CSR row pointers where A_row[i]..A_row[i+1] delimits row i.
    spmm_l2<false, 2><<<grid, block, 0, stream>>>(A_row, A_row + 1, A_col, A_val, B, C, DENSE_COL);
}

// Host wrapper for the L1-optimized SpMM kernel.
// Launch shape: one block per 16 sparse rows (grid x) and per 128-column tile
// of the dense operand (grid y), 32x16 threads per block.
// NOTE(review): SPARSE_ROW / 16 and DENSE_COL / 128 both truncate — appears to
// assume divisibility; confirm with callers. SPARSE_COL is not used here.
static inline void lanuch_spmm_l1(uint32_t* A_row, uint32_t* A_col, const float* A_val, const float* B, float* C,
    uint32_t SPARSE_ROW, uint32_t SPARSE_COL, uint32_t DENSE_COL, cudaStream_t stream)
{
    const dim3 grid(SPARSE_ROW / 16, DENSE_COL / 128);
    const dim3 block(32, 16);
    spmm_l1<false><<<grid, block, 0, stream>>>(A_row, A_row + 1, A_col, A_val, B, C, DENSE_COL);
}

} // namespace manscript

// Preprocessing step for the buffered SpMM path: allocates one device-side
// counter per dense-column block behind *cntrs and returns the size of that
// allocation in bytes.
//
// Only sizes[1] — presumably the dense column count, matching vlen in
// _spmm_manscript — is consumed; the remaining CSR arrays and template knobs
// are accepted so the signature mirrors _spmm_manscript and further kernel
// configuration can be added here later.
template <typename F, typename Idx, typename DType,
          SpWalk spwalk, SpLoad spload, DnCompute dn_compute,
          Reduction reduction, uint sp_warp, uint dn_warp, 
          uint spipv, uint dnipv, uint dn_unroll>
uint _spmm_manscript_buffer(const Idx *row_index, 
                           const Idx* row_offset, 
                           const Idx* col_indx, 
                           const DType* edge_weight, 
                           const Idx sizes[],
                           const Idx strides[],
                           const Idx nedge,
                           Idx **cntrs)
{
  // One counter per block of dn_warp * dnipv * dn_unroll dense columns.
  const uint col_blocks = CEIL(sizes[1], dn_warp * dnipv * dn_unroll);
  const size_t nbytes = sizeof(Idx) * col_blocks;
  H_ERR(cudaMalloc(cntrs, nbytes));
  // do all the kernel configure in this preprocess function 
  // cudaFuncSetCacheConfig(manscript::spmm_l1, cudaFuncCachePreferL1);
  return nbytes;
}

// Dispatches a single SpMM kernel launch according to the compile-time
// configuration.
//
// Template knobs select the sparse traversal strategy (spwalk), how the
// sparse operand is staged (spload), the dense compute / reduction flavors,
// and the warp / items-per-vertex / unroll tiling factors. Runtime arguments
// describe the sparse matrix (row_index/row_offset/col_indx/edge_weight,
// nv rows, ne edges) multiplied against a dense operand `src` (leading
// dimension ld_src) into `dst` (leading dimension ld_dst); vlen is the dense
// feature length used to size grid.y. The launch is enqueued on `stream`;
// no synchronization happens here.
//
// Fix: removed the unused local `const uint _THD = THD;` — both launches
// below use the THD macro directly and the local was dead.
template <typename F, typename Idx, typename DType,
          SpWalk spwalk, SpLoad spload, DnCompute dn_compute,
          Reduction reduction, uint sp_warp, uint dn_warp, 
          uint spipv, uint dnipv, uint dn_unroll>
void _spmm_manscript(const Idx *row_index, 
                     const Idx *row_offset, 
                     const Idx *col_indx, 
                     const DType *edge_weight, 
                     const DType *src,
                     const uint ld_src,
                     DType *dst,
                     const uint ld_dst,
                     const Idx nv, const Idx ne, const Idx vlen,
                     cudaStream_t stream)
{
  // Cooperative-group shapes: 256 threads per block split into sparse-side
  // (SpConf) and dense-side (DnConf) tiles of sp_warp / dn_warp lanes.
  using SpConf = llvlib::CoopConfig<256/sp_warp, sp_warp>;
  using DnConf = llvlib::CoopConfig<256/dn_warp, dn_warp>;

  // Dynamic shared memory request: MAX rather than sum of the two features —
  // presumably the staging buffer and the reduction scratch reuse the same
  // region; confirm against the kernel implementations.
  uint shared_usage = 0;
  if ( spload == shared ) {
    // Staged sparse entries: an index plus a weight per cooperative lane, spipv deep.
    shared_usage = MAX(shared_usage, (sizeof(Idx)+sizeof(DType))*SpConf::coop_size()*spipv);
  }

  if ( reduction == shared_red ) {
    uint red_usage = sizeof(DType)*SpConf::coop_size();
    // push_para needs extra slots (one per dense x-slice) for cross-tile partials.
    if ( spwalk == push_para ) red_usage += SpConf::coop_size() / DnConf::xdim; 
    shared_usage = MAX(shared_usage, red_usage);
  }

  if (spwalk <= pull_dyn)
  {
    // Pull-style walks: grid.x tiles the nv rows, grid.y tiles the vlen columns.
    uint col_blocks = CEIL(vlen, DnConf::xdim*dnipv*dn_unroll);
    uint row_blocks = CEIL(nv, SpConf::ydim);

    auto kernel = manscript::spmm_kernel_pull<F, Idx, DType, 
                                              spwalk, spload, dn_compute, reduction,
                                              SpConf, DnConf, spipv, dnipv, dn_unroll>;

    CUDA_LAUNCH_CHECK(kernel<<<dim3(row_blocks, col_blocks), THD, shared_usage, stream>>>(
                      row_index,
                      row_offset, col_indx, edge_weight, 
                      src, ld_src, dst, ld_dst,
                      nv, ne, vlen));

  }
  else if (spwalk == push_seq)
  {
    // Push-style walk: grid.x tiles the ne edges instead of the rows.
    uint col_blocks = CEIL(vlen, DnConf::xdim*dnipv*dn_unroll);
    uint row_blocks = CEIL(ne, SpConf::xdim);

    CUDA_LAUNCH_CHECK(manscript::spmm_kernel_push<F, Idx, DType, 
                                                  spwalk, spload, dn_compute, reduction, 
                                                  SpConf, DnConf, spipv, dnipv, dn_unroll>
                      <<<dim3(row_blocks, col_blocks), THD, shared_usage, stream>>>(
                      row_offset, col_indx, edge_weight, 
                      src, ld_src, dst, ld_dst,
                      nv, ne, vlen));
  } 
  else if ( spwalk == l1_opt ) 
  {
    // NOTE(review): the casts require Idx to be a 32-bit type and DType to be
    // float for these instantiations — confirm. Also verify the argument
    // mapping: ld_src/ld_dst are forwarded into the SPARSE_ROW/SPARSE_COL
    // slots of the wrapper (nv might be intended for SPARSE_ROW).
    manscript::lanuch_spmm_l1(reinterpret_cast<uint32_t*>(const_cast<Idx*>(row_offset)), reinterpret_cast<uint32_t*>(const_cast<Idx*>(col_indx)), edge_weight, src, dst, ld_src, ld_dst, vlen, stream);
  } 
  else if ( spwalk == l2_opt ) 
  {
    // NOTE(review): same cast and argument-mapping caveats as the l1_opt branch.
    manscript::lanuch_spmm_l2(reinterpret_cast<uint32_t*>(const_cast<Idx*>(row_offset)), reinterpret_cast<uint32_t*>(const_cast<Idx*>(col_indx)), edge_weight, src, dst, ld_src, ld_dst, vlen, stream);
  }
}
