#ifndef _SDDMM_CUH__ 
#define _SDDMM_CUH__ 

#include <cuda.h> 
#include <vector_types.h>

#include "../dense.cuh"
#include "../specification.hxx"

#include "config.hxx"

namespace Alternative1{

template <typename F, typename IT, typename T,
          DnCompute DnCom, typename DnConf, 
          uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_full(const T *src_l,  
                const T *src_r,
                const IT ld_src,
                const IT row, 
                const IT col,
                IT col_base, 
                const IT vlen,
                llvlib::Vect<T,1>& local_sum)
{
  // Walk up to DnIters thread-strided vector chunks of the dense rows `row`
  // (from src_l) and `col` (from src_r), folding them into local_sum
  // according to the compute mode:
  //   u_e_v / u_v : reduce(binary(lhs, rhs))  -- partial inner product
  //   u_e         : reduce over the rhs chunk only
  //   e_v         : reduce over the lhs chunk only
  // ld_src is the leading dimension of both dense operands; col_base is this
  // thread's starting feature column; vlen is the feature-vector length.
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;
  constexpr uint stride = DnConf::xdim * DnIPV;
  uint lhs_off = row*ld_src+col_base;
  uint rhs_off = col*ld_src+col_base;
  
  Vect<T,DnIPV> lhs_reg, rhs_reg;
  for (uint i=0; i<DnIters; ++i)
  {
    // Tail guard (bug fix): col_base is now advanced by `stride` each
    // iteration. Previously it stayed constant, so only iteration 0 was
    // guarded and later iterations could read past vlen on partial tail
    // column blocks.
    if (col_base >= vlen) break;
    // row feature chunk needed unless mode is u_e
    if (DnCom != u_e) lhs_reg.template load_thread_offset<dnv_t>(const_cast<T*>(src_l), lhs_off);
    // col feature chunk needed unless mode is e_v
    if (DnCom != e_v) rhs_reg.template load_thread_offset<dnv_t>(const_cast<T*>(src_r), rhs_off);

    if ( DnCom == u_e_v || DnCom == u_v )
      F::reduce(&local_sum[0], thread_inner_produce<T, Vect<T, DnIPV>, Vect<T, DnIPV>, DnIPV>(
        lhs_reg, rhs_reg, F::binary, F::reduce));
    
    if ( DnCom == u_e )
      F::reduce(&local_sum[0], thread_reduce<T, Vect<T, DnIPV>, DnIPV>(rhs_reg, F::reduce));

    if ( DnCom == e_v )
      F::reduce(&local_sum[0], thread_reduce<T, Vect<T, DnIPV>, DnIPV>(lhs_reg, F::reduce));

    lhs_off += stride;
    rhs_off += stride;
    col_base += stride;  // keep the tail guard in sync with the offsets
  }
}


template <typename F, typename IT, typename T, 
           DnCompute DnCom, typename DnConf, 
          uint DnIPV, uint DnIters>
__device__ __forceinline__ void
dense_walk_cached(const T* src_ptr,  
                 const IT ld_src,
                 const IT row, 
                 const IT col,
                 IT col_base, 
                 const IT vlen,
                 llvlib::Vect<T,1>& local_sum,
                 llvlib::Vect<T,DnIPV> dn_cache[])
{
  // Same walk as dense_walk_full, but the lhs (row) side is already staged
  // in the dn_cache[] registers; only the rhs (col) side is streamed from
  // src_ptr. `row` is kept for interface symmetry with dense_walk_full.
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;
  constexpr uint stride = DnConf::xdim * DnIPV;
  uint offset = col*ld_src+col_base;
  
  Vect<T,DnIPV> rhs_reg;
  for (uint i=0; i<DnIters; ++i)
  {
    // Tail guard (bug fix): vlen was accepted but never checked, so partial
    // tail column blocks read past the end of the dense row. Mirror the
    // guard used by dense_walk_full.
    if (col_base >= vlen) break;
    // rhs chunk needed unless mode is e_v (which reduces the cached lhs only)
    if (DnCom != e_v) rhs_reg.template load_thread_offset<dnv_t>(const_cast<T*>(src_ptr), offset);

    if ( DnCom == u_e_v || DnCom == u_v )
      F::reduce(&local_sum[0], thread_inner_produce<T, Vect<T, DnIPV>, Vect<T, DnIPV>, DnIPV>(
        rhs_reg, dn_cache[i], F::binary, F::reduce));
    
    if ( DnCom == u_e )
      F::reduce(&local_sum[0], thread_reduce<T, Vect<T, DnIPV>, DnIPV>(rhs_reg, F::reduce));

    if ( DnCom == e_v )
      F::reduce(&local_sum[0], thread_reduce<T, Vect<T, DnIPV>, DnIPV>(dn_cache[i], F::reduce));

    offset += stride;
    col_base += stride;  // keep the tail guard in sync with the offset
  }
}


template <typename F, typename IT, typename T,
          typename SpConf, typename DnConf, 
          int DnIPV, int DnIters>
__device__ __forceinline__ void
cache_dense_local(const T* src_base, 
                   IT offset, 
                   llvlib::Vect<T,DnIPV> dn_cache[])
{
  // Prefetch DnIters vector chunks of a dense row into per-thread registers.
  // Chunk k is loaded at src_base[offset + k * DnConf::xdim * DnIPV]
  // (thread-strided, matching the walk functions' access pattern).
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;
  constexpr int step = DnConf::xdim * DnIPV;

#pragma unroll 
  for (int k = 0; k < DnIters; ++k, offset += step)
    dn_cache[k].template load_thread_offset<dnv_t>(const_cast<T*>(src_base), offset);
}

template <typename F, typename IT, typename T,
          typename SpConf, typename DnConf, 
          uint DnIPV, uint DnIters>
__device__ __forceinline__ void
cache_dense_shared(const T* src_base, 
                   IT offset, 
                   llvlib::Vect<T, SpConf::xdim*DnIPV> &dn_cache)
{
  // Cooperatively stage SpConf::xdim*DnIPV elements of a dense row (starting
  // at src_base[offset]) into the shared-memory vector dn_cache.
  using namespace llvlib;
  typedef typename VectTypes<T, DnIPV>::type dnv_t;

  // Group-cooperative load, then a sync so every participating thread sees
  // the staged data before reading it back.
  // NOTE(review): sync scope is whatever SpConf::sync() provides — callers
  // that early-return before reaching this must be safe w.r.t. that scope.
  dn_cache.template load_warp_offset<dnv_t, SpConf>(const_cast<T*>(src_base), offset);
  SpConf::sync();
}


template <typename F, typename IT, typename T, 
          DnCompute dncomp, typename DnConf,
          int DnIPV, uint DnIters>
__device__ __forceinline__ void
writeback_wreduce(T val, 
                  T* dst_base, 
                  const IT offset, 
                  llvlib::Vect<T,1>& local_sum)
{
  // Reduce local_sum across the dense group via shuffles, fold in the edge
  // weight `val` (skipped in u_v mode, which carries no weight), then store
  // the result at dst_base[offset].
  // NOTE(review): every lane executes the store — presumably either all
  // lanes hold the same value after warp_reduce_shuffle or dump_thread_offset
  // selects a lane internally; confirm in llvlib.
  using namespace llvlib;
  warp_reduce_shuffle<T, Vect<T,1>&, 1, 1, DnConf>(local_sum, F::reduce);
  if (dncomp != u_v) local_sum[0] = F::binary(val, local_sum[0]);
  local_sum.template dump_thread_offset<T>(dst_base, offset);
}


template <typename F, typename IT, typename T, 
          DnCompute dncomp, typename DnConf, 
          uint DnIPV, uint DnIters>
__device__ __forceinline__ void
writeback_atomic_wreduce(T val,  
                         T* dst_base, 
                         const IT offset, 
                         llvlib::Vect<T,1>& local_sum)
{
  // Reduce local_sum across (at most) one hardware warp, fold in the edge
  // weight `val` (skipped in u_v mode), then have the warp leader atomically
  // combine the result into dst_base[offset]. The atomic flavour lets
  // partials from other warps/blocks accumulate at the same address.
  using namespace llvlib;
  // Clamp the shuffle width to a single hardware warp even when the dense
  // group (DnConf::xdim) is wider than 32 lanes.
  constexpr uint warpsz = MIN(DnConf::xdim, 32);
  using WarpConf = CoopConfig<0,warpsz>;
  warp_reduce_shuffle<T, Vect<T,1>&, 1, 1, WarpConf>(local_sum, F::reduce);
  
  if (dncomp != u_v) local_sum[0] = F::binary(val, local_sum[0]);
  
  // Only the group leader (lane 0 of the warp config) publishes.
  if (!WarpConf::xid())
    thread_binary_elementwise_inline<T, Vect<T,1>, T*, 1>(local_sum, dst_base+offset, F::reduceAtomic);
  // local_sum.template dump_thread_offset<T>(dst_base, offset);
    
}


template <typename IT, typename T, 
          DnCompute dncomp, typename DnConf, 
          uint DnIPV, uint DnIters>
__device__ __forceinline__ void 
writeback_atomic_sreduce(T* dst_base, 
                         const IT offset, 
                         llvlib::Vect<T,1>& local_sum, 
                         T _v_buf[])
{
  // TODO(unimplemented): shared-memory segmented-reduce writeback variant.
  // The original body only computed an unused typedef and stride; that dead
  // code is removed. The interface is kept so callers compile; calling this
  // is currently a no-op.
  (void)dst_base;
  (void)offset;
  (void)local_sum;
  (void)_v_buf;
}


template <typename F, typename IT, typename T, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_simple(const IT start, 
                    const IT end,
                    const uint col_base,
                    const IT *row_offset, 
                    const IT *col_ptr, 
                    const T *val_ptr,
                    const T *src_l, 
                    const T *src_r, 
                    const IT ld_src, 
                    T *dst_ptr, 
                    const IT ld_dst,
                    const IT nv, 
                    const IT vlen)
{
  // Sequential push traversal: walk edges [start, end) in order, accumulate
  // per-row partial results in local_sum, and flush the accumulator whenever
  // the walk crosses a row boundary (and once more at the end).
  uint eid = start;
  // Locate the row owning the first edge via binary search on the CSR offsets.
  uint vid = llvlib::binary_search(eid, row_offset, 0, nv);
  uint bound = row_offset[vid+1];
  bool updated = false;
  IT col;
  T val = F::init_val;  // only read by the writeback when DnCom != u_v

  llvlib::Vect<T,1> local_sum;
  local_sum.set_thread(F::init_val);
  
  while ( eid < end )
  {
    // Advance across row boundaries — possibly several at once for empty
    // rows (bug fix: the old `if (eid == bound)` stepped only one row per
    // edge, attributing edges after an empty row to the wrong vid and,
    // since `updated` was never cleared, flushing spurious init-valued sums).
    while ( eid >= bound )
    {
      if ( updated )
      {
        // Flush row `vid`.
        // (bug fix: this call used to pass 6 arguments to the 4-argument
        // writeback_atomic_wreduce and could not compile when instantiated.
        // NOTE(review): offset reconstructed as ld_dst*vid + col_base from
        // the arguments the old call supplied — confirm against the intended
        // output layout.)
        writeback_atomic_wreduce<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
            val, dst_ptr, ld_dst*vid + col_base, local_sum);
        local_sum.set_thread(F::init_val);
        updated = false;
      }
      vid += 1;
      bound = row_offset[vid+1];
    }
    col = col_ptr[eid];
    if (DnCom != u_v) val = val_ptr[eid];
    dense_walk_full<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
        src_l, src_r, ld_src,
        vid, col, col_base, vlen, 
        local_sum);
    updated = true;
    eid += 1;
  }
  
  // Flush the last open row.
  if ( updated )
    writeback_atomic_wreduce<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
        val, dst_ptr, ld_dst*vid + col_base, local_sum);
}


template <typename F, typename IT, typename T, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_push_reduced(const IT start, 
                     const IT end, 
                     const uint col_base,
                     const IT *row_offset, 
                     const IT *col_ptr, 
                     const T *val_ptr,
                     const T *src_l, 
                     const T *src_r, 
                     const IT ld_src, 
                     T *dst_ptr,
                     const IT nv,
                     const IT vlen)
{
  // Edge-parallel push traversal: SpConf::xdim / DnConf::xdim dense groups
  // cooperate on [start, end), each group taking every n_groups-th edge.
  // Each edge's partial product is reduced and written at dst_ptr[edge].
  constexpr uint n_groups = SpConf::xdim / DnConf::xdim;
  const uint group = DnConf::yid() % n_groups;

  for (uint edge = start + group; edge < end; edge += n_groups)
  {
    const IT dst_col = col_ptr[edge];
    // u_v mode carries no per-edge weight; use the neutral 1.
    const T weight = DnCom != u_v ? val_ptr[edge] : 1.;

    // Fresh accumulator per edge.
    llvlib::Vect<T, 1> acc;
    acc.set_thread(F::init_val);

    // Recover the source row owning this edge from the CSR offsets.
    const uint src_row = llvlib::binary_search(edge, row_offset, 0u, nv);

    dense_walk_full<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
        src_l, src_r, ld_src, src_row, dst_col, col_base, vlen, acc);

    // Groups wider than one hardware warp need the atomic cross-warp combine.
    if ( DnConf::xdim > 32 )
      writeback_atomic_wreduce<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
          weight, dst_ptr, edge, acc);
    else
      writeback_wreduce<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
          weight, dst_ptr, edge, acc);
  }
}

template <typename F, typename IT, typename T, 
          DnCompute DnCom, typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__device__ __forceinline__ void
proceed_pull_naive(const IT start, 
                   const IT end, 
                   const uint col_base,
                   const IT row_id, 
                   const IT *col_ptr, 
                   const T *val_ptr,
                   const T *src_ptr, 
                   const IT ld_src, 
                   T *dst_ptr,
                   const IT nv,
                   const IT vlen,
                   llvlib::Vect<T, DnIPV> dn_cache[])
{
  // Pull traversal over one row's adjacency [start, end): the dense groups
  // within the sparse group split the edges round-robin, reusing the
  // register-cached lhs row (dn_cache) against each neighbour's rhs row.
  constexpr uint n_groups = SpConf::xdim / DnConf::xdim;
  const uint group = DnConf::yid() % n_groups;

  uint eid = start + group;
  while (eid < end)
  {
    const IT nbr = col_ptr[eid];
    // u_v mode carries no per-edge weight; use the neutral 1.
    const T weight = DnCom != u_v ? val_ptr[eid] : 1.;

    // Fresh accumulator per edge.
    llvlib::Vect<T, 1> acc;
    acc.set_thread(F::init_val);

    dense_walk_cached<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
      src_ptr, ld_src, row_id, nbr, col_base, vlen, acc, dn_cache);

    writeback_wreduce<F, IT, T, DnCom, DnConf, DnIPV, DnIters>(
      weight, dst_ptr, eid, acc);

    eid += n_groups;
  }
}

template <typename F, typename index_t, typename scalar_t, 
          SpWalk spwk, SpLoad spload, 
          DnCompute dncomp, Reduction reduction, 
          typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__global__ void sddmm_kernel_push(
  const index_t* __restrict__ row_offset, 
  const index_t* __restrict__ col_indx, 
  const scalar_t* __restrict__ edge_weight,
  const scalar_t* __restrict__ src_l,
  const scalar_t* __restrict__ src_r,
  const index_t ld_src,
  scalar_t* __restrict__ dst_edge,
  const index_t nv, const index_t ne, const index_t vlen)
{
  // Edge-parallel (push) SDDMM entry point: divide the ne edges into equal
  // contiguous chunks, one per (blockIdx.x, y-row) pair, then delegate the
  // chunk to proceed_push_reduced.

  // This thread's starting feature column; blockIdx.y tiles the dense dim.
  const uint col_base = DnConf::xid()*DnIPV + blockIdx.y*(DnConf::xdim*DnIPV*DnIters);

  const index_t chunk = CEIL(ne, gridDim.x*SpConf::ydim);
  const index_t first = (blockIdx.x * SpConf::ydim + SpConf::yid()) * chunk;
  const index_t last  = MIN(first + chunk, ne);

  proceed_push_reduced<F, index_t, scalar_t, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>(
      first, last, col_base, row_offset, col_indx,
      edge_weight, src_l, src_r, ld_src, dst_edge, nv, vlen);
}


template <typename F, typename index_t, typename scalar_t, 
          SpWalk spwk, SpLoad spload, 
          DnCompute dncomp, Reduction reduction, 
          typename SpConf, typename DnConf, 
          uint SpIPV, uint DnIPV, uint DnIters>
__global__ void sddmm_kernel_pull(
  const index_t*  __restrict__ row_indx,
  const index_t*  __restrict__ row_offset,
  const index_t*  __restrict__ col_indx,
  const scalar_t* __restrict__ edge_weight,
  const scalar_t* __restrict__ src_l,
  const scalar_t* __restrict__ src_r,
  const index_t ld_src,
  scalar_t * __restrict__ dst_edge,
  const index_t nv, const index_t ne, const index_t vlen)
{
  // Row-parallel (pull) SDDMM: each y-row of the block owns one sparse row
  // (blockIdx.x tiles rows, blockIdx.y tiles the dense feature dimension).
  // Shared staging area: one lhs dense tile per y-row.
  // (An unused __shared__ reduce buffer and an unused local accumulator
  // were removed; per-edge accumulation happens inside proceed_pull_naive.)
  __shared__ llvlib::Vect<scalar_t, DnIPV*SpConf::xdim> sh_dn_cache[SpConf::ydim];

  const uint col_base = DnIPV*DnConf::xid() + (DnConf::xdim*blockIdx.y)*DnIPV*DnIters;
  
  index_t RowOffset = blockIdx.x * SpConf::ydim + SpConf::yid();
  // Bounds check BEFORE any gather (bug fix): the guard used to be
  // `RowId > nv` after the row_indx gather, which (a) let RowId == nv
  // through and read row_offset[nv+1] out of bounds, and (b) for pull_dyn
  // read row_indx[] out of bounds on the tail block.
  // NOTE(review): rows that return early skip SpConf::sync() inside
  // cache_dense_shared — same pattern as the original; assumed safe for the
  // sync scope llvlib provides — confirm.
  if (RowOffset >= nv) return;
  index_t RowId = spwk == pull_dyn ? row_indx[RowOffset] : RowOffset;

  llvlib::Vect<scalar_t, DnIPV> dn_cache[DnIters];

  index_t start = row_offset[RowId];
  index_t end = row_offset[RowId+1];

  const scalar_t *src_ptr = src_l;
  index_t rhs_offset = ld_src*RowId+col_base;
  
  {
    // Stage a large lhs dense tile in shared memory to broadcast it across
    // this y-row's threads (the dead `if (true)` wrapper was flattened).
    // NOTE(review): the preload starts at RowId*ld_src and ignores the
    // blockIdx.y column offset — verify this is intended when blockIdx.y > 0.
    index_t rhs_preload_offset = RowId*ld_src;
    cache_dense_shared<F, index_t, scalar_t, SpConf, DnConf, DnIPV, DnIters>(
      src_ptr, rhs_preload_offset, sh_dn_cache[SpConf::yid()]
    );
    src_ptr = sh_dn_cache[SpConf::yid()].data.a;
    rhs_offset = DnConf::xid() * DnIPV;
  }

  // Copy this thread's slice of the staged tile into registers.
  cache_dense_local<F, index_t, scalar_t, SpConf, DnConf, DnIPV, DnIters>(
    src_ptr, rhs_offset, dn_cache
  );

  // Walk the row's edges; each edge's result is written to dst_edge[eid].
  proceed_pull_naive<F, index_t, scalar_t, dncomp, SpConf, DnConf, SpIPV, DnIPV, DnIters>(
    start, end, col_base, RowId, col_indx, edge_weight, 
    src_r, ld_src, dst_edge, nv, vlen, dn_cache
  );
}

};


/**
 * @note src_l and src_r are the left- and right-hand dense operands.
 *  SDDMM computation:
 *    out = D(src_l) * A(sparse) * D(src_r)
 */
template <typename F, typename index_t, typename scalar_t,
          SpWalk spwalk, SpLoad spload, DnCompute dn_compute,
          Reduction reduction, uint sp_warp, uint dn_warp, 
          uint spipv, uint dnipv, uint dn_unroll>
void _sddmm_alt1(const index_t *row_index, 
                 const index_t *row_offset, 
                 const index_t *col_indx, 
                 const scalar_t *edge_weight, 
                 const scalar_t *src_l,
                 const scalar_t *src_r,
                 const index_t ld_src,
                 scalar_t* dst_edge, 
                 const index_t nv, const index_t ne, const index_t vlen)
{
  // Host-side dispatcher: selects the pull- or push-flavoured kernel from
  // the SpWalk template value and derives the launch geometry from the
  // tuning parameters (sp_warp/dn_warp group widths, spipv/dnipv vector
  // widths, dn_unroll dense-loop unroll factor).
  // Example tuning point (kept for reference):
  // const uint sp_warp = 128;
  // const uint dn_warp = 1;
  // const uint spipv = 1;
  // const uint dnipv = 4;
  // const uint dn_unroll = 8;

  // 256 threads per block, factored into (ydim, xdim) cooperative groups.
  // NOTE(review): the launches below use the THD macro for the block size —
  // presumably THD == 256 to match these configs; confirm in config.hxx.
  typedef llvlib::CoopConfig<256/sp_warp, sp_warp> SpConf;
  typedef llvlib::CoopConfig<256/dn_warp, dn_warp> DnConf;

  // Pull flavours: one y-row per sparse row; blockIdx.y tiles the dense
  // feature dimension in chunks of DnConf::xdim*dnipv*dn_unroll columns.
  // (Relies on the SpWalk enumerators ordering pull variants first.)
  if (spwalk <= pull_dyn)
  {
    uint col_blocks = CEIL(vlen, DnConf::xdim*dnipv*dn_unroll);
    uint row_blocks = CEIL(nv, SpConf::ydim);
    CUDA_LAUNCH_CHECK(Alternative1::sddmm_kernel_pull<F, uint, scalar_t, spwalk, spload, 
                                                      dn_compute, reduction, 
                                                      SpConf, DnConf, spipv, dnipv, dn_unroll>
                      <<<dim3(row_blocks, col_blocks), THD>>>(
                      row_index,
                      row_offset, col_indx, edge_weight, 
                      src_l, src_r, ld_src, 
                      dst_edge, nv, ne, vlen));

  }
  // Push flavours: edges are split evenly across blocks/y-rows.
  // NOTE(review): row_blocks is sized by SpConf::xdim here while the kernel
  // chunks edges by gridDim.x*SpConf::ydim — functionally covered (the
  // kernel re-derives its chunk from gridDim), but the sizing heuristic
  // looks inconsistent; confirm the intended occupancy.
  else if ( spwalk <= push_seq )
  {
    uint col_blocks = CEIL(vlen, DnConf::xdim*dnipv*dn_unroll);
    uint row_blocks = CEIL(ne, SpConf::xdim);

    CUDA_LAUNCH_CHECK(Alternative1::sddmm_kernel_push<F, uint, scalar_t, spwalk, spload, 
                                                      dn_compute, reduction, 
                                                      SpConf, DnConf, spipv, dnipv, dn_unroll>
                      <<<dim3(row_blocks, col_blocks), THD>>>(
                      row_offset, col_indx, edge_weight, 
                      src_l, src_r, ld_src, 
                      dst_edge, nv, ne, vlen));
  }
}

template<bool has_add, uint32_t DENCE_COL_CHUNK>
__global__ void sddmm(
    const uint32_t* __restrict__ A_row_begin,
    const uint32_t* __restrict__ A_row_end,
    const uint32_t* __restrict__ A_col,
    float* __restrict__ A_val,
    const float* __restrict__ B,
    const float* __restrict__ C,
    uint32_t dense_col)
{
    // SDDMM over one DENCE_COL_CHUNK-wide column slice of B/C:
    //   A_val[e] (+)= dot(B[A_col[e], chunk], C[row(e), chunk])
    // Launch shape: <<<num_sparse_rows, dim3(8, 4, 1)>>> — one block (one
    // warp) per sparse row; each threadIdx.y group of 8 lanes handles every
    // 4th nonzero; the 8 x-lanes split the chunk as float4s.
    // Preconditions (from the float4 indexing below): dense_col and the
    // B/C base offsets must be 0 mod 4 (16-byte alignment), and
    // DENCE_COL_CHUNK must be a multiple of 32 and <= 256.
    __shared__ float4 valC_shared[DENCE_COL_CHUNK >> 2];
    uint32_t rowA = blockIdx.x;
    uint32_t offA_begin = A_row_begin[rowA];
    uint32_t offA_end = A_row_end[rowA];
    // Stage this row's C chunk in shared memory (32 threads x float4 = 128
    // floats per pass; guard handles DENCE_COL_CHUNK < 128).
    if ((threadIdx.y * 8 + threadIdx.x) * 4 < DENCE_COL_CHUNK) // DENCE_COL_CHUNK <= 256
    {
        float4 valC = *reinterpret_cast<const float4*>(&C[rowA * dense_col + (threadIdx.y * 8 + threadIdx.x) * 4]);
        valC_shared[threadIdx.y * 8 + threadIdx.x] = valC;
    }
    __syncthreads();
    // Each lane keeps its slice of the C chunk in registers for reuse
    // across all nonzeros of the row.
    float4 valC_reg[DENCE_COL_CHUNK / 32];
    #pragma unroll
    for (uint32_t i = 0; i < DENCE_COL_CHUNK / 32; i++)
    {
        valC_reg[i] = valC_shared[i * 8 + threadIdx.x];
    }
    // Shuffle mask for the 8 lanes sharing this threadIdx.y (lane id =
    // threadIdx.y*8 + threadIdx.x). Bug fix: the reduction used mask
    // 0xffffffff inside a loop whose trip count depends on threadIdx.y, so
    // lanes of finished y-groups were named in the mask without executing
    // the intrinsic — undefined behaviour (hangs on Volta+). The xor
    // offsets 4/2/1 only exchange within the 8-lane group, and all 8 lanes
    // of a group share one trip count, so the group mask is always valid.
    const uint32_t group_mask = 0xffu << (threadIdx.y * 8);
    for (uint32_t offA = offA_begin + threadIdx.y; offA < offA_end; offA += 4)
    {
        float valA = 0.0f;
        uint32_t colA = A_col[offA];
        #pragma unroll
        for (uint32_t i = 0; i < DENCE_COL_CHUNK / 32; i++)
        {
            float4 valB = *reinterpret_cast<const float4*>(&B[colA * dense_col + i * 32 + threadIdx.x * 4]);
            float4 valC = valC_reg[i];
            valA += valB.x * valC.x;
            valA += valB.y * valC.y;
            valA += valB.z * valC.z;
            valA += valB.w * valC.w;
        }
        // Butterfly-reduce the partial dot product across the 8 x-lanes;
        // afterwards every lane of the group holds the full sum.
        valA += __shfl_xor_sync(group_mask, valA, 4, 32);
        valA += __shfl_xor_sync(group_mask, valA, 2, 32);
        valA += __shfl_xor_sync(group_mask, valA, 1, 32);
        if (has_add)
        {
            // Accumulate onto the value left by the previous chunk's launch.
            valA += A_val[offA];
            __threadfence();
        }
        A_val[offA] = valA; // all 8 lanes store the same value
    }
}

void launch_sddmm(uint32_t* A_row, uint32_t* A_col, float* A_val, const float* B, const float* C,
    uint32_t SPARSE_ROW, uint32_t SPARSE_COL, uint32_t DENSE_COL)
{
  // Drive the chunked sddmm kernel over the dense column dimension:
  // the first launch overwrites A_val, subsequent launches (has_add=true)
  // accumulate the remaining 128-column chunks onto it.
  // NOTE(review): the kernel reads full float4 chunks, so DENSE_COL is
  // presumably a multiple of 128 — a ragged tail would read out of bounds;
  // confirm with callers.
  constexpr uint32_t kChunk = 128;
  const dim3 grid(SPARSE_ROW, 1, 1);   // one block per sparse row
  const dim3 block(8, 4, 1);           // one warp: 8 x-lanes x 4 y-groups

  // A_row / A_row + 1 give CSR row begin/end pointers.
  sddmm<false, kChunk><<<grid, block>>>(A_row, A_row + 1, A_col, A_val, B, C, DENSE_COL);
  for (uint32_t c = 1; c * kChunk < DENSE_COL; ++c)
  {
    sddmm<true, kChunk><<<grid, block>>>(A_row, A_row + 1, A_col, A_val,
                                         B + c * kChunk, C + c * kChunk, DENSE_COL);
  }
}

#ifndef _SPMM_CUH__ 
template<typename scalar_t>
struct Func{
  // Placeholder functor satisfying the F template interface (binary op,
  // reduction, atomic reduction, init value) when the real one from the
  // SpMM header is not available. Bug fix: the originals were value-
  // returning functions with empty bodies — undefined behaviour if ever
  // called; they now return init_val (zero) explicitly.
  static __device__ __forceinline__ scalar_t binary(scalar_t, scalar_t){ return init_val; }
  static __device__ __forceinline__ scalar_t reduce(scalar_t*, scalar_t){ return init_val; }
  static __device__ __forceinline__ scalar_t reduceAtomic(scalar_t*, scalar_t){ return init_val; }
  static constexpr scalar_t init_val = (scalar_t) 0;
};
#endif

#endif