#ifndef __GCOO_KERNEL_CUH_
#define __GCOO_KERNEL_CUH_

#include <cuda.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include "cuda/cuda_utils.cuh"
#include "config.hxx"

namespace acd = at::cuda::detail;

namespace Base {
// Grouped-COO SpMM kernel (register-accumulation variant).
//
// Block Bx consumes the edge slice [group_offset[Bx], group_offset[Bx+1])
// of the COO lists; blockIdx.y tiles the feature dimension so each thread
// owns one feature column Tx. Edges are staged into dynamic shared memory
// in chunks of blockDim.x, and partial results are kept in a GRP_SIZE-wide
// register buffer indexed by dst % GRP_SIZE, stored to global memory once
// at the end.
//
// Assumes (see writeback below) that every destination id in group Bx maps
// into the row range [GRP_SIZE*Bx, GRP_SIZE*(Bx+1)) — presumably enforced
// by the host-side grouping; TODO confirm.
//
// Launch expectations (see _gcoo_spmm): grid = (ngrps, CEIL(vlen, THD)),
// block = THD threads, dynamic smem = (2*sizeof(int) + sizeof(T)) * THD.
template<typename F, typename T, int GRP_SIZE = 4>
__global__ void  _gcoo_kernel(
  const int *__restrict__ src_list, 
  const int *__restrict__ dst_list, 
  const T *__restrict__ weight_list,    // may be nullptr (unweighted edges)
  const int *__restrict__ group_offset, // per-group prefix offsets into the edge lists
  const acd::TensorInfo<T, int> src_tensor,
  acd::TensorInfo<T, int> dst_tensor,
  const int _bsz)                       // unused in this kernel
{
  const int Bx = blockIdx.x;                // group id
  const int By = blockIdx.y;                // feature-dimension tile
  const int Stride = blockDim.x;
  const int Tx = By*Stride + threadIdx.x;   // global feature column handled by this thread

  SharedMem<char> _sm;

  // Dynamic shared-memory layout: [ int src[Stride] | int dst[Stride] | T ew[Stride] ]
  int * sm_src = (int*) _sm.ptr();
  int * sm_dst = (int*) (_sm.ptr() + sizeof(int)*Stride);
  T * sm_ew = (T*) (_sm.ptr() + sizeof(int)*Stride*2);

  const int edge_start = group_offset[Bx];
  const int edge_end = group_offset[Bx+1];
  const int nnz = edge_end - edge_start;   // edges in this group
  const int steps = CEIL(nnz, Stride);     // shared-memory staging rounds
  const int vlen = src_tensor.sizes[1];    // feature-vector length

  const int * lsrc = src_list + edge_start;
  const int * ldst = dst_list + edge_start;
  const T * lew = (weight_list != nullptr) ? weight_list + edge_start : nullptr;

  T res_buffer[GRP_SIZE]; // per-thread register accumulator, slot = dst % GRP_SIZE

  CLEAN_REG(res_buffer, GRP_SIZE); // presumably zero-initializes the buffer (cuda_utils.cuh) — TODO confirm
  
  __syncthreads();

  for (int iter=0; iter<steps; ++iter)
  {
    int els = iter*Stride;                 // first edge (group-relative) of this round
    int ele = els + MIN(Stride, nnz-els);  // one past the last edge of this round
    int i = els;

    // Cooperative load: each thread stages one edge of the chunk.
    if (threadIdx.x+els < ele)
    {
      sm_src[threadIdx.x] = lsrc[els+threadIdx.x];
      sm_dst[threadIdx.x] = ldst[els+threadIdx.x];
      if (weight_list != nullptr) sm_ew[threadIdx.x] = lew[els+threadIdx.x];
    }
    __syncthreads(); // staged chunk visible to the whole block
    
    if (Tx < vlen)
    {
      int last_src = -1;
      while (i<ele)
      {
        int src = sm_src[i-els];
        int dst = sm_dst[i-els];
        T srcVal;
        // Edges are scanned in order, so a run of edges sharing a source
        // reuses the value loaded for the previous edge.
        if (last_src != src)
        {
          int srcInd = src_tensor.strides[0] * src + src_tensor.strides[1] * Tx;
          srcVal = src_tensor.data[srcInd];
          last_src = src;
        }

        if (weight_list != nullptr) F::reduce(res_buffer+dst%GRP_SIZE, F::binary(sm_ew[i-els], srcVal));
        else F::reduce(res_buffer+dst%GRP_SIZE, srcVal);

        i++;
      }
    }
    __syncthreads(); // whole block done with this chunk before it is overwritten
  
  }
  
  // Writeback: slot i of the register buffer corresponds to destination row
  // GRP_SIZE*Bx + i. Slots still holding exactly 0 are skipped — assumes
  // dst_tensor is pre-initialized and 0 means "untouched". NOTE(review):
  // a genuine zero-valued result is also skipped by this test.
  if (Tx < vlen) for (int i=0; i<GRP_SIZE; ++i)
  {
    if (res_buffer[i] == 0) continue;
    int dstInd = dst_tensor.strides[0] * (GRP_SIZE*Bx + i) + dst_tensor.strides[1] * Tx;
    dst_tensor.data[dstInd] = res_buffer[i];
  }
}

}; // namespace Base

namespace Alter1 {

// Grouped-COO SpMM kernel (atomic-scatter variant).
//
// Block blockIdx.x consumes the edge slice
// [group_offset[Bx], group_offset[Bx+1]); blockIdx.y tiles the feature
// dimension so each thread owns one feature column. Edges are staged into
// dynamic shared memory in chunks of blockDim.x; results are scattered
// directly into dst_tensor with F::reduceAtomic. Runs of consecutive edges
// sharing a source row reuse a single load of that row's value.
//
// Dynamic shared-memory layout: [ int src[blockDim.x] | int dst[blockDim.x] | T ew[blockDim.x] ]
template<typename F, typename T>
__global__ void  _gcoo_kernel(
  const int *__restrict__ src_list, 
  const int *__restrict__ dst_list, 
  const T *__restrict__ weight_list,
  const int *__restrict__ group_offset,
  const acd::TensorInfo<T, int> src_tensor,
  acd::TensorInfo<T, int> dst_tensor,
  const int _bsz) // unused in this kernel
{
  const int grp = blockIdx.x;
  const int chunk = blockDim.x;
  const int col = blockIdx.y * chunk + threadIdx.x; // feature column for this thread

  SharedMem<char> _sm;

  int * stage_src = (int*) _sm.ptr();
  int * stage_dst = (int*) (_sm.ptr() + sizeof(int)*chunk);
  T * stage_w = (T*) (_sm.ptr() + sizeof(int)*chunk*2);

  const int first = group_offset[grp];
  const int count = group_offset[grp+1] - first;
  const int rounds = CEIL(count, chunk);
  const int ncols = src_tensor.sizes[1];

  const int * gsrc = src_list + first;
  const int * gdst = dst_list + first;
  const T * gw = (weight_list != nullptr) ? weight_list + first : nullptr;
  const bool weighted = (weight_list != nullptr);

  for (int r = 0; r < rounds; ++r)
  {
    const int lo = r * chunk;                  // first edge (group-relative) of this round
    const int hi = lo + MIN(chunk, count - lo); // one past the last edge of this round

    // Cooperative load: each thread stages one edge of the chunk.
    if (lo + threadIdx.x < hi)
    {
      stage_src[threadIdx.x] = gsrc[lo + threadIdx.x];
      stage_dst[threadIdx.x] = gdst[lo + threadIdx.x];
      if (weighted) stage_w[threadIdx.x] = gw[lo + threadIdx.x];
    }
    __syncthreads();

    if (col < ncols)
    {
      int e = lo;
      while (e < hi)
      {
        // Load the source value once, then consume the whole run of
        // consecutive edges that share this source row.
        const int src = stage_src[e - lo];
        const T srcVal = src_tensor.data[src_tensor.strides[0] * src
                                         + src_tensor.strides[1] * col];
        int run = 0;
        do
        {
          const int dst = stage_dst[e + run - lo];
          T * out = dst_tensor.data + dst_tensor.strides[0] * dst
                                    + dst_tensor.strides[1] * col;
          if (weighted) F::reduceAtomic(out, F::binary(stage_w[e + run - lo], srcVal));
          else F::reduceAtomic(out, srcVal);
          ++run;
        } while (e + run < hi && stage_src[e + run - lo] == src);
        e += run;
      }
    }
    __syncthreads(); // whole block done with this chunk before it is overwritten
  }
}

}; // namespace Alter1 

// Host-side launcher for the grouped-COO SpMM.
//
// Launches Base::_gcoo_kernel with grid = (ngrps, CEIL(vlen, THD)) and
// THD threads per block, where vlen = src_tensor.sizes[1] is the feature
// width. Each block owns one edge group described by group_offset.
//
// Parameters:
//   src_list/dst_list/weight_list : device COO arrays (weight_list may be nullptr)
//   group_offset : device array of ngrps+1 prefix offsets into the edge lists
//   nedge        : total edge count (currently unused by this launch path)
//   ngrps        : number of edge groups (= grid.x)
template<typename F, typename T> 
void _gcoo_spmm(
  const int *src_list, 
  const int *dst_list, 
  const T *weight_list,
  const int *group_offset,
  const acd::TensorInfo<T, int> src_tensor,
  acd::TensorInfo<T, int> dst_tensor,
  const int nedge, const int ngrps)
{
  // Base kernel accumulates into GRP_SIZE (=4) registers per thread, so a
  // group may cover at most 4*THD destination rows.
  assert((BSZ <= 4*THD) && "only block size of 4 times of num_thread is available currently");
  const int vlen = src_tensor.sizes[1];
  const int by = CEIL(vlen, THD); // grid.y: one tile of THD feature columns each

  // Dynamic shared memory: two int staging arrays (src/dst ids) plus one T
  // array (edge weights), each THD entries wide — matches the kernel layout.
  const size_t smem = (sizeof(T) + 2*sizeof(int)) * THD;

  // BUGFIX: the original passed `_bsz`, which is not declared in this scope
  // and failed to compile on instantiation. The kernel's trailing parameter
  // is unused by both kernel variants, so the BSZ config constant is passed
  // to keep the launch well-formed.
  CUDA_LAUNCH_CHECK(Base::_gcoo_kernel<F, T><<<dim3(ngrps, by), THD, smem>>>
  (src_list, dst_list, weight_list, group_offset, src_tensor, dst_tensor, BSZ));
}

#endif