#ifndef _DENSE_CUH__
#define _DENSE_CUH__

#include <cstdio>
#include <cstdlib>
#include <type_traits>
#include <typeinfo>

#include <cuda.h>
#include <cublas_v2.h>

#include "cuda/cuda_utils.cuh"

/**
 * @brief dense GEMM through cuBLAS: C = A^T * B (column-major storage, as
 *        cuBLAS expects), for scalar_t in {float, double}.
 *
 * @param A   device pointer; consumed transposed (CUBLAS_OP_T)
 * @param B   device pointer; consumed as-is (CUBLAS_OP_N)
 * @param C   device pointer; m x n result
 * @param m   rows of C (columns of op(A))
 * @param n   columns of C and of B
 * @param k   reduction dimension
 * @param lda leading dimension of A (>= k, since op(A) = A^T)
 * @param ldb leading dimension of B (>= k)
 * @param ldc leading dimension of C (>= m)
 *
 * NOTE(review): the previous signature had no m/n/k, so the cublas?gemm
 * calls below could not compile on any instantiation — no working caller
 * can exist, which makes this signature extension safe.
 */
template <typename scalar_t>
void cublas_gemm(const scalar_t *A, 
                 const scalar_t *B, 
                 scalar_t *C,
                 const int m, const int n, const int k,
                 const size_t lda, const size_t ldb, const size_t ldc)
{
#define CHECK_CUBLAS(func)                                          \
do {                                                                \
    cublasStatus_t status = (func);                                 \
    if (status != CUBLAS_STATUS_SUCCESS) {                          \
        printf("CUBLAS API failed at line %d with error: (%d)\n",   \
              __LINE__, status);                                    \
        exit(-1);                                                   \
    }                                                               \
} while (0)

  // only the two types dispatched below are supported
  static_assert(std::is_same<scalar_t, float>::value ||
                std::is_same<scalar_t, double>::value,
                "cublas_gemm supports float and double only");

  cublasHandle_t blas_h;
  scalar_t alpha = static_cast<scalar_t>(1);
  scalar_t beta  = static_cast<scalar_t>(0);

  CHECK_CUBLAS(cublasCreate(&blas_h));
  // `if constexpr` (C++17, already used elsewhere in this file) keeps the
  // dead branch from being instantiated with the wrong pointer type, which
  // the previous runtime typeid() comparison could not do.
  if constexpr (std::is_same<scalar_t, float>::value)
    CHECK_CUBLAS(cublasSgemm(
      blas_h, CUBLAS_OP_T, CUBLAS_OP_N,
      m, n, k,
      &alpha,
      A, lda,
      B, ldb,
      &beta,
      C, ldc
    ));
  else
    CHECK_CUBLAS(cublasDgemm(
      blas_h, CUBLAS_OP_T, CUBLAS_OP_N,
      m, n, k,
      &alpha,
      A, lda,
      B, ldb,
      &beta,
      C, ldc
    ));
  // destroy can fail too (e.g. invalid handle) — check it like the rest
  CHECK_CUBLAS(cublasDestroy(blas_h));

#undef CHECK_CUBLAS
}


namespace llvlib{

#define DEV_INLINE __device__ __forceinline__

  /**
   * @brief maps (scalar type, lane count) to the matching CUDA built-in
   *        vector type (e.g. <float,4> -> float4) plus small helpers.
   *        Only the specializations generated below exist; any other
   *        combination is a compile error by design.
   */
  template <typename T, uint nele>
  struct VectTypes;

// Generates one VectTypes specialization:
//   type     : the CUDA built-in vector type T##N (uint4, float2, ...)
//   fill_vec : fills all N lanes of *vec with `zero` (defaults to `udef`)
//   cast     : reinterprets a raw pointer as type*
#define DECLARE_WRAPPER(T,N,udef)\
template<> struct VectTypes<T, N>{ typedef T##N type;\
static DEV_INLINE void fill_vec(type* vec, T zero = (T)(udef))\
{ T* _vec = reinterpret_cast<T*>(vec); for (uint i=0; i<N; ++i) _vec[i] = zero; }\
static DEV_INLINE type* cast(void* p) {return reinterpret_cast<type*>(p); } }

  // integer lanes
  DECLARE_WRAPPER(uint, 1, 0);
  DECLARE_WRAPPER(uint, 2, 0);
  DECLARE_WRAPPER(uint, 4, 0);

  DECLARE_WRAPPER(long, 1, 0);
  DECLARE_WRAPPER(long, 2, 0);
  DECLARE_WRAPPER(long, 4, 0);

  // floating-point lanes
  DECLARE_WRAPPER(float, 1, 0.);
  DECLARE_WRAPPER(float, 2, 0.);
  DECLARE_WRAPPER(float, 4, 0.);

  DECLARE_WRAPPER(double, 1, 0.);
  DECLARE_WRAPPER(double, 2, 0.);
  DECLARE_WRAPPER(double, 4, 0.);
#undef DECLARE_WRAPPER

  /**
   * @brief number of scalar T elements carried by one vector type VT
   *        (e.g. ItemPerVec_t<float4, float>::value == 4).
   */
  template <typename VT, typename T>
  struct ItemPerVec_t{
    static constexpr uint value = sizeof(VT) / sizeof(T);
  };

  /**
   * @brief thread mapping configuration of memory op & computation
   */ 
  template<uint YDIM, uint XDIM>
  struct CoopConfig {
    // XDIM must be a power of two: xid()/ybase()/warp_* rely on mask
    // arithmetic (x & -XDIM, x & 31), which only rounds down to a multiple
    // for powers of two. The previous check merely required XDIM even,
    // which silently broke ybase() for e.g. XDIM == 6.
    static_assert((YDIM % 2 == 0 || YDIM==1) && (XDIM & (XDIM - 1)) == 0);

    static constexpr uint xdim = XDIM;
    static constexpr uint ydim = YDIM;
    static constexpr uint warp_ydim = 32 / XDIM;  // co-op groups per warp (XDIM <= 32)

    // lane index within this thread's group
    static DEV_INLINE uint xid() { return threadIdx.x % XDIM; }
    // group index within the CTA
    static DEV_INLINE uint yid() { return threadIdx.x / XDIM; }
    // threadIdx.x of the first lane of this group (NOT divided by XDIM)
    static DEV_INLINE uint ybase() { return threadIdx.x & (-XDIM);}
    static DEV_INLINE constexpr uint coop_size() { return XDIM*YDIM; }
    // first group index of this thread's warp / group index within the warp
    static DEV_INLINE uint warp_ybase() { return (threadIdx.x & -32) / XDIM; }
    static DEV_INLINE uint warp_yid() { return (threadIdx.x & 31) / XDIM; }
    static DEV_INLINE bool leader() { return xid() == 0; }
    static DEV_INLINE void sync()
    {
      // Consistent with CoopLite::sync(): groups wider than a warp need a
      // CTA barrier; warp-sized or smaller groups still need __syncwarp()
      // on Volta+ (independent thread scheduling removes implicit warp
      // lockstep). The previous version issued no barrier for XDIM < 32.
      if (XDIM > 32) __syncthreads();
      else __syncwarp();
    }
  };

  /**
   * @brief lightweight co-op config over a flat 1-D thread block: the block
   *        is carved into groups of XDIM consecutive threads; blockIdx.y
   *        tiles the x (feature) dimension and blockIdx.x tiles rows.
   *        NOTE(review): xid()/ybase() use mask tricks, so XDIM is assumed
   *        to be a power of two — not enforced here; confirm at call sites.
   */
  template<uint XDIM>
  struct CoopLite1 {
    // in-kernel dimension config
    static DEV_INLINE uint xid() { return threadIdx.x & (XDIM-1); }  // lane within group
    static DEV_INLINE uint yid() { return threadIdx.x / XDIM; }      // group index in block
    static DEV_INLINE uint ybase() { return threadIdx.x & (-XDIM);}  // first thread of group
    static DEV_INLINE uint tx() { return xid() + blockIdx.y * XDIM; }                // global column
    static DEV_INLINE uint ty() { return yid() + blockDim.x * blockIdx.x / XDIM; }   // global row
    static DEV_INLINE uint stride_x() { return gridDim.y*XDIM; }                 // grid stride, columns
    static DEV_INLINE uint stride_y() { return blockDim.x * gridDim.x / XDIM; }  // grid stride, rows
    static DEV_INLINE bool leader() { return xid()==0; }
    // kernel launch config (CEIL presumably ceil-div from cuda_utils.cuh — confirm)
    // grid.y: cover feat_dim*heads columns at XDIM*unroll per block
    static inline int block_y(const int feat_dim, const int heads=1, const int unroll=1) {
      return CEIL(feat_dim*heads, XDIM*unroll);
    }
    // grid.x: cover `rows` rows at block_size/XDIM rows per block
    static inline int block_x(const int rows, const int block_size) {
      return CEIL(rows, block_size/XDIM);
    }
  };

  /**
   * @brief co-op config over a 2-D thread block: threadIdx.x is the lane
   *        within a group of XDIM threads and threadIdx.y is the group
   *        index. blockIdx.y tiles the x (feature) dimension, blockIdx.x
   *        tiles rows. Launch with thread_x()/thread_y() below.
   */
  template<uint XDIM>
  struct CoopLite {
    // in-kernel dimension config
    static DEV_INLINE uint xid() { return threadIdx.x; }        // lane within group
    static DEV_INLINE uint yid() { return threadIdx.y; }        // group index in block
    static DEV_INLINE uint ybase() { return threadIdx.y * XDIM;}  // flat index of group's first thread
    static DEV_INLINE uint tx() { return xid() + blockIdx.y * XDIM; }        // global column
    static DEV_INLINE uint ty() { return yid() + blockDim.y * blockIdx.x; }  // global row
    static DEV_INLINE uint stride_x() { return gridDim.y * XDIM; }           // grid stride, columns
    static DEV_INLINE uint stride_y() { return blockDim.y * gridDim.x; }     // grid stride, rows
    static DEV_INLINE bool leader() { return xid()==0; }
    static DEV_INLINE void sync()
    {
      // CTA barrier only when a group spans more than one warp; otherwise a
      // warp-level barrier suffices
      if (XDIM > 32) __syncthreads();  
      else __syncwarp();
    }
    // kernel launch config
    static inline int thread_x() { return XDIM; }
    static inline int thread_y( int block_size ) { return block_size / XDIM; }
    // grid.y: cover feat_dim*heads columns at XDIM*unroll per block
    static inline int block_y(const int feat_dim, const int heads=1, const int unroll=1) {
      return CEIL(feat_dim*heads, XDIM*unroll);
    }
    // grid.x: cover `rows` rows at thread_y(block_size) rows per block
    static inline int block_x(const int rows, const int block_size) {
      return CEIL(rows, thread_y(block_size));
    }
  };

  /**
   * @brief coordinates two CoopConfig-style thread mappings sharing one CTA:
   *        CONF1 for the sparse operand, CONF2 for the dense operand.
   *        Exposes which side owns more row-groups (sparse/dense major) and
   *        cross-mapping lane arithmetic.
   */
  template <typename CONF1, typename CONF2>
  struct CoopCoordinator {
    using SpConf = CONF1;
    using DnConf = CONF2;

    enum CoopMode {
      sparse_major, dense_major, mutual
    };

    // Either the two configs are the same type, or the larger ydim is an
    // even multiple of the smaller. (Integer division makes the "wrong way"
    // ratio 0, and 0 % 2 == 0, so only the meaningful ratio is constrained.)
    static_assert( (CONF1::ydim / CONF2::ydim)%2==0 
                && (CONF2::ydim / CONF1::ydim)%2==0 
                || std::is_same<CONF2, CONF1>() );
  
    static constexpr bool SparseMajor = ( SpConf::ydim > DnConf::ydim );
    static constexpr bool DenseMajor  = ( SpConf::ydim < DnConf::ydim );
    static constexpr bool Mutual      = ( SpConf::ydim == DnConf::ydim );

    // how many groups of one side fit across one group of the other
    static constexpr uint SpPerDn = CEIL(DnConf::xdim, SpConf::xdim);
    static constexpr uint DnPerSp = CEIL(SpConf::xdim, DnConf::xdim);

    static DEV_INLINE uint dn_yid_in_sp() { return SpConf::xid() / DnConf::xdim; }
    static DEV_INLINE uint sp_yid_in_dn() { return DnConf::xid() / SpConf::xdim; }
    
    static DEV_INLINE constexpr CoopMode coop_mode() 
    {
      // Exactly one of >, <, == holds; ending on a plain return keeps every
      // path covered. (The previous if/if/if chain could fall off the end of
      // a value-returning function — undefined behaviour.)
      if ( SpConf::ydim > DnConf::ydim ) return sparse_major;
      if ( SpConf::ydim < DnConf::ydim ) return dense_major;
      return mutual;
    }
    
  };

  /**
   * @brief this class is to be used as a local buffer for shared or register
   *        the template type is the type used for computation, and types for 
   *        ld st functions are for memory access (vector type)
   */ 
  template <typename ArrTy, typename T, uint H, uint W>
  struct AbstractTile{
    // backing storage: a register array (_local_data) or a raw-pointer
    // wrapper over external memory (_ptr_data)
    ArrTy data;
    static constexpr uint h = H;
    static constexpr uint w = W;

    /**
     *  The constructor binds the buffer to a memory location.
     *  This info is useful for certain subclasses 
     */
    DEV_INLINE AbstractTile() {}
    // pointer to the first element of row `hidx`
    DEV_INLINE T* operator[](uint32_t hidx) { return &data[hidx*W]; }
    
    // zero every element of the tile from this thread alone
    DEV_INLINE void clean_thread()
    {
      // #pragma unroll
      for ( uint j=0; j<H*W; ++j )
      {
        data[j] = static_cast<T>(0);
      }

      // memset((void*)&data, 0, sizeof(T)*H*W);
    }

    // fill every element of the tile with `val` from this thread alone
    DEV_INLINE void set_thread(T val)
    {
      for ( uint i=0; i<H*W; ++i ) data[i] = val;
    }

    // CTA-cooperative zero-fill, one uint word at a time.
    // NOTE(review): assumes sizeof(T)*H*W is a multiple of sizeof(uint) and
    // that callers issue __syncthreads() before reading — confirm at call
    // sites. The `#pragma unroll` has no effect on a runtime-bounded loop.
    DEV_INLINE void clean_cta()
    {
      auto _buffer = data.template cast<uint>();
      auto _size = H*W*sizeof(T) / sizeof(uint);

      #pragma unroll
      for ( uint j=threadIdx.x; j<_size; j+=blockDim.x )
      {
        _buffer[j] = 0;
      }
    }

    // vectorized element access: element `idx` (in VT units) of row `row`.
    // NOTE(review): this reinterpret_casts the VALUE data[row] (a T) to a
    // pointer — it only compiles for integral T and looks like it was meant
    // to be `&data[row*W]` as in operator[]; confirm before use.
    template <typename VT>
    DEV_INLINE VT& at(uint row, uint idx)
    {
      return *(reinterpret_cast<VT*>(data[row])+idx);
    }
  
  /**
   * To define buffer loop, check the parameters for device lambda in the
   * following macros.
   *
   * Lambda parameters: j = row index, hsize = row bound, i = vector-column
   * index, vwsize = vector-column bound, (voffset = offset in VT units,)
   * vstride = row stride in VT units, vt_<ptr> = external buffer viewed as
   * VT*, vt_data = this tile viewed as VT*. `vW` is the tile row width in
   * VT units.
   *
   * NOTE(review): the private loop drivers receive these lambdas through a
   * plain function pointer, so only capture-less __device__ lambdas work
   * (requires nvcc --extended-lambda) — confirm build flags.
   */ 
  #define CTA_LOOP_STA(ptr,stride,h,w,...)\
  cta_loop_static<VT,CONF>((ptr), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__ })

  #define CTA_LOOP_DYN(ptr,stride,h,w,...)\
  cta_loop_dynamic<VT,CONF>((ptr), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__})

  #define CTA_LOOP_STA_OFF(ptr,offset,stride,h,w,...)\
  cta_loop_static<VT,CONF>((ptr), (offset), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint voffset, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__ })

  #define CTA_LOOP_DYN_OFF(ptr,offset,stride,h,w,...)\
  cta_loop_dynamic<VT,CONF>((ptr), (offset), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint voffset, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__})

  #define WARP_LOOP_STA(ptr,stride,h,w,...)\
  warp_loop_static<VT,CONF>((ptr), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__ })

  #define WARP_LOOP_DYN(ptr,stride,h,w,...)\
  warp_loop_dynamic<VT,CONF>((ptr), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__})

  #define WARP_LOOP_STA_OFF(ptr,offset,stride,h,w,...)\
  warp_loop_static<VT,CONF>((ptr), (offset), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint voffset, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__ })

  #define WARP_LOOP_DYN_OFF(ptr,offset,stride,h,w,...)\
  warp_loop_dynamic<VT,CONF>((ptr), (offset), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint voffset, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__})

  #define THD_LOOP_STA(ptr,stride,h,w,...)\
  thread_loop_static<VT>((ptr), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { const uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__ })

  #define THD_LOOP_DYN(ptr,stride,h,w,...)\
  thread_loop_dynamic<VT>((ptr), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__})

  #define THD_LOOP_STA_OFF(ptr,offset,stride,h,w,...)\
  thread_loop_static<VT>((ptr), (offset), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint voffset, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { const uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__ })

  #define THD_LOOP_DYN_OFF(ptr,offset,stride,h,w,...)\
  thread_loop_dynamic<VT>((ptr), (offset), (stride), (h), (w),\
  []__device__(uint j, uint hsize, uint i, uint vwsize, uint voffset, uint vstride, VT* vt_##ptr, VT* vt_data)\
  { constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT)); __VA_ARGS__})
  
    // cooperative load of the full static H x W tile (no bounds checks)
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp(T* src, const uint stride)
    {
      WARP_LOOP_STA(src,stride,H,W,{
        const uint offset = j*vstride + i;
        vt_data[j*vW+i] = vt_src[offset];
      });
    }

    // as load_warp, reading from src displaced by `offset` elements of T
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp_offset(T* src, const uint offset, const uint stride)
    {
      WARP_LOOP_STA_OFF(src,offset,stride,H,W,{
        voffset += j*vstride + i;
        vt_data[j*vW+i] = vt_src[voffset];
      });
    }

    // bounded cooperative load of an hsize x wsize sub-tile
    // (the inner guard is redundant with the dynamic loop bounds but harmless)
    template <typename VT, typename CONF>
    DEV_INLINE void 
    load_warp(T* src, const uint stride, const uint hsize, const uint wsize=W)
    {
      WARP_LOOP_DYN(src,stride,hsize,wsize,{
        if (j<hsize && i<vwsize)
        {
          const uint offset = j*vstride+i;
          vt_data[j*vW+i] = vt_src[offset];
        }
      });
    }

    // bounded cooperative load with a source offset (in elements of T)
    template <typename VT, typename CONF>
    DEV_INLINE void 
    load_warp_offset(T* src, const uint offset, const uint stride, const uint hsize, const uint wsize=W)
    {
      WARP_LOOP_DYN_OFF(src,offset,stride,hsize,wsize,{
        if (j<hsize && i<vwsize)
        {
          voffset += j*vstride+i;
          vt_data[j*vW+i] = vt_src[voffset];
        }
      });
    }

    
    /**
     * @brief function for a warp to perform coalesced data load.
     *  This function forces every element to be refilled, which might be 
     *  useful for reused buffer: in-range elements come from src, the rest
     *  are zero-filled via VectTypes::fill_vec.
     * @param src data src
     * @param stride the row stride of original src matrix
     * @param hsize the remaining rows of src
     * @param wsize the valid width (cols) to load; rest is padded
     */ 
    template <typename VT, typename CONF>
    DEV_INLINE void 
    load_warp_padded(T* src, const uint stride, const uint hsize, const uint wsize=W)
    {
      WARP_LOOP_STA( src, stride, hsize, wsize, {
        if ( j < hsize && i < vwsize ) {
          const uint offset = j*vstride + i;
          vt_data[j*vW+i] = vt_src[offset];
        } else VectTypes<T,sizeof(VT)/sizeof(T)>::fill_vec(&vt_data[j*vW+i]);
      });
    }

    // padded load with a source offset (in elements of T)
    template <typename VT, typename CONF>
    DEV_INLINE void 
    load_warp_padded_offset(T* src, const uint offset, const uint stride, const uint hsize, const uint wsize=W)
    {
      WARP_LOOP_STA_OFF( src, offset, stride, hsize, wsize, {
        if ( j < hsize && i < vwsize ) {
          voffset += j*vstride + i;
          vt_data[j*vW+i] = vt_src[voffset];
        } else VectTypes<T,sizeof(VT)/sizeof(T)>::fill_vec(&vt_data[j*vW+i]);
      });
    }

    /**
     * @brief function for a warp to cooperatively store the data in this vector to destination
     * 
     * @tparam VT vector data type used as ld/st unit
     * @tparam CONF the co-op configuration of threads
     * @param dst dump destination
     * @param stride stride of row for the dst buffer
     */
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp(T* dst, const uint stride)
    {
      WARP_LOOP_STA(dst,stride,H,W,{
        const uint offset = j*vstride + i;
        vt_dst[offset] = vt_data[j*vW+i];
      });
    }

    // full-tile store with a destination offset (in elements of T)
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp_offset(T* dst, const uint offset, const uint stride)
    {
      WARP_LOOP_STA_OFF(dst,offset,stride,H,W,{
        voffset += j*vstride + i;
        vt_dst[voffset] = vt_data[j*vW+i];
      });
    }

    // bounded store of an hsize x wsize sub-tile (loop bounds do the clipping)
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp(T* dst, const uint stride, const uint hsize, const uint wsize=W)
    {
      WARP_LOOP_DYN(dst,stride,hsize,wsize,{
        const uint offset = j*vstride + i;
        vt_dst[offset] = vt_data[j*vW+i];
      });
    }

    // bounded store with a destination offset (in elements of T)
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp_offset(T* dst, const uint offset, const uint stride, const uint hsize, const uint wsize=W)
    {
      WARP_LOOP_DYN_OFF(dst,offset,stride,hsize,wsize,{
        voffset += j*vstride + i;
        vt_dst[voffset] = vt_data[j*vW+i];
      });
    }

    // single-thread load of the full static tile
    template <typename VT>
    DEV_INLINE void load_thread(T* src, const uint stride)
    {
      THD_LOOP_STA(src,stride,H,W,{
        const uint offset = j*vstride + i;
        vt_data[j*vW+i] = vt_src[offset];
      });
    }

    // single-thread load with a source offset (in elements of T)
    template <typename VT>
    DEV_INLINE void load_thread_offset(T* src, const uint offset, const uint stride)
    {
      THD_LOOP_STA_OFF(src,offset,stride,H,W,{
        voffset += j*vstride + i;
        vt_data[j*vW+i] = vt_src[voffset];
      });
    }

    // single-thread padded load: iterates the full static H x vW range and
    // zero-fills elements outside hsize x wsize (cf. load_warp_padded)
    template <typename VT>
    DEV_INLINE void 
    load_thread(T* src, const uint stride, const uint hsize, const uint wsize)
    {
      THD_LOOP_STA( src, stride, hsize, wsize, {
        if (j<hsize && i<vwsize) {
          const uint offset = j*vstride + i;
          vt_data[j*vW+i] = vt_src[offset];
        } else VectTypes<T,sizeof(VT)/sizeof(T)>::fill_vec(&vt_data[j*vW+i]);
      });
    }

    // single-thread padded load with a source offset (in elements of T)
    template <typename VT>
    DEV_INLINE void 
    load_thread_offset(T* src, const uint offset, const uint stride, const uint hsize, const uint wsize)
    {
      THD_LOOP_STA_OFF( src, offset, stride, hsize, wsize, {
        if (j<hsize && i<vwsize) {
          voffset += j*vstride + i;
          vt_data[j*vW+i] = vt_src[voffset];
        } else VectTypes<T,sizeof(VT)/sizeof(T)>::fill_vec(&vt_data[j*vW+i]);
      });
    }

    // single-thread store of the full static tile
    template <typename VT>
    DEV_INLINE void dump_thread(T* dst, const uint stride)
    {
      THD_LOOP_STA(dst, stride, H, W, {
        const uint offset = j*vstride + i;
        vt_dst[offset] = vt_data[j*vW+i];
      });
    }

    // single-thread store with a destination offset (in elements of T)
    template <typename VT>
    DEV_INLINE void dump_thread_offset(T* dst, const uint offset, const uint stride)
    {
      THD_LOOP_STA_OFF(dst, offset, stride, H, W, {
        voffset += j*vstride + i;
        vt_dst[voffset] = vt_data[j*vW+i];
      });
    }

    // single-thread bounded store of an hsize x wsize sub-tile
    template <typename VT>
    DEV_INLINE void dump_thread(T* dst, const uint stride, const uint hsize, const uint wsize)
    {
      THD_LOOP_DYN(dst, stride, hsize, wsize, {
        const uint offset = j*vstride + i;
        vt_dst[offset] = vt_data[j*vW+i];  
      });
    }

    // single-thread bounded store with a destination offset (in elements of T)
    template <typename VT>
    DEV_INLINE void dump_thread_offset(T* dst, const uint offset, const uint stride, const uint hsize, const uint wsize)
    {
      THD_LOOP_DYN_OFF(dst, offset, stride, hsize, wsize, {
        voffset += j*vstride + i;
        vt_dst[voffset] = vt_data[j*vW+i];  
      });
    }

    /**
     * @brief this function gathers rows from different ptr.
     * NOTE(review): the loop runs with stride 0, so vstride == 0 and every
     * row j re-reads the same leading vW vectors of ptr_start — with a flat
     * T* all H rows load identical data; confirm the intended use involves
     * per-row pointer redirection.
     * @tparam VT 
     * @tparam CONF 
     * @param ptr_start base pointer of the gathered row(s)
     */
    template <typename VT, typename CONF>
    DEV_INLINE void gather_warp(T* ptr_start)
    {
      WARP_LOOP_STA(ptr_start,0,H,W,{
        vt_data[j*vW+i] = vt_ptr_start[i];
      });
    }

    // bounded gather (see the stride-0 note on gather_warp above)
    template <typename VT, typename CONF>
    DEV_INLINE void gather_warp(T* ptr_start, const uint hsize, const uint wsize = W)
    {
      // no need of stride
      WARP_LOOP_DYN(ptr_start,0,hsize,wsize,{
        vt_data[j*vW+i] = vt_ptr_start[i];
      });
    }

    // padded gather: out-of-range elements are zero-filled
    template <typename VT, typename CONF>
    DEV_INLINE void gather_warp_padded(T* ptr_start, const uint hsize, const uint wsize = W)
    {
      // no need of stride
      WARP_LOOP_STA(ptr_start,0,H,W,{
        if (j<hsize && i <vwsize)
          vt_data[j*vW+i] = vt_ptr_start[i];
        else VectTypes<T,sizeof(VT)/sizeof(T)>::fill_vec(&vt_data[j*vW+i]);
      });
    }

    // debug dump of the tile held by thread `tid`.
    // NOTE(review): "%d" is only correct for (32-bit) integer T; for
    // float/double tiles this prints garbage — debugging aid only.
    DEV_INLINE void print(uint tid)
    {
      __syncthreads();
      if(threadIdx.x == tid)
      {
        for (uint i=0; i<H; i++)
        {
          for ( uint j=0; j<W; j++ )
          {
            printf("%d ", data[i*W+j]);
          }
          printf("\n");
        }
      }
      __syncthreads();
    }

  private:
  /**
   * this macro is the central body of loop iterator.
   * All strides/offsets are converted from elements of T to VT units;
   * W*sizeof(T) is assumed to be a multiple of sizeof(VT) for the
   * conversions to be exact — TODO confirm at instantiation sites.
   */  
  #define VEC_LOOP(jbegin, jend, jstep, ibegin, iend, istep) \
    constexpr uint vW = CEIL(W*sizeof(T), sizeof(VT));        \
    const uint vwsize = CEIL(wsize*sizeof(T), sizeof(VT));    \
    const uint vstride = CEIL(stride*sizeof(T), sizeof(VT));  \
    VT* vt_ptr = reinterpret_cast<VT*>(ptr);                 \
    VT* vt_data = data.template cast<VT>();                   \
    for ( uint j=(jbegin); j<(jend); j+=(jstep) )             \
    for ( uint i=(ibegin); i<(iend); i+=(istep) )


    // ---- CTA-wide drivers: rows strided by CONF::ydim, cols by CONF::xdim ----
    template<typename VT, typename CONF>
    DEV_INLINE void 
    cta_loop_static(T* ptr, const uint stride, const uint hsize, const uint wsize,
                     void(*F)(uint,uint,uint,uint,uint,VT*,VT*))
    {
      VEC_LOOP(CONF::yid(), H, CONF::ydim,
               CONF::xid(), vW, CONF::xdim)
      {
        F(j,hsize,i,vwsize,vstride,vt_ptr,vt_data);
      }
    }

    template<typename VT, typename CONF>
    DEV_INLINE void 
    cta_loop_static(T* ptr, const uint offset, const uint stride, const uint hsize, const uint wsize,
                     void(*F)(uint,uint,uint,uint,uint,uint,VT*,VT*))
    {
      uint voffset = offset*sizeof(T)/sizeof(VT);
      VEC_LOOP(CONF::yid(), H, CONF::ydim,
               CONF::xid(), vW, CONF::xdim)
      {
        F(j,hsize,i,vwsize,voffset,vstride,vt_ptr,vt_data);
      }
    }

    template<typename VT, typename CONF>
    DEV_INLINE void 
    cta_loop_dynamic(T* ptr, const uint stride, const uint hsize, const uint wsize,
                      void(*F)(uint,uint,uint,uint,uint,VT*,VT*))
    {
      VEC_LOOP(CONF::yid(), hsize, CONF::ydim,
               CONF::xid(), vwsize, CONF::xdim)
      {
        F(j,hsize,i,vwsize,vstride,vt_ptr,vt_data);
      }
    }

    template<typename VT, typename CONF>
    DEV_INLINE void 
    cta_loop_dynamic(T* ptr, const uint offset, const uint stride, const uint hsize, const uint wsize,
                      void(*F)(uint,uint,uint,uint,uint,uint,VT*,VT*))
    {
      uint voffset = offset*sizeof(T)/sizeof(VT);
      VEC_LOOP(CONF::yid(), hsize, CONF::ydim,
               CONF::xid(), vwsize, CONF::xdim)
      {
        F(j,hsize,i,vwsize,voffset,vstride,vt_ptr,vt_data);
      }
    }


    // ---- warp/co-op drivers: each co-op group covers all rows, cols strided by CONF::xdim ----
    template<typename VT, typename CONF>
    DEV_INLINE void 
    warp_loop_static(T* ptr, const uint stride, const uint hsize, const uint wsize,
                     void(*F)(uint,uint,uint,uint,uint,VT*,VT*))
    {
      VEC_LOOP(0, H, 1, CONF::xid(), vW, CONF::xdim)
      {
        F(j,hsize,i,vwsize,vstride,vt_ptr,vt_data);
      }
    }

    template<typename VT, typename CONF>
    DEV_INLINE void 
    warp_loop_static(T* ptr, const uint offset, const uint stride, const uint hsize, const uint wsize,
                     void(*F)(uint,uint,uint,uint,uint,uint,VT*,VT*))
    {
      uint voffset = offset*sizeof(T)/sizeof(VT);
      VEC_LOOP(0, H, 1, CONF::xid(), vW, CONF::xdim)
      {
        F(j,hsize,i,vwsize,voffset,vstride,vt_ptr,vt_data);
      }
    }

    template<typename VT, typename CONF>
    DEV_INLINE void 
    warp_loop_dynamic(T* ptr, const uint stride, const uint hsize, const uint wsize,
                      void(*F)(uint,uint,uint,uint,uint,VT*,VT*))
    {
      VEC_LOOP(0, hsize, 1, CONF::xid(), vwsize, CONF::xdim)
      {
        F(j,hsize,i,vwsize,vstride,vt_ptr,vt_data);
      }
    }

    template<typename VT, typename CONF>
    DEV_INLINE void 
    warp_loop_dynamic(T* ptr, const uint offset, const uint stride, const uint hsize, const uint wsize,
                      void(*F)(uint,uint,uint,uint,uint,uint,VT*,VT*))
    {
      uint voffset = offset*sizeof(T)/sizeof(VT);
      VEC_LOOP(0, hsize, 1, CONF::xid(), vwsize, CONF::xdim)
      {
        F(j,hsize,i,vwsize,voffset,vstride,vt_ptr,vt_data);
      }
    }

    // ---- single-thread drivers: one thread walks the whole (sub-)tile ----
    template <typename VT>
    DEV_INLINE void 
    thread_loop_static(T* ptr, const uint stride, const uint hsize, const uint wsize,
                       void(*F)(uint,uint,uint,uint,uint,VT*,VT*))
    {  
      VEC_LOOP(0, H, 1, 0, vW, 1)
      {
        F(j,hsize,i,vwsize,vstride,vt_ptr,vt_data);
      }
    }

    template <typename VT>
    DEV_INLINE void 
    thread_loop_static(T* ptr, const uint offset, const uint stride, const uint hsize, const uint wsize,
                       void(*F)(uint,uint,uint,uint,uint,uint,VT*,VT*))
    {  
      uint voffset = offset*sizeof(T)/sizeof(VT);
      VEC_LOOP(0, H, 1, 0, vW, 1)
      {
        F(j,hsize,i,vwsize,voffset,vstride,vt_ptr,vt_data);
      }
    }

    template <typename VT>
    DEV_INLINE void 
    thread_loop_dynamic(T* ptr, const uint stride, const uint hsize, const uint wsize,
                        void(*F)(uint,uint,uint,uint,uint,VT*,VT*))
    {
      VEC_LOOP(0, hsize, 1, 0, vwsize, 1)
      {
        F(j,hsize,i,vwsize,vstride,vt_ptr,vt_data);
      }
    }

      template <typename VT>
    DEV_INLINE void 
    thread_loop_dynamic(T* ptr, const uint offset, const uint stride, const uint hsize, const uint wsize,
                        void(*F)(uint,uint,uint,uint,uint,uint,VT*,VT*))
    {
      uint voffset = offset*sizeof(T)/sizeof(VT);
      VEC_LOOP(0, hsize, 1, 0, vwsize, 1)
      {
        F(j,hsize,i,vwsize,voffset,vstride,vt_ptr,vt_data);
      }
    }
    #undef VEC_LOOP
  };

  /**
   * @brief 1 x W convenience specialization of AbstractTile: a row vector
   *        with scalar operator[] and stride-0 forwarding wrappers for the
   *        load/dump families.
   */
  template <typename ArrTy, typename T, uint W>
  struct AbstractVect: public AbstractTile<ArrTy, T, 1, W>{
    // scalar element access (the tile has a single row)
    DEV_INLINE T& operator[](uint32_t hidx) { return AbstractTile<ArrTy,T,1,W>::data[hidx]; }

    // vectorized element access; `row` is unused (single-row tile).
    // NOTE(review): &...::data takes the address of the ArrTy wrapper
    // itself; for _ptr_data-backed views that is the address of the POINTER,
    // not of the viewed buffer (unlike data.cast<VT>()) — confirm for
    // pointer-backed subclasses.
    template <typename VT>
    DEV_INLINE VT& at(uint row, uint idx)
    {
      return *(reinterpret_cast<VT*>(&AbstractTile<ArrTy, T, 1, W>::data)+idx);
    }

    // full-width cooperative load (single row, stride irrelevant -> 0)
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp(T* src)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp<VT, CONF>(src, 0);
    }

    template <typename VT, typename CONF>
    DEV_INLINE void load_warp_offset(T* src, const uint offset)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp_offset<VT, CONF>(src, offset, 0);
    }

    // NOTE(review): forwards (conf, src, 0), but the visible base class has
    // no load_warp overload taking a CONF object — this only compiles while
    // it is never instantiated.
      template <typename VT, typename CONF>
    DEV_INLINE void load_warp(CONF conf, T* src)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp<VT, CONF>(conf, src, 0);
    }

    // bounded cooperative load of the first `wsize` elements
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp(T* src, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp<VT, CONF>(src, 0, 1, wsize);
    }

    template <typename VT, typename CONF>
    DEV_INLINE void load_warp_offset(T* src, const uint offset, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp_offset<VT, CONF>(src, offset, 0, 1, wsize);
    }
    
    // NOTE(review): no matching (CONF, T*, uint, uint, uint) overload in the
    // visible base — compiles only if never instantiated.
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp(CONF conf, T* src, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp<VT, CONF>(conf, src, 0, 1, wsize);
    }

    // bounded load with zero padding past wsize
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp_padded(T* src, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp_padded<VT, CONF>(src, 0, 1, wsize);
    }

    template <typename VT, typename CONF>
    DEV_INLINE void load_warp_padded_offset(T* src, const uint offset, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp_padded_offset<VT, CONF>(src, offset, 0, 1, wsize);
    }
    
    // NOTE(review): no matching CONF-object overload in the visible base —
    // compiles only if never instantiated.
    template <typename VT, typename CONF>
    DEV_INLINE void load_warp_padded(CONF conf, T* src, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_warp_padded<VT, CONF>(conf, src, 0, 1, wsize);
    } 

    // full-width cooperative store
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp(T* dst)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_warp<VT, CONF>(dst, 0);
    }

    // NOTE(review): no matching CONF-object overload in the visible base —
    // compiles only if never instantiated.
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp(CONF conf, T* dst)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_warp<VT, CONF>(conf, dst, 0);
    }

    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp_offset(T* dst, const uint offset)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_warp_offset<VT, CONF>(dst, offset, 0);
    }

    // bounded cooperative store of the first `wsize` elements
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp(T* dst, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_warp<VT, CONF>(dst, 0, 1, wsize);
    }

    // NOTE(review): no matching CONF-object overload in the visible base —
    // compiles only if never instantiated.
    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp(CONF conf, T* dst, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_warp<VT, CONF>(conf, dst, 0, 1, wsize);
    }

    template <typename VT, typename CONF>
    DEV_INLINE void dump_warp_offset(T* dst, const uint offset, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_warp_offset<VT, CONF>(dst, offset, 0, 1, wsize);
    }

    // single-thread load of the whole row
    template <typename VT>
    DEV_INLINE void load_thread(T* src)
    {
      AbstractTile<ArrTy,T,1,W>::template load_thread<VT>(src, 0);
    }

    // single-thread padded load of the first `wsize` elements
    template <typename VT>
    DEV_INLINE void load_thread(T* src, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_thread<VT>(src, 0, 1, wsize);
    }

    template <typename VT>
    DEV_INLINE void load_thread_offset(T* src, const uint offset)
    {
      AbstractTile<ArrTy,T,1,W>::template load_thread_offset<VT>(src, offset, 0);
    }

    template <typename VT>
    DEV_INLINE void load_thread_offset(T* src, const uint offset, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template load_thread_offset<VT>(src, offset, 0, 1, wsize);
    }

    // single-thread store of the whole row
    template <typename VT>
    DEV_INLINE void dump_thread(T* dst)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_thread<VT>(dst,0);
    }

    // single-thread store of the first `wsize` elements
    template <typename VT>
    DEV_INLINE void dump_thread(T* dst, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_thread<VT>(dst,0,1,wsize);
    }


    template <typename VT>
    DEV_INLINE void dump_thread_offset(T* dst, const uint offset)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_thread_offset<VT>(dst,offset,0);
    }

    template <typename VT>
    DEV_INLINE void dump_thread_offset(T* dst, const uint offset, const uint wsize)
    {
      AbstractTile<ArrTy,T,1,W>::template dump_thread_offset<VT>(dst,offset,0,1,wsize);
    }
  };

  /**
   * @brief fixed-size thread-local backing array for tiles/vectors.
   *        Member `a` is public because view classes access it directly.
   */
  template<typename T, uint Size>
  struct _local_data {
    T a[Size];

    DEV_INLINE T& operator[](const uint idx)
    {
      return a[idx];
    }

    // reinterpret the storage as an array of vector type VT
    template <typename VT>
    DEV_INLINE VT* cast()
    {
      return reinterpret_cast<VT*>(&a[0]);
    }
  };

  /**
   * @brief non-owning pointer wrapper used as backing storage for view
   *        classes. Member `a` is public because views bind it directly.
   */
  template<typename T>
  struct _ptr_data {
    T* a;

    DEV_INLINE T& operator[](const uint idx)
    {
      return a[idx];
    }

    // reinterpret the viewed buffer as an array of vector type VT
    template <typename VT>
    DEV_INLINE VT* cast()
    {
      return reinterpret_cast<VT*>(a);
    }
  };

  /**
   * @brief register-backed H x W tile owned by a single thread.
   */
  template <typename T, uint H, uint W>
  struct Tile : public AbstractTile<_local_data<T, H*W>, T, H, W> {
    // Fill the whole tile with `val`. The previous body ignored `val` and
    // always zeroed via clean_thread(); set_thread(val) honours the
    // argument (behaviour is identical for init(0)).
    DEV_INLINE void init(T val) { AbstractTile<_local_data<T, H*W>, T, H, W>::set_thread(val); }
  };
  
  /**
   * @brief register-backed length-W row vector owned by a single thread.
   */
  template <typename T, uint W>
  struct Vect : public AbstractVect<_local_data<T, W>, T, W> {
    // Fill the vector with `val`. The previous body ignored `val` and always
    // zeroed via clean_thread(); set_thread(val) honours the argument
    // (behaviour is identical for init(0)).
    DEV_INLINE void init(T val) { AbstractTile<_local_data<T, W>, T, 1, W>::set_thread(val); }
  };

  /**
   * @brief H x W tile view over externally-owned memory (e.g. a shared
   *        memory slice); does not own or free the storage.
   */
  template <typename T, uint H, uint W>
  struct TileView : public AbstractTile<_ptr_data<T>, T, H, W> {
    using AbstractTile<_ptr_data<T>, T, H, W>::data;
    
    // Bind the view to an existing buffer. The previous signature took
    // `int ptr`, which cannot initialize a T* (hard compile error on any
    // instantiation); this mirrors VectView's void* constructor instead.
    DEV_INLINE TileView(void* ptr) { data.a = (T*)ptr; }

    // Row pointer, matching AbstractTile::operator[]. The previous body
    // returned `data[hidx]` — a T&, not the declared T* — and ignored the
    // row width W.
    DEV_INLINE T* operator[](uint32_t hidx) { return &data[hidx*W]; }

    // zero the whole viewed region from this thread alone
    DEV_INLINE void init() 
    { 
      for ( int i=0; i<H*W; i++) data[i] = 0;
    }

    // NOTE(review): a large block of commented-out pointer-redirection
    // overloads (per-row-pointer load/dump/gather variants) was removed
    // here; recover it from version control if that design is revived.
  };

  // Non-owning W-wide vector view over externally managed storage.
  template <typename T, uint W>
  struct VectView : public AbstractVect<_ptr_data<T>, T, W> {
    using AbstractVect<_ptr_data<T>, T, W>::data;
    // Wrap an existing buffer; no allocation happens here.
    DEV_INLINE VectView(void* ptr) { data.a = static_cast<T*>(ptr); }
    // Element accessor (mutable reference).
    DEV_INLINE T& operator[](uint32_t hidx) { return data[hidx]; }
    // Zero-fill all W viewed elements.
    DEV_INLINE void init()
    {
      uint idx = 0;
      while (idx < W)
      {
        data[idx] = 0;
        ++idx;
      }
    }

    // template <typename VT, typename CONF>
    // DEV_INLINE void load_warp(T* src) { data = src; }
    // template <typename VT, typename CONF>
    // DEV_INLINE void load_warp(T* src, const uint wsize, T nan = static_cast<T>(0)) { data = src; }
    // template <typename VT, typename CONF>
    // DEV_INLINE void load_warp_padded(T* src, const uint wsize, T nan = static_cast<T>(0)) { data = src; }
    // template <typename VT, typename CONF>
    // DEV_INLINE void dump_warp(T* dst) {}
    // template <typename VT, typename CONF>
    // DEV_INLINE void dump_warp(T* dst, const uint wsize) {}
    
    // template <typename VT, typename CONF>
    // DEV_INLINE void load_warp(CONF conf, T* src) { data = src; }
    // template <typename VT, typename CONF>
    // DEV_INLINE void load_warp(CONF conf, T* src, const uint wsize, T nan = static_cast<T>(0)) { data = src; }
    // template <typename VT, typename CONF>
    // DEV_INLINE void load_warp_padded(CONF conf, T* src, const uint wsize, T nan = static_cast<T>(0)) { data = src; }
    // template <typename VT, typename CONF>
    // DEV_INLINE void dump_warp(CONF conf, T* dst) {}
    // template <typename VT, typename CONF>
    // DEV_INLINE void dump_warp(CONF conf, T* dst, const uint wsize) {}

    // template <typename VT>
    // DEV_INLINE void load_thread(T* src) { data = src;}
    // template <typename VT>
    // DEV_INLINE void load_thread(T* src, const uint wsize) { data = src; }
    // template <typename VT>
    // DEV_INLINE void dump_thread(T* dst) {}
    // template <typename VT>
    // DEV_INLINE void dump_thread(T* dst, const uint wsize) {}
  };

  /** ---- thread level Vect OPs ----
   * @brief these functions perform vector ops with a given operator
   * @note these functions make no type assumptions about the vector type;
   *   users are supposed to provide lvalue-capable types for the out arrays
   *   (pointers to arrays, or references to objects with [] operators)
   */ 
  // Per-thread elementwise map: out[k] = udf(in[k]) for all k in [0, W).
  template <typename T, typename Arr1, typename Acc, uint W>
  static DEV_INLINE void thread_unary_elementwise(Arr1 in, Acc out, T(*udf)(T))
  {
    #pragma unroll
    for (uint k=0; k<W; ++k) out[k] = udf(in[k]);
  }

  // Per-thread in-place elementwise combine: udf mutates acc[k] using a[k].
  template <typename T, typename Arr1, typename Acc, uint W>
  DEV_INLINE void thread_binary_elementwise_inline(Arr1 a, Acc acc, T(*udf)(T*,T))
  {
    #pragma unroll
    for (uint k=0; k<W; ++k) udf(&acc[k], a[k]);
  }

  // Per-thread fused multiply-accumulate over W lanes under user-supplied
  // multiply (T,T)->T and mutating accumulate (T*,T) operators.
  template <typename T, typename Arr1, typename Arr2, typename Acc, uint W>
  DEV_INLINE void thread_fma_inline(Arr1 a, Arr2 b, Acc acc, T(*udf_mul)(T,T), T(*udf_add)(T*,T))
  {
    #pragma unroll
    for (uint k=0; k<W; ++k)
    {
      const T prod = udf_mul(a[k], b[k]);
      udf_add(&acc[k], prod);
    }
  }

  // Per-thread scalar FMA: acc[k] accumulates a * b[k] for each k in [0, W).
  template <typename T, typename Arr1, typename Acc, uint W>
  DEV_INLINE void thread_scalar_fma_inline(T a, Arr1 b, Acc acc, T(*udf_mul)(T,T), T(*udf_add)(T*,T))
  {
    #pragma unroll
    for (uint k=0; k<W; ++k)
    {
      const T scaled = udf_mul(a, b[k]);
      udf_add(&acc[k], scaled);
    }
  }

  // Per-thread inner product of a and b under user-supplied multiply and
  // mutating accumulate operators; `zero` seeds the accumulator.
  template <typename T, typename Arr1, typename Arr2, uint W>
  DEV_INLINE T thread_inner_produce(Arr1 a, Arr2 b, T(*udf_mul)(T,T), T(*udf_add)(T*,T), T zero=0)
  {
    T result = zero;
    #pragma unroll
    for (uint k=0; k<W; ++k) udf_add(&result, udf_mul(a[k], b[k]));
    return result;
  }
  

  // Per-thread fold of the W entries of `a` into one value via the mutating
  // operator `udf`; `zero` seeds the accumulator.
  template <typename T, typename Arr, uint W>
  DEV_INLINE T thread_reduce(Arr a, T(*udf)(T*,T), T zero=0)
  {
    T acc = zero;
    #pragma unroll
    for (uint k=0; k<W; ++k) udf(&acc, a[k]);
    return acc;
  }

  /** Per-thread inclusive prefix scan: out[i] = reduce(a[0..i]).
   * Fix: the original body referenced an undeclared `udf` (the operator
   * parameter was missing from the signature, so any instantiation failed to
   * compile) and applied the operator twice per element. `udf` is now an
   * explicit mutating accumulator matching thread_reduce's T(*)(T*,T)
   * convention; `zero` seeds the running total.
   */
  template <typename T, typename Arr1, typename Arr2, uint W>
  DEV_INLINE void thread_scan(AbstractVect<Arr1,T,W> a, AbstractVect<Arr2,T,W> &out, T(*udf)(T*,T), T zero=0)
  {
    T sum = zero;
    #pragma unroll
    for (uint i=0; i<W; ++i)
    {
      udf(&sum, a[i]);  // fold a[i] into the running total
      out[i] = sum;     // inclusive: element i sees a[0..i]
    }
  }

  /// ---- warp level Vect OPs ----
  // Group-strided elementwise map: thread xid handles lanes xid, xid+xdim, ...
  // of the W-wide vector.
  template <typename T, typename Arr1, typename Arr2, uint W, typename CONF>
  DEV_INLINE void warp_unary_elementwise(AbstractVect<Arr1,T,W> in, AbstractVect<Arr2,T,W> &out, T(*udf)(T))
  {
    for (uint lane=CONF::xid(); lane<W; lane+=CONF::xdim ) out[lane] = udf(in[lane]);
  }

  // Group-strided binary elementwise op: out[lane] = udf(a[lane], b[lane]).
  template <typename T, typename Arr1, typename Arr2, typename Arr3, uint W, typename CONF>
  DEV_INLINE void warp_binary_elementwise(AbstractVect<Arr1,T,W> a, AbstractVect<Arr2,T,W> b, AbstractVect<Arr3,T,W> &out, T(*udf)(T,T))
  {
    for (uint lane=CONF::xid(); lane<W; lane+=CONF::xdim ) out[lane] = udf(a[lane], b[lane]);
  }

  /** Group-strided fused multiply-accumulate: acc[i] accumulates a[i]*b[i].
   * Fix: the accumulator address was formed as `acc+i`, which attempts
   * pointer arithmetic on the AbstractVect object itself rather than taking
   * the address of element i; use `&acc[i]`, matching thread_fma_inline.
   */
  template <typename T, typename Arr1, typename Arr2, typename Arr3, uint W, typename CONF>
  DEV_INLINE void warp_fma_inline(AbstractVect<Arr1,T,W> a, AbstractVect<Arr2,T,W> b, AbstractVect<Arr3,T,W> &acc, T(*udf_mul)(T,T), T(*udf_add)(T*,T))
  {
    for (uint i=CONF::xid(); i<W; i+=CONF::xdim)
    {
      udf_add(&acc[i], udf_mul(a[i], b[i]));
    }
  }

  /** Group-strided scalar FMA: acc[i] accumulates a * b[i].
   * Fix: `udf_add` has signature T(*)(T*,T) and mutates through its first
   * argument, but the original passed `acc[i]` by value and assigned the
   * return — a signature mismatch. Pass `&acc[i]` and drop the assignment,
   * matching thread_scalar_fma_inline.
   */
  template <typename T, typename Arr1, typename Arr2, uint W, typename CONF>
  DEV_INLINE void warp_scalar_fma_inline(T a, AbstractVect<Arr1,T,W> b, AbstractVect<Arr2,T,W> &acc, T(*udf_mul)(T,T), T(*udf_add)(T*,T))
  {
    for (uint i=CONF::xid(); i<W; i+=CONF::xdim)
    {
      udf_add(&acc[i], udf_mul(a, b[i]));
    }
  }

  /** Cross-lane tree reduction via warp shuffles.
   * Each of the W per-thread partials in `a` is folded across the group using
   * the mutating operator `udf`; deltas run from xdim/2 down to N_lane, so
   * the first N_lane lanes end up holding the reduced values.
   * NOTE(review): assumes all 32 lanes of the warp are active (full mask) and
   * CONF::xdim <= 32 — see the commented static_assert. `zero` is unused but
   * kept for interface parity with the shared-memory variant.
   */
  template <typename T, typename Acc, uint W, uint N_lane, typename CONF>
  DEV_INLINE void warp_reduce_shuffle(Acc a, T(*udf)(T*,T), T zero=0)
  {
    constexpr uint width = CONF::xdim;

    // static_assert(width <= 32 && "cross-lane reduce does not apply to groups > warp_size");

    #pragma unroll
    for (uint i=0; i<W; ++i)
    {
      // TODO: what is actually needed here is a reduction between threads
      // that share the same xid within a group but differ in yid; delta
      // should span 16..xdim.
      // shuffle_reduce<T, folds>(width, a[i], width/2, udf);
      for (uint delta = width/2; delta >= N_lane; delta/=2)
      {
        // Explicit full-warp participation mask (was -1; same bits, clearer).
        udf(&a[i], __shfl_xor_sync(0xffffffffu, a[i], delta, width));
      }
    }
  }

  /** Group-level tree reduction through a shared-memory buffer.
   * `a` holds W per-thread partials; after the call the first N_lane threads
   * of the group hold the reduced values. `_buffer` must provide at least
   * CONF::xdim elements of scratch visible to the whole group. `zero` is
   * unused but kept for interface parity with the shuffle variant.
   */
  template <typename T, typename Arr1, uint W, uint N_lane, typename CONF>
  DEV_INLINE void warp_reduce_shared(
      Arr1 a, T(*udf)(T*,T), T* _buffer, T zero=0)
  {
    constexpr uint width = CONF::xdim;
    
    #pragma unroll
    for (uint i=0; i<W; ++i)
    {
      #pragma unroll
      for (uint delta = width/2; delta >= N_lane; delta/=2)
      {
        if ( CONF::xid() < delta*2 ) _buffer[CONF::xid()] = a[i];
        CONF::sync();
        if ( CONF::xid() < delta ) udf(&a[i], _buffer[CONF::xid()+delta]);
        // Fix: barrier before the next round (or next i) rewrites _buffer;
        // without it, writers race against threads still reading the buffer.
        CONF::sync();
      }
    }
  }

  /** Segmented tree reduction through shared memory.
   * Threads whose per-down-group keys in `key_buf` match fold their partials
   * together via the mutating operator `udf`, exchanging values through the
   * shared scratch `v_buf` (>= CONF::xdim elements). Returns true on the
   * leading thread of each key segment.
   */
  template <typename K, typename T, typename Arr1, uint W, uint N_lane, typename CONF>
  DEV_INLINE bool warp_segmented_reduce_shared(
      Arr1 a, T(*udf)(T*,T), K key_buf[], T v_buf[], T zero=0)
  {
    constexpr uint width = CONF::xdim;
    const uint dn_id = CONF::xid() / N_lane;
    // lead: this thread starts a new segment (first group, or key differs
    // from the previous down-group's key).
    K seg_key = key_buf[dn_id], ex_key;
    bool lead = !dn_id || seg_key != key_buf[dn_id-1];

    #pragma unroll
    for (uint i=0; i<W; ++i)
    {
      #pragma unroll
      for (uint delta = width/2; delta >= N_lane; delta/=2)
      {
        v_buf[CONF::xid()] = a[i];
        // Partner key `delta` lanes away; -1 marks "no partner".
        // NOTE(review): key_buf[dn_id + delta/N_lane] can index past the
        // group's key range for upper threads — confirm key_buf is padded.
        // NOTE(review): no barrier between the read below and the next
        // round's write to v_buf — confirm CONF::sync()'s scope makes the
        // read/write ordering safe.
        ex_key = ( CONF::xid() < delta ) ? key_buf[dn_id + delta/N_lane] : -1;
        CONF::sync();
        if (ex_key == seg_key) udf(&a[i], v_buf[CONF::xid()+delta]);
      }
    }
    return lead;
  }

  /** Segmented warp reduction using shuffle exchanges.
   * Lanes whose segment key (seg_key[0]) matches combine their W partials in
   * `a` via the mutating operator `udf`. Returns true for the leading lane of
   * each segment. Assumes all 32 lanes of the warp are active (full masks).
   */
  template <typename K, typename T, typename Arr1, uint W, uint N_lane, typename CONF>
  DEV_INLINE bool warp_segmented_reduce_shuffle(
      Arr1 a, T(*udf)(T*,T), K seg_key[], T zero=0)
  {
    constexpr uint width = CONF::xdim;
    const uint dn_id = CONF::xid() / N_lane;
    K ex_key;
    T ex_val;

    // Fix: the shuffle must be executed by every lane that the mask claims
    // participates. The original evaluated __shfl_up_sync inside a
    // short-circuiting `!dn_id || ...`, so lanes with dn_id == 0 skipped the
    // exchange while the full mask said otherwise — undefined behavior under
    // the *_sync contract. Perform the exchange unconditionally first.
    const K prev_key = __shfl_up_sync(0xffffffffu, seg_key[0], 1, width);
    const bool lead = !dn_id || (seg_key[0] != prev_key);
    
    #pragma unroll
    for (uint i=0; i<W; ++i)
    {
      #pragma unroll
      for (uint delta = width/2; delta >= N_lane; delta/=2)
      {
        // Exchange key and value with the lane `delta` apart; fold only when
        // the partner belongs to the same segment.
        ex_key = __shfl_xor_sync(0xffffffffu, seg_key[0], delta, width);
        ex_val = __shfl_xor_sync(0xffffffffu, a[i], delta, width);
        if (ex_key == seg_key[0]) udf(&a[i], ex_val);
      }
    }
    return lead;
  }

  /** Binary search over sorted `list[start, end)`.
   * Returns the index of the LAST element that is <= key (upper_bound - 1).
   * NOTE(review): when every element is > key this returns start-1, which
   * wraps around for unsigned IT — confirm callers either guarantee
   * list[start] <= key or treat the wrapped value as a sentinel.
   */
  template <typename IT, typename T, typename Arr>
  DEV_INLINE IT
  binary_search(T key, Arr list, IT start, IT end)
  {
    while(start < end)
    {
      // Invariant: the first index with list[idx] > key lies in [start, end].
      IT mid = (start + end)>>1;
      if (list[mid] <= key) {
        start = mid+1;
      } else {
        end = mid;
      }
    }
    // start == end == first index with list[idx] > key; answer is one before.
    return end-1;
  }

  // TODO(unimplemented): warp-level prefix scan stub — currently a no-op;
  // `out` is left untouched and `zero` is ignored.
  template <typename T, typename Arr1, typename Arr2, uint W, typename CONF>
  DEV_INLINE void warp_scan(Arr1 a, Arr2 out, T zero=0)
  {

  }


  /** Cooperative tile matrix multiply-accumulate: C += A * B, partitioned
   * across the CONF thread group. Each thread computes its own
   * (y_ts x x_ts) sub-block of C.
   * NOTE(review): assumes W % CONF::xdim == 0 and H % CONF::ydim == 0 —
   * confirm callers guarantee this, otherwise trailing rows/cols are skipped.
   */
  template <typename T, uint H, uint W, uint K, typename CONF>
  DEV_INLINE void tile_mm(Tile<T, H, W>& tileC, const Tile<T, H, K>& tileA, const Tile<T, K, W>& tileB)
  {
    constexpr uint x_ts = W / CONF::xdim;
    constexpr uint y_ts = H / CONF::ydim;

    // Fix: xid()/yid() are per-thread runtime coordinates, so these offsets
    // cannot be constexpr (that would be a compile error); `const` is correct.
    const uint x_offset = CONF::xid()*x_ts;
    const uint y_offset = CONF::yid()*y_ts;

    for (uint k=0; k<K; ++k)
    for (uint j=0; j<y_ts; ++j)
    for (uint i=0; i<x_ts; ++i)
    {
      tileC[y_offset+j][x_offset+i] += tileA[y_offset+j][k] * tileB[k][x_offset+i];
    }
  }

  /** Pointer alignment helpers for vlen-element vectorized accesses.
   * NOTE(review): assumes CEIL(a,b) from cuda_utils.cuh is ceil-division
   * ((a+b-1)/b), the counterpart of alignDown's floor-divide-then-multiply
   * pattern — confirm against the macro definition.
   */
  // Fix: the cast previously bound to CEIL(...) alone, producing an illegal
  // pointer-times-integer product; compute the byte address fully, then cast.
  template <typename T, uint vlen>
  DEV_INLINE T* alignUp(const T* ptr) { return (T*) (CEIL((size_t)ptr, (sizeof(T)*vlen)) * (sizeof(T)*vlen)); }
  // Round ptr down to the previous vlen*sizeof(T) boundary.
  template <typename T, uint vlen>
  DEV_INLINE T* alignDown(const T* ptr) { return (T*) (((size_t)ptr) / (sizeof(T)*vlen) * (sizeof(T)*vlen)); }
  // Element count between the aligned-down boundary and ptr.
  template <typename T, uint vlen>
  DEV_INLINE uint alignDownResidue(const T* ptr) { return ((size_t)ptr % (sizeof(T)*vlen))/sizeof(T);} 
  // Element count from ptr up to the next boundary.
  // NOTE(review): yields vlen (not 0) when ptr is already aligned — confirm intended.
  template <typename T, uint vlen>
  DEV_INLINE uint alignUpResidue(const T* ptr) { return vlen - alignDownResidue<T,vlen>(ptr);}

#undef DEV_INLINE
};
#endif