#ifndef _CSRGRAPH_HXX__
#define _CSRGRAPH_HXX__

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdio>
#include <utility>
#include <vector>
// #define DEBUG
#include "config.hxx"
#include "../utils.hxx"
#include "GraphData.cuh"
#include "cuda/ops/graph_info.cuh"

namespace GraphData
{
  /**
   * @brief Host-side sort of vertex ids by non-increasing degree.
   * @param offset     CSR offset array of length size+1; degree of vertex i
   *                   is offset[i+1] - offset[i]
   * @param sorted_idx out: vertex ids ordered by descending degree
   * @param size       number of vertices
   */
  template <typename IndexType>
  static inline void _vertex_sort_cpu(const IndexType *offset, IndexType *sorted_idx, const size_t size) {
    struct kv_t {
      IndexType row;  // vertex id
      IndexType nz;   // degree of that vertex
    };
    std::vector<kv_t> sorter;
    sorter.reserve(size);  // single allocation instead of repeated growth
    // size_t loop index: avoids signed/unsigned mismatch against `size`
    for (size_t i = 0; i < size; ++i)
    {
      sorter.push_back({static_cast<IndexType>(i), offset[i + 1] - offset[i]});
    }

    // stable_sort keeps equal-degree vertices in ascending id order, making
    // the output deterministic (std::sort leaves tie order unspecified).
    std::stable_sort(sorter.begin(), sorter.end(), [](const kv_t &a, const kv_t &b) {
      return a.nz > b.nz;
    });

    for (size_t i = 0; i < size; ++i)
    {
      sorted_idx[i] = sorter[i].row;
    }
  }

  /**
   * @brief (device) Intended block-tile sort of vertices by degree: each warp
   *        bitonic-sorts 32 elements, then the 32-element runs are merged
   *        pairwise through double-buffered shared memory. Currently dead
   *        code: sortVertex() still asserts for the CUDA path.
   * @note NOTE(review): several defects keep this kernel from compiling or
   *       working as written; left untouched pending a decision on sort
   *       direction and on the semantics of the project `binary_search`:
   *       - t_buf / i_buf must be arrays of pointers (T* t_buf[], I* i_buf[]);
   *         i_buf additionally has the wrong element type (T instead of I).
   *       - the shared-merge loop declares `idx` but tests and increments
   *         `tid`, so `idx` never advances and the condition is wrong.
   *       - `ib = 1 - ib` executes once per ELEMENT; it must flip once per
   *         merge PASS (after the inner loop, at the barrier).
   *       - a __syncthreads() is missing between the warp-sort stores and the
   *         first shared-memory merge read.
   *       - binary_search receives `off` (the run length) as its third
   *         argument; the merge rank needs the element's key `val` searched
   *         in the OPPOSITE run, while `beg` currently selects the element's
   *         OWN run.
   *       - the final global store has no `idx < size` guard.
   *       - the INT_MAX padding sentinel matches an ascending sort, but the
   *         CPU counterpart (_vertex_sort_cpu) sorts by DESCENDING degree;
   *         the warp merge `if (val < nval)` also lacks the lane-direction
   *         term of a bitonic compare-exchange, so both partners can keep the
   *         same (max) element.
   *       - assumes blockDim.x == 1024 throughout (hard-coded strides).
   */
  template<typename I, typename T>
  static void __global__ _vertex_sort_cuda_block(const T *offset, 
                                                 I *sorted_idx, 
                                                 const size_t size) {
    constexpr int CAP=2;
    // ping-pong shared staging buffers: two halves of each array
    __shared__ T __t_buf[1024*CAP*2];
    __shared__ I __i_buf[1024*CAP*2];
    T t_buf[] = {&__t_buf[0], &__t_buf[1024*CAP]};
    T i_buf[] = {&__i_buf[0], &__i_buf[1024*CAP]};
    // global thread parallel
    const int tid = threadIdx.x;
    const int bsize = blockDim.x; // NOTE(review): unused
    const int bid = blockIdx.x;
  
    const int wid = tid >> 5; // NOTE(review): unused

    for ( int sh_off = 0; sh_off < CAP; sh_off++) {
      int idx = tid + 1024 * bid * CAP + sh_off * 1024;
      // sort key = vertex degree; out-of-range lanes get an INT_MAX sentinel
      T val = idx < size ? offset[idx+1] - offset[idx] : INT_MAX;
      I ind = idx;
      // warp bitonic sort - bitonic generation
      for ( int w = 2; w < 32; w <<= 1) {
        for (int off = w/2; off > 0; off >>= 1) {
          bool dir = ((w-1) & tid) > off; 
          T nval = __shfl_xor_sync((uint)-1, val, off, w);
          I nind = __shfl_xor_sync((uint)-1, ind, off, w);
          if ((val >= nval) == dir) { // ascending order - dir=0: descending, dir=1: ascending
            val = nval;
            ind = nind;
          }
        }
      }
      // warp bitonic sort - bitonic merge
      for (int off=16; off > 0; off /= 2) {
        T nval = __shfl_xor_sync((uint)-1, val, off, off*2);
        I nind = __shfl_xor_sync((uint)-1, ind, off, off*2);
        if (val < nval) {
          val = nval;
          ind = nind;
        }
      }
      // publish this thread's 32-run element to shared buffer 0
      t_buf[0][tid + sh_off * 1024] = val;
      i_buf[0][tid + sh_off * 1024] = ind;
    }

    // shared merge: double runs of length `off` into runs of length 2*off
    int ib = 0;
    for ( int off=32; off<1024*CAP; off<<=1 ) {
      // scan over shared
      for ( int idx = tid; tid < 1024*CAP; tid+=1024 ) {
        // value
        T val = t_buf[ib][idx];
        I ind = i_buf[ib][idx];
        // LR: nonzero iff idx sits in the right half of its 2*off segment
        bool lr = idx & (2*off-1) & (-off);
        int idx_base = idx & (-2*off);
        int beg = lr ? idx_base + off : idx_base;
        int end = beg + off;
        // rank of this element within the partner run (see NOTE above)
        int idx_ex = binary_search<T>(&t_buf[ib][beg], &t_buf[ib][end], off);
        ib = 1 - ib; // in buf -> out buf
        int idx_loc = idx & (off-1);
        int new_idx = idx_base + idx_loc + idx_ex;
        t_buf[ib][new_idx] = val;
        i_buf[ib][new_idx] = ind;
      }
      __syncthreads();
    } 

    // write the final permutation back to global memory
    for (int sh_off = 0; sh_off < CAP; sh_off++) {
      int idx = tid + 1024 * bid * CAP + sh_off * 1024;
      sorted_idx[idx] = i_buf[ib][tid + 1024*sh_off];
    }
  }

  /**
   * @brief (device) Global-memory sort of vertices by degree.
   * @param sort_buffer caller-provided scratch space, at least
   *                    size * (2*sizeof(I) + 2*sizeof(T)) bytes, carved into
   *                    ping-pong index/value buffers below.
   * @note NOTE(review): the sort itself is not implemented yet; this stub
   *       only lays out the buffers.
   */
  template<typename I, typename T>
  static void __global__ _vertex_sort_cuda_global(const T *offset, 
                                                  I *sorted_idx, 
                                                  const size_t size,
                                                  void* sort_buffer) {
    // FIX: arithmetic on void* is non-standard and void* does not implicitly
    // convert to I*/T* in C++ -- go through a char* base with explicit casts.
    char* base = static_cast<char*>(sort_buffer);
    I* idx_buf[] = {sorted_idx, reinterpret_cast<I*>(base)};
    T* val_buf[] = {reinterpret_cast<T*>(base + sizeof(I) * size),
                    reinterpret_cast<T*>(base + (sizeof(I) + sizeof(T)) * size)};
    // silence unused warnings until the sort body lands
    (void)offset; (void)idx_buf; (void)val_buf;
  }
  
  /**
   * @brief (device) Expands a CSR offset array into a per-edge row index
   *        array (the COO row column), one thread per edge. CUDA counterpart
   *        of _expand_row_idx_cpu: dst_idx[j] = i for j in
   *        [offset[i], offset[i+1]).
   * @note 1-D launch with at least num_e threads total.
   */
  template <typename IndexType>
  static void __global__ _expand_row_idx_cuda(const IndexType *offset,
                                              IndexType *dst_idx, 
                                              const size_t num_n, 
                                              const size_t num_e) {
    // size_t arithmetic avoids 32-bit overflow of blockIdx.x * blockDim.x
    const size_t gtid = threadIdx.x + (size_t)blockIdx.x * blockDim.x;
    if (gtid >= num_e) return;
    // FIX: search for this thread's edge id (gtid), not the constant num_e --
    // otherwise every thread computed the same row.
    // NOTE(review): assumes binary_search(first, last, key) returns the row r
    // with offset[r] <= key < offset[r+1]; if it returns a lower-bound
    // insertion index instead, an upper_bound-1 adjustment is needed. Confirm
    // against the project helper.
    dst_idx[gtid] = binary_search<IndexType>(offset, offset + num_n + 1,
                                             (IndexType)gtid);
  }

  /**
   * @brief Host-side expansion of a CSR offset array into a per-edge row
   *        index array (the COO row column): dst_idx[j] = i for every
   *        j in [offset[i], offset[i+1]).
   * @param offset  CSR offsets, length num_n + 1
   * @param dst_idx out array of length num_e
   * @param num_n   number of rows
   * @param num_e   number of edges (unused; kept for symmetry with the CUDA
   *                variant)
   */
  template <typename IndexType>
  static inline void _expand_row_idx_cpu(const IndexType *offset,
                                         IndexType *dst_idx,
                                         const size_t num_n,
                                         const size_t num_e) {
    (void)num_e;
  #pragma omp parallel for 
    // signed loop variable for OpenMP; long long so num_n > INT_MAX is safe
    for (long long i = 0; i < (long long)num_n; i++) {
      // FIX: offsets iterate in IndexType (was int) so edge counts beyond
      // INT_MAX do not overflow the inner loop variable
      for (IndexType j = offset[i]; j < offset[i + 1]; j++) {
        dst_idx[j] = (IndexType)i;
      }
    }
  }
  /**
   * @brief This class is a CSR-CSC hybrid class, containing necessary structures for the representation
   */ 
  template <typename IndexType> class CSRView;

  /**
   * @brief CSR-CSC hybrid graph container. The forward arrays
   *        row_offset/col_idx/edge_idx are always present; the reverse arrays
   *        col_offset/row_idx/eidx_rev are optional, signalled by a non-null
   *        col_offset.
   */
  template<typename IndexType>
  struct CSRGraph : public MetaGraph<IndexType>
  {
    IndexType *row_offset;  // forward offsets, length nvertex+1
    IndexType *col_offset;  // reverse offsets; nullptr when no reverse graph

    IndexType *row_idx;     // adjacency array paired with col_offset
    IndexType *col_idx;     // adjacency array paired with row_offset

    IndexType *edge_idx;    // original edge id per col_idx entry
    IndexType *eidx_rev;    // original edge id per row_idx entry

    // degree-sorted vertex orders, built lazily by sortVertex()
    IndexType *sorted_ridx{nullptr};
    IndexType *sorted_cidx{nullptr};

    enum PartitionAlg {
      CSR_MERGE_PATH, CSR_ROW_BINNING
    };
    using super = MetaGraph<IndexType>;

    /**
     * @brief Builds a graph from already built arrays, maybe from pytorch tensors
     * @note Graphs built from this constructor should not be freed manually since 
     *       the arrays are managed by python
     * @param nvertex number of vertices
     * @param nedge number of edges
     * @param on_dev where the input arrays resides 
     * @param row_offset ~ edge_idx forward graph representation (CSR)
     * @param col_offset ~ eidx_rev backward graph representation (CSC)
     */ 
    CSRGraph( size_t nvertex, size_t nedge, DeviceTag on_dev,
              IndexType *row_offset, IndexType *col_idx, IndexType *edge_idx,
              IndexType *col_offset = nullptr, IndexType *row_idx = nullptr, IndexType *eidx_rev = nullptr
            ) : MetaGraph<IndexType>(nvertex, nedge, on_dev), row_offset(row_offset), 
                col_idx(col_idx), edge_idx(edge_idx),
                // FIX: these three members were never initialized from the
                // parameters, leaving them as garbage (the destructor and
                // every `col_offset != nullptr` test then read uninitialized
                // memory)
                col_offset(col_offset), row_idx(row_idx), eidx_rev(eidx_rev)
    {
      super::independent = false;  // arrays owned by the caller (python side)
      assert(row_offset != nullptr && col_idx != nullptr);

      // reverse representation must be supplied all-or-nothing
      if (col_offset != nullptr || row_idx != nullptr)
      {
        assert(col_offset != nullptr && row_idx != nullptr);
        assert(!(edge_idx == nullptr xor eidx_rev == nullptr));
      }
    }

    /**
     * @brief Builds a CSR Graph from a raw COO (src, dst, eid) edge list.
     * @param with_reverse also build the reverse (CSC) representation
     */ 
    CSRGraph( IndexType *src_raw, IndexType *dst_raw, IndexType *eidx_raw, 
              size_t nedge, bool with_reverse = false
            ): MetaGraph<IndexType>(src_raw, dst_raw, eidx_raw, nedge)
    {
      // sentinel: fromCOO reads `col_offset != nullptr` as the with_reverse
      // flag before _initializeField replaces it with a real allocation
      if ( with_reverse ) col_offset = reinterpret_cast<IndexType*>(100); // any value but 'nullptr'
      fromCOO(src_raw, dst_raw, eidx_raw);
    }

    /**
     * @brief Converts a COO edge list into this graph's CSR (and optionally
     *        CSC) arrays: counts degrees, prunes isolated vertices, builds
     *        prefix-sum offsets, then scatters edges.
     * @param eidx_raw optional original edge ids; when null the input order
     *        index is used instead
     */
    void fromCOO(IndexType *src_raw, IndexType *dst_raw, IndexType *eidx_raw)
    {
      IndexType *odeg, *ideg, *ideg_, *odeg_;
      bool with_reverse = (col_offset != nullptr);
      bool with_eidx = (eidx_raw != nullptr);
      
      MEMALLOC(IndexType, ideg, super::nvertex, super::on_dev);
      MEMALLOC(IndexType, odeg, super::nvertex, super::on_dev);
      MEMCLEAN(ideg, super::nvertex, super::on_dev);
      MEMCLEAN(odeg, super::nvertex, super::on_dev);

      MYLOG("tag 1\n");

      // count i/o degree & count connected edges
      auto label_map = count_degree(src_raw, dst_raw, ideg, odeg, super::nvertex, super::nedge);

      // remove isolated nodes: compact degrees onto relabeled vertices
      MEMALLOC(IndexType, ideg_, label_map.size(), super::on_dev);
      MEMALLOC(IndexType, odeg_, label_map.size(), super::on_dev);
      
      auto r_label_map = prune_empty(super::nvertex, label_map, ideg, odeg,
                                     ideg_, odeg_);
      
      MEMFREE(ideg, super::on_dev);
      MEMFREE(odeg, super::on_dev);
      ideg = ideg_; odeg = odeg_;
      super::nvertex = label_map.size();
      
      _initializeField(super::nvertex, super::nedge, CPU, with_reverse);
      MYLOG("CSR init Ok\n");

      // exclusive prefix sum of degrees -> offset arrays
      for (size_t i=0; i<super::nvertex; ++i)
      {
        row_offset[i+1] = ideg[i]+row_offset[i];
        if (with_reverse) col_offset[i+1] = odeg[i]+col_offset[i];
        MYLOG("tag 1.5 on i=%zu\n", i); // FIX: %zu for size_t (was %d)
      }

      // reuse the degree arrays as per-vertex insertion cursors
      MEMCLEAN(ideg, super::nvertex, super::on_dev);
      if (with_reverse) MEMCLEAN(odeg, super::nvertex, super::on_dev);
      MYLOG("tag 2\n");

      // scatter each edge into its CSR (and CSC) slot
      for (size_t i=0; i<super::nedge; ++i)
      {
        IndexType s,d;
        s = r_label_map[src_raw[i]];
        d = r_label_map[dst_raw[i]];
        assert(s < super::nvertex && d < super::nvertex);
        col_idx[row_offset[d]+ideg[d]] = s;
        edge_idx[row_offset[d]+ideg[d]] = with_eidx ? eidx_raw[i] : i;
        ++ideg[d];
        if (with_reverse)
        {
          row_idx[col_offset[s]+odeg[s]] = d;
          eidx_rev[col_offset[s]+odeg[s]] =  with_eidx ? eidx_raw[i] : i;
          ++odeg[s];
        }
      }

      MYLOG("tag 3\n");
      // NOTE(review): ideg/odeg were (re)allocated with super::on_dev above
      // but are freed with an explicit CPU tag here -- fromCOO presumably
      // only runs host-side (_initializeField is called with CPU); confirm.
      MEMFREE(ideg, DeviceTag::CPU);
      MEMFREE(odeg, DeviceTag::CPU);
    }

    // Frees the owned arrays; no-op for python-managed graphs
    // (independent == false).
    ~CSRGraph()
    {
      if (super::independent)
      {
        MEMFREE(row_offset, super::on_dev);
        MEMFREE(col_idx, super::on_dev);
        MEMFREE(edge_idx, super::on_dev);
        if (col_offset != nullptr) 
        {
          MEMFREE(col_offset, super::on_dev);
          MEMFREE(row_idx, super::on_dev);
          MEMFREE(eidx_rev, super::on_dev);
        }
      }
    }

    /**
     * @brief Moves all graph arrays to the GPU (allocate, copy, free source).
     *        No-op when already device-resident.
     */
    void cuda()
    {
      auto src_dev = super::on_dev;
      auto dst_dev = DeviceTag::CUDA;

      if (src_dev == dst_dev) return;
      super::on_dev = dst_dev;

      bool reverse = (col_offset != nullptr);
      IndexType *ro, *co, *ri, *ci;
      IndexType *ev = nullptr, *er = nullptr;

      MEMALLOC(IndexType,ro,super::nvertex+1,dst_dev);
      MEMALLOC(IndexType,ci,super::nedge,dst_dev);
      MEMALLOC(IndexType,ev,super::nedge,dst_dev);

      if (reverse)
      {
        MEMALLOC(IndexType,co,super::nvertex+1,dst_dev);
        MEMALLOC(IndexType,ri,super::nedge,dst_dev);
        MEMALLOC(IndexType,er,super::nedge,dst_dev);
      } else {
        co = ri = nullptr;
        er = nullptr;
      }
      H_ERR(TODEV(ro, row_offset, (super::nvertex+1)));
      H_ERR(TODEV(ci, col_idx, super::nedge));
      H_ERR(TODEV(ev, edge_idx, super::nedge));
      if (reverse)
      {
        H_ERR(TODEV(co, col_offset, (super::nvertex+1)));
        H_ERR(TODEV(ri, row_idx, super::nedge));
        H_ERR(TODEV(er, eidx_rev, super::nedge));
      }
      // release the host copies, then adopt the device pointers
      MEMFREE(row_offset, src_dev);
      MEMFREE(col_idx, src_dev);
      MEMFREE(edge_idx, src_dev);
      if (reverse)
      {
        MEMFREE(col_offset, src_dev);
        MEMFREE(row_idx, src_dev);
        MEMFREE(eidx_rev, src_dev);
      }
      row_offset = ro;
      col_idx = ci;
      edge_idx = ev;
      if (reverse)
      {
        col_offset = co;
        row_idx = ri;
        eidx_rev = er;
      }
    }

    /**
     * @brief Moves all graph arrays back to the host (mirror of cuda()).
     *        No-op when already host-resident.
     */
    void cpu()
    {
      auto src_dev = super::on_dev;
      auto dst_dev = DeviceTag::CPU;
      if (src_dev == dst_dev) return;
      super::on_dev = CPU;

      bool reverse = (col_offset != nullptr);
      IndexType *ro, *co, *ri, *ci;
      IndexType *ev = nullptr, *er = nullptr;

      MEMALLOC(IndexType,ro,super::nvertex+1,dst_dev);
      MEMALLOC(IndexType,ci,super::nedge,dst_dev);
      MEMALLOC(IndexType,ev,super::nedge,dst_dev);

      if (reverse)
      {
        MEMALLOC(IndexType,co,super::nvertex+1,dst_dev);
        MEMALLOC(IndexType,ri,super::nedge,dst_dev);
        MEMALLOC(IndexType,er,super::nedge,dst_dev);
      } else {
        co = ri = nullptr;
        er = nullptr;
      }
      H_ERR(TOHOST(row_offset, ro, (super::nvertex+1)));
      H_ERR(TOHOST(col_idx, ci, super::nedge));
      H_ERR(TOHOST(edge_idx, ev, super::nedge));
      if (reverse)
      {
        H_ERR(TOHOST(col_offset, co, (super::nvertex+1)));
        H_ERR(TOHOST(row_idx, ri, super::nedge));
        H_ERR(TOHOST(eidx_rev, er, super::nedge));
      }
      // release the device copies, then adopt the host pointers
      MEMFREE(row_offset, src_dev);
      MEMFREE(col_idx, src_dev);
      MEMFREE(edge_idx, src_dev);
      if (reverse)
      {
        MEMFREE(col_offset, src_dev);
        MEMFREE(row_idx, src_dev);
        MEMFREE(eidx_rev, src_dev);
      }
      row_offset = ro;
      col_idx = ci;
      edge_idx = ev;
      if (reverse)
      {
        col_offset = co;
        row_idx = ri;
        eidx_rev = er;
      }
    }

    /**
     * @brief Returns (and caches) the vertex ids sorted by descending degree.
     * @param reverse sort by reverse-graph degree instead of forward
     * @return cached sorted array, or nullptr when the requested offset
     *         array does not exist
     */
    IndexType* sortVertex(bool reverse=false)
    {
      if (reverse && sorted_cidx != nullptr) return sorted_cidx;
      if (!reverse && sorted_ridx != nullptr) return sorted_ridx;

      IndexType *offset = reverse ? col_offset : row_offset;
      if (offset == nullptr) return nullptr;
      IndexType *sorted_index;
      MEMALLOC(IndexType, sorted_index, super::nvertex, super::on_dev);

      if (super::on_dev == DeviceTag::CPU)
      {
        _vertex_sort_cpu(offset, sorted_index, super::nvertex);
      }
      else if (super::on_dev == DeviceTag::CUDA)
      {
        assert(false && "Unimplemented"); // FIX: typo "Uninplemented"
        // uint blks = super::nvertex / 1024;
        // _vertex_sort_cuda<<<blks, 1024>>>(offset, sorted_index.data(), super::nvertex);
      }

      if (reverse) sorted_cidx = sorted_index;
      else sorted_ridx = sorted_index;
      return sorted_index;
    }

    /**
     * @brief Splits the graph into `parts` views with the chosen algorithm.
     * @param sorted_index optional degree-sorted order (see sortVertex)
     * @return one CSRView per partition; empty on unknown algorithm
     */
    std::vector<CSRView<IndexType>> 
    partition(PartitionAlg alg, 
              IndexType *sorted_index = nullptr,
              const int parts = 8,
              bool reverse = false)
    {
      switch(alg)
      {
        case CSR_MERGE_PATH:
          return merge_path_partition(sorted_index, parts, reverse);
        case CSR_ROW_BINNING:
          // not implemented yet; fall through to the diagnostic
        default:
          printf("unknown partition algorithm, ignoring\n"); // FIX: typo "unkwon"
          return {};
      }
    }


  protected:
    /**
     * @brief Allocates (and zero-initializes the offsets of) the CSR and
     *        optionally CSC arrays.
     * @note nvertex/nedge parameters shadow super::nvertex / super::nedge,
     *       which are what the allocations actually use.
     */
    void _initializeField( size_t nvertex, size_t nedge, DeviceTag dev=DeviceTag::CPU, 
                           bool with_reverse = false)
    {
      MEMALLOC(IndexType, row_offset, super::nvertex+1, dev);
      MEMALLOC(IndexType, col_idx, super::nedge, dev);
      MEMALLOC(IndexType, edge_idx, super::nedge, dev);
      MEMCLEAN(row_offset, super::nvertex+1, dev);
      
      MYLOG("forward graph allocated\n");

      if (with_reverse)
      {
        MEMALLOC(IndexType, col_offset, super::nvertex+1, dev);
        MEMALLOC(IndexType, row_idx, super::nedge, dev);
        MEMALLOC(IndexType, eidx_rev, super::nedge, dev);
        MEMCLEAN(col_offset, super::nvertex+1, dev);
        
        MYLOG("reverse graph allocated\n");
      } else {
        col_offset = row_idx = nullptr;
        eidx_rev = nullptr;
      }
    }

    /**
     * @brief Merge-path partition over (optionally sorted) rows: balances
     *        nnz across `parts` by prefix-summing row degrees and binary
     *        searching the nnz boundaries.
     */
    std::vector<CSRView<IndexType>> 
    merge_path_partition(IndexType *sorted_index=nullptr, 
                         const int parts=8, 
                         bool reverse=false)
    {
      IndexType *offset = reverse ? col_offset : row_offset;
      IndexType interval = CEIL(super::nedge, parts);
      std::vector<IndexType> part_segs(parts+1, 0);
      std::vector<IndexType> scanner(super::nvertex+1, 0);
      std::vector<CSRView<IndexType>> ret(parts);
      IndexType *h_offset;
      // fetch offset to the host when the graph lives on the device
      if (super::on_dev == DeviceTag::CUDA)
      {
        h_offset = new IndexType[1+super::nvertex];
        H_ERR(TOHOST(offset, h_offset, super::nvertex+1));
      }
      else
      {
        h_offset = offset;
      }

      // collect degree (in sorted order when sorted_index is given)
      #pragma omp parallel for num_threads(OMP_THD)
      for (IndexType i=1; i<=super::nvertex; ++i)
      {
        IndexType v = sorted_index == nullptr ? i-1 : sorted_index[i-1];
        scanner[i] = h_offset[v+1] - h_offset[v];
      }

      // inclusive prefix scan (serial)
      for (IndexType i=2; i<=super::nvertex; ++i)
      {
        scanner[i] = scanner[i-1] + scanner[i];
      }

      // lower-bound binary search: first vertex whose prefix nnz >= i*interval
      #pragma omp parallel for
      for (int i=1; i<parts; ++i)
      {
        IndexType key = i*interval;
        IndexType len = super::nvertex;
        IndexType s = 0;
        while(len>0){
          IndexType half = len>>1;
          IndexType mid = s + half;
          if(scanner[mid] < key){
            s = mid + 1;
            len = len-half-1;
          }else{
            len = half;
          }
        }
        part_segs[i] = s;
      }
      part_segs[parts] = super::nvertex;
      
      #pragma omp parallel for
      for (int i=0; i<parts; ++i)
      {
        ret[i].assign(sorted_index, h_offset, part_segs[i], part_segs[i+1]);
      }

      if (super::on_dev == DeviceTag::CUDA)
      {
        // FIX: allocated with new[]; plain `delete` was undefined behavior
        delete[] h_offset;
      }

      return ret;
    }

  };

  /**
   * @brief A view over a contiguous range [begin, end) of (optionally
   *        degree-sorted) rows of a CSRGraph, plus nnz statistics used by
   *        partitioning heuristics. Lazily mirrors its row-id list to the
   *        device via getCudaPtr().
   */
  template <typename IndexType>
  class CSRView
  {
    std::vector<IndexType> indexMap;       // row ids covered by this view
    CSRGraph<IndexType> *_graph{nullptr};  // source graph (not owned)
    IndexType *_cuda_ptr{nullptr};         // lazy device copy of indexMap
    bool _reverse{false};                  // view over the CSC side
  public:
    IndexType viewNnz{0};      // total nnz covered by the view
    // ... other useful paramters
    float avgRowNnz{0};        // mean nnz per row
    float viewDivergence{0};   // variance of per-row nnz

    // FIX: merge_path_partition does `std::vector<CSRView>(parts)` followed
    // by per-element assign(); that requires a default constructor, so the
    // previous `= delete` made the code ill-formed on instantiation.
    CSRView() = default;

    CSRView(CSRGraph<IndexType> &graph, bool reverse,
            const IndexType begin, 
            const IndexType end): _graph(&graph), _cuda_ptr(nullptr),
                                  _reverse(reverse)
    {
      if (reverse) assign(graph.sorted_cidx, graph.col_offset, begin, end);
      else assign(graph.sorted_ridx, graph.row_offset, begin, end);
    }

    // FIX: rule-of-five. The implicit copy shared _cuda_ptr between copies,
    // so vector reallocation (e.g. emplace_back in binning()) caused a double
    // cudaFree. A copy drops the device buffer (re-uploaded on demand); a
    // move steals it.
    CSRView(const CSRView &o)
        : indexMap(o.indexMap), _graph(o._graph), _cuda_ptr(nullptr),
          _reverse(o._reverse), viewNnz(o.viewNnz), avgRowNnz(o.avgRowNnz),
          viewDivergence(o.viewDivergence) {}
    CSRView(CSRView &&o) noexcept
        : indexMap(std::move(o.indexMap)), _graph(o._graph),
          _cuda_ptr(o._cuda_ptr), _reverse(o._reverse), viewNnz(o.viewNnz),
          avgRowNnz(o.avgRowNnz), viewDivergence(o.viewDivergence)
    {
      o._cuda_ptr = nullptr;
    }
    CSRView& operator=(const CSRView &o)
    {
      if (this != &o) {
        if (_cuda_ptr != nullptr) { cudaFree(_cuda_ptr); _cuda_ptr = nullptr; }
        indexMap = o.indexMap;
        _graph = o._graph;
        _reverse = o._reverse;
        viewNnz = o.viewNnz;
        avgRowNnz = o.avgRowNnz;
        viewDivergence = o.viewDivergence;
      }
      return *this;
    }
    CSRView& operator=(CSRView &&o) noexcept
    {
      if (this != &o) {
        if (_cuda_ptr != nullptr) cudaFree(_cuda_ptr);
        indexMap = std::move(o.indexMap);
        _graph = o._graph;
        _cuda_ptr = o._cuda_ptr; o._cuda_ptr = nullptr;
        _reverse = o._reverse;
        viewNnz = o.viewNnz;
        avgRowNnz = o.avgRowNnz;
        viewDivergence = o.viewDivergence;
      }
      return *this;
    }

    /**
     * @brief Populates the view with rows [begin, end) of the (optionally
     *        sorted) row order and computes the nnz statistics.
     */
    void assign(const IndexType *sorted_index, 
                const IndexType *row_offset, 
                const IndexType begin, 
                const IndexType end)
    {
      const IndexType rows = end - begin;
      indexMap.clear();  // make reuse of a view safe (assign is idempotent)
      std::vector<IndexType> degrees; // FIX: was unqualified `vector`
      indexMap.reserve(rows);
      degrees.reserve(rows);
      viewNnz = 0;
      for (IndexType i = begin; i < end; i++)
      {
        IndexType row = (sorted_index == nullptr ? i : sorted_index[i]);
        IndexType row_nz = row_offset[row + 1] - row_offset[row];
        indexMap.push_back(row);
        degrees.push_back(row_nz);
        viewNnz += row_nz;
      }

      // FIX: guard an empty range, which previously divided by zero
      if (rows == 0) { avgRowNnz = 0.f; viewDivergence = 0.f; return; }

      // ... do other parameter computation
      avgRowNnz = (float)viewNnz / rows;
      viewDivergence = 0.f;
      for (auto d : degrees)
      {
        float diff = (float)d - avgRowNnz;
        viewDivergence += diff * diff;
      }
      viewDivergence /= rows;
    }

    // Uploads indexMap to the device on first use and caches the pointer.
    IndexType* getCudaPtr()
    {
      if (_cuda_ptr == nullptr)
      {
        int len = indexMap.size();
        MEMALLOC(IndexType, _cuda_ptr, len, DeviceTag::CUDA);
        H_ERR(TODEV(_cuda_ptr, indexMap.data(), len));
      }
      return _cuda_ptr;
    }

    size_t rows() { return indexMap.size(); }

    IndexType& operator[] (const int index) { return indexMap[index]; }

    // Bundles the raw pointers a device kernel needs to process this view.
    CSRInfo<IndexType> getInfo() {
      auto offset = _reverse ? _graph->col_offset : _graph->row_offset;
      auto index = _reverse ? _graph->row_idx : _graph->col_idx;
      return {(IndexType)rows(), 
              (IndexType)_graph->nedge, 
              offset,
              index,
              getCudaPtr()};
    }

    ~CSRView()
    {
      if (_cuda_ptr != nullptr)
      {
        cudaFree(_cuda_ptr);
      }
    }
  };

  /**
   * @brief Greedy degree binning: walks the descending-degree vertex order
   *        and emits one CSRView per threshold in bin_sz, where bin i holds
   *        the remaining vertices whose degree is still >= bin_sz[i].
   * @param graph        source graph; NOTE(review): sorted_idx/offset are
   *                     dereferenced on the host below, so the graph is
   *                     assumed CPU-resident (sortVertex also asserts for
   *                     CUDA) -- confirm before calling on a device graph
   * @param bin_sz       degree thresholds, expected non-increasing
   * @param reverse      bin on the CSC side instead of the CSR side
   * @param pre_allocate eagerly upload each view's index map to the device
   * @note vertices with degree below the last threshold end up in no view
   *       (unchanged from the original behavior).
   */
  template <typename IndexType>
  std::vector<CSRView<IndexType>> binning(CSRGraph<IndexType> &graph, 
                                          const std::vector<int> &bin_sz,
                                          bool reverse = false,
                                          bool pre_allocate = false) {
    std::vector<CSRView<IndexType>> ret;
    IndexType *sorted_idx = graph.sortVertex(reverse);
    IndexType *offset = reverse ? graph.col_offset : graph.row_offset;
    size_t beg = 0;  // size_t: nvertex is a size_t, avoid signed comparison
    for ( auto bs : bin_sz ) {
      size_t end = beg;
      // extend this bin while the (non-increasing) degree stays >= bs
      for ( ; end < graph.nvertex; end++) {
        IndexType ind = sorted_idx[end];
        IndexType nnz = offset[ind+1] - offset[ind];
        if ( nnz < (IndexType)bs ) break;
      }

      if ( end > beg ) {
        ret.emplace_back(graph, reverse, (IndexType)beg, (IndexType)end);
        beg = end;
      }
    }

    if ( pre_allocate ) {
      for ( auto& view : ret ) view.getCudaPtr();
    }

    return ret;
  }
}

#endif
