#ifndef _GRAPH_DATA_HXX__
#define _GRAPH_DATA_HXX__

#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <tuple>
#include <vector>

#include "macro.hxx"

// Default OpenMP thread count; override by defining OMP_THD at compile time.
#ifndef OMP_THD
#define OMP_THD 32
#endif

// Allocate `nele` elements of `type` into `ptr` on the given device:
// CPU -> malloc, CUDA -> cudaMalloc (error-checked via H_ERR).
// An unknown device tag only prints a warning and leaves `ptr` untouched.
#define MEMALLOC(type, ptr, nele, dev) {\
if (dev == DeviceTag::CPU) (ptr) = (type*) malloc(sizeof(type)*(nele));\
else if (dev == DeviceTag::CUDA) H_ERR(cudaMalloc(&(ptr),sizeof(type)*(nele)));\
else fprintf(stderr, "WARNING: memory alloc on unknown device type, ignored!\n");}

// Zero `nele` elements pointed to by `ptr` on the given device
// (memset on CPU, cudaMemset on CUDA); element size is sizeof(*ptr).
// An unknown device tag only prints a warning.
#define MEMCLEAN(ptr, nele, dev) {\
if (dev == DeviceTag::CPU) memset((ptr), 0, sizeof(*(ptr))*(nele));\
else if (dev == DeviceTag::CUDA) H_ERR(cudaMemset((ptr), 0, sizeof(*(ptr))*(nele)));\
else fprintf(stderr, "WARNING: reseting memory on unknown device, ignored!\n");}

// Fill the buffer behind `ptr` with byte value `val` on the given device.
// NOTE: like memset/cudaMemset this sets each BYTE to `val`, not each
// element — only 0 (or per-byte patterns) produce meaningful element values.
#define MEMSET(ptr, nele, val, dev) {\
if (dev == DeviceTag::CPU) memset((ptr), (val), sizeof(*(ptr))*(nele));\
else if (dev == DeviceTag::CUDA) H_ERR(cudaMemset((ptr), (val), sizeof(*(ptr))*(nele)));\
else fprintf(stderr, "WARNING: reseting memory on unknown device, ignored!\n");}

// Release memory previously obtained via MEMALLOC on the matching device
// (free on CPU, cudaFree on CUDA). Unknown device tags only warn.
#define MEMFREE(ptr, dev) {\
if (dev == DeviceTag::CPU) free(ptr); \
else if (dev == DeviceTag::CUDA) H_ERR(cudaFree(ptr));\
else fprintf(stderr, "WARNING: Memory deallocation on unknown device, ignored!\n");}

// Copy `nele` elements between buffers that may live on different devices,
// dispatching to memcpy / TODEV / TOHOST / cudaMemcpy(D2D).
// BUGFIX: the branches were four independent `if` statements, so the
// trailing `else` bound only to the last one — every copy except
// CUDA->CUDA also printed the "invalid memory copy" warning, and a
// genuinely invalid tag pair could both copy nothing and mis-report.
// Chained with `else if` so exactly one branch runs; also parenthesized
// `nele` in the D2D branch for macro-argument safety.
#define MEMCOPY(dst_ptr, src_ptr, nele, dst_dev, src_dev) {\
if(src_dev == DeviceTag::CPU && dst_dev == DeviceTag::CPU) memcpy((dst_ptr),(src_ptr),sizeof(*(dst_ptr))*(nele));\
else if(src_dev == DeviceTag::CPU && dst_dev == DeviceTag::CUDA) H_ERR(TODEV((dst_ptr),(src_ptr),(nele)));\
else if(src_dev == DeviceTag::CUDA && dst_dev == DeviceTag::CPU) H_ERR(TOHOST((src_ptr),(dst_ptr),(nele)));\
else if(src_dev == DeviceTag::CUDA && dst_dev == DeviceTag::CUDA) H_ERR(cudaMemcpy((dst_ptr),(src_ptr),sizeof(*(src_ptr))*(nele),D2D));\
else fprintf(stderr, "WARNING: invalid memory copy, ignored!\n");}

namespace GraphData
{
  using namespace std;

  // Status codes returned by GraphData operations.
  enum ExitCode {
    GRAPHDATA_SUCCESS, GRAPHDATA_INVALID_ARGUMENT
  };

  // Identifies where a buffer lives; used by the MEM* macros for dispatch.
  enum DeviceTag {
    CPU, CUDA
  };

  // Relabel edge endpoints through `reorder_map` (definition not in this
  // header). NOTE(review): the meaning of `dim` is not visible here —
  // presumably it selects which endpoint list drives the reorder; confirm
  // against the implementation.
  void reorder(int *src_list, int *dst_list, int *reorder_map, const int nvertex, const int nedge, const bool dim);
  // NOTE(review): name suggests it reports whether the edge list is
  // directed (some edge lacks its reverse); definition not visible here.
  bool check_directed(int *src_list, int *dst_list, size_t len);

  /**
   * @brief the super class of Graph data structure
   *    containing metadata and input cleaning logic
   * @note never use as a function return value
   */
  template <typename IndexType>
  struct MetaGraph {
    size_t nvertex;     // number of vertices (after id normalization)
    size_t nedge;       // number of edges
    DeviceTag on_dev;   // device the graph payload currently lives on
    bool independent;   // false when this instance is a shallow copy

    /**
     * @brief metadata-only constructor.
     * BUGFIX: `on_dev` was previously dropped (member left uninitialized)
     * and `independent` was never set.
     */
    MetaGraph(size_t nvertex, size_t nedge, DeviceTag on_dev = DeviceTag::CPU) :
      nvertex(nvertex), nedge(nedge), on_dev(on_dev), independent(true),
      copy_count(0), copy_src(nullptr) {}

    /**
     * @brief shallow copy: registers itself on the root of the copy chain
     * so the root can assert (in the destructor) that it outlives copies.
     * The source is taken by non-const reference because its root's
     * `copy_count` is incremented.
     * BUGFIX: `copy_count` was previously left uninitialized, so the
     * destructor assert on a copy read garbage.
     */
    MetaGraph(MetaGraph<IndexType>& other) : independent(false), copy_count(0)
    {
      nvertex = other.nvertex;
      nedge = other.nedge;
      on_dev = other.on_dev;

      // Register on the original (non-copy) instance, not an intermediate.
      auto root = &other;
      while (root->copy_src != nullptr) {
        root = root->copy_src;
      }
      root->copy_count += 1;
      copy_src = root;
    }

    /**
     * @brief build metadata from a COO edge list, shifting vertex ids in
     * place so the smallest referenced id becomes 0.
     * @param edge_idx unused; kept for interface compatibility.
     * BUGFIX: handles nedge == 0 (previously computed a garbage nvertex
     * from the untouched INT_MAX sentinel), and no longer assumes
     * IndexType can hold INT_MAX.
     */
    MetaGraph(IndexType *src, IndexType *dst, IndexType *edge_idx, size_t nedge) :
      nedge(nedge), on_dev(DeviceTag::CPU), independent(true),
      copy_count(0), copy_src(nullptr)
    {
      (void)edge_idx;

      if (nedge == 0) {
        nvertex = 0;
        return;
      }

      // Seed the extrema from real data instead of an INT_MAX sentinel.
      IndexType min_node = src[0], max_node = src[0];
      for (size_t i = 0; i < nedge; ++i)
      {
        min_node = std::min({src[i], dst[i], min_node});
        max_node = std::max({src[i], dst[i], max_node});
      }

      for (size_t i = 0; i < nedge; ++i)
      {
        src[i] -= min_node;
        dst[i] -= min_node;
      }

      nvertex = max_node - min_node + 1;
    }

    /**
     * BUGFIX: destructor made virtual — this is a polymorphic base
     * (pure virtual members), so deleting a derived object through a
     * base pointer was undefined behavior.
     */
    virtual ~MetaGraph()
    {
      assert(copy_count == 0 &&
        "this class should never act as a retval and die before it's copy source");
      if (copy_src != nullptr) {
        copy_src->copy_count -= 1;
      }
    }

    virtual void fromCOO(IndexType*, IndexType*, IndexType*) = 0;
    virtual void cpu() = 0;
    virtual void cuda() = 0;
    // virtual void toTensor(vector<torch::Tensor>&) = 0;

    protected: 
      // reference counter preventing the copy source from dying first
      short copy_count;
      MetaGraph<IndexType> *copy_src;
  };

  /**
   * @brief Tally per-vertex in/out degrees from a COO edge list and return
   * the ids of vertices that appear in at least one edge, in ascending order.
   * @param ideg,odeg caller-provided arrays of length num_nodes; incremented
   *                  in place (caller is expected to have zeroed them).
   * @return label_map: new-id -> old-id for every non-isolated vertex.
   */
  template<typename IndexType>
  static inline std::vector<IndexType>
  count_degree(IndexType *src, IndexType *dst, IndexType *ideg, IndexType *odeg, 
               size_t num_nodes, size_t num_edges) {
    std::vector<bool> touched(num_nodes, false);

    // Accumulate degrees and remember which endpoints occur.
    for (size_t e = 0; e < num_edges; ++e) {
      const IndexType s = src[e];
      const IndexType d = dst[e];
      touched[s] = true;
      touched[d] = true;
      ideg[d] += 1;
      odeg[s] += 1;
    }

    // Emit the surviving vertex ids in ascending order.
    std::vector<IndexType> label_map;
    for (size_t v = 0; v < num_nodes; ++v) {
      if (touched[v]) {
        label_map.push_back(v);
      }
    }

    return label_map;
  }
  
  /**
   * @brief Compute the minimum and maximum degree (over both in- and
   * out-degrees of all num_nodes vertices, isolated vertices included).
   * @return tuple (d_min, d_max).
   * BUGFIX: the call to count_degree() was missing the `num_nodes`
   * argument (6-parameter function called with 5), which failed to
   * compile on instantiation. Also replaced the INT_MAX seed with
   * numeric_limits so wider IndexTypes work, and made the loop index
   * unsigned to match num_nodes.
   */
  template<typename IndexType>
  static inline std::tuple<IndexType, IndexType> 
  degree_range(IndexType *src, IndexType *dst, size_t num_nodes, size_t num_edges) {
    
    IndexType d_min = std::numeric_limits<IndexType>::max(), d_max = 0;
    std::vector<IndexType> ideg(num_nodes, 0), odeg(num_nodes, 0);

    count_degree(src, dst, ideg.data(), odeg.data(), num_nodes, num_edges);
    for ( size_t i = 0; i < num_nodes; ++i ) {
      d_min = std::min({d_min, ideg[i], odeg[i]});
      d_max = std::max({d_max, ideg[i], odeg[i]});
    }

    return std::make_tuple(d_min, d_max);
  }

  /**
   * @brief Compact degree arrays down to the surviving (non-isolated)
   * vertices listed in label_map, and build the reverse mapping.
   * @param nv_old   vertex count before pruning.
   * @param label_map new-id -> old-id (as produced by count_degree).
   * @param ideg_out,odeg_out caller arrays of length label_map.size().
   * @return r_label_map: old-id -> new-id; slots for pruned vertices stay 0.
   */
  template<typename IndexType>
  static inline std::vector<IndexType> prune_empty(size_t nv_old, std::vector<IndexType> &label_map, 
                                            IndexType *ideg_in, IndexType *odeg_in,
                                            IndexType *ideg_out, IndexType *odeg_out) {
    std::vector<IndexType> r_label_map(nv_old, 0);
    const size_t nv_new = label_map.size();

    for (size_t new_id = 0; new_id < nv_new; ++new_id) {
      const IndexType old_id = label_map[new_id];
      // Gather the surviving degrees and record the inverse label.
      ideg_out[new_id] = ideg_in[old_id];
      odeg_out[new_id] = odeg_in[old_id];
      r_label_map[old_id] = new_id;
    }

    return r_label_map;
  }
  

/*
  template <typename T>
  struct GCOOData
  {
    int *edge_list;
    T *weight_list;
    int *grp_offset;

    int ngrps;
    int nvertex;
    int nedge;

    GCOOData() : weight_list(nullptr), edge_list(nullptr), grp_offset(nullptr) {}

    GCOOData(int *edge_list, T *weight_list, int nedge)
    {
      int *dst_list = edge_list;
      int *src_list = edge_list + nedge;

      this->nedge = nedge;
      nvertex = 0;
      for (int i = 0; i < nedge; ++i)
      {
        nvertex = std::max(nvertex, src_list[i]);
        nvertex = std::max(nvertex, dst_list[i]);
      }
      nvertex+=1; // last_vertex_id + 1 = num_vertex
      ngrps = CEIL(nvertex, (BSZ / THD));

      MYLOG("Tag 0\n")

      struct edge_t
      {
        int gid;
        int src;
        int dst;
        T e;
      };
      vector<edge_t> edge_coll;
      vector<int> grp_ne(ngrps, 0);

      for (int i = 0; i < nedge; ++i)
      {
        int s = src_list[i];
        int d = dst_list[i];
        int gid = d / (BSZ / THD);
        T weight = (weight_list == nullptr) ? 1 : weight_list[i];

        edge_coll.push_back({gid, s, d, weight});
        ++grp_ne[gid];
      }
      MYLOG("Tag 1\n")

      sort(edge_coll.begin(), edge_coll.end(),
          [](const edge_t &a, const edge_t &b) -> bool
          {
            if (a.gid == b.gid)
              return a.src < b.src;
            else
              return a.gid < b.gid;
          });

      vector<int> grp_top(ngrps, 0);
      this->edge_list = (int *)malloc(nedge * 2 * sizeof(int));

      auto sl = this->edge_list;
      auto dl = this->edge_list + nedge;
      this->weight_list = (weight_list == nullptr) ? nullptr : (T *)malloc(nedge * sizeof(T));
      grp_offset = (int *)malloc(sizeof(int) * (ngrps + 1));

      for (int i = 1; i < ngrps; ++i)
      {
        MYLOG(" -- grp %d = %d\n", i - 1, grp_ne[i - 1])
        grp_top[i] = grp_top[i - 1] + grp_ne[i - 1];
        grp_offset[i] = grp_top[i];
      }
      MYLOG(" -- grp %d = %d\n", ngrps - 1, grp_ne[ngrps - 1])
      grp_offset[ngrps] = nedge;
      grp_offset[0] = 0;

      MYLOG("Tag 2\n")

      for (auto i = edge_coll.begin(); i < edge_coll.end(); ++i)
      {
        MYLOG(" -- puting %d->%d to grp %d @[%d]\n", i->src, i->dst, i->gid, grp_top[i->gid])
        sl[grp_top[i->gid]] = i->src;
        dl[grp_top[i->gid]] = i->dst;
        if (weight_list != nullptr)
          this->weight_list[grp_top[i->gid]] = i->e;
        ++grp_top[i->gid];
      }

      MYLOG("Tag 3\n")
    }

    void freeData()
    {
      free(edge_list);
      if (weight_list != nullptr)
        free(weight_list);
    }

    void to_tensor(Tensor &elTn, Tensor &wlTn, Tensor &goTn, const TensorOptions &wopt)
    {
      auto iopt = TensorOptions().dtype(torch::kInt32);

#define PACK(tn, ptr, size, ty, opt)                   \
  tn = torch::zeros((size), opt);                      \
  memcpy(tn.data_ptr<ty>(), ptr, (size) * sizeof(ty)); \
  free(ptr)

      PACK(elTn, edge_list, 2 * nedge, int, iopt);
      PACK(goTn, grp_offset, ngrps + 1, int, iopt);

      if (weight_list != nullptr)
      {
        PACK(wlTn, weight_list, nedge, T, wopt);
      }
#undef PACK

      elTn = elTn.reshape({2, nedge});
    }

    TopoGCOO to_topoMYLOGy()
    {
      return TopoGCOO(edge_list, grp_offset, ngrps, nvertex, nedge);
    }
  };
*/
};

#endif