#include "GraphData.cuh"
#include "format/GraphInterface.hxx"
#include "format/CsrGraph.cuh"
#include "format/GcooGraph.cuh"
#include "format/EllGraph.cuh"
#include "format/HybTileGraph.cuh"

std::tuple<torch::Tensor, torch::Tensor>
GraphData::tensor_reorder(torch::Tensor srcTn, torch::Tensor dstTn, const int64_t nvertex)
{
  // Allocate one int32 slot per vertex for each reordered output.
  // NOTE(review): outputs are sized by nvertex, not by the edge count
  // (srcTn.size(0)) — confirm this matches what reorder() writes.
  const auto opts = torch::TensorOptions().dtype(torch::kInt32);
  torch::Tensor src_reord = torch::zeros(nvertex, opts);
  torch::Tensor dst_reord = torch::zeros(nvertex, opts);

  const int64_t nedge = srcTn.size(0);
  int *src_in = srcTn.data_ptr<int>();
  int *dst_in = dstTn.data_ptr<int>();

  // Source-side ordering first, then destination-side ordering.
  reorder(src_in, dst_in, src_reord.data_ptr<int>(), nvertex, nedge, true);
  reorder(src_in, dst_in, dst_reord.data_ptr<int>(), nvertex, nedge, false);

  return std::make_tuple(src_reord, dst_reord);
}

// Validate a COO edge list given as two parallel int32 1-D tensors of equal
// length, then report whether the described graph is directed.
bool GraphData::tensor_coo_check_directed(const torch::Tensor src_tn, const torch::Tensor dst_tn) {
  CHECK_INPUT(src_tn.dtype() == torch::kInt32);
  CHECK_INPUT(dst_tn.dtype() == torch::kInt32);
  CHECK_INPUT(src_tn.dim() == 1);
  CHECK_INPUT(dst_tn.dim() == 1);
  CHECK_INPUT(dst_tn.size(0) == src_tn.size(0));

  auto src_ptr = src_tn.data_ptr<int>();
  // BUG FIX: this previously read from src_tn, so check_directed compared
  // the source column against itself and never saw the destinations.
  auto dst_ptr = dst_tn.data_ptr<int>();

  return GraphData::check_directed(src_ptr, dst_ptr, dst_tn.size(0));
}

// Wrap a CPU-resident CSRGraph's buffers as torch tensors WITHOUT copying:
// torch::from_blob aliases the graph's memory directly.
// tlist layout on return: [0] row_offset (nvertex+1), [1] col_idx (nedge),
// [2] edge_idx (nedge); and, only when reverse arrays exist
// (col_offset != nullptr): [3] col_offset, [4] row_idx, [5] eidx_rev.
// Slots 3-5 are left untouched when there are no reverse arrays.
template<typename IndexType>
void toTensor(GraphData::CSRGraph<IndexType> &csr_g, std::vector<torch::Tensor>& tlist) {
  using super = GraphData::MetaGraph<IndexType>;
  ASSERT(csr_g.super::on_dev == GraphData::DeviceTag::CPU, "device copy is not available yet");
  ASSERT(tlist.size() == 6, "exact 6 input is required");
  // The tensors alias csr_g's buffers, so the graph must not free them;
  // presumably `independent = false` suppresses that in its destructor —
  // TODO confirm against MetaGraph.
  csr_g.super::independent = false;

  // Select the torch dtype matching the compile-time index width.
  torch::ScalarType indTp;
  if (sizeof(IndexType) == 4) {
    indTp = torch::kI32;
  } else if (sizeof(IndexType) == 8) {
    indTp = torch::kI64;
  } else {
    ASSERT(false, "unsupported index type");
  }

  auto indOpt = torch::TensorOptions().dtype(indTp);
  
  // Zero-copy wrap: tn becomes a view over ptr with `size` elements.
  #define PACK(tn, ptr, size, opt) tn = torch::from_blob(ptr, {size}, opt)
  
  int64_t rowptr_size = csr_g.super::nvertex + 1;
  int64_t edge_size = csr_g.super::nedge;

  PACK(tlist[0], csr_g.row_offset, rowptr_size, indOpt);
  PACK(tlist[1], csr_g.col_idx, edge_size, indOpt);
  PACK(tlist[2], csr_g.edge_idx, edge_size, indOpt);
 
  if (csr_g.col_offset != nullptr) // with reverse
  {
    PACK(tlist[3], csr_g.col_offset, rowptr_size, indOpt);
    PACK(tlist[4], csr_g.row_idx, edge_size, indOpt);
    PACK(tlist[5], csr_g.eidx_rev, edge_size, indOpt);
  }
  #undef PACK
}

// Wrap a CPU-resident GCOOGraph as torch tensors.
// tlist layout on return: [0] packed edge list (2*nedge ints: dst block
// followed by src block, freshly allocated), [1] grp_offset (ngrps+1,
// zero-copy alias), [2] edge_idx (nedge, zero-copy alias).
// Side effect: the graph's src_list/dst_list buffers are freed and re-pointed
// into tlist[0]'s storage.
template<typename IndexType>
void toTensor(GraphData::GCOOGraph<IndexType> &gcoo_g, std::vector<torch::Tensor>& tlist) {
  using super = GraphData::MetaGraph<IndexType>;

  ASSERT(gcoo_g.super::on_dev == GraphData::DeviceTag::CPU, "device copy is not available yet");
  ASSERT(tlist.size() == 3, "expect, 3 tensor");
  // Aliased buffers are now owned by the tensors / by tlist[0]; the graph
  // must not free them itself.
  gcoo_g.super::independent = false;

  // Select the torch dtype matching the compile-time index width.
  torch::ScalarType indTp;
  if (sizeof(IndexType) == 4) {
    indTp = torch::kInt32;
  } else if (sizeof(IndexType) == 8) {
    indTp = torch::kInt64;
  } else {
    ASSERT(false, "unsupported index type");
  }

  auto indOpt = torch::TensorOptions().dtype(indTp);

// Zero-copy wrap: tn becomes a view over ptr with `size` elements.
#define PACK(tn, ptr, size, opt) tn = torch::from_blob(ptr, {size}, opt)

  int64_t grp_ptr_size = gcoo_g.ngrps+1;
  int64_t nedge = gcoo_g.super::nedge;

  PACK(tlist[1], gcoo_g.grp_offset, grp_ptr_size, indOpt);
  PACK(tlist[2], gcoo_g.edge_idx, nedge, indOpt);

#undef PACK

  // the soa edge list is packed manually:
  // copy [dst_list | src_list] into one tensor-owned buffer, release the
  // originals, and re-point the graph's list members into that buffer so
  // graph and tensor share one allocation (dst block first, then src).
  tlist[0] = torch::zeros(gcoo_g.super::nedge*2, indOpt);
  auto edge_list_ptr = tlist[0].data_ptr<IndexType>();
  memcpy(edge_list_ptr, gcoo_g.dst_list, gcoo_g.super::nedge*sizeof(IndexType)); 
  memcpy(edge_list_ptr + gcoo_g.super::nedge, 
          gcoo_g.src_list, gcoo_g.super::nedge*sizeof(IndexType)); 
  // NOTE(review): free() implies these were malloc-allocated — confirm the
  // GCOOGraph constructor does not use new[].
  free(gcoo_g.src_list); free(gcoo_g.dst_list);
  gcoo_g.dst_list = edge_list_ptr;
  gcoo_g.src_list = edge_list_ptr + gcoo_g.super::nedge;
}


// Wrap a CPU-resident ELLGraph's buffers as torch tensors WITHOUT copying.
// tlist layout on return: [0] col_idx (nvertex x col_stride),
// [1] col_eidx (weighted graphs only), and — when reverse arrays exist
// (row_idx != nullptr) — [2] row_idx (nvertex x row_stride),
// [3] row_eidx (weighted only). Untaken slots are left untouched.
// BUG FIX: ell_g is now taken by reference (it was passed by value, so
// `independent = false` was set on a copy and the caller's graph could still
// free the buffers the tensors alias), matching the other toTensor overloads.
template<typename IndexType>
void toTensor(GraphData::ELLGraph<IndexType> &ell_g, std::vector<torch::Tensor>& tlist) {
  using meta = GraphData::MetaGraph<IndexType>;
  ASSERT(ell_g.meta::on_dev == GraphData::DeviceTag::CPU, "device copy is not available yet");
  ASSERT(tlist.size() == 4, "exact 4 input is required");
  // Tensors alias the graph's buffers; the graph must not free them itself.
  ell_g.meta::independent = false;

  // Select the torch dtype matching the compile-time index width.
  torch::ScalarType indTp;
  if ( sizeof(IndexType) == 4 ) {
    indTp = torch::kI32;
  } else if ( sizeof(IndexType) == 8 ) {
    indTp = torch::kI64;
  } else { 
      ASSERT(false, "unsupported index type");
  }

  tlist[0] = torch::from_blob(ell_g.col_idx, {ell_g.meta::nvertex, ell_g.col_stride}, indTp);
  if ( ell_g.meta::weighted ) 
    tlist[1] = torch::from_blob(ell_g.col_eidx, {ell_g.meta::nvertex, ell_g.col_stride}, indTp);
  
  if ( ell_g.row_idx != nullptr ) { // reverse-direction arrays present
    tlist[2] = torch::from_blob(ell_g.row_idx, {ell_g.meta::nvertex, ell_g.row_stride}, indTp);
    if ( ell_g.meta::weighted ) 
      // BUG FIX: row_eidx is shaped by row_stride (was col_stride — a
      // copy-paste slip from the forward-direction branch above).
      tlist[3] = torch::from_blob(ell_g.row_eidx, {ell_g.meta::nvertex, ell_g.row_stride}, indTp);
  }
}


// Convert a 2 x nedge int32 COO edge list into CSR tensors.
// Layout of edge_list's storage: the first nedge ints are destinations,
// the next nedge ints are sources (see the pointer math below — the same
// packed layout toTensor(GCOOGraph) produces).
// Returns (row_offset, col_idx, edge_idx, col_offset, row_idx, eidx_rev);
// the last three are defined only when the CSR build emits reverse arrays.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, 
           torch::Tensor, torch::Tensor, torch::Tensor>
GraphData::tensor_coo_to_csr(
  torch::Tensor edge_list, 
  torch::optional<torch::Tensor> edge_idx_wrapper, 
  bool directed)
{
  CHECK_INPUT(edge_list.dtype() == torch::kI32);
  CHECK_INPUT(edge_list.size(0) == 2); // there's no reason for me to do transpose in cpp

  const int nedge = edge_list.size(1);
  torch::autograd::variable_list tn_list(6, torch::Tensor());

  // Row 0 = destinations; row 1 (the next nedge ints) = sources.
  int *dst_raw = edge_list.data_ptr<int>();
  int *src_raw = dst_raw + nedge;
  int *ew_raw = nullptr;
  if (edge_idx_wrapper.has_value()) {
    auto edge_idx = edge_idx_wrapper.value();
    CHECK_INPUT(edge_idx.dtype() == torch::kI32);
    CHECK_INPUT(edge_list.size(1) == edge_idx.size(0));
    ew_raw = edge_idx.data_ptr<int>();
  } 
  
  GraphData::CSRGraph<int> g(src_raw, dst_raw, ew_raw, nedge, directed);
  toTensor(g, tn_list);

  // Removed dead code: an opt_tn_list was filled from kF32 dtype checks on
  // tn_list[2]/tn_list[5] but never used or returned, and calling .dtype()
  // on tn_list[5] could fault when no reverse arrays were built (the tensor
  // is undefined in that case).
  return {tn_list[0], tn_list[1], tn_list[2], tn_list[3], tn_list[4], tn_list[5]};
}


// std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, 
//            torch::Tensor, torch::optional<torch::Tensor>, torch::Tensor,
//            torch::Tensor, torch::Tensor, torch::Tensor, 
//            torch::Tensor, torch::optional<torch::Tensor>, torch::Tensor, torch::Tensor,
//            torch::optional<torch::Tensor>, torch::optional<torch::Tensor>>
// GraphData::tensor_coo_to_chunk(torch::Tensor& edge_list, torch::optional<torch::Tensor> edge_idx_wrapper, const int64_t nvertex, bool reorder)
// {
//   CHECK_INPUT(edge_list.dtype() == torch::kInt32);
//   CHECK_INPUT(edge_list.size(0) == 2); // there's no reason for me to do transpose in cpp

//   const int nedge = edge_list.size(1);
  
//   torch::Tensor bosTn, boeTn, bosbTn, hteTn, htwTn, 
//                 bosbdTn, dmdTn,
//                 boelTn, lslTn, ldpsTn, lwlTn, bodlTn, adTn,
//                 src_reorder, dst_reorder;

//   auto dst = edge_list.data_ptr<int>();
//   auto src = dst + edge_list.stride(0);

//   if (edge_idx_wrapper.has_value())
//   {
//     auto edge_idx = edge_idx_wrapper.value();
//     CHECK_INPUT(edge_list.size(1) == edge_idx.size(0));

//     AT_DISPATCH_FLOATING_TYPES(
//     edge_idx.scalar_type(), "weighted tensor_coo_to_chunk calc", [&]{

//       auto ew  = edge_idx.data_ptr<scalar_t>();

//       chunk_t<scalar_t> chunked_graph;

//       chunked_graph.from_coo(src, dst, ew, (int) nvertex, nedge, reorder);
//       chunked_graph.to_tensor(bosTn, boeTn, bosbTn, hteTn, htwTn, 
//                               bosbdTn, dmdTn,
//                               boelTn, lslTn, ldpsTn, lwlTn, bodlTn,
//                               adTn, src_reorder, dst_reorder, indOpt);
//     });
//   } else {
//     torch::Tensor _dummy;
//     torch::TensorOptions _dummyOpt;
//     chunk_t<float> chunked_graph;
//     chunked_graph.from_coo(src, dst, nullptr, (int) nvertex, nedge, reorder);
//     chunked_graph.to_tensor(bosTn, boeTn, bosbTn, hteTn, _dummy, bosbdTn, dmdTn, boelTn, lslTn, ldpsTn, _dummy, bodlTn, adTn, src_reorder, dst_reorder, _dummyOpt);
//   }
  
//   return std::make_tuple(bosTn, boeTn, bosbTn, hteTn, htwTn, 
//                          bosbdTn, dmdTn, 
//                          boelTn, lslTn, ldpsTn, lwlTn, bodlTn, adTn, src_reorder, dst_reorder);
// }



// Convert a 2 x nedge int32 COO edge list (first nedge ints = destinations,
// next nedge = sources) into grouped-COO tensors.
// Returns (packed edge list, grp_offset, edge_idx) as produced by
// toTensor(GCOOGraph).
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor>
GraphData::tensor_coo_to_gcoo(torch::Tensor edge_list, torch::optional<torch::Tensor> edge_idx)
{
  CHECK_INPUT(edge_list.dtype() == torch::kInt32);
  CHECK_INPUT(edge_list.size(0) == 2);

  const int nedge = edge_list.size(1);

  int *dst_raw = edge_list.data_ptr<int>();
  int *src_raw = dst_raw + nedge;
  int *eidx_raw = nullptr;
  
  std::vector<torch::Tensor> tn_list(3, torch::Tensor());

  if (edge_idx.has_value())
  {
    // Validate the optional edge-index tensor before taking its raw pointer,
    // mirroring the checks performed in tensor_coo_to_csr.
    CHECK_INPUT(edge_idx.value().dtype() == torch::kInt32);
    CHECK_INPUT(edge_idx.value().size(0) == edge_list.size(1));
    eidx_raw = edge_idx.value().data_ptr<int>();
  } 

  GraphData::GCOOGraph<int> g(src_raw, dst_raw, eidx_raw, nedge);
  toTensor(g, tn_list);
  return {tn_list[0], tn_list[1], tn_list[2]};
}

