#include <iostream>

#include "agrad.hxx"
#include "format/GraphInterface.hxx"
#include "config.hxx"

// static torch::Tensor broadcast(torch::Tensor& src, torch::Tensor& other, int64_t dim)
// {
//   if (src.dim() == 1)
//     for (auto i = 0; i < dim; i++)
//       src = src.unsqueeze(0);
//   for (auto i = src.dim(); i < other.dim(); i++)
//     src = src.unsqueeze(-1);
//   src = src.expand(other.sizes().vec());
//   return src;
// }

// using torch::autograd::AutogradContext;
// using torch::autograd::Variable;
// using torch::autograd::variable_list;

// using optVariable = torch::optional<torch::autograd::Variable>;

// int64_t index2offset(std::vector<int64_t> index, std::vector<int64_t> stride)
// {
//   AT_ASSERTM(index.size() == stride.size(), "Dimension mismatch for inputs");
//   int64_t dims = index.size();
//   int64_t offset = 0;
//   for (int i = 0; i < dims; i++)
//   {
//     offset += index[i] * stride[i];
//   }
//   return offset;
// }

// Variable custom_linear(Variable& src, Variable& weight, optVariable bias)
// {
//   // weight = weight.contiguous();
//   auto weight_dim = weight.dim();

//   int64_t w = weight.size(weight_dim - 1);
//   int64_t h = weight.size(weight_dim - 2);

//   AT_ASSERTM(w == src.size(0), "size do not match!");

//   auto out = torch::zeros(h);

//   AT_DISPATCH_ALL_TYPES(
//     src.scalar_type(), "custom linear", [&] {
//       auto weight_ptr = weight.data_ptr<scalar_t>();
//       auto src_ptr = src.data_ptr<scalar_t>();
//       auto out_ptr = out.data_ptr<scalar_t>();

//       for (int i = 0; i < h; i++)
//       {
//         scalar_t acc = 0;
//         for (int j = 0; j < w; j++)
//           acc += src_ptr[j] * weight_ptr[index2offset({i, j}, weight.strides().vec())];
//         out[i] = acc;
//       }

//       if (bias.has_value())
//       {
//         auto bias_ptr = bias.value().data_ptr<scalar_t>();
//         for (int i = 0; i < h; i++)
//           out_ptr[i] += bias_ptr[i];
//       }
//     });

//   return out;
// }

// class GCNFunc : public torch::autograd::Function<GCNFunc>
// {
// public:
//   static Variable forward(AutogradContext *ctx, Variable input, 
//                                Variable row_offset, Variable adj_list, optVariable edge_weight,
//                                Variable r_row_offset, Variable r_adj_list, optVariable r_edge_weight)
//   {
//     ctx->save_for_backward({row_offset, adj_list, edge_weight.value(), 
//                             r_row_offset, r_adj_list, r_edge_weight.value()
//                             });
//     auto ret = customgcn::propagate(input, row_offset, adj_list, edge_weight);
//     return ret;
//   }
//   static variable_list backward(AutogradContext *ctx, variable_list grad_out)
//   {
//     auto grad_output = grad_out[0];
//     auto saved_graph = ctx->get_saved_variables();
//     auto ro = saved_graph[0];
//     auto al = saved_graph[1];
//     auto ew = saved_graph[2];
//     auto r_ro = saved_graph[3];
//     auto r_al = saved_graph[4];
//     auto r_ew = saved_graph[5];
//     return {CustomGCN(grad_output, r_ro, r_al, r_ew, ro, al, ew), Variable(), Variable(), Variable()/*, Variable(), Variable(), Variable()*/};
//   }

// };

// /**
//  * first trial - to learn the interface & functionality
//  */
// class CustomLinearFunc : public torch::autograd::Function<CustomLinearFunc>
// {
// public:
//   static variable_list forward(AutogradContext *ctx,
//                  Variable src,
//                  Variable weight,
//                  optVariable bias)
//   {
//     AT_ASSERTM(src.dim() == (weight.dim() - 1));
//     ctx->saved_data["has_bias"] = bias.has_value();

//     // auto result = custom_linear(src, weight, bias);
//     auto result = custom_linear(src, weight, bias);
//     // if (bias.has_value()) result += bias.value();
//     ctx->save_for_backward({src, weight});
//     return {result};
//   }

//   static variable_list backward(AutogradContext *ctx, variable_list grad_out)
//   {
//     AT_ASSERTM(grad_out.size() == 1);
//     auto grad = grad_out[0];
//     auto saved_inputs = ctx->get_saved_variables();
//     auto in_src = saved_inputs[0];
//     auto in_weight = saved_inputs[1];

//     // assume the input is 2-d now
//     // auto d_src = in_weight.t();
//     auto d_weight = broadcast(in_src, in_weight, 1);
//     auto d_bias = Variable();
//     if (ctx->saved_data["has_bias"].toBool())
//       d_bias = grad;
//     return {CustomLinearFunc::apply(grad, in_weight.t(), optVariable())[0], (d_weight.t() * grad).t(), d_bias};
//   }
// };

// class DualLinearFunc : public torch::autograd::Function<DualLinearFunc>
// {
// public:
//   static variable_list forward(AutogradContext *ctx, Variable input, Variable weight1, Variable weight2, optVariable bias1, optVariable bias2)
//   {
//     ctx->saved_data["has_bias1"] = bias1.has_value();
//     ctx->saved_data["has_bias2"] = bias2.has_value();

//     auto res1 = custom_linear(input, weight1, bias1);
//     auto res2 = custom_linear(res1, weight2, bias2);

//     ctx->save_for_backward({input, weight1, weight2, res1});

//     return {res2, res1};
//   }

//   static variable_list backward(AutogradContext *ctx, variable_list grad_out_list)
//   {
//     auto grad_out = grad_out_list[0];
//     auto grad_im = grad_out_list[1];

//     auto saved = ctx->get_saved_variables();
//     auto in_src = saved[0];
//     auto in_w1 = saved[1];
//     auto in_w2 = saved[2];
//     auto in_r1 = saved[3];

//     // grad for input
//     auto in_w2_trans = in_w2.t();
//     auto in_w1_trans = in_w1.t();
//     // grad_im += custom_linear(grad_out, in_w2_trans, {});
//     // auto grad_src = custom_linear(grad_im, in_w1_trans, {});
//     auto back_grads = DualLinearFunc::apply(grad_out, in_w2_trans, in_w1_trans, optVariable(), optVariable());
//     auto grad_src = back_grads[0];
//     grad_im += back_grads[1];
//     // grad for layers
//     auto grad_w2 = grad_out.unsqueeze(-1).mm(in_r1.unsqueeze(0));//(broadcast(in_r1, in_w2, 1).t() * grad_out).t();
//     auto grad_w1 = (broadcast(in_src, in_w1, 1).t() * grad_im).t();

//     // grad for biases
//     auto grad_b1 = Variable();
//     auto grad_b2 = Variable();
//     if (ctx->saved_data["has_bias1"].toBool())
//     {
//       grad_b1 = grad_im;
//     }
//     if (ctx->saved_data["has_bias2"].toBool())
//     {
//       grad_b2 = grad_out;
//     }
//     return {grad_src, grad_w1, grad_w2, grad_b1, grad_b2};
//   }
// };

// class FusedLinearGCN : public torch::autograd::Function<FusedLinearGCN>
// {
// public:
//   static variable_list forward(AutogradContext *ctx,
//                           Variable row_offset, Variable adj_list, optVariable edge_weight,
//                           Variable r_row_offset, Variable r_adj_list, optVariable r_edge_weight,
//                           Variable input, Variable weight, optVariable bias)
//   {
//     ctx->saved_data["has_bias"] = bias.has_value();
//     ctx->save_for_backward({row_offset, adj_list, edge_weight.value(), r_row_offset, r_adj_list, r_edge_weight.value(), input, weight});
//     auto ret = customgcn::fused_linear_propagate(input, weight, bias, row_offset, adj_list, edge_weight);
//     return {std::get<0>(ret), std::get<1>(ret)};
//   }

//   static variable_list backward(AutogradContext *ctx, variable_list grads)
//   {
//     // grads
//     auto grad_output = grads[0];
//     auto grad_inter = grads[1];
    
//     // saved vars
//     auto saved = ctx->get_saved_variables();
//     auto ro = saved[0];
//     auto aj = saved[1];
//     auto ew = saved[2];
//     auto r_ro = saved[3];
//     auto r_aj = saved[4];
//     auto r_ew = saved[5];
//     auto input = saved[6];
//     auto weight = saved[7];
//     auto tw = weight.t();
    
//     auto ret = customgcn::fused_linear_propagate_reversed(grad_output, tw, r_ro, r_aj, r_ew);
//     auto grad_input = std::get<0>(ret);
//     grad_inter += std::get<1>(ret);

//     auto grad_weight = grad_inter.t().mm(input);
//     auto grad_bias = Variable();
//     if (ctx->saved_data["has_bias"].toBool())
//     {
//       grad_bias = grad_output;
//     }
//     return {Variable(), Variable(), Variable(), Variable(), Variable(), Variable(),
//             grad_input, grad_weight, grad_bias};
//   }
// };

// torch::Tensor CustomLinear(torch::Tensor input, torch::Tensor weight, torch::optional<torch::Tensor> bias)
// {
//   return CustomLinearFunc::apply(input, weight, bias)[0];
// }

// torch::Tensor CustomDualLinear(torch::Tensor input, torch::Tensor weight1, torch::Tensor weight2, torch::optional<torch::Tensor> bias1, torch::optional<torch::Tensor> bias2)
// {
//   return DualLinearFunc::apply(input, weight1, weight2, bias1, bias2)[0];
// }

// torch::Tensor CustomGCN(torch::Tensor input, 
//                         torch::Tensor row_offset, torch::Tensor adj_list, torch::optional<torch::Tensor> edge_weight,
//                         torch::Tensor r_row_offset, torch::Tensor r_adj_list, torch::optional<torch::Tensor> r_edge_weight)
// {
//   return GCNFunc::apply(input, row_offset, adj_list, edge_weight, r_row_offset, r_adj_list, r_edge_weight);
// }

// torch::Tensor CustomLGCN(torch::Tensor ro, torch::Tensor aj, torch::optional<torch::Tensor> ew,
//                              torch::Tensor r_ro, torch::Tensor r_aj, torch::optional<torch::Tensor> r_ew, 
//                              torch::Tensor input, torch::Tensor weight, torch::optional<torch::Tensor> bias)
// {
//   return FusedLinearGCN::apply(ro, aj, ew, r_ro, r_aj, r_ew, input, weight, bias)[0];
// }

/// Update the global tuning parameters used by the custom kernels.
/// @param thd      thread-count knob; always applied.
/// @param bsz      optional block-size knob; current value kept when absent.
/// @param h_thresh optional height threshold; current value kept when absent.
/// @param d_thresh optional depth/degree threshold; current value kept when absent.
/// NOTE(review): writes the project globals _thd/_bsz/_h_thresh/_d_thresh
/// (declared elsewhere, presumably config.hxx) — not thread-safe by itself.
void set_tune_param(int64_t thd, torch::optional<int64_t> bsz, torch::optional<int64_t> h_thresh, torch::optional<int64_t> d_thresh)
{
    _thd = thd;
    // c10/torch optional supports operator bool and operator* like std::optional.
    if (bsz)
        _bsz = *bsz;
    if (h_thresh)
        _h_thresh = *h_thresh;
    if (d_thresh)
        _d_thresh = *d_thresh;
}

// Register the live custom operators with the TorchScript dispatcher so they
// are callable from Python as torch.ops.<namespace>.<name>.
// The commented-out .op(...) lines mirror the retired autograd wrappers
// earlier in this file and are kept for reference only.
static auto registry = torch::RegisterOperators()
            // retired autograd-wrapped entry points (see dead code above)
            // .op("custom::custom_linear", &CustomLinear)
            // .op("custom::custom_dual_linear", &CustomDualLinear)
            // .op("custom::custom_gcn", &CustomGCN)
            // .op("custom::fuse_linear_gcn", &CustomLGCN)
            // tuning params: runtime knobs for the custom kernels
            .op("tuner::set_tune_param", &set_tune_param)
            .op("tuner::init_stream", &init_stream)
            // helper/interface functions: GCN normalization for CSR/COO layouts
            .op("helpers::gcn_norm_csr", &gcnhelper::gcn_norm_csr)
            .op("helpers::gcn_norm_coo", &gcnhelper::gcn_norm_coo)
            // propagate functions (SpMM-style message passing)
            .op("custom::spmm_like", &customgcn::propagate)
            // .op("custom::spmm_like_bw", &customgcn::propagate_backward)
            // .op("custom::fused_linear_propagate", &customgcn::fused_linear_propagate)
            // .op("custom::fused_linear_propagate_reversed", &customgcn::fused_linear_propagate_reversed)
            // .op("custom::spmm_like_chunked", &customgcn::propagate_chunked)
            // .op("custom::spmm_like_gcoo", &customgcn::propagate_gcoo)
            // apply-edge functions (SDDMM-style, used by GAT-like models)
            .op("custom::sddmm_like", &customgat::apply_edge)
            // graph data utilities: reordering and layout conversion
            .op("graphdata::reorder", &GraphData::tensor_reorder)
            .op("graphdata::check_directed", &GraphData::tensor_coo_check_directed)
            .op("graphdata::coo_to_csr", &GraphData::tensor_coo_to_csr)
            // .op("graphdata::coo_to_csr_w", &GraphData::tensor_coo_to_csr_w)
            // .op("graphdata::coo_to_chunk", &GraphData::tensor_coo_to_chunk)
            // .op("graphdata::coo_to_chunk_w", &GraphData::tensor_coo_to_chunk_w)
            .op("graphdata::coo_to_gcoo", &GraphData::tensor_coo_to_gcoo);