
#include "pyeager.h"


namespace py = pybind11;

namespace pytxdnn {

// PyEager
// Creates a new tensor in the eager graph from Python-supplied metadata.
//
// dim/stride describe the tensor layout; is_virtual marks intermediate
// tensors that never materialize in memory. Returns the shared attribute
// handle that later ops (e.g. matmul) consume.
std::shared_ptr<txdnn_frontend::graph::TensorAttributes>
PyEager::tensor(const std::vector<uint32_t>& dim,
                const std::vector<uint32_t>& stride,
                const txdnnDataType_t data_type,
                const bool& is_virtual,
                const std::string& name) {
  // Describe the tensor, then hand the description to the eager graph.
  auto attrs = txdnn_frontend::graph::TensorAttributes()
                   .setName(name)
                   .setDataType(data_type)
                   .setDim(dim)
                   .setStride(stride)
                   .setIsVirtual(is_virtual);
  return eager_.tensor(attrs);
}

// Creates a tensor whose attributes are copied from an existing one,
// registered under `name`. Thin pass-through to the eager frontend.
std::shared_ptr<txdnn_frontend::graph::TensorAttributes>
PyEager::tensor_like(const std::shared_ptr<txdnn_frontend::graph::TensorAttributes>& tensor, const std::string& name) {
  return eager_.tensor_like(tensor, name);
}

// Adds a matmul node A x B to the eager graph and returns the handle to
// the output tensor's attributes.
//
// data_type selects the compute/output precision; padding_value is the
// fill value the frontend uses for padded elements (semantics defined by
// the txdnn frontend — see setPaddingValue).
std::shared_ptr<txdnn_frontend::graph::TensorAttributes>
PyEager::matmul(std::shared_ptr<txdnn_frontend::graph::TensorAttributes>& A,
                std::shared_ptr<txdnn_frontend::graph::TensorAttributes>& B,
                const txdnnDataType_t& data_type,
                const double padding_value,
                const std::string& name) {
  // Node-level attributes for the matmul operation.
  auto node_attrs = txdnn_frontend::graph::TensorAttributes()
                        .setPaddingValue(padding_value)
                        .setName(name)
                        .setDataType(data_type);
  return eager_.matmul(A, B, node_attrs);
}

// Validates the eager graph.
// NOTE(review): the frontend's status is currently swallowed, so Python
// callers cannot observe validation failures — TODO: translate a bad
// status into a Python exception. [[maybe_unused]] suppresses the
// unused-variable warning while still accepting a [[nodiscard]] return.
void PyEager::validate() {
  [[maybe_unused]] const auto status = eager_.validate();
}

// Builds the backend operation graph from the recorded eager ops.
// NOTE(review): errors are currently swallowed — TODO: surface a bad
// status to Python. [[maybe_unused]] suppresses the unused-variable
// warning while still accepting a [[nodiscard]] return.
void PyEager::build_operation_graph() {
  [[maybe_unused]] const auto status = eager_.build_operation_graph();
}

// Creates candidate execution plans for the built operation graph.
// NOTE(review): errors are currently swallowed — TODO: surface a bad
// status to Python. [[maybe_unused]] suppresses the unused-variable
// warning while still accepting a [[nodiscard]] return.
void PyEager::create_execution_plans() {
  [[maybe_unused]] const auto status = eager_.create_execution_plans();
}

// Checks whether the current graph/plan configuration is supported.
// NOTE(review): errors are currently swallowed — TODO: surface a bad
// status to Python. [[maybe_unused]] suppresses the unused-variable
// warning while still accepting a [[nodiscard]] return.
void PyEager::check_support() {
  [[maybe_unused]] const auto status = eager_.check_support();
}

// Finalizes (compiles) the execution plans.
// NOTE(review): errors are currently swallowed — TODO: surface a bad
// status to Python. [[maybe_unused]] suppresses the unused-variable
// warning while still accepting a [[nodiscard]] return.
void PyEager::build_plans() {
  [[maybe_unused]] const auto status = eager_.build_plans();
}

// Executes the compiled plan.
//
// var_pack maps tensor uids to device buffer addresses passed from Python
// as integers; they are reinterpreted here as raw pointers for the backend.
// workspace is the scratch-buffer address; handle is an optional backend
// handle (presumably a library/stream handle — confirm against pyeager.h).
void PyEager::execute(std::unordered_map<int64_t, std::intptr_t> var_pack,
                std::intptr_t workspace,
                std::optional<std::intptr_t> handle) {
  // Convert integer addresses into the void* pointer pack the backend expects.
  std::unordered_map<int64_t, void*> pointer_pack;
  pointer_pack.reserve(var_pack.size());
  for (const auto& [uid, address] : var_pack) {
    pointer_pack.emplace(uid, reinterpret_cast<void*>(address));
  }
  eager_.execute(pointer_pack, workspace, handle);
}




bindTxdnnEager(py::module &eager) {




} // 


} // namespace pytxdnn