#include "depth_model.h"

#include <fstream>
#include <iostream>
#include <stdexcept>
#include <vector>

namespace mono_depth {
// predefined model training parameters
namespace {
const int input_h = 518;
const int input_w = 518;
const int num_channels = 3;
const float mean[num_channels] = {123.675, 116.28, 103.53};
const float std[num_channels] = {58.395, 57.12, 57.375};
}  // namespace

void DepthAnythingModel::TrtLogger::log(Severity severity, const char* msg) noexcept {
  // Forward warnings and errors to stdout; drop info/verbose messages.
  const bool important = severity <= Severity::kWARNING;
  if (!important) {
    return;
  }
  std::cout << msg << std::endl;
}

// Loads a serialized TensorRT engine from disk, builds the execution
// context, allocates device I/O buffers, and preallocates all host-side
// intermediate buffers used during preprocessing/postprocessing.
//
// Throws std::runtime_error if the engine file cannot be opened/read or
// the engine fails to deserialize (the original code silently continued
// and dereferenced a null engine).
DepthAnythingModel::DepthAnythingModel(const std::string& trt_engine_path) {
  std::ifstream engine_stream(trt_engine_path, std::ios::binary);
  if (!engine_stream.is_open()) {
    throw std::runtime_error("failed to open TensorRT engine file: " + trt_engine_path);
  }

  // get engine file size
  engine_stream.seekg(0, std::ios::end);
  const size_t engine_size = engine_stream.tellg();
  engine_stream.seekg(0, std::ios::beg);

  // read the engine data
  std::vector<char> engine_data(engine_size);
  if (!engine_stream.read(engine_data.data(), engine_size)) {
    throw std::runtime_error("failed to read TensorRT engine file: " + trt_engine_path);
  }
  engine_stream.close();

  // create tensorrt engine
  runtime_.reset(nvinfer1::createInferRuntime(logger_));
  engine_.reset(runtime_->deserializeCudaEngine(engine_data.data(), engine_size));
  if (!engine_) {
    throw std::runtime_error("failed to deserialize TensorRT engine: " + trt_engine_path);
  }
  show_engine_bindings();
  context_.reset(engine_->createExecutionContext());
  context_->setInputShape("input", nvinfer1::Dims4(1, num_channels, input_h, input_w));

  // create device buffers: [0] = CHW float input image, [1] = int depth output
  cudaStreamCreate(&stream_);
  cudaMalloc(&binding_buffers_[0], num_channels * input_h * input_w * sizeof(float));
  cudaMalloc(&binding_buffers_[1], input_h * input_w * sizeof(int));

  // bind the device buffers to the engine's named I/O tensors
  context_->setTensorAddress("input", binding_buffers_[0]);
  context_->setTensorAddress("output", binding_buffers_[1]);

  // preallocate intermediate buffers
  scaled_input_ = cv::Mat(input_h, input_w, CV_8UC3);
  channels_.resize(num_channels);
  for (auto& ch : channels_) {
    ch = cv::Mat(input_h, input_w, CV_8U);
  }

  input_tensor_.resize(num_channels * input_h * input_w);
  channels_f_.resize(num_channels);
  for (int i = 0; i < num_channels; ++i) {
    // channels_f_[i] is a non-owning view into input_tensor_, so writing
    // the normalized planes fills the contiguous CHW tensor directly
    channels_f_[i] = cv::Mat(input_h, input_w, CV_32F, input_tensor_.data() + (i * (input_h * input_w)));
  }

  depth_data_.resize(input_h * input_w);
  output_image_ = cv::Mat(input_h, input_w, CV_32F);
}

// Releases the CUDA stream and the device I/O buffers.
DepthAnythingModel::~DepthAnythingModel() {
  // BUG FIX: a cudaStream_t must be released with cudaStreamDestroy;
  // cudaFree is only valid for device memory obtained via cudaMalloc.
  cudaStreamDestroy(stream_);
  cudaFree(binding_buffers_[0]);
  cudaFree(binding_buffers_[1]);
}

void DepthAnythingModel::show_engine_bindings() const {
  std::cout << "Depth-Anything trt engine bindings:\n";
  for (int i = 0; i < engine_->getNbIOTensors(); i++) {
    auto name = engine_->getIOTensorName(i);
    std::cout << "node: " << name << ", ";
    if (engine_->getTensorIOMode(name) == nvinfer1::TensorIOMode::kINPUT) {
      std::cout << "type: input"
                << ", ";
    } else if (engine_->getTensorIOMode(name) == nvinfer1::TensorIOMode::kOUTPUT) {
      std::cout << "type: output"
                << ", ";
    }
    nvinfer1::Dims dim = engine_->getTensorShape(name);
    std::cout << "dimensions: ";
    for (int d = 0; d < dim.nbDims; d++) {
      std::cout << dim.d[d] << " ";
    }
    std::cout << "\n";
  }
}

// Resizes the (assumed RGB) input to the network resolution, splits it
// into planes, and normalizes each plane with the training mean/std.
// The CV_32F planes in channels_f_ alias input_tensor_, so this fills
// the contiguous CHW host tensor in place.
void DepthAnythingModel::fill_input_tensor(const cv::Mat& input) {
  cv::resize(input, scaled_input_, scaled_input_.size(), 0, 0, cv::INTER_LINEAR);
  cv::split(scaled_input_, channels_);

  for (int c = 0; c < num_channels; ++c) {
    cv::Mat& plane = channels_f_[c];
    channels_[c].convertTo(plane, CV_32F);
    plane = (plane - mean[c]) / std[c];
  }
}

// Runs depth inference on a single RGB image and returns a rainbow
// colormap visualization of the predicted depth (518x518, CV_8UC3).
// The returned Mat references a member buffer and is invalidated by the
// next call to process().
cv::Mat DepthAnythingModel::process(const cv::Mat input) {
  // preprocess image data for the use in the model
  fill_input_tensor(input);

  // upload the normalized CHW tensor to the device
  cudaMemcpyAsync(
      binding_buffers_[0],
      input_tensor_.data(),
      num_channels * input_h * input_w * sizeof(float),
      cudaMemcpyHostToDevice,
      stream_
  );

  // inference depth model on the same stream as the copies
  context_->enqueueV3(stream_);

  // BUG FIX: the device->host copy previously omitted the stream
  // argument (running on the default stream) and was never synchronized
  // before depth_data_ was read below — a data race. Issue it on
  // stream_ and wait for the whole upload->inference->download chain.
  cudaMemcpyAsync(
      depth_data_.data(),
      binding_buffers_[1],
      input_h * input_w * sizeof(int),
      cudaMemcpyDeviceToHost,
      stream_
  );
  cudaStreamSynchronize(stream_);

  // wrap the raw int32 depth buffer in a cv::Mat header (no copy)
  cv::Mat int_depth_img(input_h, input_w, CV_32S, depth_data_.data());

  int_depth_img.convertTo(output_image_, CV_32F);
  // stretch to the full 8-bit range for visualization
  cv::normalize(output_image_, output_image_, 0, 255, cv::NORM_MINMAX, CV_8U);

  // create a colormap from the depth data
  cv::applyColorMap(output_image_, colormap_, cv::COLORMAP_RAINBOW);

  return colormap_;
}
}  // namespace mono_depth
