#include <fstream>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

#include <inference_engine.hpp>
#include <opencv2/opencv.hpp>
using namespace InferenceEngine;
using namespace std;
using namespace cv;

// Name of the network's input layer; filled in by initModel().
string inputName;
// Name of the network's output layer; filled in by initModel().
string outputName;
// Shared inference request, created once in initModel() and reused by infer().
InferRequest inferReq;
//初试化模型相关参数
void initModel(string xml, string bin) {
  try {
    Core ie;
    CNNNetReader network_reader;
    network_reader.ReadNetwork(xml);
    network_reader.ReadWeights(bin);
    network_reader.getNetwork().setBatchSize(1);
    CNNNetwork network = network_reader.getNetwork();
    InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
    inputName = network.getInputsInfo().begin()->first;

    input_info->setLayout(Layout::NCHW);
    input_info->setPrecision(Precision::U8);
    // input_info->setPrecision(Precision::FP32);

    DataPtr output_info = network.getOutputsInfo().begin()->second;
    outputName = network.getOutputsInfo().begin()->first;

    output_info->setPrecision(Precision::FP32);

    ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");

    inferReq = executable_network.CreateInferRequest();
  } catch (const std::exception& ex) {
    std::cerr << ex.what() << std::endl;
  }
}
// Copy an 8-bit 3-channel cv::Mat into an NCHW U8 blob (HWC -> CHW),
// resizing to the blob's spatial dimensions first if they differ.
// Assumes the image is CV_8UC3 (the at<cv::Vec3b> access below).
// @param orig_image  source image (not modified)
// @param blob        destination blob; dims are read as [N, C, H, W]
// @param batchIndex  which batch slot of the blob to fill (default 0)
void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob,
                 int batchIndex = 0) {
  InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
  const size_t width = blobSize[3];
  const size_t height = blobSize[2];
  const size_t channels = blobSize[1];
  cout << channels << " " << height << " " << width << endl;
  uint8_t* blob_data = blob->buffer().as<uint8_t*>();

  // Shallow copy: shares pixels with orig_image unless a resize is needed.
  cv::Mat resized_image(orig_image);
  if (static_cast<int>(width) != orig_image.size().width ||
      static_cast<int>(height) != orig_image.size().height) {
    cv::resize(orig_image, resized_image, cv::Size(width, height));
  }

  // Fix: compute the offset in size_t — the old int expression could
  // overflow for large tensors or batch indices.
  const size_t batchOffset =
      static_cast<size_t>(batchIndex) * width * height * channels;

  // Interleaved HWC pixels -> planar CHW layout expected by the network.
  for (size_t c = 0; c < channels; c++) {
    for (size_t h = 0; h < height; h++) {
      for (size_t w = 0; w < width; w++) {
        blob_data[batchOffset + c * width * height + h * width + w] =
            resized_image.at<cv::Vec3b>(h, w)[c];
      }
    }
  }
}

// Copy a 3-channel cv::Mat into an NCHW FP32 blob (HWC -> CHW).
// Handles both CV_32FC3 input (e.g. the normalized image from getImg_norm)
// and CV_8UC3 input (values are widened to float unchanged).
// @param image       source image; must already match the blob's H x W
// @param blob        destination FP32 blob; dims are read as [N, C, H, W]
// @param batchIndex  which batch slot of the blob to fill (default 0)
void matF32ToBlob(const cv::Mat& image, InferenceEngine::Blob::Ptr& blob,
                  int batchIndex = 0) {
  InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
  const size_t width = blobSize[3];
  const size_t height = blobSize[2];
  const size_t channels = blobSize[1];
  cout << channels << " " << height << " " << width << endl;
  // Fix: _Float32 is a GCC extension, not standard C++ — use float.
  float* blob_data = blob->buffer().as<float*>();
  const size_t batchOffset =
      static_cast<size_t>(batchIndex) * width * height * channels;

  // Fix: the original always read at<Vec3b>, which misreads the raw bytes
  // of a CV_32F image (the type getImg_norm produces). Dispatch on depth;
  // the 8-bit path is unchanged.
  const bool isFloat = (image.depth() == CV_32F);

  for (size_t c = 0; c < channels; c++) {
    for (size_t h = 0; h < height; h++) {
      for (size_t w = 0; w < width; w++) {
        blob_data[batchOffset + c * width * height + h * width + w] =
            isFloat ? image.at<cv::Vec3f>(h, w)[c]
                    : static_cast<float>(image.at<cv::Vec3b>(h, w)[c]);
      }
    }
  }
}

// Forward pass: fill the input blob from the (8-bit RGB) image, run
// inference, and return the first FP32 output value.
// @param rgb  3-channel 8-bit image, RGB channel order (see getImg)
// @return the first element of the network's FP32 output
float infer(cv::Mat rgb) {
  Blob::Ptr imgBlob = inferReq.GetBlob(inputName);
  matU8ToBlob(rgb, imgBlob);
  // Fix: the message previously said "matF32ToBlob done" although
  // matU8ToBlob is the function actually called above.
  cout << "matU8ToBlob done !!!" << endl;
  inferReq.Infer();
  cout << "inferReq.Infer() done !!!" << endl;
  Blob::Ptr output = inferReq.GetBlob(outputName);
  cout << "inferReq.GetBlob(outputName) done !!!" << endl;
  float* logits = output->buffer()
                      .as<InferenceEngine::PrecisionTrait<
                          InferenceEngine::Precision::FP32>::value_type*>();

  // Only the first logit is used by callers.
  return logits[0];
}

// Read an image, mean-blur it, resize to the network's input size, convert
// BGR -> RGB, scale to [0,1], and apply per-channel ImageNet normalization
// (matching the training-time preprocessing).
// @param imgPath  path to the image file
// @return CV_32FC3 Mat, RGB channel order, normalized
// @throws std::runtime_error if the image cannot be read
Mat getImg_norm(const string& imgPath) {
  Blob::Ptr blob = inferReq.GetBlob(inputName);
  InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
  const size_t w = blobSize[3];
  const size_t h = blobSize[2];
  Mat imgBGR = imread(imgPath);
  // Fix: the original passed an empty Mat straight to blur() on a bad path,
  // producing an opaque OpenCV error instead of a clear one.
  if (imgBGR.empty()) {
    throw std::runtime_error("getImg_norm: failed to read image: " + imgPath);
  }
  blur(imgBGR, imgBGR, Size(5, 5));  // 5x5 mean filter, matches model name
  resize(imgBGR, imgBGR, Size(w, h));
  Mat imgRGB;
  cvtColor(imgBGR, imgRGB, COLOR_BGR2RGB);
  imgRGB.convertTo(imgRGB, CV_32F);
  Mat normImg = (imgRGB / 255.0);
  vector<Mat> normChs;
  split(normImg, normChs);
  // ImageNet mean/std; channel order is RGB after the cvtColor above.
  normChs[0] = (normChs[0] - 0.485) / 0.229;  // R
  normChs[1] = (normChs[1] - 0.456) / 0.224;  // G
  normChs[2] = (normChs[2] - 0.406) / 0.225;  // B
  merge(normChs, normImg);

  return normImg;
}

// Read an image, mean-blur it, resize to the network's input size, and
// convert BGR -> RGB. No normalization (the U8 input path).
// @param imgPath  path to the image file
// @return CV_8UC3 Mat, RGB channel order
// @throws std::runtime_error if the image cannot be read
Mat getImg(const string& imgPath) {
  Blob::Ptr blob = inferReq.GetBlob(inputName);
  InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
  const size_t w = blobSize[3];
  const size_t h = blobSize[2];
  Mat imgBGR = imread(imgPath);
  // Fix: fail loudly instead of passing an empty Mat to blur().
  if (imgBGR.empty()) {
    throw std::runtime_error("getImg: failed to read image: " + imgPath);
  }
  blur(imgBGR, imgBGR, Size(5, 5));  // 5x5 mean filter, matches model name
  resize(imgBGR, imgBGR, Size(w, h));
  Mat imgRGB;
  cvtColor(imgBGR, imgRGB, COLOR_BGR2RGB);

  return imgRGB;
}

//测试
int main(int argc, char const* argv[]) {
  string xml = "resnet50_cv_blur_5x5.xml";
  string bin = "resnet50_cv_blur_5x5.bin";
  string imgPath = "0_0.881091.jpg";
  initModel(xml, bin);

  Mat rgb = getImg(imgPath);
  cout << "getImg done !!!" << endl;

  float p;
  p = infer(rgb);
  cout << p << endl;
}
