/*
 * Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <chrono>
#include <fstream>
#include <string>
#include <string.h>
#include <time.h>

#include "boost/filesystem.hpp"
#include "opencv2/opencv.hpp"

#include "imageProcess.h"
#include "model.h"

using namespace MxBase;
namespace fs = boost::filesystem;

namespace
{
  // Yolo's fixed input size (width x height expected by the detection model)
  const Size YOLO_SIZE(416, 416);
  // WheNet's input size, applied when resizing the cropped face
  const Size YOLO_POST_SIZE(224, 224);
  // Border-expansion ratios applied around the detected face box when cropping
  constexpr double_t LEFT_EXPAND_RATIO = 0.25;
  constexpr double_t RIGHT_EXPAND_RATIO = 0.25;
  constexpr double_t UP_EXPAND_RATIO = 0.35;
  constexpr double_t DOWN_EXPAND_RATIO = 0.1;
  // Maximum intensity of one 8-bit color channel
  constexpr int COLOR_255 = 255;
  // Expected argc: program name + run mode
  constexpr int ARGS_NUM = 2;
  // Images "test1.jpg" .. "test13.jpg" are processed in test mode
  constexpr int TEST_IMAGE_COUNT = 13;
}

// Detect faces with Yolo, crop each detected face, estimate the head pose
// with WheNet, then draw the face box and the yaw/pitch/roll axes onto the
// image. The annotated image is written to ./test-output.jpg when at least
// one face was found.
//
// imgPath       - path of the input image
// imageProcess  - image decode/resize/crop helper bound to a device
// yolo, wheNet  - initialized detection / pose-estimation models
// inferCostTime - out: accumulated pure model inference time (Yolo + WheNet), ms
// Returns APP_ERR_INVALID_FILE when the image cannot be read, else APP_ERR_OK.
APP_ERROR DetectAndEstimate(const std::string &imgPath,
                            const std::shared_ptr<ImageProcess> &imageProcess,
                            const std::shared_ptr<Yolo> &yolo,
                            const std::shared_ptr<WheNet> &wheNet,
                            double_t &inferCostTime)
{
  // read image on the host for OpenCV drawing
  cv::Mat originalMat = cv::imread(imgPath);
  if (originalMat.empty())
  {
    LogError << "Can not read this image !";
    return APP_ERR_INVALID_FILE;
  }

  // original image size, used to map Yolo boxes back to full resolution
  int width = originalMat.cols;
  int height = originalMat.rows;
  std::cout << "原始图片的宽高:[" << width << ", " << height << "]" << std::endl;

  // read the image again through the SDK for inference
  Image originalImg;
  imageProcess->ReadImage(imgPath, originalImg);

  // resize to Yolo's fixed input size
  Image yoloInput;
  imageProcess->Resize(originalImg, yoloInput, YOLO_SIZE);

  // validate preprocessed width and height
  Size imageSize = yoloInput.GetSize();
  std::cout << "yolo模型预处理之后图片的宽度:" << imageSize.width << " 高度:" << imageSize.height << std::endl;

  // Yolo inference, timed separately from pre/post processing
  std::vector<Tensor> yoloOutput;
  auto yoloStart = std::chrono::high_resolution_clock::now();
  yolo->Infer(yoloInput, yoloOutput);
  auto yoloEnd = std::chrono::high_resolution_clock::now();
  inferCostTime = std::chrono::duration_cast<std::chrono::milliseconds>(yoloEnd - yoloStart).count();

  // turn raw detections into face crop rectangles in original-image coordinates
  std::vector<Rect> cropConfigVec;
  yolo->PostProcess(yoloOutput, cropConfigVec, width, height);

  if (cropConfigVec.empty())
  {
    LogInfo << "No face detected in input image!";
    return APP_ERR_OK;
  }

  // Loop-invariant drawing attributes, hoisted out of the per-face loop.
  // NOTE: OpenCV stores pixels in BGR order, so Scalar is (blue, green, red);
  // the previous (255,0,0)/(0,0,255) values rendered red as blue and vice versa.
  const cv::Scalar red(0, 0, COLOR_255);
  const cv::Scalar green(0, COLOR_255, 0);
  const cv::Scalar blue(COLOR_255, 0, 0);
  const int thicknessLine = 2;

  // by value: PostProcess below may adjust the rectangle for drawing
  for (auto cropConfig : cropConfigVec)
  {
    // NOTE(review): the source image is re-read for every face, presumably
    // because Crop consumes/releases its input — confirm with the SDK; if not,
    // a single read hoisted above the loop would suffice.
    Image wheNetOriginalImg;
    imageProcess->ReadImage(imgPath, wheNetOriginalImg);

    // crop with an expanded border, then resize to WheNet's input size
    Image wheNetCropedImage;
    imageProcess->Crop(wheNetOriginalImg, wheNetCropedImage, cropConfig, LEFT_EXPAND_RATIO, RIGHT_EXPAND_RATIO, UP_EXPAND_RATIO, DOWN_EXPAND_RATIO);
    std::cout << "whenet(pre)-->crop后的图片大小:[" << wheNetCropedImage.GetSize().width << ", " << wheNetCropedImage.GetSize().height << "]" << std::endl;
    Image wheNetResizedImage;
    imageProcess->Resize(wheNetCropedImage, wheNetResizedImage, YOLO_POST_SIZE);
    std::cout << "whenet(pre)-->resize后的图片大小:[" << wheNetResizedImage.GetSize().width << ", " << wheNetResizedImage.GetSize().height << "]" << std::endl;

    // WheNet inference, added to the shared inference-time accumulator
    std::vector<Tensor> wheNetOutput;
    auto wheNetStart = std::chrono::high_resolution_clock::now();
    wheNet->Infer(wheNetResizedImage, wheNetOutput);
    auto wheNetEnd = std::chrono::high_resolution_clock::now();
    inferCostTime += std::chrono::duration_cast<std::chrono::milliseconds>(wheNetEnd - wheNetStart).count();

    // convert raw outputs into yaw/pitch/roll axis endpoints for drawing
    std::map<std::string, double> drawMap;
    wheNet->PostProcess(wheNetOutput, cropConfig, drawMap);

    // axis origin is the center of the detected face box
    float tdx = (cropConfig.x0 + cropConfig.x1) / 2;
    float tdy = (cropConfig.y0 + cropConfig.y1) / 2;
    cv::Point p0(static_cast<int>(tdx), static_cast<int>(tdy)),
        p1(static_cast<int>(drawMap["yaw_x"]), static_cast<int>(drawMap["yaw_y"])),
        p2(static_cast<int>(drawMap["pitch_x"]), static_cast<int>(drawMap["pitch_y"])),
        p3(static_cast<int>(drawMap["roll_x"]), static_cast<int>(drawMap["roll_y"]));

    // face box expanded by the same ratios that were used for the crop
    cv::Rect rect(static_cast<int>(cropConfig.x0 - (cropConfig.x1 - cropConfig.x0) * LEFT_EXPAND_RATIO),
                  static_cast<int>(cropConfig.y0 - (cropConfig.y1 - cropConfig.y0) * UP_EXPAND_RATIO),
                  static_cast<int>((cropConfig.x1 - cropConfig.x0) * (1 + LEFT_EXPAND_RATIO + RIGHT_EXPAND_RATIO)),
                  static_cast<int>((cropConfig.y1 - cropConfig.y0) * (1 + UP_EXPAND_RATIO + DOWN_EXPAND_RATIO)));
    cv::rectangle(originalMat, rect, blue);

    // yaw (red), pitch (green), roll (blue) axes from the face center
    cv::line(originalMat, p0, p1, red, thicknessLine);
    cv::line(originalMat, p0, p2, green, thicknessLine);
    cv::line(originalMat, p0, p3, blue, thicknessLine);
  }
  cv::imwrite("./test-output.jpg", originalMat);
  return APP_ERR_OK;
}

// Populate the Yolo and WheNet model descriptors for the given device.
// Yolo needs a config/label/om-model triple; WheNet only needs its om model,
// so its config and label paths are left empty.
void paramInit(V2Param &yoloParam, V2Param &wheNetParam, const uint32_t deviceId)
{
  yoloParam.deviceId = deviceId;
  yoloParam.modelPath = "./model/yolov4_detection.om";
  yoloParam.configPath = "./model/yolov4.cfg";
  yoloParam.labelPath = "./model/coco.names";

  wheNetParam.deviceId = deviceId;
  wheNetParam.modelPath = "./model/WHENet_b2_a1_modified.om";
  wheNetParam.configPath = "";
  wheNetParam.labelPath = "";
}

int main(int argc, char *argv[])
{
  if (argc != ARGS_NUM)
  {
    LogError << "请检查参数的完整性！默认格式为：程序名 运行模式";
    return 0;
  }

  if (strcmp(argv[1], "run") == 0)
  {
    std::string imgPath = "test1.jpg";
    const uint32_t deviceId = 0;

    V2Param yoloParam, wheNetParam;
    paramInit(yoloParam, wheNetParam, deviceId);

    // global init
    APP_ERROR ret;
    ret = MxInit();
    if (ret != APP_ERR_OK)
    {
      LogError << "MxInit failed, ret=" << ret << ".";
      return ret;
    }

    // model init
    auto imageProcess = std::make_shared<ImageProcess>(deviceId);

    // include resize,postprocess,infer,etc.
    double_t e2eTime = 0;
    // only include model infer time
    double_t inferTime = 0;

    auto yolo = std::make_shared<Yolo>(yoloParam);
    auto wheNet = std::make_shared<WheNet>(wheNetParam);

    std::map<std::string, double> drawMap;

    std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    ret = DetectAndEstimate(imgPath, imageProcess, yolo, wheNet, inferTime);
    std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
    e2eTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();

    if (ret != APP_ERR_OK)
    {
      LogError << "Recognize failed.Please read infos above.";
      return APP_ERR_OK;
    }

    LogInfo << "the picture's E2E total time is " << e2eTime << "ms";
    LogInfo << "the picture's model infer total time is " << inferTime << "ms";
  }
  else if (strcmp(argv[1], "test") == 0)
  {
    // global init
    APP_ERROR ret;
    ret = MxInit();
    if (ret != APP_ERR_OK)
    {
      LogError << "MxInit failed, ret=" << ret << ".";
      return ret;
    }

    // model init
    const uint32_t deviceId = 0;
    auto imageProcess = std::make_shared<ImageProcess>(deviceId);
    V2Param yoloParam, wheNetParam;
    paramInit(yoloParam, wheNetParam, deviceId);

    int i = 0;
    while (i < TEST_IMAGE_COUNT)
    {
      i++;
      std::string imgPath = "test" + std::to_string(i) + ".jpg";
      
      // include resize,postprocess,infer,etc.
      double_t e2eTime = 0;
      // only include model infer time
      double_t inferTime = 0;

      auto yolo = std::make_shared<Yolo>(yoloParam);
      auto wheNet = std::make_shared<WheNet>(wheNetParam);

      std::map<std::string, double> drawMap;

      std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
      ret = DetectAndEstimate(imgPath, imageProcess, yolo, wheNet, inferTime);
      std::chrono::high_resolution_clock::time_point end = std::chrono::high_resolution_clock::now();
      e2eTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();

      if (ret != APP_ERR_OK)
      {
        LogError << "Recognize failed.Please read infos above.";
        return APP_ERR_OK;
      }

      LogInfo << "the picture's E2E total time is " << e2eTime << "ms";
      LogInfo << "the picture's model infer total time is " << inferTime << "ms";
    }
  }
}