// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "utils/transforms.h"
#include "utils/model_config.h"
#include "cls_predictor.h"

#include <algorithm>
#include <functional>
#include <iterator>
#include <mutex>
#include <numeric>
#include <string>
#include <thread>
#include <vector>

namespace paddle_infer {

// Constructs a predictor that reports through the supplied logging settings.
// The LogInfo object is borrowed, not owned; the caller keeps it alive.
ClsPredictor::ClsPredictor(LogInfo *lg)
    : log_ifo(lg) {
}

// Dispatches a single-image classification request.
// Only the pd_cls algorithm is handled here; anything else is reported as
// unimplemented so the caller can fall back or surface the error.
int ClsPredictor::Run(cv::Mat& img,
                      std::vector<BaseInfoPD*>& cls_info,
                      PDModelManager* mng,
                      int md_idx)
{
    if (mng->configs[md_idx]->algorithm != pd_cls) {
      return pd_not_implement_error;
    }
    return RunCls(img, cls_info, mng, md_idx);
}
// Dispatches a batched classification request.
// Mirrors the single-image overload: non-classification models are rejected
// with pd_not_implement_error before any work is done.
int ClsPredictor::Run(std::vector<cv::Mat>& imgs,
              std::vector<BaseInfoPD*>& cls_ifos,
              PDModelManager* mng,
              int md_idx) {
  if (mng->configs[md_idx]->algorithm != pd_cls) {
    return pd_not_implement_error;
  }
  return RunCls(imgs, cls_ifos, mng, md_idx);
}

// Classifies a single image: preprocess, run the predictor once, and append
// the top-1 result (score, class index, label) to cls_info.
//
// @param img      input image; must be non-empty.
// @param cls_info receives one newly allocated ClsInfoPD (caller owns it).
// @param mng      provides the per-model config and predictor for md_idx.
// @param md_idx   index of the model to use.
// @return 0 on success, PD_IMAGE_EMPTY_ERROR for an empty image.
int ClsPredictor::RunCls(cv::Mat& img,
                        std::vector<BaseInfoPD*>& cls_info, 
                        PDModelManager* mng,
                        int md_idx) {
  if (img.empty()) { return PD_IMAGE_EMPTY_ERROR; }
  PDModelConfig *cfg = mng->configs[md_idx];
  ImageBlob img_blob = ImageBlob();
  cfg->transforms->run(img, img_blob, cfg);

  std::vector<float> outputs;
  {
    // RAII lock replaces the original manual rec_mut.lock()/unlock() pair:
    // if anything between feed and fetch throws, lock_guard still releases
    // the mutex, whereas the manual pair would leave it locked forever.
    std::lock_guard<decltype(rec_mut)> guard(rec_mut);

    auto input_names = mng->predictors[md_idx]->GetInputNames();
    if (log_ifo->log_level_4) { log_tensor_shape("input_names", input_names); }

    // Torch-exported resnet models expose their input as "x0"; all others "x".
    std::string tensor_name = "x";
    if (cfg->arch == "resnet_torch") { tensor_name = "x0"; }
    auto in_tensor = mng->predictors[md_idx]->GetInputTensor(tensor_name);
    if (cfg->data_format == "CHW") {
      in_tensor->Reshape({ 1, cfg->channels,
                           img_blob.new_im_shape[0], img_blob.new_im_shape[1] });
    } else {  // channels-last layout
      in_tensor->Reshape({ 1, img_blob.new_im_shape[0],
                           img_blob.new_im_shape[1], cfg->channels });
    }
    in_tensor->copy_from_cpu(img_blob.im_data.data());
    mng->predictors[md_idx]->ZeroCopyRun();

    auto output_names = mng->predictors[md_idx]->GetOutputNames();
    if (log_ifo->log_level_4) { log_tensor_shape("output_names", output_names); }

    auto output_tensor = mng->predictors[md_idx]->GetOutputTensor(output_names[0]);
    std::vector<int> output_shape = output_tensor->shape();
    if (log_ifo->log_level_4) { log_tensor_shape("output_shape", output_shape); }
    // Total element count = product of all output dimensions.
    int size = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                               std::multiplies<int>());
    outputs.resize(size);
    output_tensor->copy_to_cpu(outputs.data());
  }

  // Top-1: the class with the highest score wins.
  auto max_it = std::max_element(outputs.begin(), outputs.end());
  int class_idx = static_cast<int>(std::distance(outputs.begin(), max_it));
  cls_info.push_back(new ClsInfoPD(*max_it, class_idx, cfg->label_list[class_idx]));
  return 0;
}

// Batched classification: splits imgs into hardware-concurrency-sized
// mini-batches, preprocesses each image in parallel, runs one inference per
// mini-batch, and appends one top-1 ClsInfoPD per input image to cls_ifos.
//
// @param imgs     input images (may be empty; then nothing is done).
// @param cls_ifos receives one newly allocated ClsInfoPD per image.
// @param mng      provides the per-model config and predictor for md_idx.
// @param md_idx   index of the model to use.
// @return 0 on success.
int ClsPredictor::RunCls(std::vector<cv::Mat>& imgs,
              std::vector<BaseInfoPD*>& cls_ifos, 
              PDModelManager* mng,
              int md_idx) {
  int ret = 0;
  if (log_ifo->log_level_4) spdlog::get("logger")->info("1.2.1.0 RunCls");

  // Guard: an empty batch would make thread_num 0 and divide by zero below.
  if (imgs.empty()) { return ret; }

  PDModelConfig *cfg = mng->configs[md_idx];
  // hardware_concurrency() may legally report 0; clamp so thread_num >= 1.
  int thread_num = std::min(uint(imgs.size()),
                            std::max(1u, std::thread::hardware_concurrency()));

  auto total_size = imgs.size();
  // Number of mini-batches, rounded up.
  int batch = total_size / thread_num
              + ((total_size % thread_num) != 0);
  if (log_ifo->log_level_4) { spdlog::get("logger")->info("total_size: {} batch: {}",total_size, batch); }

  for (int u = 0; u < batch; ++u) {
    // The trailing mini-batch may be smaller than thread_num.
    int batch_size = thread_num;
    if (u == (batch - 1) && (total_size % thread_num)) {
        batch_size = total_size % thread_num;
    }
    if (log_ifo->log_level_4) { spdlog::get("logger")->info("u: {} batch_size: {}",u, batch_size); }

    std::vector<ImageBlob> blob_batch;
    blob_batch.assign(batch_size, ImageBlob());
    #pragma omp parallel for num_threads(batch_size)
    for (int i = 0; i < batch_size; ++i) {
      int idx = u * thread_num + i;
      cv::Mat im = imgs[idx].clone();
      // Restored: this preprocessing call was commented out, leaving every
      // blob_batch entry default-constructed, so the shape/data reads below
      // consumed uninitialized blobs.
      // NOTE(review): the commented-out original checked a bool return and
      // mapped failure to pd_cls_preprocess_error — confirm run()'s return
      // type and reinstate that check if it can report failure.
      cfg->transforms->run(im, blob_batch[i], cfg);
    }
    if (log_ifo->log_level_4) spdlog::get("logger")->info("1.2.1.1 RunCls");

    // All images in a mini-batch share one post-transform size.
    int h = blob_batch[0].new_im_shape[0];
    int w = blob_batch[0].new_im_shape[1];

    // Flatten the per-image blobs into one contiguous input buffer
    // (built outside the lock to keep the critical section short).
    std::vector<float> inputs_data(batch_size * cfg->channels * h * w);
    for (int i = 0; i < batch_size; ++i) {
      std::copy(blob_batch[i].im_data.begin(),
                blob_batch[i].im_data.end(),
                inputs_data.begin() + i * cfg->channels * h * w);
    }
    if (log_ifo->log_level_4) spdlog::get("logger")->info("1.2.1.2 RunCls");

    std::vector<float> outputs;
    {
      // Serialize access to the shared predictor — the single-image RunCls
      // holds rec_mut around the whole feed/run/fetch sequence, but this
      // batched path did not, racing against concurrent single-image calls.
      std::lock_guard<decltype(rec_mut)> guard(rec_mut);

      auto input_names = mng->predictors[md_idx]->GetInputNames();
      if (log_ifo->log_level_4) { log_tensor_shape("input_names", input_names); }
      // NOTE(review): the single-image path selects "x0" for resnet_torch
      // models; this path always uses "x" — confirm torch models never take
      // the batched route.
      auto in_tensor = mng->predictors[md_idx]->GetInputTensor("x");
      if (cfg->data_format == "CHW"){ in_tensor->Reshape({ batch_size, cfg->channels, h, w});}
      else { in_tensor->Reshape({ batch_size, h, w, cfg->channels }); }
      if (log_ifo->log_level_4) { log_tensor_shape("input_shape", in_tensor->shape()); }

      in_tensor->copy_from_cpu(inputs_data.data());
      mng->predictors[md_idx]->ZeroCopyRun();
      if (log_ifo->log_level_4) spdlog::get("logger")->info("1.2.1.3 RunCls");

      auto output_names = mng->predictors[md_idx]->GetOutputNames();
      if (log_ifo->log_level_4) { log_tensor_shape("output_names", output_names); }

      auto output_tensor = mng->predictors[md_idx]->GetOutputTensor(output_names[0]);
      std::vector<int> output_shape = output_tensor->shape();
      if (log_ifo->log_level_4) { log_tensor_shape("output_shape", output_shape); }

      int size = std::accumulate(output_shape.begin(), output_shape.end(), 1,
                                 std::multiplies<int>());
      outputs.resize(size);
      output_tensor->copy_to_cpu(outputs.data());
    }

    // Scores per image: the flat output is sliced evenly across the batch.
    int size = static_cast<int>(outputs.size());
    int single_batch_size = size / batch_size;
    if (log_ifo->log_level_4) { spdlog::get("logger")->info("outputs.size: {} batch_size:{} single_batch_size:{}", size, batch_size, single_batch_size); }
    if (log_ifo->log_level_4) spdlog::get("logger")->info("1.2.1.4 RunCls");

    for (int i = 0; i < batch_size; ++i) {
      if (log_ifo->log_level_4) { spdlog::get("logger")->info("start for loop"); }

      auto start_ptr = outputs.begin();
      auto end_ptr = outputs.begin();
      std::advance(start_ptr, i * single_batch_size);
      std::advance(end_ptr, (i + 1) * single_batch_size);
      if (log_ifo->log_level_4) { spdlog::get("logger")->info("middle for loop"); }

      // Top-1 within this image's slice of the output.
      auto max_it = std::max_element(start_ptr, end_ptr);
      int class_idx = static_cast<int>(std::distance(start_ptr, max_it));

      cls_ifos.push_back(new ClsInfoPD(*max_it, class_idx, cfg->label_list[class_idx]));
      // Fixed format string: two arguments were passed but only one "{}"
      // placeholder was present.
      if (log_ifo->log_level_4) { spdlog::get("logger")->info("end for loop: {} {}", class_idx, cfg->label_list[class_idx]); }
    }
    if (log_ifo->log_level_4) spdlog::get("logger")->info("1.2.1.5 RunCls");

  }
  return ret;
}


} // namespace paddle_infer
