// Copyright (C) 2023-2024 Arm Technology (China) Co. Ltd.
//
// SPDX-License-Identifier: Apache-2.0

/**
 * @file  main.cpp
 * @brief AIPU UMD test application: basic benchmark test for arm64 platforms
 */

#include <chrono>
#include <iostream>
#include <memory>

#include "common/cmd_line_parsing.h"
#include "model/model.h"
#include "model/model_runner.h"


/**
 * @brief Benchmark entry point: loads an AIPU model, then times 4000
 *        inference runs (2000 iterations x 2 alternating frame buffers).
 * @return 0 on success, -1 on any setup failure.
 */
int main(int argc, char* argv[]) {
  cmd_opt_t opt;
  // Parse command line args; fills opt with the model binary path and inputs.
  if (init_test_bench(argc, argv, &opt, "benchmark_test")) {
    AIPU_ERR()("invalid command line options/args\n");
    return -1;
  }

  // Load the AIPU model binary given on the command line.
  std::shared_ptr<ModelLoader> model = std::make_shared<ModelLoader>(std::string(opt.bin_file_name));
  int ret = model->Init();
  if (ret < 0) {
    std::cout << "model load failed!" << std::endl;
    deinit_test_bench(&opt);  // release resources acquired by init_test_bench
    return -1;
  }

  std::shared_ptr<ModelRunner> model_runner = std::make_shared<ModelRunner>();
  ret = model_runner->Init(model);
  if (ret < 0) {
    std::cout << "model runner failed!" << std::endl;
    deinit_test_bench(&opt);
    // Bug fix: previously fell through and benchmarked a failed runner.
    return -1;
  }

  // Two frame buffers are created and alternated each iteration
  // (presumably to exercise double-buffered inference — confirm with
  // the ModelRunner implementation).
  uint64_t input_frame_0 = 0;
  uint64_t input_frame_1 = 0;
  model_runner->CreateBuffer(&input_frame_0);
  model_runner->CreateBuffer(&input_frame_1);

  // 2000 iterations x 2 runs each = 4000 inferences timed in total.
  constexpr int kIterations = 2000;
  // steady_clock is monotonic and thus the right clock for measuring
  // elapsed intervals (high_resolution_clock may alias system_clock).
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < kIterations; ++i) {
    model_runner->Run(input_frame_0, opt.inputs);
    model_runner->Run(input_frame_1, opt.inputs);
  }
  auto end = std::chrono::steady_clock::now();
  std::cout << "total time: "
            << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
            << "ms" << std::endl;

  deinit_test_bench(&opt);
  return 0;
}

// #include <errno.h>
// #include <fcntl.h>
// #include <math.h>
// #include <stdio.h>
// #include <string.h>
// #include <sys/mman.h>
// #include <sys/stat.h>
// #include <unistd.h>

// #include <fstream>
// #include <iostream>
// #include <vector>

// #include "common/cmd_line_parsing.h"
// #include "common/dbg.hpp"
// #include "common/helper.h"
// #include "kmd/armchina_aipu.h"  // for aipu_ioctl()
// #include "standard_api.h"


// using namespace std;

// int main(int argc, char* argv[]) {
//   aipu_status_t ret = AIPU_STATUS_SUCCESS;
//   aipu_ctx_handle_t* ctx;
//   const char* msg = nullptr;
//   uint64_t graph_id, job_id;
//   uint32_t input_cnt, output_cnt;
//   vector<aipu_tensor_desc_t> input_desc;
//   vector<char*> input_data;
//   vector<aipu_tensor_desc_t> output_desc;
//   vector<char*> output_data;
//   vector<char*> gt;
//   cmd_opt_t opt;
//   uint32_t frame_cnt = 1000;
//   int pass = -1;
//   aipu_create_job_cfg create_job_cfg;
//   struct aipu_config_clusters config_cluster;
//   config_cluster.clusters[0].en_core_cnt = 2;
//   bool en_config = false;
//   aipu_driver_version_t drv_ver = {0};
//   aipu_bin_buildversion_t buildver = {0};
//   aipu_job_config_dump_t mem_dump_config;
//   uint64_t cfg_types = 0;

//   if (init_test_bench(argc, argv, &opt, "benchmark_test")) {
//     AIPU_ERR()("invalid command line options/args\n");
//     goto finish;
//   }

//   ret = aipu_init_context(&ctx);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_init_context: %s\n", msg);
//     goto finish;
//   }
//   AIPU_INFO()("aipu_init_context success\n");

//   // get driver's UMD and KMD version
//   memset(drv_ver.umd_version, 0, sizeof(drv_ver.umd_version));
//   memset(drv_ver.kmd_version, 0, sizeof(drv_ver.kmd_version));
//   ret = aipu_ioctl(ctx, AIPU_IOCTL_GET_VERSION, &drv_ver);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_ioctl: %s\n", msg);
//     goto deinit_ctx;
//   }
//   AIPU_INFO()("Driver UMD: %s, KMD: %s\n", drv_ver.umd_version, drv_ver.kmd_version);

//   ret = aipu_load_graph(ctx, opt.bin_file_name, &graph_id);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_load_graph_helper: %s (%s)\n", msg, opt.bin_file_name);
//     goto deinit_ctx;
//   }
//   AIPU_INFO()("aipu_load_graph_helper success: %s\n", opt.bin_file_name);

//   buildver.graph_id = graph_id;
//   ret = aipu_ioctl(ctx, AIPU_IOCTL_GET_AIPUBIN_BUILDVERSION, &buildver);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_ioctl: %s\n", msg);
//     goto deinit_ctx;
//   }
//   AIPU_INFO()("AIPU BIN buildversion: %x\n", buildver.aipubin_buildversion);

//   ret = aipu_get_tensor_count(ctx, graph_id, AIPU_TENSOR_TYPE_INPUT, &input_cnt);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_get_tensor_count: %s\n", msg);
//     goto unload_graph;
//   }
//   // AIPU_INFO()("aipu_get_tensor_count success: input cnt = %d\n", input_cnt);

//   for (uint32_t i = 0; i < input_cnt; i++) {
//     aipu_tensor_desc_t desc;
//     ret = aipu_get_tensor_descriptor(ctx, graph_id, AIPU_TENSOR_TYPE_INPUT, i, &desc);
//     if (ret != AIPU_STATUS_SUCCESS) {
//       aipu_get_error_message(ctx, ret, &msg);
//       AIPU_ERR()("aipu_get_tensor_descriptor: %s\n", msg);
//       goto unload_graph;
//     }
//     input_desc.push_back(desc);
//   }

//   ret = aipu_get_tensor_count(ctx, graph_id, AIPU_TENSOR_TYPE_OUTPUT, &output_cnt);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_get_tensor_count: %s\n", msg);
//     goto unload_graph;
//   }
//   // AIPU_INFO()("aipu_get_tensor_count success: output cnt = %d\n", output_cnt);

//   for (uint32_t i = 0; i < output_cnt; i++) {
//     aipu_tensor_desc_t desc;
//     ret = aipu_get_tensor_descriptor(ctx, graph_id, AIPU_TENSOR_TYPE_OUTPUT, i, &desc);
//     if (ret != AIPU_STATUS_SUCCESS) {
//       aipu_get_error_message(ctx, ret, &msg);
//       AIPU_ERR()("aipu_get_tensor_descriptor: %s\n", msg);
//       goto unload_graph;
//     }
//     output_desc.push_back(desc);
//   }
//   // fprintf(stderr, "[TEST INFO] aipu_get_tensor_descriptor done\n");

//   /**
//    * here you can specify feature map and weight buffer from
//    * specific regions. the low level allcation logic will firstly
//    * try to allocate buffer from those regions. if there's no enough
//    * free buffer, it will try according to the below order:
//    * DTCM->SRAM->DDR, until fail to allocate.
//    */
//   create_job_cfg.fm_mem_region = AIPU_MEM_REGION_SRAM;
//   ret = aipu_create_job(ctx, graph_id, &job_id, &create_job_cfg);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_create_job: %s\n", msg);
//     goto unload_graph;
//   }
//   AIPU_INFO()("aipu_create_job success\n");

//   memset(&mem_dump_config, 0, sizeof(mem_dump_config));
//   strcpy(opt.dump_dir, "./");
//   mem_dump_config.dump_dir = opt.dump_dir;
//   // cfg_types = 0x3ff;//AIPU_JOB_CONFIG_TYPE_DUMP_INPUT | AIPU_JOB_CONFIG_TYPE_DUMP_OUTPUT;
//   cfg_types = AIPU_JOB_CONFIG_TYPE_DUMP_INPUT | AIPU_JOB_CONFIG_TYPE_DUMP_OUTPUT;

//   ret = aipu_config_job(ctx, job_id, cfg_types, &mem_dump_config);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("[TEST ERROR] aipu_config_job: %s\n", msg);
//   }

//   if (opt.inputs.size() != input_cnt) {
//     AIPU_INFO()("input file count (%u) != input tensor count (%u)\n", (uint32_t)opt.inputs.size(), input_cnt);
//   }

//   for (uint32_t i = 0; i < output_cnt; i++) {
//     char* output = new char[output_desc[i].size];
//     output_data.push_back(output);
//   }

//   if (en_config) {
//     ret = aipu_ioctl(ctx, AIPU_IOCTL_CONFIG_CLUSTERS, &config_cluster);
//     if (ret != AIPU_STATUS_SUCCESS) {
//       aipu_get_error_message(ctx, ret, &msg);
//       AIPU_ERR()("aipu_ioctl (config clusters): %s\n", msg);
//       goto unload_graph;
//     }
//   }

//   /* run with multiple frames */
//   if (opt.frame_cnt == 0) {
//     frame_cnt = 1000;
//   } else {
//     frame_cnt = opt.frame_cnt;
//   }
//   AIPU_INFO()("frame count is : %d\n", frame_cnt);

//   for (uint32_t frame = 0; frame < frame_cnt; frame++) {
//     AIPU_INFO()("Frame #%u\n", frame);
//     for (uint32_t i = 0; i < min((uint32_t)opt.inputs.size(), input_cnt); i++) {
//       if (input_desc[i].size > opt.inputs_size[i]) {
//         AIPU_ERR()
//         ("input file %s len 0x%x < input tensor %u size 0x%x\n", opt.input_files[i].c_str(), opt.inputs_size[i], i,
//          input_desc[i].size);
//         goto clean_job;
//       }
//       ret = aipu_load_tensor(ctx, job_id, i, opt.inputs[i]);
//       if (ret != AIPU_STATUS_SUCCESS) {
//         aipu_get_error_message(ctx, ret, &msg);
//         AIPU_ERR()("aipu_load_tensor: %s\n", msg);
//         goto clean_job;
//       }
//       AIPU_INFO()("load input tensor %d from %s (%u/%u)\n", i, opt.input_files[i].c_str(), i + 1, input_cnt);
//     }

//     ret = aipu_finish_job(ctx, job_id, -1);
//     if (ret != AIPU_STATUS_SUCCESS) {
//       aipu_get_error_message(ctx, ret, &msg);
//       AIPU_ERR()("aipu_finish_job: %s\n", msg);
//       goto clean_job;
//     }
//     AIPU_INFO()("aipu_finish_job success\n");

//     for (uint32_t i = 0; i < output_cnt; i++) {
//       ret = aipu_get_tensor(ctx, job_id, AIPU_TENSOR_TYPE_OUTPUT, i, output_data[i]);
//       if (ret != AIPU_STATUS_SUCCESS) {
//         aipu_get_error_message(ctx, ret, &msg);
//         AIPU_ERR()("aipu_get_tensor: %s\n", msg);
//         goto clean_job;
//       }
//       AIPU_INFO()("get output tensor %u success (%u/%u)\n", i, i + 1, output_cnt);

//       std::stringstream ss;
//       ss << "./Output_1round_dump_" << i << ".bin";
//       std::string output_filename1 = ss.str();
//       AIPU_INFO()("Output file: %s\n", output_filename1.c_str());
//       ofstream output_file1(output_filename1, ios::binary);
//       if (!output_file1.is_open()) {
//         AIPU_ERR()("Failed to open file %s for writing\n", output_filename1.c_str());
//         goto clean_job;
//       }
//       output_file1.write(output_data[i], output_desc[i].size);
//       output_file1.close();
//       AIPU_INFO()("Saved output tensor %u to %s\n", i, output_filename1.c_str());
//     }

//     pass = check_results_helper(output_data, output_desc, opt.gts, opt.gts_size);

//     if (pass == -1) {
//       usleep(100000);
//       AIPU_INFO()("usleep for 100000, and now 2th round dump and check start\n");
//       for (uint32_t i = 0; i < output_cnt; i++) {
//         ret = aipu_get_tensor(ctx, job_id, AIPU_TENSOR_TYPE_OUTPUT, i, output_data[i]);
//         if (ret != AIPU_STATUS_SUCCESS) {
//           aipu_get_error_message(ctx, ret, &msg);
//           AIPU_ERR()("aipu_get_tensor: %s\n", msg);
//           goto clean_job;
//         }
//         AIPU_INFO()("get output tensor %u success (%u/%u)\n", i, i + 1, output_cnt);

//         std::stringstream ss2;
//         ss2 << "./Output_2round_dump_" << i << ".bin";
//         std::string output_filename2 = ss2.str();
//         ofstream output_file2(output_filename2, ios::binary);
//         if (!output_file2.is_open()) {
//           AIPU_ERR()("Failed to open file %s for writing\n", output_filename2.c_str());
//           goto clean_job;
//         }
//         output_file2.write(output_data[i], output_desc[i].size);
//         output_file2.close();
//         AIPU_INFO()("Saved output tensor %u to %s\n", i, output_filename2.c_str());
//       }
//       pass = check_results_helper(output_data, output_desc, opt.gts, opt.gts_size);
//     }

//     if (pass == -1) {
//       AIPU_INFO()("ERROR check result twice, BREAK loop now!!!\n");
//       break;
//     }
//   }

// clean_job:
//   ret = aipu_clean_job(ctx, job_id);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_clean_job: %s\n", msg);
//     goto unload_graph;
//   }
//   AIPU_INFO()("aipu_clean_job success\n");

// unload_graph:
//   ret = aipu_unload_graph(ctx, graph_id);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_unload_graph: %s\n", msg);
//     goto deinit_ctx;
//   }
//   AIPU_INFO()("aipu_unload_graph success\n");

// deinit_ctx:
//   ret = aipu_deinit_context(ctx);
//   if (ret != AIPU_STATUS_SUCCESS) {
//     aipu_get_error_message(ctx, ret, &msg);
//     AIPU_ERR()("aipu_deinit_ctx: %s\n", msg);
//     goto finish;
//   }
//   AIPU_INFO()("aipu_deinit_ctx success\n");

// finish:
//   if (AIPU_STATUS_SUCCESS != ret) pass = -1;

//   for (uint32_t i = 0; i < output_data.size(); i++) delete[] output_data[i];

//   deinit_test_bench(&opt);

//   return pass;
// }
