// /*
//  * @Description: 
//  * @version: 
//  * @Author: sueRimn
//  * @Date: 2021-10-10 15:02:47
//  * @LastEditors: sueRimn
//  * @LastEditTime: 2021-11-30 21:42:34
//  */
// /**
// * Copyright 2020 Huawei Technologies Co., Ltd
// *
// * Licensed under the Apache License, Version 2.0 (the "License");
// * you may not use this file except in compliance with the License.
// * You may obtain a copy of the License at

// * http://www.apache.org/licenses/LICENSE-2.0

// * Unless required by applicable law or agreed to in writing, software
// * distributed under the License is distributed on an "AS IS" BASIS,
// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// * See the License for the specific language governing permissions and
// * limitations under the License.

// * File main.cpp
// * Description: dvpp sample main func
// */

// #include <iostream>
// #include <stdlib.h>
// #include <dirent.h>
// #include <sys/time.h>

// #include "retina/object_detect.h"
// #include "retina/utils.h"
// using namespace std;

// namespace {
// uint32_t kModelWidth = 608;
// uint32_t kModelHeight = 608;
// const char* kModelPath = "../model/RetinaNet_sim_608.om";
// }

// cv::Mat origImage;
// string imageName;

// int main(int argc, char *argv[]) {
//     Utils::UDPThreadInit();

//     cpu_set_t cpuset;
//     CPU_ZERO(&cpuset);
//     CPU_SET(1,&cpuset);
//     int rc = pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t), &cpuset);
    
//     // Instantiate the object-detection class, passing the model path and the width/height required for the model input
//     ObjectDetect detect(kModelPath, kModelWidth, kModelHeight);
//     // Initialize the ACL resources for classification inference, load the model, and allocate the memory used for inference input
//     Result ret = detect.Init();
//     if (ret != SUCCESS) {
//         ERROR_LOG("Classification Init resource failed");
//         return FAILED;
//     }
//     // string videoFile = string(argv[1]);
//     // std::vector<string> file_vec;
//     // Utils::GetAllFiles(videoFile, file_vec);
//     fstream f1("./preprocess.txt", ios::out);
//     fstream f2("./inference.txt", ios::out);
//     fstream f3("./postprocess.txt", ios::out);
//     while(1)
//     {
//         // imageName = imageFile;
//         unique_lock<mutex> ilck(mtxImageInput);
//         if(queueImageInput.empty()){
//             cvImage.wait(ilck);
//         }
//         origImage = queueImageInput.front();
//         queueImageInput.pop();

//         // origImage = cv::imread(imageFile, CV_LOAD_IMAGE_GRAYSCALE);
//         struct timeval start, end;
//         gettimeofday(&start, NULL);
//         cv::cvtColor(origImage, origImage, CV_GRAY2RGB);

//         Result ret = detect.Preprocess(origImage);
//         if (ret != SUCCESS) {
//             ERROR_LOG("Read file failed, continue to read next");
//             vector<BBox> results = {};
//             unique_lock<mutex> rlck(mtxResult);
//             queueResult.push(results);
//             cvResult.notify_one();
//             continue;
//         }
//         gettimeofday(&end, NULL);
//         f1<<(double)(end.tv_sec - start.tv_sec)* 1000 + (double)(end.tv_usec - start.tv_usec)* 0.001<<endl;
//          gettimeofday(&start, NULL);
//         // Feed the preprocessed image into the model for inference and obtain the inference results
//         aclmdlDataset* inferenceOutput = nullptr;
//         ret = detect.Inference(inferenceOutput);
//         if ((ret != SUCCESS) || (inferenceOutput == nullptr)) {
//             ERROR_LOG("Inference model inference output data failed");
//             vector<BBox> results = {};
//             unique_lock<mutex> rlck(mtxResult);
//             queueResult.push(results);
//             cvResult.notify_one();
//             continue;
//             //return FAILED;
//         }
//         gettimeofday(&end, NULL);
//         f2<<(double)(end.tv_sec - start.tv_sec)* 1000 + (double)(end.tv_usec - start.tv_usec)* 0.001<<endl;
//         gettimeofday(&start, NULL);
//         //Parses the inference output and sends the inference class, location, confidence, and image to the Presenter Server for display
//         ret = detect.PostprocessRetina(origImage, inferenceOutput);
//         if (ret != SUCCESS) {
//             ERROR_LOG("Process model inference output data failed");
//             vector<BBox> results = {};
//             unique_lock<mutex> rlck(mtxResult);
//             queueResult.push(results);
//             cvResult.notify_one();
//             continue;
//         }
//         gettimeofday(&end, NULL);
//         f3<<(double)(end.tv_sec - start.tv_sec)* 1000 + (double)(end.tv_usec - start.tv_usec)* 0.001<<endl;

//         // static int count = 0;
//         // count++;
//         // if(count >= 10)break;
//     }
//     f1.close();
//     f2.close();
//     f3.close();

//     INFO_LOG("Execute video object detection success");
//     return SUCCESS;
// }
