#ifndef __OPTICAL_CLIP_DETECTOR_H
#define __OPTICAL_CLIP_DETECTOR_H

// C++ standard library
#include <atomic>
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>              // std::mutex / std::lock_guard used by Queue<T>
#include <numeric>
#include <queue>              // std::queue used by Queue<T>
#include <string>
#include <thread>
#include <vector>

// C / system
#include <pthread.h>

// Third-party
#include "opencv2/opencv.hpp"
#include <opencv2/opencv.hpp> // NOTE(review): duplicate of the line above (harmless via header guards); consider removing one
#include "acl/acl.h"
#include "acllite/AclLiteModel.h"
#include "acllite/AclLiteUtils.h"
#include "acllite/AclLiteResource.h"


// Work item flowing into the preprocessing stage: a visible/infrared image
// pair identified by the source filenames.
struct message_pre {
    std::string filename_visible;   // path/name of the visible-light image
    std::string filename_infrared;  // path/name of the infrared image
    cv::Mat frame_visible;          // decoded visible-light frame
    cv::Mat frame_infrared;         // decoded infrared frame
};

// Work item for the model-execution stage (name keeps the original
// spelling "Excute" for source compatibility).
struct message_Excute {
    std::string filename_visible;   // path/name of the visible-light image
    std::string filename_infrared;  // path/name of the infrared image
    aclmdlDataset * detectData; // preprocessed ACL input dataset for inference — presumably passed to aclmdlExecute; ownership/release responsibility not visible here (TODO confirm). The previous "divisible by 2" comment was wrong for this field.
};

// Final per-image-pair result emitted by the postprocessing stage.
struct message_result {
    std::string filename_visible;   // path/name of the visible-light image
    std::string filename_infrared;  // path/name of the infrared image
    std::vector<std::vector<float>> finalOutput; // postprocessed model outputs (layout defined by the postprocess step)
};

// Thread-safe, unbounded FIFO queue used to hand work items between the
// pipeline stages (preprocess -> execute -> postprocess).
//
// Shutdown: the original notify_all() could never release a thread blocked
// in wait_and_pop(), because the wait predicate (!queue.empty()) re-checks
// and goes straight back to sleep. A `closed` flag plus close() fixes that:
// after close(), blocked consumers drain remaining items and then return
// false instead of sleeping forever.
template<typename T>
class Queue {
    std::queue<T> queue;
    mutable std::mutex mutex;
    std::condition_variable cond;
    bool closed = false;  // set by close(); guarded by `mutex`

public:
    // Enqueue a value and wake one waiting consumer.
    void push(T value) {
        {
            std::lock_guard<std::mutex> lock(mutex);
            queue.push(std::move(value));
        }
        // Notify outside the lock so the woken thread doesn't immediately
        // block on the mutex we still hold.
        cond.notify_one();
    }

    // Non-blocking pop. Returns false (value untouched) if the queue is empty.
    bool try_pop(T& value) {
        std::lock_guard<std::mutex> lock(mutex);
        if (queue.empty()) return false;
        value = std::move(queue.front());
        queue.pop();
        return true;
    }

    // Blocking pop. Returns true when a value was popped; returns false
    // (value untouched) only after close() has been called and the queue is
    // drained. (Return type changed from void to bool — existing callers
    // that ignore the result still compile unchanged.)
    bool wait_and_pop(T& value) {
        std::unique_lock<std::mutex> lock(mutex);
        cond.wait(lock, [this]{ return closed || !queue.empty(); });
        if (queue.empty()) return false;  // closed and fully drained
        value = std::move(queue.front());
        queue.pop();
        return true;
    }

    // Mark the queue closed and wake every blocked consumer so it can
    // observe shutdown. Items already queued can still be popped.
    void close() {
        {
            std::lock_guard<std::mutex> lock(mutex);
            closed = true;
        }
        cond.notify_all();
    }

    // Kept for backward compatibility. Note: without close(), waiters
    // woken here re-check the predicate and go back to sleep.
    void notify_all() {
        cond.notify_all();
    }
};


// Multi-threaded YOLOv3/CLIP inference pipeline on Ascend ACL:
// preprocess threads feed an execute stage, whose outputs are consumed by
// postprocess threads; inference() drives the pipeline over an image list.
// Declarations only — implementations live in the corresponding .cpp.
class Yolov3CLIPDetector
  {
  public:
    Yolov3CLIPDetector();
    ~Yolov3CLIPDetector();
    // Load the offline model at ModelPath and set up ACL resources.
    bool init(const char * ModelPath);
    // Spawn the preprocess/execute/postprocess worker threads.
    void initThread();
    // Copy dataSize bytes from device memory to host-local memory.
    void *CopyDataDeviceToLocal(void* deviceData, uint32_t dataSize);
    // Fetch output buffer idx from inferenceOutput; itemDataSize receives its size.
    void* GetInferenceOutputItem(uint32_t& itemDataSize,aclmdlDataset* inferenceOutput,uint32_t idx);
    AclLiteError modelExecute();
    void Preprocess();
    void Postprocess();
    // Run the full pipeline over imageLists and return per-pair results.
    std::vector<message_result> inference(std::vector<std::string> &imageLists);
    void DestroyResource();
    AclLiteError CreateInput();
    void wait_for_completion();

  private:
    // All members are brace/zero-initialized here so a default-constructed
    // object is in a defined state even before init() runs.
    const char*  g_modelPath_ = nullptr;
    uint32_t  g_modelWidth_ = 256;        // model input width (pixels)
    uint32_t  g_modelHeight_ = 256;       // model input height (pixels)
    aclmdlDataset *input = nullptr;       // ACL input dataset
    aclmdlDataset *output = nullptr;      // ACL output dataset
    void* outputDeviceData0 = nullptr;    // device-side output buffer 0
    void* outputDeviceData1 = nullptr;    // device-side output buffer 1
    void* outputHostData0 = nullptr;      // host-side copy of output 0
    void* outputHostData1 = nullptr;      // host-side copy of output 1
    aclmdlDesc *modelDesc = nullptr;      // was uninitialized
    uint32_t modelId{};                   // was uninitialized
    uint32_t  modelInputSize{};           // was uninitialized
    void*     modelInput0Buf = nullptr;   // device buffer for model input 0
    void*     modelInput1Buf = nullptr;   // device buffer for model input 1
    aclrtRunMode  g_runMode_{};           // was uninitialized
    size_t buffer_size{};                 // was uninitialized
    // was non-standard glibc u_int32_t; uint32_t matches the rest of the file
    uint32_t preprocess_thread_num = 2;   // number of preprocess workers
    uint32_t postprocess_thread_num = 2;  // number of postprocess workers
    std::atomic<bool> done{false};        // signals workers to stop
    std::mutex results_mutex;             // guards `results`
    std::mutex output_mutex;              // guards model output buffers
    int task_counter = 0;                 // outstanding-task count for wait_for_completion
    std::vector<InferenceOutput> inferenceOutput;
    aclrtContext context{};               // was uninitialized
    int32_t deviceId = 0;                 // ACL device index
    std::condition_variable conda;        // used with task_counter — TODO confirm pairing in .cpp
    std::vector<message_result> results;  // accumulated pipeline results
    Queue<message_pre>queue_input;        // raw image pairs awaiting preprocess
    Queue<message_pre>queue_pre;          // preprocessed pairs awaiting execute
    Queue<message_result>queue_post;      // executed results awaiting postprocess
    std::vector<std::thread> threads;     // all worker threads
  };



#endif