#ifndef THREADPOOL_H
#define THREADPOOL_H


#include <atomic>
#include <condition_variable>
#include <deque>
#include <functional>
#include <future>
#include <map>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>

#include <QMutex>
#include <QScopedPointer>

#include "MU_API.h"
#include "coroutine.h"





/* Resident (permanently running) background threads */
/// Identifiers for the resident background threads managed by the pool.
/// (Enumerator names and values are part of the public interface and are
/// kept exactly as-is, including the historical "CONTINOUS" spelling.)
typedef enum THREAD_INDEX_HOLD {
    MU_THREAD_NO       = 0,  ///< placeholder / no thread
    MU_THREAD_LOG,           ///< background logging thread
    MU_THREAD_CONTINOUS,     ///< multi-point continuous-motion thread
    MU_THREAD_EXP,           ///< exception management
    MU_THREAD_SAVE,          ///< background image storage
    MU_THREAD_UPDATE,        ///< background refresh thread (RobotPos + image)
    MU_THREAD_WATCHDOG,      ///< watchdog
    MU_THREAD_CHECK,         ///< check thread with embedded coroutine
} THREAD_INDEX;


typedef std::future<MU_RETURNS>          FUTURE_Async_Fun;

//template< typename T>
//std::function<MU_RETURNS(T y)> task_type;

//template< typename T,typename... Args> std::function<MU_RETURNS(T &t,Args&&... args)> task_type;        /// 函数智能指针


class ThreadPool
{
public:
    ThreadPool();

public:
    static ThreadPool* GetInstance(){
        static QMutex mutex;
        static QScopedPointer<ThreadPool> inst;
        if (Q_UNLIKELY(!inst)) {
            mutex.lock();
            if (!inst) {
                inst.reset(new ThreadPool);
            }
            mutex.unlock();
        }
        return inst.data();
    }

    ~ThreadPool()
    {
        cond_.notify_all();
    }


    /**
     * @brief pushPermanentAsync
     * @param index
     * @param async_thread
     * @details 计入后台线程到map容器
     */
    void pushPermanentAsync(THREAD_INDEX index,std::thread *async_thread){
        permanentThread[index] = async_thread;
        async_thread->detach();
    }


    template<typename R>
    R getTempAsync(){
        R ret = std::move(asyncThread.front()) ;
        asyncThread.pop_front();
        return ret;
    }



private:
    std::atomic<bool> stop_;
    std::mutex mtx_;
    std::condition_variable cond_;

    std::map<THREAD_INDEX,std::thread *> permanentThread;     ///后台常驻线程
    std::deque<FUTURE_Async_Fun> asyncThread;                   /// 后台非常驻异步线程
    std::vector<std::thread> syncThread;                    /// 线程池的存储对象

//    template<class Function, class... Args>
//    int addAsync(Function&& fcn, Args&&... args)
//    {
//        std::thread async_thread(std::bind(std::forward<Function>(fcn), std::forward<Args>(args)...), this);

//        //asyncThread.emplace
//        //cond_.notify_one();
//        return asyncThread.size();
//    }
//    template<class Function, class... Args>
//    int addSync(Function&& fcn, Args&&... args)
//    {
//        std::thread sync_thread(std::bind(std::forward<Function>(fcn), std::forward<Args>(args)...), this);

//        return syncThread.size();
//    }

//    template<class Function, class... Args>
//    void add(Function&& fcn, Args&&... args)
//    {
//        typedef typename std::result_of<Function(Args...)>::type return_type;
//        typedef std::packaged_task<return_type()> task;
//        auto t = std::make_shared<task>(std::bind(std::forward<Function>(fcn), std::forward<Args>(args)...));

//        {
//            std::lock_guard<std::mutex> lg(mtx_);
//            if (stop_.load(std::memory_order_acquire))
//                throw std::runtime_error("thread pool has stopped");
//            asyncThread.emplace([t]{(*t)(); });
//        }
//        cond_.notify_one();
//    }
};





#endif // THREADPOOL_H
