#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <print>
#include <queue>
#include <stdexcept>
#include <thread>
#include <vector>

// A mutex-protected FIFO queue with a cooperative shutdown ("close") flag.
// Consumers block in pop() until an element arrives or the queue is closed;
// after close() the remaining elements can still be drained.
template <typename T>
class LockedQueue {
public:
    explicit LockedQueue() = default;

    // Closing on destruction wakes any consumer still blocked in pop().
    ~LockedQueue() {
        close();
    }

public:
    // Returns true once close() has been called.
    bool closed() const {
        return closed_.load(std::memory_order_relaxed);
    }

    // Closes the queue: no further pushes are accepted and every blocked
    // consumer is woken up.
    void close() {
        {
            // The flag must be flipped while holding the mutex; otherwise a
            // consumer that has just evaluated its wait predicate (and found
            // it false) could miss this notification and sleep forever — the
            // classic lost-wakeup race.
            std::lock_guard lock{mtx_};
            if (closed_.load(std::memory_order_relaxed)) return;
            closed_.store(true, std::memory_order_relaxed);
        }
        cv_.notify_all();
    }

    // Approximate element count (inherently racy; intended for monitoring).
    std::size_t size() const {
        return size_.load(std::memory_order_relaxed);
    }

    // True when no elements are queued (approximate — see size()).
    bool empty() const {
        return size() == 0;
    }

public:
    // Adds an element. Silently dropped once the queue is closed.
    void push(T val) {
        {
            std::lock_guard lock{mtx_};
            // Re-check under the lock so a push cannot slip in after close().
            if (closed_.load(std::memory_order_relaxed)) return;
            queue_.push(std::move(val));
            // Keep the counter in sync while still holding the lock so a
            // notified consumer can never observe a stale zero and go back
            // to sleep (the original incremented after unlocking).
            size_.fetch_add(1, std::memory_order_relaxed);
        }
        cv_.notify_one();
    }

    // Blocks until an element is available or the queue is closed and empty.
    // Returns false only when the queue is closed and fully drained.
    bool pop(T &val) {
        std::unique_lock lock{mtx_};
        // Wait on the real container state, not the atomic counter: both are
        // read under the mutex, making the predicate race-free.
        cv_.wait(lock, [&]() { return !queue_.empty() || closed(); });
        if (queue_.empty()) {
            return false;  // closed and drained
        }
        val = std::move(queue_.front());
        queue_.pop();
        size_.fetch_sub(1, std::memory_order_relaxed);
        return true;
    }

    // Non-blocking pop. Returns false when nothing was available.
    bool try_pop(T &val) {
        std::lock_guard lock{mtx_};
        // The emptiness test must happen under the lock: the original's
        // unlocked pre-check let two racing callers both pass it on a
        // single-element queue, and the loser then called front()/pop()
        // on an empty queue — undefined behavior.
        if (queue_.empty()) {
            return false;
        }
        val = std::move(queue_.front());
        queue_.pop();
        size_.fetch_sub(1, std::memory_order_relaxed);
        return true;
    }

private:
    // NOTE: members renamed from the original "__" prefix — identifiers
    // containing a double underscore are reserved to the implementation.
    std::mutex              mtx_;
    std::condition_variable cv_;
    std::queue<T>           queue_;
    std::atomic_size_t      size_{0};
    std::atomic_bool        closed_{false};
};


class ThreadPool {
public:
    static std::shared_ptr<ThreadPool> getInstance(std::size_t thread_num = std::thread::hardware_concurrency()) {
        std::call_once(__flag,[&]() {
            __instance = std::shared_ptr<ThreadPool>(new ThreadPool(thread_num));
        });
        return __instance;
    }

    ~ThreadPool() {
        close();
        for (auto& t:__threads) {
            if (t.joinable()) {
                t.join();
            }
        }
    }
public:
    void close() {
        __tasks.close();
    }

    bool closed() {
        return __tasks.closed();
    }


    template<typename F,typename ...Args>
    std::future<std::invoke_result_t<F,Args...>> addTask(F &&f,Args&& ...args) {
        using return_type = std::invoke_result_t<F,Args...>;
        auto task = std::make_shared<std::packaged_task<return_type()>>(
            std::bind(std::forward<F>(f),std::forward<Args>(args)...));
        auto res = task->get_future();
        __tasks.push([task](){(*task)();});
        return res;
    }

private:
    ThreadPool(std::size_t thread_num) {
        for (std::size_t i=0;i<thread_num;++i) {
            __threads.emplace_back([this]() {
                work();
            });
        }
    }


    void work() {
        std::function<void()> task;
        while (__tasks.pop(task)) {
            task();
        }
    }

private:
   static inline  std::shared_ptr<ThreadPool> __instance{nullptr};
    static  inline std::once_flag __flag;
    LockedQueue<std::function<void()>> __tasks;
    std::vector<std::thread> __threads;
};

int main() {
    // Schedule a batch of trivial tasks on the singleton pool and collect
    // one future per task.
    constexpr int kTaskCount = 10;

    auto pool = ThreadPool::getInstance();

    std::vector<std::future<int>> results;
    results.reserve(kTaskCount);
    for (int id = 0; id < kTaskCount; ++id) {
        results.emplace_back(pool->addTask([id]() { return id; }));
    }

    // get() blocks until the corresponding task has run on a worker thread.
    for (auto &fut : results) {
        std::cout << "get res :" << fut.get() << std::endl;
    }
    return 0;
}
