#ifndef TASK_MANAGER_H_
#define TASK_MANAGER_H_
#include <algorithm>
#include <any>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <ctime>
#include <fstream>
#include <functional>
#include <future>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <stdexcept>
#include <string>
#include <thread>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <variant>
#include <vector>

#include <unistd.h>

#include <nlohmann/json.hpp>

#ifdef linux
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>  // fixed: was <arpa/inet>, which is not a real header
#endif

//---------------------
namespace task_manager {

//----------------------
// Task implementation class: wraps a callable Fx(Arg) in a type-erased
// std::function and supports chained continuations via then/then_catch.
//----------------------
template <typename Fx, typename Arg>
class task_impl
{
public:
    // Constructor - accepts any callable (generic lambda capture).
    // NOTE(review): declared noexcept, but std::function construction may
    // allocate and throw for large callables; a throw here terminates —
    // confirm this is intended.
    template<typename Fn>
    explicit task_impl(Fn&& lambda) noexcept;

    // Virtual destructor so the class can be deleted polymorphically.
    virtual ~task_impl() = default;

    // then - build a new task that feeds this task's result to `lambda`.
    template<typename Fn>
    auto then(Fn&& lambda) const noexcept;

    // then_catch - build a new task that invokes `lambda` with any
    // std::exception thrown by this task.
    template<typename Fn>
    auto then_catch(Fn&& lambda) const noexcept;

    // Execute the stored callable with `x` and return its result.
    Fx run(const Arg& x = Arg{}) const;

protected:
    // Stored function object. NOTE(review): a const member disables
    // copy/move assignment for this class and all derived classes.
    const std::function<Fx(Arg)> func;
};

// 默认实现 - 未定义TASK_MANAGER_IMPL时抛出异常
#ifndef TASK_MANAGER_IMPL

template<typename Fx, typename Arg>
template<typename Fn>
task_impl<Fx, Arg>::task_impl(Fn&&) noexcept
{
    // sizeof...() is only valid on parameter packs; Fx is a plain type
    // parameter, so the original `sizeof...(Fx)` failed to parse at all.
    // Use a type-dependent always-false expression so the diagnostic fires
    // only when this stub is actually instantiated.
    static_assert(!std::is_same_v<Fx, Fx>, "Task manager not implemented. Define TASK_MANAGER_IMPL=1 to enable implementation.");
}

template<typename Fx, typename Arg>
template<typename Fn>
auto task_impl<Fx, Arg>::then(Fn&&) const noexcept
{
    // Dependent always-false condition: the original `sizeof...(Fx)` was
    // ill-formed because Fx is not a parameter pack.
    static_assert(!std::is_same_v<Fx, Fx>, "Task manager not implemented. Define TASK_MANAGER_IMPL=1 to enable implementation.");
}

template<typename Fx, typename Arg>
template<typename Fn>
auto task_impl<Fx, Arg>::then_catch(Fn&&) const noexcept
{
    // Dependent always-false condition: the original `sizeof...(Fx)` was
    // ill-formed because Fx is not a parameter pack.
    static_assert(!std::is_same_v<Fx, Fx>, "Task manager not implemented. Define TASK_MANAGER_IMPL=1 to enable implementation.");
}

// Stub implementation: always signals that the real implementation was
// not compiled in.
template<typename Fx, typename Arg>
Fx task_impl<Fx, Arg>::run(const Arg&) const
{
    throw std::runtime_error(
        "Task manager not implemented. Define TASK_MANAGER_IMPL=1 to enable implementation.");
}

#else // TASK_MANAGER_IMPL已定义情况

// Constructor: store the callable in the type-erased std::function.
// NOTE(review): marked noexcept, but std::function construction can
// allocate and throw for large callables — a throw would terminate.
template<typename Fx, typename Arg>
template<typename Fn>
task_impl<Fx, Arg>::task_impl(Fn&& lambda) noexcept : func{std::forward<Fn>(lambda)}
{}

// then: build a new task that pipes this task's result into `lambda`.
template<typename Fx, typename Arg>
template<typename Fn>
auto task_impl<Fx, Arg>::then(Fn&& lambda) const noexcept
{
    // Capture the stored function BY VALUE instead of capturing `this`:
    // the returned task can easily outlive *this (chains built from
    // temporaries are the common case), and the original `[this, ...]`
    // capture then dangled. Copying a std::function is cheap relative to
    // that hazard.
    return task_impl<decltype(lambda(std::declval<Fx>())), Arg> {
        [f = func, lambda = std::forward<Fn>(lambda)](const Arg& arg) {
            return lambda(f(arg));
        }
    };
}

// then_catch: build a new task that forwards this task's result, invoking
// `lambda` with the exception to produce a fallback value on failure.
template<typename Fx, typename Arg>
template<typename Fn>
auto task_impl<Fx, Arg>::then_catch(Fn&& lambda) const noexcept
{
    // Capture `func` by value rather than `this` (see then()): avoids a
    // dangling pointer when the returned task outlives the source task.
    return task_impl<Fx, Arg> {
        [f = func, lambda = std::forward<Fn>(lambda)](const Arg& arg) {
            try {
                return f(arg);
            }
            catch(const std::exception& e) {
                std::cerr << "Caught exception: " << e.what() << std::endl;
                // The handler must return a fallback value of type Fx.
                return lambda(e);
            }
        }
    };
}

// run: invoke the stored callable with `arg` and return its result.
// Any exception thrown by the callable propagates to the caller.
template<typename Fx, typename Arg>
Fx task_impl<Fx, Arg>::run(const Arg& arg) const
{
    return func(arg);
}

#endif // TASK_MANAGER_IMPL

//----------------------
// Task class - uses std::monostate as the (ignored) argument type, so a
// task wraps a zero-argument callable.
//----------------------
template<typename Fx>
class task : public task_impl<Fx, std::monostate>
{
public:
    // Constructor: adapts a zero-argument callable to the base's
    // one-argument interface.
    template<typename Fn>
    explicit task(Fn&& lambda);

    // Non-copyable: each task uniquely owns its callable.
    task(const task&) = delete;
    task& operator=(const task&) = delete;

    // Movable. NOTE(review): the base class holds a const std::function
    // member, so the defaulted move-assignment is implicitly deleted and
    // the move constructor falls back to the base's copy — confirm moves
    // actually compile at the call sites that need them.
    task(task&&) noexcept = default;
    task& operator=(task&&) noexcept = default;
};

// task constructor: adapt the zero-argument callable to the base class's
// Fx(std::monostate) signature; the monostate argument is ignored.
template<typename Fx>
template<typename Fn>
task<Fx>::task(Fn&& lambda)
    : task_impl<Fx, std::monostate>{
          [fn = std::forward<Fn>(lambda)](auto&&) { return fn(); }
      }
{}


//----------------------
// Extension - asynchronous task: the wrapped callable is dispatched via
// std::async on a separate thread, so running the task yields a
// std::future<Fx>.
//----------------------
template<typename Fx>
class async_task : public task<std::future<Fx>>
{
public:
    template<typename Fn>
    explicit async_task(Fn&& lambda)
        : task<std::future<Fx>>{
              [lambda = std::forward<Fn>(lambda)]() mutable {
                  return std::async(std::launch::async,
                                    [lambda = std::move(lambda)]() mutable { return lambda(); });
              }
          }
    {}
};

// A task that waits a fixed amount of time before invoking its callable.
class delayed_task
{
public:
    template<typename Fn>
    explicit delayed_task(Fn&& lambda, std::chrono::milliseconds delay)
        : func_{std::forward<Fn>(lambda)}
        , delay_{delay}
    {}

    // Block the calling thread for the configured delay, then run the task.
    void run() const
    {
        std::this_thread::sleep_for(delay_);
        func_();
    }

private:
    std::function<void()> func_;       // the deferred work
    std::chrono::milliseconds delay_;  // how long run() sleeps first
};

// A callable bundled with an integer priority value.
class priority_task
{
public:
    using TaskFunction = std::function<void()>;

    explicit priority_task(int priority, TaskFunction func)
        : priority_{priority}
        , func_{std::move(func)}
    {}

    // Priority accessor.
    int priority() const { return priority_; }

    // Invoke the wrapped callable.
    void operator()() const { func_(); }

private:
    int priority_;       // scheduling priority
    TaskFunction func_;  // the work to perform
};

// Holds plain (priority, callable) pairs and runs the highest-priority one
// on demand. Higher numeric priority wins; ties run in insertion order.
class priority_task_manager
{
public:
    // Register a task with the given priority.
    void add_task(int priority, std::function<void()> func)
    {
        // Move instead of copying the std::function; emplace in place.
        tasks_.emplace_back(priority, std::move(func));
    }

    // Run and remove the task with the largest priority value.
    // No-op when no tasks are queued.
    void run_highest_priority_task()
    {
        if (tasks_.empty()) return;

        auto it = std::max_element(tasks_.begin(), tasks_.end(),
            [](const auto& a, const auto& b) { return a.first < b.first; });

        // Move the callable out and erase BEFORE invoking it: the original
        // invoked first and erased afterwards, so a task that called
        // add_task() reallocated the vector and left `it` dangling (UB at
        // erase); a throwing task also left itself queued forever.
        auto func = std::move(it->second);
        tasks_.erase(it);
        func();
    }

private:
    std::vector<std::pair<int, std::function<void()>>> tasks_;
};


// A task whose body receives a callback through which it can report
// progress values while it runs.
class progress_task
{
public:
    using ProgressCallback = std::function<void(double)>;

    template<typename Fn>
    progress_task(Fn&& func, ProgressCallback progress_cb)
        : func_{std::forward<Fn>(func)}
        , progress_cb_{progress_cb}
    {}

    // Execute the task, handing it the progress callback to invoke.
    void run()
    {
        func_(progress_cb_);
    }

private:
    std::function<void(ProgressCallback)> func_;  // work, parameterised on the callback
    ProgressCallback progress_cb_;                // receives progress updates
};


// A repeating task that polls its callable until either the callable
// returns false or cancel() is requested.
class cancelable_task
{
public:
    template<typename Fn>
    explicit cancelable_task(Fn&& func)
        : func_{std::forward<Fn>(func)}
        , cancel_flag_{false}
    {}

    // Launch the polling loop on a separate thread; the callable runs
    // every 100 ms until it returns false or cancel() is called.
    // NOTE(review): the worker captures `this`, so this object must
    // outlive the returned future's completion.
    std::future<void> start()
    {
        return std::async(std::launch::async, [this] {
            for (;;) {
                if (cancel_flag_) break;
                if (!func_()) break;
                std::this_thread::sleep_for(std::chrono::milliseconds(100));
            }
        });
    }

    // Ask the polling loop to stop at its next iteration.
    void cancel()
    {
        cancel_flag_ = true;
    }

private:
    std::function<bool()> func_;     // returns false to stop the loop
    std::atomic<bool> cancel_flag_;  // set by cancel()
};


// Groups a callable with child groups; run_group() executes children
// depth-first, then this group's own task. Each group runs at most once.
// NOTE(review): cyclic add_group() relationships recurse forever — the
// caller must keep the group graph acyclic.
class task_group
{
public:
    template<typename Fn>
    explicit task_group(Fn&& func)
        : func_{std::forward<Fn>(func)}
        , completed_{false}
    {}

    // Run this group's own task once (idempotent). The work is launched
    // on a worker thread and awaited synchronously before returning.
    void run()
    {
        if (completed_) return;

        auto pending = std::async(std::launch::async, [this] { func_(); });
        pending.wait();

        completed_ = true;
    }

    // Attach a child group (non-owning pointer; caller manages lifetime).
    void add_group(task_group* group)
    {
        sub_groups_.push_back(group);
    }

    // Recursively run all child groups, then run this group's own task.
    void run_group()
    {
        if (completed_) return;

        for (auto* child : sub_groups_) {
            child->run_group();
        }

        run();
    }

private:
    std::function<void()> func_;           // this group's own work
    std::vector<task_group*> sub_groups_;  // children, run before our own task
    bool completed_;                       // guards against re-running
};


// A task with explicit prerequisite tasks: run() first runs every
// incomplete dependency, then the task itself. Each task runs at most once.
class task_dependency
{
public:
    using TaskFunction = std::function<void()>;

    explicit task_dependency(TaskFunction func)
        : func_{std::move(func)}, completed_{false}
    {}

    // Register a prerequisite (non-owning pointer; caller manages lifetime).
    void add_dependency(task_dependency* dep)
    {
        dependencies_.insert(dep);
    }

    // Run all incomplete dependencies, then this task.
    // Throws std::runtime_error if the dependency graph contains a cycle:
    // the original recursed unboundedly (stack overflow) on cycles because
    // completed_ is only set after all dependencies finish.
    void run()
    {
        if (completed_) return;
        if (running_) {
            throw std::runtime_error("task_dependency: dependency cycle detected");
        }

        running_ = true;
        try {
            // Run prerequisites first (unordered — no order guarantee
            // between independent dependencies).
            for (auto* dep : dependencies_) {
                if (!dep->completed_) {
                    dep->run();
                }
            }

            func_();
            completed_ = true;
        } catch (...) {
            running_ = false;
            throw;
        }
        running_ = false;
    }

private:
    TaskFunction func_;
    std::unordered_set<task_dependency*> dependencies_;
    bool completed_;
    bool running_{false};  // re-entrancy marker used for cycle detection
};



// A task with an ordered list of sub-tasks; run_sequence() executes the
// sub-tasks (recursively, in insertion order) first, then this task.
// Each task runs at most once.
// NOTE(review): cyclic add_task() links recurse forever — keep acyclic.
class sequential_task
{
public:
    template<typename Fn>
    explicit sequential_task(Fn&& func)
        : func_{std::forward<Fn>(func)}
        , completed_{false}
    {}

    // Execute this task's own work once; launched on a worker thread and
    // awaited synchronously.
    void run()
    {
        if (completed_) return;

        auto pending = std::async(std::launch::async, [this] { func_(); });
        pending.wait();

        completed_ = true;
    }

    // Append a sub-task (non-owning pointer; caller manages lifetime).
    void add_task(sequential_task* task)
    {
        tasks_.push_back(task);
    }

    // Recursively run every sub-task in insertion order, then run().
    void run_sequence()
    {
        if (completed_) return;

        for (auto* sub : tasks_) {
            sub->run_sequence();
        }

        run();
    }

private:
    std::function<void()> func_;           // this task's own work
    std::vector<sequential_task*> tasks_;  // sub-tasks, run before our own
    bool completed_;                       // guards against re-running
};


// Polling scheduler: tasks are registered with a delay and executed by a
// background thread that wakes every 100 ms and runs everything due.
class task_scheduler
{
public:
    explicit task_scheduler() : running_{true} {}

    // Stop the polling loop and join the worker; tasks not yet due are
    // discarded.
    ~task_scheduler()
    {
        running_ = false;
        if (scheduler_thread_.joinable()) {
            scheduler_thread_.join();
        }
    }

    // Register `func` to run no earlier than now + delay. The actual
    // start may lag by up to one 100 ms polling interval.
    template<typename Fn>
    void schedule(Fn&& func, std::chrono::milliseconds delay)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        tasks_.emplace(std::chrono::steady_clock::now() + delay, std::forward<Fn>(func));
    }

    // Launch the background polling thread. Idempotent: the original
    // assigned to an already-running std::thread on a second call, which
    // calls std::terminate.
    void start()
    {
        if (scheduler_thread_.joinable()) return;

        scheduler_thread_ = std::thread([this] {
            while (running_) {
                std::vector<std::function<void()>> ready_tasks;

                {
                    // Collect everything due under the lock; run it outside
                    // so user tasks never execute while holding mutex_.
                    std::lock_guard<std::mutex> lock(mutex_);
                    auto now = std::chrono::steady_clock::now();
                    auto it = tasks_.begin();
                    while (it != tasks_.end()) {
                        if (it->first <= now) {
                            ready_tasks.push_back(std::move(it->second));
                            it = tasks_.erase(it);
                        } else {
                            ++it;
                        }
                    }
                }

                for (auto& task : ready_tasks) {
                    task();
                }

                std::this_thread::sleep_for(std::chrono::milliseconds(100));
            }
        });
    }

private:
    std::thread scheduler_thread_;
    std::mutex mutex_;
    std::multimap<std::chrono::steady_clock::time_point, std::function<void()>> tasks_;
    std::atomic<bool> running_;
};


// Runs a callable while timing it, and reports the measured duration.
class task_analyzer
{
public:
    using TaskFunction = std::function<void()>;

    explicit task_analyzer(TaskFunction func)
        : func_{std::move(func)}, execution_time_{0}
    {}

    // Execute the task, recording its wall-clock duration in microseconds.
    void run()
    {
        const auto started = std::chrono::high_resolution_clock::now();
        func_();
        const auto finished = std::chrono::high_resolution_clock::now();

        execution_time_ =
            std::chrono::duration_cast<std::chrono::microseconds>(finished - started).count();
        std::cout << "Task completed in " << execution_time_ << " microseconds" << std::endl;
    }

    // Print the duration recorded by the most recent run().
    void analyze()
    {
        std::cout << "Task analysis:" << std::endl;
        std::cout << "Execution time: " << execution_time_ << " microseconds" << std::endl;
    }

private:
    TaskFunction func_;         // the work under measurement
    long long execution_time_;  // last run's duration, microseconds
};


// Fixed-size worker pool driven by a condition variable. Pending tasks
// are drained before the workers exit at destruction.
class concurrent_task_scheduler
{
public:
    explicit concurrent_task_scheduler(int num_threads = std::thread::hardware_concurrency())
        : running_{true}
    {
        // hardware_concurrency() may legally return 0; always keep at
        // least one worker so scheduled tasks cannot wait forever.
        if (num_threads < 1) num_threads = 1;

        for (int i = 0; i < num_threads; ++i) {
            workers_.emplace_back([this] {
                // Loop until shutdown AND the queue is empty, so tasks
                // queued before destruction still run (the original's
                // `while (running_)` could exit with work still queued).
                while (true) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(mutex_);
                        condition_.wait(lock, [this] { return !running_ || !tasks_.empty(); });
                        if (!running_ && tasks_.empty()) return;
                        task = std::move(tasks_.front());
                        tasks_.pop();
                    }
                    task();
                }
            });
        }
    }

    // Shut down: workers finish the remaining queue, then are joined.
    ~concurrent_task_scheduler()
    {
        {
            // Flip the flag while holding the mutex: setting it outside
            // the lock (as the original did) can race a worker between its
            // predicate check and the actual wait, losing the wakeup.
            std::lock_guard<std::mutex> lock(mutex_);
            running_ = false;
        }
        condition_.notify_all();
        for (auto& worker : workers_) {
            worker.join();
        }
    }

    // Enqueue a task; one waiting worker is woken after the lock drops.
    template<typename Fn>
    void schedule(Fn&& func)
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            tasks_.push(std::forward<Fn>(func));
        }
        condition_.notify_one();
    }

private:
    std::vector<std::thread> workers_;
    std::queue<std::function<void()>> tasks_;
    std::mutex mutex_;
    std::condition_variable condition_;
    std::atomic<bool> running_;
};

#if 0
// NOTE(review): currently compiled out (#if 0). As written this class can
// never return data: set_local_data() and get_local_data() each declare
// their OWN function-local `thread_local` map, so values stored by the
// setter are invisible to the getter. It also uses std::type_index
// without <typeindex> being included — confirm before enabling.
class thread_local_storage
{
public:
    // Store a T constructed from args, keyed by its type — but only into
    // this function's private map (see class note).
    template<typename T, typename... Args>
    void set_local_data(Args&&... args)
    {
        thread_local std::unordered_map<std::type_index, std::any> local_data;
        local_data[typeid(T)] = std::any(T(std::forward<Args>(args)...));
    }

    // Look up a T — in a DIFFERENT map from the setter's, so this always
    // throws in practice (see class note).
    template<typename T>
    T& get_local_data()
    {
        thread_local std::unordered_map<std::type_index, std::any> local_data;
        auto it = local_data.find(typeid(T));
        if (it == local_data.end()) {
            throw std::runtime_error("Local data not found");
        }
        return std::any_cast<T&>(it->second);
    }
};
#endif

// Classic fixed-size thread pool; pending tasks are drained on destruction.
class thread_pool
{
public:
    explicit thread_pool(size_t num_threads = std::thread::hardware_concurrency())
    {
        // hardware_concurrency() may legally report 0 — keep one worker so
        // enqueued tasks cannot sit in the queue forever.
        if (num_threads == 0) num_threads = 1;

        for (size_t i = 0; i < num_threads; ++i) {
            workers_.emplace_back([this] {
                while (true) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(mutex_);
                        condition_.wait(lock, [this] { return stop_ || !tasks_.empty(); });
                        if (stop_ && tasks_.empty()) return;
                        task = std::move(tasks_.front());
                        tasks_.pop();
                    }
                    task();  // run outside the lock
                }
            });
        }
    }

    // Stop accepting work, let workers finish the queue, join them all.
    ~thread_pool()
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            stop_ = true;
        }
        condition_.notify_all();
        for (auto& worker : workers_) {
            worker.join();
        }
    }

    // Add a task to the queue and wake one worker.
    template<typename Fn>
    void enqueue(Fn&& func)
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            tasks_.push(std::forward<Fn>(func));
        }
        // Notify after releasing the lock so the woken worker does not
        // immediately block re-acquiring the mutex (the original notified
        // while still holding it).
        condition_.notify_one();
    }

private:
    std::vector<std::thread> workers_;
    std::queue<std::function<void()>> tasks_;
    std::mutex mutex_;
    std::condition_variable condition_;
    bool stop_{false};  // written/read only under mutex_
};


// A mutex-guarded linear pipeline of T -> T transformations.
template<typename T>
class thread_safe_task_chain
{
public:
    // Begin the chain with the first transformation.
    template<typename Fn>
    void start_chain(Fn&& func)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        chain_.push_back([func = std::forward<Fn>(func)](T input) -> T {
            return func(input);
        });
    }

    // Append a further transformation to the chain.
    // The original routed the already-typed T through std::any and back
    // via any_cast — a pointless copy/allocation that also required T to
    // be copy-constructible; the value is simply passed straight through.
    template<typename Fn>
    void then(Fn&& func)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        chain_.push_back([func = std::forward<Fn>(func)](T input) -> T {
            return func(input);
        });
    }

    // Feed `initial_value` through every stage in order and return the
    // final result. NOTE: holds the lock while user code runs — stages
    // must not call back into this object or they will deadlock.
    T execute_chain(T initial_value)
    {
        T result = initial_value;
        std::lock_guard<std::mutex> lock(mutex_);
        for (auto& stage : chain_) {
            result = stage(result);
        }
        return result;
    }

private:
    std::vector<std::function<T(T)>> chain_;
    mutable std::mutex mutex_;
};


// Times callables and accumulates duration statistics across threads.
class thread_safe_task_analyzer
{
public:
    // Run `func` under the execution mutex, timing it, and record the
    // duration. Note: mutex_ serializes the measured tasks themselves.
    template<typename Fn>
    void analyze_task(Fn&& func)
    {
        std::lock_guard<std::mutex> run_lock(mutex_);
        const auto started = std::chrono::high_resolution_clock::now();
        func();
        const auto finished = std::chrono::high_resolution_clock::now();
        const auto elapsed_us =
            std::chrono::duration_cast<std::chrono::microseconds>(finished - started).count();

        {
            std::lock_guard<std::mutex> stats_lock(stats_mutex_);
            task_durations_.push_back(elapsed_us);
        }
    }

    // Print count / average / total of all recorded durations.
    void print_statistics()
    {
        std::lock_guard<std::mutex> stats_lock(stats_mutex_);
        if (task_durations_.empty()) {
            std::cout << "No tasks analyzed" << std::endl;
            return;
        }

        double total = 0.0;
        for (const auto duration : task_durations_) {
            total += duration;
        }
        const double average = total / task_durations_.size();

        std::cout << "Task analysis statistics:" << std::endl;
        std::cout << "Total tasks: " << task_durations_.size() << std::endl;
        std::cout << "Average duration: " << average << " microseconds" << std::endl;
        std::cout << "Total duration: " << total << " microseconds" << std::endl;
    }

private:
    mutable std::mutex mutex_;        // serializes task execution
    mutable std::mutex stats_mutex_;  // guards task_durations_
    std::vector<long long> task_durations_;
};

#if 0
// NOTE(review): compiled out via #if 0 — and it will not compile if
// enabled:
//  - monitor_task() is declared void yet ends with `return wrapped_func;`
//    (it presumably meant to return the wrapper for later execution).
//  - The constructor opens the log twice: the member initializer
//    log_file_{log_file} already opens the file, so the subsequent open()
//    on an already-open stream sets failbit.
class task_monitor
{
public:
    explicit task_monitor(const std::string& log_file = "task_log.txt")
        : log_file_{log_file}
    {
        // Redundant: the stream is already open (see class note).
        log_file_.open(log_file, std::ios::app);
    }

    ~task_monitor()
    {
        if (log_file_.is_open()) {
            log_file_.close();
        }
    }

    // Wrap `func` with start/success/failure/duration logging.
    template<typename Fn>
    void monitor_task(const std::string& task_name, Fn&& func)
    {
        auto wrapped_func = [this, task_name = task_name, func = std::forward<Fn>(func)]() mutable {
            auto start = std::chrono::high_resolution_clock::now();
            log("Task '" + task_name + "' started");

            try {
                func();
                log("Task '" + task_name + "' completed successfully");
            } catch (const std::exception& e) {
                log("Task '" + task_name + "' failed with exception: " + e.what());
                throw;
            } catch (...) {
                log("Task '" + task_name + "' failed with unknown exception");
                throw;
            }

            auto end = std::chrono::high_resolution_clock::now();
            auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
            log("Task '" + task_name + "' execution time: " + std::to_string(duration) + " microseconds");
        };

        // Invalid in a void function — see class note.
        return wrapped_func;
    }

    // Append a timestamped message to the log file (mutex-guarded).
    void log(const std::string& message)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        if (log_file_.is_open()) {
            auto now = std::time(nullptr);
            char buffer[80];
            std::strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", std::localtime(&now));
            log_file_ << buffer << " - " << message << std::endl;
            log_file_.flush();
        }
    }

private:
    std::ofstream log_file_;
    mutable std::mutex mutex_;
};
#endif

#ifdef linux
// Linux-only sketch of a distributed task manager: a pool of local worker
// threads plus a TCP server that accepts tasks from remote peers. Task
// (de)serialization is stubbed out — see enqueue_remote()/handle_client().
class distributed_task_manager
{
public:
    distributed_task_manager(int port, int num_threads = std::thread::hardware_concurrency())
        : port_{port}, running_{true}
    {
        // Start the local worker threads that drain task_queue_.
        for (int i = 0; i < num_threads; ++i) {
            worker_threads_.emplace_back([this] {
                while (running_) {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(mutex_);
                        condition_.wait(lock, [this] { return !running_ || !task_queue_.empty(); });
                        if (!running_ && task_queue_.empty()) return;
                        task = std::move(task_queue_.front());
                        task_queue_.pop();
                    }
                    task();
                }
            });
        }

        // Start the server/accept thread.
        // NOTE(review): accept() blocks indefinitely, so the destructor can
        // hang in join() until one more connection arrives — confirm the
        // intended shutdown story before relying on this.
        server_thread_ = std::thread([this] {
            int server_fd = socket(AF_INET, SOCK_STREAM, 0);
            if (server_fd < 0) {
                std::cerr << "Failed to create socket" << std::endl;
                return;
            }

            sockaddr_in address{};
            address.sin_family = AF_INET;
            address.sin_addr.s_addr = INADDR_ANY;
            address.sin_port = htons(port_);

            if (bind(server_fd, reinterpret_cast<sockaddr*>(&address), sizeof(address)) < 0) {
                std::cerr << "Failed to bind socket" << std::endl;
                close(server_fd);
                return;
            }

            if (listen(server_fd, 5) < 0) {
                std::cerr << "Failed to listen on socket" << std::endl;
                close(server_fd);
                return;
            }

            std::cout << "Server listening on port " << port_ << std::endl;

            while (running_) {
                sockaddr_in client_addr{};
                socklen_t client_len = sizeof(client_addr);
                int client_fd = accept(server_fd, reinterpret_cast<sockaddr*>(&client_addr), &client_len);
                if (client_fd < 0) {
                    continue;
                }

                // NOTE(review): the detached thread captures `this`; it must
                // not outlive this object or it dangles.
                std::thread([this, client_fd] {
                    handle_client(client_fd);
                }).detach();
            }

            close(server_fd);
        });
    }

    // Stop workers and join all owned threads; still-queued tasks may be
    // dropped (workers exit as soon as running_ is false).
    ~distributed_task_manager()
    {
        running_ = false;
        condition_.notify_all();
        for (auto& worker : worker_threads_) {
            if (worker.joinable()) {
                worker.join();
            }
        }
        if (server_thread_.joinable()) {
            server_thread_.join();
        }
    }

    // Queue a task for the local worker pool.
    template<typename Fn>
    void enqueue_local(Fn&& func)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        task_queue_.push(std::forward<Fn>(func));
        condition_.notify_one();
    }

    // Send a task to a peer at `host` (assumed to listen on our port_).
    template<typename Fn>
    void enqueue_remote(const std::string& host, Fn&& func)
    {
        int client_fd = socket(AF_INET, SOCK_STREAM, 0);
        if (client_fd < 0) {
            std::cerr << "Failed to create socket" << std::endl;
            return;
        }

        sockaddr_in server_addr{};
        server_addr.sin_family = AF_INET;
        server_addr.sin_port = htons(port_);
        if (inet_pton(AF_INET, host.c_str(), &server_addr.sin_addr) <= 0) {
            std::cerr << "Invalid address" << std::endl;
            close(client_fd);
            return;
        }

        if (connect(client_fd, reinterpret_cast<sockaddr*>(&server_addr), sizeof(server_addr)) < 0) {
            std::cerr << "Connection failed" << std::endl;
            close(client_fd);
            return;
        }

        // Serialize the function and send it to the remote server.
        // Simplified placeholder — a real deployment needs an actual
        // serialization mechanism (note `func` is currently ignored).
        std::string serialized_task = "[serialized_task_data]";
        send(client_fd, serialized_task.c_str(), serialized_task.size(), 0);
        close(client_fd);
    }

private:
    // Read one request from a client and enqueue a placeholder task.
    void handle_client(int client_fd)
    {
        char buffer[1024] = {0};
        // NOTE(review): read()'s return value is ignored; the buffer
        // contents are currently unused anyway.
        read(client_fd, buffer, 1024);

        // Deserialize the task and execute it.
        // Simplified placeholder — a fixed demo task is enqueued instead.
        std::function<void()> task = [] {
            std::cout << "Remote task executed" << std::endl;
        };

        enqueue_local(task);
        close(client_fd);
    }

    int port_;
    std::vector<std::thread> worker_threads_;
    std::thread server_thread_;
    std::queue<std::function<void()>> task_queue_;
    std::mutex mutex_;
    std::condition_variable condition_;
    std::atomic<bool> running_;
};
#endif // linux


// Task queue whose bookkeeping (id, completed flag, result string) is
// persisted as JSON. Callables themselves are NOT serialized: entries
// loaded from disk are restored with a no-op function (see load_tasks).
class persistent_task
{
public:
    // Loads any previously persisted task records from `storage_file`.
    explicit persistent_task(const std::string& storage_file)
        : storage_file_{storage_file}
    {
        load_tasks();
    }

    // Register a task under the next free id and persist the new state.
    template<typename Fn>
    void enqueue(Fn&& func)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        auto task_id = next_task_id_++;
        tasks_[task_id] = {std::forward<Fn>(func), false, ""};
        save_tasks();
    }

    // Run every incomplete task (unordered_map iteration order, i.e. NOT
    // id order), recording success or the exception message, and persist
    // after each one.
    void run_all()
    {
        std::lock_guard<std::mutex> lock(mutex_);
        for (auto& [id, task] : tasks_) {
            if (!task.completed) {
                try {
                    task.func();
                    task.completed = true;
                    task.result = "Success";
                } catch (const std::exception& e) {
                    task.result = "Error: " + std::string(e.what());
                }
                save_tasks();
            }
        }
    }

    // Print the stored result string for one task id.
    void print_task_status(int task_id)
    {
        std::lock_guard<std::mutex> lock(mutex_);
        auto it = tasks_.find(task_id);
        if (it != tasks_.end()) {
            auto& task = it->second;
            std::cout << "Task " << task_id << ": " << task.result << std::endl;
        } else {
            std::cout << "Task " << task_id << " not found" << std::endl;
        }
    }

private:
    struct TaskInfo {
        std::function<void()> func;  // not persisted
        bool completed;
        std::string result;
    };

    // Load persisted records. A missing file means a fresh start. A
    // corrupt or partially written file is reported and ignored instead of
    // letting the parse exception escape the constructor (the original
    // threw out of the constructor on any malformed storage file).
    void load_tasks()
    {
        std::ifstream file(storage_file_);
        if (!file.is_open()) {
            return;
        }

        try {
            nlohmann::json json_data;
            file >> json_data;
            file.close();

            for (auto& [id, task_data] : json_data.items()) {
                int task_id = task_data["id"];
                bool completed = task_data["completed"];
                std::string result = task_data["result"];

                // Callables cannot be serialized; restore a no-op stub.
                tasks_[task_id] = {[] {}, completed, result};
                next_task_id_ = std::max(next_task_id_, task_id + 1);
            }
        } catch (const std::exception& e) {
            std::cerr << "persistent_task: ignoring unreadable storage file '"
                      << storage_file_ << "': " << e.what() << std::endl;
        }
    }

    // Serialize all bookkeeping (not the callables) to the storage file.
    void save_tasks()
    {
        nlohmann::json json_data;
        using std::to_string;
        for (auto& [id, task] : tasks_) {
            json_data[to_string(id)] = {
                {"id", id},
                {"completed", task.completed},
                {"result", task.result}
            };
        }

        std::ofstream file(storage_file_);
        if (file.is_open()) {
            file << json_data.dump(4);
            file.close();
        }
    }

    std::string storage_file_;
    mutable std::mutex mutex_;
    std::unordered_map<int, TaskInfo> tasks_;
    int next_task_id_{1};
};


// Wraps a callable and retries it on std::exception, up to max_retries
// additional attempts, sleeping retry_delay between attempts. Rethrows
// once the retry budget is exhausted.
class task_with_retry
{
public:
    template<typename Fn>
    task_with_retry(Fn&& func, int max_retries = 0, std::chrono::milliseconds retry_delay = std::chrono::milliseconds(100))
        : func_{std::forward<Fn>(func)}, max_retries_{max_retries}, retry_delay_{retry_delay}
    {}

    // Run the task, retrying on failure.
    void run()
    {
        for (int retries = 0;; ++retries) {
            try {
                func_();
                return;
            } catch (const std::exception& e) {
                if (retries >= max_retries_) {
                    std::cerr << "Task failed after " << retries + 1 << " attempts: " << e.what() << std::endl;
                    throw;
                }
                std::cerr << "Task failed, retrying in " << retry_delay_.count() << " ms: " << e.what() << std::endl;
                std::this_thread::sleep_for(retry_delay_);
            }
        }
    }

private:
    std::function<void()> func_;             // the work to attempt
    int max_retries_;                        // extra attempts after the first
    std::chrono::milliseconds retry_delay_;  // pause between attempts
};

#if 0
// NOTE(review): compiled out via #if 0 — and it cannot compile if enabled:
//  - std::greater<std::pair<int, Task>> needs operator< over the pair,
//    but Task (std::function) has no comparison operators, so the
//    priority_queue operations fail to compile once instantiated.
//  - std::greater also makes this a MIN-heap: the LOWEST priority value
//    would be popped first, contradicting the class's apparent intent.
//  - running_ is cleared in the destructor without holding mutex_, which
//    can lose the shutdown notification (a worker may sleep forever).
class priority_task_queue
{
public:
    using Task = std::function<void()>;

    explicit priority_task_queue(int num_threads = std::thread::hardware_concurrency())
        : running_{true}
    {
        for (int i = 0; i < num_threads; ++i) {
            workers_.emplace_back([this] {
                while (running_) {
                    Task task;
                    {
                        std::unique_lock<std::mutex> lock(mutex_);
                        condition_.wait(lock, [this] { return !running_ || !tasks_.empty(); });
                        if (!running_ && tasks_.empty()) return;
                        // top() returns const& — std::move here still copies.
                        task = std::move(tasks_.top().second);
                        tasks_.pop();
                    }
                    task();
                }
            });
        }
    }

    ~priority_task_queue()
    {
        running_ = false;
        condition_.notify_all();
        for (auto& worker : workers_) {
            worker.join();
        }
    }

    // Queue a task with a priority (see class note about heap direction).
    void enqueue(Task task, int priority = 0)
    {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            tasks_.emplace(priority, std::move(task));
        }
        condition_.notify_one();
    }

private:
    std::vector<std::thread> workers_;
    std::priority_queue<std::pair<int, Task>, std::vector<std::pair<int, Task>>, std::greater<std::pair<int, Task>>> tasks_;
    std::mutex mutex_;
    std::condition_variable condition_;
    std::atomic<bool> running_;
};
#endif



} // namespace task_manager

#endif // TASK_MANAGER_H_





// compile ref link: https://zhuanlan.zhihu.com/p/1900602162192573476
// g++ .\task_manager.h -o .\task_manager.o -std=c++20
// g++ .\task_manager.h  -std=c++20 -fPIC -shared -o libtask.so -DTASK_MANAGER_IMPL=1
// g++ .\task_manager.h -std=c++20 -fPIC -static -o libtask.lib -DTASK_MANAGER_IMPL=1


// ref links:
// map: https://c.biancheng.net/view/7190.html