#ifndef THREADPOOL_H
#define THREADPOOL_H
/**
 * @file ThreadPool.h
 * @brief Header file for the ThreadPool class.
 * 
 * This file contains the declaration of the ThreadPool class, which is a versatile and efficient 
 * thread pool implementation for managing and executing a large number of tasks concurrently. 
 * The ThreadPool class is capable of handling any callable function, including lambda expressions, 
 * bound functions, or function objects, providing an easy and efficient way to manage parallel task execution.
 * 
 * @author liuzhiyu (liuzhiyu27@foxmail.com)
 * @copyright Copyright (c) 2023 by liuzhiyu, All Rights Reserved.
 */
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <stdexcept>
#include <string>
#include <thread>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "ThreadWrapper.h"
#include "SoftWareVersion.h"
#include "IDGenerator/UniqueIDGenerator.h"
#include "Utils/info_retrieval.h"  

#if __cplusplus >= 202002L  // 检查C++版本是否为C++20或更高
    #include <concepts>
#elif __cplusplus >= 201703L 
    #include <tuple>
#endif


/********************************************************************************************************/
/**
 * @struct Pri_Task
 * @brief Represents a task in a thread pool with priority and timing information.
 * 
 * This structure encapsulates details of a task that can be scheduled in a thread pool,
 * including its priority, timestamps for when it was enqueued and executed, a history of
 * task executions, and the task itself. It supports comparison operators for prioritizing
 * tasks in the scheduling queue based on their priority and timestamps.
 * 
 * @var Pri_Task::priority
 * The priority level of the task. Tasks with numerically smaller priority values are
 * executed before those with larger values (see the comparison operators below).
 * 
 * @var Pri_Task::timestamp
 * The timestamp (e.g., in milliseconds since some epoch) when the task was added to the
 * scheduling queue. This is used to break ties when two tasks have the same priority.
 * 
 * @var Pri_Task::exec_time
 * The timestamp (e.g., in milliseconds since some epoch) when the task was last executed.
 * This can be used to track task execution for profiling or debugging.
 * 
 * @var Pri_Task::taskHistory
 * A vector of strings, each representing a historical note or log about the task's
 * lifecycle events, such as when it was enqueued, started, and completed.
 * 
 * @var Pri_Task::task
 * A `std::function<void()>` representing the actual task to be executed. This allows
 * for any callable object to be scheduled as a task, providing flexibility in what
 * can be executed by the thread pool.
 * 
 * Usage example:
 * ```
 * // Pri_Task objects are normally created internally by the thread pool;
 * // callers submit work through enqueueWithPriority / enqueue instead:
 * auto future = threadPool.enqueueWithPriority(
 *     10, []() { std::cout << "Performing task." << std::endl; });
 * ```
 * 
 * The comparison operators (`<`, `>`, `==`) are provided to facilitate the ordering
 * and equality checking of tasks within a priority queue or similar data structure
 * used by the thread pool to manage tasks. They compare tasks primarily based on
 * priority and secondarily based on the timestamp to ensure that tasks are executed
 * in the correct order.
 */
struct Pri_Task {
    std::string ID;                       ///< Unique ID of the task.
    int priority{0};                      ///< Priority of the task (smaller value = runs earlier).
    std::int64_t timestamp{0};            ///< Milliseconds-since-epoch when the task was enqueued.
    std::int64_t exec_time{0};            ///< Milliseconds-since-epoch when the task was executed (0 = not yet).

    std::vector<std::string> taskHistory; ///< Lifecycle notes (e.g. enqueued, started, completed).

    std::function<void()> task;           ///< The actual callable to execute.

    /**
     * @brief Invokes the wrapped task.
     * @throws std::bad_function_call if no task has been assigned.
     */
    void operator()() {
        task();
    }

    /**
     * @brief Less-than comparison operator, used by std::priority_queue.
     *
     * A task compares "less" (and is therefore popped later) when its
     * numeric priority value is larger, or — on equal priority — when its
     * enqueue timestamp is later. Net effect: the queue pops the task with
     * the smallest priority value first, FIFO within equal priorities.
     *
     * @param t The Pri_Task to compare against.
     * @return true if this task has lower scheduling precedence than @p t.
     */
    bool operator<(const Pri_Task& t) const {
        if (priority == t.priority)
            return timestamp > t.timestamp; // same priority: the later-enqueued task has lower precedence
        return priority > t.priority; // larger numeric value means lower precedence
    }

    /**
     * @brief Greater-than comparison operator (mirror of operator<).
     *
     * @param t The Pri_Task to compare against.
     * @return true if this task has higher scheduling precedence than @p t.
     */
    bool operator>(const Pri_Task& t) const {
        if (priority == t.priority)
            return timestamp < t.timestamp; // same priority: the earlier-enqueued task has higher precedence
        return priority < t.priority; // smaller numeric value means higher precedence
    }

    /**
     * @brief Equality comparison operator.
     *
     * @param t The Pri_Task to compare against.
     * @return true if both priority and enqueue timestamp are equal.
     */
    bool operator==(const Pri_Task& t) const {
        return priority == t.priority && timestamp == t.timestamp;
    }   
};

/**
 * @brief A ThreadPool class for managing a pool of threads.
 * 
 * This class provides a flexible pool of threads that can execute any callable function including 
 * lambda expressions, bound functions, or any function objects. It allows for parallel execution 
 * of tasks with improved performance and efficiency. Tasks can be added to the pool dynamically, 
 * and they are executed as soon as a thread becomes available. The ThreadPool manages the lifecycle 
 * of the threads, creating and destroying them as needed, up to a specified maximum limit.
 * 
 * The ThreadPool is especially useful in scenarios where tasks are numerous and short-lived, 
 * or for server-like applications where tasks arrive intermittently and need to be processed 
 * in parallel.
 * 
 * Example usage:
 *     ThreadPool pool(4); // Create a ThreadPool with 4 threads
 *     auto result = pool.enqueue([](int answer) { return answer; }, 42);
 *     std::cout << result.get(); // prints 42
 * 
 * @note The ThreadPool class is designed to be thread-safe. It uses mutexes and condition 
 *       variables to synchronize access to its task queue.
 */
class ThreadPool { 
public:
    /***************************************************************
     *  @note      ThreadPool constructor
     *  @param     maxThreads Maximum number of threads in the pool
     ***************************************************************/
    ThreadPool(size_t maxThreads);

    /**
     * @brief Destructor for ThreadPool.
     * @note Joins all worker threads before destruction.
     */
    ~ThreadPool();

    /**
     * @brief Enqueues a task with a specified priority into the thread pool.
     * 
     * The task is wrapped in a std::packaged_task so its result can be
     * retrieved later through the returned std::future. The priority and the
     * enqueue timestamp order tasks inside the priority queue.
     * 
     * @tparam F        Callable type of the task.
     * @tparam Args     Argument types of the task.
     * @param priority  Priority of the task. Tasks with a numerically SMALLER
     *                  priority value are executed first (see Pri_Task::operator<).
     * @param f         The callable representing the task.
     * @param args      Arguments to be passed to the callable.
     * @return          std::future holding the task's eventual result.
     * 
     * @note  Arguments are perfectly forwarded into the stored task.
     * @note  The returned std::future stays valid after the packaged_task is moved.
     * @exception std::runtime_error If the thread pool has been stopped.
     */
#if __cplusplus >= 202002L 
    template<class F, class... Args>
    requires std::invocable<F, Args...>
    auto enqueueWithPriority(int priority, F&& f, Args&&... args)
        -> std::future<decltype(std::forward<F>(f)(std::forward<Args>(args)...))>
#else
    template<class F, class... Args>
    auto enqueueWithPriority(int priority, F&& f, Args&&... args)
        -> std::future<typename std::result_of<F(Args...)>::type>
#endif
    {
        // Fail fast before building the task or recording history; the
        // authoritative check is repeated below under the queue mutex.
        if (m_stopflag.load()) {
            throw std::runtime_error("enqueue on stopped ThreadPool");
        }

        auto current_timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch()).count();

        Pri_Task taskWithPriority;
        taskWithPriority.priority = priority;
        taskWithPriority.timestamp = current_timestamp;
        taskWithPriority.exec_time = 0; // not executed yet
        taskWithPriority.ID = UniqueIDGenerator::getInstance().generateID();
        if (m_historyRecordEnabled) {
            _insertTaskHistory(taskWithPriority.ID, priority, current_timestamp, f, args...);
        }
#if __cplusplus >= 202002L 
        using return_type = decltype(std::forward<F>(f)(std::forward<Args>(args)...));
        // C++20 pack init-capture: forward f and args into the closure while
        // preserving their value categories.
        auto task = std::make_shared<std::packaged_task<return_type()>>(
                [f = std::forward<F>(f), ...args = std::forward<Args>(args)]() mutable {
                    return f(args...);
                }
            );
#else
        using return_type = typename std::result_of<F(Args...)>::type;
        auto task = std::make_shared< std::packaged_task<return_type()> >(
                std::bind(std::forward<F>(f), std::forward<Args>(args)...)
            );
#endif
        auto ret = task->get_future();

        {
            std::unique_lock<std::mutex> lock(m_queue_mutex);

            // Adding tasks to a stopped pool is an error.
            if (m_stopflag.load()) {
                throw std::runtime_error("enqueue on stopped ThreadPool");
            }
            taskWithPriority.task = [task = std::move(task)]() mutable { (*task)(); };

            m_priorityTasks.emplace(std::move(taskWithPriority));
        }
        // Notify after releasing the lock so the woken worker does not
        // immediately block on m_queue_mutex.
        m_condition.notify_one();
        return ret;
    }

    /**
     * @brief Enqueues a task with the default priority (0).
     * @return std::future holding the task's eventual result.
     */
    template<class F, class... Args>
    auto enqueue(F&& f, Args&&... args) 
        -> decltype(auto)
    {
        return enqueueWithPriority(0, std::forward<F>(f), std::forward<Args>(args)...);
    }
    
    /**
     * @brief Get the number of threads currently executing tasks.
     * @return Number of working threads.
     */
    size_t getWorkingThreadCount() const {
        return m_workingThreads.load(std::memory_order_acquire);
    }

    /**
     * @brief Enable or disable task-history recording.
     * 
     * @param enabled true to record a history entry for each enqueued task.
     */
    void setLogEnabled(bool enabled) {
        m_historyRecordEnabled = enabled;
    }

    /**
     * @brief Set the maximum number of task-history records to keep.
     * 
     * Trims the history vector down to the new limit and removes the
     * corresponding entries from the ID lookup map so both containers
     * stay consistent.
     * 
     * @param limit The maximum number of history records to keep.
     */
    void setLogLimit(std::size_t limit) {
        std::lock_guard<std::mutex> lock(m_log_mutex); // protects limit, vector and map
        m_historyRecordLimit = limit;
        if (m_historyRecordVector.size() > limit) {
            auto firstKept = m_historyRecordVector.end() - limit;
            // Purge the map entries of the records about to be erased.
            for (auto it = m_historyRecordVector.begin(); it != firstKept; ++it) {
                if (*it) {
                    m_historyRecordMap.erase(_extractId(**it));
                }
            }
            m_historyRecordVector.erase(m_historyRecordVector.begin(), firstKept);
        }
    }

private:
    /**
     * @brief Extract the task ID from a history record string.
     * 
     * Records are formatted as "<ID>:: ...", so the ID is everything before
     * the first ':'.
     * @return The ID, or an empty string if no ':' is present.
     */
    inline std::string _extractId(const std::string& task) {
        size_t pos = task.find(':');
        if (pos != std::string::npos) {
            return task.substr(0, pos); // ID without the colon
        }
        return "";
    }

    /**
     * @brief Trim the history vector once it has reached @p limit entries.
     * 
     * Removes the oldest limit/2 records and reserves extra capacity to
     * reduce future reallocations.
     * @return IDs of the removed records so the caller can purge the map.
     */
    inline std::vector<std::string> _trimAndReserveVector(std::vector<std::shared_ptr<std::string>>& vec, size_t limit) {
        std::vector<std::string> removedIds;
        if (vec.size() >= limit) {
            size_t elementsToRemove = limit / 2;
            removedIds.reserve(elementsToRemove);
            for (size_t i = 0; i < elementsToRemove; ++i) {
                removedIds.push_back(_extractId(*vec[i]));
            }
            vec.erase(vec.begin(), vec.begin() + elementsToRemove);
            // Keep ~50% headroom instead of shrink_to_fit to avoid churn.
            vec.reserve(limit + limit / 2);
        }
        return removedIds;
    }

    /**
     * @brief Record a newly enqueued task in the history map.
     * 
     * Builds a one-line description (ID, timestamp, priority, callable and
     * argument descriptions via args_to_string) and stores it keyed by ID.
     * @note Explicit template instead of C++20 abbreviated (`auto&&`)
     *       parameters so this compiles under the C++17 code path as well.
     */
    template<class F, class... Args>
    void _insertTaskHistory(const std::string& ID, int priority, std::int64_t timestamp, F&& f, Args&&... args) {
        std::string currenttask = ID + "::" + " timestamp: " + std::to_string(timestamp) 
            + " priority: " + std::to_string(priority) 
            + " object: " + args_to_string(f) 
            + " args:" + args_to_string(args...);

        auto record = std::make_shared<std::string>(currenttask);

        std::lock_guard<std::mutex> lock(m_log_mutex);
        m_historyRecordMap[ID] = record;
    }

    /**
     * @brief Append the execution timestamp to a task's history record and
     *        move the record into the chronological history vector.
     * 
     * Also trims the vector (and the map) once it grows beyond
     * m_historyRecordVectorMaxSize.
     */
    void _updateExecTime(const Pri_Task & task) {
        auto exec_time = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch()).count();
        std::lock_guard<std::mutex> lock(m_log_mutex); 
        auto record_it = m_historyRecordMap.find(task.ID); 
        std::shared_ptr<std::string> record_ptr;

        if (record_it != m_historyRecordMap.end()) {
            record_ptr = record_it->second; 
            if (record_ptr) { 
                *record_ptr += " exec_time: " + std::to_string(exec_time);
            }
        }

        if (m_historyRecordVector.size() >= m_historyRecordVectorMaxSize) {
            auto removedIds = _trimAndReserveVector(m_historyRecordVector, m_historyRecordLimit);
            for (const auto& id : removedIds) {
                m_historyRecordMap.erase(id); 
            }
        }

        if (record_ptr) { 
            m_historyRecordVector.push_back(record_ptr); 
        }
    }

    /**
     * @brief Flag to indicate whether the thread pool is stopped.
     * 
     * When set to true, no new tasks can be added and the pool begins
     * shutting down.
     */
    std::atomic<bool> m_stopflag{false};

    /**
     * @brief Maximum number of threads allowed in the pool.
     */
    size_t m_maxThreads{0};

    /**
     * @brief Number of threads currently executing tasks.
     */
    std::atomic<size_t> m_workingThreads{0};

    /**
     * @brief Worker wrappers responsible for executing queued tasks.
     */
    std::vector<std::unique_ptr<ThreadWrapper>> m_workers;

    /**
     * @brief Pending tasks, ordered by Pri_Task's comparison operators
     *        (smallest numeric priority value is popped first).
     */
    std::priority_queue<Pri_Task> m_priorityTasks;
    
    /**
     * @brief Mutex protecting m_priorityTasks.
     */
    std::mutex m_queue_mutex;

    /**
     * @brief Signals workers that a task is available (or the pool is stopping).
     */
    std::condition_variable m_condition;

    /**
     * @brief Flag to indicate whether history recording is enabled.
     * 
     * Atomic because it is written via setLogEnabled() and read concurrently
     * by enqueueWithPriority() without holding a lock.
     */
    std::atomic<bool> m_historyRecordEnabled{false};

    /**
     * @brief Maximum number of history records to keep.
     * 
     * When exceeded, the oldest records are removed (see _trimAndReserveVector).
     */
    std::size_t m_historyRecordLimit{0};

    /**
     * @brief Vector size at which trimming is triggered in _updateExecTime.
     */
    std::size_t m_historyRecordVectorMaxSize{0};

    /**
     * @brief Executed-task records in chronological order (shared with the map).
     */
    std::vector<std::shared_ptr<std::string>> m_historyRecordVector;

    /**
     * @brief ID -> record lookup for records awaiting their execution-time note.
     */
    std::unordered_map<std::string, std::shared_ptr<std::string>> m_historyRecordMap;

    /**
     * @brief Mutex protecting the history containers, the limit, and the
     *        record strings themselves.
     */
    std::mutex m_log_mutex;
    
};

#endif
