#include "tasksys.h"

// Out-of-line definitions for the abstract interfaces declared in tasksys.h.
IRunnable::~IRunnable() = default;

ITaskSystem::ITaskSystem(int num_threads) {} // base class keeps no state
ITaskSystem::~ITaskSystem() = default;

/*
 * ================================================================
 * Serial task system implementation
 * ================================================================
 */

const char *TaskSystemSerial::name()
{
    static const char *kName = "Serial";
    return kName;
}

// The serial system needs no per-instance state; the thread count is unused.
TaskSystemSerial::TaskSystemSerial(int num_threads) : ITaskSystem(num_threads) {}

TaskSystemSerial::~TaskSystemSerial() = default;

// Executes every task of the bulk launch sequentially on the calling thread.
void TaskSystemSerial::run(IRunnable *runnable, int num_total_tasks)
{
    int task_id = 0;
    while (task_id < num_total_tasks)
    {
        runnable->runTask(task_id, num_total_tasks);
        ++task_id;
    }
}

// The serial system ignores dependencies: the launch completes before this
// call returns, so the returned TaskID (always 0) is already finished.
TaskID TaskSystemSerial::runAsyncWithDeps(IRunnable *runnable, int num_total_tasks,
                                          const std::vector<TaskID> &deps)
{
    int task_id = 0;
    while (task_id < num_total_tasks)
    {
        runnable->runTask(task_id, num_total_tasks);
        ++task_id;
    }

    return 0;
}

// Nothing to wait for: all launches complete synchronously in this system.
void TaskSystemSerial::sync()
{
}

/*
 * ================================================================
 * Parallel Task System Implementation
 * ================================================================
 */

const char *TaskSystemParallelSpawn::name()
{
    static const char *kName = "Parallel + Always Spawn";
    return kName;
}

// NOTE: CS149 students are not expected to implement TaskSystemParallelSpawn in Part B.
TaskSystemParallelSpawn::TaskSystemParallelSpawn(int num_threads) : ITaskSystem(num_threads)
{
    this->num_threads = num_threads;
}

TaskSystemParallelSpawn::~TaskSystemParallelSpawn() = default;

/*
 * Runs the bulk launch by spawning num_threads fresh threads per call and
 * statically assigning tasks in an interleaved (round-robin) fashion:
 * thread t executes tasks t, t + num_threads, t + 2*num_threads, ...
 * Interleaving balances load when task costs vary smoothly across indices
 * (measured here: ~68.6ms for mandelbrot with 8 threads).
 *
 * Improvement over the previous version: each worker now computes its task
 * indices on the fly from its thread id instead of materializing a
 * std::vector<int> of indices and copying it into every closure — that cost
 * O(num_total_tasks) extra memory and copies per launch for no benefit.
 */
void TaskSystemParallelSpawn::run(IRunnable *runnable, int num_total_tasks)
{
    std::vector<std::thread> threads(this->num_threads);

    for (int t = 0; t < this->num_threads; t++)
    {
        // Capture runnable/num_total_tasks/t by value so the spawned thread
        // never references this frame's locals through dangling references.
        threads[t] = std::thread(
            [this, runnable, num_total_tasks, t]()
            {
                for (int task = t; task < num_total_tasks; task += this->num_threads)
                {
                    runnable->runTask(task, num_total_tasks);
                }
            });
    }

    // Block until the whole launch has completed.
    for (auto &worker : threads)
    {
        worker.join();
    }
}

// NOTE: CS149 students are not expected to implement TaskSystemParallelSpawn in Part B.
// Dependency-aware launches fall back to serial execution here; the returned
// TaskID (0) is already complete when this call returns.
TaskID TaskSystemParallelSpawn::runAsyncWithDeps(IRunnable *runnable, int num_total_tasks,
                                                 const std::vector<TaskID> &deps)
{
    int task_id = 0;
    while (task_id < num_total_tasks)
    {
        runnable->runTask(task_id, num_total_tasks);
        ++task_id;
    }

    return 0;
}

// NOTE: CS149 students are not expected to implement TaskSystemParallelSpawn in Part B.
// No asynchronous work is ever outstanding, so there is nothing to wait for.
void TaskSystemParallelSpawn::sync()
{
}

/*
 * ================================================================
 * Parallel Thread Pool Spinning Task System Implementation
 * ================================================================
 */

const char *TaskSystemParallelThreadPoolSpinning::name()
{
    static const char *kName = "Parallel + Thread Pool + Spin";
    return kName;
}

/*
 * Constructs a pool of num_threads workers that spin (busy-poll) on the
 * shared task queue. A worker exits only once `stop` is set AND the queue
 * has been drained, so queued tasks are never dropped on shutdown.
 */
TaskSystemParallelThreadPoolSpinning::TaskSystemParallelThreadPoolSpinning(int num_threads) : ITaskSystem(num_threads)
{
    // NOTE: CS149 students are not expected to implement TaskSystemParallelThreadPoolSpinning in Part B.
    this->num_threads = num_threads;

    this->stop = false;

    for (int i = 0; i < num_threads; i++)
    {
        workers.emplace_back(
            [this, i]() -> void
            {
                for (;;)
                {
                    // Holds the dequeued work item; remains empty (falsy)
                    // when the queue had nothing for us this iteration.
                    std::function<void()> task;

                    {
                        // printf("Thread %d: waiting for task\n", i);
                        // Queue state (and the stop flag) is only inspected
                        // under queue_mutex.
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        if (this->stop && this->task_queue.empty())
                        {
                            // printf("Thread %d: stopping\n", i);
                            return;
                        }
                        if (this->task_queue.empty() == false)
                        {
                            // printf("Thread %d: got task\n", i);
                            task = this->task_queue.front();
                            this->task_queue.pop();
                        }

                        // this scope end -> the lock will call lock.unlock();
                        // printf("Thread %d: lock released and Spining\n", i);
                    }

                    // Run outside the lock so other workers can dequeue
                    // concurrently. NOTE(review): tasks created by enqueue()
                    // are packaged_tasks, which capture their own exceptions
                    // into the future — this catch is a safety net for any
                    // directly-queued callables.
                    try
                    {
                        if (task)
                        {
                            task();
                        }
                    }
                    catch (const std::exception &e)
                    {
                        std::cerr << "Task threw an exception: " << e.what() << std::endl;
                    }
                }
            });
    }
}

/*
 * Shuts the pool down: publishes the stop flag under the queue lock so the
 * spinning workers observe it, then waits for every worker to drain the
 * remaining queue entries and exit.
 */
TaskSystemParallelThreadPoolSpinning::~TaskSystemParallelThreadPoolSpinning()
{
    {
        std::lock_guard<std::mutex> lock(this->queue_mutex);
        this->stop = true;
    }

    for (std::size_t w = 0; w < this->workers.size(); ++w)
    {
        this->workers[w].join();
    }
}

/*
 * Wraps an arbitrary callable (plus bound arguments) in a
 * std::packaged_task, pushes it onto the spinning pool's queue, and returns
 * the future for its result. Throws std::runtime_error if the pool has
 * already been stopped.
 *
 * Fixes over the previous version:
 *  - the local `return_type` used std::result_of<F(Args...)> (non-decayed),
 *    which can disagree with the decayed form in the trailing return type of
 *    the declaration; both now use the identical decayed expression.
 *  - removed the dead `if (task)` check: std::make_shared never yields null.
 */
template <typename F, typename... Args>
auto TaskSystemParallelThreadPoolSpinning::enqueue(F &&f, Args &&...args)
    -> std::future<typename std::result_of<typename std::decay<F>::type(typename std::decay<Args>::type...)>::type>
{
    using return_type =
        typename std::result_of<typename std::decay<F>::type(typename std::decay<Args>::type...)>::type;

    // packaged_task owns the bound callable and feeds the future, including
    // any exception the callable throws.
    auto task = std::make_shared<std::packaged_task<return_type(void)>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...));

    std::future<return_type> res = task->get_future();

    {
        std::unique_lock<std::mutex> lock(this->queue_mutex);
        // Enqueueing after shutdown would leave a task nobody ever runs.
        if (this->stop)
            throw std::runtime_error("enqueue on stopped thread pool");

        this->task_queue.emplace(
            [task]()
            {
                (*task)();
            });
    }

    return res;
}

/*
 * Splits the bulk launch into one contiguous chunk per worker, submits each
 * chunk to the spinning pool, and blocks until every chunk has completed.
 *
 * Fix: the previous floor-division split gave every chunk except the last
 * floor(total/threads) tasks; whenever num_total_tasks < num_threads that
 * floor is 0, so the final worker ran the ENTIRE launch serially. Using a
 * ceil-division chunk size keeps chunks balanced (sizes differ by at most
 * one chunk's worth) and naturally submits fewer chunks than workers when
 * there is little work.
 */
void TaskSystemParallelThreadPoolSpinning::run(IRunnable *runnable, int num_total_tasks)
{
    // NOTE: CS149 students are not expected to implement TaskSystemParallelThreadPoolSpinning in Part B.
    if (num_total_tasks <= 0)
        return;

    const int chunk = (num_total_tasks + num_threads - 1) / num_threads; // ceil division
    std::vector<std::future<void>> futures;

    for (int start = 0; start < num_total_tasks; start += chunk)
    {
        const int end = std::min(start + chunk, num_total_tasks);

        futures.emplace_back(enqueue([runnable, start, end, num_total_tasks]()
                                     {
            for (int i = start; i < end; i++) {
                runnable->runTask(i, num_total_tasks);
            } }));
    }

    // Wait for every chunk; future::get also rethrows task exceptions.
    for (auto &future : futures)
    {
        future.get();
    }
}

// NOTE: CS149 students are not expected to implement TaskSystemParallelThreadPoolSpinning in Part B.
// Dependency-aware launches fall back to serial execution; the returned
// TaskID (0) is already complete when this call returns.
TaskID TaskSystemParallelThreadPoolSpinning::runAsyncWithDeps(IRunnable *runnable, int num_total_tasks,
                                                              const std::vector<TaskID> &deps)
{
    int task_id = 0;
    while (task_id < num_total_tasks)
    {
        runnable->runTask(task_id, num_total_tasks);
        ++task_id;
    }

    return 0;
}

// NOTE: CS149 students are not expected to implement TaskSystemParallelThreadPoolSpinning in Part B.
// No asynchronous work is ever outstanding, so there is nothing to wait for.
void TaskSystemParallelThreadPoolSpinning::sync()
{
}

/*
 * ================================================================
 * Parallel Thread Pool Sleeping Task System Implementation
 * ================================================================
 */

const char *TaskSystemParallelThreadPoolSleeping::name()
{
    static const char *kName = "Parallel + Thread Pool + Sleep";
    return kName;
}

/*
 * Constructs a pool of num_threads workers that sleep on `condition` until
 * either a task is queued or shutdown is requested — idle workers burn no
 * CPU, unlike the spinning variant above.
 */
TaskSystemParallelThreadPoolSleeping::TaskSystemParallelThreadPoolSleeping(int num_threads) : ITaskSystem(num_threads)
{
    //
    // TODO: CS149 student implementations may decide to perform setup
    // operations (such as thread pool construction) here.
    // Implementations are free to add new class member variables
    // (requiring changes to tasksys.h).
    //

    // std::cout << __PRETTY_FUNCTION__ << std::endl;
    this->num_threads = num_threads;
    this->stop = false;
    this->id = {0}; // next TaskID handed out by runAsyncWithDeps

    for (int i = 0; i < num_threads; i++)
    {
        workers.emplace_back(
            [this, i]() -> void
            {
                std::function<void()> task;

                while (true)
                {
                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        // std::unique_lock<std::mutex> lock_waiting(this->waiting_queue_mutex);
                        // std::scoped_lock lock(this->queue_mutex, this->waiting_queue_mutex);
                        // Sleep until work arrives or shutdown begins; the
                        // predicate also guards against spurious wakeups.
                        this->condition.wait(lock, [this]() -> bool
                                             { return this->task_queue.empty() == false || this->stop == true; });
                        // Exit only once stop is set AND the queue has been
                        // drained, so queued work is never dropped.
                        if (this->stop && this->task_queue.empty() == true)
                        {
                            // printf("Thread %d return \n", i);
                            return;
                        }
                        // lock_waiting.unlock();
                        task = std::move(this->task_queue.front());
                        this->task_queue.pop();
                    }
                    // Run the task outside the lock so other workers can
                    // dequeue concurrently.
                    // auto start_time = CycleTimer::currentSeconds();
                    task();
                    // auto end_time = CycleTimer::currentSeconds();
                    // printf("Thread %d doing task done, Cost time : %f \n", i, (end_time - start_time) * 1000);
                    // start_time = CycleTimer::currentSeconds();
                    // this->check_waiting_queue();
                    // end_time = CycleTimer::currentSeconds();
                    // printf("Thread %d checking waiting queue done , Cost time : %f \n", i, (end_time - start_time) * 1000);
                }
            });
    }
}

/*
 * Tears the sleeping pool down: raises the stop flag under the queue lock,
 * wakes every sleeping worker, then joins them all. Workers drain any tasks
 * still queued before exiting.
 *
 * NOTE(review): outstanding async launches should be synced by the caller
 * before destruction — this destructor does not call sync() itself.
 */
TaskSystemParallelThreadPoolSleeping::~TaskSystemParallelThreadPoolSleeping()
{
    {
        std::lock_guard<std::mutex> lock(this->queue_mutex);
        this->stop = true;
    }
    // Every worker must observe stop, so wake them all.
    this->condition.notify_all();

    for (std::size_t w = 0; w < workers.size(); ++w)
    {
        workers[w].join();
    }
}

/*
 * Runs a bulk launch synchronously.
 *
 * Small launches (<= num_threads tasks) are submitted one task per pool
 * item and awaited directly via futures. Larger launches reuse the async
 * path: register the launch with no dependencies, then sync().
 *
 * Fixes over the previous version: the per-task lambda captured `runnable`
 * and `num_total_tasks` by reference — needlessly fragile for work executed
 * on other threads — and computed a [start, end) range that was always
 * exactly [i, i + 1).
 */
void TaskSystemParallelThreadPoolSleeping::run(IRunnable *runnable, int num_total_tasks)
{
    if (num_total_tasks <= this->num_threads)
    {
        std::vector<std::future<void>> futures;

        for (int i = 0; i < num_total_tasks; i++)
        {
            // One pool item per task; capture everything by value so the
            // closure is self-contained.
            futures.push_back(
                enqueue([runnable, i, num_total_tasks]()
                        { runnable->runTask(i, num_total_tasks); }));
        }

        // Wait for every task; future::get also rethrows task exceptions.
        for (auto &future : futures)
        {
            future.get();
        }
    }
    else
    {
        // Push the launch through the dependency machinery (no deps) and
        // block until the scheduler drains it.
        this->runAsyncWithDeps(runnable, num_total_tasks, {});

        this->sync();
    }
}

/*
 * Registers a bulk launch (with dependencies) and returns its TaskID without
 * running anything; work is actually dispatched later by
 * sync()/check_waiting_queue() once the dependencies have completed.
 *
 * Fix: the previous version returned `task.id` AFTER `task` had been
 * std::move'd into the queue entry — a read of a moved-from object whose
 * value is unspecified. The id is now captured before the move.
 */
TaskID TaskSystemParallelThreadPoolSleeping::runAsyncWithDeps(IRunnable *runnable, int num_total_tasks,
                                                              const std::vector<TaskID> &deps)
{
    auto task = TaskInfo(this->id, runnable, num_total_tasks, deps);
    const TaskID task_id = task.id; // read before task is moved below

    this->id++;

    // depend on the task_total_num to adjust the granularity
    // this->task_granularity = int(num_total_tasks / this->num_threads);

    {
        std::unique_lock<std::mutex> lock(this->waiting_queue_mutex);
        waiting_queue.push(std::make_pair(task_id, std::move(task)));
    }

    return task_id;
}

/*
 * Blocks until every launch registered via runAsyncWithDeps() has finished.
 *
 * The calling thread drives the scheduler: it repeatedly calls
 * check_waiting_queue() (which moves dependency-satisfied launches into the
 * pool's task queue) until the waiting queue drains, then waits on every
 * outstanding sub-task future.
 *
 * NOTE(review): the drain loop below is effectively a busy-wait on the
 * calling thread; a condition variable signalled by the workers could
 * replace it.
 */
void TaskSystemParallelThreadPoolSleeping::sync()
{
    // std::cout << __PRETTY_FUNCTION__ << std::endl;
    // {
    //     std::unique_lock<std::mutex> lock(this->future_mutex);
    //     for (auto &sub_pair: subTask_futures)
    //     {
    //         for(auto & future : sub_pair.second)
    //         {
    //             future.get();
    //         }
    //     }
    // }

    // check waiting queue and until all task is done
    {
        std::unique_lock<std::mutex> lock(this->waiting_queue_mutex);
        while (!waiting_queue.empty())
        {
            // Unlock the mutex before calling check_waiting_queue
            // and re-lock it afterwards
            // (check_waiting_queue acquires waiting_queue_mutex itself,
            // so holding it here would self-deadlock).
            // printf("enter in sync() while\n");
            // std::cout << "enter in sync() while" << std::endl;
            lock.unlock();
            check_waiting_queue();
            lock.lock();
            // printf("exit in sync() while\n");
            // std::cout << "exit in sync() while" << std::endl;
        }
        // std::cout << "Now, waiting_queue is empty" << std::endl;
        // printf("Now, waiting_queue is empty\n");
    }

    // once again manage futures
    // An empty waiting queue only means every launch was dispatched; wait on
    // the sub-task futures so all dispatched work has actually completed.
    {
        std::unique_lock<std::mutex> lock(this->future_mutex);
        for (auto &sub_pair : subTask_futures)
        {
            for (auto &future : sub_pair.second)
            {
                // valid() guards futures already consumed by a prior sync().
                if (future.valid())
                    future.get();
            }
        }
    }

    return;
}

/*
 * Wraps a callable (plus bound arguments) in a std::packaged_task, queues it
 * for the sleeping pool, wakes one worker, and returns the future for the
 * callable's result.
 *
 * Fix: the future was previously hard-coded as std::future<void>, which only
 * compiles when the callable happens to return void; it is now
 * std::future<return_type>, matching the declared return type.
 */
template <typename F, typename... Args>
auto TaskSystemParallelThreadPoolSleeping::enqueue(F &&f, Args &&...args)
    -> std::future<typename std::result_of<typename std::decay<F>::type(typename std::decay<Args>::type...)>::type>
{
    using return_type =
        typename std::result_of<typename std::decay<F>::type(typename std::decay<Args>::type...)>::type;

    // packaged_task owns the bound callable and feeds the future, including
    // any exception the callable throws.
    auto task = std::make_shared<std::packaged_task<return_type(void)>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...));

    std::future<return_type> res = task->get_future();

    {
        std::unique_lock<std::mutex> lock(this->queue_mutex);
        this->task_queue.emplace(
            [task]()
            {
                (*task)();
            });
    }
    // Wake exactly one sleeping worker to pick the task up.
    this->condition.notify_one();

    return res;
}

/*
 * Scheduler step: examines the launch at the front of the waiting queue and,
 * if all of its dependencies have completed, splits it into fixed-size
 * sub-tasks (task_granularity tasks each) and enqueues them on the pool.
 * At most one launch is dispatched per call; sync() calls this in a loop.
 *
 * Lock order used here: waiting_queue_mutex -> subTask_mutex / future_mutex
 * (never the reverse) — keep any future changes consistent with that.
 */
void TaskSystemParallelThreadPoolSleeping::check_waiting_queue()
{
    // std::cout << __PRETTY_FUNCTION__ << std::endl;
    {
        // std::cout << 2 << std::endl;
        // check waiting queue and pusts sub_task into task_queue
        std::unique_lock<std::mutex> waiting_queue_lock(this->waiting_queue_mutex);
        // std::unique_lock<std::mutex> subTask_lock(this->subTask_mutex);
        // std::cout << 3 << std::endl;
        if (!waiting_queue.empty())
        {
            // std::cout << 4 << std::endl;
            // NOTE(review): top() copies the whole (TaskID, TaskInfo) entry
            // here — the element itself stays queued until the pop() below.
            auto it = waiting_queue.top();
            // waiting_queue_lock.unlock();
            auto id = it.first;
            // std::cout << id << std::endl;
            auto info = it.second;

            // A launch may run once every dependency's is_done flag is set.
            // Launch 0 (and any launch without deps) is always runnable.
            bool can_run = true;
            auto depend = info.dependency;
            if (id != 0 && depend.size() != 0)
            {
                // auto depend = info.dependency;
                std::unique_lock<std::mutex> subTask_lock(this->subTask_mutex);
                for (auto &dep : depend)
                {
                    // std::cout << "ID : " << id << " depends : " << dep << std::endl;
                    // std::cout << "completed[dep] : " << subTask[dep].is_done << std::endl;
                    can_run = can_run && subTask[dep].is_done;
                }
            }
            // std::cout << "ID : " << id << " " << " can run " << can_run << std::endl;

            if (can_run)
            {
                // Remove the launch from the waiting queue before enqueueing
                // its sub-tasks; we still hold waiting_queue_mutex here.
                waiting_queue.pop();
                // waiting_queue_lock.unlock();
                // std::cout << 5 << std::endl;
                // ------------- Implementation didn't take account of task granularity -------------------
                // futures.push_back(
                //     enqueue(
                //         [this, id, info]()
                //         {
                //             // std::unique_lock<std::mutex> map_lock(this->completed_mutex);
                //             completed[id] = false;
                //             // map_lock.unlock();

                //             for (int i = 0; i < info.num_total_tasks; i++)
                //                 info.runnable->runTask(i, info.num_total_tasks);

                //             // map_lock.lock();
                //             completed[id] = true;
                //             // map_lock.unlock();

                //             // check_waiting_queue();
                //         }));
                // ------------- Implementation didn't take account of task granularity -------------------
                //------------- Wrong Implementation -------------------
                // for(int i = 0; i < this->num_threads; i ++)
                // {
                //     int start = i * info.num_total_tasks / this->num_threads;
                //     int end = (i == this->num_threads - 1) ? info.num_total_tasks : (i + 1) * info.num_total_tasks / this->num_threads;

                //     futures.push_back(
                //         enqueue(
                //         [this, id, info, start, end]()
                //         {
                //             std::unique_lock<std::mutex> map_lock(this->completed_mutex);
                //             completed[id] = false;
                //             map_lock.unlock();

                //             for (int i = start; i < end; i++)
                //                 info.runnable->runTask(i, info.num_total_tasks);

                //             map_lock.lock();
                //             completed[id] = true;
                //             map_lock.unlock();

                //             check_waiting_queue();
                //         }
                //         )
                //     );
                // }
                // ----------------- Wrong Implementation -------------------

                // ----------------- Implementation take account of task granularity -------------------
                // Number of sub-tasks = ceil(num_total_tasks / task_granularity).
                int sub_task_nums = (info.num_total_tasks + this->task_granularity - 1) / this->task_granularity;
                // std::cout << "sub_task_nums : " << sub_task_nums << std::endl;
                // printf("sub_task_nums : %d \n", sub_task_nums);
                {
                    // Register the launch's completion tracker: a countdown
                    // of outstanding sub-tasks plus an is_done flag that the
                    // last finishing sub-task sets below.
                    std::unique_lock<std::mutex> subTask_lock(this->subTask_mutex);
                    // printf("id : %d , sub_task_nums : %d \n", id, sub_task_nums);
                    // subTask.emplace(id, std::pair<std::atomic_int64_t, std::atomic_bool>(std::atomic_int64_t(0), std::atomic_bool(false)));
                    // subTask.emplace(std::piecewise_construct,
                    // std::forward_as_tuple(id),
                    // std::forward_as_tuple(std::atomic_int64_t(0), std::atomic_bool(false)));
                    subTaskManage taskData{std::atomic_int64_t(sub_task_nums), std::atomic_bool(false)};
                    subTask[id] = std::move(taskData);
                }
                // Initialize task complete false first
                // {
                //     std::unique_lock<std::mutex> map_lock(this->completed_mutex);
                //     completed[id] = false;
                // }

                for (int i = 0; i < sub_task_nums; i++)
                {
                    // Each sub-task covers [task_start, task_end) of the
                    // launch's task indices; the last one may be short.
                    int task_start = i * this->task_granularity;
                    // std::cout << "task_start : " << task_start << std::endl;
                    int task_end = std::min(task_start + this->task_granularity, info.num_total_tasks);
                    // std::cout << "task_end : " << task_end << std::endl;
                    {
                        std::unique_lock<std::mutex> future_lock(this->future_mutex);
                        subTask_futures[id].push_back(
                            enqueue(
                                [this, id, info, task_start, task_end]()
                                {
                                    for (int j = task_start; j < task_end; j++)
                                    {
                                        info.runnable->runTask(j, info.num_total_tasks);
                                    }
                                    // completed this subTask !
                                    {
                                        // Decrement the countdown; the final
                                        // sub-task marks the launch done so
                                        // dependents become runnable.
                                        std::unique_lock<std::mutex> subTask_lock(this->subTask_mutex);
                                        if (subTask[id].num_sub_tasks > 0)
                                            subTask[id].num_sub_tasks--;
                                        if (subTask[id].num_sub_tasks == 0)
                                            subTask[id].is_done = true;
                                        // printf("completed subTask in : %d, left subtask num is : %d \n", id, int(subTask[id].num_sub_tasks));
                                    }
                                }));
                        // this->condition.notify_one();
                    }
                }

                // waiting_queue_lock.lock();
                // waiting_queue.pop();
            }
        }
    }

    // {
    //     std::unique_lock<std::mutex> lock(this->subTask_mutex);
    //     // check subTask Map whether task(taskID impl) has been completed
    //     for (const auto &task : subTask)
    //     {
    //         auto &ID = task.first;
    //         // printf("ID : %d\n", ID);
    //         auto &count = task.second;
    //         if (count == 0)
    //         {
    //             printf("Task %d completed \n", ID);
    //             {
    //                 std::unique_lock<std::mutex> map_lock(this->completed_mutex);
    //                 completed[ID] = true;
    //                 map_lock.unlock();
    //             }
    //         }
    //     }
    //     lock.unlock();
    // }
}