#include "tasksys.h"


// Out-of-line virtual destructor anchors IRunnable's vtable in this TU.
IRunnable::~IRunnable() {}

// Base interface does nothing with the thread count; concrete subclasses
// record and use it.
ITaskSystem::ITaskSystem(int num_threads) {}
ITaskSystem::~ITaskSystem() {}

/*
 * ================================================================
 * Serial task system implementation
 * ================================================================
 */

// Human-readable label used by the test harness to identify this system.
const char* TaskSystemSerial::name() {
    return "Serial";
}

// Serial system ignores num_threads: all work runs on the calling thread.
TaskSystemSerial::TaskSystemSerial(int num_threads): ITaskSystem(num_threads) {
}

TaskSystemSerial::~TaskSystemSerial() {}

void TaskSystemSerial::run(IRunnable* runnable, int num_total_tasks) {
    // Execute every task inline on the calling thread, in index order.
    int task_id = 0;
    while (task_id < num_total_tasks) {
        runnable->runTask(task_id, num_total_tasks);
        ++task_id;
    }
}

// Part B stub: async launches are not required for the serial system.
TaskID TaskSystemSerial::runAsyncWithDeps(IRunnable* runnable, int num_total_tasks,
                                          const std::vector<TaskID>& deps) {
    // You do not need to implement this method.
    return 0;
}

// Part B stub: nothing to wait for since run() is synchronous.
void TaskSystemSerial::sync() {
    // You do not need to implement this method.
    return;
}

/*
 * ================================================================
 * Parallel Task System Implementation
 * ================================================================
 */

// Human-readable label used by the test harness to identify this system.
const char* TaskSystemParallelSpawn::name() {
    return "Parallel + Always Spawn";
}

// No pool is built up-front: this implementation spawns fresh std::threads on
// every run() call, so construction only records the desired thread count.
TaskSystemParallelSpawn::TaskSystemParallelSpawn(int num_threads): ITaskSystem(num_threads) {
    this->num_threads = num_threads;
}

TaskSystemParallelSpawn::~TaskSystemParallelSpawn() {}

void TaskSystemParallelSpawn::run(IRunnable* runnable, int num_total_tasks) {
    // Spawn num_threads fresh threads per call and hand out tasks in an
    // interleaved (round-robin) pattern: thread t runs tasks t, t+T, t+2T, ...
    // Interleaving balances cost-skewed workloads better than contiguous
    // blocks (measured: mandelbrot, 8 threads, 68.6ms interleaved vs 69.1ms
    // blocked).
    std::vector<std::thread> threads;
    threads.reserve(this->num_threads);

    const int stride = this->num_threads;
    for (int t = 0; t < stride; t++) {
        // Capture by value: each worker computes its own indices, avoiding
        // the per-thread std::vector<int> of indices (and its copy into the
        // closure) that the previous version materialized.
        threads.emplace_back([runnable, t, stride, num_total_tasks]() {
            for (int task = t; task < num_total_tasks; task += stride) {
                runnable->runTask(task, num_total_tasks);
            }
        });
    }

    // run() is synchronous: wait for every worker before returning.
    for (auto& th : threads) {
        th.join();
    }
}


// Part B stub: async launches are not required for this system.
TaskID TaskSystemParallelSpawn::runAsyncWithDeps(IRunnable* runnable, int num_total_tasks,
                                                 const std::vector<TaskID>& deps) {
    // You do not need to implement this method.
    return 0;
}

// Part B stub: nothing to wait for since run() is synchronous.
void TaskSystemParallelSpawn::sync() {
    // You do not need to implement this method.
    return;
}

/*
 * ================================================================
 * Parallel Thread Pool Spinning Task System Implementation
 * ================================================================
 */

// Human-readable label used by the test harness to identify this system.
const char* TaskSystemParallelThreadPoolSpinning::name() {
    return "Parallel + Thread Pool + Spin";
}

TaskSystemParallelThreadPoolSpinning::TaskSystemParallelThreadPoolSpinning(int num_threads): ITaskSystem(num_threads) {
    // Construct a spinning thread pool: each worker loops forever, polling
    // the shared task queue under queue_mutex and executing whatever it pops.
    // Workers never sleep — an empty queue just means the loop spins around.

    this->num_threads = num_threads;

    // Cleared here, raised by the destructor. Workers exit only once stop is
    // set AND the queue has been fully drained.
    this->stop = false;

    for(int i = 0; i < num_threads; i ++)
    {
        workers.emplace_back(
            [this, i]() -> void
            {
                for(;;)
                {
                    std::function<void()> task;

                    {
                        // printf("Thread %d: waiting for task\n", i);
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        // Shutdown path: leave only when no queued work remains.
                        if(this->stop && this->task_queue.empty()) 
                        {
                            // printf("Thread %d: stopping\n", i);
                            return;
                        }
                        if(this->task_queue.empty() == false)
                        {
                            // printf("Thread %d: got task\n", i);
                            task = this->task_queue.front();
                            this->task_queue.pop();
                        }

                        // this scope end -> the lock will call lock.unlock();
                        // printf("Thread %d: lock released and Spining\n", i);
                    }

                    // Execute outside the lock so other workers can dequeue
                    // concurrently; task is empty when the queue was empty.
                    try {
                        if (task) {
                            task();
                        }
                    } catch (const std::exception& e) {
                        std::cerr << "Task threw an exception: " << e.what() << std::endl;
                    }
                }
            }
        );
    }
}

TaskSystemParallelThreadPoolSpinning::~TaskSystemParallelThreadPoolSpinning()
{
    // Signal shutdown under the queue lock, then wait for every worker to
    // drain the remaining queued work and exit its spin loop.
    {
        std::lock_guard<std::mutex> guard(this->queue_mutex);
        this->stop = true;
    }

    for(std::size_t w = 0; w < this->workers.size(); ++w)
    {
        this->workers[w].join();
    }
}

// Submit an arbitrary callable (plus bound arguments) to the pool and return
// a future for its result. Throws std::runtime_error if the pool is stopped.
template<typename F, typename ... Args>
auto TaskSystemParallelThreadPoolSpinning::enqueue(F&& f, Args&& ... args)
-> std::future<typename std::result_of<typename std::decay<F>::type(typename std::decay<Args>::type...)>::type>
{
    // Use the same decayed signature as the trailing return type: the old
    // std::result_of<F(Args...)> could name a different type when F/Args are
    // deduced as references, making return_type disagree with the signature.
    using return_type = typename std::result_of<
        typename std::decay<F>::type(typename std::decay<Args>::type...)>::type;

    // packaged_task gives the caller a future; shared_ptr lets the copyable
    // std::function stored in the queue own the move-only packaged_task.
    auto task = std::make_shared<std::packaged_task<return_type()>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...)
    );

    std::future<return_type> res = task->get_future();

    {
        std::unique_lock<std::mutex> lock(this->queue_mutex);
        if(this->stop) throw std::runtime_error("enqueue on stopped thread pool");

        // make_shared never returns null, so the old `if(task)` guard was
        // dead code; enqueue unconditionally.
        this->task_queue.emplace(
            [task]()
            {
                (*task)();
            }
        );
    }

    return res;
}

// Execute the half-open task range [start, end) on the calling thread.
void run_task(IRunnable* runnable, int start, int end, int num_total_tasks)
{
    int task_id = start;
    while(task_id < end)
    {
        runnable->runTask(task_id, num_total_tasks);
        ++task_id;
    }
}

// void TaskSystemParallelThreadPoolSpinning::run(IRunnable* runnable, int num_total_tasks) {


//     //
//     // TODO: CS149 students will modify the implementation of this
//     // method in Part A.  The implementation provided below runs all
//     // tasks sequentially on the calling thread.
//     //

//     // for (int i = 0; i < num_total_tasks; i++) {
//     //     runnable->runTask(i, num_total_tasks);
//     // }

//     // Static Assignment 
//     int num_tasks_per_thread = num_total_tasks / num_threads;
//     int idx = 0;
//     std::vector<bool> complete(num_threads, false);
//     std::vector<std::future<void>> futures;
//     while(idx < this->num_threads)
//     {
//         int start = idx * num_tasks_per_thread;
//         int end = (idx == this->num_threads - 1) ? num_total_tasks : (idx + 1) * num_tasks_per_thread;

//         {
//             std::unique_lock<std::mutex> lock(this->queue_mutex);
//             enqueue(
//                 [&runnable, start, end, idx, &num_total_tasks, &complete]() -> void
//                 {
//                     for(int i = start; i < end; i ++)
//                     {
//                         runnable->runTask(i, num_total_tasks);
//                     }
//                     complete[idx] = true;
//                 }
//             );
//         }
//         // {
//         //     // std::unique_lock<std::mutex> lock(this->queue_mutex);
//         //     enqueue(run_task, runnable, start, end, num_total_tasks);
//         // }

//         idx ++;
//     }

//     bool done = false;
//     std::vector<bool> complete_copy(num_threads, true);
//     while(true)
//     {
//         if(complete == complete_copy)
//         {
//             done = true;
//             break;
//         }
//     }
//     if(done)
//     {
//         {
//             std::unique_lock<std::mutex> lock(this->queue_mutex);
//             this->stop = true;
//         }
//     }

// }

void TaskSystemParallelThreadPoolSpinning::run(IRunnable* runnable, int num_total_tasks) {
    // Split [0, num_total_tasks) into num_threads contiguous chunks with the
    // balanced formula idx*N/T, so chunk sizes differ by at most one. The old
    // N/T truncation dumped the entire remainder onto the last chunk and,
    // whenever N < T, serialized every task into that single chunk.
    std::vector<std::future<void>> futures;
    futures.reserve(num_threads);

    for (int idx = 0; idx < num_threads; ++idx) {
        int start = (int)((long long)idx * num_total_tasks / num_threads);
        int end = (int)((long long)(idx + 1) * num_total_tasks / num_threads);
        if (start == end) continue;  // empty chunk (happens when N < T)

        futures.emplace_back(enqueue([runnable, start, end, num_total_tasks]() {
            for (int i = start; i < end; i++) {
                runnable->runTask(i, num_total_tasks);
            }
        }));
    }

    // run() is synchronous: block until every chunk has completed.
    for (auto& future : futures) {
        future.get();
    }
}


// Part B stub: async launches are not required for this system.
TaskID TaskSystemParallelThreadPoolSpinning::runAsyncWithDeps(IRunnable* runnable, int num_total_tasks,
                                                              const std::vector<TaskID>& deps) {
    // You do not need to implement this method.
    return 0;
}

// Part B stub: nothing to wait for since run() is synchronous.
void TaskSystemParallelThreadPoolSpinning::sync() {
    // You do not need to implement this method.
    return;
}

/*
 * ================================================================
 * Parallel Thread Pool Sleeping Task System Implementation
 * ================================================================
 */

// Human-readable label used by the test harness to identify this system.
const char* TaskSystemParallelThreadPoolSleeping::name() {
    return "Parallel + Thread Pool + Sleep";
}

TaskSystemParallelThreadPoolSleeping::TaskSystemParallelThreadPoolSleeping(int num_threads): ITaskSystem(num_threads) {
    // Construct a sleeping thread pool: instead of spinning, workers block on
    // a condition variable while the queue is empty and are woken by
    // enqueue() (notify_one) or by the destructor (notify_all).

    this->num_threads = num_threads;
    this->stop = false;

    for(int i = 0; i < num_threads; i ++)
    {
        this->workers.emplace_back(
            [this]() -> void
            {
                while(true)
                {
                    std::function<void()> task;

                    {
                        std::unique_lock<std::mutex> lock(this->queue_mutex);
                        // Sleep until there is work or shutdown is requested;
                        // the predicate also guards against spurious wakeups.
                        this->condition.wait(lock,
                        [this]() -> bool
                        {
                            return this->stop || !this->task_queue.empty();
                        }
                        );
                        // On shutdown, exit only after the queue is drained.
                        if(this->stop && this->task_queue.empty() == true) return;
                        task = std::move(this->task_queue.front());
                        this->task_queue.pop();
                    }

                    // Execute outside the lock so other workers can proceed.
                    task();
                }
            }
        );
    }
}

TaskSystemParallelThreadPoolSleeping::~TaskSystemParallelThreadPoolSleeping() {
    // Raise the stop flag under the queue lock so a sleeping worker cannot
    // miss it between checking its predicate and blocking, wake everyone,
    // then join each worker as it drains the queue and exits.
    {
        std::lock_guard<std::mutex> guard(this->queue_mutex);
        this->stop = true;
    }
    this->condition.notify_all();

    for(std::size_t w = 0; w < this->workers.size(); ++w)
    {
        this->workers[w].join();
    }
}

// Submit an arbitrary callable (plus bound arguments) to the pool, wake one
// sleeping worker, and return a future for the result. Throws
// std::runtime_error if the pool has been stopped.
template<typename F, typename ... Args>
auto TaskSystemParallelThreadPoolSleeping::enqueue(F&& f, Args&& ... args)
-> std::future<typename std::result_of<typename std::decay<F>::type(typename std::decay<Args>::type...)>::type>
{
    using return_type = typename std::result_of<
    typename std::decay<F>::type(typename std::decay<Args>::type ...)>::type ;

    // packaged_task gives the caller a future; shared_ptr lets the copyable
    // std::function stored in the queue own the move-only packaged_task.
    auto task = std::make_shared<std::packaged_task<return_type()>>(
        std::bind(std::forward<F>(f), std::forward<Args>(args)...));

    std::future<return_type> res = task->get_future();

    {
        std::unique_lock<std::mutex> lock(this->queue_mutex);
        if(this->stop) throw std::runtime_error("enqueue on stopped thread pool");

        // make_shared never returns null, so the old `if(task)` guard was
        // dead code; enqueue unconditionally.
        this->task_queue.emplace(
            [task]()
            {
                (*task)();
            }
        );
    }
    // Notify after releasing the lock so the woken worker does not
    // immediately block on a still-held mutex.
    this->condition.notify_one();

    return res;
}

void TaskSystemParallelThreadPoolSleeping::run(IRunnable* runnable, int num_total_tasks) {
    // Split [0, num_total_tasks) into num_threads contiguous, balanced chunks
    // and submit one pool task per chunk, then block until all finish.
    std::vector<std::future<void>> futures;
    futures.reserve(this->num_threads);

    for(int t = 0; t < this->num_threads; t ++)
    {
        int start = t * num_total_tasks / this->num_threads;
        int end = (t == this->num_threads - 1) ? num_total_tasks : (t + 1) * num_total_tasks / this->num_threads;
        // Capture by value: the old by-reference capture of the runnable
        // pointer and the int parameter added pointless indirection and would
        // dangle if the futures were ever waited on after run() returned.
        futures.push_back(
            enqueue([runnable, start, end, num_total_tasks]()
            {
                for(int i = start; i < end; i ++)
                {
                    runnable->runTask(i, num_total_tasks);
                }
            })
        );
    }

    // run() is synchronous: block until every chunk has completed.
    for(auto& future : futures)
    {
        future.get();
    }
}

// Walk the waiting queue and hand every launch whose dependencies are all
// complete to the thread pool; each launched task re-invokes this on
// completion so newly unblocked launches get scheduled.
// NOTE(review): lock acquisition order here is waiting_queue_mutex ->
// completed_mutex -> futures_mutex -> queue_mutex (inside enqueue); confirm
// no other code path takes these in a different order, or this can deadlock.
void TaskSystemParallelThreadPoolSleeping::check_waiting_queue()
{
    std::unique_lock<std::mutex> lock(this->waiting_queue_mutex);
    for (auto it = this->waiting_queue.begin(); it != this->waiting_queue.end();)
    {
        auto id = it->first;
        // Copied by value: the entry is erased below but the closure still
        // needs the TaskInfo.
        auto task_info = it->second;

        bool can_run = true;
        // Launch 0 is treated as dependency-free and always runnable —
        // presumably the first launch never carries deps; verify with callers.
        if (id != 0) {
            auto& depend = task_info.dependency;
            std::unique_lock<std::mutex> completed_lock(this->completed_mutex);
            for (auto& dep : depend) {
                // operator[] default-inserts false for ids never recorded,
                // which reads as "not completed yet".
                can_run = can_run && completed[dep];
            }
        }

        if (can_run) {
            std::unique_lock<std::mutex> future_lock(this->futures_mutex);
            futures.push_back(
            enqueue([this, id, task_info]() {
                // Mark in-flight, run all sub-tasks, then mark complete.
                std::unique_lock<std::mutex> lock(this->completed_mutex);
                completed[id] = false;
                lock.unlock(); 
                
                for (int i = 0; i < task_info.num_total_tasks; i++)
                    task_info.runnable->runTask(i, task_info.num_total_tasks);

                lock.lock(); 
                completed[id] = true;
                lock.unlock();

                // Finishing this launch may have unblocked waiting launches.
                check_waiting_queue();
            }));
            future_lock.unlock();


            it = this->waiting_queue.erase(it);
        } else {
            ++it; 
        }
    }
}

// Register a bulk launch that may only start once every launch in `deps` has
// completed, and return its TaskID for use in later dependency lists.
TaskID TaskSystemParallelThreadPoolSleeping::runAsyncWithDeps(IRunnable* runnable, int num_total_tasks,
                                                    const std::vector<TaskID>& deps) {

    // BUGFIX: return the SAME id the launch is registered under. The old code
    // stored the task under this->id but returned ++this->id, so callers'
    // dependency lists referred to the wrong (following) launch and
    // completed[dep] could never match.
    // NOTE(review): this->id is read-modify-written without a lock — assumes
    // runAsyncWithDeps is only called from a single thread; confirm.
    TaskID cur = this->id;
    this->id = cur + 1;

    auto task = TaskInfo(cur, runnable, num_total_tasks, deps);

    {
        std::unique_lock<std::mutex> lock(this->waiting_queue_mutex);
        // emplace_back with plain arguments; the old explicit
        // make_pair<TaskID, TaskInfo> template arguments defeated deduction.
        waiting_queue.emplace_back(cur, std::move(task));
    }

    return cur;
}

// Block until every launch submitted via runAsyncWithDeps has completed.
void TaskSystemParallelThreadPoolSleeping::sync() {
    // Repeatedly promote ready launches and drain their futures until no work
    // remains. Futures are swapped out of the shared vector before get() so
    // each future is consumed exactly once — the old code called get() twice
    // on the same futures (which throws std::future_error) and busy-looped on
    // waiting_queue while calling unlock() on an already-unlocked lock (UB).
    while (true) {
        // Move any launches whose dependencies are satisfied onto the pool.
        check_waiting_queue();

        // Take ownership of the currently outstanding futures and wait on them.
        std::vector<std::future<void>> pending;
        {
            std::unique_lock<std::mutex> lock(this->futures_mutex);
            pending.swap(this->futures);
        }
        for (auto& future : pending) {
            future.get();
        }

        // Done once this pass waited on nothing and no work is queued or
        // in flight. Lock order matches check_waiting_queue (waiting -> futures).
        std::unique_lock<std::mutex> wq_lock(this->waiting_queue_mutex);
        std::unique_lock<std::mutex> f_lock(this->futures_mutex);
        if (pending.empty() && this->waiting_queue.empty() && this->futures.empty()) {
            return;
        }
    }
}
