/*
* Author: Dzlua
* QQ    : 505544956
* time  : 2017/08/06
*/
#pragma once

#include <mutex>
#include <queue>
#include <vector>
#include <atomic>
#include <memory>
#include <thread>
#include <future>
#include <stdexcept>
#include <functional>
#include <condition_variable>

namespace dzlua {

namespace thread {

/*
*   Priority levels for PriorityPool tasks.
*   high-priority tasks run first, low-priority tasks run last.
*/
enum class Priority : uint8_t {
    normal, high, low
};

/*
*   Translate a Priority to its numeric weight as uint8_t.
*   Used by PriorityCompare to order the priority queue
*   (larger weight == runs earlier).
*
*   Declared inline: this header may be included from multiple
*   translation units, and a non-inline definition would violate
*   the One Definition Rule (multiple-definition link error).
*   Takes the enum by value (it is a single byte) so rvalues and
*   const lvalues are accepted too.
*/
inline uint8_t ts_priority(Priority priority) {
    switch (priority) {
        case Priority::normal: return 128;
        case Priority::high:   return 255;
        case Priority::low:    return 0;
    }
    // unreachable for valid enumerators; keep a sane default anyway.
    return 128;
}

/*
*   the normal pool.
*/
/*
*   the normal pool: a fixed set of worker threads consuming
*   tasks from a FIFO queue.
*/
class Pool {
public:
    /*
    *   thread_num: the number of worker threads to create.
    */
    explicit Pool(size_t thread_num) : stop_(false) {
        // create and add threads to workers_.
        for (size_t i = 0; i < thread_num; ++i) {
            workers_.emplace_back( [this] {
                    for(;;) {
                        std::function<void()> task;

                        {
                            /*
                            *   lock and wait for a notification.
                            *   only stop_ or a pending task wakes us up.
                            */
                            std::unique_lock<std::mutex> lock(this->queue_mutex_);
                            this->condition_.wait(lock, [this] {
                                    return this->stop_ || !this->tasks_.empty();
                                } );

                            /*
                            *   exit only once stopped AND the queue is
                            *   drained, so already-posted tasks still run.
                            */
                            if(this->stop_ && this->tasks_.empty())
                                return;

                            // take the next task off the queue.
                            task = std::move(this->tasks_.front());
                            this->tasks_.pop();
                        }

                        // run the task outside the lock.
                        task();
                    }
                } );
        }
    }

    /*
    *   request stop, wake every worker and join them.
    *   tasks already queued are still executed before workers exit.
    */
    ~Pool() {
        /*
        *   set stop_ while holding the queue mutex. setting it unlocked
        *   races with a worker that has just evaluated the wait predicate
        *   (seeing stop_ == false) but has not yet blocked: the notify
        *   below would be lost and the worker would sleep forever.
        */
        {
            std::unique_lock<std::mutex> lock(queue_mutex_);
            stop_ = true;
        }
        condition_.notify_all();
        for(std::thread &worker: workers_)
            worker.join();
    }

    /*
    *   post a task to the tasks queue; a worker runs it when one is free.
    *   fun/args: callable and its arguments (captured via std::bind).
    *   returns a future holding the callable's result.
    *   throws std::runtime_error if the pool is already stopped.
    */
    template<class Fun, class... Args>
    auto PostTask(Fun&& fun, Args&&... args)
            -> std::future<typename std::result_of<Fun(Args...)>::type> {

        /*
        *   wrap the call in a packaged_task so a future can be handed
        *   back. the shared_ptr is needed because std::function requires
        *   a copyable callable and packaged_task is move-only.
        */
        using return_type = typename std::result_of<Fun(Args...)>::type;
        auto task = std::make_shared< std::packaged_task<return_type()> >(
                std::bind(std::forward<Fun>(fun), std::forward<Args>(args)...)
            );

        // grab the future before the task is handed to a worker.
        std::future<return_type> res = task->get_future();
        {
            std::unique_lock<std::mutex> lock(queue_mutex_);

            // don't allow enqueueing after stopping the pool
            if(stop_)
                throw std::runtime_error("PostTask on stopped Pool.");

            // add task to tasks_.
            tasks_.emplace([task]() {
                    (*task)();
                } );
        }

        // wake up one thread to run the task.
        condition_.notify_one();
        return res;
    }
private:
    // work threads
    std::vector< std::thread > workers_;
    // tasks queue
    std::queue< std::function<void()> > tasks_;
    // protects tasks_ and the stop_ transition observed by the condvar
    std::mutex queue_mutex_;
    // signals "task available" or "stopping"
    std::condition_variable condition_;
    // is stop
    std::atomic<bool> stop_;
}; // end class Pool

//-----------------------//
/*
*   you post a task use Priority.
*/
/*
*   like Pool, but each task is posted with a Priority:
*   higher-priority tasks are dequeued and run first.
*/
class PriorityPool {
public:
    /*
    *   thread_num: the number of worker threads to create.
    */
    explicit PriorityPool(size_t thread_num) : stop_(false) {
        // create and add threads to workers_.
        for (size_t i = 0; i < thread_num; ++i) {
            // add it to vector.
            workers_.emplace_back( [this] {
                    for(;;) {
                        TaskPtr taskptr = nullptr;

                        {
                            /*
                            *   lock and wait for a notification.
                            *   only stop_ or a pending task wakes us up.
                            */
                            std::unique_lock<std::mutex> lock(this->queue_mutex_);
                            this->condition_.wait(lock, [this] {
                                    return this->stop_ || !this->tasks_.empty();
                                } );

                            /*
                            *   exit only once stopped AND the queue is
                            *   drained, so already-posted tasks still run.
                            */
                            if(this->stop_ && this->tasks_.empty())
                                return;

                            // take the highest-priority task.
                            taskptr = this->tasks_.top();
                            this->tasks_.pop();
                        }

                        // run the task (outside the lock) if we got one.
                        if (taskptr) taskptr->task();
                    }
                } );
        }
    }

    /*
    *   request stop, wake every worker and join them.
    *   tasks already queued are still executed before workers exit.
    */
    ~PriorityPool() {
        /*
        *   set stop_ while holding the queue mutex. setting it unlocked
        *   races with a worker that has just evaluated the wait predicate
        *   (seeing stop_ == false) but has not yet blocked: the notify
        *   below would be lost and the worker would sleep forever.
        */
        {
            std::unique_lock<std::mutex> lock(queue_mutex_);
            stop_ = true;
        }
        condition_.notify_all();
        for(std::thread &worker: workers_)
            worker.join();
    }

    /*
    *   post a task with the given priority; it runs before any queued
    *   task of lower priority. returns a future holding the result.
    *   throws std::runtime_error if the pool is already stopped.
    */
    template<class Fun, class... Args>
    auto PostTask(Priority priority, Fun&& fun, Args&&... args)
            -> std::future<typename std::result_of<Fun(Args...)>::type> {

        /*
        *   wrap the call in a packaged_task so a future can be handed
        *   back. shared_ptr is needed because std::function requires a
        *   copyable callable and packaged_task is move-only.
        */
        using return_type = typename std::result_of<Fun(Args...)>::type;

        auto task = std::make_shared< std::packaged_task<return_type()> >(
                std::bind(std::forward<Fun>(fun), std::forward<Args>(args)...)
            );

        // grab the future before the task is handed to a worker.
        std::future<return_type> res = task->get_future();
        {
            /*
            *   make and init a Task carrying the priority tag.
            */
            auto taskptr = std::make_shared<Task>();
            taskptr->priority = priority;
            taskptr->task = [task]() { (*task)(); };

            std::unique_lock<std::mutex> lock(queue_mutex_);

            // don't allow enqueueing after stopping the pool
            if(stop_)
                throw std::runtime_error("PostTask on stopped Pool");

            // add task to tasks_.
            tasks_.emplace(taskptr);
        }

        // wake up one thread to run the task.
        condition_.notify_one();
        return res;
    }
private:
    /*
    *   task entry stored in the priority_queue.
    */
    struct Task {
        Priority priority;
        std::function<void()> task;
    };
    typedef std::shared_ptr<Task> TaskPtr;

    /*
    *   comparator for the priority_queue: orders tasks so that the
    *   largest ts_priority() weight (highest Priority) is on top.
    */
    struct PriorityCompare {
        // const: a comparator should not mutate state when invoked.
        bool operator()(const TaskPtr &t1, const TaskPtr &t2) const {
            return ts_priority(t1->priority) < ts_priority(t2->priority);
        }
    };

    // work threads
    std::vector< std::thread > workers_;
    // tasks queue, ordered by priority
    std::priority_queue<TaskPtr, std::vector<TaskPtr>, PriorityCompare> tasks_;
    // protects tasks_ and the stop_ transition observed by the condvar
    std::mutex queue_mutex_;
    // signals "task available" or "stopping"
    std::condition_variable condition_;
    // is stop
    std::atomic<bool> stop_;
}; // end class PriorityPool

//----------------------//

/*
*   How DynamicPool manages its threads:
*   1. On construction the pool creates min_num idle threads.
*   2. When a task is posted, an idle thread takes it over and runs it;
*      as more tasks arrive, the pool's threads process them one by one.
*   3. When tasks keep accumulating and the existing threads are no longer
*      enough, the pool does not create a new thread immediately. It first
*      waits about `timeout` milliseconds to see whether a running thread
*      finishes and can take the pending task, which avoids the cost of
*      creating a thread unnecessarily. Only if no thread becomes free
*      within that window is a new thread created for the new task.
*   4. As the backlog keeps growing past that point, the pool keeps adding
*      threads until the thread count reaches the upper limit.
*   5. Once the thread count hits the upper limit it stops growing even if
*      tasks keep arriving. For example, posting 50 tasks with an upper
*      limit of 20 means 20 tasks run inside the pool while the other 30
*      wait in the queue. When a thread finishes a task it is not
*      terminated; it immediately takes the next task from the waiting
*      queue, saving thread creation/destruction time.
*   6. As tasks complete, queued tasks move into the pool and the number of
*      pending tasks shrinks, but the thread count stays constant at the
*      upper limit (e.g. 20).
*   7. Eventually the number of tasks drops below the upper limit. Idle
*      threads are then released after about 2 minutes and their resources
*      reclaimed, gradually reducing the thread count toward the lower
*      limit.
*   8. When the number of tasks falls to the lower limit, the thread count
*      stays at the lower limit: some threads execute tasks while the rest
*      sit idle.
*   9. When all tasks are done, the pool returns to its initial state:
*      min_num idle threads.
*/
class DynamicPool {
public:
    /*
    *   min_num_threads: minimum thread count; that many threads are created on init.
    *   max_num_threads: maximum thread count; no thread is created once it is reached.
    *   milliseconds_create: if the pool is below max_num_threads and no thread is
    *           free for a task, wait this many milliseconds, then create a thread.
    *   milliseconds_destroy: if some threads are idle and the pool holds more than
    *           min_num_threads threads, destroy idle threads after this many
    *           milliseconds (never dropping below min_num_threads).
    *
    *   NOTE(review): the timed grow/shrink described above is not wired up yet —
    *   createThread() runs only here, releaseThread() is never called, and the
    *   two duration members are stored but unused, so the pool currently stays
    *   at min_num_threads. TODO: implement the create/destroy timers.
    */
    explicit DynamicPool(size_t min_num_threads, size_t max_num_threads
        , size_t milliseconds_create = 500, size_t milliseconds_destroy = 120000)
                : min_num_(min_num_threads), max_num_(max_num_threads)
                , milliseconds_create_(std::chrono::milliseconds(milliseconds_create))
                , milliseconds_destroy_( std::chrono::milliseconds(milliseconds_destroy))
                , stop_(false), release_thread_(false) {
        // init to min_num_ threads.
        this->createThread(min_num_);

        // TODO: after milliseconds_destroy_, call releaseThread() if possible.
    }

    /*
    *   request stop, wake every worker and join the joinable ones.
    *   tasks already queued are still executed before workers exit.
    */
    ~DynamicPool() {
        /*
        *   set stop_ while holding task_mutex_. setting it unlocked races
        *   with a worker that has just evaluated the wait predicate
        *   (seeing stop_ == false) but has not yet blocked: the notify
        *   below would be lost and the worker would sleep forever.
        */
        {
            std::unique_lock<std::mutex> lock(task_mutex_);
            stop_ = true;
        }
        condition_.notify_all();
        for(const WorkerPtr &worker: workers_) {
            /*
            *   releaseThread() detaches the threads it retires; calling
            *   join() on a non-joinable thread throws std::system_error
            *   (terminating the program from a destructor), so only join
            *   threads that are still joinable.
            */
            if (worker->td.joinable())
                worker->td.join();
        }
    }

    /*
    *   post a task to the tasks queue; a worker runs it when one is free.
    *   fun/args: callable and its arguments (captured via std::bind).
    *   returns a future holding the callable's result.
    *   throws std::runtime_error if the pool is already stopped.
    */
    template<class Fun, class... Args>
    auto PostTask(Fun&& fun, Args&&... args)
        -> std::future<typename std::result_of<Fun(Args...)>::type> {

        /*
        *   wrap the call in a packaged_task so a future can be handed
        *   back. shared_ptr is needed because std::function requires a
        *   copyable callable and packaged_task is move-only.
        */
        using return_type = typename std::result_of<Fun(Args...)>::type;
        auto task = std::make_shared< std::packaged_task<return_type()> >(
                std::bind(std::forward<Fun>(fun), std::forward<Args>(args)...)
            );

        // grab the future before the task is handed to a worker.
        std::future<return_type> res = task->get_future();
        {

            std::unique_lock<std::mutex> lock(queue_mutex_);

            // don't allow enqueueing after stopping the DynamicPool
            if(stop_)
                throw std::runtime_error("PostTask on stopped DynamicPool.");

            // add task to tasks_.
            tasks_.emplace([task]() {
                    (*task)();
                } );
        }

        // wake up one thread to run the task.
        condition_.notify_one();

        // TODO: after milliseconds_create_, call createThread() if needed.

        return res;
    }
private:
    /*
    *   create up to thread_num worker threads, clamped so the total
    *   never exceeds max_num_. does nothing once stop_ is set.
    */
    void createThread(size_t thread_num) {
        // if stop, should not add thread.
        if (stop_) return;

        // lock workers_.
        std::lock_guard<std::mutex> lock(worker_mutex_);

        /*
        *   calculate how many threads may still be created. the inner
        *   ternary guards the unsigned subtraction: if workers_.size()
        *   already reached/exceeded max_num_ (possible when
        *   min_num_ > max_num_), create none instead of underflowing.
        */
        size_t current = workers_.size();
        thread_num = current + thread_num > max_num_
                ? (max_num_ > current ? max_num_ - current : 0)
                : thread_num;

        // create and add threads to workers_.
        for (size_t i = 0; i < thread_num; ++i) {
            // make and init a worker
            auto worker = std::make_shared<Worker>();
            worker->state = eState::wait;
            worker->td = std::thread([this](Worker *worker) {
                    for(;;) {
                        std::function<void()> task;

                        {
                            /*
                            *   lock and wait for a notification. only
                            *   stop_, (release_thread_ and eState::close),
                            *   or a pending task wakes us up.
                            */
                            std::unique_lock<std::mutex> lock(this->task_mutex_);
                            this->condition_.wait(lock, [this, worker] {
                                    return this->stop_
                                        || !this->tasks_.empty()
                                        || (this->release_thread_
                                            && worker->state == eState::close);
                                } );

                            /*
                            *   thread-release path: exit without popping a
                            *   task when this worker was marked for close.
                            */
                            if (this->release_thread_
                                    && worker->state == eState::close) {
                                return;
                            }

                            /*
                            *   exit only once stopped AND the queue is
                            *   drained, so already-posted tasks still run.
                            */
                            if(this->stop_ && this->tasks_.empty())
                                return;

                            // take the next task off the queue.
                            task = std::move(this->tasks_.front());
                            this->tasks_.pop();
                        }

                        // mark thread as running (state is atomic).
                        worker->state = eState::run;

                        // run the task outside the lock.
                        task();

                        // mark thread as waiting again.
                        worker->state = eState::wait;
                    }
                }, worker.get());

            // add it to vector.
            workers_.emplace_back(worker);
        }
    }

    /*
    *   release idle threads down toward min_num_ if possible:
    *   scan waiting workers, mark them for close, detach them and
    *   notify so they wake up and exit.
    */
    void releaseThread() {
        // lock workers_.
        std::lock_guard<std::mutex> lock(worker_mutex_);

        // calculate the count of threads eligible for release.
        size_t thread_num = 0;
        thread_num = workers_.size() > min_num_
                ? workers_.size() - min_num_
                : 0;
        if (0 == thread_num)
            return;

        /*
        *   release eState::wait threads: enter release_thread_ mode and
        *   mark each chosen worker eState::close.
        */
        size_t count = 0;
        release_thread_ = true;
        for(WorkerPtr worker: workers_) {
            // only release eState::wait thread.
            if (worker->state != eState::wait)
                continue;

            // mark it for close.
            worker->state = eState::close;
            // detach: the destructor skips non-joinable threads.
            worker->td.detach();

            // stop once enough threads have been marked.
            if (++count >= thread_num)
                break;
        }
        condition_.notify_all();
    }

private:
    /*
    *   worker thread state.
    *   wait : thread is idle, waiting for work.
    *   run  : thread is running a task.
    *   close: thread has been marked for release.
    */
    enum class eState : uint8_t {
        wait, run, close
    };

    /*
    *   a worker: the thread plus its current state.
    */
    struct Worker {
        std::thread td;
        std::atomic<eState> state;
    };
    typedef std::shared_ptr<Worker> WorkerPtr;

    // min number threads
    std::atomic<size_t> min_num_;
    // max number threads
    std::atomic<size_t> max_num_;
    // the time to wait before creating a thread (currently unused — see ctor note).
    std::chrono::milliseconds milliseconds_create_;
    // the time to wait before destroying a thread (currently unused — see ctor note).
    std::chrono::milliseconds milliseconds_destroy_;

    // work threads
    std::vector<WorkerPtr> workers_;
    // protects workers_
    std::mutex worker_mutex_;
    // tasks queue
    std::queue< std::function<void()> > tasks_;
    // protects tasks_ and the stop_ transition observed by the condvar
    std::mutex task_mutex_;
    // signals "task available", "stopping" or "release threads"
    std::condition_variable condition_;
    // is stop
    std::atomic<bool> stop_;
    // is release thread
    std::atomic<bool> release_thread_;

}; // end class DynamicPool

} // end namespace thread

} // end namespace dzlua