/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __AOE_RUNTIME_KB_COMMON_THREAD_POOL_H__
#define __AOE_RUNTIME_KB_COMMON_THREAD_POOL_H__
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
#include "aoe/runtime_kb/common/kb_common.h"
#include "aoe/runtime_kb/common/utils/system_utils.h"

namespace RuntimeKb {
// Hard upper bound on worker threads, regardless of detected CPU core count.
constexpr int32_t MAX_THREAD_POOL_NUM           = 64;
// Fallback pool size used when the CPU core count cannot be determined.
constexpr int32_t DEFAULT_THREAD_POOL_NUM       = 8;

/**
 * A lazily-growing, process-wide thread pool (Meyers singleton).
 *
 * Worker threads are spawned on demand in Submit() — one at a time, only
 * when no worker is idle — up to size_ threads. The pool is torn down at
 * program exit: the destructor signals stop and joins all workers after
 * the queued tasks have been drained.
 */
class ThreadPool
{
public:
    // Thread-safe lazy construction (C++11 magic static).
    static ThreadPool *GetInstance()
    {
        static ThreadPool instance;
        return &instance;
    }

    /**
     * Schedules func(args...) for asynchronous execution.
     *
     * @return a future holding the task's result. If the task wrapper could
     *         not be allocated, an *invalid* (default-constructed) future is
     *         returned — callers must check future.valid() before waiting.
     */
    template<typename F, typename... Args>
    auto Submit(F &&func, Args &&...args) -> std::future<decltype(func(args...))>
    {
        using RetType = decltype(func(args...));
        // workerMtx_ serializes submissions so the idle_/running_ checks and
        // the worker-spawn decision below are not interleaved.
        std::lock_guard<std::mutex> lk(workerMtx_);
        std::function<RetType()> f = std::bind(std::forward<F>(func), std::forward<Args>(args)...);
        // Wrap the move-only packaged_task in a shared pointer so the
        // type-erased std::function stored in the queue stays copyable.
        auto task = MakeShared<std::packaged_task<RetType()>>(f);
        if (task == nullptr) {
            // Allocation failure: hand back an invalid future.
            return std::future<RetType>();
        }
        std::future<RetType> future = task->get_future();
        // Grow the pool lazily: spawn a worker only when none is idle and
        // the configured limit has not been reached yet.
        if (idle_.load() == 0 && running_.load() < size_) {
            running_++;
            workers_.emplace_back([this] {
                WorkerLoop();
            });
        }
        if (idle_.load() > 0) {
            idle_--;
        }

        {
            std::unique_lock<std::mutex> lock(queTaskMtx_);
            taskQueue_.emplace([task]() { (*task)(); });
        }
        cvRun_.notify_one();
        return future;
    }

    // Maximum number of worker threads this pool may spawn.
    int32_t GetSize()
    {
        return size_;
    }

private:
    ThreadPool() = default;
    ThreadPool(const ThreadPool &) = delete;
    ThreadPool &operator=(const ThreadPool &) = delete;

    // Worker thread body: waits for a task, runs it, and repeats until the
    // pool is stopped AND the queue has been drained.
    void WorkerLoop()
    {
        for (;;) {
            std::function<void()> doTask;
            {
                std::unique_lock<std::mutex> lock(this->queTaskMtx_);
                this->cvRun_.wait(lock,
                    [this] { return this->stop_.load() || !this->taskQueue_.empty(); });
                if (this->stop_.load() && this->taskQueue_.empty()) {
                    return;
                }
                doTask = std::move(this->taskQueue_.front());
                this->taskQueue_.pop();
            }
            // Run the task outside the queue lock so other workers proceed.
            doTask();
            this->idle_++;
        }
    }

    // Joins every spawned worker; safe to call with no workers.
    void Join()
    {
        for (auto &worker : workers_) {
            if (worker.joinable()) {
                worker.join();
            }
        }
    }

    // Signals shutdown and waits for all workers to exit.
    void Stop()
    {
        if (!stop_.load()) {
            {
                // Set the flag under the queue mutex so a worker cannot miss
                // the wake-up between its predicate check and wait().
                std::unique_lock<std::mutex> lock(this->queTaskMtx_);
                stop_.store(true);
            }
            cvRun_.notify_all();
            Join();
        }
    }

    ~ThreadPool()
    {
        Stop();
    }

    // Explicitly zero-initialize: a default-constructed std::atomic is
    // uninitialized prior to C++20, so reading it would be undefined.
    std::atomic<bool> stop_{false};       // set once at shutdown
    std::atomic<int32_t> running_{0};     // workers spawned so far
    std::atomic<int32_t> idle_{0};        // workers currently awaiting work
    // Pool capacity: one worker per CPU core (falling back to the default
    // when core detection fails), capped at MAX_THREAD_POOL_NUM.
    int32_t size_ = std::min(
        SystemUtils::GetCpuCoreNum() > 0 ? SystemUtils::GetCpuCoreNum() : DEFAULT_THREAD_POOL_NUM,
        MAX_THREAD_POOL_NUM);
    std::mutex queTaskMtx_;               // guards taskQueue_
    std::mutex workerMtx_;                // serializes Submit() bookkeeping
    std::queue<std::function<void()>> taskQueue_;
    std::vector<std::thread> workers_;
    std::condition_variable cvRun_;
};
} // RuntimeKb
#endif // __AOE_RUNTIME_KB_COMMON_THREAD_POOL_H__