// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/future/thread_pool.h>
#include <algorithm>
#include <condition_variable>
#include <deque>
#include <list>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
#include <turbo/bootstrap/atfork.h>
#include <nebula/version.h>
#include <turbo/files/io_util.h>
#include <turbo/log/logging.h>
#include <nebula/future/mutex.h>
#include <turbo/utility/environment.h>
#include <nebula/trace/tracing_internal.h>

namespace nebula {
    namespace internal {

        struct ThreadPool::State {
            State() = default;

            // NOTE: in case locking becomes too expensive, we can investigate lock-free FIFOs
            // such as https://github.com/cameron314/concurrentqueue

            // Protects every member below; also held across fork via BeforeFork().
            std::mutex mutex_;
            // Wakes idle workers when a task is pushed, capacity shrinks, or
            // shutdown is requested.
            std::condition_variable cv_;
            // Signals shutdown() once the last worker has left workers_.
            std::condition_variable cv_shutdown_;
            // Signals wait_for_idle() when tasks_queued_or_running_ reaches 0.
            std::condition_variable cv_idle_;

            // Live worker threads.  std::list gives each worker a stable iterator
            // to its own entry (see launch_workers_unlocked / worker_loop).
            std::list<std::thread> workers_;
            // Trashcan for finished threads
            std::vector<std::thread> finished_workers_;
            std::deque<Task> pending_tasks_;

            // Desired number of threads
            int desired_capacity_ = 0;

            // Total number of tasks that are either queued or running
            int tasks_queued_or_running_ = 0;

            // Are we shutting down?
            bool please_shutdown_ = false;
            bool quick_shutdown_ = false;

            // Resources pinned by keep_alive(); released with the State.
            std::vector<std::shared_ptr<Resource>> kept_alive_resources_;

            // At-fork machinery

            // Hold the mutex across fork() so no worker forks mid-critical-section.
            void BeforeFork() { mutex_.lock(); }

            void ParentAfterFork() { mutex_.unlock(); }

            // In the child, worker threads do not survive fork(), and the mutex /
            // condition variables may be in an unusable state.  Re-run the default
            // constructor in place to get fresh synchronization primitives, then
            // restore the few plain-data fields that must carry over.
            void ChildAfterFork() {
                int desired_capacity = desired_capacity_;
                bool please_shutdown = please_shutdown_;
                bool quick_shutdown = quick_shutdown_;
                new(this) State;  // force-reinitialize, including synchronization primitives
                desired_capacity_ = desired_capacity;
                please_shutdown_ = please_shutdown;
                quick_shutdown_ = quick_shutdown;
            }

            // Keeps this State registered with the process-wide at-fork list.
            std::shared_ptr<turbo::AtForkHandler> atfork_handler_;
        };

        // The worker loop is an independent function so that it can keep running
        // after the ThreadPool is destroyed.
        // The worker loop is an independent function so that it can keep running
        // after the ThreadPool is destroyed.
        //
        // `state` keeps the pool state alive for as long as this worker runs;
        // `it` is this worker's own (stable) entry in state->workers_.
        static void worker_loop(std::shared_ptr<ThreadPool::State> state,
                               std::list<std::thread>::iterator it) {
            std::unique_lock<std::mutex> lock(state->mutex_);

            // Since we hold the lock, `it` now points to the correct thread object
            // (launch_workers_unlocked has exited)
                    DKCHECK_EQ(std::this_thread::get_id(), it->get_id());

            // If too many threads, we should secede from the pool
            const auto should_secede = [&]() -> bool {
                return state->workers_.size() > static_cast<size_t>(state->desired_capacity_);
            };

            while (true) {
                // By the time this thread is started, some tasks may have been pushed
                // or shutdown could even have been requested.  So we only wait on the
                // condition variable at the end of the loop.

                // execute pending tasks if any
                while (!state->pending_tasks_.empty() && !state->quick_shutdown_) {
                    // We check this opportunistically at each loop iteration since
                    // it releases the lock below.
                    if (should_secede()) {
                        break;
                    }

                            DKCHECK_GE(state->tasks_queued_or_running_, 0);
                    {
                        // Pop under the lock, run without it so other workers and
                        // producers can make progress while the task executes.
                        Task task = std::move(state->pending_tasks_.front());
                        state->pending_tasks_.pop_front();
                        StopToken *stop_token = &task.stop_token;
                        lock.unlock();
                        if (!stop_token->is_stop_requested()) {
                            std::move(task.callable)();
                        } else {
                            // Cancelled before it ran: report the stop status
                            // instead of executing the callable.
                            if (task.stop_callback) {
                                std::move(task.stop_callback)(stop_token->Poll());
                            }
                        }
                        TURBO_UNUSED(std::move(task));  // release resources before waiting for lock
                        lock.lock();
                    }
                    // Decrement under the lock; wake wait_for_idle() on the last task.
                    if (TURBO_UNLIKELY(--state->tasks_queued_or_running_ == 0)) {
                        state->cv_idle_.notify_all();
                    }
                }
                // Now either the queue is empty *or* a quick shutdown was requested
                if (state->please_shutdown_ || should_secede()) {
                    break;
                }
                // Wait for next wakeup
                state->cv_.wait(lock);
            }
                    DKCHECK_GE(state->tasks_queued_or_running_, 0);

            // We're done.  Move our thread object to the trashcan of finished
            // workers.  This has two motivations:
            // 1) the thread object doesn't get destroyed before this function finishes
            //    (but we could call thread::detach() instead)
            // 2) we can explicitly join() the trashcan threads to make sure all OS threads
            //    are exited before the ThreadPool is destroyed.  Otherwise subtle
            //    timing conditions can lead to false positives with Valgrind.
                    DKCHECK_EQ(std::this_thread::get_id(), it->get_id());
            state->finished_workers_.push_back(std::move(*it));
            state->workers_.erase(it);
            if (state->please_shutdown_) {
                // Notify the function waiting in shutdown().
                state->cv_shutdown_.notify_one();
            }
        }

        // Block the caller until no task is queued or running.
        void ThreadPool::wait_for_idle() {
            std::unique_lock<std::mutex> lock(state_->mutex_);
            while (state_->tasks_queued_or_running_ != 0) {
                state_->cv_idle_.wait(lock);
            }
        }

        // Constructs an empty pool (no workers until set_capacity() is called)
        // and registers fork handlers so the pool's mutex is held across fork()
        // and re-initialized in the child.
        ThreadPool::ThreadPool()
                : sp_state_(std::make_shared<ThreadPool::State>()),
                  state_(sp_state_.get()),
                  shutdown_on_destroy_(true) {
            // Eternal thread pools would produce false leak reports in the vector of
            // atfork handlers.
#if !(defined(_WIN32) || defined(ADDRESS_SANITIZER) || defined(NEBULA_VALGRIND))
            state_->atfork_handler_ = std::make_shared<turbo::AtForkHandler>(
                    /*before=*/
                    // Capture weakly: if the State is already gone at fork time,
                    // there is nothing to lock and the token stays empty.
                    [weak_state = std::weak_ptr<ThreadPool::State>(sp_state_)]() {
                        auto state = weak_state.lock();
                        if (state) {
                            state->BeforeFork();
                        }
                        return state;  // passed to after-forkers
                    },
                    /*parent_after=*/
                    // The token is the shared_ptr returned by the before-handler.
                    [](std::any token) {
                        auto state = std::any_cast<std::shared_ptr<ThreadPool::State>>(token);
                        if (state) {
                            state->ParentAfterFork();
                        }
                    },
                    /*child_after=*/
                    [](std::any token) {
                        auto state = std::any_cast<std::shared_ptr<ThreadPool::State>>(token);
                        if (state) {
                            state->ChildAfterFork();
                        }
                    });
            turbo::register_at_fork(state_->atfork_handler_);
#endif
        }

        // Quick-shutdown the pool on destruction unless this is an "eternal"
        // pool (see create_internal), which must not touch dead OS threads.
        ThreadPool::~ThreadPool() {
            if (!shutdown_on_destroy_) {
                return;
            }
            TURBO_UNUSED(shutdown(false /* wait */));
        }

        // Change the desired number of workers.  Spawns threads immediately if
        // tasks are already waiting; otherwise excess workers retire lazily.
        // Returns an error after shutdown or for a non-positive capacity.
        turbo::Status ThreadPool::set_capacity(int threads) {
            std::unique_lock<std::mutex> lock(state_->mutex_);
            if (state_->please_shutdown_) {
                return turbo::invalid_argument_error("operation forbidden during or after shutdown");
            }
            if (threads <= 0) {
                return turbo::invalid_argument_error("ThreadPool capacity must be > 0");
            }
            // Reap workers that already exited before counting the live ones.
            collect_finished_workers_unlocked();

            state_->desired_capacity_ = threads;
            // Decide whether workers must be added or retired.
            const int pending = static_cast<int>(state_->pending_tasks_.size());
            const int running = static_cast<int>(state_->workers_.size());
            const int delta = std::min(pending, threads - running);
            if (delta > 0) {
                // Tasks are waiting: spawn exactly as many workers as needed now.
                launch_workers_unlocked(delta);
            } else if (delta < 0) {
                // More workers than the new capacity allows: wake them all so the
                // excess ones notice and secede from the pool.
                state_->cv_.notify_all();
            }
            return turbo::OkStatus();
        }

        // Desired (not necessarily actual) number of worker threads.
        int ThreadPool::get_capacity() {
            std::lock_guard<std::mutex> guard(state_->mutex_);
            return state_->desired_capacity_;
        }

        // Number of tasks currently queued or running.
        int ThreadPool::GetNumTasks() {
            std::lock_guard<std::mutex> guard(state_->mutex_);
            return state_->tasks_queued_or_running_;
        }

        // Number of worker threads actually alive right now.
        int ThreadPool::GetActualCapacity() {
            std::lock_guard<std::mutex> guard(state_->mutex_);
            return static_cast<int>(state_->workers_.size());
        }

        // Shut the pool down.  With wait=true, all queued tasks run to completion
        // first; with wait=false (quick shutdown), queued-but-unstarted tasks are
        // dropped.  Returns an error if called twice.
        turbo::Status ThreadPool::shutdown(bool wait) {
            std::unique_lock<std::mutex> lock(state_->mutex_);

            if (state_->please_shutdown_) {
                return turbo::invalid_argument_error("shutdown() already called");
            }
            state_->please_shutdown_ = true;
            state_->quick_shutdown_ = !wait;
            // Wake every worker so it observes the shutdown flags.
            state_->cv_.notify_all();
            // Each exiting worker moves itself to finished_workers_ and notifies
            // cv_shutdown_; wait until the last one has left workers_.
            state_->cv_shutdown_.wait(lock, [this] { return state_->workers_.empty(); });
            if (!state_->quick_shutdown_) {
                        DKCHECK_EQ(state_->pending_tasks_.size(), 0);
            } else {
                // Quick shutdown: drop tasks that never started.  Since dropped
                // tasks will never be executed, they would never decrement
                // tasks_queued_or_running_ — reset it here and wake cv_idle_ so a
                // subsequent wait_for_idle() cannot block forever.
                state_->pending_tasks_.clear();
                if (state_->tasks_queued_or_running_ != 0) {
                    state_->tasks_queued_or_running_ = 0;
                    state_->cv_idle_.notify_all();
                }
            }
            collect_finished_workers_unlocked();
            return turbo::OkStatus();
        }

        // Join every thread in the trashcan — guaranteeing the OS threads have
        // fully exited — then empty it.  Caller must hold state_->mutex_.
        void ThreadPool::collect_finished_workers_unlocked() {
            auto &trashcan = state_->finished_workers_;
            std::for_each(trashcan.begin(), trashcan.end(),
                          [](std::thread &worker) { worker.join(); });
            trashcan.clear();
        }

        // Pool that launched the current thread, if any; set by the worker
        // lambda in launch_workers_unlocked.
        thread_local ThreadPool *current_thread_pool_ = nullptr;

        // True iff the calling thread is one of this pool's workers.
        bool ThreadPool::owns_this_thread() {
            return this == current_thread_pool_;
        }

        void ThreadPool::launch_workers_unlocked(int threads) {
            std::shared_ptr<State> state = sp_state_;

            for (int i = 0; i < threads; i++) {
                state_->workers_.emplace_back();
                auto it = --(state_->workers_.end());
                *it = std::thread([this, state, it] {
                    current_thread_pool_ = this;
                    worker_loop(state, it);
                });
            }
        }

        // Enqueue a task, lazily spawning a worker if capacity allows.
        // Returns an error after shutdown has been requested.
        turbo::Status ThreadPool::spawn_real(TaskHints hints, turbo::FnOnce<void()> task, StopToken stop_token,
                                            StopCallback &&stop_callback) {
            {
#ifdef NEBULA_WITH_OPENTELEMETRY
                // Wrap the task to propagate a parent tracing span to it
                // This task-wrapping needs to be done before we grab the mutex because the
                // first call to OT (whatever that happens to be) will attempt to grab this mutex
                // when calling keep_alive to keep the OT infrastructure alive.
                struct {
                  void operator()() {
                    auto scope = ::nebula::internal::tracing::GetTracer()->WithActiveSpan(activeSpan);
                    std::move(func)();
                  }
                  FnOnce<void()> func;
                  opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> activeSpan;
                } wrapper{std::forward<FnOnce<void()>>(task),
                          ::nebula::internal::tracing::GetTracer()->GetCurrentSpan()};
                task = std::move(wrapper);
#endif
                std::lock_guard<std::mutex> lock(state_->mutex_);
                if (state_->please_shutdown_) {
                    return turbo::invalid_argument_error("operation forbidden during or after shutdown");
                }
                // Opportunistically reap exited workers while we hold the lock.
                collect_finished_workers_unlocked();
                state_->tasks_queued_or_running_++;
                // Spawn a worker only if all current workers are busy AND we are
                // still below the desired capacity.
                if (static_cast<int>(state_->workers_.size()) < state_->tasks_queued_or_running_ &&
                    state_->desired_capacity_ > static_cast<int>(state_->workers_.size())) {
                    // We can still spin up more workers so spin up a new worker
                    launch_workers_unlocked(/*threads=*/1);
                }
                state_->pending_tasks_.push_back(
                        {std::move(task), std::move(stop_token), std::move(stop_callback)});
            }
            // Notify outside the lock so the woken worker doesn't immediately block.
            state_->cv_.notify_one();
            return turbo::OkStatus();
        }

        // Pin `resource` so it lives at least as long as the pool's State.
        void ThreadPool::keep_alive(std::shared_ptr<Executor::Resource> resource) {
            // Contention is unlikely here, but guard against concurrent callers anyway.
            std::lock_guard<std::mutex> guard(state_->mutex_);
            state_->kept_alive_resources_.emplace_back(std::move(resource));
        }

        // Create a pool with the given capacity, or an error status if the
        // capacity is invalid.
        turbo::Result<std::shared_ptr<ThreadPool>> ThreadPool::create(int threads) {
            // The constructor is not public, hence the explicit new.
            std::shared_ptr<ThreadPool> pool(new ThreadPool());
            TURBO_RETURN_NOT_OK(pool->set_capacity(threads));
            return pool;
        }

        // Create an "eternal" pool intended to live for the whole process
        // (used for the global CPU pool).
        turbo::Result<std::shared_ptr<ThreadPool>> ThreadPool::create_internal(int threads) {
            TURBO_MOVE_OR_RAISE(auto pool, create(threads));
            // On Windows, the ThreadPool destructor may be called after non-main threads
            // have been killed by the OS, and hang in a condition variable.
            // On Unix, we want to avoid leak reports by Valgrind.
#ifdef _WIN32
            // Skip shutdown() in the destructor (see ~ThreadPool).
            pool->shutdown_on_destroy_ = false;
#endif
            return pool;
        }

// ----------------------------------------------------------------------
// Global thread pool

        // Parse an OpenMP-style environment variable.  Returns the first
        // (top-level) number of the comma-separated list, clamped to >= 0,
        // or 0 when the variable is unset or unparsable.
        static int ParseOMPEnvVar(const char *name) {
            // OMP_NUM_THREADS is a comma-separated list of positive integers.
            // We are only interested in the first (top-level) number.
            auto maybe_value = turbo::get_env_string(name);
            if (!maybe_value.ok()) {
                return 0;
            }
            std::string value = *std::move(maybe_value);
            const auto comma_pos = value.find_first_of(',');
            if (comma_pos != std::string::npos) {
                value = value.substr(0, comma_pos);
            }
            try {
                return std::max(0, std::stoi(value));
            } catch (...) {
                // Not a number, or out of int range: treat as "unset".
                return 0;
            }
        }

        // Default worker count for the global pool: OMP_NUM_THREADS if set,
        // else the hardware concurrency, capped by OMP_THREAD_LIMIT, with a
        // hardcoded fallback when nothing can be determined.
        int ThreadPool::default_capacity() {
            int capacity = ParseOMPEnvVar("OMP_NUM_THREADS");
            if (capacity == 0) {
                // hardware_concurrency() may itself return 0 when unknown.
                capacity = static_cast<int>(std::thread::hardware_concurrency());
            }
            const int limit = ParseOMPEnvVar("OMP_THREAD_LIMIT");
            if (limit > 0) {
                capacity = std::min(limit, capacity);
            }
            if (capacity == 0) {
                KLOG(WARNING) << "Failed to determine the number of available threads, "
                                       "using a hardcoded arbitrary value";
                capacity = 4;
            }
            return capacity;
        }


        // Helper for the singleton pattern: builds the process-wide CPU pool,
        // aborting the process if creation fails.
        std::shared_ptr<ThreadPool> ThreadPool::make_cpu_thread_pool() {
            auto pool_result = ThreadPool::create_internal(ThreadPool::default_capacity());
            if (!pool_result.ok()) {
                pool_result.status().abort("Failed to create global CPU thread pool");
            }
            return *std::move(pool_result);
        }

        // Accessor for the process-wide CPU thread pool.
        ThreadPool *get_cpu_thread_pool() {
            // Function-local static avoids global initialization order issues (ARROW-18383)
            // and is initialized thread-safely on first use.
            static std::shared_ptr<ThreadPool> pool = ThreadPool::make_cpu_thread_pool();
            return pool.get();
        }

    }  // namespace internal

    // Desired capacity of the process-wide CPU thread pool.
    int get_cpu_thread_pool_capacity() {
        return internal::get_cpu_thread_pool()->get_capacity();
    }

    // Resize the process-wide CPU thread pool; validation happens in set_capacity().
    turbo::Status set_cpu_thread_pool_capacity(int threads) {
        internal::ThreadPool *pool = internal::get_cpu_thread_pool();
        return pool->set_capacity(threads);
    }

}  // namespace nebula
