// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <cstdint>
#include <memory>
#include <queue>
#include <type_traits>
#include <unordered_set>
#include <utility>


#include <turbo/utility/status.h>
#include <nebula/future/cancel.h>
#include <nebula/future/executor.h>
#include <nebula/future/serial_executor.h>
#include <nebula/version.h>
#include <turbo/functional/functional.h>
#include <turbo/functional/iterator.h>
#include <turbo/base/macros.h>

#if defined(_MSC_VER)
// Disable harmless warning for decorated name length limit
#pragma warning(disable : 4503)
#endif

namespace nebula {

    /// \brief Get the capacity of the global thread pool
    ///
    /// Return the number of worker threads in the thread pool to which
    /// Nebula dispatches various CPU-bound tasks.  This is an ideal number,
    /// not necessarily the exact number of threads at a given point in time.
    ///
    /// You can change this number using set_cpu_thread_pool_capacity().
    TURBO_EXPORT int get_cpu_thread_pool_capacity();

    /// \brief Set the capacity of the global thread pool
    ///
    /// Set the number of worker threads in the thread pool to which
    /// Nebula dispatches various CPU-bound tasks.
    ///
    /// The current number is returned by get_cpu_thread_pool_capacity().
    TURBO_EXPORT turbo::Status set_cpu_thread_pool_capacity(int threads);

    namespace internal {


        /// An Executor implementation spawning tasks in FIFO manner on a fixed-size
        /// pool of worker threads.
        ///
        /// Note: Any sort of nested parallelism will deadlock this executor.  Blocking waits are
        /// fine but if one task needs to wait for another task it must be expressed as an
        /// asynchronous continuation.
        class TURBO_EXPORT ThreadPool : public Executor {
        public:
            // Construct a thread pool with the given number of worker threads.
            // Returns an error status instead of a pool on invalid input.
            static turbo::Result<std::shared_ptr<ThreadPool>> create(int threads);

            // Like create(), but takes care that the returned ThreadPool is compatible
            // with destruction late at process exit.
            static turbo::Result<std::shared_ptr<ThreadPool>> create_internal(int threads);

            // Destroy thread pool; the pool will first be shut down
            ~ThreadPool() override;

            // Return the desired number of worker threads.
            // The actual number of workers may lag a bit before being adjusted to
            // match this value.
            int get_capacity() override;

            // Return the number of tasks either running or in the queue.
            int GetNumTasks();

            // Return whether the calling thread is one of this pool's worker threads
            // (Executor override).
            bool owns_this_thread() override;

            // Dynamically change the number of worker threads.
            //
            // This function always returns immediately.
            // If fewer threads are running than this number, new threads are spawned
            // on-demand when needed for task execution.
            // If more threads are running than this number, excess threads are reaped
            // as soon as possible.
            turbo::Status set_capacity(int threads);

            // Heuristic for the default capacity of a thread pool for CPU-bound tasks.
            // This is exposed as a static method to help with testing.
            static int default_capacity();

            // shutdown the pool.  Once the pool starts shutting down, new tasks
            // cannot be submitted anymore.
            // If "wait" is true, shutdown waits for all pending tasks to be finished.
            // If "wait" is false, workers are stopped as soon as currently executing
            // tasks are finished.
            turbo::Status shutdown(bool wait = true);

            // Wait for the thread pool to become idle
            //
            // This is useful for sequencing tests
            void wait_for_idle();

            // Keep `resource` alive at least as long as this executor (Executor override).
            void keep_alive(std::shared_ptr<Executor::Resource> resource) override;

            // Opaque shared state of the pool; the definition lives in the
            // implementation file.
            struct State;

        protected:
            // Test-only access to protected members.
            FRIEND_TEST(TestThreadPool, set_capacity);

            FRIEND_TEST(TestGlobalThreadPool, Capacity);

            // The global-pool accessor needs protected access, presumably to build
            // the singleton via make_cpu_thread_pool() — confirm in implementation.
            friend ThreadPool *get_cpu_thread_pool();

            // Protected: instances are obtained through create()/create_internal().
            ThreadPool();

            // Executor hook: enqueue `task` for eventual execution on a worker thread.
            turbo::Status spawn_real(TaskHints hints, turbo::FnOnce<void()> task, StopToken,
                                    StopCallback &&) override;

            // Collect finished worker threads, making sure the OS threads have exited.
            // NOTE(review): the "_unlocked" suffix suggests the caller must already
            // hold the pool's internal lock — confirm against the implementation.
            void collect_finished_workers_unlocked();

            // Launch a given number of additional workers.
            // NOTE(review): same "_unlocked" locking convention as above — confirm.
            void launch_workers_unlocked(int threads);

            // Get the current actual capacity
            int GetActualCapacity();

            // Build the process-global pool returned by get_cpu_thread_pool().
            static std::shared_ptr<ThreadPool> make_cpu_thread_pool();

            // Owning handle to the pool's shared state.
            std::shared_ptr<State> sp_state_;
            // Raw view of the same state; presumably caches sp_state_.get() — confirm.
            State *state_;
            // Whether the destructor should shut the pool down; presumably differs
            // between create() and create_internal() pools — confirm.
            bool shutdown_on_destroy_;
        };

        // Return the process-global thread pool for CPU-bound tasks.
        TURBO_EXPORT ThreadPool *get_cpu_thread_pool();

        /// \brief Potentially run an async operation serially (if use_threads is false)
        /// \see RunSerially
        ///
        /// When `use_threads` is true the global CPU executor is used; when false, a
        /// temporary SerialExecutor drives everything on the calling thread.
        /// `get_future` is invoked (on this thread) with the chosen executor and must
        /// return a future that will eventually finish.  This function only returns
        /// once that future has finished.
        template<typename Fut, typename VT = typename Fut::value_type>
        typename Fut::SyncType run_synchronously(turbo::FnOnce<Fut(Executor *)> get_future,
                                                bool use_threads) {
            if (!use_threads) {
                // Single-threaded path: a temporary serial executor pumps the work.
                return SerialExecutor::RunInSerialExecutor<VT>(std::move(get_future));
            }
            // Threaded path: dispatch onto the shared CPU pool, then block for the result.
            auto future = std::move(get_future)(get_cpu_thread_pool());
            return FutureToSync(future);
        }

        /// \brief Potentially iterate an async generator serially (if use_threads is false)
        /// \see IterateGenerator
        ///
        /// When `use_threads` is true the generator is created against the global CPU
        /// executor.  Each pull on the returned iterator simply blocks until the next
        /// item is available, and tasks may keep running in the background between
        /// pulls.
        ///
        /// When `use_threads` is false only the calling thread is used.  Each pull on
        /// the iterator does just enough work to produce one item; pending tasks sit
        /// in a queue until the next pull and no work happens between pulls.
        template<typename T>
        turbo::Iterator<T> iterate_synchronously(
                turbo::FnOnce<turbo::Result<std::function<Future<T>()>>(Executor *)> get_gen, bool use_threads) {
            if (!use_threads) {
                // Single-threaded path: pump the generator on the calling thread.
                return SerialExecutor::IterateGenerator(std::move(get_gen));
            }
            // Threaded path: build the generator against the shared CPU pool.
            auto gen_result = std::move(get_gen)(get_cpu_thread_pool());
            if (!gen_result.ok()) {
                return turbo::make_error_iterator<T>(gen_result.status());
            }
            return create_generator_iterator(*gen_result);
        }

    }  // namespace internal
}  // namespace nebula
