// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <turbo/base/macros.h>
#include <turbo/functional/functional.h>
#include <nebula/future/cancel.h>
#include <nebula/future/future.h>

namespace nebula::internal {

        // Scheduling hints describing a task, for optional use by an Executor
        // implementation.  The provided ThreadPool implementation ignores them;
        // other executors may use them to prioritize or batch work.
        struct TaskHints {
            // Task urgency: the lower the value, the more urgent the task.
            int32_t priority = 0;
            // The IO transfer size in bytes (-1 when unknown or not applicable).
            int64_t io_size = -1;
            // The approximate CPU cost in number of instructions (-1 when unknown).
            int64_t cpu_cost = -1;
            // An application-specific ID (-1 when unset).
            int64_t external_id = -1;
        };


    // Abstract interface for scheduling work.  Concrete implementations
    // (e.g. a thread pool) provide spawn_real(); everything else here is
    // convenience layered on top of it.
    class TURBO_EXPORT Executor {
    public:
        // Invoked with the cancellation status when a spawned task's
        // StopToken fires; forwarded verbatim to spawn_real().
        using StopCallback = turbo::FnOnce<void(const turbo::Status&)>;

        virtual ~Executor();

        // Spawn a fire-and-forget task (default hints, not cancellable).
        template <typename Function>
        turbo::Status spawn(Function&& func) {
            return spawn_real(TaskHints{}, std::forward<Function>(func),
                              StopToken::unstoppable(), StopCallback{});
        }

        // Spawn a fire-and-forget task that honors `stop_token`.
        template <typename Function>
        turbo::Status spawn(Function&& func, StopToken stop_token) {
            return spawn_real(TaskHints{}, std::forward<Function>(func),
                              std::move(stop_token), StopCallback{});
        }

        // Spawn a fire-and-forget task with scheduling hints.
        template <typename Function>
        turbo::Status spawn(TaskHints hints, Function&& func) {
            return spawn_real(hints, std::forward<Function>(func),
                              StopToken::unstoppable(), StopCallback{});
        }

        // Spawn a fire-and-forget task with hints and cancellation support.
        template <typename Function>
        turbo::Status spawn(TaskHints hints, Function&& func, StopToken stop_token) {
            return spawn_real(hints, std::forward<Function>(func),
                              std::move(stop_token), StopCallback{});
        }

        // Spawn a fire-and-forget task with hints, cancellation support, and a
        // callback invoked with the cancellation status if the task is stopped.
        template <typename Function>
        turbo::Status spawn(TaskHints hints, Function&& func, StopToken stop_token,
                            StopCallback stop_callback) {
            return spawn_real(hints, std::forward<Function>(func),
                              std::move(stop_token), std::move(stop_callback));
        }

        // Transfers a future to this executor.  Any continuations added to the
        // returned future will run in this executor.  Otherwise they would run
        // on the same thread that called mark_finished.
        //
        // This is necessary when (for example) an I/O task is completing a future.
        // The continuations of that future should run on the CPU thread pool keeping
        // CPU heavy work off the I/O thread pool.  So the I/O task should transfer
        // the future to the CPU executor before returning.
        //
        // By default this method will only transfer if the future is not already completed.  If
        // the future is already completed then any callback would be run synchronously and so
        // no transfer is typically necessary.  However, in cases where you want to force a
        // transfer (e.g. to help the scheduler break up units of work across multiple cores)
        // then you can use `transfer_always` instead.
        template <typename T>
        Future<T> transfer(Future<T> future) {
            return do_transfer(std::move(future), false);
        }

        // Overload of transfer which will always schedule callbacks on new threads even if the
        // future is finished when the callback is added.
        //
        // This can be useful in cases where you want to ensure parallelism
        template <typename T>
        Future<T> transfer_always(Future<T> future) {
            return do_transfer(std::move(future), true);
        }

        // Submit a callable and arguments for execution.  Returns a future that
        // will hold the callable's result once it has run.
        // The callable's arguments are copied before execution.
        template <typename Function, typename... Args,
                  typename FutureType = typename ::nebula::detail::ContinueFuture::ForSignature<
                          Function && (Args && ...)>>
        turbo::Result<FutureType> submit(TaskHints hints, StopToken stop_token, Function&& func,
                                         Args&&... args) {
            using VT = typename FutureType::value_type;

            auto future = FutureType::create();
            // std::bind copies the arguments now; when the bound task runs it
            // completes `future` with the callable's result.
            auto task = std::bind(::nebula::detail::ContinueFuture{}, future,
                                  std::forward<Function>(func), std::forward<Args>(args)...);
            // On cancellation, finish the future with the stop status — but only
            // if the future is still alive (the task may have already completed
            // it; the weak reference avoids keeping the future alive ourselves).
            struct {
                WeakFuture<VT> weak_fut;

                void operator()(const turbo::Status& st) {
                    auto fut = weak_fut.get();
                    if (fut.is_valid()) {
                        fut.mark_finished(st);
                    }
                }
            } stop_callback{WeakFuture<VT>(future)};
            TURBO_RETURN_NOT_OK(spawn_real(hints, std::move(task), std::move(stop_token),
                                           std::move(stop_callback)));

            return future;
        }

        // As above, with default hints.
        template <typename Function, typename... Args,
                  typename FutureType = typename ::nebula::detail::ContinueFuture::ForSignature<
                          Function && (Args && ...)>>
        turbo::Result<FutureType> submit(StopToken stop_token, Function&& func, Args&&... args) {
            // NOTE: stop_token is moved (not copied) into the full overload,
            // consistent with the other delegating overloads.
            return submit(TaskHints{}, std::move(stop_token), std::forward<Function>(func),
                          std::forward<Args>(args)...);
        }

        // As above, with no cancellation support.
        template <typename Function, typename... Args,
                  typename FutureType = typename ::nebula::detail::ContinueFuture::ForSignature<
                          Function && (Args && ...)>>
        turbo::Result<FutureType> submit(TaskHints hints, Function&& func, Args&&... args) {
            return submit(std::move(hints), StopToken::unstoppable(),
                          std::forward<Function>(func), std::forward<Args>(args)...);
        }

        // As above, with default hints and no cancellation support.
        template <typename Function, typename... Args,
                  typename FutureType = typename ::nebula::detail::ContinueFuture::ForSignature<
                          Function && (Args && ...)>>
        turbo::Result<FutureType> submit(Function&& func, Args&&... args) {
            return submit(TaskHints{}, StopToken::unstoppable(), std::forward<Function>(func),
                          std::forward<Args>(args)...);
        }

        // Return the level of parallelism (the number of tasks that may be executed
        // concurrently).  This may be an approximate number.
        virtual int get_capacity() = 0;

        // Return true if the thread from which this function is called is owned by this
        // Executor. Returns false if this Executor does not support this property.
        virtual bool owns_this_thread() { return false; }

        // Return true if this is the current executor being called
        // n.b. this defaults to just calling owns_this_thread
        // unless the threadpool is disabled
        virtual bool is_current_executor() { return owns_this_thread(); }

        /// \brief An interface to represent something with a custom destructor
        ///
        /// \see keep_alive
        class TURBO_EXPORT Resource {
        public:
            virtual ~Resource() = default;
        };

        /// \brief Keep a resource alive until all executor threads have terminated
        ///
        /// Executors may have static storage duration.  In particular, the CPU and I/O
        /// executors are currently implemented this way.  These threads may access other
        /// objects with static storage duration such as the OpenTelemetry runtime context
        /// the default memory pool, or other static executors.
        ///
        /// The order in which these objects are destroyed is difficult to control.  In order
        /// to ensure those objects remain alive until all threads have finished those objects
        /// should be wrapped in a Resource object and passed into this method.  The given
        /// shared_ptr will be kept alive until all threads have finished their worker loops.
        virtual void keep_alive(std::shared_ptr<Resource> resource);

    protected:
        TURBO_DISALLOW_COPY_AND_ASSIGN(Executor);

        Executor() = default;

        // Shared implementation of transfer() / transfer_always().
        template <typename T, typename FT = Future<T>, typename FTSync = typename FT::SyncType>
        Future<T> do_transfer(Future<T> future, bool always_transfer = false) {
            auto transferred = Future<T>::create();
            if (always_transfer) {
                // Let the future machinery schedule the callback on this
                // executor even if `future` is already finished.
                CallbackOptions callback_options = CallbackOptions::defaults();
                callback_options.should_schedule = ShouldSchedule::Always;
                callback_options.executor = this;
                auto sync_callback = [transferred](const FTSync& result) mutable {
                    transferred.mark_finished(result);
                };
                future.add_callback(sync_callback, callback_options);
                return transferred;
            }

            // We could use add_callback's ShouldSchedule::IfUnfinished but we can save a bit of
            // work by doing the test here.
            auto callback = [this, transferred](const FTSync& result) mutable {
                auto spawn_status =
                        spawn([transferred, result]() mutable { transferred.mark_finished(result); });
                if (!spawn_status.ok()) {
                    // Couldn't re-schedule; propagate the spawn failure so the
                    // consumer is not left waiting forever.
                    transferred.mark_finished(spawn_status);
                }
            };
            auto callback_factory = [&callback]() { return callback; };
            if (future.try_add_callback(callback_factory)) {
                return transferred;
            }
            // If the future is already finished and we aren't going to force spawn a thread
            // then we don't need to add another layer of callback and can return the original
            // future
            return future;
        }

        // Subclassing API: execute `task`, observing `hints` if supported, and
        // honoring the StopToken / StopCallback cancellation contract.
        virtual turbo::Status spawn_real(TaskHints hints, turbo::FnOnce<void()> task, StopToken,
                                         StopCallback&&) = 0;
    };

    // A unit of work queued on an Executor: the callable to run together with
    // the cancellation token and stop callback passed to Executor::spawn_real.
    struct Task {
        // The work itself; a move-only callable consumed when executed.
        turbo::FnOnce<void()> callable;
        // Token associated with this task's cancellation — TODO(review):
        // confirm the executor polls it before/while running `callable`.
        StopToken stop_token;
        // Callback matching Executor::StopCallback (takes the stop status).
        Executor::StopCallback stop_callback;
    };
}  // namespace nebula::internal

