// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#pragma once

#include <nebula/future/executor.h>
#include <turbo/functional/iterator.h>

namespace nebula::internal {

    /// \brief An executor implementation that runs all tasks on a single thread using an
    /// event loop.
    ///
    /// The "worker" is whichever thread powers the loop via RunLoop(); tasks spawned on
    /// the executor are queued and drained by that thread.
    ///
    /// Note: Any sort of nested parallelism will deadlock this executor.  Blocking waits are
    /// fine but if one task needs to wait for another task it must be expressed as an
    /// asynchronous continuation.
    class TURBO_EXPORT SerialExecutor : public Executor {
    public:
        /// \brief The initial task handed to Run / RunInSerialExecutor.
        ///
        /// It receives the executor (so it can schedule further work on it) and returns
        /// the future whose completion marks the end of all top-level work.
        template<typename T = turbo::EmptyResult>
        using TopLevelTask = turbo::FnOnce<Future<T>(Executor *)>;

        ~SerialExecutor() override;

        /// \brief A serial executor always exposes a capacity of one: the single
        /// borrowed thread that drives the event loop.
        int get_capacity() override { return 1; }

        /// \brief Whether the calling thread is the one currently running the loop.
        bool owns_this_thread() override;

        /// \brief Queue a task on the event loop; it runs when a thread powers the
        /// loop via RunLoop().
        turbo::Status spawn_real(TaskHints hints, turbo::FnOnce<void()> task, StopToken,
                                StopCallback &&) override;

        // Return the number of tasks either running or in the queue.
        int GetNumTasks();

        /// \brief Runs the TopLevelTask and any scheduled tasks
        ///
        /// The TopLevelTask (or one of the tasks it schedules) must either return an invalid
        /// status or call the finish signal. Failure to do this will result in a deadlock.  For
        /// this reason it is preferable (if possible) to use the helpers below
        /// (RunInSerialExecutor / IterateGenerator) which delegate the responsibility onto a
        /// Future producer's existing responsibility to always mark a future finished (which
        /// can someday be aided by ARROW-12207).
        template<typename T = turbo::EmptyResult, typename FT = Future<T>,
                typename FTSync = typename FT::SyncType>
        static FTSync RunInSerialExecutor(TopLevelTask<T> initial_task) {
            // The temporary executor runs its loop to completion inside Run(), so the
            // returned future is already finished when the temporary is destroyed.
            Future<T> fut = SerialExecutor().Run<T>(std::move(initial_task));
            return FutureToSync(fut);
        }

        /// \brief Transform an AsyncGenerator into an turbo::Iterator
        ///
        /// An event loop will be created and each call to Next will power the event loop with
        /// the calling thread until the next item is ready to be delivered.
        ///
        /// Note: The iterator's destructor will run until the given generator is fully
        /// exhausted. If you wish to abandon iteration before completion then the correct
        /// approach is to use a stop token to cause the generator to exhaust early.
        template<typename T>
        static turbo::Iterator<T> IterateGenerator(
                turbo::FnOnce<turbo::Result<std::function<Future<T>()>>(Executor *)> initial_task) {
            auto serial_executor = std::unique_ptr<SerialExecutor>(new SerialExecutor());
            auto maybe_generator = std::move(initial_task)(serial_executor.get());
            if (!maybe_generator.ok()) {
                return turbo::make_error_iterator<T>(maybe_generator.status());
            }
            auto generator = maybe_generator.move_value_unsafe();
            // Adapter that owns the executor and pulls one item per call to next(),
            // lending the calling thread to the event loop for each pull.
            struct SerialIterator {
                SerialIterator(std::unique_ptr<SerialExecutor> executor,
                               std::function<Future<T>()> generator)
                        : executor(std::move(executor)), generator(std::move(generator)) {}
                TURBO_DISALLOW_COPY_AND_ASSIGN(SerialIterator);
                TURBO_DEFAULT_MOVE_AND_ASSIGN(SerialIterator);

                ~SerialIterator() {
                    // A serial iterator must be consumed before it can be destroyed.  Allowing it to
                    // do otherwise would lead to resource leakage.  There will likely be deadlocks at
                    // this spot in the future but these will be the result of other bugs and not the
                    // fact that we are forcing consumption here.

                    // If a streaming API needs to support early abandonment then it should be done so
                    // with a cancellation token and not simply discarding the iterator and expecting
                    // the underlying work to clean up correctly.

                    // executor is null when this instance was moved-from (default move).
                    if (executor && !executor->IsFinished()) {
                        while (true) {
                            turbo::Result<T> maybe_next = next();
                            if (!maybe_next.ok() || turbo::is_iteration_end(*maybe_next)) {
                                break;
                            }
                        }
                    }
                }

                turbo::Result<T> next() {
                    // Re-arm the loop in case the previous pull left it paused.
                    executor->Unpause();
                    // This call may lead to tasks being scheduled in the serial executor
                    Future<T> next_fut = generator();
                    next_fut.add_callback([this](const turbo::Result<T> &res) {
                        // If we're done iterating we should drain the rest of the tasks in the executor
                        if (!res.ok() || turbo::is_iteration_end(*res)) {
                            executor->finish();
                            return;
                        }
                        // Otherwise we will break out immediately, leaving the remaining tasks for
                        // the next call.
                        executor->Pause();
                    });
                    // future must run on this thread
                    // Borrow this thread and run tasks until the future is finished
                    executor->RunLoop();
                    if (!next_fut.is_finished()) {
                        // Not clear this is possible since RunLoop wouldn't generally exit
                        // unless we paused/finished which would imply next_fut has been
                        // finished.
                        return turbo::invalid_argument_error(
                                "Serial executor terminated before next result computed");
                    }
                    // At this point we may still have tasks in the executor, that is ok.
                    // We will run those tasks the next time through.
                    return next_fut.result();
                }

                std::unique_ptr<SerialExecutor> executor;
                std::function<Future<T>()> generator;
            };
            return turbo::Iterator<T>(SerialIterator{std::move(serial_executor), std::move(generator)});
        }

    protected:
        /// \brief Power the event loop with the calling thread until the executor is
        /// paused or finished.
        virtual void RunLoop();

        // Opaque shared state (defined in the implementation file); State uses a mutex.
        struct State;
        std::shared_ptr<State> state_;

        SerialExecutor();

        // We mark the serial executor "finished" when there should be
        // no more tasks scheduled on it.  It's not strictly needed but
        // can help catch bugs where we are trying to use the executor
        // after we are done with it.
        void finish();

        bool IsFinished();

        // We pause the executor when we are running an async generator
        // and we have received an item that we can deliver.
        void Pause();

        void Unpause();

        /// \brief Run initial_task, then drive the loop until the future it returned
        /// finishes; the completion callback calls finish() which stops the loop.
        template<typename T, typename FTSync = typename Future<T>::SyncType>
        Future<T> Run(TopLevelTask<T> initial_task) {
            auto final_fut = std::move(initial_task)(this);
            final_fut.add_callback([this](const FTSync &) { finish(); });
            RunLoop();
            return final_fut;
        }

    };

}  // namespace nebula::internal
