// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/future/task_group.h>

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <memory>
#include <mutex>
#include <optional>
#include <utility>

#include <nebula/future/thread_pool.h>
#include <nebula/version.h>
#include <turbo/base/checked_cast.h>
#include <turbo/log/logging.h>
namespace nebula::internal {

    namespace {

        ////////////////////////////////////////////////////////////////////////
        // Serial TaskGroup implementation

        class SerialTaskGroup : public TaskGroup {
        public:
            explicit SerialTaskGroup(StopToken stop_token) : stop_token_(std::move(stop_token)) {}

            void AppendReal(turbo::FnOnce<turbo::Status()> task) override {
                        DKCHECK(!finished_);
                if (stop_token_.is_stop_requested()) {
                    status_ &= stop_token_.Poll();
                    return;
                }
                if (status_.ok()) {
                    status_ &= std::move(task)();
                }
            }

            turbo::Status current_status() override { return status_; }

            bool ok() const override { return status_.ok(); }

            turbo::Status finish() override {
                if (!finished_) {
                    finished_ = true;
                }
                KLOG(WARNING)<<status_;
                return status_;
            }

            Future<> FinishAsync() override { return Future<>::make_finished(finish()); }

            int parallelism() override { return 1; }

            StopToken stop_token_;
            turbo::Status status_;
            bool finished_ = false;
        };

        ////////////////////////////////////////////////////////////////////////
        // Threaded TaskGroup implementation

        // TaskGroup that spawns each task on an Executor.  The hot path
        // (append / task completion) uses atomics only; `mutex_` is taken
        // on errors and at finish/notification time.  Tasks keep the group
        // alive through a shared_ptr captured in the spawned callable, so
        // instances must be owned by shared_ptr (see TaskGroup::MakeThreaded).
        class ThreadedTaskGroup : public TaskGroup {
        public:
            // `executor` is borrowed, not owned; it must outlive this group.
            ThreadedTaskGroup(Executor *executor, StopToken stop_token)
                    : executor_(executor),
                      stop_token_(std::move(stop_token)),
                      nremaining_(0),
                      ok_(true),
                      finished_(false) {}

            ~ThreadedTaskGroup() override {
                // Make sure all pending tasks are finished, so that dangling references
                // to this don't persist.
                TURBO_UNUSED(finish());
            }

            // Schedules `task` on the executor.  Dropped without running if a
            // stop was requested or a previous task already failed (ok_ false).
            void AppendReal(turbo::FnOnce<turbo::Status()> task) override {
                        DKCHECK(!finished_);
                if (stop_token_.is_stop_requested()) {
                    UpdateStatus(stop_token_.Poll());
                    return;
                }

                // The hot path is unlocked thanks to atomics
                // Only if an error occurs is the lock taken
                if (ok_.load(std::memory_order_acquire)) {
                    // NOTE(review): memory_order_acquire on a fetch_add is unusual
                    // for a pure increment — confirm the intended ordering here.
                    nremaining_.fetch_add(1, std::memory_order_acquire);

                    // Keep the group alive for the duration of the task.
                    auto self = turbo::checked_pointer_cast<ThreadedTaskGroup>(shared_from_this());

                    auto callable = [self = std::move(self), task = std::move(task),
                            stop_token = stop_token_]() mutable {
                        // Skip the work (but still decrement the counter) if the
                        // group already failed by the time the task runs.
                        if (self->ok_.load(std::memory_order_acquire)) {
                            turbo::Status st;
                            if (stop_token.is_stop_requested()) {
                                st = stop_token.Poll();
                            } else {
                                // XXX what about exceptions?
                                st = std::move(task)();
                            }
                            self->UpdateStatus(std::move(st));
                        }
                        self->OneTaskDone();
                    };
                    // If the executor rejects the task, record that as the
                    // group's status.
                    UpdateStatus(executor_->spawn(std::move(callable)));
                }
            }

            turbo::Status current_status() override {
                std::lock_guard<std::mutex> lock(mutex_);
                return status_;
            }

            bool ok() const override { return ok_.load(); }

            // Blocks until all appended tasks have completed, then returns the
            // accumulated status.  Safe to call multiple times.
            turbo::Status finish() override {
                std::unique_lock<std::mutex> lock(mutex_);
                if (!finished_) {
                  cv_.wait(lock, [&]() { return nremaining_.load() == 0; });
                  // Current tasks may start other tasks, so only set this when done
                  finished_ = true;
                }
                return status_;
            }

            // Returns a future that completes when all tasks have; the future is
            // created lazily on first call and completed by OneTaskDone().
            Future<> FinishAsync() override {
                std::lock_guard<std::mutex> lock(mutex_);
                if (!completion_future_.has_value()) {
                    if (nremaining_.load() == 0) {
                        completion_future_ = Future<>::make_finished(status_);
                    } else {
                        completion_future_ = Future<>::create();
                    }
                }
                return *completion_future_;
            }

            int parallelism() override { return executor_->get_capacity(); }

        protected:
            // Folds a failure into status_ and flips ok_ to false.
            void UpdateStatus(turbo::Status &&st) {
                // Must be called unlocked, only locks on error
                if (TURBO_UNLIKELY(!st.ok())) {
                    std::lock_guard<std::mutex> lock(mutex_);
                    ok_.store(false, std::memory_order_release);
                    status_ &= std::move(st);
                }
            }

            // Decrements the pending-task counter; the thread that drops it to
            // zero wakes finish() and/or completes the FinishAsync future.
            void OneTaskDone() {
                // Can be called unlocked thanks to atomics
                auto nremaining = nremaining_.fetch_sub(1, std::memory_order_release) - 1;
                        DKCHECK_GE(nremaining, 0);
                if (nremaining == 0) {
                    // Take the lock so that ~ThreadedTaskGroup cannot destroy cv
                    // before cv.notify_one() has returned
                    std::unique_lock<std::mutex> lock(mutex_);
                    cv_.notify_one();
                    if (completion_future_.has_value()) {
                        // mark_finished could be slow.  We don't want to call it while we are holding
                        // the lock.
                        auto &future = *completion_future_;
                        const auto finished = completion_future_->is_finished();
                        // NOTE(review): `status` is a *reference* to status_, read
                        // after the unlock below — relies on no further
                        // UpdateStatus once nremaining_ hit zero; confirm.
                        const auto &status = status_;
                        // This will be redundant if the user calls finish and not FinishAsync
                        if (!finished && !finished_) {
                            finished_ = true;
                            lock.unlock();
                            future.mark_finished(status);
                        } else {
                            lock.unlock();
                        }
                    }
                }
            }

            // These members are usable unlocked
            Executor *executor_;                  // borrowed, not owned
            StopToken stop_token_;
            std::atomic<int32_t> nremaining_;     // tasks spawned but not yet done
            std::atomic<bool> ok_;                // false once any task failed
            std::atomic<bool> finished_;

            // These members use locking
            std::mutex mutex_;
            std::condition_variable cv_;          // signaled when nremaining_ reaches 0
            turbo::Status status_;                // accumulated via `&=` under mutex_
            std::optional<Future<>> completion_future_;  // lazily created by FinishAsync
        };

    }  // namespace

    // Creates a TaskGroup that runs tasks synchronously on the calling thread.
    std::shared_ptr<TaskGroup> TaskGroup::MakeSerial(StopToken stop_token) {
        // Move the token into the group instead of copying it.
        return std::shared_ptr<TaskGroup>(new SerialTaskGroup{std::move(stop_token)});
    }

    // Creates a TaskGroup that spawns tasks on `thread_pool` (borrowed; must
    // outlive the group).
    std::shared_ptr<TaskGroup> TaskGroup::MakeThreaded(Executor *thread_pool,
                                                       StopToken stop_token) {
        // Move the token into the group instead of copying it.
        return std::shared_ptr<TaskGroup>(new ThreadedTaskGroup{thread_pool, std::move(stop_token)});
    }

}  // namespace nebula::internal
