// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/future/future.h>

#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <numeric>

#include <turbo/base/checked_cast.h>
#include <nebula/version.h>
#include <turbo/log/logging.h>
#include <nebula/future/thread_pool.h>
#include <nebula/trace/tracing_internal.h>

namespace nebula {


    // Concrete implementation of FutureImpl.
    //
    // state_ / callbacks_ (inherited from FutureImpl) are guarded by mutex_;
    // cv_ is notified when the future reaches a terminal state so that
    // synchronous waiters in DoWait() can wake up.  Queued callbacks are run
    // exactly once, after the lock has been released.
    class ConcreteFutureImpl : public FutureImpl {
    public:
        // Transition this future to SUCCESS and fire its callbacks.
        void do_mark_finished() { DoMarkFinishedOrFailed(FutureState::SUCCESS); }

        // Transition this future to FAILURE and fire its callbacks.
        void DoMarkFailed() { DoMarkFinishedOrFailed(FutureState::FAILURE); }

        // Sanity-check callback options: any scheduling policy other than
        // Never may hand the callback to an executor, so one must be given.
        void CheckOptions(const CallbackOptions &opts) {
            if (opts.should_schedule != ShouldSchedule::Never) {
                        DKCHECK_NE(opts.executor, nullptr)
                        << "An executor must be specified when adding a callback that might schedule";
            }
        }

        // Register `callback`.  If the future is still pending the callback
        // is queued; if it has already finished the callback is run (or
        // scheduled) immediately, with the lock released first.
        void add_callback(Callback callback, CallbackOptions opts) {
            CheckOptions(opts);
            std::unique_lock<std::mutex> lock(mutex_);
#ifdef NEBULA_WITH_OPENTELEMETRY
            // Capture the currently active tracing span so the callback later
            // runs inside the same span scope.
            callback = [func = std::move(callback),
                        active_span = ::nebula::internal::tracing::GetTracer()->GetCurrentSpan()](
                           const FutureImpl& impl) mutable {
              auto scope = ::nebula::internal::tracing::GetTracer()->WithActiveSpan(active_span);
              std::move(func)(impl);
            };
#endif
            CallbackRecord callback_record{std::move(callback), opts};
            if (IsFutureFinished(state_)) {
                // Already finished: run outside the lock, since the callback
                // may be arbitrarily slow or re-enter this future.
                lock.unlock();
                RunOrScheduleCallback(shared_from_this(), std::move(callback_record),
                        /*in_add_callback=*/true);
            } else {
                callbacks_.push_back(std::move(callback_record));
            }
        }

        // Atomically add a callback only if the future is unfinished.
        // Returns false (with `callback_factory` never invoked) when the
        // future has already finished, letting the caller run its callback
        // directly instead.
        bool try_add_callback(const std::function<Callback()> &callback_factory,
                            CallbackOptions opts) {
            CheckOptions(opts);
            std::unique_lock<std::mutex> lock(mutex_);
            if (IsFutureFinished(state_)) {
                return false;
            } else {
                callbacks_.push_back({callback_factory(), opts});
                return true;
            }
        }

        // Decide whether a callback must be handed to its executor rather
        // than run inline.  `in_add_callback` is true when the callback was
        // registered on an already-finished future (see add_callback).
        static bool ShouldScheduleCallback(const CallbackRecord &callback_record,
                                           bool in_add_callback) {
            switch (callback_record.options.should_schedule) {
                case ShouldSchedule::Never:
                    return false;
                case ShouldSchedule::Always:
                    return true;
                case ShouldSchedule::IfUnfinished:
                    // Run inline only when the future had already finished at
                    // the time the callback was added.
                    return !in_add_callback;
                case ShouldSchedule::IfDifferentExecutor:
                    return !(callback_record.options.executor->is_current_executor());
                default:
                            DKCHECK(false) << "Unrecognized ShouldSchedule option";
                    return false;
            }
        }

        // Run `callback_record` inline or spawn it on its executor, per
        // ShouldScheduleCallback.  `self` is the owning future, passed as a
        // shared_ptr so a scheduled task keeps it alive until it runs.
        static void RunOrScheduleCallback(const std::shared_ptr<FutureImpl> &self,
                                          CallbackRecord &&callback_record,
                                          bool in_add_callback) {
            if (ShouldScheduleCallback(callback_record, in_add_callback)) {
                // Need to keep `this` alive until the callback has a chance to be scheduled.
                auto task = [self, callback = std::move(callback_record.callback)]() mutable {
                    return std::move(callback)(*self);
                };
                        KCHECK_OK(callback_record.options.executor->spawn(std::move(task)));
            } else {
                std::move(callback_record.callback)(*self);
            }
        }

        // Single transition point to a terminal state: publishes `state`,
        // wakes waiters, then runs any queued callbacks outside the lock.
        void DoMarkFinishedOrFailed(FutureState state) {
            std::vector<CallbackRecord> callbacks;
            std::shared_ptr<FutureImpl> self;
            {
                std::unique_lock<std::mutex> lock(mutex_);
#ifdef NEBULA_WITH_OPENTELEMETRY
                // End the span attached to this future, if one was set.
                if (this->span_) {
                  util::tracing::Span& span = *span_;
                  END_SPAN(span);
                }
#endif

                // A future may transition to a terminal state exactly once.
                        DKCHECK(!IsFutureFinished(state_)) << "Future already marked finished";
                if (!callbacks_.empty()) {
                    // Keep ourselves alive while the callbacks below run.
                    callbacks = std::move(callbacks_);
                    auto self_inner = shared_from_this();
                    self = std::move(self_inner);
                }

                state_ = state;
                // We need to notify while holding the lock.  This notify often triggers
                // waiters to delete the future and it is not safe to delete a cv_ while
                // it is performing a notify_all
                cv_.notify_all();
            }
            if (callbacks.empty()) return;

            // run callbacks, lock not needed since the future is finished by this
            // point so nothing else can modify the callbacks list and it is safe
            // to iterate.
            //
            // In fact, it is important not to hold the locks because the callback
            // may be slow or do its own locking on other resources
            for (auto &callback_record: callbacks) {
                RunOrScheduleCallback(self, std::move(callback_record), /*in_add_callback=*/false);
            }
        }

        // Block until the future reaches a terminal state.
        void DoWait() {
            std::unique_lock<std::mutex> lock(mutex_);

            cv_.wait(lock, [this] { return IsFutureFinished(state_); });
        }

        // Block for at most `seconds`; returns true iff the future finished
        // (either before the call or within the timeout).
        bool DoWait(double seconds) {
            std::unique_lock<std::mutex> lock(mutex_);

            cv_.wait_for(lock, std::chrono::duration<double>(seconds),
                         [this] { return IsFutureFinished(state_); });
            return IsFutureFinished(state_);
        }

        // mutex_ guards state transitions and the callback list; cv_ wakes
        // DoWait() callers on completion.
        std::mutex mutex_;
        std::condition_variable cv_;
    };

    namespace {

        // Downcast the type-erased FutureImpl to the concrete implementation.
        // NOTE(review): checked_cast presumably asserts the dynamic type in
        // checked builds — see turbo/base/checked_cast.h.
        ConcreteFutureImpl *GetConcreteFuture(FutureImpl *future) {
            auto *concrete = turbo::checked_cast<ConcreteFutureImpl *>(future);
            return concrete;
        }

    }  // namespace

    // Build a fresh future in the PENDING state.
    std::unique_ptr<FutureImpl> FutureImpl::create() {
        std::unique_ptr<FutureImpl> impl = std::make_unique<ConcreteFutureImpl>();
        return impl;
    }

    // Build a future that is already in the terminal state `state`.
    //
    // The state is assigned without taking the lock: the object has not been
    // shared yet, so no waiter or callback can observe it concurrently.
    std::unique_ptr<FutureImpl> FutureImpl::make_finished(FutureState state) {
        // Use make_unique instead of a raw `new`, matching create().
        auto ptr = std::make_unique<ConcreteFutureImpl>();
        ptr->state_ = state;
        return ptr;
    }

    FutureImpl::FutureImpl() : state_(FutureState::PENDING) {}

    // Block the calling thread until this future reaches a terminal state.
    void FutureImpl::Wait() {
        GetConcreteFuture(this)->DoWait();
    }

    // Block for at most `seconds`; returns true iff the future finished.
    bool FutureImpl::Wait(double seconds) {
        auto *impl = GetConcreteFuture(this);
        return impl->DoWait(seconds);
    }

    // Mark this future successful and run its registered callbacks.
    void FutureImpl::mark_finished() {
        GetConcreteFuture(this)->do_mark_finished();
    }

    // Mark this future failed and run its registered callbacks.
    void FutureImpl::MarkFailed() {
        GetConcreteFuture(this)->DoMarkFailed();
    }

    // Forward callback registration to the concrete implementation.
    void FutureImpl::add_callback(Callback callback, CallbackOptions opts) {
        auto *impl = GetConcreteFuture(this);
        impl->add_callback(std::move(callback), opts);
    }

    // Forward conditional callback registration; returns true iff the
    // callback was added (i.e. the future had not yet finished).
    bool FutureImpl::try_add_callback(const std::function<Callback()> &callback_factory,
                                    CallbackOptions opts) {
        auto *impl = GetConcreteFuture(this);
        return impl->try_add_callback(callback_factory, opts);
    }

    // Returns a future that finishes successfully once every input future has
    // succeeded, or finishes with the first error status observed (remaining
    // inputs are then ignored).
    Future<> AllComplete(const std::vector<Future<>> &futures) {
        struct State {
            explicit State(int64_t n_futures) : mutex(), n_remaining(n_futures) {}

            // Serializes the failure path so only one error marks `out`.
            std::mutex mutex;
            // Number of inputs that have not yet succeeded.  Decremented only
            // on success, so it reaches zero only when every input succeeded.
            std::atomic<size_t> n_remaining;
        };

        if (futures.empty()) {
            // No inputs: trivially complete.
            return Future<>::make_finished();
        }

        auto state = std::make_shared<State>(futures.size());
        auto out = Future<>::create();
        for (const auto &future: futures) {
            future.add_callback([state, out](const turbo::Status &status) mutable {
                if (!status.ok()) {
                    // First failure wins; the lock prevents two failing inputs
                    // from both marking `out` finished.
                    std::unique_lock<std::mutex> lock(state->mutex);
                    if (!out.is_finished()) {
                        out.mark_finished(status);
                    }
                    return;
                }
                // A failure never decrements the counter, so the success path
                // below cannot race with the failure path above: it fires only
                // when all inputs succeeded.
                if (state->n_remaining.fetch_sub(1) != 1) return;
                out.mark_finished();
            });
        }
        return out;
    }

    // Wait for every input future to finish, then yield the first error
    // (in input order), or OK if all inputs succeeded.
    Future<> AllFinished(const std::vector<Future<>> &futures) {
        return All(futures).Then([](const std::vector<turbo::Result<turbo::EmptyResult>> &results) {
            auto bad = std::find_if(
                    results.begin(), results.end(),
                    [](const turbo::Result<turbo::EmptyResult> &res) { return !res.ok(); });
            if (bad == results.end()) {
                return turbo::OkStatus();
            }
            return bad->status();
        });
    }

}  // namespace nebula
