/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SMART_SERVE_SERVICES_SERVER_SCHEDULER_SIMPLE_H
#define SMART_SERVE_SERVICES_SERVER_SCHEDULER_SIMPLE_H

#include "scheduler/scheduler.h"
#include "smart_serve_log.h"

#include "model/model.h"
#include "service/service.h"
#include "session/session.h"

namespace OHOS {
namespace SmartServe {

class SimpleScheduler : public Scheduler {
public:
    SimpleScheduler() = default;

protected:
    /**
     * Scheduler thread body: block until an event arrives (or termination is
     * requested), process all queued events, then run one scheduling pass.
     */
    virtual void ThreadMain()
    {
        while (true) {
            std::unique_lock<std::mutex> lock(mutex_);
            cv_.wait(lock, [this]() { return !events_.empty() || terminating_; });

            if (terminating_) {
                // NOTE(review): events still queued at this point are dropped, so
                // Model/Session objects carried by pending *Release events are not
                // deleted on shutdown — confirm this is intentional.
                SMART_SERVE_LOGD("[SimpleScheduler] terminating");
                return;
            }

            handleEvents();
            Schedule();
        }
    }

    // Dispatch every queued event to its typed handler, then clear the queue.
    // Runs with mutex_ held (see ThreadMain).
    void handleEvents()
    {
        for (auto& event : events_) {
            switch (event->type_) {
                case EventType::SCHEDULE: {
                    // Pure wake-up: Schedule() runs after this loop anyway.
                    break;
                }
                case EventType::MODEL_APPLIED: {
                    handle(static_cast<ModelAppliedEvent*>(event.get()));
                    break;
                }
                case EventType::MODEL_RELEASE: {
                    handle(static_cast<ModelReleseEvent*>(event.get()));
                    break;
                }
                case EventType::SESSION_APPLIED: {
                    handle(static_cast<SessionAppliedEvent*>(event.get()));
                    break;
                }
                case EventType::SESSION_RELEASE: {
                    handle(static_cast<SessionReleseEvent*>(event.get()));
                    break;
                }
                case EventType::DEBUG: {
                    handle(static_cast<DebugEvent*>(event.get()));
                    break;
                }
                default: {
                    SMART_SERVE_LOGE("Wrong type in maintenances queue: %d", static_cast<int>(event->type_));
                }
            }
        }

        events_.clear();
    }

    // A model finished applying a new resource config; settle the bookkeeping.
    void handle(ModelAppliedEvent* event)
    {
        postUpdate(event->preConfig_, event->postConfig_, event->successful_);
    }

    // Final destruction of a released model. Ownership of the raw pointer is
    // transferred through the event; the scheduler thread is the sole deleter.
    void handle(ModelReleseEvent* event)
    {
        delete event->model_;
    }

    // A session finished applying a new resource config; settle the bookkeeping.
    void handle(SessionAppliedEvent* event)
    {
        postUpdate(event->preConfig_, event->postConfig_, event->successful_);
    }

    // Final destruction of a released session (ownership via the event).
    void handle(SessionReleseEvent* event)
    {
        delete event->session_;
    }

    void handle(DebugEvent* event)
    {
        SMART_SERVE_LOGD("[DebugEvent] %s", event->options_.c_str());
    }

    /**
     * One scheduling pass:
     *  1. terminate models that no session uses,
     *  2. pause idle sessions / mark runnable busy sessions as pending,
     *  3. try to grant resources to sessions waiting for them.
     * Holds the service-wide management lock for the whole pass.
     */
    void Schedule()
    {
        auto service = Service::Instance();
        std::unique_lock<std::mutex> lockMgmt(service->msMgmtMutex_);

        // Models with no session attached.
        std::vector<Model*> unusedModels;
        for (auto& mItem : service->modelManager_.allModels_) {
            auto model = mItem.second.get();

            if (model->sessions_.empty()) {
                unusedModels.push_back(model);
            }
        }

        // Reclaim the unused models.
        for (auto model : unusedModels) {
            model->TriggerTerminating();
        }

        // Sessions with no pending transition that have tasks but lack resources.
        std::vector<Session*> waitingSessions;
        for (auto& sItem : service->sessionManager_.allSessions_) {
            auto session = sItem.second.get();
            std::unique_lock<std::mutex> lockSession(session->mutex_);

            // TODO: reclaim sessions that have been idle for a long time.

            if (session->pending_) { // never issue a second pending transition
                continue;
            }
            // State here: no pending transition.

            if (session->isRunnable()) {
                // State here: no pending, runnable.

                // Pause sessions that have nothing to do.
                if (session->tasks_.empty()) {
                    Pause(session);
                    continue;
                }
                session->pending_ = true;
                continue;
            }
            // State here: no pending, not runnable.

            // Collect sessions waiting for resources.
            if (!session->tasks_.empty()) {
                waitingSessions.push_back(session);
                continue;
            }
            // State here: no pending, not runnable, no tasks — nothing to do.
        }

        // TODO: prioritize/sort the waiting sessions.
        SMART_SERVE_LOGD("#waitingSessions = %zu", waitingSessions.size());

        // Bring waiting sessions up until resources run out.
        for (auto& session : waitingSessions) {
            std::unique_lock<std::mutex> lockSession(session->mutex_);
            auto res = GrantResource(session);
            if (res == GrantResult::FAILED_NO_RESOURCE) {
                break;
            }
        }
    }

    // Pause a session by transitioning it to a zero-compute config.
    // Caller must hold the session lock.
    void Pause(Session* session)
    {
        ASSERT(!session->pending_);

        auto nrc = session->currentResConfig_;
        nrc.nCPUs = 0;
        nrc.nGPUs = 0;
        nrc.nNPUs = 0;

        TriggerPending(session, nrc);
    }

    enum class GrantResult {
        FAILED_MODEL_PENDING,
        FAILED_NO_RESOURCE,
        OK_MODEL,
        OK_SESSION,
    };

    /**
     * Try to grant the session its preferred resource config.
     * Caller must hold the management lock and the session lock.
     * If the session depends on models that are not runnable yet, those models
     * are started first (OK_MODEL); the session itself is started on a later
     * pass once they are up.
     */
    GrantResult GrantResource(Session* session)
    {
        ASSERT(!session->pending_);

        // TODO: account for resources the session already holds.

        // Target resource config for the session.
        auto src = session->resConfigs_[0];
        auto current = session->currentResConfig_;

        // Extra CPUs needed; clamped at zero so a larger current allocation
        // cannot wrap around (unsigned) or go negative (signed) here.
        auto requiredCPUs = src.nCPUs > current.nCPUs ? src.nCPUs - current.nCPUs : 0;

        // Check the CPU budget.
        if (devManager_.AvailableNumCPUs() < requiredCPUs) {
            return GrantResult::FAILED_NO_RESOURCE;
        }

        // Check the state of the models this session depends on.
        std::vector<Model*> nonRunnableModels;
        for (auto& model : session->models_) {
            // A model with an unfinished pending transition blocks the session.
            if (model->pending_) {
                return GrantResult::FAILED_MODEL_PENDING;
            }

            if (!model->isRunnable()) {
                nonRunnableModels.push_back(model);
            }
        }

        // Total additional memory: session growth plus every model that still
        // has to be brought up. Each delta is clamped at zero to avoid
        // underflow when the current allocation already exceeds the target.
        auto requiredMemSize = src.memSize_ > current.memSize_ ? src.memSize_ - current.memSize_ : 0;
        for (auto& model : nonRunnableModels) {
            const auto& target = model->resConfigs_[0];
            if (target.memSize_ > model->currentResConfig_.memSize_) {
                requiredMemSize += target.memSize_ - model->currentResConfig_.memSize_;
            }
        }

        if (requiredMemSize > devManager_.AvailableMemorySize()) {
            return GrantResult::FAILED_NO_RESOURCE;
        }

        // Dependent models must be running before the session can start.
        if (nonRunnableModels.empty()) {
            // Pick the concrete CPUs the session will use.
            devManager_.FindAvailableCPUs(src);

            TriggerPending(session, src);
            return GrantResult::OK_SESSION;
        }

        for (auto& model : nonRunnableModels) {
            TriggerPending(model, model->resConfigs_[0]);
        }
        return GrantResult::OK_MODEL;
    }

    enum class DepriveResult {
        FAILED_NO_ALTERNATIVE,
        FAILED_WAIT_SESSION_PENDING,
        OK,
    };

    // Locate the first config in the ordered list that the current allocation
    // satisfies; returns resConfigs.end() if none matches.
    template<typename ResConfigType>
    auto LocateResConfig(const std::vector<ResConfigType>& resConfigs, const ResConfigType& current)
    {
        auto it = resConfigs.begin();
        while (it != resConfigs.end() && !it->isSatisfiedBy(current)) {
            ++it;
        }
        return it;
    }

    /**
     * Shrink a model to its next lower resource config.
     * Caller must hold the management lock; session locks are taken inside.
     * All sessions using the model must be paused first; until they are, this
     * keeps returning FAILED_WAIT_SESSION_PENDING.
     */
    DepriveResult DepriveResource(Model* model)
    {
        ASSERT(!model->pending_);

        // Find the next, lower resource config.
        // TODO: make the downgrade policy pluggable.
        auto it = LocateResConfig(model->resConfigs_, model->currentResConfig_);
        if (it == model->resConfigs_.end() || ++it == model->resConfigs_.end()) {
            return DepriveResult::FAILED_NO_ALTERNATIVE;
        }

        // The next config must still fit inside the current allocation.
        auto& mrc = *it;
        if (!mrc.isSatisfiedBy(model->currentResConfig_)) {
            return DepriveResult::FAILED_NO_ALTERNATIVE;
        }

        // Stop every session that uses this model.
        bool hasRunnableSession = false;
        for (auto& session : model->sessions_) {
            std::unique_lock<std::mutex> lockSession(session->mutex_);
            if (session->isRunnable()) {
                hasRunnableSession = true;

                if (!session->pending_) {
                    Pause(session);
                }
            }
        }

        // Only once every session has stopped...
        if (hasRunnableSession) {
            return DepriveResult::FAILED_WAIT_SESSION_PENDING;
        }

        // ...may the model itself be transitioned.
        TriggerPending(model, *it);

        return DepriveResult::OK;
    }

    // Start a model config transition after reserving resources for it.
    void TriggerPending(Model* model, const ModelResConfig& newConfig)
    {
        ASSERT(!model->pending_);
        preUpdate(model->currentResConfig_, newConfig);
        model->TriggerPending(newConfig);
    }

    // Start a session config transition after reserving resources for it.
    void TriggerPending(Session* session, const SessionResConfig& newConfig)
    {
        ASSERT(!session->pending_);
        preUpdate(session->currentResConfig_, newConfig);
        session->TriggerPending(newConfig);
    }

    // Model bookkeeping *before* a transition is triggered. Memory for a
    // growth is reserved up front so concurrent grants cannot oversubscribe;
    // a shrink is settled in postUpdate once it has actually succeeded.
    void preUpdate(const ModelResConfig& preConfig, const ModelResConfig& postConfig)
    {
        SMART_SERVE_LOGD("[model before preUpdate] from %zu to %zu",
                         static_cast<size_t>(preConfig.memSize_),
                         static_cast<size_t>(postConfig.memSize_));
        auto savedMemSize = devManager_.AvailableMemorySize();

        if (postConfig.memSize_ > preConfig.memSize_) { // granting resources
            devManager_.OccupyMemory(postConfig.memSize_ - preConfig.memSize_);
        }

        SMART_SERVE_LOGD("[model after preUpdate] memory = from %zu to %zu",
                         static_cast<size_t>(savedMemSize),
                         static_cast<size_t>(devManager_.AvailableMemorySize()));
    }

    // Model bookkeeping *after* a transition finished. Invariant: available
    // memory ends up reduced by exactly the model's net growth, and unchanged
    // if the transition failed.
    void postUpdate(const ModelResConfig& preConfig, const ModelResConfig& postConfig, bool successful)
    {
        SMART_SERVE_LOGD("[model before postUpdate] successful = %s", successful ? "true" : "false");
        auto savedMemSize = devManager_.AvailableMemorySize();

        if (postConfig.memSize_ > preConfig.memSize_) { // granting resources
            if (!successful) {
                // Undo the up-front reservation made in preUpdate.
                devManager_.ReleaseMemory(postConfig.memSize_ - preConfig.memSize_);
            }
        } else { // depriving resources
            if (successful) {
                // The shrink freed memory — return it to the pool. (Fixed: this
                // previously called OccupyMemory, leaking available memory on
                // every successful deprivation.)
                devManager_.ReleaseMemory(preConfig.memSize_ - postConfig.memSize_);
            }
        }

        SMART_SERVE_LOGD("[model after postUpdate]: memory = from %zu to %zu",
                         static_cast<size_t>(savedMemSize),
                         static_cast<size_t>(devManager_.AvailableMemorySize()));
    }

    // Session bookkeeping *before* a transition is triggered: reserve the new
    // CPU set and any memory growth up front.
    void preUpdate(const SessionResConfig& preConfig, const SessionResConfig& postConfig)
    {
        SMART_SERVE_LOGD("[session before preUpdate] memSize_ from %zu to %zu",
                         static_cast<size_t>(preConfig.memSize_),
                         static_cast<size_t>(postConfig.memSize_));
        SMART_SERVE_LOGD("[session before preUpdate] nCPUs from %d to %d",
                         static_cast<int>(preConfig.nCPUs),
                         static_cast<int>(postConfig.nCPUs));
        auto savedMemSize = devManager_.AvailableMemorySize();
        auto savedAvailCpus = devManager_.AvailableNumCPUs();

        // NOTE(review): postUpdate occupies postConfig's CPUs again on success;
        // this assumes OccupyCPUs/ReleaseCPUs are set-based (idempotent per
        // config) rather than count-based — confirm against DeviceManager.
        devManager_.OccupyCPUs(postConfig);

        if (postConfig.memSize_ > preConfig.memSize_) { // granting resources
            devManager_.OccupyMemory(postConfig.memSize_ - preConfig.memSize_);
        }

        SMART_SERVE_LOGD("[session after preUpdate] memory = from %zu to %zu",
                         static_cast<size_t>(savedMemSize),
                         static_cast<size_t>(devManager_.AvailableMemorySize()));
        SMART_SERVE_LOGD("[session after preUpdate] available CPUs = from %zu to %zu",
                         static_cast<size_t>(savedAvailCpus),
                         static_cast<size_t>(devManager_.AvailableNumCPUs()));
    }

    // Session bookkeeping *after* a transition finished. On success the old
    // CPU set is released and the new one kept; on failure the reservation is
    // rolled back. Memory follows the same invariant as the model overload.
    void postUpdate(const SessionResConfig& preConfig, const SessionResConfig& postConfig, bool successful)
    {
        SMART_SERVE_LOGD("[session before postUpdate] successful = %s", successful ? "true" : "false");
        auto savedMemSize = devManager_.AvailableMemorySize();
        auto savedAvailCpus = devManager_.AvailableNumCPUs();

        if (successful) {
            devManager_.ReleaseCPUs(preConfig);
            devManager_.OccupyCPUs(postConfig);
        } else {
            devManager_.ReleaseCPUs(postConfig);
            devManager_.OccupyCPUs(preConfig);
        }

        if (postConfig.memSize_ > preConfig.memSize_) { // granting resources
            if (!successful) {
                // Undo the up-front reservation made in preUpdate. (Fixed: this
                // previously called OccupyMemory, double-counting the
                // reservation; the model overload already used ReleaseMemory.)
                devManager_.ReleaseMemory(postConfig.memSize_ - preConfig.memSize_);
            }
        } else { // depriving resources
            if (successful) {
                // The shrink freed memory — return it to the pool. (Fixed:
                // previously OccupyMemory, which leaked available memory.)
                devManager_.ReleaseMemory(preConfig.memSize_ - postConfig.memSize_);
            }
        }

        SMART_SERVE_LOGD("[session after postUpdate] memory = from %zu to %zu",
                         static_cast<size_t>(savedMemSize),
                         static_cast<size_t>(devManager_.AvailableMemorySize()));
        SMART_SERVE_LOGD("[session after postUpdate] available CPUs = from %zu to %zu",
                         static_cast<size_t>(savedAvailCpus),
                         static_cast<size_t>(devManager_.AvailableNumCPUs()));
    }
};

} // namespace SmartServe
} // namespace OHOS

#endif // SMART_SERVE_SERVICES_SERVER_SCHEDULER_SIMPLE_H
