/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SMART_SERVE_PLUGIN_LLAMACPP_QWEN_SESSION_H
#define SMART_SERVE_PLUGIN_LLAMACPP_QWEN_SESSION_H

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

#include <assertion.h>
#include <Utf8Assembler.h>
#include <smart_serve_log.h>

#include <model/adapter.h>
#include <session/adapter.h>
#include <session/creator.h>

#include "./model.h"

namespace OHOS {
namespace llamacpp {

#define TAG "[llamacpp | QwenSessionAdapter] "

/**
 * @brief Session adapter that runs a Qwen model through llama.cpp.
 *
 * Lifecycle: the service calls ApplyResConfig() to create the llama context
 * and sampler, then calls Eval() repeatedly to advance the current task, and
 * finally Finalize() to release all llama resources.
 */
class QwenSessionAdapter : public SmartServe::SessionAdapter {
public:
    /**
     * @param name          session name; only "qwen3-0.6b" is currently supported.
     * @param modelAdapters exactly one adapter is expected and it must be a
     *                      QwenModelAdapter (not owned by this object).
     */
    QwenSessionAdapter(std::string name, const std::vector<SmartServe::ModelAdapter*>& modelAdapters)
        : name_(std::move(name))
    {
        ASSERT(modelAdapters.size() == 1);
        ma_ = static_cast<QwenModelAdapter*>(modelAdapters[0]);

        SmartServe::SessionResConfig rc{.nCPUs = 1};
        if (name_ == "qwen3-0.6b") {
            rc.nCPUs = 1;
            rc.memSize_ = 10;
        } else {
            SMART_SERVE_LOGE(TAG "unknown session name: %s", name_.c_str());
            ASSERT(false);
        }
        resConfigs_.emplace_back(rc);
    }

    const std::string& Name() const override
    {
        return name_;
    }

    const std::vector<SmartServe::SessionResConfig>& InitialResConfigs() const override
    {
        return resConfigs_;
    }

    /**
     * Applies a resource configuration. Performs one-time initialization of
     * the llama context and sampler; on later calls it only reports whether
     * the request is a pause (target.nCPUs == 0) or a no-op.
     */
    SmartServe::SessionApplyResult ApplyResConfig(const SmartServe::SessionResConfig& target) override
    {
        SMART_SERVE_LOGD(TAG "%s: ApplyResConfig", name_.c_str());

        // Service-layer error: target exceeds the declared config, or the
        // model was never loaded.
        if (!resConfigs_[0].isSatisfiedBy(target) || !ma_->model_) {
            return {SmartServe::ApplyResult::FATAL, {}};
        }

        // Already initialized.
        if (ctx_) {
            // nCPUs == 0 is treated as a pause request.
            auto result = SmartServe::ApplyResult::UNCHANGED;
            if (target.nCPUs == 0) {
                result = SmartServe::ApplyResult::SUCCESSFUL;
            }
            return {result, {}};
        }

        // First-time initialization.
        if (!InitCtx() || !InitSampler()) {
            CleanUp();
            return {SmartServe::ApplyResult::FATAL, {}};
        }

        return {SmartServe::ApplyResult::SUCCESSFUL, {}};
    }

    /**
     * Advances the given task by up to TOKENS_PER_EVAL decode/sample steps.
     * @return FINISHED when the task ends (EOG token, full context, or error),
     *         CONTINUE when the scheduler should call Eval() again.
     */
    SmartServe::SessionEvalResult Eval(SmartServe::EvalTask* _task) override
    {
        SMART_SERVE_LOGD(TAG "%s: Eval", name_.c_str());
        auto task = static_cast<SmartServe::EvalLLMTask*>(_task);

        // Prepare for a newly arrived task.
        if (newTask_) {
            if (!Tokenize(task->inputs_)) {
                task->Done(SmartServe::EvalTask::TaskState::ABORTED);
                return EndTask();
            }
        }

        llama_batch batch;

        // Reassembles UTF-8 sequences that are split across token pieces.
        SmartServe::Utf8Assembler assembler;
        char buf[128];
        for (std::size_t i = 0; i < TOKENS_PER_EVAL; ++i) {
            // Build a batch covering the not-yet-decoded token suffix.
            PrepareBatch(batch);

            // Decode.
            if (llama_decode(ctx_, batch)) {
                task->Done(SmartServe::EvalTask::TaskState::ABORTED);
                return EndTask();
            }
            nProcessed_ = tokens_.size();

            // Sample the next token and convert it to text.
            auto token = llama_sampler_sample(sampler_, ctx_, -1);
            auto n = llama_token_to_piece(vocab_, token, buf, static_cast<int32_t>(sizeof(buf)), 0, true);
            if (n < 0) {
                task->Done(SmartServe::EvalTask::TaskState::ABORTED);
                return EndTask();
            }
            if (n > 0) {
                std::string piece(buf, buf + n);
                std::string utf8 = assembler.feed(piece);
                if (!utf8.empty()) {
                    // Deliver only complete UTF-8 output to the caller.
                    task->AppendResult(utf8);
                }
            }
            tokens_.push_back(token);

            // Stop on end-of-generation or when the context window is full.
            if (llama_vocab_is_eog(vocab_, token) || tokens_.size() >= cparams_.n_ctx) {
                task->Done(SmartServe::EvalTask::TaskState::FINISHED);
                return EndTask();
            }
        }

        // Yield; the scheduler will call Eval() again for this task.
        return ContinueTask();
    }

    void Finalize() override
    {
        SMART_SERVE_LOGD(TAG "%s: Finalize", name_.c_str());

        CleanUp();
    }

private:
    std::string name_;
    std::vector<SmartServe::SessionResConfig> resConfigs_;
    QwenModelAdapter* ma_;  // not owned; lifetime managed by the service

    static constexpr uint32_t N_CTX = 512;             // context window size in tokens
    static constexpr std::size_t TOKENS_PER_EVAL = 3;  // decode steps per Eval() call
    llama_context_params cparams_;
    llama_context* ctx_ = nullptr;

    llama_sampler* sampler_ = nullptr;
    const llama_vocab* vocab_ = nullptr;  // owned by the model; never freed here

    std::vector<llama_token> tokens_;  // full token history: prompt + generated
    std::size_t nProcessed_ = 0;       // count of leading tokens_ already in the KV cache
    bool newTask_ = true;

    // Creates the llama context with the fixed session parameters.
    bool InitCtx()
    {
        cparams_ = llama_context_default_params();
        cparams_.n_ctx = N_CTX;

        ctx_ = llama_init_from_model(ma_->model_, cparams_);
        if (!ctx_) {
            SMART_SERVE_LOGE(TAG "%s: fail to init context", name_.c_str());
            return false;
        }

        return true;
    }

    // Builds a greedy sampler chain and caches the model vocabulary.
    bool InitSampler()
    {
        auto schain_params = llama_sampler_chain_default_params();
        sampler_ = llama_sampler_chain_init(schain_params);
        auto greedy = llama_sampler_init_greedy();

        if (sampler_ && greedy) {
            // The chain takes ownership of `greedy` from here on.
            llama_sampler_chain_add(sampler_, greedy);
            vocab_ = llama_model_get_vocab(ma_->model_);
            return true;
        }

        SMART_SERVE_LOGE(TAG "%s: fail to init sampler", name_.c_str());

        // Free the sampler that never made it into the chain.
        if (greedy) {
            llama_sampler_free(greedy);
        }
        return false;
    }

    // Releases all llama resources and resets the incremental-decode state so
    // a re-initialized context never matches a stale token prefix.
    void CleanUp()
    {
        vocab_ = nullptr;

        if (sampler_) {
            llama_sampler_free(sampler_);
            sampler_ = nullptr;
        }

        if (ctx_) {
            llama_free(ctx_);
            ctx_ = nullptr;
        }

        tokens_.clear();
        // The KV cache is gone with the context; without this reset, Tokenize
        // would fabricate nProcessed_ zero-valued tokens on the next task and
        // could wrongly skip decoding a matching prefix.
        nProcessed_ = 0;
        newTask_ = true;
    }

    // Marks the current task as finished; the next Eval() starts a new task.
    SmartServe::SessionEvalResult EndTask()
    {
        newTask_ = true;
        return {SmartServe::EvalResult::FINISHED, {}};
    }

    // Keeps the current task active across Eval() calls.
    SmartServe::SessionEvalResult ContinueTask()
    {
        newTask_ = false;
        return {SmartServe::EvalResult::CONTINUE, {}};
    }

    // Tokenizes the prompt and reuses the longest common prefix that is
    // already present in the KV cache. Returns false when tokenization fails
    // or the prompt does not fit into the context window.
    bool Tokenize(const std::string& inputs)
    {
        std::vector<llama_token> newTokens;
        newTokens.resize(cparams_.n_ctx);

        auto n = llama_tokenize(vocab_,
                                inputs.c_str(),
                                static_cast<int32_t>(inputs.size()),
                                newTokens.data(),
                                static_cast<int32_t>(newTokens.size()),
                                true,
                                true);

        if (n <= 0) {
            return false;
        }

        newTokens.resize(static_cast<std::size_t>(n));

        // Reuse the KV cache: keep only tokens confirmed decoded, then find
        // the longest prefix shared with the new prompt.
        tokens_.resize(nProcessed_);
        nProcessed_ = 0;
        for (; nProcessed_ < newTokens.size() && nProcessed_ < tokens_.size(); ++nProcessed_) {
            if (newTokens[nProcessed_] != tokens_[nProcessed_]) {
                break;
            }
        }
        // Always leave at least one token to decode so the batch is non-empty.
        nProcessed_ = std::min(nProcessed_, newTokens.size() - 1);
        // Truncate the KV cache past the shared prefix. The batch is built
        // with llama_batch_get_one and therefore uses sequence 0; remove that
        // sequence's entries from position nProcessed_ to the end. (The old
        // llama_memory_seq_keep(mem, nProcessed_) call passed a token count
        // as a sequence id and discarded the cache instead of trimming it.)
        llama_memory_seq_rm(llama_get_memory(ctx_),
                            0,
                            static_cast<llama_pos>(nProcessed_),
                            -1);

        tokens_.swap(newTokens);
        return true;
    }

    // Builds a single-sequence batch covering the not-yet-decoded suffix.
    void PrepareBatch(llama_batch& batch)
    {
        if (nProcessed_ >= tokens_.size()) {
            // Should not happen; re-decode the last token to keep the batch valid.
            SMART_SERVE_LOGE(TAG "%s: nProcessed_ reaches the number of all saved tokens", name_.c_str());
            nProcessed_ = tokens_.size() - 1;
        }
        batch = llama_batch_get_one(tokens_.data() + nProcessed_, static_cast<int32_t>(tokens_.size() - nProcessed_));
    }
};

#undef TAG

} // namespace llamacpp
} // namespace OHOS

#endif // SMART_SERVE_PLUGIN_LLAMACPP_QWEN_SESSION_H
