/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SMART_SERVE_PLUGIN_LLAMACPP_QWEN_MODEL_H
#define SMART_SERVE_PLUGIN_LLAMACPP_QWEN_MODEL_H

#include <string>
#include <utility>
#include <vector>

#include <assertion.h>
#include <smart_serve_log.h>

#include <model/adapter.h>

#include <llama.h>

namespace OHOS {
namespace llamacpp {

#define TAG "[llamacpp | QwenModelAdapter] "

class QwenModelAdapter : public SmartServe::ModelAdapter {
public:
    QwenModelAdapter(const std::string& name, const std::string& model_path) : name_(name), model_path_(model_path)
    {
        SmartServe::ModelResConfig rc;
        if (name_ == "qwen3-0.6b") {
            rc.memSize_ = 10; // TODO
        } else {
            SMART_SERVE_LOGE(TAG "unknown model name: %s", name.c_str());
            ASSERT(false);
        }
        resConfigs_.emplace_back(rc);
    }

    const std::string& Name() const override
    {
        return name_;
    }

    const std::vector<SmartServe::ModelResConfig>& InitialResConfigs() const override
    {
        return resConfigs_;
    }

    SmartServe::ModelApplyResult ApplyResConfig(const SmartServe::ModelResConfig& target) override
    {
        SMART_SERVE_LOGD(TAG "%s: ApplyResConfig", name_.c_str());

        // 服务层错误
        if (!resConfigs_[0].isSatisfiedBy(target)) {
            return SmartServe::ModelApplyResult::FATAL;
        }

        // 已经初始化，不理会
        if (model_) {
            return SmartServe::ModelApplyResult::UNCHANGED;
        }

        // 加载模型参数
        llama_model_params mparams = llama_model_default_params();
        model_ = llama_model_load_from_file(model_path_.c_str(), mparams);
        if (!model_) {
            SMART_SERVE_LOGE(TAG "%s: failed to load model on %s", name_.c_str(), model_path_.c_str());
            return SmartServe::ModelApplyResult::FATAL;
        }

        return SmartServe::ModelApplyResult::SUCCESSFUL;
    }

    void Finalize() override
    {
        SMART_SERVE_LOGD(TAG "%s: Finalize", name_.c_str());

        if (model_) {
            llama_model_free(model_);
            model_ = nullptr;
        }
    }

    llama_model* model_ = nullptr;

private:
    std::string name_;
    std::string model_path_;

    std::vector<SmartServe::ModelResConfig> resConfigs_;
};

#undef TAG

} // namespace llamacpp
} // namespace OHOS

#endif // SMART_SERVE_PLUGIN_LLAMACPP_QWEN_MODEL_H
