# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail : 
# @Date   : 2025-10-29 15:08:32
# @Brief  :
# --------------------------------------------------------
"""
import os
import base64
import json
from PIL import Image

# Pin inference to GPU 0. This must be set BEFORE importing the model
# module below, since CUDA device visibility is read at import/init time.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

from libs.minicpm.gradio.server.models.minicpmv4_5_v1 import ModelMiniCPMV4_5

# Log the working directory — the model/image paths below are relative to it.
print("pwd:", os.getcwd())


def test_minicpm():
    """Smoke-test the MiniCPM-V 4.5 model end to end.

    Loads the model from a relative checkpoint path, sends one local image
    plus a text question through the server-style message schema, and prints
    the model's reply and output-token count.

    Raises:
        FileNotFoundError: if the model directory or the test image is missing.
    """
    model_path = "../../output/MiniCPM-V-4_5"
    image_file = "../../data/image1.jpg"
    # Validate inputs with explicit exceptions instead of `assert`,
    # which is silently stripped when Python runs with -O.
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"model_path {model_path} not exists")
    if not os.path.exists(image_file):
        raise FileNotFoundError(f"image_file {image_file} not exists")

    # 1. Initialize the model.
    print("加载模型中...")
    model = ModelMiniCPMV4_5(model_path)

    # 2. Read the test image and base64-encode it for transport.
    with open(image_file, "rb") as f:
        image_base64 = base64.b64encode(f.read()).decode('utf-8')

    # Build the chat message. NOTE(review): content items use the key
    # "pairs" rather than the usual "text"/"image_url" — presumably the
    # schema ModelMiniCPMV4_5 expects; verify against its parser.
    message = [{
        "role": "user",
        "content": [
            {"type": "image", "pairs": image_base64},
            {"type": "text", "pairs": "描述这张图片的内容"}
        ]
    }]

    # 3. Assemble the request payload. `question` and `params` are
    # JSON-serialized strings, mirroring how the gradio server passes them.
    input_data = {
        "image": image_base64,
        "question": json.dumps(message),
        "params": json.dumps({
            "max_new_tokens": 512,
            "temperature": 0.7,
            "enable_thinking": False
        })
    }

    # 4. Run inference; the model returns (answer_text, output_token_count).
    print("调用模型中...")
    answer, token_count = model(input_data)

    print("\n" + "=" * 60)
    print("模型回复:")
    print("=" * 60)
    print(answer)
    print("=" * 60)
    print(f"输出token数量: {token_count}")


if __name__ == "__main__":
    # Run the smoke test only when executed directly, not on import.
    test_minicpm()
