# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail : 
# @Date   : 2025-09-28 18:08:41
# @Brief  : https://github.com/abetlen/llama-cpp-python
# --------------------------------------------------------
"""
from llama_cpp import Llama
from llama_cpp.llama_chat_format import MiniCPMv26ChatHandler

# All model artifacts live in one export directory; keep the prefix in one place.
_MODEL_DIR = "/home/PKing/nasdata/Project/LLM/MLLM-Factory/output/MiniCPM-V-4_5/model"
# CLIP vision projector (mmproj) used by the multimodal chat handler.
CLIP_MODEL_PATH = _MODEL_DIR + "/mmproj-model-f16.gguf"
# Q4_K_M-quantized GGUF language model weights.
LLM_MODEL_PATH = _MODEL_DIR + "/ggml-model-Q4_K_M.gguf"


def build_llm(n_ctx: int = 2048) -> Llama:
    """Load the MiniCPM-V-4.5 GGUF model wired with its vision chat handler.

    Args:
        n_ctx: Context window size; kept large enough to accommodate the
            image embedding tokens produced by the vision projector.

    Returns:
        A ready-to-use ``Llama`` instance with multimodal chat support.
    """
    chat_handler = MiniCPMv26ChatHandler(clip_model_path=CLIP_MODEL_PATH)
    print(chat_handler)  # debug: confirm the vision handler loaded
    return Llama(
        model_path=LLM_MODEL_PATH,
        chat_handler=chat_handler,
        n_ctx=n_ctx,  # n_ctx should be increased to accommodate the image embedding
    )


def describe_image(llm: Llama, image_url: str, question: str = "What's in this image?"):
    """Ask the model to describe the image at *image_url*.

    Args:
        llm: A ``Llama`` instance built with a vision chat handler.
        image_url: URL of the image to describe (fetched by llama-cpp-python).
        question: Text prompt accompanying the image.

    Returns:
        The raw chat-completion response dict from ``create_chat_completion``.
    """
    return llm.create_chat_completion(
        messages=[
            {"role": "system", "content": "You are an assistant who perfectly describes images."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": question},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            },
        ]
    )


if __name__ == "__main__":
    # Guard the expensive model load so importing this module has no side effects.
    llm = build_llm()
    # Example (requires network access to fetch the image):
    # print(describe_image(llm, "https://pic.nximg.cn/file/20221230/31723731_111324918104_2.jpg"))