# -*- coding: utf-8 -*-
"""
# --------------------------------------------------------
# @Author : Pan
# @E-mail : 
# @Date   : 2025-10-10 08:53:53
# @Brief  : https://www.modelscope.cn/models/OpenBMB/MiniCPM-V-4_5/?st=1Y5ujGXt5rPd7Pq03xbrKcA
# --------------------------------------------------------
"""

import torch
from PIL import Image
from modelscope import AutoModel, AutoTokenizer

torch.manual_seed(100)  # fix sampling seed for reproducible generations

# Path to a local checkpoint; a hub model id (e.g. "OpenBMB/MiniCPM-V-4_5" or
# "openbmb/MiniCPM-o-2_6") also works here.
model_file = "../../output/MiniCPM-V-4_5"
# trust_remote_code is required: the checkpoint ships its own modeling code.
model = AutoModel.from_pretrained(model_file,
                                  trust_remote_code=True,
                                  attn_implementation='sdpa',  # 'sdpa' or 'flash_attention_2'; 'eager' not supported
                                  dtype=torch.bfloat16)
model = model.eval().cuda()  # inference mode on the GPU
tokenizer = AutoTokenizer.from_pretrained(model_file, trust_remote_code=True)

image_file = "../../data/image1.jpg"
image = Image.open(image_file).convert('RGB')  # model expects 3-channel RGB input

enable_thinking = False  # If True, enable the model's step-by-step "thinking" mode.
stream = True  # If True, model.chat returns a generator of text chunks; if False, a complete string.

# First round chat: the user turn carries the image plus the text question.
question = "请描述这张图片"  # "Please describe this image"
msgs = [{'role': 'user', 'content': [image, question]}]

# First round: query the model; honor the `stream` flag defined above
# instead of hard-coding stream=True (the flag was previously dead code).
answer = model.chat(
    msgs=msgs,
    tokenizer=tokenizer,
    thinking=enable_thinking,
    stream=stream,
)

if stream:
    # Streaming: `answer` is a generator of text chunks; echo each as it arrives.
    generated_text = ""
    for new_text in answer:
        generated_text += new_text
        print(new_text, flush=True, end='')
    print()  # terminate the streamed line so the next round's output starts fresh
else:
    # Non-streaming: `answer` is already the complete reply string.
    generated_text = answer
    print(generated_text)

# Second round: append the assistant's reply and a follow-up question so the
# model sees the full multi-turn conversation history.
msgs.append({"role": "assistant", "content": [generated_text]})
msgs.append({"role": "user", "content": ["他正在做什么?"]})  # "What is he doing?"

answer = model.chat(
    msgs=msgs,
    tokenizer=tokenizer,
    thinking=enable_thinking,  # keep the thinking-mode setting consistent with round one
    stream=stream,  # honor the flag instead of hard-coding True
)

if stream:
    # Streaming: accumulate and echo text chunks as they arrive.
    generated_text = ""
    for new_text in answer:
        generated_text += new_text
        print(new_text, flush=True, end='')
    print()  # terminate the streamed line
else:
    # Non-streaming: `answer` is the complete reply string.
    generated_text = answer
    print(generated_text)
