"""Caption a local image with a locally stored Qwen2.5-VL model.

Loads the model and its multimodal processor from ``model_path``, builds a
chat-templated prompt containing the image, runs greedy generation, and
prints the generated description.
"""
import torch
from PIL import Image
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration

# 1. Load the local model and processor.
# FIX: the original code created only a tokenizer yet later referenced an
# undefined ``processor``; a vision-language model needs AutoProcessor,
# which preprocesses both the image and the text.
model_path = "./qwen2.5_vl_7B"
processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
# FIX: AutoModelForCausalLM does not map the Qwen2.5-VL architecture;
# the documented class is Qwen2_5_VLForConditionalGeneration.
# FIX: ``torch`` was used below without being imported.
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    model_path,
    device_map="auto",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)

# 2. Load the local image.
image_path = "./1.jpeg"
image = Image.open(image_path).convert("RGB")

# 3. Prompt (runtime string, kept unchanged: "describe the image in Chinese").
prompt = "描述图片，使用中文"

# 4. Prepare inputs.
# FIX: passing the bare prompt would omit the image placeholder tokens the
# model expects; the documented flow wraps image+text in a chat message and
# applies the chat template before calling the processor.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": image},
            {"type": "text", "text": prompt},
        ],
    }
]
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
inputs = processor(
    text=[text],
    images=[image],
    return_tensors="pt",
).to(model.device)

# 5. Run inference.
output_ids = model.generate(
    **inputs,
    max_new_tokens=512,  # cap on generated tokens; adjust as needed
    do_sample=False,     # greedy decoding (set True to sample)
)

# 6. Decode only the newly generated tokens.
# FIX: decoding the full sequence would echo the prompt; slice off the
# input tokens first, as in the official Qwen2.5-VL example.
trimmed_ids = [
    out[len(inp):] for inp, out in zip(inputs.input_ids, output_ids)
]
generated_text = processor.batch_decode(trimmed_ids, skip_special_tokens=True)[0]

# 7. Print the result (label is a runtime string, kept unchanged: "model output").
print("模型输出:", generated_text)