"""Minimal CPU-only demo: run one text-only chat turn through Qwen2-VL.

Loads a local Qwen2-VL-2B-Instruct-GPTQ-Int8 checkpoint, formats a single
user message with the model's chat template, generates up to 128 new
tokens, and prints the decoded output.
"""
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
import torch

# Path to the locally downloaded model checkpoint.
model_dir = 'D:/work/program/pytorch_models/Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int8'


def main():
    """Load the model and processor, run one generation, print the result."""
    # NOTE(review): this checkpoint is GPTQ-Int8 quantized; loading it in
    # float32 on CPU may be slow or unsupported by the GPTQ backend — confirm
    # this combination actually runs in the target environment.
    model = Qwen2VLForConditionalGeneration.from_pretrained(
        model_dir,
        torch_dtype=torch.float32,  # full precision; CPU has no fp16 benefit
        device_map="cpu",           # force CPU placement
    )

    # Processor bundles the tokenizer and (unused here) image preprocessor.
    processor = AutoProcessor.from_pretrained(model_dir)

    # Single-turn, text-only conversation. Qwen2-VL expects "content" to be a
    # list of typed parts, even when there is only text.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Hello, how are you?"}
            ],
        }
    ]

    # Render the chat template into a prompt string, then tokenize. The
    # trailing generation prompt tells the model it is its turn to answer.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = processor(text=[text], padding=True, return_tensors="pt").to("cpu")

    # Generate a reply and decode it back to text (special tokens stripped).
    generated_ids = model.generate(**inputs, max_new_tokens=128)
    output_text = processor.batch_decode(
        generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    print(output_text)


if __name__ == "__main__":
    main()