import torch
import os
from modelscope import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor
from qwen_vl_utils import process_vision_info

def init_model(model_dir=None):
    """Load the Qwen2.5-VL model and its processor.

    Args:
        model_dir: Optional path to a local model directory. Defaults to
            the ModelScope cache location for Qwen2.5-VL-7B-Instruct, so
            existing callers are unaffected.

    Returns:
        A ``(model, processor)`` tuple. The model is in eval mode and
        placed automatically across available devices (``device_map="auto"``).
    """
    print("正在初始化模型...")
    if model_dir is None:
        # Default to the ModelScope download cache for this checkpoint.
        model_dir = os.path.expanduser(
            "~/.cache/modelscope/hub/models/Qwen/Qwen2.5-VL-7B-Instruct"
        )

    print("加载模型中...")
    processor = AutoProcessor.from_pretrained(model_dir)

    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_dir,
        torch_dtype="auto",   # take the dtype from the checkpoint config
        device_map="auto"     # let accelerate place weights on GPU/CPU
    ).eval()

    print("模型加载完成！")
    return model, processor

def chat_one_turn(model, processor, image_path, prompt, max_new_tokens=128):
    """Run one single-turn image+text chat and return the model's reply.

    Args:
        model: The loaded Qwen2.5-VL generation model.
        processor: The matching processor (chat template, tokenizer, vision).
        image_path: Path or URL of the image the question is about.
        prompt: The user's text question.
        max_new_tokens: Generation budget for the reply (default 128,
            matching the previous hard-coded value).

    Returns:
        The decoded assistant reply as a string.
    """
    # Build a single-turn chat message containing one image and one text part.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]

    # Render the chat template and extract the vision inputs.
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    # Move tensors to wherever device_map="auto" placed the model.
    # Hard-coding "cuda" here would crash on CPU-only machines and can
    # mismatch the model's actual device in multi-GPU setups.
    inputs = inputs.to(model.device)

    # Generate, then strip the prompt tokens so only the new reply is decoded.
    generated_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    return output_text[0]

def main():
    """Interactive loop: repeatedly ask for an image path and a question."""
    # Initialize the model once up front.
    model, processor = init_model()

    while True:
        try:
            # Read the image path; 'q' exits.
            image = input("\n请输入图片路径 (输入q退出): ").strip()
            if image.lower() == 'q':
                break

            prompt = input("请输入您的问题: ")

            # Run one turn and show the answer.
            response = chat_one_turn(model, processor, image, prompt)
            print(f"\n模型回答: {response}\n")

        except (EOFError, KeyboardInterrupt):
            # stdin closed (piped input exhausted) or Ctrl+C: exit cleanly.
            # Previously EOFError fell into the generic handler below and
            # the loop spun forever printing the same error.
            break
        except Exception as e:
            # Per-turn failures (bad path, decode error, OOM, ...) should
            # not kill the session; report and keep going.
            print(f"发生错误: {str(e)}")
            continue

# Run the interactive chat loop only when executed as a script.
if __name__ == "__main__":
    main()