import torch
from modelscope import snapshot_download, AutoConfig, AutoModelForCausalLM, AutoTokenizer, GenerationConfig

def load_model():
    """Download and load the Qwen-VL chat model plus its tokenizer.

    Returns:
        tuple: ``(model, tokenizer)`` ready for the legacy Qwen-VL chat API
        (``tokenizer.from_list_format`` / ``model.chat``) used by
        ``process_input``.
    """
    print("正在加载模型...")
    # FIX: this originally pointed at "Qwen/Qwen2.5-VL-7B-Instruct", but the
    # rest of the script relies on the legacy Qwen-VL chat API
    # (tokenizer.from_list_format / model.chat), which Qwen2.5-VL checkpoints
    # do not provide (they require AutoProcessor + generate and are not a
    # plain CausalLM). "Qwen/Qwen-VL-Chat" is the checkpoint that actually
    # implements this chat API, so the script runs end to end.
    model_id = "Qwen/Qwen-VL-Chat"
    revision = "master"

    # Resolve/download the checkpoint locally via ModelScope.
    model_dir = snapshot_download(model_id, revision=revision)

    # fp16 halves memory; device_map="auto" lets accelerate place layers
    # across available GPUs / CPU automatically.
    model = AutoModelForCausalLM.from_pretrained(
        model_dir,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.float16,
    )
    # Adopt the repo's own generation defaults (sampling params, EOS ids).
    model.generation_config = GenerationConfig.from_pretrained(
        model_dir, trust_remote_code=True
    )

    tokenizer = AutoTokenizer.from_pretrained(
        model_dir,
        trust_remote_code=True,
    )

    print("模型加载完成！")
    return model, tokenizer

def process_input(model, tokenizer, image_path, prompt):
    """Run one single-turn multimodal chat query: an image plus a text prompt.

    Args:
        model: chat-capable Qwen-VL model (must expose ``chat``).
        tokenizer: matching tokenizer (must expose ``from_list_format``).
        image_path: path (or URL) of the image to ask about.
        prompt: user question about the image.

    Returns:
        The model's text answer for this turn.
    """
    # Pack the image and the text into the model's expected query format.
    parts = [
        {'image': image_path},
        {'text': prompt},
    ]
    formatted_query = tokenizer.from_list_format(parts)

    # Single-turn conversation: no prior history is carried in; the returned
    # history is discarded by the caller.
    answer, _history = model.chat(tokenizer, query=formatted_query, history=None)
    return answer

def main():
    """Interactive REPL: prompt for an image path and question, print answers
    until the user types 'quit'."""
    model, tokenizer = load_model()

    while True:
        image_path = input("\n请输入图片路径（输入'quit'退出）: ")
        if image_path.lower() == 'quit':
            return  # user asked to leave

        prompt = input("请输入提示词: ")

        try:
            print("\n模型回答:", process_input(model, tokenizer, image_path, prompt))
        except Exception as e:
            # Best-effort loop: report the failure and keep accepting input.
            print(f"处理出错: {str(e)}")

if __name__ == "__main__":
    main() 