import argparse

import torch
from peft import PeftModel
from PIL import Image
from transformers import AutoModelForCausalLM, AutoProcessor, Qwen2_5_VLForConditionalGeneration


def load_finetuned_model(model_id="Qwen/Qwen2.5-VL-3B-Instruct", lora_weights_path="./qwen2.5-vl-3b-lora-finetuned"):
    """
    Load the base Qwen2.5-VL model, apply the fine-tuned LoRA adapter, and
    return it together with its processor.

    Args:
        model_id (str): Hub ID (or local path) of the original base model.
        lora_weights_path (str): Path to the saved LoRA adapter weights.

    Returns:
        tuple: (model, processor) — the LoRA-wrapped model in eval mode and
        the matching multimodal processor.
    """
    print("Loading base model...")
    # Qwen2.5-VL is a vision-language model: its config class is not in the
    # AutoModelForCausalLM mapping, so the dedicated conditional-generation
    # class must be used to load both the language and the vision tower.
    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        device_map="auto",
    )

    print("Loading LoRA weights...")
    # Wrap the base model with the fine-tuned LoRA adapter.
    model = PeftModel.from_pretrained(model, lora_weights_path)
    model.eval()  # inference-only: disable dropout etc.

    print("Loading processor...")
    # The processor handles both image preprocessing and tokenization.
    processor = AutoProcessor.from_pretrained(model_id)

    return model, processor


def run_inference(model, processor, image_path, prompt, max_new_tokens=100):
    """
    Run single-image inference and return the generated text.

    Args:
        model: The (LoRA) fine-tuned Qwen2.5-VL model.
        processor: The model's multimodal processor.
        image_path (str): Path to the input image.
        prompt (str): User prompt text.
        max_new_tokens (int): Maximum number of tokens to generate.

    Returns:
        str: The newly generated text (prompt tokens excluded).
    """
    # Force RGB: palette/RGBA images would otherwise fail in the processor.
    image = Image.open(image_path).convert("RGB")

    # Qwen2.5-VL expects the chat format with an image placeholder in the
    # text; feeding the raw prompt would leave the processor with no slot
    # to insert the image tokens into.
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image"},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # No truncation here: truncating could slice the image placeholder
    # tokens and desynchronize text and vision features.
    inputs = processor(
        text=[text],
        images=[image],
        return_tensors="pt",
        padding=True,
    ).to(model.device)  # follow device_map="auto" instead of hard-coding "cuda"

    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)

    # Strip the echoed prompt so only the newly generated tokens are decoded.
    new_token_ids = generated_ids[:, inputs["input_ids"].shape[1]:]
    result = processor.batch_decode(new_token_ids, skip_special_tokens=True)[0]

    return result


def main():
    """CLI entry point: parse arguments, load the fine-tuned model, run inference, print the result."""
    parser = argparse.ArgumentParser(description="Qwen2.5-VL-3B-Instruct微调模型推理")
    parser.add_argument("--model_id", type=str, default="Qwen/Qwen2.5-VL-3B-Instruct", help="原始模型ID")
    parser.add_argument("--lora_weights_path", type=str, default="./qwen2.5-vl-3b-lora-finetuned", help="LoRA权重路径")
    parser.add_argument("--image_path", type=str, required=True, help="图像路径")
    parser.add_argument("--prompt", type=str, required=True, help="提示文本")
    cli = parser.parse_args()

    # Load the base model plus LoRA adapter and the matching processor.
    model, processor = load_finetuned_model(cli.model_id, cli.lora_weights_path)

    print("Running inference...")
    output = run_inference(model, processor, cli.image_path, cli.prompt)

    print("Generated text:")
    print(output)


if __name__ == "__main__":
    main()