import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch #gpu

parser = argparse.ArgumentParser(description="Compare LoRA model with base model.")

parser.add_argument("--base-model-path",
                    type=str,
                    help="Path to the base model directory.",
                    default="/data/llm/models/DeepSeek-R1-Distill-Qwen-1.5B")

# Previously hard-coded below; exposed as an argument for consistency with
# --base-model-path. The default preserves the old behavior.
parser.add_argument("--finetuned-model-path",
                    type=str,
                    help="Path to the merged (LoRA fine-tuned) model directory.",
                    default="/workspace/LLaMA-Factory/merged_model/")

args = parser.parse_args()

# Run on GPU when one is available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the original (base) model and its tokenizer.
original_model_name = args.base_model_path
original_tokenizer = AutoTokenizer.from_pretrained(original_model_name)
original_model = AutoModelForCausalLM.from_pretrained(original_model_name).to(device)


# Load the fine-tuned (merged) model and its tokenizer.
finetuned_model_path = args.finetuned_model_path
finetuned_tokenizer = AutoTokenizer.from_pretrained(finetuned_model_path)
finetuned_model = AutoModelForCausalLM.from_pretrained(finetuned_model_path).to(device)

# Test prompts (Chinese questions about the smallest bone in the human body).
test_prompts = [
    "人体最小的骨头是什么？",
    "人体最小的骨头有什么功能？请详细说明",
    "人体最小的骨头属于哪个器官？",
]


def _generate(model, tokenizer, prompt, max_new_tokens=100):
    """Generate a completion for `prompt` and return the decoded text.

    The decoded string includes the prompt itself (standard causal-LM
    decoding with skip_special_tokens=True).
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # no_grad: inference only — avoids building the autograd graph.
    with torch.no_grad():
        # max_new_tokens (not max_length) so the generation budget does not
        # shrink with prompt length.
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Compare the base model's output with the fine-tuned model's output
# on each test prompt.
for prompt in test_prompts:
    print("\n" + "=" * 50 + "\n")
    print(f"Prompt: {prompt}")

    print("\n" + "-" * 50 + "\n")
    original_text = _generate(original_model, original_tokenizer, prompt)
    print(f"Original Model Output: {original_text}")

    print("\n" + "-" * 50 + "\n")
    finetuned_text = _generate(finetuned_model, finetuned_tokenizer, prompt)
    print(f"Finetuned Model Output: {finetuned_text}")
