#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
合并模型脚本
将基座模型和LoRA微调模型合并为一个完整模型
"""

import torch
import os
import argparse
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

def merge_lora_model(base_model_name, lora_path, output_path):
    """Merge a base causal-LM with LoRA adapter weights into one standalone model.

    Loads the base model and tokenizer, applies the LoRA adapter from
    ``lora_path``, folds the adapter weights into the base weights via
    ``merge_and_unload()``, and saves the merged model plus tokenizer to
    ``output_path``.

    Args:
        base_model_name (str): Hub name or local path of the base model.
        lora_path (str): Local path to the LoRA adapter checkpoint.
        output_path (str): Directory to save the merged model into
            (created if missing).

    Returns:
        tuple: ``(merged_model, tokenizer)`` — the merged model and its tokenizer.

    Raises:
        FileNotFoundError: If ``lora_path`` does not exist.
    """
    # Fail fast: validate the adapter path BEFORE spending time and memory
    # loading the (potentially multi-GB) base model.
    if not os.path.exists(lora_path):
        raise FileNotFoundError(f"LoRA模型路径不存在: {lora_path}")

    print(f"正在加载基座模型: {base_model_name}")

    # Pick the best available device. bf16 on CUDA; fp32 on MPS/CPU
    # (MPS bf16 support is incomplete and CPU bf16 is slow).
    # getattr-guard: older torch builds may lack the `mps` backend attribute.
    if torch.cuda.is_available():
        device = "cuda"
        torch_dtype = torch.bfloat16
        print("使用 CUDA 设备")
    elif getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
        device = "mps"
        torch_dtype = torch.float32
        print("使用 MPS 设备")
    else:
        device = "cpu"
        torch_dtype = torch.float32
        print("使用 CPU 设备")

    # Load the tokenizer. trust_remote_code matches the model load below so
    # models shipping custom tokenizer code also work.
    tokenizer = AutoTokenizer.from_pretrained(
        base_model_name, use_fast=True, trust_remote_code=True
    )

    # Load the base model without auto device mapping; we place it manually
    # so merge_and_unload operates on a single device.
    base_model = AutoModelForCausalLM.from_pretrained(
        base_model_name,
        torch_dtype=torch_dtype,
        device_map=None,  # disable automatic device mapping
        low_cpu_mem_usage=True,
        trust_remote_code=True
    )
    base_model = base_model.to(device)

    print(f"正在加载LoRA模型: {lora_path}")
    # Attach the LoRA adapter on top of the base weights.
    model = PeftModel.from_pretrained(base_model, lora_path)

    # Fold the adapter deltas into the base weights and drop the PEFT wrapper.
    print("正在合并模型...")
    merged_model = model.merge_and_unload()

    # Ensure the output directory exists, then persist model + tokenizer
    # together so the result is directly loadable with from_pretrained.
    os.makedirs(output_path, exist_ok=True)
    print(f"正在保存合并后的模型到: {output_path}")
    merged_model.save_pretrained(output_path)
    tokenizer.save_pretrained(output_path)

    print("模型合并完成!")
    return merged_model, tokenizer

def _smoke_test(merged_model, tokenizer):
    """Run a few fixed instruction prompts through the merged model and print replies."""
    print("\n=== 测试合并后的模型 ===")
    # Fixed instruction-style prompts matching the fine-tuning prompt format.
    test_prompts = [
        "指令: 解释什么是人工智能\n回答:",
        "指令: 写一首关于春天的短诗\n回答:",
        "指令: 翻译以下句子\n输入: Hello, how are you?\n回答:"
    ]

    for i, prompt in enumerate(test_prompts, 1):
        print(f"\n测试 {i}:")
        print(f"提示: {prompt}")

        # Tokenize on the same device as the model.
        inputs = tokenizer(prompt, return_tensors="pt").to(merged_model.device)

        # Sampled generation; no gradients needed for inference.
        # NOTE(review): max_length counts prompt tokens too, so the reply is
        # shorter than 512 new tokens — consider max_new_tokens if that matters.
        with torch.no_grad():
            outputs = merged_model.generate(
                **inputs,
                max_length=512,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                top_k=50,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.eos_token_id
            )

        # Decode the full sequence (prompt + completion) for display.
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        print(f"回复: {response}")
        print("-" * 50)


def main():
    """CLI entry point: parse arguments, merge the models, optionally smoke-test."""
    parser = argparse.ArgumentParser(description="合并基座模型和LoRA微调模型")
    parser.add_argument(
        "--base_model",
        type=str,
        default="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
        help="基座模型名称或路径"
    )
    parser.add_argument(
        "--lora_path",
        type=str,
        default="./qwen15b-lora",
        help="LoRA微调模型路径"
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default="./merged_model",
        help="合并后模型的保存路径"
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="合并后测试模型"
    )

    args = parser.parse_args()

    # Merge and save; returns the in-memory model for the optional test pass.
    merged_model, tokenizer = merge_lora_model(
        args.base_model,
        args.lora_path,
        args.output_path
    )

    if args.test:
        _smoke_test(merged_model, tokenizer)

# Standard script entry guard: run only when executed directly, not on import.
if __name__ == "__main__":
    main()