#!/usr/bin/env python3

import torch
from safetensors import safe_open
from transformers import AutoModelForCausalLM, AutoTokenizer
import os

def compare_models(base_model_path, finetuned_model_path, adapter_path):
    """Compare a base model with its LoRA-finetuned counterpart.

    Prints: parameter counts for the base model, the LoRA adapter's
    parameter count and its share of the total, the shapes of the first
    few adapter tensors, and the config/text files present in the
    finetuned model directory. All output goes to stdout; nothing is
    returned.

    Args:
        base_model_path: Local directory containing the base model.
        finetuned_model_path: Directory holding the finetuned model files.
        adapter_path: Path to the ``adapter_model.safetensors`` file.
    """
    print("=== 模型比较分析 ===")
    print(f"基础模型路径: {base_model_path}")
    print(f"微调模型路径: {finetuned_model_path}")
    print(f"适配器路径: {adapter_path}")

    # Load the base model from disk only (no Hub download).
    print("\n1. 加载基础模型信息...")
    base_model = AutoModelForCausalLM.from_pretrained(base_model_path, local_files_only=True)
    base_params = sum(p.numel() for p in base_model.parameters())
    trainable_params = sum(p.numel() for p in base_model.parameters() if p.requires_grad)

    print(f"基础模型总参数量: {base_params:,}")
    print(f"基础模型可训练参数量: {trainable_params:,}")
    # NOTE(review): assumes a model with a `.model.layers` attribute
    # (e.g. Qwen/Llama-style architectures) — other architectures differ.
    print(f"模型层数: {len(base_model.model.layers)}")

    # Inspect the LoRA adapter weights.
    print("\n2. 分析LoRA适配器...")
    if os.path.exists(adapter_path):
        # Single pass over the file: count parameters AND record each
        # tensor's shape. (The original reopened and re-read the file a
        # second time just to print a few shapes.)
        adapter_params = 0
        shapes = {}
        with safe_open(adapter_path, framework="pt", device="cpu") as f:
            adapter_keys = list(f.keys())
            for key in adapter_keys:
                tensor = f.get_tensor(key)
                adapter_params += tensor.numel()
                shapes[key] = list(tensor.shape)

        print(f"适配器参数量: {adapter_params:,}")
        print(f"适配器参数矩阵数量: {len(adapter_keys)}")

        # Adapter size as a percentage of the base model's parameters.
        ratio = (adapter_params / base_params) * 100
        print(f"适配器参数占总参数比例: {ratio:.2f}%")

        # Show the first 4 adapter tensors as examples.
        print("\n3. 适配器参数示例:")
        for key in adapter_keys[:4]:
            print(f"  {key}: {shapes[key]}")
    else:
        print("未找到适配器文件")

    # List config/text files shipped with the finetuned model.
    print("\n4. 微调模型配置文件:")
    # isdir, not exists: os.listdir would raise if the path were a file.
    if os.path.isdir(finetuned_model_path):
        files = os.listdir(finetuned_model_path)
        for file in sorted(files):
            if file.endswith(('.json', '.txt')):
                print(f"  - {file}")

    print("\n=== 分析完成 ===")

if __name__ == "__main__":
    # Paths for the Qwen1.5-1.8B "tianqi" fine-tuning experiment.
    paths = {
        "base_model_path": "models/Qwen1.5-1.8B",
        "finetuned_model_path": "training/models/finetuned_model_tianqi",
        "adapter_path": "training/models/finetuned_model_tianqi/adapter_model.safetensors",
    }
    compare_models(**paths)
