import torch
from modelscope import AutoModelForCausalLM, AutoTokenizer
import os

def print_model_info(model_path):
    """
    Load a causal-LM checkpoint and print its structure and parameter
    matrix dimensions to stdout.

    Args:
        model_path: Filesystem path to the model directory/checkpoint.

    Returns:
        None. All output goes to stdout; failures are reported rather
        than raised.
    """
    # Bail out early if the path is missing so we never pay the load cost.
    if not os.path.exists(model_path):
        print(f"错误: 模型路径不存在 - {model_path}")
        return

    try:
        print(f"正在加载模型: {model_path}")

        # Load with 4-bit quantization to reduce memory footprint.
        # NOTE(review): `load_in_4bit=True` is deprecated in recent
        # transformers releases in favor of passing a BitsAndBytesConfig
        # via `quantization_config=` — confirm the installed version
        # still accepts the flag.
        model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.float16,
            device_map="auto",
            load_in_4bit=True,
            trust_remote_code=True
        )

        print("\n===== 模型基本信息 =====")
        print(f"模型类型: {type(model)}")
        print(f"模型设备: {next(model.parameters()).device}")

        print("\n===== 模型结构概览 =====")
        print(model)
        # Bug fix: causal-LM wrappers (e.g. Qwen2ForCausalLM) expose the
        # transformer layer stack at `model.model.layers`, not
        # `model.layers`. The old direct access raised AttributeError,
        # which the broad except below swallowed — silently aborting the
        # whole parameter dump. Look the attribute up defensively instead.
        inner = getattr(model, "model", model)
        layers = getattr(inner, "layers", None)
        if layers is not None:
            print(layers)

        print("\n===== 模型参数矩阵维度信息 =====")
        total_params = 0
        for name, param in model.named_parameters():
            # Only report trainable tensors.
            # NOTE(review): 4-bit quantized weights typically carry
            # requires_grad=False, so this total may undercount the
            # model's true size — confirm this filter is intended.
            if param.requires_grad:
                print(f"参数名称: {name}, 维度: {param.shape}, 参数数量: {param.numel()}")
                total_params += param.numel()

        print(f"\n===== 模型总参数数量 =====")
        print(f"总可学习参数: {total_params:,}")
        print(f"总可学习参数 ( billions ): {total_params / 1e9:.2f}B")

    except Exception as e:
        # Broad catch keeps the script usable as a diagnostic tool, but it
        # hides tracebacks: errors are reported on stdout only.
        print(f"加载模型时发生错误: {str(e)}")

if __name__ == "__main__":
    # Local Qwen2.5-7B-Instruct checkpoint downloaded via modelscope.
    target_checkpoint = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"

    # Dump structure and parameter-dimension info for the checkpoint.
    print_model_info(target_checkpoint)
