import psutil
import platform
import sys
import os  # NOTE(review): appears unused in this file — confirm before removing
# torch is optional: it is only needed for GPU detection, so a missing
# install downgrades the report instead of crashing the script.
try:
    import torch
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False

def get_cpu_info():
    """Collect basic CPU information.

    Returns:
        dict: physical core count, logical core count, and current CPU
        frequency as a formatted string ("未知" when unavailable).
    """
    cpu_count_logical = psutil.cpu_count(logical=True)  # logical cores
    # psutil.cpu_count(logical=False) may return None on some platforms
    # (documented behavior); fall back to the logical count so callers
    # that compare this value numerically don't hit a TypeError.
    cpu_count = psutil.cpu_count(logical=False) or cpu_count_logical
    cpu_freq = psutil.cpu_freq()  # may be None where frequency is unsupported
    return {
        "物理核心数": cpu_count,
        "逻辑核心数": cpu_count_logical,
        "CPU频率": f"{cpu_freq.current:.2f}MHz" if cpu_freq else "未知"
    }

def get_memory_info():
    """Return formatted virtual-memory statistics (total, available, usage %)."""
    gib = 1024 ** 3
    vm = psutil.virtual_memory()
    return {
        "总内存": f"{vm.total / gib:.2f}GB",
        "可用内存": f"{vm.available / gib:.2f}GB",
        "内存使用率": f"{vm.percent}%",
    }

def get_disk_info():
    """Return formatted usage statistics for the root ('/') filesystem."""
    gib = 1024 ** 3
    usage = psutil.disk_usage('/')
    return {
        "总空间": f"{usage.total / gib:.2f}GB",
        "可用空间": f"{usage.free / gib:.2f}GB",
        "使用率": f"{usage.percent}%",
    }

def get_gpu_info():
    """Report CUDA GPU availability (and stats for device 0) via PyTorch.

    Returns a status-only dict when torch is missing or no GPU is usable.
    """
    if not TORCH_AVAILABLE:
        return {"GPU状态": "未检测到PyTorch"}
    if not torch.cuda.is_available():
        return {"GPU状态": "PyTorch已安装但未检测到可用GPU"}

    # Query device 0 once and reuse the values in both derived fields.
    allocated = torch.cuda.memory_allocated(0)
    total = torch.cuda.get_device_properties(0).total_memory
    return {
        "GPU状态": "可用",
        "GPU型号": torch.cuda.get_device_name(0),
        "GPU数量": torch.cuda.device_count(),
        "当前GPU内存": f"{allocated / (1024**3):.2f}GB",
        "GPU内存占用率": f"{allocated / total * 100:.2f}%",
    }

def check_environment():
    """Print a human-readable report on whether this machine can run a
    small AI model.

    Reports OS/Python, CPU, memory, disk and GPU details, then checks
    each resource against hard-coded minimums and prints a verdict.
    All output strings are user-facing and intentionally in Chinese.
    """
    print("正在检查系统环境...")
    print("\n系统信息:")
    print(f"操作系统: {platform.system()} {platform.version()}")
    print(f"Python版本: {sys.version.split()[0]}")

    print("\nCPU信息:")
    cpu_info = get_cpu_info()
    for key, value in cpu_info.items():
        print(f"{key}: {value}")

    print("\n内存信息:")
    memory_info = get_memory_info()
    for key, value in memory_info.items():
        print(f"{key}: {value}")

    print("\n磁盘信息:")
    disk_info = get_disk_info()
    for key, value in disk_info.items():
        print(f"{key}: {value}")

    print("\nGPU信息:")
    gpu_info = get_gpu_info()
    for key, value in gpu_info.items():
        print(f"{key}: {value}")

    # Minimum hardware requirements for running a small model.
    min_requirements = {
        "cpu_cores": 2,
        "memory_gb": 4,
        "free_disk_gb": 10
    }

    print("\n系统要求检查:")
    meets_requirements = True

    # CPU check — psutil's physical core count can be None on some
    # platforms, so coerce before the numeric comparison.
    physical_cores = cpu_info["物理核心数"] or 0
    if physical_cores < min_requirements["cpu_cores"]:
        print(f"❌ CPU核心数不足: 需要至少{min_requirements['cpu_cores']}核，当前{cpu_info['物理核心数']}核")
        meets_requirements = False
    else:
        print("✅ CPU核心数满足要求")

    # Memory check (compared in GiB).
    memory = psutil.virtual_memory()
    if memory.total / (1024**3) < min_requirements["memory_gb"]:
        print(f"❌ 内存不足: 需要至少{min_requirements['memory_gb']}GB，当前{memory.total / (1024**3):.2f}GB")
        meets_requirements = False
    else:
        print("✅ 内存满足要求")

    # Disk-space check on the root filesystem.
    disk = psutil.disk_usage('/')
    if disk.free / (1024**3) < min_requirements["free_disk_gb"]:
        print(f"❌ 可用磁盘空间不足: 需要至少{min_requirements['free_disk_gb']}GB，当前{disk.free / (1024**3):.2f}GB")
        meets_requirements = False
    else:
        print("✅ 磁盘空间满足要求")

    print("\n总体评估:")
    if meets_requirements:
        print("✅ 您的系统满足运行小型AI模型的基本要求")
        # Short-circuit keeps torch.cuda from being touched when torch
        # was never imported.
        if not TORCH_AVAILABLE or not torch.cuda.is_available():
            print("⚠️ 注意：未检测到GPU，模型将使用CPU运行，这可能会影响性能")
    else:
        print("❌ 您的系统可能无法良好运行AI模型，建议升级配置")

# Run the full environment check when executed as a script.
if __name__ == "__main__":
    check_environment()
