#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
GPU检查脚本
"""

import torch
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Side length of the square matrix used for the GPU compute smoke test.
_TEST_MATRIX_SIZE = 1000
# Bytes per GiB, for converting raw byte counts reported by torch.cuda.
_BYTES_PER_GIB = 1024 ** 3


def _recommend_batch_size(total_memory_gb):
    """Return a conservative training batch size for the given total VRAM in GiB.

    >= 8 GiB -> 16, >= 4 GiB -> 8, otherwise 4.
    """
    if total_memory_gb >= 8:
        return 16
    if total_memory_gb >= 4:
        return 8
    return 4


def _report_gpus():
    """Log the count, name, and total memory of every visible CUDA device."""
    gpu_count = torch.cuda.device_count()
    logger.info(" GPU数量: %d", gpu_count)
    for i in range(gpu_count):
        gpu_name = torch.cuda.get_device_name(i)
        gpu_memory = torch.cuda.get_device_properties(i).total_memory / _BYTES_PER_GIB
        logger.info(" GPU %d: %s (%.1fGB)", i, gpu_name, gpu_memory)


def _test_gpu_compute():
    """Run a small matmul on the default CUDA device and log memory usage.

    Failures are logged, not raised, so the caller can keep reporting.
    """
    try:
        x = torch.randn(_TEST_MATRIX_SIZE, _TEST_MATRIX_SIZE).cuda()
        y = torch.mm(x, x.t())
        # CUDA kernels launch asynchronously: without an explicit synchronize,
        # a kernel-execution error could surface *after* this try block and the
        # script would falsely report the test as passed.
        torch.cuda.synchronize()
        logger.info(" GPU计算测试通过")
        # Memory snapshot on device 0 (allocated by tensors vs reserved by the
        # caching allocator).
        allocated = torch.cuda.memory_allocated(0) / _BYTES_PER_GIB
        cached = torch.cuda.memory_reserved(0) / _BYTES_PER_GIB
        logger.info(" 显存使用: %.2fGB / %.2fGB", allocated, cached)
    except Exception as e:
        logger.error("GPU计算测试失败: %s", e)


def main():
    """Check the local CUDA environment and log a readiness report.

    When CUDA is available: list every GPU, run a compute smoke test, and log
    a recommended batch size based on device 0's total memory. When CUDA is
    unavailable: log remediation hints (driver / CUDA toolkit / PyTorch build).
    Returns None; never raises on GPU failures.
    """
    logger.info("检查GPU环境...")

    # Guard clause: no CUDA -> log remediation hints and stop.
    if not torch.cuda.is_available():
        logger.error(" CUDA不可用")
        logger.error("请检查：")
        logger.error("1. NVIDIA驱动是否安装")
        logger.error("2. CUDA Toolkit是否安装")
        logger.error("3. PyTorch是否支持CUDA")
        logger.info("PyTorch版本: %s", torch.__version__)
        logger.info("请安装GPU版本的PyTorch:")
        logger.info("pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118")
        return

    logger.info(" CUDA可用")
    _report_gpus()

    logger.info(" 测试GPU计算...")
    _test_gpu_compute()

    # Recommendation is based on device 0 only, matching the original script.
    total_memory = torch.cuda.get_device_properties(0).total_memory / _BYTES_PER_GIB
    batch_size = _recommend_batch_size(total_memory)
    logger.info(" 推荐batch_size: %d", batch_size)
    logger.info(" GPU环境就绪，可以开始训练！")

# Script entry point: run the environment check only when executed directly.
if __name__ == "__main__":
    main()
