# ==============================================================================
# Author: Haisheng Hui
# Date: 2024-04-10
# Copyright (C) 2024 Optimizing AI. All rights reserved.
# Description: This Python script tests the functionality and integrity
# of the CUDA / cuDNN / PyTorch-GPU installation.
# ==============================================================================

# -*- coding: utf-8 -*-
import subprocess
import torch

# Diagnostic commands that can be run from any working directory.
general_commands = [
    "nvcc -V",  # report the installed CUDA compiler version
    "nvidia-smi",  # report driver/GPU status (includes CUDA runtime info)
]

# Directory inside the CUDA toolkit that ships the demo-suite binaries
# bandwidthTest.exe and deviceQuery.exe (Windows install, CUDA v12.1).
cuda_tools_dir = r"C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1\extras\demo_suite"

# Demo-suite executables that must be resolved relative to cuda_tools_dir.
specific_commands = [
    "bandwidthTest.exe",
    "deviceQuery.exe",
]

# Execute each location-independent diagnostic command and echo its output.
for cmd in general_commands:
    proc = subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        encoding='utf-8',
        shell=True,
    )
    print(proc.stdout)
    # Surface anything the tool wrote to stderr so failures are visible.
    if proc.stderr:
        print("错误:", proc.stderr)

# Run the demo-suite executables that live in the CUDA toolkit directory.
#
# BUG FIX: cuda_tools_dir contains spaces ("Program Files"), so passing the
# joined path as one string through the shell made cmd.exe split it at the
# first space and attempt to execute "C:\Program".  Passing the full path as
# a single argv element (no shell) sidesteps all quoting issues.
for command in specific_commands:
    # Build the absolute path to the executable.
    full_command_path = f"{cuda_tools_dir}\\{command}"
    try:
        result = subprocess.run(
            [full_command_path],  # argv list, no shell: spaces in the path are safe
            capture_output=True,
            text=True,
            encoding='utf-8',
        )
    except FileNotFoundError:
        # Keep the original best-effort behavior: report and continue instead
        # of crashing when a demo binary is missing.
        print("错误:", f"{full_command_path} not found")
        continue
    print(result.stdout)
    if result.stderr:
        print("错误:", result.stderr)

# Report whether this PyTorch build can see a CUDA-capable GPU, and if so,
# list every visible device.
if not torch.cuda.is_available():
    print("CUDA is not available in PyTorch. GPU support is not enabled.")
else:
    print("CUDA is available in PyTorch. GPU support is enabled.")
    print(f"PyTorch版本: {torch.__version__}")
    print(f"CUDA版本: {torch.version.cuda}")
    # Query the device count once and reuse it for both the summary line
    # and the per-device loop.
    gpu_count = torch.cuda.device_count()
    print(f"可用的GPU数量: {gpu_count}")
    for idx in range(gpu_count):
        print(f"GPU {idx}: {torch.cuda.get_device_name(idx)}")
