#include <iostream>
#include <cuda_runtime.h>

// Verifies the CUDA environment end to end: queries driver/runtime versions,
// enumerates all CUDA-capable devices with their key properties, and performs
// a small device-memory allocation round trip.
// Returns 0 on success, -1 on any CUDA error or when no device is present.
int main() {
    // Check CUDA driver and runtime versions.
    int driverVersion = 0;
    int runtimeVersion = 0;

    cudaError_t error = cudaDriverGetVersion(&driverVersion);
    if (error != cudaSuccess) {
        std::cerr << "Error querying driver version: " << cudaGetErrorString(error) << std::endl;
        return -1;
    }
    error = cudaRuntimeGetVersion(&runtimeVersion);
    if (error != cudaSuccess) {
        std::cerr << "Error querying runtime version: " << cudaGetErrorString(error) << std::endl;
        return -1;
    }

    // Versions are encoded as 1000*major + 10*minor (e.g. 12040 -> "12.4").
    std::cout << "CUDA Driver Version: " << driverVersion/1000 << "." 
              << (driverVersion%100)/10 << std::endl;
    std::cout << "CUDA Runtime Version: " << runtimeVersion/1000 << "." 
              << (runtimeVersion%100)/10 << std::endl;
    
    // Get the number of CUDA-capable devices.
    int deviceCount = 0;
    error = cudaGetDeviceCount(&deviceCount);
    
    if (error != cudaSuccess) {
        std::cerr << "Error getting device count: " << cudaGetErrorString(error) << std::endl;
        return -1;
    }
    
    if (deviceCount == 0) {
        std::cout << "No CUDA-capable devices found." << std::endl;
        return -1;
    }
    
    std::cout << "Found " << deviceCount << " CUDA-capable device(s)" << std::endl;
    
    // Iterate over each device and print its key properties.
    for (int i = 0; i < deviceCount; ++i) {
        cudaDeviceProp prop;
        error = cudaGetDeviceProperties(&prop, i);
        // Without this check a failed query would print uninitialized
        // (garbage) property values below.
        if (error != cudaSuccess) {
            std::cerr << "Error getting properties for device " << i << ": "
                      << cudaGetErrorString(error) << std::endl;
            return -1;
        }
        
        std::cout << "\nDevice " << i << ": " << prop.name << std::endl;
        std::cout << "  Compute capability: " << prop.major << "." << prop.minor << std::endl;
        std::cout << "  Global memory: " << prop.totalGlobalMem / (1024*1024) << " MB" << std::endl;
        std::cout << "  Multiprocessors: " << prop.multiProcessorCount << std::endl;
        std::cout << "  Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
        std::cout << "  Max grid size: " << prop.maxGridSize[0] << " x " 
                  << prop.maxGridSize[1] << " x " << prop.maxGridSize[2] << std::endl;
        std::cout << "  Max block size: " << prop.maxThreadsDim[0] << " x " 
                  << prop.maxThreadsDim[1] << " x " << prop.maxThreadsDim[2] << std::endl;
    }
    
    // Test a simple CUDA operation: allocate and free device memory.
    float *d_a = nullptr;
    size_t size = 1024 * sizeof(float);
    
    // Allocate GPU memory.
    error = cudaMalloc(&d_a, size);
    if (error != cudaSuccess) {
        std::cerr << "Error allocating GPU memory: " << cudaGetErrorString(error) << std::endl;
        return -1;
    }
    
    std::cout << "\nSuccessfully allocated " << size/sizeof(float) 
              << " floats on GPU" << std::endl;
    
    // Free GPU memory; a failure here indicates a corrupted context or an
    // earlier sticky error, so report it instead of discarding it.
    error = cudaFree(d_a);
    if (error != cudaSuccess) {
        std::cerr << "Error freeing GPU memory: " << cudaGetErrorString(error) << std::endl;
        return -1;
    }
    
    std::cout << "CUDA environment verified successfully!" << std::endl;
    
    return 0;
}