#include <iostream>
#include <cuda_runtime.h>
#include "../common/common.h"
#define DEVICENUM 0

int main(int argc, char **argv)
{
    // 1. Query how many CUDA-capable GPUs this machine has.
    int deviceCount = 0;
    CHECK(cudaGetDeviceCount(&deviceCount)); // deviceCount is filled in here
    std::cout << "检测到你服务器上含有 " << deviceCount << " 块gpu" << std::endl;
    if (deviceCount == 0)
    {
        // Without a device every later runtime call would fail; bail out early.
        std::cerr << "No CUDA-capable device found." << std::endl;
        return EXIT_FAILURE;
    }

    // 2. Select the GPU used by all subsequent runtime calls.
    CHECK(cudaSetDevice(DEVICENUM));

    // 3. Query the driver and runtime versions.
    // The API encodes a version as 1000*major + 10*minor, e.g. CUDA 11.6 -> 11060,
    // so decode it with value / 1000 and (value % 100) / 10 before printing.
    int driverVersion = 0, runtimeVersion = 0;
    CHECK(cudaDriverGetVersion(&driverVersion));
    CHECK(cudaRuntimeGetVersion(&runtimeVersion));
    std::cout << "CUDA 驱动版本: " << driverVersion / 1000 << "."
              << (driverVersion % 100) / 10 << std::endl;
    std::cout << "CUDA 运行时版本: " << runtimeVersion / 1000 << "."
              << (runtimeVersion % 100) / 10 << std::endl;

    // 4. Query the basic hardware properties of the selected GPU.
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, DEVICENUM));
    std::cout << "GPU 类型: " << deviceProp.name << std::endl;
    // Note: the SM count is unrelated to "sm_86" — the 86 there is compute capability 8.6.
    std::cout << "GPU SM(流多处理器)数量: " << deviceProp.multiProcessorCount << std::endl;
    // major/minor together form the compute capability, e.g. 8 and 6 -> sm_86.
    // Max warps per SM = max resident threads per SM / warp size (use the queried
    // warpSize rather than a hard-coded 32).
    std::cout << "GPU 每个SM中线程束最大数目："
              << deviceProp.maxThreadsPerMultiProcessor / deviceProp.warpSize << std::endl;
    std::cout << "GPU 计算能力：" << deviceProp.major << "." << deviceProp.minor << std::endl;
    std::cout << "GPU 算力: sm_" << deviceProp.major * 10 + deviceProp.minor << std::endl;
    // totalGlobalMem is reported in bytes; convert to GiB.
    std::cout << "GPU 显存大小: "
              << (double)deviceProp.totalGlobalMem / (1024.0 * 1024.0 * 1024.0)
              << "GB" << std::endl;
    std::cout << "GPU 内存总线宽度: " << deviceProp.memoryBusWidth << "-bits" << std::endl;
    std::cout << "GPU L2 Cache内存大小: " << deviceProp.l2CacheSize << " bytes" << std::endl;
    std::cout << "GPU常量内存大小：" << deviceProp.totalConstMem / 1024.0 << " kB" << std::endl;
    std::cout << "GPU共享内存大小：" << deviceProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
    std::cout << "GPU每个SM(流多处理器)的最大线程数：" << deviceProp.maxThreadsPerMultiProcessor << std::endl;
    // regsPerBlock is a COUNT of 32-bit registers available per block, not a byte size.
    std::cout << "GPU每个block的32位寄存器数量：" << deviceProp.regsPerBlock << std::endl;
    std::cout << "GPU每个block线程束大小：" << deviceProp.warpSize << std::endl;
    std::cout << "GPU每个block块的最大线程数：" << deviceProp.maxThreadsPerBlock << std::endl;
    std::cout << "GPU每个block块的维度最大尺寸："
              << deviceProp.maxThreadsDim[0] << " x " << deviceProp.maxThreadsDim[1] << " x " << deviceProp.maxThreadsDim[2] << std::endl;
    std::cout << "GPU每个grid的维度最大尺寸："
              << deviceProp.maxGridSize[0] << " x " << deviceProp.maxGridSize[1] << " x " << deviceProp.maxGridSize[2] << std::endl;

    // Maximum pitch allowed by cudaMallocPitch() for memory-copy functions
    // operating on pitched memory regions.
    std::cout << "Maximum memory pitch:" << deviceProp.memPitch << " bytes" << std::endl;
    std::cout << "GPU 最大纹理尺寸(x,y,z): "
              << "1D=" << deviceProp.maxTexture1D << " "
              << "2D=(" << deviceProp.maxTexture2D[0] << "," << deviceProp.maxTexture2D[1] << ") "
              << "3D=(" << deviceProp.maxTexture3D[0] << "," << deviceProp.maxTexture3D[1] << "," << deviceProp.maxTexture3D[2] << ")" << std::endl;

    return EXIT_SUCCESS;
}
