#include <stdio.h>
#include <stdlib.h>
#include <torch.h>


// Number of floats held in static device global memory; the demo launch in
// main() is sized so the total thread count equals this value (one thread
// per element).
const int mem_sz = 64;
// Statically-allocated device global memory, populated from the host with
// cudaMemcpyToSymbol before the kernel launch.
__device__ float static_mem[mem_sz];
// Forward declaration; defined at the bottom of the file.
__global__ void my_kernel(float *, const int);


// Check a CUDA runtime call and abort with a descriptive message on failure.
// Kernel launches return no status directly, so the launch below is followed
// by cudaGetLastError() (launch-config errors) and cudaDeviceSynchronize()
// (asynchronous in-kernel errors).
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Demo driver: fills the device-resident static_mem array from the host,
// launches my_kernel over a 2x2x2 grid of 2x2x2 blocks (8 * 8 = 64 threads,
// one per element), then prints basic properties of device 0.
int main()
{
    // 2x2x2 grid of 2x2x2 blocks -> exactly mem_sz (64) threads in total.
    auto grid_sz = dim3(2, 2, 2);
    auto blk_sz = dim3(2, 2, 2);

    // Host-side initialisation: the odd numbers 1, 3, 5, ...
    float host_arr[mem_sz];
    for (int i = 0; i < mem_sz; i++)
    {
        host_arr[i] = 2 * i + 1;
    }
    CUDA_CHECK(cudaMemcpyToSymbol(static_mem, host_arr, sizeof(float) * mem_sz));

    // A __device__ symbol cannot be passed to a kernel as a plain pointer
    // argument from host code; fetch its device address first.
    float * static_mem_pnt;
    CUDA_CHECK(cudaGetSymbolAddress((void **) &static_mem_pnt, static_mem));
    my_kernel<<<grid_sz, blk_sz>>>(static_mem_pnt, mem_sz);
    CUDA_CHECK(cudaGetLastError());       // catch bad launch configuration
    CUDA_CHECK(cudaDeviceSynchronize());  // catch async execution errors

    cudaDeviceProp device_prop;
    CUDA_CHECK(cudaGetDeviceProperties(&device_prop, 0));
    printf("The name of my GPU device is %s!\n", device_prop.name);
    printf("The computation capability of my GPU device is %d.%d!\n", 
           device_prop.major, device_prop.minor);

    return 0;
}

// Increment each element of d_x by one, printing the value before and after.
//
// Launch contract: any 3-D grid/block shape. d_x must hold at least N floats;
// threads whose flat global index is >= N do no element work (the original
// code ignored N entirely, so an over-sized launch read and wrote out of
// bounds). Device printf is for demonstration only — it serializes threads.
__global__ void my_kernel(float * d_x, const int N)
{
    // Flatten the 3-D thread/block coordinates into one global index
    // (x fastest, then y, then z).
    const int thd_local_idx = threadIdx.x + threadIdx.y * blockDim.x + \
                              threadIdx.z * blockDim.y * blockDim.x;
    const int thd_per_blk = blockDim.z * blockDim.y * blockDim.x;
    const int blk_idx = blockIdx.x + blockIdx.y * gridDim.x + \
                        blockIdx.z * gridDim.y * gridDim.x;
    const int thd_global_idx = thd_local_idx + blk_idx * thd_per_blk;
    // Bounds guard: the grid rarely divides the data exactly.
    const bool in_range = thd_global_idx < N;

    if (in_range)
    {
        printf("The global index of CUDA thread is %d!, and "
               "The float value in the static global memory is %.0f!\n", 
               thd_global_idx, d_x[thd_global_idx]);
        if (thd_global_idx == 1)
        {
            printf("\n");
        }
    }
    // The barrier stays outside the guard: every thread in the block must
    // reach __syncthreads(); returning early before it is undefined behavior.
    __syncthreads();

    if (in_range)
    {
        d_x[thd_global_idx] += 1.0f;

        printf("Now I've changed the float value in the static global memory, " 
               "and the new value in the thread %d is %.0f!\n", 
               thd_global_idx, d_x[thd_global_idx]);
    }
}
