#include <cstdio>
#include <random>
#include <stdlib.h>

// Integer ceiling division: ceil(a / b) for positive integers.
// The entire expansion is parenthesized so the macro composes safely inside
// larger expressions (e.g. `x / ceil(a, b)` or `ceil(a, b) * y`); without the
// outer parens, surrounding operators would bind to the internal `/`.
#define ceil(a, b) (((a) + (b) - 1) / (b))

// Element-wise copy of an N x N row-major matrix: B = A.
//
// Expects a 2D launch where blockDim = (TILE_DIM, TILE_DIM) and the grid has
// enough blocks in x and y to cover all N columns and N rows.
template <const int TILE_DIM>
__global__ void copy(const float *A, float *B, const int N) {
    const int col = blockIdx.x * TILE_DIM + threadIdx.x;  // column index into the matrix
    const int row = blockIdx.y * TILE_DIM + threadIdx.y;  // row index into the matrix
    // Guard the grid tail: when N is not a multiple of TILE_DIM, threads past
    // the matrix edge must not touch memory.
    if (col < N && row < N) {
        B[row * N + col] = A[row * N + col];
    }
}


// Fill mat[0..N) with pseudo-random floats uniformly distributed in [-1.0, 1.0).
//
// N is the element count (not a byte count). A fresh non-deterministic seed
// is drawn on every call, so results are not reproducible across runs.
void randomize_matrix(float *mat, int N) {
    std::random_device rd;
    std::mt19937 gen(rd());

    // Draw floats directly rather than hand-scaling an integer draw:
    // the old `(dis(gen) - 1000) / 1000.0` computed in double (implicit
    // double->float narrowing on assignment) and quantized every value to
    // steps of 0.001. uniform_real_distribution<float> is the stdlib idiom.
    std::uniform_real_distribution<float> dis(-1.0f, 1.0f);
    for (int i = 0; i < N; i++) {
        mat[i] = dis(gen);
    }
}


// Abort with a descriptive message if a CUDA runtime call failed.
// Every runtime call returns cudaError_t; ignoring it hides failures until a
// later, unrelated call mysteriously errors out.
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Driver: fill an N x N host matrix with random values, copy it on the
// device with the `copy` kernel, read the result back, and verify B == A.
// Returns 0 on success, nonzero on any mismatch.
int main() {
    const int TILE_DIM = 32;
    const int N = 128;
    const dim3 grid_size(ceil(N, TILE_DIM), ceil(N, TILE_DIM));
    const dim3 block_size(TILE_DIM, TILE_DIM);
    const size_t bytes = N * N * sizeof(float);

    // 1. Initialize a host matrix with random values in [-1, 1].
    float *h_a = (float *)malloc(bytes);
    float *h_b = (float *)malloc(bytes);
    if (h_a == NULL || h_b == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    randomize_matrix(h_a, N * N);

    // Device buffers: d_a is the source, d_b the copy destination.
    float *d_a, *d_b;
    checkCuda(cudaMalloc((void **) &d_a, bytes), "cudaMalloc d_a");
    checkCuda(cudaMalloc((void **) &d_b, bytes), "cudaMalloc d_b");
    checkCuda(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice), "H2D copy");

    // 2. Launch the copy kernel (one thread per matrix element).
    copy<TILE_DIM><<<grid_size, block_size>>>(d_a, d_b, N);
    // Kernel launches return no status directly: launch-config errors surface
    // via cudaGetLastError(), execution errors at the next synchronizing call.
    checkCuda(cudaGetLastError(), "kernel launch");
    checkCuda(cudaDeviceSynchronize(), "kernel execution");

    // 3. Copy the result back and verify the device copy matches the input.
    checkCuda(cudaMemcpy(h_b, d_b, bytes, cudaMemcpyDeviceToHost), "D2H copy");
    int errors = 0;
    for (int i = 0; i < N * N; i++) {
        if (h_a[i] != h_b[i]) {
            errors++;
        }
    }
    printf("%s (%d mismatches)\n", errors == 0 ? "PASS" : "FAIL", errors);

    free(h_a);
    free(h_b);
    checkCuda(cudaFree(d_a), "cudaFree d_a");
    checkCuda(cudaFree(d_b), "cudaFree d_b");

    return errors == 0 ? 0 : 1;
}