#include <cstdlib>
#include <cstring>
#include <iostream>

#include <cuda_runtime.h>

#include "../common/common.h"
#define DEVICENUM 0
#define BDIMX 32 // 列数
#define BDIMY 16 // 行数
#define IPAD 2

// Row-major write followed by row-major read of a rectangular static
// shared-memory tile. Both accesses are bank-conflict free.
// Expects a single block of (BDIMX, BDIMY) threads; out holds BDIMX*BDIMY ints.
__global__ void setRowReadRow(int *out)
{
    __shared__ int tile[BDIMY][BDIMX]; // rectangular static shared memory

    const unsigned int col = threadIdx.x;
    const unsigned int row = threadIdx.y;
    const unsigned int tid = row * blockDim.x + col; // flat thread id (single block)

    // Row-major store: adjacent lanes touch adjacent banks -> no conflict.
    tile[row][col] = tid;
    __syncthreads(); // make every write visible before any read

    // Row-major load back to global memory: also conflict free.
    out[tid] = tile[row][col];
}

// 注意：这个核函数设置矩形静态共享内存的大小是BDIMX为行
// NOTE: unlike the other kernels, this one declares the tile transposed
// (BDIMX rows by BDIMY columns) and accesses it column-wise.
__global__ void setColReadColTran(int *out)
{
    __shared__ int tile[BDIMX][BDIMY]; // transposed layout relative to the other kernels

    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; // flat thread id

    // Column-pattern store: lanes of a warp are BDIMY words apart,
    // causing a BDIMY-way bank conflict (serialized access).
    tile[threadIdx.x][threadIdx.y] = tid;
    __syncthreads(); // wait for all stores to complete

    // Column-pattern load: same BDIMY-way bank conflict on the read.
    out[tid] = tile[threadIdx.x][threadIdx.y];
}

// Stores and loads the tile through transposed (row, col) coordinates
// derived from the flat thread id; write and read hit the same element.
__global__ void setColReadColNormal(int *out)
{
    __shared__ int tile[BDIMY][BDIMX];

    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // Map the flat id into transposed coordinates (trow, tcol).
    const unsigned int trow = tid / blockDim.y;
    const unsigned int tcol = tid % blockDim.y;

    tile[tcol][trow] = tid; // column-pattern store
    __syncthreads();        // barrier before the symmetric load
    out[tid] = tile[tcol][trow];
}

// Row-major write, column-pattern read: demonstrates a read-side
// BDIMY-way bank conflict on a rectangular tile.
__global__ void setRowReadCol(int *out)
{
    __shared__ int tile[BDIMY][BDIMX]; // rectangular static shared memory

    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // Transposed (row, col) coordinates computed from the flat id.
    const unsigned int trow = tid / blockDim.y;
    const unsigned int tcol = tid % blockDim.y;

    // Row-major store: conflict free.
    tile[threadIdx.y][threadIdx.x] = tid;
    __syncthreads(); // all writes finished before any transposed read

    // Column-pattern load: BDIMY-way bank conflict, access is serialized.
    out[tid] = tile[tcol][trow];
}

// Same access pattern as setRowReadCol, but the tile is padded with IPAD
// extra columns so the column-pattern read no longer conflicts.
__global__ void setRowReadColPad(int *out)
{
    __shared__ int tile[BDIMY][BDIMX + IPAD]; // padded rectangular tile

    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // Transposed (row, col) coordinates from the flat id.
    const unsigned int trow = tid / blockDim.y;
    const unsigned int tcol = tid % blockDim.y;

    tile[threadIdx.y][threadIdx.x] = tid; // row-major store, conflict free
    __syncthreads();

    // Column-pattern load: the padding skews rows across banks,
    // so this read is conflict free as well.
    out[tid] = tile[tcol][trow];
}

// Dynamic shared memory variant: a 1D array emulates the rectangular tile.
// Row-major write, column-pattern read (BDIMY-way bank conflict on the read).
// Launch with BDIMX*BDIMY*sizeof(int) bytes of dynamic shared memory.
__global__ void setRowReadColDyn(int *out)
{
    extern __shared__ int tile[]; // dynamic shared memory, flat 1D view

    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // Transposed coordinates derived from the flat id.
    const unsigned int trow = tid / blockDim.y;
    const unsigned int tcol = tid % blockDim.y;

    // Flattened column-pattern index into the 1D tile.
    const unsigned int colIdx = tcol * blockDim.x + trow;

    tile[tid] = tid; // row-major store: conflict free
    __syncthreads();

    out[tid] = tile[colIdx]; // column-pattern load: BDIMY-way bank conflict
}

// Dynamic shared memory with padding: each logical row is BDIMX + IPAD
// words wide, so the column-pattern read is conflict free.
// Launch with (BDIMX + IPAD) * BDIMY * sizeof(int) bytes of dynamic shared memory.
__global__ void setRowReadColDynPad(int *out)
{
    extern __shared__ int tile[]; // dynamic shared memory, flat 1D view

    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // Transposed coordinates derived from the flat id.
    const unsigned int trow = tid / blockDim.y;
    const unsigned int tcol = tid % blockDim.y;

    // Indices into the padded 1D tile: row-pattern for the store,
    // column-pattern for the load.
    const unsigned int rowIdx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
    const unsigned int colIdx = tcol * (blockDim.x + IPAD) + trow;

    tile[rowIdx] = tid; // row-major store, conflict free
    __syncthreads();

    // Padded column-pattern load: no bank conflict.
    out[tid] = tile[colIdx];
}

// Driver: runs each shared-memory access-pattern kernel on a single
// (BDIMX, BDIMY) block and copies the result back. Kernel launch errors
// are surfaced via cudaGetLastError(); all CUDA API calls are CHECKed.
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(DEVICENUM));

    const int nElem = BDIMX * BDIMY;
    const size_t nBytes = nElem * sizeof(int);

    // Device buffer that receives each kernel's output.
    int *device_array_res = nullptr;
    CHECK(cudaMalloc((int **)&device_array_res, nBytes));
    CHECK(cudaMemset(device_array_res, 0, nBytes));

    // Host buffer that receives the device results.
    int *gpu_res_to_cpu = (int *)malloc(nBytes);
    if (gpu_res_to_cpu == nullptr)
    {
        std::cerr << "host allocation of " << nBytes << " bytes failed" << std::endl;
        CHECK(cudaFree(device_array_res));
        return 1;
    }
    memset(gpu_res_to_cpu, 0, nBytes);

    dim3 gridSize(1, 1);
    dim3 blockSize(BDIMX, BDIMY);

    // 1. Rectangular tile: row-major write, row-major read (no conflict).
    setRowReadRow<<<gridSize, blockSize>>>(device_array_res);
    CHECK(cudaGetLastError()); // catch launch-configuration errors
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    // 2. Transposed tile: column write, column read (BDIMY-way conflict).
    setColReadColTran<<<gridSize, blockSize>>>(device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    // 3. Rectangular tile: column write, column read via transposed coordinates.
    setColReadColNormal<<<gridSize, blockSize>>>(device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    // 4. Rectangular tile: row write, column read (read-side conflict).
    setRowReadCol<<<gridSize, blockSize>>>(device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    // 5. Padding removes the read-side bank conflict.
    setRowReadColPad<<<gridSize, blockSize>>>(device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    // 6. Dynamic shared memory (1D tile): row write, column read.
    setRowReadColDyn<<<gridSize, blockSize, nElem * sizeof(int)>>>(device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    // 7. Dynamic shared memory with padding: row write, column read, no conflict.
    setRowReadColDynPad<<<gridSize, blockSize, (BDIMX + IPAD) * BDIMY * sizeof(int)>>>(device_array_res);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpu_res_to_cpu, device_array_res, nBytes, cudaMemcpyDeviceToHost));

    CHECK(cudaFree(device_array_res));
    free(gpu_res_to_cpu);
    CHECK(cudaDeviceReset()); // flush profiling data and release the device

    return 0;
}