#include<iostream>
#include<cuda_runtime.h>
#include<vector>
#include "../MyTimer.h"
#include<string>

// Element-wise exact comparison of two float buffers of length `len`.
// Exact `==` is intentional: a transpose only copies values bit-for-bit,
// so no floating-point tolerance is needed here.
bool same(float* a, float* b, int len)
{
    for (int idx = 0; idx != len; ++idx)
    {
        if (a[idx] != b[idx])
            return false;
    }
    return true;
}
// Allocate an x*y float buffer on the host heap and fill it with the
// ascending sequence 0,1,2,... (caller owns the memory; free with delete[]).
float* init_common(int x, int y)
{
    const int total = x * y;
    float* buf = new float[total];
    for (int k = 0; k < total; ++k)
        buf[k] = static_cast<float>(k);
    return buf;
}
// Allocate a pinned (page-locked), mapped host buffer of x*y floats and
// fill it with the ascending sequence 0,1,2,...
// Returns nullptr on allocation failure (caller frees with cudaFreeHost).
float* init_CUDA(int x, int y)
{
    float* m = nullptr;
    // Fix: the original ignored the cudaHostAlloc return value; on failure
    // the fill loop below would write through an uninitialized pointer.
    cudaError_t err = cudaHostAlloc((void**)&m, sizeof(float) * x * y, cudaHostAllocMapped);
    if (err != cudaSuccess || m == nullptr)
    {
        std::cerr << "cudaHostAlloc failed: " << cudaGetErrorString(err) << std::endl;
        return nullptr;
    }
    for (int i = 0; i < x * y; i++)
        m[i] = (float)i;
    return m;
}

// CPU reference transpose.
// `m` is y rows by x columns (row-major); `m_T` receives the transpose,
// x rows by y columns. Reading `m` row by row keeps the source access
// sequential; the strided writes to `m_T` are the unavoidable half of a
// transpose (writing `m[i * x + j]` column-wise instead would be the
// worst-case access pattern).
void common_T(float* m, float* m_T, int x, int y)
{
    for (int row = 0; row < y; ++row)
    {
        for (int col = 0; col < x; ++col)
        {
            m_T[col * y + row] = m[row * x + col];
        }
    }
}

// Run the CPU baseline transpose, timing it and appending the outcome to
// the shared result/time/name vectors under the label "baseline".
// Returns the transposed buffer (caller owns it) — used later as the
// reference answer for the GPU variants.
float* common_run(
    int x, int y, float* m,
    std::vector<bool> &results, std::vector<float> &times, std::vector<std::string> &names
)
{
    GPUTimer timer;
    timer.start();
    float* transposed = new float[x * y];
    common_T(m, transposed, x, y);
    const float elapsed = timer.stop();

    // The baseline defines correctness, so it is recorded as "right".
    results.push_back(true);
    times.push_back(elapsed);
    names.push_back("baseline");
    return transposed;
}

// Naive transpose, one element per thread.
// Expects a 2D launch covering an x-by-y grid of elements.
// Reads from `m` are coalesced (consecutive x_idx within a row); the
// writes to `m_T` are strided by y.
__global__ void cuda_plain_row(int x, int y, float*m, float* m_T)
{
    int x_idx = blockDim.x * blockIdx.x + threadIdx.x;
    int y_idx = blockDim.y * blockIdx.y + threadIdx.y;

    // Fix: the original guard was `y_idx << y` — a left shift, not a
    // comparison — so out-of-range rows were not rejected.
    if(x_idx < x && y_idx < y)
    {
        m_T[x_idx * y + y_idx] = m[y_idx * x + x_idx];
    }
}
// Naive transpose, one element per thread — the mirror of cuda_plain_row:
// here the reads from `m` are strided by y and the writes to `m_T` are
// coalesced. Expects a 2D launch covering an x-by-y grid of elements.
__global__ void cuda_plain_col(int x, int y, float*m, float* m_T)
{
    int x_idx = blockDim.x * blockIdx.x + threadIdx.x;
    int y_idx = blockDim.y * blockIdx.y + threadIdx.y;

    // Fix: the original guard was `y_idx << y` — a left shift, not a
    // comparison — so out-of-range rows were not rejected.
    if(x_idx < x && y_idx < y)
    {
        m_T[y_idx * x + x_idx] = m[x_idx * y + y_idx];
    }
}

// Transpose with 4-way unrolling along x: each thread handles four
// elements spaced blockDim.x apart, so grid.x must be computed with a
// factor of 4 (see the type-3 launch in cuda_T).
// NOTE(review): worth re-measuring with L1 cache preference enabled
// (cudaFuncSetCacheConfig) — see the notes below this kernel.
__global__ void cuda_unroll_4(int x, int y, float*m, float* m_T)
{
    int x_idx = blockDim.x * blockIdx.x * 4 + threadIdx.x;
    int y_idx = blockDim.y * blockIdx.y + threadIdx.y;

    int m_i = y_idx * x + x_idx;   // flat index of m[y_idx][x_idx]
    int m_T_i = x_idx * y + y_idx; // flat index of m_T[x_idx][y_idx] (fix: semicolon was missing)

    // Fix: the guard must bound the COLUMN index, not the flat index —
    // the original `m_i + 3 * blockDim.x < x` was almost always false
    // past row 0, so most of the matrix was never transposed.
    if(x_idx + 3 * blockDim.x < x && y_idx < y){
        m_T[m_T_i] = m[m_i];
        // Each extra element shifts the source by blockDim.x columns and
        // the destination by blockDim.x rows (i.e. blockDim.x * y floats).
        m_T[m_T_i + blockDim.x * y] = m[m_i + blockDim.x];
        m_T[m_T_i + 2 * blockDim.x * y] = m[m_i + 2 * blockDim.x];
        m_T[m_T_i + 3 * blockDim.x * y] = m[m_i + 3 * blockDim.x];
    }
}

/*
cudaFuncSetCacheConfig(your_kernel_function, cudaFuncCachePreferL1);


(
    cudaFuncCachePreferNone        // 默认均衡分配
    cudaFuncCachePreferShared      // 更大共享内存
    cudaFuncCachePreferL1          // 更大 L1 Cache
    cudaFuncCachePreferEqual       // 共享内存和 L1 均等

)
*/


/*
直角坐标（笛卡尔坐标）到对角坐标的转化：(其实直角坐标可以看作对角坐标变换的一个特例)
such as:

0 1 2 
3 4 5
6 7 8
  ↓ T
0 3 6
1 4 7
2 5 8
  ↓ move
0 3 6
  1 4 7
    2 5 8
  ↓ %
0 3 6
7 1 4
5 8 2

所以这个变化天然包含我们需要的转置操作。
x_Cartesian(直角坐标)
x_les(对角坐标)

坐标变换
x_les = (y_Cartesian + x_Cartesian) % dimx
y_les = x_Cartesian (这里是因为对角坐标是直角坐标转置来的，然后转置完后的调整工作实际上没有对y轴产生影响。)


这么做的好处是：
设备内存中的连续的 256 字节区域被分配到连续的分区。比如0-255是分区1，……
如果我们使用直角坐标系，那么y_idx * nx + x_idx的时候，由于每个线程访问的数据在我们存储的数组中实际上是连着的，所以会出现大部分线程命中同一个分区，这需要排队。（即使处理器并行了，但是内存读写没法并行）
（连续访问是好的（coalesced access），但必须分布在多个内存分区。）

连续访问：
thread 0	input[0]
thread 1	input[1]
thread 2	input[2]
这会被GPU打包成为一个合并的内存请求

而分区冲突：GPU 的设备内存（global memory）分成了若干个 DRAM 分区，比如 32 个，每个负责处理一定范围的地址。
每个分区一次只能处理一个请求。
如果很多线程同时访问落在同一个分区的地址，就会排队。

分区	地址范围
P0	0–255
P1	256–511
P2	512–767
现在我们有一个 warp（32 个线程），每个线程访问 float input[i]，float 占 4 字节：
线程 0 访问地址 0 * 4 = 0
线程 1 访问地址 1 * 4 = 4
线程 31 访问地址 31 * 4 = 124
这 32 个地址都在 0–124，所以全都落在 P0 分区，就会发生冲突（排队）！


*/
// Transpose with diagonal block reordering: block coordinates are
// remapped so that neighbouring blocks touch different DRAM partitions,
// avoiding partition camping (see the long note above for the derivation
// x_les = (y + x) % dim, y_les = x).
__global__ void cuda_les(int x, int y, float*m, float* m_T)
{
    int block_y = blockIdx.y;
    // Fix: the diagonal remap must wrap modulo the number of blocks in x
    // (gridDim.x), not the number of threads per block (blockDim.x) —
    // otherwise blocks past blockDim.x alias earlier ones and part of the
    // matrix is written twice while other parts are never written.
    int block_x = (blockIdx.y + blockIdx.x) % gridDim.x;

    int x_idx = block_x * blockDim.x + threadIdx.x;
    int y_idx = block_y * blockDim.y + threadIdx.y;

    if(x_idx < x && y_idx < y)
        m_T[x_idx * y + y_idx] = m[y_idx * x + x_idx];
}


// Run one GPU transpose variant (selected by `type`, 1-4), timing the
// full round trip (alloc + H2D copy + kernel + D2H copy), verify the
// result against `answer`, and append outcome/time/name to the shared
// vectors. `m` is the y-by-x host input; `answer` the x-by-y reference.
void cuda_T(
    int x, int y, float* m, float* answer, 
    std::vector<bool> &results, std::vector<float> &times, std::vector<std::string> &names,
    int type
)
{
    int nBs = sizeof(float) * x * y;

    // Fix: the original used 256x256 = 65536 threads per block, which
    // exceeds the 1024-threads-per-block hardware limit, so every launch
    // failed with an invalid-configuration error (and, unchecked, the
    // "result" compared was just uninitialized device memory).
    // 16x16 = 256 threads per block is a safe, conventional choice.
    const int block_x = 1 << 4;
    const int block_y = 1 << 4;
    dim3 block(block_x, block_y);
    dim3 grid((x + block_x - 1) / block_x, (y + block_y - 1) / block_y);

    GPUTimer t;
    t.start();
    float* temp = new float[x * y];
    float* c_m;
    float* c_m_T;
    cudaMalloc((void**)&c_m, nBs);
    cudaMalloc((void**)&c_m_T, nBs);
    cudaMemcpy(c_m, m, nBs, cudaMemcpyHostToDevice);

    switch(type)
    {
        case 1:
            // Plain per-element transpose, coalesced reads / strided writes.
            cuda_plain_row<<<grid, block>>>(x, y, c_m, c_m_T);
            names.push_back("cuda_plain_row");
            break;
        case 2:
            // Plain per-element transpose, strided reads / coalesced writes.
            cuda_plain_col<<<grid, block>>>(x, y, c_m, c_m_T);
            names.push_back("cuda_plain_col");
            break;
        case 3:
            // 4-way unrolled: each thread covers 4 columns, so grid.x shrinks by 4.
            grid.x = (x + block_x * 4 - 1) / (block_x * 4);
            cuda_unroll_4<<<grid, block>>>(x, y, c_m, c_m_T);
            names.push_back("cuda_unroll_4");
            break;
        case 4:
            // Diagonal block reordering to avoid partition camping.
            cuda_les<<<grid, block>>>(x, y, c_m, c_m_T);
            names.push_back("cuda_les");
            break;
    }

    // Surface launch-configuration errors, which do not show up as a
    // return value of the <<<>>> launch itself.
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess)
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << std::endl;

    cudaDeviceSynchronize();
    cudaMemcpy(temp, c_m_T, nBs, cudaMemcpyDeviceToHost);
    float ms = t.stop();

    results.push_back(err == cudaSuccess && same(temp, answer, x * y));
    times.push_back(ms);
    delete[] temp;
    cudaFree(c_m);
    cudaFree(c_m_T);
}


// Print one verdict line per recorded run: name, right/wrong, and the
// measured time. (Removed the leftover debug fields that re-printed
// results[0] and results[i] as raw 0/1 values in the middle of the line.)
void tell_ok(std::vector<bool> &results, std::vector<float> &times, std::vector<std::string> &names)
{
    for(size_t i = 0; i < times.size(); i++)
    {
        std::cout << names[i] << " is " << (results[i] ? "right" : "wrong")
                  << ", time used: " << times[i] << std::endl;
    }
}

// Benchmark driver: build a 1024x1024 matrix, compute the CPU baseline
// transpose, run every GPU variant against it, and print the report.
int main()
{
    int x = 1 << 10, y = 1 << 10;
    float* common_m = init_common(x, y);
    // Pinned host copy. NOTE(review): currently unused by the runs below —
    // presumably intended for a zero-copy variant; confirm or remove.
    float* cuda_m = init_CUDA(x, y);

    std::vector<bool> results;
    std::vector<float> times;
    std::vector<std::string> names;

    float* answer = common_run(x, y, common_m, results, times, names);
    // Fix: variants 3 (unroll-4) and 4 (diagonal) were defined but never run.
    for(int type = 1; type <= 4; type++)
        cuda_T(x, y, common_m, answer, results, times, names, type);

    tell_ok(results, times, names);

    // Fix: all of these buffers were leaked in the original.
    delete[] common_m;
    delete[] answer;
    if(cuda_m != nullptr)
        cudaFreeHost(cuda_m);
    return 0;
}







