#include <cmath>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>
#include <iostream>
#include <string>
#include <vector>

#include "../MyTimer.h"


// Allocate a plain heap buffer of `len` doubles filled with 0, 1, ..., len-1.
// The caller owns the returned buffer and must release it with delete[].
double* init_common(int len)
{
    double *buf = new double[len];
    for (int idx = 0; idx < len; ++idx)
        buf[idx] = static_cast<double>(idx);
    return buf;
}
// Allocate `len` doubles of pinned (page-locked, mapped) host memory filled
// with 0, 1, ..., len-1. Pinned memory enables faster host-to-device copies.
// Fix: the cudaHostAlloc result is now checked — the original wrote through
// the pointer unconditionally, which is undefined behavior if the allocation
// failed. Caller must release the buffer with cudaFreeHost().
double* init_CUDA(int len)
{
    double *f = nullptr;
    cudaError_t err = cudaHostAlloc((void**)&f, sizeof(double) * len, cudaHostAllocMapped);
    if (err != cudaSuccess)
    {
        std::cerr << "cudaHostAlloc failed: " << cudaGetErrorString(err) << std::endl;
        std::abort();
    }
    for(int i = 0; i < len; i++)
        f[i] = double(i);
    return f;
}



// Sequential reference reduction: sums the whole array in order into a local
// accumulator, stores the total destructively into arr[0] (matching the
// original's in-place behavior) and mirrors it into temp[0].
void common(double *arr, double* temp,int len){
    double acc = arr[0];
    for (int idx = 1; idx < len; ++idx)
        acc += arr[idx];
    arr[0] = acc;
    temp[0] = acc;
}
// Tree-style (pairwise) CPU reduction: repeatedly folds arr[i + stride] into
// arr[i] with doubling strides, leaving the total in arr[0] and temp[0].
// Destroys the contents of arr.
// Fix: the partner index is now bounds-checked — the original read past the
// end of the array whenever len was not a power of two.
void cpu_guiyue(double *arr, double* temp, int len)
{
    for(int stride = 1; stride < len; stride *= 2)
    {
        for(int i = 0; i < len; i += (stride << 1))
        {
            if (i + stride < len)   // the last chunk may have no partner
                arr[i] += arr[i + stride];
        }
    }
    temp[0] = arr[0];
}

// Most basic per-block reduction: no shared memory, no warp-level tricks.
// Each block tree-reduces its blockDim.x-element slice of arr in place in
// global memory and writes the block total to temp[blockIdx.x].
// Requires blockDim.x to be a power of two.
// Fix: out-of-range threads no longer return before the loop — every thread
// in the block must reach each __syncthreads() or behavior is undefined on
// partial (tail) blocks; the work itself stays guarded instead.
__global__ void GPU1(double *arr, double* temp, int len)
{
    int tid = threadIdx.x;
    int block_start = blockDim.x * blockIdx.x;
    double* block_head = arr + block_start;

    for(int stride = blockDim.x / 2; stride > 0 ; stride >>= 1)
    {
        // Guard the partner element against the tail of the array;
        // (block_start + tid + stride) < len also implies tid is in range.
        if(tid < stride && (block_start + tid + stride) < len)
            block_head[tid] += block_head[tid + stride];

        __syncthreads();
    }
    if (tid == 0)
        temp[blockIdx.x] = block_head[0];
}

__global__ void GPU1_share(double *arr, double* temp, int len)
{
    // Shared-memory variant of GPU1: each thread stages one element into
    // dynamic shared memory (zero for slots past the end of arr) and the
    // tree reduction then runs entirely in shared memory.
    // Launch with blockDim.x * sizeof(double) bytes of dynamic shared memory;
    // requires blockDim.x to be a power of two.
    // Fix: out-of-range threads no longer return early — the original made
    // them skip the __syncthreads() barriers, which is undefined behavior.
    extern __shared__ double s_data[]; // sized by the 3rd <<<...>>> launch argument

    int tid = threadIdx.x;
    int block_start = blockDim.x * blockIdx.x;

    // Stage into shared memory; zero-padding the tail means the reduction
    // loop needs no per-step global bounds check (adding 0 is a no-op).
    s_data[tid] = (block_start + tid < len) ? arr[block_start + tid] : 0.0;
    __syncthreads();

    for(int stride = blockDim.x / 2; stride > 0 ; stride >>= 1)
    {
        if(tid < stride)
            s_data[tid] += s_data[tid + stride];

        __syncthreads();
    }
    if (tid == 0)
        temp[blockIdx.x] = s_data[0];
}

__global__ void GPU2(double *arr, double* temp, int len)
{
    // Two-elements-per-thread variant: each block reduces a slice of
    // 2 * blockDim.x elements, so half as many blocks are launched and no
    // thread is idle during the first reduction step.
    // Launch with 2 * blockDim.x * sizeof(double) bytes of dynamic shared
    // memory; requires blockDim.x to be a power of two.
    // Fixes: (1) each of the two loads is bounds-checked independently —
    // the original read block_head[tid + blockDim.x] out of bounds whenever
    // only the first half of the slice was inside the array; (2) no early
    // return before __syncthreads() (undefined behavior on partial blocks).
    extern __shared__ double s_data[];
    int tid = threadIdx.x;
    int block_start = blockDim.x * blockIdx.x * 2;

    // Zero-pad out-of-range slots so the reduction needs no global bounds checks.
    s_data[tid] = (block_start + tid < len) ? arr[block_start + tid] : 0.0;
    s_data[tid + blockDim.x] = (block_start + tid + blockDim.x < len)
                                   ? arr[block_start + tid + blockDim.x] : 0.0;
    __syncthreads();

    for(int stride = blockDim.x; stride > 0; stride >>= 1)
    {
        if(tid < stride)
            s_data[tid] += s_data[tid + stride];
        __syncthreads();
    }
    if (tid == 0)
        temp[blockIdx.x] = s_data[0];
}

__global__ void GPU2_warp(double *arr, double* temp, int len)
{
    // GPU2 plus a warp-shuffle finish: the shared-memory tree stops once 32
    // partials remain and the first warp reduces them with __shfl_down_sync,
    // avoiding the heavily divergent final iterations.
    // Launch with 2 * blockDim.x * sizeof(double) dynamic shared memory;
    // requires blockDim.x to be a power of two and >= 32, so warp 0 is full
    // and the 0xffffffff participant mask is valid.
    // Fixes: bounds-checked loads, no early return before __syncthreads()
    // (which also made the full-mask shuffles UB when lanes had exited), and
    // the warp stage accumulates in double — the original truncated the
    // double partials to float, silently losing precision.
    extern __shared__ double s_data[];
    int tid = threadIdx.x;
    int block_start = blockDim.x * blockIdx.x * 2;

    s_data[tid] = (block_start + tid < len) ? arr[block_start + tid] : 0.0;
    s_data[tid + blockDim.x] = (block_start + tid + blockDim.x < len)
                                   ? arr[block_start + tid + blockDim.x] : 0.0;
    __syncthreads();

    // Shared-memory tree down to 32 surviving partials.
    for(int stride = blockDim.x; stride >= 32; stride >>= 1)
    {
        if(tid < stride)
            s_data[tid] += s_data[tid + stride];
        __syncthreads();
    }

    // Final 32 partials reduced register-to-register within warp 0.
    if(tid < 32)
    {
        double val = s_data[tid];
        #pragma unroll
        for(int offset = 16; offset > 0; offset >>= 1)
            val += __shfl_down_sync(0xffffffff, val, offset);
        if(tid == 0)
            temp[blockIdx.x] = val;
    }
}
__global__ void GPU2_warp8way(double *arr, double* temp, int len)
{
    // Eight-elements-per-thread variant: each thread sums eight strided
    // global loads into a single shared slot, so one block covers
    // 8 * blockDim.x elements. Only blockDim.x shared doubles are needed
    // (the host currently passes 8x that, which is harmless but wasteful).
    // Requires blockDim.x to be a power of two and >= 32.
    // Fixes: every one of the eight loads is bounds-checked (the original
    // read past the end of arr for any in-range thread whose later strided
    // lanes fell off the array), no early return before __syncthreads(),
    // and the warp stage accumulates in double instead of float.
    extern __shared__ double s_data[];
    int tid = threadIdx.x;
    int block_start = blockDim.x * blockIdx.x * 8;

    // Accumulate the eight strided loads in a register, zero for any index
    // past the end of the array, then publish one partial per thread.
    double sum = 0.0;
    #pragma unroll
    for(int k = 0; k < 8; k++)
    {
        int idx = block_start + tid + k * blockDim.x;
        if (idx < len)
            sum += arr[idx];
    }
    s_data[tid] = sum;
    __syncthreads();

    // Shared-memory tree down to 32 surviving partials.
    for(int stride = blockDim.x / 2; stride >= 32; stride >>= 1)
    {
        if(tid < stride)
            s_data[tid] += s_data[tid + stride];
        __syncthreads();
    }

    // Final 32 partials reduced register-to-register within warp 0.
    if(tid < 32)
    {
        double val = s_data[tid];
        #pragma unroll
        for(int offset = 16; offset > 0; offset >>= 1)
            val += __shfl_down_sync(0xffffffff, val, offset);
        if(tid == 0)
            temp[blockIdx.x] = val;
    }
}




// Print, for every recorded run, whether its result matches the baseline
// (entry 0) along with both values and the elapsed time.
// Fix: results are compared with a tolerance instead of exact floating-point
// equality — the parallel reductions sum in a different order than the
// sequential baseline, so bit-exact agreement is not expected.
void tell_ok(std::vector<std::string> &names, std::vector<double> &times, std::vector<double> &results)
{
    for(size_t i = 0; i < times.size(); i++)
    {
        double diff = std::fabs(results[0] - results[i]);
        double tol  = 1e-9 + 1e-6 * std::fabs(results[0]);   // abs + relative slack
        std::cout << names[i] << " is " << ((diff <= tol) ? "right" : "wrong")
                  << " " << results[0] << " " << results[i]
                  << ", time used: " << times[i] << std::endl;
    }
}


// Time the sequential reference reduction on a private copy of f and record
// its name, elapsed milliseconds, and result. The result recorded here
// becomes results[0], the ground truth the other runs are compared against.
// Fix: the byte count is size_t — `int` overflows once len reaches 2^28
// doubles (sizeof(double) * len > INT_MAX).
void baseline(int len, double* f, std::vector<std::string> &names, std::vector<double> &times, std::vector<double> &results)
{
    GPUTimer t;
    size_t nBs = sizeof(double) * len;
    double* c_f = new double[len];
    double* temp = new double[1];
    memcpy(c_f, f, nBs);

    t.start();
    common(c_f, temp, len);
    double ms = t.stop();

    results.push_back(temp[0]);
    times.push_back(ms);
    names.push_back("baseline");

    delete[] c_f;
    delete[] temp;
}

// Time the tree-style CPU reduction (cpu_guiyue) on a private copy of f and
// record its name, elapsed milliseconds, and result.
// Fix: the byte count is size_t — `int` overflows once len reaches 2^28
// doubles (sizeof(double) * len > INT_MAX).
void cpu_plain(int len, double* f, std::vector<std::string> &names, std::vector<double> &times, std::vector<double> &results)
{
    GPUTimer t;
    size_t nBs = sizeof(double) * len;
    double* c_f = new double[len];
    double* temp = new double[1];
    memcpy(c_f, f, nBs);

    t.start();
    cpu_guiyue(c_f, temp, len);
    double ms = t.stop();

    results.push_back(temp[0]);
    times.push_back(ms);
    names.push_back("cpu_plain");

    delete[] c_f;
    delete[] temp;
}

// Copy f to the device, run one of the reduction kernels (selected by
// `type`), reduce the per-block partials on the host, and record the name,
// elapsed milliseconds, and total. The timed region deliberately includes
// the H2D copy, the kernel, and the D2H copy of the partials.
//   type 1: GPU1 (global memory)      type 2: GPU1_share (shared memory)
//   type 3: GPU2 (2 elems/thread)     type 4: GPU2_warp (+ warp shuffle)
//   type 5: GPU2_warp8way (8 elems/thread + warp shuffle)
// Any other value falls back to GPU1.
// Fixes: gridsize was left uninitialized for `type` values outside 1-5 even
// though the launch switch below falls back to GPU1 (undefined behavior) —
// the sizing switch now has a matching default; the byte count is size_t;
// kernel-launch errors are surfaced via cudaGetLastError().
void GPU1_run(
    int len, double* f, 
    std::vector<std::string> &names, std::vector<double> &times, std::vector<double> &results,
    int type
)
{    
    GPUTimer t;
    size_t nBs = sizeof(double) * len;

    int blocksize = 512;
    int gridsize;
    switch(type){
        case 3: case 4:
            // Two elements per thread -> half as many blocks.
            gridsize = (len + (blocksize * 2) - 1) / (blocksize * 2);
            break;
        case 5:
            // Eight elements per thread.
            gridsize = (len + (blocksize * 8) - 1) / (blocksize * 8);
            break;
        case 1: case 2:
        default:
            // One element per thread; also the fallback, matching the
            // default GPU1 launch below.
            gridsize = (len + blocksize - 1) / blocksize;
            break;
    }

    double answer = 0;
    double* partials = new double[gridsize];   // host copy of per-block sums

    double *c_f, *temp;
    cudaMalloc((void **)&c_f, nBs);
    cudaMalloc((void **)&temp, gridsize * sizeof(double));

    t.start();
    cudaMemcpy(c_f, f, nBs, cudaMemcpyHostToDevice);
    switch(type){
        case 1:
        default:
            GPU1<<<gridsize, blocksize>>>(c_f, temp, len); // plain global-memory reduction
            names.push_back("GPU1");
            break;
        case 2:
            GPU1_share<<<gridsize, blocksize, blocksize * sizeof(double)>>>(c_f, temp, len); // + shared memory
            names.push_back("GPU1_shared");
            break;
        case 3:
            GPU2<<<gridsize, blocksize, 2 * blocksize * sizeof(double)>>>(c_f, temp, len); // 2 elems/thread
            names.push_back("GPU2");
            break;
        case 4:
            GPU2_warp<<<gridsize, blocksize, 2 * blocksize * sizeof(double)>>>(c_f, temp, len); // + warp shuffle
            names.push_back("GPU2_warp");
            break;
        case 5:
            GPU2_warp8way<<<gridsize, blocksize, 8 * blocksize * sizeof(double)>>>(c_f, temp, len); // 8 elems/thread
            names.push_back("GPU2_warp_8_way");
            break;
    }
    // Launch-configuration errors do not surface from the <<<>>> expression
    // itself; they must be polled explicitly.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        std::cerr << "kernel launch failed: " << cudaGetErrorString(launchErr) << std::endl;

    cudaDeviceSynchronize();
    cudaMemcpy(partials, temp, gridsize * sizeof(double), cudaMemcpyDeviceToHost);
    for(int i = 0; i < gridsize; i++)
        answer += partials[i];
    double ms = t.stop();

    cudaFree(c_f);
    cudaFree(temp);
    delete[] partials;
    results.push_back(answer);
    times.push_back(ms);
}
// Benchmark driver: build a 2^20-element input twice (regular heap memory
// and pinned memory), run the CPU baselines and every GPU variant on each,
// then print correctness-vs-baseline and timing for all runs.
// Fix: both input buffers are now released (delete[] for the heap buffer,
// cudaFreeHost for the pinned buffer) — the original leaked both.
int main()
{   
    int len = 1 << 20;
    double* common_f = init_common(len);
    double* CUDA_f = init_CUDA(len);
    std::vector<double> results;
    std::vector<double> times;
    std::vector<std::string> names;

    baseline(len, common_f, names, times, results);
    cpu_plain(len, common_f, names, times, results);

    // GPU variants 1-5, first fed from pageable memory, then from pinned.
    for (int type = 1; type <= 5; type++)
        GPU1_run(len, common_f, names, times, results, type);
    for (int type = 1; type <= 5; type++)
        GPU1_run(len, CUDA_f, names, times, results, type);

    tell_ok(names, times, results);

    delete[] common_f;      // from init_common (new[])
    cudaFreeHost(CUDA_f);   // from init_CUDA (cudaHostAlloc)
    return 0;
}


















