#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <reduce_ops.h>
using namespace std;
#define BLOCK 1024

// Interleaved-pair reduction: each block sums its blockDim.x-element slice of
// d_in (in place) and writes the per-block partial sum to d_out[blockIdx.x].
// Assumes blockDim.x is a power of two.
template<typename T>
__global__ void reduce_sum_interleave(T *d_out, T *d_in, const unsigned int N){
    unsigned int tid = threadIdx.x;
    unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    // Base of this block's slice; all intra-block indexing is relative to it.
    T *idata = d_in + blockDim.x * blockIdx.x;
    for(unsigned int span = blockDim.x / 2; span >= 1; span /= 2){
        // Guard the read side against running past N in the tail block; do not
        // early-return, so every thread still reaches the barrier below.
        if(tid < span && idx + span < N){
            idata[tid] += idata[tid + span];
        }
        // Barrier inside the loop: the next iteration reads values written by
        // other threads in this one.
        __syncthreads();
    }
    if(tid == 0){
        // idata[0], not d_in[0]: the result lives at this block's base.
        d_out[blockIdx.x] = idata[0];
    }
}
// Neighbored-pair reduction: each block sums its blockDim.x-element slice of
// d_in (in place) and writes the per-block partial sum to d_out[blockIdx.x].
// Assumes blockDim.x is a power of two. (Divergent by design; slower than the
// interleaved variant.)
template<typename T>
__global__ void reduce_sum_neighbored(T *d_out, T *d_in, const unsigned int N){
    unsigned int tid = threadIdx.x;
    unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    // Base of this block's slice.
    T *idata = d_in + blockDim.x * blockIdx.x;
    for(unsigned int span = 1; span < blockDim.x; span *= 2){
        // Only every (2*span)-th thread accumulates its right neighbor;
        // "tid % 2 == 0" would combine the wrong pairs once span > 1.
        if(tid % (2 * span) == 0 && idx + span < N){
            idata[tid] += idata[tid + span];
        }
        // Barrier inside the loop: later iterations consume earlier writes.
        __syncthreads();
    }
    if(tid == 0){
        // Result is at this block's base, not at d_in[0].
        d_out[blockIdx.x] = idata[0];
    }
}


// 2x-unrolled reduction: each block first folds a second blockDim.x-wide
// segment into its own, then tree-reduces in place. Launch with grid/2 blocks.
// Writes the per-block partial sum to d_out[blockIdx.x].
template<typename T>
__global__ void reduce_sum_unroll2(T *d_out, T *d_in, const unsigned int N){
    unsigned int tid = threadIdx.x;
    // First element of this block's 2-segment window.
    unsigned int base = 2 * blockDim.x * blockIdx.x;
    // Unroll 2: fold segment 1 into segment 0.
    if(base + blockDim.x + tid < N){
        d_in[base + tid] += d_in[base + blockDim.x + tid];
    }
    __syncthreads();

    for(unsigned int span = blockDim.x / 2; span >= 1; span /= 2){
        if(tid < span)
            d_in[base + tid] += d_in[base + span + tid];
        // Barrier must be inside the loop: each iteration reads sums produced
        // by other threads in the previous one.
        __syncthreads();
    }
    if(tid == 0){
        d_out[blockIdx.x] = d_in[base];
    }
}

// 4x-unrolled reduction: each block folds four blockDim.x-wide segments into
// the first, then tree-reduces in place. Launch with grid/4 blocks.
// Writes the per-block partial sum to d_out[blockIdx.x].
template<typename T>
__global__ void reduce_sum_unroll4(T *d_out, T *d_in, const unsigned int N){
    unsigned int tid = threadIdx.x;
    // First element of this block's 4-segment window.
    unsigned int base = 4 * blockDim.x * blockIdx.x;
    // Unroll 4: fold segments 1..3 into segment 0.
    if(base + 3 * blockDim.x + tid < N){
        d_in[base + tid] += d_in[base + blockDim.x + tid]
                          + d_in[base + 2 * blockDim.x + tid]
                          + d_in[base + 3 * blockDim.x + tid];
    }
    __syncthreads();

    for(unsigned int span = blockDim.x / 2; span >= 1; span /= 2){
        if(tid < span)
            d_in[base + tid] += d_in[base + span + tid];
        // Barrier inside the loop to order cross-thread reads after writes.
        __syncthreads();
    }
    if(tid == 0){
        d_out[blockIdx.x] = d_in[base];
    }
}

// 8x-unrolled reduction: each block folds eight blockDim.x-wide segments of
// d_in into the first, then tree-reduces that segment in place. Launch with
// grid/8 blocks; the per-block partial sum is written to d_out[blockIdx.x].
// Assumes blockDim.x is a power of two.
template <typename T>
__global__ void reduce_sum_unroll8(T *d_out, T *d_in, const unsigned int n)
{
    const unsigned int tid  = threadIdx.x;
    const unsigned int base = blockIdx.x * blockDim.x * 8;
    const unsigned int gidx = base + tid;

    // This block's window into the input.
    T *block_data = d_in + base;

    // Unroll 8: accumulate the eight strided segments into the first one.
    if (gidx + 7 * blockDim.x < n)
    {
        T s = d_in[gidx];
        s += d_in[gidx + blockDim.x];
        s += d_in[gidx + 2 * blockDim.x];
        s += d_in[gidx + 3 * blockDim.x];
        s += d_in[gidx + 4 * blockDim.x];
        s += d_in[gidx + 5 * blockDim.x];
        s += d_in[gidx + 6 * blockDim.x];
        s += d_in[gidx + 7 * blockDim.x];
        d_in[gidx] = s;
    }

    __syncthreads();

    // In-place tree reduction over the first segment; barrier every level.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
            block_data[tid] += block_data[tid + stride];
        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum.
    if (tid == 0)
        d_out[blockIdx.x] = block_data[0];
}


// 8x-unrolled reduction with a manually unrolled tree stage and a warp-level
// finish. Launch with grid/8 blocks; per-block partial sum goes to
// g_odata[blockIdx.x]. Precondition: blockDim.x is a power of two and >= 64
// (the warp stage reads idata[tid+32]).
template <typename T>
__global__ void reduce_sum_warp_unroll8(T *g_odata, T *g_idata, const unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    T *idata = g_idata + blockIdx.x * blockDim.x * 8;

    // unrolling 8: fold the eight strided segments into the first one
    if (idx + 7 * blockDim.x < n)
    {
        T a1 = g_idata[idx];
        T a2 = g_idata[idx + blockDim.x];
        T a3 = g_idata[idx + 2 * blockDim.x];
        T a4 = g_idata[idx + 3 * blockDim.x];
        T b1 = g_idata[idx + 4 * blockDim.x];
        T b2 = g_idata[idx + 5 * blockDim.x];
        T b3 = g_idata[idx + 6 * blockDim.x];
        T b4 = g_idata[idx + 7 * blockDim.x];
        g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }

    __syncthreads();
    // Manually unrolled tree reduction; the blockDim.x guards keep smaller
    // blocks from running out-of-range steps.
    if (blockDim.x >= 1024 && threadIdx.x < 512)
        idata[threadIdx.x] += idata[threadIdx.x + 512];
    __syncthreads();
    if (blockDim.x >= 512 && threadIdx.x < 256)
        idata[threadIdx.x] += idata[threadIdx.x + 256];
    __syncthreads();
    if (blockDim.x >= 256 && threadIdx.x < 128)
        idata[threadIdx.x] += idata[threadIdx.x + 128];
    __syncthreads();
    if (blockDim.x >= 128 && threadIdx.x < 64)
        idata[threadIdx.x] += idata[threadIdx.x + 64];
    __syncthreads();
    // Last warp: with independent thread scheduling (Volta+), warp lanes are
    // not guaranteed lock-step, so volatile alone is insufficient —
    // __syncwarp() is required between dependent steps.
    if (threadIdx.x < 32)
    {
        volatile T *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32]; __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 16]; __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}


// 8x-unrolled reduction staged through shared memory: each thread privately
// sums its eight strided elements, the per-thread values are tree-reduced in
// SMEM, and thread 0 writes the block's partial sum to d_out[blockIdx.x].
// d_in is read-only here (the original wrote partial sums back to global
// memory only to reload them). Launch with grid/8 blocks and blockDim.x ==
// BLOCK; precondition: blockDim.x is a power of two and >= 64.
template <typename T>
__global__ void reduce_sum_warp_smem_unroll8(T *d_out, T *d_in, const unsigned int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
    __shared__ T smem[BLOCK];

    // Per-thread 8-way partial sum; tail threads fall back to a single
    // (guarded) element, and fully out-of-range threads contribute zero.
    T val = T();
    if (idx + 7 * blockDim.x < n)
    {
        T a1 = d_in[idx];
        T a2 = d_in[idx + blockDim.x];
        T a3 = d_in[idx + 2 * blockDim.x];
        T a4 = d_in[idx + 3 * blockDim.x];
        T b1 = d_in[idx + 4 * blockDim.x];
        T b2 = d_in[idx + 5 * blockDim.x];
        T b3 = d_in[idx + 6 * blockDim.x];
        T b4 = d_in[idx + 7 * blockDim.x];
        val = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }
    else if (idx < n)
    {
        // NOTE(review): like the original, the tail path only picks up the
        // first segment's element; elements idx+k*blockDim.x (k>=1) of a
        // partial window are dropped. Fine when n is a multiple of
        // 8*blockDim.x, as in this file's launches.
        val = d_in[idx];
    }
    // Cache the block's per-thread partials in shared memory.
    smem[tid] = val;
    __syncthreads();

    // Manually unrolled tree reduction in shared memory.
    if (blockDim.x >= 1024 && threadIdx.x < 512)
        smem[threadIdx.x] += smem[threadIdx.x + 512];
    __syncthreads();
    if (blockDim.x >= 512 && threadIdx.x < 256)
        smem[threadIdx.x] += smem[threadIdx.x + 256];
    __syncthreads();
    if (blockDim.x >= 256 && threadIdx.x < 128)
        smem[threadIdx.x] += smem[threadIdx.x + 128];
    __syncthreads();
    if (blockDim.x >= 128 && threadIdx.x < 64)
        smem[threadIdx.x] += smem[threadIdx.x + 64];
    __syncthreads();
    // Last warp: __syncwarp() between dependent steps — volatile alone is not
    // safe under Volta+ independent thread scheduling.
    if (threadIdx.x < 32)
    {
        volatile T *vmem = smem;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32]; __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 16]; __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }

    if (tid == 0) d_out[blockIdx.x] = smem[0];
}

// Variant of reduce_sum_warp_unroll8 with the block size as a template
// parameter: the BlockSize guards are compile-time constants, so the dead
// branches of the unrolled tree stage are eliminated by the compiler.
// Launch with grid/8 blocks and blockDim.x == BlockSize; precondition:
// BlockSize is a power of two and >= 64.
template <int BlockSize, typename T>
__global__ void reduce_sum_warp_unroll8_opt(T *g_odata, T *g_idata, const unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;

    // convert global data pointer to the local pointer of this block
    T *idata = g_idata + blockIdx.x * blockDim.x * 8;

    // unrolling 8: fold the eight strided segments into the first one
    if (idx + 7 * blockDim.x < n)
    {
        T a1 = g_idata[idx];
        T a2 = g_idata[idx + blockDim.x];
        T a3 = g_idata[idx + 2 * blockDim.x];
        T a4 = g_idata[idx + 3 * blockDim.x];
        T b1 = g_idata[idx + 4 * blockDim.x];
        T b2 = g_idata[idx + 5 * blockDim.x];
        T b3 = g_idata[idx + 6 * blockDim.x];
        T b4 = g_idata[idx + 7 * blockDim.x];
        g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
    }

    __syncthreads();
    // BlockSize is known at compile time; untaken steps compile away.
    if (BlockSize >= 1024 && threadIdx.x < 512)
        idata[threadIdx.x] += idata[threadIdx.x + 512];
    __syncthreads();
    if (BlockSize >= 512 && threadIdx.x < 256)
        idata[threadIdx.x] += idata[threadIdx.x + 256];
    __syncthreads();
    if (BlockSize >= 256 && threadIdx.x < 128)
        idata[threadIdx.x] += idata[threadIdx.x + 128];
    __syncthreads();
    if (BlockSize >= 128 && threadIdx.x < 64)
        idata[threadIdx.x] += idata[threadIdx.x + 64];
    __syncthreads();

    // Last warp: __syncwarp() between dependent steps — required on Volta+
    // where warp lanes may not execute in lock-step.
    if (threadIdx.x < 32)
    {
        volatile T *vmem = idata;
        vmem[threadIdx.x] += vmem[threadIdx.x + 32]; __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 16]; __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 8];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 4];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 2];  __syncwarp();
        vmem[threadIdx.x] += vmem[threadIdx.x + 1];
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}

// Host wrapper: copies h_in[0..N) to the device, launches the reduction
// kernel selected by `mode`, and finishes the per-block partial sums on the
// CPU. Returns the total sum.
//
// Fix: the final CPU pass previously always summed grid/8 partials, which is
// only correct for the 8x-unrolled modes — INTERLEAVE/NEIGHBORED produce
// `grid` partials and UNROLL2/UNROLL4 produce grid/2 and grid/4. The actual
// partial count is now tracked per mode in `out_blocks`.
template<typename T>
T reduce_sum_cuda(T *h_in, const unsigned int N, REDUCE_MODE mode){
    T *d_in, *d_out;
    const int grid = (N + BLOCK - 1) / BLOCK;
    // Number of per-block partial sums the selected kernel writes.
    int out_blocks = grid;
    T *h_out = new T[grid];
    size_t nbytes = sizeof(T) * N;
    size_t obytes = sizeof(T) * grid;
    cudaMalloc(reinterpret_cast<void**>(&d_in), nbytes);
    cudaMalloc(reinterpret_cast<void**>(&d_out), obytes);
    cudaMemcpy(d_in, h_in, nbytes, cudaMemcpyHostToDevice);
    T sum = T();
    switch (mode) {
        case INTERLEAVE:
            reduce_sum_interleave<T><<<grid,BLOCK>>>(d_out,d_in,N);
            break;
        case NEIGHBORED:
            cout<<"NEIGHBORED mode\n";
            reduce_sum_neighbored<T><<<grid,BLOCK>>>(d_out,d_in,N);
            break;
        case UNROLL2:
            cout<<"UNROLL2 mode\n";
            out_blocks = grid / 2;
            reduce_sum_unroll2<T><<<out_blocks,BLOCK>>>(d_out, d_in, N);
            break;
        case UNROLL4:
            cout<<"UNROLL4 mode\n";
            out_blocks = grid / 4;
            reduce_sum_unroll4<T><<<out_blocks,BLOCK>>>(d_out, d_in, N);
            break;
        case UNROLL8:
            cout<<"UNROLL8 mode\n";
            out_blocks = grid / 8;
            reduce_sum_unroll8<T><<<out_blocks,BLOCK>>>(d_out, d_in, N);
            break;
        case WARP_UNROLL8:
            cout<<"WARP UNROLL8 mode\n";
            out_blocks = grid / 8;
            reduce_sum_warp_unroll8<T><<<out_blocks,BLOCK>>>(d_out, d_in, N);
            break;
        case WARP_UNROLL8_OPT:
            cout<<"WARP UNROLL8 OPT mode\n";
            out_blocks = grid / 8;
            // Dispatch the compile-time block size matching the runtime BLOCK.
            switch (BLOCK)
            {
                case 1024:
                    reduce_sum_warp_unroll8_opt<1024><<<out_blocks, BLOCK>>>(d_out, d_in, N);
                    break;
                case 512:
                    reduce_sum_warp_unroll8_opt<512><<<out_blocks, BLOCK>>>(d_out, d_in, N);
                    break;
                case 256:
                    reduce_sum_warp_unroll8_opt<256><<<out_blocks, BLOCK>>>(d_out, d_in, N);
                    break;
                case 128:
                    reduce_sum_warp_unroll8_opt<128><<<out_blocks, BLOCK>>>(d_out, d_in, N);
                    break;
                case 64:
                    reduce_sum_warp_unroll8_opt<64><<<out_blocks, BLOCK>>>(d_out, d_in, N);
                    break;
            }
            break;
        case WARP_SMEM_UNROLL8:
            cout<<"WARP with 8";
            out_blocks = grid / 8;
            reduce_sum_warp_smem_unroll8<T><<<out_blocks,BLOCK>>>(d_out, d_in, N);
            break;
        default:
            break;
    }
    // Surface launch/execution errors before trusting the results.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        cerr << "CUDA error: " << cudaGetErrorString(err) << "\n";
    cudaDeviceSynchronize();
    // Copy only the partials actually produced, then finish on the CPU.
    cudaMemcpy(h_out, d_out, sizeof(T) * out_blocks, cudaMemcpyDeviceToHost);
    sum = reduce_sum_cpu(h_out, out_blocks);
    cudaFree(d_in);
    cudaFree(d_out);
    delete [] h_out;
    return sum;
}
// Driver: reduce 2^24 ones on CPU and GPU and compare the results.
int main(){
    const unsigned int N = 1 << 24;
    int *h_in = new int[N];
    fill(h_in, h_in + N, 1);                 // known answer: sum == N
    int sum_cpu = reduce_sum_cpu(h_in, N);
    int sum_gpu1 = reduce_sum_cuda(h_in, N); // default mode (see reduce_ops.h)
    int sum_gpu = reduce_sum_cuda(h_in, N, WARP_UNROLL8_OPT);
    // Integer sums: compare exactly; a float tolerance is meaningless here.
    // Also verify the default-mode result, which was previously discarded.
    if (sum_cpu == sum_gpu && sum_cpu == sum_gpu1){
        cout<<"计算正确\n";
    }else{
        cout<<"CPU = "<<sum_cpu<<" GPU = "<<sum_gpu<<" 计算错误\n";
    }

    delete [] h_in;
    return 0;
}
