// #include "error.cuh"
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#include <cmath>
#include "stdio.h"

const int N = 1024 * 1024 * 100;   // number of input elements to reduce
const int M = sizeof(double) * N;  // byte size of the input array (~800 MB)
const int NUM_REAPEATS = 20;       // timing repetitions (sic: "REPEATS")
const int BLOCK_SIZE = 1024;       // threads per block; must be a power of two — the kernels' halving loops assume it

// CAS-based atomic add for doubles, for GPUs without the native
// atomicAdd(double*, double) (which requires SM60+).
// Retries until the compare-and-swap observes an unchanged value. The loop
// condition compares the raw 64-bit integer images, so it terminates even if
// the stored value is NaN (a double compare of NaN != NaN would spin forever).
// The old value is discarded; callers only need the accumulation side effect.
__device__ void atomicAdd_double(double* address, double value) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Reinterpret, add, reinterpret back; swap in only if *address still
        // holds `assumed`. atomicCAS returns the value found at the address.
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(__longlong_as_double(assumed) + value));
    } while (assumed != old);
}




// Serial CPU reference reduction: adds the sum of h_x[0..N-1] onto h_y[0].
// Note: accumulates into the existing value of h_y[0]; the caller is expected
// to zero it first (timing() does so before every run).
void reduceCpu(
    const double *h_x,
    double *h_y,
    const int N) {

    double acc = h_y[0];
    for (int i = 0; i != N; ++i) {
        acc += h_x[i];
    }
    h_y[0] = acc;
}

// Block-level reduction: each block sums its BLOCK_SIZE elements in dynamic
// shared memory via a tree reduction, then thread 0 adds the block total into
// d_y[0] with one CAS-based atomic per block.
// Launch requirements: blockDim.x must be a power of two (the halving loop
// assumes it) and dynamic shared memory of blockDim.x * sizeof(double).
__global__ void reduceAtomic(
    const double *d_x,
    double *d_y,
    const int N) {

    const int n = threadIdx.x + blockDim.x * blockIdx.x;  // global element index
    extern __shared__ double s_y[];                       // one slot per thread, sized at launch
    s_y[threadIdx.x] = n < N ? d_x[n] : 0.0;              // pad the grid tail with zeros
    __syncthreads();
    // Tree reduction: the active span halves each pass. The barrier sits
    // outside the `if` so every thread in the block reaches it.
    for (int offset = blockDim.x >> 1; offset > 0; offset >>= 1) {
        if (threadIdx.x < offset) s_y[threadIdx.x] += s_y[threadIdx.x + offset];
        __syncthreads();
    }
    if (threadIdx.x == 0) atomicAdd_double(&(d_y[0]), s_y[0]);
}

// Like reduceAtomic, but switches to the cheaper __syncwarp() once the active
// span fits inside one warp: shared-memory tree down to 32 partials, then a
// warp-local tree. Thread 0 merges the block total into d_y[0].
// Launch requirements: blockDim.x a power of two (>= 64) and dynamic shared
// memory of blockDim.x * sizeof(double).
__global__ void reduceWarp(
    const double *d_x, 
    double *d_y, 
    const int N) {
    
    const int n = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ double s_y[];
    s_y[threadIdx.x] = n < N ? d_x[n] : 0.0;
    __syncthreads();
    // Block-wide passes: a full barrier is needed between write and read.
    for (int offset = blockDim.x >> 1; offset >= 32; offset >>= 1) {
        if (threadIdx.x < offset) s_y[threadIdx.x] += s_y[threadIdx.x + offset];
        __syncthreads();
    }
    // Only intra-warp dependencies remain for the last 32 partials; the warp
    // barrier suffices (and is still required under Volta+ independent
    // thread scheduling — no implicit lockstep).
    for (int offset = 16; offset > 0; offset >>= 1) {
        if (threadIdx.x < offset) s_y[threadIdx.x] += s_y[threadIdx.x + offset];
        __syncwarp();
    }
    if (threadIdx.x == 0) atomicAdd_double(&d_y[0], s_y[0]);
}

// Block reduction that finishes in registers: shared-memory tree down to 32
// partials (s_y[0..31]), then __shfl_down_sync butterflies within each warp.
// Every warp executes the shuffle stage, but only warp 0 holds meaningful
// partials; the other warps' results are simply discarded. Thread 0 merges
// the block total into d_y[0] with the CAS-based atomic.
// Launch requirements: blockDim.x a power of two (>= 64) and dynamic shared
// memory of blockDim.x * sizeof(double).
__global__ void reduceShfl(
    const double *d_x, 
    double *d_y, 
    const int N) {

    const int n = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ double s_y[];
    s_y[threadIdx.x] = n < N ? d_x[n] : 0.0;
    __syncthreads();
    for (int offset = blockDim.x >> 1; offset >= 32; offset >>= 1) {
        if (threadIdx.x < offset) s_y[threadIdx.x] += s_y[threadIdx.x + offset];
        __syncthreads();
    }
    double y = s_y[threadIdx.x];  // every thread loads; only warp 0's sum is used
    for (int offset = 16; offset > 0; offset >>= 1) {
        // Full-warp mask: all 32 lanes of each warp participate.
        y += __shfl_down_sync(0xffffffff, y, offset);
    }
    if (threadIdx.x == 0) atomicAdd_double(&d_y[0], y);
}

// Variant of reduceShfl: only the first warp loads its shared-memory partial
// before the shuffle stage; every other warp shuffles zeros (their results
// are discarded anyway). Thread 0 merges the block total into d_y[0] via the
// CAS-based double atomic.
// Launch requirements: blockDim.x a power of two (>= 64) and dynamic shared
// memory of blockDim.x * sizeof(double).
__global__ void reduceShfl_2(
    const double *d_x, 
    double *d_y, 
    const int N) {
    
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ double s_y[];

    // Stage this thread's element (zero past the grid tail), then barrier.
    s_y[threadIdx.x] = (gid < N) ? d_x[gid] : 0.0;
    __syncthreads();

    // Shared-memory tree reduction down to 32 partials in s_y[0..31].
    int span = blockDim.x >> 1;
    while (span >= 32) {
        if (threadIdx.x < span) {
            s_y[threadIdx.x] += s_y[threadIdx.x + span];
        }
        __syncthreads();
        span >>= 1;
    }

    // Warp 0 picks up the surviving partials; all other lanes contribute 0.
    double total = (threadIdx.x < 32) ? s_y[threadIdx.x] : 0.0;

    // Register-level reduction within each (full-mask) warp.
    for (int delta = 16; delta > 0; delta >>= 1) {
        total += __shfl_down_sync(0xffffffff, total, delta);
    }

    if (threadIdx.x == 0) atomicAdd_double(&d_y[0], total);
}

// Cooperative-groups version of reduceShfl: block-wide barriers go through
// g.sync(), and the final warp stage uses a 32-thread tile's shfl_down.
// Thread 0 merges the block total into d_y[0].
// NOTE(review): thread_group::size() is deprecated in newer CUDA toolkits in
// favor of num_threads(); kept as-is to match the file's current style.
// Launch requirements: blockDim.x a power of two (>= 64) and dynamic shared
// memory of blockDim.x * sizeof(double).
__global__ void reduceCG(
    const double *d_x, 
    double *d_y, 
    const int N) {
    
    namespace cg = cooperative_groups;
    cg::thread_block g = cg::this_thread_block();
    const int n = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ double s_y[];
    s_y[threadIdx.x] = n < N ? d_x[n] : 0.0;
    g.sync();
    // g.size() == blockDim.x for a thread_block group.
    for (int offset = g.size() >> 1; offset >= 32; offset >>= 1) {
        if (threadIdx.x < offset) s_y[threadIdx.x] += s_y[threadIdx.x + offset];
        g.sync();
    }
    double y = s_y[threadIdx.x];  // every thread loads; only warp 0's sum is used
    cg::thread_block_tile<32> g32 = cg::tiled_partition<32>(g);
    for (int offset = g32.size() >> 1; offset > 0; offset >>= 1) {
        y += g32.shfl_down(y, offset);
    }
    if (threadIdx.x == 0) atomicAdd_double(&d_y[0], y);
}

// Two-level reduction front end: each thread first accumulates a grid-stride
// slice of d_x into a register, then the block reduces those partials as in
// reduceCG and writes its total to d_y[blockIdx.x] — no atomics.
// Requirements: d_y must hold at least gridDim.x doubles; blockDim.x a power
// of two (>= 64); dynamic shared memory of blockDim.x * sizeof(double).
// A second launch over the per-block partials produces the final scalar sum.
__global__ void reduceParallelism(
    const double *d_x, 
    double *d_y, 
    const int N) {

    namespace cg = cooperative_groups;
    cg::thread_block g = cg::this_thread_block();
    int n = threadIdx.x + blockDim.x * blockIdx.x;
    extern __shared__ double s_y[];
    double y = 0.0;
    // Grid-stride loop: correct for any grid size relative to N.
    const int stride = blockDim.x * gridDim.x;
    for (; n < N; n+=stride) y += d_x[n];
    s_y[threadIdx.x] = y;
    g.sync();
    for (int offset = g.size() >> 1; offset >= 32; offset >>= 1) {
        if (threadIdx.x < offset) s_y[threadIdx.x] += s_y[threadIdx.x + offset];
        g.sync();
    }
    y = s_y[threadIdx.x];
    cg::thread_block_tile<32> g32 = cg::tiled_partition<32>(cg::this_thread_block());
    for (int offset = g32.size() >> 1; offset > 0; offset >>= 1) {
        y += g32.shfl_down(y, offset);
    }
    // One result per block; the caller reduces these in a second pass.
    if (threadIdx.x == 0) d_y[blockIdx.x] = y;
}

// Debug/demo combiner: thread 0 of the launched block serially sums the first
// 10240 entries of d_x into d_y[0]. Deliberately sequential — kept only as a
// slow baseline (see the commented-out call site in reduce(), method 5).
__global__ void fakeCombine(const double *d_x, 
    double *d_y) {
    if (threadIdx.x != 0) return;
    d_y[0] = 0;
    int i = 0;
    while (i < 10240) {
        d_y[0] += d_x[i];
        ++i;
    }
}

// Dispatches one reduction of the N-element input according to `method`:
//   0 -> CPU reference (h_x -> h_y)   4 -> cooperative groups
//   1 -> shared mem + atomic          5 -> two-pass grid-stride (uses d_tmp)
//   2 -> + warp sync                  6 -> shfl variant 2
//   3 -> + warp shuffle
// GPU methods accumulate into d_y[0]; method 5 first writes per-block
// partials into d_tmp (NUM_BLOCKS_PAR doubles), then folds them with a
// single-block second pass. Launch-configuration errors are reported on
// stderr via cudaGetLastError() — kernel launches themselves return nothing.
void reduce(
    const double *h_x,
    const double *d_x,
    double *h_y,
    double *d_y,
    double *d_tmp,
    const int N,
    const int method) {

    const int grid = (N - 1) / BLOCK_SIZE + 1;        // ceil(N / BLOCK_SIZE)
    const size_t smem = sizeof(double) * BLOCK_SIZE;  // one double per thread
    const int NUM_BLOCKS_PAR = 10240;                 // method-5 first-pass grid; must match d_tmp's capacity

    switch (method)
    {
    case 0:
        reduceCpu(h_x, h_y, N);
        break;
    case 1:
        reduceAtomic<<<grid, BLOCK_SIZE, smem>>>(d_x, d_y, N);
        break;
    case 2:
        reduceWarp<<<grid, BLOCK_SIZE, smem>>>(d_x, d_y, N);
        break;
    case 3:
        reduceShfl<<<grid, BLOCK_SIZE, smem>>>(d_x, d_y, N);
        break;
    case 4:
        reduceCG<<<grid, BLOCK_SIZE, smem>>>(d_x, d_y, N);
        break;
    case 5:
        // Pass 1: per-block partial sums into d_tmp[0..NUM_BLOCKS_PAR-1].
        reduceParallelism<<<NUM_BLOCKS_PAR, BLOCK_SIZE, smem>>>(d_x, d_tmp, N);
        // Pass 2: one block folds the partials into d_y[0].
        // (Previously a literal 1024 / duplicated 10240; BLOCK_SIZE == 1024,
        // so this launch is identical.)
        reduceParallelism<<<1, BLOCK_SIZE, smem>>>(d_tmp, d_y, NUM_BLOCKS_PAR);
        // very low
        //fakeCombine<<<1, 32>>>(d_tmp, d_y);
        break;
    case 6:
        reduceShfl_2<<<grid, BLOCK_SIZE, smem>>>(d_x, d_y, N);
        break;
    default:
        // Unknown method: do nothing, leave outputs untouched.
        break;
    }

    // Surface launch errors (bad grid/block/smem config) immediately instead
    // of letting them silently poison the next synchronizing call.
    if (method > 0) {
        const cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "reduce(method=%d): launch failed: %s\n",
                    method, cudaGetErrorString(err));
        }
    }
}

// Runs `reduce` NUM_REAPEATS times for the given method, timing each pass
// with CUDA events, then prints the computed sum and the mean +- stddev time
// in milliseconds. d_y is zeroed before every pass; for GPU methods
// (method > 0) the final result is copied back into h_y[0] at the end.
// cudaEventSynchronize doubles as the barrier that makes d_y's contents final.
void timing(
    const double *h_x,
    const double *d_x,
    double *h_y,
    double *d_y,
    double *d_tmp,
    const int N, 
    const int method) {
    
    float tSum = 0.0f;
    float t2Sum = 0.0f;  // sum of squares, for the stddev estimate
    for (int i = 0; i < NUM_REAPEATS; ++i) {
        h_y[0] = 0.0;
        // Reset the device accumulator so atomics start from zero each pass.
        cudaMemcpy(d_y, h_y, sizeof(double), cudaMemcpyHostToDevice);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start);
        reduce(h_x, d_x, h_y, d_y, d_tmp, N, method);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float elapsedTime;
        cudaEventElapsedTime(&elapsedTime, start, stop);
        tSum += elapsedTime;
        t2Sum += elapsedTime * elapsedTime;
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }
    if (method > 0) cudaMemcpy(h_y, d_y, sizeof(double), cudaMemcpyDeviceToHost);
    const float tAVG = tSum / NUM_REAPEATS;
    // Clamp the variance at zero: float cancellation in E[t^2] - E[t]^2 can
    // make it slightly negative, which would print the error bar as NaN.
    const float tVAR = fmaxf(t2Sum / NUM_REAPEATS - tAVG * tAVG, 0.0f);
    const float tERR = sqrtf(tVAR);
    printf("sum = %f \n", h_y[0]);
    printf("Time = %g +- %g ms.\n", tAVG, tERR);
}



// Fills an N-element array with 1.23, uploads it, and benchmarks every
// reduction variant. Expected sum for each method: 1.23 * N (up to float
// accumulation error in the CPU path).
int main() {
    double *h_x = new double[N];
    double h_y[1] = {0.0};
    for (int i = 0; i < N; i++) h_x[i] = 1.23;

    double *d_x, *d_y;
    cudaMalloc((void **)&d_x, M);
    cudaMalloc((void **)&d_y, sizeof(double));
    cudaMemcpy(d_x, h_x, M, cudaMemcpyHostToDevice);

    // Per-block partials for method 5 (its first pass launches 10240 blocks).
    double *d_tmp;
    cudaMalloc((void **)&d_tmp, sizeof(double) * 10240);

    printf("Using CPU:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 0);
    
    printf("Using shared mem:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 1);

    printf("Using shared mem and warp:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 2);

    printf("Using shared mem and shfl:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 3);
    
    printf("Using shared mem and shfl_2:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 6);

    printf("Using shared mem and cooperative groups:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 4);

    printf("Using shared mem, cooperative groups and parallelism:\n");
    timing(h_x, d_x, h_y, d_y, d_tmp, N, 5);

    delete[] h_x;
    cudaFree(d_x);
    cudaFree(d_y);   // fix: was allocated but never freed
    cudaFree(d_tmp);
    return 0;
}
