#include <stdio.h>

#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>

#include <driver_functions.h>

#include <thrust/scan.h>
#include <thrust/device_ptr.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>

#include "CycleTimer.h"
#include "../include/helper_cuda.h"

#define THREADS_PER_BLOCK 32

#define THREADS_PER_WRAP

/*
The versions that do NOT use block-shared memory are correct -> both the
in-place version and the Version2 variant produce correct results.
The block-shared-memory versions are incorrect, because they would require
communication across blocks (one block cannot read another block's shared memory).
*/

void check_up_sweep_stage(int *, int, int, int);

// helper function to round an integer up to the next power of 2
// Rounds an integer up to the next power of 2 (returns n unchanged when n
// is already a power of 2).  Works by smearing the highest set bit of n-1
// down through all lower bit positions, then adding 1.
static inline int nextPow2(int n)
{
    int v = n - 1;
    // Shifts 1, 2, 4, 8, 16 cover every bit of a 32-bit int.
    for (int shift = 1; shift <= 16; shift <<= 1)
        v |= v >> shift;
    return v + 1;
}

// One level of the Blelloch scan up-sweep (reduction) phase, in place.
//
// Expected launch: 1-D grid covering indices [0, N), where N is the
// power-of-two padded element count.  Each thread whose flat index is a
// multiple of two_dplus1 folds its left subtree sum into the right slot:
//   input[k + 2^(d+1) - 1] += input[k + 2^d - 1]
__global__ void cuda_scan_up(int *input, int N, int two_dplus1, int two_d)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard: grids rarely divide N evenly.  Without this, a thread
    // with index >= N that happens to be a multiple of two_dplus1 writes
    // input[index + two_dplus1 - 1], which can land well past the end of
    // the allocation.
    if (index >= N)
        return;

    if (index % two_dplus1 == 0)
        input[index + two_dplus1 - 1] += input[index + two_d - 1];
}

// Up-sweep step, version 2: exactly one thread per ACTIVE tree node (see
// exclusive_scan_version2), so no `index % two_dplus1` test is needed.
// Thread i updates node index_a = two_dplus1 * (i + 1) - 1 by adding its
// left child, index_b = index_a - two_d.
__global__ void cuda_scan_up_version2(int* input, int N, int two_dplus1, int two_d)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int index_a = two_dplus1 * (index + 1) - 1;
    // Defensive bound: never triggers with the exact thread counts used by
    // exclusive_scan_version2, but keeps an oversized grid from writing
    // out of bounds.
    if (index_a >= N)
        return;
    int index_b = index_a - two_d;
    input[index_a] += input[index_b];
}

// Down-sweep step, version 2: exactly one thread per ACTIVE tree node (see
// exclusive_scan_version2).  Thread i handles node
// index_a = two_dplus1 * (i + 1) - 1: the left child receives the parent's
// running prefix, and the parent accumulates the old left-child sum.
__global__ void cuda_scan_down_version2(int* input, int N, int two_dplus1, int two_d)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int index_a = two_dplus1 * (index + 1) - 1;
    // Defensive bound: never triggers with the exact thread counts used by
    // exclusive_scan_version2, but keeps an oversized grid from writing
    // out of bounds.
    if (index_a >= N)
        return;
    int index_b = index_a - two_d;
    int temp = input[index_a];
    input[index_a] += input[index_b];
    input[index_b] = temp;
}

// Work-efficient (Blelloch) exclusive scan over `result`, in place.
//
// Launches exactly one thread per active tree node at every level, so the
// kernels never waste threads on an `index % two_dplus1` test.  `input` and
// `i` are unused here but kept for signature parity with the other scan
// variants and their debug hooks.
void exclusive_scan_version2(int *input, int N, int *result, int i)
{
    // The scan tree needs a power-of-two element count; callers allocate
    // `result` with nextPow2 elements (see cudaScan).
    const int num_elements = nextPow2(N);

    // Up-Sweep Phase: level with stride two_d needs num_elements/two_dplus1
    // threads, starting at num_elements/2 and halving each level.
    int thread_to_launch = num_elements / 2;
    for (int two_d = 1; two_d <= num_elements / 2; two_d *= 2)
    {
        int two_dplus1 = two_d * 2;
        // Cap threads per block at the 1024 hardware limit (e.g. NVIDIA
        // 3080); counts are powers of two, so the division is exact.
        int num_block = 1;
        int num_thread = thread_to_launch;
        if (thread_to_launch > 1024)
        {
            num_block = thread_to_launch / 1024;
            num_thread = 1024;
        }
        cuda_scan_up_version2<<<num_block, num_thread>>>(result, num_elements, two_dplus1, two_d);
        // Same-stream launches execute in order, so no per-level device
        // sync is required; just surface launch-configuration errors.
        checkCudaErrors(cudaGetLastError());
        thread_to_launch /= 2;
    }

    // Replace the tree root (the total sum) with the identity element
    // before the down-sweep.
    checkCudaErrors(cudaMemset(result + num_elements - 1, 0, sizeof(int)));

    // Down-Sweep Phase: starts with a single thread at the root and doubles
    // the thread count each level.
    thread_to_launch = 1;
    for (int two_d = num_elements / 2; two_d >= 1; two_d /= 2)
    {
        int num_block = 1;
        int num_thread = thread_to_launch;
        if (thread_to_launch > 1024)
        {
            num_block = thread_to_launch / 1024;
            num_thread = 1024;
        }
        int two_dplus1 = two_d * 2;
        cuda_scan_down_version2<<<num_block, num_thread>>>(result, num_elements, two_dplus1, two_d);
        checkCudaErrors(cudaGetLastError());
        thread_to_launch *= 2;
    }

    // Single sync at the end so in-kernel faults are reported here rather
    // than at some arbitrary later CUDA call.
    checkCudaErrors(cudaDeviceSynchronize());
}

// Up-sweep step staged through per-block shared memory.
//
// KNOWN BROKEN (see the file-header note): once two_dplus1 spans more than
// one block, an update needs a value that lives in ANOTHER block's shared
// array, and CUDA provides no way to read it.  Kept for reference only.
//
// NOTE(review): the early `return` for index >= N happens before
// __syncthreads(); when N is not a multiple of the block size the barrier
// is reached by only part of the block, which is undefined behavior.
__global__ void cuda_scan_up_shared(int *input, int N, int two_dplus1, int two_d)
{
    // Flat global index (starts from 0).
    // printf("blockDim.x : %d \n", blockDim.x);
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // Map both tree positions touched by this thread to the block that
    // owns them and the offset inside that block's shared array.
    int temp_index_two_dplus1 = index + two_dplus1 - 1;
    int temp_index_two_dplus1_blockID = (temp_index_two_dplus1) / THREADS_PER_BLOCK;
    int temp_index_two_dplus1_threadID = (temp_index_two_dplus1) % THREADS_PER_BLOCK;
    int temp_index_two_d = index + two_d - 1;
    int temp_index_two_d_blockID = temp_index_two_d / THREADS_PER_BLOCK;
    int temp_index_two_d_threadID = temp_index_two_d % THREADS_PER_BLOCK;

    if (index >= N)
        return;

    __shared__ int support[THREADS_PER_BLOCK + 1];

    // Stage this block's slice of the array in shared memory.
    support[threadIdx.x] = input[index];
    __syncthreads();

   
    if (index % two_dplus1 == 0)
    {
        // printf("index is %d ", index);
        // Case 1: both positions fall inside the same block -> no problem.
        if (temp_index_two_dplus1_blockID == temp_index_two_d_blockID)
        {
            printf("temp_index_two_dplus1_blockID : %d, support[temp_index_two_d_threadID] : %d, support[temp_index_two_dplus1_threadID] : %d \n", temp_index_two_d_blockID, support[temp_index_two_d_threadID], support[temp_index_two_dplus1_threadID]);
            support[temp_index_two_dplus1_threadID] += support[temp_index_two_d_threadID];
        }
        // Case 2: cross-block communication -- this is the broken case.
        else
        {
            printf("temp_index_two_dplus1_blockID != temp_index_two_d_blockID \n");
            printf("temp_index_two_d_blockID : %d, temp_index_two_dplus1_blockID: %d \n", temp_index_two_d_blockID, temp_index_two_dplus1_blockID);
            // Why the branch below may never run: e.g. index = 0 gives
            // temp_index_two_d_blockID = 0 and temp_index_two_dplus1_blockID = 1,
            // but the block that owns `index` cannot reach block 1's
            // shared memory from here.
            if(blockIdx.x == temp_index_two_dplus1_blockID)
            {
                printf("Before blockIdx.x : %d, support[temp_index_two_dplus1_threadID] : %d, input[temp_index_two_d] : %d \n", blockIdx.x, support[temp_index_two_dplus1_threadID], input[temp_index_two_d]);
                support[temp_index_two_dplus1_threadID] += input[temp_index_two_d];
                printf("After blockIdx.x : %d, support[temp_index_two_dplus1_threadID] : %d, input[temp_index_two_d] : %d \n", blockIdx.x, support[temp_index_two_dplus1_threadID], input[temp_index_two_d]);
            }
        }
    }


    __syncthreads();

    // Write the staged slice back to global memory.
    input[index] = support[threadIdx.x];
}

// One level of the Blelloch down-sweep, non-in-place variant: reads from
// `input`, writes to `output`.  Currently unused -- exclusive_scan calls
// cuda_scan_down_inplace instead (see the commented call site there).
__global__ void cuda_scan_down(int *input, int *output, int N, int two_dplus1, int two_d)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // Bounds guard: without it, an overshooting thread whose index is a
    // multiple of two_dplus1 writes output[index + two_dplus1 - 1], which
    // can land past the end of the allocation.
    if (index >= N)
        return;

    if (index % two_dplus1 == 0)
    {
        // Left child receives the parent's running prefix; parent
        // accumulates the old left-subtree sum.
        output[index + two_d - 1] = input[index + two_dplus1 - 1];
        output[index + two_dplus1 - 1] = input[index + two_dplus1 - 1] + input[index + two_d - 1];
    }
}

// One level of the Blelloch down-sweep, operating in place on `input`.
// A thread sitting on a multiple of two_dplus1 moves the parent's running
// prefix into the left child and adds the old left-subtree sum into the
// parent slot.
__global__ void cuda_scan_down_inplace(int *input, int N, int two_dplus1, int two_d)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard clauses: skip threads past the padded length and threads that
    // are not on an active tree node at this level.
    if (tid >= N)
        return;
    if (tid % two_dplus1 != 0)
        return;

    int left = tid + two_d - 1;
    int right = tid + two_dplus1 - 1;

    int left_value = input[left];
    input[left] = input[right];
    input[right] += left_value;
}

// Down-sweep step staged through per-block shared memory.
//
// KNOWN BROKEN (see the file-header note): the `% THREADS_PER_BLOCK`
// wrap-around maps tree positions that belong to OTHER blocks back into
// this block's shared array, so for two_dplus1 > THREADS_PER_BLOCK the
// swap touches the wrong slots.  Kept for reference only.
//
// NOTE(review): the early `return` for index >= N precedes
// __syncthreads(); when N is not a multiple of the block size the barrier
// is divergent, which is undefined behavior.
__global__ void cuda_scan_down_inplace_shared(int *input, int N, int two_dplus1, int two_d)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= N)
        return;

    __shared__ int support[THREADS_PER_BLOCK];

    // Stage this block's slice of the array in shared memory.
    support[threadIdx.x] = input[index];
    __syncthreads();

    if (index % two_dplus1 == 0)
    {
        // Both offsets are wrapped into this block's shared array --
        // incorrect whenever the true position lives in another block.
        int temp_index_two_dplus1 = (index + two_dplus1 - 1) % THREADS_PER_BLOCK;
        int temp_index_two_d = (index + two_d - 1) % THREADS_PER_BLOCK;

        int temp = support[temp_index_two_d];
        support[temp_index_two_d] = support[temp_index_two_dplus1];
        support[temp_index_two_dplus1] = temp + support[temp_index_two_dplus1];
    }

    // Write the staged slice back to global memory.
    input[index] = support[threadIdx.x];
}

// __device__ void cuda_scan_down_shared(int *input, int *output, int N, int two_dplus1, int two_d)
// {
//     int index = blockIdx.x * blockDim.x + threadIdx.x;

//     if (index >= N)
//         return;

//     __shared__ int support[THREADS_PER_BLOCK];
//     support[threadIdx.x] = input[index];
//     __syncthreads();

//     if (threadIdx.x % two_dplus1 == 0)
//     {
//         output[index + two_d - 1] = support[threadIdx.x + two_dplus1 - 1];
//         output[index + two_dplus1 - 1] = support[threadIdx.x + two_dplus1 - 1] + support[threadIdx.x + two_d - 1];
//     }
// }

// exclusive_scan --
//
// Implementation of an exclusive scan on global memory array `input`,
// with results placed in global memory `result`.
//
// N is the logical size of the input and output arrays, however
// students can assume that both the start and result arrays were
// allocated with next power-of-two sizes as described by the comments
// in cudaScan().  This is helpful, since your parallel scan
// will likely write to memory locations beyond N, but of course not
// greater than N rounded up to the next power of 2.
//
// Also, as per the comments in cudaScan(), you can implement an
// "in-place" scan, since the timing harness makes a copy of input and
// places it in result
void exclusive_scan(int *input, int N, int *result)
{
    // Work-efficient (Blelloch) exclusive scan, in place on `result`.
    // `input` is unused: the timing harness pre-copies the input into
    // `result` (see cudaScan), so the in-place variant suffices.
    //
    // Up-Sweep recurrence (d = 0 .. log2(N)-1, i.e. two_d = 1 .. N/2):
    //   forall k that is a multiple of 2^(d+1):
    //       a[k + 2^(d+1) - 1] += a[k + 2^d - 1]
    constexpr int num_threads_per_block = THREADS_PER_BLOCK;
    // The scan tree operates on the power-of-two padded length, so the
    // grid must cover num_elements threads, not just N: sizing it from N
    // leaves tree nodes in the padded tail without a thread when N is not
    // a power of two.
    const int num_elements = nextPow2(N);
    const int num_blocks = (num_elements + num_threads_per_block - 1) / num_threads_per_block;

    // Up-Sweep Phase: build subtree sums bottom-up.
    for (int two_d = 1; two_d <= num_elements / 2; two_d *= 2)
    {
        int two_dplus1 = two_d * 2;
        cuda_scan_up<<<num_blocks, num_threads_per_block>>>(result, num_elements, two_dplus1, two_d);
        checkCudaErrors(cudaDeviceSynchronize());
    }

    // Replace the tree root (the total sum) with the identity element.
    checkCudaErrors(cudaMemset(result + num_elements - 1, 0, sizeof(int)));

    // Down-Sweep Phase: push prefix sums back down the tree.
    for (int two_d = num_elements / 2; two_d >= 1; two_d /= 2)
    {
        int two_dplus1 = two_d * 2;
        cuda_scan_down_inplace<<<num_blocks, num_threads_per_block>>>(result, num_elements, two_dplus1, two_d);
        checkCudaErrors(cudaDeviceSynchronize());
    }
}

// In-place Blelloch exclusive scan over `result`, launching one thread per
// element at every level (threads not on an active tree node do nothing).
// `input` and `i` are unused; they are kept for signature parity with the
// other scan variants.
void exclusive_scan_inplace(int *input, int N, int *result, int i)
{
    // The scan tree needs a power-of-two element count; `result` is
    // allocated with nextPow2 elements by the caller (see cudaScan).
    const int num_elements = nextPow2(N);
    // Cover the padded length so every tree node has a thread.
    const int num_blocks = (num_elements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;

    // Up-Sweep Phase
    for (int two_d = 1; two_d <= num_elements / 2; two_d *= 2)
    {
        int two_dplus1 = two_d * 2;
        cuda_scan_up<<<num_blocks, THREADS_PER_BLOCK>>>(result, num_elements, two_dplus1, two_d);
        checkCudaErrors(cudaDeviceSynchronize());
    }

    // Replace the tree root (the total sum) with the identity element.
    checkCudaErrors(cudaMemset(result + num_elements - 1, 0, sizeof(int)));

    // Down-Sweep Phase
    for (int two_d = num_elements / 2; two_d >= 1; two_d /= 2)
    {
        int two_dplus1 = two_d * 2;
        cuda_scan_down_inplace<<<num_blocks, THREADS_PER_BLOCK>>>(result, num_elements, two_dplus1, two_d);
        checkCudaErrors(cudaDeviceSynchronize());
    }
}

//
// cudaScan --
//
// This function is a timing wrapper around the student's
// implementation of scan - it copies the input to the GPU
// and times the invocation of the exclusive_scan() function
// above. Students should not modify it.
double cudaScan(int *inarray, int *end, int *resultarray, int i)
{
    // `i` is only consumed by the commented-out debug dump below --
    // presumably the benchmark-iteration index; confirm with the caller.
    int *device_result;
    int *device_input;
    int N = end - inarray;

    // This code rounds the arrays provided to exclusive_scan up
    // to a power of 2, but elements after the end of the original
    // input are left uninitialized and not checked for correctness.
    //
    // Student implementations of exclusive_scan may assume an array's
    // allocated length is a power of 2 for simplicity. This will
    // result in extra work on non-power-of-2 inputs, but it's worth
    // the simplicity of a power of two only solution.

    int rounded_length = nextPow2(end - inarray);

    cudaMalloc((void **)&device_result, sizeof(int) * rounded_length);
    cudaMalloc((void **)&device_input, sizeof(int) * rounded_length);

    // For convenience, both the input and output vectors on the
    // device are initialized to the input values. This means that
    // students are free to implement an in-place scan on the result
    // vector if desired.  If you do this, you will need to keep this
    // in mind when calling exclusive_scan from find_repeats.
    cudaMemcpy(device_input, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_result, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice);

    double startTime = CycleTimer::currentSeconds();

    // Currently benchmarks the version2 implementation; swap which call is
    // commented to time a different scan variant.
    // exclusive_scan(device_input, N, device_result);
    exclusive_scan_version2(device_input, N, device_result, i);

    // Wait for completion
    cudaDeviceSynchronize();
    double endTime = CycleTimer::currentSeconds();

    // Only the first N results are meaningful; the padded tail is ignored.
    cudaMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), cudaMemcpyDeviceToHost);

    // if (i == 0)
    // {
    //     std::cout << "Input : " << std::endl;
    //     for (int j = 0; j < (end - inarray); j++)
    //     {
    //         std::cout << inarray[j] << " ";
    //     }
    //     std::cout << std::endl;

    //     std::cout << "Output : " << std::endl;
    //     for (int j = 0; j < (end - inarray); j++)
    //     {
    //         std::cout << resultarray[j] << " ";
    //     }
    //     std::cout << std::endl;
    // }

    double overallDuration = endTime - startTime;
    return overallDuration;
}

// cudaScanThrust --
//
// Wrapper around the Thrust library's exclusive scan function
// As above in cudaScan(), this function copies the input to the GPU
// and times only the execution of the scan itself.
//
// Students are not expected to produce implementations that achieve
// performance that is competitive with the Thrust version, but it is fun to try.
// Times Thrust's exclusive_scan over the input range [inarray, end),
// copying the result back into resultarray.  Only the scan itself is
// inside the timed region; the H2D/D2H copies are excluded.
// Returns the elapsed time of the scan in seconds.
double cudaScanThrust(int *inarray, int *end, int *resultarray)
{
    int length = end - inarray;
    thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length);
    thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length);

    // Wrap the raw CUDA calls in checkCudaErrors, consistent with the rest
    // of this file -- an ignored copy failure would silently corrupt the
    // benchmark result.
    checkCudaErrors(cudaMemcpy(d_input.get(), inarray, length * sizeof(int), cudaMemcpyHostToDevice));

    double startTime = CycleTimer::currentSeconds();

    thrust::exclusive_scan(d_input, d_input + length, d_output);

    // Wait for the scan to finish before stopping the clock.
    checkCudaErrors(cudaDeviceSynchronize());
    double endTime = CycleTimer::currentSeconds();

    checkCudaErrors(cudaMemcpy(resultarray, d_output.get(), length * sizeof(int), cudaMemcpyDeviceToHost));

    thrust::device_free(d_input);
    thrust::device_free(d_output);

    double overallDuration = endTime - startTime;
    return overallDuration;
}

// Records every index i where input[i] == input[i + 1], appending the
// matching indices to `output` via an atomic counter.  Because slots are
// claimed atomically, the ORDER of indices in `output` is nondeterministic.
__global__ void find_repeat_atomic(int *input, int *output, int *count, int N)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;

    // The last valid pair is (N-2, N-1): the old `index >= N` guard let
    // index == N - 1 through, making `input[index + 1]` read past the end
    // of the array.
    if (index >= N - 1)
        return;

    if (input[index] == input[index + 1])
    {
        int old = atomicAdd(count, 1);
        output[old] = index;
    }
}

// find_repeats --
//
// Given an array of integers `device_input`, returns an array of all
// indices `i` for which `device_input[i] == device_input[i+1]`.
//
// Returns the total number of pairs found
// find_repeats (atomic-counter implementation, no scan) --
//
// Given device array `device_input` of `length` ints, writes to
// `device_output` every index i with device_input[i] == device_input[i+1]
// and returns how many such pairs were found.
//
// NOTE: indices land in device_output in nondeterministic order (atomic
// slot claiming); sort afterwards if callers need ascending order.
int find_repeats_no_scan(int *device_input, int length, int *device_output)
{
    // Pinned host staging buffer for the counter.
    int *count;
    checkCudaErrors(cudaHostAlloc((void **)(&count), sizeof(int) * 1, cudaHostAllocDefault));
    *count = 0;

    int *device_count;
    checkCudaErrors(cudaMalloc((void **)&device_count, sizeof(int)));
    checkCudaErrors(cudaMemcpy(device_count, count, sizeof(int), cudaMemcpyHostToDevice));

    // Use the TRUE length, not the power-of-two padded length: elements in
    // the padded tail are uninitialized and must not produce spurious
    // matches (the spec requires correct results for the actual length).
    const int num_blocks = (length + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;

    find_repeat_atomic<<<num_blocks, THREADS_PER_BLOCK>>>(device_input, device_output, device_count, length);
    checkCudaErrors(cudaDeviceSynchronize());

    // Bug fix: the kernel accumulates into device_count, so it must be
    // copied back before the host reads it.  Previously *count was read
    // without this copy and the function always returned 0.
    checkCudaErrors(cudaMemcpy(count, device_count, sizeof(int), cudaMemcpyDeviceToHost));
    int result = *count;

    checkCudaErrors(cudaFreeHost(count));
    checkCudaErrors(cudaFree(device_count));

    return result;
}

//
// cudaFindRepeats --
//
// Timing wrapper around find_repeats. You should not modify this function.
double cudaFindRepeats(int *input, int length, int *output, int *output_length)
{
    int *device_input;
    int *device_output;
    // Arrays are rounded up to a power of two so that scan-based
    // implementations may assume power-of-two allocations.
    int rounded_length = nextPow2(length);

    cudaMalloc((void **)&device_input, rounded_length * sizeof(int));
    cudaMalloc((void **)&device_output, rounded_length * sizeof(int));
    cudaMemcpy(device_input, input, length * sizeof(int), cudaMemcpyHostToDevice);

    cudaDeviceSynchronize();
    double startTime = CycleTimer::currentSeconds();

    int result = find_repeats_no_scan(device_input, length, device_output);

    cudaDeviceSynchronize();
    double endTime = CycleTimer::currentSeconds();

    // set output count and results array
    // (the full `length` ints are copied, but only the first
    // *output_length entries are meaningful)
    *output_length = result;
    cudaMemcpy(output, device_output, length * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(device_input);
    cudaFree(device_output);

    // NOTE(review): the double difference is narrowed to float here before
    // being returned as double -- harmless for timing, but it does lose
    // precision.  Left as-is since this harness must not be modified.
    float duration = endTime - startTime;
    return duration;
}

// Enumerates the CUDA devices on this machine and prints a short summary
// (SM count, global memory, compute capability) for each one.
void printCudaInfo()
{
    int numDevices = 0;
    cudaError_t err = cudaGetDeviceCount(&numDevices);

    printf("---------------------------------------------------------\n");
    printf("Found %d CUDA devices\n", numDevices);

    for (int dev = 0; dev < numDevices; dev++)
    {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("Device %d: %s\n", dev, props.name);
        printf("   SMs:        %d\n", props.multiProcessorCount);
        printf("   Global mem: %.0f MB\n",
               static_cast<float>(props.totalGlobalMem) / (1024 * 1024));
        printf("   CUDA Cap:   %d.%d\n", props.major, props.minor);
    }
    printf("---------------------------------------------------------\n");
}

// Debug helper: copies the first num_elements ints of device_res back to
// the host and prints them, e.g. to inspect the array between scan phases.
// num_blocks and num_threads_per_block are currently unused; they are kept
// so existing (commented-out) debug call sites still compile unchanged.
void check_up_sweep_stage(int *device_res, int num_elements, int num_blocks, int num_threads_per_block)
{
    int *host_temp_res;
    checkCudaErrors(cudaHostAlloc((void **)&host_temp_res, sizeof(int) * num_elements, cudaHostAllocDefault));

    // Wrap in checkCudaErrors, consistent with the rest of the file --
    // a failed copy here would print stale/uninitialized host memory.
    checkCudaErrors(cudaMemcpy(host_temp_res, device_res, num_elements * sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < num_elements; i++)
    {
        std::cout << host_temp_res[i] << " ";
    }
    std::cout << std::endl;

    checkCudaErrors(cudaFreeHost(host_temp_res));
}