#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <chrono>
#include <algorithm>

// GPU_CHECK: wrap every CUDA runtime call. On failure it reports the
// file, line, numeric error code and error string, then terminates the
// process. Kernel launches themselves return no status; check them with
// cudaGetLastError()/cudaDeviceSynchronize() wrapped in this macro.
#define GPU_CHECK(call)                               \
do                                                    \
{                                                     \
    const cudaError_t error_code = call;              \
    if (error_code != cudaSuccess)                    \
    {                                                 \
        printf("CUDA Error:\n");                      \
        printf("    File:       %s\n", __FILE__);     \
        printf("    Line:       %d\n", __LINE__);     \
        printf("    Error code: %d\n", error_code);   \
        printf("    Error text: %s\n",                \
            cudaGetErrorString(error_code));          \
        exit(1);                                      \
    }                                                 \
} while (0)

// 64-bit unsigned key type sorted throughout this file.
typedef unsigned long long ull;

// Threads per warp on all current NVIDIA GPUs.
#define WARP_SIZE 32
// Number of distinct values of one 4-bit radix digit (buckets per pass).
#define RANGE 16
// Number of largest elements each block keeps after its local sort.
#define TOPK 100

// Debug helper: print the first n elements of a device-side ull array
// on one line (device printf is serialized — debugging only).
__device__ void printff(ull *arr, int n) {
    for (int idx = 0; idx != n; ++idx)
        printf("%llu ", arr[idx]);
    printf("\n");
}

// Debug helper (int overload): print the first n elements of a
// device-side int array on one line.
__device__ void printff(int *arr, int n) {
    for (int idx = 0; idx != n; ++idx)
        printf("%d ", arr[idx]);
    printf("\n");
}


// One pass of 4-bit LSD counting sort over arr[0..n), keyed on bits
// [exp, exp+4) of each element. output, count and start are shared-memory
// scratch buffers; only the first RANGE entries of count/start are used.
// Must be called by ALL threads of the block together — it contains
// __syncthreads() barriers.
__device__ void countSort(ull* arr, int n, int exp, ull* output, int* count, int *start) {
    // Clear the per-digit counters in shared memory.
    for (int i = threadIdx.x; i < RANGE; i += blockDim.x)
        start[i] = count[i] = 0;
    __syncthreads();

    // Histogram: count occurrences of each 4-bit digit of the keys.
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        atomicAdd(&count[(arr[i] >> exp) & 15], 1);
    __syncthreads();

    // printff(count, 2);

    // Exclusive prefix-sum of the counts gives each digit its first slot
    // in the sorted output.



    // Thread 0 performs the scan and the scatter serially: counting sort
    // must be STABLE for the radix sort to be correct, and a serial
    // scatter preserves the input's relative order by construction.
    if(threadIdx.x == 0) {
        for (int i = threadIdx.x + 1; i < RANGE; i++)
            start[i] = count[i - 1] + start[i - 1];
        // printff(count, RANGE);
        // printff(start, RANGE);
        // Stable scatter: each element goes to its digit's next free slot.
        for (int i = 0; i < n; i++) {
            int index = (arr[i] >> exp) & 15;
            output[start[index]++] = arr[i];
        }
        // printff(arr, n);
        // printff(output, n);
    }
    __syncthreads();

    // Copy the pass result back so the next pass reads from arr.
    // NOTE(review): no trailing __syncthreads() here — correctness relies
    // on the barrier at the top of the NEXT call before arr is re-read.
    for (int i = threadIdx.x; i < n; i += blockDim.x) {
        arr[i] = output[i];   
    }
}

// Kernel: each block sorts its own 2048-element tile of arr ascending via
// 4-bit LSD radix sort, then writes its TOPK largest elements — in
// DESCENDING order — to d_result[blockIdx.x * TOPK ..].
// Expected launch: grid = ceil(n / 2048), 1-D blocks (32 threads used by
// the host code). Shared memory is statically allocated (~16.1 KB).
// NOTE(review): only the low 32 bits of the 64-bit keys participate
// (exp runs 0..28); assumes all inputs fit in 32 bits — confirm.
__global__ void radixSort(ull* arr, ull* d_result, int n) {
    __shared__ ull output[2048];
    // Fix: countSort only ever touches indices [0, RANGE); the previous
    // count[2048]/start[2048] wasted ~16 KB of shared memory per block,
    // hurting occupancy for no benefit.
    __shared__ int count[RANGE];
    __shared__ int start[RANGE];

    // `register` was deprecated (removed in C++17) and ignored by nvcc.
    int temp_result_length = min(2048, n - blockIdx.x * 2048);
    int small_len = min(temp_result_length, TOPK);

    // LSD radix sort: one counting-sort pass per 4-bit digit, low to high.
    for (int exp = 0; exp < 32; exp += 4) {
        countSort(arr + blockIdx.x * 2048, temp_result_length, exp, output, count, start);
    }

    // output[] holds the tile sorted ascending; emit the TOPK largest
    // values (from the tail) in descending order.
    for (int i = threadIdx.x; i < small_len; i += blockDim.x) {
        d_result[i + blockIdx.x * TOPK] = output[temp_result_length - i - 1];
    }
}

// Fill dst with nitems pseudo-random values in [0, nitems), seeding the
// generator with a fixed constant so every run is reproducible.
void initialize_data(ull *dst, unsigned int nitems) {
    // Fixed seed for illustration
    srand(2047);

    for (unsigned int idx = 0; idx < nitems; ++idx)
        dst[idx] = rand() % nitems;
}

// Copy the first n device values back to the host and print them.
// (cudaMemcpy is blocking, so no explicit synchronize is needed.)
void print_results(ull *results_d, int n) {
    ull *host_buf = new ull[n];
    GPU_CHECK(cudaMemcpy(host_buf, results_d, n * sizeof(ull), cudaMemcpyDeviceToHost));
    std::cout << "Sort data : \n";
    for (int idx = 0; idx < n; ++idx) {
        std::cout << host_buf[idx] << " ";
    }
    std::cout << std::endl;
    delete[] host_buf;
}

// Compare the first n device results against the host reference data_h
// element-by-element and report the outcome.
// Fixes: the original printed "OK" unconditionally even after reporting
// mismatches, and its message said "greater than" when it meant the
// expected value. Now mismatches are counted and "OK" appears only when
// everything matched.
void check_results(ull *results_d, ull *data_h, int n)
{
    ull *results_h = new ull[n];
    GPU_CHECK(cudaMemcpy(results_h, results_d, n*sizeof(ull), cudaMemcpyDeviceToHost));

    int mismatches = 0;
    for (int i = 0; i < n ; ++i) {
        if(results_h[i] != data_h[i]) {
            std::cout << "Invalid item[" << i << "]: got " << results_h[i]
                      << ", expected " << data_h[i] << std::endl;
            ++mismatches;
        }
    }

    if (mismatches == 0)
        std::cout << "OK" << std::endl;
    else
        std::cout << "FAILED: " << mismatches << " mismatched item(s)" << std::endl;
    delete[] results_h;
}

// Descending-order comparator for std::sort: true when a must precede b.
bool cmp(const int &a, const int &b) {
    return b < a;
}

// Exchange the two pointers referenced by a and b (host-side helper used
// to ping-pong the device buffers between reduction passes).
template <typename T>
void cpu_swap(T** a, T** b) {
    T* held = *a;
    *a = *b;
    *b = held;
}


// Main entry point.
// Usage: prog <num_items>
// Selects the TOPK largest of num_items random 64-bit values on the GPU:
// each pass sorts 2048-element tiles and keeps TOPK candidates per tile,
// repeating until one tile remains, then verifies against CPU std::sort.
int main(int argc, char **argv) {
    // Find/set device and get device properties.
    int device = 0;
    cudaDeviceProp deviceProp;
    GPU_CHECK(cudaGetDeviceProperties(&deviceProp, device));

    if (!(deviceProp.major > 3 ||
          (deviceProp.major == 3 && deviceProp.minor >= 5))) {
        printf("GPU %d - %s  does not support CUDA Dynamic Parallelism\n Exiting.",
            device, deviceProp.name);
        return 0;
    }

    // Fix: validate the command line before touching argv[1].
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <num_items>" << std::endl;
        return 1;
    }
    int num_items = atoi(argv[1]); // e.g. 7853052
    if (num_items <= 0) {
        std::cerr << "num_items must be a positive integer" << std::endl;
        return 1;
    }
    bool verbose = num_items <= 16;

    // Create input data.
    ull *h_data = 0;
    ull *d_data = 0;
    ull *d_result = 0;
    ull *d_mid_result = 0;

    // Allocate CPU memory and initialize data.
    std::cout << "Initializing data:" << std::endl;
    h_data = (ull *)malloc(num_items * sizeof(ull));
    initialize_data(h_data, num_items);
    if (verbose) {
        std::cout << "Raw  data : \n";
        for (int i = 0; i < num_items; i++)
            std::cout << h_data[i] << " ";
        std::cout << std::endl;
    }

    // Allocate GPU memory.
    GPU_CHECK(cudaMalloc((void **)&d_data, num_items * sizeof(ull)));
    GPU_CHECK(cudaMalloc((void **)&d_result, num_items * sizeof(ull)));
    GPU_CHECK(cudaMalloc((void **)&d_mid_result, num_items * sizeof(ull)));

    GPU_CHECK(cudaMemcpy(d_data, h_data, num_items * sizeof(ull), cudaMemcpyHostToDevice));

    // Execute. (Fix: the old message said "quicksort"; this is radix sort.)
    std::cout << "Running radix sort on " << num_items << " elements" << std::endl;

    std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();

    // NOTE(review): an original comment said the grid should be no larger
    // than 10 — rationale unclear from this file; confirm with the author.
    const int block = 32;        // threads per block (one warp)
    const int group_size = 2048; // elements sorted per block (tile size)

    // First pass: every block sorts one tile and keeps its TOPK largest.
    int temp_grid = (num_items + group_size - 1) / group_size;
    unsigned int temp_length = num_items;
    radixSort<<<temp_grid, block>>>(d_data, d_mid_result, temp_length);
    GPU_CHECK(cudaGetLastError());
    GPU_CHECK(cudaDeviceSynchronize());

    // Survivors after a pass: TOPK per full tile plus min(tail, TOPK)
    // from the last (possibly partial) tile.
    int tail = (int)(temp_length % group_size);
    if (tail == 0) tail = group_size;
    temp_length = (temp_grid - 1) * TOPK + std::min(tail, TOPK);
    printf("tmp_length %d\n", temp_length);

    // Reduce the candidate set until it fits in a single tile.
    while (temp_length > 2048) {
        temp_grid = (temp_length + group_size - 1) / group_size;
        cpu_swap(&d_mid_result, &d_data);
        radixSort<<<temp_grid, block>>>(d_data, d_mid_result, temp_length);
        GPU_CHECK(cudaGetLastError());
        // Bug fix: compute the tail from the CURRENT temp_length — the
        // original reused num_items here, miscounting the survivors of
        // every pass after the first.
        tail = (int)(temp_length % group_size);
        if (tail == 0) tail = group_size;
        temp_length = (temp_grid - 1) * TOPK + std::min(tail, TOPK);
        printf("tmp_length %d\n", temp_length);
    }

    printf("tmp_length %d\n", temp_length);
    // Final pass: one block sorts the remaining candidates into d_result.
    radixSort<<<1, 32>>>(d_mid_result, d_result, temp_length);
    GPU_CHECK(cudaGetLastError());
    GPU_CHECK(cudaDeviceSynchronize());

    std::chrono::high_resolution_clock::time_point t8 = std::chrono::high_resolution_clock::now();
    std::cout << std::chrono::duration_cast<std::chrono::microseconds>(t8 - t1).count() << std::endl;

    // CPU reference: sort descending, compare the first TOPK values.
    std::sort(h_data, h_data + num_items, cmp);
    print_results(d_result, num_items < TOPK ? num_items : TOPK);
    check_results(d_result, h_data, num_items < TOPK ? num_items : TOPK);

    // Fix: release host and device memory (previously leaked).
    free(h_data);
    GPU_CHECK(cudaFree(d_data));
    GPU_CHECK(cudaFree(d_result));
    GPU_CHECK(cudaFree(d_mid_result));
    return 0;
}

// int _main() {
//     const int n = 7853052;
//     int *arr = (int *)malloc(n * sizeof(int));
//     initialize_data(arr, n);
//     // for (int i = 0; i < n; i++)
//     //     std::cout << arr[i] << " ";

//     int* d_arr;
//     GPU_CHECK(cudaMalloc((void**)&d_arr, n * sizeof(int)));
//     GPU_CHECK(cudaMemcpy(d_arr, arr, n * sizeof(int), cudaMemcpyHostToDevice));

//     // Configure the CUDA grid and block dimensions
//     int block = 32;
//     int grid = (n + 2048 - 1) / 2048;

//     // Launch the radix-sort kernel
//     std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
//     radixSort<<<grid, block>>>(d_arr, n);
//     std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();

//     std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t2-t1).count()<<std::endl;
//     // Copy the sorted array from the device back to the host
//     GPU_CHECK(cudaMemcpy(arr, d_arr, n * sizeof(int), cudaMemcpyDeviceToHost));

//     // Print the sorted array
//     for(int k = 0; k < grid; k++) {
//         for (int i = 0; i < 10; i++)
//             std::cout << arr[k * 2048 + i] << " ";
//         std::cout << std::endl;
//     }

//     cudaFree(d_arr);

//     return 0;
// }
