// #pragma once
#include <cstdio>
#include <iostream>
#include "config.cuh"
#include "cuda_runtime_api.h"
#include <sys/time.h>
#include <typeinfo>
#include <algorithm>
#include <chrono>
// #include <thrust/extrema.h>

// Check the result of a CUDA runtime API call; on failure, report the
// file/line and error details and terminate the process.
// Diagnostics go to stderr so they do not interleave with normal output.
#define GPU_CHECK(call)                                       \
do                                                            \
{                                                             \
    const cudaError_t error_code = call;                      \
    if (error_code != cudaSuccess)                            \
    {                                                         \
        fprintf(stderr, "CUDA Error:\n");                     \
        fprintf(stderr, "    File:       %s\n", __FILE__);    \
        fprintf(stderr, "    Line:       %d\n", __LINE__);    \
        fprintf(stderr, "    Error code: %d\n", error_code);  \
        fprintf(stderr, "    Error text: %s\n",               \
            cudaGetErrorString(error_code));                  \
        exit(EXIT_FAILURE);                                   \
    }                                                         \
} while (0)

// Exchange the values pointed to by a and b (device-side helper).
template <typename T>
inline __device__ void gpu_swap(T* a, T* b) {
    T old_a = *a;
    *a = *b;
    *b = old_a;
}

// Exchange two pointers held by the caller (host-side): after the call,
// *a points to what *b pointed to and vice versa.
template <typename T>
void cpu_swap(T** a, T** b) {
    std::swap(*a, *b);
}

// Return the smallest exponent e such that 2^e >= v (host-side).
// Returns 0 for v <= 1.
inline unsigned int cpu_dtNextPow2_p(unsigned int v) {
    unsigned int exponent = 0;
    for (unsigned int pow2 = 1; pow2 < v; pow2 <<= 1) {
        exponent++;
    }
    return exponent;
}

// Return the smallest exponent e such that 2^e >= v (device-side).
// Returns 0 for v <= 1. Mirrors cpu_dtNextPow2_p.
inline __device__ unsigned int gpu_dtNextPow2_p(unsigned int v) {
    unsigned int exponent = 0;
    for (unsigned int pow2 = 1; pow2 < v; pow2 <<= 1) {
        exponent++;
    }
    return exponent;
}

// Round v up to the next power of two (device-side).
// Exact powers of two map to themselves; v == 0 yields 0.
inline __device__ unsigned int gpu_dtNextPow2(unsigned int v)
{
	v -= 1;
	// Smear the highest set bit into every lower position
	// (shifts 1, 2, 4, 8, 16 cover all 32 bits), then add one.
	for (unsigned int shift = 1; shift < 32; shift <<= 1) {
		v |= v >> shift;
	}
	return v + 1;
}

// Bitonic sort over data[0 .. 2^n_p). n_p may be 0, in which case nothing is
// sorted (2^0 = 1 element). Must be called by every thread of the block (it
// contains __syncthreads); threads cooperate, each handling compare pairs
// i, i + blockDim.x, ... With the 'orange' direction flag below, the final
// pass orders the array descending (largest value first).
__device__ void gpu_topk_impl(ull* data, const int n_p) {
    // stride_p: log2 of the current bitonic sequence length (outer stage);
    // s_p/hs_p: current sub-pass and its half-stride exponent;
    // hs: distance between the two elements of a compare pair;
    // hn: number of compare pairs per sub-pass (half the array length).
    register int stride_p, half_stride_p, s_p, hs_p, hs, i, j, k, hn;
    register bool orange, j_bigger_k;
    hn = 1 << (n_p - 1);
    half_stride_p = 0;
    
    for (stride_p = 1; stride_p <= n_p; stride_p++) {
        s_p = stride_p;
        while (s_p >= 1) {
            hs_p = s_p - 1;
            hs = 1 << hs_p;
            for (i = threadIdx.x; i < hn; i += blockDim.x) {
                // 'orange' selects the sort direction of this bitonic block.
                orange = (i >> half_stride_p) % 2 == 0;
                // j/k: indices of the compare-exchange pair, hs apart.
                j = ((i >> hs_p) << s_p) + (i % hs);
                k = j + hs;
                j_bigger_k = data[j] > data[k];
                if ((orange ^ j_bigger_k)) {
                    gpu_swap(&data[k], &data[j]);
                }
            }
            __syncthreads();
            s_p = hs_p;
        }
        half_stride_p++;
    }
}


// One top-k reduction pass.
// The `length` input elements are processed in groups of `group_size`
// consecutive elements. Only the first block of each group survives the
// guard below; it stages the whole group in shared memory, pads it to the
// next power of two with 0, bitonic-sorts it (descending — see
// gpu_topk_impl), and writes the group's first min(group_size, TOPK)
// elements (its largest values) to d_result at (tid / group_size) * TOPK.
// Preconditions: group_size is a multiple of blockDim.x, and the padded
// group size must not exceed 4096 (size of the shared buffer).
__global__ void gpu_topk(ull *data, ull *d_result, const unsigned int length, const unsigned int group_size = 1024) {
    register auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    register auto tnum = gridDim.x * blockDim.x;
    // printf("adasd\n");

    // Keep only the leading block of each group; drop out-of-range blocks.
    if(tid >= length || blockIdx.x % (group_size / blockDim.x) > 0) {
        return;
    }

    __shared__ ull temp_result[4096]; // staging buffer; padded group must fit

    // Number of valid elements in this group (the last group may be short).
    register auto temp_result_length = min(group_size, length - blockIdx.x * blockDim.x);


    // Stage this group's slice of global memory into shared memory.
#pragma unroll
    for (register auto i = threadIdx.x; i < temp_result_length; i += blockDim.x) {
        // printf("%d %f\n", data[i + blockIdx.x * TOPK].idx, data[i + blockIdx.x * TOPK].score);        
        temp_result[i] = data[i + blockIdx.x * blockDim.x]; 
    }
    

    // n = padded (power-of-two) length, n_p = log2(n).
    register unsigned int n_p = gpu_dtNextPow2_p(temp_result_length);
    register unsigned int n = gpu_dtNextPow2(temp_result_length);
    // register unsigned int double_minus_n = n / 2;


    // Pad entries past the valid length with 0 so they sort to the back.
    // (The original comment said -1, but the code pads with 0.)
#pragma unroll
    for (register auto i = temp_result_length + threadIdx.x; i < n; i += blockDim.x) {
        temp_result[i] = 0;
    }
    // if(temp_result_length <= double_minus_n + threadIdx.x && double_minus_n + threadIdx.x < n) {
        

    __syncthreads();

    gpu_topk_impl(temp_result, n_p);

    // // printf("adasd\n");

    // if(threadIdx.x >= ) {
    //     return;
    // }
    
    // printf("%d,%d,%d\n",temp_result_length, n_p, n);

    // Write the group's top elements (head of the descending sort) out.
    register auto small_len = min(temp_result_length, TOPK);
    if(threadIdx.x < small_len) {
        d_result[threadIdx.x + (tid / group_size) * TOPK] = temp_result[threadIdx.x];
    }
    // register auto offset = ;
    // for(register int i = threadIdx.x; i < small_len; i += tnum) {
    //     // if(d_result[i + blockIdx.x * TOPK].idx != (int)d_result[i + blockIdx.x * TOPK].score || )
    // }

    // if(gridDim.x == 1 && threadIdx.x == 0) {
    //     for(register int i = threadIdx.x; i < small_len; i++) 
    //         printf("%d %f\n", d_result[i].idx, d_result[i].score);
    //     printf("*"); 
    // }
}


// Sift-down within arr[left..right], restoring the heap property at `left`.
// NOTE(review): despite the name, this maintains a MIN-heap (it picks the
// smaller child and stops when the parent is <= the child). Callers keep the
// smallest of the current top-k at arr[0] so it can be replaced whenever a
// larger candidate arrives.
__forceinline__ __device__ void max_heapify(ull *arr, const int left, const int right) {
    // Set up parent and child indices.
    int dad = left;
    int son = dad * 2 + 1;
    while (son <= right) { // compare only while the child index is in range
        if (son + 1 <= right && arr[son] > arr[son + 1]) // pick the smaller of the two children
            son++;
        if (arr[son] > arr[dad]) // parent already <= child: heap restored, done
            return;
        else { // otherwise swap parent and child, then continue one level down
            // printf("%d,%d",dad,son);
            gpu_swap(&arr[dad], &arr[son]);
            dad = son;
            son = dad * 2 + 1;
        }
    }
}

#define BLOCK_SIZE 41

// Final single-block reduction of the top-k pipeline.
// Launch: <<<1, BLOCK_SIZE>>>. `input` holds groups of TOPK values, each
// group sorted descending by the previous pass; `length` is the number of
// valid elements in `input`; `k` is unused (TOPK is compile-time).
// Each thread builds a TOPK-element min-heap from up to two input groups
// (group threadIdx.x and group BLOCK_SIZE + threadIdx.x), then the per-
// thread heaps are merged pairwise through shared memory. output[0..TOPK)
// receives the overall top TOPK values in min-heap order (not sorted).
// Precondition: length <= 2 * BLOCK_SIZE * TOPK (only two groups per thread
// are consumed).
//
// Fixes vs. the original version:
//  * __syncthreads() instead of __syncwarp(): BLOCK_SIZE (41) spans two
//    warps, and `ken` is shared across the whole block.
//  * The tree reduction halves with a ceiling so odd counts do not drop a
//    partial result (the old power-of-two ladder 20,10,5,2,1 lost group 40).
//  * The initial heap load is bounds-checked against `length`, padding with
//    0 — the same neutral value the earlier passes use.
__global__ void gpu_topk_reduce(ull *input, ull *output, int length, int k)
{
    __shared__ ull ken[BLOCK_SIZE * TOPK];
    register ull top_array[TOPK];

    // Seed the per-thread min-heap from this thread's own group. The group
    // is stored descending, so reading it back-to-front yields an ascending
    // array, which is already a valid min-heap.
    for (int i = 0; i < TOPK; i++) {
        int idx = TOPK - i - 1 + TOPK * threadIdx.x;
        top_array[i] = (idx < length) ? input[idx] : 0;
    }

    // Fold in a second group (BLOCK_SIZE + threadIdx.x) if it exists:
    // replace the heap minimum whenever a larger value is seen.
    for (int i = 0; i < TOPK && (BLOCK_SIZE + threadIdx.x) * TOPK + i < length; i++) {
        if (input[(BLOCK_SIZE + threadIdx.x) * TOPK + i] > top_array[0]) {
            top_array[0] = input[(BLOCK_SIZE + threadIdx.x) * TOPK + i];
            max_heapify(top_array, 0, TOPK - 1);
        }
    }

    // Publish each thread's heap to shared memory for the tree reduction.
    for (int i = 0; i < TOPK; i++) {
        ken[TOPK * threadIdx.x + i] = top_array[i];
    }
    __syncthreads();

    // Pairwise merge: each step folds the live range [0, active) down to
    // [0, half) with half = ceil(active / 2). Thread t (t < active - half)
    // absorbs partial heap t + half into its own; heap half-1 (when active
    // is odd) is carried forward untouched.
    for (int active = BLOCK_SIZE; active > 1; ) {
        int half = (active + 1) >> 1;
        if (threadIdx.x < active - half) {
            for (int m = 0; m < TOPK; m++) {
                if (ken[TOPK * (threadIdx.x + half) + m] > top_array[0]) {
                    top_array[0] = ken[TOPK * (threadIdx.x + half) + m];
                    max_heapify(top_array, 0, TOPK - 1);
                }
            }
        }
        __syncthreads();
        if (threadIdx.x < active - half) {
            for (int m = 0; m < TOPK; m++) {
                ken[TOPK * threadIdx.x + m] = top_array[m];
            }
        }
        __syncthreads();
        active = half;
    }

    // Cooperative copy of the winning heap (in ken[0..TOPK)) to global memory.
    for (int i = threadIdx.x; i < TOPK; i += blockDim.x) {
        output[i] = ken[i];
    }
}

// Fill dst[0 .. nitems) on the host with pseudo-random values from rand(),
// using a fixed seed so every run produces the same data.
void initialize_data(ull *dst, unsigned int nitems) {
    // Fixed seed for illustration
    srand(2047);

    // Fill dst with random values
    unsigned int i = 0;
    while (i < nitems) {
        dst[i++] = rand();
    }
}

// Copy n elements back from device memory and print them to stdout.
void print_results(ull *results_d, int n) {
    ull *results_h = new ull[n];
    GPU_CHECK(cudaMemcpy(results_h, results_d, n * sizeof(ull), cudaMemcpyDeviceToHost));
    std::cout << "Sort data : \n";
    int i = 0;
    while (i < n) {
        std::cout << results_h[i] << "\n";
        ++i;
    }
    std::cout << std::endl;
    delete[] results_h;
}

// Compare the first n device results against the expected host values.
// Every mismatch is reported (the function does not abort, so all
// differences are listed), then "OK" is printed.
void check_results(ull *results_d, ull *data_h, int n)
{
    ull *results_h = new ull[n];
    GPU_CHECK(cudaMemcpy(results_h, results_d, n*sizeof(ull), cudaMemcpyDeviceToHost));

    for (int i = 0; i < n ; ++i) {
        if(results_h[i] != data_h[i]) {
            // The test is inequality, so report it as such (the old message
            // misleadingly claimed "greater than").
            std::cout << "Invalid item[" << i << "]: " << results_h[i] << " != " << data_h[i] << std::endl;
        }
    }

    std::cout << "OK" << std::endl;
    delete[] results_h;
}

// Strict descending-order comparator for std::sort.
bool cmp(const int &a, const int &b) {
    return b < a;
}

// Main entry point.
// Usage: <program> <num_items>
// Repeatedly runs gpu_topk to shrink the candidate set group by group, then
// runs the single-block gpu_topk_reduce to produce the final TOPK values,
// and prints the total elapsed time in microseconds.
int main(int argc, char **argv) {
    // Guard against missing argument (the original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        printf("Usage: %s <num_items>\n", argv[0]);
        return 1;
    }

    // Find/set device and get device properties
    int device = 0;
    cudaDeviceProp deviceProp;
    GPU_CHECK(cudaGetDeviceProperties(&deviceProp, device));

    // Requires compute capability >= 3.5 (CUDA Dynamic Parallelism support).
    if (!(deviceProp.major > 3 ||
          (deviceProp.major == 3 && deviceProp.minor >= 5))) {
        printf("GPU %d - %s  does not support CUDA Dynamic Parallelism\n Exiting.",
            device, deviceProp.name);
        return 0;
    }

    int num_items = atoi(argv[1]); // e.g. 7853052
    bool verbose = num_items <= 16;

    // Create input data
    ull *h_data = 0;
    ull *d_data = 0;
    ull *d_result = 0;
    ull *d_mid_result = 0;

    // Allocate CPU memory and initialize data.
    std::cout << "Initializing data:" << std::endl;
    h_data = (ull *)malloc(num_items * sizeof(ull));
    if (h_data == NULL) {
        printf("Host allocation of %d items failed\n", num_items);
        return 1;
    }
    initialize_data(h_data, num_items);
    if (verbose) {
       std::cout << "Raw  data : \n";
        for (int i = 0; i < num_items; i++)
            std::cout << h_data[i] << "\n"; 
    }

    // Allocate GPU memory.
    GPU_CHECK(cudaMalloc((void **)&d_data, num_items * sizeof(ull)));
    GPU_CHECK(cudaMalloc((void **)&d_result, num_items * sizeof(ull)));
    GPU_CHECK(cudaMalloc((void **)&d_mid_result, num_items * sizeof(ull)));

    GPU_CHECK(cudaMemcpy(d_data, h_data, num_items * sizeof(ull), cudaMemcpyHostToDevice));

    // Execute
    std::cout << "Running quicksort on " << num_items << " elements" << std::endl;

    std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();

    int block = N_THREADS_IN_ONE_BLOCK;
    int group_size = 4096;
    int temp_grid = (num_items + block - 1) / (block);
    unsigned int temp_length = num_items;

    // First pass: each group of group_size elements is reduced to its top
    // min(group_size, TOPK) candidates.
    gpu_topk<<<temp_grid, block>>>(d_data, d_mid_result, temp_length, group_size);
    GPU_CHECK(cudaGetLastError());
    GPU_CHECK(cudaDeviceSynchronize());

    // Survivors after a pass: TOPK per full group, plus min(tail, TOPK)
    // for a short trailing group.
    temp_length = ((temp_grid - (temp_length % group_size == 0 ? 0: 1)) / (group_size / block)) * TOPK + min(temp_length % group_size, TOPK);

    // Keep reducing until the candidate set is small enough for the final
    // single-block reduction.
    while(temp_length > 5000) {
        temp_grid = (temp_length + block - 1) / (block);
        // Ping-pong the buffers: last pass's output becomes this pass's input.
        cpu_swap(&d_mid_result, &d_result);
        gpu_topk<<<temp_grid, block>>>(d_result, d_mid_result, temp_length, group_size);
        GPU_CHECK(cudaGetLastError());
        GPU_CHECK(cudaDeviceSynchronize());

        temp_length = ((temp_grid - (temp_length % group_size == 0 ? 0: 1)) / (group_size / block)) * TOPK + min(temp_length % group_size, TOPK);
    }

    // Final merge of the remaining candidates into the global top TOPK.
    gpu_topk_reduce<<<1, BLOCK_SIZE>>>(d_mid_result, d_result, temp_length, TOPK);
    GPU_CHECK(cudaGetLastError());
    GPU_CHECK(cudaDeviceSynchronize());

    std::chrono::high_resolution_clock::time_point t8 = std::chrono::high_resolution_clock::now();

    // Total pipeline time in microseconds.
    std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t8-t1).count()<<std::endl;

    // CPU reference: sort descending so h_data[0..TOPK) holds the expected
    // answer (the validation call itself is left disabled, as before).
    std::sort(h_data, h_data + num_items, cmp);
    // check_results(d_result, h_data, temp_length < TOPK ? temp_length : TOPK);

    // Cleanup (the original leaked d_result and d_mid_result).
    free(h_data);
    GPU_CHECK(cudaFree(d_data));
    GPU_CHECK(cudaFree(d_result));
    GPU_CHECK(cudaFree(d_mid_result));

    return 0;
}

/* 
Compile: nvcc -o quicksort_cuda --gpu-architecture=sm_70 -rdc=true quicksort_cuda.cu
*/