#pragma once
// C++ standard library
#include <algorithm>
#include <assert.h>
#include <chrono>
#include <cstdio>
#include <iostream>
#include <string>
#include <typeinfo>
// System headers
#include <sys/time.h>
// CUDA runtime and project headers (config.cuh defines iAS / TOPK / GPU_CHECK)
#include "cuda_runtime_api.h"
#include "config.cuh"
#include "myBitonic.cuh"


// Final reduction stage for the last warp: folds cache[0..63] down to cache[0],
// keeping at each step whichever element gpu_compare() prefers.
// Preconditions: called by threads 0..31 of the first warp only (all 32 lanes
// active, so the full-warp default mask of __syncwarp() is correct), and the
// block-level reduction that produced cache[32..63] has already been fenced
// with __syncthreads() by the caller.
//
// NOTE: the previous implementation read and wrote cache[] in the same
// statement (lane 0 reading cache[16] while lane 16 writes it), which is a
// data race under Volta+ independent thread scheduling. The safe pattern is
// to separate each read and write with __syncwarp().
__device__ void warpReduce(iAS* cache, int tid){
    iAS best = cache[tid];
#pragma unroll
    for (int offset = 32; offset >= 1; offset >>= 1) {
        iAS rival = cache[tid + offset]; // read phase
        __syncwarp();                    // all reads done before any write
        best = gpu_compare(best, rival) ? best : rival;
        cache[tid] = best;               // write phase
        __syncwarp();                    // writes visible before next read
    }
}

// Merges/reduces in windows of 2048 elements per block; larger windows break it.
// Per-block top-k extraction: each block stages a window of up to 2048 input
// elements (two per thread) in shared memory, then repeatedly reduces the
// window to its best element (per gpu_compare) and emits it to
// d_result[TOPK * blockIdx.x + k].
// Launch contract: blockDim.x == 1024, gridDim.x == ceil(length / 2048).
// Shared memory: 2 * 2048 * sizeof(iAS) + 2048 bytes (static).
__global__ void reduceSort(iAS *data, iAS *d_result, const unsigned int length) {
    const int shm_size = 2048;                // window size = 2 * blockDim.x
    const unsigned int tid = threadIdx.x;
    // Base of this block's window in global memory. The host sizes the grid
    // by shm_size (2048) elements per block, so the stride between blocks
    // must be blockDim.x * 2. (The old code used blockIdx.x * blockDim.x,
    // which made adjacent blocks overlap by half a window and left the tail
    // of the input unprocessed.)
    const unsigned int base = blockIdx.x * (blockDim.x * 2u);
    int i = 0;

    __shared__ iAS original_data[shm_size];   // this block's slice of the input
    __shared__ iAS temp_result[shm_size];     // scratch buffer for the reduction
    __shared__ bool had_out[shm_size];        // marks elements already emitted

    // Valid element count in this window (the last block may be short).
    // The grid contract above guarantees base < length, so no underflow.
    const int temp_result_length = min(shm_size, (int)(length - base));

    // Stage the window: each thread loads elements i and i + blockDim.x.
    for (i = tid; i < shm_size && i + base < length; i += blockDim.x) {
        had_out[i] = false;
        original_data[i].score = data[i + base].score;
        original_data[i].idx = data[i + base].idx;
    }
    // Pad the remainder with sentinels that can never win the reduction.
    for (i = tid + temp_result_length; i < shm_size; i += blockDim.x) {
        had_out[i] = true;
        original_data[i].score = -1;
        original_data[i].idx = -1; // don't leave shared memory uninitialized
    }
    __syncthreads();

    // Extract the best remaining element once per iteration (currently k = 1;
    // raise the bound toward TOPK to emit more per block).
    for (int k = 0; k < 1; k++) {

        // Reset scratch: already-emitted elements get a sentinel score.
        for (i = tid; i < shm_size; i += blockDim.x) {
            temp_result[i].score = had_out[i] ? -1 : original_data[i].score;
            temp_result[i].idx = original_data[i].idx;
        }
        __syncthreads(); // scratch fully written before the folds below read it

        // First fold: 2048 entries down to blockDim.x.
        temp_result[tid] = gpu_compare(temp_result[tid], temp_result[tid + blockDim.x]) ? temp_result[tid] : temp_result[tid + blockDim.x];
        __syncthreads();

        // Tree reduction in shared memory down to the last warp.
#pragma unroll
        for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
            if (tid < s) {
                temp_result[tid] = gpu_compare(temp_result[tid], temp_result[tid + s]) ? temp_result[tid] : temp_result[tid + s];
            }
            __syncthreads();
        }
        if (tid < 32) warpReduce(temp_result, tid);
        __syncthreads(); // temp_result[0] now holds this round's winner

        // Mark the winner so it is not selected again in later rounds.
#pragma unroll
        for (i = tid; i < shm_size; i += blockDim.x) {
            if (temp_result[0].idx == original_data[i].idx) {
                had_out[i] = true;
            }
        }

        // Thread 0 publishes the k-th best element of this block.
        if (tid == 0) {
            d_result[k + TOPK * blockIdx.x].score = temp_result[0].score;
            d_result[k + TOPK * blockIdx.x].idx = temp_result[0].idx;
        }
        __syncthreads(); // required once the loop runs more than one round
    }
}

// Main entry point.
int main(int argc, char **argv) {
    // Find/set device and get device properties
    int device = 0;
    cudaDeviceProp deviceProp;
    GPU_CHECK(cudaGetDeviceProperties(&deviceProp, device));
    
    if (!(deviceProp.major > 3 ||
          (deviceProp.major == 3 && deviceProp.minor >= 5))) {
        printf("GPU %d - %s  does not support CUDA Dynamic Parallelism\n Exiting.",
            device, deviceProp.name);
        return 0;
    }

    int num_items = std::stoi(argv[1]); // 7853052
    bool verbose = num_items <= 16;

    // Create input data
    iAS *h_data = 0;
    iAS *d_data = 0;
    iAS *d_result = 0;
    iAS *d_mid_result = 0;

    // Allocate CPU memory and initialize data.
    std::cout << "Initializing data:" << std::endl;
    h_data = (iAS *)malloc(num_items * sizeof(iAS));
    initialize_data(h_data, num_items);
    if (verbose) {
       std::cout << "Raw  data : \n";
        for (int i = 0; i < num_items; i++)
            std::cout << h_data[i].idx << " " << h_data[i].score << "\n";
    }

    // Allocate GPU memory.
    GPU_CHECK(cudaMalloc((void **)&d_data, num_items * sizeof(iAS)));
    GPU_CHECK(cudaMalloc((void **)&d_result, num_items * sizeof(iAS)));
    GPU_CHECK(cudaMalloc((void **)&d_mid_result, num_items * sizeof(iAS)));
    
    // GPU_CHECK(cudaMemset(d_mid_result, -1, num_items * sizeof(iAS)));
    // GPU_CHECK(cudaMemset(d_result, -1, num_items * sizeof(iAS)));

    GPU_CHECK(cudaMemcpy(d_data, h_data, num_items * sizeof(iAS), cudaMemcpyHostToDevice));
    
    // Execute
    std::cout << "Running quicksort on " << num_items << " elements" << std::endl;
    


    //grid要不大于10
    // int block = N_THREADS_IN_ONE_BLOCK;
    // int temp_grid = (num_items + block - 1) / (block);
    // unsigned int temp_length = num_items;
    // printf("%d %d\n", temp_grid, block);
    // std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
    std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
    int block = 2048;
    // int group_size = 2048;
    int temp_grid = (num_items + block - 1) / (block);
    unsigned int temp_length = num_items;
    
    // printf("%d\n", temp_length);
    for(int i = 0; i < 100; i++)
        reduceSort<<<temp_grid, 1024>>>(d_data, d_mid_result, temp_length);
    cudaDeviceSynchronize();
    std::chrono::high_resolution_clock::time_point t5 = std::chrono::high_resolution_clock::now();
    // print_results(d_mid_result, TOPK);


    
    // std::chrono::high_resolution_clock::time_point t6 = std::chrono::high_resolution_clock::now();

    // std::cout<<"Hello\n";
    // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t3-t2).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t7-t6).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t8-t1).count()<<std::endl;
    std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t5-t1).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t3-t2).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t4-t3).count()<<std::endl;
    // std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t5-t4).count()<<std::endl;


    // print result
    // check result
    std::cout << "Validating results: ";
    std::sort(h_data, h_data + num_items);
    // check_results(d_result, h_data, TOPK);
    free(h_data);
    GPU_CHECK(cudaFree(d_data));

    return 0;
}

/*
Build: nvcc -o quicksort_cuda --gpu-architecture=sm_70 -rdc=true quicksort_cuda.cu
*/
