// #pragma once
#include <cstdio>
#include <iostream>
#include "config.cuh"
#include "cuda_runtime_api.h"
#include <sys/time.h>
#include <typeinfo>
#include <algorithm>
#include <chrono>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <cub/cub.cuh>
// #include <thrust/extrema.h>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/block/block_radix_sort.cuh>

// Wrap every CUDA runtime call: on failure, report file/line/error context
// and abort. Errors now go to stderr so they are not lost in buffered stdout
// and survive output redirection.
#define GPU_CHECK(call)                                       \
do                                                            \
{                                                             \
    const cudaError_t error_code = call;                      \
    if (error_code != cudaSuccess)                            \
    {                                                         \
        fprintf(stderr, "CUDA Error:\n");                     \
        fprintf(stderr, "    File:       %s\n", __FILE__);    \
        fprintf(stderr, "    Line:       %d\n", __LINE__);    \
        fprintf(stderr, "    Error code: %d\n", error_code);  \
        fprintf(stderr, "    Error text: %s\n",               \
            cudaGetErrorString(error_code));                  \
        exit(1);                                              \
    }                                                         \
} while (0)

// template <typename T>
// Exchange the two 64-bit values behind `a` and `b` (device-side helper used
// by the bitonic sort's compare/swap step).
__forceinline__ __device__ void gpu_swap(ull* a, ull* b) {
    const ull tmp = *a;
    *a = *b;
    *b = tmp;
}

// Host-side pointer swap: exchanges which buffers *a and *b refer to
// (used to ping-pong input/output device buffers between passes).
template <typename T>
void cpu_swap(T** a, T** b) {
    std::swap(*a, *b);
}

// Host-side: smallest exponent p such that 2^p >= v (v == 0 or 1 yields 0).
// i.e. ceil(log2(v)) for v >= 1.
inline unsigned int cpu_dtNextPow2_p(unsigned int v) {
    unsigned int exponent = 0;
    for (unsigned int pow2 = 1; pow2 < v; pow2 <<= 1) {
        ++exponent;
    }
    return exponent;
}

// Device-side twin of cpu_dtNextPow2_p: smallest exponent p with 2^p >= v
// (v == 0 or 1 yields 0).
__forceinline__ __device__ unsigned int gpu_dtNextPow2_p(unsigned int v) {
    unsigned int exponent = 0;
    for (unsigned int pow2 = 1; pow2 < v; pow2 <<= 1) {
        ++exponent;
    }
    return exponent;
}

// Device-side: round v up to the next power of two. Powers of two map to
// themselves; v == 0 wraps around and returns 0.
__forceinline__ __device__ unsigned int gpu_dtNextPow2(unsigned int v)
{
	// Classic bit-smear: after v-1 has all bits below its MSB set,
	// adding 1 produces the next power of two.
	v -= 1;
	for (unsigned int shift = 1; shift < 32; shift <<= 1) {
		v |= v >> shift;
	}
	return v + 1;
}

// Scores every document against seven queries in a single pass.
//
// Launch: one thread per document (a grid-stride loop also tolerates smaller
// grids); blockDim.x * 8 must cover the longest query, because every block
// cooperatively rebuilds the query bitmaps in its shared memory.
// Shared memory: 7 bitmaps of 1600 uints (51200 bits) each, one per query;
// bit t of bitmap q is set iff token id t occurs in query q, so token ids
// must be < 51200.
// Output: index_and_score[q * n_docs + doc] = (score << 32) + (UINT_MAX - doc)
// with score = hits * 65536 / max(query_q_len, doc_len); packing the inverted
// doc id in the low half lets a single descending sort rank by score with
// ties broken in favour of the smaller doc id.
// NOTE(review): `group_t` comes from config.cuh; the code assumes one group_t
// spans exactly 8 uint16_t tokens (doc_segment[0..7]) — confirm its definition.
void __global__ docQueryScoringCoalescedMemoryAccessSampleKernel(
        const __restrict__ uint16_t *docs, 
        const __restrict__ uint16_t *doc_lens, const size_t n_docs, 
        const __restrict__ uint16_t *query_1, const  __restrict__ uint16_t *query_2, 
        const __restrict__ uint16_t *query_3, const  __restrict__ uint16_t *query_4,
        const __restrict__ uint16_t *query_5, const  __restrict__ uint16_t *query_6,
        const __restrict__ uint16_t *query_7, 
        const int query_1_len, const int query_2_len, const int query_3_len, 
        const int query_4_len, const int query_5_len, const int query_6_len,
        const int query_7_len,
        ull __restrict__ *index_and_score) {
    // each thread scores one document against all seven queries
    register uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    register uint tnum = gridDim.x * blockDim.x;

    const int shm_size = 11200;
    __shared__ uint query_on_shm[shm_size];

    // Cooperatively zero all seven query bitmaps.
#pragma unroll
    for (register auto i = threadIdx.x; i < shm_size; i += blockDim.x) {
        query_on_shm[i] = 0; 
    }

    register group_t loaded; register uint16_t *doc_segment;

    // BUGFIX: the original code returned here when tid >= n_docs, i.e. before
    // the barrier below and before the bitmap population. That placed
    // __syncthreads() in divergent control flow and, worse, in the last
    // (partially filled) block the exited threads never set their share of
    // the query bits, corrupting that block's scores. All threads now run the
    // threadIdx.x-based setup; the scoring loop is guarded by `ii < n_docs`.

    __syncthreads();

    // Each participating thread ORs one group of 8 query tokens into the
    // bitmap of its query (bitmaps live at offsets 0, 1600, ..., 9600).
    if(threadIdx.x * 8 < query_1_len) {
        loaded = ((group_t *)query_1)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8 < query_2_len) {
        loaded = ((group_t *)query_2)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[1600 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8 < query_3_len) {
        loaded = ((group_t *)query_3)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[3200 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8 < query_4_len) {
        loaded = ((group_t *)query_4)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[4800 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8 < query_5_len) {
        loaded = ((group_t *)query_5)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[6400 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8 < query_6_len) {
        loaded = ((group_t *)query_6)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[8000 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8 < query_7_len) {
        loaded = ((group_t *)query_7)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[9600 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    __syncthreads();

    register ull tmp_score_1, tmp_score_2, tmp_score_3, tmp_score_4, tmp_score_5, tmp_score_6, tmp_score_7;
    register ull _, _ii;
    register uint _00, _000;
    register uint16_t _doc_len;
    // Grid-stride over documents. (Generalized from the original degenerate
    // `ii += n_docs` stride, which executed at most once per thread and
    // therefore required grid * block >= n_docs.)
    for(register auto ii = tid; ii < n_docs; ii += tnum) {
        tmp_score_1 = tmp_score_2 = tmp_score_3 = tmp_score_4 = tmp_score_5 = tmp_score_6 = tmp_score_7 = 0;
#pragma unroll
        for (register auto i = 0; i < 16; ++i) {
            // Docs are laid out column-major in groups of 8 tokens: group i of
            // doc ii is at ((group_t*)docs)[i * n_docs + ii] (coalesced loads).
            // A zero token terminates the document.
            loaded = ((group_t *)docs)[i * n_docs + ii];
            doc_segment = (uint16_t*)(&loaded); 
            if(doc_segment[0] == 0) break;
            _00 = (1u << (doc_segment[0] % 32)); _000 = doc_segment[0] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0); 
            
            if(doc_segment[1] == 0) break;
            _00 = (1u << (doc_segment[1] % 32)); _000 = doc_segment[1] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(doc_segment[2] == 0) break;
            _00 = (1u << (doc_segment[2] % 32)); _000 = doc_segment[2] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(doc_segment[3] == 0) break;
            _00 = (1u << (doc_segment[3] % 32)); _000 = doc_segment[3] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(doc_segment[4] == 0) break;
            _00 = (1u << (doc_segment[4] % 32)); _000 = doc_segment[4] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(doc_segment[5] == 0) break;
            _00 = (1u << (doc_segment[5] % 32)); _000 = doc_segment[5] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(doc_segment[6] == 0) break;
            _00 = (1u << (doc_segment[6] % 32)); _000 = doc_segment[6] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(doc_segment[7] == 0) break;
            _00 = (1u << (doc_segment[7] % 32)); _000 = doc_segment[7] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
        }
        // Pack (scaled score << 32) | inverted doc id for each query slot.
        _ii = UINT_MAX - ii; _doc_len = doc_lens[ii];
        _ = (tmp_score_1 * 65536 / (query_1_len > _doc_len ? query_1_len : _doc_len));
        index_and_score[ii] = (_ << 32)  + _ii;

        _ = (tmp_score_2 * 65536 / (query_2_len > _doc_len ? query_2_len : _doc_len) );
        index_and_score[ii + n_docs] = (_ << 32)  + _ii;

        _ = (tmp_score_3 * 65536 / (query_3_len > _doc_len ? query_3_len : _doc_len) );
        index_and_score[ii + 2 * n_docs] = (_ << 32)  + _ii;

        _ = (tmp_score_4 * 65536 / (query_4_len > _doc_len ? query_4_len : _doc_len) );
        index_and_score[ii + 3 * n_docs] = (_ << 32)  + _ii;

        _ = (tmp_score_5 * 65536 / (query_5_len > _doc_len ? query_5_len : _doc_len) );
        index_and_score[ii + 4 * n_docs] = (_ << 32)  + _ii;

        _ = (tmp_score_6 * 65536 / (query_6_len > _doc_len ? query_6_len : _doc_len) );
        index_and_score[ii + 5 * n_docs] = (_ << 32)  + _ii;

        _ = (tmp_score_7 * 65536 / (query_7_len > _doc_len ? query_7_len : _doc_len) );
        index_and_score[ii + 6 * n_docs] = (_ << 32)  + _ii;
    }
}


// Bitonic sort, performed cooperatively by the whole thread block, over
// data[0 .. 2^n_p - 1]. n_p may be 0, in which case nothing is sorted
// (2^0 = 1 element). Every thread of the block must call this — it contains
// __syncthreads() — and `data` is expected to be shared memory.
// NOTE(review): for n_p == 0, `hn = 1 << (n_p - 1)` shifts by -1 (UB),
// although the outer loop then never runs — confirm callers avoid n_p == 0
// or guard it.
__forceinline__ __device__ void gpu_topk_impl(ull* data, const int n_p) {
    // hn: half the element count = number of compare/swap pairs per step.
    register int stride_p, half_stride_p, s_p, hs_p, hs, i, j, k, hn;
    // register bool orange;
    hn = 1 << (n_p - 1);
    half_stride_p = 0;
    
    // stride_p: log2 of the bitonic sequence length for this stage;
    // s_p/hs_p/hs: log2 / half-span of the compare distance within the stage.
    for (stride_p = 1; stride_p <= n_p; ++stride_p) {
        s_p = stride_p;
        while (s_p >= 1) {
            hs_p = s_p - 1;
            hs = 1 << hs_p;
            for (i = threadIdx.x; i < hn; i += blockDim.x) {
                // orange = (i >> half_stride_p) % 2 == 0;
                // j, k: the pair to compare/swap; the XOR flips the comparison
                // direction between the two halves of each bitonic stage.
                j = ((i >> hs_p) << s_p) + (i % hs);
                k = j + hs;
                if (((((i >> half_stride_p) & 1) == 0) ^ (data[j] > data[k]))) {
                    gpu_swap(&data[k], &data[j]);
                }
            }
            __syncthreads();
            s_p = hs_p;
        }
        ++half_stride_p;
    }
}


// Per-block partial top-k stage. Assumes the launched blocks cover `length`
// (blocks 0..K-1, first slice starting at offset 0). Each block copies one
// `group_size`-element (<= 2048) slice of one input array into shared memory,
// zero-pads the rest of the 2048-entry buffer, and sorts it in descending
// order with a CUB block radix sort specialised for 512 threads x 4 items.
// NOTE(review): the CUB typedefs below hard-code a 512-thread block; launching
// with any other blockDim.x is incorrect — consider a static/runtime check.
// NOTE(review): the sorted keys are currently only printf'd — the store to
// d_result is commented out, so this kernel produces no device output as
// written; confirm which store path (striped store vs. top-TOPK copy) is the
// intended one before use.
__global__ void gpu_topk(
    const __restrict__ ull *data, const unsigned int length, 
    const int data_offset, const size_t result_offset, const int grid_offset,
    __restrict__ ull *d_result, const unsigned int group_size) {
    
    // register auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    // register auto tnum = gridDim.x * blockDim.x;

    // if(blockIdx.x % (group_size / blockDim.x) > 3) {
    //     return;
    // }

    __shared__ ull temp_result[2048]; // 1024 * 16 * 3
    // __shared__ ull temp_result_out[2048]; // 1024 * 16 * 3

    register auto blockIdxx = blockIdx.x % grid_offset; // which slice of the data this block handles
    // register auto block_number = blockIdx.x % (group_size / blockDim.x);
    register auto data_number = blockIdx.x / grid_offset; // which input array (e.g. which query) this block handles
    register auto temp_result_length = min(group_size, length - blockIdxx * group_size);
    
    // NOTE(review): n_p, n and small_len are only referenced by the
    // commented-out bitonic-sort / store paths below; dead in the CUB path.
    register unsigned int n_p = gpu_dtNextPow2_p(temp_result_length);
    register unsigned int n = gpu_dtNextPow2(temp_result_length);
    
    register auto small_len = min(temp_result_length, TOPK);

    // register group_t loaded; register ull *doc_segment;
    // Copy this block's slice into shared memory. (Original note: the
    // vectorised variant below with `i * 2 + 1 < temp_result_length` could be
    // wrong when the length is odd.)
#pragma unroll
    for (register auto i = threadIdx.x; i < temp_result_length; i += blockDim.x) {
        // loaded = ((group_t *)data)[(blockIdxx * blockDim.x + block_number * data_offset) / 2 + i];
        // doc_segment = (ull*)(&loaded); 
        // temp_result[i * 2] = doc_segment[0]; 
        // temp_result[i * 2 + 1] = doc_segment[1];
        temp_result[i] = data[i + blockIdxx * group_size + data_number * data_offset];  
    }


    // Zero-pad the tail of the shared buffer past the slice length.
    // (The original comment said "-1", but the code stores 0.)
#pragma unroll
    for (register auto i = temp_result_length + threadIdx.x; i < 2048; i += blockDim.x) {
        temp_result[i] = 0;
    }
        
    __syncthreads();
    
    enum { TILE_SIZE = 512 * 4 };
    // Specialize BlockLoad type for our thread block (uses warp-striped loads for coalescing, then transposes in shared memory to a blocked arrangement)
    typedef cub::BlockLoad<ull, 512, 4, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoadT;
    // Specialize BlockRadixSort type for our thread block
    typedef cub::BlockRadixSort<ull, 512, 4> BlockRadixSortT;
    // Shared memory: load and sort phases may alias the same storage.
    __shared__ union TempStorage
    {
        typename BlockLoadT::TempStorage        load;
        typename BlockRadixSortT::TempStorage   sort;
    } temp_storage;
    // Per-thread tile items
    ull items[4];
    // NOTE(review): temp_result holds only 2048 entries, so any blockIdxx > 0
    // makes this offset read past the shared buffer; the slice was already
    // selected during the copy above, which suggests this should be 0.
    int block_offset = blockIdxx * TILE_SIZE;
    // Load items into a blocked arrangement
    BlockLoadT(temp_storage.load).Load(temp_result + block_offset, items);
    // Sort keys in descending order across the whole block
    BlockRadixSortT(temp_storage.sort).SortDescending(items);
    // NOTE(review): debugging printf left in the kernel — remove for production.
    printf("%llu %llu %llu %llu", items[0], items[1], items[2], items[3]);
    // Store output in striped fashion
    // cub::StoreDirectStriped<512>(threadIdx.x, d_result + block_offset, items);
    // gpu_topk_impl(temp_result, n_p);
    // __syncthreads();

    // Candidate store path (disabled): copy the top `small_len` keys out.
    // if(threadIdx.x < small_len) {
    //     d_result[threadIdx.x + blockIdxx * TOPK + data_number * result_offset] = temp_result[threadIdx.x];
    // }
    // __syncthreads();
}


// Fill `dst` with `nitems` pseudo-random values in [0, nitems), seeding the
// RNG with a fixed constant so every run is reproducible.
void initialize_data(ull *dst, unsigned int nitems) {
    srand(2047);  // fixed seed for illustration

    for (unsigned int idx = 0; idx < nitems; ++idx) {
        dst[idx] = rand() % nitems;
    }
}

// Copy `n` keys back from device memory and print them to stdout
// (debug/inspection helper).
void print_results(ull *results_d, int n) {
    ull *host_buf = new ull[n];
    GPU_CHECK(cudaMemcpy(host_buf, results_d, n * sizeof(ull), cudaMemcpyDeviceToHost));

    std::cout << "Sort data : \n";
    for (int idx = 0; idx < n; ++idx) {
        std::cout << host_buf[idx] << "\n";
    }
    std::cout << std::endl;

    delete[] host_buf;
}

// Verify the first `n` device results against the host reference `data_h`,
// printing every mismatching index.
// BUGFIX: the message previously said "greater than" for what is an equality
// mismatch, and "OK" was printed unconditionally even when mismatches were
// found; the verdict now reflects the actual outcome.
void check_results(ull *results_d, ull *data_h, int n)
{
    ull *results_h = new ull[n];
    GPU_CHECK(cudaMemcpy(results_h, results_d, n * sizeof(ull), cudaMemcpyDeviceToHost));

    bool all_match = true;
    for (int i = 0; i < n; ++i) {
        if (results_h[i] != data_h[i]) {
            std::cout << "Invalid item[" << i << "]: got " << results_h[i]
                      << ", expected " << data_h[i] << std::endl;
            all_match = false;
        }
    }

    std::cout << (all_match ? "OK" : "FAILED") << std::endl;
    delete[] results_h;
}

// Descending-order comparator for std::sort over the 64-bit packed
// (score << 32 | inverted-doc-id) keys.
// BUGFIX: previously took `const int &`, which silently bound the 64-bit ull
// keys to truncated int temporaries and mis-ordered values above INT_MAX.
bool cmp(const unsigned long long &a, const unsigned long long &b) {
    return a > b;
}

// Main entry point.
// int _main(int argc, char **argv) {
//     // Find/set device and get device properties
//     int device = 0;
//     cudaDeviceProp deviceProp;
//     GPU_CHECK(cudaGetDeviceProperties(&deviceProp, device));
    
//     if (!(deviceProp.major > 3 ||
//           (deviceProp.major == 3 && deviceProp.minor >= 5))) {
//         printf("GPU %d - %s  does not support CUDA Dynamic Parallelism\n Exiting.",
//             device, deviceProp.name);
//         return 0;
//     }
    

//     int num_items = atoi(argv[1]); // 7853052
//     bool verbose = num_items <= 16;


//     // Create input data
//     ull *h_data = 0;
//     ull *d_data = 0;
//     ull *d_result = 0;
//     ull *d_mid_result = 0;

//     // Allocate CPU memory and initialize data.
//     std::cout << "Initializing data:" << std::endl;
//     h_data = (ull *)malloc(num_items * sizeof(ull));
//     initialize_data(h_data, num_items);
//     if (verbose) {
//        std::cout << "Raw  data : \n";
//         for (int i = 0; i < num_items; i++)
//             std::cout << h_data[i] << "\n"; 
//     }

//     // Allocate GPU memory.
//     GPU_CHECK(cudaMalloc((void **)&d_data, num_items * sizeof(ull)));
//     GPU_CHECK(cudaMalloc((void **)&d_result, num_items * sizeof(ull)));
//     GPU_CHECK(cudaMalloc((void **)&d_mid_result, num_items * sizeof(ull)));
    
//     GPU_CHECK(cudaMemcpy(d_data, h_data, num_items * sizeof(ull), cudaMemcpyHostToDevice));
    
//     // Execute
//     std::cout << "Running quicksort on " << num_items << " elements" << std::endl;
    

//     std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();

//     // grid must be no larger than 10
//     int block = N_THREADS_IN_ONE_BLOCK;
//     int group_size = 2048;
//     int temp_grid = (num_items + block - 1) / (block);
//     unsigned int temp_length = num_items;
//     // printf("%d %d\n", temp_grid, block);
//     // std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
//     gpu_topk<<<temp_grid, block>>>(d_data, d_mid_result, temp_length, group_size);
//     cudaDeviceSynchronize();
    
    
//     std::chrono::high_resolution_clock::time_point t3 = std::chrono::high_resolution_clock::now();
//     temp_length = ((temp_grid - (temp_length % group_size == 0 ? 0: 1)) / (group_size / block)) * TOPK + min(temp_length % group_size, TOPK);
//     printf("tmp_length %d\n", temp_length);
//     // print_results(d_mid_result, temp_length);
//     while(temp_length > 1024) {
//         temp_grid = (temp_length + block - 1) / (block);
//         cpu_swap(&d_mid_result, &d_result);
//         // print_results(d_result, temp_length);
//         // std::chrono::high_resolution_clock::time_point t4 = std::chrono::high_resolution_clock::now();
//         gpu_topk<<<temp_grid, block>>>(d_result, d_mid_result, temp_length, group_size);
//         cudaDeviceSynchronize();
//         // std::chrono::high_resolution_clock::time_point t5 = std::chrono::high_resolution_clock::now();

//         temp_length = ((temp_grid - (temp_length % group_size == 0 ? 0: 1)) / (group_size / block)) * TOPK + min(temp_length % group_size, TOPK);
//         // printf("tmp_length %d\n", temp_length);
//         // print_results(d_mid_result, temp_length);
//         // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t5-t4).count()<<std::endl;
//     }

//     // // printf("while finish\n");
//     // // gBitonicSort<<<1, temp_length>>>(d_data, 3);
//     // // std::chrono::high_resolution_clock::time_point t6 = std::chrono::high_resolution_clock::now();
//     gpu_topk<<<1, 1024>>>(d_mid_result, d_result, temp_length, group_size);
//     cudaDeviceSynchronize();
//     // std::chrono::high_resolution_clock::time_point t7 = std::chrono::high_resolution_clock::now();
//     // print_results(d_result, temp_length < TOPK? temp_length : TOPK);

    
//     std::chrono::high_resolution_clock::time_point t8 = std::chrono::high_resolution_clock::now();

//     // std::cout<<"Hello\n";
//     // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()<<std::endl;
//     // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t2-t1).count()<<std::endl;
//     // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t3-t2).count()<<std::endl;
//     // std::cout<<std::chrono::duration_cast<std::chrono::milliseconds>(t7-t6).count()<<std::endl;
//     std::cout<<std::chrono::duration_cast<std::chrono::microseconds>(t8-t1).count()<<std::endl;

//     // print result
//     // print_results(d_result, TOPK);
//     // check result
//     // std::cout << "Validating results: ";
//     std::sort(h_data, h_data + num_items, cmp);
//     check_results(d_result, h_data, temp_length < TOPK? temp_length : TOPK);
//     free(h_data);
//     GPU_CHECK(cudaFree(d_data));

//     return 0;
// }

// /* 
// Compile: nvcc -o quicksort_cuda --gpu-architecture=sm_70 -rdc=true quicksort_cuda.cu
// */