// #pragma once
#include <cstdio>
#include <iostream>
#include "config.cuh"
#include "cuda_runtime_api.h"
#include <sys/time.h>
#include <typeinfo>
#include <algorithm>
#include <chrono>
// #include <thrust/extrema.h>

// Wraps a CUDA runtime call and aborts the process with a diagnostic
// (file, line, error code and error text) when the call does not return
// cudaSuccess.  Diagnostics go to stderr (the original printed to stdout,
// which mixes error output with program output and may be lost on crash
// due to buffering).  The do/while(0) wrapper makes the macro behave as a
// single statement after an unbraced if/else.
#define GPU_CHECK(call)                                       \
do                                                            \
{                                                             \
    const cudaError_t error_code = call;                      \
    if (error_code != cudaSuccess)                            \
    {                                                         \
        fprintf(stderr, "CUDA Error:\n");                     \
        fprintf(stderr, "    File:       %s\n", __FILE__);    \
        fprintf(stderr, "    Line:       %d\n", __LINE__);    \
        fprintf(stderr, "    Error code: %d\n", error_code);  \
        fprintf(stderr, "    Error text: %s\n",               \
            cudaGetErrorString(error_code));                  \
        exit(1);                                              \
    }                                                         \
} while (0)

// template <typename T>
// Exchanges the two ull values pointed to by a and b.
// Device-side helper used by the bitonic sort's compare/exchange step.
__forceinline__ __device__ void gpu_swap(ull* a, ull* b) {
    ull tmp = *a;
    *a = *b;
    *b = tmp;
}

// Swaps two pointers of any type T on the host (e.g. to exchange
// double-buffered device pointers between iterations).
template <typename T>
void cpu_swap(T** a, T** b) {
    T* tmp = *b;
    *b = *a;
    *a = tmp;
}

// Returns the smallest exponent p such that (1u << p) >= v, i.e.
// ceil(log2(v)); returns 0 for v == 0 or v == 1 and 32 for v > 2^31.
// The p < 32 guard keeps the shift well-defined and makes the loop
// terminate: the original version left-shifted an accumulator until it
// reached v, so for v > 2^31 the accumulator wrapped to 0 and the loop
// never ended.
inline unsigned int cpu_dtNextPow2_p(unsigned int v) {
    unsigned int p = 0;
    while (p < 32 && (1u << p) < v) ++p;
    return p;
}

// Device version of cpu_dtNextPow2_p: smallest exponent p such that
// (1u << p) >= v, i.e. ceil(log2(v)); 0 for v <= 1, 32 for v > 2^31.
// The p < 32 guard keeps the shift well-defined and makes the loop
// terminate even for v > 2^31 (the original's accumulator wrapped to 0
// and looped forever for such inputs).
__forceinline__ __device__ unsigned int gpu_dtNextPow2_p(unsigned int v) {
    unsigned int p = 0;
    while (p < 32 && (1u << p) < v) ++p;
    return p;
}

// Rounds v up to the next power of two using the classic bit-smearing
// trick: decrement, OR-propagate the highest set bit into every lower
// position (shifts 1, 2, 4, 8, 16), then increment.  v == 0 yields 0
// (the decrement wraps to all-ones and the final increment overflows),
// matching the usual behavior of this idiom.
__forceinline__ __device__ unsigned int gpu_dtNextPow2(unsigned int v)
{
    --v;
    for (unsigned int shift = 1; shift < 32; shift <<= 1) {
        v |= v >> shift;
    }
    return v + 1;
}

// Scores every document against 7 queries in a single pass using shared
// memory bitmaps.  Each query's terms are scattered into its own bitset
// of 1600 uint words (1600 * 32 = 51200 bits); the 7 bitsets live back to
// back in query_on_shm (7 * 1600 = 11200 words).  Each thread then walks
// one document's terms (loaded as group_t chunks of 8 uint16 values,
// stored column-major across n_docs for coalescing) and counts, per
// query, how many of its terms have their bit set.  The score is packed
// together with the doc index into a 64-bit key in index_and_score so a
// later sort yields the top-k documents per query.
// NOTE(review): term ids are assumed to be < 51200 and doc lengths <= 128
// (16 groups of 8 uint16) — confirm against config.cuh.
void __global__ docQueryScoringCoalescedMemoryAccessSampleKernel(
        const __restrict__ uint16_t *docs, 
        const __restrict__ uint16_t *doc_lens, const size_t n_docs, 
        const __restrict__ uint16_t *query_1, const  __restrict__ uint16_t *query_2, 
        const __restrict__ uint16_t *query_3, const  __restrict__ uint16_t *query_4,
        const __restrict__ uint16_t *query_5, const  __restrict__ uint16_t *query_6,
        const __restrict__ uint16_t *query_7, 
        const int query_1_len, const int query_2_len, const int query_3_len, 
        const int query_4_len, const int query_5_len, const int query_6_len,
        const int query_7_len,
        ull __restrict__ *index_and_score) {
    // each thread process one doc-query pair scoring task
    register uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    // register uint tnum = gridDim.x * blockDim.x;
    
    // printf("aa");
    // register auto a_jump = min((size_t)(n_docs - blockIdx.x * blockDim.x), (size_t)blockDim.x);
    
    // 7 query bitmaps of 1600 words each, laid out consecutively.
    const int shm_size = 11200;
    __shared__ uint query_on_shm[shm_size];
    
    // Cooperatively clear the whole bitmap before populating it
    // (shared memory is uninitialized).
#pragma unroll
    for (register auto i = threadIdx.x; i < shm_size; i += blockDim.x) {
        query_on_shm[i] = 0; 
        // atomicExch(&query_on_shm[i], 0);
    }

    register group_t loaded; register uint16_t *doc_segment;

    // NOTE(review): this early return sits BEFORE the __syncthreads()
    // calls below; if n_docs is not a multiple of blockDim.x the tail
    // block reaches the barriers with fewer threads, which is undefined
    // behavior — confirm the launch configuration guarantees whole
    // blocks satisfy tid < n_docs.
    if(tid >= n_docs) return;

    __syncthreads();

    // Each in-range thread loads one 8-term group of each query and sets
    // the corresponding bits in that query's shared bitmap (word index =
    // term / 32, bit = term % 32; atomicOr because threads may touch the
    // same word).  Original note (translated): the grid is assumed to
    // have enough threads to cover each query, but if doc_size changes
    // this may no longer hold.
    // register int temp_query;
    if(threadIdx.x * 8< query_1_len) {
        loaded = ((group_t *)query_1)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8< query_2_len) {
        loaded = ((group_t *)query_2)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[1600 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[1600 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8< query_3_len) {
        loaded = ((group_t *)query_3)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[3200 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[3200 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8< query_4_len) {
        loaded = ((group_t *)query_4)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[4800 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[4800 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8< query_5_len) {
        loaded = ((group_t *)query_5)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[6400 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[6400 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8< query_6_len) {
        loaded = ((group_t *)query_6)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[8000 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[8000 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    if(threadIdx.x * 8< query_7_len) {
        loaded = ((group_t *)query_7)[threadIdx.x];
        doc_segment = (uint16_t*)(&loaded); 
        atomicOr(&query_on_shm[9600 + doc_segment[0] / 32], (1u << (doc_segment[0] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[1] / 32], (1u << (doc_segment[1] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[2] / 32], (1u << (doc_segment[2] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[3] / 32], (1u << (doc_segment[3] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[4] / 32], (1u << (doc_segment[4] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[5] / 32], (1u << (doc_segment[5] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[6] / 32], (1u << (doc_segment[6] % 32)));
        atomicOr(&query_on_shm[9600 + doc_segment[7] / 32], (1u << (doc_segment[7] % 32)));
    }
    // printf("bb\n");
    __syncthreads();

    // register int i_bound = MAX_DOC_SIZE / (sizeof(group_t) / sizeof(uint16_t));
    // register int j_bound = sizeof(group_t) / sizeof(uint16_t);
    register float tmp_score_1, tmp_score_2, tmp_score_3, tmp_score_4, tmp_score_5, tmp_score_6, tmp_score_7;
    register ull _, _ii;       // _: packed score; _ii: inverted doc index
    register uint _00, _000;   // _00: bit mask of a term; _000: word index of a term
    register uint16_t _doc_len;
    // register float  = 0.;
    // register float ;
    // printf("cc\n");
    // NOTE(review): the stride equals n_docs, so this loop body executes
    // at most once per thread (for ii == tid); kept as written.
    for(register auto ii = tid; ii < n_docs; ii += n_docs) {
        tmp_score_1 = tmp_score_2 = tmp_score_3 = tmp_score_4 = tmp_score_5 = tmp_score_6 = tmp_score_7 = 0.0;
        _doc_len = doc_lens[ii];
        // Walk the document's terms 8 at a time; docs is laid out so that
        // group i of doc ii sits at [i * n_docs + ii] (coalesced across
        // threads).  Stop as soon as the term position reaches _doc_len.
        // For each term, test its bit in all 7 query bitmaps and
        // accumulate hit counts.
#pragma unroll
        for (register auto i = 0; i < 16; ++i) {
            loaded = ((group_t *)docs)[i * n_docs + ii];
            doc_segment = (uint16_t*)(&loaded); 
            if(i * 8 + 0 == _doc_len) break;
            _00 = (1u << (doc_segment[0] % 32)); _000 = doc_segment[0] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0); 
            
            if(i * 8 + 1 == _doc_len) break;
            _00 = (1u << (doc_segment[1] % 32)); _000 = doc_segment[1] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(i * 8 + 2 == _doc_len) break;
            _00 = (1u << (doc_segment[2] % 32)); _000 = doc_segment[2] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(i * 8 + 3 == _doc_len) break;
            _00 = (1u << (doc_segment[3] % 32)); _000 = doc_segment[3] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(i * 8 + 4 == _doc_len) break;
            _00 = (1u << (doc_segment[4] % 32)); _000 = doc_segment[4] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(i * 8 + 5 == _doc_len) break;
            _00 = (1u << (doc_segment[5] % 32)); _000 = doc_segment[5] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(i * 8 + 6 == _doc_len) break;
            _00 = (1u << (doc_segment[6] % 32)); _000 = doc_segment[6] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
            
            if(i * 8 + 7 == _doc_len) break;
            _00 = (1u << (doc_segment[7] % 32)); _000 = doc_segment[7] / 32;
            tmp_score_1 += (((query_on_shm[_000]) & _00) > 0); tmp_score_2 += (((query_on_shm[1600 + _000]) & _00) > 0); tmp_score_3 += (((query_on_shm[3200 + _000]) & _00) > 0); tmp_score_4 += (((query_on_shm[4800 + _000]) & _00) > 0); tmp_score_5 += (((query_on_shm[6400 + _000]) & _00) > 0); tmp_score_6 += (((query_on_shm[8000 + _000]) & _00) > 0); tmp_score_7 += (((query_on_shm[9600 + _000]) & _00) > 0);
        }
        // Pack each result as a 64-bit key: the score (normalized by
        // max(query_len, doc_len), scaled by 65536 and truncated) in the
        // upper 32 bits and UINT_MAX - ii in the lower 32 bits, so a
        // descending sort prefers higher scores and, on ties, lower
        // document indices.  Results for query q go to
        // index_and_score[(q - 1) * n_docs + ii].
        _ii = UINT_MAX - ii;
        _ = (tmp_score_1 / max(query_1_len, _doc_len) * 65536);
        index_and_score[ii] = (_ << 32)  + _ii;

        // store result for query 2
        _ = (tmp_score_2 / max(query_2_len, _doc_len) * 65536);
        index_and_score[ii + n_docs] = (_ << 32)  + _ii;

        // store result for query 3
        _ = (tmp_score_3 / max(query_3_len, _doc_len) * 65536);
        index_and_score[ii + 2 * n_docs] = (_ << 32)  + _ii;

        // store result for query 4
        _ = (tmp_score_4 / max(query_4_len, _doc_len) * 65536);
        index_and_score[ii + 3 * n_docs] = (_ << 32)  + _ii;

        // store result for query 5
        _ = (tmp_score_5 / max(query_5_len, _doc_len) * 65536);
        index_and_score[ii + 4 * n_docs] = (_ << 32)  + _ii;

        // store result for query 6
        _ = (tmp_score_6 / max(query_6_len, _doc_len) * 65536);
        index_and_score[ii + 5 * n_docs] = (_ << 32)  + _ii;

        // store result for query 7
        _ = (tmp_score_7 / max(query_7_len, _doc_len) * 65536);
        index_and_score[ii + 6 * n_docs] = (_ << 32)  + _ii;
    }
}


// Bitonic sort over 2^n_p ull keys held in `data` (expected to be shared
// memory), performed cooperatively by all threads of the calling block.
// Original note (translated): n_p may be 0, in which case nothing is
// sorted (2^0 = 1 element).
// NOTE(review): contains __syncthreads(), so every thread of the block
// must call this function together (no divergent callers).
// NOTE(review): for n_p == 0, `hn = 1 << (n_p - 1)` shifts by -1
// (undefined behavior), although the value is never used because the
// outer loop does not run — worth tightening.
__forceinline__ __device__ void gpu_topk_impl(ull* data, const int n_p) {
    register int stride_p, half_stride_p, s_p, hs_p, hs, i, j, k, hn;
    // register bool orange;
    hn = 1 << (n_p - 1);
    half_stride_p = 0;

    // Standard bitonic pattern: for each stage stride_p, sweep the
    // compare/exchange distance down from 2^(stride_p-1) to 1, with a
    // block-wide barrier after every sweep.
    for (stride_p = 1; stride_p <= n_p; ++stride_p) {
        s_p = stride_p;
        while (s_p >= 1) {
            hs_p = s_p - 1;
            hs = 1 << hs_p;
            // hn = 2^(n_p-1) compare/exchange pairs per sweep, strided
            // across the block's threads.
            for (i = threadIdx.x; i < hn; i += blockDim.x) {
                // orange = (i >> half_stride_p) % 2 == 0;
                // j and k are the two partner slots of this exchange.
                j = ((i >> hs_p) << s_p) + (i % hs);
                k = j + hs;
                // Exchange direction alternates between sub-sequences,
                // selected by bit half_stride_p of the pair index.
                if (((((i >> half_stride_p) & 1) == 0) ^ (data[j] > data[k]))) {
                    gpu_swap(&data[k], &data[j]);
                }
            }
            __syncthreads();
            s_p = hs_p;
        }
        ++half_stride_p;
    }
}


// Block-wise top-k: each block stages one `group_size` slice of one data
// row into shared memory, pads it up to the next power of two, sorts it
// with the bitonic sort above, and writes the first min(slice_len, TOPK)
// sorted keys to d_result.
// Original notes (translated): it is assumed the blocks covering one row
// (grid_offset of them) span `length`; blocks 0..K-1 handle the first
// row, and so on.
// Grid layout: blockIdx.x = data_number * grid_offset + slice_index.
// NOTE(review): temp_result holds 2048 entries, so the padded length n
// (next power of two of the slice length) must be <= 2048 — confirm
// group_size against the launch configuration; larger values would write
// out of bounds in the padding loop.
__global__ void gpu_topk(
    const __restrict__ ull *data, const unsigned int length, 
    const int data_offset, const size_t result_offset, const int grid_offset,
    __restrict__ ull *d_result, const unsigned int group_size) {
    
    __shared__ ull temp_result[2048]; // 1024 * 16 * 3
    register auto blockIdxx = blockIdx.x % grid_offset; // which slice of the row this block handles
    register auto data_number = blockIdx.x / grid_offset; // which data row this block handles
    // Last slice of a row may be shorter than group_size.
    register auto temp_result_length = min(group_size, length - blockIdxx * group_size);
    
    register unsigned int n_p = gpu_dtNextPow2_p(temp_result_length);
    register unsigned int n = gpu_dtNextPow2(temp_result_length);
    
    register auto small_len = min(temp_result_length, TOPK);


    // Stage this block's slice into shared memory, strided over threads.
#pragma unroll
    for (register auto i = threadIdx.x; i < temp_result_length; i += blockDim.x) {
        temp_result[i] = data[i + blockIdxx * group_size + data_number * data_offset];  
    }


    // Zero-fill the padding up to the power-of-two length n.  (The
    // original comment said -1, but the code writes 0.)
#pragma unroll
    for (register auto i = temp_result_length + threadIdx.x; i < n; i += blockDim.x) {
        temp_result[i] = 0;
    }
        
    __syncthreads();
    gpu_topk_impl(temp_result, n_p);



    // The first small_len threads write this block's top candidates to
    // its TOPK-wide slot in d_result.
    if(threadIdx.x < small_len) {
        d_result[threadIdx.x + blockIdxx * TOPK + data_number * result_offset] = temp_result[threadIdx.x];
    }
}