#ifndef TOH
#define TOH

#include <chrono>
#include <climits>   // UINT_MAX (score decoding)
#include <cstdint>   // uint16_t
#include <iostream>
#include <iostream>
#include <thread>
#include <typeinfo>
#include <vector>
// #include <algorithm>
#include <sys/time.h>

#include <cuda.h>
#include "cuda_runtime_api.h"
#include "config.cuh"
#include "myBitonic.cuh"
#include "omp.h"

// Worker-thread body: packs a strided share of `docs` into the flat host
// buffer `h_docs` using a grouped ("blocked-transposed") layout, so that the
// later device copy yields coalesced reads of `group_sz` consecutive terms
// per document.
//
// Layout: term j of document i lands at
//   (j / group_sz) * n_docs * group_sz + i * group_sz + (j % group_sz)
// i.e. groups of `group_sz` terms are interleaved across all documents.
//
// Parameters:
//   id       - worker index; this thread handles docs id, id+THREAD_SIZE, ...
//   h_docs   - output buffer, at least MAX_DOC_SIZE * n_docs elements
//   docs     - ragged input documents
//   lens     - per-document term counts (lens[i] == docs[i].size() assumed)
//   group_sz - terms per group (sizeof(group_t) / sizeof(uint16_t))
//   n_docs   - total document count; the last MAIN_HELP_ME docs are left for
//              the main thread to fill (see caller).
//
// FIX: the group width was hard-coded as the literal 8 in three places even
// though `group_sz` is a parameter; it now honors `group_sz` (identical
// behavior for the actual call site, where group_sz == 8). Also dropped the
// C++17-ill-formed `register` keyword.
void initNumber(const int &id, uint16_t *h_docs, const std::vector<std::vector<uint16_t>> &docs,
    const std::vector<uint16_t> &lens, const int group_sz, const int n_docs) {

    int begin_offset = 0, temp_offset = group_sz;
#pragma unroll
    for (int i = id; i < n_docs - MAIN_HELP_ME; i += THREAD_SIZE) {
        // Start each document "at a group boundary" so the first term
        // recomputes begin_offset in the else-branch below.
        begin_offset = 0, temp_offset = group_sz;
        for (int j = 0; j < lens[i]; ++j) {
            if (temp_offset < group_sz) {
                // Still inside the current group: write sequentially.
                h_docs[begin_offset + temp_offset] = docs[i][j];
                ++temp_offset;
            } else {
                // Group boundary: recompute the group's base offset.
                begin_offset = j / group_sz * n_docs * group_sz + i * group_sz + j % group_sz;
                h_docs[begin_offset] = docs[i][j];
                temp_offset = 1;
            }
        }
    }
}

// Scores every query against every document on the GPU and returns, per
// query, the indices of the TOPK best documents.
//
// Parameters:
//   querys  - query term lists; NOTE: padded in place with dummy {1} queries
//             up to a multiple of the batch size (pre-existing side effect,
//             kept for caller compatibility)
//   docs    - document term lists
//   lens    - per-document term counts
//   indices - output, one TOPK-sized index vector appended per real query
//
// Pipeline: pack docs into a grouped host buffer on worker threads, upload
// docs/queries, then for each batch launch the scoring kernel followed by an
// iterative gpu_topk reduction on `stream_lens` concurrent streams, and
// finally decode the packed (score|index) results on the host.
void doc_query_scoring_gpu_function(std::vector<std::vector<uint16_t>> &querys,
    std::vector<std::vector<uint16_t>> &docs,
    std::vector<uint16_t> &lens,
    std::vector<std::vector<int>> &indices //shape [querys.size(), TOPK]
    ) {
    // Basic declarations (1). `register` dropped: ill-formed in C++17.
    const int group_size = 2048, block = N_THREADS_IN_ONE_BLOCK, n_docs = docs.size(), querys_len = querys.size();
    size_t pitch;
    std::thread th[THREAD_SIZE + 1];

    // Fill h_docs concurrently on worker threads (main thread helps below).
    // FIX: threads are kept joinable and joined before cudaMemcpy2D reads
    // h_docs; previously they were detach()ed, so the copy raced with the
    // writers and could upload half-initialized data.
    uint16_t *h_docs = new uint16_t[MAX_DOC_SIZE * n_docs];
    for (int i = 0; i < THREAD_SIZE; ++i) {
        th[i] = std::thread(initNumber, i, h_docs, std::ref(docs), std::ref(lens), sizeof(group_t) / sizeof(uint16_t), n_docs);
    }

    // Device buffers. (d_mid_result removed: it was never allocated or used.)
    uint16_t *d_docs, *d_querys, *d_doc_lens;
    ull *index_and_score, *d_result;
    cudaMalloc((void **)&index_and_score, sizeof(ull) * n_docs * BATCH_SIZE * stream_lens);
    cudaMalloc((void **)&d_result, sizeof(ull) * TOPK * n_docs);

    // Non-blocking streams so batches can overlap.
    cudaStream_t stream[stream_lens];
    for (int i = 0; i < stream_lens; ++i)
        cudaStreamCreateWithFlags(&stream[i], cudaStreamNonBlocking);

    // Select the GPU (device 0 is already the default, so the allocations
    // above landed on the right device).
    cudaDeviceProp device_props;
    cudaGetDeviceProperties(&device_props, 0);
    cudaSetDevice(0);

    // Upload per-document lengths.
    cudaMalloc(&d_doc_lens, sizeof(uint16_t) * n_docs);
    cudaMemcpy(d_doc_lens, lens.data(), sizeof(uint16_t) * n_docs, cudaMemcpyHostToDevice);

    // Basic declarations (2): round the query count up to a whole number of
    // batches (real_batch = queries processed per outer iteration).
    const int real_batch = BATCH_SIZE * stream_lens;
    const int cnt_2_real_bacth = (real_batch - querys_len % real_batch) % real_batch;
    const int change_len = querys_len + cnt_2_real_bacth;

    // Build h_querys (zero-initialized) and upload it.
    // NOTE(review): assumes change_len <= GUESS_QUERY_LEN — verify upstream.
    uint16_t *h_querys = new uint16_t[MAX_QUERY_SIZE * GUESS_QUERY_LEN]();
#pragma unroll
    for (int i = 0; i < querys_len; ++i) {
        for (size_t j = 0; j < querys[i].size(); ++j) {
            h_querys[i * MAX_QUERY_SIZE + j] = querys[i][j];
        }
    }

    // Pad with dummy single-term queries so every batch is full.
    for (int i = querys_len; i < change_len; ++i) {
        std::vector<uint16_t> k; k.push_back(1); querys.push_back(k);
        h_querys[i * MAX_QUERY_SIZE + 0] = 1;
    }

    cudaMalloc(&d_querys, sizeof(uint16_t) * MAX_QUERY_SIZE * GUESS_QUERY_LEN);
    // FIX: copy change_len rows (was querys_len), so the padding queries the
    // kernels read are initialized on the device instead of being garbage.
    cudaMemcpy(d_querys, h_querys, sizeof(uint16_t) * change_len * MAX_QUERY_SIZE, cudaMemcpyHostToDevice);

    // Basic declarations (3).
    int grid = (n_docs + block - 1) / block;
    int temp_grid, temp_length, real_grid_for_one;
    ull *h_final_results = new ull[TOPK * querys_len];
    std::vector<int> h_results(TOPK);

    // Main thread fills the tail of h_docs (the last MAIN_HELP_ME docs) —
    // same grouped layout as initNumber.
    int begin_offset = 0, temp_offset = 8, group_sz = 8;
#pragma unroll
    for (int i = max(0, n_docs - MAIN_HELP_ME); i < n_docs; i++) {
        begin_offset = 0, temp_offset = 8;
        for (int j = 0; j < lens[i]; ++j) {
            if (temp_offset < 8) {
                h_docs[begin_offset + temp_offset] = docs[i][j];
                ++temp_offset;
            } else {
                begin_offset = j / group_sz * n_docs * group_sz + i * group_sz + j % group_sz;
                h_docs[begin_offset] = docs[i][j];
                temp_offset = 1;
            }
        }
    }

    cudaMallocPitch((void **)&d_docs, &pitch, sizeof(uint16_t) * MAX_DOC_SIZE, n_docs);

    // FIX: wait for the worker threads before uploading h_docs (see above).
    for (int i = 0; i < THREAD_SIZE; ++i)
        th[i].join();

    // NOTE(review): the "*2 width / half the rows" trick assumes
    // pitch == 2 * sizeof(uint16_t) * MAX_DOC_SIZE so two logical rows fit
    // one pitched row — confirm against MAX_DOC_SIZE and the device's pitch
    // alignment.
    cudaMemcpy2D(d_docs, pitch, h_docs, sizeof(uint16_t) * MAX_DOC_SIZE * 2, sizeof(uint16_t) * MAX_DOC_SIZE * 2, n_docs / 2, cudaMemcpyHostToDevice);

    std::cout.flush();

    // Batched scoring + iterative top-k reduction. Each stream handles
    // BATCH_SIZE (= 7, per the kernel signature) queries per iteration.
#pragma unroll
    for (int i = 0; i < change_len; i += real_batch) {
        for (int j = 0; j < stream_lens; ++j) {
            docQueryScoringCoalescedMemoryAccessSampleKernel<<<grid, block, 0, stream[j]>>>(
                d_docs, d_doc_lens, n_docs,
                d_querys + ((i + 0 + j * BATCH_SIZE) * MAX_QUERY_SIZE), d_querys + ((i + 1 + j * BATCH_SIZE) * MAX_QUERY_SIZE),
                d_querys + ((i + 2 + j * BATCH_SIZE) * MAX_QUERY_SIZE), d_querys + ((i + 3 + j * BATCH_SIZE) * MAX_QUERY_SIZE),
                d_querys + ((i + 4 + j * BATCH_SIZE) * MAX_QUERY_SIZE), d_querys + ((i + 5 + j * BATCH_SIZE) * MAX_QUERY_SIZE),
                d_querys + ((i + 6 + j * BATCH_SIZE) * MAX_QUERY_SIZE),
                querys[i + 0 + j * BATCH_SIZE].size(), querys[i + 1 + j * BATCH_SIZE].size(), querys[i + 2 + j * BATCH_SIZE].size(),
                querys[i + 3 + j * BATCH_SIZE].size(), querys[i + 4 + j * BATCH_SIZE].size(), querys[i + 5 + j * BATCH_SIZE].size(),
                querys[i + 6 + j * BATCH_SIZE].size(),
                index_and_score + j * BATCH_SIZE * n_docs);
        }

        // First reduction pass over all n_docs candidates.
        temp_grid = (n_docs + block - 1) / (block);
        temp_length = n_docs;
        real_grid_for_one = (temp_length + group_size - 1) / group_size;

        for (int j = 0; j < stream_lens; ++j) {
            gpu_topk<<<real_grid_for_one * BATCH_SIZE, block, 0, stream[j]>>>(
                index_and_score + j * BATCH_SIZE * n_docs, temp_length,
                n_docs, n_docs, real_grid_for_one,
                index_and_score + j * BATCH_SIZE * n_docs, group_size);
        }

        // Shrink the candidate set until one block can finish it.
        temp_length = ((temp_grid - (temp_length % group_size == 0 ? 0 : 1)) / (group_size / block)) * TOPK + min(temp_length % group_size, TOPK);
        while (temp_length > 2048) {
            temp_grid = (temp_length + block - 1) / (block);
            real_grid_for_one = (temp_length + group_size - 1) / group_size;

            for (int j = 0; j < stream_lens; ++j) {
                gpu_topk<<<real_grid_for_one * BATCH_SIZE, block, 0, stream[j]>>>(
                    index_and_score + j * BATCH_SIZE * n_docs, temp_length,
                    n_docs, n_docs, real_grid_for_one,
                    index_and_score + j * BATCH_SIZE * n_docs, group_size);
            }

            temp_length = ((temp_grid - (temp_length % group_size == 0 ? 0 : 1)) / (group_size / block)) * TOPK + min(temp_length % group_size, TOPK);
        }

        // Final pass writes the per-query TOPK into d_result.
        for (int j = 0; j < stream_lens; ++j) {
            gpu_topk<<<BATCH_SIZE, block, 0, stream[j]>>>(
                index_and_score + j * BATCH_SIZE * n_docs, temp_length,
                n_docs, TOPK, 1,
                d_result + (i + j * BATCH_SIZE) * TOPK, group_size);
        }
    }

    // FIX: the results were fetched with cudaMemcpyAsync on stream[1] — which
    // only orders against stream 1's kernels, not the others — and then read
    // on the host with no synchronization at all. Drain every stream, then do
    // a blocking copy.
    cudaDeviceSynchronize();
    cudaMemcpy(h_final_results, d_result, sizeof(ull) * TOPK * querys_len, cudaMemcpyDeviceToHost);

    // Decode packed results: high bits carry the (inverted) score, low 32
    // bits the document index; UINT_MAX - low32 recovers the index.
    for (int i = 0; i < querys_len; ++i) {
        for (int j = 0; j < TOPK; ++j) {
            h_results[j] = UINT_MAX - ((uint)h_final_results[i * TOPK + j]);
        }
        indices.push_back(h_results);
    }

    // FIX: release everything — all frees were commented out (and the
    // commented code paired new[] with free()).
    for (int i = 0; i < stream_lens; ++i)
        cudaStreamDestroy(stream[i]);
    cudaFree(d_docs);
    cudaFree(d_querys);
    cudaFree(d_result);
    cudaFree(index_and_score);
    cudaFree(d_doc_lens);
    delete[] h_querys;
    delete[] h_docs;
    delete[] h_final_results;
}

#endif