#include <cfloat>
#include <ctime>
#include <iostream>
#include <random>

#include <sys/time.h>

#include <cuda_runtime.h>

// thrust sort
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>

const int BLOCK_SIZE = 1024;
const int DATA_ELN = 32 * 1024 * 1024; // 32M elements

// Two-level concatenation so __COUNTER__ is macro-expanded BEFORE pasting.
// BUG FIX: the old `perf_##name##__COUNTER__` pasted the literal token
// "__COUNTER__" into the identifier (operands of ## are not expanded),
// so the counter never produced unique names.
#define PERF_CONCAT_INNER(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_INNER(a, b)

// Scoped timers: declare a Perf / PerfCPU local whose destructor prints
// the elapsed time when the enclosing scope ends.
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)
#define PERF_CPU(name) PerfCPU PERF_CONCAT(perf_CPU_##name##_, __COUNTER__)(#name)

// RAII wall-clock timer: captures the time of day at construction and
// prints the elapsed milliseconds when the object goes out of scope.
class PerfCPU
{
public:
    PerfCPU(const std::string& name) : m_name(name) {
        gettimeofday(&m_start, NULL);
    }

    ~PerfCPU() {
        struct timeval now;
        gettimeofday(&now, NULL);
        const float ms = (now.tv_sec - m_start.tv_sec) * 1000.0 +
                         (now.tv_usec - m_start.tv_usec) / 1000.0;
        std::cout << m_name << " elapse: " << ms << " ms" << std::endl;
    }

private:
    std::string m_name;       // label printed with the timing
    struct timeval m_start;   // timestamp taken at construction
    struct timeval m_end;     // kept for layout/compat; end time is read locally
}; // class PerfCPU

// RAII GPU timer: records a CUDA event at construction and another at
// destruction, then prints the elapsed GPU time in milliseconds.
// Events are recorded on the default stream, so this measures work
// enqueued on stream 0 between construction and destruction.
class Perf
{
public:
    Perf(const std::string& name) {
        m_name = name;
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // BUG FIX: destroy the events — they were previously leaked on
        // every timed scope.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

private:
    std::string m_name;          // label printed with the timing
    cudaEvent_t m_start, m_end;  // GPU timestamps bracketing the scope
}; // class Perf


// Exchanges the two floats pointed to by a and b (device-side helper).
__device__ void swap(float* a, float* b)
{
    const float original_a = *a;
    *a = *b;
    *b = original_a;
}


// Sizing analysis (RTX 4080): each SM can run at most 1536 threads; with
// 256 threads per block that is 1536 / 256 = 6 resident blocks per SM.
// 128 KB shared memory / 6 blocks ≈ 20 KB of shared memory per block.
// 20 * 1024 / 256 / 4 = 20, so each thread can stage up to 20 floats in shared memory.
// 20 / 2 = 10: halved because each thread also needs scratch space for the
// merge, so each thread sorts 10 floats; rounded down to 8 for alignment.
// NOTE(review): not referenced by any kernel in this file — presumably a
// planned per-thread workload size for a multi-element variant; confirm
// before removing.
#define SORT_NUM_PER_THREAD (8)

// Per-block bottom-up merge sort.
// Contract: launch with blockDim.x == BLOCK_SIZE (the shared arrays are
// statically sized to BLOCK_SIZE and blockDim.x must be a power of two).
// Each block independently sorts its own BLOCK_SIZE-element slice of
// `input` and writes the sorted slice to the same range of `output`.
// Lanes past `len` are padded with FLT_MAX so a partial tail block still
// produces a correctly sorted prefix.
__global__ void GPUBlockMergeSort(float* input, float* output, int len)
{
    __shared__ float cache[BLOCK_SIZE];
    __shared__ float compare_cache[BLOCK_SIZE];
    int tid = threadIdx.x;
    int offset = blockIdx.x * blockDim.x + threadIdx.x;

    // Stage this block's slice into shared memory; FLT_MAX padding sorts
    // to the end, so out-of-range lanes never pollute the valid prefix.
    cache[tid] = (offset < len) ? input[offset] : FLT_MAX;
    __syncthreads();

    // Bottom-up merge: at level s, thread tid merges the two adjacent
    // sorted runs of length s starting at tid * 2s. Half as many threads
    // are active at each successive level.
    for (int s = 1; s < blockDim.x; s *= 2) {
        if (tid * s * 2 < blockDim.x) {
            int start = tid * s * 2;
            int left = start;
            int end = start + s * 2 - 1;
            int mid = (start + end) / 2;

            int low = start;
            int high = mid + 1;
            // Standard two-way merge into the scratch buffer.
            while (low <= mid && high <= end) {
                if (cache[low] > cache[high]) {
                    compare_cache[start++] = cache[high++];
                } else {
                    compare_cache[start++] = cache[low++];
                }
            }
            while (low <= mid) {
                compare_cache[start++] = cache[low++];
            }
            while (high <= end) {
                compare_cache[start++] = cache[high++];
            }

            // Copy the merged run back; only this thread touches [left, end].
            for (int i = left; i <= end; ++i) {
                cache[i] = compare_cache[i];
            }
        }
        // BUG FIX: the barrier must sit OUTSIDE the divergent branch —
        // __syncthreads() inside `if (tid ...)` is undefined behavior once
        // some threads of the block no longer reach it.
        __syncthreads();
    }

    // BUG FIX: write to the block's own global slice (output[offset]);
    // the old `output[tid]` made every block clobber slice 0.
    if (offset < len) {
        output[offset] = cache[tid];
    }
}

// Merges the two adjacent sorted runs data[left..mid] and data[mid+1..end]
// (inclusive bounds) back into data, using a temporary buffer of the
// combined length.
void CPUMerge(float* data, int left, int mid, int end)
{
    const int len = end - left + 1;
    float* scratch = new float[len];

    int lo = left;     // cursor in the left run
    int hi = mid + 1;  // cursor in the right run
    int out = 0;       // cursor in scratch

    // Repeatedly take the smaller head element; '<=' keeps the merge stable.
    while (lo <= mid && hi <= end) {
        if (data[lo] <= data[hi]) {
            scratch[out++] = data[lo++];
        } else {
            scratch[out++] = data[hi++];
        }
    }

    // Drain whichever run is not yet exhausted.
    while (lo <= mid) {
        scratch[out++] = data[lo++];
    }
    while (hi <= end) {
        scratch[out++] = data[hi++];
    }

    // Copy the merged result back into place.
    for (int i = 0; i < len; ++i) {
        data[left + i] = scratch[i];
    }

    delete[] scratch;
}

// Recursively sorts data[left..end] (inclusive bounds) ascending via
// top-down merge sort; CPUMerge combines the two sorted halves.
void CPUMergeSort(float* data, int left, int end)
{
    if (end <= left) {
        return;  // zero or one element: already sorted
    }

    const int mid = left + (end - left) / 2;
    CPUMergeSort(data, left, mid);
    CPUMergeSort(data, mid + 1, end);
    CPUMerge(data, left, mid, end);
}

// Sorts data[left..right] (inclusive) ascending with recursive quicksort.
// Partitioning uses the "hole filling" scheme: the first element is the
// pivot, and elements are moved into the hole from alternating ends until
// the cursors meet, at which point the pivot drops into its final slot.
void QuickSortPart(float* data, int left, int right)
{
    if (left >= right) {
        return;  // empty or single-element range
    }

    const int lo = left;
    const int hi = right;
    const float pivot = data[left];  // data[left] is now the "hole"

    while (left < right) {
        // Scan from the right for an element smaller than the pivot.
        while (left < right && data[right] >= pivot) {
            right--;
        }
        data[left] = data[right];  // fill the hole; data[right] becomes the hole
        // Scan from the left for an element larger than the pivot.
        while (left < right && data[left] <= pivot) {
            left++;
        }
        if (left < right) {
            data[right] = data[left];
        }
    }
    data[left] = pivot;  // pivot lands at its sorted position

    QuickSortPart(data, lo, left - 1);
    QuickSortPart(data, left + 1, hi);
}

// Sorts data[0..len-1] ascending in place using recursive quicksort.
// NOTE: QuickSortPart has O(n) recursion depth on already-sorted input,
// so very large pre-sorted arrays risk stack overflow.
// BUG FIX: removed a leftover debug loop that iterated DATA_ELN times
// (ignoring `len`) doing nothing but burning time inside the benchmark.
void CPUInplaceQuickSort(float* data, int len)
{
    if (data == NULL || len <= 1) {
        return;  // nothing to sort
    }
    QuickSortPart(data, 0, len - 1);
}


// Sorts data[0..len-1] ascending in place with bubble sort (O(n^2)).
void CPUInplaceBubbleSort(float* data, int len)
{
    for (int pass = 0; pass + 1 < len; ++pass) {
        // Each pass bubbles the largest remaining element to the end,
        // so the inner scan shrinks by one element per pass.
        for (int k = 0; k + 1 < len - pass; ++k) {
            // Swap adjacent elements that are out of order.
            if (data[k] > data[k + 1]) {
                const float held = data[k];
                data[k] = data[k + 1];
                data[k + 1] = held;
            }
        }
    }
}

// Benchmarks three sorts over 32M random floats: the hand-written
// per-block GPU merge sort, thrust::sort on device, and a CPU merge sort,
// printing timings and sample values from each result.
int main(int argc, char* argv[])
{
    int block_dim = BLOCK_SIZE;
    int grid_dim = (DATA_ELN - 1) / BLOCK_SIZE + 1;  // full-array launch size (kernel below is launched with 1 block)
    (void)grid_dim;

    // host buffers
    float* input = new float[DATA_ELN];
    float* gpu_sort_output = new float[DATA_ELN];

    std::default_random_engine generator;
    std::uniform_real_distribution<float> distribution(1.0, 10000.0);
    for (int i = 0; i < DATA_ELN; ++i) {
        input[i] = distribution(generator);
    }

    // device buffers.
    // BUG FIX: the kernel used to receive the HOST pointer gpu_sort_output
    // as its output argument — an illegal device address. Allocate a real
    // device-side output buffer instead.
    float* d_input = NULL;
    float* d_output = NULL;
    cudaMalloc((void**)&d_input, DATA_ELN * sizeof(float));
    cudaMalloc((void**)&d_output, DATA_ELN * sizeof(float));
    cudaMemcpy(d_input, input, DATA_ELN * sizeof(float), cudaMemcpyHostToDevice);

    {
        PERF(gpu_hand_merge_sort);
        // Single block: the kernel only sorts one block's slice of data.
        GPUBlockMergeSort<<<1, block_dim>>>(d_input, d_output, DATA_ELN);
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            std::cerr << "GPUBlockMergeSort launch failed: "
                      << cudaGetErrorString(err) << std::endl;
        }
        cudaDeviceSynchronize();
        // BUG FIX: copy the kernel's result from d_output; the old code
        // copied d_input, i.e. the UNSORTED data. Only the one launched
        // block's slice is defined, so copy just block_dim elements.
        cudaMemcpy(gpu_sort_output, d_output, block_dim * sizeof(float), cudaMemcpyDeviceToHost);
    }
    std::cout << "gpu hand merge sort result: ";
    for (int i = 0; i < DATA_ELN % 33; ++i) {  // DATA_ELN % 33 == 32 sample values
        std::cout << gpu_sort_output[i] << ", ";
    }
    std::cout << std::endl;
    cudaFree(d_input);
    cudaFree(d_output);

    // thrust sort on device, timed separately from the host<->device copies
    thrust::host_vector<float> thrust_host_input(DATA_ELN);
    for (int i = 0; i < DATA_ELN; ++i) {
        thrust_host_input[i] = input[i];
    }
    thrust::device_vector<float> thrust_device_input = thrust_host_input;
    {
        PERF(thrust_sort);
        thrust::sort(thrust_device_input.begin(), thrust_device_input.end());
        thrust_host_input = thrust_device_input;
    }
    std::cout << "thrust result: " << std::endl;
    for (int i = 0; i < 32; ++i) {
        std::cout << thrust_host_input[i] << ", ";
    }
    std::cout << std::endl;

    // cpu sort (in place: `input` itself is sorted after this block)
    {
        PERF_CPU(cpu_sort_inplace);
        // CPUInplaceBubbleSort(input, DATA_ELN);
        // CPUInplaceQuickSort(input, DATA_ELN);
        CPUMergeSort(input, 0, DATA_ELN - 1);
    }

    std::cout << "cpu result: " << std::endl;
    for (int i = 0; i < DATA_ELN % 33; ++i) {
        std::cout << input[i] << ", ";
    }
    std::cout << std::endl;

    // NOTE(review): the GPU kernel sorts only one block's slice while the
    // CPU sorts the full array, so a mismatch here is expected until the
    // kernel is extended to a multi-block merge.
    std::cout << "gpu result: " << std::endl;
    for (int i = 0; i < DATA_ELN % 33; ++i) {
        if (std::abs(gpu_sort_output[i] - input[i]) > 10e-3) {  // tolerance 10e-3 == 0.01
            std::cout << "error: " << gpu_sort_output[i] << " != " << input[i] << std::endl;
            break;
        }
    }

    delete[] input;
    delete[] gpu_sort_output;
    return 0;
}