#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>

#define THREADS 32
#define SHARED_SIZE_LIMIT 1024

// 打印数组
// Print a labeled integer array to stdout, space-separated on one line.
void print_arr(int *h, int size,  const char* lab) {
    printf("%s\n", lab);
    int idx = 0;
    while (idx < size) {
        printf("%d ", h[idx]);
        ++idx;
    }
    printf("\n");
}

// 调用thrust库进行排序
// Sort `arr` in ascending order on the GPU using thrust::sort.
// The data round-trips host -> device -> host; `arr` is overwritten in place.
void thrustSort(int *arr, int size){
    // Stage the host data in a device vector.
    thrust::device_vector<int> d_data(size);
    thrust::copy(arr, arr + size, d_data.begin());
    // Sort on the device.
    thrust::sort(d_data.begin(), d_data.end());
    // Bring the sorted result back into the caller's buffer.
    thrust::copy(d_data.begin(), d_data.end(), arr);
}

// CPU版本的串行归并排序
// Merge the two adjacent sorted runs arr[l..m] and arr[m+1..r] (both
// inclusive) into a single sorted run, in place. CPU serial helper used by
// mergeSort(). On allocation failure the range is left unmerged and a
// message is printed.
void merge(int* arr, int l, int m, int r) {
    const int leftLen  = m - l + 1;  // length of the left run  arr[l..m]
    const int rightLen = r - m;      // length of the right run arr[m+1..r]

    // Temporary copies of both runs.
    int* leftBuf  = (int*)malloc(leftLen * sizeof(int));
    int* rightBuf = (int*)malloc(rightLen * sizeof(int));
    if (!leftBuf || !rightBuf) {
        // free(NULL) is a no-op, so this is safe whichever one failed.
        free(leftBuf);
        free(rightBuf);
        printf("malloc error\n");
        return;
    }

    for (int x = 0; x < leftLen; ++x)  leftBuf[x]  = arr[l + x];
    for (int y = 0; y < rightLen; ++y) rightBuf[y] = arr[m + 1 + y];

    // Standard two-pointer merge back into arr[l..r].
    // `<=` on equal keys keeps the merge stable (left run wins).
    int li = 0, ri = 0, out = l;
    while (li < leftLen && ri < rightLen) {
        if (leftBuf[li] <= rightBuf[ri]) {
            arr[out++] = leftBuf[li++];
        } else {
            arr[out++] = rightBuf[ri++];
        }
    }

    // Drain whichever run still has elements.
    while (li < leftLen)  arr[out++] = leftBuf[li++];
    while (ri < rightLen) arr[out++] = rightBuf[ri++];

    free(leftBuf);
    free(rightBuf);
}

// Recursive top-down merge sort over arr[l..r], both bounds inclusive.
void mergeSort(int* arr, int l, int r) {
    if (l >= r) return;  // zero or one element: nothing to sort
    const int mid = l + (r - l) / 2;  // overflow-safe midpoint
    mergeSort(arr, l, mid);
    mergeSort(arr, mid + 1, r);
    merge(arr, l, mid, r);
}

// GPU并行归并排序
__global__ void gpuMerge(int* arr, int* temp, int n, int width) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int start = idx * 2 * width;
    if (start >= n) return;

    int middle = min(start + width, n);
    int end = min(start + 2 * width, n);
    
    // 边界检查
    if (end > n || middle > n) return;

    int i = start, j = middle, k = start;
    while (i < middle && j < end) {
        if (arr[i] <= arr[j]) temp[k++] = arr[i++];
        else temp[k++] = arr[j++];
    }
    while (i < middle) temp[k++] = arr[i++];
    while (j < end) temp[k++] = arr[j++];
    
    __syncthreads();
    for (int m = start; m < end; m++) {
        if (m < n) arr[m] = temp[m]; // 二次边界检查
    }
}

// Host driver for the global-memory GPU merge sort.
// Doubles the run width each pass; same-stream kernel launches execute in
// order, so no per-iteration synchronization is required — the final
// blocking cudaMemcpy synchronizes before results are read.
void mergeSortGPU(int* h_arr, int n) {
    int *d_arr, *d_temp;
    cudaMalloc(&d_arr, n * sizeof(int));
    cudaMalloc(&d_temp, n * sizeof(int));
    cudaMemcpy(d_arr, h_arr, n * sizeof(int), cudaMemcpyHostToDevice);

    for (int width = 1; width < n; width <<= 1) {
        int threads = THREADS;
        // One thread per pair of runs; ceil-divide the pair count by threads.
        int blocks = (n + 2 * width * threads - 1) / (2 * width * threads);
        gpuMerge<<<blocks, threads>>>(d_arr, d_temp, n, width);
        // Launch-configuration errors only surface via cudaGetLastError().
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "gpuMerge launch failed: %s\n", cudaGetErrorString(err));
            break;
        }
    }

    // Blocking copy: implicitly waits for all queued kernels to finish.
    cudaMemcpy(h_arr, d_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_arr); cudaFree(d_temp);
}


// Merge-step kernel, corrected version.
// NOTE(review): the previous "shared memory optimized" path was incorrect:
//   * every thread in a block copied its own segment into shared_mem starting
//     at index 0, so threads in the same block overwrote each other's data;
//   * the host allocated dynamic shared memory for only ONE pair of runs,
//     not blockDim.x of them;
//   * __syncthreads() was placed after a divergent early return and inside a
//     data-dependent branch, which is undefined behavior.
// Each thread now merges its disjoint pair of runs through global `temp`,
// which is correct for any launch configuration and needs no barriers.
// Dynamic shared memory passed at launch is simply unused.
__global__ void gpuMergeOptimized(int* arr, int* temp, int n, int width) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int start = idx * 2 * width;
    if (start >= n) return;  // tail threads: no pair to merge

    int middle = min(start + width, n);   // end of left run (clamped)
    int end = min(start + 2 * width, n);  // end of right run (clamped)

    // Stable two-pointer merge of arr[start..middle) and arr[middle..end).
    int i = start, j = middle, k = start;
    while (i < middle && j < end) {
        temp[k++] = (arr[i] <= arr[j]) ? arr[i++] : arr[j++];
    }
    while (i < middle) temp[k++] = arr[i++];
    while (j < end) temp[k++] = arr[j++];

    // Copy back this thread's own segment; it reads only what it just wrote,
    // so no synchronization is required.
    for (int m = start; m < end; m++) {
        arr[m] = temp[m];
    }
}

// Host driver for gpuMergeOptimized.
// Launches ONE merging thread per block: the kernel's dynamic shared-memory
// buffer is sized for a single pair of runs, so at most one merging thread
// per block may use it — more threads per block would race on that buffer.
// Parallelism comes from the block count (one block per pair of runs).
void mergeSortGPU2(int* h_arr, int n) {
    int *d_arr, *d_temp;
    cudaMalloc(&d_arr, n * sizeof(int));
    cudaMalloc(&d_temp, n * sizeof(int));
    cudaMemcpy(d_arr, h_arr, n * sizeof(int), cudaMemcpyHostToDevice);

    for (int width = 1; width < n; width <<= 1) {
        int pairSpan = 2 * width;                    // elements covered per merged pair
        int blocks = (n + pairSpan - 1) / pairSpan;  // one block (= one thread) per pair
        // Shared buffer for one pair, capped at the kernel's usable limit.
        int smemInts = pairSpan < SHARED_SIZE_LIMIT ? pairSpan : SHARED_SIZE_LIMIT;
        gpuMergeOptimized<<<blocks, 1, smemInts * sizeof(int)>>>(d_arr, d_temp, n, width);
        // Catch launch-configuration errors immediately.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
            fprintf(stderr, "gpuMergeOptimized launch failed: %s\n", cudaGetErrorString(err));
            break;
        }
        // No per-iteration cudaDeviceSynchronize(): same-stream launches
        // serialize, and the final blocking cudaMemcpy synchronizes.
    }

    cudaMemcpy(h_arr, d_arr, n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_arr);
    cudaFree(d_temp);
}



// Benchmark and cross-check three sorts on the same random data:
// CPU recursive merge sort, GPU bottom-up merge sort, and thrust::sort.
// The CPU result is treated as the reference for correctness.
int main() {
    const int N = 1 << 10;
    int* arr = (int*)malloc(N * sizeof(int));    // CPU-sorted copy (reference)
    int* h_arr = (int*)malloc(N * sizeof(int));  // GPU-sorted copy
    int* t_arr = (int*)malloc(N * sizeof(int));  // thrust-sorted copy
    if (!arr || !h_arr || !t_arr) {
        printf("malloc error\n");
        free(arr); free(h_arr); free(t_arr);
        return 1;
    }

    // Generate identical random data for all three runs.
    srand(time(0));
    for (int i = 0; i < N; i++) {
        arr[i] = rand() % 1000000;
        h_arr[i] = arr[i];
        t_arr[i] = arr[i];
    }

    // CPU sort, timed with clock().
    clock_t cpu_start = clock();
    mergeSort(arr, 0, N-1);
    clock_t cpu_end = clock();
    double cpuTime = (double)(cpu_end - cpu_start) / CLOCKS_PER_SEC * 1000;

    // GPU sort, timed with CUDA events. Includes device alloc/copy overhead.
    cudaEvent_t start, stop;
    cudaEventCreate(&start); cudaEventCreate(&stop);
    cudaEventRecord(start);
    mergeSortGPU2(h_arr, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float gpuTime = 0.0f;
    cudaEventElapsedTime(&gpuTime, start, stop);
    // Events are resources; destroy them once the elapsed time is read.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // thrust sort, timed with clock() (includes host<->device transfers).
    clock_t cpu_start2 = clock();
    thrustSort(t_arr, N);
    clock_t cpu_end2 = clock();
    double thrustTime = (double)(cpu_end2 - cpu_start2) / CLOCKS_PER_SEC * 1000;

    // Validate GPU and thrust results against the CPU reference.
    bool gpu_correct = true;
    bool t_correct = true;
    for (int i = 0; i < N; i++) {
        if (arr[i] != h_arr[i]) {
            gpu_correct = false;
        }
        if (arr[i] != t_arr[i]) {
            t_correct = false;
        }
    }

    printf("CPU Time: %.2f ms\n", cpuTime);
    printf("GPU Time: %.2f ms\n", gpuTime);
    printf("THRUST Time: %.2f ms\n", thrustTime);
    printf("GPU Result: %s\n", gpu_correct ? "CORRECT" : "INCORRECT");
    printf("THRUST Result: %s\n", t_correct ? "CORRECT" : "INCORRECT");

    free(arr); free(h_arr); free(t_arr);
    return 0;
}
