#include <cuda_runtime.h>

#include <cstdlib>
#include <iostream>

#define ARRAY_SIZE 1024
#define BIN_COUNT 256
#define THREADS_PER_BLOCK 256

// 并行直方图计算的CUDA内核函数
__global__ void histogramKernel(unsigned int *d_data, unsigned int *d_bins, int dataSize)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (tid < dataSize)
    {
        // 使用原子操作更新直方图
        atomicAdd(&(d_bins[d_data[tid]]), 1);
    }
}

// Abort with a readable message if a CUDA runtime call failed.
// Note: kernel launches return no status directly — launch errors must be
// fetched with cudaGetLastError(), and asynchronous execution errors surface
// at the next synchronizing call (here, cudaDeviceSynchronize / cudaMemcpy).
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess)
    {
        std::cerr << "CUDA error in " << what << ": "
                  << cudaGetErrorString(err) << std::endl;
        std::exit(EXIT_FAILURE);
    }
}

int main()
{
    unsigned int h_data[ARRAY_SIZE];
    unsigned int h_bins[BIN_COUNT];

    // Initialize input with values in [0, BIN_COUNT) so every element is a
    // valid bin index for the kernel. rand() is unseeded on purpose: the run
    // is deterministic and reproducible.
    for (int i = 0; i < ARRAY_SIZE; ++i)
    {
        h_data[i] = rand() % BIN_COUNT;
    }

    // Zero the histogram; the kernel only increments, never resets.
    for (int i = 0; i < BIN_COUNT; ++i)
    {
        h_bins[i] = 0;
    }

    unsigned int *d_data, *d_bins;

    // Device-side allocations.
    checkCuda(cudaMalloc((void **)&d_data, ARRAY_SIZE * sizeof(unsigned int)),
              "cudaMalloc d_data");
    checkCuda(cudaMalloc((void **)&d_bins, BIN_COUNT * sizeof(unsigned int)),
              "cudaMalloc d_bins");

    // Copy input and the zeroed histogram host -> device.
    checkCuda(cudaMemcpy(d_data, h_data, ARRAY_SIZE * sizeof(unsigned int),
                         cudaMemcpyHostToDevice),
              "cudaMemcpy h_data -> d_data");
    checkCuda(cudaMemcpy(d_bins, h_bins, BIN_COUNT * sizeof(unsigned int),
                         cudaMemcpyHostToDevice),
              "cudaMemcpy h_bins -> d_bins");

    // Ceil-division so the grid covers all ARRAY_SIZE elements even when it
    // is not a multiple of THREADS_PER_BLOCK.
    int blocksPerGrid = (ARRAY_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;

    // Launch the kernel, then check for launch-configuration errors
    // (cudaGetLastError) and execution errors (cudaDeviceSynchronize).
    histogramKernel<<<blocksPerGrid, THREADS_PER_BLOCK>>>(d_data, d_bins, ARRAY_SIZE);
    checkCuda(cudaGetLastError(), "histogramKernel launch");
    checkCuda(cudaDeviceSynchronize(), "histogramKernel execution");

    // Copy the finished histogram device -> host.
    checkCuda(cudaMemcpy(h_bins, d_bins, BIN_COUNT * sizeof(unsigned int),
                         cudaMemcpyDeviceToHost),
              "cudaMemcpy d_bins -> h_bins");

    // Print every bin's count.
    for (int i = 0; i < BIN_COUNT; ++i)
    {
        std::cout << "Bin " << i << ": " << h_bins[i] << std::endl;
    }

    // Release device memory.
    checkCuda(cudaFree(d_data), "cudaFree d_data");
    checkCuda(cudaFree(d_bins), "cudaFree d_bins");

    return 0;
}
