/* Udacity HW5
   Histogramming for Speed

   The goal of this assignment is compute a histogram
   as fast as possible.  We have simplified the problem as much as
   possible to allow you to focus solely on the histogramming algorithm.

   The input values that you need to histogram are already the exact
   bins that need to be updated.  This is unlike in HW3 where you needed
   to compute the range of the data and then do:
   bin = (val - valMin) / valRange to determine the bin.

   Here the bin is just:
   bin = val

   so the serial histogram calculation looks like:
   for (i = 0; i < numElems; ++i)
     histo[val[i]]++;

   That's it!  Your job is to make it run as fast as possible!

   The values are normally distributed - you may take
   advantage of this fact in your implementation.

*/

#include "utils.h"
// If numBins is small enough to fit in shared memory (e.g. <= 1024): build one
// per-block histogram in shared memory, then atomically merge it into global memory.

const int THREADS =  1024;

// Baseline histogram: every thread issues one atomicAdd straight into global
// memory. Expects a 1-D launch with at least numVals total threads; each input
// value is already the bin index to increment.
__global__ void naiveHisto(const unsigned int* const vals, unsigned int* const histo, int numVals){
	int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid < numVals) {
		atomicAdd(&histo[vals[gid]], 1);
	}
}

// Reduce the rate of global-memory atomics to improve histogram throughput:
// each block first accumulates a local histogram in shared memory, then merges
// it into the global histogram with one atomic per bin — the classic
// "reduce-local-then-global" optimization.
// Per-block shared-memory histogram, merged into the global histogram.
// Launch: 1-D grid covering numVals threads; dynamic shared memory must be
// numBins * sizeof(unsigned int).
//
// BUG FIX: the original did `if (globalid >= numVals) return;` BEFORE the
// second __syncthreads(). In the last partial block that is a divergent
// barrier (undefined behavior), and the returned threads also skipped the
// write-back loop, silently dropping every shared bin whose index mapped to
// them — so tail blocks lost counts. All threads now reach both barriers and
// the flush loop; only the sampling atomicAdd is guarded.
__global__ void reduceblockHisto(const unsigned int* const vals, unsigned int* const histo, int numVals,int numBins) {
	// Dynamically-sized per-block histogram (numBins entries).
	extern __shared__ unsigned int sharedHisto[];

	// 1. All threads cooperate to zero the shared histogram.
	for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
		sharedHisto[i] = 0;
	}
	__syncthreads();

	// 2. Guarded sample: in-range threads bump their value's local bin.
	//    No early return — every thread must reach the barrier below.
	int globalid = threadIdx.x + blockIdx.x * blockDim.x;
	if (globalid < numVals) {
		atomicAdd(&sharedHisto[vals[globalid]], 1);
	}

	// 3. Wait until the block-local histogram is complete.
	__syncthreads();

	// 4. Flush to global memory: one atomic per non-empty bin.
	for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
		if (sharedHisto[i] > 0) atomicAdd(&histo[i], sharedHisto[i]);
	}
}

// -------------------------------------------------------------------------------------
// If numBins is large or the input is very big: per-block local histograms in
// global memory, combined afterwards by a separate reduction kernel.
__global__
void histoLocal(const unsigned int* vals, 
                unsigned int* localHisto,
                int numVals, int numBins) {

    extern __shared__ unsigned int sharedHisto[];

    // 初始化共享内存
    for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
        sharedHisto[i] = 0;
    }
    __syncthreads();

    // grid-stride loop
    int globalId = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = gridDim.x * blockDim.x;

    for (int i = globalId; i < numVals; i += stride) {
        unsigned int val = vals[i];
        if (val < numBins) {
            atomicAdd(&sharedHisto[val], 1);  // 原子加
        }
    }
    __syncthreads();

    // 写回全局内存
    int blockStart = blockIdx.x * numBins;
    for (int i = threadIdx.x; i < numBins; i += blockDim.x) {
        localHisto[blockStart + i] = sharedHisto[i];
    }
}

// Pass 2: sum the numBlocks partial histograms (stored back-to-back in
// localHisto, numBins entries each) into finalHisto. One thread per bin;
// launch with at least numBins total threads.
__global__
void reduceHisto(const unsigned int* localHisto, 
                 unsigned int* finalHisto, 
                 int numBlocks, int numBins) {

    const int bin = blockIdx.x * blockDim.x + threadIdx.x;
    if (bin < numBins) {
        unsigned int total = 0;
        for (int blk = 0; blk < numBlocks; ++blk)
            total += localHisto[blk * numBins + bin];
        finalHisto[bin] = total;
    }
}

// Host driver for the two-kernel strategy:
//   Step 1: histoLocal builds one partial histogram per block (grid-stride,
//           so a clamped grid still covers every element).
//   Step 2: reduceHisto sums the partials into d_histo, one thread per bin.
// Fixes vs. original: CUDA API calls and launches are now checked with
// checkCudaErrors (matching computeHistogram's convention); the temporary
// buffer size is computed in size_t to avoid 32-bit overflow for large
// blocksPerGrid*numBins; the non-portable unqualified min() is replaced with
// an explicit clamp.
void reduceHistogram(const unsigned int* const d_vals, //INPUT
                      unsigned int* const d_histo,      //OUTPUT
                      const unsigned int numBins,
                      const unsigned int numElems){
  int blocks = (numElems + THREADS - 1) / THREADS;
  // Clamp the grid; histoLocal's grid-stride loop handles the remainder.
  int blocksPerGrid = (blocks < 65535) ? blocks : 65535;

  // Widen before multiplying so the byte count cannot overflow int.
  size_t localHistoSize = (size_t)blocksPerGrid * numBins * sizeof(unsigned int);
  unsigned int* d_localHisto = NULL;
  checkCudaErrors(cudaMalloc(&d_localHisto, localHistoSize));

  // Step 1: per-block partial histograms.
  histoLocal<<<blocksPerGrid, THREADS, numBins * sizeof(unsigned int)>>>(
      d_vals, d_localHisto, numElems, numBins);
  checkCudaErrors(cudaGetLastError());

  // Step 2: reduce all partials into the final histogram.
  dim3 reduceBlock(256);
  dim3 reduceGrid((numBins + reduceBlock.x - 1) / reduceBlock.x);
  reduceHisto<<<reduceGrid, reduceBlock>>>(d_localHisto, d_histo, blocksPerGrid, numBins);
  checkCudaErrors(cudaGetLastError());

  checkCudaErrors(cudaFree(d_localHisto));
}
// -------------------------------------------------------------------------------------
// Two-level (coarse/fine) histogram design
// Two-level histogram: bins are partitioned into "coarse" groups of blockDim.x
// consecutive fine bins, and block b exclusively owns coarse group b. Every
// block scans the ENTIRE input (the loop strides by blockDim.x only — this is
// a per-block full pass, NOT a grid-stride loop) and counts just the values
// that fall into its own coarse group.
// Requires: gridDim.x >= ceil(numBins / blockDim.x), dynamic shared memory of
// blockDim.x * sizeof(unsigned int), and histo zero-initialized beforehand
// (the write-back uses += , not atomicAdd) — TODO confirm at the call site.
__global__
void twoLevelHisto(const unsigned int* const vals, 
                   unsigned int* const histo, 
                   int numVals, 
                   int numBins) {
    int fineSize = blockDim.x;                    // fine bins per coarse group
    int coarseBin = blockIdx.x;                   // coarse group owned by this block
    int coarseBins = (numBins + fineSize - 1) / fineSize;  // total coarse groups

    // Uniform per block (depends only on blockIdx.x), so this early return
    // cannot cause a divergent barrier below.
    if (coarseBin >= coarseBins) return;

    extern __shared__ unsigned int sharedHisto[];
    
    // Cooperatively zero this group's fine-bin counters.
    for (int i = threadIdx.x; i < fineSize; i += blockDim.x) {
        sharedHisto[i] = 0;
    }
    __syncthreads();

    // Each thread walks the whole input with stride blockDim.x; collectively
    // the block sees every element and keeps only its own coarse group.
    for (int i = threadIdx.x; i < numVals; i += blockDim.x) {
        unsigned int val = vals[i];
        // Ignore out-of-range values.
        if (val >= numBins) continue;
        int bucket = val / fineSize;   // coarse group of this value
        int fineBin = val % fineSize;  // offset within the coarse group
        // Count only values belonging to this block's coarse group.
        if (bucket == coarseBin) {
            atomicAdd(&sharedHisto[fineBin], 1);
        }
    }
    __syncthreads();

    // Publish to global memory. The plain += is race-free within this kernel:
    // blocks own disjoint coarse groups and, within a block, each thread writes
    // disjoint offsets i, so every globalBin is touched by exactly one thread.
    // It is only correct if histo starts at zero (see header note).
    for (int i = threadIdx.x; i < fineSize; i += blockDim.x) {
        if (sharedHisto[i] > 0) {
            int globalBin = coarseBin * fineSize + i;
            if (globalBin < numBins) {
                histo[globalBin] += sharedHisto[i];
            }
        }
    }
}

// Host wrapper for twoLevelHisto: one block per coarse group of fineSize
// consecutive bins. NOTE: the kernel accumulates into histo with += , so histo
// is assumed to be zero-initialized on the device before this call.
// Fix vs. original: launch errors were silently dropped; now surfaced with
// checkCudaErrors, consistent with computeHistogram.
void twoLevelHistogram(const unsigned int* const vals, 
                   unsigned int* const histo, 
                   int numVals, 
                   int numBins){
  int fineSize = 512;  // power of two; fine bins per coarse group (= block size)
  int coarseBins = (numBins + fineSize - 1) / fineSize; // number of coarse groups
  dim3 block(fineSize);
  dim3 grid(coarseBins);  // one block per coarse group
  size_t sharedMemSize = fineSize * sizeof(unsigned int);
  twoLevelHisto<<<grid, block, sharedMemSize>>>(vals, histo, numVals, numBins);
  // Catch bad launch configurations (e.g. oversized shared memory) immediately.
  checkCudaErrors(cudaGetLastError());
}
// ---------------------------------------------------------------------------------------

// Entry point: histogram d_vals into d_histo. Input values are already bin
// indices (bin = val). Uses the per-block shared-memory strategy, which needs
// numBins * sizeof(unsigned int) bytes of dynamic shared memory per block.
void computeHistogram(const unsigned int* const d_vals, //INPUT
                      unsigned int* const d_histo,      //OUTPUT
                      const unsigned int numBins,
                      const unsigned int numElems)
{
  const int gridSize = (numElems + THREADS - 1) / THREADS;  // ceil-div

  // Alternative strategies, kept for experimentation:
  //   naiveHisto<<<gridSize, THREADS>>>(d_vals, d_histo, numElems);
  //   reduceHistogram(d_vals, d_histo, numBins, numElems);
  //   twoLevelHistogram(d_vals, d_histo, numElems, numBins);
  reduceblockHisto<<<gridSize, THREADS, numBins * sizeof(unsigned int)>>>(
      d_vals, d_histo, numElems, numBins);

  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
