#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>

#define EPSILON 1e-6

// One thread block computes one row.
// Row-wise LayerNorm of an m x n row-major matrix: P[r][c] = (X[r][c] - mean_r) / std_r.
//
// Launch layout: one 1-D thread block per row (gridDim.x == m).
// Precondition: blockDim.x is a multiple of warpSize and <= 1024 — the
// __shfl_down_sync calls use the full 0xffffffff mask, which requires every
// lane of every warp to be active.
//
// Reduction strategy (two stages):
//   1. each warp reduces its partial sum / sum-of-squares with shuffles;
//   2. lane 0 of each warp stages its total in shared memory, and the first
//      warp reduces those per-warp totals to the block-wide result.
__global__ void warpshf_layernorm(float *X, float *P, int m, int n){

    // One staging slot per warp. A block holds at most 1024/32 = 32 warps,
    // so 32 floats per array is always enough (the old 1024-float buffer
    // wasted 4 KB of shared memory per block).
    __shared__ float smem_mean[32];
    __shared__ float smem_var[32];

    int row = blockIdx.x;            // one block per row
    int tidx = threadIdx.x;
    int warp_size = warpSize;
    int lane = tidx % warp_size;     // lane index within the warp
    int warp = tidx / warp_size;     // warp index within the block
    int num_warps = (blockDim.x + warp_size - 1) / warp_size;

    // Whole-block early exit: row is uniform across the block, so the
    // __syncthreads() below are never reached divergently.
    if(row >= m){
        return;
    }

    const float *row_in = X + row * n;
    float *row_out = P + row * n;

    // Per-thread partial sum and sum of squares, striding across the row.
    float lsum = 0.0f;
    float lsumsq = 0.0f;
    for(int i = tidx; i < n; i += blockDim.x){
        float a = row_in[i]; // single global load reused for both accumulators
        lsum += a;
        lsumsq += a * a;
    }

    // Stage 1: warp-level tree reduction; lane 0 ends up with the warp total.
    for(int offset = warp_size / 2; offset > 0; offset /= 2){
        lsum   += __shfl_down_sync(0xffffffff, lsum, offset);
        lsumsq += __shfl_down_sync(0xffffffff, lsumsq, offset);
    }

    // Lane 0 of each warp publishes its warp's total.
    if(lane == 0){
        smem_mean[warp] = lsum;
        smem_var[warp]  = lsumsq;
    }
    __syncthreads();

    // Stage 2: the first warp reduces the per-warp partials to block totals.
    if(tidx < warp_size){
        lsum   = (tidx < num_warps) ? smem_mean[tidx] : 0.0f;
        lsumsq = (tidx < num_warps) ? smem_var[tidx]  : 0.0f;
        for(int offset = warp_size / 2; offset > 0; offset /= 2){
            lsum   += __shfl_down_sync(0xffffffff, lsum, offset);
            lsumsq += __shfl_down_sync(0xffffffff, lsumsq, offset);
        }
        if(tidx == 0){
            smem_mean[0] = lsum;
            smem_var[0]  = lsumsq;
        }
    }
    __syncthreads();

    // mean = E[x]; var = E[x^2] - E[x]^2, clamped at 0 against rounding error.
    float mean = smem_mean[0] / n;
    float var  = (smem_var[0] / n) - (mean * mean);
    var = fmaxf(var, 0.0f);

    // Cast keeps the addition in float — EPSILON (1e-6) is a double literal
    // and would otherwise promote the whole expression to double.
    float inv_std = rsqrtf(var + (float)EPSILON); // 1/stddev

    // Normalize the row.
    for(int i = tidx; i < n; i += blockDim.x){
        row_out[i] = (row_in[i] - mean) * inv_std;
    }
}


// Launches the warp-shuffle LayerNorm kernel over an m x n row-major matrix
// and prints the kernel execution time.
//
// D_in / D_out must be device pointers to m*n floats. Blocking: the function
// returns only after the kernel has completed (via cudaEventSynchronize).
void run_shfl_ln(float *D_in, float *D_out, int m, int n){

    dim3 threadsPerBlock(256); // multiple of warpSize, as the kernel requires
    dim3 blocksPerGrid(m);     // one block per row

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    warpshf_layernorm<<<blocksPerGrid, threadsPerBlock>>>(D_in, D_out, m, n);

    // Launch-configuration errors surface here; in-kernel execution errors
    // surface at the cudaEventSynchronize below. Neither was checked before.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << "\n";
    }

    // Record stop on the stream immediately; the old cudaDeviceSynchronize()
    // before this point added host sync overhead into the measured interval.
    cudaEventRecord(stop);
    err = cudaEventSynchronize(stop); // blocks until kernel + stop complete
    if (err != cudaSuccess) {
        std::cerr << "Kernel execution failed: " << cudaGetErrorString(err) << "\n";
    }

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Kernel execution time: " << ms << " ms\n";

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}