#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>

#define EPSILON 1e-6
#define WARP_SIZE 32


// Row-wise LayerNorm (no affine gamma/beta): each block normalizes one row of
// the m x n matrix X and writes the result to P:
//     P[r][c] = (X[r][c] - mean(row r)) / sqrt(var(row r) + EPSILON)
//
// Launch layout: grid = (m) blocks, one block per row; blockDim.x must be a
// multiple of WARP_SIZE and at most 1024. Uses float4 vectorized accesses only
// when n is a multiple of 4, so every row pointer stays 16-byte aligned
// (assumes X and P come from cudaMalloc, which aligns to >= 256 bytes);
// otherwise it falls back to scalar accesses.
__global__ void vectorized_layernorm(float *X, float *P, int m, int n){
    int row = blockIdx.x;
    int tidx = threadIdx.x;
    int warp_id = tidx / WARP_SIZE;
    int lane_id = tidx % WARP_SIZE;

    if(row >= m) return;

    // One slot per warp; 32 covers the maximum 1024 threads / 32 lanes
    // (the original size of 8 overflowed for blockDim.x > 256).
    __shared__ float smem_mean[32];
    __shared__ float smem_var[32];

    // size_t cast avoids int overflow of row * n on large matrices.
    float *row_in = X + (size_t)row * n;
    float *row_out = P + (size_t)row * n;
    float lmean = 0.0f;  // per-thread partial sum of x
    float lvar = 0.0f;   // per-thread partial sum of x^2

    // Vectorize only when n % 4 == 0; otherwise rows after the first are not
    // 16-byte aligned and float4 accesses would be undefined behavior.
    int vec_iters = (n % 4 == 0) ? n / 4 : 0;

    // Vectorized accumulation pass.
    for(int i = tidx; i < vec_iters; i += blockDim.x){
        float4 v = reinterpret_cast<float4 *>(row_in)[i];
        lmean += v.x + v.y + v.z + v.w;
        lvar += v.x * v.x + v.y * v.y + v.z * v.z + v.w * v.w;
    }

    // Scalar fallback / remainder elements.
    for(int i = vec_iters * 4 + tidx; i < n; i += blockDim.x){
        float val = row_in[i];
        lmean += val;
        lvar += val * val;
    }

    // Intra-warp tree reduction via shuffles.
    for(int offset = WARP_SIZE / 2; offset > 0; offset /= 2){
        lmean += __shfl_down_sync(0xffffffff, lmean, offset);
        lvar += __shfl_down_sync(0xffffffff, lvar, offset);
    }

    // Lane 0 of each warp publishes that warp's partial sums.
    if(lane_id == 0){
        smem_mean[warp_id] = lmean;
        smem_var[warp_id] = lvar;
    }
    __syncthreads();

    // First warp reduces the per-warp partials to the row totals.
    if(warp_id == 0){
        int num_warps = (blockDim.x + WARP_SIZE - 1) / WARP_SIZE;
        lmean = (lane_id < num_warps) ? smem_mean[lane_id] : 0.0f;
        lvar = (lane_id < num_warps) ? smem_var[lane_id] : 0.0f;

        for(int offset = WARP_SIZE / 2; offset > 0; offset /= 2){
            lmean += __shfl_down_sync(0xffffffff, lmean, offset);
            lvar += __shfl_down_sync(0xffffffff, lvar, offset);
        }

        // Thread 0 stores the row mean and biased variance, E[x^2] - E[x]^2.
        if(lane_id == 0){
            smem_mean[0] = lmean / n;
            smem_var[0] = (lvar / n) - (smem_mean[0] * smem_mean[0]);
        }
    }
    __syncthreads();

    // Broadcast the row statistics to every thread in the block.
    float gmean = smem_mean[0];
    // Clamp tiny negative variances caused by floating-point cancellation.
    float gvar = fmaxf(smem_var[0], 0.0f);
    float std_inv = rsqrtf(gvar + EPSILON);

    // Vectorized normalize-and-store pass.
    for(int i = tidx; i < vec_iters; i += blockDim.x){
        float4 v = reinterpret_cast<float4 *>(row_in)[i];
        v.x = (v.x - gmean) * std_inv;
        v.y = (v.y - gmean) * std_inv;
        v.z = (v.z - gmean) * std_inv;
        v.w = (v.w - gmean) * std_inv;
        reinterpret_cast<float4 *>(row_out)[i] = v;
    }

    // Scalar fallback / remainder elements.
    for(int i = vec_iters * 4 + tidx; i < n; i += blockDim.x){
        row_out[i] = (row_in[i] - gmean) * std_inv;
    }
}


// Launches the row-wise LayerNorm kernel on an m x n device matrix and prints
// the measured kernel time.
//
// D_in / D_out must be device pointers (from cudaMalloc) with room for m * n
// floats each. One block per row, 256 threads (8 warps) per block.
void run_vect_ln(float *D_in, float *D_out, int m, int n){
    dim3 threadsPerBlock(256);  // multiple of WARP_SIZE, as the kernel requires
    dim3 blocksPerGrid(m);      // one block per row

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float ms = 0.f;

    cudaEventRecord(start);
    vectorized_layernorm<<<blocksPerGrid, threadsPerBlock>>>(D_in, D_out, m, n);
    // Record stop immediately after the launch. The cudaEventSynchronize
    // below already blocks until the kernel and stop event complete, so no
    // separate cudaDeviceSynchronize is needed here.
    cudaEventRecord(stop);

    // Surface launch-configuration errors (bad grid/block dims, etc.), which
    // kernel launches never report directly.
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess){
        std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << "\n";
    }

    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Kernel execution time: " << ms << " ms\n";

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}