#include <iostream>
#include <cuda_runtime.h>
#include <cub/cub.cuh>
/* CUB is a header-only library that ships with the CUDA Toolkit. */


// Kernel forward declaration. Parameter names and order match the
// definition below (the old declaration listed pOutput second and named
// the affine parameters alpha/beta, which contradicted the definition).
__global__ void layerNormKernelnaive(float *pInput, float *pGamma, float *pBeta, float *pOutput);

// Helper macro to check for a pending CUDA error.
// Reads AND clears the sticky error state via cudaGetLastError(), prints
// the failure location, and aborts. Wrapped in do { ... } while (0) so the
// macro behaves as a single statement and is safe inside an unbraced
// if/else (the bare-brace form would break on `if (c) cudaCheckError(); else ...`).
#define cudaCheckError() do {                                      \
    cudaError_t e=cudaGetLastError();                              \
    if(e!=cudaSuccess) {                                           \
        std::cerr << "Cuda failure: " << cudaGetErrorString(e)     \
                  << " at " << __FILE__ << ":" << __LINE__ << std::endl; \
        exit(EXIT_FAILURE);                                        \
    }                                                              \
} while (0)



//https://github.com/Oneflow-Inc/oneflow/blob/master/oneflow/core/cuda/layer_norm.cuh
/**
 * TODO: add an implementation based on the OneFlow layer_norm.cuh
 * linked above.
 */


/**
 * cubv1
*/
/**
 * Numerical-stability epsilon added to the variance before taking rsqrt.
 * Returned as T so callers can use it uniformly for any float-like type.
 * __forceinline__ because it is a trivial one-expression device helper.
 */
template<typename T>
__device__ __forceinline__ T epsilon() {
    return static_cast<T>(1e-5);
}

#if 0
/**
 * LayerNorm over a fixed row length of 256 using cub::BlockReduce.
 * Launch expectation: one block per row with blockDim.x == 256 (one
 * thread per element); pGamma/pBeta are per-element scale/shift arrays
 * of length 256.
 * NOTE(review): this kernel is currently compiled out by the
 * surrounding #if 0.
 */
template<typename T>
__global__ void layerNormKernelcubv1(T *pInput, float *pGamma, float *pBeta, T *pOutput) {
    const int n = 256; // row length; must equal blockDim.x
    const int tx = threadIdx.x, index = blockIdx.x * n + tx;
    T _x = pInput[index], _b = (T)pGamma[tx], _a = (T)pBeta[tx];

    // Broadcast slots: thread 0 publishes the row statistics here.
    __shared__ T mean_shared, var_shared;

    typedef cub::BlockReduce<T, n> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp;
    T& ref0 = _x;
    // Block-wide sum; CUB's result is only valid in thread 0.
    T sum = BlockReduce(temp).Sum(ref0);
    if(tx == 0) {
        mean_shared = sum / T(n);
    }
    // This barrier both publishes mean_shared and makes it safe to
    // reuse `temp` for the second reduction below.
    __syncthreads();

    T moment = _x - mean_shared, moment2 = moment * moment;
    T& ref1 = moment2;
    // Second reduction: sum of squared deviations (valid in thread 0 only).
    T var = BlockReduce(temp).Sum(ref1);
    if(tx == 0) {
        var_shared = var / T(n);
    }
    __syncthreads();

    // Normalize, then apply the per-element scale (gamma) and shift (beta).
    pOutput[index] = (moment) * (T)rsqrtf(var_shared + epsilon<T>()) * _b + _a;
}


/*
CUB v2
https://github.com/NVIDIA/TensorRT/blob/master/plugin/skipLayerNormPlugin/skipLayerNormKernel.cu
*/
/**
 * Vectorized LayerNorm (adapted from TensorRT's skipLayerNormPlugin).
 * Each thread owns VPT (values-per-thread) consecutive elements and the
 * block reduces <mean, E[x^2]> in a single pass via a float2 reduction.
 * The row length is hard-coded to 256, so TPB * VPT must equal 256.
 * NOTE(review): relies on a `copy(bytes, src, dst)` helper from the
 * original TensorRT source that is not defined in this file; the kernel
 * is currently compiled out by the surrounding #if 0.
 */
template<typename T, int TPB, int VPT>
__global__ void layerNormKernelcvbv2(const T* input, const T* gamma, const T* beta, T* output) {
    const int idx = blockIdx.x * 256 + threadIdx.x * VPT;
    T localX[VPT], localGamma[VPT], localBeta[VPT];

    // Copy input values to local array
    copy(sizeof(T) * VPT, &input[idx], localX);

    // .x accumulates the mean contribution, .y the E[x^2] contribution.
    float2 localFloat2 = {0.f, 0.f};
    const float rld = float(1) / float(256); // reciprocal of the row length

    #pragma unroll
    for (int it = 0; it < VPT; ++it) {
        const float tmp = rld * (float)localX[it];
        localFloat2.x += tmp;                     // running mean
        localFloat2.y += tmp * (float)localX[it]; // running E[x^2]
    }

    // Copy beta and gamma values to local arrays
    copy(sizeof(T) * VPT, &beta[threadIdx.x * VPT], localBeta);
    copy(sizeof(T) * VPT, &gamma[threadIdx.x * VPT], localGamma);

    using BlockReduce = cub::BlockReduce<float2, TPB>;
    __shared__ typename BlockReduce::TempStorage temp_storage;
    __shared__ float mu;     // row mean, published by thread 0
    __shared__ float rsigma; // 1/sqrt(var + eps), published by thread 0

    // Reduce both statistics at once; result is valid in thread 0 only.
    const float2 sumKV = BlockReduce(temp_storage).Reduce(localFloat2, cub::Sum());

    if (threadIdx.x == 0) {
        mu = sumKV.x;
        // Var(x) = E[x^2] - mu^2; 1e-6 guards against division by zero.
        rsigma = rsqrt(sumKV.y - mu * mu + 1e-6);
    }
    __syncthreads();

    #pragma unroll
    for (int it = 0; it < VPT; ++it) {
        localX[it] = (float)localGamma[it] * (((float)localX[it] - mu) * rsigma) + (float)localBeta[it];
    }

    // Copy the normalized values back to output
    copy(sizeof(T) * VPT, localX, &output[idx]);
}
#endif

/**
 * Naive LayerNorm kernel for rows of exactly 256 floats.
 *
 * Launch expectation: gridDim.x = number of rows, blockDim.x = 128.
 * Each thread loads TWO elements (tx and tx + 128) so the 128-thread
 * block covers a 256-element row; the shared-memory tree reduction
 * therefore starts at stride 64 (128 partial sums).
 *
 * pInput  : [rows * 256] values to normalize
 * pGamma  : [256] per-element scale
 * pBeta   : [256] per-element shift
 * pOutput : [rows * 256] normalized result
 */
__global__ void layerNormKernelnaive(float *pInput, float *pGamma, float * pBeta, float *pOutput) {
    const int tx = threadIdx.x, index = blockIdx.x * 256 + threadIdx.x;
    __shared__ float temp[128];

    float _b0 = pGamma[tx], _a0 = pBeta[tx];
    float _b1 = pGamma[tx + 128], _a1 = pBeta[tx + 128];
    float value0 = pInput[index], value1 = pInput[index + 128];

    // Each thread seeds the reduction with the sum of its two elements.
    temp[tx] = value0 + value1;
    __syncthreads();

    // Tree reduction of the row sum; the barrier stays OUTSIDE the
    // divergent branch so every thread in the block reaches it.
    for (int stride = 64; stride >= 1; stride /= 2) {
        if (tx < stride) {
            temp[tx] += temp[tx + stride];
        }
        __syncthreads();
    }

    // Float literal (256.0f) avoids silently promoting to double math
    // (the original divided by `256.`, a double constant).
    float mean = temp[0] / 256.0f;
    // Barrier before temp[] is overwritten for the variance pass.
    __syncthreads();

    temp[tx] = (value0 - mean) * (value0 - mean) + (value1 - mean) * (value1 - mean);
    __syncthreads();

    for (int stride = 64; stride >= 1; stride /= 2) {
        if (tx < stride) {
            temp[tx] += temp[tx + stride];
        }
        __syncthreads();
    }
    float var = temp[0] / 256.0f;

    // Normalize and apply the affine transform. 6e-6f keeps the original
    // epsilon value while staying in single precision (the bare `6e-6`
    // double literal forced the addition into double arithmetic).
    pOutput[index] = _b0 * (value0 - mean) * rsqrtf(var + 6e-6f) + _a0;
    pOutput[index + 128] = _b1 * (value1 - mean) * rsqrtf(var + 6e-6f) + _a1;
}


/**
 * Host driver: fills one batch of 256 random floats, runs the naive
 * LayerNorm kernel (numBatch blocks of 128 threads; each thread handles
 * two of the 256 row elements), and prints input and output.
 * Exits via cudaCheckError() on any CUDA failure; returns 0 on success.
 */
int main() {
    const int numSequence = 256; // elements normalized per block (row length)
    const int numBatch = 1;      // number of rows / thread blocks
    const int dataSize = numSequence * numBatch * sizeof(float);
    const int sequenceSize = numSequence * sizeof(float);
    const int numElements = numSequence * numBatch;

    // Allocate and initialize host memory.
    float* h_input  = new float[numElements];
    float* h_output = new float[numElements];
    float* h_gamma  = new float[numSequence];
    float* h_beta   = new float[numSequence];

    for (int i = 0; i < numElements; ++i) {
        h_input[i] = static_cast<float>(rand()) / RAND_MAX; // random values in [0, 1]
    }

    // gamma = 1, beta = 0 -> identity affine transform, i.e. pure normalization.
    for (int i = 0; i < numSequence; ++i) {
        h_gamma[i] = 1;
        h_beta[i] = 0;
    }

    // Allocate device memory.
    float *d_input = nullptr;
    float *d_output = nullptr;
    float *d_gamma = nullptr;
    float *d_beta = nullptr;

    cudaMalloc((void **)&d_input, dataSize);
    cudaMalloc((void **)&d_output, dataSize);
    cudaMalloc((void **)&d_gamma, sequenceSize);
    cudaMalloc((void **)&d_beta,  sequenceSize);
    cudaCheckError();

    // Copy input data from host to device.
    cudaMemcpy(d_input, h_input, dataSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_gamma, h_gamma, sequenceSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_beta, h_beta, sequenceSize, cudaMemcpyHostToDevice);
    cudaCheckError();

    // Launch the layer normalization kernel: 128 threads cover one
    // 256-element row because each thread processes two elements.
    dim3 blockDim(128); // Number of threads per block
    dim3 gridDim(numBatch); // Number of blocks in the grid
    layerNormKernelnaive<<<gridDim, blockDim>>>(d_input, d_gamma, d_beta, d_output);
    cudaCheckError();        // catches launch-configuration errors
    cudaDeviceSynchronize(); // surfaces asynchronous kernel execution errors
    cudaCheckError();

    // Copy the output data from device to host.
    cudaMemcpy(h_output, d_output, dataSize, cudaMemcpyDeviceToHost);
    cudaCheckError();

    // Display the results.
    std::cout << "Input:" << std::endl;
    for (int i = 0; i < numElements; ++i) {
        std::cout << h_input[i] << " ";
    }
    std::cout << std::endl;

    std::cout << "Output:" << std::endl;
    for (int i = 0; i < numElements; ++i) {
        std::cout << h_output[i] << " ";
    }
    std::cout << std::endl;

    // Clean up device and host allocations.
    cudaFree(d_gamma);
    cudaFree(d_beta);
    cudaFree(d_input);
    cudaFree(d_output);
    cudaCheckError();

    delete[] h_beta;
    delete[] h_gamma;
    delete[] h_input;
    delete[] h_output;

    return 0;
}
