#include <iostream>
#include <algorithm>
#include <cmath>
#include "stdio.h"
#include "rmsnorm_kernel.h"
#include "tensor.h"
#include "norm_weight.h"

// CHECK(call): evaluates a CUDA runtime call and, on failure, prints the
// file/line, the numeric error code, and the human-readable error text,
// then terminates the process.
// Bug fix: the original macro ended with `while (0);` — the trailing
// semicolon defeats the whole point of the do/while(0) idiom and breaks
// uses like `if (cond) CHECK(x); else ...`. The semicolon now comes from
// the call site, as intended.
#define CHECK(call)                          \
do                                           \
{                                            \
    const cudaError_t error_code = call;     \
    if (error_code != cudaSuccess) {         \
        printf("CUDA Error:\n");             \
        printf("    File:   %s\n", __FILE__);   \
        printf("    Line    %d\n", __LINE__);   \
        printf("    Error code: %d\n", (int)error_code); /* cast enum for %d */ \
        printf("    Error text: %s\n", cudaGetErrorString(error_code)); \
        exit(1);    \
    }   \
} while (0)


// CPU reference for RMSNorm over a row-major [num_tokens, hidden_units]
// buffer, normalizing each token's row in place:
//     out[i][j] *= scale[j] / sqrt(mean(out[i][:]^2) + eps)
// NOTE(review): despite the "FusedResidual" name, no residual addition is
// performed here — only the normalization. Confirm this matches what
// launchRMSNorm does to its decoder_out tensor.
void CPUFusedResidualandRMSNorm(
    float *cpu_decoder_out,   // in/out: [num_tokens, hidden_units], overwritten in place
    float *cpu_scale,         // in: per-channel gamma, [hidden_units]
    float eps,                // numerical-stability term added to the mean square
    int hidden_units,
    int num_tokens)
{
    for (int i = 0; i < num_tokens; ++i) {
        // Sum of squares over this token's row.
        float sum = 0.f;
        for (int j = 0; j < hidden_units; ++j) {
            float tmp = cpu_decoder_out[i * hidden_units + j];
            sum += tmp * tmp;
        }
        // Bug fix: rsqrt() is a CUDA device-side intrinsic, not part of
        // standard host <cmath>; use the portable single-precision form.
        float inv_mean = 1.0f / sqrtf(sum / hidden_units + eps);
        for (int j = 0; j < hidden_units; ++j) {
            cpu_decoder_out[i * hidden_units + j] *= (inv_mean * cpu_scale[j]);
        }
    }
}

// Element-wise comparison of a CPU float reference against GPU output of
// type T (e.g. float or half). Prints the first mismatching element and
// returns false; returns true when every element is within tolerance.
// NOTE(review): the 1e-6 absolute tolerance is very tight for half-precision
// outputs and for float reductions whose summation order differs from the
// CPU loop — consider an atol+rtol comparison if spurious failures appear.
template <typename T>
bool CheckResult(float *cpu_out, T *gpu_out, int num_tokens, int hidden_units) {
    for (int i = 0; i < num_tokens; ++i) {
        for (int j = 0; j < hidden_units; ++j) {
            float gpu_val = static_cast<float>(gpu_out[i * hidden_units + j]);
            float cpu_val = static_cast<float>(cpu_out[i * hidden_units + j]);
            if (std::fabs(gpu_val - cpu_val) > 1e-6) {
                printf("the %d token %d hidden unit value is not equal, CPU Out: %f, GPU Out %f\n", i, j, cpu_val, gpu_val);
                return false;
            }
        }
    }
    // Bug fix: the original fell off the end of a non-void function here
    // (undefined behavior) whenever the comparison succeeded, so the caller's
    // pass/fail flag was garbage.
    return true;
}

// Test driver for launchRMSNorm. Invoke with "1" as the first argument to
// exercise the float path; any other invocation exercises the half path.
// Each path builds deterministic host inputs, runs the GPU kernel, rebuilds
// the same inputs for the CPU reference, and compares element-wise.
int main(int argc, char *argv[]) {
    const int num_tokens = 64;
    const int hidden_units = 4096;
    const int total_size = num_tokens * hidden_units;
    float eps = 1e-6f;

    if (argc != 1 && atoi(argv[1]) == 1) {
        std::cout << "test float" << std::endl;
        float *h_decoder_out = (float*)malloc(sizeof(float)*total_size);
        float *gpu_decoder_out = (float*)malloc(sizeof(float)*total_size);
        float *h_scale = (float*)malloc(sizeof(float)*hidden_units);
        // (removed an unused h_decoder_residual host buffer — it was
        // allocated and freed but never read or written)
        float *d_decoder_out;
        CHECK(cudaMalloc((void**)&d_decoder_out, sizeof(float)*total_size));
        float *d_scale;
        CHECK(cudaMalloc((void**)&d_scale, sizeof(float)*hidden_units));
        float *d_decoder_residual;
        CHECK(cudaMalloc((void**)&d_decoder_residual, sizeof(float)*total_size));

        // Deterministic patterns so the CPU reference below can rebuild
        // exactly the same inputs: activations alternate 1,2,1,2,...;
        // gamma alternates 0.1,0.2,...
        for (int i = 0; i < total_size; ++i) {
            h_decoder_out[i] = (float)(i % 2 + 1);
        }
        for (int i = 0; i < hidden_units; ++i) {
            h_scale[i] = 0.1 + 0.1 * (i % 2);
        }
        CHECK(cudaMemcpy(d_decoder_out, h_decoder_out, sizeof(float)*total_size, cudaMemcpyHostToDevice));
        CHECK(cudaMemcpy(d_scale, h_scale, sizeof(float)*hidden_units, cudaMemcpyHostToDevice));

        DataType type = getTensorType<float>();
        TensorWrapper<float>* decoder_out_tensor = new TensorWrapper<float>(Device::GPU, type, {num_tokens, hidden_units}, d_decoder_out);
        NormWeight<float> norm_weight;
        norm_weight.gamma = d_scale;
        TensorWrapper<float>* decoder_residual_tensor = new TensorWrapper<float>(Device::GPU, type, {num_tokens, hidden_units}, d_decoder_residual);

        std::cout << "before launch kernel" << std::endl;
        launchRMSNorm(decoder_out_tensor, decoder_residual_tensor, norm_weight, eps);
        // Kernel launches do not return errors directly; surface bad launch
        // configurations here before reading results back.
        CHECK(cudaGetLastError());
        std::cout << "after launch kernel" << std::endl;
        CHECK(cudaMemcpy(gpu_decoder_out, d_decoder_out, sizeof(float)*total_size, cudaMemcpyDeviceToHost));

        float *CPUOut = (float*)malloc(sizeof(float)*total_size);
        float *CPUScale = (float*)malloc(sizeof(float)*hidden_units);
        for (int i = 0; i < total_size; ++i) {
            CPUOut[i] = (float)(i % 2 + 1);
        }
        for (int i = 0; i < hidden_units; ++i) {
            CPUScale[i] = 0.1 + 0.1 * (i % 2);
        }

        // Bug fix: the reference's signature is (eps, hidden_units,
        // num_tokens); the original call swapped the last two arguments,
        // making the CPU reference normalize 4096 rows of 64 elements
        // instead of 64 rows of 4096.
        CPUFusedResidualandRMSNorm(CPUOut, CPUScale, eps, hidden_units, num_tokens);

        bool is_right = CheckResult(CPUOut, gpu_decoder_out, num_tokens, hidden_units);
        if (is_right) {
            std::cout << "test passed" << std::endl;
        } else {
            std::cout << "test failed" << std::endl;
        }

        delete decoder_out_tensor;
        delete decoder_residual_tensor;

        free(h_decoder_out);
        free(gpu_decoder_out);
        free(h_scale);
        CHECK(cudaFree(d_decoder_out));
        CHECK(cudaFree(d_scale));
        CHECK(cudaFree(d_decoder_residual));
        free(CPUOut);
        free(CPUScale);
    } else {
        std::cout << "test half" << std::endl;

        half *h_decoder_out = (half*)malloc(sizeof(half)*total_size);
        half *gpu_decoder_out = (half*)malloc(sizeof(half)*total_size);
        half *h_scale = (half*)malloc(sizeof(half)*hidden_units);
        // (removed an unused h_decoder_residual host buffer, as above)
        half *d_decoder_out;
        CHECK(cudaMalloc((void**)&d_decoder_out, sizeof(half)*total_size));
        half *d_scale;
        CHECK(cudaMalloc((void**)&d_scale, sizeof(half)*hidden_units));
        half *d_decoder_residual;
        CHECK(cudaMalloc((void**)&d_decoder_residual, sizeof(half)*total_size));

        // All-ones input with unit gamma: the normalized result is exactly
        // 1.0 (representable in half), so the tight absolute tolerance in
        // CheckResult is safe for this path.
        for (int i = 0; i < total_size; ++i) {
            h_decoder_out[i] = 1.0f;
        }
        for (int i = 0; i < hidden_units; ++i) {
            h_scale[i] = (half)1;
        }
        CHECK(cudaMemcpy(d_decoder_out, h_decoder_out, sizeof(half)*total_size, cudaMemcpyHostToDevice));
        CHECK(cudaMemcpy(d_scale, h_scale, sizeof(half)*hidden_units, cudaMemcpyHostToDevice));

        DataType type = getTensorType<half>();
        TensorWrapper<half>* decoder_out_tensor = new TensorWrapper<half>(Device::GPU, type, {num_tokens, hidden_units}, d_decoder_out);
        NormWeight<half> norm_weight;
        norm_weight.gamma = d_scale;
        TensorWrapper<half>* decoder_residual_tensor = new TensorWrapper<half>(Device::GPU, type, {num_tokens, hidden_units}, d_decoder_residual);

        std::cout << "before launch kernel" << std::endl;
        launchRMSNorm(decoder_out_tensor, decoder_residual_tensor, norm_weight, eps);
        CHECK(cudaGetLastError());
        std::cout << "after launch kernel" << std::endl;
        CHECK(cudaMemcpy(gpu_decoder_out, d_decoder_out, sizeof(half)*total_size, cudaMemcpyDeviceToHost));

        float *CPUOut = (float*)malloc(sizeof(float)*total_size);
        float *CPUScale = (float*)malloc(sizeof(float)*hidden_units);
        for (int i = 0; i < total_size; ++i) {
            CPUOut[i] = 1.0f;
        }
        for (int i = 0; i < hidden_units; ++i) {
            CPUScale[i] = 1.0f;
        }
        // Bug fix: same swapped hidden_units/num_tokens arguments as the
        // float path above.
        CPUFusedResidualandRMSNorm(CPUOut, CPUScale, eps, hidden_units, num_tokens);

        bool is_right = CheckResult(CPUOut, gpu_decoder_out, num_tokens, hidden_units);
        if (is_right) {
            std::cout << "test passed" << std::endl;
        } else {
            std::cout << "test failed" << std::endl;
        }

        delete decoder_out_tensor;
        delete decoder_residual_tensor;

        free(h_decoder_out);
        free(gpu_decoder_out);
        free(h_scale);
        CHECK(cudaFree(d_decoder_out));
        CHECK(cudaFree(d_scale));
        CHECK(cudaFree(d_decoder_residual));
        free(CPUOut);
        free(CPUScale);
    }
    return 0;
}