#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>

#include "kernels/naive-layernorm.cu"
#include "kernels/smem-layernorm.cu"
#include "kernels/warpshf-layernorm.cu"
#include "kernels/vectorized-layernorm.cu"

// Print command-line usage help for this benchmark driver.
// (User-facing strings are intentionally kept in the original Chinese.)
void print_usage(const char* program_name) {
    fprintf(stdout, "用法: %s <kernel_type> [M] [N]\n", program_name);
    fprintf(stdout, "kernel_type: 1=naive, 2=shared_memory, 3=warp_shuffle, 4=Vectorized\n");
    fprintf(stdout, "M: 矩阵行数 (默认: 1024)\n");
    fprintf(stdout, "N: 矩阵列数 (默认: 1024)\n");
    fprintf(stdout, "示例: %s 1 512 512\n", program_name);
}

// Abort with a readable message if any CUDA runtime call fails.
// Kernel launches themselves return no status, so the dispatch code below
// pairs each launch with cudaGetLastError() + cudaDeviceSynchronize().
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Entry point: parse <kernel_type> [M] [N], run the selected LayerNorm
// kernel on an M x N float matrix, and print the top-left 5x5 corner of
// the input and output for a quick visual sanity check.
// Returns 0 on success, -1 on bad arguments, EXIT_FAILURE on CUDA errors.
int main(int argc, char* argv[]){

    // Require at least the kernel_type argument.
    if(argc < 2) {
        print_usage(argv[0]);
        return -1;
    }

    // Parse and validate the kernel selector (atoi yields 0 for garbage,
    // which this range check also rejects).
    int kernel_type = atoi(argv[1]);
    if(kernel_type < 1 || kernel_type > 4) {
        printf("错误: kernel_type 必须是 1, 2, 3 或 4\n");
        print_usage(argv[0]);
        return -1;
    }

    // Parse optional matrix dimensions; reject non-positive values so we
    // never hand a zero/negative extent to the kernels or to malloc.
    int M = (argc >= 3) ? atoi(argv[2]) : 1024;
    int N = (argc >= 4) ? atoi(argv[3]) : 1024;
    if(M <= 0 || N <= 0) {
        printf("错误: M 和 N 必须是正整数\n");
        print_usage(argv[0]);
        return -1;
    }

    printf("运行参数: kernel_type=%d, M=%d, N=%d\n", kernel_type, M, N);

    // Widen BEFORE multiplying: M*N in int overflows for large matrices.
    size_t matrix_size = (size_t)M * (size_t)N * sizeof(float);
    float *X_input, *P_output;
    float *D_input, *D_output;

    X_input = (float*)malloc(matrix_size);
    P_output = (float*)malloc(matrix_size);
    if(X_input == NULL || P_output == NULL) {
        fprintf(stderr, "host malloc failed (%zu bytes each)\n", matrix_size);
        free(X_input);
        free(P_output);
        return EXIT_FAILURE;
    }

    // Initialize the input with a deterministic ramp: 1, 2, 3, ...
    for(size_t i = 0; i < (size_t)M * N; i++){
        X_input[i] = (float)(i + 1);
    }

    CUDA_CHECK(cudaMalloc((void**)&D_input, matrix_size));
    CUDA_CHECK(cudaMalloc((void**)&D_output, matrix_size));

    CUDA_CHECK(cudaMemcpy(D_input, X_input, matrix_size, cudaMemcpyHostToDevice));

    // Dispatch on the validated kernel selector. (kernel_type is already
    // confined to 1..4 above, so no default "run all" branch is needed.)
    switch(kernel_type) {
        case 1:
            // Simplest implementation: reads operands straight from global memory.
            printf("\n=== 运行 Naive LayerNorm ===\n");
            run_naive_ln(D_input, D_output, M, N);
            break;
        case 2:
            // Uses shared memory to speed up memory access.
            printf("\n=== 运行 Shared Memory LayerNorm ===\n");
            run_smem_ln(D_input, D_output, M, N);
            break;
        case 3:
            // Uses warp shuffle to speed up the reductions.
            printf("\n=== 运行 Warp Shuffle LayerNorm ===\n");
            run_shfl_ln(D_input, D_output, M, N);
            break;
        case 4:
            printf("\n=== 运行 Vectorized LayerNorm ===\n");
            run_vect_ln(D_input, D_output, M, N);
            break;
    }

    // Surface launch-configuration errors and asynchronous kernel faults
    // before trusting the output buffer.
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaDeviceSynchronize());

    CUDA_CHECK(cudaMemcpy(P_output, D_output, matrix_size, cudaMemcpyDeviceToHost));

    // Show the top-left corner of the input matrix.
    printf("\n输入矩阵 (前5x5): \n");
    for(int i=0; i<5 && i<M; i++){
        for(int j=0; j<5 && j<N; j++){
            printf("%8.3f ", X_input[(size_t)i*N+j]);
        }
        printf("\n");
    }

    // Show the top-left corner of the normalized output.
    printf("\n输出矩阵 (前5x5): \n");
    for(int i=0; i<5 && i<M; i++){
        for(int j=0; j<5 && j<N; j++){
            printf("%8.3f ", P_output[(size_t)i*N+j]);
        }
        printf("\n");
    }

    free(P_output);
    free(X_input);
    CUDA_CHECK(cudaFree(D_input));
    CUDA_CHECK(cudaFree(D_output));

    return 0;
}
