#include <iostream>
#include <algorithm>
#include "input_embedding.h"
#include <random> 
#include "math.h"
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include "embedding_weight.h"
#include "tensor.h"
#include "string"


// template<typename T>
// void launchInputEmbedding(TensorWrapper<int>* input_ids,
//                           TensorWrapper<T>* out,
//                           EmbeddingWeight<T>* embed_table);


/**
 * CPU reference implementation of the embedding lookup.
 *
 * For each of the max_context_token_num tokens, copies the hidden_size-wide
 * row of embed_table selected by that token's id into the output buffer.
 *
 * @param input_ids             token ids, length max_context_token_num
 * @param output                destination buffer, max_context_token_num * hidden_size floats
 * @param embed_table           embedding table laid out as rows of hidden_size floats
 * @param max_context_token_num number of tokens to embed
 * @param hidden_size           width of one embedding row
 */
void cpuEmbedding(const int* input_ids, float* output, float* embed_table, const int max_context_token_num, const int hidden_size) {
    for (int token = 0; token < max_context_token_num; ++token) {
        const float* src_row = embed_table + input_ids[token] * hidden_size;
        float* dst_row = output + token * hidden_size;
        for (int col = 0; col < hidden_size; ++col) {
            dst_row[col] = src_row[col];
        }
    }
}

/**
 * Compare a CPU reference result against the GPU result element-wise.
 *
 * On the first element whose absolute difference exceeds 1e-5, prints a
 * window of up to 10 elements on each side of the mismatch for both arrays
 * and returns false. Returns true when all elements agree within tolerance.
 *
 * @param cpu_out     host buffer holding the CPU reference result
 * @param gpu_out     host buffer holding the result copied back from the GPU
 * @param output_size number of elements in each buffer
 * @return true if every element matches within 1e-5, false otherwise
 */
bool checkResult(float* cpu_out, float* gpu_out, const int output_size) {
    for (int i = 0; i < output_size; ++i) {
        if (fabs(cpu_out[i] - gpu_out[i]) > 1e-5) {
            // Clamp the debug window to the valid index range [0, output_size).
            // BUG FIX: the original had min/max swapped — min(0, i-10) gave a
            // negative (out-of-bounds) start index when i < 10, and
            // max(output_size, i+10) printed to at least the end of the array.
            const int lo = std::max(0, i - 10);
            const int hi = std::min(output_size, i + 10);
            std::cout << "打印不一样位置前后10个数" << std::endl;
            std::cout << "CPU: ";
            for (int j = lo; j < hi; ++j) {
                std::cout << cpu_out[j] << " ";
            }
            std::cout << std::endl;

            std::cout << "GPU: ";
            for (int j = lo; j < hi; ++j) {
                std::cout << gpu_out[j] << " ";
            }
            std::cout << std::endl;
            return false;
        }
    }
    std::cout << "测试通过" << std::endl;
    return true;
}

/**
 * Test driver for the GPU input-embedding kernel.
 *
 * Usage: <exe> float
 * Builds random token ids and a random FP32 embedding table, runs
 * launchInputEmbedding on the GPU, and compares against cpuEmbedding.
 */
int main(int argc, char *argv[]) {
    // BUG FIX: argv[1] was dereferenced unconditionally; running with no
    // arguments crashed before any test output.
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <float>" << std::endl;
        return 1;
    }

    std::cout << "测试输入嵌入层" << std::endl;

    const int max_context_token_num = 512;
    const int hidden_size = 2048;
    const int vocab_size = 30000;
    const int input_size = max_context_token_num;
    const int table_size = hidden_size * vocab_size;
    const int output_size = max_context_token_num * hidden_size;

    // Generate random token ids uniformly in [0, vocab_size).
    int *input_ids = (int*)malloc(sizeof(int)*input_size);
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<> dis(0, vocab_size-1);
    for (int i = 0; i < input_size; ++i) {
        input_ids[i] = dis(gen);
    }

    if (std::string(argv[1]) == "float") {
        // Fill the embedding table with uniform random floats in [-2, 2).
        float* embed_table = (float*)malloc(sizeof(float)*table_size);
        std::uniform_real_distribution<float> table_dis(-2.0f, 2.0f);
        for (int i = 0; i < table_size; ++i) {
            embed_table[i] = table_dis(gen);
        }
        float* d_embed_table;
        cudaMalloc((void**)&d_embed_table, sizeof(float)*table_size);
        cudaMemcpy(d_embed_table, embed_table, sizeof(float)*table_size, cudaMemcpyHostToDevice);
        EmbeddingWeight<float> embed_table_wrapper;
        embed_table_wrapper.shape = {vocab_size, hidden_size};
        embed_table_wrapper.data = d_embed_table;
        embed_table_wrapper.type = WeightType::FP32_W;

        int* d_input_ids;
        cudaMalloc((void**)&d_input_ids, sizeof(int)*input_size);
        cudaMemcpy(d_input_ids, input_ids, sizeof(int)*input_size, cudaMemcpyHostToDevice);
        TensorWrapper<int> input_ids_wrapper(Device::GPU, DataType::INT32, {max_context_token_num}, d_input_ids);

        float* gpu_output = (float*)malloc(sizeof(float)*output_size);
        float* d_gpu_output;
        cudaMalloc((void**)&d_gpu_output, sizeof(float)*output_size);
        TensorWrapper<float> output_wrapper(Device::GPU, DataType::FP32, {max_context_token_num, hidden_size}, d_gpu_output);

        launchInputEmbedding<float>(&input_ids_wrapper, &output_wrapper, &embed_table_wrapper);
        // Kernel launches do not return errors directly; surface bad launch
        // configurations explicitly instead of comparing garbage output.
        cudaError_t launch_err = cudaGetLastError();
        if (launch_err != cudaSuccess) {
            std::cerr << "kernel launch failed: " << cudaGetErrorString(launch_err) << std::endl;
        }
        // Blocking cudaMemcpy on the default stream synchronizes with the kernel.
        cudaMemcpy(gpu_output, d_gpu_output, sizeof(float)*output_size, cudaMemcpyDeviceToHost);

        float* cpu_output = (float*)malloc(sizeof(float)*output_size);
        cpuEmbedding(input_ids, cpu_output, embed_table, max_context_token_num, hidden_size);

        checkResult(cpu_output, gpu_output, output_size);

        free(embed_table);
        free(gpu_output);
        free(cpu_output);
        cudaFree(d_embed_table);
        cudaFree(d_input_ids);
        cudaFree(d_gpu_output);
    } else {
        // Previously an empty branch: unsupported dtypes silently "passed".
        std::cerr << "unsupported dtype: " << argv[1] << " (expected \"float\")" << std::endl;
        free(input_ids);
        return 1;
    }

    free(input_ids);
    return 0;
}