#include <gtest/gtest.h>
#include "math_ops.h"
#include "tensor.h"
#include "common.h"

TEST(AdditionTest, CPUVersion) {
    // Build two 3x4 FLOAT32 input tensors on the host and fill them.
    Tensor lhs = allocateTensorOnCPU({3, 4}, DataType::FLOAT32);
    initTensorData(lhs);
    Tensor rhs = allocateTensorOnCPU({3, 4}, DataType::FLOAT32);
    initTensorData(rhs);

    // Run the CPU addition kernel.
    Tensor sum = add_cpu(lhs, rhs);

    // The result must keep the input shape: rank 2, dims {3, 4}.
    ASSERT_EQ(sum.dims.size(), 2);
    EXPECT_EQ(sum.dims[0], 3);
    EXPECT_EQ(sum.dims[1], 4);

    // Release all tensors allocated by this test.
    freeTensor(lhs);
    freeTensor(rhs);
    freeTensor(sum);
}

TEST(AdditionTest, GPUVersion) {
    // Prepare two 3x4 FLOAT32 input tensors: init on the host, mirror on the device.
    Tensor tensor_a_gpu = allocateTensorOnGPU({3, 4}, DataType::FLOAT32);
    Tensor tensor_a_cpu = allocateTensorOnCPU({3, 4}, DataType::FLOAT32);
    initTensorData(tensor_a_cpu);

    Tensor tensor_b_gpu = allocateTensorOnGPU({3, 4}, DataType::FLOAT32);
    Tensor tensor_b_cpu = allocateTensorOnCPU({3, 4}, DataType::FLOAT32);
    initTensorData(tensor_b_cpu);

    copyToGPU(tensor_a_cpu, tensor_a_gpu);
    copyToGPU(tensor_b_cpu, tensor_b_gpu);

    // Run the addition on the GPU, and compute the reference result on the CPU.
    Tensor result_gpu = add_gpu(tensor_a_gpu, tensor_b_gpu);
    Tensor result_host = add_cpu(tensor_a_cpu, tensor_b_cpu);

    // Host-side buffer to receive the device result.
    Tensor result_cpu = allocateTensorOnCPU({3, 4}, DataType::FLOAT32);

    // BUGFIX: the device result must be copied device->host; the original
    // called copyToGPU(result_gpu, result_cpu), which both reverses the
    // (cpu_src, gpu_dst) argument convention used above and copies in the
    // wrong direction. NOTE(review): assumes common.h declares the matching
    // copyToCPU(gpu_src, cpu_dst) — confirm against the header.
    copyToCPU(result_gpu, result_cpu);

    // The result must keep the input shape: rank 2, dims {3, 4}.
    ASSERT_EQ(result_cpu.dims.size(), 2);
    EXPECT_EQ(result_cpu.dims[0], 3);
    EXPECT_EQ(result_cpu.dims[1], 4);

    // BUGFIX: compare every element. The original used result_cpu.num_dims
    // (the rank, 2) as the element count, so only 2 of the 12 values were
    // ever checked. The element count is the product of the dimensions.
    int num_elements = 1;
    for (size_t d = 0; d < result_cpu.dims.size(); d++) {
        num_elements *= result_cpu.dims[d];
    }

    float* gpu_tmp = static_cast<float*>(result_cpu.data);
    float* host_tmp = static_cast<float*>(result_host.data);
    for (int i = 0; i < num_elements; i++) {
        EXPECT_FLOAT_EQ(gpu_tmp[i], host_tmp[i]);
    }

    // Release all tensors allocated by this test (host and device).
    freeTensor(tensor_a_cpu);
    freeTensor(tensor_b_cpu);
    freeTensor(tensor_a_gpu);
    freeTensor(tensor_b_gpu);
    freeTensor(result_gpu);
    freeTensor(result_cpu);
    freeTensor(result_host);
}


