#include "core/function_test.h"
#include "core/function.h"
#include "log.h"

namespace  ldl
{
namespace core
{
// Exercises the autograd Function machinery end-to-end: unary op (exp),
// chained ops, and binary ops (+, *) including the self-addition aliasing
// case. Results and gradients are logged for manual inspection; there are
// no assertions, so "passing" means running without crashing and the
// logged values looking correct.
void FunctionTest::test()
{
    // exp: forward pass on a 1-D tensor
    {
        auto tensor = std::make_shared<Tensor<float>>(Tensor<float>({2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
        auto result = tensor->exp();
        // Dereference so the tensor contents are logged, not the
        // shared_ptr itself (matches the other result logs below).
        LogInfo() << "result: " << *result;
    }

    // exp: backward pass on a 2-D tensor; d(exp(x))/dx = exp(x)
    {
        auto tensor = std::make_shared<Tensor<float>>(Tensor<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}));
        auto result = tensor->exp();
        result->backward();
        auto grad = tensor->grad();
        LogInfo() << "grad: " << grad;
    }

    // Chained ops: exp(exp(x)) — checks gradient propagation through
    // more than one node of the graph.
    {
        auto tensor = std::make_shared<Tensor<float>>(Tensor<float>({2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
        auto result = tensor->exp()->exp();
        LogInfo() << "result: " << *result;
        result->backward();
        LogInfo() << "tensor.grad(): " << tensor->grad();
    }

    // Addition: an operator with two distinct inputs; both operands
    // should receive a gradient.
    {
        auto tensor_1 = std::make_shared<Tensor<float>>(Tensor<float>({2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));

        auto tensor_2 = std::make_shared<Tensor<float>>(Tensor<float>({12.0f, 13.0f, 14.0f, 15.0f, 16.0f}));
        auto result = (*tensor_1) + (*tensor_2);
        LogInfo() << "result: " << *result;
        result->backward();
        LogInfo() << "tensor_1.grad(): " << tensor_1->grad();
        LogInfo() << "tensor_2.grad(): " << tensor_2->grad();
    }

    // Addition of a tensor with itself: the aliasing case — gradients
    // from both operand slots must accumulate into the same tensor.
    {
        auto tensor = std::make_shared<Tensor<float>>(Tensor<float>({2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));

        auto result = (*tensor) + (*tensor);
        LogInfo() << "result: " << *result;
        result->backward();
        LogInfo() << "tensor_1.grad(): " << tensor->grad();
    }

    // Multiplication: three-operand chain (a * b) * c; checks product-rule
    // gradients flowing to every input.
    {
        auto tensor_1 = std::make_shared<Tensor<float>>(Tensor<float>({2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));

        auto tensor_2 = std::make_shared<Tensor<float>>(Tensor<float>({12.0f, 13.0f, 14.0f, 15.0f, 16.0f}));
        auto tensor_3 = std::make_shared<Tensor<float>>(Tensor<float>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f}));
        auto result = *((*tensor_1) * (*tensor_2)) * (*tensor_3);
        LogInfo() << "result: " << *result;
        result->backward();
        LogInfo() << "tensor_1.grad(): " << tensor_1->grad();
        LogInfo() << "tensor_2.grad(): " << tensor_2->grad();
        LogInfo() << "tensor_3.grad(): " << tensor_3->grad();
    }
}
}
}