#include <gtest/gtest.h>

#include "trt_define.h"
#include "utils/logger.h"
#include "utils/file_utils.h"
#include "utils/nv_utils.h"
#include "trt_engine/trt_tensor.h"

_TRT_INFER_BEGIN

class TensorTest : public ::testing::Test
{
protected:
    TensorTest() {LOGER_INST.init();}
    ~TensorTest() override {}
    void SetUp() override {}
    void TearDown() override {}
};

// End-to-end smoke test: load a serialized classifier engine, preprocess an
// image into a Tensor, run inference, and log the top-1 prediction.
TEST_F(TensorTest, test_tensor)
{
    LOG_INFO << "test_tensor";
    // NOTE(review): absolute Windows paths tie this test to one machine;
    // consider deriving them from an environment variable / test-data root.
    // (Engine was exported from D:\learnSpace\trt_infer\test_data\classifier.onnx.)
    std::string save_model_path = "D:\\learnSpace\\trt_infer\\test_data\\classifier.engine";
    std::string image_path = "D:\\learnSpace\\trt_infer\\test_data\\dog.jpg";

    auto trt_log = LOGER_INST.getTRTLogger();
    auto runtime = make_nvshared(nvinfer1::createInferRuntime(trt_log));
    auto engine_data = FileUtils::load_file(save_model_path);
    // ASSERT (not EXPECT): the data/engine are dereferenced right below, so
    // continuing after a failure would crash the whole test binary.
    ASSERT_FALSE(engine_data.empty()) << "load engine failed";
    auto engine = make_nvshared(runtime->deserializeCudaEngine(engine_data.data(), engine_data.size()));
    ASSERT_TRUE(engine) << "deserializeCudaEngine failed";

    // Read and validate the image BEFORE creating the CUDA stream, so an early
    // ASSERT failure cannot leak the stream.
    auto image = cv::imread(image_path);
    ASSERT_NE(image.data, nullptr) << "read image failed";

    cudaStream_t stream;
    checkRuntime(cudaStreamCreate(&stream));
    auto context = make_nvshared(engine->createExecutionContext());
    int input_batch   = 1;
    int input_channel = 3;
    int input_height  = 224;
    int input_width   = 224;

    // Memory is not allocated immediately, but rather when it is first used.
    Tensor input_data({input_batch, input_channel, input_height, input_width}, DataType::Float);

    // Associate the tensor with the stream to ensure they operate on the same pipeline.
    input_data.set_stream(stream);

    // ImageNet normalization constants, listed in BGR order to match cv::imread;
    // 'f' suffixes avoid double->float narrowing. Renamed 'std' -> 'stddev' so the
    // local no longer shadows the std namespace.
    float mean[]   = {0.406f, 0.456f, 0.485f};
    float stddev[] = {0.225f, 0.224f, 0.229f};

    cv::resize(image, image, cv::Size(input_width, input_height));
    image.convertTo(image, CV_32F);

    // Wrap the tensor's CPU planes so cv::split writes straight into them;
    // the 2-i index flips BGR -> RGB plane order.
    cv::Mat channel_based[3];
    for(int i = 0; i < 3; i++)
    {
        channel_based[i] = cv::Mat(input_height, input_width, CV_32F, input_data.cpu<float>(0, 2-i));
    }

    cv::split(image, channel_based);
    // NOTE(review): this relies on OpenCV reusing the existing (tensor-backed)
    // buffer when assigning a MatExpr of identical size/type (Mat::create is a
    // no-op then) — confirm this holds for the OpenCV version in use.
    for(int i = 0; i < 3; ++i)
        channel_based[i] = (channel_based[i] / 255.0f - mean[i]) / stddev[i];
    input_data.to_gpu();
    const int num_classes = 1000;
    Tensor output_data({input_batch, num_classes}, DataType::Float);
    output_data.set_stream(stream);

    auto input_dims = context->getBindingDimensions(0);
    input_dims.d[0] = input_batch;

    context->setBindingDimensions(0, input_dims);
    float* bindings[] = {input_data.gpu<float>(), output_data.gpu<float>()};

    // EXPECT (not ASSERT) so the stream is still destroyed below on failure;
    // the return value was previously computed and silently ignored.
    bool success      = context->enqueueV2((void**)bindings, stream, nullptr);
    EXPECT_TRUE(success) << "enqueueV2 failed";
    checkRuntime(cudaStreamSynchronize(stream));

    float* prob = output_data.cpu<float>();
    int predict_label = std::max_element(prob, prob + num_classes) - prob;
    float confidence  = prob[predict_label];
    LOG_INFO << "predict_label: " << predict_label << ", confidence: " << confidence;
    checkRuntime(cudaStreamDestroy(stream));
}

// A freshly constructed tensor must report exactly the shape it was given.
TEST_F(TensorTest, Create)
{
    const std::vector<int> expected_shape = {1, 3, 224, 224};
    Tensor tensor(expected_shape, DataType::UInt8);
    for (int axis = 0; axis < 4; ++axis)
        EXPECT_EQ(tensor.shape(axis), expected_shape[axis]);
}


_TRT_INFER_END
