#include "test.h"

#include <numeric>


/// Build a test input buffer shaped like `shape`, filled with ones.
/// @param shape tensor dimensions; total element count comes from
///        vkinfer::shape_size (presumably the product of the dims — TODO confirm).
/// @return vector of shape_size(shape) floats, each set to 1.0f.
std::vector<float> get_data(const std::vector<uint32_t>& shape)
{
    const uint32_t count = vkinfer::shape_size(shape);
    // Fill-construct directly instead of an explicit element-wise loop.
    return std::vector<float>(count, 1.0f);
}


/// Sum all elements of `data`.
/// @param data values to accumulate.
/// @return float sum of the elements; 0.0f for an empty vector.
float vector_sum(const std::vector<float>& data)
{
    // std::accumulate replaces the hand-rolled index loop; the 0.0f seed
    // keeps the accumulation in float (an int seed would truncate).
    return std::accumulate(data.begin(), data.end(), 0.0f);
}

/// Run the two-input "add" ONNX model on ones vectors and print the
/// output sum and size as a sanity check.
void test_add()
{
    std::cout << "addition infer\n";
    std::string model_file("D:/projects/vulkan-compute/resources/data/add.onnx");
    std::shared_ptr<vkinfer::Graph> graph = std::make_shared<vkinfer::Graph>(model_file);

    // Consistency with test_yolov5: bail out if the model failed to load
    // instead of driving an invalid graph.
    if (!graph->valid())
        return;

    uint32_t input_size = 20;
    std::vector<uint32_t> input_shape = { 1, input_size };
    std::vector<float> inputs1 = get_data(input_shape);
    std::vector<float> inputs2 = get_data(input_shape);
    graph->feed_input("x1", inputs1, input_shape);
    graph->feed_input("x2", inputs2, input_shape);
    graph->forward();
    std::vector<float> outputs;
    graph->load_output("y", outputs);

    // Sanity print (ones + ones => expected sum is 2 * input_size).
    float v = vector_sum(outputs);
    std::cout << "sum of outputs: " << v << std::endl;
    std::cout << "output size: " << outputs.size() << std::endl;
}

/// Time one forward pass of the sample ONNX model on a 1x3x224x224 ones
/// input, then time reading the output back and print a sanity sum.
void test_sample()
{
    std::cout << "sample infer\n";
    std::string model_file("D:/projects/vulkan-compute/resources/data/sample.onnx");
    std::shared_ptr<vkinfer::Graph> graph = std::make_shared<vkinfer::Graph>(model_file);

    // Consistency with test_yolov5: skip if the model could not be loaded.
    if (!graph->valid())
        return;

    uint32_t input_size = 224;
    std::vector<uint32_t> input_shape = { 1, 3, input_size, input_size };
    std::vector<float> inputs = get_data(input_shape);
    graph->feed_input("input", inputs, input_shape);
    // Warm-up forward so the timed run excludes one-time setup cost.
    graph->forward();

    vkinfer::VkTimer timer;
    timer.start();
    graph->forward();
    float t = timer.stop();
    std::cout << "inference time: " << t << std::endl;

    std::vector<float> outputs;
    timer.start();
    graph->load_output("output", outputs);
    t = timer.stop();
    std::cout << "data loading time: " << t << std::endl;

    // Sanity print of the output sum and element count.
    float v = vector_sum(outputs);
    std::cout << "sum of outputs: " << v << std::endl;
    std::cout << "output size: " << outputs.size() << std::endl;
}

/// Time one forward pass of the large linear ONNX model on a 1x512 ones
/// input and time reading the output back.
void test_linear()
{
    std::cout << "large linear infer\n";
    std::string model_file("D:/projects/vulkan-compute/resources/data/linear_large.onnx");
    std::shared_ptr<vkinfer::Graph> graph = std::make_shared<vkinfer::Graph>(model_file);

    // Consistency with test_yolov5: skip if the model could not be loaded.
    if (!graph->valid())
        return;

    uint32_t input_size = 512;
    std::vector<uint32_t> input_shape = { 1, input_size };
    std::vector<float> inputs = get_data(input_shape);
    graph->feed_input("x", inputs, input_shape);

    // NOTE(review): unlike test_sample/test_classification there is no
    // warm-up forward here, so this timing includes first-run setup cost —
    // confirm whether that is intentional.
    vkinfer::VkTimer timer;
    timer.start();
    graph->forward();
    float t = timer.stop();
    std::cout << "inference time: " << t << std::endl;

    std::vector<float> outputs;
    timer.start();
    graph->load_output("y", outputs);
    t = timer.stop();
    std::cout << "data loading time: " << t << std::endl;
}

/// Run a ResNet classification model on a preprocessed sample image,
/// timing inference and output readback, and print a sanity sum.
void test_classification()
{
    std::cout << "classification infer\n";
    std::string model_file("D:/projects/vulkan-compute/resources/data/resnet.onnx");
    std::shared_ptr<vkinfer::Graph> graph = std::make_shared<vkinfer::Graph>(model_file);

    // Consistency with test_yolov5: skip if the model could not be loaded.
    if (!graph->valid())
        return;

    const std::string image_path("D:/projects/vulkan-compute/resources/data/anime_sample.jpg");
    vkinfer::Image image = vkinfer::Image::imread(image_path);
    vkinfer::ImagePreprocessor preprocess;
    std::shared_ptr<vkinfer::Tensor<float>> tensor = preprocess.process(image);
    graph->feed_input("input", tensor);
    // Warm-up forward so the timed run excludes one-time setup cost.
    graph->forward();

    std::vector<float> outputs;
    vkinfer::VkTimer timer;
    timer.start();

    graph->forward();
    float t1 = timer.stop();
    timer.start();
    graph->load_output("output", outputs);
    float t2 = timer.stop();

    std::cout << "inference time: " << t1 << std::endl;
    std::cout << "data loading time: " << t2 << std::endl;

    // Sanity print of the output sum and element count.
    float v = vector_sum(outputs);
    std::cout << "sum of outputs: " << v << std::endl;
    std::cout << "output size: " << outputs.size() << std::endl;
}

void test_yolov5()
{
    std::cout << "yolov5 infer\n";
    std::string model_file("D:/projects/vulkan-compute/resources/data/yolov5m.onnx");
    std::shared_ptr<vkinfer::Graph> graph = std::make_shared<vkinfer::Graph>(model_file);

    if (!graph->valid())
        return;

    const std::string image_path("D:/projects/vulkan-compute/resources/data/anime_sample.jpg");
    vkinfer::Image image = vkinfer::Image::imread(image_path);
    vkinfer::ImagePreprocessor preprocess;
    preprocess.width = 640;
    preprocess.height = 640;
    std::shared_ptr<vkinfer::Tensor<float>> tensor = preprocess.process(image);
    graph->feed_input("input", tensor);
    // dummy forward
    graph->forward();

    std::vector<float> outputs;
    vkinfer::VkTimer timer;
    timer.start();

    graph->forward();
    float t1 = timer.stop();
    timer.start();
    graph->load_output("output", outputs);
    float t2 = timer.stop();

    std::cout << "inference time: " << t1 << std::endl;
    std::cout << "data loading time: " << t2 << std::endl;
}
