#include <algorithm>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <vector>

#include <net.h>

#include "float_tensor.pb.h"

// Plain value type describing a CHW (channel, height, width) tensor shape.
// In-class initializers replace the hand-written constructor (Rule of Zero);
// default construction still yields an all-zero shape.
struct SShape
{
    int m_c{0};  // channels
    int m_h{0};  // height
    int m_w{0};  // width
};

// Parse a serialized FloatTensor protobuf message from a binary file and
// print its CHW shape.
//
// p_str_pb_file_path: path to the binary protobuf file.
// float_tensor:       out-parameter receiving the parsed message.
// Returns true on success, false if the file cannot be opened or parsed.
bool read_float_tensor(
        const char* p_str_pb_file_path,
        FloatTensor& float_tensor)
{
        std::ifstream pb_file(p_str_pb_file_path, std::ios::in | std::ios::binary);
        if (!pb_file) {
                // Report open failures explicitly instead of letting
                // ParseFromIstream fail with a less specific message.
                std::cerr << "can not open file: " << p_str_pb_file_path << std::endl;
                return false;
        }
        if (!float_tensor.ParseFromIstream(&pb_file)) {
                std::cerr << "can not parse data from file: " << p_str_pb_file_path << std::endl;
                return false;
        }
        std::cout << "input shape: "
                  << float_tensor.shape().channel() << " x "
                  << float_tensor.shape().height()  << " x "
                  << float_tensor.shape().width();
        std::cout << std::endl;
        // Bug fix: the original fell off the end of this bool function on the
        // success path, which is undefined behavior.
        return true;
}

// Run a single forward pass of an ncnn network over a raw CHW float buffer.
//
// p_str_ncnn_param_file_path: ncnn .param (graph definition) file path.
// p_str_ncnn_bin_file_path:   ncnn .bin (weights) file path.
// p_input_data:               pointer to c*h*w floats; not copied until reshape.
// input_shape:                CHW dimensions of the input buffer.
// p_str_input_tensor_name:    blob name to feed.
// p_str_output_tensor_name:   blob name to extract.
// Returns the extracted output flattened to a vector; empty on load failure.
std::vector<float> inference_by_ncnn(
        const char* p_str_ncnn_param_file_path,
        const char* p_str_ncnn_bin_file_path,
        const float* p_input_data,
        const SShape& input_shape,
        const char* p_str_input_tensor_name,
        const char* p_str_output_tensor_name)
{
        ncnn::Net net;
        // ncnn's loaders return 0 on success; the original printed "succeed"
        // unconditionally even when loading failed.
        if (net.load_param(p_str_ncnn_param_file_path) != 0) {
                std::cerr << "ncnn load param failed: " << p_str_ncnn_param_file_path << std::endl;
                return std::vector<float>();
        }
        std::cout << "ncnn load param succeed" << std::endl;
        if (net.load_model(p_str_ncnn_bin_file_path) != 0) {
                std::cerr << "ncnn load model failed: " << p_str_ncnn_bin_file_path << std::endl;
                return std::vector<float>();
        }
        std::cout << "ncnn load model succeed" << std::endl;

        ncnn::Extractor ex = net.create_extractor();
        ex.set_light_mode(false);

        // Wrap the caller's buffer (no copy yet); ncnn::Mat takes a non-const
        // void*, hence the explicit const_cast instead of a C-style cast.
        // The data is only read, not written, by the reshape/input below.
        int size = input_shape.m_c * input_shape.m_h * input_shape.m_w;
        ncnn::Mat in(size, const_cast<float*>(p_input_data));
        in = in.reshape(input_shape.m_w, input_shape.m_h, input_shape.m_c);

        ncnn::Mat out;
        ex.input(p_str_input_tensor_name, in);
        ex.extract(p_str_output_tensor_name, out);

        size_t out_size = size_t(out.c) * size_t(out.h) * size_t(out.w);
        std::vector<float> result(out_size);
        if (out_size > 0) {
                // Guard: &result[0] / data() on an empty vector must not be
                // dereferenced; memcpy with a null source would be UB.
                std::memcpy(result.data(), out.data, sizeof(float) * out_size);
        }
        return result;
}

// Loads an input tensor and an expected output tensor from protobuf files,
// runs the input through an ncnn model, and reports how many output values
// differ from the expectation by more than 1e-4.
int main(int argc, char** argv)
{
        // Bug fix: the original dereferenced argv[1..6] without checking argc.
        if (argc < 7) {
                std::cerr << "usage: " << argv[0]
                          << " <input_tensor_pb> <expected_output_pb>"
                             " <ncnn_param> <ncnn_bin>"
                             " <input_tensor_name> <output_tensor_name>"
                          << std::endl;
                return 1;
        }
        const char* input_tensor_file_path = argv[1];
        const char* expected_output_tensor_file_path = argv[2];
        const char* ncnn_param_file_path = argv[3];
        const char* ncnn_bin_file_path = argv[4];
        const char* input_tensor_name = argv[5];
        const char* output_tensor_name = argv[6];

        // step 1: read input tensor and expected tensor from protobuf files.
        // Abort early instead of ignoring read failures as the original did.
        FloatTensor input_tensor, expected_output_tensor;
        if (!read_float_tensor(input_tensor_file_path, input_tensor) ||
            !read_float_tensor(expected_output_tensor_file_path, expected_output_tensor)) {
                return 1;
        }

        // step 2: compute output tensor by ncnn
        SShape input_shape;
        input_shape.m_c = input_tensor.shape().channel();
        input_shape.m_h = input_tensor.shape().height();
        input_shape.m_w = input_tensor.shape().width();
        std::vector<float> output = inference_by_ncnn(
                ncnn_param_file_path, ncnn_bin_file_path,
                &input_tensor.data()[0], input_shape,
                input_tensor_name, output_tensor_name);

        // step 3: compare expected output and output.
        // Don't assume the network produced at least 10 values.
        const size_t preview_count = std::min<size_t>(10, output.size());
        std::cout << "output tensor start 10 values: " << std::endl;
        for (size_t i = 0; i < preview_count; ++i) {
                if (i > 0 && i % 5 == 0) std::cout << std::endl;
                std::cout << output[i] << " ";
        }
        std::cout << std::endl;

        // Compare only the overlapping range so neither buffer is over-read;
        // the original indexed expected_output_tensor.data() with output's size.
        const size_t compare_count = std::min(
                output.size(),
                static_cast<size_t>(expected_output_tensor.data_size()));
        int false_count = 0;
        for (size_t i = 0; i < compare_count; ++i) {
                if (std::fabs(output[i] - expected_output_tensor.data()[i]) > 1e-4) {
                        false_count += 1;
                }
        }
        std::cout << "false count is: " << false_count << std::endl;

        return 0;
}
