#include "classification_engine.h"

#include <algorithm>
#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>


namespace vortex
{
    classificationEngine::classificationEngine(const std::string& engine_path, 
        const BlobInfo& input_info, const BlobInfo& output_info)
        : m_InputInfo(input_info)
        , m_OutputInfo(output_info)
    {
        // The base engine API expects vectors of blob descriptors even for
        // a single-input / single-output network, so wrap each descriptor
        // before registering the IO layout and deserializing the engine.
        std::vector<BlobInfo> input_infos{ input_info };
        std::vector<BlobInfo> output_infos{ output_info };

        SetIOInfo(input_infos, output_infos);
        LoadEngine(engine_path);
    }

    /// @brief Loads an image, resizes it to the network input resolution and
    ///        writes the normalized planar data into batch slot @p offset of
    ///        the first input blob (host side).
    /// @param image_path path of the image file to read with cv::imread.
    /// @param offset     zero-based batch slot; the write position inside the
    ///                   input buffer is offset * (3 * width * height) floats.
    /// @return false when the image cannot be read; true otherwise.
    bool classificationEngine::Preprocess(const std::string& image_path, int offset)
    {
        // default preprocessing
        cv::Mat image = cv::imread(image_path);
        // cv::imread returns an empty Mat on failure; without this guard
        // cv::resize throws on the empty input instead of reporting cleanly.
        if (image.empty())
        {
            std::cerr << "Preprocess: failed to read image: " << image_path << std::endl;
            return false;
        }

        // Input shape is NCHW: shape[3] = width, shape[2] = height.
        uint32_t width = m_InputInfo.shape[3];
        uint32_t height = m_InputInfo.shape[2];
        cv::Mat temp;
        cv::resize(image, temp, cv::Size(width, height));

        uint32_t image_area = width * height;
        float* input_buffer = m_InputBlobs[0]->data_host;
        // Advance to this image's slot within the batched input buffer.
        input_buffer = input_buffer + (image_area * 3) * offset;

        // Planar (channel-first) destination pointers.
        float* pBlue = input_buffer;
        float* pGreen = input_buffer + image_area;
        float* pRed = input_buffer + image_area * 2;

        // Interleave -> planar conversion with per-channel ImageNet
        // normalization: (pixel/255 - mean) / std.
        // NOTE(review): cv::imread yields interleaved BGR, and channel 0 of
        // each pixel is written to the plane labeled pRed with mean/std index
        // 0 — the pointer names and the MEAN/STD ordering appear swapped
        // relative to each other; the arithmetic is kept exactly as the
        // original, but the intended plane order (RGB vs BGR) should be
        // confirmed against the model's expectations.
        unsigned char* pImage = temp.data;
        for (uint32_t i = 0; i < image_area; ++i)
        {
            pRed[i] = (pImage[3 * i + 0] / 255.0 - IMAGENET_DEFAULT_MEAN[0]) / IMAGENET_DEFAULT_STD[0];
            pGreen[i] = (pImage[3 * i + 1] / 255.0 - IMAGENET_DEFAULT_MEAN[1]) / IMAGENET_DEFAULT_STD[1];
            pBlue[i] = (pImage[3 * i + 2] / 255.0 - IMAGENET_DEFAULT_MEAN[2]) / IMAGENET_DEFAULT_STD[2];
        }
        return true;
    }

    /// @brief Runs single-image inference: preprocess, upload, execute, and
    ///        copy the first output blob back into @p output.
    /// @param image_path image file to classify.
    /// @param output     resized to one output stride and filled with the
    ///                   network's raw output for batch slot 0.
    /// @return false when preprocessing fails (unreadable image); true otherwise.
    bool classificationEngine::Infer(const std::string& image_path, std::vector<float>& output)
    {
        // Propagate preprocessing failure instead of inferring on stale data.
        if (!Preprocess(image_path, 0))
            return false;

        // transfer data onto gpu
        for (auto blob : m_InputBlobs)
            blob->ToDevice();

        InternalInfer();

        uint32_t stride = m_OutputInfo.Stride();
        m_OutputBlobs[0]->ToHost();
        output.resize(stride); // single image output
        m_OutputBlobs[0]->CopyToHost(output.data(), 0, stride);

        return true;
    }

    /// @brief Runs batched inference over a list of images.
    ///
    /// Images are processed in chunks of at most m_BatchSize. The original
    /// implementation truncated num_images / m_BatchSize and silently
    /// dropped any remainder (e.g. 5 images at batch size 4 left the 5th
    /// output empty); the final, possibly partial, chunk is now processed
    /// too. Unused slots of a partial batch keep stale input data and their
    /// outputs are simply never copied out.
    ///
    /// @param image_paths images to classify.
    /// @param output      resized to image_paths.size(); entry i receives the
    ///                    raw network output for image i.
    /// @return false when any image fails to preprocess; true otherwise.
    bool classificationEngine::InferN(const std::vector<std::string>& image_paths, std::vector<std::vector<float>>& output)
    {
        const size_t num_images = image_paths.size();
        output.resize(num_images);

        const uint32_t stride = m_OutputInfo.Stride();

        for (size_t batch_start = 0; batch_start < num_images; batch_start += m_BatchSize)
        {
            // Number of real images in this chunk (the last one may be short).
            const uint32_t batch_count = static_cast<uint32_t>(
                std::min<size_t>(m_BatchSize, num_images - batch_start));

            // feed inputs into their batch slots
            for (uint32_t i = 0; i < batch_count; ++i)
            {
                if (!this->Preprocess(image_paths[batch_start + i], static_cast<int>(i)))
                    return false;
            }

            // transfer to gpu
            for (auto blob : m_InputBlobs)
                blob->ToDevice();

            // infer
            InternalInfer();

            // bring outputs back to host
            for (auto blob : m_OutputBlobs)
                blob->ToHost();

            // copy each image's slice out of the batched output buffer
            for (uint32_t i = 0; i < batch_count; ++i)
            {
                output[batch_start + i].resize(stride);
                m_OutputBlobs[0]->CopyToHost(output[batch_start + i].data(), i * stride, stride);
            }
        }
        return true;
    }
}
