#include "Camera.hpp"
#include "ProgramOptionsProcessing.hpp"
#include "model_output_labels_loader.hpp"

#include "armnn/BackendId.hpp"
#include "armnn/IRuntime.hpp"
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#include "InferenceTest.hpp"
#include "TensorIOUtils.hpp"

#include <algorithm>
#include <boost/filesystem.hpp>
#include <boost/variant.hpp>
#include <iostream>
#include <iterator>
#include <map>

// Variant able to hold any tensor payload type the TfLite parser can
// produce: float32, int32, or quantized uint8 element buffers.
using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;


// Class label names loaded at startup from the label file; indexed by the
// class id reported by the detector (read by PrintResults below).
std::vector<CategoryNames> modelOutputLabels;
// ------------------------------------------------------------------------
// MobileNetSSD input/output tensor description
// ------------------------------------------------------------------------
// Graph tensor names: one input plus the four outputs of the TFLite
// detection post-process (boxes, classes, scores, detection count).
const std::string inputName = "normalized_input_image_tensor";
const std::string outputName1 = "TFLite_Detection_PostProcess";
const std::string outputName2 = "TFLite_Detection_PostProcess:1";
const std::string outputName3 = "TFLite_Detection_PostProcess:2";
const std::string outputName4 = "TFLite_Detection_PostProcess:3";
// Fixed input geometry expected by the model: 300x300 frames, batch size 1,
// channels-last (NHWC) layout.
const unsigned int inputTensorWidth = 300;
const unsigned int inputTensorHeight = 300;
const unsigned int inputTensorBatchSize = 1;
const armnn::DataLayout inputTensorDataLayout = armnn::DataLayout::NHWC;

// Resizes a camera frame to the model input size and converts it to a flat
// float vector normalized to [0, 1]. Defined after main().
std::vector<float> PreprocessFrame(const cv::Mat &frame);
// Prints detection results to stdout and draws bounding boxes plus label
// text onto `frame`. Defined after main().
void PrintResults(
    const std::vector<float>& detectedPositions, 
    const std::vector<float>& detectedClasses, 
    const std::vector<float>& detectedConfidence, 
    const std::vector<float>& detectedObjectCount, 
    cv::Mat& frame);

int main(int argc, char* argv[])
{
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    armnn::ConfigureLogging(true, true, level);

    // ------------------------------------------------------------------------
    // Get program options
    // ------------------------------------------------------------------------

    ProgramOptions programOptions;
    if (GetProgramOptions(argc, argv, programOptions) != 0)
    {
        return EXIT_FAILURE;
    }
    
    // ------------------------------------------------------------------------
    // Open Camera and prepare window for drawing
    // ------------------------------------------------------------------------
    auto camera = new demo::Camera(0);

    std::string window_name = "My Camera Feed";
    cv::namedWindow(window_name); //create a window called "My Camera Feed"

    // ------------------------------------------------------------------------
    // Load model output labels
    // ------------------------------------------------------------------------
    modelOutputLabels = LoadModelOutputLabels(programOptions.modelOutputLabelsPath);

    // ------------------------------------------------------------------------
    // Prepare output tensor data container
    // ------------------------------------------------------------------------

    // Output tensor size is equal to the number of model output labels
    static constexpr unsigned int kShape = 10u; 		// 10 objects can be detected per inference
    static constexpr unsigned int kOutputSize1 = kShape * 4u; 	// xmin, ymin, xmax, ymax for each detected object
    static constexpr unsigned int kOutputSize2 = kShape;	// Detected Object Class
    static constexpr unsigned int kOutputSize3 = kShape;	// Confidence of detection
    static constexpr unsigned int kOutputSize4 = 1u; 		// Number of detections  

    std::vector<TContainer> outputDataContainers = { 
	std::vector<float>(kOutputSize1),
	std::vector<float>(kOutputSize2),
	std::vector<float>(kOutputSize3),
	std::vector<float>(kOutputSize4) };
    
    // ------------------------------------------------------------------------
    // Import graph
    // ------------------------------------------------------------------------

    // Import the TensorFlowLite model.
    using IParser = armnnTfLiteParser::ITfLiteParser;
    auto armnnparser(IParser::Create());
    armnn::INetworkPtr network = armnnparser->CreateNetworkFromBinaryFile(programOptions.modelPath.c_str());

    // Find the binding points for the input and output nodes
    using BindingPointInfo = armnnTfLiteParser::BindingPointInfo;
    const std::vector<BindingPointInfo> inputBindings  = { armnnparser->GetNetworkInputBindingInfo(0, inputName) };
    const std::vector<BindingPointInfo> outputBindings = { 
	armnnparser->GetNetworkOutputBindingInfo(0, outputName1),
	armnnparser->GetNetworkOutputBindingInfo(0, outputName2),
	armnnparser->GetNetworkOutputBindingInfo(0, outputName3),
	armnnparser->GetNetworkOutputBindingInfo(0, outputName4) };

    // ------------------------------------------------------------------------
    // Optimize graph and load the optimized graph onto a compute device
    // ------------------------------------------------------------------------

    // Optimize the network for a specific runtime compute device, e.g. CpuAcc, GpuAcc
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
    armnn::IOptimizedNetworkPtr optimizedNet =
        armnn::Optimize(*network, programOptions.computeDevice, runtime->GetDeviceSpec());

    // Load the optimized network onto the runtime device
    armnn::NetworkId networkId;
    runtime->LoadNetwork(networkId, std::move(optimizedNet));
    
    while(true)
    {
	// ------------------------------------------------------------------------
	// Prapare Input image
	// ------------------------------------------------------------------------
	// get frame from camera                       
	auto frame = camera->getNextFrame();
	
	auto input_array = PreprocessFrame(frame);
	
	// Load and preprocess input image
	const std::vector<TContainer> inputDataContainers =
	{ input_array };
		
	// ------------------------------------------------------------------------
	// Run graph on device
	// ------------------------------------------------------------------------
	// start time (for time measure)
	auto start = std::chrono::high_resolution_clock::now();
	
	// run inference
	runtime->EnqueueWorkload(networkId,
		armnnUtils::MakeInputTensors(inputBindings, inputDataContainers),
		armnnUtils::MakeOutputTensors(outputBindings, outputDataContainers));
	
	// end time and calculate time for inference
	auto end = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> diff = end-start;
	
	std::cout << "===========" << std::endl;
	std::cout << "Time : " << diff.count() << " s\n";
	
	// ------------------------------------------------------------------------
	// Process the output tensors and print results
	// ------------------------------------------------------------------------
	auto detectedPositions = boost::get<std::vector<float>>(outputDataContainers[0]);
	auto detectedClasses = boost::get<std::vector<float>>(outputDataContainers[1]);
	auto detectedConfidence = boost::get<std::vector<float>>(outputDataContainers[2]);
	auto detectedObjectCount = boost::get<std::vector<float>>(outputDataContainers[3]);
	
	PrintResults(detectedPositions, detectedClasses, detectedConfidence, detectedObjectCount, frame);

	// show camera image
	cv::imshow(window_name, frame);

	// check if exit
	if (cv::waitKey(10) == 27)
	{
	    std::cout << "Esc key is pressed by user. Stoppig the video" << std::endl;
	    break;
	}
    }
    
    delete camera;

    return 0;
	
}

/// Converts a BGR camera frame into the model's input tensor data.
/// The frame is resized to inputTensorWidth x inputTensorHeight, converted
/// to 32-bit float, scaled to [0, 1], and flattened into a vector.
std::vector<float> PreprocessFrame(const cv::Mat &frame)
{
    // resize to fit the fixed model input resolution
    cv::Mat resized;
    cv::resize(frame, resized, cv::Size(inputTensorWidth, inputTensorHeight));

    // convert to float (float32 model) and normalize to [0, 1]
    cv::Mat image;
    resized.convertTo(image, CV_32FC3);
    image = image / 255.f;

    // Flatten the pixel data. convertTo allocated `image` as a fresh matrix,
    // so its buffer is continuous and a front-to-back copy is safe. Use the
    // typed ptr<float>() accessor instead of the original C-style casts of
    // datastart/dataend.
    const float* first = image.ptr<float>(0);
    const std::size_t elementCount = image.total() * image.channels();
    return std::vector<float>(first, first + elementCount);
}
    
/// Prints every detection with confidence >= 50% to stdout and draws its
/// bounding box and label onto `frame`.
///
/// @param detectedPositions   normalized box coordinates, 4 floats per box
///                            (read below as [ymin, xmin, ymax, xmax])
/// @param detectedClasses     class id per box, as float
/// @param detectedConfidence  confidence score per box
/// @param detectedObjectCount single float: number of valid detections
/// @param frame               image the boxes/labels are drawn onto (mutated)
void PrintResults(
    const std::vector<float>& detectedPositions, 
    const std::vector<float>& detectedClasses, 
    const std::vector<float>& detectedConfidence, 
    const std::vector<float>& detectedObjectCount, 
    cv::Mat& frame)
{
    // Nothing to do without a detection-count tensor.
    if (detectedObjectCount.empty())
    {
        return;
    }

    // Never trust the reported count beyond the tensors actually allocated
    // for this inference (kShape entries each).
    const size_t reportedCount = boost::numeric_cast<size_t>(detectedObjectCount[0]);
    const size_t numberOfDetections =
        std::min({reportedCount, detectedConfidence.size(), detectedClasses.size(), detectedPositions.size() / 4});

    for (size_t i = 0; i < numberOfDetections; i++)
    {
        // threshold for printing result >= 50%
        if (detectedConfidence[i] < 0.5f)
            continue;

        // Convert the float class output to an integer label index, and make
        // sure it actually exists in the loaded label table before using it.
        const size_t labelIndex = boost::numeric_cast<size_t>(detectedClasses[i]);
        if (labelIndex >= modelOutputLabels.size())
        {
            std::cout << "Prediction " << labelIndex << ": <no label loaded for this class id>" << std::endl;
            continue;
        }

        std::cout << "Prediction " << labelIndex << ": ";
        // print the label name(s)
        for (const auto& label : modelOutputLabels[labelIndex])
        {
            std::cout << label << ", ";
        }
        std::cout << ": " << detectedConfidence[i] << std::endl;

        // Scale the normalized box corners by the frame size. The layout is
        // [ymin, xmin, ymax, xmax]: index +1/+3 are x, +0/+2 are y.
        cv::Point topLeft(detectedPositions[i*4+1] * frame.cols, detectedPositions[i*4+0] * frame.rows);
        cv::Point bottomRight(detectedPositions[i*4+3] * frame.cols, detectedPositions[i*4+2] * frame.rows);
        // print to console
        std::cout << "Position: " << topLeft.x << ", " << topLeft.y << ", " << bottomRight.x << ", " << bottomRight.y << std::endl;

        // draw red rectangle around detected object (BGR color order)
        cv::rectangle(frame, topLeft, bottomRight, cv::Scalar(0, 0, 255));

        // Draw the first label name on a filled blue background. Thickness
        // arguments are ints; the original passed 1.0 (double) and bumped
        // `baseline` by 1.2, which was silently truncated to += 1.
        int baseline = 0;
        cv::Size textSize = cv::getTextSize(modelOutputLabels[labelIndex][0], cv::FONT_HERSHEY_PLAIN, 1.0, 1, &baseline);
        baseline += 1;
        // Clamp the text origin so labels near the frame edge stay visible.
        cv::Point textOrg(std::max(0, topLeft.x), std::max(0, topLeft.y));
        cv::rectangle(frame, textOrg, textOrg + cv::Point(textSize.width, textSize.height), cv::Scalar(255, 0, 0), -1);

        cv::putText(frame, modelOutputLabels[labelIndex][0], textOrg + cv::Point(0, textSize.height), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar::all(255), 1, 8);
    }
}
