// reference from https://docs.openvinotoolkit.org/latest/openvino_docs_IE_DG_supported_plugins_GPU_RemoteBlob_API.html

#pragma once


#include <algorithm>
#include <chrono>
#include <memory>
#include <map>
#include <string>
#include <vector>
#include <utility>

#include <fstream>

#if 0
#include <ie_core.hpp>
#include <ie_plugin_config.hpp>
#include <cpp/ie_infer_request.hpp>
#include <ie_blob.h>
#else
#include <inference_engine.hpp>
#endif

#include "slog.hpp"

#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
#include <opencv2/opencv.hpp>

#include <gpu/gpu_context_api_dx.hpp>
#include <gpu/gpu_context_api_ocl.hpp>
//
#include "CL/cl.h"
#include "CL/cl2.hpp"

using namespace std;
using namespace InferenceEngine;
using namespace std::chrono;

// Global image buffer: filled by loadjpg() (grayscale, resized to the network
// input dims) and read in main() when populating the input blob. Empty if the
// image could not be loaded.
cv::Mat jpg;

// Loads an image from disk into the global `jpg`, converts it to grayscale and
// resizes it to the network input size (width x height). On load failure `jpg`
// is left empty so the caller can detect the error. Also produces a bicubic 2x
// upscale of the input for visual comparison against the network output.
//
// @param jpgname path of the image file to load
// @param width   target network input width
// @param height  target network input height
static void loadjpg(const char * jpgname, int width, int height)
{
	//loadimage(&jpg, jpgname);//
	cv::Mat jpg_2x;
	jpg = cv::imread(jpgname);
	if (jpg.empty()) {
		// Fail early: cv::imread returns an empty Mat on failure, and calling
		// cvtColor/resize on it would throw an OpenCV exception with a far
		// less helpful message than this one.
		cout << "failed to load image: " << jpgname << endl;
		return;
	}
	cout << "load image: " << jpgname << " resize: w=" << width << " h=" << height << endl;
	//resize to width*height

	std::cout << "convert img to Gray" << std::endl;
	cv::cvtColor(jpg, jpg, cv::COLOR_BGR2GRAY);  //COLOR_BGR2YCrCb or COLOR_BGR2YUV

	// Resize to the exact network input size; also keep a bicubic 2x reference
	// image so the classic-upscaling result can be compared side by side with
	// the FSRCNN super-resolution output.
	cv::resize(jpg, jpg, cv::Size(width, height), 0, 0, cv::INTER_CUBIC);
	cv::resize(jpg, jpg_2x, cv::Size(width * 2, height * 2), 0, 0, cv::INTER_CUBIC);
	cv::imshow("bic_2x", jpg_2x);
	cv::imwrite("palace_gray_bic_2x.png", jpg_2x);
}

int main(int argc, char *argv[]) {
	try {

		string FLAGS_d = "GPU"; //"GPU";
		string FLAGS_m = "C:\\work\\opencl_2020\\cmake_fsrcnn_ov2021\\src\\FSRCNN_x2_FP16.xml";
		string FLAGS_i = "C:\\work\\opencl_2020\\cmake_fsrcnn_ov2021\\src\\palace.jpg";
		int FLAGS_nt = 10;

		cout << "starting" << endl;
		const Version *IEversion;
		IEversion = GetInferenceEngineVersion();
		cout << "InferenceEngine: API version " << IEversion->apiVersion.major << "." << IEversion->apiVersion.minor << endl;
		cout << "InferenceEngine: Build : " << IEversion->buildNumber << endl << endl;

		// --------------------------- 1. Load inference engine -------------------------------------
		cout << "Creating Inference Engine" << endl;

		Core ie;
		// -----------------------------------------------------------------------------------------------------

				// --------------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
		cout << "Loading network files" << endl;

		/** Read network model **/
		CNNNetwork network = ie.ReadNetwork(FLAGS_m);
		cout << "network layer count: " << network.layerCount() << endl;
		// -----------------------------------------------------------------------------------------------------

				// --------------------------- 3. Configure input & output ---------------------------------------------

			// --------------------------- Prepare input blobs -----------------------------------------------------
		cout << "Preparing input blobs" << endl;

		/** Taking information about all topology inputs **/
		InputsDataMap inputInfo(network.getInputsInfo());
		if (inputInfo.size() != 1) throw std::logic_error("Sample supports topologies with 1 input only");

		auto inputInfoItem = *inputInfo.begin();

		/** Specifying the precision and layout of input data provided by the user.
		 * This should be called before load of the network to the device **/
		inputInfoItem.second->setPrecision(Precision::U8);
		inputInfoItem.second->setLayout(Layout::NCHW);

		//cout << FLAGS_i << endl;
		loadjpg(FLAGS_i.c_str(), inputInfoItem.second->getTensorDesc().getDims()[3],
			inputInfoItem.second->getTensorDesc().getDims()[2]);

		if (jpg.data == NULL)
		{
			cout << "Valid input images were not found!" << endl;
		}

		/** Setting batch size to 1 **/
		network.setBatchSize(1);
		size_t batchSize = network.getBatchSize();
		cout << "Batch size is " << std::to_string(batchSize) << endl;


		// --------------------------- 4. Loading model to the device ------------------------------------------
		cout << "Loading model to the device: " << FLAGS_d << endl;
		ExecutableNetwork executable_network = ie.LoadNetwork(network, FLAGS_d);
		// -----------------------------------------------------------------------------------------------------

		// --------------------------- 5. Create infer request -------------------------------------------------
		cout << "Create infer request" << endl;
		InferRequest inferRequest_regular = executable_network.CreateInferRequest();
		// -----------------------------------------------------------------------------------------------------

		// --------------------------- 6. Prepare input --------------------------------------------------------
		for (auto & item : inputInfo) {
			Blob::Ptr inputBlob = inferRequest_regular.GetBlob(item.first);
			SizeVector dims = inputBlob->getTensorDesc().getDims();
			/** Fill input tensor with images. First b channel, then g and r channels **/
			size_t num_channels = dims[1];
			std::cout << "num_channles = " << num_channels << std::endl;
			size_t image_size = dims[3] * dims[2];

			MemoryBlob::Ptr minput = as<MemoryBlob>(inputBlob);
			if (!minput) {
				cout << "We expect MemoryBlob from inferRequest_regular, but by fact we were not able to cast inputBlob to MemoryBlob" << endl;
				return 1;
			}
			// locked memory holder should be alive all time while access to its buffer happens
			auto minputHolder = minput->wmap();

			auto data = minputHolder.as<PrecisionTrait<Precision::U8>::value_type *>();
			unsigned char* pixels = (unsigned char*)(jpg.data);

			cout << "image_size = " << image_size << endl;
			/** Iterate over all pixel in image (b,g,r) **/
			for (size_t pid = 0; pid < image_size; pid++) {
				/** Iterate over all channels **/
				for (size_t ch = 0; ch < num_channels; ++ch) {
					/**          [images stride + channels stride + pixel id ] all in bytes            **/
					data[ch * image_size + pid] = pixels[pid*num_channels + ch];
				}
			}
		}

		milliseconds start_ms = duration_cast<milliseconds>(
			system_clock::now().time_since_epoch()
			);
		// --------------------------- 7. Do inference ---------------------------------------------------------
#if 0
		//for async inference
		size_t numIterations = 10;
		size_t curIteration = 0;
		std::condition_variable condVar;

		inferRequest_regular.SetCompletionCallback(
			[&] {
			curIteration++;
			cout << "Completed " << curIteration << " async request execution" << endl;
			if (curIteration < numIterations) {
				/* here a user can read output containing inference results and put new input
				   to repeat async request again */
				inferRequest_regular.StartAsync();
			}
			else {
				/* continue sample execution after last Asynchronous inference request execution */
				condVar.notify_one();
			}
		});

		/* Start async request for the first time */
		cout << "Start inference (" << numIterations << " asynchronous executions)" << endl;
		inferRequest_regular.StartAsync();

		/* Wait all repetitions of the async request */
		std::mutex mutex;
		std::unique_lock<std::mutex> lock(mutex);
		condVar.wait(lock, [&] { return curIteration == numIterations; });
#else
		/* Start sync request */
		cout << "Start inference " << endl;
		inferRequest_regular.Infer();
#endif
		milliseconds end_ms = duration_cast<milliseconds>(
			system_clock::now().time_since_epoch()
			);
		std::cout << "total cost time: " << (end_ms - start_ms).count() << " ms" << std::endl;
		float total_time = (end_ms - start_ms).count() / 1000.0;
		std::cout << "FPS: " << (float)1.0 / total_time << std::endl;

		// -----------------------------------------------------------------------------------------------------

		// --------------------------- 8. Process output -------------------------------------------------------
		cout << "Processing output blobs" << endl;
		OutputsDataMap outputInfo(network.getOutputsInfo());

		cout << "output blob name: " << outputInfo.begin()->first << endl;
		if (outputInfo.size() != 1) throw std::logic_error("Sample supports topologies with 1 output only");
		MemoryBlob::CPtr moutput = as<MemoryBlob> (inferRequest_regular.GetBlob(outputInfo.begin()->first));

		/** Validating -nt value **/
		const size_t resultsCnt = moutput->size() / batchSize;
		if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) {
			cout << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " \
				<< resultsCnt + 1 << " and more than 0)\n            will be used maximal value : " << resultsCnt << endl;
			FLAGS_nt = resultsCnt;
		}


		if (!moutput) {
			throw std::logic_error("We expect output to be inherited from MemoryBlob, "
				"but by fact we were not able to cast it to MemoryBlob");
		}
		// locked memory holder should be alive all time while access to its buffer happens
		auto lmoHolder = moutput->rmap();
		const auto output_data = lmoHolder.as<const PrecisionTrait<Precision::FP32>::value_type *>();

		size_t num_images = moutput->getTensorDesc().getDims()[0];
		size_t num_channels = moutput->getTensorDesc().getDims()[1];
		size_t H = moutput->getTensorDesc().getDims()[2];
		size_t W = moutput->getTensorDesc().getDims()[3];
		size_t nPixels = W * H;


		std::cout << "Output size [N,C,H,W]: " << num_images << ", " << num_channels << ", " << H << ", " << W << std::endl;

		{
			std::vector<float> data_img(nPixels * num_channels);

			if (num_channels == 1)
			{
				cv::Mat Img(H, W, CV_8U);
				unsigned char *image_ptr = Img.data;

				for (size_t n = 0; n < num_images; n++) {
					for (size_t i = 0; i < nPixels; i++) {
						data_img[i ] = static_cast<float>(output_data[i + n * nPixels ])*255.0;

						//std::cout << "i:" << i << "  data:" << data_img[i] << std::endl;

						if (data_img[i  ] < 0) data_img[i  ] = 0;
						if (data_img[i  ] > 255) data_img[i  ] = 255;
						image_ptr[i] = data_img[i];

					}
				}

				imshow("FSRCNN_2x", Img);
				cv::imwrite("palace_FSRCNN_gray_2x.png", Img);
				std::cout << "Output Image created" << std::endl;

			}
			else
			{
				cv::Mat Img(H, W, CV_8UC3);
				unsigned char *image_ptr = Img.data;

				for (size_t n = 0; n < num_images; n++) {
					for (size_t i = 0; i < nPixels; i++) {
						data_img[i * num_channels] = static_cast<float>(output_data[i + n * nPixels * num_channels]);
						data_img[i * num_channels + 1] = static_cast<float>(
							output_data[(i + nPixels) + n * nPixels * num_channels]);
						data_img[i * num_channels + 2] = static_cast<float>(
							output_data[(i + 2 * nPixels) + n * nPixels * num_channels]);

						//switch BGR->RGB
						float temp = data_img[i * num_channels];
						data_img[i * num_channels] = data_img[i * num_channels + 2];
						data_img[i * num_channels + 2] = temp;

						if (data_img[i * num_channels] < 0) data_img[i * num_channels] = 0;
						if (data_img[i * num_channels] > 255) data_img[i * num_channels] = 255;

						if (data_img[i * num_channels + 1] < 0) data_img[i * num_channels + 1] = 0;
						if (data_img[i * num_channels + 1] > 255) data_img[i * num_channels + 1] = 255;

						if (data_img[i * num_channels + 2] < 0) data_img[i * num_channels + 2] = 0;
						if (data_img[i * num_channels + 2] > 255) data_img[i * num_channels + 2] = 255;
					}
				}
				std::cout << "not finished yet" << std::endl;
			}


			while (1)
			{
				if (cv::waitKey(30) == 27 /*ESC*/)
				{
					break;
				}
			}
		}

	}
	catch (const std::exception& error) {
		cout << error.what() << endl;
		return 1;
	}
	catch (...) {
		cout << "Unknown/internal exception happened." << endl;
		return 1;
	}

	cout << "Execution successful" << endl;
	cout << endl << "This sample is an API example, for any performance measurements "
		"please use the dedicated benchmark_app tool" << endl;

	// -----------------------------------------------------------------------------------------------------
	return 0;
}
