#include "onnxruntime_cxx_api.h"
#include "cpu_provider_factory.h"
#include <opencv2/opencv.hpp>
#include <fstream>

std::string labels_txt_file = "D:/python/pytorch_openvino_demo/ch5/faster_rcnn.txt";
std::map<int, std::string> classNames;

// Load "id:name" label lines from labels_txt_file into the global classNames map.
// Blank lines and lines without an "id:name" separator are skipped.
void readClassNames()
{
	std::ifstream fp(labels_txt_file);
	if (!fp.is_open()) {
		std::cout << "could not open label file: " << labels_txt_file << std::endl;
		return;
	}
	std::string name;
	// Use getline as the loop condition: the original `while (!fp.eof())`
	// pattern processed one extra (empty/duplicate) read after the last line.
	while (std::getline(fp, name)) {
		if (!name.empty() && name.back() == '\r') {
			name.pop_back();  // tolerate CRLF label files
		}
		std::cout << name << std::endl;
		if (name.empty()) {
			continue;
		}
		std::size_t pos = name.find(':');
		if (pos == std::string::npos) {
			continue;  // malformed line: no "id:name" separator, stoi would throw
		}
		int cid = std::stoi(name.substr(0, pos));
		classNames.insert(std::pair<int, std::string>(cid, name.substr(pos + 1)));
	}
	// std::ifstream closes itself on destruction (RAII); explicit close() removed.
}

int main(int argc, char** argv) {
	cv::RNG rng;
	readClassNames();
	cv::Mat frame = cv::imread("D:/images/messi_player.jpg");
	// ����InferSession, ��ѯ֧��Ӳ���豸
	// GPU Mode, 0 - gpu device id
	std::string onnxpath = "D:/python/pytorch_openvino_demo/ch5/mask_rcnn.onnx";
	std::wstring modelPath = std::wstring(onnxpath.begin(), onnxpath.end());
	Ort::SessionOptions session_options;
	Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "mask-rcnn-onnx");

	session_options.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
	std::cout << "onnxruntime inference try to use GPU Device" << std::endl;
	OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 0);
	// OrtSessionOptionsAppendExecutionProvider_CPU(session_options, 0);
	Ort::Session session_(env, onnxpath.c_str(), session_options);

	// get input and output info
	int input_nodes_num = session_.GetInputCount();
	int output_nodes_num = session_.GetOutputCount();
	std::vector<std::string> input_node_names;
	std::vector<std::string> output_node_names;
	Ort::AllocatorWithDefaultOptions allocator;

	// query input data format
	for (int i = 0; i < input_nodes_num; i++) {
		auto input_name = session_.GetInputNameAllocated(i, allocator);
		input_node_names.push_back(input_name.get());
	}

	// query output data format�� 
	for (int i = 0; i < output_nodes_num; i++) {
		auto output_name = session_.GetOutputNameAllocated(i, allocator);
		output_node_names.push_back(output_name.get());
		// auto outShapeInfo = session_.GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape();
	}

	// ͼ��Ԥ���� - ��ʽ������
	int64 start = cv::getTickCount();

	cv::Mat blob = cv::dnn::blobFromImage(frame, 1.0 / 255.0, cv::Size(frame.cols, frame.rows), cv::Scalar(0, 0, 0), true, false);
	size_t tpixels = frame.rows * frame.cols * 3;
	std::array<int64_t, 4> input_shape_info{ 1, 3, frame.rows, frame.cols };

	// set input data and inference
	auto allocator_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
	Ort::Value input_tensor_ = Ort::Value::CreateTensor<float>(allocator_info, blob.ptr<float>(), tpixels, input_shape_info.data(), input_shape_info.size());
	const std::array<const char*, 1> inputNames = { input_node_names[0].c_str() };
	const std::array<const char*, 4> outNames = { output_node_names[0].c_str(), output_node_names[1].c_str(), output_node_names[2].c_str(), output_node_names[3].c_str() };

	std::vector<Ort::Value> ort_outputs;
	try {
		ort_outputs = session_.Run(Ort::RunOptions{ nullptr }, inputNames.data(), &input_tensor_, 1, outNames.data(), outNames.size());
	}
	catch (std::exception e) {
		std::cout << e.what() << std::endl;
	}

	// boxes, labels, scores for faster-rcnn
	const float* boxes = ort_outputs[0].GetTensorMutableData<float>();
	const int64* labels = ort_outputs[1].GetTensorMutableData<int64>();
	const float* scores = ort_outputs[2].GetTensorMutableData<float>();
	const float* mask_prob = ort_outputs[3].GetTensorMutableData<float>();
	auto outShape = ort_outputs[0].GetTensorTypeAndShapeInfo().GetShape();
	size_t rows = outShape[0];
	std::cout << "fixed number:" << rows << std::endl;
	auto mask_tensorShapeInfo = ort_outputs[3].GetTensorTypeAndShapeInfo();
	auto mask_shape = mask_tensorShapeInfo.GetShape();
	std::cout<<"mask format: "<<mask_shape[0]<<"x"<<mask_shape[1]<<"x"<<mask_shape[2]<<"x"<<mask_shape[3]<<std::endl;
	// ����, 1x84x8400,  box , 80- min/max
	cv::Mat det_output(rows, 4, CV_32F, (float*)boxes);
	for (int i = 0; i < det_output.rows; i++) {
		double conf = scores[i];
		int cid = labels[i];
		// ���Ŷ� 0��1֮��
		if (conf > 0.25)
		{
			float x1 = det_output.at<float>(i, 0);
			float y1 = det_output.at<float>(i, 1);
			float x2 = det_output.at<float>(i, 2);
			float y2 = det_output.at<float>(i, 3);
			cv::Rect box;
			box.x = x1;
			box.y = y1;
			box.width = x2 - x1;
			box.height = y2 - y1;

			int mw = mask_shape[3];
			int mh = mask_shape[2];
			int index = i * mw * mh;
			cv::Mat det_mask(mh, mw, CV_32F, (float*)&mask_prob[index]);
			cv::threshold(det_mask, det_mask, 0.5, 1.0, cv::THRESH_BINARY);
			cv::Mat mask, rgb;
			det_mask = det_mask * rng.uniform(0, 255);
			det_mask.convertTo(mask, CV_8UC1);
			cv::Mat rimage = cv::Mat::zeros(mask.size(), mask.type());
			add(rimage, cv::Scalar(rng.uniform(0, 255)), rimage, mask);
			cv::Mat gimage = cv::Mat::zeros(mask.size(), mask.type());
			std::vector<cv::Mat> mlist;
			mlist.push_back(rimage);
			mlist.push_back(gimage);
			mlist.push_back(mask);
			cv::merge(mlist, rgb);
			cv::addWeighted(frame, 1.0, rgb, 0.5, 0, frame);

			cv::rectangle(frame, box, cv::Scalar(0, 0, 255), 2, 8, 0);
			putText(frame, classNames.find(cid)->second, box.tl(), cv::FONT_HERSHEY_PLAIN, 1.0, cv::Scalar(255, 0, 0), 1, 8);

		}
	}
	// ����FPS render it
	float t = (cv::getTickCount() - start) / static_cast<float>(cv::getTickFrequency());
	putText(frame, cv::format("FPS: %.2f", 1.0 / t), cv::Point(20, 40), cv::FONT_HERSHEY_PLAIN, 2.0, cv::Scalar(255, 0, 0), 2, 8);

	cv::imshow("ONNXRUNTIME1.13 + Mask-RCNN ������ʾ", frame);
	cv::waitKey(0);

	// relase resource
	session_options.release();
	session_.release();
	return 0;
}