/*
 * FeatureTensor.cpp
 *
 *  Created on: Dec 15, 2017
 *      Author: zy
 */

#include "FeatureTensor.h"

#include <algorithm>
#include <cstring>
#include <iostream>
// using namespace tensorflow;

// #define TENSORFLOW_MODEL_META "111.meta"
// #define TENSORFLOW_MODEL "mars-small128.ckpt-68577"

// Process-wide singleton; created lazily on first access.
FeatureTensor *FeatureTensor::instance = NULL;

// Returns the shared FeatureTensor, constructing it on first call.
// NOTE(review): not synchronized — confirm all callers run on one thread.
FeatureTensor *FeatureTensor::getInstance() {
	return instance != NULL ? instance : (instance = new FeatureTensor());
}



// Construction is intentionally empty: the ONNX session is set up by a
// separate call to init(model_path), so callers must invoke init() before
// using the extractor. (The old in-constructor TensorFlow bootstrap that
// used to live here is gone.)
FeatureTensor::FeatureTensor() {
}

// Destructor.
//
// Fixes two defects in the previous version:
//  1. It called free() on std::string::data() for every node-name string.
//     Those strings own their buffers (they were deep-copied from
//     Ort::AllocatedStringPtr in init()), so freeing .data() corrupted the
//     heap and caused a double free when the vectors destructed. The strings
//     clean themselves up — no manual free is needed.
//  2. It executed `delete instance` from within the destructor of the very
//     object being destroyed, i.e. infinite recursion / double delete.
//     We only reset the singleton pointer so a later getInstance() can
//     construct a fresh object.
// The Ort::Session is owned by a std::unique_ptr member and is released
// automatically.
FeatureTensor::~FeatureTensor() {
	if (instance == this) {
		instance = NULL;
	}
}

// Letterboxes `image` onto a square canvas and converts it to an NCHW float
// blob sized for the model.
//
// Steps: copy the image into the top-left of a _max x _max black canvas
// (_max = max(w, h)), then blobFromImage scales it to
// input_model_width x input_model_height, normalizes by 1/255 and swaps
// BGR->RGB (swapRB=true), without cropping.
//
// Returns a preprocess_info whose FIRST field (named `resized_image`) in fact
// holds the NCHW blob, not the square canvas — the caller reads it as the
// blob (see getRectsFeature). Consider renaming the field.
FeatureTensor::preprocess_info FeatureTensor::preprocess(const cv::Mat& image){
	int w = image.cols;
	int h = image.rows;
	int _max = std::max(h,w);
	// Square canvas so aspect ratio survives the model resize.
	cv::Mat resized_image = cv::Mat::zeros(cv::Size(_max, _max), image.type());
	cv::Rect roi(0,0,w,h);
	image.copyTo(resized_image(roi));

	// Fix bug, boxes consistency!
	// NOTE(review): these factors map model coords back using the ORIGINAL
	// image dims, but the blob is built from the padded _max x _max canvas;
	// for non-square inputs the back-mapping factor would be
	// _max / input_model_width. Confirm against whatever consumes
	// x_factor/y_factor (unused in this file).
    float x_factor = image.cols / static_cast<float>(input_model_width);
    float y_factor = image.rows / static_cast<float>(input_model_height);

    cv::Mat blob = cv::dnn::blobFromImage(resized_image, 1 / 255.0, cv::Size(input_model_width, input_model_height), cv::Scalar(0, 0, 0), true, false);
	CV_Assert(blob.type() == CV_32F);
    size_t tpixels = input_model_height * input_model_width * 3;
    std::array<int64_t, 4> input_shape_info{ 1, 3, input_model_height, input_model_width };
	return { blob, tpixels, input_shape_info, x_factor, y_factor };  	
}

bool FeatureTensor::init(const std::string &model_path) {
	Ort::Env env(ORT_LOGGING_LEVEL_ERROR, "FeatureTensor");
	
	session_options.SetIntraOpNumThreads(0);
	session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

	session = std::make_unique<Ort::Session>(env, model_path.c_str(), session_options);

	Ort::AllocatorWithDefaultOptions allocator;


    size_t num_input_nodes = session->GetInputCount();
    input_node_name.resize(num_input_nodes);
    for (size_t i = 0; i < num_input_nodes; i++) {
		Ort::AllocatedStringPtr input_name = session->GetInputNameAllocated(i, allocator);
		input_node_name[i] = input_name.get();
    }
    
    // 获取输出名称
    size_t num_output_nodes = session->GetOutputCount();
    output_node_name.resize(num_output_nodes);
    for (size_t i = 0; i < num_output_nodes; i++) {
        Ort::AllocatedStringPtr output_name = session->GetOutputNameAllocated(i, allocator);
        output_node_name[i] = output_name.get();
    }
    
    std::cout << "ONNX ResNet50 feature extractor initialized." << std::endl;

	// tensorflow::SessionOptions sessOptions;
	// sessOptions.config.mutable_gpu_options()->set_allow_growth(true);
	// session = NewSession(sessOptions);
	// if(session == nullptr) return false;

	// const tensorflow::string pathToGraph = TENSORFLOW_MODEL_META;
	// Status status;
	// MetaGraphDef graph_def;
    //tensorflow::GraphDef graph_def;//不同类型的文件不同的定义

	// status = ReadBinaryProto(tensorflow::Env::Default(), pathToGraph, &graph_def);
	// if(status.ok() == false) return false;

	// status = session->Create(graph_def.graph_def());
	// if(status.ok() == false) return false;
	// std::vector<std::string> node_names;
	// for (const auto &node : graph_def.graph_def().node()) {
	// 	printf("node name:%s\n", node.name().c_str());

	// }
	// const tensorflow::string checkpointPath = TENSORFLOW_MODEL;
	// Tensor checkpointTensor(DT_STRING, TensorShape());
    // checkpointTensor.scalar<tensorflow::tstring>()() = checkpointPath;

	// status = session->Run(
	// 		{ {graph_def.saver_def().filename_tensor_name(), checkpointTensor}, },
	// 		{}, {graph_def.saver_def().restore_op_name()}, nullptr );
	// if(status.ok() == false) return false;

	// input_layer = "Placeholder:0";
	// outnames.push_back("truediv:0");
    // feature_dim = 128;



	return true;
}

// Fills dbox.feature for every detection in `d` with the appearance
// embedding of that detection's image crop.
//
// Fixes two defects in the previous version:
//  1. It preprocessed the WHOLE frame once (batch size 1) yet read
//     output_data[i*feature_dim + j] for the i-th detection — for i >= 1
//     that indexed past the end of the single-image output tensor
//     (out-of-bounds read), and no detection was ever cropped.
//  2. No bounds check between feature_dim and the tensor's element count.
// Now each detection's tlwh box is clamped to the image, cropped,
// preprocessed, and run through the model individually — mirroring the
// original TensorFlow implementation's per-crop batching intent.
//
// Returns true; detections whose clamped box is empty keep their previous
// feature values.
bool FeatureTensor::getRectsFeature(const cv::Mat& img, DETECTIONS& d) {
	auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
	std::vector<int64_t> input_shape = {1, 3, input_model_height, input_model_width};

	const std::array<const char*, 1> input_names = {input_node_name[0].c_str()};
	const std::array<const char*, 1> output_names = {output_node_name[0].c_str()};

	for (DETECTION_ROW& dbox : d) {
		// tlwh = (top-left x, top-left y, width, height); clamp to the frame.
		cv::Rect rc(int(dbox.tlwh(0)), int(dbox.tlwh(1)),
				int(dbox.tlwh(2)), int(dbox.tlwh(3)));
		rc &= cv::Rect(0, 0, img.cols, img.rows);
		if (rc.width <= 0 || rc.height <= 0) {
			continue;  // box entirely outside the frame — nothing to embed
		}

		preprocess_info pre_info = preprocess(img(rc));
		// preprocess_info::resized_image actually carries the NCHW blob.
		float* blob_data = pre_info.resized_image.ptr<float>();

		Ort::Value input_tensor = Ort::Value::CreateTensor<float>(
			memory_info, blob_data, pre_info.tpixels,
			input_shape.data(), input_shape.size());

		auto output_tensors = session->Run(
			Ort::RunOptions{nullptr},
			input_names.data(), &input_tensor, 1,
			output_names.data(), 1);

		const float* output_data = output_tensors[0].GetTensorMutableData<float>();
		const size_t feature_size = output_tensors[0].GetTensorTypeAndShapeInfo().GetElementCount();

		// Never read past the tensor even if the model's output width and
		// feature_dim disagree.
		const size_t n = std::min<size_t>(feature_dim, feature_size);
		for (size_t j = 0; j < n; j++) {
			dbox.feature[j] = output_data[j];
		}
	}
	return true;
}

// Packs the raw pixel bytes of every 3-channel uchar image in `imgs`
// back-to-back into `buf`. Caller must size buf as sum(rows*cols*3).
//
// Fixes a defect: for a NON-continuous Mat the previous version copied only
// img.cols bytes per row, dropping the x3 channel factor and silently
// truncating two thirds of each row. (Continuous mats were unaffected
// because the whole payload collapsed into one rows*cols*3 pseudo-row.)
void FeatureTensor::tobuffer(const std::vector<cv::Mat> &imgs, uint8 *buf) {
	size_t pos = 0;
	for(const cv::Mat& img : imgs) {
		int nr = img.rows;
		size_t bytes_per_row = static_cast<size_t>(img.cols) * 3;  // 3-channel uchar rows
		if(img.isContinuous()) {
			// Continuous storage: copy the whole image as one flat row.
			nr = 1;
			bytes_per_row = static_cast<size_t>(img.rows) * img.cols * 3;
		}
		for(int i = 0; i < nr; i++) {
			const uchar* inData = img.ptr<uchar>(i);
			std::memcpy(buf + pos, inData, bytes_per_row);
			pos += bytes_per_row;
		}
	}
}
// Diagnostic hook; intentionally a no-op.
void FeatureTensor::test() {
}
