//
// Created by syshen on 2021/4/17.
//

#include <algorithm>

#include "retinaface.hpp"

#define syshen_min(a,b) ((a)<(b) ? (a) : (b))
#define syshen_max(a,b) ((a)>(b) ? (a) : (b))

// Descending-score comparator: orders detections best-first before NMS.
bool cmp(landmark a, landmark b) {
    return a.s > b.s;
}

void nms(std::vector<landmark> &input_boxes, float nms_thresh)
{
    std::vector<float>vArea(input_boxes.size());
    for (int i = 0; i < int(input_boxes.size()); ++i)
    {
        vArea[i] = (input_boxes.at(i).x2 - input_boxes.at(i).x1 + 1)
                   * (input_boxes.at(i).y2 - input_boxes.at(i).y1 + 1);
    }
    for (int i = 0; i < int(input_boxes.size()); ++i) {
        for (int j = i + 1; j < int(input_boxes.size());) {
            float xx1 = std::max(input_boxes[i].x1, input_boxes[j].x1);
            float yy1 = std::max(input_boxes[i].y1, input_boxes[j].y1);
            float xx2 = std::min(input_boxes[i].x2, input_boxes[j].x2);
            float yy2 = std::min(input_boxes[i].y2, input_boxes[j].y2);
            float w = std::max(float(0), xx2 - xx1 + 1);
            float h = std::max(float(0), yy2 - yy1 + 1);
            float inter = w * h;
            float ovr = inter / (vArea[i] + vArea[j] - inter);
            if (ovr >= nms_thresh) {
                input_boxes.erase(input_boxes.begin() + j);
                vArea.erase(vArea.begin() + j);
            }
            else {
                j++;
            }
        }
    }
}

// Letterbox img into an input_w x input_h canvas: scale to fit entirely,
// center the resized image, and fill the surrounding border with gray (128).
cv::Mat preprocess_img(cv::Mat& img, int input_h, int input_w) {
    const float scale_w = input_w / (img.cols * 1.0f);
    const float scale_h = input_h / (img.rows * 1.0f);

    int new_w, new_h, off_x, off_y;
    if (scale_h > scale_w) {
        // Width is the limiting dimension; pad top/bottom.
        new_w = input_w;
        new_h = int(scale_w * static_cast<float>(img.rows));
        off_x = 0;
        off_y = (input_h - new_h) / 2;
    } else {
        // Height is the limiting dimension; pad left/right.
        new_w = int(scale_h * img.cols);
        new_h = input_h;
        off_x = (input_w - new_w) / 2;
        off_y = 0;
    }

    cv::Mat resized(new_h, new_w, CV_8UC3);
    cv::resize(img, resized, resized.size(), 0, 0, cv::INTER_CUBIC);

    cv::Mat canvas(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
    resized.copyTo(canvas(cv::Rect(off_x, off_y, resized.cols, resized.rows)));

    return canvas;
}

// Build the face detector: load the MNN model, create an inference session,
// precompute the anchor grid and configure BGR preprocessing.
// mnn_path   — path to the .mnn model file.
// img_size   — square network input resolution (width == height).
// threshold  — minimum face score kept in compose_results.
// nms        — IoU threshold used by nms().
// num_thread — session thread count.
// retinaface — true selects the 3-stride RetinaFace anchor layout,
//              false the 4-stride layout.
Retinaface::Retinaface(
	std::string mnn_path, int img_size, float threshold, float nms, int num_thread, bool retinaface) :
	_threshold(threshold),
	_nms(nms),
	_num_thread(num_thread),
	img_size(img_size),
	_mean_val{ 104.f, 117.f, 123.f },
	_retinaface(retinaface) {
	det_interpreter = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(mnn_path.c_str()));
	MNN::ScheduleConfig config;
	// Vulkan is the backup backend; MNN falls back if it is unavailable.
	config.backupType = MNN_FORWARD_VULKAN;
	config.numThread = _num_thread;
	MNN::BackendConfig backendConfig;
	//backendConfig.precision = (MNN::BackendConfig::PrecisionMode) 2;
	config.backendConfig = &backendConfig;

    det_session = det_interpreter->createSession(config);

	input_tensor = det_interpreter->getSessionInput(det_session, nullptr);
	// Anchor layout depends on which detector variant the model was trained as.
	if (_retinaface)
		create_anchor_retinaface(anchors, img_size, img_size);
	else
		create_anchor(anchors, img_size, img_size);

	// Fix the NCHW input shape, then reallocate session buffers to match.
    det_interpreter->resizeTensor(input_tensor, { 1, 3, img_size, img_size });
    det_interpreter->resizeSession(det_session);
	// BGR in/out with per-channel mean subtraction and normalization.
	pretreat = std::shared_ptr<MNN::CV::ImageProcess>(
		MNN::CV::ImageProcess::create(MNN::CV::BGR, MNN::CV::BGR, _mean_val, 3, _norm_vals, 3));
	
}

// Decode raw network outputs into detections in original-image coordinates.
// scores    — per-anchor class pair; the second value is the face score.
// boxes     — per-anchor 4 offsets (center x/y, log w/h) relative to the anchor.
// landmarks — per-anchor 10 values: five (x, y) offset pairs.
// Anchors with score > _threshold are decoded, clipped to the network input,
// mapped back through the letterbox transform of preprocess_img, then sorted
// by score and NMS-filtered into results.
void Retinaface::compose_results(
	cv::Mat img, MNN::Tensor *scores, MNN::Tensor *boxes, MNN::Tensor *landmarks, std::vector<landmark> &results) {
	// #pragma omp parallel for num_threads(2)
	//std::vector<landmark> total_box;
	float *sptr = scores->host<float>();
	float *bptr = boxes->host<float>();
	float *ldptr = landmarks->host<float>();
	for (int i = 0; i < anchors.size(); ++i)
	{
		// Face score is the second element of each 2-value score pair.
		float score = *(sptr + 1);
		if (score > _threshold)
		{
			rect_box anchor = anchors[i];
			//box tmp1;
			landmark result;

			// Decode box: offsets are scaled by the SSD variances 0.1 / 0.2.
			float x_center = anchor.cx + bptr[0] * 0.1f * anchor.sx;
			float y_center = anchor.cy + bptr[1] * 0.1f * anchor.sy;
			float w = anchor.sx * exp(bptr[2] * 0.2f);
			float h = anchor.sy * exp(bptr[3] * 0.2f);

			// Convert normalized center/size to pixel corners, clipped to the
			// network input square [0, img_size - 1].
			result.x1 = (x_center - w / 2) * img_size;
			result.x1 = result.x1 > 0 ? result.x1 : 0;

			result.y1 = (y_center - h / 2) * img_size;
			result.y1 = result.y1 > 0 ? result.y1 : 0;

			result.x2 = (x_center + w / 2) * img_size;
			result.x2 = result.x2 <= img_size - 1 ? result.x2 : img_size - 1;

			result.y2 = (y_center + h / 2) * img_size;
			result.y2 = result.y2 <= img_size - 1 ? result.y2 : img_size - 1;

			result.s = score;

			// landmark
			// Five facial points, decoded like box centers (variance 0.1).
			for (int j = 0; j < 5; ++j) {
				result.point[j]._x = (anchor.cx + *(ldptr + (j << 1)) * 0.1f * anchor.sx) * img_size;
				result.point[j]._y = (anchor.cy + *(ldptr + (j << 1) + 1) * 0.1f * anchor.sy) * img_size;
			}

			{
				// Undo the letterbox of preprocess_img: remove the centering
				// pad on the padded axis, then divide by the resize scale.
				int x1, x2, y1, y2;
				float r_w = float(img_size / (img.cols * 1.0));
				float r_h = float(img_size / (img.rows * 1.0));
				if (r_h > r_w) {
					// Image was scaled by r_w and padded top/bottom.
					float pad = float(img_size - r_w * img.rows) / 2;
					x1 = int(result.x1);
					x2 = int(result.x2);
					y1 = int(result.y1 - pad);
					y2 = int(result.y2 - pad);
					x1 = int(x1 / r_w);
					x2 = int(x2 / r_w);
					y1 = int(y1 / r_w);
					y2 = int(y2 / r_w);
					for (int index = 0; index < 5; index++) {
						result.point[index]._y -= pad;
						result.point[index]._y /= r_w;
						result.point[index]._x /= r_w;
					}
				}
				else {
					// Image was scaled by r_h and padded left/right.
					float pad = (img_size - r_h * img.cols) / 2;
					x1 = int(result.x1 - pad);
					x2 = int(result.x2 - pad);
					y1 = int(result.y1);
					y2 = int(result.y2);
					x1 = int(x1 / r_h);
					x2 = int(x2 / r_h);
					y1 = int(y1 / r_h);
					y2 = int(y2 / r_h);
					for (int index = 0; index < 5; index++) {
						result.point[index]._x -= pad;
						result.point[index]._x /= r_h;
						result.point[index]._y /= r_h;
					}
				}
				result.x1 = float(x1);
				result.x2 = float(x2);
				result.y1 = float(y1);
				result.y2 = float(y2);
			}

			results.push_back(result);
		}
		// Advance to the next anchor's slice of each output.
		sptr += 2;
		bptr += 4;
		ldptr += 10;
	}

	// Best-first order is required by nms().
	std::sort(results.begin(), results.end(), cmp);
	
	nms(results, _nms);
}


// Run the detector on img and keep only the single best-scoring face.
// Returns 0 on success (boxes then holds exactly one landmark), -1 when
// the image is empty or no face passes the threshold.
int Retinaface::face_detect(cv::Mat& img, std::vector<landmark>& boxes)
{
    if (img.empty()) {
        std::cout << "image is empty ,please check!" << std::endl;
        return -1;
    }

    // Letterbox to the network input size and feed through the preprocessor.
    cv::Mat image = preprocess_img(img, img_size, img_size);

    pretreat->convert(image.data, img_size, img_size, int(image.step[0]), input_tensor);

    // run network
    det_interpreter->runSession(det_session);

    // get output data
    std::string output[3] = { "scores", "boxes","landmarks" };
    MNN::Tensor *tensor_scores = det_interpreter->getSessionOutput(det_session, output[0].c_str());
    MNN::Tensor *tensor_boxes = det_interpreter->getSessionOutput(det_session, output[1].c_str());
    MNN::Tensor *tensor_ldmarks = det_interpreter->getSessionOutput(det_session, output[2].c_str());

    MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());
    MNN::Tensor tensor_boxes_host(tensor_boxes, tensor_boxes->getDimensionType());
    MNN::Tensor tensor_ldmarks_host(tensor_ldmarks, tensor_ldmarks->getDimensionType());

    tensor_scores->copyToHostTensor(&tensor_scores_host);
    tensor_boxes->copyToHostTensor(&tensor_boxes_host);
    tensor_ldmarks->copyToHostTensor(&tensor_ldmarks_host);

    // BUG FIX: decode from the host copies made above, not the raw session
    // tensors — host<float>() on a device tensor is invalid on non-CPU
    // backends (this session requests Vulkan as backup).
    compose_results(img, &tensor_scores_host, &tensor_boxes_host, &tensor_ldmarks_host, boxes);

    if (boxes.empty())
        return -1;

    // Keep only the highest-scoring detection.
    // BUG FIX: the previous loop never updated max_score, so every box with
    // s > 0 overwrote `box` and the LAST one (lowest score, since
    // compose_results sorts descending) was kept instead of the best.
    landmark box;
    float max_score = 0.0f;
    for (const auto& box_ : boxes)
    {
        if (box_.s > max_score)
        {
            max_score = box_.s;
            box = box_;
        }
    }
    boxes.clear();
    boxes.emplace_back(box);

    return 0;
}

// Run the detector on img and return the top-scoring detection in face.
// Returns 0 on success, -1 when the image is empty or no face was found.
int Retinaface::face_detect(cv::Mat& img, landmark &face)
{
    if (img.empty()) {
        std::cout << "image is empty ,please check!" << std::endl;
        return -1;
    }

    // Letterbox to the network input size and feed through the preprocessor.
    cv::Mat image = preprocess_img(img, img_size, img_size);

    pretreat->convert(image.data, img_size, img_size, int(image.step[0]), input_tensor);

    // run network
    det_interpreter->runSession(det_session);

    // get output data
    std::string output[3] = { "scores", "boxes","landmarks" };
    MNN::Tensor *tensor_scores = det_interpreter->getSessionOutput(det_session, output[0].c_str());
    MNN::Tensor *tensor_boxes = det_interpreter->getSessionOutput(det_session, output[1].c_str());
    MNN::Tensor *tensor_ldmarks = det_interpreter->getSessionOutput(det_session, output[2].c_str());

    MNN::Tensor tensor_scores_host(tensor_scores, tensor_scores->getDimensionType());
    MNN::Tensor tensor_boxes_host(tensor_boxes, tensor_boxes->getDimensionType());
    MNN::Tensor tensor_ldmarks_host(tensor_ldmarks, tensor_ldmarks->getDimensionType());

    tensor_scores->copyToHostTensor(&tensor_scores_host);
    tensor_boxes->copyToHostTensor(&tensor_boxes_host);
    tensor_ldmarks->copyToHostTensor(&tensor_ldmarks_host);

    std::vector<landmark> boxes;
    // BUG FIX: decode from the host copies made above, not the raw session
    // tensors — host<float>() on a device tensor is invalid on non-CPU
    // backends (this session requests Vulkan as backup).
    compose_results(img, &tensor_scores_host, &tensor_boxes_host, &tensor_ldmarks_host, boxes);

    // compose_results sorts by descending score, so boxes[0] is the best.
    if (!boxes.empty()) {
        face = boxes[0];
        return 0;
    }
    return -1;
}

// Free the model weights and the inference session owned by the interpreter.
Retinaface::~Retinaface() {
    det_interpreter->releaseModel();
    det_interpreter->releaseSession(det_session);
}

void Retinaface::create_anchor(std::vector<rect_box> &anchor, int w, int h)
{
	//    anchor.reserve(num_boxes);
	anchor.clear();
	std::vector<std::vector<int> > feature_map(4), min_sizes(4);
	float steps[] = { 8, 16, 32, 64 };
	for (int i = 0; i < feature_map.size(); ++i) {
		feature_map[i].push_back(int(ceil(h / steps[i])));
		feature_map[i].push_back(int(ceil(w / steps[i])));
	}
	std::vector<int> minsize1 = { 10, 16, 24 };
	min_sizes[0] = minsize1;
	std::vector<int> minsize2 = { 32, 48 };
	min_sizes[1] = minsize2;
	std::vector<int> minsize3 = { 64, 96 };
	min_sizes[2] = minsize3;
	std::vector<int> minsize4 = { 128, 192, 256 };
	min_sizes[3] = minsize4;


	for (int k = 0; k < feature_map.size(); ++k)
	{
		std::vector<int> min_size = min_sizes[k];
		for (int i = 0; i < feature_map[k][0]; ++i)
		{
			for (int j = 0; j < feature_map[k][1]; ++j)
			{
				for (int l = 0; l < min_size.size(); ++l)
				{
					float s_kx = min_size[l] * 1.0f / w;
					float s_ky = min_size[l] * 1.0f / h;
					float cx = (j + 0.5f) * steps[k] / w;
					float cy = (i + 0.5f) * steps[k] / h;
					rect_box axil = { cx, cy, s_kx, s_ky };
					anchor.push_back(axil);
				}
			}
		}

	}

}

void Retinaface::create_anchor_retinaface(std::vector<rect_box> &anchor, int w, int h)
{
	//    anchor.reserve(num_boxes);
	anchor.clear();
	std::vector<std::vector<int> > feature_map(3), min_sizes(3);
	float steps[] = { 8, 16, 32 };
	for (int i = 0; i < feature_map.size(); ++i) {
		feature_map[i].push_back(int(ceil(h / steps[i])));
		feature_map[i].push_back(int(ceil(w / steps[i])));
	}
	std::vector<int> minsize1 = { 10, 20 };
	min_sizes[0] = minsize1;
	std::vector<int> minsize2 = { 32, 64 };
	min_sizes[1] = minsize2;
	std::vector<int> minsize3 = { 128, 256 };
	min_sizes[2] = minsize3;

	for (int k = 0; k < feature_map.size(); ++k)
	{
		std::vector<int> min_size = min_sizes[k];
		for (int i = 0; i < feature_map[k][0]; ++i)
		{
			for (int j = 0; j < feature_map[k][1]; ++j)
			{
				for (int l = 0; l < min_size.size(); ++l)
				{
					float s_kx = min_size[l] * 1.0f / w;
					float s_ky = min_size[l] * 1.0f / h;
					float cx = (j + 0.5f) * steps[k] / w;
					float cy = (i + 0.5f) * steps[k] / h;
					rect_box axil = { cx, cy, s_kx, s_ky };
					anchor.push_back(axil);
				}
			}
		}

	}

}

// Build the landmark regressor: load the MNN model, create an inference
// session with a 1-channel (grayscale) img_size x img_size input, and
// configure grayscale preprocessing.
FaceLandMark::FaceLandMark(std::string mnn_path, int img_size, int num_thread):
        img_size(img_size), _num_thread(num_thread)
{
    landmark_interpreter = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(mnn_path.c_str()));
    MNN::ScheduleConfig config;
    // Vulkan is the backup backend; MNN falls back if it is unavailable.
    config.backupType = MNN_FORWARD_VULKAN;
    config.numThread = _num_thread;
    MNN::BackendConfig backendConfig;
    //backendConfig.precision = (MNN::BackendConfig::PrecisionMode) 2;
    config.backendConfig = &backendConfig;

    landmark_session = landmark_interpreter->createSession(config);

    input_tensor = landmark_interpreter->getSessionInput(landmark_session, nullptr);

    // Fix the single-channel NCHW input shape, then reallocate buffers.
    landmark_interpreter->resizeTensor(input_tensor, { 1, 1, img_size, img_size });
    landmark_interpreter->resizeSession(landmark_session);

    // Grayscale in/out with mean subtraction and normalization.
    pretreat = std::shared_ptr<MNN::CV::ImageProcess>(
            MNN::CV::ImageProcess::create(MNN::CV::GRAY, MNN::CV::GRAY, \
            _mean_val, 1, _norm_vals, 1));
}

// Detect eye-corner points inside a detected face rect.
// The face box is enlarged (+10% per side in x, +20% per side in y), the
// source image is padded so the enlarged crop never leaves the frame, the
// crop is fed through the grayscale landmark network, and the normalized
// outputs are mapped back to original-image coordinates.
// Output points with index in (400, 470) go to left_corner, (530, 600) to
// right_corner.
void FaceLandMark::landmark_detect(cv::Mat& img, cv::Rect &rect, std::vector<cv::Point>& left_corner, \
                                    std::vector<cv::Point>& right_corner)
{
    if (img.empty()) {
        std::cout << "image is empty ,please check!" << std::endl;
        return;
    }

    int bbox_width = rect.width;
    int bbox_height = rect.height;

    // Pad every side by the larger bbox dimension so the enlarged crop
    // below always stays inside the padded image.
    cv::Mat image;
    int add = int(syshen_max(bbox_width, bbox_height));
    copyMakeBorder(img, image, add, add, add, add,cv::BORDER_CONSTANT, \
                    cv::Scalar(127.0f, 127.0f, 127.0f));

    // Enlarged face box, centered on the (shifted) detection.
    float face_width = (1.0f + 2.0f * 0.1f) * (bbox_width);
    float face_height = (1.0f + 2.0f * 0.2f) * (bbox_height);
    float center_x = rect.x + add + bbox_width / 2;
    float center_y = rect.y + add + bbox_height / 2;

    int x0 = int(center_x - face_width / 2 + 0.5f);
    int y0 = int(center_y - face_height / 2 + 0.5f);
    int x1 = int(center_x + face_width / 2 + 0.5f);
    int y1 = int(center_y + face_height / 2 + 0.5f);

    cv::Rect rect_(x0, y0, int(x1 - x0), int(y1 - y0));
    cv::Mat crop_image;
    image(rect_).copyTo(crop_image);

    // Geometry needed to map normalized outputs back to img coordinates:
    // {crop height, crop width, crop top (padded image), crop left, pad}.
    int h = crop_image.rows;
    int w = crop_image.cols;
    int detail[5] = {h, w, y0, x0, add};

    cv::resize(crop_image, crop_image, cv::Size(img_size, img_size));
    cv::cvtColor(crop_image, crop_image, cv::COLOR_RGB2GRAY);
    pretreat->convert(crop_image.data, img_size, img_size, int(crop_image.step[0]), input_tensor);

    landmark_interpreter->runSession(landmark_session);

    std::string output[1] = { "output_0" };
    MNN::Tensor *tensor_points = landmark_interpreter->getSessionOutput(landmark_session, output[0].c_str());
    MNN::Tensor tensor_scores_host(tensor_points, tensor_points->getDimensionType());
    tensor_points->copyToHostTensor(&tensor_scores_host);

    // BUG FIX: read from the host copy made above, not the raw session
    // tensor — host<float>() on a device tensor is invalid on non-CPU
    // backends (this session requests Vulkan as backup).
    float *ptr = tensor_scores_host.host<float>();
    // The model emits 1946 / 2 = 973 (x, y) pairs; only the eye-corner
    // index ranges below are kept.
    for (size_t i = 0; i < (1946 >> 1); ++i)
    {
        if (i > 530 && i < 600) // right eye
        {
            float x = ptr[2 * i];
            float y = ptr[2 * i + 1];
            // Undo crop scaling, then the crop/pad offsets.
            x = x * detail[1] + detail[3] - detail[4];
            y = y * detail[0] + detail[2] - detail[4];

            cv::Point point;
            point.y = int(y + 0.5f);
            point.x = int(x + 0.5f);
            right_corner.emplace_back(point);
        }
        else if (i > 400 && i < 470) // left eye
        {
            float x = ptr[2 * i];
            float y = ptr[2 * i + 1];
            // Undo crop scaling, then the crop/pad offsets.
            x = x * detail[1] + detail[3] - detail[4];
            y = y * detail[0] + detail[2] - detail[4];

            cv::Point point;
            point.y = int(y + 0.5f);
            point.x = int(x + 0.5f);
            left_corner.emplace_back(point);
        }
    }
}

// Free the model weights and the inference session owned by the interpreter.
FaceLandMark::~FaceLandMark()
{
    landmark_interpreter->releaseModel();
    landmark_interpreter->releaseSession(landmark_session);
}

// Build the eye segmentation network: load the MNN model, create a CPU
// inference session with a fixed 3x32x96 (C,H,W) input, and configure RGB
// preprocessing.
EyeSegmentation::EyeSegmentation(std::string mnn_path, int img_size, int num_thread):
        img_size(img_size), _num_thread(num_thread)
{
    interpreter = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(mnn_path.c_str()));
    MNN::ScheduleConfig config;
    // CPU backend chosen here (Vulkan intentionally disabled).
    config.backupType = MNN_FORWARD_CPU;//MNN_FORWARD_VULKAN;
    config.numThread = _num_thread;
    MNN::BackendConfig backendConfig;
    //backendConfig.precision = (MNN::BackendConfig::PrecisionMode) 2;
    config.backendConfig = &backendConfig;

    session = interpreter->createSession(config);

    input_tensor = interpreter->getSessionInput(session, nullptr);
    // Fixed input resolution: 96 wide x 32 high, matching eye_segment().
    interpreter->resizeTensor(input_tensor, { 1, 3, 32, 96 });
    interpreter->resizeSession(session);

    // RGB in/out with mean subtraction and normalization.
    pretreat = std::shared_ptr<MNN::CV::ImageProcess>(
            MNN::CV::ImageProcess::create(MNN::CV::RGB, MNN::CV::RGB, \
            _mean_val, 3, _norm_vals, 3));
}

// Segment the eye region inside rect with a 96x32 two-class network.
// Returns a binary mask (255 = eye) resized back to the crop resolution.
// When a foreground blob is found, its centroid in full-image coordinates
// is written to centroid; otherwise centroid is left untouched.
cv::Mat EyeSegmentation::eye_segment(const cv::Mat& img, const cv::Rect &rect, cv::Point &centroid)
{
    if (img.empty())
    {
        std::cout << "image is empty ,please check!" << std::endl;
        return img;
    }
    cv::Mat crop_image;
    img(rect).copyTo(crop_image);

    int height_origin = crop_image.rows;
    int width_origin = crop_image.cols;

    // Fixed network input resolution (w x h = 96 x 32), matching the ctor.
    const int height = 32;
    const int width = 96;

    cv::resize(crop_image, crop_image, cv::Size(width, height));//w, h
    cv::cvtColor(crop_image, crop_image, cv::COLOR_BGR2RGB);

    pretreat->convert(crop_image.data, width, height, int(crop_image.step[0]), input_tensor);//w, h

    interpreter->runSession(session);

    std::string output[1] = { "output_0" };
    MNN::Tensor *tensor_points = interpreter->getSessionOutput(session, output[0].c_str());
    MNN::Tensor tensor_scores_host(tensor_points, tensor_points->getDimensionType());
    tensor_points->copyToHostTensor(&tensor_scores_host);

    // Per-pixel 2-class logits -> softmax -> hard threshold into a mask.
    cv::Mat mask(height, width, CV_8UC1); //h, w
    // BUG FIX: read from the host copy made above, not the raw session
    // tensor — host<float>() on a device tensor is invalid on non-CPU
    // backends (and the host copy was otherwise unused).
    float *ptr = tensor_scores_host.host<float>();
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            int shift = row * width + col;
            float back_ = expf(ptr[(shift << 1) + 0]);
            float fore_ = expf(ptr[(shift << 1) + 1]);
            float back = back_ / (back_ + fore_);
            float fore = fore_ / (back_ + fore_);

            // Foreground only when clearly dominant (p > 0.7).
            mask.data[shift] = fore > back ? (fore > 0.7 ? 255 : 0) : 0;// fore > back ? 255 : 0;
        }
    }
    cv::resize(mask, mask, cv::Size(width_origin, height_origin));
    //cv::imwrite("D:\\code\\github\\face\\eyeLike\\test.png", mask);
    // Erode to drop thin false-positive strands before contour analysis.
    cv::Mat kernel = getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
    cv::erode(mask, mask, kernel);

    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE, cv::Point());

    // Pick the largest connected component as the eye blob.
    size_t max_idx = 0;
    float max_area = 0.0f;
    for (size_t i = 0; i < contours.size(); i++)
    {
        float area = float(cv::contourArea(contours[i]));
        if (area > max_area)
        {
            max_area = area;
            max_idx = i;
        }
    }

    // No foreground found: return the (empty) mask, centroid unchanged.
    if (max_area == 0.0f)
    {
        return mask;
    }

    // Blob centroid via image moments; m00 > 0 because the contour has area.
    cv::Moments mu = cv::moments(contours[max_idx], false);
    centroid.x = int(mu.m10 / mu.m00 + rect.x + 0.5);
    centroid.y = int(mu.m01 / mu.m00 + rect.y + 0.5);

    return mask;
}

// Free the model weights and the inference session owned by the interpreter.
EyeSegmentation::~EyeSegmentation()
{
    interpreter->releaseModel();
    interpreter->releaseSession(session);
}