#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <iostream>
#include <fstream>
#include "stereo_depth.h"
#include "fbs.hpp"
#include "jbf.h"
#define CV_CPU_HAS_SUPPORT_NEON 1
#include "opencv2/core/hal/intrin.hpp"
#include <opencv2/opencv.hpp>
#include "color_corr.h"
#include <omp.h>

// Guard must test the bare macro name: `#ifndef SQR(x)` is ill-formed
// (#ifndef takes a single identifier; the "(x)" was extra-token noise).
#ifndef SQR
#define SQR(x) ((x)*(x))
#endif


// Default-construct only; all real setup is deferred to Init().
r3d_depth_image::R3DStereoDepth::R3DStereoDepth() = default;

// Members (cv::Mat maps, parameters) release themselves via their own destructors.
r3d_depth_image::R3DStereoDepth::~R3DStereoDepth() = default;


void r3d_depth_image::R3DStereoDepth::Init(const ParamStereo & param) {
	// One-time setup: picks working resolutions per quality level, rescales
	// both cameras' intrinsics down to depth resolution, and precomputes the
	// undistortion maps (plus an inverse map for non-FAST modes).
	// Sets init_success; on illegal calibration data it logs and returns early.
	IMAGE_DEPTH_LOGI("INIT BEGIN");
	double tick_freq = cv::getTickFrequency();
	double tick_start = TICK(), tick_stop;
	double tick_sub_start = tick_start, tick_sub_stop;
	
    m_param.copy(param);
	micro_dist_thr = 127; // 40
	macro_dist_thr = 5;

	depth_range_max = 8 * 1000;
	depth_clip_max = 5 * 1000;

	// Select depth/guide resolution and disparity search range per quality level.
	// NOTE(review): no default case — an unexpected alg_type leaves these unset.
	switch (m_param.alg_type) {
	case R3D_IMAGE_DEPTH_ALG_FAST:
		m_param.depth_size = cv::Size(480, 360);
		target_size = cv::Size(816, 612);
		m_param.max_disp = 16;
		margin_x = 36 + m_param.max_disp;
		margin_y = 24;
		break;
	case R3D_IMAGE_DEPTH_ALG_NORMAL:
		m_param.depth_size = cv::Size(640, 480);
		target_size = cv::Size(816, 612);
		m_param.max_disp = 32;
		margin_x = 48 + m_param.max_disp;
		margin_y = 32;
		break;
	case R3D_IMAGE_DEPTH_ALG_BEST:
		m_param.depth_size = cv::Size(800, 600);
		target_size = cv::Size(960, 720);
		m_param.max_disp = 64;
		margin_x = 60 + m_param.max_disp;
		margin_y = 40;
		break;
	}

	trigger_max_disp = 0.75 * m_param.max_disp;
	
	// Portrait orientations swap width/height of every working size.
	if (m_param.orientation == R3D_IMAGE_ROTATE_COUNTER_CLOCKWISE_90 || 
		m_param.orientation == R3D_IMAGE_ROTATE_CLOCKWISE_90) {
		m_param.full_size = cv::Size(m_param.full_size.height, m_param.full_size.width);
		m_param.crop_size = cv::Size(m_param.crop_size.height, m_param.crop_size.width);
		m_param.depth_size = cv::Size(m_param.depth_size.height, m_param.depth_size.width);
	}
	
	this->min_disp = m_param.min_disp - m_param.delta_disp_per_degree;
	IMAGE_DEPTH_LOGI("\t calibrated min_disp=%6.3f", this->min_disp);
	this->mat_row_align.create(3, 3, CV_32FC1);

	if (m_param.alg_type == R3D_IMAGE_DEPTH_ALG_FAST) this->m_param.win_size = 7;
	else this->m_param.win_size = 9;

	// Feature-matching parameters consumed by RowAlign_grid() in Preprocess().
	this->m_param.num_feat = 2000;
	this->m_param.max_iter = 300;
	this->m_param.res_level = 0.5;
	this->m_param.num_worst = 3;

    // in case of cropping view on main camera
	int dpth_w = m_param.depth_size.width;
	int dpth_h = m_param.depth_size.height;
	// clamp all calibration-time dimensions to >= 1 to avoid divide-by-zero below
	int cali_w = std::fmax(m_param.cali_size.width, 1);
	int cali_h = std::fmax(m_param.cali_size.height, 1);
	int full_w = std::fmax(m_param.full_size.width, 1);
	int full_h = std::fmax(m_param.full_size.height, 1);
	int crop_w = std::fmax(m_param.crop_size.width, 1);
	int crop_h = std::fmax(m_param.crop_size.height, 1);

	// size of the cropped main view expressed at calibration resolution
    crop_cali_size.width = cali_w - (cali_w / (float)full_w) * (full_w - crop_w);
    crop_cali_size.height = cali_h - (cali_h / (float)full_h) * (full_h - crop_h);
	
    down_scale = m_param.depth_size.width / (double)crop_cali_size.width;
	
	// per-axis scale bringing the aux (sub) camera to the main camera's zoom level
	double fx1, fx2, fy1, fy2;
	fx1 = m_param.intrinsics.cam_mat_sub.at<double>(0, 0);
	fy1 = m_param.intrinsics.cam_mat_sub.at<double>(1, 1);
	fx2 = m_param.intrinsics.cam_mat_main.at<double>(0, 0);
	fy2 = m_param.intrinsics.cam_mat_main.at<double>(1, 1);
	scale_aux_x = down_scale * fx2 / fx1;
	scale_aux_y = down_scale * fy2 / fy1;

    crop_delta_size.width = down_scale * cali_w - dpth_w;
    crop_delta_size.height = down_scale * cali_h - dpth_h;

	// step 1. scale down main to depth size, 
	// and scale down aux to the same zoom level of main in depth size.
    // update intrinsics for sub camera
    m_param.intrinsics.cam_mat_sub.at<double>(0, 0) *= scale_aux_x;
    m_param.intrinsics.cam_mat_sub.at<double>(0, 2) *= scale_aux_x;
    m_param.intrinsics.cam_mat_sub.at<double>(1, 1) *= scale_aux_y;
    m_param.intrinsics.cam_mat_sub.at<double>(1, 2) *= scale_aux_y;

	// update intrinsics for main camera
    m_param.intrinsics.cam_mat_main.at<double>(0, 0) *= down_scale;
    m_param.intrinsics.cam_mat_main.at<double>(0, 2) = 
		m_param.intrinsics.cam_mat_main.at<double>(0, 2) * down_scale - crop_delta_size.width / 2.0;
    m_param.intrinsics.cam_mat_main.at<double>(1, 1) *= down_scale;
    m_param.intrinsics.cam_mat_main.at<double>(1, 2) = 
		m_param.intrinsics.cam_mat_main.at<double>(1, 2) * down_scale - crop_delta_size.height / 2.0;

    // generate distortion maps
    // E is a 3x3 identity (no rectification rotation); D is unused here
    cv::Mat E = cv::Mat::diag(cv::Mat::ones(cv::Size(1, 3), CV_64F));
    cv::Mat D = cv::Mat::zeros(cv::Size(8, 1), CV_64F);
	
    // normalize both views
	cv::Size aux_size;
	aux_size.width = scale_aux_x * cali_w;
	aux_size.height = scale_aux_y * cali_h;
	try {
		cv::initUndistortRectifyMap(
			m_param.intrinsics.cam_mat_sub,
			m_param.intrinsics.dist_coef_sub,
			E,
			m_param.intrinsics.cam_mat_sub,
			aux_size,
			CV_32F,
			map11, map12);
		cv::initUndistortRectifyMap(
			m_param.intrinsics.cam_mat_main,
			m_param.intrinsics.dist_coef_main,
			E,
			m_param.intrinsics.cam_mat_main,
			m_param.depth_size,
			CV_32F,
			map21, map22);
	}
	catch (...) {
		// any OpenCV assertion here means the calibration blobs are malformed
		init_success = false;
		IMAGE_DEPTH_LOGE("Init Failed: illegal calibration data!");
		return;
	}

	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t distortion map generation: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;
	
	if (m_param.alg_type != R3D_IMAGE_DEPTH_ALG_FAST) {
		// inverse map for the main camera only
		imap1 = Mat::zeros(m_param.depth_size, CV_32FC1);
		imap2 = Mat::zeros(m_param.depth_size, CV_32FC1);

		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t inverse map allocation: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;

		std::vector<cv::Point2f> pts, pts_dst;

		pts.resize(dpth_w * dpth_h);
		pts_dst.resize(dpth_w * dpth_h);

		// one source point per depth-map pixel, in row-major order
		for (int i = 0; i < dpth_h; i++) {
			for (int j = 0; j < dpth_w; j++) {
				pts[i * dpth_w + j].x = j;
				pts[i * dpth_w + j].y = i;
			}
		}

		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t points generation: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;

// undistort the grid in OMP_THREADS disjoint slices; each thread copies its
// slice into and out of private buffers, so pts_dst writes never overlap
#define OMP_THREADS 4
#pragma omp parallel for num_threads(OMP_THREADS)
		for (int idx = 0; idx < OMP_THREADS; idx++) {
			std::vector<cv::Point2f> pts_temp(
				pts.begin() +
				idx * dpth_w * dpth_h / OMP_THREADS,
				pts.begin() +
				(idx + 1) * dpth_w * dpth_h / OMP_THREADS);
			std::vector<cv::Point2f> pts_temp2(
				pts_dst.begin() +
				idx * dpth_w * dpth_h / OMP_THREADS,
				pts_dst.begin() +
				(idx + 1) * dpth_w * dpth_h / OMP_THREADS);
			cv::undistortPoints(
				pts_temp,
				pts_temp2,
				m_param.intrinsics.cam_mat_main,
				m_param.intrinsics.dist_coef_main,
				E,
				m_param.intrinsics.cam_mat_main);
			std::copy(
				pts_temp2.begin(),
				pts_temp2.end(),
				pts_dst.begin() +
				idx * dpth_w * dpth_h / OMP_THREADS);
		}

		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t points undistortion: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;

		// scatter the undistorted coordinates into the two remap channels
		for (int i = 0; i < dpth_h; i++) {
			for (int j = 0; j < dpth_w; j++) {
				imap1.at<float>(i, j) = pts_dst[i * dpth_w + j].x;
				imap2.at<float>(i, j) = pts_dst[i * dpth_w + j].y;
			}
		}

		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t inverse map filling: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;

		// drop the temporary point buffers (capacity is retained until scope exit)
		pts.erase(pts.begin(), pts.end());
		pts_dst.erase(pts_dst.begin(), pts_dst.end());

		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t finalizing: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;
	}
	init_success = true;
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("INIT DONE: %.1f ms", (1000*(tick_stop - tick_start)/tick_freq));
}


void r3d_depth_image::R3DStereoDepth::Preprocess(
        const cv::Mat & im_L_o,
        const cv::Mat & im_R_o
	) {
	// Builds the rectified, row-aligned stereo pair consumed by ComputeDepth():
	//   im_R_o (main) -> im_guide (target_size) -> im_R_small (depth_size + disparity band)
	//   im_L_o (aux)  -> im_L_small, rescaled to the main camera's zoom level
	// Both views are undistorted with the maps from Init(), embedded in a
	// (margin_x, margin_y)-padded canvas, then row-aligned via RowAlign_grid().
	IMAGE_DEPTH_LOGI("PREPROCESS BEGIN");
    if(im_L_o.size().width <= 0){
		IMAGE_DEPTH_LOGE("INPUT IMAGES ARE EMPTY!");
		FAIL_EXIT(-1);
    }

	double tick_freq = cv::getTickFrequency();
	double tick_start = TICK(), tick_stop;
	double tick_sub_start = tick_start, tick_sub_stop;

    // calibration / depth-map dimensions (full/crop sizes are only needed in Init)
    int cali_w = m_param.cali_size.width;
    int cali_h = m_param.cali_size.height;
	int dpth_w = m_param.depth_size.width;
	int dpth_h = m_param.depth_size.height;

    // for main camera
	// FIX: interpolation is the 6th parameter of cv::resize; the original call
	// passed cv::INTER_LINEAR as the 4th (fx) argument, where it was ignored.
	cv::resize(im_R_o, im_guide, target_size, 0, 0, cv::INTER_LINEAR);
	if (m_param.orientation == R3D_IMAGE_ROTATE_NONE) {
		cv::resize(im_guide, im_R, m_param.depth_size, 0, 0, INTER_LINEAR);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_180) {
		cv::resize(im_guide, im_R, m_param.depth_size, 0, 0, INTER_LINEAR);
		cv::rotate(im_R, im_R, cv::ROTATE_180);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_COUNTER_CLOCKWISE_90) {
		// resize in landscape first, then rotate into the portrait depth frame
		cv::Size tmp_size(m_param.depth_size.height, m_param.depth_size.width);
		cv::resize(im_guide, im_R, tmp_size, 0, 0, INTER_LINEAR);
		cv::rotate(im_R, im_R, cv::ROTATE_90_COUNTERCLOCKWISE);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_CLOCKWISE_90) {
		cv::Size tmp_size(m_param.depth_size.height, m_param.depth_size.width);
		cv::resize(im_guide, im_R, tmp_size, 0, 0, INTER_LINEAR);
		cv::rotate(im_R, im_R, cv::ROTATE_90_CLOCKWISE);
	}

	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t MAIN resize and rotate: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	// undistort, then embed in the margin-padded working canvas
	cv::remap(im_R, im_R_small, map21, map22, cv::INTER_LINEAR);
	im_R = cv::Mat::zeros(cv::Size(dpth_w + 2 * margin_x, dpth_h + 2 * margin_y), CV_8UC3);
	im_R_small.copyTo(im_R(cv::Rect(margin_x, margin_y, dpth_w, dpth_h)));

	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t MAIN remap and pad: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

    // for aux camera
    cv::Size size_small = cv::Size((int)(scale_aux_x * cali_w), (int)(scale_aux_y * cali_h));
	if (m_param.orientation == R3D_IMAGE_ROTATE_NONE) {
		cv::resize(im_L_o, im_L, size_small);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_180) {
		cv::resize(im_L_o, im_L, size_small, 0, 0, INTER_LINEAR);
		cv::rotate(im_L, im_L, cv::ROTATE_180);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_COUNTER_CLOCKWISE_90) {
		cv::Size tmp_size(size_small.height, size_small.width);
		cv::resize(im_L_o, im_L, tmp_size);
		cv::rotate(im_L, im_L, cv::ROTATE_90_COUNTERCLOCKWISE);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_CLOCKWISE_90) {
		cv::Size tmp_size(size_small.height, size_small.width);
		cv::resize(im_L_o, im_L, tmp_size);
		cv::rotate(im_L, im_L, cv::ROTATE_90_CLOCKWISE);
	}

	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t AUX resize and rotate: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	cv::remap(im_L, im_L_small, map11, map12, cv::INTER_LINEAR);
	// center-crop the aux view down to the padded working size
	float crop_x, crop_y;
	crop_x = (size_small.width - (dpth_w + 2 * margin_x)) / 2.0;
	crop_y = (size_small.height - (dpth_h + 2 * margin_y)) / 2.0;
	if (crop_x < 0 || crop_y < 0) {
		// aux view is smaller than the working canvas: paste it centered on black
		IMAGE_DEPTH_LOGE("\t cropping exceeding range of aux camera!");
		IMAGE_DEPTH_LOGE("\t crop_x: %6.1f crop_y: %6.1f", crop_x, crop_y);
		cv::Size tmp_size(dpth_w + 2 * margin_x, dpth_h + 2 * margin_y);
		// FIX: allocate with the remapped image's own type — the original
		// hard-coded CV_8UC1, which makes copyTo() into the ROI below fail
		// whenever the aux stream is multi-channel.
		im_L = cv::Mat::zeros(tmp_size, im_L_small.type());
		cv::Rect2f src_roi, dst_roi;
		src_roi.x = std::fmax(0, crop_x);
		src_roi.y = std::fmax(0, crop_y);
		src_roi.width = im_L_small.size().width - 2 * src_roi.x;
		src_roi.height = im_L_small.size().height - 2 * src_roi.y;
		dst_roi.x = std::fmax(0, -crop_x);
		dst_roi.y = std::fmax(0, -crop_y);
		dst_roi.width = src_roi.width;
		dst_roi.height = src_roi.height;
		im_L_small(src_roi).copyTo(im_L(dst_roi));
	}
	else {
		im_L = im_L_small(cv::Rect(
			crop_x,
			crop_y,
			dpth_w + 2 * margin_x,
			dpth_h + 2 * margin_y)).clone();
	}

	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t AUX remap and crop/pad: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	ShowImg("remap_L", im_L);
	ShowImg("remap_R", im_R);
	
	real_max_disp = m_param.max_disp;
	min_disp = m_param.min_disp - m_param.delta_disp_per_degree;
	
	// row alignment (updates mat_row_align and the disparity bounds);
	// failure is logged but treated as non-fatal
    if (!RowAlign_grid(
		im_L_small,
		im_R, // main
		im_L, // aux
        m_param.num_feat,
        m_param.max_iter,
        m_param.res_level,
        m_param.num_worst,
		margin_x,
		margin_y,
		real_max_disp,
		min_disp, 
		this->mat_row_align)
    ) {
		IMAGE_DEPTH_LOGE("!!! ROW-ALIGNMENT FAILED !!!");
    }
	
	// keep an extra max_disp-wide band on the left for the disparity search
	im_R_small = im_R(cv::Rect(margin_x, margin_y, m_param.max_disp + dpth_w, dpth_h)).clone();
	im_L_small = im_L_small(cv::Rect(margin_x, margin_y, m_param.max_disp + dpth_w, dpth_h)).clone();

	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t row align and crop: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	if (m_param.dump_enabled) DumpImage(im_R_small, m_param.dump_path, "rowalign_r", uid);
	if (m_param.dump_enabled) DumpImage(im_L_small, m_param.dump_path, "rowalign_l", uid);
	
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("PREPROCESS DONE: %.1f ms", (1000 * (tick_stop - tick_start) / tick_freq));
}

int r3d_depth_image::R3DStereoDepth::ComputeDepthImpl(
        cv::Mat & output,
        const cv::Mat & main_,
        const cv::Mat & aux_
){
	// Core disparity estimation on the preprocessed, rectified pair.
	//   output: receives the 8-bit (CV_8UC1) disparity map.
	//   main_:  main-camera image (converted BGR->GRAY below).
	//   aux_:   aux-camera image (fed to StereoBM without conversion, so it is
	//           presumably already single-channel — confirm with Preprocess).
	// Returns 1 for the macro-/micro-distance special cases (output is a
	// uniform 255 map at target_size), 0 for a normal depth result.
	IMAGE_DEPTH_LOGI("\t ComputeDepthImpl BEGIN");

#pragma region initialization
	double tick_freq = cv::getTickFrequency();
	double tick_start = TICK(), tick_stop;
	double tick_sub_start = tick_start;
	double tick_module_start = tick_start;

	if (output.type() != CV_8UC1 || output.size() != main_.size()) output.create(main_.size(), CV_8UC1);
	cv::Mat main_grey, aux_grey;
#pragma endregion

	// start the module : calc-min-max-disp
#pragma region calculate min disparity
	// check the min-disparity according to bm in low-reso
	float bm_raw_scale = 0.25; // the raw scale of depth image for BM algorithm
	cv::Size bm_raw_size(main_.size().width * bm_raw_scale, main_.size().height * bm_raw_scale);
	// round the scaled search range up to a multiple of 16 (StereoBM requirement)
	int bm_max_disp = std::max(32, (int(m_param.max_disp * bm_raw_scale - 1)/16 + 1)*16);
	int bm_block_size = 9;
	cv::Ptr<cv::StereoBM> bm = cv::StereoBM::create(bm_max_disp, bm_block_size);
	bm->setUniquenessRatio(50);//30
	bm->setSpeckleRange(800);
	bm->setSpeckleWindowSize(32);
	cv::Mat main_bm, disp_bm_s16, disp_bm_u8;
	cv::Mat main_bm_gray, aux_bm_gray;

	cv::resize(main_, main_bm, bm_raw_size);
	cv::resize(aux_, aux_bm_gray, bm_raw_size);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm resize: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
	
	cv::cvtColor(main_bm, main_bm_gray, cv::COLOR_BGR2GRAY);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm rgb2gray: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	// BM outputs fixed-point disparity (x16); rescale to full 0..255 range
	bm->compute(main_bm_gray, aux_bm_gray, disp_bm_s16);
	disp_bm_s16.convertTo(disp_bm_u8, CV_8UC1, 255.0 / (bm_max_disp * 16));
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm compute: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	ShowImg("bm-raw", disp_bm_u8);
	if (m_param.dump_enabled) DumpImage(disp_bm_u8, m_param.dump_path, "disp_bm_u8", uid);
	
	// remove white spots
	/*float max_disp_bm = real_max_disp * bm_raw_scale;
	max_disp_bm = std::fmin(max_disp_bm * 1.2 * (256.0 / bm_max_disp) - 1, 255.0);
	cv::Mat mask_ = disp_bm_u8 <= (uint8_t)(max_disp_bm);
	disp_bm_u8 = disp_bm_u8.mul(mask_ / 255);
	ShowImg("bm-raw", disp_bm_u8);
	if (m_param.dump_enabled) DumpImage(disp_bm_u8, m_param.dump_path, "disp_bm_raw", uid);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm max clip: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;*/
	
	HistBasedFilter(disp_bm_u8, 0.01, true); // 0.04
	if (m_param.dump_enabled) DumpImage(disp_bm_u8, m_param.dump_path, "disp_bm_hist", uid);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm min clip(zero ignored): %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
	
	FillBlackHoles_new(disp_bm_u8, main_bm, false);
	ShowImg("bm-FillBlackHoles_new", disp_bm_u8);
	if (m_param.dump_enabled) DumpImage(disp_bm_u8, m_param.dump_path, "disp_bm_fill_holes", uid);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm fill holes: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	HistBasedFilter(disp_bm_u8, 0.004, false);
	ShowImg("bm-HistBasedFilter", disp_bm_u8);
	if (m_param.dump_enabled) DumpImage(disp_bm_u8, m_param.dump_path, "disp_bm_hist_2", uid);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm min clip: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
	
	// edge-preserving smoothing of the coarse BM map before taking min/max
	cv::Ptr<cv::ximgproc::DisparityWLSFilter> wls_bm =
		cv::ximgproc::createDisparityWLSFilterGeneric(false);
	wls_bm->setSigmaColor(0.4);//0.4
	wls_bm->setLambda(8000);//8000
	cv::Mat disp_bm_wls;
	wls_bm->filter(disp_bm_u8, disp_bm_u8, disp_bm_wls);
	ShowImg("bm-wls", disp_bm_wls);
	if (m_param.dump_enabled) DumpImage(disp_bm_u8, m_param.dump_path, "disp_bm_wls", uid);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm wls: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	// global min of the smoothed map, converted back to full-resolution
	// disparity units (with a small safety offset)
	float bm_min, bm_max;
	cv::Mat t_;
	cv::reduce(disp_bm_wls, t_, 0, cv::REDUCE_MIN);
	cv::reduce(t_, t_, 1, cv::REDUCE_MIN);
	bm_min = (float)(t_.at<uint8_t>(0, 0)) / 255.0F * bm_max_disp / bm_raw_scale - 4; // -4
	IMAGE_DEPTH_LOGI("\t\t bm->min disparity= %6.3f", bm_min);
	
	bm_min = std::fmax(bm_min, 0);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm calc min disp: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	//check the real maximum disparity
	cv::reduce(disp_bm_wls, t_, 0, cv::REDUCE_MAX);
	cv::reduce(t_, t_, 1, cv::REDUCE_MAX);
	bm_max = (float)(t_.at<uint8_t>(0, 0)) / 255.0F * bm_max_disp / bm_raw_scale + 3;
	//bm_max *= 1.2;
	IMAGE_DEPTH_LOGI("\t\t bm->max disparity= %6.3f", bm_max);

	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t bm calc max disp: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
#pragma endregion

	// update maximium disparity in current scene
	// (the whole range shifts down by bm_min; the aux view is shifted to match)
	real_max_disp -= bm_min;
	bm_max -= bm_min;

	// calc-min-max-disp module finished
	IMAGE_DEPTH_LOGI("\t\t [module] calc-min-max-disp: %.1f ms", (1000 * (tick_stop - tick_module_start) / tick_freq));
	tick_module_start = tick_stop;

	// macro-distance mode
	if (bm_max <= macro_dist_thr) {
		IMAGE_DEPTH_LOGI("\t\t !!! Macro-Distance Shot detected !!!");
		WaitKey(0);
		// directly return with full white depth map to avoid mis-bokeh effect
		output = cv::Mat::zeros(target_size, CV_8UC1) + 255;
		return 1;
	}

	// micro-distance mode
	if (bm_max >= micro_dist_thr) {
		IMAGE_DEPTH_LOGI("\t\t !!! Micro-Distance Shot detected !!!");
		WaitKey(0);
		// directly return with full white depth map since we are unable to handle this case.
		output = cv::Mat::zeros(target_size, CV_8UC1) + 255;
		return 1;
	}

#pragma region shift aux view
	// shift the aux view to enforce min disp to be 0
	// (bm_min truncates to int pixels when building the Rects)
	cv::Mat aux_shifted = cv::Mat::zeros(aux_.size(), aux_.type());
	cv::Rect roi_dst(bm_min, 0, aux_.size().width - bm_min, aux_.size().height);
	cv::Rect roi_src(0, 0, aux_.size().width - bm_min, aux_.size().height);
	aux_(roi_src).copyTo(aux_shifted(roi_dst));
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t shift aux: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
#pragma endregion

#pragma region sgbm

	cv::Ptr<cv::StereoSGBM> sgbm_ = cv::StereoSGBM::create();
	sgbm_->setBlockSize(m_param.win_size);
	sgbm_->setNumDisparities(m_param.max_disp);
	sgbm_->setMinDisparity(0);
	sgbm_->setPreFilterCap(63);//42
	sgbm_->setP1(24 * m_param.win_size * m_param.win_size);//24
	sgbm_->setP2(96 * m_param.win_size * m_param.win_size);//96
	sgbm_->setUniquenessRatio(30);//30
	sgbm_->setMode(cv::StereoSGBM::MODE_HH4); //MODE_HH4

	// calculate the real maximum disparity
	// (rounded up to the next multiple of 16, as SGBM requires)
	this->sgbm_max_disp = (std::floorf(bm_max / 16) + 1) * 16;
	sgbm_->setNumDisparities(this->sgbm_max_disp);

	cv::Mat main_resized;
	main_resized = main_;
	aux_grey = aux_shifted;
	
	cv::cvtColor(main_resized, main_grey, cv::COLOR_BGR2GRAY);
	ShowImg("main_grey", main_grey);
	ShowImg("aux_grey", aux_grey);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t resize/- and rgb2gray: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	// local contrast equalization on both views before matching
	cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(6.0, cv::Size(11,11));
	cv::Mat main_clahe, aux_clahe;
	clahe->apply(aux_grey, aux_clahe);
	clahe->apply(main_grey, main_clahe);
	ShowImg("main-clahe", main_clahe);
	ShowImg("aux-clahe", aux_clahe);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t clahe: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	cv::Mat disp_left;
	sgbm_->compute(main_clahe, aux_clahe, disp_left);
	// SGBM output is fixed-point (x16); normalize into 0..255
	disp_left.convertTo(output, CV_8UC1, 255.0 / (16.0 * sgbm_->getNumDisparities()), 0);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t SGBM: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
	ShowImg("depth-sgbm", output);
#pragma endregion


#pragma region preprocess disparity map
	// disparity map preprocess
	cv::Mat img_blur;
	cv::Mat guide_rgb;
	
	// drop the left search band, keeping only the depth_size area
	output = output(cv::Rect(m_param.max_disp, 0, m_param.depth_size.width, 
		m_param.depth_size.height)).clone();
	ShowImg("raw-depth", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 crop: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	// crop the surrounding zone with mis-calibration
	//CropBall(output, 0.05); // 0.05
	//ShowImg("crop-ball", output);
	//tick_stop = TICK();
	//IMAGE_DEPTH_LOGI("\t\t route#2 crop-ball: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	//tick_sub_start = tick_stop;

	// restrict the disparity value
	uint8_t max_disp_u8 = (uint8_t)std::fmin(bm_max * 1.2 * (256.0 / sgbm_max_disp) - 1, 255);
	cv::threshold(output, output, max_disp_u8, 255, cv::THRESH_TOZERO_INV);
	ShowImg("restricted-depth", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 max clip: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	removeWhiteEdge(output);
	ShowImg("removeWhiteEdge", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 clean edges: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	removeSpot(output);
	ShowImg("removeSpot", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 remove spots: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
		
	//// remove invalid disparity region
	//cv::threshold(output, output, 24, 255, cv::THRESH_TOZERO);
	//ShowImg("Remove Low Values", output);
	//tick_stop = TICK();
	//IMAGE_DEPTH_LOGI("\t\t route#2 remove low values: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	//tick_sub_start = tick_stop;

	HistBasedFilter(output, 0.04, true);
	ShowImg("HistBasedFilter1", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 min clip(zero ignored): %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	//HistBasedPeakFilter_(output, 0.004, 16);
	//ShowImg("HistBasedPeakFilter_", output);
	//tick_stop = TICK();
	//IMAGE_DEPTH_LOGI("\t\t route#2 peak clip: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	//tick_sub_start = tick_stop;

	//cv::Mat guide_grey = main_grey(cv::Rect(m_param.max_disp, 0, m_param.depth_size.width, m_param.depth_size.height)).clone();
	//removeTextureless(output, guide_grey);
	//ShowImg("removeTextureless", output);
	//tick_stop = TICK();
	//IMAGE_DEPTH_LOGI("\t\t route#2 remove textureless: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	//tick_sub_start = tick_stop;

	// guide image cropped to the same area as the disparity map
	guide_rgb = main_(cv::Rect(m_param.max_disp, 0, m_param.depth_size.width, m_param.depth_size.height)).clone();
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 crop main: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	//cv::medianBlur(guide_rgb, img_blur, 5);
	//tick_stop = TICK();
	//IMAGE_DEPTH_LOGI("\t\t route#2 blur main: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	//tick_sub_start = tick_stop;

	//FillBlackHoles_new(output, guide_rgb, true);
	FillBlackHolesSmallRegion(output, guide_rgb, 3);

	ShowImg("FillBlackHoles", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 fill holes: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	HistBasedFilter(output, 0.004, false);
	ShowImg("HistBasedFilter2", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t route#2 min clip: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	// NOTE: img aliases guide_rgb — the enhancements below modify it in place
	cv::Mat img = guide_rgb;
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t crop main/-: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
	
	ChangeSaturation(img, 95);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t enhance saturation: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	cv::Mat image_cc = img.clone();
	imageColorCorrection(image_cc, img);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t color correction: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;

	cv::medianBlur(img, img, 3); // 5
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t blur main: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
	
	// final guided smoothing of the disparity map
	cv::Ptr<cv::ximgproc::DisparityWLSFilter> wls =
		cv::ximgproc::createDisparityWLSFilterGeneric(false);
	wls->setSigmaColor(3);//3
	wls->setLambda(800);//800
	wls->filter(output, img, output);
	ShowImg("wls-small-guide", img);
	ShowImg("wls-small", output);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("\t\t wls: %.1f ms", (1000 * (tick_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_stop;
#pragma endregion

	IMAGE_DEPTH_LOGI("\t ComputeDepthImpl DONE: %.1f ms", (1000 * (tick_stop - tick_start) / tick_freq));
	return 0;
}


int r3d_depth_image::R3DStereoDepth::ComputeDepth(){
	// Runs the disparity computation on the flipped pair and maps the result
	// (depth_out) back to the original orientation. Returns 1 when the impl
	// hit a macro/micro-distance special case, 0 otherwise.
	IMAGE_DEPTH_LOGI("ComputeDepth BEGIN");

	const double freq = cv::getTickFrequency();
	const double t_begin = TICK();
	double t_prev = t_begin;
	double t_now;

	// work on 180-degree-flipped views (both axes)
	cv::flip(im_L_small, im_L, -1);
	cv::flip(im_R_small, im_R, -1);
	t_now = TICK();
	IMAGE_DEPTH_LOGI("\t flip: %.1f ms", (1000 * (t_now - t_prev) / freq));
	t_prev = t_now;

	const int impl_status = ComputeDepthImpl(depth_u8, im_R, im_L);
	t_now = TICK();
	IMAGE_DEPTH_LOGI("\t ComputeDepthImpl: %.1f ms", (1000 * (t_now - t_prev) / freq));
	t_prev = t_now;

	if (impl_status) {
		// special-case map (uniform white) — already in output orientation
		depth_out = depth_u8;
		t_now = TICK();
		IMAGE_DEPTH_LOGI("ComputeDepth DONE: %.1f ms", (1000 * (t_now - t_begin) / freq));
		return 1;
	}

	// transform back to original view
	cv::flip(depth_u8, depth_u8, -1);
	t_now = TICK();
	IMAGE_DEPTH_LOGI("\t flip back: %.1f ms", (1000 * (t_now - t_prev) / freq));
	t_prev = t_now;

	// non-FAST modes undo the lens undistortion via the inverse maps from Init()
	if (m_param.alg_type != R3D_IMAGE_DEPTH_ALG_FAST)
		cv::remap(depth_u8, depth_out, imap1, imap2, INTER_LINEAR);
	else depth_out = depth_u8;
	ShowImg("depth_out", depth_out);
	t_now = TICK();
	IMAGE_DEPTH_LOGI("\t remap/-: %.1f ms", (1000 * (t_now - t_prev) / freq));
	t_prev = t_now;

	if (m_param.dump_enabled) DumpImage(depth_out, m_param.dump_path, "depth_raw_", uid);

	t_now = TICK();
	IMAGE_DEPTH_LOGI("ComputeDepth DONE: %.1f ms", (1000 * (t_now - t_begin) / freq));

	return 0;
}


void r3d_depth_image::R3DStereoDepth::Postprocess() {
	IMAGE_DEPTH_LOGI("Postprocess BEGIN");

	double tick_freq = cv::getTickFrequency();
	double tick_start = TICK(), tick_stop;
	double tick_sub_start = tick_start, tick_sub_stop;

	cv::Ptr<cv::ximgproc::DisparityWLSFilter> wls =
		cv::ximgproc::createDisparityWLSFilterGeneric(false);

#pragma region resize_and_rotate
	cv::Mat im_R_rgb, disp_raw, out_;
	im_R_rgb = im_guide;
	if (m_param.orientation == R3D_IMAGE_ROTATE_NONE) {
		cv::resize(depth_out, disp_raw, target_size, cv::INTER_LINEAR);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_180) {
		cv::rotate(depth_out, disp_raw, cv::ROTATE_180);
		cv::resize(disp_raw, disp_raw, target_size, cv::INTER_LINEAR);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_COUNTER_CLOCKWISE_90) {
		cv::rotate(depth_out, disp_raw, cv::ROTATE_90_CLOCKWISE);
		cv::resize(disp_raw, disp_raw, target_size, cv::INTER_LINEAR);
	}
	else if (m_param.orientation == R3D_IMAGE_ROTATE_CLOCKWISE_90) {
		cv::rotate(depth_out, disp_raw, cv::ROTATE_90_COUNTERCLOCKWISE);
		cv::resize(disp_raw, disp_raw, target_size, cv::INTER_LINEAR);
	}
	ShowImg("rgb", im_R_rgb);
	if (m_param.dump_enabled) DumpImage(im_R_rgb, m_param.dump_path, "r3d_main", uid);
	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t resize main and rotate disp: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;
#pragma endregion

#pragma region depth_preprocess
	HistBasedFilter(disp_raw, 0.04, false);
	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t min-max clip: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	//#define CONVERT_DEPTH_MAP
#ifdef CONVERT_DEPTH_MAP
	// convert from disparity into depth map
	float disp_min_, disp_max_;
	disp_min_ = 1;
	disp_max_ = 255;
	Disparity2Depth(disp_raw, disp_min_, disp_max_);
	ShowImg("depth-raw", disp_raw);
	if (m_param.dump_enabled) DumpImage(disp_raw, m_param.dump_path, "depth_raw", uid);
#endif

#pragma endregion


#define WITH_JBF
#ifdef WITH_JBF
	cv::Mat mask_jbf(disp_raw.size(), CV_8UC1, cv::Scalar(255));
	cv::Mat disp_jbf;

	cv::Mat im_R_rgb_enhanced;
	if (m_param.alg_type == R3D_IMAGE_DEPTH_ALG_FAST) {
		im_R_rgb_enhanced = im_R_rgb;
	}
	else {
		im_R_rgb_enhanced = im_R_rgb.clone();
		imageColorCorrection(im_R_rgb, im_R_rgb_enhanced);
		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t exposure correction: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;
	}

	ChangeSaturation(im_R_rgb_enhanced, 95);
	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t enhance saturation: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	jointBilateralFilter_(im_R_rgb_enhanced, disp_raw, mask_jbf, disp_jbf, 50, 15, 40, BORDER_DEFAULT);//80,15,40
	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t jbf: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	cv::medianBlur(disp_jbf, disp_jbf, 3);
	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t median blur: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	ShowImg("jbf", disp_jbf);
	if (m_param.dump_enabled) DumpImage(disp_jbf, m_param.dump_path, "disp_jb", uid);

	wls->setSigmaColor(0.4);//0.4
	wls->setLambda(8000);//8000
	wls->filter(disp_jbf, disp_jbf, out_);
	ShowImg("wls", out_);
	tick_sub_stop = TICK();
	IMAGE_DEPTH_LOGI("\t wls: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
	tick_sub_start = tick_sub_stop;

	if (m_param.alg_type != R3D_IMAGE_DEPTH_ALG_FAST) {
		cv::Mat disp_jbf_f32, gray_norm;
		disp_jbf.convertTo(disp_jbf_f32, CV_32FC1);
		cv::normalize(disp_jbf_f32, gray_norm, 0.0, 255.0, cv::NORM_MINMAX);
		gray_norm = gray_norm / 255.0;
		cv::Mat out_f32;
		out_.convertTo(out_f32, CV_32FC1);
		out_f32 = out_f32.mul(gray_norm) + disp_jbf_f32.mul(1.0 - gray_norm);
		out_f32.convertTo(out_, CV_8UC1);
		ShowImg("merged", out_);
		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t merge: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;
	}
#endif


//#define WITH_FBS
#ifdef WITH_FBS
	// fast bilateral filter
	double fbs_spatial = 48.0; // 48
	double fbs_luma = 8.0; // 8
	double fbs_chroma = 8.0; // 8
	double fbs_depth = 32.0; // 32
	double fbs_lambda = 128.0; // 128
	int fbs_itr = 25;
	double fbs_err = 1E-5;
	int fbs_rate = 3;

	double start = TICK();
	double freq = cv::getTickFrequency();
	cv::Mat disp_fbs;
#ifdef WITH_JBF
	out_.copyTo(disp_raw);
#endif
	double stop = TICK();
	IMAGE_DEPTH_LOGI("HistBasedFilter: %.3f ms", 1000 * (stop - start) / freq);
	ShowImg("disp_raw", disp_raw);

	// get a binary mask of processing region using confidence map
	start = TICK();
	cv::Mat edge_mask, process_mask;
	int conf_thres = 255 - 32;
	// get laplacian edges
	cv::Mat conf_map;
	cv::Laplacian(disp_raw, conf_map, -1);
	conf_map = 255 - conf_map;
	ShowImg("conf_map", conf_map);
	edge_mask = conf_map < conf_thres;
	int dilate_radius = 24;
	cv::dilate(edge_mask, process_mask, cv::Mat(),
		cv::Point(-1, -1), dilate_radius);
	stop = TICK();
	IMAGE_DEPTH_LOGI("dilate: %.3f ms", 1000 * (stop - start) / freq);
	ShowImg("edge_mask", edge_mask);
	ShowImg("process_mask", process_mask);

	start = TICK();
	cv::Ptr<FastBilateralSolverFilter_> fbs =
		cv::Ptr<FastBilateralSolverFilter_>(
			FastBilateralSolverFilter_::create(
				im_R_rgb,
				disp_raw,
				fbs_spatial,
				fbs_luma,
				fbs_chroma,
				fbs_depth,
				fbs_lambda,
				fbs_itr,
				fbs_err,
				process_mask,
				fbs_rate,
				edge_mask));
	fbs->filter(disp_raw, conf_map / 255.0, disp_fbs);
	stop = TICK();
	IMAGE_DEPTH_LOGI("FastBilateralSolverFilter: %.3f ms",
		1000 * (stop - start) / freq);
	ShowImg("depth_fbs", disp_fbs);

	// fill unprocessed points
	start = TICK();
	FillBlackHoles_new(disp_fbs, im_R_rgb, true);
	stop = TICK();
	IMAGE_DEPTH_LOGI("Fill: %.3f ms", 1000 * (stop - start) / freq);
	ShowImg("depth_fbs_filled", disp_fbs);

	// check bad foreground expansion
	start = TICK();
	//cv::max(disp_fbs, disp_raw, disp_raw);
	disp_fbs.copyTo(disp_raw);
	stop = TICK();
	IMAGE_DEPTH_LOGI("erase foreground expansion: %.3f ms", 1000 * (stop - start) / freq);
	ShowImg("depth_fbs_blur", disp_raw);

	// use the filtered gray to guide
	start = TICK();
#ifndef WITH_JBF
	cv::Ptr<cv::ximgproc::DisparityWLSFilter> wls =
		cv::ximgproc::createDisparityWLSFilterGeneric(false);
#endif
	wls->setSigmaColor(3);
	wls->setLambda(8000);
	wls->filter(disp_raw, disp_raw, out_);
	stop = TICK();
	IMAGE_DEPTH_LOGI("WLS Filter: %.3f ms", 1000 * (stop - start) / freq);
	ShowImg("depth_fbs_wls", out_);
#endif


#pragma region disparity_refinement
	if (m_param.alg_type != R3D_IMAGE_DEPTH_ALG_FAST) {
		HistBasedFilter(out_, 0.04, false);
		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t 2nd min-max clip: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;

		//cv::medianBlur(out_, out_, 3);
		tick_sub_stop = TICK();
		IMAGE_DEPTH_LOGI("\t 2nd median blur: %.1f ms", (1000 * (tick_sub_stop - tick_sub_start) / tick_freq));
		tick_sub_start = tick_sub_stop;
	}
#pragma endregion

	ShowImg("final-disp", out_);
	out_.copyTo(depth_out);

	// convert into depth map
	cv::Mat depth_f32;
	GetDepth(out_, depth_f32);

	cv::Mat vis_depth;
	VisualizeDepth(depth_f32, vis_depth);
	ShowImg("vis_depth", vis_depth);

	// reconstruct the scene in 3D space
	cv::Mat color_map;
	cv::Mat alpha_map = cv::Mat::zeros(im_R_rgb.size(), CV_8UC1) + 255;
	MergeAlphaChannel(im_R_rgb, alpha_map, color_map);
	
	// Get the 3d point cloud with color map
	cv::Mat pt_cloud;
	// to get the corresponding camera matrix
	cv::Mat cmm;
	m_param.intrinsics.cam_mat_main.copyTo(cmm);
	if (m_param.orientation == R3D_IMAGE_ROTATE_NONE ||
		m_param.orientation == R3D_IMAGE_ROTATE_180) {
		double scale_ratio = ((double)target_size.width) / m_param.depth_size.width;
		cmm.at<double>(0, 0) *= scale_ratio;
		cmm.at<double>(1, 1) *= scale_ratio;
		cmm.at<double>(0, 2) *= scale_ratio;
		cmm.at<double>(1, 2) *= scale_ratio;
		if (m_param.orientation == R3D_IMAGE_ROTATE_180) {
			cmm.at<double>(0, 2) = target_size.width - cmm.at<double>(0, 2);
			cmm.at<double>(1, 2) = target_size.height - cmm.at<double>(1, 2);
		}
	}
	else {
		double scale_ratio = ((double)target_size.width) / m_param.depth_size.height;
		cv::Mat cmm_main = m_param.intrinsics.cam_mat_main;
		cmm.at<double>(0, 0) = cmm_main.at<double>(1, 1)*scale_ratio;
		cmm.at<double>(1, 1) = cmm_main.at<double>(0, 0)*scale_ratio;
		cmm.at<double>(0, 2) = cmm_main.at<double>(1, 2)*scale_ratio;
		cmm.at<double>(1, 2) = cmm_main.at<double>(0, 2)*scale_ratio;
		if (m_param.orientation == R3D_IMAGE_ROTATE_CLOCKWISE_90)
			cmm.at<double>(1, 2) = target_size.height - cmm.at<double>(1, 2);
		else
			cmm.at<double>(0, 2) = target_size.width - cmm.at<double>(0, 2);
	}
	
	Get3DPointCloud(color_map, depth_f32, cmm, pt_cloud);

	// visualize cloud point without normal vectors
	cv::Mat vis_cloud;
	cv::Mat depth_map;
	Camera cam (
		cv::Point3f(0, 0, 0),
		cv::Point3f(1.0, 0.0, 0.0),
		cv::Point3f(0.0, 1.0, 0.0),
		cmm,
		im_R_rgb.size() );

	// op#0
	cam.CapturePoints(pt_cloud, vis_cloud, depth_map);
	FillBlackHolesSimple(vis_cloud, depth_map);
	//FillBlackHolesSimple(vis_cloud, depth_map);
#ifdef _WIN32
	cv::Mat vis_cloud_0 = vis_cloud.clone();
#endif

	// op#1
	cam.LookAt(cv::Point3f(0, 0, 300));
	cam.CapturePoints(pt_cloud, vis_cloud, depth_map);
	FillBlackHolesSimple(vis_cloud, depth_map);
	//FillBlackHolesSimple(vis_cloud, depth_map);
#ifdef _WIN32
	cv::Mat vis_cloud_1 = vis_cloud.clone();
#endif
	
	// op#2
	//cam.MoveAhead(30);
	cam.MoveUp(10);
	cam.CapturePoints(pt_cloud, vis_cloud, depth_map);
	FillBlackHolesSimple(vis_cloud, depth_map);
	//FillBlackHolesSimple(vis_cloud, depth_map);
#ifdef _WIN32
	cv::Mat vis_cloud_2 = vis_cloud.clone();
#endif
	
	// op#3
	cam.Yaw(-3);
	cam.CapturePoints(pt_cloud, vis_cloud, depth_map);
	FillBlackHolesSimple(vis_cloud, depth_map);
	//FillBlackHolesSimple(vis_cloud, depth_map);
#ifdef _WIN32
	cv::Mat vis_cloud_3 = vis_cloud.clone();
#endif

	// op#4
	cam.Pitch(5);
	cam.CapturePoints(pt_cloud, vis_cloud, depth_map);
	FillBlackHolesSimple(vis_cloud, depth_map);
	//FillBlackHolesSimple(vis_cloud, depth_map);
#ifdef _WIN32
	cv::Mat vis_cloud_4 = vis_cloud.clone();
#endif

	// Estimate a normal map for the 3d point cloud
	cv::Mat normal_map;
	int sample_rate = 2;
	EstimateNormal(pt_cloud, normal_map, 3, sample_rate, 20, 0.5*7*7);

	// save the point cloud in file (PLY format)
	SavePointCloudAsPLY(pt_cloud, normal_map, "F:/3DRecon/portrait.ply", 1000, true);

	// Reconstruct the 3d model consisting of millions of triangles
	std::vector<Triangle> mesh;
	Recon3DModel(mesh, pt_cloud, normal_map);

	WaitKey(0);
	tick_stop = TICK();
	IMAGE_DEPTH_LOGI("Postprocess DONE: %.1f ms", (1000 * (tick_stop - tick_start) / tick_freq));
}


void r3d_depth_image::R3DStereoDepth::Compute(
	cv::Mat & depth,
	const cv::Mat & im_aux, // gray scale
	const cv::Mat & im_main, // rgb
	const cv::String & uid,
	float fx, // in ratio form, like (0.5, 0.5) being the center of the image
	float fy
	){
	// Top-level driver: Preprocess -> ComputeDepth -> Postprocess.
	// Each stage is timed and logged; the result lands in [depth].
	this->uid = uid;
	this->fx = fx;
	this->fy = fy;

	const double freq = cv::getTickFrequency();
	double stage_begin = TICK();
	double stage_end;

	// Bail out with a flat all-255 map when Init() did not succeed.
	if (!init_success) {
		IMAGE_DEPTH_LOGE("Not available: module uninitialized or initialization failed!");
		depth = cv::Mat::zeros(target_size, CV_8UC1) + 255;
		return;
	}

	this->Preprocess(im_aux, im_main);
	stage_end = TICK();
	IMAGE_DEPTH_LOGI("DEPTH PREPROCESS TIME: %6.3f ms", 1000 * (stage_end - stage_begin) / freq);
	stage_begin = TICK();

	// A true return from ComputeDepth() means the result is already final:
	// copy it out and skip postprocessing entirely.
	if (this->ComputeDepth()) {
		depth_out.copyTo(depth);
		stage_end = TICK();
		IMAGE_DEPTH_LOGI("DEPTH COMPUTATION TIME: %6.3f ms", 1000 * (stage_end - stage_begin) / freq);
		return;
	}
	stage_end = TICK();
	IMAGE_DEPTH_LOGI("DEPTH COMPUTATION TIME: %6.3f ms", 1000 * (stage_end - stage_begin) / freq);
	stage_begin = TICK();

	this->Postprocess();
	stage_end = TICK();
	IMAGE_DEPTH_LOGI("DEPTH POSTPROCESS TIME: %6.3f ms", 1000 * (stage_end - stage_begin) / freq);
	depth_out.copyTo(depth);
}


float r3d_depth_image::R3DStereoDepth::GetFocusDistance(
        float focus_x,
        float focus_y) {
    // Estimate the physical distance of the focused scene point by averaging
    // the disparity in a small window around (focus_x, focus_y) and
    // reprojecting it through the Q matrix.
    // @param focus_x/focus_y focus position in ratio form, range [0, 1)
    // @return distance (same units as Q reprojection, scaled by 16), or -1
    //         when the focus point is out of range or the window is empty
    if (focus_x<0 || focus_x>=1 || focus_y<0 || focus_y>=1){
		return -1;
    }
    Mat point_(4, 1, CV_64F);
    float x_, y_, d_;
    int radius = 3; // the radius to search the depth of focus
    float sum_of_depth;
    int begin_x, begin_y, end_x, end_y;

    // NOTE(review): the focus coordinates are mirrored (1 - f) before scaling
    // -- presumably to match the sensor orientation; confirm against caller.
    x_ = (1 - focus_x) * depth_out.size().width;
    y_ = (1 - focus_y) * depth_out.size().height;
    begin_x = max(0, int(x_) - radius);
    begin_y = max(0, int(y_) - radius);
    end_x = min(depth_out.size().width, int(x_) + radius);
    end_y = min(depth_out.size().height, int(y_) + radius);
    sum_of_depth = 0.f;

    for (int i = begin_y; i<end_y; ++i) {
        for (int j = begin_x; j<end_x; ++j) {
            sum_of_depth += depth_out.at<unsigned char>(i, j);
        }
    }

    // Average over the actual window area. The previous code divided by
    // (end_x - begin_x)^2, which is wrong whenever the window is clipped
    // asymmetrically at the image border.
    int area = (end_x - begin_x) * (end_y - begin_y);
    if (area <= 0) return -1; // degenerate window: no depth samples
    d_ = sum_of_depth / area;
    // rescale the 8-bit disparity back to the SGBM range (16x sub-pixel)
    d_ = d_ * 16 * m_param.max_disp / 255;
    point_.at<double>(0, 0) = x_;
    point_.at<double>(1, 0) = y_;
    point_.at<double>(2, 0) = d_;
    point_.at<double>(3, 0) = 1.0F;
    Mat physic_point = Q * point_;
    // physic_point is 4x1, so index by row: Z / W. The old at<double>(0, 2)
    // and (0, 3) only reached the right elements by accident of row-major
    // address arithmetic on a single-column Mat.
    float dis = physic_point.at<double>(2, 0) / physic_point.at<double>(3, 0);
    return dis * 16;
}


void r3d_depth_image::R3DStereoDepth::GetDepth(const cv::Mat & i_disp, cv::Mat & o_depth) {
	// Convert an 8-bit disparity map into physical depth (millimeters) via
	// d = b * f / D, with b the stereo baseline and f the main focal length.
	const double baseline = m_param.extrinsics.trans.at<double>(0, 0);
	const double focal = m_param.intrinsics.cam_mat_main.at<double>(0, 0);
	// rescale the 8-bit disparity back onto its real range first
	cv::Mat disp_f32;
	i_disp.convertTo(disp_f32, CV_32FC1, sgbm_max_disp / 255.0);
	// element-wise: o_depth(i, j) = (baseline * focal) / disp_f32(i, j)
	cv::divide(baseline * focal, disp_f32, o_depth);
}


void r3d_depth_image::R3DStereoDepth::VisualizeDepth(
	const cv::Mat & depth_f32, 
	cv::Mat & vis_depth
	) {
	// Render a CV_32FC1 depth map as an 8-bit gray image: pixels with depth
	// <= depth_clip_max are scaled by 255/depth_range_max; clipped pixels get
	// a constant gray level (255 * depth_clip_max / depth_range_max).
	// check the inputs
	if (depth_range_max < depth_clip_max) {
		IMAGE_DEPTH_LOGE("[VisualizeDepth] assertion failed: depth_range_max < depth_clip_max!");
		WaitKey(0);
		FAIL_EXIT(EXIT_FAILURE);
	}
	// normalize the depth map to visualize it under gray scale
	// mask_ is 255 where depth <= clip, 0 elsewhere (CV_8U comparison result)
	cv::Mat mask_ = depth_f32 <= depth_clip_max;
	cv::Mat mask_f32;
	// the 255-valued mask becomes 255/depth_range_max, so the multiply below
	// maps kept depths onto depth * 255 / depth_range_max
	mask_.convertTo(mask_f32, CV_32FC1, 1.0 / depth_range_max);
	mask_f32 = mask_f32.mul(depth_f32);
	mask_f32.convertTo(vis_depth, CV_8UC1);
	// invert the mask so clipped pixels (now 255) receive the constant level
	// 255 * depth_clip_max / depth_range_max added on top of their zeros
	mask_ = 255 - mask_;
	mask_.convertTo(mask_, CV_8UC1, depth_clip_max / depth_range_max);
	vis_depth += mask_;
}


void r3d_depth_image::R3DStereoDepth::GetDisparity(
	const cv::Mat & i_depth, // uint8
	cv::Mat & o_disp
	) {
	// Inverse of GetDepth(): convert an 8-bit depth map back into an 8-bit
	// disparity map using D = b * f / d (b: baseline, f: focal length).
	// (The previous comment described the opposite conversion direction.)
	double base = m_param.extrinsics.trans.at<double>(0, 0);
	double fx = m_param.intrinsics.cam_mat_main.at<double>(0, 0);
	// rescale the 8-bit depth into millimeters over [0, depth_range_max],
	// invert into disparity, then quantize back over [0, sgbm_max_disp]
	cv::Mat depth_f64;
	i_depth.convertTo(depth_f64, CV_64FC1, depth_range_max/255.0);
	depth_f64 = (base * fx) / depth_f64;
	depth_f64.convertTo(o_disp, CV_8UC1, 255.0 / sgbm_max_disp);
}


void r3d_depth_image::R3DStereoDepth::MergeAlphaChannel(
	const cv::Mat & i_rgb,
	const cv::Mat & i_alpha,
	cv::Mat & o_rgba
	) {
	// Interleave a CV_8UC3 color image and a CV_8UC1 alpha plane into a
	// single CV_8UC4 image. Inputs must be non-empty, continuous, and of
	// matching size; violations are fatal.
	if (i_rgb.type() != CV_8UC3 ||
		i_rgb.rows*i_rgb.cols == 0 ||
		!i_rgb.isContinuous()) {
		IMAGE_DEPTH_LOGE("[MergeAlphaChannel] parameter [i_rgb] illegal!");
		FAIL_EXIT(1);
	}
	if (i_alpha.type() != CV_8UC1 ||
		i_alpha.rows*i_alpha.cols == 0 ||
		!i_alpha.isContinuous()) {
		IMAGE_DEPTH_LOGE("[MergeAlphaChannel] parameter [i_alpha] illegal!");
		FAIL_EXIT(1);
	}
	if (i_rgb.size() != i_alpha.size()) {
		IMAGE_DEPTH_LOGE("[MergeAlphaChannel] parameter size mismatched!");
		FAIL_EXIT(1);
	}
	// reuse the output buffer when it already has the right type and size
	if (o_rgba.type() != CV_8UC4 || o_rgba.size() != i_rgb.size()) {
		o_rgba = cv::Mat(i_rgb.size(), CV_8UC4);
	}
	// both inputs are continuous, so a single flat pass over all pixels works
	const unsigned char * src_rgb = (const unsigned char*)(i_rgb.data);
	const unsigned char * src_a = (const unsigned char*)(i_alpha.data);
	unsigned char * dst = (unsigned char*)(o_rgba.data);

	const int n_px = i_rgb.rows * i_rgb.cols;
	for (int px = 0; px < n_px; ++px) {
		*dst++ = *src_rgb++; // channel 0
		*dst++ = *src_rgb++; // channel 1
		*dst++ = *src_rgb++; // channel 2
		*dst++ = *src_a++;   // alpha
	}
}


void r3d_depth_image::R3DStereoDepth::Get3DPointCloud(
	const cv::Mat & i_rgba, // color map in CV_8UC4
	const cv::Mat & i_depth, // CV_32FC1
	const cv::Mat & i_cmm, // camera matrix in 64FC1
	cv::Mat & o_cloud) { // CV_32FC4: XYZ[RGBA]
	// Back-project every depth pixel into a colored 3D point. The pixel
	// offset from the principal point is expressed in polar form (theta is
	// preserved; the radius is rescaled by Z / fx), which is equivalent to
	// X = (j - cx) * Z / fx, Y = (i - cy) * Z / fx.
	// check if the input is of illegal data type
	if (i_depth.type() != CV_32FC1 || 
		i_depth.rows*i_depth.cols==0 ||
		!i_depth.isContinuous()) {
		IMAGE_DEPTH_LOGE("[Get3DPointCloud] parameter [i_depth] illegal!");
		FAIL_EXIT(1);
	}
	if (i_rgba.type() != CV_8UC4 ||
		i_rgba.rows*i_rgba.cols == 0 ||
		!i_rgba.isContinuous()) {
		IMAGE_DEPTH_LOGE("[Get3DPointCloud] parameter [i_rgba] illegal!");
		FAIL_EXIT(1);
	}
	if (i_rgba.size() != i_depth.size()) {
		IMAGE_DEPTH_LOGE("[Get3DPointCloud] parameter size mismatched!");
		FAIL_EXIT(1);
	}
	// allocate the memory used for output
	if (o_cloud.type() != CV_32FC4 || o_cloud.size() != i_depth.size()) {
		o_cloud = cv::Mat(i_depth.size(), CV_32FC4);
	}
	// expose the pointers
	// NOTE(review): the 4 RGBA bytes of each pixel are type-punned through a
	// float and stored in the cloud's 4th channel (read back byte-wise in
	// SavePointCloudAsPLY). Copying through float can quiet signaling-NaN
	// bit patterns on some platforms -- consider memcpy if colors corrupt.
	float * pRGBA = (float*)(i_rgba.data);
	float * pD = (float*)(i_depth.data);
	float * pC = (float*)(o_cloud.data);
	// get intrinsic parameters from the camera matrix
	// (assumes fx == fy -- TODO confirm; cmm(1,1) is never read here)
	float fx = i_cmm.at<double>(0, 0);
	int nRow = i_depth.rows;
	int nCol = i_depth.cols;
	float cx = i_cmm.at<double>(0, 2);
	float cy = i_cmm.at<double>(1, 2);

	for (int i = 0; i < nRow; ++i) {
		for (int j = 0; j < nCol; ++j) {
			// pixel offset relative to the principal point
			float x = j - cx;
			float y = i - cy;
			// std::sqrt (float overload) instead of non-standard std::sqrtf
			float r_ = std::sqrt(x*x + y*y);
			// guard the principal point: r_ == 0 previously produced NaN
			// sin/cos and hence NaN X/Y for that single pixel; the correct
			// limit is X = Y = 0
			float sin_ = (r_ > 0.f) ? (y / r_) : 0.f;
			float cos_ = (r_ > 0.f) ? (x / r_) : 0.f;
			int off_ = i*nCol + j;
			// rescale the pixel radius into a metric radius at depth Z
			float R_ = r_*pD[off_] / fx;
			// use new Radius to update XYZ coordinates
			pC[4 * off_] = R_*cos_; // X
			pC[4 * off_ + 1] = R_*sin_; // Y
			pC[4 * off_ + 2] = pD[off_]; // Z
			pC[4 * off_ + 3] = pRGBA[off_]; // RGBA
		}
	}
}


// dense cloud only
void r3d_depth_image::R3DStereoDepth::EstimateNormal(
	const cv::Mat & i_cloud, // CV_32FC4
	cv::Mat & o_normal,// CV_32FC3
	int radius,
	int sample_rate,
	float threshold,
	int min_support){ 
	// search neighbors for each point to estimate the normals
	// this can only works with dense cloud
	// for sparse cloud, using KDTree instead
	
	// to avoid discontinous memory allocation
	cv::Mat c;
	if (!i_cloud.isContinuous())
		c = i_cloud.clone();
	else c = i_cloud;
	
	int & r = radius;
	int & sr = sample_rate;
	float t = SQR(threshold);
	int & ms = min_support;
	cv::Mat& n = o_normal;

	// allocation
	if (n.size() != c.size() || n.type() != CV_32FC3 || 
		(!n.isContinuous())) {
		n = cv::Mat::zeros(c.size(), CV_32FC3);
	}
	else n(cv::Rect2d(0, 0, n.size().width, n.size().height)) = 0.0;

	// search neighbors
	int w = c.size().width;
	int h = c.size().height;

	float * pC = (float*)(c.data);
	float * pN = (float*)(n.data);

#pragma omp parallel for
	for (int i = 0; i < h; i += sr) {
		for (int j = 0; j < w; j += sr) {
			int p = (i*w + j) << 2;
			float x0 = pC[p], y0 = pC[p + 1], z0 = pC[p + 2];

			int min_x = std::max(j - r, 0);
			int max_x = std::min(j + r, w - 1);
			int min_y = std::max(i - r, 0);
			int max_y = std::min(i + r, h - 1);

			//std::vector<cv::Point3f> nb; // neighbors
			//nb.reserve(SQR(2 * r + 1)-1); // reserve memory to speed up
			// average of xx,xy,xz,yy,yz,zz,x,y,z of neighbors
			float av[9] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0};
			int nn = 0; // number of neighbors
			
			for (int y = min_y; y < max_y; ++y) {
				for (int x = min_x; x < max_x; ++x) {
					int q = (y*w + x) << 2;
					float x1 = pC[q], y1 = pC[q + 1], z1 = pC[q + 2];
					float dis = SQR(x0 - x1) + SQR(y0 - y1) + SQR(z0 - z1);
					if (dis < t) { // within neighborhood
						//nb.push_back(cv::Point3f(pC[q], pC[q + 1], pC[q + 2]));
						++nn;
						av[0] += x1 * x1;
						av[1] += x1 * y1;
						av[2] += x1 * z1;
						av[3] += y1 * y1;
						av[4] += y1 * z1;
						av[5] += z1 * z1;
						av[6] += x1;
						av[7] += y1;
						av[8] += z1;
					}
				}
			}
			
			if (nn < ms) {
				// set the normal as (0,0,0)
				//p = (p>>2) * 3;
				//pN[p] = pN[p + 1] = pN[p + 2] = 0;
				continue; // neighbors are insufficient
			}

			// calculate the covariance matrix based on neighbors
			cv::Mat cov(3, 3, CV_32FC1);
			float *pCov = (float*)(cov.data);

			av[0] /= nn;
			av[1] /= nn;
			av[2] /= nn;
			av[3] /= nn;
			av[4] /= nn;
			av[5] /= nn;
			av[6] /= nn;
			av[7] /= nn;
			av[8] /= nn;

			/*
			| \sigma(x,x) \sigma(y,x) \sigma(z,x) |
			| \sigma(x,y) \sigma(y,y) \sigma(z,y) |
			| \sigma(x,z) \sigma(y,z) \sigma(z,z) |
			*/

			// \sigma(x,x) = \hat{x^2} - \hat{x}^2 
			pCov[0] = av[0] - av[6] * av[6];
			// \sigma(x,y) = \hat{x^2} - \hat{x}\hat{y} 
			pCov[1] = av[1] - av[6] * av[7];
			// \sigma(x,z) = \hat{xz} - \hat{x}\hat{z} 
			pCov[2] = av[2] - av[6] * av[8];
			// \sigma(y,x) = \sigma(x,y)
			pCov[3] = pCov[1];
			// \sigma(y,y) = \hat{y^2} - \hat{y}^2
			pCov[4] = av[3] - av[7] * av[7];
			// \sigma(y,z) = \hat{yz} - \hat{y}\hat{z}
			pCov[5] = av[4] - av[7] * av[8];
			// \sigma(z,x) = \sigma(x,z)
			pCov[6] = pCov[2];
			// \sigma(z,y) = \sigma(y,z)
			pCov[7] = pCov[5];
			// \sigma(z,z) = \hat{z^2} - \hat{z}^2
			pCov[8]= av[5] - av[8] * av[8];

			// solve the egien values and corresponding vectors of cov
			cv::Mat eig, eig_vec;
			if (!cv::eigen(cov, eig, eig_vec)) {
				IMAGE_DEPTH_LOGE("failed to solve eigen values of cov-mat!");
				FAIL_EXIT(-1);
			}
			
			float * pE = (float*)(eig.data);
			float * pEV = (float*)(eig_vec.data);
			
			// check if there's more than one zero eigen values
			if (CHECK_ZERO(pE[0]) + CHECK_ZERO(pE[1]) + CHECK_ZERO(pE[2]) > 1) {
				IMAGE_DEPTH_LOGW("Line has no normal!");
				// set the normal as (0,0,0)
				//p = (p >> 2) * 3;
				//pN[p] = pN[p + 1] = pN[p + 2] = 0;
				continue;
			}

			// find out the eigen vector of minimum eigen value
			float min_eig = std::fmin(std::fmin(pE[0], pE[1]), pE[2]);
			p = (p >> 2) * 3;
			int _off = 0;
			if (pE[2] == min_eig) _off = 6;
			else if (pE[1] == min_eig) _off = 3;

			// flip the correspoding eigen vector according to viewpoint
			if (pEV[_off] * x0 + pEV[_off + 1] * y0 + pEV[_off + 2] * z0 < 0) {
				pN[p] = pEV[_off];
				pN[p + 1] = pEV[_off + 1];
				pN[p + 2] = pEV[_off + 2];
			}
			else {
				pN[p] = -pEV[_off];
				pN[p + 1] = -pEV[_off + 1];
				pN[p + 2] = -pEV[_off + 2];
			}
		}
	}
}


void r3d_depth_image::R3DStereoDepth::SavePointCloudAsPLY(
	const cv::Mat & cloud,
	const cv::Mat & normals,
	const char * file,
	float max_depth,
	bool use_ascii
	) {
	// Write the colored point cloud as an ASCII PLY file. Only points with a
	// valid (non-zero) normal and depth <= max_depth are exported; the packed
	// RGBA float in the cloud's 4th channel is unpacked byte-wise into b/g/r.
	// @param cloud     CV_32FC4 organized cloud (X, Y, Z, packed RGBA)
	// @param normals   CV_32FC3 normal map, same size as cloud
	// @param file      output path
	// @param max_depth maximum Z value to export
	// @param use_ascii only ASCII output is implemented; binary is fatal
	if (cloud.type() != CV_32FC4 || normals.type() != CV_32FC3) {
		IMAGE_DEPTH_LOGE("[%s][%s][%d]", __FILE__, __FUNCDNAME__, __LINE__);
		FAIL_EXIT(-1);
	}
	if (cloud.size() != normals.size()) {
		IMAGE_DEPTH_LOGE("[%s][%s][%d]", __FILE__, __FUNCDNAME__, __LINE__);
		FAIL_EXIT(-1);
	}
	// a demo for PLY formatted file
	/*
	ply
	format ascii 1.0
	element vertex 4
	property float x
	property float y
	property float z
	property float nx
	property float ny
	property float nz
	property uchar b
	property uchar g
	property uchar r
	end_header
	0 0 1 0 0 1
	1 0 1 0 0 1
	1 1 1 0 0 1
	0 1 1 0 0 1
	*/
	
	// create a file for writing data
	std::fstream ply_file;
	ply_file.open(file, std::ios::out);
	// the open result was previously unchecked: a bad path silently produced
	// no file while every << below was a no-op
	if (!ply_file.is_open()) {
		IMAGE_DEPTH_LOGE("[SavePointCloudAsPLY] failed to open [%s] for writing!", file);
		return;
	}
	
	// header begin
	ply_file << "ply" << std::endl;
	if (use_ascii) ply_file << "format ascii 1.0" << std::endl;
	else { IMAGE_DEPTH_LOGE("NOT IMPLEMENTED YET!"); FAIL_EXIT(-1); }
	
	// ensure the data is continuous in memory before raw-pointer access
	cv::Mat normals_ref;
	if (!normals.isContinuous()) normals_ref = normals.clone();
	else normals_ref = normals;

	cv::Mat cloud_ref;
	if (!cloud.isContinuous()) cloud_ref = cloud.clone();
	else cloud_ref = cloud;

	const float * pN = (float*)(normals_ref.data);
	const float * pC = (float*)(cloud_ref.data);
	
	// first pass: count the exportable points (PLY needs the count up front)
	int w = normals.size().width;
	int h = normals.size().height;
	
	float x, y, z, nx, ny, nz;
	int n_pts = 0;
	int off_ = 0;
	for (int i = 0; i < h; ++i) {
		for (int j = 0; j < w; ++j) {
			nx = pN[3 * (off_ + j)];
			ny = pN[3 * (off_ + j) + 1];
			nz = pN[3 * (off_ + j) + 2];
			// (0,0,0) marks "no normal estimated" -- skip those points
			if (nx != 0 || ny != 0 || nz != 0) {
				z = pC[4 * (off_ + j) + 2];
				if (z <= max_depth) ++n_pts;
			}
		}
		off_ += w;
	}
	ply_file << "element vertex " << n_pts << std::endl;
	
	// property list
	ply_file << "property float x" << std::endl;
	ply_file << "property float y" << std::endl;
	ply_file << "property float z" << std::endl;
	ply_file << "property float nx" << std::endl;
	ply_file << "property float ny" << std::endl;
	ply_file << "property float nz" << std::endl;
	ply_file << "property uchar blue" << std::endl;
	ply_file << "property uchar green" << std::endl;
	ply_file << "property uchar red" << std::endl;

	// finish header
	ply_file << "end_header" << std::endl;

	// second pass: write down all points
	off_ = 0;
	int r, g, b;
	const uint8_t * pBGRA;
	for (int i = 0; i < h; ++i) {
		for (int j = 0; j < w; ++j) {
			nx = pN[3 * (off_ + j)];
			ny = pN[3 * (off_ + j) + 1];
			nz = pN[3 * (off_ + j) + 2];
			if (nx!=0 || ny!=0 || nz!=0) {
				x = pC[4 * (off_ + j)];
				y = pC[4 * (off_ + j) + 1];
				z = pC[4 * (off_ + j) + 2];
				if (z > max_depth) continue;
				// unpack the RGBA bytes stored in the float channel
				pBGRA = (const uint8_t*)(&pC[4 * (off_ + j) + 3]);
				b = pBGRA[0];
				g = pBGRA[1];
				r = pBGRA[2];
				ply_file << x << " " << y << " " << z << " "
					<< nx << " " << ny << " " << nz << " "
					<< b << " " << g << " " << r
					<< std::endl;
			}
		}
		off_ += w;
	}
	ply_file.close();
	WaitKey(0);
}


void r3d_depth_image::R3DStereoDepth::Recon3DModel(
	std::vector<Triangle> & mesh, 
	const cv::Mat & cloud, 
	const cv::Mat & normals
	) {
	// Stub: intended to reconstruct a triangle mesh from the point cloud and
	// its normal map (Poisson surface reconstruction). Not implemented yet;
	// [mesh] is left untouched.
	// using Poisson Estimation to guess the 3d model

}

