//
// Created by Administrator on 2019\11\1 0001.
//
#include "TCLStereoDepthAlgUtils.h"
#include <iostream>

// Draws the optical-flow field on `output`: one arrow per src->dst pair plus
// a circle at the source whose radius encodes half the flow angle in degrees.
// Color encodes the angle sign (negative vs non-negative).
// Exits the process when src and dst have different lengths.
//
// output : image drawn onto in place
// src    : flow start points
// dst    : flow end points (same length as src)
void tcl_depth_video::VisualizeOpticalFlow(
        cv::Mat & output,
        const std::vector<cv::Point2f> & src,
        const std::vector<cv::Point2f> & dst) {
    if (src.size() != dst.size()) {
        VIDEO_DEPTH_LOGE("[VisualizeOpticalFlow]: source point number does not match");
        exit(-1);
    }
    const float kPi = 3.14159265f; // single pi constant (was two inconsistent literals)
    for (size_t i = 0; i < dst.size(); i++) {
        const float dy = dst[i].y - src[i].y;
        const float dx = dst[i].x - src[i].x;
        float theta;
        if (dx == 0.f) {
            // vertical flow: atan(dy/dx) is undefined, pick +-pi/2 by sign of dy
            if (dy > 0.f) theta = kPi / 2;
            else if (dy < 0.f) theta = -kPi / 2;
            else theta = 0.f; // zero flow
        } else {
            theta = std::atan(dy / dx); // angle in (-pi/2, pi/2)
        }
        const float angle = theta / kPi * 180.f; // radians -> degrees
        if (angle < 0) {
            cv::circle(output, src[i], -angle / 2, cv::Scalar(0, 100, 200), 1);
            cv::arrowedLine(output, src[i], dst[i], cv::Scalar(100, 200, 0), 1, 8, 0);
        }
        else {
            cv::circle(output, src[i], angle / 2, cv::Scalar(0, 200, 100), 1);
            cv::arrowedLine(output, src[i], dst[i], cv::Scalar(200, 100, 0), 1, 8, 0);
        }
    }
}


// To use SIMD128, keep operations restricted to only [+-*/].
// Values are complemented: depth of neighbors is 255-{v1,v2,v3,v4};
// color distance of neighbors is 255-{d1,d2,d3,d4}.
// s = v1^k + v2^k + v3^k + v4^k (k = 1, 2, ...)
// First polarize each d_{i} to be 0 or 1 against the mean distance:
//   t = (d1 + d2 + d3 + d4)/4;
//   d_{i} = d_{i} > t
// then redefine t as the sum of the polarized flags:
//   t = d1 + d2 + d3 + d4
// and take the weighted depth:
// v = v1*(v1^{k}/s*d1/t) +
//     v2*(v2^{k}/s*d2/t) +
//     v3*(v3^{k}/s*d3/t) +
//     v4*(v4^{k}/s*d4/t);
// Depth-aware "erosion": each output pixel becomes the mean of the
// 4 axis-aligned neighbors (at distance r) whose color is close to the
// center AND whose depth is at most the mean neighbor depth; if no
// neighbor qualifies the original value is kept. All logic is branch-free
// 0/1 flag arithmetic on small unsigned integers so the loop can later be
// vectorized with SIMD128.
//
// o_   : CV_8UC1 output, (re)created when size/type differ from i_
// i_   : CV_8UC1 input depth/disparity map
// rgb  : CV_8UC3 guide image, same size as i_
// r    : neighbor offset in pixels; neighbors are clamped at the borders
// thr_ : cap applied to the mean color distance before polarization
void tcl_depth_video::Erode_v3(
        cv::Mat & o_,
        const cv::Mat & i_,
        const cv::Mat & rgb,
        int r,
        uint8_t thr_
) {
    if (i_.type() != CV_8UC1 ||
        rgb.type() != CV_8UC3 ||
        i_.size() != rgb.size()) {
        VIDEO_DEPTH_LOGE("[Erode_v3]: illegal inputs");
        exit(-1);
    }
    if (o_.size() != i_.size() || o_.type() != i_.type()) {
        o_.create(i_.size(), i_.type());
    }

    unsigned char * p_o = o_.data;
    unsigned char * p_i = i_.data;
    const unsigned char * p_c = rgb.data;

    int h, w;
    h = i_.size().height;
    w = i_.size().width;
    unsigned char v_o; // value of depth at the origin
    unsigned char v[4]; // depth of neighbors
    unsigned char c[3]; // color of origin
    unsigned short d_u16[4]; // color distance in u16
    unsigned char d_u8[4]; // color distance in u8
    unsigned char d_mean_u8; // mean color distance in u8
    unsigned short sum_u16; // sum of weights in u16
    unsigned short v_u16; // the weighted sum of depth
    int addr_[4], addr_0;
    unsigned char zero_flag; // 0/1 flag, reused for border tests and the zero-sum test

    // use non-uniform weighted sum to replace min/max
    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            // origin
            addr_0 = i * w + j;
            v_o = p_i[addr_0];
            addr_0 *= 3;
            c[0] = p_c[addr_0];
            c[1] = p_c[addr_0 + 1];
            c[2] = p_c[addr_0 + 2];
            // neighbors: up/down/left/right at distance r, clamped to the
            // image border with branch-free flag arithmetic
            zero_flag = i >= r;
            addr_[0] = zero_flag * (i - r) * w + j; // up (row 0 when i-r < 0)
            zero_flag = i + r < h;
            addr_[1] = (zero_flag * (i + r) + (1 - zero_flag) * (h - 1)) * w + j; // down (clamped to h-1)
            zero_flag = j >= r;
            addr_[2] = i * w + zero_flag * (j - r); // left (col 0 when j-r < 0)
            zero_flag = j + r < w;
            addr_[3] = i * w + zero_flag * (j + r) + (1 - zero_flag) * (w - 1); // right (clamped to w-1)
            v[0] = p_i[addr_[0]];
            v[1] = p_i[addr_[1]];
            v[2] = p_i[addr_[2]];
            v[3] = p_i[addr_[3]];
            // channel R
            addr_[0] *= 3;
            addr_[1] *= 3;
            addr_[2] *= 3;
            addr_[3] *= 3;
            d_u16[0] = std::abs(p_c[addr_[0]] - c[0]);
            d_u16[1] = std::abs(p_c[addr_[1]] - c[0]);
            d_u16[2] = std::abs(p_c[addr_[2]] - c[0]);
            d_u16[3] = std::abs(p_c[addr_[3]] - c[0]);
            // channel G
            addr_[0] ++;
            addr_[1] ++;
            addr_[2] ++;
            addr_[3] ++;
            d_u16[0] += std::abs(p_c[addr_[0]] - c[1]);
            d_u16[1] += std::abs(p_c[addr_[1]] - c[1]);
            d_u16[2] += std::abs(p_c[addr_[2]] - c[1]);
            d_u16[3] += std::abs(p_c[addr_[3]] - c[1]);
            // channel B
            addr_[0] ++;
            addr_[1] ++;
            addr_[2] ++;
            addr_[3] ++;
            d_u16[0] += std::abs(p_c[addr_[0]] - c[2]);
            d_u16[1] += std::abs(p_c[addr_[1]] - c[2]);
            d_u16[2] += std::abs(p_c[addr_[2]] - c[2]);
            d_u16[3] += std::abs(p_c[addr_[3]] - c[2]);
            // normalize color distance to uint8 (sum of 3 channels, max 765, /4)
            d_u8[0] = d_u16[0] >> 2;
            d_u8[1] = d_u16[1] >> 2;
            d_u8[2] = d_u16[2] >> 2;
            d_u8[3] = d_u16[3] >> 2;
            // get average of color distances
            sum_u16 = d_u8[0];
            sum_u16 += d_u8[1];
            sum_u16 += d_u8[2];
            sum_u16 += d_u8[3];
            d_mean_u8 = sum_u16 >> 2;
            d_mean_u8 = d_mean_u8 < thr_ ? d_mean_u8 : thr_; // cap the threshold at thr_
            // polarize color distances: 1 = color close enough to the center
            d_u8[0] = d_u8[0] <= d_mean_u8;
            d_u8[1] = d_u8[1] <= d_mean_u8;
            d_u8[2] = d_u8[2] <= d_mean_u8;
            d_u8[3] = d_u8[3] <= d_mean_u8;
            // get average of depth
            sum_u16 = v[0];
            sum_u16 += v[1];
            sum_u16 += v[2];
            sum_u16 += v[3];
            d_mean_u8 = sum_u16 >> 2;
            // convert from depth to weight[only on erode]: keep the darker half
            d_u16[0] = v[0] <= d_mean_u8;
            d_u16[1] = v[1] <= d_mean_u8;
            d_u16[2] = v[2] <= d_mean_u8;
            d_u16[3] = v[3] <= d_mean_u8;
            // compute the weight for each neighbor: color-close AND low-depth
            d_u16[0] &= d_u8[0];
            d_u16[1] &= d_u8[1];
            d_u16[2] &= d_u8[2];
            d_u16[3] &= d_u8[3];
            // sum of weights
            sum_u16 = d_u16[0];
            sum_u16 += d_u16[1];
            sum_u16 += d_u16[2];
            sum_u16 += d_u16[3];
            zero_flag = sum_u16 == 0; // 1 when no neighbor qualified
            // get the final weighted depth
            v_u16 = v[0] * d_u16[0];
            v_u16 += v[1] * d_u16[1];
            v_u16 += v[2] * d_u16[2];
            v_u16 += v[3] * d_u16[3];
            sum_u16 += zero_flag; // avoid division by zero
            v_u16 /= sum_u16;
            v_u16 *= (!zero_flag); // mean of qualified neighbors ...
            v_o *= zero_flag;      // ... or the original value when none qualified
            v_u16 += v_o;
            p_o[i * w + j] = v_u16;
        }
    }
}


// Depth-aware "dilation": mirror of Erode_v3 — each output pixel becomes
// the mean of the 4 axis-aligned neighbors (at distance r) whose color is
// close to the center AND whose depth is at least the mean neighbor depth;
// if no neighbor qualifies the original value is kept. Branch-free 0/1 flag
// arithmetic on small unsigned integers, SIMD128-friendly.
//
// o_   : CV_8UC1 output, (re)created when size/type differ from i_
// i_   : CV_8UC1 input depth/disparity map
// rgb  : CV_8UC3 guide image, same size as i_
// r    : neighbor offset in pixels; neighbors are clamped at the borders
// thr_ : cap applied to the mean color distance before polarization
void tcl_depth_video::Dilate_v3(
        cv::Mat & o_,
        const cv::Mat & i_,
        const cv::Mat & rgb,
        int r,
        uint8_t thr_
) {
    if (i_.type() != CV_8UC1 ||
        rgb.type() != CV_8UC3 ||
        i_.size() != rgb.size()) {
        VIDEO_DEPTH_LOGE("[Dilate_v3]: illegal inputs");
        exit(-1);
    }
    if (o_.size() != i_.size() || o_.type() != i_.type()) {
        o_.create(i_.size(), i_.type());
    }

    unsigned char * p_o = o_.data;
    unsigned char * p_i = i_.data;
    const unsigned char * p_c = rgb.data;

    int h, w;
    h = i_.size().height;
    w = i_.size().width;
    unsigned char v_o; // value of depth at the origin
    unsigned char v[4]; // depth of neighbors
    unsigned char c[3]; // color of origin
    unsigned short d_u16[4]; // color distance in u16
    unsigned char d_u8[4]; // color distance in u8
    unsigned char d_mean_u8; // mean color distance in u8
    unsigned short sum_u16; // sum of weights in u16
    unsigned short v_u16; // the weighted sum of depth
    int addr_[4], addr_0;
    unsigned char zero_flag; // 0/1 flag, reused for border tests and the zero-sum test

    // use non-uniform weighted sum to replace min/max
    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            // origin
            addr_0 = i * w + j;
            v_o = p_i[addr_0];
            addr_0 *= 3;
            c[0] = p_c[addr_0];
            c[1] = p_c[addr_0 + 1];
            c[2] = p_c[addr_0 + 2];
            // neighbors: up/down/left/right at distance r, clamped to the
            // image border with branch-free flag arithmetic
            zero_flag = i >= r;
            addr_[0] = zero_flag * (i - r) * w + j; // up (row 0 when i-r < 0)
            zero_flag = i + r < h;
            addr_[1] = (zero_flag * (i + r) + (1 - zero_flag) * (h - 1)) * w + j; // down (clamped to h-1)
            zero_flag = j >= r;
            addr_[2] = i * w + zero_flag * (j - r); // left (col 0 when j-r < 0)
            zero_flag = j + r < w;
            addr_[3] = i * w + zero_flag * (j + r) + (1 - zero_flag) * (w - 1); // right (clamped to w-1)
            v[0] = p_i[addr_[0]];
            v[1] = p_i[addr_[1]];
            v[2] = p_i[addr_[2]];
            v[3] = p_i[addr_[3]];
            // channel R
            addr_[0] *= 3;
            addr_[1] *= 3;
            addr_[2] *= 3;
            addr_[3] *= 3;
            d_u16[0] = std::abs(p_c[addr_[0]] - c[0]);
            d_u16[1] = std::abs(p_c[addr_[1]] - c[0]);
            d_u16[2] = std::abs(p_c[addr_[2]] - c[0]);
            d_u16[3] = std::abs(p_c[addr_[3]] - c[0]);
            // channel G
            addr_[0] ++;
            addr_[1] ++;
            addr_[2] ++;
            addr_[3] ++;
            d_u16[0] += std::abs(p_c[addr_[0]] - c[1]);
            d_u16[1] += std::abs(p_c[addr_[1]] - c[1]);
            d_u16[2] += std::abs(p_c[addr_[2]] - c[1]);
            d_u16[3] += std::abs(p_c[addr_[3]] - c[1]);
            // channel B
            addr_[0] ++;
            addr_[1] ++;
            addr_[2] ++;
            addr_[3] ++;
            d_u16[0] += std::abs(p_c[addr_[0]] - c[2]);
            d_u16[1] += std::abs(p_c[addr_[1]] - c[2]);
            d_u16[2] += std::abs(p_c[addr_[2]] - c[2]);
            d_u16[3] += std::abs(p_c[addr_[3]] - c[2]);
            // normalize color distance to uint8 (sum of 3 channels, max 765, /4)
            d_u8[0] = d_u16[0] >> 2;
            d_u8[1] = d_u16[1] >> 2;
            d_u8[2] = d_u16[2] >> 2;
            d_u8[3] = d_u16[3] >> 2;
            // get average of color distances
            sum_u16 = d_u8[0];
            sum_u16 += d_u8[1];
            sum_u16 += d_u8[2];
            sum_u16 += d_u8[3];
            d_mean_u8 = sum_u16 >> 2;
            d_mean_u8 = d_mean_u8 < thr_ ? d_mean_u8 : thr_; // cap the threshold at thr_
            // polarize color distances: 1 = color close enough to the center
            d_u8[0] = d_u8[0] <= d_mean_u8;
            d_u8[1] = d_u8[1] <= d_mean_u8;
            d_u8[2] = d_u8[2] <= d_mean_u8;
            d_u8[3] = d_u8[3] <= d_mean_u8;
            // get average of depth
            sum_u16 = v[0];
            sum_u16 += v[1];
            sum_u16 += v[2];
            sum_u16 += v[3];
            d_mean_u8 = sum_u16 >> 2;
            // convert from depth to weight[only on dilate]: keep the brighter half
            d_u16[0] = v[0] >= d_mean_u8;
            d_u16[1] = v[1] >= d_mean_u8;
            d_u16[2] = v[2] >= d_mean_u8;
            d_u16[3] = v[3] >= d_mean_u8;
            // compute the weight for each neighbor: color-close AND high-depth
            d_u16[0] &= d_u8[0];
            d_u16[1] &= d_u8[1];
            d_u16[2] &= d_u8[2];
            d_u16[3] &= d_u8[3];
            // sum of weights
            sum_u16 = d_u16[0];
            sum_u16 += d_u16[1];
            sum_u16 += d_u16[2];
            sum_u16 += d_u16[3];
            zero_flag = sum_u16 == 0; // 1 when no neighbor qualified
            // get the final weighted depth
            v_u16 = v[0] * d_u16[0];
            v_u16 += v[1] * d_u16[1];
            v_u16 += v[2] * d_u16[2];
            v_u16 += v[3] * d_u16[3];
            sum_u16 += zero_flag; // avoid division by zero
            v_u16 /= sum_u16;
            v_u16 *= (!zero_flag); // mean of qualified neighbors ...
            v_o *= zero_flag;      // ... or the original value when none qualified
            v_u16 += v_o;
            p_o[i * w + j] = v_u16;
        }
    }
}


// Smooths a disparity map over edge-bounded regions: pixels are grown into
// 4-connected domains whose expansion is blocked by edge pixels; each domain
// with at least min_support non-zero samples is assigned the average
// disparity of its members (otherwise 0). When use_mask is true only
// zero-valued (hole) pixels are overwritten, otherwise every labeled pixel.
//
// o_           : CV_8UC1 output, (re)created when size/type differ from i_
// i_           : CV_8UC1 input disparity map
// edge         : CV_8UC1 edge mask (non-zero = edge / region boundary)
// use_mask     : true -> fill only zero-disparity pixels inside domains
// min_support  : minimum non-zero sample count for a domain to get an average
// thread_state : cooperative cancellation flag, polled throughout
//
// Returns 1 on completion, 0 when cancelled (*thread_state != THREAD_STATE_PLAY).
int tcl_depth_video::SmoothDisparity(cv::Mat & o_,
                     const cv::Mat & i_,
                     const cv::Mat & edge,
                     bool use_mask,
                     int min_support,
                     ThreadState * thread_state
) {
    if (i_.type() != CV_8UC1) {
        VIDEO_DEPTH_LOGE("[SmoothDisparity]: expect input disparity map in CV_8UC1!");
        exit(-1);
    }

    if (edge.type() != CV_8UC1) {
        VIDEO_DEPTH_LOGE("[SmoothDisparity]: expect input edge in CV_8UC1");
        exit(-1);
    }

    int h, w;
    h = i_.size().height;
    w = i_.size().width;
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    if (o_.size() != i_.size() || o_.type() != i_.type()) {
        o_.create(i_.size(), i_.type());
    }

    unsigned char * data_o = o_.data;
    const unsigned char * data_i = i_.data;
    const unsigned char * data_edge = edge.data;

    // create a label matrix to store labels for every pixel in main view
    // NOTE(review): labels are CV_16UC1, so more than 65535 domains would
    // overflow the uid — confirm image sizes keep the count below that.
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    cv::Mat id_map = cv::Mat::zeros(i_.size(), CV_16UC1);
    unsigned short * data_id = (unsigned short *)(id_map.data);
    std::vector<cv::Point2i> nodes; // DFS stack for the flood fill
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    nodes.reserve(h * w / 4);
    nodes.resize(0);
    std::vector<int> counts; // per-domain non-zero sample count
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    counts.reserve(h * w / 4);
    counts.resize(0);
    std::vector<int> avgs; // per-domain disparity sum, later divided to the mean
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    avgs.reserve(h * w / 4);
    avgs.resize(0);
    int uid = 0;
    const int r = 1;
    int x_min, x_max, y_min, y_max;
    bool flag;

    // local vars
    cv::Point2i pt_;
    int ind;
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    for (int i = 0; i < h; ++i) {
        if (*thread_state != THREAD_STATE_PLAY) return 0;
        for (int j = 0; j < w; ++j) {
            if (*thread_state != THREAD_STATE_PLAY) return 0;
            ind = i * w + j;
            if (data_edge[ind] || data_id[ind]) continue;
            // check if this point be a potential source: its full (2r+1)^2
            // neighborhood must be edge-free
            x_min = std::max(0, i - r);
            x_max = std::min(h - 1, i + r);
            y_min = std::max(0, j - r);
            y_max = std::min(w - 1, j + r);
            flag = true;
            if (*thread_state != THREAD_STATE_PLAY) return 0;
            for (int s = x_min; s <= x_max && flag; ++s) {
                for (int t = y_min; t <= y_max; ++t) {
                    if (data_edge[s*w + t]) {
                        flag = false;
                        break;
                    }
                }
            }
            if (!flag) continue;
            // begin to search connected domains
            uid++;
            counts.push_back(0);
            avgs.push_back(0);
            // search for connected domain
            nodes.resize(0);
            nodes.push_back(cv::Point2i(i, j)); // center; Point2i here stores (row, col)
            data_id[ind] = uid;
            // NOTE(review): the seed is counted even when data_i[ind] == 0,
            // unlike neighbors where black holes are skipped — confirm the
            // asymmetry is intended.
            counts[uid - 1]++;
            avgs[uid - 1] += data_i[ind];
            if (*thread_state != THREAD_STATE_PLAY) return 0;
            while (nodes.size() > 0) {
                pt_ = nodes.back();
                nodes.pop_back();
                // check if this point be a potential source
                x_min = std::max(0, pt_.x - r);
                x_max = std::min(h - 1, pt_.x + r);
                y_min = std::max(0, pt_.y - r);
                y_max = std::min(w - 1, pt_.y + r);
                flag = true;
                if (*thread_state != THREAD_STATE_PLAY) return 0;
                for (int s = x_min; s <= x_max && flag; ++s) {
                    for (int t = y_min; t <= y_max; ++t) {
                        if (data_edge[s*w + t]) {
                            flag = false;
                            break;
                        }
                    }
                }

                if (!flag) continue;

                // search for 4 neighbors
                if (*thread_state != THREAD_STATE_PLAY) return 0;
                if (pt_.x >= 1) {
                    ind = (pt_.x - 1)*w + pt_.y;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x - 1, pt_.y)); // up
                        data_id[ind] = uid;
                        if (data_i[ind]) { // black holes do not count
                            counts[uid - 1]++;
                            avgs[uid - 1] += data_i[ind];
                        }
                    }
                }
                if (pt_.y >= 1) {
                    ind = pt_.x*w + pt_.y - 1;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x, pt_.y - 1)); // left
                        data_id[ind] = uid;
                        if (data_i[ind]) { // black holes do not count
                            counts[uid - 1]++;
                            avgs[uid - 1] += data_i[ind];
                        }
                    }
                }
                if (pt_.y <= w - 2) {
                    ind = pt_.x*w + pt_.y + 1;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x, pt_.y + 1)); // right
                        data_id[ind] = uid;
                        if (data_i[ind]) { // black holes do not count
                            counts[uid - 1]++;
                            avgs[uid - 1] += data_i[ind];
                        }
                    }
                }
                if (pt_.x <= h - 2) {
                    ind = (pt_.x + 1)*w + pt_.y;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x + 1, pt_.y)); // bottom
                        data_id[ind] = uid;
                        if (data_i[ind]) { // black holes do not count
                            counts[uid - 1]++;
                            avgs[uid - 1] += data_i[ind];
                        }
                    }
                }
            }
        }
    }
    // turn per-domain sums into means; under-supported domains get 0
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    for (size_t i = 0; i < counts.size(); ++i) {
        if ( counts[i] >= min_support ) avgs[i] /= counts[i];
        else avgs[i] = 0;
    }

    // check out all connected domains
    int stride = 0;
    if (*thread_state != THREAD_STATE_PLAY) return 0;
    for (int i = 0; i < h; ++i) {
        if (*thread_state != THREAD_STATE_PLAY) return 0;
        for (int j = 0; j < w; ++j) {
            ind = stride + j;
            if (data_id[ind] && (!use_mask || !data_i[ind]))
                data_o[ind] = avgs[data_id[ind] - 1];
            else data_o[ind] = data_i[ind];
        }
        stride += w;
    }
    return 1;
}


// Computes a combined Canny edge map of i_ into o_.
// CV_8UC3 input: Canny runs on each channel and the three maps are OR-ed.
// CV_8UC1 input: a single Canny pass writes directly to o_.
// thres_selc / thres_conn are forwarded to cv::Canny as the high (selection)
// and low (connection) hysteresis thresholds respectively.
// Exits the process on any other input type.
void tcl_depth_video::GetFullEdges(cv::Mat & o_, const cv::Mat & i_,
                  int thres_selc, int thres_conn) {
    if (i_.type() == CV_8UC3) {
        // temporaries are only needed for the 3-channel path
        std::vector<cv::Mat> channels, edges;
        cv::split(i_, channels);
        edges.resize(channels.size());
        cv::Canny(channels[0], edges[0], thres_conn, thres_selc);
        cv::Canny(channels[1], edges[1], thres_conn, thres_selc);
        cv::Canny(channels[2], edges[2], thres_conn, thres_selc);
        cv::bitwise_or(edges[0], edges[1], edges[0]);
        cv::bitwise_or(edges[0], edges[2], edges[0]);
        edges[0].copyTo(o_);
    }
    else if (i_.type() == CV_8UC1) {
        // single channel: no temporaries needed
        cv::Canny(i_, o_, thres_conn, thres_selc);
    }
    else {
        VIDEO_DEPTH_LOGE("[GetFullEdges]: illegal inputs");
        exit(-1);
    }
}


// Computes a combined Canny edge map from separate Y/U/V planes into o_:
// Canny runs on each plane and the three edge maps are OR-ed together.
// All three planes must be CV_8UC1; otherwise the process exits.
// thres_selc / thres_conn are forwarded to cv::Canny as the high (selection)
// and low (connection) hysteresis thresholds respectively.
void tcl_depth_video::GetFullEdgesYUV(
	cv::Mat & o_, 
	const cv::Mat & Y,
	const cv::Mat & U,
	const cv::Mat & V,
	int thres_selc, 
	int thres_conn
	) {
	if (Y.type() == CV_8UC1 && U.type() == CV_8UC1 && V.type() == CV_8UC1) {
		std::vector<cv::Mat> edges; // one edge map per plane
		edges.resize(3);
		cv::Canny(Y, edges[0], thres_conn, thres_selc);
		cv::Canny(U, edges[1], thres_conn, thres_selc);
		cv::Canny(V, edges[2], thres_conn, thres_selc);
		cv::bitwise_or(edges[0], edges[1], edges[0]);
		cv::bitwise_or(edges[0], edges[2], edges[0]);
		edges[0].copyTo(o_);
	}
	else {
		VIDEO_DEPTH_LOGE("[GetFullEdgesYUV]: illegal inputs");
		exit(-1);
	}
}


// Labels edge-bounded 4-connected domains inside visible_zone.
// domain_map receives a CV_16SC1 label image (0 = unlabeled / edge, 1..uid =
// domain id); counts[id-1] receives the pixel count of each domain. A pixel
// can seed or extend a domain only when its full (2r+1)^2 neighborhood is
// edge-free. Exits the process on a non-CV_8UC1 edge map or when the
// per-domain counters exceed the zone area (internal consistency check).
//
// domain_map   : output label image, re-created when type or size mismatch
// counts       : output per-domain pixel counts (cleared first)
// visible_zone : rectangle restricting the search (x/width = cols, y/height = rows)
// edge         : CV_8UC1 edge mask (non-zero = edge)
void tcl_depth_video::FindConnectedDomain(
        cv::Mat & domain_map,
        std::vector<int> & counts,
        cv::Rect & visible_zone,
        const cv::Mat & edge
) {
    if (edge.type() != CV_8UC1) {
        VIDEO_DEPTH_LOGE("Error: expect input edge in CV_8UC1!");
        exit(-1);
    }
    // BUGFIX: was `&&` — a map of the right type but wrong size was never
    // re-created, causing out-of-bounds writes; recreate on EITHER mismatch
    // (same condition style as SmoothDisparity).
    if (domain_map.type() != CV_16SC1 || domain_map.size() != edge.size()) {
        domain_map.create(edge.size(), CV_16SC1);
    }

    int h, w;
    h = edge.size().height;
    w = edge.size().width;

    int bound_x[2], bound_y[2];

    // half-open bounds of the search zone: x in [bound_x[0], bound_x[1]),
    // y in [bound_y[0], bound_y[1])
    bound_x[0] = visible_zone.x;
    bound_x[1] = visible_zone.x + visible_zone.width;
    bound_y[0] = visible_zone.y;
    bound_y[1] = visible_zone.y + visible_zone.height;

    const unsigned char * data_edge = (unsigned char *)(edge.data);

    // create a label matrix to store labels for every pixel in main view
    short * data_id = (short *)(domain_map.data);
    std::vector<cv::Point2i> nodes; // DFS stack for the flood fill
    nodes.reserve(h * w / 4);
    nodes.resize(0);
    counts.reserve(h * w / 4);
    counts.resize(0);

    int uid = 0;
    const int r = 1;
    int x_min, x_max, y_min, y_max;
    bool flag;

    // local vars
    cv::Point2i pt_;
    int ind;

    // clear all labels (including pixels outside the visible zone)
    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            data_id[i * w + j] = 0;
        }
    }

    for (int i = bound_y[0]; i < bound_y[1]; ++i) {
        for (int j = bound_x[0]; j < bound_x[1]; ++j) {
            ind = i * w + j;
            if (data_edge[ind] || data_id[ind]) continue;
            // check if this point be a potential source: its full (2r+1)^2
            // neighborhood (clipped to the zone) must be edge-free
            x_min = std::max(bound_x[0], j - r);
            x_max = std::min(bound_x[1] - 1, j + r);
            y_min = std::max(bound_y[0], i - r);
            y_max = std::min(bound_y[1] - 1, i + r);
            flag = true;

            for (int s = y_min; s <= y_max && flag; ++s) {
                for (int t = x_min; t <= x_max; ++t) {
                    if (data_edge[s*w + t]) {
                        flag = false;
                        break;
                    }
                }
            }
            if (!flag) continue;
            // begin to search connected domains
            uid++;
            counts.push_back(0);
            // search for connected domain
            nodes.resize(0);
            nodes.push_back(cv::Point2i(j, i)); // center; Point2i here stores (col, row)
            data_id[ind] = uid;
            counts[uid - 1]++;

            while (nodes.size() > 0) {
                pt_ = nodes.back();
                nodes.pop_back();
                // check if this point be a potential source
                x_min = std::max(bound_x[0], pt_.x - r);
                x_max = std::min(bound_x[1] - 1, pt_.x + r);
                y_min = std::max(bound_y[0], pt_.y - r);
                y_max = std::min(bound_y[1] - 1, pt_.y + r);
                flag = true;

                for (int s = y_min; s <= y_max && flag; ++s) {
                    for (int t = x_min; t <= x_max; ++t) {
                        if (data_edge[s*w + t]) {
                            flag = false;
                            break;
                        }
                    }
                }

                if (!flag) continue;

                // search for 4 neighbors
                if (pt_.y >= bound_y[0] + 1) {
                    ind = (pt_.y - 1)*w + pt_.x;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x, pt_.y - 1)); // up
                        data_id[ind] = uid;
                        counts[uid - 1]++;
                    }
                }
                if (pt_.x >= bound_x[0] + 1) {
                    ind = pt_.y*w + pt_.x - 1;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x - 1, pt_.y)); // left
                        data_id[ind] = uid;
                        counts[uid - 1]++;
                    }
                }
                if (pt_.x <= bound_x[1] - 2) {
                    ind = pt_.y*w + pt_.x + 1;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x + 1, pt_.y)); // right
                        data_id[ind] = uid;
                        counts[uid - 1]++;
                    }
                }
                if (pt_.y <= bound_y[1] - 2) {
                    ind = (pt_.y + 1)*w + pt_.x;
                    if (!data_edge[ind] && !data_id[ind]) {
                        nodes.push_back(cv::Point2i(pt_.x, pt_.y + 1)); // bottom
                        data_id[ind] = uid;
                        counts[uid - 1]++;
                    }
                }
            }
        }
    }

    // check if counter works well
    int num_pts = 0;
    for (size_t i = 0; i < counts.size(); ++i) {
        num_pts += counts[i];
    }
    if (num_pts > visible_zone.area()) {
        VIDEO_DEPTH_LOGE("[FindConnectedDomains]: counters error!");
        exit(-1);
    }
}


// Renders a CV_16SC1 domain-label image as a CV_8UC3 color image: domain id
// N (> 0) is painted with palette entry N % 7, everything else is black.
// num_domain is currently unused (the palette cycles regardless of count).
void tcl_depth_video::visualize_domains(cv::Mat & rgb, const cv::Mat & domains, int num_domain) {
    // fixed 7-color palette: red, orange, yellow, green, olive, blue, purple
    static const unsigned char kPalette[7][3] = {
            {0xff, 0x00, 0x00}, // red
            {0xff, 0x7f, 0x00}, // orange
            {0xff, 0xff, 0x00}, // yellow
            {0x00, 0xff, 0x00}, // green
            {0x00, 0xff, 0xff}, // olive
            {0x00, 0x00, 0xff}, // blue
            {0x88, 0x00, 0xff}  // purple
    };

    const int rows = domains.size().height;
    const int cols = domains.size().width;

    rgb.create(domains.size(), CV_8UC3);
    unsigned char * dst = (unsigned char *)(rgb.data);
    const short * ids = (const short *)(domains.data);

    for (int y = 0; y < rows; ++y) {
        const int row_off = y * cols;
        for (int x = 0; x < cols; ++x) {
            const int id = ids[row_off + x];
            unsigned char * px = dst + 3 * (row_off + x);
            if (id > 0) {
                const unsigned char * color = kPalette[id % 7];
                px[0] = color[0];
                px[1] = color[1];
                px[2] = color[2];
            } else {
                // unlabeled / background pixel -> black
                px[0] = 0;
                px[1] = 0;
                px[2] = 0;
            }
        }
    }
}


// Color-guided smoothing of a depth map: each output pixel is the mean of
// the 4 axis-aligned neighbors (at distance r) whose color distance to the
// center is at most the mean neighbor distance. At least the closest
// neighbor always qualifies (min <= mean), so the divisor is never zero.
// Branch-free flag arithmetic, SIMD128-friendly, same scheme as
// Erode_v3/Dilate_v3 but without the depth-ordering weight.
//
// o_  : CV_8UC1 output, (re)created when size/type differ from i_
// i_  : CV_8UC1 input depth/disparity map
// rgb : CV_8UC3 guide image, same size as i_
// r   : neighbor offset in pixels; neighbors are clamped at the borders
void tcl_depth_video::ColorBasedFilter(
        cv::Mat & o_,
        const cv::Mat & i_,
        const cv::Mat & rgb,
        int r
) {
    if (i_.type() != CV_8UC1 ||
        rgb.type() != CV_8UC3 ||
        i_.size() != rgb.size()) {
        VIDEO_DEPTH_LOGE("[ColorBasedFilter]: illegal inputs");
        exit(-1);
    }
    if (o_.size() != i_.size() || o_.type() != i_.type()) {
        o_.create(i_.size(), i_.type());
    }

    unsigned char * p_o = o_.data;
    unsigned char * p_i = i_.data;
    const unsigned char * p_c = rgb.data;

    int h, w;
    h = i_.size().height;
    w = i_.size().width;
    unsigned char v[4]; // depth of neighbors
    unsigned char c[3]; // color of origin
    unsigned short d_u16[4]; // color distance in u16
    unsigned char d_u8[4]; // color distance in u8
    unsigned char d_mean_u8; // mean color distance in u8
    unsigned short sum_u16; // sum of weights in u16
    unsigned short v_u16; // the weighted sum of depth
    int addr_[4], addr_0;
    unsigned char zero_flag; // 0/1 flag for the border-clamp arithmetic

    // use non-uniform weighted sum to replace min/max
    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            // origin
            addr_0 = i * w + j;
            addr_0 *= 3;
            c[0] = p_c[addr_0];
            c[1] = p_c[addr_0 + 1];
            c[2] = p_c[addr_0 + 2];
            // neighbors: up/down/left/right at distance r, clamped to the border
            zero_flag = i >= r;
            addr_[0] = zero_flag * (i - r) * w + j;
            zero_flag = i + r < h;
            addr_[1] = (zero_flag * (i + r) + (1 - zero_flag) * (h - 1)) * w + j;
            zero_flag = j >= r;
            addr_[2] = i * w + zero_flag * (j - r);
            zero_flag = j + r < w;
            addr_[3] = i * w + zero_flag * (j + r) + (1 - zero_flag) * (w - 1);
            v[0] = p_i[addr_[0]];
            v[1] = p_i[addr_[1]];
            v[2] = p_i[addr_[2]];
            v[3] = p_i[addr_[3]];
            // channel R
            addr_[0] *= 3;
            addr_[1] *= 3;
            addr_[2] *= 3;
            addr_[3] *= 3;
            d_u16[0] = std::abs(p_c[addr_[0]] - c[0]);
            d_u16[1] = std::abs(p_c[addr_[1]] - c[0]);
            d_u16[2] = std::abs(p_c[addr_[2]] - c[0]);
            d_u16[3] = std::abs(p_c[addr_[3]] - c[0]);
            // channel G
            addr_[0] ++;
            addr_[1] ++;
            addr_[2] ++;
            addr_[3] ++;
            d_u16[0] += std::abs(p_c[addr_[0]] - c[1]);
            d_u16[1] += std::abs(p_c[addr_[1]] - c[1]);
            d_u16[2] += std::abs(p_c[addr_[2]] - c[1]);
            d_u16[3] += std::abs(p_c[addr_[3]] - c[1]);
            // channel B
            addr_[0] ++;
            addr_[1] ++;
            addr_[2] ++;
            addr_[3] ++;
            // BUGFIX: compared against c[1] (green) instead of c[2] (blue),
            // unlike Erode_v3/Dilate_v3 — classic copy-paste defect.
            d_u16[0] += std::abs(p_c[addr_[0]] - c[2]);
            d_u16[1] += std::abs(p_c[addr_[1]] - c[2]);
            d_u16[2] += std::abs(p_c[addr_[2]] - c[2]);
            d_u16[3] += std::abs(p_c[addr_[3]] - c[2]);
            // normalize color distance to uint8
            d_u8[0] = d_u16[0] >> 2;
            d_u8[1] = d_u16[1] >> 2;
            d_u8[2] = d_u16[2] >> 2;
            d_u8[3] = d_u16[3] >> 2;
            // get average of color distances
            sum_u16 = d_u8[0];
            sum_u16 += d_u8[1];
            sum_u16 += d_u8[2];
            sum_u16 += d_u8[3];
            d_mean_u8 = sum_u16 >> 2;
            // polarize color distances: 1 = color close enough to the center
            d_u8[0] = d_u8[0] <= d_mean_u8;
            d_u8[1] = d_u8[1] <= d_mean_u8;
            d_u8[2] = d_u8[2] <= d_mean_u8;
            d_u8[3] = d_u8[3] <= d_mean_u8;
            // (removed: a depth-average computation whose result was never
            // read — dead code inherited from Erode_v3/Dilate_v3)
            // sum of weights; >= 1 because the smallest distance is <= the mean
            sum_u16 = d_u8[0];
            sum_u16 += d_u8[1];
            sum_u16 += d_u8[2];
            sum_u16 += d_u8[3];
            // get the final weighted depth
            v_u16 = v[0] * d_u8[0];
            v_u16 += v[1] * d_u8[1];
            v_u16 += v[2] * d_u8[2];
            v_u16 += v[3] * d_u8[3];
            v_u16 /= sum_u16;
            p_o[i * w + j] = v_u16;
        }
    }
}


// Parallel body implementing one pass of RGB-guided depth "erosion",
// vectorised with OpenCV universal intrinsics (8 output pixels per loop
// iteration). For each pixel, the four axis neighbours at distance `r` are
// examined; a neighbour's depth contributes to the output only when BOTH
//   (a) its colour distance to the centre is <= the (thr_-capped) mean of the
//       four colour distances, and
//   (b) its depth is <= the mean of the four neighbour depths (erosion:
//       prefer the smaller depths).
// The output pixel is the average of the surviving neighbour depths, or the
// original input depth when no neighbour survives.
class tcl_depth_video::Erode_v3_fastest_class : public cv::ParallelLoopBody
{
public:
    // Shallow-copies the Mat headers (shared pixel data) and precomputes the
    // pointer offsets used for neighbour addressing and border clamping.
    //   o  : output depth map (CV_8UC1), same size as i
    //   i  : input depth map (CV_8UC1)
    //   rgb_: guide colour image (CV_8UC3), same size as i
    //   r_ : neighbour distance in pixels
    //   thread_num_: stored but no longer used by operator() (legacy of the
    //                commented-out manual row-striping scheme)
    //   thr_: upper cap for the mean colour distance (smaller => stricter
    //         colour test in flat regions)
    Erode_v3_fastest_class(cv::Mat & o,
                           const cv::Mat &i,
                           const cv::Mat &rgb_,
                           int r_,
                           int thread_num_,
                           uint8_t thr_
    ) {
        o_ = o;
        i_ = i;
        rgb = rgb_;
        r = r_;
        this->thr_ = thr_;

        h = i_.size().height;
        w = i_.size().width;

        i_step = i.step[0];
        rgb_step = rgb_.step[0];
        // Right-border guard distance: at least one full SIMD batch (8 px).
        left_r = std::max(r, 8);

        // Precomputed byte offsets:
        //   r_x_w / r_x_3w : r rows down/up in the depth / rgb image
        //   r_x_3          : r pixels left/right in the rgb image (3 B/px)
        //   w___r_x_3, w___r: clamped right-border start (w - left_r)
        //   h___1_x_3w, h___1_x_w: start of the last row
        r_x_w = r * i.step[0], r_x_3w = r * rgb_.step[0];
        r_x_3 = r * 3, w___r_x_3 = (w - left_r) * 3, h___1_x_3w = (h - 1) * rgb_.step[0];
        w___r = (w - left_r), h___1_x_w = (h - 1) * i.step[0];

        p_o_org = o_.data;
        p_i_org = i_.data;
        p_rgb_org = rgb.data;

        this->thread_num = thread_num_;
    }
    // Processes rows [range.start, range.end); invoked by cv::parallel_for_.
    // NOTE(review): the j-loop advances by 8 and unconditionally stores 8
    // output bytes — this presumably requires w to be a multiple of 8 (or at
    // least padded rows); confirm with the callers.
    virtual void operator ()(const cv::Range& range) const
    {
        //for (int t = range.start; t < range.end; ++t)
        {
            //for(int i = t; i < h; i+=thread_num)
            for (int i = range.start; i < range.end; ++i)
            {
                // NOTE(review): step_i / step_rgb are unused (i_step/rgb_step
                // members are used instead).
                size_t step_i = i_.step[0];
                size_t step_rgb = rgb.step[0];

                // SIMD registers: centre pixel and its 4 neighbours, one set
                // per colour channel (int16 lanes so v_absdiff is exact).
                // (translated from: "SIMD grey-image centre point")
                cv::v_int16x8 r_center_int16x8;
                cv::v_int16x8 r_top_int16x8;
                cv::v_int16x8 r_bottom_int16x8;
                cv::v_int16x8 r_left_int16x8;
                cv::v_int16x8 r_right_int16x8;
                cv::v_int16x8 r_mean_int16x8;

                cv::v_int16x8 g_center_int16x8;
                cv::v_int16x8 g_top_int16x8;
                cv::v_int16x8 g_bottom_int16x8;
                cv::v_int16x8 g_left_int16x8;
                cv::v_int16x8 g_right_int16x8;
                cv::v_int16x8 g_mean_int16x8;

                cv::v_int16x8 b_center_int16x8;
                cv::v_int16x8 b_top_int16x8;
                cv::v_int16x8 b_bottom_int16x8;
                cv::v_int16x8 b_left_int16x8;
                cv::v_int16x8 b_right_int16x8;
                cv::v_int16x8 b_mean_int16x8;

                // Reused first for colour distances, then for neighbour
                // depths (see the two phases below).
                cv::v_uint16x8 v_center_depth_uint16x8, v_top_depth_uint16x8,
                        v_bottom_depth_uint16x8, v_left_depth_uint16x8,
                        v_right_depth_uint16x8, v_mean_depth_uint16x8,
                        v_ones_uint16x8, v_zeros_uint16x8, v_weight_uint16x8, v_new_depth_uint16x8;

                // Per-neighbour masks: all-ones lanes where the neighbour
                // passes the (colour AND depth) tests.
                cv::v_uint16x8 top_flag, bottom_flag, left_flag, right_flag, tmp_flag;

                cv::v_uint16x8 tmp_left_uint16x8, tmp_right_uint16x8;
                cv::v_uint8x16 tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16;

                v_ones_uint16x8 = cv::v_setall_u16(1);
                v_zeros_uint16x8 = cv::v_setall_u16(0);


                unsigned char * p_o_line = p_o_org + i * i_step;
                unsigned char * p_i_line = p_i_org + i * i_step;
                unsigned char * p_rgb_line = p_rgb_org + i * rgb_step;
                unsigned char * p_o = p_o_line;
                unsigned char * p_i = p_i_line;
                unsigned char * p_rgb = p_rgb_line;
                unsigned char * p_tmp;

                // Scalar spill area for the final per-lane division (the
                // universal intrinsics have no integer division).
                ushort v_new_depth_uint16_arr8[8], weight_depth_uint16_arr8[8];
                ushort *v_new_depth_uint16_arr8_pt0 = &(v_new_depth_uint16_arr8[0]);
                ushort *v_new_depth_uint16_arr8_pt1 = &(v_new_depth_uint16_arr8[1]);
                ushort *v_new_depth_uint16_arr8_pt2 = &(v_new_depth_uint16_arr8[2]);
                ushort *v_new_depth_uint16_arr8_pt3 = &(v_new_depth_uint16_arr8[3]);
                ushort *v_new_depth_uint16_arr8_pt4 = &(v_new_depth_uint16_arr8[4]);
                ushort *v_new_depth_uint16_arr8_pt5 = &(v_new_depth_uint16_arr8[5]);
                ushort *v_new_depth_uint16_arr8_pt6 = &(v_new_depth_uint16_arr8[6]);
                ushort *v_new_depth_uint16_arr8_pt7 = &(v_new_depth_uint16_arr8[7]);

                ushort *weight_depth_uint16_arr8_pt0 = &(weight_depth_uint16_arr8[0]);
                ushort *weight_depth_uint16_arr8_pt1 = &(weight_depth_uint16_arr8[1]);
                ushort *weight_depth_uint16_arr8_pt2 = &(weight_depth_uint16_arr8[2]);
                ushort *weight_depth_uint16_arr8_pt3 = &(weight_depth_uint16_arr8[3]);
                ushort *weight_depth_uint16_arr8_pt4 = &(weight_depth_uint16_arr8[4]);
                ushort *weight_depth_uint16_arr8_pt5 = &(weight_depth_uint16_arr8[5]);
                ushort *weight_depth_uint16_arr8_pt6 = &(weight_depth_uint16_arr8[6]);
                ushort *weight_depth_uint16_arr8_pt7 = &(weight_depth_uint16_arr8[7]);

                for (int j = 0; j < w; j += 8) {
                    // ---- Phase 1: colour distances from the guide image ----
                    // (translated from: "r channel")
                    // v_load_deinterleave reads 16 pixels; only the low 8
                    // lanes of each v_expand (tmp_left) are used per batch.
                    cv::v_load_deinterleave(p_rgb, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                    cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    r_center_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    g_center_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    b_center_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                    // Top neighbour; near the top border fall back to row 0.
                    p_tmp = i >= r ? p_rgb - r_x_3w : p_rgb_org + (p_rgb - p_rgb_line);
                    cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                    cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    r_top_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    g_top_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    b_top_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                    // Bottom neighbour; near the bottom border use the last row.
                    p_tmp = i < h - r ? p_rgb + r_x_3w : p_rgb_org + h___1_x_3w + (p_rgb - p_rgb_line);
                    cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                    cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    r_bottom_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    g_bottom_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    b_bottom_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                    // Left neighbour; near the left border use column 0.
                    p_tmp = j >= r ? p_rgb - r_x_3 : p_rgb_line;
                    //p_tmp = p_rgb - r_x_3;
                    cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                    cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    r_left_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    g_left_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    b_left_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                    // Right neighbour; near the right border use the clamped
                    // start (w - left_r) so the 16-byte load stays in the row.
                    p_tmp = j + left_r < w ? p_rgb + r_x_3 : p_rgb_line + w___r_x_3;
                    //p_tmp = p_rgb + r_x_3;
                    cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                    cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    r_right_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    g_right_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                    b_right_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                    // L1 colour distance per neighbour (sum of per-channel
                    // absolute differences, max 3*255, fits uint16 lanes).
                    v_top_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_top_int16x8) +
                                           cv::v_absdiff(g_center_int16x8, g_top_int16x8) +
                                           cv::v_absdiff(b_center_int16x8, b_top_int16x8);
                    v_bottom_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_bottom_int16x8) +
                                              cv::v_absdiff(g_center_int16x8, g_bottom_int16x8) +
                                              cv::v_absdiff(b_center_int16x8, b_bottom_int16x8);
                    v_left_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_left_int16x8) +
                                            cv::v_absdiff(g_center_int16x8, g_left_int16x8) +
                                            cv::v_absdiff(b_center_int16x8, b_left_int16x8);
                    v_right_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_right_int16x8) +
                                             cv::v_absdiff(g_center_int16x8, g_right_int16x8) +
                                             cv::v_absdiff(b_center_int16x8, b_right_int16x8);

                    // Scale distances down to ~uint8 range (divide by 4).
                    v_top_depth_uint16x8 = v_top_depth_uint16x8 >> 2;
                    v_bottom_depth_uint16x8 = v_bottom_depth_uint16x8 >> 2;
                    v_left_depth_uint16x8 = v_left_depth_uint16x8 >> 2;
                    v_right_depth_uint16x8 = v_right_depth_uint16x8 >> 2;

                    // Mean of the four colour distances...
                    v_mean_depth_uint16x8 = (v_top_depth_uint16x8
                                             + v_bottom_depth_uint16x8
                                             + v_left_depth_uint16x8
                                             + v_right_depth_uint16x8) >> 2;

                    // ...capped at thr_, so neighbours whose distance exceeds
                    // thr_ are always rejected (stricter test in flat areas).
                    cv::v_uint16x8 v_min_color_diff = cv::v_setall_u16(thr_);
                    v_mean_depth_uint16x8 = cv::v_min(v_mean_depth_uint16x8, v_min_color_diff);

                    // Colour test: neighbour passes when its distance is at
                    // most the capped mean.
                    top_flag = v_top_depth_uint16x8 <= v_mean_depth_uint16x8;
                    bottom_flag = v_bottom_depth_uint16x8 <= v_mean_depth_uint16x8;
                    left_flag = v_left_depth_uint16x8 <= v_mean_depth_uint16x8;
                    right_flag = v_right_depth_uint16x8 <= v_mean_depth_uint16x8;

                    // ---- Phase 2: neighbour depths (same border clamping) ----
                    // (translated from: "depth map")
                    //v_center_depth_uint16x8 = cv::v_load_expand(p_i);

                    p_tmp = i >= r ? p_i - r_x_w : p_i_org + j;
                    v_top_depth_uint16x8 = cv::v_load_expand(p_tmp);

                    p_tmp = i < h - r ? p_i + r_x_w : p_i_org + h___1_x_w + j;
                    v_bottom_depth_uint16x8 = cv::v_load_expand(p_tmp);

                    p_tmp = j >= r ? p_i - r : p_i_line;
                    //p_tmp = p_i - r;
                    v_left_depth_uint16x8 = cv::v_load_expand(p_tmp);

                    p_tmp = j + left_r < w ? p_i + r : p_i_line + w___r;
                    //p_tmp = p_i + r;
                    v_right_depth_uint16x8 = cv::v_load_expand(p_tmp);

                    // Mean of the four neighbour depths.
                    v_mean_depth_uint16x8 = v_top_depth_uint16x8 + v_bottom_depth_uint16x8 + v_left_depth_uint16x8 + v_right_depth_uint16x8;
                    v_mean_depth_uint16x8 = v_mean_depth_uint16x8 >> 2;


                    // Depth test (erosion: keep depths <= mean), ANDed with
                    // the colour test. Dilate_v3_fastest_class uses >= here.
                    top_flag = top_flag & (v_top_depth_uint16x8 <= v_mean_depth_uint16x8);
                    bottom_flag = bottom_flag & (v_bottom_depth_uint16x8 <= v_mean_depth_uint16x8);
                    left_flag = left_flag & (v_left_depth_uint16x8 <= v_mean_depth_uint16x8);
                    right_flag = right_flag & (v_right_depth_uint16x8 <= v_mean_depth_uint16x8);


                    // Sum of the surviving neighbour depths...
                    v_new_depth_uint16x8 = cv::v_select(top_flag, v_top_depth_uint16x8, v_zeros_uint16x8);
                    v_new_depth_uint16x8 += cv::v_select(bottom_flag, v_bottom_depth_uint16x8, v_zeros_uint16x8);
                    v_new_depth_uint16x8 += cv::v_select(left_flag, v_left_depth_uint16x8, v_zeros_uint16x8);
                    v_new_depth_uint16x8 += cv::v_select(right_flag, v_right_depth_uint16x8, v_zeros_uint16x8);

                    // ...and their count (0..4) per lane.
                    v_weight_uint16x8 = cv::v_select(top_flag, v_ones_uint16x8, v_zeros_uint16x8);
                    v_weight_uint16x8 += cv::v_select(bottom_flag, v_ones_uint16x8, v_zeros_uint16x8);
                    v_weight_uint16x8 += cv::v_select(left_flag, v_ones_uint16x8, v_zeros_uint16x8);
                    v_weight_uint16x8 += cv::v_select(right_flag, v_ones_uint16x8, v_zeros_uint16x8);

                    // Spill to scalars: universal intrinsics have no integer
                    // division. (translated from: "T_T ... no division")

                    cv::v_store(v_new_depth_uint16_arr8, v_new_depth_uint16x8);
                    cv::v_store(weight_depth_uint16_arr8, v_weight_uint16x8);
#ifndef MIN_SUPPORT
#define MIN_SUPPORT 1
#endif
                    // Average of surviving neighbours; fall back to the input
                    // pixel when fewer than MIN_SUPPORT neighbours survived.
                    *p_o = (*weight_depth_uint16_arr8_pt0 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt0) / (*weight_depth_uint16_arr8_pt0) : *p_i;
                    *(p_o + 1) = (*weight_depth_uint16_arr8_pt1 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt1) / (*weight_depth_uint16_arr8_pt1) : *(p_i + 1);
                    *(p_o + 2) = (*weight_depth_uint16_arr8_pt2 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt2) / (*weight_depth_uint16_arr8_pt2) : *(p_i + 2);
                    *(p_o + 3) = (*weight_depth_uint16_arr8_pt3 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt3) / (*weight_depth_uint16_arr8_pt3) : *(p_i + 3);
                    *(p_o + 4) = (*weight_depth_uint16_arr8_pt4 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt4) / (*weight_depth_uint16_arr8_pt4) : *(p_i + 4);
                    *(p_o + 5) = (*weight_depth_uint16_arr8_pt5 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt5) / (*weight_depth_uint16_arr8_pt5) : *(p_i + 5);
                    *(p_o + 6) = (*weight_depth_uint16_arr8_pt6 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt6) / (*weight_depth_uint16_arr8_pt6) : *(p_i + 6);
                    *(p_o + 7) = (*weight_depth_uint16_arr8_pt7 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt7) / (*weight_depth_uint16_arr8_pt7) : *(p_i + 7);

                    // Advance 8 pixels (24 bytes in the 3-channel guide).
                    p_rgb += 24;
                    p_o += 8;
                    p_i += 8;
                }
            }
        }
    }

    // No-op copy-assignment. NOTE(review): this discards the source object —
    // presumably only present to silence a compiler/ParallelLoopBody warning;
    // verify it is never relied upon for actual copying.
    Erode_v3_fastest_class& operator=(const Erode_v3_fastest_class &) {
        return *this;
    };

private:
    cv::Mat o_;                    // output depth map (shares data with caller's Mat)
    cv::Mat i_;                    // input depth map
    cv::Mat rgb;                   // guide colour image
    int r, thread_num;             // neighbour radius; legacy thread count (unused)
    int h, w;                      // image height / width in pixels
    size_t i_step, rgb_step;       // row strides in bytes
    size_t r_x_w, r_x_3w;          // r-row offsets (depth / rgb)
    size_t r_x_3, w___r_x_3, h___1_x_3w;  // r-pixel offset; clamped right/bottom offsets (rgb)
    size_t w___r, h___1_x_w;       // clamped right/bottom offsets (depth)
    size_t left_r;                 // max(r, 8): border guard for 8-wide batches
    unsigned char * p_o_org;       // raw data pointers cached from the Mats
    unsigned char * p_i_org;
    unsigned char * p_rgb_org;
    uint8_t thr_;                  // cap for the mean colour distance
};


// Multi-threaded guided depth erosion.
//   o_         : output depth map (CV_8UC1), (re)allocated if needed
//   i_         : input depth map (CV_8UC1)
//   rgb        : guide colour image (CV_8UC3), same size as i_
//   r          : neighbour distance in pixels
//   threads_num: number of worker threads for cv::parallel_for_
//   threshold  : cap for the mean colour distance (see Erode_v3_fastest_class)
// Terminates the process on malformed inputs, mirroring the other *_v3 APIs.
void tcl_depth_video::Erode_v3_fastest(
        cv::Mat & o_,
        const cv::Mat & i_,
        const cv::Mat & rgb,
        int r,
        int threads_num,
        uint8_t threshold
) {
    const bool inputs_ok =
            i_.type() == CV_8UC1 &&
            rgb.type() == CV_8UC3 &&
            i_.size() == rgb.size();
    if (!inputs_ok) {
        VIDEO_DEPTH_LOGE("[Erode_v3_fastest]");
        exit(-1);
    }
    // cv::Mat::create is a no-op when size and type already match.
    o_.create(i_.size(), i_.type());

    // Temporarily override OpenCV's thread pool size, restore afterwards.
    const int saved_threads = cv::getNumThreads();
    cv::setNumThreads(threads_num);
    Erode_v3_fastest_class body(o_, i_, rgb, r, threads_num, threshold);
    cv::parallel_for_(cv::Range(0, i_.rows), body);
    cv::setNumThreads(saved_threads);
}



// Parallel body implementing one pass of RGB-guided depth "dilation",
// vectorised with OpenCV universal intrinsics (8 output pixels per loop
// iteration). Mirror image of Erode_v3_fastest_class: the colour test is
// identical, but the depth test keeps neighbours whose depth is >= the mean
// of the four neighbour depths (dilation: prefer the larger depths).
class tcl_depth_video::Dilate_v3_fastest_class : public cv::ParallelLoopBody
{
public:
    // Shallow-copies the Mat headers (shared pixel data) and precomputes the
    // pointer offsets used for neighbour addressing and border clamping.
    //   o   : output depth map (CV_8UC1), same size as i
    //   i   : input depth map (CV_8UC1)
    //   rgb_: guide colour image (CV_8UC3), same size as i
    //   r_  : neighbour distance in pixels
    //   thr_: upper cap for the mean colour distance
    Dilate_v3_fastest_class(cv::Mat & o,
                            const cv::Mat &i,
                            const cv::Mat &rgb_,
                            int r_,
                            uint8_t thr_
    ) {
        o_ = o;
        i_ = i;
        rgb = rgb_;
        r = r_;
        this->thr_ = thr_;

        h = i_.size().height;
        w = i_.size().width;

        i_step = i.step[0];
        rgb_step = rgb_.step[0];

        // Right-border guard distance: at least one full SIMD batch (8 px).
        left_r = std::max(r, 8);
        // Precomputed byte offsets (see Erode_v3_fastest_class for naming).
        r_x_w = r * i.step[0], r_x_3w = r * rgb_.step[0];
        r_x_3 = r * 3, w___r_x_3 = (w - left_r) * 3, h___1_x_3w = (h - 1) * rgb_.step[0];
        w___r = (w - left_r), h___1_x_w = (h - 1) * i.step[0];

        p_o_org = o_.data;
        p_i_org = i_.data;
        p_rgb_org = rgb.data;
    }
    // Processes rows [range.start, range.end); invoked by cv::parallel_for_.
    // NOTE(review): the j-loop advances by 8 and unconditionally stores 8
    // output bytes — this presumably requires w to be a multiple of 8 (or at
    // least padded rows); confirm with the callers.
    virtual void operator ()(const cv::Range& range) const
    {
        for (int i = range.start; i < range.end; ++i)
        {
            // NOTE(review): step_i / step_rgb are unused (i_step/rgb_step
            // members are used instead).
            size_t step_i = i_.step[0];
            size_t step_rgb = rgb.step[0];

            // SIMD registers: centre pixel and its 4 neighbours, one set per
            // colour channel (int16 lanes so v_absdiff is exact).
            // (translated from: "SIMD grey-image centre point")
            cv::v_int16x8 r_center_int16x8;
            cv::v_int16x8 r_top_int16x8;
            cv::v_int16x8 r_bottom_int16x8;
            cv::v_int16x8 r_left_int16x8;
            cv::v_int16x8 r_right_int16x8;
            cv::v_int16x8 r_mean_int16x8;

            cv::v_int16x8 g_center_int16x8;
            cv::v_int16x8 g_top_int16x8;
            cv::v_int16x8 g_bottom_int16x8;
            cv::v_int16x8 g_left_int16x8;
            cv::v_int16x8 g_right_int16x8;
            cv::v_int16x8 g_mean_int16x8;

            cv::v_int16x8 b_center_int16x8;
            cv::v_int16x8 b_top_int16x8;
            cv::v_int16x8 b_bottom_int16x8;
            cv::v_int16x8 b_left_int16x8;
            cv::v_int16x8 b_right_int16x8;
            cv::v_int16x8 b_mean_int16x8;

            // Reused first for colour distances, then for neighbour depths.
            cv::v_uint16x8 v_center_depth_uint16x8, v_top_depth_uint16x8,
                    v_bottom_depth_uint16x8, v_left_depth_uint16x8,
                    v_right_depth_uint16x8, v_mean_depth_uint16x8,
                    v_ones_uint16x8, v_zeros_uint16x8, v_weight_uint16x8, v_new_depth_uint16x8;

            // Per-neighbour masks: all-ones lanes where the neighbour passes
            // the (colour AND depth) tests.
            cv::v_uint16x8 top_flag, bottom_flag, left_flag, right_flag, tmp_flag;

            cv::v_uint16x8 tmp_left_uint16x8, tmp_right_uint16x8;
            cv::v_uint8x16 tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16;

            v_ones_uint16x8 = cv::v_setall_u16(1);
            v_zeros_uint16x8 = cv::v_setall_u16(0);


            unsigned char * p_o_line = p_o_org + i * i_step;
            unsigned char * p_i_line = p_i_org + i * i_step;
            unsigned char * p_rgb_line = p_rgb_org + i * rgb_step;
            unsigned char * p_o = p_o_line;
            unsigned char * p_i = p_i_line;
            unsigned char * p_rgb = p_rgb_line;
            unsigned char * p_tmp;

            // Scalar spill area for the final per-lane division (the
            // universal intrinsics have no integer division).
            ushort v_new_depth_uint16_arr8[8], weight_depth_uint16_arr8[8];
            ushort *v_new_depth_uint16_arr8_pt0 = &(v_new_depth_uint16_arr8[0]);
            ushort *v_new_depth_uint16_arr8_pt1 = &(v_new_depth_uint16_arr8[1]);
            ushort *v_new_depth_uint16_arr8_pt2 = &(v_new_depth_uint16_arr8[2]);
            ushort *v_new_depth_uint16_arr8_pt3 = &(v_new_depth_uint16_arr8[3]);
            ushort *v_new_depth_uint16_arr8_pt4 = &(v_new_depth_uint16_arr8[4]);
            ushort *v_new_depth_uint16_arr8_pt5 = &(v_new_depth_uint16_arr8[5]);
            ushort *v_new_depth_uint16_arr8_pt6 = &(v_new_depth_uint16_arr8[6]);
            ushort *v_new_depth_uint16_arr8_pt7 = &(v_new_depth_uint16_arr8[7]);

            ushort *weight_depth_uint16_arr8_pt0 = &(weight_depth_uint16_arr8[0]);
            ushort *weight_depth_uint16_arr8_pt1 = &(weight_depth_uint16_arr8[1]);
            ushort *weight_depth_uint16_arr8_pt2 = &(weight_depth_uint16_arr8[2]);
            ushort *weight_depth_uint16_arr8_pt3 = &(weight_depth_uint16_arr8[3]);
            ushort *weight_depth_uint16_arr8_pt4 = &(weight_depth_uint16_arr8[4]);
            ushort *weight_depth_uint16_arr8_pt5 = &(weight_depth_uint16_arr8[5]);
            ushort *weight_depth_uint16_arr8_pt6 = &(weight_depth_uint16_arr8[6]);
            ushort *weight_depth_uint16_arr8_pt7 = &(weight_depth_uint16_arr8[7]);

            for (int j = 0; j < w; j += 8) {
                // ---- Phase 1: colour distances from the guide image ----
                // (translated from: "r channel")
                // v_load_deinterleave reads 16 pixels; only the low 8 lanes
                // of each v_expand (tmp_left) are used per batch.
                cv::v_load_deinterleave(p_rgb, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                r_center_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                g_center_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                b_center_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                // Top neighbour; near the top border fall back to row 0.
                p_tmp = i >= r ? p_rgb - r_x_3w : p_rgb_org + (p_rgb - p_rgb_line);
                cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                r_top_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                g_top_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                b_top_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                // Bottom neighbour; near the bottom border use the last row.
                p_tmp = i < h - r ? p_rgb + r_x_3w : p_rgb_org + h___1_x_3w + (p_rgb - p_rgb_line);
                cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                r_bottom_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                g_bottom_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                b_bottom_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                // Left neighbour; near the left border use column 0.
                p_tmp = j >= r ? p_rgb - r_x_3 : p_rgb_line;
                //p_tmp = p_rgb - r_x_3;
                cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                r_left_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                g_left_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                b_left_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);

                // Right neighbour; near the right border use the clamped
                // start (w - left_r) so the 16-byte load stays in the row.
                p_tmp = j + left_r < w ? p_rgb + r_x_3 : p_rgb_line + w___r_x_3;
                //p_tmp = p_rgb + r_x_3;
                cv::v_load_deinterleave(p_tmp, tmp_r_uint8x16, tmp_g_uint8x16, tmp_b_uint8x16);
                cv::v_expand(tmp_r_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                r_right_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_g_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                g_right_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                cv::v_expand(tmp_b_uint8x16, tmp_left_uint16x8, tmp_right_uint16x8);
                b_right_int16x8 = cv::v_reinterpret_as_s16(tmp_left_uint16x8);
                // L1 colour distance per neighbour (sum of per-channel
                // absolute differences, max 3*255, fits uint16 lanes).
                v_top_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_top_int16x8) +
                                       cv::v_absdiff(g_center_int16x8, g_top_int16x8) +
                                       cv::v_absdiff(b_center_int16x8, b_top_int16x8);
                v_bottom_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_bottom_int16x8) +
                                          cv::v_absdiff(g_center_int16x8, g_bottom_int16x8) +
                                          cv::v_absdiff(b_center_int16x8, b_bottom_int16x8);
                v_left_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_left_int16x8) +
                                        cv::v_absdiff(g_center_int16x8, g_left_int16x8) +
                                        cv::v_absdiff(b_center_int16x8, b_left_int16x8);
                v_right_depth_uint16x8 = cv::v_absdiff(r_center_int16x8, r_right_int16x8) +
                                         cv::v_absdiff(g_center_int16x8, g_right_int16x8) +
                                         cv::v_absdiff(b_center_int16x8, b_right_int16x8);

                // Scale distances down to ~uint8 range (divide by 4).
                v_top_depth_uint16x8 = v_top_depth_uint16x8 >> 2;
                v_bottom_depth_uint16x8 = v_bottom_depth_uint16x8 >> 2;
                v_left_depth_uint16x8 = v_left_depth_uint16x8 >> 2;
                v_right_depth_uint16x8 = v_right_depth_uint16x8 >> 2;

                // Mean of the four colour distances...
                v_mean_depth_uint16x8 = (v_top_depth_uint16x8
                                         + v_bottom_depth_uint16x8
                                         + v_left_depth_uint16x8
                                         + v_right_depth_uint16x8) >> 2;

                // ...capped at thr_, so neighbours whose distance exceeds
                // thr_ are always rejected (stricter test in flat areas).
                cv::v_uint16x8 v_min_color_diff = cv::v_setall_u16(thr_);
                v_mean_depth_uint16x8 = cv::v_min(v_mean_depth_uint16x8, v_min_color_diff);

                // Colour test: neighbour passes when its distance is at most
                // the capped mean.
                top_flag = v_top_depth_uint16x8 <= v_mean_depth_uint16x8;
                bottom_flag = v_bottom_depth_uint16x8 <= v_mean_depth_uint16x8;
                left_flag = v_left_depth_uint16x8 <= v_mean_depth_uint16x8;
                right_flag = v_right_depth_uint16x8 <= v_mean_depth_uint16x8;



                // ---- Phase 2: neighbour depths (same border clamping) ----
                // (translated from: "depth map")
                //v_center_depth_uint16x8 = cv::v_load_expand(p_i);

                p_tmp = i >= r ? p_i - r_x_w : p_i_org + j;
                v_top_depth_uint16x8 = cv::v_load_expand(p_tmp);

                p_tmp = i < h - r ? p_i + r_x_w : p_i_org + h___1_x_w + j;
                v_bottom_depth_uint16x8 = cv::v_load_expand(p_tmp);

                p_tmp = j >= r ? p_i - r : p_i_line;
                //p_tmp = p_i - r;
                v_left_depth_uint16x8 = cv::v_load_expand(p_tmp);

                p_tmp = j + left_r < w ? p_i + r : p_i_line + w___r;
                //p_tmp = p_i + r;
                v_right_depth_uint16x8 = cv::v_load_expand(p_tmp);

                // Mean of the four neighbour depths.
                v_mean_depth_uint16x8 = v_top_depth_uint16x8 + v_bottom_depth_uint16x8 + v_left_depth_uint16x8 + v_right_depth_uint16x8;
                v_mean_depth_uint16x8 = v_mean_depth_uint16x8 >> 2;


                // Depth test (dilation: keep depths >= mean), ANDed with the
                // colour test. Erode_v3_fastest_class uses <= here.
                top_flag = top_flag & (v_top_depth_uint16x8 >= v_mean_depth_uint16x8);
                bottom_flag = bottom_flag & (v_bottom_depth_uint16x8 >= v_mean_depth_uint16x8);
                left_flag = left_flag & (v_left_depth_uint16x8 >= v_mean_depth_uint16x8);
                right_flag = right_flag & (v_right_depth_uint16x8 >= v_mean_depth_uint16x8);


                // Sum of the surviving neighbour depths...
                v_new_depth_uint16x8 = cv::v_select(top_flag, v_top_depth_uint16x8, v_zeros_uint16x8);
                v_new_depth_uint16x8 += cv::v_select(bottom_flag, v_bottom_depth_uint16x8, v_zeros_uint16x8);
                v_new_depth_uint16x8 += cv::v_select(left_flag, v_left_depth_uint16x8, v_zeros_uint16x8);
                v_new_depth_uint16x8 += cv::v_select(right_flag, v_right_depth_uint16x8, v_zeros_uint16x8);

                // ...and their count (0..4) per lane.
                v_weight_uint16x8 = cv::v_select(top_flag, v_ones_uint16x8, v_zeros_uint16x8);
                v_weight_uint16x8 += cv::v_select(bottom_flag, v_ones_uint16x8, v_zeros_uint16x8);
                v_weight_uint16x8 += cv::v_select(left_flag, v_ones_uint16x8, v_zeros_uint16x8);
                v_weight_uint16x8 += cv::v_select(right_flag, v_ones_uint16x8, v_zeros_uint16x8);

                // Spill to scalars: universal intrinsics have no integer
                // division. (translated from: "T_T ... no division")

                cv::v_store(v_new_depth_uint16_arr8, v_new_depth_uint16x8);
                cv::v_store(weight_depth_uint16_arr8, v_weight_uint16x8);
#ifndef MIN_SUPPORT
#define MIN_SUPPORT 1
#endif
                // Average of surviving neighbours; fall back to the input
                // pixel when fewer than MIN_SUPPORT neighbours survived.
                *p_o = (*weight_depth_uint16_arr8_pt0 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt0) / (*weight_depth_uint16_arr8_pt0) : *p_i;
                *(p_o + 1) = (*weight_depth_uint16_arr8_pt1 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt1) / (*weight_depth_uint16_arr8_pt1) : *(p_i + 1);
                *(p_o + 2) = (*weight_depth_uint16_arr8_pt2 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt2) / (*weight_depth_uint16_arr8_pt2) : *(p_i + 2);
                *(p_o + 3) = (*weight_depth_uint16_arr8_pt3 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt3) / (*weight_depth_uint16_arr8_pt3) : *(p_i + 3);
                *(p_o + 4) = (*weight_depth_uint16_arr8_pt4 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt4) / (*weight_depth_uint16_arr8_pt4) : *(p_i + 4);
                *(p_o + 5) = (*weight_depth_uint16_arr8_pt5 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt5) / (*weight_depth_uint16_arr8_pt5) : *(p_i + 5);
                *(p_o + 6) = (*weight_depth_uint16_arr8_pt6 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt6) / (*weight_depth_uint16_arr8_pt6) : *(p_i + 6);
                *(p_o + 7) = (*weight_depth_uint16_arr8_pt7 >= MIN_SUPPORT) ? (*v_new_depth_uint16_arr8_pt7) / (*weight_depth_uint16_arr8_pt7) : *(p_i + 7);

                // Advance 8 pixels (24 bytes in the 3-channel guide).
                p_rgb += 24;
                p_o += 8;
                p_i += 8;
            }
        }
    }

    // No-op copy-assignment. NOTE(review): this discards the source object —
    // presumably only present to silence a compiler/ParallelLoopBody warning;
    // verify it is never relied upon for actual copying.
    Dilate_v3_fastest_class& operator=(const Dilate_v3_fastest_class &) {
        return *this;
    };

private:
    cv::Mat o_;                    // output depth map (shares data with caller's Mat)
    cv::Mat i_;                    // input depth map
    cv::Mat rgb;                   // guide colour image
    int r;                         // neighbour radius
    int h, w;                      // image height / width in pixels
    size_t i_step, rgb_step;       // row strides in bytes
    size_t r_x_w, r_x_3w;          // r-row offsets (depth / rgb)
    size_t r_x_3, w___r_x_3, h___1_x_3w;  // r-pixel offset; clamped right/bottom offsets (rgb)
    size_t w___r, h___1_x_w;       // clamped right/bottom offsets (depth)
    size_t left_r;                 // max(r, 8): border guard for 8-wide batches
    unsigned char * p_o_org;       // raw data pointers cached from the Mats
    unsigned char * p_i_org;
    unsigned char * p_rgb_org;
    uint8_t thr_;                  // cap for the mean colour distance
};


// Multi-threaded guided depth dilation.
//   o_         : output depth map (CV_8UC1), (re)allocated if needed
//   i_         : input depth map (CV_8UC1)
//   rgb        : guide colour image (CV_8UC3), same size as i_
//   r          : neighbour distance in pixels
//   threads_num: number of worker threads for cv::parallel_for_
//   threshold  : cap for the mean colour distance (see Dilate_v3_fastest_class)
// Terminates the process on malformed inputs, mirroring the other *_v3 APIs.
void tcl_depth_video::Dilate_v3_fastest(
        cv::Mat & o_,
        const cv::Mat & i_,
        const cv::Mat & rgb,
        int r,
        int threads_num,
        uint8_t threshold
) {
    const bool inputs_ok =
            i_.type() == CV_8UC1 &&
            rgb.type() == CV_8UC3 &&
            i_.size() == rgb.size();
    if (!inputs_ok) {
        VIDEO_DEPTH_LOGE("[Dilate_v3_fastest]");
        exit(-1);
    }
    // cv::Mat::create is a no-op when size and type already match.
    o_.create(i_.size(), i_.type());

    // Temporarily override OpenCV's thread pool size, restore afterwards.
    const int saved_threads = cv::getNumThreads();
    cv::setNumThreads(threads_num);
    Dilate_v3_fastest_class body(o_, i_, rgb, r, threshold);
    cv::parallel_for_(cv::Range(0, i_.rows), body);
    cv::setNumThreads(saved_threads);
}


// Fills the black left band that stereo matching leaves on a disparity map
// (width = std_max_disp + win_size/2) by copying, per row, the first valid
// pixel into every band pixel of that row. Modifies disp_pri in place.
//   disp_pri    : CV_8UC1 disparity map (terminates the process otherwise)
//   std_max_disp: maximum disparity used by the matcher
//   win_size    : matching window size (half of it adds to the band)
void tcl_depth_video::FillBlackBorder(cv::Mat & disp_pri, int std_max_disp, int win_size) {
    if (disp_pri.type() != CV_8UC1) {
        VIDEO_DEPTH_LOGE("[FillBlackBorder]: illegal inputs");
        exit(-1);
    }
    int w = disp_pri.size().width;
    int h = disp_pri.size().height;
    unsigned char * data = disp_pri.data;
    int blank_width = std_max_disp + win_size / 2;
    // Fix: clamp so the seed column stays inside the row. Previously
    // std_max_disp + win_size/2 >= w read (and filled) past the row end.
    if (blank_width >= w) blank_width = w - 1;
    if (blank_width < 0) blank_width = 0;

    // Fix: advance by the Mat's real row stride instead of assuming a
    // continuous buffer (step[0] == w only for continuous Mats).
    size_t row_stride = disp_pri.step[0];
    size_t idx = blank_width;   // offset of the seed pixel in the current row
    size_t stride = 0;          // offset of the current row start

    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < blank_width; ++j) {
            data[stride + j] = data[idx];
        }
        stride += row_stride;
        idx += row_stride;
    }
}


// Computes the normalized intensity histogram of an 8-bit depth map.
//   hist     : output CV_32F column vector of bin frequencies (sums to ~1)
//   depth    : input image; channel 0 is histogrammed over [0, 256)
//   hist_size: number of bins
void tcl_depth_video::Histogram(cv::Mat & hist, const cv::Mat & depth, int hist_size){
    float range[] = {0, 256};
    const float * histRanges = range;
    // channels = 0 -> use channel 0 of the single input image; uniform bins,
    // no accumulation.
    cv::calcHist(&depth, 1, 0, cv::Mat(), hist, 1,
                 &hist_size, &histRanges, true, false);
    // Normalize counts to frequencies. Fix: use total() (size_t) instead of
    // width * height, whose int*int product can overflow for huge images.
    hist = hist / static_cast<double>(depth.total());
}


// Renders `hist` as a blue polyline on a black 512x400 canvas.
//
// title : window title.
//         NOTE(review): currently unused — nothing is displayed or returned
//         from this function; presumably an imshow(title, hist_image) call is
//         intended at the end. Kept for interface compatibility.
// hist  : histogram to draw, one float bin per row.
//
// Fixes:
//  - cv::Mat's constructor takes (rows, cols); the original passed
//    (hist_w, hist_h), producing a transposed 512x400 canvas that clipped
//    the right half of the plot.
//  - Guard against hist_size <= 1, which previously divided by zero when
//    computing bin_w on an empty histogram (the draw loop already does
//    nothing for fewer than two bins, so the guard changes no output).
void tcl_depth_video::VisualizeHistogram(cv::String title, cv::Mat & hist){
    // visualize the histogram
    int hist_h = 400;                    // canvas height in pixels
    int hist_w = 512;                    // canvas width in pixels
    int hist_size = hist.size().height;  // number of bins (one per row)
    if (hist_size <= 1) return;          // nothing to draw; avoids div-by-zero
    int bin_w = hist_w / hist_size;      // horizontal pixels per bin
    cv::Mat hist_image(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat hist_norm;
    // Scale bin values into [0, hist_h] so the tallest bin spans the canvas.
    cv::normalize(hist, hist_norm, 0, hist_h,
                  cv::NORM_MINMAX, -1, cv::Mat());

    // Connect consecutive bins; y is flipped so larger values plot higher.
    for (int i = 1; i < hist_size; i++) {
        cv::line(
                hist_image,
                cv::Point((i - 1)*bin_w,
                          hist_h - round(hist_norm.at<float>(i - 1))),
                cv::Point(i*bin_w,
                          hist_h - round(hist_norm.at<float>(i))),
                cv::Scalar(255, 0, 0), 2, cv::LINE_AA);
    }
}


// Computes stereo rectification maps from static calibration data.
//
// Inputs : M1/D1 (main intrinsics/distortion), M2/D2 (aux), R/T (extrinsics),
//          size (image size).
// Outputs: map11/map12 — forward rectification maps for the main camera,
//          map21/map22 — forward rectification maps for the aux camera,
//          imap1/imap2 — backward (inverse) maps, aux view only,
//          Q           — disparity-to-depth reprojection matrix.
void tcl_depth_video::StaticStereoRectify(
        const cv::Mat & M1,
        const cv::Mat & D1,
        const cv::Mat & M2,
        const cv::Mat & D2,
        const cv::Mat & R,
        const cv::Mat & T,
        const cv::Size & size,
        cv::Mat & map11,
        cv::Mat & map12,
        cv::Mat & map21,
        cv::Mat & map22,
        cv::Mat & imap1,
        cv::Mat & imap2,
        cv::Mat & Q
){
    // code to put here comes from Calibration Team,
    // required to meet Func API as follow:
    // StaticStereoRectify(M1, D1, M2, D2, R, T, size, map11, map12, map21, map22, imap1, imap2, Q);
    // {M1, D1, M2, D2 , R, T, size} are inputs;
    // map11-map22 are output forward maps;
    // imap1-imap2 are output backward maps for main camera only.
    // Q is the matrix to transform disparity into physical distance.

    // normalize both views
    cv::Mat R1, P1, R2, P2;
    cv::stereoRectify(M1, D1, M2, D2, size, R, T,
                      R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, 1, size);
    cv::initUndistortRectifyMap(M1, D1, R1, P1, size, CV_32F, map11, map12);
    cv::initUndistortRectifyMap(M2, D2, R2, P2, size, CV_32F, map21, map22);

    // inverse map for the right view only
    imap1 = Mat::zeros(size, CV_32FC1);
    imap2 = Mat::zeros(size, CV_32FC1);

    std::vector<cv::Point2f> pts, pts_dst;

    pts.resize(size.width * size.height);
    pts_dst.resize(size.width * size.height);

    // Seed one source point per pixel (x = column, y = row).
    for (int i = 0; i < size.height; i++) {
        for (int j = 0; j < size.width; j++) {
            pts[i * size.width + j].x = j;
            pts[i * size.width + j].y = i;
        }
    }

    // Undistort the point grid in 4 parallel slices. The slice boundaries
    // idx*w*h/4 .. (idx+1)*w*h/4 tile the full range even when w*h is not
    // divisible by 4 (the multiplication happens before the division).
#pragma omp parallel for num_threads(4)
    for (int idx = 0; idx < 4; idx++) {
        std::vector<cv::Point2f> pts_temp(
                pts.begin() + idx * size.width * size.height / 4,
                pts.begin() + (idx + 1) * size.width * size.height / 4);
        std::vector<cv::Point2f> pts_temp2(
                pts_dst.begin() + idx * size.width * size.height / 4,
                pts_dst.begin() + (idx + 1) * size.width * size.height / 4);
        cv::undistortPoints(
                pts_temp, pts_temp2, M2, D2, R2, P2);
        // Copy the slice result back; each slice writes a disjoint range,
        // so the parallel writes do not overlap.
        std::copy(
                pts_temp2.begin(),
                pts_temp2.end(),
                pts_dst.begin() +
                idx * size.width * size.height / 4);
    }

    // Scatter the undistorted coordinates into the two inverse-map planes.
    for (int i = 0; i < size.height; i++) {
        for (int j = 0; j < size.width; j++) {
            imap1.at<float>(i, j) = pts_dst[i * size.width + j].x;
            imap2.at<float>(i, j) = pts_dst[i * size.width + j].y;
        }
    }
    // Release the temporary grids (erase-all; the vectors die at scope exit
    // anyway).
    pts.erase(pts.begin(), pts.end());
    pts_dst.erase(pts_dst.begin(), pts_dst.end());
}


// Row-aligns the aux view (im2) to the main view (im1) for stereo matching,
// using grid-sampled LK optical flow and an iteratively re-fitted projective
// row-alignment model (worst-pair rejection each round).
//
// Returns 1 on success (im2_aligned produced), 0 on failure or when the
// thread is asked to stop (*thread_state checked between every costly step).
// On the map-building path it also outputs: real_max_disp (max disparity
// after shear compensation), disp_spa/coord_spa (sparse disparities and
// their normalized coordinates), and map_x/map_y (remap tables reused on
// later calls when update_map is false).
// NOTE(review): num_feat and max_iter are accepted but never read in this
// function — confirm whether they are vestigial.
int tcl_depth_video::RowAlign_grid_tws(
        cv::Mat & im2_aligned, // aligned aux
        const cv::Mat & im1, // main
        const cv::Mat & im2, // aux
        int num_feat,
        int max_iter,
        float res_level,
        int num_worst,
        cv::Mat & map_x,
        cv::Mat & map_y,
        bool update_map,
        ThreadState * thread_state,
        bool & success_flag,
        float & real_max_disp,
        std::vector<float> & disp_spa,
        std::vector<cv::Point2f> & coord_spa
){
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    double start, stop, freq = cv::getTickFrequency();
    // Rebuild the remap tables only when requested or when none exist yet;
    // otherwise skip straight to the final remap with the cached maps.
    if (update_map || map_x.empty() || map_y.empty()) {
        // Synthetic pinhole intrinsics: unit focal length, principal point at
        // the image center. Used to normalize pixel coordinates below.
        cv::Mat M;
        float fx, fy, ppx, ppy;
        ppx = im1.size().width / 2.0;
        ppy = im1.size().height / 2.0;
        fx = 1;
        fy = 1;
        M.create(3, 3, CV_32F);
        M.at<float>(0, 0) = fx;
        M.at<float>(0, 1) = 0;
        M.at<float>(0, 2) = ppx;
        M.at<float>(1, 0) = 0;
        M.at<float>(1, 1) = fy;
        M.at<float>(1, 2) = ppy;
        M.at<float>(2, 0) = 0;
        M.at<float>(2, 1) = 0;
        M.at<float>(2, 2) = 1;

        cv::Mat tracker_im1, tracker_im2;
        cv::Mat mask;
        int total;
        int num_good;
        std::vector<cv::Point2f> pts1_good, pts2_good;
        cv::Mat map11, map12, map21, map22;
        int i, iter_;
        int ransac_used;
        float min_[2], max_[2], miu_[3], sigma_[3];
        float sum_[3], sum_of_square[3];
        int nSigma = 3;

        // --- Contrast normalization so LK tracks well on both views. ---
        start = TICK();
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(4, 4));
        cv::Mat left_gray, right_gray;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // Naming note: im1 (main) -> right_gray, im2 (aux) -> left_gray.
        clahe->apply(im1, right_gray);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        clahe->apply(im2, left_gray);

        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->CLAHE: %6.3fms", 1000*(stop-start)/freq);

        // --- Sample a regular 20px grid of track points on the main view. ---
        std::vector<cv::Point2f> left_pts;
        std::vector<cv::Point2f> right_pts;
        start = TICK();
        int w_ = im1.cols, h_ = im1.rows;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for (int y = 20; y < h_; y += 20) {
            for (int x = 20; x < w_; x += 20) {
                right_pts.push_back(cv::Point2f(x, y));
            }
        }
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->sample points: %6.3fms", 1000*(stop-start)/freq);
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->points: %6lu", right_pts.size());

        // --- Track the grid from main to aux with pyramidal LK flow. ---
        std::vector<uchar> status;
        std::vector<float> err;
        start = TICK();
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::calcOpticalFlowPyrLK(right_gray, left_gray,
                                 right_pts, left_pts, status, err,
                                 cv::Size(11, 11), 3);
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->calcOpticalFlowPyrLK: %6.3fms", 1000*(stop-start)/freq);
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->points: %6lu", right_pts.size());

        if (left_pts.size() < 10 || right_pts.size() < 10) {
            success_flag = false;
            return 0;
        }

        // --- Keep only the point pairs LK marked as successfully tracked. ---
        start = TICK();
        std::vector<cv::Point2f> leftPoints, rightPoints;
        // NOTE(review): `size` is never used after this line.
        int size = right_pts.size();
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        GetRightPoint(right_pts, left_pts, rightPoints, leftPoints, status);

        if (rightPoints.size() < num_worst + 10) {
            success_flag = false;
            return 0; // failed with auto alignment
        }
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->GetRightPoint: %6.3fms", 1000*(stop-start)/freq);
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->points: %6lu", rightPoints.size());

        // do some analysis to these selected points
        // First pass: per-pair displacement (dx, dy) and flow angle theta;
        // accumulate sums for the means miu_[0..2].
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        start = TICK();
        min_[0] = min_[1] = 1e5;
        max_[0] = max_[1] = -1e5;
        sum_[0] = sum_[1] = sum_[2] = 0;
        sum_of_square[0] = sum_of_square[1] = sum_of_square[2] = 0;

        float dx, dy;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for (i = 0; i < rightPoints.size(); ++i) {
            dx = leftPoints[i].x - rightPoints[i].x;
            dy = leftPoints[i].y - rightPoints[i].y;
            sum_[0] += dx;
            sum_[1] += dy;
            // Flow direction; dx == 0 handled explicitly to avoid div-by-zero
            // (vertical flow maps to +/- pi/2).
            float theta;
            if (dx==0){
                if(dy>0) theta = 3.14159/2;
                else if (dy<0) theta = -3.14159/2;
                else theta = 0;
            } else theta = atan(dy / dx);
            sum_[2] += theta;

            if (dx < min_[0]) min_[0] = dx;
            if (dx > max_[0]) max_[0] = dx;
            if (dy < min_[1]) min_[1] = dy;
            if (dy > max_[1]) max_[1] = dy;
        }
        miu_[0] = sum_[0] / rightPoints.size();
        miu_[1] = sum_[1] / rightPoints.size();
        miu_[2] = sum_[2] / rightPoints.size();
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // Second pass: sample standard deviations sigma_[0..2] of dx, dy, theta.
        for (i = 0; i < rightPoints.size(); ++i) {
            dx = leftPoints[i].x - rightPoints[i].x;
            dy = leftPoints[i].y - rightPoints[i].y;
            sum_of_square[0] += (dx - miu_[0]) * (dx - miu_[0]);
            sum_of_square[1] += (dy - miu_[1]) * (dy - miu_[1]);
            float theta;
            if (dx==0){
                if(dy>0) theta = 3.14159/2;
                else if (dy<0) theta = -3.14159/2;
                else theta = 0;
            } else theta = atan(dy / dx);
            sum_of_square[2] += (theta - miu_[2]) * (theta - miu_[2]);
        }
        sigma_[0] = sqrt(sum_of_square[0] / (rightPoints.size() - 1));
        sigma_[1] = sqrt(sum_of_square[1] / (rightPoints.size() - 1));
        sigma_[2] = sqrt(sum_of_square[2] / (rightPoints.size() - 1));
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        pts1_good.resize(rightPoints.size());
        pts2_good.resize(leftPoints.size());
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // filter out the point pairs using some constraints like 3-sigma principle
        for (i = 0, num_good = 0; i < rightPoints.size(); ++i) {
            dx = leftPoints[i].x - rightPoints[i].x;
            dy = leftPoints[i].y - rightPoints[i].y;
            float theta;
            if (dx==0){
                if(dy>0) theta = 3.14159/2;
                else if (dy<0) theta = -3.14159/2;
                else theta = 0;
            } else theta = atan(dy / dx);
            if (abs(dx - miu_[0]) < nSigma * sigma_[0] &&
                abs(dy - miu_[1]) < nSigma * sigma_[1] &&
                abs(theta - miu_[2]) < nSigma * sigma_[2]
                ){
                pts1_good[num_good] = rightPoints[i];
                pts2_good[num_good] = leftPoints[i];
                ++num_good;
            }
        }
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        pts1_good.resize(num_good);
        pts2_good.resize(num_good);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // Swap the filtered sets into rightPoints/leftPoints; pts1_good and
        // pts2_good then hold the old (larger) buffers for reuse below.
        std::swap(rightPoints, pts1_good);
        std::swap(leftPoints, pts2_good);
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->3-sigma: %6.3fms", 1000*(stop-start)/freq);
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->points: %6lu", rightPoints.size());

        // solve row-aligning transform matrix with dynamic training samples
        // Model: v1 = a21*u + a22*v + a23 - a31*u*v1 - a32*v*v1, solved by
        // normal equations; each round drops up to num_worst worst residuals
        // and refits until the average residual falls below res_level.
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        start = TICK();
        int j;
        float u, v, u1, v1;
        float a21, a22, a23, a31, a32;
        int cntr;
        // NOTE(review): max_res/worst_pairs are fixed at 3 entries but are
        // indexed up to num_worst-1 — any call with num_worst > 3 overruns
        // these stack arrays. Confirm callers always pass num_worst <= 3.
        float max_res[3], avg_res, this_res;
        std::vector<unsigned char> mask_accepted;
        int worst_pairs[3], total_pairs;
        Mat mat_x, vec_y, vec_beta, vec_betaT, mat_xT, mat_xTx, mat_xTx_inv, vec_res;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        mat_x.create(rightPoints.size(), 5, CV_32F);
        vec_y.create(rightPoints.size(), 1, CV_32F);
        vec_beta.create(5, 1, CV_32F);
        float *ptr_x = (float *) mat_x.data;
        float *ptr_y = (float *) vec_y.data;

        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        mask_accepted.resize(rightPoints.size());
        ransac_used = rightPoints.size();
        avg_res = 1e5;

        if (ransac_used < num_worst + 10) {
            success_flag = false;
            return 0; // failed with auto alignment
        }
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        while (avg_res > res_level) {
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            // Shrink the design matrix/targets to the surviving sample count.
            mat_x.resize(ransac_used);
            vec_y.resize(ransac_used);
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            // Build one row of the design matrix per point pair, in
            // center-normalized coordinates.
            for (i = 0; i < ransac_used; ++i) {
                v1 = (rightPoints[i].y - ppy) / fy;
                u = (leftPoints[i].x - ppx) / fx;
                v = (leftPoints[i].y - ppy) / fy;
                ptr_x[i * 5] = u;
                ptr_x[i * 5 + 1] = v;
                ptr_x[i * 5 + 2] = 1;
                ptr_x[i * 5 + 3] = -u * v1;
                ptr_x[i * 5 + 4] = -v * v1;
                ptr_y[i] = v1;
            }
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            // Normal equations: beta = (X^T X)^-1 X^T y.
            cv::transpose(mat_x, mat_xT);
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            mat_xTx = mat_xT * mat_x;
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            if (cv::invert(mat_xTx, mat_xTx_inv) == 0) {
                // this approach failed with bad selecting point pairs
                success_flag = false;
                return 0; // failed with auto alignment
            }
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            vec_beta = mat_xT * vec_y;
            vec_beta = mat_xTx_inv * vec_beta;
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            // apply the solved model to all samples to find out the worst N cases
            vec_res = mat_x * vec_beta;
            vec_res = cv::abs(vec_res - vec_y);
            avg_res = 0;
            max_res[0] = 0;
            cntr = 0;
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            // Track the num_worst largest residuals via insertion at slot 0
            // (shifting the rest down); cntr counts insertions.
            for (i = 0; i < ransac_used; ++i) {
                this_res = vec_res.at<float>(i);
                avg_res += this_res;
                if (this_res >= max_res[0]) {
                    for (j = num_worst - 1; j > 0; --j) {
                        max_res[j] = max_res[j - 1];
                        worst_pairs[j] = worst_pairs[j - 1];
                    }
                    max_res[0] = this_res;
                    worst_pairs[0] = i;
                    ++cntr;
                }
            }
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            // update average residual error for this round
            avg_res = avg_res / ransac_used;
            // filter out these worst N cases
            for (i = 0; i < ransac_used; ++i) {
                mask_accepted[i] = 1;
            }
            cntr = cntr < num_worst ? cntr : num_worst;
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            for (i = 0; i < cntr; ++i) {
                mask_accepted[worst_pairs[i]] = 0;
            }
            // Compact the surviving pairs into the spare buffers and swap
            // them back in for the next iteration.
            total_pairs = ransac_used;
            ransac_used = 0;
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            for (i = 0; i < total_pairs; ++i) {
                if (mask_accepted[i]) {
                    pts1_good[ransac_used] = rightPoints[i];
                    pts2_good[ransac_used] = leftPoints[i];
                    ++ransac_used;
                }
            }
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            pts1_good.resize(ransac_used);
            pts2_good.resize(ransac_used);
            if (*thread_state!=THREAD_STATE_PLAY) return 0;
            std::swap(rightPoints, pts1_good);
            std::swap(leftPoints, pts2_good);
            // check if the points are enough
            if (ransac_used < 10) {
                success_flag = false;
                return 0; // faild with no enough points
            }
        }
        // Assemble the solved coefficients into the 3x3 row-alignment
        // homography (first row fixed to identity: x is untouched here).
        a21 = vec_beta.at<float>(0);
        a22 = vec_beta.at<float>(1);
        a23 = vec_beta.at<float>(2);
        a31 = vec_beta.at<float>(3);
        a32 = vec_beta.at<float>(4);
        cv::Mat mat_row_align;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        mat_row_align.create(3, 3, CV_32F);
        mat_row_align.at<float>(0, 0) = 1;
        mat_row_align.at<float>(0, 1) = 0;
        mat_row_align.at<float>(0, 2) = 0;
        mat_row_align.at<float>(1, 0) = a21;
        mat_row_align.at<float>(1, 1) = a22;
        mat_row_align.at<float>(1, 2) = a23;
        mat_row_align.at<float>(2, 0) = a31;
        mat_row_align.at<float>(2, 1) = a32;
        mat_row_align.at<float>(2, 2) = 1;

        // Use Shear Transformation to restore the image in good shape
        // find the center points on 4 borders transformed using row-aligning matrix
        cv::Point2f centers[4];
        float a11, a12, a13;
        float h, w;
        h = im2.size().height;
        w = im2.size().width;
        centers[0].x = (w - 1) / 2.0;  // top-center
        centers[0].y = 0;
        centers[1].x = w - 1;          // right-center
        centers[1].y = (h - 1) / 2.0;
        centers[2].x = (w - 1) / 2.0;  // bottom-center
        centers[2].y = h - 1;
        centers[3].x = 0;              // left-center
        centers[3].y = (h - 1) / 2.0;

        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // apply the row-align transform
        for (i = 0; i < 4; ++i) {
            u = (centers[i].x - ppx) / fx;
            v = (centers[i].y - ppy) / fy;
            centers[i].x = (u / (a31 * u + a32 * v + 1)) * fx + ppx;
            centers[i].y = ((a21 * u + a22 * v + a23) / (a31 * u + a32 * v + 1)) * fy + ppy;
        }

        // Shear coefficients that restore orthogonality/aspect of the
        // transformed border-center axes (u_: horizontal, v_: vertical).
        cv::Point2f u_, v_;
        u_ = centers[1] - centers[3];
        v_ = centers[0] - centers[2];

        a11 = (h * h * u_.y * u_.y + w * w * v_.y * v_.y) / (h * w * (u_.y * v_.x - u_.x * v_.y));
        a12 = (h * h * u_.x * u_.y + w * w * v_.x * v_.y) / (h * w * (u_.x * v_.y - u_.y * v_.x));

        // update all the transform the points of right view
        float min_disp = 1e5;
        real_max_disp = -1e5;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        disp_spa.resize(ransac_used);
        for (i = 0; i < ransac_used; ++i) {
            u = (leftPoints[i].x - ppx) / fx;
            v = (leftPoints[i].y - ppy) / fy;
            u1 = (rightPoints[i].x - ppx) / fx;

            // NOTE(review): u is overwritten on the first line and the
            // already-transformed u is then used to compute v — unlike the
            // border-centers loop above, which uses the original u for both.
            // Confirm whether this ordering is intentional.
            u = u / (a31 * u + a32 * v + 1);
            v = (a21 * u + a22 * v + a23) / (a31 * u + a32 * v + 1);
            u = a11 * u + a12 * v;

            if (u - u1 < min_disp) min_disp = u - u1;
            if (u - u1 > real_max_disp) real_max_disp = u - u1;
            disp_spa[i] = u - u1;
        }
        // a13 shifts all disparities positive (offset of at least 1 pixel).
        min_disp = fmin(0, min_disp);
        a13 = -min_disp + 1;
        real_max_disp += a13;

        // setup sparse disparities
        // Coordinates are normalized to [0,1) by the image dimensions.
        coord_spa.resize(ransac_used);
        for (i = 0; i < ransac_used; ++i) {
            coord_spa[i].x = rightPoints[i].x / w_;
            coord_spa[i].y = rightPoints[i].y / h_;
            disp_spa[i] += a13;
        }
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->save_sparse_disp: %6.3fms", 1000*(stop-start)/freq);
        //if (!(map_x.empty() || map_x.empty())){ // if not the first time
        //    success_flag = true;
        //    return 1;
        //}

        // Final warp = shear * row-alignment, applied via remap tables.
        cv::Mat mat_shear;
        mat_shear.create(3, 3, CV_32F);
        mat_shear.at<float>(0, 0) = a11;
        mat_shear.at<float>(0, 1) = a12;
        mat_shear.at<float>(0, 2) = a13;
        mat_shear.at<float>(1, 0) = 0;
        mat_shear.at<float>(1, 1) = 1;
        mat_shear.at<float>(1, 2) = 0;
        mat_shear.at<float>(2, 0) = 0;
        mat_shear.at<float>(2, 1) = 0;
        mat_shear.at<float>(2, 2) = 1;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::Mat mat_final_transform;
        mat_final_transform = mat_shear * mat_row_align;
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->solve transform matrix: %6.3fms", 1000*(stop-start)/freq);
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->points: %6lu", rightPoints.size());
		
		//DumpMatrix(mat_final_transform, "Final Transform");

        start = TICK();
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // Zero distortion; the transform enters as the rectification matrix R.
        cv::Mat D = cv::Mat::zeros(cv::Size(5, 1), CV_32F);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::initUndistortRectifyMap(M, D, mat_final_transform, M, im2.size(), CV_32F, map_x, map_y);
        stop = TICK();
        VIDEO_DEPTH_LOGI("RowAlign_grid_tws->initialize map: %6.3fms", 1000*(stop-start)/freq);
    }
    // Warp the aux view with the (possibly cached) maps.
    start = TICK();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::remap(im2, im2_aligned, map_x, map_y, cv::INTER_LINEAR);
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_grid_tws->remap: %6.3fms", 1000*(stop-start)/freq);
    success_flag = true;
    return 1;
}



int tcl_depth_video::RowAlign_xiang(
        float & v_x, // velocity in x
        float & v_y, // velocity in y
        cv::Mat & im1_aligned, // aligned main
        cv::Mat & im2_aligned, // aligned aux
        const cv::Mat & im1, // main
        const cv::Mat & im2, // aux
        float res_level,
        int num_worst,
        ThreadState * thread_state,
        RotateCode & rotate_code,
        float & real_max_disp
){
//    success_flag = true;
    v_x = 0;
    v_y = 0;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    double start, stop, freq = cv::getTickFrequency();

    cv::Mat mask;
    cv::Mat im2_rotated;
    int total;
    int num_good;
    std::vector<cv::Point2f> pts1_good, pts2_good;
    int i, iter_;
    int ransac_used;
    float min_[2], max_[2], miu_[3], sigma_[3];
    float sum_[3], sum_of_square[3];
    int nSigma = 3;

    start = TICK();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(3.0, cv::Size(4, 4));
    cv::Mat left_gray, right_gray;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    clahe->apply(im1, right_gray);
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    clahe->apply(im2, left_gray);

    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->CLAHE: %6.3fms", 1000*(stop-start)/freq);

    std::vector<cv::Point2f> left_pts;
    std::vector<cv::Point2f> right_pts;
    start = TICK();
    int w_ = im1.cols, h_ = im1.rows;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (int y = 15; y < h_; y += 15) {
        for (int x = 15; x < w_; x += 15) {
            right_pts.push_back(cv::Point2f(x, y));
        }
    }
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->sample points: %6.3fms", 1000*(stop-start)/freq);
    VIDEO_DEPTH_LOGI("RowAlign_xiang->points: %6lu", right_pts.size());

    std::vector<uchar> status;
    std::vector<float> err;
    start = TICK();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::calcOpticalFlowPyrLK(right_gray, left_gray,
                             right_pts, left_pts, status, err,
                             cv::Size(11, 11), 3);
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->calcOpticalFlowPyrLK: %6.3fms", 1000*(stop-start)/freq);
    VIDEO_DEPTH_LOGI("RowAlign_xiang->points: %6lu", right_pts.size());

    if (left_pts.size() < 10 || right_pts.size() < 10) {
//        success_flag = false;
        return 0;
    }

    start = TICK();
    std::vector<cv::Point2f> leftPoints, rightPoints;
    int size = right_pts.size();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    GetRightPoint(right_pts, left_pts, rightPoints, leftPoints, status);

    if (rightPoints.size() < num_worst + 10) {
//        success_flag = false;
        return 0; // failed with auto alignment
    }
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->GetRightPoint: %6.3fms", 1000*(stop-start)/freq);
    VIDEO_DEPTH_LOGI("RowAlign_xiang->points: %6lu", rightPoints.size());

    // do some analysis to these selected points
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    start = TICK();
    min_[0] = min_[1] = 1e5;
    max_[0] = max_[1] = -1e5;
    sum_[0] = sum_[1] = sum_[2] = 0;
    sum_of_square[0] = sum_of_square[1] = sum_of_square[2] = 0;

    float dx, dy;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (i = 0; i < rightPoints.size(); ++i) {
        dx = leftPoints[i].x - rightPoints[i].x;
        dy = leftPoints[i].y - rightPoints[i].y;
        sum_[0] += dx;
        sum_[1] += dy;
        float theta;
        if (dx==0){
            if(dy>0) theta = 3.14159/2;
            else if (dy<0) theta = -3.14159/2;
            else theta = 0;
        } else theta = atan(dy / dx);
        sum_[2] += theta;

        if (dx < min_[0]) min_[0] = dx;
        if (dx > max_[0]) max_[0] = dx;
        if (dy < min_[1]) min_[1] = dy;
        if (dy > max_[1]) max_[1] = dy;
    }
    miu_[0] = sum_[0] / rightPoints.size();
    miu_[1] = sum_[1] / rightPoints.size();
    miu_[2] = sum_[2] / rightPoints.size();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (i = 0; i < rightPoints.size(); ++i) {
        dx = leftPoints[i].x - rightPoints[i].x;
        dy = leftPoints[i].y - rightPoints[i].y;
        sum_of_square[0] += (dx - miu_[0]) * (dx - miu_[0]);
        sum_of_square[1] += (dy - miu_[1]) * (dy - miu_[1]);
        float theta;
        if (dx==0){
            if(dy>0) theta = 3.14159/2;
            else if (dy<0) theta = -3.14159/2;
            else theta = 0;
        } else theta = atan(dy / dx);
        sum_of_square[2] += (theta - miu_[2]) * (theta - miu_[2]);
    }
    sigma_[0] = sqrt(sum_of_square[0] / (rightPoints.size() - 1));
    sigma_[1] = sqrt(sum_of_square[1] / (rightPoints.size() - 1));
    sigma_[2] = sqrt(sum_of_square[2] / (rightPoints.size() - 1));
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    pts1_good.resize(rightPoints.size());
    pts2_good.resize(leftPoints.size());
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    // filter out the point pairs using some constraints like 3-sigma principle
    for (i = 0, num_good = 0; i < rightPoints.size(); ++i) {
        dx = leftPoints[i].x - rightPoints[i].x;
        dy = leftPoints[i].y - rightPoints[i].y;
        float theta;
        if (dx==0){
            if(dy>0) theta = 3.14159/2;
            else if (dy<0) theta = -3.14159/2;
            else theta = 0;
        } else theta = atan(dy / dx);
        if (abs(dx - miu_[0]) < nSigma * sigma_[0] &&
            abs(dy - miu_[1]) < nSigma * sigma_[1] &&
            abs(theta - miu_[2]) < nSigma * sigma_[2]
                ){
            pts1_good[num_good] = rightPoints[i];
            pts2_good[num_good] = leftPoints[i];
            ++num_good;
        }
    }
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    pts1_good.resize(num_good);
    pts2_good.resize(num_good);
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    std::swap(rightPoints, pts1_good);
    std::swap(leftPoints, pts2_good);
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->3-sigma: %6.3fms", 1000*(stop-start)/freq);
    VIDEO_DEPTH_LOGI("RowAlign_xiang->points: %6lu", rightPoints.size());

    // check movement
    v_x = miu_[0];
    v_y = miu_[1];
    VIDEO_DEPTH_LOGI("v_x=%6.3f, v_y=%6.3f", v_x, v_y);

    // check if the movement is enough
    float delta_min = 8;
    if ( std::max(std::fabs(v_x), std::fabs(v_y)) < delta_min ) {
        // movement is not enough
        VIDEO_DEPTH_LOGW("movement is not enough!");
        return 0;
    }

    // check if to rotate image
    if ( std::fabs(dx) >= std::fabs(dy) ) {
        if (dx > 0) rotate_code = ROTATE_CODE_0;
        else rotate_code = ROTATE_CODE_180;
    } else {
        if (dy > 0) rotate_code = ROTATE_CODE_270;
        else rotate_code = ROTATE_CODE_90;
    }

    float h, w;
    h = im2.size().height;
    w = im2.size().width;

    if(rotate_code==ROTATE_CODE_0){
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        im1.copyTo(im1_aligned);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        im2.copyTo(im2_rotated);
    } else if (rotate_code==ROTATE_CODE_90){
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::rotate(im1, im1_aligned, cv::ROTATE_90_CLOCKWISE);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::rotate(im2, im2_rotated, cv::ROTATE_90_CLOCKWISE);
        // matched points are updated respectively
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for(int i=0; i<rightPoints.size(); ++i){
            float t = rightPoints[i].x;
            rightPoints[i].x = h - rightPoints[i].y;
            rightPoints[i].y = t;
            t = leftPoints[i].x;
            leftPoints[i].x = h - leftPoints[i].y;
            leftPoints[i].y = t;
        }
    } else if (rotate_code==ROTATE_CODE_180){
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::flip(im1, im1_aligned, -1);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::flip(im2, im2_rotated, -1);
        // matched points are updated respectively
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for(int i=0; i<rightPoints.size(); ++i){
            rightPoints[i].x = w - rightPoints[i].x;
            rightPoints[i].y = h - rightPoints[i].y;
            leftPoints[i].x = w - leftPoints[i].x;
            leftPoints[i].y = h - leftPoints[i].y;
        }
    } else if (rotate_code==ROTATE_CODE_270){
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::rotate(im1, im1_aligned, cv::ROTATE_90_COUNTERCLOCKWISE);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::rotate(im2, im2_rotated, cv::ROTATE_90_COUNTERCLOCKWISE);
        // matched points are updated respectively
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for(int i=0; i<rightPoints.size(); ++i){
            float t = rightPoints[i].x;
            rightPoints[i].x = rightPoints[i].y;
            rightPoints[i].y = w - t;
            t = leftPoints[i].x;
            leftPoints[i].x = leftPoints[i].y;
            leftPoints[i].y = w - t;
        }
    }

    cv::Mat M;
    float fx, fy, ppx, ppy;
    ppx = im2_rotated.size().width / 2.0;
    ppy = im2_rotated.size().height / 2.0;
    fx = 1;
    fy = 1;
    M.create(3, 3, CV_32F);
    M.at<float>(0, 0) = fx;
    M.at<float>(0, 1) = 0;
    M.at<float>(0, 2) = ppx;
    M.at<float>(1, 0) = 0;
    M.at<float>(1, 1) = fy;
    M.at<float>(1, 2) = ppy;
    M.at<float>(2, 0) = 0;
    M.at<float>(2, 1) = 0;
    M.at<float>(2, 2) = 1;

    // solve row-aligning transform matrix with dynamic training samples
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    start = TICK();
    int j;
    float u, v, u1, v1;
    float a21, a22, a23, a31, a32;
    int cntr;
    float max_res[3], avg_res, this_res;
    std::vector<unsigned char> mask_accepted;
    int worst_pairs[3], total_pairs;
    Mat mat_x, vec_y, vec_beta, vec_betaT, mat_xT, mat_xTx, mat_xTx_inv, vec_res;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    mat_x.create(rightPoints.size(), 5, CV_32F);
    vec_y.create(rightPoints.size(), 1, CV_32F);
    vec_beta.create(5, 1, CV_32F);
    float *ptr_x = (float *) mat_x.data;
    float *ptr_y = (float *) vec_y.data;

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    mask_accepted.resize(rightPoints.size());
    ransac_used = rightPoints.size();
    avg_res = 1e5;

    if (ransac_used < num_worst + 10) {
//        success_flag = false;
        return 0; // failed with auto alignment
    }
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    while (avg_res > res_level) {
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        mat_x.resize(ransac_used);
        vec_y.resize(ransac_used);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for (i = 0; i < ransac_used; ++i) {
            v1 = (rightPoints[i].y - ppy) / fy;
            u = (leftPoints[i].x - ppx) / fx;
            v = (leftPoints[i].y - ppy) / fy;
            ptr_x[i * 5] = u;
            ptr_x[i * 5 + 1] = v;
            ptr_x[i * 5 + 2] = 1;
            ptr_x[i * 5 + 3] = -u * v1;
            ptr_x[i * 5 + 4] = -v * v1;
            ptr_y[i] = v1;
        }
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::transpose(mat_x, mat_xT);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        mat_xTx = mat_xT * mat_x;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        if (cv::invert(mat_xTx, mat_xTx_inv) == 0) {
            // this approach failed with bad selecting point pairs
//            success_flag = false;
            return 0; // failed with auto alignment
        }
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        vec_beta = mat_xT * vec_y;
        vec_beta = mat_xTx_inv * vec_beta;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // apply the solved model to all samples to find out the worst N cases
        vec_res = mat_x * vec_beta;
        vec_res = cv::abs(vec_res - vec_y);
        avg_res = 0;
        max_res[0] = 0;
        cntr = 0;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for (i = 0; i < ransac_used; ++i) {
            this_res = vec_res.at<float>(i);
            avg_res += this_res;
            if (this_res >= max_res[0]) {
                for (j = num_worst - 1; j > 0; --j) {
                    max_res[j] = max_res[j - 1];
                    worst_pairs[j] = worst_pairs[j - 1];
                }
                max_res[0] = this_res;
                worst_pairs[0] = i;
                ++cntr;
            }
        }
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        // update average residual error for this round
        avg_res = avg_res / ransac_used;
        // filter out these worst N cases
        for (i = 0; i < ransac_used; ++i) {
            mask_accepted[i] = 1;
        }
        cntr = cntr < num_worst ? cntr : num_worst;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for (i = 0; i < cntr; ++i) {
            mask_accepted[worst_pairs[i]] = 0;
        }
        total_pairs = ransac_used;
        ransac_used = 0;
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for (i = 0; i < total_pairs; ++i) {
            if (mask_accepted[i]) {
                pts1_good[ransac_used] = rightPoints[i];
                pts2_good[ransac_used] = leftPoints[i];
                ++ransac_used;
            }
        }
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        pts1_good.resize(ransac_used);
        pts2_good.resize(ransac_used);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        std::swap(rightPoints, pts1_good);
        std::swap(leftPoints, pts2_good);
        // check if the points are enough
        if (ransac_used < 10) {
//            success_flag = false;
            return 0; // faild with no enough points
        }
    }
    a21 = vec_beta.at<float>(0);
    a22 = vec_beta.at<float>(1);
    a23 = vec_beta.at<float>(2);
    a31 = vec_beta.at<float>(3);
    a32 = vec_beta.at<float>(4);
    cv::Mat mat_row_align;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    mat_row_align.create(3, 3, CV_32F);
    mat_row_align.at<float>(0, 0) = 1;
    mat_row_align.at<float>(0, 1) = 0;
    mat_row_align.at<float>(0, 2) = 0;
    mat_row_align.at<float>(1, 0) = a21;
    mat_row_align.at<float>(1, 1) = a22;
    mat_row_align.at<float>(1, 2) = a23;
    mat_row_align.at<float>(2, 0) = a31;
    mat_row_align.at<float>(2, 1) = a32;
    mat_row_align.at<float>(2, 2) = 1;

    // Use Shear Transformation to restore the image in good shape
    // find the center points on 4 borders transformed using row-aligning matrix
    cv::Point2f centers[4];
    float a11, a12, a13;
    h = im2_rotated.size().height;
    w = im2_rotated.size().width;
    centers[0].x = (w - 1) / 2.0;
    centers[0].y = 0;
    centers[1].x = w - 1;
    centers[1].y = (h - 1) / 2.0;
    centers[2].x = (w - 1) / 2.0;
    centers[2].y = h - 1;
    centers[3].x = 0;
    centers[3].y = (h - 1) / 2.0;

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    // apply the row-align transform
    for (i = 0; i < 4; ++i) {
        u = (centers[i].x - ppx) / fx;
        v = (centers[i].y - ppy) / fy;
        centers[i].x = (u / (a31 * u + a32 * v + 1)) * fx + ppx;
        centers[i].y = ((a21 * u + a22 * v + a23) / (a31 * u + a32 * v + 1)) * fy + ppy;
    }

    cv::Point2f u_, v_;
    u_ = centers[1] - centers[3];
    v_ = centers[0] - centers[2];

    a11 = (h * h * u_.y * u_.y + w * w * v_.y * v_.y) / (h * w * (u_.y * v_.x - u_.x * v_.y));
    a12 = (h * h * u_.x * u_.y + w * w * v_.x * v_.y) / (h * w * (u_.x * v_.y - u_.y * v_.x));

    float du_mean = 0;
    float du_sigma = 0;
    std::vector<int> disp_;
    std::vector<bool> mask_;

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    disp_.resize(ransac_used);
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    mask_.resize(ransac_used);

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (int i = 0; i < ransac_used; ++i) {
        u = (leftPoints[i].x - ppx) / fx;
        v = (leftPoints[i].y - ppy) / fy;
        u1 = (rightPoints[i].x - ppx) / fx;

        u = u / (a31 * u + a32 * v + 1);
        v = (a21 * u + a22 * v + a23) / (a31 * u + a32 * v + 1);
        u = a11 * u + a12 * v;

        disp_[i] = u - u1;
        du_mean += u - u1;
    }
    // use 3-sgima to remove bad match
    du_mean /= ransac_used;

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (int i = 0; i<ransac_used; ++i) {
        du_sigma += (disp_[i] - du_mean) * (disp_[i] - du_mean);
    }
    du_sigma = std::sqrt(du_sigma / ransac_used);

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (int i = 0; i<ransac_used; ++i) {
        if (std::abs(disp_[i] - du_mean) > 3 * du_sigma)
            mask_[i] = false;
        else mask_[i] = true;
    }

    float du_min = 1e5;
    float du_max = -1e5;

    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for (int i = 0; i<ransac_used; ++i) {
        if (mask_[i]) {
            if (disp_[i] < du_min) du_min = disp_[i];
            if (disp_[i] > du_max) du_max = disp_[i];
        }
    }
    VIDEO_DEPTH_LOGI("min_disp=%6.3f", du_min);
    VIDEO_DEPTH_LOGI("max_disp=%6.3f", du_max);
    a13 = -du_min + 1;
    if (du_max - du_min >= 32) a13 = 32 - du_max;
    real_max_disp = du_max + a13;

    cv::Mat mat_shear;
    mat_shear.create(3, 3, CV_32F);
    mat_shear.at<float>(0, 0) = a11;
    mat_shear.at<float>(0, 1) = a12;
    mat_shear.at<float>(0, 2) = a13;
    mat_shear.at<float>(1, 0) = 0;
    mat_shear.at<float>(1, 1) = 1;
    mat_shear.at<float>(1, 2) = 0;
    mat_shear.at<float>(2, 0) = 0;
    mat_shear.at<float>(2, 1) = 0;
    mat_shear.at<float>(2, 2) = 1;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::Mat mat_final_transform;
    mat_final_transform = mat_shear * mat_row_align;
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->solve transform matrix: %6.3fms", 1000*(stop-start)/freq);
    VIDEO_DEPTH_LOGI("RowAlign_xiang->points: %6lu", rightPoints.size());

    start = TICK();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::Mat D = cv::Mat::zeros(cv::Size(5, 1), CV_32F);
    cv::Mat map_x, map_y;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::initUndistortRectifyMap(M, D, mat_final_transform, M, im2_rotated.size(), CV_32F, map_x, map_y);
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->initialize map: %6.3fms", 1000*(stop-start)/freq);

    start = TICK();
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::remap(im2_rotated, im2_aligned, map_x, map_y, cv::INTER_LINEAR);
    stop = TICK();
    VIDEO_DEPTH_LOGI("RowAlign_xiang->remap: %6.3fms", 1000*(stop-start)/freq);
    return 1;
}


void tcl_depth_video::HistBasedFilter(cv::Mat & im_disp, float min_support, bool ignore_zero) {
	// Clamp an 8-bit disparity map to the well-supported range of its
	// histogram: values below the lowest bin whose cumulative count reaches
	// min_support are raised to that bin; values above the symmetric upper
	// bin are truncated down to it. If ignore_zero is set, pixels that were
	// exactly 0 on entry (holes) are forced back to 0 at the end.
	cv::Mat hist;
	const int hist_size = 256;
	tcl_depth_video::Histogram(hist, im_disp, hist_size);
	//VisualizeHistogram("hist", hist);

	int idx_min, idx_max;
	float * p_hist = (float *)(hist.data);
	float sum_ = 0;

	// lowest supported bin; bin 0 is skipped — presumably because zero means
	// "no measurement" in this pipeline (TODO confirm with callers)
	for (int i = 1; i<hist_size; ++i) {
		idx_min = i;
		sum_ += p_hist[idx_min];
		if (sum_ >= min_support) break;
	}

	// highest supported bin, accumulating downwards from 255
	sum_ = 0;
	for (int i = 0; i<hist_size; ++i) {
		idx_max = 255 - i;
		sum_ += p_hist[idx_max];
		if (sum_ >= min_support) break;
	}

	cv::Mat mask_, tmp;
	cv::Mat zero_mask;
	// remember which pixels were holes so they can be restored below
	if (ignore_zero) zero_mask = im_disp != 0;

	// raise everything below idx_min to exactly idx_min:
	// mask_ is 0/255, so AND keeps pixels >= idx_min unchanged and zeroes the
	// rest; the inverted mask ANDed with the scalar idx_min yields idx_min at
	// those zeroed positions, and the += merges the two disjoint images
	mask_ = im_disp >= idx_min;
	cv::bitwise_and(im_disp, mask_, im_disp);
	cv::bitwise_not(mask_, mask_);
	cv::bitwise_and(idx_min, mask_, tmp);
	im_disp += tmp;

	// cap everything above idx_max
	cv::threshold(im_disp, im_disp, idx_max, idx_max, cv::THRESH_TRUNC);

	// original holes stay holes
	if (ignore_zero) cv::bitwise_and(im_disp, zero_mask, im_disp);
}


void tcl_depth_video::HistBasedPeakFilter(cv::Mat & im_disp, float min_support, int min_disp){
    // Suppress the sparse high-disparity tail of the map: walk the histogram
    // from bin 255 downwards, accumulating support, and zero out every pixel
    // whose disparity is >= the bin at which the accumulated support first
    // reaches min_support. Bins at or below min_disp are never considered.
    cv::Mat hist;
    const int hist_size = 256;
    Histogram(hist, im_disp, hist_size);
    //VisualizeHistogram("hist", hist);

    // BUG FIX: idx_max was previously uninitialized; when the top bin alone
    // already held >= min_support the loop broke before the first assignment
    // and the mask below compared against garbage. Starting at hist_size
    // (256) makes that case a no-op: every 8-bit pixel is < 256, so all
    // pixels are kept.
    int idx_max = hist_size;
    float * p_hist = (float *)(hist.data);
    float sum_ = 0;

    for (int i=0; i<255-min_disp; ++i){
        sum_ += p_hist[255-i];
        if(sum_ >= min_support) break;
        idx_max = 255 - i;
    }

    // build a 0/1 multiplier mask and zero everything >= idx_max
    cv::Mat mask_min;
    mask_min = im_disp < idx_max;
    mask_min /= 255;  // comparison yields 0/255; scale to 0/1 for mul()
    im_disp = im_disp.mul(mask_min);
}


void tcl_depth_video::FillBlackHoles(cv::Mat & io_, const cv::Mat & guide) {
    // Fill zero-valued ("hole") pixels of the depth map io_ in place: for each
    // hole, scan left/right/up/down to the nearest nonzero depth and blend the
    // (up to) four candidates, each weighted by its 3-channel color similarity
    // to the hole pixel in the guide image.
    // NOTE(review): assumes io_ is CV_8UC1 and guide is CV_8UC3 of the same
    // size — there is no assertion here, confirm with callers.
    // search from the center to the first nonzero value in all 4 ways.
    uint8_t *data = io_.data;
    uint8_t *data_g = guide.data;

    int h = io_.size().height;
    int w = io_.size().width;

    int offset = 0;         // linear index of the current pixel (i * w + j)
    int depth_[4];          // nearest nonzero depth per direction (L, R, U, D)
    int color_[5][3];       // guide color per direction; [4] is the hole pixel
    bool is_found[4];       // whether each directional scan hit a value

    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            if (!data[offset]) { // this pixel is within a black hole
                color_[4][0] = data_g[offset * 3];
                color_[4][1] = data_g[offset * 3 + 1];
                color_[4][2] = data_g[offset * 3 + 2];
                is_found[0] = false;
                int end_idx = offset - j;   // start of the current row
                for (int s = offset - 1; s >= end_idx; --s) { // search left
                    if (data[s]) { // meet a nonzero value
                        depth_[0] = data[s];
                        color_[0][0] = data_g[s * 3];
                        color_[0][1] = data_g[s * 3 + 1];
                        color_[0][2] = data_g[s * 3 + 2];
                        is_found[0] = true;
                        break;
                    }
                }
                is_found[1] = false;
                end_idx = offset - j + w - 1;   // end of the current row
                for (int s = offset + 1; s <= end_idx; ++s) { // search right
                    if (data[s]) { // meet a nonzero value
                        depth_[1] = data[s];
                        color_[1][0] = data_g[s * 3];
                        color_[1][1] = data_g[s * 3 + 1];
                        color_[1][2] = data_g[s * 3 + 2];
                        is_found[1] = true;
                        break;
                    }
                }
                is_found[2] = false;
                end_idx = j;    // top pixel of column j has linear index j
                for (int s = offset - w; s >= end_idx; s -= w) { // search up
                    if (data[s]) { // meet a nonzero value
                        depth_[2] = data[s];
                        color_[2][0] = data_g[s * 3];
                        color_[2][1] = data_g[s * 3 + 1];
                        color_[2][2] = data_g[s * 3 + 2];
                        is_found[2] = true;
                        break;
                    }
                }
                is_found[3] = false;
                end_idx = j + (h - 1) * w;  // bottom pixel of column j
                for (int s = offset + w; s <= end_idx; s += w) { // search down
                    if (data[s]) { // meet a nonzero value
                        depth_[3] = data[s];
                        color_[3][0] = data_g[s * 3];
                        color_[3][1] = data_g[s * 3 + 1];
                        color_[3][2] = data_g[s * 3 + 2];
                        is_found[3] = true;
                        break;
                    }
                }
                // interpolate according to color similarity
                int sum_ = 0;
                int sum_depth = 0;
                for (int s = 0; s < 4; ++s) {
                    if (is_found[s]) {
                        // weight = inverse of the L1 color distance (+1 to
                        // avoid division by zero), scaled by 255*255
                        int w_ = std::abs(color_[4][0] - color_[s][0]) +
                                 std::abs(color_[4][1] - color_[s][1]) +
                                 std::abs(color_[4][2] - color_[s][2]);
                        w_ = (255 * 255) / (w_ + 1);
                        sum_ += w_;
                        sum_depth += w_ * depth_[s];
                    }
                }
                // weighted average; holes with no nonzero neighbor stay 0
                if (sum_ > 0) data[offset] = (uint8_t) (sum_depth / sum_);
            }
            ++offset;
        }
    }
}


void tcl_depth_video::FillBlackHoles_Gray(cv::Mat & io_, const cv::Mat & guide) {
    // Fill zero-valued ("hole") pixels of the depth map io_ in place: scan
    // left/right/up/down to the nearest nonzero depth and blend the (up to)
    // four candidates, each weighted by its gray-level similarity to the hole
    // pixel in the single-channel guide image.
    uint8_t *depth = io_.data;
    uint8_t *gray = guide.data;

    const int rows = io_.size().height;
    const int cols = io_.size().width;

    int cand_depth[4];   // nearest nonzero depth per direction (L, R, U, D)
    int cand_gray[4];    // guide gray value at that position
    bool hit[4];         // whether the directional scan found anything

    int pos = 0;         // linear index of the current pixel
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c, ++pos) {
            if (depth[pos]) continue;   // not a hole

            const int center_gray = gray[pos];

            // Walk from 'from' towards 'to' (inclusive) in strides of 'step'
            // and record the first nonzero depth sample into slot k.
            auto scan = [&](int k, int from, int to, int step) {
                hit[k] = false;
                for (int s = from; (step > 0) ? (s <= to) : (s >= to); s += step) {
                    if (depth[s]) {
                        cand_depth[k] = depth[s];
                        cand_gray[k] = gray[s];
                        hit[k] = true;
                        return;
                    }
                }
            };

            scan(0, pos - 1, pos - c, -1);                      // left
            scan(1, pos + 1, pos - c + cols - 1, 1);            // right
            scan(2, pos - cols, c, -cols);                      // up
            scan(3, pos + cols, c + (rows - 1) * cols, cols);   // down

            // blend candidates with inverse-gray-distance weights
            int weight_sum = 0;
            int weighted_depth = 0;
            for (int k = 0; k < 4; ++k) {
                if (!hit[k]) continue;
                int w_ = (255 * 255) / (std::abs(center_gray - cand_gray[k]) + 1);
                weight_sum += w_;
                weighted_depth += w_ * cand_depth[k];
            }
            // holes with no nonzero neighbor anywhere stay 0
            if (weight_sum > 0)
                depth[pos] = (uint8_t) (weighted_depth / weight_sum);
        }
    }
}


void tcl_depth_video::StrokeGridLines(cv::Mat & dis,
                     const cv::Mat & curr,
                     const std::vector<uint8_t> & disp,
                     const std::vector<cv::Point2i> & pts){
    // Rasterize sparse grid-node disparities into a dense CV_8UC1 image:
    // every node is stamped directly; consecutive nodes sharing a row are
    // connected with a horizontal segment and nodes sharing a column with a
    // vertical segment, interpolating the disparity along each segment by
    // gray-similarity to the two endpoints in the guide image `curr`.
    // NOTE(review): the segment loops only run when x2 > x1 (resp. y2 > y1),
    // so pts is implicitly assumed ordered left-to-right within a row and
    // top-to-bottom within a column — confirm with the caller that builds pts.
    // assertion
    if (pts.size() != disp.size()) {
        VIDEO_DEPTH_LOGE("StrokeGridLines: assertion failed!");
        exit(-1);
    }
    if (dis.size() != curr.size() || dis.type() != CV_8UC1)
        dis.create(curr.size(), CV_8UC1);
    dis.setTo(0);

    uint8_t * data_d = dis.data;    // output disparity raster
    uint8_t * data_g = curr.data;   // gray guide image

    int h = dis.size().height;
    int w = dis.size().width;

    int _x1, _y1;   // previous node
    int _x2, _y2;   // current node
    int _G1, _G2;   // guide gray at the two endpoints
    int _D1, _D2;   // disparity at the two endpoints

    _x1 = pts[0].x;
    _y1 = pts[0].y;
    _D1 = disp[0];
    data_d[_y1*w+_x1] = disp[0];

    // horizontal lines
    for(int i=1; i<pts.size(); ++i) {
        _x2 = pts[i].x;
        _y2 = pts[i].y;
        _D2 = disp[i];
        data_d[_y2 * w + _x2] = disp[i];
        if (_y1 == _y2) { // a horizontal line can be drawn
            uint8_t *_pG = data_g + _y1 * w;    // guide row
            uint8_t *_pD = data_d + _y1 * w;    // output row
            _G1 = _pG[_x1];
            _G2 = _pG[_x2];
            for (int j = _x1 + 1; j < _x2; ++j) {
                // inverse gray-distance weights towards each endpoint
                float _w1 = (255 * 255) / (std::abs(_pG[j] - _G1) + 1);
                float _w2 = (255 * 255) / (std::abs(_pG[j] - _G2) + 1);
                _pD[j] = (uint8_t) ((_w1 * _D1 + _w2 * _D2) / (_w1 + _w2));
            }
        }
        _x1 = _x2;
        _y1 = _y2;
        _D1 = _D2;
    }

    // vertical lines
    // mask_used marks nodes already consumed by some column chain so each
    // node starts at most one vertical chain
    std::vector<int> mask_used;
    mask_used.resize(pts.size());
    for(int i=0; i<mask_used.size(); ++i)
        mask_used[i] = 0;

    for(int i=0; i<pts.size(); ++i){
        if (mask_used[i]) continue;
        mask_used[i] = 1;
        _x1 = pts[i].x;
        _y1 = pts[i].y;
        _D1 = disp[i];
        // search in all nodes to find points in same column
        for(int j=i; j<pts.size(); ++j){
            if (mask_used[j]) continue;
            _x2 = pts[j].x;
            _y2 = pts[j].y;
            _D2 = disp[j];
            if(_x1==_x2){ // a vertical line can be drawn
                mask_used[j] = 1;
                uint8_t *_pG = data_g + _x1;    // guide column base
                uint8_t *_pD = data_d + _x1;    // output column base
                _G1 = _pG[_y1*w];
                _G2 = _pG[_y2*w];
                for (int k = _y1 + 1; k < _y2; ++k){
                    float _w1 = (255 * 255) / (std::abs(_pG[k*w] - _G1) + 1);
                    float _w2 = (255 * 255) / (std::abs(_pG[k*w] - _G2) + 1);
                    _pD[k*w] = (uint8_t) ((_w1 * _D1 + _w2 * _D2) / (_w1 + _w2));
                }
                // chain continues from the node just connected
                _y1 = _y2;
                _D1 = _D2;
            }
        }
    }
}


void tcl_depth_video::DumpImage(const cv::Mat & im_,
               const cv::String & path,
               const cv::String & postfix,
               const cv::String & uid
){
    // Make sure the target directory exists, then write the image as
    // <path>/<uid>.png or <path>/<uid>_<postfix>.png.
    cv::String cmd(path.c_str());
#ifdef _WIN32
    // Windows shell wants back-slashes and "md"
    std::replace(cmd.begin(), cmd.end(), '/', '\\');
    cmd = "md " + cmd;
#else
    cmd = "mkdir -p " + cmd;
#endif
    system(cmd.c_str());

    const cv::String stem = path + "/" + uid;
    if (postfix.empty())
        cv::imwrite(stem + ".png", im_);
    else
        cv::imwrite(stem + "_" + postfix + ".png", im_);
}


void tcl_depth_video::Disparity2Depth(cv::Mat &disp) {
    // Convert disparity to depth in place: depth ~ 1/disparity (65025 =
    // 255*255 keeps the values in a convenient range), then min-max
    // normalize to [1, 255] and store back as 8-bit.
    cv::Mat inv_f32;
    disp.convertTo(inv_f32, CV_32FC1);
    cv::divide(65025.0, inv_f32, inv_f32);
    cv::normalize(inv_f32, inv_f32, 1, 255, cv::NORM_MINMAX, -1);
    inv_f32.convertTo(disp, CV_8UC1);
}


void tcl_depth_video::Disparity2Depth_v2(cv::Mat &disp) {
    // Invert the 8-bit map in place: large disparity (near) -> small depth.
    cv::subtract(cv::Scalar(255), disp, disp);
}


void tcl_depth_video::Depth2Disparity(cv::Mat &depth) {
    // Invert the 8-bit map in place: small depth (near) -> large disparity.
    cv::subtract(cv::Scalar(255), depth, depth);
}


void tcl_depth_video::ChangeSaturation(cv::Mat &in_out, int degree){
    // Adjust the saturation of an 8-bit BGR image in place.
    // degree is clamped to [-100, 100]: positive values boost saturation
    // (HSL-style, with a quadratic luminance-dependent decay so bright pixels
    // are pushed less); negative values blend each channel towards the pixel
    // lightness. Exits the process on a wrong input type.
    if ( in_out.type() != CV_8UC3 ) {
        VIDEO_DEPTH_LOGE("Assertion Failure in [ChangeSaturation]!");
        exit(-1);
    }
    cv::MatIterator_<cv::Vec3b> p_cur, p_end;
    p_cur = in_out.begin<cv::Vec3b>();
    p_end = in_out.end<cv::Vec3b>();

    float Increment = std::min(std::max(degree, -100), 100) / 100.0F;

    int t1, t2, t3, minVal, maxVal;
    float delta, L, S, alpha;

    if (Increment >= 0) {
        for (; p_cur!=p_end; p_cur++) {
            t1 = (*p_cur)[0];
            t2 = (*p_cur)[1];
            t3 = (*p_cur)[2];

            minVal = std::min(std::min(t1, t2),t3);
            maxVal = std::max(std::max(t1, t2),t3);

            delta = maxVal - minVal + 1e-3;     // +1e-3 guards gray pixels
            L = (maxVal + minVal) / 2.0;        // HSL lightness, 0..255 scale

            // HSL saturation, both halves guarded against division by zero
            if (L < 127.5) S = 0.5*delta/(L + 1e-3);
            else S = 0.5*delta/(255 - L + 1e-3);

            alpha = std::max(S, 1 - Increment);
            float decay = (255-L)/255.0;
            decay *= decay;                     // quadratic falloff: push bright pixels less
            alpha = (1.0/alpha - 1)*decay;

            // BUG FIX: the boosted values can leave [0, 255]; the previous
            // implicit float->uchar conversion wrapped around (undefined
            // behavior for out-of-range floats). Saturate instead.
            (*p_cur)[0] = cv::saturate_cast<uchar>(t1 + (t1 - L)*alpha);
            (*p_cur)[1] = cv::saturate_cast<uchar>(t2 + (t2 - L)*alpha);
            (*p_cur)[2] = cv::saturate_cast<uchar>(t3 + (t3 - L)*alpha);
        }
    } else {
        for (; p_cur!=p_end; p_cur++) {
            t1 = (*p_cur)[0];
            t2 = (*p_cur)[1];
            t3 = (*p_cur)[2];

            minVal = std::min(std::min(t1, t2),t3);
            maxVal = std::max(std::max(t1, t2),t3);
            L = (maxVal + minVal) / 2.0;

            // blend towards lightness; result stays within the channel range,
            // but clamp for symmetry with the positive branch
            (*p_cur)[0] = cv::saturate_cast<uchar>(L + (t1 - L)*(1 + Increment));
            (*p_cur)[1] = cv::saturate_cast<uchar>(L + (t2 - L)*(1 + Increment));
            (*p_cur)[2] = cv::saturate_cast<uchar>(L + (t3 - L)*(1 + Increment));
        }
    }
}


bool tcl_depth_video::CheckMotion(const cv::Mat & im, const cv::Mat & im_moved, float threshold){
    // Estimate whether the camera/scene moved between im and im_moved:
    // find the dominant gray level (tallest histogram bar of im), compute the
    // centroid of pixels holding that value in each image, and report motion
    // when the centroid displacement exceeds `threshold` pixels.
    // assertion check
    cv::Mat im_gray, im_moved_gray;
    if(im.type() != CV_8UC1 || im_moved.type() != CV_8UC1){
        cv::cvtColor(im, im_gray, cv::COLOR_BGR2GRAY);
        cv::cvtColor(im_moved, im_moved_gray, cv::COLOR_BGR2GRAY);
    } else {
        im_gray = im;
        im_moved_gray = im_moved;
    }
    // retrieve motion info by the displacement of histogram
    cv::Mat hist;
    int hist_size = 256;
    float ranges[] = {0, 256};
    const float * hist_ranges = { ranges }; // must be a 2d array
    cv::calcHist(&im_gray, 1, 0, cv::Mat(), hist,
                 1, &hist_size, &hist_ranges, true, false);
    // get the largest bar in hist
    // BUG FIX: max_idx was uninitialized; for a degenerate (all-zero) hist it
    // was read without ever being assigned. Default to bin 0.
    int max_idx = 0;
    float max_sup = 0;
    const float * p_hist =  (float*)(hist.data);
    for(int i=0; i<hist_size;++i){
        if (p_hist[i] > max_sup){
            max_sup = p_hist[i];
            max_idx = i;
        }
    }
    // calc the center of this largest bar
    int h = im_gray.size().height;
    int w = im_gray.size().width;
    const unsigned char * p_im = im_gray.data;
    const unsigned char * p_im_moved = im_moved_gray.data;
    int stride = 0;
    long int sum_im_x = 0;
    long int sum_im_y = 0;
    long int sum_im_moved_x = 0;
    long int sum_im_moved_y = 0;

    for(int i=0; i<h; ++i){
        for(int j=0; j<w; ++j){
            if(p_im[stride + j] == max_idx) {
                sum_im_x += j;
                sum_im_y += i;
            }
            if(p_im_moved[stride + j] == max_idx) {
                sum_im_moved_x += j;
                sum_im_moved_y += i;
            }
        }
        stride += w;
    }

    // determine the movement
    // NOTE(review): both centroids are normalized by the bin count of `im`
    // only (p_hist[max_idx]); the count in im_moved may differ. GuessMotion
    // below keeps separate counters — confirm which behavior is intended.
    float dis_x = (sum_im_moved_x - sum_im_x) / p_hist[max_idx];
    float dis_y = (sum_im_moved_y - sum_im_y) / p_hist[max_idx];
    float dis = std::sqrt(dis_x * dis_x + dis_y * dis_y);
    VIDEO_DEPTH_LOGI("dis=%6.3f pixel(s)", dis);
    return (dis > threshold);
}



int tcl_depth_video::GuessMotion(float &dx, float &dy,
                const cv::Mat & im, const cv::Mat & im_moved,
                ThreadState* thread_state
){
    // Estimate the (dx, dy) translation between im and im_moved from the
    // centroid shift of the dominant gray level (tallest histogram bar of
    // im). Returns 1 on success, 0 when aborted by thread_state or when the
    // estimate is impossible (no pixel holds the dominant value).
    // assertion check
    cv::Mat im_gray, im_moved_gray;
    if(im.type() != CV_8UC1 || im_moved.type() != CV_8UC1){
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::cvtColor(im, im_gray, cv::COLOR_BGR2GRAY);
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        cv::cvtColor(im_moved, im_moved_gray, cv::COLOR_BGR2GRAY);
    } else {
        im_gray = im;
        im_moved_gray = im_moved;
    }
    // retrieve motion info by the displacement of histogram
    cv::Mat hist;
    int hist_size = 256;
    float ranges[] = {0, 256};
    const float * hist_ranges = { ranges }; // must be a 2d array
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    cv::calcHist(&im_gray, 1, 0, cv::Mat(), hist,
                 1, &hist_size, &hist_ranges, true, false);
    // get the largest bar in hist
    // BUG FIX: max_idx was uninitialized; for a degenerate (all-zero) hist it
    // was read without ever being assigned. Default to bin 0.
    int max_idx = 0;
    float max_sup = 0;
    const float * p_hist =  (float*)(hist.data);
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for(int i=0; i<hist_size;++i){
        if (p_hist[i] > max_sup){
            max_sup = p_hist[i];
            max_idx = i;
        }
    }
    // calc the center of this largest bar
    int h = im_gray.size().height;
    int w = im_gray.size().width;
    const unsigned char * p_im = im_gray.data;
    const unsigned char * p_im_moved = im_moved_gray.data;
    int stride = 0;
    long int sum_im_x = 0;
    long int sum_im_y = 0;
    long int sum_im_moved_x = 0;
    long int sum_im_moved_y = 0;
    int cntr = 0, cntr_moved = 0;
    if (*thread_state!=THREAD_STATE_PLAY) return 0;
    for(int i=0; i<h; ++i){
        if (*thread_state!=THREAD_STATE_PLAY) return 0;
        for(int j=0; j<w; ++j){
            if(p_im[stride + j] == max_idx) {
                sum_im_x += j;
                sum_im_y += i;
                ++cntr;
            }
            if(p_im_moved[stride + j] == max_idx) {
                sum_im_moved_x += j;
                sum_im_moved_y += i;
                ++cntr_moved;
            }
        }
        stride += w;
    }

    // determine the movement
    // BUG FIX: guard against division by zero (integer division by zero is
    // undefined behavior) when either image has no pixel at max_idx, and
    // compute the centroids in floating point — the old integer divisions
    // truncated away all sub-pixel motion before assigning to the floats.
    if (cntr == 0 || cntr_moved == 0) return 0;
    dx = (float)sum_im_moved_x / cntr_moved - (float)sum_im_x / cntr;
    dy = (float)sum_im_moved_y / cntr_moved - (float)sum_im_y / cntr;
    VIDEO_DEPTH_LOGI("(dx, dy) = (%6.3f, %6.3f)", dx, dy);
    return 1;
}


void tcl_depth_video::ScaleDownAndCenterCrop(cv::Mat & o_,
                            const cv::Mat & i_,
                            double scale,
                            const cv::Size & base,
                            const cv::Size & crop){
    // Resize i_ to scale*base, then bring it to the target size
    // scale*base - crop: a non-negative crop cuts out the centered region, a
    // negative crop embeds the resized image centered in a larger zero canvas.
    const cv::Size scaled((int)(scale * base.width),
                          (int)(scale * base.height));
    const cv::Size target = scaled - crop;

    cv::Mat resized;
    cv::resize(i_, resized, scaled, 0, 0, cv::INTER_LINEAR);

    if (crop.width >= 0) {
        // cut the centered target window out of the resized image
        const cv::Rect window(crop.width / 2, crop.height / 2,
                              target.width, target.height);
        o_ = resized(window).clone();
    } else {
        // negative crop: pad — paste the resized image into the center of a
        // zero-filled canvas of the (larger) target size
        o_ = cv::Mat::zeros(target, i_.type());
        const cv::Rect window(-crop.width / 2, -crop.height / 2,
                              scaled.width, scaled.height);
        resized.copyTo(o_(window));
    }
}


void tcl_depth_video::DumpMatrix(const cv::Mat & mat, const cv::String & name){
    // Pretty-print a matrix through the logging facility.
    std::stringstream buf;
    buf << mat;
    VIDEO_DEPTH_LOGI("%s = \n%s", name.c_str(), buf.str().c_str());
}


void tcl_depth_video::UpdateCameraMatrix(cv::Mat cm, double scale, const cv::Size & crop){
    // Rescale focal lengths and principal point for a resized image, then
    // shift the principal point to account for the center crop. cm is passed
    // by value, but cv::Mat shares its pixel buffer, so the caller's matrix
    // is updated in place.
    const double shift[2] = { crop.width / 2.0, crop.height / 2.0 };
    for (int r = 0; r < 2; ++r) {
        cm.at<double>(r, r) *= scale;                                   // fx / fy
        cm.at<double>(r, 2) = cm.at<double>(r, 2) * scale - shift[r];   // cx / cy
    }
}


void tcl_depth_video::GenerateInverseMap(
        cv::Mat & imap1,
        cv::Mat & imap2,
        const cv::Size size,
        const cv::Mat & M,
        const cv::Mat & D,
        const cv::Mat & R,
        const cv::Mat & P
){
    // Build per-pixel inverse rectification maps: for every pixel (j, i) of a
    // `size` image, run cv::undistortPoints with camera matrix M, distortion
    // D, rotation R and projection P, and store the destination x coordinate
    // in imap1 and y in imap2 (both CV_32FC1).
    imap1.create(size, CV_32FC1);
    imap2.create(size, CV_32FC1);

    std::vector<cv::Point2f> pts, pts_dst;
    int w, h;
    unsigned long npix;
    w = size.width;
    h = size.height;
    npix = (unsigned long)(h * w);

    pts.resize(npix);
    pts_dst.resize(npix);

    // dense grid of source pixel coordinates, row-major
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            int id = i * w + j;
            pts[id].x = j;
            pts[id].y = i;
        }
    }

    // undistort the grid in 4 contiguous chunks in parallel; each thread
    // works on private copies and copies its result back into pts_dst
    // (chunk boundaries idx*npix/4 are contiguous and cover all of npix even
    // when npix is not divisible by 4)
#pragma omp parallel for num_threads(4)
    for (int idx = 0; idx < 4; idx++) {
        std::vector<cv::Point2f> pts_temp(
                pts.begin() + idx * npix / 4,
                pts.begin() + (idx + 1) * npix / 4);
        std::vector<cv::Point2f> pts_temp2(
                pts_dst.begin() + idx * npix / 4,
                pts_dst.begin() + (idx + 1) * npix / 4);
        cv::undistortPoints(
                pts_temp, pts_temp2, M, D, R, P);
        std::copy(
                pts_temp2.begin(),
                pts_temp2.end(),
                pts_dst.begin() +
                idx * npix / 4);
    }

    // scatter the undistorted coordinates into the two map planes
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            int id = i * w + j;
            imap1.at<float>(i, j) = pts_dst[id].x;
            imap2.at<float>(i, j) = pts_dst[id].y;
        }
    }
    // NOTE(review): these erase calls are redundant — both vectors are
    // destroyed when the function returns anyway.
    pts.erase(pts.begin(), pts.end());
    pts_dst.erase(pts_dst.begin(), pts_dst.end());
}


void tcl_depth_video::GetRightPoint(std::vector<cv::Point2f> &rightVec,
                   std::vector<cv::Point2f> &leftVec,
                   std::vector<cv::Point2f> &rightVecOut,
                   std::vector<cv::Point2f> &leftVecOut,
                   std::vector<uchar> &status
	){
    // Filter matched point pairs by flow consistency: each pair's flow vector
    // (right - left) is scored against every other pair by direction cosine,
    // ignoring pairs whose flow differs by more than a distance threshold or
    // whose tracker status is 0. Pairs scoring at least GRP_POINT_SCORE of
    // the best score are appended to rightVecOut/leftVecOut.

	const float GRP_POINT_SCORE = 0.3; // keep pairs scoring >= 30% of the best
	const float GRP_POINT_INTERVAL = 0.01; // flow shorter than this has no reliable direction

    int i, j;
    double dx, dy, dx1, dy1, dx2, dy2;

    int numpoint;
    int distflag;
    float th;
    float n2;

    numpoint = rightVec.size();
    // BUG FIX: these used to be fixed float[2000] stack arrays while numpoint
    // is the unbounded input size — more than 2000 pairs silently overflowed
    // the stack. Size the buffers to the actual input instead.
    std::vector<float> nx(numpoint);    // unit flow direction, x
    std::vector<float> ny(numpoint);    // unit flow direction, y
    std::vector<float> sumi(numpoint);  // consistency score per pair
    std::vector<float> norm(numpoint);  // flow magnitude per pair

    for (i = 0; i<numpoint; i++) {
        dx = rightVec[i].x - leftVec[i].x;
        dy = rightVec[i].y - leftVec[i].y;
        norm[i] = sqrt(dx*dx + dy*dy);
        if (norm[i] > 1e-6) {
            dx = dx / norm[i];
            dy = dy / norm[i];
        } else {
            // zero-length flow has no direction
            dx = 0;
            dy = 0;
        }
        nx[i] = dx;//std v vector
        ny[i] = dy;//std v vector
    }
    //compute score
    for (i = 0; i<numpoint; i++) {
        if (!status[i]) {
            sumi[i] = 0;       //score = 0 when status = 0
        } else {
            sumi[i] = 0;
            dx1 = rightVec[i].x - leftVec[i].x;
            dy1 = rightVec[i].y - leftVec[i].y;
            for (j = 0; j<numpoint; j++) {
                dx2 = rightVec[j].x - leftVec[j].x;
                dy2 = rightVec[j].y - leftVec[j].y;
                distflag = ((dx1 - dx2)*(dx1 - dx2) + (dy1 - dy2)*(dy1 - dy2))>5.0 ? 0 : 1;//distance too large, the score is 0;

                if ((norm[i]<GRP_POINT_INTERVAL) && (norm[j]<GRP_POINT_INTERVAL))
                    n2 = 1;    // both nearly static: fully consistent
                else
                    n2 = nx[i] * nx[j] + ny[i] * ny[j];  // direction cosine

                sumi[i] = sumi[i] + n2*status[j] * distflag;//compute score, score=0 when distance too large or status=0
            }
        }
    }

    // threshold relative to the best score
    float max = 0.0;
    for (i = 0; i < numpoint; i++) {
        if (sumi[i] >= max) {
            max = sumi[i];
        }
    }
    th = GRP_POINT_SCORE * max;
    //only score >= th is valid
    for (i = 0; i<numpoint; i++) {
        if (sumi[i] >= th) {
            rightVecOut.push_back(rightVec[i]);
            leftVecOut.push_back(leftVec[i]);
        }
    }
}


void tcl_depth_video::read_yuv_file(uint8_t * y_plane,
                   uint8_t * uv_plane,
                   uint64_t y_size,
                   uint64_t uv_size,
                   const char * file){
    // Reads a raw semi-planar YUV file: y_size bytes into y_plane followed
    // by uv_size bytes into uv_plane. Exits the process on open failure or
    // on a short read (planes must be pre-allocated by the caller).
    std::fstream fs(file, std::ios::in|std::ios::binary);
	if (!fs.is_open()) {
		VIDEO_DEPTH_LOGI("failed to open file [%s]!", file);
		FAIL_EXIT(-1);
	}
	fs.read((char*)y_plane, y_size);
	uint64_t n_read = (uint64_t)fs.gcount();
	if (n_read < y_size) {
		// BUG FIX: message said "bits" for a byte count, and %lu is wrong for
		// uint64_t on LLP64/32-bit targets; cast to unsigned long long + %llu.
		VIDEO_DEPTH_LOGE("read_yuv_file failed: expected %llu bytes, got %llu!",
		                 (unsigned long long)y_size, (unsigned long long)n_read);
		FAIL_EXIT(-1);
	}
	fs.read((char*)uv_plane, uv_size);
	n_read = (uint64_t)fs.gcount();
	if (n_read < uv_size) {
		VIDEO_DEPTH_LOGE("read_yuv_file failed: expected %llu bytes, got %llu!",
		                 (unsigned long long)uv_size, (unsigned long long)n_read);
		FAIL_EXIT(-1);
	}
    fs.close();
}


LPTCL_IMAGE tcl_depth_video::create_yuv_image(int w, int h, int pitch, uint32_t mode){
    // Allocates a semi-planar YUV 4:2:0 image descriptor and its two planes
    // (Y: w*h bytes, interleaved chroma: w*h/2 bytes). Caller frees with
    // free_yuv_image().
    LPTCL_IMAGE yuv = new TCL_IMAGE();
    yuv->i32Width = w;
    yuv->i32Height = h;
    yuv->pi32Pitch[0] = pitch;
    yuv->pi32Pitch[1] = pitch;
    yuv->u32PixelArrayFormat = mode;
    // BUG FIX: (uint64_t)(w*h) multiplied in 32-bit int before the cast and
    // could overflow for large frames; widen the operands first.
    uint64_t y_size = (uint64_t)w * (uint64_t)h;
    uint64_t uv_size = y_size >> 1; // 4:2:0 chroma is half the luma size
    yuv->ppu8Plane[0] = new uint8_t[y_size]; // y plane
    yuv->ppu8Plane[1] = new uint8_t[uv_size]; // uv plane
    return yuv;
}


void tcl_depth_video::load_yuv_data(LPTCL_IMAGE yuv, const char * file){
    // Fills an already-allocated YUV image (see create_yuv_image) with raw
    // data from `file`. Exits on read failure (via read_yuv_file).
    // BUG FIX: widen before multiplying to avoid 32-bit int overflow for
    // large frames (was (uint64_t)(w*h)).
    uint64_t y_size = (uint64_t)yuv->i32Width * (uint64_t)yuv->i32Height;
    uint64_t uv_size = y_size >> 1;
    read_yuv_file(yuv->ppu8Plane[0], yuv->ppu8Plane[1],
                  y_size, uv_size, file);
}


void tcl_depth_video::free_yuv_image(LPTCL_IMAGE yuv){
    // Releases both pixel planes and the image descriptor itself.
    // ROBUSTNESS FIX: tolerate a NULL descriptor (no-op) so double-free /
    // unallocated paths in callers cannot crash here.
    if (!yuv) return;
    delete [] yuv->ppu8Plane[0]; // free y plane (delete[] NULL is a no-op)
    delete [] yuv->ppu8Plane[1]; // free uv plane
    yuv->ppu8Plane[0] = NULL;
    yuv->ppu8Plane[1] = NULL;
    delete yuv;
}


void tcl_depth_video::copy_yuv_image(LPTCL_IMAGE dst, const LPTCL_IMAGE src){
    // Deep-copies src into dst, reallocating dst's planes only when the
    // dimensions differ (or a destination plane is missing).
    // BUG FIX: plane sizes were computed as (uint64_t)(w*h), overflowing in
    // 32-bit int arithmetic before the cast; widen the operands first.
    uint64_t y_size = (uint64_t)src->i32Width * (uint64_t)src->i32Height;
    uint64_t uv_size = y_size >> 1;
    // ROBUSTNESS FIX: also reallocate when a plane pointer is NULL —
    // previously a same-sized dst with unallocated planes hit memcpy(NULL,...).
    if (dst->i32Width!=src->i32Width || dst->i32Height!=src->i32Height ||
        !dst->ppu8Plane[0] || !dst->ppu8Plane[1]) {
        dst->i32Width = src->i32Width;
        dst->i32Height = src->i32Height;
        // delete[] on NULL is well-defined, so no guard is needed.
        delete[] dst->ppu8Plane[0];
        delete[] dst->ppu8Plane[1];
        dst->ppu8Plane[0] = new uint8_t[y_size]; // y plane
        dst->ppu8Plane[1] = new uint8_t[uv_size]; // uv plane
    }
    dst->pi32Pitch[0] = src->pi32Pitch[0];
    dst->pi32Pitch[1] = src->pi32Pitch[1];
    dst->u32PixelArrayFormat = src->u32PixelArrayFormat;
    memcpy(dst->ppu8Plane[0], src->ppu8Plane[0], y_size);
    memcpy(dst->ppu8Plane[1], src->ppu8Plane[1], uv_size);
}


void tcl_depth_video::sample_Y_3x3(cv::Mat & dst, const LPTCL_IMAGE src){
    // Decimate the Y plane by 3x3: the output keeps the sample at offset
    // (1,1) of every 3x3 cell. dst is (pitch/3 x height/3), CV_8UC1.
    const int pitch = src->pi32Pitch[0];
    const int height = src->i32Height;
    if ((pitch % 3) || (height % 3)) {
        VIDEO_DEPTH_LOGE("sample_Y_3x3 assertion failed: src size invalid");
        exit(-1);
    }
    const int outW = pitch / 3;
    const int outH = height / 3;
    if (dst.size().width != outW || dst.size().height != outH) {
        dst.create(cv::Size(outW, outH), CV_8UC1);
    }
    if (!dst.isContinuous()) {
        VIDEO_DEPTH_LOGE("sample_on_yuv assertion failed: dst memory discontinuous");
        exit(-1);
    }
    uchar * out = dst.data;
    const uchar * luma = src->ppu8Plane[0];
    // NOTE(review): rows are indexed with i32Width while the divisibility
    // check uses pi32Pitch[0] — presumably pitch == width here; confirm.
    const int stride = src->i32Width;
    for (int row = 0; row < outH; ++row) {
        const uchar * srcRow = luma + (3 * row + 1) * stride;
        for (int col = 0; col < outW; ++col) {
            *out++ = srcRow[3 * col + 1];
        }
    }
}


void tcl_depth_video::sample_Y_4x4(cv::Mat & dst, const LPTCL_IMAGE src) {
	// Decimates the Y plane by 4x4: the output keeps the sample at offset
	// (1,1) of every 4x4 cell. dst is (pitch/4 x height/4), CV_8UC1.
	int w = src->pi32Pitch[0];
	int h = src->i32Height;
	if ((w % 4) || (h % 4)) {
		// BUG FIX: message was copy-pasted from sample_Y_3x3.
		VIDEO_DEPTH_LOGE("sample_Y_4x4 assertion failed: src size invalid");
		exit(-1);
	}
	int w_ = w / 4;
	int h_ = h / 4;
	if (dst.size().width != w_ || dst.size().height != h_) {
		dst.create(cv::Size(w_, h_), CV_8UC1);
	}
	if (!dst.isContinuous()) {
		VIDEO_DEPTH_LOGE("sample_on_yuv assertion failed: [dst] memory discontinuous");
		exit(-1);
	}
	uchar * pDst = dst.data;
	uchar * pSrc = src->ppu8Plane[0];
	// NOTE(review): rows are indexed with i32Width while the divisibility
	// check uses pi32Pitch[0] — presumably pitch == width here; confirm.
	int w_f = src->i32Width;
	for (int i = 0; i<h_; ++i) {
		for (int j = 0; j<w_; ++j) {
			*pDst = pSrc[(4 * i + 1) * w_f + (4 * j + 1)];
			pDst++;
		}
	}
}


void tcl_depth_video::sample_Y_2x2(cv::Mat & dst, const LPTCL_IMAGE src){
    // Decimate the Y plane by 2x2: the output keeps the sample at offset
    // (1,1) of every 2x2 cell. dst is (pitch/2 x height/2), CV_8UC1.
    const int pitch = src->pi32Pitch[0];
    const int height = src->i32Height;
    if ((pitch % 2) || (height % 2)) {
        VIDEO_DEPTH_LOGE("sample_Y_2x2 assertion failed: src size invalid");
		FAIL_EXIT(-1);
    }
    const int outW = pitch / 2;
    const int outH = height / 2;
    if (dst.size().width != outW || dst.size().height != outH || dst.channels() != 1) {
        dst.create(cv::Size(outW, outH), CV_8UC1); // single channel
    }
    if (!dst.isContinuous()) {
        VIDEO_DEPTH_LOGE("sample_on_yuv assertion failed: [dst] memory discontinuous");
        exit(-1);
    }
    uchar * out = dst.data;
    const uchar * luma = src->ppu8Plane[0];
    const int stride = src->i32Width;
    for (int row = 0; row < outH; ++row) {
        const uchar * srcRow = luma + (2 * row + 1) * stride;
        for (int col = 0; col < outW; ++col) {
            *out++ = srcRow[2 * col + 1];
        }
    }
}


void tcl_depth_video::sample_NV21_2x2(
	cv::Mat & dst_Y, 
	cv::Mat & dst_U, 
	cv::Mat & dst_V, 
	const LPTCL_IMAGE src
	) {
	// Split an NV21 image into half-resolution planar Y, U and V mats:
	// Y is decimated 2x2 (sample at cell offset (1,1)); the interleaved
	// chroma plane is de-interleaved one pair per output pixel.
	if (src->u32PixelArrayFormat != TCL_PAF_NV21) {
		VIDEO_DEPTH_LOGE("YUV Format invalid: expected NV21!");
		FAIL_EXIT(-1);
	}
	const int strideY = src->pi32Pitch[0];
	const int strideUV = src->pi32Pitch[1];
	const int width = src->i32Width;
	const int height = src->i32Height;
	if ((width % 2) || (height % 2)) {
		VIDEO_DEPTH_LOGE("sample_YUV_2x2 assertion failed: src size invalid");
		FAIL_EXIT(-1);
	}
	const int halfW = width / 2;
	const int halfH = height / 2;
	cv::Mat * planes[3] = { &dst_Y, &dst_U, &dst_V };
	for (int p = 0; p < 3; ++p) {
		cv::Mat & m = *planes[p];
		if (m.size().width != halfW || m.size().height != halfH || m.channels() != 1) {
			m.create(cv::Size(halfW, halfH), CV_8UC1); // single channel
		}
	}
	if (!dst_Y.isContinuous() || !dst_U.isContinuous() || !dst_V.isContinuous()) {
		VIDEO_DEPTH_LOGE("sample_on_yuv assertion failed: [dst] memory discontinuous");
		FAIL_EXIT(-1);
	}
	uchar * outY = dst_Y.data;
	uchar * outU = dst_U.data;
	uchar * outV = dst_V.data;
	const uchar * srcY = src->ppu8Plane[0];
	const uchar * srcVU = src->ppu8Plane[1];

	// arrangement: plane#1: YYYY...(full height) plane#2: VUVU...(half height)
	for (int row = 0; row < halfH; ++row) {
		const uchar * rowY = srcY + (2 * row + 1) * strideY;
		const uchar * rowVU = srcVU + row * strideUV;
		for (int col = 0; col < halfW; ++col) {
			outY[col] = rowY[2 * col + 1];
			outV[col] = rowVU[2 * col];     // NV21 stores V first
			outU[col] = rowVU[2 * col + 1];
		}
		outY += halfW;
		outU += halfW;
		outV += halfW;
	}
}


void tcl_depth_video::sample_NV12_2x2(
	cv::Mat & dst_Y,
	cv::Mat & dst_U,
	cv::Mat & dst_V,
	const LPTCL_IMAGE src
	) {
	// Splits an NV12 image into half-resolution planar Y, U and V mats:
	// Y is decimated 2x2 (sample at cell offset (1,1)); the interleaved
	// UV plane is de-interleaved one pair per output pixel.
	if (src->u32PixelArrayFormat != TCL_PAF_NV12) {
		// BUG FIX: message said "expected NV21" in the NV12 function.
		VIDEO_DEPTH_LOGE("YUV Format invalid: expected NV12!");
		FAIL_EXIT(-1);
	}
	int pitchY = src->pi32Pitch[0];
	int pitchUV = src->pi32Pitch[1];
	int w = src->i32Width;
	int h = src->i32Height;
	if ((w % 2) || (h % 2)) {
		VIDEO_DEPTH_LOGE("sample_YUV_2x2 assertion failed: src size invalid");
		FAIL_EXIT(-1);
	}
	int w_ = w / 2;
	int h_ = h / 2;
	if (dst_Y.size().width != w_ || dst_Y.size().height != h_ || dst_Y.channels() != 1) {
		dst_Y.create(cv::Size(w_, h_), CV_8UC1); // single channel
	}
	if (dst_U.size().width != w_ || dst_U.size().height != h_ || dst_U.channels() != 1) {
		dst_U.create(cv::Size(w_, h_), CV_8UC1); // single channel
	}
	if (dst_V.size().width != w_ || dst_V.size().height != h_ || dst_V.channels() != 1) {
		dst_V.create(cv::Size(w_, h_), CV_8UC1); // single channel
	}
	if (!dst_Y.isContinuous() || !dst_U.isContinuous() || !dst_V.isContinuous()) {
		VIDEO_DEPTH_LOGE("sample_on_yuv assertion failed: [dst] memory discontinuous");
		FAIL_EXIT(-1);
	}
	uchar * pDstY = dst_Y.data;
	uchar * pDstU = dst_U.data;
	uchar * pDstV = dst_V.data;
	uchar * pSrcY = src->ppu8Plane[0];
	uchar * pSrcUV = src->ppu8Plane[1];

	// arrangement: plane#1: YYYY...(full height) plane#2: UVUV...(half height)
	for (int i = 0; i<h_; ++i) {
		for (int j = 0; j<w_; ++j) {
			*pDstY = pSrcY[(2 * i + 1) * pitchY + (2 * j + 1)];
			*pDstU = pSrcUV[i * pitchUV + 2 * j];     // NV12 stores U first
			*pDstV = pSrcUV[i * pitchUV + 2 * j + 1];
			pDstY++;
			pDstU++;
			pDstV++;
		}
	}
}



#ifndef INT2UCHAR
// Clamp an int to the uchar range [0, 255].
// BUG FIX: the original ((x > 0 ? x : 0) < 255 ? x : 255) returned the
// UNCLAMPED x for negative inputs (the inner clamp result was only used
// in the comparison, so 0 < 255 selected the raw negative x, which then
// wrapped when stored in a uchar). Also parenthesize the argument.
#define INT2UCHAR(x) ((x) < 0 ? 0 : ((x) > 255 ? 255 : (x)))
#endif


void tcl_depth_video::YUV2RGB_simple(cv::Mat & rgb, cv::Mat & Y, const cv::Mat & U, const cv::Mat & V) {
	// Converts full-resolution planar 8-bit Y/U/V to an interleaved BGR
	// mat using an integer-only approximation of the YUV->RGB transform
	// (coefficients scaled by 256, see formulas below).
	if (!Y.isContinuous() || !U.isContinuous() || !V.isContinuous()) {
		VIDEO_DEPTH_LOGE("sample_on_yuv assertion failed: dst memory discontinuous");
		FAIL_EXIT(-1);
	}

	// BUG FIX: the reallocation guard compared rgb.type() against CV_8UC1,
	// so a correctly-sized CV_8UC3 output never matched and create() was
	// invoked on every call; the output is 3-channel (CV_8UC3).
	if (rgb.type() != CV_8UC3 || rgb.size() != Y.size()) {
		rgb.create(Y.size(), CV_8UC3);
	}

	uchar * pY = Y.data;
	uchar * pU = U.data;
	uchar * pV = V.data;
	uchar * pRGB = rgb.data;

	int w = Y.size().width;
	int h = Y.size().height;

	// R = Y + ((360 * (V - 128))>>8) ; 
	// G = Y - (((88 * (U - 128) + 184 * (V - 128))) >> 8);
	// B = Y + ((455 * (U - 128)) >> 8);
	for (int i = 0; i < h; ++i) {
		for (int j = 0; j < w; ++j) {
			int idx = i * w + j;
			int y_, u_, v_;
			y_ = pY[idx];
			u_ = pU[idx];
			v_ = pV[idx];
			idx *= 3; // 3 interleaved channels per pixel
			// BGR channel order (OpenCV convention)
			pRGB[idx] = INT2UCHAR(y_ + ((455 * (u_ - 128)) >> 8));
			pRGB[idx + 1] = INT2UCHAR(y_ - (((88 * (u_ - 128) + 184 * (v_ - 128))) >> 8));
			pRGB[idx + 2] = INT2UCHAR(y_ + ((360 * (v_ - 128)) >> 8));
		}
	}
}

