#include "fisheye.hpp"
#include "visionLanding.hpp"
#include "INIReader.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#ifndef _WIN32
#include "System.h"
#endif
using namespace cv;
using namespace std;
using namespace xyVision;

xyVision::visionLanding::visionLanding(string configName)
{
    // Load camera calibration, board layout and processing options from the
    // INI config, then precompute per-scale fisheye rectification maps and
    // the Kalman filter used to smooth target locations.
    this->setParameters(configName);
    frameCounter = 0;        // total frames processed so far
    frameCounterTarget = 0;  // frames in which the target was found
    num_cont_det = 0;        // current run of consecutive detections
    isDetected = true;
    // z = -1 marks "no previous location available yet".
    pre_loc = Point3f(0.0f, 0.0f, -1.0f);
    thre_cont_frame = 3;     // consecutive detections required before locality checks apply

    //Mat P;
    //xyVision::fisheye::estimateNewCameraMatrixForUndistortRectify(cameraInfo.cameraMatrix, cameraInfo.distortCoeff,
    //    cameraInfo.imageSize, cv::Matx33d::eye(), P, 0.8, cameraInfo.newSize, 1);
    // multiple scales: a rectification map is prebuilt for each working scale so
    // determineScale() can switch resolution per frame without recomputing maps.
    this->proInfo.scale_ini = this->proInfo.scale;
    proInfo.scales.push_back(1.0f);
    proInfo.scales.push_back(0.9f);
    proInfo.scales.push_back(0.7f);
    proInfo.scales.push_back(0.6f);

    for (int s = 0; s < (int)proInfo.scales.size(); ++s) {
        Mat map1_t, map2_t;
        // Output size and focal length both shrink with the working scale;
        // the principal point is placed at the (scaled) image center.
        Size newSize_t = Size((int)floor(cameraInfo.imageSize.width*proInfo.scale_ini*proInfo.scales[s]),
            (int)floor(cameraInfo.imageSize.height*proInfo.scale_ini*proInfo.scales[s]));
        float f_t = cameraInfo.cameraMatrix(0,0)*proInfo.scale_ini*proInfo.scales[s];
        Matx33f newCameraMatrix_t = Matx33f(f_t, 0.0f,newSize_t.width / 2.0f,
            0.0f, f_t, newSize_t.height / 2.0f,
            0.0f, 0.0f, 1.0f);
        xyVision::fisheye::initUndistortRectifyMap(cameraInfo.cameraMatrix, cameraInfo.distortCoeff,
            cv::Matx33d::eye(), newCameraMatrix_t, newSize_t, CV_16SC2, map1_t, map2_t);
        cameraInfo.map1_scale.push_back(map1_t);
        cameraInfo.map2_scale.push_back(map2_t);
    }
    // Default maps correspond to the full working scale (scales[0] == 1.0).
    cameraInfo.map1 = cameraInfo.map1_scale[0].clone();
    cameraInfo.map2 = cameraInfo.map2_scale[0].clone();


    // Pinhole intrinsics of the rectified image at the initial scale
    // (newSize was computed in setParameters from the same scale).
    cameraInfo.newCameraMatrix = Matx33f(cameraInfo.cameraMatrix(0,0)*proInfo.scale, 0.0f, cameraInfo.newSize.width/2.0f,
        0.0f, cameraInfo.cameraMatrix(0,0)*proInfo.scale, cameraInfo.newSize.height/2.0f,
        0.0f, 0.0f, 1.0f);
    //cout << cameraInfo.newCameraMatrix << endl;
    //xyVision::fisheye::initUndistortRectifyMap(cameraInfo.cameraMatrix, cameraInfo.distortCoeff,
    //    cv::Matx33d::eye(), cameraInfo.newCameraMatrix, cameraInfo.newSize, CV_16SC2, cameraInfo.map1,
    //    cameraInfo.map2);
    // Initialize the Kalman filter: constant-velocity model with
    // state (x, y, z, vx, vy, vz) and measurement (x, y, z).
    // NOTE: the *(Mat_<float>(...)) dereference is the legacy OpenCV 2.x idiom.
    this->KF = KalmanFilter(6, 3, 0);
    this->KF.transitionMatrix = *(Mat_<float>(6,6) << 1,0,0,1,0,0,   0,1,0,0,1,0,  0,0,1,0,0,1,  0,0,0,1,0,0,  0,0,0,0,1,0,  0,0,0,0,0,1);
    setIdentity(this->KF.measurementMatrix);
    setIdentity(this->KF.processNoiseCov, Scalar::all(1e-4));
    setIdentity(this->KF.measurementNoiseCov, Scalar::all(0.5));
    setIdentity(this->KF.errorCovPost, Scalar::all(1e-2));

    // set initial states: height band and detection state start unknown.
    this->height_state = HEIGHT_UNKNOWN;
    this->detect_state = DET_UNKONOWN;
}

void xyVision::visionLanding::setParameters(string configName)
{
    // Read all runtime configuration from an INI file (camera calibration
    // path, board layout, optional color histograms, optional SLAM files,
    // and processing parameters). The same `section`/`key` pointers are
    // reused across reads, so the order of the blocks below matters.
    INIReader reader(configName);
    string calibConfigName;
    const char* section = "cameraCamConf";
    const char* key = "calibPath";
    calibConfigName = reader.Get(section, key, " ");
    cv::FileStorage fs(calibConfigName, cv::FileStorage::READ);

    // Camera intrinsics and fisheye distortion from the calibration YAML/XML.
    Mat temp;
    fs["camera_matrix"] >> temp;
    cameraInfo.cameraMatrix = Matx33f(temp);
    fs["distortion_coefficients"] >> temp;
    temp = temp.t();  // stored as a column vector; transpose to 1x4
    cameraInfo.distortCoeff = Matx14f(temp);

    key = "imageWidth";
    int width = reader.GetInteger(section, key, -1);
    key = "imageHeight";
    int height = reader.GetInteger(section, key, -1);
    cameraInfo.imageSize = Size(width, height);

    section = "Board";
    key = "version";
    this->cameraInfo.boardVersion = reader.Get(section, key, " ");

    // Optional histogram-based color models. " " is the reader's default,
    // so its presence means the key was absent and plain thresholding is used.
    key = "histGT_red";
    string histGTPath = reader.Get(section, key, " ");
    if (histGTPath != " ")
    {
        this->binaryMethod = "hist";
        // Each color stores three 2-D histograms, ordered ab (Lab), uv (Luv), hs (HSV).
        histGT_red.clear();
        fs = cv::FileStorage(histGTPath, cv::FileStorage::READ);
        fs["hist_ab"] >> temp;
        this->histGT_red.push_back(temp.clone());
        fs["hist_uv"] >> temp;
        this->histGT_red.push_back(temp.clone());
        fs["hist_hs"] >> temp;
        this->histGT_red.push_back(temp.clone());

        histGT_blue.clear();
        key = "histGT_blue";
        histGTPath = reader.Get(section, key, " ");
        fs = cv::FileStorage(histGTPath, cv::FileStorage::READ);
        fs["hist_ab"] >> temp;
        this->histGT_blue.push_back(temp.clone());
        fs["hist_uv"] >> temp;
        this->histGT_blue.push_back(temp.clone());
        fs["hist_hs"] >> temp;
        this->histGT_blue.push_back(temp.clone());
    }
    else
    {
        this->binaryMethod = "threshold";
    }

    // Physical layout of the landing board: per-square centers and sizes,
    // read from numbered keys x1/y1/width1/height1, x2/..., etc.
    // NOTE: `confidence` is resized here but not populated in this function.
    section = "largeBoard";
    key = "num";
    this->largeBoardInfo.numSquareBoard = reader.GetInteger(section, key, -1);
    this->largeBoardInfo.centers.resize(this->largeBoardInfo.numSquareBoard);
    this->largeBoardInfo.widths.resize(this->largeBoardInfo.numSquareBoard);
    this->largeBoardInfo.heights.resize(this->largeBoardInfo.numSquareBoard);
    this->largeBoardInfo.confidence.resize(this->largeBoardInfo.numSquareBoard);
    char dy_key[20];
    for (int i = 1; i <= this->largeBoardInfo.numSquareBoard; ++i)
    {
        sprintf(dy_key, "x%d", i);
        this->largeBoardInfo.centers[i-1].x = (float)reader.GetReal(section, dy_key, -1.0f);
        sprintf(dy_key, "y%d", i);
        this->largeBoardInfo.centers[i-1].y = (float)reader.GetReal(section, dy_key, -1.0f);
        sprintf(dy_key, "width%d", i);
        this->largeBoardInfo.widths[i-1] = (float)reader.GetReal(section, dy_key, -1.0f);
        sprintf(dy_key, "height%d", i);
        this->largeBoardInfo.heights[i-1] = (float)reader.GetReal(section, dy_key, -1.0f);
    }
    #ifndef _WIN32
    // Optional ORB-SLAM2 backend (non-Windows only); enabled when a
    // vocabulary file is configured. `offset` is a homogeneous 4x1 vector
    // added to SLAM coordinates — presumably a map-to-world translation;
    // TODO confirm against the consumer of slamInfo.offset.
    section = "SLAM";
    key = "voc_file";
    string voc_file = reader.Get(section, key, " ");
    if (voc_file != " ")
    {
        section = "SLAM";
        key = "voc_file";
        slamInfo.voc_file = reader.Get(section, key, " ");
        key = "settings_file";
        slamInfo.settings_file = reader.Get(section, key, " ");
        key = "map_file";
        slamInfo.map_file = reader.Get(section, key, " ");
        this->SLAM = new ORB_SLAM2::System(slamInfo.voc_file, slamInfo.settings_file,
            ORB_SLAM2::System::MONOCULAR, false, true, false, slamInfo.map_file);
        slamInfo.ts = 0.;
        key = "scale";
        slamInfo.scale = reader.GetReal(section, key, 0.f);
        this->slamInfo.offset = Mat(4, 1, CV_32F);
        key = "offsetX";
        this->slamInfo.offset.at<float>(0) = reader.GetReal(section, key, 0.f);
        key = "offsetY";
        this->slamInfo.offset.at<float>(1) = reader.GetReal(section, key, 0.f);
        key = "offsetZ";
        this->slamInfo.offset.at<float>(2) = reader.GetReal(section, key, 0.f);
        this->slamInfo.offset.at<float>(3) = 1.0f;
    }
    #endif
    // Processing parameters: working scale and height band limits (mm).
    section = "processConf";
    key = "scale";
    this->proInfo.scale = (float)reader.GetReal(section, key, -1.0);
    this->cameraInfo.newSize = Size(int(width*this->proInfo.scale), int(height*this->proInfo.scale));
    key = "max_height";
    this->proInfo.max_height = (float)reader.GetReal(section, key, -1.0f);
    key = "low_height";
    this->proInfo.low_height = (float)reader.GetReal(section, key, -1.0f);
    key = "min_height";
    this->proInfo.min_height = (float)reader.GetReal(section, key, -1.0f);

    // Echo the effective configuration for debugging.
    cout << "camera matrix " << endl;
    cout << this->cameraInfo.cameraMatrix << endl;
    cout << "image size" << endl;
    cout << this->cameraInfo.imageSize.width << " " << this->cameraInfo.imageSize.height << endl;
    cout << "new image size" << endl;
    cout << this->cameraInfo.newSize.width << " " << this->cameraInfo.newSize.height << endl;
}

void xyVision::visionLanding::setPlaneState(int s)
{
    // Map the raw integer flag to the internal plane state:
    // 0 -> LANDING, 1 -> TAKE_OFF; any other value leaves the state unchanged.
    switch (s)
    {
    case 0:
        plane_state = xyVision::LANDING;
        break;
    case 1:
        plane_state = xyVision::TAKE_OFF;
        break;
    default:
        break;
    }
}

void xyVision::visionLanding::adjustImg(const Mat& input, Mat& output)
{
    // Rectify the raw fisheye frame using the precomputed default maps
    // (cameraInfo.map1/map2, built in the constructor). Pixels that map
    // outside the source image are filled with the constant border (black).
    cv::remap(input, output, cameraInfo.map1, cameraInfo.map2, INTER_LINEAR, BORDER_CONSTANT);
}

void xyVision::visionLanding::compute_hsv_lab_luv()
{
    // Convert the rectified BGR frame into the three color spaces used by
    // the color-gating stages; results are cached in member images so the
    // binarize* helpers can reuse them without reconverting.
    cvtColor(this->rectifiedImg, this->img_hsv, CV_BGR2HSV);
    cvtColor(this->rectifiedImg, this->img_lab, CV_BGR2Lab);
    cvtColor(this->rectifiedImg, this->img_luv, CV_BGR2Luv);
}

// Threshold the cached Lab image into a 0/255 mask: 255 where every channel
// of a pixel lies within [lb, ub].
void xyVision::visionLanding::binarizeTarget_LAB(Mat& bi, const Scalar lb, const Scalar ub)
{
    cv::inRange(this->img_lab, lb, ub, bi);
}
// Threshold the cached HSV image into a 0/255 mask: 255 where every channel
// of a pixel lies within [lb, ub].
void xyVision::visionLanding::binarizeTarget_HSV(Mat& bi, const Scalar lb, const Scalar ub)
{
    cv::inRange(this->img_hsv, lb, ub, bi);
}

void xyVision::visionLanding::binarizeRed(Mat&bi, const vector<Scalar> lb, const vector<Scalar> ub)
{
    // Binarize the red board color. Bounds are ordered LAB, LUV, HSV;
    // red only uses the LAB and LUV gates.
    // @param bi  output 8-bit mask (255 where the pixel passes both gates)
    // @param lb  per-color-space lower bounds (LAB, LUV, HSV)
    // @param ub  per-color-space upper bounds (LAB, LUV, HSV)
    Mat bi_0, bi_1;
    cv::inRange(this->img_lab, lb[0], ub[0], bi_0);
    cv::inRange(this->img_luv, lb[1], ub[1], bi_1);

    // inRange yields 0/255 masks, so a bitwise AND is equivalent to the
    // previous saturating element-wise multiply, but cheaper and clearer.
    cv::bitwise_and(bi_0, bi_1, bi);
}

void xyVision::visionLanding::binarizeBlue(Mat&bi, const vector<Scalar> lb, const vector<Scalar> ub)
{
    // Binarize the blue board color using all three gates (LAB, LUV, HSV).
    // @param bi  output 8-bit mask (255 where the pixel passes every gate)
    // @param lb  per-color-space lower bounds (LAB, LUV, HSV)
    // @param ub  per-color-space upper bounds (LAB, LUV, HSV)
    Mat bi_0, bi_1, bi_2;
    cv::inRange(this->img_lab, lb[0], ub[0], bi_0);
    cv::inRange(this->img_luv, lb[1], ub[1], bi_1);
    cv::inRange(this->img_hsv, lb[2], ub[2], bi_2);

    // inRange yields 0/255 masks, so chained bitwise ANDs are equivalent
    // to the previous saturating multiplies, but cheaper and clearer.
    cv::bitwise_and(bi_0, bi_1, bi_0);
    cv::bitwise_and(bi_0, bi_2, bi);
}

void xyVision::visionLanding::binarizeGreen(Mat&bi, const vector<Scalar> lb, const vector<Scalar> ub)
{
    // Binarize the green board color: the caller-supplied HSV gate (lb[0]/ub[0])
    // combined with a fixed LAB lightness gate (L >= 100) that rejects dark pixels.
    // Note: only the first entry of lb/ub is used for green.
    // @param bi  output 8-bit mask (255 where the pixel passes both gates)
    Mat bi_0;
    cv::inRange(this->img_hsv, lb[0], ub[0], bi_0);
    Mat bi_1;
    cv::inRange(this->img_lab, Scalar(100, 0, 0), Scalar(255, 255, 255), bi_1);
    // 0/255 masks: bitwise AND is equivalent to the previous saturating multiply.
    cv::bitwise_and(bi_0, bi_1, bi);
}

void xyVision::visionLanding::binarizeYellow(Mat&bi, const vector<Scalar> lb, const vector<Scalar> ub)
{
    // Binarize the yellow board color using all three gates (LAB, LUV, HSV).
    // @param bi  output 8-bit mask (255 where the pixel passes every gate)
    // @param lb  per-color-space lower bounds (LAB, LUV, HSV)
    // @param ub  per-color-space upper bounds (LAB, LUV, HSV)
    Mat bi_0, bi_1, bi_2;
    cv::inRange(this->img_lab, lb[0], ub[0], bi_0);
    cv::inRange(this->img_luv, lb[1], ub[1], bi_1);
    cv::inRange(this->img_hsv, lb[2], ub[2], bi_2);

    // inRange yields 0/255 masks, so chained bitwise ANDs are equivalent
    // to the previous saturating multiplies, but cheaper and clearer.
    cv::bitwise_and(bi_0, bi_1, bi_0);
    cv::bitwise_and(bi_0, bi_2, bi);
}

void xyVision::visionLanding::binarizePurple(Mat&bi, const vector<Scalar> lb, const vector<Scalar> ub)
{
    // Binarize the purple board color using all three gates (LAB, LUV, HSV).
    // @param bi  output 8-bit mask (255 where the pixel passes every gate)
    // @param lb  per-color-space lower bounds (LAB, LUV, HSV)
    // @param ub  per-color-space upper bounds (LAB, LUV, HSV)
    Mat bi_0, bi_1, bi_2;
    cv::inRange(this->img_lab, lb[0], ub[0], bi_0);
    cv::inRange(this->img_luv, lb[1], ub[1], bi_1);
    cv::inRange(this->img_hsv, lb[2], ub[2], bi_2);

    // inRange yields 0/255 masks, so chained bitwise ANDs are equivalent
    // to the previous saturating multiplies, but cheaper and clearer.
    cv::bitwise_and(bi_0, bi_1, bi_0);
    cv::bitwise_and(bi_0, bi_2, bi);
}

void xyVision::visionLanding::binarizeFromHist(const vector<Mat>& histGT, const vector<Mat>& img, Mat& bi, float thre)
{
    // Histogram back-projection based binarization.
    // For each color space, every pixel's 2-channel value is quantized to a
    // histogram bin and the bin's learned probability is accumulated into a
    // per-pixel score; pixels whose summed score exceeds `thre` become 255.
    // @param histGT  learned 2-D histograms, ordered ab (Lab), uv (Luv), hs (HSV)
    // @param img     2-channel images in the same order: ab, uv, hs
    // @param bi      output 8-bit mask
    // @param thre    score threshold
    Mat score = cv::Mat::zeros(cv::Size(img[0].cols, img[0].rows), CV_32F);
    Size histSize = histGT[0].size();
    int h_range = 180, other_range = 255;  // OpenCV 8-bit H spans [0,180); other channels [0,255]
    vector<Size> scales(3);
    // Per-channel bin widths (value range / bin count, rounded up).
    // NOTE(review): every line below divides by histSize.width, including the
    // heights and the hs entry — looks like a copy-paste; benign only if all
    // histograms are square with identical bin counts. TODO confirm.
    scales[0].width = other_range / histSize.width + 1;
    scales[0].height = other_range / histSize.width + 1;
    scales[1].width = other_range / histSize.width + 1;
    scales[1].height = other_range / histSize.width + 1;
    scales[2].width = h_range / histSize.width + 1;
    scales[2].height = other_range / histSize.width + 1;
    for (int cs = 0; cs < (int)histGT.size(); ++cs)
    {
        // img[cs] is the 2-channel image in a specific color space.
        Mat score_cs;
        vector<Mat> imgs_of_cs;
        split(img[cs], imgs_of_cs);
        imgs_of_cs[0].convertTo(imgs_of_cs[0], CV_32FC1);
        imgs_of_cs[1].convertTo(imgs_of_cs[1], CV_32FC1);
        // Quantize to bin indices: convertTo's round-to-nearest combined with
        // the -0.5 offset effectively floors value/binWidth. The round trip
        // through CV_8U snaps the floats to integral bin indices.
        imgs_of_cs[0] = imgs_of_cs[0] / scales[cs].width - 0.5f;
        imgs_of_cs[1] = imgs_of_cs[1] / scales[cs].height - 0.5f;
        imgs_of_cs[0].convertTo(imgs_of_cs[0], CV_8UC1);
        imgs_of_cs[1].convertTo(imgs_of_cs[1], CV_8UC1);
        imgs_of_cs[0].convertTo(imgs_of_cs[0], CV_32FC1);
        imgs_of_cs[1].convertTo(imgs_of_cs[1], CV_32FC1);
        //for (int r = 0; r < img[cs].rows; ++r)
        //{
        //    float* p0 = imgs_of_cs[0].ptr<float>(r);
        //    float* p1 = imgs_of_cs[1].ptr<float>(r);
        //    for (int c = 0; c < img[cs].cols; ++c)
        //    {
        //        p0[c] = (float)floor(p0[c] / scales[cs].width);
        //        p1[c] = (float)floor(p1[c] / scales[cs].height);
        //    }
        //}
        //float* p0 = imgs_of_cs[0].ptr<float>(0);
        //float* p1 = imgs_of_cs[1].ptr<float>(0);
        //for (int r = 0; r < img[cs].rows; ++r)
        //{
        //    for (int c = 0; c< img[cs].cols; ++c)
        //    {
        //        p0[r+c*img[cs].rows] = (float)floor(p0[r+c*img[cs].rows] / scales[cs].width);
        //        p1[r+c*img[cs].rows] = (float)floor(p1[r+c*img[cs].rows] / scales[cs].height);
        //    }
        //}
        // Vectorized histogram lookup: treat the bin indices as remap
        // coordinates into histGT[cs] (map1 = column index, map2 = row index),
        // equivalent to the commented per-pixel at<float>(v1, v2) loop below.
        cv::remap(histGT[cs], score_cs, imgs_of_cs[1], imgs_of_cs[0], INTER_NEAREST);
        score += score_cs;
        //for(int r = 0; r < score.rows; ++r)
        //{
        //    for(int c = 0;  c< score.cols; ++c)
        //    {
        //        int v1 = img[cs].at<cv::Vec2b>(r,c)[0] / scales[cs].width;
        //        int v2 = img[cs].at<cv::Vec2b>(r,c)[1] / scales[cs].height;
        //        score.at<float>(r, c) += histGT[cs].at<float>(v1, v2);
        //    }
        //}
    }
    threshold(score, bi, thre, 255, THRESH_BINARY);
    bi.convertTo(bi, CV_8UC1);
}

void xyVision::visionLanding::select_channels(const Mat& img_lab_, const Mat& img_luv_, const Mat& img_hsv_,
    Mat& img_ab, Mat& img_uv, Mat& img_hs)
{
    vector<Mat> imgs_in_colorspaces;
    vector<Mat> full_channels;
    vector<Mat> sel_channels;

    cv::split(img_lab_, full_channels);
    sel_channels.push_back(full_channels[1]);
    sel_channels.push_back(full_channels[2]);
    cv::merge(sel_channels, img_ab);
    full_channels.clear();sel_channels.clear();

    cv::split(img_luv_, full_channels);
    sel_channels.push_back(full_channels[1]);
    sel_channels.push_back(full_channels[2]);
    cv::merge(sel_channels, img_uv);
    full_channels.clear();sel_channels.clear();

    cv::split(img_hsv_, full_channels);
    sel_channels.push_back(full_channels[0]);
    sel_channels.push_back(full_channels[1]);
    cv::merge(sel_channels, img_hs);

}
void xyVision::visionLanding::determineScale(const Mat& input, Mat& output)
{
    if ((this->detect_state == CONSIST_DET || this->detect_state == PRE_DET) && this->pre_loc.z < 2000.0f && this->pre_loc.z >= 1500.0f)
    {
        proInfo.scale = proInfo.scale_ini * proInfo.scales[1];
        cameraInfo.newSize = Size((int)floor(cameraInfo.imageSize.width * proInfo.scale),
            (int)floor(cameraInfo.imageSize.height * proInfo.scale));
        cameraInfo.newCameraMatrix = Matx33f(cameraInfo.cameraMatrix(0,0)*proInfo.scale, 0.0f, cameraInfo.newSize.width/2.0f,
            0.0f, cameraInfo.cameraMatrix(0,0)*proInfo.scale, cameraInfo.newSize.height/2.0f,
            0.0f, 0.0f, 1.0f);
        cv::remap(input, output, cameraInfo.map1_scale[1], cameraInfo.map2_scale[1], INTER_LINEAR, BORDER_CONSTANT);
        //cameraInfo.map1 = cameraInfo.map1_scale[1].clone();
        //cameraInfo.map2 = cameraInfo.map2_scale[1].clone();
    }
    if ((this->detect_state == CONSIST_DET || this->detect_state == PRE_DET) && this->pre_loc.z < 1500.0f && this->pre_loc.z >= 1000.0f)
    {
        proInfo.scale = proInfo.scale_ini * proInfo.scales[2];
        cameraInfo.newSize = Size((int)floor(cameraInfo.imageSize.width * proInfo.scale),
            (int)floor(cameraInfo.imageSize.height * proInfo.scale));
        cameraInfo.newCameraMatrix = Matx33f(cameraInfo.cameraMatrix(0,0)*proInfo.scale, 0.0f, cameraInfo.newSize.width/2.0f,
            0.0f, cameraInfo.cameraMatrix(0,0)*proInfo.scale, cameraInfo.newSize.height/2.0f,
            0.0f, 0.0f, 1.0f);
        //cameraInfo.map1 = cameraInfo.map1_scale[2].clone();
        //cameraInfo.map2 = cameraInfo.map2_scale[2].clone();
        cv::remap(input, output, cameraInfo.map1_scale[2], cameraInfo.map2_scale[2], INTER_LINEAR, BORDER_CONSTANT);
    }
    if ((this->detect_state == CONSIST_DET || this->detect_state == PRE_DET) && this->pre_loc.z < 1000.0f)
    {
        proInfo.scale = proInfo.scale_ini * proInfo.scales[3];
        cameraInfo.newSize = Size((int)floor(cameraInfo.imageSize.width * proInfo.scale),
            (int)floor(cameraInfo.imageSize.height * proInfo.scale));
        cameraInfo.newCameraMatrix = Matx33f(cameraInfo.cameraMatrix(0,0)*proInfo.scale, 0.0f, cameraInfo.newSize.width/2.0f,
            0.0f, cameraInfo.cameraMatrix(0,0)*proInfo.scale, cameraInfo.newSize.height/2.0f,
            0.0f, 0.0f, 1.0f);
        //cameraInfo.map1 = cameraInfo.map1_scale[3].clone();
        //cameraInfo.map2 = cameraInfo.map2_scale[3].clone();
        cv::remap(input, output, cameraInfo.map1_scale[3], cameraInfo.map2_scale[3], INTER_LINEAR, BORDER_CONSTANT);
    }
    if (this->detect_state == PRE_LOST || this->detect_state == DET_UNKONOWN || this->pre_loc.z >= 2000.0f)
    {
        proInfo.scale = proInfo.scale_ini * proInfo.scales[0];
        cameraInfo.newSize = Size((int)floor(cameraInfo.imageSize.width * proInfo.scale),
            (int)floor(cameraInfo.imageSize.height * proInfo.scale));
        cameraInfo.newCameraMatrix = Matx33f(cameraInfo.cameraMatrix(0,0)*proInfo.scale, 0.0f, cameraInfo.newSize.width/2.0f,
            0.0f, cameraInfo.cameraMatrix(0,0)*proInfo.scale, cameraInfo.newSize.height/2.0f,
            0.0f, 0.0f, 1.0f);
        //cameraInfo.map1 = cameraInfo.map1_scale[0].clone();
        //cameraInfo.map2 = cameraInfo.map2_scale[0].clone();
        cv::remap(input, output, cameraInfo.map1_scale[0], cameraInfo.map2_scale[0], INTER_LINEAR, BORDER_CONSTANT);
    }
}

Point3f xyVision::visionLanding::getCurrentLoc(const Mat& image, const float& rawHeight)
{
    // Main per-frame entry point: rectify the frame, binarize the board
    // colors, and dispatch to the board-version-specific locator.
    // @param image      raw camera frame (BGR)
    // @param rawHeight  external height estimate (mm), used to bound the
    //                   expected target size in the locators
    // @return target location in camera coordinates; (0,0,0) on failure
    //         (isDetected is set to false in that case)
    try{

    this->raw_height = rawHeight;
    this->oriImg = image.clone();
    // to shutdown multi-scale, comment determineScale and use adjustImg
    if (this->cameraInfo.boardVersion == "1.3")
    {
        this->determineScale(image, this->rectifiedImg);
    }
    else if (this->cameraInfo.boardVersion == "slam")
    {
        this->adjustImg(image, this->rectifiedImg);
    }
    //this->adjustImg(image, this->rectifiedImg);
    // NOTE(review): other board versions ("1.1"/"1.2") leave rectifiedImg
    // from the previous frame — presumably those paths are configured to
    // rectify elsewhere; confirm before relying on them.
    vector<Scalar> red_lb, red_ub, blue_lb, blue_ub,
        green_lb, green_ub, purple_lb, purple_ub,
        yellow_lb, yellow_ub, black_lb, black_ub;

    // Hand-tuned per-color gates; each color lists (LAB, LUV, HSV) bounds
    // in the order the binarize* helpers expect.
    Scalar red_lb_lab = Scalar(20, 140, 100);
    Scalar red_ub_lab = Scalar(255, 215, 200);
    Scalar red_lb_luv = Scalar(20, 110, 100);
    Scalar red_ub_luv = Scalar(255, 255, 190);
    Scalar red_lb_hsv = Scalar(150, 50, 140);
    Scalar red_ub_hsv = Scalar(180, 255, 255);

    Scalar blue_lb_lab = Scalar(50, 60, 70);
    Scalar blue_ub_lab = Scalar(255, 150, 140);
    Scalar blue_lb_luv = Scalar(50, 45, 40);
    Scalar blue_ub_luv = Scalar(255, 100, 200);
    Scalar blue_lb_hsv = Scalar(75, 50, 120);
    Scalar blue_ub_hsv = Scalar(115, 255, 255);


    Scalar yellow_lb_lab = Scalar(250, 115, 115);
    Scalar yellow_ub_lab = Scalar(250, 135, 135);
    Scalar yellow_lb_luv = Scalar(20, 60, 120);
    Scalar yellow_ub_luv = Scalar(250, 140, 255);
    Scalar yellow_lb_hsv = Scalar(20, 33, 80);
    Scalar yellow_ub_hsv = Scalar(40, 255, 255);

    Scalar purple_lb_lab = Scalar(70, 120, 70);
    Scalar purple_ub_lab = Scalar(255, 180, 130);
    Scalar purple_lb_luv = Scalar(70, 75, 50);
    Scalar purple_ub_luv = Scalar(255, 120, 140);
    Scalar purple_lb_hsv = Scalar(105, 10, 100);
    Scalar purple_ub_hsv = Scalar(145, 150, 255);

    Scalar black_lb_lab = Scalar(0, 120, 100);
    Scalar black_ub_lab = Scalar(160, 140, 130);
    Scalar black_lb_luv = Scalar(0, 80, 100);
    Scalar black_ub_luv = Scalar(160, 100, 150);
    Scalar black_lb_hsv = Scalar(100, 50, 0);
    Scalar blcak_ub_hsv = Scalar(120,255, 230);  // (sic) misspelled local name

    red_lb.push_back(red_lb_lab);
    red_lb.push_back(red_lb_luv);
    red_lb.push_back(red_lb_hsv);
    red_ub.push_back(red_ub_lab);
    red_ub.push_back(red_ub_luv);
    red_ub.push_back(red_ub_hsv);

    blue_lb.push_back(blue_lb_lab);
    blue_lb.push_back(blue_lb_luv);
    blue_lb.push_back(blue_lb_hsv);
    blue_ub.push_back(blue_ub_lab);
    blue_ub.push_back(blue_ub_luv);
    blue_ub.push_back(blue_ub_hsv);

    yellow_lb.push_back(yellow_lb_lab);
    yellow_lb.push_back(yellow_lb_luv);
    yellow_lb.push_back(yellow_lb_hsv);
    yellow_ub.push_back(yellow_ub_lab);
    yellow_ub.push_back(yellow_ub_luv);
    yellow_ub.push_back(yellow_ub_hsv);

    purple_lb.push_back(purple_lb_lab);
    purple_lb.push_back(purple_lb_luv);
    purple_lb.push_back(purple_lb_hsv);
    purple_ub.push_back(purple_ub_lab);
    purple_ub.push_back(purple_ub_luv);
    purple_ub.push_back(purple_ub_hsv);

    black_lb.push_back(black_lb_lab);
    black_lb.push_back(black_lb_luv);
    black_lb.push_back(black_lb_hsv);
    black_ub.push_back(black_ub_lab);
    black_ub.push_back(black_ub_luv);
    black_ub.push_back(blcak_ub_hsv);

    // green: HSV gate only (binarizeGreen adds its own fixed LAB gate)
    Scalar green_lb_hsv = Scalar(70, 120, 200);
    Scalar green_ub_hsv = Scalar(90, 170, 250);

    green_lb.push_back(green_lb_hsv);
    green_ub.push_back(green_ub_hsv);

    // Cache the rectified frame in all three color spaces, then build the
    // 2-channel (ab, uv, hs) images used by the histogram method.
    compute_hsv_lab_luv();
    Mat img_ab, img_uv, img_hs;
    vector<Mat> imgs_in_colorspaces;
    select_channels(this->img_lab, this->img_luv, this->img_hsv, img_ab, img_uv, img_hs);

    imgs_in_colorspaces.push_back(img_ab);
    imgs_in_colorspaces.push_back(img_uv);
    imgs_in_colorspaces.push_back(img_hs);
    if (this->binaryMethod == "threshold")
    {
        binarizeRed(this->bi_img_red, red_lb, red_ub);
        binarizeBlue(this->bi_img_blue, blue_lb, blue_ub);
        // Reuses the three-gate blue binarizer with the black bounds.
        binarizeBlue(this->bi_img_black, black_lb, black_ub);
        if (pre_loc.z > 0 && pre_loc.z < 1500)
        {
            // At low altitude, black pixels can leak into the blue mask;
            // zero them out of the blue mask pixel by pixel.
            vector<Point> locations;
            cv::findNonZero(this->bi_img_black, locations);
            for (int i = 0; i < (int)locations.size(); ++i)
            {
                this->bi_img_blue.at<unsigned char>(locations[i].y, locations[i].x) = 0;
            }
        }

    }
    else if(this->binaryMethod == "hist")
    {
        // Learned-histogram back-projection with a fixed score threshold.
        binarizeFromHist(this->histGT_red, imgs_in_colorspaces, this->bi_img_red, 2e-2f);
        binarizeFromHist(this->histGT_blue, imgs_in_colorspaces, this->bi_img_blue, 2e-2f);
    }

#ifdef MYDEBUG
    namedWindow("bi_red");
    namedWindow("bi_blue");
    imshow("bi_red", this->bi_img_red);
    imshow("bi_blue", this->bi_img_blue);
    cv::waitKey(0);
#endif
    // Extra colors only exist on the older board revisions.
    if (this->cameraInfo.boardVersion == "1.1")
    {
        binarizeGreen(this->bi_img_green, green_lb, green_ub);
    }
    if (this->cameraInfo.boardVersion == "1.2")
    {
        binarizeYellow(this->bi_img_yellow, yellow_lb, yellow_ub);
        binarizePurple(this->bi_img_purple, purple_lb, purple_ub);
    }

    // Dispatch to the locator matching the configured board version.
    Point3f loc_;
    // one by one detect
    if (this->cameraInfo.boardVersion == "1.2")
    {
        loc_ = getloc_v_1_2_2();
    }
    // cross
    if (this->cameraInfo.boardVersion == "1.3")
    {
        loc_ = getloc_v_1_3();
    }
    // slam
    #ifndef _WIN32
    if (this->cameraInfo.boardVersion == "slam")
    {
        loc_ = getloc_slam();
    }
    #endif
    return loc_;
    }
    catch( cv::Exception& e ) {
        // Any OpenCV failure is treated as a missed detection rather than
        // crashing the landing loop.
        const char* err_msg = e.what();
        std::cout << "exception caught: " << err_msg << std::endl;
        this->isDetected = false;
        return Point3f(0.0f, 0.0f, 0.0f);
        }

}

bool xyVision::visionLanding::getLocsFromBi(Mat& bi, const float& min_width,
    const float& max_width,vector<Point3f>& locs,  vector<vector<Point> >& tarContours,
    vector<RotatedRect>& boxes, bool filter_square, bool filter_area)
{
    // Run contour detection on a binary mask and, for every surviving
    // contour, compute its minimum-area box and the 3-D location implied
    // by the first board square's physical width.
    // Returns false (with tarContours/boxes cleared) when no candidate passes.
    tarContours.clear();
    boxes.clear();
    vector<Point> primaryContour;
    const bool found = contourDetect(bi, primaryContour, tarContours,
        min_width, max_width, filter_square, filter_area);
    if (!found)
    {
        return false;
    }
    locs.clear();
    for (size_t k = 0; k < tarContours.size(); ++k)
    {
        const cv::RotatedRect fitted = minAreaRect(tarContours[k]);
        boxes.push_back(fitted);
        locs.push_back(locate_box(fitted, this->largeBoardInfo.widths[0]));
    }
    return true;
}

Point3f xyVision::visionLanding::getloc_v_1_2_2()
{
    // Locator for board version 1.2: find a red square, pair it with a
    // nearby blue square of similar size, optionally refine with a purple
    // square, and reject detections that jump far from the previous
    // location once a stable detection streak exists.
    // Side effects: updates isDetected, frameCounter(_Target), num_cont_det,
    // pre_loc and flag_num_detect (0..3 = number of colors confirmed).
    // @return target location; (0,0,0) on rejection.

    // Expected apparent target width (px), bounded by the external height
    // estimate +/- 2 m projected through the current focal length.
    float raw_height_ub = this->raw_height + 2000;
    float raw_height_lb = std::max(this->proInfo.min_height, this->raw_height - 2000);
    float min_width = this->cameraInfo.newCameraMatrix(0,0) / raw_height_ub
        * this->largeBoardInfo.widths[0];
    float max_width = this->cameraInfo.newCameraMatrix(0,0) / raw_height_lb
        * this->largeBoardInfo.widths[0];
    flag_num_detect = 0;
    // detect red
    vector<vector<Point> > tarContours_red;
    vector<Point3f> locs_red;
    vector<RotatedRect> boxes_red;
    bool has_red_condidate = this->getLocsFromBi(this->bi_img_red, min_width,
        max_width, locs_red, tarContours_red, boxes_red, true, true);
    //bool has_red_candidate = contourDetect(this->bi_img_red, tarContour, tarContours,
    //    min_width, max_width);
    if (!has_red_condidate)
    {
        this->isDetected = false;
        this->frameCounter ++;
        flag_num_detect = 0;
        return Point3f(0.0f, 0.0f, 0.0f);
    }
    flag_num_detect = 1;

    // detect blue
    vector<Point3f> locs_blue;
    vector<vector<Point> > tarContours_blue;
    vector<RotatedRect> boxes_blue;
    bool has_blue_candidate = this->getLocsFromBi(this->bi_img_blue, min_width,
        max_width, locs_blue, tarContours_blue, boxes_blue, true, false);
    //bool has_blue_candidate = contourDetect(this->bi_img_blue, tarContour, tarContours,
    //    min_width, max_width);
    if (!has_blue_candidate)
    {
        this->isDetected = false;
        this->frameCounter ++;
        this->num_cont_det = 0;
        return Point3f(0.0f, 0.0f, 0.0f);
    }
    flag_num_detect = 2;

    // compute pair wise distance
    // compute boxes pairs that are close
    //int sel_idx_red, sel_idx_blue;
    //float min_dis = 1000000.0f;
    // Pair every red box with every blue box; keep pairs that are close
    // (within 3 red-heights) and of comparable size (width ratio in [0.5, 2]).
    vector<pair<int, int> > sel_pairs;
    for (int i = 0; i < (int)locs_red.size(); ++i)
    {
        for (int j = 0; j < (int)locs_blue.size(); ++j)
        {
            //float dis = compute_dis(locs_red[i], locs_blue[j]);
            //if (dis < min_dis)
            //{
            //    sel_idx_red = i;sel_idx_blue = j;
            //    min_dis = dis;
            //}
            // box distance, box width close, fillin ratio
            float box_dis = (float)cv::norm(boxes_red[i].center - boxes_blue[j].center);
            if (box_dis > 3.0f*boxes_red[i].size.height)
            {
                continue;
            }
            else
            {
                float red_width = (boxes_red[i].size.width + boxes_red[i].size.height) / 2.0f;
                float blue_width = (boxes_blue[j].size.width + boxes_blue[j].size.height) / 2.0f;
                float ratio = red_width / blue_width;
                if (ratio < 0.5 || ratio > 2)
                {
                    continue;
                }
                else
                {
                    sel_pairs.push_back(make_pair(i,j));
                }
            }
        }
    }

    //if (min_dis > 0.5f*locs_red[sel_idx_red].z)
    if ((int)sel_pairs.size() == 0)
    {
        this->isDetected = false;
        this->frameCounter ++;
        this->num_cont_det = 0;
        return Point3f(0.0f, 0.0f, 0.0f);
    }
    // find largest fillin ratio
    // Among the surviving pairs, keep the one whose combined red+blue box is
    // best filled by actual mask pixels (rejects accidental pairings).
    float max_fillin_ratio = -1;
    cv::RotatedRect box_red_blue_sel;
    vector<Point> red_blue_sel_contour_con;
    for (int i = 0; i < (int)sel_pairs.size(); ++i)
    {
        // concatenate red and blue contours
        vector<Point> red_blue_contour_con = tarContours_red[sel_pairs[i].first];
        red_blue_contour_con.insert(red_blue_contour_con.end(),
            tarContours_blue[sel_pairs[i].second].begin(), tarContours_blue[sel_pairs[i].second].end());
        cv::RotatedRect box_red_blue = minAreaRect(red_blue_contour_con);
        float fillinratio = (float)computeFillinRatio2(this->bi_img_red.size(), box_red_blue,
            this->bi_img_red+this->bi_img_blue);
        if (fillinratio > max_fillin_ratio)
        {
            max_fillin_ratio = fillinratio;
            box_red_blue_sel = box_red_blue;
            red_blue_sel_contour_con = red_blue_contour_con;
        }
    }
    if (max_fillin_ratio < 0.6)
    {
        this->isDetected = false;
        this->frameCounter ++;
        this->num_cont_det = 0;
        return Point3f(0.0f, 0.0f, 0.0f);
    }
    // Location from the combined box, treated as a 2x1 arrangement of squares.
    Point3f loc_red_blue = locate_box_2x1(box_red_blue_sel, this->largeBoardInfo.widths[0]);

    // detect purple
    vector<Point3f> locs_purple;
    vector<vector<Point> > tarContours_purple;
    vector<RotatedRect> boxes_purple;
    bool has_purple_candidate = this->getLocsFromBi(this->bi_img_purple, min_width,
        max_width, locs_purple, tarContours_purple, boxes_purple, true, true);
    if (!has_purple_candidate)
    {
        // successively detect 3 frames, then check if it is near to last detection
        // Without purple, accept the red+blue location unless the streak-based
        // locality check says it jumped > 500 (mm, presumably) in x+y.
        bool far_pre = (abs(loc_red_blue.x - this->pre_loc.x) + abs(loc_red_blue.y - this->pre_loc.y) > 500) ? true: false;
        if(this->num_cont_det >= this->thre_cont_frame && far_pre)
        {
            this->isDetected = false;
            this->num_cont_det = 0;
            this->frameCounter ++;
            return Point3f(0.0f, 0.0f, 0.0f);
        }
        else
        {
            this->isDetected = true;
            this->frameCounterTarget ++;
            this->num_cont_det ++;
            this->frameCounter ++;
            this->pre_loc = loc_red_blue;
            return loc_red_blue;
        }

    }


    // Pick the purple box that is close to the red+blue box and whose width
    // best matches the expected single-square width (ratio nearest 1).
    int sel_idx_purple = -1;;
    float best_ratio = 10000;
    for (int i = 0; i < (int)locs_purple.size(); ++i)
    {
        float box_dis = (float)cv::norm(boxes_purple[i].center - box_red_blue_sel.center);
        if (box_dis > 2.0f*boxes_purple[i].size.height)
        {
            continue;
        }
        else
        {
            // The 2x1 box spans two squares, so (w+h)/3 estimates one square's width.
            float red_blue_width = (box_red_blue_sel.size.height + box_red_blue_sel.size.width) / 3.0f;
            float purple_width = boxes_purple[i].size.width;
            float ratio = red_blue_width / purple_width;
            if (abs(ratio - 1.0f) < best_ratio)
            {
                sel_idx_purple = i;
                best_ratio = abs(ratio - 1.0f);
            }
        }
    }
    //if (min_dis > 0.5f * locs_red[sel_idx_red].z)
    if(sel_idx_purple == -1)
    {
        // successively detect 3 frames, then check if it is near to last detection
        bool far_pre = (abs(loc_red_blue.x - this->pre_loc.x) + abs(loc_red_blue.y - this->pre_loc.y) > 500) ? true: false;
        if(this->num_cont_det >= this->thre_cont_frame && far_pre)
        {
            this->isDetected = false;
            this->num_cont_det = 0;
            this->frameCounter ++;
            return Point3f(0.0f, 0.0f, 0.0f);
        }
        else
        {
            flag_num_detect = 2;
            this->isDetected = true;
            this->frameCounter ++;
            this->num_cont_det ++;
            this->pre_loc = loc_red_blue;
            return loc_red_blue;
        }

    }
    flag_num_detect = 3;
    // choose largest fillin ratio
    //concatenate red blue and purple
    // All three colors found: fit one box around red+blue+purple and locate
    // it as a 2x2 square arrangement.
    vector<Point>  red_blue_purple_contour_con = red_blue_sel_contour_con;
    red_blue_purple_contour_con.insert(red_blue_purple_contour_con.end(),
        tarContours_purple[sel_idx_purple].begin(), tarContours_purple[sel_idx_purple].end());
    cv::RotatedRect box_all = minAreaRect(red_blue_purple_contour_con);
    Point3f loc_all = locate_box_2x2(box_all, this->largeBoardInfo.widths[0]);

    //Point3f loc_red_purple = Point3f(locs_red[sel_idx_red].x/2.0f + locs_purple[sel_idx_purple].x/2.0f,
    //    locs_red[sel_idx_red].y/2.0f + locs_purple[sel_idx_purple].y/2.0f,
    //    locs_red[sel_idx_red].z/2.0f + locs_purple[sel_idx_purple].z/2.0f);


    // if successively detect 3 frames, then check if it is near to last detection
    bool far_pre = (abs(loc_red_blue.x - this->pre_loc.x) + abs(loc_red_blue.y - this->pre_loc.y) > 500) ? true: false;
    if(this->num_cont_det >= thre_cont_frame && far_pre)
    {
        this->isDetected = false;
        this->num_cont_det = 0;
        this->frameCounter ++;
        return Point3f(0.0f, 0.0f, 0.0f);
    }

    this->isDetected = true;
    this->frameCounter ++;
    this->num_cont_det ++;
    this->pre_loc = loc_all;
    return loc_all;
}

//bool xyVision::visionLanding::locate_aruco(Point3f& loc)
//{
//    vector<int> markerIds;
//    vector< vector<Point2f> > markerCorners;
//    aruco::detectMarkers(this->rectifiedImg, this->arucoBoardInfo.board.dictionary, markerCorners, markerIds);
//    if (markerIds.size() > 0)
//    {
//        cv::Mat rvec, tvec;
//        bool _isdetected = (aruco::estimatePoseBoard(markerCorners, markerIds,
//            this->arucoBoardInfo.board, this->cameraInfo.newCameraMatrix,
//            cv::Mat::zeros(1, 5, CV_32F), rvec, tvec) > 0);
//        if (_isdetected) {
//            loc = Point3f(tvec);
//            return true;
//        }
//        else {
//            return false;
//        }
//    }
//    else
//    {
//        return false;
//    }
//}

// Locate the v1.3 landing board in the current binarized frames and return
// its 3-D position in camera coordinates; returns (0,0,0) when nothing is
// found or the detection is rejected.
// Side effects: bi_img_red / bi_img_blue / bi_img_plus are modified in place
// by the morphology below, and isDetected, frameCounter, frameCounterTarget,
// num_cont_det, pre_loc and detect_state are updated.
Point3f xyVision::visionLanding::getloc_v_1_3()
{
    Mat kernel = cv::Mat::ones(3, 3, CV_8UC1);

    // Close small gaps in each color mask (dilate then erode).
    dilate(this->bi_img_red, this->bi_img_red, kernel);
    erode(this->bi_img_red, this->bi_img_red, kernel);

    dilate(this->bi_img_blue, this->bi_img_blue, kernel);
    erode(this->bi_img_blue, this->bi_img_blue, kernel);

    // Combined mask: close (dilate+erode) to merge red and blue regions into
    // one blob, then open (erode+dilate) to drop isolated noise.
    this->bi_img_plus = this->bi_img_red + this->bi_img_blue;
    dilate(this->bi_img_plus, this->bi_img_plus, kernel);
    erode(this->bi_img_plus, this->bi_img_plus, kernel);
    erode(this->bi_img_plus, this->bi_img_plus, kernel);
    dilate(this->bi_img_plus, this->bi_img_plus, kernel);

#ifdef MYDEBUG
    cv::namedWindow("bi_plus");
    cv::imshow("bi_plus",this->bi_img_plus);
#endif
    // Expected board width in pixels, bracketed around the last raw height
    // +/- 2000 (presumably mm -- TODO confirm the unit of raw_height).
    // width_px = f / height * board_width * 2; the factor 2 matches the
    // widths[0]*2 board size used for locate_box below.
    float raw_height_ub = this->raw_height + 2000;
    float raw_height_lb = std::max(this->proInfo.min_height, this->raw_height - 2000);
    float min_width = this->cameraInfo.newCameraMatrix(0,0) / raw_height_ub
        * this->largeBoardInfo.widths[0] * 2;
    float max_width = this->cameraInfo.newCameraMatrix(0,0) / raw_height_lb
        * this->largeBoardInfo.widths[0] * 2;
    vector<Point>  tarContour;
    vector<vector<Point> > tarContours;

    bool has_candidates = contourDetect(this->bi_img_plus, tarContour, tarContours, min_width,
        max_width, true, true);
    if (has_candidates)
    {
        // Score every candidate with the red/blue quadrant cross-check and
        // keep the best-scoring contour.
        float max_score = 0;
        int max_idx = 0;
        for (int i = 0; i < (int)tarContours.size(); ++i)
        {
            float score = cross_check(tarContours[i]);
            if (score > max_score)
            {
                max_score = score;
                max_idx = i;
            }
        }
        RotatedRect box = minAreaRect(tarContours[max_idx]);
        Point3f loc_c = locate_box(box, this->largeBoardInfo.widths[0]*2);
        // Reject a location that jumps more than 500 (L1 distance in x/y)
        // away from the previous one once a consecutive track exists.
        bool far_pre = (abs(loc_c.x - this->pre_loc.x) + abs(loc_c.y - this->pre_loc.y) > 500) ? true: false;
        if (max_score < 0.52f || (this->num_cont_det >= this->thre_cont_frame && far_pre))
        {
            // Low-confidence pattern or implausible jump: treat as lost.
            this->isDetected = false;
            this->frameCounter ++;
            this->num_cont_det = 0;
            this->detect_state = PRE_LOST;
            return Point3f(0.0f, 0.0f, 0.0f);
        }

        else
        {
            this->isDetected = true;
            this->frameCounter ++;
            this->frameCounterTarget ++;
            this->num_cont_det ++;
            this->pre_loc = loc_c;
            this->detect_state = PRE_DET;
            // Promote to CONSIST_DET after thre_cont_frame consecutive hits.
            if (this->num_cont_det >= this->thre_cont_frame) {
                this->detect_state = CONSIST_DET;
            }
            return loc_c;
        }
    }
    else
    {
        // No candidate contour at all: reset the consecutive-hit counter.
        this->isDetected = false;
        this->frameCounter ++;
        this->num_cont_det = 0;
        this->detect_state = PRE_LOST;
        return Point3f(0.0f, 0.0f, 0.0f);
    }

}
/*bool xyVision::visionLanding::locate_square_board(Point3f& loc, bool islarge)
{
    vector<vector<vector<Point> > > tarContours;    // per square board: a list of candidate contours, each a list of Points
    vector<vector<float> > confidences;
    int num_square_board;
    vector<Mat> bi_maps;
    squareBoard _board;
    if (islarge)
    {
        num_square_board = this->largeBoardInfo.numSquareBoard;
        bi_maps = bi_img_large;
        _board = this->largeBoardInfo;
    }
    else
    {
        num_square_board = this->smallBoardInfo.numSquareBoard;
        bi_maps = bi_img_small;
        _board = this->smallBoardInfo;
    }
    tarContours.resize(num_square_board);
    confidences.resize(num_square_board);

    vector<Point> tmp;
    for (int i = 0; i < (int)tarContours.size(); ++i)
    {
        float min_width = this->largeBoardInfo.widths[i] / this->proInfo.max_height * this->cameraInfo.newCameraMatrix(0,0);
        this->contourDetect(this->bi_img_large[i], tmp, tarContours[i], min_width, 500);
        confidences[i].resize(tarContours[i].size());
    }
    // board has at least 2 squares: rank each board's candidates by confidence
    if (_board.numSquareBoard >= 2)
    {
        for (int i = 0; i < _board.numSquareBoard; ++i)
        {
            // keep the candidate contour with the highest confidence
            float max_c = 0;
            int max_idx = 0;
            for (int c = 0; c < (int)tarContours[i].size(); ++c)
            {
                confidences[i][c] = computeConfidence(tarContours[i][c], i, bi_maps, _board);
                if (confidences[i][c] > max_c)
                {
                    max_c = confidences[i][c];
                    max_idx = c;
                }
            }
            _board.detectedBoxes[i] = cv::minAreaRect(Mat(tarContours[i][max_idx]));
            _board.confidence[i] = max_c;
        }
    }
    // fewer than 2 squares: fall back to the largest candidate box
    else
    {
        float max_area = 0;
        cv::RotatedRect max_box;
        for (int c = 0;  c < (int)tarContours[0].size(); ++c)
        {
            cv::RotatedRect _box = cv::minAreaRect(Mat(tarContours[0][c]));
            if (_box.size.width*_box.size.height > max_area)
            {
                max_area = _box.size.width * _box.size.height;
                max_box = _box;
            }
        }
        _board.confidence[0] = -1;
        _board.detectedBoxes[0] = max_box;
    }
    vector<Point3f> locations(_board.numSquareBoard);

    for (int i = 0; i < _board.numSquareBoard; ++i)
    {
        Point3f center_3d = Point3f(_board.detectedBoxes[i].center.x, _board.detectedBoxes[i].center.y, 1.0f);
        Point3f pointPlane = this->cameraInfo.newCameraMatrix.inv()*center_3d;
        float ave_len = (_board.detectedBoxes[i].size.width + _board.detectedBoxes[i].size.height)/2;
        locations[i] = pointPlane*this->cameraInfo.newCameraMatrix(0,0)*(_board.widths[i] / ave_len);
    }
    loc = compute_ave_loc(locations);
    return true;
}*/

void xyVision::visionLanding::clearStates()
{
    this->frameCounter = 0;
    this->frameCounterTarget = 0;
    this->isDetected = false;
}
// Back-project the centre of a detected box into 3-D camera coordinates,
// assuming a 2x1 board layout: one cell's side in pixels is estimated as
// (width + height) / 3, and the normalized viewing ray is scaled by
// f * board_width / cell_px.
Point3f xyVision::visionLanding::locate_box_2x1(const RotatedRect box, const float board_width)
{
    const Point3f pixelHomog(box.center.x, box.center.y, 1.0f);
    const Point3f ray = this->cameraInfo.newCameraMatrix.inv() * pixelHomog;
    const float cellSidePx = (box.size.width + box.size.height) / 3;
    return ray * this->cameraInfo.newCameraMatrix(0,0) * (board_width / cellSidePx);
}
// Back-project the centre of a detected box into 3-D camera coordinates,
// assuming a 2x2 board layout: one cell's side in pixels is estimated as
// (width + height) / 4, and the normalized viewing ray is scaled by
// f * board_width / cell_px.
Point3f xyVision::visionLanding::locate_box_2x2(const RotatedRect box, const float board_width)
{
    const Point3f pixelHomog(box.center.x, box.center.y, 1.0f);
    const Point3f ray = this->cameraInfo.newCameraMatrix.inv() * pixelHomog;
    const float cellSidePx = (box.size.width + box.size.height) / 4;
    return ray * this->cameraInfo.newCameraMatrix(0,0) * (board_width / cellSidePx);
}
// Back-project the centre of a detected box into 3-D camera coordinates.
// The pixel length taken to correspond to board_width depends on the
// configured board version: versions "1.1"/"1.2" average width and height,
// while "1.1s"/"1.3" (and any other version) use the box width alone.
Point3f xyVision::visionLanding::locate_box(const RotatedRect box, const float board_width)
{
    const string& ver = this->cameraInfo.boardVersion;
    float sidePx = box.size.width;
    if (ver == "1.1" || ver == "1.2")
    {
        sidePx = (box.size.width + box.size.height) / 2;
    }
    else if (ver == "1.1s" || ver == "1.3")
    {
        sidePx = box.size.width; // explicit: identical to the default
    }
    const Point3f pixelHomog(box.center.x, box.center.y, 1.0f);
    const Point3f ray = this->cameraInfo.newCameraMatrix.inv() * pixelHomog;
    return ray * this->cameraInfo.newCameraMatrix(0,0) * (board_width / sidePx);
}
// Find candidate board contours in the binary image `bi`.
// Candidates pass up to three filters, in order:
//   1. (filter_area)   rotated-rect area inside [min_width^2, max_width^2],
//      loosened by the box aspect ratio;
//   2. (filter_square) aspect ratio no worse than 1 : 2.5;
//   3. the contour must fill more than half of its rotated box.
// On success `tarContours` receives all survivors and `tarContour` the one
// with the largest contour area. Returns false when nothing survives.
bool xyVision::visionLanding::contourDetect(Mat& bi, vector<Point> & tarContour,
    vector<vector<Point> > & tarContours, const float min_width, const float max_width,
    bool filter_square, bool filter_area)
{
    Size sz = bi.size();
    vector<vector<Point> > candidates;
    vector<Vec4i> hierarchy;
    findContours(bi.clone(), candidates, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    if (candidates.empty())
        return false;

    // Filter 1: plausible rotated-rect area.
    if (filter_area)
    {
        vector<vector<Point> > kept;
        for (size_t i = 0; i < candidates.size(); ++i)
        {
            RotatedRect rr = minAreaRect(candidates[i]);
            double area = rr.size.area();
            float aspect = min(rr.size.width, rr.size.height) / max(rr.size.width, rr.size.height);
            if (this->cameraInfo.boardVersion == "1.1s")
            {
                // the 1.1s board counts as half the nominal area
                area = area / 2;
            }
            if (area > (min_width*min_width*aspect) && area < (max_width*max_width/aspect))
                kept.push_back(candidates[i]);
        }
        candidates.swap(kept);
    }

    // Filter 2: drop shapes more elongated than 1 : 2.5.
    if (filter_square)
    {
        vector<vector<Point> > kept;
        for (size_t i = 0; i < candidates.size(); ++i)
        {
            RotatedRect rr = minAreaRect(Mat(candidates[i]));
            float w = float(rr.size.width);
            float h = float(rr.size.height);
            if (min(w, h) / max(w, h) > 1.0f / 2.5f)
                kept.push_back(candidates[i]);
        }
        candidates.swap(kept);
    }

    // Filter 3: fill-in ratio of the rotated box must exceed 0.5; remember
    // each survivor's contour area for the final selection.
    vector<vector<Point> > survivors;
    vector<double> survivorAreas;
    for (size_t i = 0; i < candidates.size(); ++i)
    {
        RotatedRect rr = minAreaRect(Mat(candidates[i]));
        double area = contourArea(candidates[i]);
        if (computeFillinRatio2(sz, rr, bi) > 0.5)
        {
            survivors.push_back(candidates[i]);
            survivorAreas.push_back(area);
        }
    }

    if (survivors.empty())
        return false;

    // Report the survivor with the largest contour area as the primary one.
    size_t best = 0;
    for (size_t i = 1; i < survivors.size(); ++i)
    {
        if (survivorAreas[i] > survivorAreas[best])
            best = i;
    }
    tarContour = survivors[best];
    tarContours = survivors;
    return true;
}

// Fraction of the pixels inside `box` that are set in the binary image `bi`.
// Rasterizes the rotated rect into a mask of size `sz`, then counts the mask
// pixels that are also non-zero in `bi`.
// Fix: a degenerate box whose rasterization covers no pixels previously
// caused a division by zero (n_pixels == 0); such a box now yields 0.0.
double xyVision::visionLanding::computeFillinRatio(
    cv::Size sz, RotatedRect box, const Mat& bi)
{

    Point2f vertices[4];
    box.points(vertices);

    Point points[4];
    for (int i = 0; i < 4; ++i)
    {
        points[i] = Point((int)vertices[i].x, (int)vertices[i].y);
    }

    // Binary mask of the box interior.
    Mat tmp = Mat::zeros(sz, CV_8U);
    cv::fillConvexPoly(tmp, points, 4, Scalar(1));

    Mat boxIdx;
    findNonZero(tmp, boxIdx);
    double n_pixels = (double)boxIdx.total();
    if (n_pixels == 0)
    {
        // Degenerate or fully off-image box: nothing to measure.
        return 0.0;
    }
    double n_nonZero = 0;
    for (int i = 0; i < (int)boxIdx.total(); ++i)
    {
        Point _p = boxIdx.at<Point>(i);
        if (bi.at<unsigned char>(_p.y, _p.x) > 0)
        {
            n_nonZero = n_nonZero + 1;
        }
    }
    return n_nonZero / n_pixels;
}

// Fraction of non-zero pixels inside `box`, computed by rotating the binary
// image so the box becomes axis-aligned and counting set pixels inside the
// resulting rectangular ROI. Avoids the per-pixel polygon mask used by
// computeFillinRatio.
double xyVision::visionLanding::computeFillinRatio2(const cv::Size sz, const RotatedRect box, const Mat& bi)
{
    float angle = box.angle;
    // Rotate about the box centre so the rotated rect maps to an upright one.
    Mat M = cv::getRotationMatrix2D(box.center, angle, 1.0);
    Mat bi_warp;
    int flag =  INTER_NEAREST;
    cv::warpAffine(bi, bi_warp, M, sz, flag);
    // Axis-aligned ROI of the now-upright box, clamped to the image.
    // NOTE(review): if box.center lay outside the image, the clamped bounds
    // could invert and colRange/rowRange would throw -- presumably callers
    // only pass boxes fitted to in-image contours; confirm.
    int col_left = max(0, (int)box.center.x - (int)box.size.width/2);
    int col_right = min(sz.width-1, (int)box.center.x + (int)box.size.width/2);
    int row_up = max(0, (int)box.center.y - (int)box.size.height/2);
    int row_down = min(sz.height-1, (int)box.center.y + (int)box.size.height/2);
    Mat roi = bi_warp.colRange(col_left, col_right+1).rowRange(row_up, row_down+1);
    //Mat roi = bi_warp.colRange((int)box.center.x - (int)box.size.width/2, (int)box.center.x + (int)box.size.width/2)
    //    .rowRange((int)box.center.y - (int)box.size.height/2, (int)box.center.y + (int)box.size.height/2);
    Mat nonIdx;
    findNonZero(roi, nonIdx);
    // findNonZero yields one 2-channel entry per set pixel, so
    // total()*channels()/2 is simply the count of non-zero pixels.
    return double(nonIdx.total())*nonIdx.channels()/2.0/roi.total();

    //Mat tmp = Mat::zeros(sz, CV_8U);
    //tmp.colRange(box.center.x - box.size.width/2, box.center.x + box.size.width/2)
    //    .rowRange(box.center.y - box.size.height/2, box.center.y + box.size.height/2) = 255;
}

// Estimate the in-plane angle of the board by re-fitting a rotated rect to
// the combined red+blue binary mask inside the bounding rect of `box`.
// Returns 0.0 when the clipped region is empty or contains no contour.
// Fix: the old code clamped width/height against the still-unclamped
// (possibly negative) x/y and only clamped x/y afterwards, so a box
// partially outside the image could produce a ROI exceeding the image
// bounds and trigger a cv::Mat ROI assertion. Clipping via rectangle
// intersection handles all cases.
double xyVision::visionLanding::compute_box_angle_by_blue_red(const RotatedRect box)
{
    Rect rect = box.boundingRect();
    Mat bi_red_plus_blue = this->bi_img_red + this->bi_img_blue;
    //namedWindow("bi_red_plus_blue");
    //imshow("bi_red_plus_blue", bi_red_plus_blue);
    //waitKey(0);
    // Clip the bounding rect to the image.
    rect &= Rect(0, 0, bi_red_plus_blue.cols, bi_red_plus_blue.rows);
    if (rect.width <= 0 || rect.height <= 0)
    {
        return 0.0;
    }
    Mat roi = bi_red_plus_blue(rect);
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(roi.clone(), contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    if (contours.size() == 0)
    {
        return 0.0;
    }
    // Pick the largest contour by area and return its fitted box angle.
    int maxIdx = 0;
    double maxAera = contourArea(contours[0]);
    for (int i = 1; i < (int)contours.size(); ++i)
    {
        double conAera_i = contourArea(contours[i]);
        if (conAera_i > maxAera)
        {
            maxAera = conAera_i;
            maxIdx = i;
        }
    }
    RotatedRect box_out = minAreaRect(Mat(contours[maxIdx]));
    return box_out.angle;
}

// Score how board-like a candidate contour is by examining the red and blue
// fill ratios of the four quadrants around its fitted box centre. The masks
// are rotated so the box becomes axis-aligned first. The score is the sum of
// absolute ratio differences between neighbouring quadrants divided by 8,
// so alternating colours (as on the board) score high; range is [0, 1].
float xyVision::visionLanding::cross_check(const vector<Point> contours)
{
    RotatedRect box = minAreaRect(contours);
    Mat M;
    M = getRotationMatrix2D(box.center, box.angle, 1.0);
    Size sz = this->cameraInfo.newSize;
    int flag = cv::INTER_NEAREST;
    Mat bi_img_plus_warp, bi_img_red_warp, bi_img_blue_warp;
    // Rotate all three masks into the box-aligned frame.
    warpAffine(this->bi_img_plus, bi_img_plus_warp, M, sz, flag);
    warpAffine(this->bi_img_red, bi_img_red_warp, M, sz, flag);
    warpAffine(this->bi_img_blue, bi_img_blue_warp, M, sz, flag);

    // Per quadrant: .x = red fill ratio, .y = blue fill ratio.
    vector<Point2f> red_blue_ratios(4);
    Mat roi_red, roi_blue, non_zero_red, non_zero_blue;
    // Offsets of each quadrant's top-left / bottom-right corner relative to
    // the box centre.
    vector<Point2i> up_left_move(4), right_down_move(4);
    up_left_move[0] = Point2i(-int(box.size.width/2), -int(box.size.height/2));
    right_down_move[0] = Point2i(0,0);
    up_left_move[1] = Point2i(0, -int(box.size.height/2));
    right_down_move[1] = Point2i(int(box.size.width/2), 0);
    up_left_move[2] = Point2i(0,0);
    right_down_move[2] =  Point2i(int(box.size.width/2), int(box.size.height/2));
    up_left_move[3] = Point2i(-int(box.size.width/2),0);
    right_down_move[3] = Point2i(0, int(box.size.height/2));

    // Quadrant layout around the centre:
    // 0 1
    // 3 2
    for (int i = 0; i < 4; ++i)
    {
        // Quadrant rectangle, clamped to the image.
        // NOTE(review): if the box touches the image border a quadrant can
        // end up with zero width/height, making the ratios inf/NaN --
        // presumably upstream filtering keeps boxes inside; confirm.
        int x1 = max(0, (int)box.center.x + up_left_move[i].x);
        int y1 = max(0, (int)box.center.y + up_left_move[i].y);
        int x2 = min(sz.width, (int)box.center.x + right_down_move[i].x);
        int y2 = min(sz.height, int(box.center.y) + right_down_move[i].y);

        Rect rect(x1, y1, x2-x1, y2-y1);
        roi_red = bi_img_red_warp(rect);
        roi_blue = bi_img_blue_warp(rect);
        cv::findNonZero(roi_red, non_zero_red);
        cv::findNonZero(roi_blue, non_zero_blue);
        red_blue_ratios[i].x = (float)non_zero_red.total() / ((x2 - x1)*(y2-y1));
        red_blue_ratios[i].y = (float)non_zero_blue.total() / ((x2 - x1)*(y2-y1));
        // Treat weakly filled quadrants as empty to suppress noise.
        if (red_blue_ratios[i].x < 0.3)
        {
            red_blue_ratios[i].x = 0;
        }
        if (red_blue_ratios[i].y < 0.3)
        {
            red_blue_ratios[i].y = 0;
        }
        //if (red_blue_ratios[i].x >= 0.7)
        //{
        //    red_blue_ratios[i].x = 1;
        //}
        //if (red_blue_ratios[i].y >= 0.7)
        //{
        //    red_blue_ratios[i].y = 1;
        //}
    }
    // Sum of differences over the 4 neighbouring quadrant pairs
    // (0-1, 1-2, 2-3, 3-0), each contributing a red and a blue term.
    float sum_diff = 0;
    for (int i = 0; i < 3; ++i)
    {
        sum_diff += abs(red_blue_ratios[i].x - red_blue_ratios[i+1].x) + abs(red_blue_ratios[i].y - red_blue_ratios[i+1].y);
    }
    sum_diff += abs(red_blue_ratios[3].x - red_blue_ratios[0].x) + abs(red_blue_ratios[3].y - red_blue_ratios[0].y);

    // 8 terms total, so normalize into [0, 1].
    return sum_diff / 8.0f;
}

// Select the binary-segmentation strategy. Only "threshold" and "hist" are
// accepted; any other value is reported on stdout and the current setting
// is left unchanged.
void xyVision::visionLanding::setSegModel(string mode)
{
    const bool recognised = (mode == "threshold") || (mode == "hist");
    if (!recognised)
    {
        cout << "Invalid segmentation model" << endl;
        return;
    }
    this->binaryMethod = mode;
}
#ifndef _WIN32
// Track the current rectified frame with ORB-SLAM2 (monocular) and return
// the position of the configured offset point in the camera frame, or
// (0,0,0) when tracking is not OK. Updates the detection bookkeeping
// (isDetected, frameCounter, frameCounterTarget, num_cont_det).
Point3f xyVision::visionLanding::getloc_slam()
{
    //Mat img_dummy;
    //cv::resize(this->oriImg, img_dummy, cv::Size(640, 360));
    //Mat Tcw = this->SLAM->TrackMonocular(img_dummy, this->slamInfo.ts);
    Mat Tcw = this->SLAM->TrackMonocular(this->rectifiedImg, this->slamInfo.ts);
    ORB_SLAM2::Tracking* tr = SLAM->GetTracker();
    // Synthetic timestamp advancing 0.2 s per call (assumes ~5 fps --
    // TODO confirm the actual frame rate fed to this function).
    slamInfo.ts += 0.2;
    if (tr->mState == ORB_SLAM2::Tracking::OK)
    {
        //Mat Twc = Tcw.inv();
        // Scale the monocular (up-to-scale) translation into metric units.
        Tcw.rowRange(0,3).col(3) = Tcw.rowRange(0,3).col(3)*slamInfo.scale;
        // Map the stored offset point through the scaled camera pose.
        Mat MapPoint = Tcw*this->slamInfo.offset;
        this->isDetected = true;
        this->frameCounter++;
        this->frameCounterTarget++;
        this->num_cont_det++;
        return Point3f(MapPoint.at<float>(0), MapPoint.at<float>(1), MapPoint.at<float>(2));
    }
    else
    {
        // Tracking lost: report no detection and reset the streak counter.
        this->isDetected = false;
        this->frameCounter ++;
        this->num_cont_det = 0;
        return Point3f(0.0f, 0.0f, 0.0f);
    }
}
#endif
void xyVision::visionLanding::setBoardVersion(const string& version)
{
    this->cameraInfo.boardVersion = version;
}
