#pragma once

#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include "palmdll.h"
#include <math.h>

using namespace std;
using namespace cv;

// Segment the palm region of `img` into the binary mask `result`.
// Several alternative segmentation strategies are kept as local lambdas;
// only the fixed-threshold variant is currently active (see the final return).
//
// @param img      input palm image, BGR or single-channel grayscale
// @param result   output 8-bit binary mask
// @param filename used only by the (disabled) deeplab variant to locate a
//                 pre-computed mask on disk; defaults to an empty string.
//                 (The previous default of NULL is undefined behaviour when
//                 constructing a std::string.)
// @return 1 on success
int palm_segment_e(const cv::Mat &img, cv::Mat &result, string filename = "") {
    // Otsu automatic threshold on the grayscale image.
    auto palm_segment_otsu = [](const cv::Mat &img, cv::Mat &result) -> int {
        cvtColor(img, result, CV_BGR2GRAY);
        cv::threshold(result, result, 20, 255, CV_THRESH_OTSU);
        return 1;
    };

    // Fixed threshold at 45, then erode twice to clean up speckle.
    auto palm_segment_fixthre = [](const cv::Mat &img, cv::Mat &result) -> int {
        if (img.channels() > 2)
            cvtColor(img, result, CV_BGR2GRAY);
        else
            // clone, not assign: the in-place threshold below must not write
            // into the buffer shared with the const input image
            result = img.clone();
        cv::threshold(result, result, 45, 255, CV_THRESH_BINARY);
        cv::erode(result, result, cv::Mat(), cv::Point(-1, -1), 2);
        // cv::dilate(result, result, cv::Mat(), cv::Point(-1, -1), 2);
        return 1;
    };

    // GrabCut refinement seeded by a fixed threshold at 28.
    auto palm_segment_grabcut = [](const cv::Mat &img, cv::Mat &result) -> int {
        cv::Mat img_gray, mask, bgMod, fgMod;
        cv::Rect rect;

        cvtColor(img, img_gray, CV_BGR2GRAY);
        cv::threshold(img_gray, result, 28, 255, CV_THRESH_BINARY);

        // White pixels become "probably foreground" seeds for grabCut.
        result.copyTo(mask);
        for (auto it = mask.begin<uchar>(); it != mask.end<uchar>(); ++it) {
            if (*it == 255) {
                *it = GC_PR_FGD;
            }
        }
        cv::grabCut(img, mask, rect, bgMod, fgMod, 1, GC_INIT_WITH_MASK);
        cv::threshold(mask, result, 1, 255, CV_THRESH_BINARY);
        // g_SearchStart = rect.y;
        // std::cout << "g_SearchStart=" << g_SearchStart << std::endl;
        return 1;
    };

    // Load a pre-computed deeplab segmentation result from disk.
    auto palm_segment_deeplab = [](const cv::Mat &img, cv::Mat &result, string &filename) -> int {
        //TODO: call python code would be better
        result = cv::imread(
                "/home/yjy/dataset/palmprint_dectection_tongji/LHand/segment_result_nn/raw_segmentation_results/b'" +
                filename.substr(0, 17) + "'.png");
        cv::cvtColor(result, result, CV_BGR2GRAY);
        // result.convertTo(result, CV_8U);
        return 1;
    };
    // return palm_segment_deeplab(img, result, filename);
    return palm_segment_fixthre(img, result);
}

//Get the palm area: keep only the largest connected component
// Keep only the largest connected region ("the palm") of a binary image.
// Finds all contours, picks the one with the largest area while ignoring any
// smaller than 1/9 of the image, and returns a new single-channel image in
// which only that contour is drawn filled in white.
//
// @param thImg 8-bit single-channel binary image; MODIFIED in place by
//              cvFindContours
// @return newly allocated image of the same size; the caller must
//         cvReleaseImage it
IplImage *myGetClearPalmArea(IplImage *thImg) {
    IplImage *tmpImg = cvCreateImage(cvGetSize(thImg), 8, 3);
    IplImage *outputImg = cvCloneImage(thImg);
    cvZero(tmpImg);

    int w = thImg->width;
    int h = thImg->height;
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvSeq *contour = 0;

    //note: this function will change thImg
    cvFindContours(thImg, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

    // First pass: find the index of the largest contour above the area floor.
    CvSeq *_contour = contour;
    double maxarea = 0;
    double minarea = w * h / 9; // ignore anything smaller than 1/9 of the image
    int m = 0;
    int maxIndex = 0;
    for (; contour != 0; contour = contour->h_next) {
        double tmparea = fabs(cvContourArea(contour));
        m++;
        if (tmparea < minarea) {
            continue;
        }
        if (tmparea > maxarea) {
            maxarea = tmparea;
            maxIndex = m - 1;
        }
    }
    // NOTE(review): if no contour reaches minarea, maxIndex stays 0 and the
    // first contour is drawn regardless — presumably the downstream
    // preprocessing rejects such masks; TODO confirm.

    // Second pass: draw only the selected contour, filled white.
    contour = _contour;
    int count = 0;
    for (; contour != 0; contour = contour->h_next) {
        if (count == maxIndex) {
            CvScalar color = CV_RGB(255, 255, 255);
            cvDrawContours(tmpImg, contour, color, color, -1, -1, 8); // thickness -1 = filled
        }
        count++;
    }
    cvCvtColor(tmpImg, outputImg, CV_BGR2GRAY);

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&tmpImg);

    return outputImg;
}

// Extract a 128x128 palm ROI from the input image.
// Pipeline: segment the palm, resize image and mask to 320x240, keep the
// largest connected component, then call Lnpreprocessing (palmdll) to locate
// and cut the ROI.
//
// @param imgInput      input palm image (BGR or grayscale); the caller's
//                      image is neither modified nor freed
// @param showDebugInfo when true, shows every intermediate image and blocks
//                      on a key press
// @param filename      forwarded to palm_segment_e (used only by its deeplab
//                      variant)
// @return newly allocated 128x128 single-channel ROI that the caller must
//         cvReleaseImage, or NULL if preprocessing failed
IplImage *getROI(IplImage *imgInput, bool showDebugInfo, const string &filename) {
    cv::Mat img(imgInput), img_gray, segment_result;
    if (img.channels() > 2)
        cv::cvtColor(img, img_gray, CV_BGR2GRAY);
    else
        img_gray = img.clone();
    palm_segment_e(img, segment_result, filename);
    // Shallow IplImage headers over local Mats: released with `delete`
    // (not cvReleaseImage) so the Mats keep owning the pixel data.
    imgInput = new IplImage(img_gray); // reuses the parameter; caller's pointer untouched
    IplImage *imgTh = new IplImage(segment_result);
//    debug
//    cvShowImage("imgTh", imgTh);
//    cvWaitKey(0);
    //-------------------------------------

    IplImage *imgPalm = cvCreateImage(cvSize(320, 240), 8, 1);   // normalized palm image
    IplImage *imgBinary = cvCreateImage(cvSize(320, 240), 8, 1); // binarized palm region
    cvResize(imgInput, imgPalm);
    cvResize(imgTh, imgBinary);

    IplImage *imgPalmArea = myGetClearPalmArea(imgBinary); // final palm area (largest blob)
    IplImage *roi = cvCreateImage(cvSize(128, 128), 8, 1); // roi image

    uchar *palmData = (uchar *) imgPalm->imageData;   // palm image pixels
    uchar *thData = (uchar *) imgPalmArea->imageData; // palm-area mask pixels
    uchar *roiData = (uchar *) roi->imageData;        // output ROI pixels

    int Sx[4], Sy[4]; // ROI corner coordinates

    int ret = Lnpreprocessing(palmData, thData, roiData, Sx, Sy); // preprocessing (palmdll)

    if (!ret) // on failure, release everything before returning (was leaking all six images)
    {
        cout << "preprocessing error!" << endl;
        delete imgTh;
        delete imgInput;
        cvReleaseImage(&imgPalm);
        cvReleaseImage(&imgBinary);
        cvReleaseImage(&imgPalmArea);
        cvReleaseImage(&roi);
        return NULL;
    }

    // holeList / lastP1Index / lastP2Index presumably come from palmdll.h,
    // filled by Lnpreprocessing — TODO confirm.
    printf("The input image has been resized to 320x240\n up :(x,y): %d, %d \ndown:(x,y): %d, %d\n",
           holeList[0][lastP1Index][0], holeList[0][lastP1Index][1], holeList[1][lastP2Index][0],
           holeList[1][lastP2Index][1]);

    if (showDebugInfo) {
        cvShowImage("inputImg", imgInput);
        cvShowImage("thImg", imgTh);     // segmentation mask
        cvShowImage("imgPalm", imgPalm); // palm
        cvShowImage("imgBinary", imgBinary);
        cvShowImage("imgPalmArea", imgPalmArea); // palm area
        cvShowImage("roi", roi);                 // roi
        cvWaitKey(0);
    }

    delete imgTh;
    delete imgInput;
    cvReleaseImage(&imgPalm);
    cvReleaseImage(&imgBinary);
    cvReleaseImage(&imgPalmArea);
    return roi;
}

/// Convert an angle from radians to degrees.
/// @param arcAngle angle in radians
/// @return the same angle expressed in degrees
double arc2degree(double arcAngle) {
    const double kPi = 3.1415926535; // same precision used throughout this file
    const double scaled = arcAngle * 180;
    return scaled / kPi;
}

// Estimate the in-plane rotation bias of a palm image, in degrees.
// Strategy: binarize the image, then search outward from 0 degrees (first
// toward negative angles, then positive, in 3-degree steps) for the first
// rotation at which ROI extraction succeeds.
// @param img input palm image; not modified by the search itself
// @return rotation angle in degrees, or 0 if the whole search fails
double getRotationBias(cv::Mat &img) {
    // Alternative moment-based estimate (currently unused): angle of the
    // principal axis relative to the y axis from second-order moments.
    auto get_r_b_moment = [](cv::Mat &img) -> double {
        cv::Moments m = cv::moments(img);
        double theta = -atan(2 * m.m11 / (m.m20 - m.m02)) / 2;
        return arc2degree(theta);
    };
    auto get_r_b_search = [](cv::Mat &img) -> double {
        // Returns true iff ROI extraction succeeds after rotating by theta.
        auto test_theta = [](double theta, cv::Mat &img) -> bool {
            cv::Mat adjusted;
            cv::Point2d rotateCenter(img.cols / 2, img.rows / 2);
            const double scaleRate = 1.0;
            cv::Mat rotateMatrix = cv::getRotationMatrix2D(rotateCenter, theta, scaleRate);
            cv::warpAffine(img, adjusted, rotateMatrix, img.size());
            // Stack-allocated shallow header (the previous `new IplImage`
            // was leaked on every probe; getROI never frees its argument).
            IplImage header = adjusted;
            // "" instead of NULL: binding a std::string to NULL is undefined behaviour.
            IplImage *roi = getROI(&header, false, "");
            if (roi != NULL) {
                cvReleaseImage(&roi);
                return true;
            }
            return false;
        };

        const double search_start = -36;
        const double search_end = 36;
        const double search_step = 3;
        double search_result = 0;

        // Try 0 and the negative angles first...
        for (search_result = 0; search_result > search_start; search_result -= search_step) {
            if (test_theta(search_result, img)) {
                return search_result;
            }
        }
        // ...then the positive ones (0 already failed above, so start one step in).
        for (search_result = search_step; search_result < search_end; search_result += search_step) {
            if (test_theta(search_result, img)) {
                return search_result;
            }
        }

        return 0; //search failed
    };

    cv::Mat img2;
    cv::threshold(img, img2, 28, 255, cv::THRESH_BINARY);
    double theta = get_r_b_search(img2);

    //debug
    // cv::Point2d startPoint(0, img.rows / 2);
    // cv::Point2d endPoint(100 * cos(theta) + startPoint.x, 100 * sin(theta) + startPoint.y);
    // cv::line(img, startPoint, endPoint, cv::Scalar(0, 0, 255));
    // cv::imshow("img", img);
    // cv::imshow("img2", img2);
    // cv::waitKey();

    return theta; // result in degrees
}

// Rotate the palm image to compensate for its estimated rotation bias.
// @param imgInput1 input palm image; not freed here
// @return a newly allocated, deep-copied rotated image; the caller must
//         cvReleaseImage it
IplImage *adjustRotation(IplImage *imgInput1) {
    auto debug = [](cv::Mat &img) -> void {
        cv::imshow("img", img);
        cv::waitKey();
    };

    cv::Mat img, adjusted;
    img = cvarrToMat(imgInput1);
    //debug(img);

    cv::Point2d rotateCenter(imgInput1->width / 2, imgInput1->height / 2);
    double theta = getRotationBias(img);
    cout << "theta degree = " << theta << endl;
    const double scaleRate = 1.0;
    // theta is in degrees, as getRotationMatrix2D expects
    cv::Mat rotateMatrix = cv::getRotationMatrix2D(rotateCenter, theta, scaleRate);
    cv::warpAffine(img, adjusted, rotateMatrix, img.size());
    //debug(adjusted);

    // Deep copy before returning: `adjusted` is a local Mat, so returning
    // `new IplImage(adjusted)` (a shallow header) would hand the caller a
    // dangling pointer once the Mat releases its data on scope exit.
    IplImage header = adjusted;
    return cvCloneImage(&header);
}
