#include "CameraDetect.hpp"

// Canonical marker corner tables, filled once by markerInit().
vector<Point2f> m_markerCorners2d;   // 2D corners of the rectified marker image
vector<Point3f> m_markerCorners3d;   // 3D corners of the unit marker square (Z = 0 plane)
// Side length of the canonical (unwarped) marker image, in pixels.
Size markerSize(64, 64);
// NOTE(review): declared as a Size, but the identically named *int* parameter
// of findCandidates() shadows it everywhere it would matter — this global
// appears unused as written; confirm intent.
Size m_minContourLengthAllowed(100, 100);
// Camera intrinsics and distortion coefficients, loaded by readCameraParameter().
Mat_<float> camMatrix, distCoeff;

// Extract contours from a binary image and keep only those with more than
// minContourPointsAllowed points.  When isClear is false, the survivors are
// appended to whatever `contours` already holds (used by the multi-pass
// threshold modes); otherwise the output vector is reset first.
// NOTE: findContours modifies thresholdImg in place (OpenCV < 3.2 behavior).
void findContour(cv::Mat& thresholdImg, vector<vector<Point> >& contours, int minContourPointsAllowed , bool isClear = true)
{
    vector<vector<Point> > found;
    findContours(thresholdImg, found, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

    if (isClear)
        contours.clear();

    for (vector<vector<Point> >::const_iterator it = found.begin(); it != found.end(); ++it)
    {
        // Same comparison as the original: size() narrowed to int first.
        int pointCount = it->size();
        if (pointCount > minContourPointsAllowed)
            contours.push_back(*it);
    }
}

float perimeter(const vector<Point2f> &a)
{
    float sum = 0, dx, dy;
    for (size_t i = 0; i<a.size(); i++)
    {
        size_t i2 = (i + 1) % a.size();
        dx = a[i].x - a[i2].x;
        dy = a[i].y - a[i2].y;
        sum += (dx*dx + dy*dy); // reduce the calculation
        //sum += sqrt(dx*dx + dy*dy);
    }
    return sum;
}

// Filter raw contours down to plausible square-marker candidates.
//
// A contour survives when it approximates to a convex quadrilateral whose
// shortest side (compared as a SQUARED length, no sqrt) meets the threshold.
// Corner winding is normalised via the cross product of the first two edges,
// and of any two candidates whose corresponding corners lie close together
// (mean squared corner distance < 100), only the larger one is kept.
//
// contours                  - input contours (from findContour).
// detectedMarkers           - output; cleared, then filled with survivors.
// m_minContourLengthAllowed - minimum squared side length (this int parameter
//                             shadows the file-level Size global of the same name).
void findCandidates(const vector<vector<Point> >& contours, vector<Marker_J>& detectedMarkers, int m_minContourLengthAllowed)
{
    vector<Point> approxCurve;
    vector<Marker_J> possibleMarkers;

    for (size_t i = 0; i < contours.size(); i++)
    {
        //double eps = contours[i].size()*0.05;
        // Approximation tolerance ~ size/16 (shift instead of multiply, cheap).
        double eps = contours[i].size() >> 4;
        approxPolyDP(contours[i], approxCurve, eps, true);

        // Markers are quadrilaterals.
        if (approxCurve.size() != 4)
            continue;

        if (!isContourConvex(approxCurve))
            continue;

        // Reject quads whose shortest side is below the threshold.
        // BUG FIX: the inner loop index was `i`, shadowing the outer loop's
        // `i` — renamed to `c` (behavior was identical, but it was fragile).
        float minDist = 1e10;
        for (int c = 0; c < 4; c++)
        {
            Point side = approxCurve[c] - approxCurve[(c + 1) % 4];
            float squaredSideLength = side.dot(side);
            minDist = min(minDist, squaredSideLength);
        }

        if (minDist < m_minContourLengthAllowed)
            continue;

        Marker_J m;
        for (int c = 0; c < 4; c++)   // was also a shadowing `i`
            m.points.push_back(Point2f(approxCurve[c].x, approxCurve[c].y));

        // Normalise winding: if the cross product of the first two edges is
        // negative, swap corners 1 and 3 so every marker winds the same way.
        // BUG FIX: these were integer `Point`, silently truncating the Point2f
        // difference (harmless here only because the corners are integral).
        Point2f v1 = m.points[1] - m.points[0];
        Point2f v2 = m.points[2] - m.points[0];
        double o = (v1.x * v2.y) - (v1.y * v2.x);

        if (o < 0.0)
            swap(m.points[1], m.points[3]);

        possibleMarkers.push_back(m);
    }

    // Record pairs of candidates whose mean squared corner distance is small;
    // they are almost certainly the same physical marker detected twice.
    vector< pair<int, int> > tooNearCandidates;
    for (size_t i = 0; i < possibleMarkers.size(); i++)
    {
        const Marker_J& m1 = possibleMarkers[i];

        for (size_t j = i + 1; j < possibleMarkers.size(); j++)
        {
            const Marker_J& m2 = possibleMarkers[j];
            float distSquared = 0;
            for (int c = 0; c < 4; c++)
            {
                Point v = m1.points[c] - m2.points[c];
                distSquared += v.dot(v);
            }
            distSquared /= 4;

            if (distSquared < 100)
            {
                tooNearCandidates.push_back(pair<int, int>(i, j));
            }
        }
    }

    // Of each too-near pair, drop the one with the smaller (squared) perimeter.
    vector<bool> removalMask(possibleMarkers.size(), false);
    for (size_t i = 0; i < tooNearCandidates.size(); i++)
    {
        float p1 = perimeter(possibleMarkers[tooNearCandidates[i].first].points);
        float p2 = perimeter(possibleMarkers[tooNearCandidates[i].second].points);

        size_t removalIndex;
        if (p1 > p2)
            removalIndex = tooNearCandidates[i].second;
        else
            removalIndex = tooNearCandidates[i].first;

        removalMask[removalIndex] = true;
    }

    // Copy out everything not flagged for removal.
    detectedMarkers.clear();
    for (size_t i = 0; i < possibleMarkers.size(); i++)
    {
        if (!removalMask[i])
            detectedMarkers.push_back(possibleMarkers[i]);
    }
}
// Decode the candidate markers and keep only the valid ones.
//
// Each candidate quad is unwarped to the canonical markerSize image, decoded
// with Marker_J::getMarkerId, and dropped when no valid id is found (id == -1).
// Accepted markers get their corner list rotated so corner 0 matches the
// decoded orientation.  With refinesCorner enabled, corners are additionally
// refined to sub-pixel accuracy.  On return, detectedMarkers holds only the
// good markers.
//
// grayscale       - source image the candidates were found in.
// detectedMarkers - in: candidates from findCandidates; out: decoded markers.
void recognizeMarkers(const Mat& grayscale, vector<Marker_J>& detectedMarkers)
{
    Mat canonicalMarkerImage;
    // (removed: unused local `char name[20]`)
    vector<Marker_J> goodMarkers;
    for (size_t i = 0; i < detectedMarkers.size(); i++)
    {
        Marker_J& marker = detectedMarkers[i];
        // Unwarp the candidate into the canonical view for decoding.
        Mat markerTransform = getPerspectiveTransform(marker.points, m_markerCorners2d);
        warpPerspective(grayscale, canonicalMarkerImage, markerTransform, markerSize);
        int nRotations;
        int id = Marker_J::getMarkerId(canonicalMarkerImage, nRotations);
#if UNIQUEID
        // Enforce unique ids: only the first marker with a given id survives.
        for (size_t k = 0; k < goodMarkers.size(); k++)
            if (goodMarkers[k].id == id)
                id = -1;
#endif
        if (id != -1)
        {
            marker.id = id;
            // Rotate the corner order to match the decoded marker orientation.
            rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
            goodMarkers.push_back(marker);
        }
    }

    if (goodMarkers.size() > 0)
    {
        // Flatten all corners into one vector (4 per marker) so cornerSubPix
        // can refine them in a single call.
        vector<Point2f> preciseCorners(4 * goodMarkers.size());
        for (size_t i = 0; i < goodMarkers.size(); i++)
        {
            Marker_J& marker = goodMarkers[i];

            for (int c = 0; c < 4; c++)
            {
                preciseCorners[i * 4 + c] = marker.points[c];
            }
        }

#if refinesCorner
        // Sub-pixel refinement, then scatter the refined corners back.
        TermCriteria termCriteria = TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS, 30, 0.01);
        cornerSubPix(grayscale, preciseCorners, cvSize(5, 5), cvSize(-1, -1), termCriteria);
        for (size_t i = 0; i < goodMarkers.size(); i++)
        {
            Marker_J& marker = goodMarkers[i];
            for (int c = 0; c < 4; c++)
            {
                marker.points[c] = preciseCorners[i * 4 + c];
            }
        }
#endif // refinesCorner
    }
    detectedMarkers = goodMarkers;
#ifdef SHOW_DEBUG_IMAGES
    // Debug view: refined marker outlines over a dimmed source image.
    Mat markerCornersMat(grayscale.size(), grayscale.type());
    markerCornersMat = Scalar(0);
    for (size_t i = 0; i < goodMarkers.size(); i++)
    {
        goodMarkers[i].drawContour(markerCornersMat, Scalar(255));
    }

    imshow("Markers refined edges", grayscale*0.5 + markerCornersMat);
#endif
}

// Build the canonical marker geometry used by recognizeMarkers and
// estimatePosition: the 3D corners of a unit square in the Z = 0 plane, and
// the 2D pixel corners of the rectified markerSize image.
void markerInit()
{
    const bool centerOrigin = true;
    if (centerOrigin)
    {
        // Unit square centred on the origin.
        const float h = 0.5f;
        m_markerCorners3d.push_back(Point3f(-h, -h, 0));
        m_markerCorners3d.push_back(Point3f(+h, -h, 0));
        m_markerCorners3d.push_back(Point3f(+h, +h, 0));
        m_markerCorners3d.push_back(Point3f(-h, +h, 0));
    }
    else
    {
        // Unit square with one corner at the origin.
        m_markerCorners3d.push_back(Point3f(0, 0, 0));
        m_markerCorners3d.push_back(Point3f(1, 0, 0));
        m_markerCorners3d.push_back(Point3f(1, 1, 0));
        m_markerCorners3d.push_back(Point3f(0, 1, 0));
    }

    // Corners of the canonical marker image (same order as the 3D corners).
    const float right  = markerSize.width - 1;
    const float bottom = markerSize.height - 1;
    m_markerCorners2d.push_back(Point2f(0, 0));
    m_markerCorners2d.push_back(Point2f(right, 0));
    m_markerCorners2d.push_back(Point2f(right, bottom));
    m_markerCorners2d.push_back(Point2f(0, bottom));
}

// Load camera intrinsics ("Camera_Matrix") and distortion coefficients
// ("Distortion_Coefficients") from out_camera_data.yml into the globals
// camMatrix / distCoeff.  Terminates the whole process if the file is
// missing.  NOTE(review): exit(1) from deep inside a helper is abrupt for a
// library-style function — consider reporting the failure instead.
void readCameraParameter()
{
    // Defaults in case reads below leave values untouched: identity
    // intrinsics, zero distortion.  NOTE(review): the mats are created as
    // CV_64F but assigned into Mat_<float> globals — OpenCV converts on
    // assignment; confirm float depth is the intended precision.
    camMatrix = Mat::eye(3, 3, CV_64F);
    distCoeff = Mat::zeros(8, 1, CV_64F);

    FileStorage fs("out_camera_data.yml", FileStorage::READ);
    if (!fs.isOpened())
    {
        cout << "Could not open the configuration file!" << endl;
        exit(1);
    }
    fs["Camera_Matrix"] >> camMatrix;
    fs["Distortion_Coefficients"] >> distCoeff;
    fs.release();
#ifdef SHOW_DEBUG_IMAGES
    // Echo the loaded parameters for debugging.
    cout << camMatrix << endl;
    cout << distCoeff << endl;
#endif // SHOW_DEBUG_IMAGES


}

// Estimate each marker's pose with solvePnP and store the translation in
// camPos and the rotation vector in camAngles.
//
// detectedMarkers - markers with valid 2D corners (from recognizeMarkers);
//                   updated in place.
// camMatrix       - camera intrinsic matrix.
// distCoeff       - lens distortion coefficients.
//
// raux/taux are created empty, so solvePnP allocates them as CV_64F (double)
// 1x3 vectors — which is why the reads below cast the row pointers to double*.
void estimatePosition(vector<Marker_J>& detectedMarkers, Mat_<float>& camMatrix, Mat_<float>& distCoeff)
{
    for (size_t i = 0; i < detectedMarkers.size(); i++)
    {
        Marker_J& m = detectedMarkers[i];

        // (removed: unused locals `Mat Rvec;` and `Mat_<float> Tvec;`)
        Mat raux, taux;

        // Match the canonical 3D corners to this marker's observed 2D corners.
        solvePnP(m_markerCorners3d, m.points, camMatrix, distCoeff, raux, taux);
        // Translation of the marker in camera coordinates.
        m.camPos.x = *(double *)taux.ptr(0);
        m.camPos.y = *(double *)taux.ptr(1);
        m.camPos.z = *(double *)taux.ptr(2);
        // Rotation as a Rodrigues vector (axis * angle), not Euler angles.
        m.camAngles.x = *(double *)raux.ptr(0);
        m.camAngles.y = *(double *)raux.ptr(1);
        m.camAngles.z = *(double *)raux.ptr(2);
    }
}

// Build a binary mask from a BGR image: 255 where the blue channel is <= the
// green channel, 0 where blue dominates.  simg is read per-pixel in 3-byte
// steps, so it is expected to be CV_8UC3; rimg receives a CV_8UC1 mask of the
// same size.
void thresBG(Mat &simg, Mat &rimg)
{
    Mat mask(simg.rows, simg.cols, CV_8UC1);
    for (int row = 0; row < simg.rows; ++row)
    {
        const uchar *src = simg.ptr(row);   // B,G,R triplets
        uchar *dst = mask.ptr(row);
        for (int col = 0; col < simg.cols; ++col, src += 3, ++dst)
        {
            // src[0] = blue, src[1] = green.
            *dst = (src[0] > src[1]) ? 0 : 255;
        }
    }
    rimg = mask;
}

// One-time initialisation for marker detection: load the camera intrinsics
// from disk, then build the canonical marker corner tables.
void markDetectInit()
{
    readCameraParameter();  // may exit(1) if out_camera_data.yml is missing
    markerInit();           // fills m_markerCorners2d / m_markerCorners3d
}
// Find the largest blob in the binary image and report the centre of its
// bounding rectangle and its area.
//
// mimg     - binary mask; NOTE: modified in place (morphological opening,
//            and findContours also consumes it).
// centerpt - out: centre of the largest blob's bounding rect.
// area     - out: contour area of the largest blob (double narrowed to float).
// Returns 1 when a blob was found, 0 when the image has no contours
// (centerpt and area are left untouched in that case).
int calcMaxMark(Mat &mimg, Point &centerpt, float &area)
{
    // Opening with a 15x15 rectangle removes small noise blobs first.
    Mat element = getStructuringElement(MORPH_RECT, Size(15, 15));
    vector<vector<Point> > contours;
    morphologyEx(mimg, mimg, MORPH_OPEN, element);
    findContours(mimg, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
    double maxArea = 0, tmpArea = 0;
    // BUG FIX: `index` was uninitialised; if every contour had zero area the
    // loop never assigned it and contours[index] below was undefined behavior.
    size_t i, index = 0;
    // Find the contour with the largest area.
    if (contours.size() == 0)
    {
        return 0;
    }
    for (i = 0; i < contours.size(); i++)
    {
        tmpArea = contourArea(contours[i]);
        if (tmpArea > maxArea)
        {
            maxArea = tmpArea;
            index = i;
        }
    }
    // Report the centre of the winning contour's bounding rect.
    Rect brect = boundingRect(contours[index]);
    centerpt.x = brect.x + brect.width / 2;
    centerpt.y = brect.y + brect.height / 2;
    area = maxArea;
    return 1;
}

extern "C" {
    // Camera worker loop (C linkage; presumably run on its own thread —
    // confirm against the caller).  Continuously grabs frames, finds the
    // largest red/blue/yellow blob, and publishes the result into *camPos
    // while holding *mlock.  Returns immediately (err_no set) if the camera
    // cannot be opened, or when camPos->stopRem == 1 is observed.
    void updatePosition(CamPos * camPos, CP_MUTEXLOCK * mlock)
    {
        markDetectInit();
        VideoCapture cap(0);
        if (!cap.isOpened())
        {
            camPos->err_no = CP_ERR_CAMNOTOPEN;
            return;
        }
        Mat markImg, hsv_img, red_img1, red_img2, red_img, blue_img, yellow_img, all_img;
        vector<Marker_J> mark_BG;
        int resNum = 0;
        size_t i;
        Point tmpPoint;
        float initArea;
        int tmpInitDet;
        while (true)
        {
            cap >> markImg;
            // NOTE(review): an empty frame (camera unplugged mid-run) would
            // make cvtColor assert — consider guarding markImg.empty().
            cvtColor(markImg, hsv_img, CV_BGR2HSV);
            // Red hue wraps around 0/180, so two ranges are combined.
            inRange(hsv_img, Scalar(0, 43, 46), Scalar(10, 255, 255), red_img1);
            inRange(hsv_img, Scalar(156, 43, 46), Scalar(180, 255, 255), red_img2);
            red_img = red_img1 + red_img2;
           inRange(hsv_img, Scalar(100, 43, 46), Scalar(124, 255, 255), blue_img);
            inRange(hsv_img, Scalar(26, 43, 46), Scalar(34, 255, 255), yellow_img);
            // Union of all three colour masks; the largest blob in it is the
            // "init" target reported below.
            all_img = blue_img + yellow_img + red_img;
            tmpInitDet = calcMaxMark(all_img, tmpPoint, initArea);

            // Marker-based pose detection is currently disabled, so resNum
            // stays 0 and the markInfo branch below never executes.
            //resNum = detectCamPos(markImg, mark_BG, thres_BG);
            if (resNum > MAX_MARK) resNum = MAX_MARK;
            // Publish results under the mutex.
            WaitMutexCP(*mlock);
            camPos->isInitDet = tmpInitDet;
            camPos->initPt.x = tmpPoint.x;
            camPos->initPt.y = tmpPoint.y;
            camPos->initArea = initArea;
            if (resNum <= 0)
            {
                camPos->isDetected = 0;
            }
            else
            {
                camPos->quantity = resNum;
                camPos->isDetected = 1;
                for (i = 0; i < resNum; i++)
                {
                    camPos->markInfo[i].id = mark_BG[i].id;
                    camPos->markInfo[i].x = mark_BG[i].camPos.x;
                    camPos->markInfo[i].y = mark_BG[i].camPos.y;
                    camPos->markInfo[i].z = mark_BG[i].camPos.z;
                    camPos->markInfo[i].spin_x = mark_BG[i].camAngles.x;
                    camPos->markInfo[i].spin_y = mark_BG[i].camAngles.y;
                    camPos->markInfo[i].spin_z = mark_BG[i].camAngles.z;
                }
            }
            // stopRem is the caller's shutdown flag, checked inside the lock.
            if (camPos->stopRem == 1)
            {
                ExitMutexCP(*mlock);
                break;
            }
            ExitMutexCP(*mlock);
            //imshow("show", markImg);
            waitKey(20);
        }
    }
}

// Detect markers in frameImg with the selected thresholding strategy; returns
// how many markers were found.  markVec receives the markers with id, camera
// position, and rotation filled in (via findCandidates / recognizeMarkers /
// estimatePosition).
//
// thresmode selects how the binary image(s) are produced:
//   thres_BG        - blue-vs-green channel test (thresBG)
//   thres_adapt     - adaptive mean threshold
//   thres_otsu      - Otsu's method
//   thres_mix       - Otsu pass + adaptive pass, contours merged
//   thres_traversal - fixed thresholds swept 30..240 in steps of 30
int detectCamPos(Mat &frameImg, vector<Marker_J>& markVec , int thresmode)
{
    Mat ImgGray, thresholdImg;
    vector<vector<Point> > potVec;
    // NOTE(review): CV_BGRA2GRAY expects a 4-channel input, but frames from
    // VideoCapture elsewhere in this file are 3-channel BGR — CV_BGR2GRAY may
    // be intended here; confirm what callers actually pass.
    if (thresmode != thres_BG)
        cvtColor(frameImg, ImgGray, CV_BGRA2GRAY);
    else
    {
        // thres_BG: the binary blue/green mask also serves as the "grayscale"
        // image used later for marker decoding.
        thresBG(frameImg, thresholdImg);
        thresholdImg.copyTo(ImgGray);
        findContour(thresholdImg, potVec, minContourPoints);
    }
    //medianBlur(ImgGray, ImgGray, 7);
    //equalizeHist(ImgGray, ImgGray);
    if (thresmode == thres_adapt)
    {
        adaptiveThreshold(ImgGray, thresholdImg, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, thres_blocksize, thres_constvalue);
        findContour(thresholdImg, potVec, minContourPoints);
    }
    else if (thresmode == thres_otsu)
    {
        // The explicit threshold value is ignored when CV_THRESH_OTSU is set.
        threshold(ImgGray, thresholdImg, 255, 255, CV_THRESH_OTSU);
        findContour(thresholdImg, potVec, minContourPoints);
    }
    else if (thresmode == thres_mix)
    {
        // Otsu first, then adaptive; the second pass appends (isClear=false).
        threshold(ImgGray, thresholdImg, 255, 255, CV_THRESH_OTSU);
        findContour(thresholdImg, potVec, minContourPoints);
        adaptiveThreshold(ImgGray, thresholdImg, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, thres_blocksize, thres_constvalue);
        findContour(thresholdImg, potVec, minContourPoints, false);
    }
    else if (thresmode == thres_traversal)
    {
        // Sweep fixed thresholds 30, 60, ..., 240, accumulating contours.
        int thresValue = 30;
        threshold(ImgGray, thresholdImg, thresValue, 255, CV_THRESH_BINARY);
        findContour(thresholdImg, potVec, minContourPoints);
        while (thresValue < 220)
        {
            thresValue = thresValue + 30;
            threshold(ImgGray, thresholdImg, thresValue, 255, CV_THRESH_BINARY);
            findContour(thresholdImg, potVec, minContourPoints, false);
        }
    }
#ifdef SHOW_DEBUG_IMAGES
    imshow("threshold image: ", thresholdImg);
#endif

#ifdef SHOW_DEBUG_IMAGES
    Mat contoursImage(thresholdImg.size(), CV_8UC1, Scalar(0));
    drawContours(contoursImage, potVec, -1, cv::Scalar(255), 2, CV_AA);
    imshow("Contours image: ", contoursImage);
#endif
    // Full pipeline: candidate quads -> decoded ids -> 3D pose.
    findCandidates(potVec, markVec, minContourLength); 
    recognizeMarkers(ImgGray, markVec);
    estimatePosition(markVec, camMatrix, distCoeff);
    return markVec.size();
}
