#include "MarkerLocator.h"

#include <math.h>
#include <stdio.h>

#include <string>

using namespace cv;

MarkerLocator::MarkerLocator(Mat r_cameraMatrix, Mat r_distCoeff, Mat r_robotLoc, float r_markerSize)
    : m_markerSize(r_markerSize), m_markerWorldPoints(4,3,CV_32FC1), m_markerImagePoints(4), m_perspectiveMap(100, 100, CV_8UC3)
{
    // Deep-copy the calibration and pose inputs so later modifications by the
    // caller cannot silently change our internal state.
    m_cameraMatrix = r_cameraMatrix.clone();
    m_distCoeff = r_distCoeff.clone();
    m_robotLoc = r_robotLoc.clone();

    // Derive the robot's forward and side direction vectors from its location
    // vector. Assumes r_robotLoc is a 3x1 CV_64F column vector — TODO confirm.
    double locY = r_robotLoc.at<double>(1, 0);
    double locZ = r_robotLoc.at<double>(2, 0);

    if(fabs(locY) < 0.01 && fabs(locZ) < 0.01) {
        // Degenerate case: the location has (almost) no Y/Z component, so
        // fall back to the canonical forward (+Z) and side (+X) axes.
        m_robotForward = (Mat_<double>(3, 1) << 0.0, 0.0, 1.0);
        m_robotSide = (Mat_<double>(3, 1) << 1.0, 0.0, 0.0);
    }
    else {
        // Construct a vector perpendicular to the location within the Y/Z
        // plane, picking the sign so the forward vector has a positive Y.
        if(locZ < 0)
            m_robotForward = (Mat_<double>(3, 1) << 0.0, -locZ, locY);
        else
            m_robotForward = (Mat_<double>(3, 1) << 0.0, locZ, -locY);
        m_robotSide = m_robotForward.cross(r_robotLoc);
    }

    normalize(m_robotForward, m_robotForward);
    normalize(m_robotSide, m_robotSide);

    // 3D marker corner coordinates: a square of side r_markerSize centred on
    // the origin in the Z = 0 plane, listed counter-clockwise starting from
    // the (-x, -y) corner to match m_markerImagePoints below.
    float halfSize = (float)(r_markerSize / 2.0);
    const float corners[4][2] = {
        { -halfSize, -halfSize },
        { -halfSize,  halfSize },
        {  halfSize,  halfSize },
        {  halfSize, -halfSize }
    };
    for(int p = 0; p < 4; p++) {
        m_markerWorldPoints.at<float>(p, 0) = corners[p][0];
        m_markerWorldPoints.at<float>(p, 1) = corners[p][1];
        m_markerWorldPoints.at<float>(p, 2) = 0;
    }

    // 2D destination corners for the perspective unwarp: the four corners of
    // m_perspectiveMap, in the same winding as the world points above.
    float lastCol = m_perspectiveMap.cols - 1;
    float lastRow = m_perspectiveMap.rows - 1;
    m_markerImagePoints[0] = Point2f(0, 0);
    m_markerImagePoints[1] = Point2f(lastCol, 0);
    m_markerImagePoints[2] = Point2f(lastCol, lastRow);
    m_markerImagePoints[3] = Point2f(0, lastRow);
}

MarkerLocator::~MarkerLocator()
{
    // Nothing to release explicitly; the cv::Mat members clean up themselves.
}

// Detect fiducial markers in r_frame and refresh m_knownMarkers: every marker
// seen in this frame gets its range/bearing (relative to the robot) stored in
// m_location; markers not seen this frame have m_location cleared to an empty
// Mat. Newly seen marker ids are appended to m_knownMarkers.
void MarkerLocator::UpdateMarkers(Mat r_frame)
{
    // Invalidate the cached location of every known marker; only markers that
    // are re-detected below get a fresh range/bearing this frame.
    for(size_t i = 0; i < m_knownMarkers.size(); i++)
        m_knownMarkers[i].m_location = Mat();

    Mat gray, binary;

    // Work on a grayscale version of the frame
    if(r_frame.type() != CV_8UC1) {
        cvtColor(r_frame, gray, CV_BGR2GRAY);
    }
    else
        gray = r_frame;

    // NOTE: the frame is deliberately NOT undistorted here. Quads are detected
    // on the raw image, and solvePnP() below is given m_distCoeff so that lens
    // distortion is accounted for in the pose estimate instead. (An earlier
    // revision computed an undistorted copy that was never used; that dead
    // work has been removed.)

    // Adaptive threshold to make the dark marker borders stand out
    adaptiveThreshold(gray, binary, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, 7, 7);

    #ifdef GUI
    // If a GUI output buffer of matching size was supplied, publish the
    // binary image to it (converted to BGR so overlays can be drawn on it)
    if(m_binaryOut.data && m_binaryOut.cols == binary.cols && m_binaryOut.rows == binary.rows) {
        cvtColor(binary, m_binaryOut, CV_GRAY2BGR);
    }
    #endif

    // Once we have our thresholded image, we can look for quads
    std::vector<std::vector<Point2i> > contours; // Difficult to avoid reallocation
    std::vector<std::vector<Point2f> > quads; // Difficult to avoid reallocation

    // Start by finding the contours in the image
    findContours(binary, contours, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);

    // Minimum contour length (in points) for a marker candidate: 4% of the
    // larger image dimension per side, and a quad has four sides. This bounds
    // how small (i.e. how far away) a detectable marker can be.
    size_t minSize = (size_t)(0.04 * std::max(binary.rows, binary.cols) * 4);

    // Now find the quads
    std::vector<Point> poly;
    for(size_t i = 0; i < contours.size(); i++) {

        // Check to see if the contour meets our size requirements
        if(contours[i].size() > minSize) {
            poly.clear();

            // Fit a polygon to the contour; the epsilon scales with perimeter
            // so the approximation tolerance is resolution-independent
            approxPolyDP(contours[i], poly, arcLength(contours[i], true) * 0.1, true);

            // Only convex quadrilaterals can be markers
            if(poly.size() == 4 && isContourConvex(poly)) {
                // Convert vector<Point> to vector<Point2f> for the sub-pixel
                // routines used later (getPerspectiveTransform, solvePnP)
                quads.push_back(std::vector<Point2f>(4));
                for(int y = 0; y < 4; y++) {
                    quads.back()[y] = Point2f(poly[y].x, poly[y].y);
                }
            }
        }
    }

    if(quads.size() == 0)
        return;

    // Arrange every quad counter-clockwise. If there are doubles, one is the
    // inner contour and the other the outer; inner and outer contours wind in
    // opposite directions, so normalizing also makes duplicates comparable.
    for(size_t i = 0; i < quads.size(); i++) {
        float dx1 = quads[i][1].x - quads[i][0].x;
        float dy1 = quads[i][1].y - quads[i][0].y;
        float dx2 = quads[i][2].x - quads[i][0].x;
        float dy2 = quads[i][2].y - quads[i][0].y;

        // The sign of the cross product tells us the winding direction
        float cross = (dy2 * dx1) - (dy1 * dx2);
        if(cross < 0) {
            std::swap(quads[i][1], quads[i][3]);
        }
    }

    // Invalidate duplicate quads (inner/outer contour pairs of one border)
    std::vector<bool> quadValid(quads.size(), true);
    for(size_t i = 0; i + 1 < quads.size(); i++) {
        if(!quadValid[i])
            continue; // This quad is already invalid

        for(size_t u = i + 1; u < quads.size(); u++) {
            if(!quadValid[u])
                continue;

            bool sameQuad = false;

            // Quads sharing any corner within 10px are treated as duplicates
            for(int j = 0; j < 4; j++) {
                float distSqr = (quads[i][j].x - quads[u][j].x) * (quads[i][j].x - quads[u][j].x) + (quads[i][j].y - quads[u][j].y) * (quads[i][j].y - quads[u][j].y);
                if(distSqr < 10 * 10) { // Should we do the average?
                    sameQuad = true;
                    break;
                }
            }

            if(sameQuad) {
                // Keep the outer (larger perimeter) quad, drop the inner one
                quadValid[(arcLength(quads[i], true) < arcLength(quads[u], true)) ? i : u] = false;
            }
        }
    }

    // Now that we have our quads, check which ones decode as markers
    #ifdef GUI
    // Keep track of how many markers we've put on the output image
    int markersDrawn = 0;
    m_markersOut.setTo(Scalar::all(0));
    #endif
    for(size_t i = 0; i < quads.size(); i++) {
        if(!quadValid[i])
            continue;

        // Unwarp the quad into a square, fronto-parallel view of the marker
        Mat perspectiveTransMat = getPerspectiveTransform(quads[i], m_markerImagePoints);
        warpPerspective(gray, m_perspectiveMap, perspectiveTransMat, m_perspectiveMap.size(), INTER_NEAREST);

        // Binarize with Otsu so the bit-sampling in GetMarkerId is robust to
        // lighting variation (the 127 threshold is ignored when OTSU is set)
        threshold(m_perspectiveMap, m_perspectiveMap, 127, 255, THRESH_BINARY | THRESH_OTSU);

        // Get the id of the marker; negative means "not a valid marker"
        int id = GetMarkerId(m_perspectiveMap);

        if(id >= 0) {
            Mat* pos = 0x0;

            // Find the record for this marker id, if we have seen it before
            for(size_t u = 0; u < m_knownMarkers.size(); u++) {
                if(m_knownMarkers[u].m_id == id) {
                    pos = &(m_knownMarkers[u].m_location);
                    break;
                }
            }
            if(pos == 0x0) {
                // First sighting: register a new marker record
                Marker m;
                m.m_id = id;
                m_knownMarkers.push_back(m);

                pos = &(m_knownMarkers.back().m_location);
            }

            // Estimate the marker pose from its known 3D corner layout and
            // the detected 2D corners; m_distCoeff handles lens distortion
            Mat rotVec, transVec;
            solvePnP(m_markerWorldPoints, quads[i], m_cameraMatrix, m_distCoeff, rotVec, transVec);

            double range, bearing;
            TVectToRangeBaring(transVec, range, bearing);

            // Store the observation as a 2x1 [range; bearing] matrix
            (*pos) = (Mat_<double>(2, 1) << range, bearing);

            #ifdef GUI
            // If we have room, add the unwarped marker to the marker image
            if(m_markersOut.data && m_markersOut.cols == m_perspectiveMap.cols) {
                if(markersDrawn * m_perspectiveMap.rows < m_markersOut.rows) {
                    Rect roi(0, markersDrawn * m_perspectiveMap.rows, m_perspectiveMap.cols, m_perspectiveMap.rows);
                    Mat focus = m_markersOut(roi);
                    cvtColor(m_perspectiveMap, focus, CV_GRAY2BGR);

                    // Annotate with the estimated range
                    char tmp[32];
                    snprintf(tmp, sizeof(tmp), "%.0f", range);
                    putText(focus, std::string(tmp), Point(0, m_perspectiveMap.rows / 2), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 255), 2);

                    markersDrawn++;
                }
            }
            #endif
        }
    }
}

// Return the last observed [range; bearing] (2x1 CV_64F) of the marker at
// index r_markerId, or an empty Mat if the index is out of range or the
// marker was not seen in the most recent frame.
// NOTE(review): this indexes m_knownMarkers by insertion order, not by the
// marker's encoded m_id — confirm callers expect positional lookup.
Mat MarkerLocator::GetLocation(int r_markerId) {
    // Reject negative ids explicitly; comparing a negative int against an
    // unsigned size() relies on implicit conversion, which is error-prone.
    if(r_markerId < 0 || (size_t)r_markerId >= m_knownMarkers.size()) {
        return Mat();
    }
    else
        return m_knownMarkers[r_markerId].m_location;
}

#ifdef GUI
// Register the caller-owned images that UpdateMarkers() draws its debug
// output into (the thresholded frame and the decoded marker strip). Mat
// headers are shallow-copied, so both sides share the same pixel data.
void MarkerLocator::SetDrawMats(Mat r_binary, Mat r_markers)
{
    m_markersOut = r_markers;
    m_binaryOut = r_binary;
}
#endif

// Sample one cell of the marker's bit grid. The marker image is divided into
// r_width x r_width pixel cells; the bit at grid position (r_x, r_y) reads as
// 1 (true) when white pixels form the majority within that cell.
bool MarkerLocator::GetBit(Mat r_marker, int r_x, int r_y, int r_width)
{
    Rect cell(r_x * r_width, r_y * r_width, r_width, r_width);
    int whitePixels = countNonZero(r_marker(cell));
    return whitePixels > (r_width * r_width) / 2;
}

// Decode the id encoded in an unwarped, binarized marker image, or return -1
// if the image is not a valid marker. The marker is a 7x7 grid of cells: the
// outer ring must be entirely black (the border), leaving a 5x5 data grid.
// Each data row encodes 2 id bits protected by redundancy and parity, so a
// valid marker yields a 10-bit id. All four 90-degree rotations are tried,
// making the decode orientation-independent.
int MarkerLocator::GetMarkerId(Mat r_marker)
{
    const int bitsPerMarker = 7;

    // The marker has seven sections - two edges and five bits.
    // Assumes r_marker is square so rows/bitsPerMarker works for both axes —
    // TODO confirm (m_perspectiveMap is created 100x100).
    int bitWidth = r_marker.rows / bitsPerMarker;

    // Start by checking the edges to see if it is a marker: every cell of the
    // outer ring must be black (GetBit false), otherwise reject immediately.
    for(int y = 0; y < bitsPerMarker; y++) {
        // Left and right border columns of row y
        if(GetBit(r_marker, 0, y, bitWidth))
            return -1;
        if(GetBit(r_marker, bitsPerMarker - 1, y, bitWidth))
            return -1;

        // Top and bottom border rows: check the remaining interior columns
        if(y == 0 || y == bitsPerMarker - 1) {
            for(int x = 1; x < bitsPerMarker - 1; x++) {
                if(GetBit(r_marker, x, y, bitWidth))
                    return -1;
            }
        }
    }

    // bits[0] holds the 5x5 data grid; bits[1..3] will hold its rotations.
    // (Only the first element is explicitly initialized; transpose() below
    // allocates the others.)
    Mat bits[4] =  { Mat::zeros(5, 5, CV_8UC1) };
    for(int row = 1; row < bitsPerMarker - 1; row++) {
        for(int col = 1; col < bitsPerMarker - 1; col++)
            bits[0].at<uchar>(row - 1, col - 1) = GetBit(r_marker, col, row, bitWidth);
    }

    // Get all the rotations: transpose + vertical flip = 90-degree rotation
    for(int i = 1; i < 4; i++) {
        transpose(bits[i - 1], bits[i]);
        flip(bits[i], bits[i], 0);
    }

    // Try each rotation until one satisfies the row encoding:
    //   col0 == !col1, col2 == col3, col4 == parity of (col1, col3);
    // the data bits are col1 and col3 of each row (2 bits/row, 10 bits total,
    // most-significant row first).
    int id = 0;
    for(int i = 0; i < 4; i++) {
        bool valid = true;

        /// TODO: Add error correction?
        for(int row = 0; row < bitsPerMarker - 2; row++) {
            // Check to make sure bit 0 is the opposite of bit 1
            if(bits[i].at<uchar>(row, 0) == bits[i].at<uchar>(row, 1)) {
                valid = false;
                break;
            }

            // Check to make sure bit 2 is the same as bit 3
            if(bits[i].at<uchar>(row, 2) != bits[i].at<uchar>(row, 3)) {
                valid = false;
                break;
            }

            // Check the parity
            if(bits[i].at<uchar>(row, 4) != (bits[i].at<uchar>(row, 1) != bits[i].at<uchar>(row, 3))) {
                valid = false;
                break;
            }

            // So far, so good. Add to the ID
            id = id << 1;
            id += bits[i].at<uchar>(row, 1);
            id = id << 1;
            id += bits[i].at<uchar>(row, 3);
        }

        // If we have a valid marker, return its id
        if(!valid) {
            // This does not mean the whole maker was invalid, just that this rotation isn't the upright one.
            // Reset the partially-accumulated id before trying the next rotation.
            // Don't make that mistake again.
            id = 0;
        }
        else {
            return id;
        }
    }

    // No valid rotations
    return -1;
}

// Project r_point (3x1 CV_64F) onto the robot's plane: the result is rebuilt
// as (wx, 0, wz), where wx and wz are the scalar projections onto the robot's
// side and forward axes respectively. Modifies r_point in place.
void MarkerLocator::ChangeSpace(Mat& r_point)
{
    // Scalar projection = (p . axis) / (axis . axis); the denominators are
    // ~1 since the axes were normalized in the constructor, but dividing
    // keeps this correct regardless.
    double forwardLenSq = m_robotForward.dot(m_robotForward);
    double sideLenSq = m_robotSide.dot(m_robotSide);

    double wz = r_point.dot(m_robotForward) / forwardLenSq;
    double wx = r_point.dot(m_robotSide) / sideLenSq;

    r_point = (Mat_<double>(3, 1) << wx, 0.0, wz);
}

// Convert a camera-space translation vector (from solvePnP) into a range and
// bearing relative to the robot. r_tVec is taken by value and modified
// locally; results are written to rr_range (same units as the marker size)
// and rr_bearing (radians, 0 along the robot's forward axis).
void MarkerLocator::TVectToRangeBaring(Mat r_tVec, double& rr_range, double& rr_bearing)
{
    // Get the vector relative to the robot rather than the camera
    subtract(r_tVec, m_robotLoc, r_tVec);

    // Project the translation vector onto the robot's plane (side/forward axes)
    ChangeSpace(r_tVec);

    // Range is the Euclidean norm of the projected vector
    rr_range = sqrt(r_tVec.dot(r_tVec));

    // Use atan2 instead of atan(x/z): it is well-defined when the forward
    // component z is zero (no division by zero) and returns the correct
    // quadrant when the marker lies behind the robot's forward axis.
    rr_bearing = atan2(r_tVec.at<double>(0, 0), r_tVec.at<double>(2, 0));
}
