/**
 * Senior project: Augmented reality
 * imagehandler.cpp
 */

#include "imagehandler.h"

using namespace cv;

/**
 * Opens the default capture device (camera 0) and requests a
 * 640x480 frame size from the driver.
 */
ImageHandler::ImageHandler() {
    theCapture = new cv::VideoCapture(0);
    theCapture->set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    theCapture->set(CV_CAP_PROP_FRAME_WIDTH, 640);
}

/**
 * Releases the capture device and logs the teardown.
 */
ImageHandler::~ImageHandler() {
    std::cout << "Deconstructed ImageHandler" << std::endl;
    delete theCapture;
}

/**
 * Captures the next frame from the camera and returns it as an RGB image.
 *
 * @return the captured frame, converted from OpenCV's native BGR order
 *         to RGB
 */
cv::Mat ImageHandler::getNextFrame() {
    cv::Mat inFrame;
    cv::Mat frameToReturn;

    // get the next frame from the capture device
    theCapture->grab();
    theCapture->retrieve(inFrame);

    // OpenCV delivers BGR; convert to RGB for the rest of the pipeline
    cvtColor(inFrame, frameToReturn, CV_BGR2RGB);

    // return!
    return frameToReturn;

    // there is no step four.
}

/**
 * Captures the next frame, runs ArUco marker detection on it, and returns
 * the frame as an RGB image.
 *
 * @param markers out-parameter; filled with the markers detected in the frame
 * @return the captured frame, converted from BGR to RGB
 */
cv::Mat ImageHandler::getNextFrameAndMatches(vector<aruco::Marker> *markers) {
    cv::Mat inFrame;
    cv::Mat frameToReturn;

    // get frame
    theCapture->grab();
    theCapture->retrieve(inFrame);

    // detect markers on the raw (BGR) frame; no camera parameters,
    // no distortion coefficients, unknown marker size
    theDetector.detect(inFrame,
                       *markers,
                       Mat(),
                       Mat(),
                       -1.0);

    // BUG FIX: frameToReturn was never assigned (the cvtColor below was
    // commented out), so callers received an empty Mat. Convert BGR->RGB
    // and return the frame, matching getNextFrame().
    cvtColor(inFrame, frameToReturn, CV_BGR2RGB);

    return frameToReturn;
}

/**
 * Runs ArUco marker detection on a single RGB frame.
 *
 * @param inFrame frame in RGB channel order (as produced by getNextFrame)
 * @return the markers the detector found in the frame
 */
vector<aruco::Marker> ImageHandler::findTemplateMatches(Mat inFrame) {
    vector<aruco::Marker> foundMarkers;

    // The detector works on OpenCV's native BGR ordering, so convert back.
    Mat bgrFrame;
    cvtColor(inFrame, bgrFrame, CV_RGB2BGR);

    // No camera parameters, no distortion coefficients, unknown marker size.
    theDetector.detect(bgrFrame, foundMarkers, Mat(), Mat(), -1.0);

    return foundMarkers;
}

/**
 * Tracks point motion between two consecutive RGB frames using pyramidal
 * Lucas-Kanade optical flow.
 *
 * @param previousFrameIn the earlier frame (RGB)
 * @param nextFrameIn     the later frame (RGB)
 * @param previousPoints  points in previousFrameIn to track (input to the
 *                        optical-flow call)
 * @param nextPoints      out: tracked positions of *previousPoints in
 *                        nextFrameIn
 */
void ImageHandler::detectMotion(Mat previousFrameIn,
                                           Mat nextFrameIn,
                                           vector<Point2f> *previousPoints,
                                           vector<Point2f> &nextPoints) {
    // Per-point tracking status and error; computed but not returned.
    vector<uchar> outputStatus;
    vector<float> outputErr;
    
//    vector<Point2f> prevPoints;
//    vector<Point2f> nxPoints;
    
    // Turn the input frames into grayscale
    Mat prevGray;
    cvtColor(previousFrameIn, prevGray, CV_RGB2GRAY);
    Mat nextGray;
    cvtColor(nextFrameIn, nextGray, CV_RGB2GRAY);
    
    // NOTE(review): the corners detected here are written into nextPoints,
    // which calcOpticalFlowPyrLK below immediately overwrites — so this
    // result is discarded. Possibly these corners were meant to seed
    // *previousPoints instead; confirm intent with callers before changing.
    goodFeaturesToTrack(prevGray,            // image to check
                        nextPoints,         // output vector
                        100,                // max corners
                        0.01,               // quality level
                        5.0);               // minimum Euclidian distance
    
    // Track *previousPoints from prevGray into nextGray; tracked positions
    // land in nextPoints. 15x15 search window per pyramid level.
    calcOpticalFlowPyrLK(prevGray,
                         nextGray,
                         *previousPoints,
                         nextPoints,
                         outputStatus,
                         outputErr,
                         Size(15, 15));
}

/**
 * Builds a binary foreground mask by differencing an image against a
 * background frame.
 *
 * @param background     reference background frame (RGB)
 * @param image          current frame (RGB)
 * @param thresholdValue per-pixel difference above which a pixel is
 *                       considered foreground
 * @return single-channel mask: 255 where the grayscale difference exceeds
 *         thresholdValue, 0 elsewhere
 */
cv::Mat ImageHandler::findDiff(Mat background, Mat image, int thresholdValue) {
    // Compare in grayscale so the difference is a single intensity value.
    Mat backgroundGray;
    Mat imageGray;
    cvtColor(background, backgroundGray, CV_RGB2GRAY, 1);
    cvtColor(image, imageGray, CV_RGB2GRAY, 1);

    // Per-pixel absolute difference between the frame and the background.
    Mat diffed;
    cv::absdiff(imageGray, backgroundGray, diffed);

    // One erosion pass with the default 3x3 kernel knocks out speckle
    // noise before binarizing.
    cv::erode(diffed, diffed, cv::Mat(), Point(-1, -1), 1);

    Mat threshed;
    threshold(diffed, threshed, thresholdValue, 255, THRESH_BINARY);

    return threshed;
}
