#include "blink.h"
#include <qmath.h>

/**
 * Construct the detector thread around an already-opened capture device.
 * The capture handle is borrowed here; it is released in exit_nicely().
 */
BlinkDetector::BlinkDetector( CvCapture * cap, QObject *parent )
    : QThread( parent )
{
    capture     = cap;
    _error      = false;
    _init_width = 10;   // placeholder eye baseline until the first detection
}

/**
 * Destructor. Stops the worker thread before tearing down its resources.
 *
 * BUG FIX: the original called exit_nicely() (which releases the capture
 * and all images) while run() might still be executing, and terminate()d
 * the thread without ever wait()ing for it — destroying a QThread that is
 * still running is undefined. Terminate first, wait for the thread to
 * actually stop, then release the resources.
 */
BlinkDetector::~BlinkDetector()
{
    this->terminate();   // NOTE: terminate() is abrupt; run() has no
    this->wait();        //  cooperative cancel point to use instead.
    exit_nicely( "Blink detector: Exit peacefully." );
}

/**
 * Thread entry point: runs the detect/track state machine until the thread
 * is terminated.
 *
 * STAGE_INIT:     frame differencing + k-means (get_eye_pairs) finds a
 *                 candidate eye pair and a template of each eye is grabbed.
 * STAGE_TRACKING: both templates are followed with locate_eye(), the iris
 *                 centres are estimated, and povChanged() is emitted for
 *                 the renderer.
 *
 * BUG FIXES vs. the original:
 *  - the eye-baseline distance added the squared terms instead of
 *    subtracting them (Euclidean distance);
 *  - exit_nicely() does not stop the thread, so each failed frame query
 *    now returns explicitly instead of dereferencing a NULL frame;
 *  - the per-frame temporaries timg/timg2 were allocated, copied into and
 *    released without ever being read — removed.
 */
void BlinkDetector::run()
{
    CvRect  window, window2, eye, eye2;
    int	    key = 0, nc, found;
    int	    stage = STAGE_INIT;
    int     blink_timer = 0; // how long (frames) the eyes have been undetected
    int     fail_timer  = 0; // frames to wait before re-running detection;
                             //  this is to avoid too fast stage switching.
    init();

    forever
    {
        frame = cvQueryFrame( capture );
        if ( !frame )
        {
            // exit_nicely() only releases resources and flags the error;
            // leave the thread explicitly or we would use a NULL frame.
            exit_nicely( "cannot query frame!" );
            return;
        }

        if ( stage == STAGE_INIT )
        {
            // reset the search windows to the whole frame, forget old eyes
            window  = cvRect( 0, 0, frame->width, frame->height );
            window2 = cvRect( 0, 0, frame->width, frame->height );
            eye     = cvRect( 0, 0, 0, 0 );
            eye2    = cvRect( 0, 0, 0, 0 );
        }

        cvCvtColor( frame, gray, CV_BGR2GRAY ); // for colour webcams
        using_full_window = 0;
        if( stage == STAGE_INIT )
        {
            if( fail_timer == 0 )
            {
                nc = get_eye_pairs( gray, prev, &eye, &eye2 );
                if( nc == 1 )
                {
                    // eye pair found (a blink): skip a couple of frames so
                    // the eyes are open again before grabbing the templates
                    delay_frames( 2 );
                    frame = cvQueryFrame( capture );
                    if ( !frame )
                    {
                        exit_nicely( "cannot query frame!" );
                        return;
                    }
                    cvCvtColor( frame, gray, CV_BGR2GRAY ); // for colour webcams

                    /* First template */
                    cvSetImageROI( gray, eye );
                    cvCopy( gray, tpl );
                    cvResetImageROI( gray );
                    /* Second template */
                    cvSetImageROI( gray, eye2 );
                    cvCopy( gray, tpl2 );
                    cvResetImageROI( gray );
                    blink_timer = 0;
                    stage = STAGE_TRACKING;
                    // remember the initial horizontal eye distance as the
                    // baseline for the scale estimate below
                    _init_width = qAbs( eye.x - eye2.x );
                }
            }
            else
            {
                fail_timer --;
            }
        }

        if ( stage == STAGE_TRACKING )
        {
            // Estimate how much the face moved towards/away from the camera
            // from the ratio of the current eye distance to the initial one.
            // The 0.8 exponent damps the response; the result is clamped.
            float scale = qPow( (float) qAbs( eye.x - eye2.x ) / (float)_init_width, 0.8 );

            if( scale > 1.2 )
            {
                scale = 1.2;
            }
            else if( scale < 0.8 )
            {
                scale = 0.8;
            }

            if( key == 'w' )
            {
                // 'w' forces a full-frame search for both eyes
                found = locate_eye( gray, tpl, &window, &eye, 0, scale );
                found = locate_eye( gray, tpl2, &window2, &eye2, 0, scale );
            }
            else
            {
                found = locate_eye( gray, tpl, &window, &eye, 1, scale );
                found = locate_eye( gray, tpl2, &window2, &eye2, 1, scale );
            }

            if ( !found || qAbs( eye.x - eye2.x ) < 20
                 || window.width < eye.width || window.height < eye.height )
            {
                found = false;
                blink_timer ++;
                fail_timer = 3;
            }
            else
            {
                // the distance between the two found eye positions.
                // BUG FIX: Euclidean distance ADDS the squared terms; the
                // original subtracted them, producing a wrong (or, for a
                // negative radicand, meaningless) baseline.
                int w = qSqrt( (eye.x - eye2.x) * (eye.x - eye2.x) +
                               (eye.y - eye2.y) * (eye.y - eye2.y));
                if( w < MIN_EYE_BASELINE || w > MAX_EYE_BASELINE ||
                        qAbs( eye.y - eye2.y ) > MAX_COMP_VDIFF )
                {
                    found = false;
                    blink_timer ++;
                }
                else
                {
                    // tell the object rendering to change the viewing
                    emit povChanged( (eye.x + eye2.x)/2, (eye.y + eye2.y)/2, w );
                }
            }

            // fall back to the init stage when the eyes were lost for several
            // frames, or when the user requests a reset with 'r'
            if ( (!found && blink_timer > 2) || key == 'r')
            {
                stage = STAGE_INIT;
                frame = cvQueryFrame( capture );
                if ( !frame )
                {
                    exit_nicely( "cannot query frame!" );
                    return;
                }
                cvCvtColor( frame, gray, CV_BGR2GRAY ); // for colour webcams
            }
            else
            {
                DRAW_RECTS(frame, diff, window, eye);
                DRAW_RECTS(frame, diff, window2, eye2);
            }

            // locating iris if not using full size window
            if( key != 'w' && found && using_full_window == 0 )
            {
                IplImage *tm, *scaled_iris;
                scaled_iris = cvCreateImage( cvSize( iris->width * scale, iris->height * scale ),
                                             iris->depth, iris->nChannels );
                cvResize( iris, scaled_iris );
                CvPoint  minloc, maxloc, minloc2, maxloc2;
                double   minval, maxval, minval2, maxval2;
                // result size for matching the iris template inside the eye
                // rect; assumes the scaled iris fits inside the eye rect --
                // TODO confirm for extreme scale values
                int w  = eye.width  - scaled_iris->width  + 1;
                int h  = eye.height - scaled_iris->height + 1;
                tm     = cvCreateImage( cvSize( w, h ), IPL_DEPTH_32F, 1 );

                cvSetImageROI( gray, eye );
                cvMatchTemplate( gray, scaled_iris, tm, CV_TM_CCORR_NORMED );
                cvMinMaxLoc( tm, &minval, &maxval, &minloc, &maxloc, 0 );
                // translate the match position back to frame coordinates,
                // centred on the iris template
                maxloc = cvPoint( maxloc.x + eye.x + scaled_iris->width/2,
                                  maxloc.y + eye.y + scaled_iris->height/2 );
                cvCircle( frame, maxloc, 5, cvScalarAll(255) );
                cvResetImageROI( gray );

                cvSetImageROI( gray, eye2 );
                cvMatchTemplate( gray, scaled_iris, tm, CV_TM_CCORR_NORMED );
                cvMinMaxLoc( tm, &minval2, &maxval2, &minloc2, &maxloc2, 0 );

                maxloc2 = cvPoint( maxloc2.x + eye2.x + scaled_iris->width/2,
                                   maxloc2.y + eye2.y + scaled_iris->height/2 );
                cvCircle( frame, maxloc2, 5, cvScalarAll(255) );

                // always emit the rightmost iris first
                if( maxloc.x > maxloc2.x )
                {
                    emit povChanged( Point2f( maxloc ), Point2f( maxloc2 ) );
                }
                else
                {
                    emit povChanged( Point2f( maxloc2 ), Point2f( maxloc ) );
                }

                cvResetImageROI(gray);
                /* release things */
                cvReleaseImage(&tm);
                cvReleaseImage(&scaled_iris);
            }
            else
            {
                // no reliable iris fix this frame
                emit povChanged( Point2f( -1, -1 ), Point2f( -1, -1 ) );
            }
        }
        cvResetImageROI( gray );
        cvResetImageROI( prev );
        cvCopy( gray, prev );   // keep this frame for the next differencing step
        key  = cvWaitKey(30);
        if( stage == STAGE_TRACKING )
            emit imageChanged( frame, 0 );
        else
            emit imageChanged( frame, kmimg );
    }
}


/* the k-means eye detection implementation */
int BlinkDetector::get_eye_pairs( IplImage* img, IplImage* prev, CvRect* eye, CvRect* eye2  )
{
    IplImage* tdiff;
    int n = 0;
    cvZero(diff);

    /* motion analysis */
    cvAbsDiff(img, prev, diff);
    cvThreshold(diff, diff, 45, 255, CV_THRESH_BINARY);
    cvMorphologyEx(diff, diff, NULL, kernel, CV_MOP_OPEN, OPEN_ITERATIONS );

    tdiff = (IplImage*)cvClone(diff);

    // using k-means to partition the data points
    int sampleCount = cvCountNonZero( diff ), countA = 0;
    if( sampleCount >= MAX_CLUSTERS && sampleCount < MAX_POINTS )
    {
        CvMat *points = cvCreateMat( sampleCount, 1, CV_32FC2 );
        CvMat *clusters = cvCreateMat( sampleCount, 1, CV_32SC1 );

        CvScalar pnt;
        int count = 0;
        for( int i = 0; i < diff->height; i ++ )
        {
            for( int j = 0; j< diff->width; j ++ )
            {
                pnt = cvGet2D( diff, i, j );
                if( pnt.val[0] > 0 )
                {
                    ((CvPoint2D32f*)points->data.fl)[count++] = cvPoint2D32f( j, i );
                }
            }
        }
        // apply kmeans
        cvKMeans2( points, MAX_CLUSTERS, clusters,
                   cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 5, 1.0 ) );

        cvZero( kmimg );
        // draw classification
        for( int i = 0; i < sampleCount; i ++ )
        {
            CvPoint2D32f pt = ((CvPoint2D32f*)points->data.fl)[i];
            int cluster_idx = clusters->data.i[i];
            countA += cluster_idx;
            cvCircle( kmimg, cvPointFrom32f(pt), 2, color_tab[cluster_idx], CV_FILLED );
        }
        /* Divide the point set into two sets */
        CvMat *pointsA = cvCreateMat( countA, 1, CV_32FC2 );
        CvMat *pointsB = cvCreateMat( sampleCount - countA, 1, CV_32FC2 );
        int ca = 0, cb = 0;
        count = 0;
        for( int i = 0; i < sampleCount; i ++ )
        {
            if( clusters->data.i[i] == 1 )
            {
                ((CvPoint2D32f*)pointsA->data.fl)[ca++] = ((CvPoint2D32f*)points->data.fl)[count++];
            }
            else
            {
                ((CvPoint2D32f*)pointsB->data.fl)[cb++] = ((CvPoint2D32f*)points->data.fl)[count++];
            }
        }
        n = is_eye_pair2( pointsA, pointsB, eye, eye2 );
        cvReleaseMat( &points );
        cvReleaseMat( &pointsA );
        cvReleaseMat( &pointsB );
        cvReleaseMat( &clusters );
    }
    cvReleaseImage(&tdiff);
    return n;
}

/**
* Determine whether the given two point regions are eyes or not (for kmeans)
*
* @param	CvMat*   ptsa  first input point set
* @param	CvMat*   ptsb  second input point set
* @param	CvRect*  eye   output parameter; receives the rectangle of the first set
* @param	CvRect*  eye2  output parameter; receives the rectangle of the second set
* @return	int            1 if the two regions form an eye pair, 0 otherwise
*/

int BlinkDetector::is_eye_pair2( CvMat *ptsa, CvMat *ptsb, CvRect *eye, CvRect *eye2 )
{
    if( !ptsa || !ptsb || !ptsa->rows || !ptsb->rows )
    {
        qDebug("is_eye_pair2: input point sets are not sufficiently given.");
        return 0;
    }
    // tightest axis-aligned rectangles around each cluster's points
    CvRect r1, r2;
    r1 = cvBoundingRect( ptsa );
    r2 = cvBoundingRect( ptsb );

    // visualize both candidate regions on the k-means debug image
    cvRectangle( kmimg, cvPoint( r1.x, r1.y ), cvPoint( r1.x + r1.width, r1.y + r1.height ), color_tab[2] );
    cvRectangle( kmimg, cvPoint( r2.x, r2.y ), cvPoint( r2.x + r2.width, r2.y + r2.height ), color_tab[3] );

    // shift both rectangles by the template offsets (presumably so the
    // template grabbed later is centred on the eye -- TODO confirm)
    r1.x += TPL_HOFF;
    r2.x += TPL_HOFF;
    r1.y += TPL_VOFF;
    r2.y += TPL_VOFF;

    /* the areas should not be too big nor too small */
    if( r1.width  < MIN_COMP_WIDTH  ||
            r1.height < MIN_COMP_HEIGHT ||
            r1.width  > MAX_COMP_WIDTH  ||
            r1.height > MAX_COMP_HEIGHT ||
            r2.width  < MIN_COMP_WIDTH  ||
            r2.height < MIN_COMP_HEIGHT ||
            r2.width  > MAX_COMP_WIDTH  ||
            r2.height > MAX_COMP_HEIGHT )
    {
        qDebug() << "Is eye pair: the areas should not be too big nor too small" ;
        return 0;
    }
    /* regions should not cross over */
    // each product is positive iff r2's top-left corner falls strictly
    // inside r1's horizontal (resp. vertical) span
    if( ( r2.x - r1.x ) * ( r1.width + r1.x - r2.x ) > 0  ||
            ( r2.y - r1.y ) * ( r1.height + r1.y - r2.y ) > 0)
    {
        qDebug() << "Is eye pair: regions should not cross over " ;
        return 0;
    }
    /* the width of the components are about the same */
    if (abs(r1.width - r2.width) >= MAX_COMP_WDIFF)
    {
        qDebug() << "Is eye pair: the width of the components are about the same " ;
        return 0;
    }
    /* the height of the components are about the same */
    if (abs(r1.height - r2.height) >= MAX_COMP_HDIFF)
    {
        qDebug() << "Is eye pair: the height f the components are about the same " ;
        return 0;
    }

    /* vertical distance is small */
    if (abs(r1.y - r2.y) >= MAX_COMP_VDIFF)
    {
        qDebug() << "Is eye pair: vertical distance is small " ;
        return 0;
    }
    /* reasonable horizontal distance, based on the components' width */
    // r1.width is non-zero here: it passed the MIN_COMP_WIDTH check above
    int dist_ratio = abs(r1.x - r2.x) / r1.width;
    if (    abs(r1.x - r2.x) > MAX_EYE_BASELINE ||
            abs(r1.x - r2.x) < MIN_EYE_BASELINE ||
            dist_ratio < MIN_COMP_HRATIO ||
            dist_ratio > MAX_COMP_HRATIO)
    {
        qDebug() << "Is eye pair: reasonable horizontal distance, based on the components' width " ;
        return 0;
    }
    // make sure the templates going to make are within the frame
    // (50 px safety margin on every side)
    if( r1.x < 50 || r2.x < 50 || r1.y < 50 || r2.y < 50 ||
            r1.x + TPL_WIDTH >= frame->width   - 50 ||
            r2.x + TPL_WIDTH >= frame->width   - 50 ||
            r1.y + TPL_HEIGHT >= frame->height - 50 ||
            r2.y + TPL_HEIGHT >= frame->height - 50 )
    {
        qDebug() << "Is eye pair: eye template created will be out of bound";
        return 0;
    }


    /* return eye boundaries: fixed template-sized rects anchored at each
       candidate's (offset) top-left corner */
    *eye = cvRect(
                r1.x,
                r1.y,
                TPL_WIDTH,
                TPL_HEIGHT
                );

    *eye2 = cvRect(
                r2.x,
                r2.y,
                TPL_WIDTH,
                TPL_HEIGHT
                );
    return 1;
}


/**
* Locate the user's eye with template matching
*
* @param	IplImage* img     the source image
* @param	IplImage* tpl     the eye template
* @param	CvRect*   window  output window(for viewing facility)
* @param	CvRect*   eye     output parameter, will contain the current
*                                 location of user's eye
* @param	int	  use_window  '1' then use predefined window size, '0' use full size
* @return	int               '1' if found, '0' otherwise
*/
/**
 * Locate the user's eye with template matching.
 *
 * @param img        the source image (searched in-place via ROI)
 * @param tpl        the eye template (scaled by @p scale before matching)
 * @param window     output: the search window actually used
 * @param eye        in: previous eye location (window centre);
 *                   out: new eye location
 * @param use_window 1 = search a small window around the previous location,
 *                   0 = search the whole image
 * @param scale      current face-distance scale factor for template/window
 * @return           1 if a good match was found, 0 otherwise
 *
 * FIX: the full-image search window is now sized from the @p img parameter
 * the ROI is applied to, not from the member 'frame' (they happen to be the
 * same size today, but sizing from a different image than the one searched
 * was an accident waiting to happen).
 */
int BlinkDetector::locate_eye( IplImage* img, IplImage* tpl, CvRect* window, CvRect* eye, int use_window, float scale )
{
    IplImage*	tm, *scaled_tpl;
    CvRect	win;
    CvPoint	minloc, maxloc, point;
    double	minval, maxval;
    int		w, h;

    // scale the template to the current face distance
    scaled_tpl = cvCreateImage( cvSize( tpl->width * scale, tpl->height * scale ),
                                tpl->depth, tpl->nChannels );
    cvResize( tpl, scaled_tpl );
    if( use_window )
    {
        /* get the centroid of eye */
        point = cvPoint(
                    (*eye).x + (*eye).width * scale / 2 ,
                    (*eye).y + (*eye).height * scale / 2
                    );

        /* setup search window centred on the previous eye position;
        replace the predefined WIN_WIDTH and WIN_HEIGHT above
        for your convenient */
        win = cvRect(
                    point.x - WIN_WIDTH * scale / 2,
                    point.y - WIN_HEIGHT * scale / 2,
                    WIN_WIDTH * scale,
                    WIN_HEIGHT * scale
                    );
        /* make sure that the search window is still within the frame */
        if (win.x < 0)
            win.x = 0;
        if (win.y < 0)
            win.y = 0;
        if (win.x + win.width > img->width)
            win.x = img->width - win.width;
        if (win.y + win.height > img->height)
            win.y = img->height - win.height;
    }
    else
    {
        printf("Using full size window\n");
        win = cvRect(0, 0, img->width, img->height);
        using_full_window ++;    // tells run() to skip iris detection this frame
    }

    /* create new image for template matching result where: 
    width  = W - w + 1, and
    height = H - h + 1 */
    w  = win.width  - scaled_tpl->width  + 1;
    h  = win.height - scaled_tpl->height + 1;
    tm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 1);

    /* apply the search window */
    cvSetImageROI(img, win);

    /* template matching */
    cvMatchTemplate(img, scaled_tpl, tm, CV_TM_CCORR_NORMED);
    cvMinMaxLoc(tm, &minval, &maxval, &minloc, &maxloc, 0);

    /* release things */
    cvResetImageROI(img);
    cvReleaseImage(&tm);
    cvReleaseImage(&scaled_tpl);

    /* only good matches */
    if (maxval < TM_THRESHOLD)
    {
        if(use_window == 0)
            return 0;
        else
            // windowed search failed: retry once over the whole image
            return locate_eye(img, tpl, window, eye, 0, scale);
    }
    /* return the search window */
    *window = win;

    /* return eye location (in full-image coordinates) */
    *eye = cvRect(
                win.x + maxloc.x,
                win.y + maxloc.y,
                TPL_WIDTH * scale,
                TPL_HEIGHT * scale
                );
    return 1;
}

/**
* Delay for the specified frame count. I have to write this custom 
* delay function for these reasons:
* - usleep() is not available in Windows
* - usleep() and Sleep() will freeze the video for the given interval
*/
/**
* Delay for the specified frame count. I have to write this custom
* delay function for these reasons:
* - usleep() is not available in Windows
* - usleep() and Sleep() will freeze the video for the given interval
*
* BUG FIX: when the debug cvShowImage call was commented out, the
* cvWaitKey(30) below it became the body of the `if (diff && SHOW_DIFF)`
* statement — so with SHOW_DIFF disabled the function no longer delayed
* at all. The delay must run unconditionally on every iteration.
*/
void BlinkDetector::delay_frames(int nframes)
{
    int i;

    for (i = 0; i < nframes; i++)
    {
        frame = cvQueryFrame(capture);
        if (!frame)
            exit_nicely("cannot query frame");
        //cvShowImage(wnd_name, frame);
        //if (diff && SHOW_DIFF)
        //    cvShowImage(wnd_debug, diff);
        cvWaitKey(30);
    }
}

/**
* Initialize images, memory, and windows
*/
/**
* Initialize images, memory, and windows.
*
* Queries one frame to learn the capture dimensions, then allocates every
* working image used by run(). Calls exit_nicely() on any failure.
*
* FIX: the allocation check now also covers tpl2 and kmimg, both of which
* are used by run()/get_eye_pairs() and were previously unchecked.
*/
void BlinkDetector::init()
{
    frame = cvQueryFrame(capture);
    if (!frame)
        exit_nicely("cannot query frame!");

    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.4, 0.4, 0, 1, 8);
    //cvNamedWindow(wnd_name, 1);

    storage = cvCreateMemStorage(0);
    if (!storage)
        exit_nicely("cannot allocate memory storage!");

    // 3x3 cross kernel for the morphological opening in get_eye_pairs()
    kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
    gray   = cvCreateImage(cvGetSize(frame), 8, 1);   // current frame, grayscale
    prev   = cvCreateImage(cvGetSize(frame), 8, 1);   // previous frame, grayscale
    diff   = cvCreateImage(cvGetSize(frame), 8, 1);   // binary motion mask
    tpl	   = cvCreateImage(cvSize(TPL_WIDTH, TPL_HEIGHT), 8, 1);  // first eye template
    tpl2   = cvCreateImage(cvSize(TPL_WIDTH, TPL_HEIGHT), 8, 1);  // second eye template
    kmimg  = cvCreateImage( cvGetSize(frame), 8, 3 ); // k-means debug visualization

    iris   = cvLoadImage( "iris.png", 0 );            // grayscale iris template

    color_tab[0] = CV_RGB( 255, 0, 0 );
    color_tab[1] = CV_RGB( 0, 255, 0 );
    color_tab[2] = CV_RGB( 0, 255, 255 );
    color_tab[3] = CV_RGB( 255, 255, 0 );
    color_tab[4] = CV_RGB( 0, 0, 255 );

    if( !iris )
        exit_nicely("iris image is not found!");

    if (!kernel || !gray || !prev || !diff || !tpl || !tpl2 || !kmimg)
        exit_nicely("system error.");
    // keep the helper images' row order consistent with the capture frames
    gray->origin  = frame->origin;
    prev->origin  = frame->origin;
    diff->origin  = frame->origin;
}


/**
* This function provides a way to exit nicely
* from the system
*
* @param char* msg error message to display
*/
void BlinkDetector::exit_nicely(char* msg)
{
    _error = true;
    cvDestroyAllWindows();
    if (capture)
        cvReleaseCapture(&capture);
    if (gray)
        cvReleaseImage(&gray);
    if (prev)
        cvReleaseImage(&prev);
    if (diff)
        cvReleaseImage(&diff);
    if (tpl)
        cvReleaseImage(&tpl);
    if (storage)
        cvReleaseMemStorage(&storage);
    if (iris)
        cvReleaseImage(&iris);
    if (kmimg)
        cvReleaseImage(&kmimg);

    if (msg != NULL)
    {
        qDebug() << msg;
    }
}
/**
 * This class started from the eye tracking implementation at
 * http://nashruddin.com/Real_Time_Eye_Tracking_and_Blink_Detection
 */
