#include "WEB_CAM.h"

// Constructor: pre-loads the 26 reference images ("1.bmp" .. "26.bmp"),
// computes their SURF keypoints/descriptors once, and keeps pristine
// copies of the detector/extractor so run() can reset them every frame.
video::video()
{
image=1;
// Release the camera when the thread terminates.
// NOTE(review): QThread::terminated() is deprecated in modern Qt;
// finished() is the usual replacement — confirm before changing.
connect(this,SIGNAL(terminated()),this,SLOT(end()));

//int minHessian=1000;
//detector.
// Threshold on the mean descriptor distance: above it, run() reports
// "no object" (sentinel 999).
ratiomin=0.2;
// Load reference images 1..26 into slots 0..25 and precompute their
// keypoints and descriptors.
for(int i=1;i<26+1;i++)
{
QString tmp,tmp2;
tmp.setNum(i);
tmp.append(".bmp");
// NOTE(review): no check that the file loaded — a missing .bmp yields an
// empty image and detect/compute below would likely misbehave; verify.
img_object[i-1]=cvLoadImage(tmp.toAscii().data(),1);

detector.detect( img_object[i-1], keypoints_object[i-1]);
extractor.compute( img_object[i-1], keypoints_object[i-1], descriptors_object[i-1] );
}
// Snapshot the configured detector/extractor; run() restores from these
// copies at the start of every frame.
detectorinit=detector;
extractorinit=extractor;

running=true;
see_descriptor=false;

// Slot 999 holds the fallback image shown when nothing is recognized
// (run() uses solus==999 as its "no match" sentinel).
img_object[999]=cvLoadImage("1000.bmp",1);

}

// Destructor: intentionally empty — the camera capture is released by the
// end() slot, and the cv::Mat members clean up after themselves.
// NOTE(review): the IplImage headers returned by cvLoadImage() in the
// constructor are converted to Mat without taking ownership and appear
// never to be released — presumably a one-time leak; verify.
video::~video()
{


}


// Opens the default webcam and creates the two display windows used by
// run(): "WEBCAM" (live / annotated feed) and "SOLUS" (matched reference).
void video::load_cam()
{
    const int default_camera = 0;
    avi = cvCreateCameraCapture(default_camera);

    // One window for the camera feed, one for the recognized reference.
    namedWindow("WEBCAM", CV_WINDOW_AUTOSIZE);
    namedWindow("SOLUS", CV_WINDOW_AUTOSIZE);
}

void video::run()
{
/* Ouverture de la video */
Mat img_scene,img_matches;
QImage OUTPUT;

QMutex mutex;
/// On converti les images.
while(running)
{
mutex.lock();

    img_scene = cvRetrieveFrame(avi);
//-- Step 1: Detect the keypoints using SURF Detector
    int solus=999;
    double test=10000;

std::vector<KeyPoint> keypoints_scene;
detector=detectorinit;
extractor=extractorinit;
detector.detect( img_scene, keypoints_scene );

//-- Step 2: Calculate descriptors (feature vectors)

Mat descriptors_scene;
extractor.compute( img_scene, keypoints_scene, descriptors_scene );

//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for(int j=0;j<26;j++)
{
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matchestmp;
matcher.match( descriptors_object[j], descriptors_scene, matches );
double moy=0;int nb=0;
for( int i = 0; i < descriptors_object[j].rows; i++ )
{
good_matchestmp.push_back( matches[i]);
moy+=matches[i].distance;nb++;
}
moy=moy/(nb+1);

if(moy<test)
{
test=moy;
solus=j;
good_matches=good_matchestmp;
}

}
if(ratiomin<test)
{
solus=999;
}



emit result(solus);
emit match(test);




//H = findHomography( obj, scene, CV_RANSAC );


//perspectiveTransform( obj_corners, scene_corners, H);

//-- Draw lines between the corners (the mapped object in the scene - image_2 )
//CvScalar line_color = CV_RGB(255,0,0);
//int line_thickness = 1;

if(see_descriptor && solus<999)
{
    drawMatches( img_object[solus], keypoints_object[solus], img_scene, keypoints_scene,good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( int i = 0; i < good_matches.size(); i++ )
    {
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[solus][ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object[solus].cols, 0 );
    obj_corners[2] = cvPoint( img_object[solus].cols, img_object[solus].rows ); obj_corners[3] = cvPoint( 0, img_object[solus].rows );
    std::vector<Point2f> scene_corners(4);

    imshow("SOLUS",img_object[solus]);
    imshow("WEBCAM",img_matches);

}
else
{
/*line( img_scene, scene_corners[0] , scene_corners[1] , line_color, line_thickness, CV_AA, 0 );
line( img_scene, scene_corners[1] , scene_corners[2], line_color, line_thickness, CV_AA, 0 );
line( img_scene, scene_corners[2], scene_corners[3] , line_color, line_thickness, CV_AA, 0 );
line( img_scene, scene_corners[3] , scene_corners[0] , line_color, line_thickness, CV_AA, 0 );*/
   imshow("SOLUS",img_object[solus]);
    imshow("WEBCAM",img_scene);
}


//emit video_updated( OUTPUT);
mutex.unlock();
msleep(20);
}


}

 QImage video::IplImage2QImage( Mat iplImage)
{

 int height = iplImage.rows;
 int width = iplImage.cols;
QImage img((const uchar*)iplImage.data, width, height, QImage::Format_RGB888);
return  img.rgbSwapped();
}



// Slot invoked when the thread terminates: closes the webcam capture
// handle (cvReleaseCapture also nulls the pointer it is handed).
void video::end()
{
    cvReleaseCapture(&avi);
}
