#include "featurestracker.h"



// --- Cascade classifier configuration ---
// Base directory holding the OpenCV Haar-cascade XML files (absolute,
// machine-specific path — NOTE(review): should probably come from config).
const QString pasta_base = "/Users/Jovan/QtProjects/CascadeXML/";

const QString cascade_frontal_face = pasta_base + "haarcascade_frontalface_alt2.xml";//alternatives: haarcascade_frontalface_alt_tree.xml
//haarcascade_frontalface_default.xml  //haarcascade_frontalface_alt.xml
const QString cascade_eye_left = pasta_base + "haarcascade_mcs_lefteye.xml";//alternative: haarcascade_lefteye_2splits.xml
const QString cascade_eye_rigth = pasta_base + "haarcascade_mcs_righteye.xml"; //alternative: haarcascade_righteye_2splits.xml
const QString cascade_eye = pasta_base + "haarcascade_eye.xml";

const QString cascade_mouth = pasta_base + "haarcascade_mcs_mouth.xml";

const QString cascade_nose = pasta_base + "haarcascade_mcs_nose.xml";


const float cascade_param_scale_factor = 1.1;//Cascade scale factor: how much the image size is reduced at each image scale.
const int cascade_param_min_neighbours = 4;//Cascade min neighbours: how many neighbors each candidate rectangle must have to be retained.
const int cascade_param_min_size_w = 20;//Cascade min size (width). Objects smaller than this are ignored.
const int cascade_param_min_size_h = 20;//Cascade min size (height). Objects smaller than this are ignored.

// Drawing colors in BGR order. Scalar(255) expands to (255,0,0,0), i.e. blue.
const Scalar COLOR_BLUE = Scalar(255);
const Scalar COLOR_RED = Scalar(0,0,255);
const Scalar COLOR_GREEN = Scalar(0,255,0);
const Scalar COLOR_YELLOW = Scalar(0,255,255);


featuresTracker::featuresTracker()
{
    // GPU acceleration stays off until explicitly enabled via setUseGpu().
    useGPU = false;
    // Warn once at construction if no CUDA-capable device is present.
    if(gpu::getCudaEnabledDeviceCount() == 0){
        qDebug() << "Dispositivo nao possui suporte a Cuda ou nao está instalado corretamente!";
    }
}

void featuresTracker::setUseGpu(bool value)
{
    // Enable the GPU path only when requested AND a CUDA device exists.
    const bool cudaAvailable = gpu::getCudaEnabledDeviceCount() != 0;
    useGPU = value && cudaAvailable;
    // Warn only when the caller asked for GPU but it cannot be honoured.
    if(value && !cudaAvailable){
        qDebug() << "Dispositivo nao possui suporte a Cuda ou nao está instalado corretamente!";
    }
}

Rect featuresTracker::detectFace(Mat frame)
{
    // Empty input yields an empty rectangle.
    if(frame.empty())
        return cv::Rect(Point(0,0),Point(0,0));

    const std::string classifier = cascade_frontal_face.toStdString();
    if(useGPU)
        return detectAndDisplayGPU(frame, classifier, 4, cascade_param_min_size_w, cascade_param_min_size_h);
    return detectAndDisplayCPU(frame, classifier, cascade_param_min_size_w, cascade_param_min_size_h, COLOR_BLUE);
}

Rect featuresTracker::detectLeftEye(Mat frame)
{
    // Empty input yields an empty rectangle.
    if(frame.empty())
        return cv::Rect(Point(0,0),Point(0,0));

    const std::string classifier = cascade_eye_left.toStdString();
    if(useGPU)
        return detectAndDisplayGPU(frame, classifier, 1.3, 20, 20);
    return detectAndDisplayCPU(frame, classifier, 20, 20, COLOR_YELLOW);
}

Rect featuresTracker::detectRigthEye(Mat frame)
{
    // Empty input yields an empty rectangle.
    if(frame.empty())
        return cv::Rect(Point(0,0),Point(0,0));

    const std::string classifier = cascade_eye_rigth.toStdString();
    if(useGPU)
        return detectAndDisplayGPU(frame, classifier, 1.3, 20, 20);
    return detectAndDisplayCPU(frame, classifier, 20, 20, COLOR_YELLOW);
}

Rect featuresTracker::detectMouth(Mat frame)
{
    // Empty input yields an empty rectangle.
    if(frame.empty())
        return cv::Rect(Point(0,0),Point(0,0));

    const std::string classifier = cascade_mouth.toStdString();
    if(useGPU)
        return detectAndDisplayGPU(frame, classifier, 1.3, 30, 30);
    return detectAndDisplayCPU(frame, classifier, 30, 30, COLOR_GREEN);
}

Rect featuresTracker::detectNose(Mat frame)
{
    // Empty input yields an empty rectangle.
    if(frame.empty())
        return cv::Rect(Point(0,0),Point(0,0));

    const std::string classifier = cascade_nose.toStdString();
    if(useGPU)
        return detectAndDisplayGPU(frame, classifier, 1, 20, 20);
    return detectAndDisplayCPU(frame, classifier, 20, 20, COLOR_RED);
}


// Returns the search region for the right eye: the right half of the
// upper half of the detected face rectangle.
// Fix: the old ceil(faceROI.width / 2) was a no-op — both operands are int,
// so the division truncates before ceil() ever sees a fraction. The
// misleading calls are removed; behavior is unchanged (floor division).
Rect featuresTracker::getRightEyeRoiFromFaceArea(cv::Rect faceROI)
{
    Rect rightEyeSearchArea = faceROI;
    rightEyeSearchArea.x += faceROI.width / 2;
    rightEyeSearchArea.width = faceROI.width / 2;
    rightEyeSearchArea.height = faceROI.height / 2;

    return rightEyeSearchArea;
}

// Returns the search region for the left eye: the left half of the
// upper half of the detected face rectangle.
// Fix: the old ceil(faceROI.width / 2) was a no-op — integer division
// truncates before ceil() runs. Removed the misleading calls; behavior
// is unchanged (floor division).
Rect featuresTracker::getLeftEyeRoiFromFaceArea(Rect faceROI)
{
    Rect leftEyeSearchArea = faceROI;
    leftEyeSearchArea.width = faceROI.width / 2;
    leftEyeSearchArea.height = faceROI.height / 2;

    return leftEyeSearchArea;
}

// Returns the search region for the nose: the middle horizontal band of
// the face (starts a quarter of the way down, spans half the height).
// Fix: ceil() on an already-truncated integer division was a no-op and
// suggested rounding that never happened. Removed; behavior unchanged.
Rect featuresTracker::getNoseRoiFromFaceArea(Rect faceROI)
{
    Rect noseSearchArea = faceROI;
    noseSearchArea.y += faceROI.height / 4;
    noseSearchArea.height = faceROI.height / 2;

    return noseSearchArea;
}

// Returns the search region for the mouth: the lower half of the face.
// Fix: ceil() on an integer division was a no-op (the division truncates
// first). Removed the misleading calls; behavior unchanged.
Rect featuresTracker::getMouthRoiFromFaceArea(Rect faceROI)
{
    Rect mouthSearchArea = faceROI;
    mouthSearchArea.y += faceROI.height / 2;
    mouthSearchArea.height = faceROI.height / 2;

    return mouthSearchArea;
}


// CPU cascade detection. Loads the classifier, converts the frame to
// grayscale, shrinks it to DETECTION_WIDTH for speed, runs
// detectMultiScale, maps the hits back to full resolution, clamps them
// to the frame, draws the first hit in `color` and returns it.
// Returns an empty Rect when the classifier fails to load or nothing is found.
// BUG FIX: `scale` was declared `int`, truncating the shrink factor
// (e.g. a 480 px frame gave 480/320 = 1.5 -> 1), so detections found on
// the 320 px image were mapped back with the wrong factor. It is now float.
Rect featuresTracker::detectAndDisplayCPU(cv::Mat frame, cv::String cascadeClassifierName, int minWindowSizeW, int minWindowSizeH, Scalar color)
{
    Mat frameGray;
    std::vector<Rect> detectedObjects;
    CascadeClassifier cascadeClassifier;
    cascadeClassifier.load(cascadeClassifierName);
    if(cascadeClassifier.empty()){
        qDebug("Não foi possível carregar o xml do classificador");
        return cv::Rect(Point(0,0),Point(0,0));
    }
    // Accept BGR, BGRA or already-gray input.
    if(frame.channels() == 3){
        cvtColor(frame, frameGray, CV_BGR2GRAY);
    }else if(frame.channels() == 4){
        cvtColor(frame, frameGray, CV_BGRA2GRAY);
    }else{
        frameGray = frame;
    }

    // Shrink the image to improve performance (detection only; coordinates
    // are scaled back afterwards).
    const int DETECTION_WIDTH = 320;
    Mat smallImg;
    float scale = frameGray.cols / (float) DETECTION_WIDTH; // was int: truncation bug
    if(frame.cols > DETECTION_WIDTH){
        // Scale the image down, keeping the aspect ratio.
        int scaledHeight = round(frameGray.rows / scale);
        resize(frameGray, smallImg, Size(DETECTION_WIDTH, scaledHeight));
    }else{
        // Image is already small enough; use it directly.
        smallImg = frameGray;
    }

    equalizeHist(smallImg, smallImg);
    // Params:
    // min size      - ~20x20 detects from afar; up close (phone cameras) objects can be ~80x80.
    // scale factor  - how many object sizes to search; 1.1/1.2 is faster but may miss some faces.
    // minNeighbors  - how confident the detector must be; 3+ for more reliable faces.
    // flags         - CV_HAAR_FIND_BIGGEST_OBJECT: only look for the single largest object.
    cascadeClassifier.detectMultiScale(smallImg, detectedObjects, cascade_param_scale_factor,
                                       cascade_param_min_neighbours, CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH, Size(minWindowSizeW, minWindowSizeH));

    // Map detections back to full-resolution coordinates.
    if(frame.cols > DETECTION_WIDTH){
        for (int i = 0; i < (int)detectedObjects.size(); i++) {
            detectedObjects[i].x = round(detectedObjects[i].x * scale);
            detectedObjects[i].y = round(detectedObjects[i].y * scale);
            detectedObjects[i].width = round(detectedObjects[i].width * scale);
            detectedObjects[i].height = round(detectedObjects[i].height * scale);
        }
    }

    // Clamp detections that fall partially outside the frame.
    for (int i = 0; i < (int)detectedObjects.size(); i++) {
        if(detectedObjects[i].x < 0)
            detectedObjects[i].x = 0;
        if(detectedObjects[i].y < 0)
            detectedObjects[i].y = 0;
        if(detectedObjects[i].x + detectedObjects[i].width > frame.cols)
            detectedObjects[i].x = frame.cols - detectedObjects[i].width;
        if(detectedObjects[i].y + detectedObjects[i].height > frame.rows)
            detectedObjects[i].y = frame.rows - detectedObjects[i].height;
    }

    if(detectedObjects.empty()){
        return cv::Rect(Point(0,0),Point(0,0));
    }
    cv::rectangle(frame, detectedObjects[0], color);
    return detectedObjects[0];
}

//Rect featuresTracker::detectAndDisplayCPU(cv::Mat frame, cv::String cascadeClassifierName, float scale, int minWindowSizeW, int minWindowSizeH)
//{

//    Mat frameGray;
//    Mat smallImg = Mat(cvRound(frame.rows/scale),
//                       cvRound(frame.cols/scale), CV_8UC1);
//    std::vector<Rect> detectedObjects;
//    CascadeClassifier cascadeClassifier;
//    cascadeClassifier.load(cascadeClassifierName);
//    if(cascadeClassifier.empty()){
//        qDebug("Não foi possível carregar o xml do classificador");
//         return cv::Rect(Point(0,0),Point(0,0));
//    }
//    cvtColor(frame ,frameGray,CV_BGR2GRAY);
//    resize(frameGray,smallImg,smallImg.size(),0,0,CV_INTER_LINEAR);
//    equalizeHist(smallImg,smallImg);
//    cascadeClassifier.detectMultiScale(smallImg,detectedObjects,cascade_param_scale_factor,cascade_param_min_neighbours,0|CV_HAAR_SCALE_IMAGE,Size(minWindowSizeW,minWindowSizeH));
//    Point pt1,pt2;
//    for (int i = 0; i < detectedObjects.size(); ++i) {
//        pt1.x = detectedObjects[i].x * scale;
//        pt2.x = (detectedObjects[i].x * scale + detectedObjects[i].width * scale);
//        pt1.y = detectedObjects[i].y * scale;
//        pt2.y = (detectedObjects[i].y * scale+ detectedObjects[i].height * scale);
//        cv::rectangle(frame,pt1,pt2,Scalar(255));
//        //frame( detectedObjects[i]);
//    }
//    if(!detectedObjects.empty())
//        return Rect(pt1,pt2);
//    else
//        return cv::Rect(Point(0,0),Point(0,0));
//}


// GPU cascade detection. Uploads the frame, converts to grayscale,
// shrinks by `scale`, equalizes, and runs the CUDA cascade (largest
// object only, for speed). Each hit is mapped back to full resolution
// and drawn; the last one is returned. Returns an empty Rect when the
// classifier fails to load or nothing is detected.
// Fixes vs. the previous version:
//  - grayscale/BGRA frames no longer crash: channel count is handled
//    like the CPU path instead of assuming 3-channel BGR;
//  - the result buffer is only downloaded when detections exist
//    (previously colRange(0,0) was downloaded on zero detections).
Rect featuresTracker::detectAndDisplayGPU(cv::Mat frame, cv::String cascadeClassifierName, float scale, int minWindowSizeW, int minWindowSizeH)
{
    cv::gpu::CascadeClassifier_GPU cascadeGPU;
    // Only the largest object is processed (otherwise detection is too slow).
    cascadeGPU.findLargestObject = true;
    if(!cascadeGPU.load(cascadeClassifierName)){
        return cv::Rect(Point(0,0),Point(0,0));
    }

    gpu::GpuMat frame_GPU, frame_GPU_Gray, detectedObjects;
    gpu::GpuMat smallMat = gpu::GpuMat(cvRound(frame.rows / scale), cvRound(frame.cols / scale), CV_8UC1);
    frame_GPU.upload(frame);
    // Accept BGR, BGRA or already-gray input (matches detectAndDisplayCPU).
    if(frame.channels() == 3){
        gpu::cvtColor(frame_GPU, frame_GPU_Gray, CV_BGR2GRAY);
    }else if(frame.channels() == 4){
        gpu::cvtColor(frame_GPU, frame_GPU_Gray, CV_BGRA2GRAY);
    }else{
        frame_GPU_Gray = frame_GPU;
    }
    gpu::resize(frame_GPU_Gray, smallMat, smallMat.size(), 0, 0);
    gpu::equalizeHist(smallMat, smallMat);

    int detections_num = cascadeGPU.detectMultiScale(smallMat, detectedObjects, cascade_param_scale_factor, cascade_param_min_neighbours, cv::Size(minWindowSizeW, minWindowSizeH));
    if(detections_num == 0){
        return cv::Rect(Point(0,0),Point(0,0));
    }

    // Download only the valid detections to host memory.
    Mat frame_Cpu;
    detectedObjects.colRange(0, detections_num).download(frame_Cpu);
    const Rect* objectDetected = frame_Cpu.ptr<Rect>();
    Point pt1, pt2;
    for (int i = 0; i < detections_num; ++i) {
        // Map from the shrunken detection image back to frame coordinates.
        pt1.x = objectDetected[i].x * scale;
        pt2.x = (objectDetected[i].x * scale + objectDetected[i].width * scale);
        pt1.y = objectDetected[i].y * scale;
        pt2.y = (objectDetected[i].y * scale + objectDetected[i].height * scale);

        cv::rectangle(frame, pt1, pt2, Scalar(255));
    }
    return Rect(pt1, pt2);
}

// Stub: intended to mark control points inside the detected eye region.
// The Sobel-gradient pipeline below was never enabled, so this is
// currently a no-op. NOTE(review): the disabled convertScaleAbs call
// references `sobel` but the declared variable is `img_sobel` — fix
// before re-enabling.
void featuresTracker::putEyeControlPoints(Mat frame, Rect eyeROI)
{
    Mat gray,img_sobel,img_abs;
    //Apply a Gaussian filter to reduce noise
    //GaussianBlur(frame,frame,Size(3,3),0,0,BORDER_DEFAULT);
//    cvtColor(frame,gray,CV_BGR2GRAY);
//    Sobel(gray,img_sobel,0,1,-1);
//    convertScaleAbs(sobel,img_abs,1,0);
}
