// C/C++ standard library
#include <cmath>     // std::isnan
#include <cassert>   // assert
#include <iostream>
#include <stdio.h>
// ROS core and message types
#include "ros/ros.h"
#include "sensor_msgs/PointCloud2.h"
#include "sensor_msgs/JointState.h"
#include "sensor_msgs/JointState.h"
#include <sensor_msgs/image_encodings.h>
#include <trajectory_msgs/JointTrajectory.h>
#include <trajectory_msgs/JointTrajectory.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <cv_bridge/cv_bridge.h>
// OpenCV
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// PCL
#include <pcl_ros/point_cloud.h>
#include <pcl/point_types.h>
#include "pcl/io/io.h"
#include "pcl/io/pcd_io.h"
#include <pcl/sample_consensus/method_types.h>
#include <pcl/sample_consensus/model_types.h>
#include <pcl/segmentation/sac_segmentation.h>
// Boost
#include <boost/foreach.hpp>
#include <boost/thread/thread.hpp>
#define METER_PER_UNIT 0.04
using namespace std;
using namespace cv;
namespace enc = sensor_msgs::image_encodings;
ros::Publisher r_arm_pub,l_arm_pub;
sensor_msgs::Image depth_msg;
sensor_msgs::Image rgb_msg;
sensor_msgs::JointState joint_msg;
CascadeClassifier faceCascade, upperbodyCascade;
bool depth_ready=false;
bool rgb_ready=false;
double scale = 1;
int imgcnt=0;
char dir[]="dual_image";
int posID;
#define EYE_ACT 2
#define CHIN_ACT 1
#define NO_ACT 0

#ifndef EPISLON
#define EPISLON 0.0001
#endif

void HandDetect(Mat rgbImg, Mat depthImg, Rect face, Rect upperbody);

void detectAndDraw();

// Returns true when a face candidate's measured depth is plausible for its
// detected pixel width.  The quadratic below is an empirically fitted model
// of expected depth (in depth units) as a function of detection width;
// candidates more than 5 units from the model are rejected.
bool CheckFaceDepth(int depth, int size)
{
    double idealDepth = 0.0524 * size * size - 7.2307 * size + 286.0086;
    return (depth > idealDepth - 5) && (depth < idealDepth + 5);
}

// Returns true when an upper-body candidate's measured depth is plausible
// for its detected pixel width.  Same empirically fitted quadratic scheme
// as CheckFaceDepth, with a wider (+/-8 unit) acceptance band.
bool CheckUpperbodyDepth(int depth, int size)
{
    double idealDepth = 0.001 * size * size - 0.7095 * size + 130.9962;
    return (depth > idealDepth - 8) && (depth < idealDepth + 8);
}


// Depth-image callback: cache the latest frame and mark it available.
// The flag is set only after the copy so a reader never sees depth_ready
// true with a partially assigned message from this statement order.
// NOTE(review): runs on the ros::spin() thread while detectAndDraw reads
// depth_msg from its own thread without a lock — confirm the race is
// acceptable for this application.
void getDepth(const sensor_msgs::Image& msg) {
    depth_msg=msg;
    depth_ready=true;
}

// RGB-image callback: cache the latest frame and mark it available.
// NOTE(review): like getDepth, rgb_msg is read unsynchronized from the
// detection thread — confirm the race is acceptable.
void getRGB(const sensor_msgs::Image& msg) {
    rgb_msg=msg;
    rgb_ready=true;
}

// Joint-state callback: cache the latest robot joint positions.
// joint_msg is currently only referenced by commented-out code in
// moveRightArm; kept for future use.
void getJoint(const sensor_msgs::JointState& msg) {
    joint_msg=msg;
}


void moveRightArm(vector<double> &des, double t) {
    //int id=find(joint_msg.name.begin(),joint_msg.name.end(),"l_shoulder_pan_joint")-joint_msg.name.begin();
//     unsigned int i=0;
//     for (;i<joint_msg.name.size();i++)
//         if (joint_msg.name[i].compare("r_shoulder_pan_joint")==0)
//             break;
//     double pos=joint_msg.position[i];
//     printf("%f\n",pos);
//     if(pos<0)
// 	pos=theta;
//     else
// 	pos=-theta;
    trajectory_msgs::JointTrajectory trajectory;
    //to find loaded controllers: rosrun pr2_controller_manager pr2_controller_manager list
    //to find the joints controlled by a controller: rosparam get /r_arm_controller/joints
    trajectory.joint_names.resize(7);
    trajectory.joint_names[0] = "r_shoulder_pan_joint";
    trajectory.joint_names[1] = "r_shoulder_lift_joint";
    trajectory.joint_names[2] = "r_upper_arm_roll_joint";
    trajectory.joint_names[3] = "r_elbow_flex_joint";
    trajectory.joint_names[4] = "r_forearm_roll_joint";
    trajectory.joint_names[5] = "r_wrist_flex_joint";
    trajectory.joint_names[6] = "r_wrist_roll_joint";

    trajectory.points.resize(1);
    //trajectory.points[0].positions.resize(7);
    trajectory.points[0].positions =des;
    for(int i=0;i<7;i++)
	printf("%f\t",des[i]);
    printf("\n");
    for(int i=0;i<7;i++)
	printf("%f\t",trajectory.points[0].positions[i]);
    printf("\n");
    trajectory.points[0].time_from_start = ros::Duration(t);
    trajectory.header.stamp = ros::Time::now();
    r_arm_pub.publish(trajectory);

}

void moveLeftArm(vector<double> &des, double t) {

    trajectory_msgs::JointTrajectory trajectory;
    //to find loaded controllers: rosrun pr2_controller_manager pr2_controller_manager list
    //to find the joints controlled by a controller: rosparam get /r_arm_controller/joints
    trajectory.joint_names.resize(7);
    trajectory.joint_names[0] = "l_shoulder_pan_joint";
    trajectory.joint_names[1] = "l_shoulder_lift_joint";
    trajectory.joint_names[2] = "l_upper_arm_roll_joint";
    trajectory.joint_names[3] = "l_elbow_flex_joint";
    trajectory.joint_names[4] = "l_forearm_roll_joint";
    trajectory.joint_names[5] = "l_wrist_flex_joint";
    trajectory.joint_names[6] = "l_wrist_roll_joint";

    trajectory.points.resize(1);
    //trajectory.points[0].positions.resize(7);
    trajectory.points[0].positions =des;
    for(int i=0;i<7;i++)
	printf("%f\t",des[i]);
    printf("\n");
    for(int i=0;i<7;i++)
	printf("%f\t",trajectory.points[0].positions[i]);
    printf("\n");
    trajectory.points[0].time_from_start = ros::Duration(t);
    trajectory.header.stamp = ros::Time::now();
    l_arm_pub.publish(trajectory);

}

void eyeAction(){
    vector<double> des(7,0);
    moveLeftArm(des,0.5);
    des[1]=-1;
    moveRightArm(des,0.5);
}

void chinAction(){
    vector<double> des(7,0);
    
    moveRightArm(des,0.5);
    des[1]=-1;
    moveLeftArm(des,0.5);
    
}

// Response when a hand is near the face but neither the eye nor the chin
// region dominates: flex both wrists slightly (-0.5 on the wrist flex
// joint).
void faceAction(){
    vector<double> des(7, 0);
    des[5] = -0.5;  // wrist flex joint
    moveRightArm(des, 0.5);
    // des is not modified by moveRightArm, so the original's second
    // des[5] = -0.5 assignment was redundant and has been removed.
    moveLeftArm(des, 0.5);
}

void noAction(){
    vector<double> des(7,0);
    moveLeftArm(des,0.5);
    moveRightArm(des,0.5);
}

void detectAndDraw() {
    ros::Rate rt(1);
    if (!depth_ready||!rgb_ready)
        rt.sleep();

    cv_bridge::CvImagePtr depth_ptr;
    cv_bridge::CvImagePtr rgb_ptr;
    int height=depth_msg.height;
    int width=depth_msg.width;
    unsigned char* data=new unsigned char[height*width*4];
    for (;;) {
        for (unsigned int j=0;j<depth_msg.data.size();j++)
            data[j]=depth_msg.data[j];
        float* ptr=(float*)data;
	for (int i=0;i<height*width;i++){
	    //float t=*(ptr+i);
	    if(isnan(ptr[i]))
		//*(ptr+i)=0;
		ptr[i]=0;
	    else
		//*(ptr+i)=*(ptr+i)/METER_PER_UNIT;
		ptr[i]=ptr[i]/METER_PER_UNIT;
	}
	cv::Mat Ma=cv::Mat(height, width, CV_32FC1, ptr);
	imgcnt++;
	
	cv::imwrite("depth.jpg",Ma);
        try
        {
            rgb_ptr = cv_bridge::toCvCopy(rgb_msg, enc::BGR8);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
    
	cv::imwrite("rgb.jpg",rgb_ptr->image);
        Mat img=imread("rgb.jpg");
	Mat depthimg=imread("depth.jpg");
	
       int i = 0;
    double t = 0;
    vector<Rect> faces, bodies;
    
    Mat gray, smallImg(cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1);

    cvtColor(img, gray, CV_BGR2GRAY);
    resize(gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR);
    equalizeHist(smallImg, smallImg);

    t = (double)cvGetTickCount();
    faceCascade.detectMultiScale(smallImg, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30));
    upperbodyCascade.detectMultiScale(smallImg, bodies, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(40, 40));
    t = (double)cvGetTickCount() - t;

    printf("detection  time = %g ms\n", t/((double)cvGetTickFrequency()*1000.));
    i = faces.size() - 1;
    printf("detect %d face candidates.\n",faces.size());
    printf("detect %d upper body candidates.\n",bodies.size());
    for (int r = (int)faces.size() - 1; r >= 0; r--)
    {
        int depth;
        if (depthimg.type() == CV_8UC3)
            depth = depthimg.at<Vec3b>(faces[r].y + faces[r].height/2, faces[r].x + faces[r].width/2, 0)[0];
        else
        {
            cerr << "Image type is not covered" << endl;
            abort();
        }

        bool flag = CheckFaceDepth(depth, faces[r].width);
//        CvPoint pt1 = cvPoint(r->x, r->y);
//        CvPoint pt2 = cvPoint(r->x + r->width, r->y + r->height);
        if (!flag)
//        {
//            rectangle(img, pt1, pt2, CV_RGB(200, 0, 0), 1, 8, 0);
            faces.erase(faces.begin() + r);
//        }
//        else
//            rectangle(img, pt1, pt2, CV_RGB(200, 0, 0), 3, 8, 0);
        //i--;
    }

//    for (vector<Rect>::const_iterator r = bodies.begin(); r != bodies.end(); r++)
    i = bodies.size() - 1;
    for (int r = (int)bodies.size() - 1; r >= 0; r--)
    {
//        cout << "Face center: (" << r->x + r->width / 2 << ","
//            << r->y + r->height / 2 << ")    ";
//        cout << "Face size: (" << r->width << "," << r->height << ")    ";

        int depth;
        if (depthimg.type() == CV_8UC3)
            depth = depthimg.at<Vec3b>(bodies[r].y + bodies[r].height/2, bodies[r].x + bodies[r].width/2, 0)[0];
        else
        {
            cerr << "Image type is not covered" << endl;
            abort();
        }

        bool flag = CheckUpperbodyDepth(depth, bodies[r].width);
//        CvPoint pt1 = cvPoint(r->x, r->y);
//        CvPoint pt2 = cvPoint(r->x + r->width, r->y + r->height);
        if (!flag)
//        {
//            rectangle(img, pt1, pt2, CV_RGB(0, 200, 0), 1, 8, 0);
            bodies.erase(bodies.begin() + r);
//        }
//        else
//            rectangle(img, pt1, pt2, CV_RGB(0, 255, 0), 3, 8, 0);
        //i--;
    }
 
   
    vector<Rect> finalFaces;  // final results are in "bodies" and "finalFaces"
    for (int r = bodies.size() - 1; r >= 0; r--)
    {
        int s = 0;
        for (s = 0; s <(int) faces.size(); s++)
        {
            if ((faces[s].x > bodies[r].x + bodies[r].width / 8)
                  && (faces[s].x + faces[s].width < bodies[r].x + bodies[r].width * 7 / 8)
                  && (faces[s].y + faces[s].height < bodies[r].y + bodies[r].height * 7 / 8))
            {
                finalFaces.push_back(faces[s]);
                break;
            }
        }
        if (s == (int)faces.size())
            bodies.erase(bodies.begin() + r);
    }
    printf("find %d matches.\n",finalFaces.size());
    assert(bodies.size() == finalFaces.size());
    for (int r = 0; r < (int)bodies.size(); r++)
    {
        CvPoint pt1 = cvPoint(bodies[r].x, bodies[r].y);
        CvPoint pt2 = cvPoint(bodies[r].x + bodies[r].width, bodies[r].y + bodies[r].height);
        rectangle(img, pt1, pt2, CV_RGB(0, 255, 0), 3, 8, 0);
        pt1 = cvPoint(finalFaces[r].x, finalFaces[r].y);
        pt2 = cvPoint(finalFaces[r].x + finalFaces[r].width, 
              finalFaces[r].y + finalFaces[r].height);
        rectangle(img, pt1, pt2, CV_RGB(255, 0, 0), 3, 8, 0);

        HandDetect(img, depthimg, finalFaces[r], bodies[r]);
    }
        if(finalFaces.size()>0){
	    if(posID==EYE_ACT)
		eyeAction();
	    else if(posID==CHIN_ACT)
		chinAction();
	    else
		faceAction();
	}else
	    noAction();
	    
        cv::imshow( "result", img );
        cv::waitKey(0);
        //sleep(4);
    }
    delete data;
}

int main( int argc, char** argv )
{
    ros::init(argc, argv, "tracker");
    ros::NodeHandle n;

    if (!faceCascade.load("haarcascade_frontalface_alt.xml"))
    {
        cerr << "ERROR: Could not load face cascade" << endl;
        return -1;
    }
    if (!upperbodyCascade.load("haarcascade_mcs_upperbody.xml"))
    {
        cerr << "ERROR: Could not load upperbody cascade" << endl;
        return -1;
    }

    cvNamedWindow( "result", 1 );

    ros::Subscriber sub_depth = n.subscribe("/camera/depth/image", 10, getDepth);
    ros::Subscriber sub_rgb = n.subscribe("/camera/rgb/image_color", 10, getRGB);
    ros::Subscriber sub_joint=n.subscribe("/joint_states",10,getJoint);
    r_arm_pub = n.advertise<trajectory_msgs::JointTrajectory>("r_arm_controller/command", 100);
    l_arm_pub = n.advertise<trajectory_msgs::JointTrajectory>("l_arm_controller/command", 100);
    ros::Rate rt(1);
    rt.sleep();
    boost::thread detect_thread = boost::thread::thread(boost::bind(&detectAndDraw));

    ros::spin();
    cvDestroyWindow("result");
    return 0;
}




// Normalize the RGB and depth images: crop a region around the detected
// upper body and rescale it so the body width becomes a fixed 125 pixels.
// Crop a region around the detected upper body and rescale it so the body
// width becomes exactly 125 pixels.  The face/body rectangles are remapped
// into the cropped, rescaled coordinate frame.
//
// oriRgbImg/oriDepthImg: full-frame input images.
// oriFace/oriBody:       detections in full-frame coordinates.
// rgbImg/depthImg:       (out) cropped and rescaled images.
// face/body:             (out) detections in normalized coordinates.
void Normalize(Mat oriRgbImg, Mat oriDepthImg, Rect oriFace, Rect oriBody, 
               Mat &rgbImg, Mat &depthImg, Rect &face, Rect &body)
{
    // Crop window: one body-width margin left/right, half a body-height
    // above, 1.5 body-heights below — clamped to the image.
    int left = max(oriBody.x - oriBody.width, 0);
    int right = min(oriBody.x + oriBody.width * 2, oriRgbImg.cols);
    int top = max(oriBody.y - oriBody.height / 2, 0);
    int bottom = min(oriBody.y + oriBody.height * 5 / 2, oriRgbImg.rows);

    // ROI headers only — the original pre-allocated full matrices whose
    // pixels were immediately discarded by the ROI assignment.
    Mat tmpRgbImg = oriRgbImg(Range(top, bottom), Range(left, right));
    Mat tmpDepthImg = oriDepthImg(Range(top, bottom), Range(left, right));

    Rect tmpFace = Rect(oriFace.x - left, oriFace.y - top, oriFace.width, oriFace.height);
    Rect tmpBody = Rect(oriBody.x - left, oriBody.y - top, oriBody.width, oriBody.height);

    // Rescale so the body width is 125 px.  Renamed from "scale" to avoid
    // shadowing the file-scope global of the same name.
    double normScale = 125 / (double)tmpBody.width;
    rgbImg.create(cvRound(tmpRgbImg.rows * normScale), cvRound(tmpRgbImg.cols * normScale),
          tmpRgbImg.type());
    resize(tmpRgbImg, rgbImg, rgbImg.size());
    depthImg.create(cvRound(tmpDepthImg.rows * normScale), cvRound(tmpDepthImg.cols * normScale),
          tmpDepthImg.type());
    resize(tmpDepthImg, depthImg, depthImg.size());

    face.x = tmpFace.x * normScale;
    face.y = tmpFace.y * normScale;
    face.width = tmpFace.width * normScale;
    face.height = tmpFace.height * normScale;
    body.x = tmpBody.x * normScale;
    body.y = tmpBody.y * normScale;
    body.width = tmpBody.width * normScale;
    body.height = tmpBody.height * normScale;
}



// Compute the Convex Degree Feature at every pixel: the chi-square
// distance between the depth histogram of an inner (innerR) window and the
// surrounding ring out to outerR.  A high value means the pixel's depth
// distribution differs sharply from its surroundings — a convex blob such
// as a hand.  Border pixels (within outerR of the edge) are left untouched,
// so the caller must zero-initialize cdfImg (HandDetect passes Mat::zeros).
// NOTE(review): reads depthImg as Vec3b, i.e. assumes CV_8UC3 — confirm.
void ConvexDegreeFeature(Mat depthImg, Mat &cdfImg, int innerR, int outerR)
{
    #define CDF_BIN_NUM 32
    #define CDF_BIN_SIZE 8

    int imgHeight = depthImg.rows;
    int imgWidth = depthImg.cols;
    float outHist[CDF_BIN_NUM], inHist[CDF_BIN_NUM], value, distance;
    float *outHistPTR, *inHistPTR;
    float outSum, inSum;

    for (int h = outerR; h < imgHeight - outerR; h++)
    {
        for (int w = outerR; w < imgWidth - outerR; w++)
        {
            // BUG FIX: the histograms are float; the original zeroed
            // CDF_BIN_NUM * sizeof(int) bytes, which is only correct where
            // sizeof(int) happens to equal sizeof(float).
            memset(outHist, 0, sizeof(outHist));
            memset(inHist, 0, sizeof(inHist));

            // Accumulate inner-window and outer-ring depth histograms.
            for (int y = -outerR; y <= outerR; y++)
            {
                for (int x = -outerR; x <= outerR; x++)
                {
                    value = depthImg.at<Vec3b>(h + y, w + x)[0];
                    if (value < 15)   // near-zero depth = no measurement
                        continue;
                    if ((y >= -innerR) && (y <= innerR) && (x >= -innerR) && (x <= innerR))
                        inHist[(int)(value / CDF_BIN_SIZE)] += 1;
                    else
                        outHist[(int)(value / CDF_BIN_SIZE)] += 1;
                }
            }

            // Normalize both distributions; skip if either is empty.
            outSum = 0;
            inSum = 0;
            outHistPTR = outHist;
            inHistPTR = inHist;
            for (int i = 0; i < CDF_BIN_NUM; i++)
            {
                outSum += *outHistPTR;
                outHistPTR++;
                inSum += *inHistPTR;
                inHistPTR++;
            }
            if ((outSum == 0) || (inSum == 0))
                continue;

            outHistPTR = outHist;
            inHistPTR = inHist;
            for (int i = 0; i < CDF_BIN_NUM; i++)
            {
                *outHistPTR /= outSum;
                outHistPTR++;
                *inHistPTR /= inSum;
                inHistPTR++;
            }

            // Chi-square distance between the two normalized histograms.
            outHistPTR = outHist;
            inHistPTR = inHist;
            distance = 0;
            for (int i = 0; i < CDF_BIN_NUM; i++)
            {
                if ((*outHistPTR == 0) && (*inHistPTR == 0))
                {
                    outHistPTR++;
                    inHistPTR++;
                    continue;
                }
                distance += (*outHistPTR - *inHistPTR) * (*outHistPTR - *inHistPTR) 
                      / (*outHistPTR + *inHistPTR);
                outHistPTR++;
                inHistPTR++;
            }

            cdfImg.at<float>(h, w) = distance;
            // (the original also tracked an unused maxCDF here — removed)
        }
    }
}


// Zero every CDF response whose pixel depth is close to neither the face
// depth nor the body depth: a hand touching the face must lie roughly at
// the person's own depth.
void DepthFilter(Mat &cdfImg, Mat depthImg, Rect face, Rect body, int tolerance)
{
    // Reference depths sampled at the face center and the body's
    // bottom-center point.
    const int faceDepth = depthImg.at<Vec3b>(face.y + face.height / 2, face.x + face.width / 2)[0];
    const int bodyDepth = depthImg.at<Vec3b>(body.y + body.height, body.x + body.width / 2)[0];

    for (int row = 0; row < depthImg.rows; row++)
    {
        for (int col = 0; col < depthImg.cols; col++)
        {
            const int d = depthImg.at<Vec3b>(row, col)[0];
            const bool nearFace = (d >= faceDepth - tolerance) && (d <= faceDepth + tolerance);
            const bool nearBody = (d >= bodyDepth - tolerance) && (d <= bodyDepth + tolerance);
            if (!nearFace && !nearBody)
                cdfImg.at<float>(row, col) = 0;
        }
    }
}


// Zero every CDF response whose color is not close to the dominant color of
// the central face patch (a simple skin-color model): the dominant 8x8x8
// RGB-histogram bin of the face's middle half is found, and any pixel
// outside that bin (+/- tolerance per channel) is suppressed.
// NOTE(review): the image is BGR8, so channel [0] is blue, not red as the
// variable names suggest; the filter is still self-consistent because the
// same indexing is used for both the model and the test — confirm intent.
void ColorFilter(Mat &cdfImg, Mat rgbImg, Rect face, int tolerance)
{
    #define CLR_BIN_SIZE 32
    #define CLR_SBIN_NUM 8
    #define CLR_BIN_NUM 8 * 8 * 8

    // Build the color histogram over the central half of the face.
    int xEnd = face.x + face.width * 3 / 4;
    int yEnd = face.y + face.height * 3 / 4;
    int hist[CLR_BIN_NUM], valueR, valueG, valueB, histID;
    memset(hist, 0, CLR_BIN_NUM * sizeof(int));
    for (int y = face.y + face.height / 4; y < yEnd; y++)
    {
        for (int x = face.x + face.width / 4; x < xEnd; x++)
        {
            valueR = rgbImg.at<Vec3b>(y, x)[0];
            valueG = rgbImg.at<Vec3b>(y, x)[1];
            valueB = rgbImg.at<Vec3b>(y, x)[2];
            histID = valueR / CLR_BIN_SIZE * CLR_SBIN_NUM * CLR_SBIN_NUM + valueG 
                  / CLR_BIN_SIZE * CLR_SBIN_NUM + valueB / CLR_BIN_SIZE;
            hist[histID] += 1;
        }
    }

    // Find the dominant histogram bin.
    // BUG FIX: maxID was uninitialized in the original — if the face patch
    // is empty (degenerate rect) the whole hist is zero and maxID was read
    // before ever being written.
    int maxHist = 0, maxID = 0;
    int *histPTR = hist;
    for (int i = 0; i < CLR_BIN_NUM; i++)
    {
        if (maxHist < *histPTR)
        {
            maxID = i;
            maxHist = *histPTR;
        }
        histPTR++;
    }
    // Decode the bin index back into per-channel intensity ranges.
    int maxRMin = (int)(maxID / (CLR_SBIN_NUM * CLR_SBIN_NUM)) * CLR_BIN_SIZE;
    int maxRMax = maxRMin + CLR_BIN_SIZE;
    int maxGMin = (int)((maxID % (CLR_SBIN_NUM * CLR_SBIN_NUM)) / CLR_SBIN_NUM) * CLR_BIN_SIZE;
    int maxGMax = maxGMin + CLR_BIN_SIZE;
    int maxBMin = (maxID % CLR_SBIN_NUM) * CLR_BIN_SIZE;
    int maxBMax = maxBMin + CLR_BIN_SIZE;
    
    // Suppress every surviving response outside the dominant color range.
    int imgHeight = rgbImg.rows;
    int imgWidth = rgbImg.cols;
    for (int h = 0; h < imgHeight; h++)
    {
        for (int w = 0; w < imgWidth; w++)
        {
            if (cdfImg.at<float>(h, w) == 0)
                continue;

            valueR = rgbImg.at<Vec3b>(h, w)[0];
            valueG = rgbImg.at<Vec3b>(h, w)[1];
            valueB = rgbImg.at<Vec3b>(h, w)[2];
            if ((valueR < maxRMin - tolerance) || (valueR > maxRMax + tolerance)
                  || (valueG < maxGMin - tolerance) || (valueG > maxGMax + tolerance)
                  || (valueB < maxBMin - tolerance) || (valueB > maxBMax + tolerance))
                cdfImg.at<float>(h, w) = 0;
        }
    }
}


// Threshold the CDF image in place: responses below tolerance are zeroed,
// all others are left untouched.
void CDFFilter(Mat &cdfImg, float tolerance)
{
    for (int row = 0; row < cdfImg.rows; row++)
    {
        float *p = cdfImg.ptr<float>(row);
        for (int col = 0; col < cdfImg.cols; col++)
        {
            if (p[col] < tolerance)
                p[col] = 0;
        }
    }
}


// Application prior: suppress the face region itself, restrict responses to
// a band around the face, and vote on which vertical third of the face the
// remaining (hand) pixels occupy.  Writes the winning gesture into posID
// (EYE_ACT, CHIN_ACT, or NO_ACT).
void AppFilter(Mat &cdfImg, Rect face, int &posID)
{
    int imgHeight = cdfImg.rows;
    int imgWidth = cdfImg.cols;

    // Zero the face region itself so facial features don't vote.
    // BUG FIX: the original did not clamp these bounds, so a face near the
    // image edge (face.x - width/6 < 0, or face.y + 3*height > rows) made
    // cdfImg.at<float>() index out of bounds.
    int left = max(0, face.x - face.width / 6);
    int right = min(imgWidth, face.x + face.width * 7 / 6);
    int top = max(0, face.y);
    int bottom = min(imgHeight, face.y + face.height * 3);
    for (int y = top; y < bottom; y++)
    {
        for (int x = left; x < right; x++)
            cdfImg.at<float>(y, x) = 0;
    }

    // Band of interest around the face; everything outside it is cleared.
    left = max(0, face.x - face.width / 3);
    right = min(imgWidth, face.x + face.width * 4 / 3);
    top = max(0, face.y);
    bottom = min(imgHeight, face.y + face.height * 4 / 3);
    // Horizontal cut lines splitting the face into head/eye/cheek/chin rows.
    int int1 = face.y + face.height / 3;
    int int2 = face.y + face.height * 2 / 3;
    int int3 = face.y + face.height;
    int posNum[4] = {0, 0, 0, 0};

    for (int h = 0; h < imgHeight; h++)
    {
        for (int w = 0; w < imgWidth; w++)
        {
            if ((w > left) && (w < right) && (h > top) && (h < bottom))
            {
                if (cdfImg.at<float>(h, w) > 0.001)
                {
                    cdfImg.at<float>(h, w) = 0.99;
                    if (h < int1)
                        posNum[0]++;  // pointing head
                    else if (h < int2)
                        posNum[1]++;  // pointing eye
                    else if (h < int3)
                        posNum[2]++;  // pointing cheek
                    else
                        posNum[3]++;  // pointing chin
                }
                continue;
            }
            cdfImg.at<float>(h, w) = 0;
        }
    }

    // Majority vote: eye and chin are actionable, everything else is NO_ACT.
    if ((posNum[1] > posNum[0]) && (posNum[1] > posNum[2]) && (posNum[1] > posNum[3]))
        posID = EYE_ACT;  // eye
    else if ((posNum[3] > posNum[0]) && (posNum[3] > posNum[1]) && (posNum[3] > posNum[2]))
        posID = CHIN_ACT;  // chin
    else
        posID = NO_ACT;
    cout << posNum[0] << " " << posNum[1] << " " << posNum[2] << " " << posNum[3] << endl;
}


// Locate the hand inside one (face, body) detection pair and classify which
// facial region it points at.  The result is written into the global posID
// (EYE_ACT / CHIN_ACT / NO_ACT), which detectAndDraw maps to arm motions.
void HandDetect(Mat oriRgbImg, Mat oriDepthImg, Rect oriFace, Rect oriBody)
{
    // (The original computed a local "scale" here that shadowed the global
    // and was never used — Normalize derives it itself.  Removed.)
    Mat rgbImg, depthImg;  // normalized images
    Rect face, body;       // normalized face and upper body
    // Crop around the body and rescale so the body width is 125 px.
    Normalize(oriRgbImg, oriDepthImg, oriFace, oriBody, rgbImg, depthImg, face, body);

    // Candidate hand regions: Convex Degree Feature (chi-square distance
    // between inner-window and outer-ring depth histograms per pixel).
    Mat cdfImg = Mat::zeros(depthImg.rows, depthImg.cols, CV_32F);
    ConvexDegreeFeature(depthImg, cdfImg, 10, 15);

    // Keep only pixels whose depth is near the face or body depth.
    int depthTolerance = 4;
    DepthFilter(cdfImg, depthImg, face, body, depthTolerance);

    // Keep only pixels whose color matches the dominant face color.
    int colorTolerance = 32;
    ColorFilter(cdfImg, rgbImg, face, colorTolerance);

    // Keep only sufficiently strong convexity responses.
    float cdfTolerance = 0.2;
    CDFFilter(cdfImg, cdfTolerance);

    // Application prior: restrict to a band around the face and vote on the
    // vertical region (head/eye/cheek/chin) the remaining pixels occupy.
    AppFilter(cdfImg, face, posID);
    if (posID == EYE_ACT)
        cout << endl << "EYE" << endl;
    else if (posID == CHIN_ACT)
        cout << endl << "CHIN" << endl;
    else
        cout << endl << "NO ACTION" << endl;

    // Draw the normalized detections (debug visualization only).
    CvPoint pt1 = cvPoint(body.x, body.y);
    CvPoint pt2 = cvPoint(body.x + body.width, body.y + body.height);
    rectangle(rgbImg, pt1, pt2, CV_RGB(0, 255, 0), 3, 8, 0);
    rectangle(depthImg, pt1, pt2, CV_RGB(0, 255, 0), 3, 8, 0);
    pt1 = cvPoint(face.x, face.y);
    pt2 = cvPoint(face.x + face.width, face.y + face.height);
    rectangle(rgbImg, pt1, pt2, CV_RGB(255, 0, 0), 3, 8, 0);
    rectangle(depthImg, pt1, pt2, CV_RGB(255, 0, 0), 3, 8, 0);
}

















































