#include <iostream>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <cvaux.h>
#include <highgui.h>
using namespace std;

// Query a capture device for the dimensions of the frames it delivers.
CvSize getCaptureSize(CvCapture *capture)
{
    const int frameWidth  = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    const int frameHeight = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    return cvSize(frameWidth, frameHeight);
}


//Anil K. Jain non-linear skin filter: a pixel is classified as skin when its
//(Cb, Cr) chroma coordinates fall inside a rotated ellipse in chroma space.
//img:  8-bit 3-channel BGR input.
//mask: 8-bit 1-channel output, same size as img; 255 = skin, 0 = non-skin.
void skinFilterByYCbCr(IplImage *img, IplImage *mask) {
    CvSize imageSize = cvSize(img->width, img->height);
    IplImage *imgY  = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
    IplImage *imgCr = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
    IplImage *imgCb = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);

    IplImage *imgYCrCb = cvCreateImage(imageSize, img->depth, img->nChannels);
    cvCvtColor(img, imgYCrCb, CV_BGR2YCrCb);
    cvSplit(imgYCrCb, imgY, imgCr, imgCb, 0);

    cvSetZero(mask);

    //Ellipse parameters of the Hsu/Jain skin model in rotated Cb/Cr space.
    const double ecx = 1.60;    //ellipse center after rotation
    const double ecy = 2.41;
    const double cx = 109.38;   //chroma-space translation (Cb, Cr centers)
    const double cy = 152.02;

    //BUGFIX: the original walked imageData as one contiguous run of
    //width*height bytes, but IplImage rows are padded to widthStep bytes
    //(4-byte aligned), so any width not a multiple of 4 desynchronized the
    //pointers after the first row. Iterate row by row instead.
    for (int row = 0; row < img->height; row++) {
        unsigned char *pY    = (unsigned char *)(imgY->imageData  + row * imgY->widthStep);
        unsigned char *pCr   = (unsigned char *)(imgCr->imageData + row * imgCr->widthStep);
        unsigned char *pCb   = (unsigned char *)(imgCb->imageData + row * imgCb->widthStep);
        unsigned char *pMask = (unsigned char *)(mask->imageData  + row * mask->widthStep);

        for (int col = 0; col < img->width; col++) {
            int y  = pY[col];
            int cr = pCr[col];
            int cb = pCb[col];

            double a = 25.39;   //ellipse semi-axes, widened for bright pixels
            double b = 14.03;
            if (y > 230) {
                a = 1.1 * a;
                b = 1.1 * b;
            } else if (y < 20) {
                //too dark to judge chroma reliably: mark as non-skin
                pMask[col] = 0;
                continue;
            }

            //translate to the chroma center, then rotate into the ellipse's
            //principal axes
            double CB = double(cb) - cx;
            double CR = double(cr) - cy;
            double x1 = -0.818734599 * CB + 0.574172148 * CR;
            double y1 = -0.574172148 * CB - 0.818734599 * CR;
            double value = ((x1 - ecx) * (x1 - ecx)) / (a * a)
                         + ((y1 - ecy) * (y1 - ecy)) / (b * b);

            //value <= 1 means the pixel lies inside the skin ellipse
            pMask[col] = (value > 1) ? 0 : 255;
        }
    }

    cvReleaseImage(&imgY);
    cvReleaseImage(&imgCr);
    cvReleaseImage(&imgCb);
    cvReleaseImage(&imgYCrCb);
}



static const int iPerformanceWidth = 320;
//Compute a reduced size for detection speed while preserving aspect ratio:
//the shorter side becomes iPerformanceWidth and the longer side scales with it.
CvSize optimal_size(CvSize orig) {
    float ratio = ((float)orig.width) / orig.height;
    //cdebug("input ratio: %f", ratio);
    if (orig.width == orig.height) {
        return cvSize(iPerformanceWidth, iPerformanceWidth);
    }

    if (orig.width > orig.height) {
        //landscape: height -> iPerformanceWidth, width scaled up by ratio
        return cvSize(iPerformanceWidth * ratio, iPerformanceWidth);
    } else {
        //portrait: width -> iPerformanceWidth, height scaled by 1/ratio.
        //BUGFIX: the original multiplied by ratio (which is < 1 here),
        //producing a landscape output for portrait input and inverting the
        //aspect ratio.
        return cvSize(iPerformanceWidth, iPerformanceWidth / ratio);
    }
}

const static char *mCascadeFile = "/usr/local/share/OpenCV/haarcascades/haarcascade_mcs_eyepair_big.xml";
static CvMemStorage *memScaledDetectRes = NULL;     //lazily created, reused across calls
static CvHaarClassifierCascade* mFaceCascade = NULL;
//Run the haar cascade on a downscaled copy of `image` and return the detected
//rectangles mapped back to original-image coordinates.
//The returned CvSeq of CvRect is allocated from `memStorage` (caller-owned);
//it is empty when the cascade was not loaded or nothing was detected.
CvSeq *face_detect(IplImage *image, CvMemStorage *memStorage)
{
    cvClearMemStorage(memStorage);
    CvSeq *faces = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), memStorage);
    if (NULL == mFaceCascade) {
        //cascade failed to load (cvLoad returned NULL): return an empty
        //result instead of crashing inside cvHaarDetectObjects
        return faces;
    }

    CvSize sz_scaled = optimal_size(cvGetSize(image));
    IplImage *iplScaled = cvCreateImage(sz_scaled, image->depth, image->nChannels);
    float ratio = ((float) image->width) / sz_scaled.width;     //scale back factor
    //int min_face = ((float) ((sz_scaled.width > sz_scaled.height) ? sz_scaled.width : sz_scaled.height)) / 16;//opencv suggest 1/4 to 1/16 in video sampling
    int min_face = 10;

    if (NULL == memScaledDetectRes) {
        memScaledDetectRes = cvCreateMemStorage();
    }
    cvClearMemStorage(memScaledDetectRes);
    cvResize(image, iplScaled, CV_INTER_CUBIC);
    CvSeq *seqScaledDetectRes = cvHaarDetectObjects(iplScaled, mFaceCascade, memScaledDetectRes,
                                            1.3, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(min_face, min_face) );
    //cvShowImage("window_mScaled", iplScaled);
    for (int face_count = 0; face_count < seqScaledDetectRes->total; face_count++) {
        CvRect *rect = (CvRect *) cvGetSeqElem(seqScaledDetectRes, face_count);
        //BUGFIX: the original pushed &(cvRect(...)), taking the address of a
        //temporary return value — non-standard C++ that modern compilers
        //reject. Copy into a named local first.
        CvRect scaled = cvRect(rect->x * ratio, rect->y * ratio,
                               rect->width * ratio, rect->height * ratio);
        cvSeqPush(faces, &scaled);
    }

    cvReleaseImage(&iplScaled);
    return faces;
}

//Render a 256-bin histogram of the single-channel image `src` (restricted to
//`mask` when non-NULL) into `dst` as black bars on a white background.
//Returns dst. dst must be at least 256 pixels wide.
IplImage *getHistImg(IplImage *src, IplImage *dst, IplImage *mask) {
    assert(NULL != src && NULL != dst);
    assert(dst->width >= 256);

    int     size = 256;
    //BUGFIX: cvCreateHist ranges are half-open [lower, upper); the original
    //{0, 255} compressed 256 values into a 255-wide span, mishandling value
    //255. Use {0, 256} so each bin covers exactly one intensity level.
    float   range[] = {0, 256};
    float   *ranges[] = {range};
    float   max = 0;

    CvHistogram* hist = cvCreateHist(1, &size, CV_HIST_ARRAY, ranges, 1);
    cvCalcHist(&src, hist, 0, mask);

    cvGetMinMaxHistValue(hist, NULL, &max, NULL, NULL);
    cvSet(dst, cvScalarAll(255), 0);

    //BUGFIX: guard against an all-zero histogram (e.g. an empty mask);
    //the original divided by max unconditionally, making bin_unith infinite.
    if (max <= 0) {
        cvReleaseHist(&hist);
        return dst;     //blank (all-white) histogram image
    }

    double  bin_width = (double)dst->width / size;
    double  bin_unith = (double)dst->height / max;
    for (int i = 0; i < size; i++)
    {
        CvPoint p0 = cvPoint(i * bin_width, dst->height);
        CvPoint p1 = cvPoint((i + 1) * bin_width, dst->height - cvGetReal1D(hist->bins, i) * bin_unith);
        cvRectangle(dst, p0, p1, cvScalar(0), -1, 8, 0);
    }

    cvReleaseHist(&hist);
    return dst;
}

#if 0
// Disabled: builds and displays a 2-D H-S (hue/saturation) histogram of a
// BGR image. Kept for reference; not compiled.
IplImage *getHist(IplImage *src_arg, IplImage *hist_arg, IplImage *mask){
	IplImage * src= src_arg;
    if(src->depth != 8 || src->nChannels != 3) {
        return NULL;
    }

	IplImage* hsv = cvCreateImage( cvGetSize(src), 8, 3 );
	IplImage* h_plane = cvCreateImage( cvGetSize(src), 8, 1 );
	IplImage* s_plane = cvCreateImage( cvGetSize(src), 8, 1 );
	IplImage* v_plane = cvCreateImage( cvGetSize(src), 8, 1 );
	IplImage* planes[] = { h_plane, s_plane };
 
	/** H component divided into 16 levels, S component into 8 levels */
	int h_bins = 16, s_bins = 8;
	int hist_size[] = {h_bins, s_bins};
 
	/** value range of the H component */
	float h_ranges[] = { 0, 180 }; 
 
	/** value range of the S component */
	float s_ranges[] = { 0, 255 };
	float* ranges[] = { h_ranges, s_ranges };
 
	/** convert the input image to the HSV color space */
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );
 
	/** create a 2-D histogram, uniformly binned in each dimension */
	CvHistogram * hist = cvCreateHist( 2, hist_size, CV_HIST_ARRAY, ranges, 1 );
	/** accumulate the histogram from the H and S planes */
	cvCalcHist( planes, hist, 0, mask );
 
	/** fetch the histogram's maximum count, used to scale the display */
	float max_value;
	cvGetMinMaxHistValue( hist, 0, &max_value, 0, 0 );
 
 
	/** set up the histogram display image */
	int height = 240;
	int width = (h_bins*s_bins*6);
	IplImage* hist_img = cvCreateImage( cvSize(width,height), 8, 3 );
	cvZero( hist_img );
 
	/** temporary 1x1 images used for HSV-to-RGB color conversion */
	IplImage * hsv_color = cvCreateImage(cvSize(1,1),8,3);
	IplImage * rgb_color = cvCreateImage(cvSize(1,1),8,3);
	int bin_w = width / (h_bins * s_bins);
	for(int h = 0; h < h_bins; h++)
	{
		for(int s = 0; s < s_bins; s++)
		{
			int i = h*s_bins + s;
			/** read the bin count and compute the bar height to display */
			float bin_val = cvQueryHistValue_2D( hist, h, s );
			int intensity = cvRound(bin_val*height/max_value);
 
			/** get the color this bin represents; convert to RGB for drawing */
			cvSet2D(hsv_color,0,0,cvScalar(h*180.f / h_bins,s*255.f/s_bins,255,0));
			cvCvtColor(hsv_color,rgb_color,CV_HSV2BGR);
			CvScalar color = cvGet2D(rgb_color,0,0);
 
			cvRectangle( hist_img, cvPoint(i*bin_w,height),
				cvPoint((i+1)*bin_w,height - intensity),
				color, -1, 8, 0 );
		}
	}
 
	// NOTE(review): every temporary image and the histogram leak here
	// (no cvReleaseImage/cvReleaseHist) — confirm before re-enabling.
	cvNamedWindow( "H-S Histogram", 1 );
	cvShowImage( "H-S Histogram", hist_img );
 
    return hist_img;
}
#endif

using namespace cv;
//Rotate `image` by `degree` (counter-clockwise, abs(degree) in [0,360]) about
//its center, scaling the content down so the rotated bounding box still fits
//inside an output of the original size. The angle is stamped on the result.
cv::Mat rotate(const cv::Mat& image, int degree/*abs(degree)=[0,360]*/)
{
    degree = degree % 360;
    float angle = ((float) degree) / 180 * CV_PI;
    float sin_a = fabs(sin(angle));
    float cos_a = fabs(cos(angle));

    //bounding-box dimensions of the rotated image; used to derive the scale
    //that keeps the whole rotated content inside the original frame
    int new_width = image.size().width*cos_a + image.size().height*sin_a; 
    float width_scale = (float) image.size().width / new_width;
    int new_height = image.size().height*cos_a + image.size().width*sin_a; 
    float height_scale = (float) image.size().height / new_height;

    Point2f src_center(image.cols/2.0F, image.rows/2.0F);
    cv::Mat rot_matrix = getRotationMatrix2D(src_center, degree, min(width_scale, height_scale));
    //BUGFIX: the original pre-allocated rotated_img with swapped
    //(height, width) dimensions; warpAffine reallocates dst to its dsize
    //argument (image.size()) anyway, so that allocation was wasted and
    //misleading. Allocate with the actual output size.
    cv::Mat rotated_img(image.size(), image.type());
    warpAffine(image, rotated_img, rot_matrix, image.size());

    stringstream ss;
    ss << "degree= " << degree;
    putText(rotated_img, ss.str(), Point(0, image.rows/2), FONT_HERSHEY_SIMPLEX, 1.0f, Scalar(255,255,255), 2); //, int lineType=8, bool bottomLeftOrigin=false );

    cout << "degree = " << degree << ",\tangle = " << angle << ",\torig:" << image.cols << "x" << image.rows << ",\tnew:" << new_width << "x" <<new_height 
        << ",\tscale: (" << width_scale << ", " << height_scale << ")" << endl;
    return rotated_img;
}

// Program entry: captures webcam frames, segments skin by chroma, finds skin
// contours and fits ellipses to them, detects an eye-pair region with a haar
// cascade, keeps ellipses lying inside it as eye candidates, estimates head
// tilt from the two eye centers (and shows a rotation-aligned frame), then
// runs Hough circle detection on the red channel of the eye region to find
// irises. Intermediate images are shown in debug windows; ESC exits.
int main(int argc, char **argv) {
	CvAdaptiveSkinDetector filter(1,
				      CvAdaptiveSkinDetector::
				      MORPHING_METHOD_ERODE_DILATE);

	CvCapture *capture = cvCreateCameraCapture(CV_CAP_ANY);
	IplImage *maskImg = cvCreateImage(getCaptureSize(capture), IPL_DEPTH_8U, 1); 
	IplImage *maskSmoothed = cvCreateImage(getCaptureSize(capture), IPL_DEPTH_8U, 1); 
	IplImage *skinImg = cvCreateImage(getCaptureSize(capture), IPL_DEPTH_8U, 3); 
    IplImage *smoothedImg = cvCreateImage(getCaptureSize(capture), IPL_DEPTH_8U, 3); 
    IplImage *maskEyeEllipseImg = NULL;
    IplImage *eyeImg = NULL;
	IplImage *frame = NULL;
    IplImage *channelRImg = NULL;
    IplImage *histImg = cvCreateImage(cvSize(400, 300), IPL_DEPTH_8U, 3);
    cv::Mat  *alignedImg;

    // NOTE(review): cvLoad may return NULL if the cascade file is missing —
    // confirm face_detect tolerates a NULL mFaceCascade.
    mFaceCascade = (CvHaarClassifierCascade*)cvLoad(mCascadeFile, 0, 0, 0 );
    CvMemStorage *face_storage = cvCreateMemStorage();

    // Debug windows for the intermediate stages of the pipeline.
    cvNamedWindow("orig", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("mask", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("masksmoothed", CV_WINDOW_AUTOSIZE);
	//cvNamedWindow("skin", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("maskEyeEllipse", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("eye", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("channelR", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("hist", CV_WINDOW_AUTOSIZE);
    namedWindow("aligned");
	//cvNamedWindow("source", CV_WINDOW_AUTOSIZE);


    CvMemStorage *storage = cvCreateMemStorage();
    CvSeq *contour = NULL;
    CvSeq *contourmax = NULL;

    CvBox2D32f      *box2D32f;
    CvPoint         *pointArray;
    CvPoint2D32f    *pointArray2D32f;

    // Sequence of ellipses fitted to the skin contours of the current frame.
    CvMemStorage *memBoxs = cvCreateMemStorage();
    CvSeq *seqBoxs = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvBox2D32f), memBoxs);

    CvMemStorage *memHough = cvCreateMemStorage();
    CvSeq *seqHough = NULL;

    // Trackbar-controlled tuning parameters (raw positions mapped to real
    // values inside the loop).
    int gaussian_init_value = 1;
    int gaussian_value = 1;

    int highInt_init_value = 2;  //just to set the init value of trackbar
    int lowInt_init_value = 1;
    int highInt = 100;
    int lowInt = 50;

    int threshold_init_value = 64;
    int threshold_value = 64;
    cvCreateTrackbar( "Gaussian Kernel", "eye", &gaussian_init_value, 5, NULL);
    cvCreateTrackbar( "Hough High Int", "eye", &highInt_init_value, 7/*count*/, NULL/*callback func*/);
    cvCreateTrackbar("Threshold Value" , "eye", &threshold_init_value, 255, NULL);

	for (;;) {

        // Map the trackbar position to the Canny high threshold used by
        // cvHoughCircles.
        highInt = cvGetTrackbarPos("Hough High Int", "eye");
        switch( highInt ){
            case 0:
                highInt= 20;
                break;
            case 1:
                highInt = 30;
                break;
            case 2:
                highInt = 50;
                break;
            case 3:
                highInt = 80;
                break;
            case 4:
                highInt = 120;
                break;
            case 5:
                highInt = 160;
                break;

            case 6:
                highInt = 240;
                break;

            case 7:
                highInt = 320;
                break;
        }
        lowInt = ((float) highInt) / 2.5;       //OpenCV suggests a low/high ratio between 1/3 and 1/2
        threshold_value = cvGetTrackbarPos("Threshold Value", "eye");

        // Map the trackbar position to an odd Gaussian kernel size (0 = off).
        gaussian_value = cvGetTrackbarPos( "Gaussian Kernel", "eye");
        switch(gaussian_value) {
            case 0:
                gaussian_value = 0;
                break;
            case 1:
                gaussian_value = 3;
                break;
            case 2:
                gaussian_value = 5;
                break;
            case 3:
                gaussian_value = 7;
                break;
            case 4:
                gaussian_value = 9;
                break;
            case 5:
                gaussian_value = 11;
                break;
        
        }

		cvZero(skinImg);

		frame = cvQueryFrame(capture);
		if (!frame)
			break;

        // Skin segmentation: median-blur the frame, classify skin pixels by
        // chroma, then median-blur the binary mask to remove speckle.
        cvSmooth(frame, smoothedImg, CV_MEDIAN, 3);
		//filter.process(smoothedImg, maskImg);	// process the frame
        skinFilterByYCbCr(smoothedImg, maskImg); //process the frame by another function
        cvSmooth(maskImg, maskSmoothed, CV_MEDIAN, 3);
        
        //cvErode(maskSmoothed, maskSmoothed, NULL, 3);
        //cvDilate(maskSmoothed, maskSmoothed, NULL, 3);

        cvCopy(maskSmoothed, maskImg);

		cvCopy(smoothedImg, skinImg, maskSmoothed);

        // Contour extraction. NOTE: cvFindContours modifies maskSmoothed.
        cvClearMemStorage(storage);
        cvFindContours(maskSmoothed, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
        // NOTE(review): contour may be NULL when no contours are found — this
        // dereference (and cvBoundingRect below) would then crash; confirm.
        int contour_total = contour->total;
        //printf("%d contours founded\n", contour_total);
        //cvDrawContours(skinImg, contour, CV_RGB(255,0,0), CV_RGB(255, 0, 0), 2, 2, 8, cvPoint(0,0));
        double area,maxArea = 100;//only track contours with area above 100 pixels

        for(;contour ;contour = contour->h_next ) {

             area = fabs(cvContourArea( contour, CV_WHOLE_SEQ )); //area of the current contour (max tracked below)
             if(area > maxArea)
             {
                  contourmax = contour;
                  maxArea = area;
             }

            int i;
            int count= contour->total;//number of points on this contour
            if (count<6)
            {
                continue;
            }
            pointArray = (CvPoint *)malloc(count*sizeof(CvPoint));
            pointArray2D32f = (CvPoint2D32f*)malloc(count*sizeof(CvPoint2D32f));
            box2D32f = (CvBox2D32f *)malloc(sizeof(CvBox2D32f));
            //extract the point set
            cvCvtSeqToArray(contour,pointArray,CV_WHOLE_SEQ);
            //convert the CvPoint array to CvPoint2D32f
            for (i=0;i<count;i++)
            {
                pointArray2D32f[i].x=(float)pointArray[i].x;
                pointArray2D32f[i].y=(float)pointArray[i].y;
            }
            //fit an ellipse to the current contour
            cvFitEllipse(pointArray2D32f,count,box2D32f);

            cvSeqPush(seqBoxs, box2D32f);    //push into seq; box stores full axis lengths, not semi-axes
             
            free(pointArray);
            free(pointArray2D32f);
            free(box2D32f);

        }       

        //printf("max area == %lf\n", maxArea);
        CvRect aRect = cvBoundingRect( contourmax, 0 ); //bounding rect of the largest connected region (unused)
        //cvSetImageROI( frame,aRect);

        CvSeq *face_seq = face_detect(skinImg, face_storage);

        //keep only the largest rectangle from the haar detection ++++++++++
        CvRect *rect = NULL; 
        CvRect *rect_max = NULL;
        for(int face_count = 0; face_count < face_seq->total; face_count ++) {
            rect = (CvRect *) cvGetSeqElem(face_seq, face_count);
            if(rect_max == NULL || rect_max->width < rect->width) {
                rect_max = rect;
            }
        }
        //keep only the largest rectangle from the haar detection ---------- 

        if(rect_max != NULL) {

            // Per-frame images sized to the detected eye-pair ROI; released
            // at the end of this block.
            cvSetImageROI(skinImg, *rect_max);
            maskEyeEllipseImg = cvCreateImage(cvGetSize(skinImg), IPL_DEPTH_8U, 1);
            eyeImg = cvCreateImage(cvGetSize(skinImg), skinImg->depth, skinImg->nChannels);
            cvResetImageROI(skinImg);

            cvZero(maskEyeEllipseImg);
            cvZero(eyeImg);
            channelRImg = cvCreateImage(cvGetSize(eyeImg), IPL_DEPTH_8U, 1);

            /*
            cvRectangle(skinImg, cvPoint(rect_max->x, rect_max->y), 
                                cvPoint(rect_max->x + rect_max->width, rect_max->y + rect_max->height), 
                                cvScalar(255, 255, 255));
            */

            //cvEllipse needs CvPoint && CvSize parameters, NOT CvPoint2D32f && ...
            CvPoint center;
            CvSize size;
            int ellipse_axis_long = 0;
            int eyes_count = 0;
            CvPoint eye_centers[2];
            // Walk the fitted ellipses; keep those whose center lies inside
            // the detected eye-pair rectangle and whose size is plausible.
            for(int i = 0; i < seqBoxs->total; i ++) {
                box2D32f = (CvBox2D32f *)cvGetSeqElem(seqBoxs, i);
                center.x = cvRound(box2D32f->center.x);
                center.y = cvRound(box2D32f->center.y);
                size.width = cvRound(box2D32f->size.width*0.5);     //cvEllipse() expects semi-axis lengths
                size.height = cvRound(box2D32f->size.height*0.5);
                box2D32f->angle = box2D32f->angle;
                //box2D32f->angle = -box2D32f->angle;       //mirror problem

                int axis_long, axis_short;
                float ratio_long, ratio_short;
                if(size.width >= size.height) {
                    axis_long = 2 * size.width;
                    axis_short = 2 * size.height;
                } else {
                    axis_long = 2 * size.height;
                    axis_short = 2 * size.width;
                }
                ratio_long = (float) rect_max->width / axis_long;
                ratio_short = (float) rect_max->height / axis_short;

                /*
                int axis_long = 2 * (size.width >= size.height ? size.width : size:height);     //major axis of the ellipse
                int axis_short =  2 * (size.width <= size.height ? size.width : size:height);   //minor axis of the ellipse
                */

                //draw the ellipse
                 
                if(center.x > rect_max->x && center.x < (rect_max->x + rect_max->width) 
                    && center.y > rect_max->y && center.y < (rect_max->y + rect_max->height)
                    && ratio_long > 2.0f && ratio_long < 7.0f /*I GUESS*/ ) { 
                    
                    //build the mask image (filled ellipse in ROI coordinates)
                    cvEllipse(maskEyeEllipseImg, cvPoint(center.x - rect_max->x, center.y - rect_max->y), 
                                size, box2D32f->angle, 0, 360, CV_RGB(255, 255, 255), 
                                -1/*thickness, <0 to fill*/ /*,line_type=connected line*/ /*,shift=0*/);

                    //draw onto the skin image (full-frame coordinates)
                    cvEllipse(skinImg, center, size, box2D32f->angle, 0, 360, CV_RGB(0,0,255), 
                                2/*thickness, <0 to fill*/ ,CV_AA/*line_type=connected line*/ /*,shift=0*/);

                    ellipse_axis_long = ellipse_axis_long > axis_long ? ellipse_axis_long : axis_long;

                    printf("rect center(%d,%d) -> %d*%d\t\tellipse center(%d, %d) -> %d*%d,angle=%f\t\tratio(%f, %f)\n", 
                            rect_max->x + rect_max->width/2, 
                            rect_max->y + rect_max->height/2, 
                            rect_max->width, 
                            rect_max->height, 
                            center.x, center.y, 
                            axis_long, axis_short, box2D32f->angle, 
                            ratio_long, ratio_short);

                    if(eyes_count < (sizeof(eye_centers) / sizeof(CvPoint))) {
                        eye_centers[eyes_count].x = center.x;
                        eye_centers[eyes_count].y = center.y;
                    }
                    eyes_count ++;
                }
            }
            // Head-tilt estimation: needs exactly two accepted eye centers.
            int eyes_degree = 0;
            if((eyes_count) == (sizeof(eye_centers) / sizeof(CvPoint))) {
                int left_eye = 0, right_eye = 1;
                if(eye_centers[0].x < eye_centers[1].x) {
                    left_eye = 0;
                    right_eye = 1;
                } else {
                    left_eye = 1;
                    right_eye = 0;
                }
                //OpenCV's origin is top-left, so degree = atan2(left.y - right.y, right.x - left.x)
                printf("left-right eye degree:\t%d\n",  eyes_degree = (atan2((eye_centers[left_eye].y - eye_centers[right_eye].y), (eye_centers[right_eye].x - eye_centers[left_eye].x))*180/CV_PI));
                Mat matFrame = frame;
                //rotate by the negative of the eye-line angle to align the eyes
                imshow("aligned", rotate(matFrame, (- eyes_degree)));
            }


            //Hough circle fitting +++++++++++
            if(0 != ellipse_axis_long) {

                // Copy the eye region of the original frame, masked by the
                // accepted-ellipse mask, into eyeImg.
                cvSetImageROI(frame, *rect_max);
                cvCopy(frame, eyeImg, maskEyeEllipseImg);
                cvResetImageROI(frame);

                // Extract the red channel (channel 3 of BGR) via COI.
                cvSetImageCOI(eyeImg, 3);
                cvCopy(eyeImg, channelRImg);
                cvSetImageCOI(eyeImg, 0);

                //cvCvtColor(eyeImg, channelRImg, CV_BGR2GRAY);
                //Simple blur and Gaussian blur support 1- or 3-channel, 8-bit and 32-bit floating point images. These two methods can process images in-place.
                //so Gaussian blur supports in-place operation (src == dst)
                if(0 != gaussian_value ) {
                    cvSmooth(channelRImg, channelRImg, CV_GAUSSIAN, gaussian_value, gaussian_value);
                }
                int max_radius = 0.4f * ellipse_axis_long;  //so the max iris diameter is 0.8 of the eye length
                int min_radius = 0.1f * ellipse_axis_long; //min iris diameter is 0.2 of the eye length
                int min_dist = 0.5f * ellipse_axis_long;    //small enough
                seqHough = cvHoughCircles(channelRImg, memHough, CV_HOUGH_GRADIENT, 2/*dp*/, min_dist, 
                            lowInt, highInt, min_radius, max_radius);

                printf("hough circle min=%d, max=%d, detected %d circles--------->\n", min_radius, max_radius, seqHough->total);
                for(int count_hough = 0; count_hough < seqHough->total; count_hough ++) {
                    float *circle = (float *) cvGetSeqElem(seqHough, count_hough);
                    int r_hough = circle[2];
                    CvPoint center = cvPoint(circle[0] , circle[1] );
                    //cvCircle(mCannyed, center, r_hough, cvScalar(255,255,255));
                    cvCircle(eyeImg, center, r_hough, cvScalar(0,0,255));
                    printf("---------> circle[%d]: Center(%d,%d), R=%d\n", count_hough, center.x, center.y, r_hough);
                }

                //cvThreshold(channelRImg, channelRImg, threshold_value, 128, CV_THRESH_BINARY);
                //cvEqualizeHist(channelRImg, channelRImg);         //sucks
                getHistImg(channelRImg, histImg, maskEyeEllipseImg);
            }
            //Hough circle fitting ----------- 


            printf("\n\n");
            cvClearSeq(seqBoxs);
            cvShowImage("maskEyeEllipse", maskEyeEllipseImg);
            cvShowImage("eye", eyeImg);
            cvShowImage("channelR", channelRImg);
            cvShowImage("hist", histImg);
            // Release per-frame ROI-sized images created above.
            cvReleaseImage(&maskEyeEllipseImg);
            cvReleaseImage(&eyeImg);
            cvReleaseImage(&channelRImg);

        }

        cvShowImage("mask", maskImg);
        cvShowImage("masksmoothed", maskSmoothed);
        cvShowImage("orig", frame);
		//cvShowImage("skin", skinImg);
        //cvResetImageROI(frame);

		//cvShowImage("source", frame);
		if (cvWaitKey(1) == 27)
			break;

	}

    cvReleaseMemStorage(&memBoxs);
    cvReleaseMemStorage(&memHough);

	cvReleaseImage(&skinImg);
	cvReleaseImage(&maskImg);
    cvReleaseImage(&maskSmoothed);
    cvReleaseImage(&smoothedImg);
	cvReleaseCapture(&capture);

    cvDestroyAllWindows();

	return 0;
}

