/*
 * (C) Copyright 2012 Compal Electronics, Inc. 
 *
 * This software is the property of Compal Electronics, Inc.
 * You have to accept the terms in the license file before use.
 *
 */

//#define _DEBUG_OUTPUT_ true
#define LOG_TAG "[OpenCVFacialDetectorBase]"
#include "OpenCVFacialDetectorBase.h"

#include <set>

using namespace cv;

namespace facial_compal {

    // Guards one-time cascade loading: set true by the first constructor run.
    bool OpenCVFacialDetectorBase::isHaarLoaded = false;
    // Shared across all instances; filled lazily by the first constructor and
    // deliberately never released (see destructor).
    CvHaarClassifierCascade *OpenCVFacialDetectorBase::haarCascadeClassifiers[FACE_NONE];

    OpenCVFacialDetectorBase::OpenCVFacialDetectorBase(const char *haarAbsoluteDirectory) : CASCADE_FILE_DIRBASE(haarAbsoluteDirectory) {
        cdebug("%s-%s++++\n", __FILE__, __func__);

        if (isHaarLoaded) {
            cdebug("cascade classifiers already loaded\n");
        } else {
            // Feature -> cascade-file-name table. Features not listed here
            // keep an empty path and are skipped by the loading loop below.
            static const struct {
                FaceFeature feature;
                const char *fileName;
            } kCascadeTable[] = {
                { FACE_LEFT_EYE,  "haarcascade_lefteye_2splits.xml"  },
                { FACE_RIGHT_EYE, "haarcascade_righteye_2splits.xml" },
                { FACE_EYES,      "haarcascade_mcs_eyepair_big.xml"  },
                { FACE_NOSE,      "haarcascade_mcs_nose.xml"         },
                { FACE_MOUTH,     "haarcascade_mcs_mouth.xml"        },
                { FACE_FACE,      "haarcascade_frontalface_alt2.xml" },
            };

            for (size_t t = 0; t < sizeof(kCascadeTable) / sizeof(kCascadeTable[0]); t++) {
                sCascadeFiles[kCascadeTable[t].feature] =
                    CASCADE_FILE_DIRBASE + kCascadeTable[t].fileName;
            }

            // Load every cascade that has a path configured; the rest stay
            // NULL and are reported as "not loaded" at detection time.
            for (int i = 0; i < FACE_NONE; i++) {
                haarCascadeClassifiers[i] = (sCascadeFiles[i].size() > 0) ?
                        (CvHaarClassifierCascade *) cvLoad(sCascadeFiles[i].c_str(), 0, 0, 0) :
                        NULL;
                if (sCascadeFiles[i].size() > 0 && NULL != haarCascadeClassifiers[i]) {
                    cdebug("%s loaded\n", sCascadeFiles[i].c_str());
                }
            }
            isHaarLoaded = true;
        }

        cdebug("%s-%s----\n", __FILE__, __func__);
    }
    
    OpenCVFacialDetectorBase::~OpenCVFacialDetectorBase() {
        cdebug("%s-%s ++++++++\n", __FILE__, __func__);

        // Intentionally empty: haarCascadeClassifiers[] are static and
        // shared by every instance, so releasing them here (via
        // cvReleaseHaarClassifierCascade) would break other instances.

        cdebug("%s-%s ------\n", __FILE__, __func__);
    }


    // Pick a detection-friendly size for an image of size `orig`:
    // width is fixed at 500 px (speed/accuracy trade-off) and the height
    // follows the original aspect ratio (square stays square).
    CvSize OpenCVFacialDetectorBase::optimal_size(CvSize orig) {
        const int kTargetWidth = 500;

        if (orig.width == orig.height) {
            return cvSize(kTargetWidth, kTargetWidth);
        }

        float aspect = ((float) orig.width) / orig.height;
        return cvSize(kTargetWidth, cvRound((float) kTargetWidth / aspect));
    }

	// Produce a scaled, single-channel 8-bit copy of srcImg sized by
	// optimal_size(). The caller owns the returned image and must release it.
	IplImage *OpenCVFacialDetectorBase::scale_gray_image(const IplImage * srcImg) {
		CvSize szScaled = optimal_size(cvGetSize(srcImg));

		IplImage *iplResized =
		    cvCreateImage(szScaled, srcImg->depth, srcImg->nChannels);
		IplImage *iplGray =
		    cvCreateImage(szScaled, IPL_DEPTH_8U, 1);

		cvResize(srcImg, iplResized, CV_INTER_CUBIC);

		if (3 == iplResized->nChannels || 4 == iplResized->nChannels) {
			cvCvtColor(iplResized, iplGray, CV_BGR2GRAY);
		} else {
			// Already single-channel: a plain copy suffices.
			cvCopy(iplResized, iplGray);
		}

		cvReleaseImage(&iplResized);
		return iplGray;
	}
    
    /**
     * Detect facial feature `ff` in srcImg.
     *
     * Pipeline: (for FACE_FACE on color input) mask non-skin pixels, scale
     * down to a gray working image, run the face cascade, then search each
     * face -- restricted to the feature's ROI from get_roi_rect4ff() -- with
     * the cascade for `ff`. Coordinates are mapped back to srcImg scale.
     * Side effect: iSmileDegree is reset to -1 and, when ff == FACE_SMILE,
     * updated to the highest `neighbors` count of a smile hit.
     *
     * @param srcImg     input image (any channel count; >=3 enables skin filter)
     * @param ff         feature to detect
     * @param memStorage caller-owned storage backing the returned sequence
     * @return sequence of CvRect in srcImg coordinates, or NULL when the
     *         required cascade is not loaded (or scaling failed)
     */
    CvSeq * OpenCVFacialDetectorBase::facial_detect(
                IplImage *srcImg, const FaceFeature ff, CvMemStorage *memStorage) {

        cdebug("%s(srcImg->size=%d*%d, depth=%d, channels=%d\n", 
                __func__, srcImg->width, srcImg->height, srcImg->depth, srcImg->nChannels);

        IplImage *skinImg = NULL, *skinMask = NULL, *iplScaledGrayImg = NULL;

        if (srcImg->nChannels >= 3) {
            switch (ff) {
            case FACE_FACE: {
                // Zero out non-skin pixels first to cut false positives.
                skinImg = cvCreateImage(cvGetSize(srcImg), srcImg->depth, srcImg->nChannels);
                skinMask = cvCreateImage(cvGetSize(srcImg), IPL_DEPTH_8U, 1);
                cvZero(skinMask);
                cvZero(skinImg);
                this->skinFilterByYCbCr(srcImg, skinMask);
                cvCopy(srcImg, skinImg, skinMask);
                cvReleaseImage(&skinMask);  //will release skinImg later
                break;
            }
            default:
                break;
            }
        }

        iplScaledGrayImg = scale_gray_image(NULL == skinImg ? srcImg : skinImg);
        if (NULL != skinImg) {
            cvReleaseImage(&skinImg);
        }

        cvClearMemStorage(memStorage);
        if (NULL == haarCascadeClassifiers[ff]) {
            cdebug("%s not loaded\n", sCascadeFiles[ff].c_str());
            // FIX: iplScaledGrayImg was leaked on this early-return path.
            cvReleaseImage(&iplScaledGrayImg);
            return NULL;
        } else if (NULL == iplScaledGrayImg) {
            cdebug("iplScaledGrayImg NULL\n");
            return NULL;
        }
        float fScaleRatio = ((float)(srcImg->width)) / ((float)(iplScaledGrayImg->width));

        // Results live in the caller's storage so they survive this call.
        CvSeq *detectedSeqs =
            cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), memStorage);

        CvMemStorage *memFace = cvCreateMemStorage(0);
        if (NULL == haarCascadeClassifiers[FACE_FACE]) {
            cdebug("%s not loaded\n",
                   sCascadeFiles[FACE_FACE].c_str());
            // FIX: memFace was leaked on this early-return path.
            cvReleaseMemStorage(&memFace);
            cvReleaseImage(&iplScaledGrayImg);
            return NULL;
        }

        IplImage *iplFace = NULL;
        IplImage *iplCroped = NULL;
        CvMemStorage *memSeqFeaturesInFaceHolder = cvCreateMemStorage(0);
        CvSeq *seqFeaturesInFace = NULL;
        CvSeq *seqFaces =
            cvHaarDetectObjects(iplScaledGrayImg,
                        haarCascadeClassifiers[FACE_FACE],
                        memFace, 1.05, 2,
                        CV_HAAR_DO_CANNY_PRUNING, cvSize(15, 15));
        int iFaceNum = seqFaces->total;

        CvRect rect4Crop;   // feature ROI, in iplScaledGrayImg coordinates
        cdebug("detected %d faces\n", iFaceNum);

        int iDetectedNum = 0;
        iSmileDegree = -1;
        for (int i = 0; i < iFaceNum; i++) {

            CvRect *rectFace = (CvRect *) cvGetSeqElem(seqFaces, i);
            cdebug("detected face[%d], (%d,%d) with %dx%d\n", i,
                   rectFace->x, rectFace->y, rectFace->width,
                   rectFace->height);

            if (NULL == get_roi_rect4ff(rectFace, &rect4Crop, ff)) {
                cdebug("%s:%d, get_roi_rect4ff() return NULL\n",
                       __func__, __LINE__);
                iDetectedNum = 0;
                break;
            }
            // Keep a crop of the whole face around (debugging aid).
            iplFace =
                cvCreateImage(cvSize(rectFace->width, rectFace->height),
                              iplScaledGrayImg->depth,
                              iplScaledGrayImg->nChannels);
            cvSetImageROI(iplScaledGrayImg, *rectFace);
            cvCopy(iplScaledGrayImg, iplFace);
            cvResetImageROI(iplScaledGrayImg);

            IplImage *iplWillUse = NULL;
            // Run the feature cascade on the feature-specific ROI when it
            // differs from the whole face rect; otherwise reuse iplFace.
            if (!(rectFace->x == rect4Crop.x
                  && rectFace->y == rect4Crop.y
                  && rectFace->width == rect4Crop.width
                  && rectFace->height == rect4Crop.height)) {
                cdebug("rectFace ---> rect4Croped: (%d,%d) with %d*%d ---> (%d,%d) with %d*%d\n",
                       rectFace->x, rectFace->y, rectFace->width,
                       rectFace->height, rect4Crop.x, rect4Crop.y,
                       rect4Crop.width, rect4Crop.height);

                iplCroped =
                    cvCreateImage(cvSize(rect4Crop.width, rect4Crop.height),
                                  iplScaledGrayImg->depth,
                                  iplScaledGrayImg->nChannels);
                cvSetImageROI(iplScaledGrayImg, rect4Crop);
                cvCopy(iplScaledGrayImg, iplCroped);
                cvResetImageROI(iplScaledGrayImg);
                iplWillUse = iplCroped;
            } else {
                cdebug("iplCroped need not to crop\n");
                iplWillUse = iplFace;
            }

            if (FACE_FACE == ff) {
                // The face itself is the feature: no second detection pass.
                seqFeaturesInFace = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), memSeqFeaturesInFaceHolder);
                CvRect result;

                // Empirically, the actual face occupies roughly the rect
                // (7,6) with 36*39 inside a 50*50 detection window.
                result.x = 7.0 / 50 * rectFace->width;
                result.y = 6.0 / 50 * rectFace->height;
                result.width = 36.0 / 50 * rectFace->width;
                result.height = 39.0 / 50 * rectFace->height;
                cvSeqPush(seqFeaturesInFace, &result);

            } else if (FACE_SMILE == ff) {
                // Looser min size, more neighbors required for smiles.
                seqFeaturesInFace =
                    cvHaarDetectObjects(iplWillUse,
                            haarCascadeClassifiers[ff],
                            memSeqFeaturesInFaceHolder,
                            1.1,
                            4,
                            CV_HAAR_DO_CANNY_PRUNING,
                            cvSize(11, 5));
            } else {
                seqFeaturesInFace =
                    cvHaarDetectObjects(iplWillUse,
                            haarCascadeClassifiers[ff],
                            memSeqFeaturesInFaceHolder,
                            1.1, 2,
                            CV_HAAR_DO_CANNY_PRUNING,
                            cvSize(20, 20));
            }

            for (int j = 0; j < seqFeaturesInFace->total; j++) {
                CvRect *rectInFace =
                    (CvRect *) cvGetSeqElem(seqFeaturesInFace, j);
                cdebug("detected feature[%d], (%d,%d) with %dx%d\n",
                       j, rectInFace->x, rectInFace->y,
                       rectInFace->width, rectInFace->height);

                // Map from cropped-ROI coordinates back to srcImg
                // coordinates: undo the crop offset, then the scaling.
                CvRect rValid;
                rValid.x = (rect4Crop.x + rectInFace->x) * fScaleRatio;
                rValid.y = (rect4Crop.y + rectInFace->y) * fScaleRatio;
                rValid.width = (rectInFace->width) * fScaleRatio;
                rValid.height = (rectInFace->height) * fScaleRatio;

                cvSeqPush(detectedSeqs, &rValid);
                iDetectedNum++;

                if (FACE_SMILE == ff) {
                    // cvHaarDetectObjects stores CvAvgComp elements; the
                    // neighbor count serves as the smile strength.
                    CvAvgComp *comp = (CvAvgComp *) cvGetSeqElem(seqFeaturesInFace, j);
                    iSmileDegree = iSmileDegree > comp->neighbors ? iSmileDegree : comp->neighbors;
                    cdebug("degree[%d][%d]: %d\n", i, j, iSmileDegree);
                }
            }// for features in ( FACE )

            if (iplCroped != NULL) {
                cvReleaseImage(&iplCroped);
                iplCroped = NULL;
            }
            cvReleaseImage(&iplFace);
        }//for face in (Faces)

        cvReleaseMemStorage(&memFace);
        cvReleaseMemStorage(&memSeqFeaturesInFaceHolder);

        cdebug("detect num:%d of this feature\n", iDetectedNum);

        cvReleaseImage(&iplScaledGrayImg);
        //removeDuplicate(detectedSeqs);

        return detectedSeqs;
    }

    /**
     * C++-style wrapper over the C-API facial_detect().
     *
     * @param image input image
     * @param rects output; cleared, then filled with detections in image coords
     * @param ff    feature to detect
     * @return reference to rects (left empty when the cascade is missing
     *         or nothing was found)
     */
    vector<cv::Rect> &OpenCVFacialDetectorBase::facial_detect(const Mat& image, vector<Rect>& rects, const FaceFeature ff) {
        CvMemStorage *memStorage = cvCreateMemStorage();
        IplImage img = image;
        CvSeq *seq = this->facial_detect(&img, ff, memStorage);
        rects.clear();
        // FIX: the C-API overload returns NULL when the needed cascade is
        // not loaded; dereferencing seq->total here crashed in that case.
        if (NULL != seq) {
            for (int i = 0; i < seq->total; i++) {
                CvRect *r = (CvRect *) cvGetSeqElem(seq, i);
                rects.push_back(Rect(r->x, r->y, r->width, r->height));
            }
        }

        cvReleaseMemStorage(&memStorage);
        return rects;
    }

	// Compute the search ROI for feature `ff` inside face `faceRect`.
	// The resulting dstRect is in whole-image coordinates (same frame as
	// faceRect), not face-local ones. Features without a narrower ROI
	// (eyes-pair, whole face, none) get the full face rect. Always returns
	// dstRect.
	CvRect *OpenCVFacialDetectorBase::get_roi_rect4ff(CvRect * faceRect,
						    CvRect * dstRect,
						    FaceFeature ff) {
		int x = faceRect->x;
		int y = faceRect->y;
		int w = faceRect->width;
		int h = faceRect->height;

		switch (ff) {
		case FACE_LEFT_EYE:
			// Right half, upper half of the face.
			cdebug("case FACE_LEFT_EYE:\n");
			x += w / 2;
			w /= 2;
			h /= 2;
			break;
		case FACE_RIGHT_EYE:
			// Upper-left quadrant of the face.
			cdebug("case FACE_RIGHT_EYE:\n");
			w /= 2;
			h /= 2;
			break;
		case FACE_NOSE:
			// Middle horizontal band of the face.
			cdebug("case FACE_NOSE:\n");
			y += h / 3;
			h /= 2;
			break;
		case FACE_MOUTH:
		case FACE_SMILE:
			// Bottom third of the face.
			cdebug("case FACE_MOUTH:\n");
			y += h * 2 / 3;
			h /= 3;
			break;
		case FACE_EYES:
			cdebug("case FACE_EYES:\n");
			break;	// whole face
		case FACE_FACE:
			cdebug("case FACE_FACE:\n");
			break;	// whole face
		case FACE_NONE:
		default:
			break;	// whole face
		}

		dstRect->x = x;
		dstRect->y = y;
		dstRect->width = w;
		dstRect->height = h;
		return dstRect;
	}

    /**
     * Build a binary skin mask for `img` using an elliptical skin-color
     * model in the (rotated) CbCr plane of the YCrCb color space.
     *
     * @param img  input image; must have 3 or 4 channels, otherwise mask
     *             is zeroed and the function returns
     * @param mask output 8-bit single-channel image, same size as img:
     *             255 = skin pixel, 0 = non-skin
     */
    void OpenCVFacialDetectorBase::skinFilterByYCbCr(IplImage *img, IplImage *mask) {
        cdebug("%s ++++++\n", __func__);
        if (3 != img->nChannels && 4 != img->nChannels) {
            cvZero(mask);
            cdebug("%s -----\n", __func__);
            return ;
        }

        CvSize imageSize = cvSize(img->width, img->height);
        IplImage *imgY = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
        IplImage *imgCr = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);
        IplImage *imgCb = cvCreateImage(imageSize, IPL_DEPTH_8U, 1);

        IplImage *imgYCrCb = cvCreateImage(imageSize, img->depth, img->nChannels);
        cvCvtColor(img, imgYCrCb, CV_BGR2YCrCb);
        cvSplit(imgYCrCb, imgY, imgCr, imgCb, 0);

        cvSetZero(mask);

        // Ellipse center in the rotated plane and Cb/Cr cluster center.
        const double ecx = 1.60;
        const double ecy = 2.41;
        const double cx = 109.38;
        const double cy = 152.02;

        // FIX: iterate row by row via widthStep. IplImage rows are padded
        // to 4-byte alignment, so the previous flat width*height pointer
        // walk misread every image whose width is not a multiple of 4.
        for (int row = 0; row < img->height; row++) {
            unsigned char *pY = (unsigned char *) (imgY->imageData + row * imgY->widthStep);
            unsigned char *pCr = (unsigned char *) (imgCr->imageData + row * imgCr->widthStep);
            unsigned char *pCb = (unsigned char *) (imgCb->imageData + row * imgCb->widthStep);
            unsigned char *pMask = (unsigned char *) (mask->imageData + row * mask->widthStep);

            for (int col = 0; col < img->width; col++) {
                int y = pY[col];
                int cr = pCr[col];
                int cb = pCb[col];

                // Very dark pixels are never classified as skin.
                if (y < 20) {
                    pMask[col] = 0;
                    continue;
                }

                // Ellipse semi-axes; widened for very bright pixels.
                double a = 25.39;
                double b = 14.03;
                if (y > 230) {
                    a = 1.1 * a;
                    b = 1.1 * b;
                }

                double CB = double(cb) - cx;
                double CR = double(cr) - cy;

                // Rotate into the ellipse's principal axes.
                double x1 = -0.818734599 * CB + 0.574172148 * CR;
                double y1 = -0.574172148 * CB - 0.818734599 * CR;
                double value = ((((x1 - ecx) * (x1 - ecx))) / (a * a))
                             + ((y1 - ecy) * (y1 - ecy)) / (b * b);

                // Inside the ellipse (value <= 1) means skin color.
                pMask[col] = (value > 1) ? 0 : 255;
            }
        }

        cvReleaseImage(&imgY);
        cvReleaseImage(&imgCr);
        cvReleaseImage(&imgCb);
        cvReleaseImage(&imgYCrCb);
    }

// Returns the highest `neighbors` count seen for FACE_SMILE during the most
// recent facial_detect() call; -1 when no smile was found (facial_detect
// resets the value to -1 at the start of every call).
int OpenCVFacialDetectorBase::getSmileDegree() {
	return iSmileDegree;
}

// Fill `rect` with the axis-aligned rectangle spanned by points p1 and p2,
// order-independent: min corner plus absolute extents.
#define SET_RECT_VIA_2P(rect, p1, p2) do{\
                (rect).x = min((p1).x, (p2).x);\
                (rect).y = min((p1).y, (p2).y);\
                (rect).width = abs((p2).x - (p1).x);\
                (rect).height= abs((p2).y - (p1).y); } while(0)

    /**
     * Compute the intersection of r1 and r2 into joinRect; when the two
     * rectangles have no common area, joinRect is zeroed.
     * Returns a reference to joinRect.
     */
    static Rect& measureRects(const Rect &r1, const Rect &r2, Rect &joinRect) {
        cdebug("%s +++++\n", __func__);

        // Two rects overlap iff their centers are closer (on both axes)
        // than the sum of their half-extents.
        int dxCenters = abs((r1.x + r1.width / 2) - (r2.x + r2.width / 2));
        int dyCenters = abs((r1.y + r1.height / 2) - (r2.y + r2.height / 2));
        bool overlaps = (dxCenters < r1.width / 2 + r2.width / 2)
                     && (dyCenters < r1.height / 2 + r2.height / 2);

        if (overlaps) {
            Point topLeft(max(r1.x, r2.x), max(r1.y, r2.y));
            Point bottomRight(min(r1.x + r1.width, r2.x + r2.width),
                              min(r1.y + r1.height, r2.y + r2.height));
            SET_RECT_VIA_2P(joinRect, topLeft, bottomRight);
        } else {
            joinRect.x = joinRect.y = joinRect.width = joinRect.height = 0;
        }

        cdebug("%s -------\n", __func__);
        return joinRect;
    }

#define rectCmp(r1, r2) ((r1).width*(r1).height - (r2).width*(r2).height)
    /**
     * Remove rectangles that largely duplicate a bigger one.
     *
     * For every pair whose intersection covers more than a third of the
     * smaller rect's area, the smaller rect is marked, then all marked
     * entries are removed from input_seq.
     *
     * @param input_seq sequence of CvRect, modified in place
     * @return input_seq (for chaining)
     */
    CvSeq *OpenCVFacialDetectorBase::removeDuplicate(CvSeq *input_seq) {
        cdebug("%s +++++\n", __func__);
        vector<Rect> vRects;
        for (int i = 0; i < input_seq->total; i++) {
            CvRect *cvRect = (CvRect *) cvGetSeqElem(input_seq, i);
            vRects.push_back(Rect(cvRect->x, cvRect->y, cvRect->width, cvRect->height));
        }

        // Mark the smaller rect of every heavily-overlapping pair; a set
        // keeps the marked indices unique and sorted.
        std::set<int> toDelete;
        for (size_t i = 0; i < vRects.size(); i++) {
            for (size_t j = i + 1; j < vRects.size(); j++) {
                Rect joinRect;
                measureRects(vRects[i], vRects[j], joinRect);
                size_t smaller = (rectCmp(vRects[i], vRects[j]) < 0) ? i : j;
                if (joinRect.width * joinRect.height >
                        (vRects[smaller].width * vRects[smaller].height * 1 / 3)) {
                    toDelete.insert((int) smaller);
                }
            }
        }

        // FIX: remove from highest index to lowest. cvSeqRemove shifts all
        // later elements down by one, so the previous ascending removal of
        // (possibly duplicated) raw indices deleted the wrong rectangles.
        for (std::set<int>::reverse_iterator it = toDelete.rbegin();
                it != toDelete.rend(); ++it) {
            cvSeqRemove(input_seq, *it);
        }

        cdebug("%s -------\n", __func__);
        return input_seq;
    }

}
