
#pragma comment(lib, "opencv_core246d.lib")
#pragma comment(lib, "opencv_imgproc246d.lib")
#pragma comment(lib, "opencv_highgui246d.lib")
#pragma comment(lib, "opencv_ml246d.lib")
#pragma comment(lib, "opencv_video246d.lib")
#pragma comment(lib, "opencv_features2d246d.lib")
#pragma comment(lib, "opencv_calib3d246d.lib")


#include <stdio.h>
#include <stdlib.h>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include "BlobLabeling.h"


// File-scope state shared by the capture / labeling pipeline below.
IplImage        *image = NULL;       // current color frame retrieved from the capture
IplImage        *gray = NULL;        // grayscale working copy of `image`
IplImage        *labeled = NULL;     // 3-channel visualization / warp destination
IplImage        *pCaptureImg = NULL; // frame fetched inside DrawingVideotoBlob
CvCapture       *pCapture = NULL;    // AVI capture handle opened in main()
CBlobLabeling   blob;                // top-level blob labeling engine (project class)


void FindFourCorner(CvPoint *pCorner, CvSeq *pContours)
{
    int x;
    double fMaxDist;

    for( x=0; x < 4; x++ )
        pCorner[x] = cvPoint(0, 0);

    // 초기 위치 설정
    CvPoint *st  = (CvPoint *)cvGetSeqElem( pContours, 0 );

    // 첫 번 째 꼭지점 추출(최대 거리를 가지는 점 선택)
    fMaxDist = 0.0;

    for( x = 1; x < pContours->total; x++ )
    {
        CvPoint* pt = (CvPoint *)cvGetSeqElem( pContours, x );

        double fDist = sqrt( (double)(( st->x - pt->x ) * ( st->x - pt->x ) + ( st->y - pt->y ) * ( st->y - pt->y ) ));

        if( fDist > fMaxDist )
        {
            pCorner[0] = *pt;

            fMaxDist = fDist;
        }
    }

    // 두 번 째 꼭지점 추출(첫 번 째 꼭지점에서 최대 거리를 가지는 점 선택)
    fMaxDist = 0.0;

    for( x = 0; x < pContours->total; x++ )
    {
        CvPoint* pt = (CvPoint *)cvGetSeqElem( pContours, x );

        double fDist = sqrt( (double)(( pCorner[0].x - pt->x ) * ( pCorner[0].x - pt->x ) + ( pCorner[0].y - pt->y ) * ( pCorner[0].y - pt->y ) ));

        if( fDist > fMaxDist )
        {
            pCorner[1] = *pt;

            fMaxDist = fDist;
        }
    }

    // 세 번 째 꼭지점 추출(첫 번 째, 두 번 째 꼭지점에서 최대 거리를 가지는 점 선택)
    fMaxDist = 0.0;

    for( x = 0; x < pContours->total; x++ )
    {
        CvPoint* pt = (CvPoint *)cvGetSeqElem( pContours, x );

        double fDist =      sqrt( (double)(( pCorner[0].x - pt->x ) * ( pCorner[0].x - pt->x ) + ( pCorner[0].y - pt->y ) * ( pCorner[0].y - pt->y ) ))
                        +   sqrt( (double)(( pCorner[1].x - pt->x ) * ( pCorner[1].x - pt->x ) + ( pCorner[1].y - pt->y ) * ( pCorner[1].y - pt->y ) ));

        if( fDist > fMaxDist )
        {
            pCorner[2] = *pt;

            fMaxDist = fDist;
        }
    }

    // 네 번 째 꼭지점 추출
    // (벡터 내적을 이용하여 좌표평면에서 사각형의 너비의 최대 값을 구한다.)
    //                                                       thanks to 송성원
    int x1 = pCorner[0].x;   int y1 = pCorner[0].y;
    int x2 = pCorner[1].x;   int y2 = pCorner[1].y;
    int x3 = pCorner[2].x;   int y3 = pCorner[2].y;

    int nMaxDim = 0;

    for( x = 0; x < pContours->total; x++ )
    {
        CvPoint* pt = (CvPoint *)cvGetSeqElem( pContours, x );

        int x = pt->x;
        int y = pt->y;

        int nDim =      abs( ( x1 * y2 + x2 * y  + x  * y1 ) - ( x2 * y1 + x  * y2 + x1 * y  ) )
                    +   abs( ( x1 * y  + x  * y3 + x3 * y1 ) - ( x  * y1 + x3 * y  + x1 * y3 ) )
                    +   abs( ( x  * y2 + x2 * y3 + x3 * y  ) - ( x2 * y  + x3 * y2 + x  * y3 ) );

        if( nDim > nMaxDim )
        {
            pCorner[3] = *pt;

            nMaxDim = nDim;
        }
    }
}


/*
 * DrawingVideotoBlob
 *
 * For up to 500 frames, warps the video frame from the global `pCapture`
 * into the quadrilateral described by the four detected corners of blob
 * `i`, writing the result into the global `labeled` image and showing it.
 *
 *  i       : index into blob.m_recBlobs selecting the target blob
 *  pCorner : the blob's four corner points (blob-local coordinates);
 *            reordered IN PLACE by this function.
 *
 * NOTE(review): `labeled` must already be allocated by the caller, and the
 * frame returned by cvRetrieveFrame must not be released — TODO confirm
 * both hold on every call path.
 */
void DrawingVideotoBlob(int i, CvPoint *pCorner)
{
    int nCount = 500;

    while(nCount >= 0)
    {
        nCount--;

        if(cvGrabFrame(pCapture))
        {
			CvPoint tmp;
			int f;
			CvPoint2D32f ptSource[4], ptPrespective[4];

			pCaptureImg = cvRetrieveFrame(pCapture);


			// Source quad: the four corner points of the captured frame.
			ptSource[0]	= cvPoint2D32f(0, 0);
			ptSource[1]	= cvPoint2D32f(pCaptureImg->width, 0);
			ptSource[2]	= cvPoint2D32f(pCaptureImg->width, pCaptureImg->height);
			ptSource[3]	= cvPoint2D32f(0, pCaptureImg->height);

			// Sort the destination corners into LEFT-TOP, RIGHT-TOP,
			// RIGHT-BOTTOM, LEFT-BOTTOM order: first a selection sort by y ...
			for( f=0; f < 3; f++ )
            {
    			for( int g=f+1; g < 4; g++ )
    			{
    				if( pCorner[f].y > pCorner[g].y )
    				{
    					tmp			= pCorner[f];
    					pCorner[f]	= pCorner[g];
    					pCorner[g]	= tmp;
    				}
    			}
            }

			// ... then order the top pair left-to-right ...
			if( pCorner[0].x > pCorner[1].x )
			{
				tmp			= pCorner[1];
				pCorner[1]	= pCorner[0];
				pCorner[0]	= tmp;
			}

			// ... and the bottom pair left-to-right (index 2 is the right one).
			if( pCorner[3].x > pCorner[2].x )
			{
				tmp			= pCorner[3];
				pCorner[3]	= pCorner[2];
				pCorner[2]	= tmp;
			}

			// Destination quad: the detected corners translated by the
			// blob's bounding-box origin into full-image coordinates.
			for( f=0; f < 4; f++ )
                ptPrespective[f] = cvPoint2D32f(pCorner[f].x + blob.m_recBlobs[i].x, pCorner[f].y + blob.m_recBlobs[i].y );

			float newm[9];
			CvMat mat = cvMat( 3, 3, CV_32F, newm );  // 3x3 transform matrix backed by newm

			cvWarpPerspectiveQMatrix(ptSource, ptPrespective, &mat);  // compute the perspective transform

			////////////////////////////////////////////////////
			// Warp the frame into the quadrilateral on `labeled`.
			cvWarpPerspective(pCaptureImg, labeled, &mat, CV_INTER_LINEAR );

            cvShowImage("Gray image-2", labeled);

            cvWaitKey(1);
		}
    }
}


void DrawingContourLine(int i, CvPoint *pCorner)
{
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, .5, .5, 0, 1, 8);

    char text[100];

    // 모서리를 잇는 직선(BLUE)
    for( int m=0;   m < 3; m++ )
    for( int n=m+1; n < 4; n++ )
    {
        int x1 = pCorner[m].x + blob.m_recBlobs[i].x;
        int y1 = pCorner[m].y + blob.m_recBlobs[i].y;
        int x2 = pCorner[n].x + blob.m_recBlobs[i].x;
        int y2 = pCorner[n].y + blob.m_recBlobs[i].y;

        cvLine( image, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB( 0, 0, 255), 1 );
    }

    for( int m=0; m < 4; m++ )
    {
        int x = pCorner[m].x + blob.m_recBlobs[i].x;
        int y = pCorner[m].y + blob.m_recBlobs[i].y;

        cvCircle( image, cvPoint(x, y), 2, CV_RGB(0,0,255), 2 );

        sprintf(text, "(%d, %d)", x, y );

        x += 5; // 5픽셀 밀어서 출력
        cvPutText (image, text, cvPoint(x, y), &font, CV_RGB(255,0,255));
    }
}

//#define MAX(a, b)   ((a>=b) ? a : b)

// Hough accumulator threshold; adjusted live via the "T" trackbar in FindCircle.
int threshold = 100;

/*
 * FindCircle
 *
 * Per-frame feature visualization: clones and smooths the gray frame,
 * runs Canny + the standard Hough line transform, and draws the detected
 * lines in red on the global `image`.  The `#if 0` branch is the circle
 * variant (cvHoughCircles drawn on the gray clone).
 *
 *  pGray : single-channel input frame; not modified (work uses a clone).
 *
 * Fixes vs. the original: the cvCreateMemStorage pool and the previous
 * frame's cloned image were both leaked on every call; both are now freed.
 */
void FindCircle(IplImage *pGray)
{
    static IplImage     *pTmpGray = NULL;
    CvMemStorage        *pStorage = cvCreateMemStorage(0);
    CvSeq               *pCircleSeq = NULL;
    CvSeq               *pLineSeq = NULL;
    int                 i = 0;

    /* Effectively no-ops after the first frame. */
    cvNamedWindow("circle", 0);

    cvCreateTrackbar("T", "circle", &threshold, 255, NULL);

    /* Release last frame's clone before taking a new one (was a leak). */
    if(pTmpGray != NULL)
        cvReleaseImage(&pTmpGray);
    pTmpGray = cvCloneImage(pGray);

    cvSmooth(pTmpGray, pTmpGray, CV_GAUSSIAN, 11, 11, 0, 1);

    #if 0
        /* Circle variant: Hough gradient circles drawn on the gray clone. */
        pCircleSeq = cvHoughCircles(pTmpGray, pStorage, CV_HOUGH_GRADIENT, 1, 100, 20, 50, 10, MAX(pGray->width, pGray->height));

        for(i=0 ; i<pCircleSeq->total ; i++)
        {
            float   *pPoint = (float*)cvGetSeqElem(pCircleSeq, i);

            cvCircle(pTmpGray, cvPoint(cvRound(pPoint[0]), cvRound(pPoint[1])), 3, CV_RGB(0, 255, 0), -1, 8, 0);
            cvCircle(pTmpGray, cvPoint(cvRound(pPoint[0]), cvRound(pPoint[1])), cvRound(pPoint[2]), CV_RGB(255, 0, 0), 3, 8, 0);
        }
    #else
        /* Line variant: Canny edges, then rho/theta Hough lines. */
        cvCanny( pTmpGray, pTmpGray, 50, 200, 3 );
        pLineSeq = cvHoughLines2( pTmpGray, pStorage, CV_HOUGH_STANDARD, 1, CV_PI/180, threshold, 0, 0 );

        /* Draw at most 100 lines, each extended 1000 px on either side. */
        for( int i = 0; i < MIN(pLineSeq->total,100); i++ )
        {
            float* line = (float*)cvGetSeqElem(pLineSeq, i);
            float rho = line[0];
            float theta = line[1];
            CvPoint pt1, pt2;
            double a = cos(theta), b = sin(theta);
            double x0 = a*rho, y0 = b*rho;
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
            cvLine( image, pt1, pt2, CV_RGB(255,0,0), 1, 1 );
        }
    #endif

    cvShowImage("circle", image);
    cvShowImage("original", pTmpGray);

    /* The sequences live inside pStorage; nothing needs them past here
     * (cvShowImage has already copied the pixels), so free it (was a leak). */
    cvReleaseMemStorage(&pStorage);

    cvWaitKey(1);
}

/*
 * Entry point: opens a hard-coded AVI, and for every frame converts it to
 * gray and runs the Hough-line visualization (FindCircle).  The blob
 * labeling / marker corner-detection pipeline below is compiled out with
 * `#if 0` (the matching `#endif` is just before the loop's closing brace).
 *
 * NOTE(review): `void main` is nonstandard (should be `int main`), and
 * neither pCapture nor the retrieved frame is NULL-checked — a missing
 * file crashes in cvCvtColor.
 */
void main(void)
{
	//image = cvLoadImage("D:\\Download\\DSC_0933_1.jpg", CV_LOAD_IMAGE_COLOR);

    pCapture = cvCaptureFromAVI("D:\\Download\\Cars.2006.XviD.AC3.2AUDIO.CD2-WAF.avi");

	//gray = cvCreateImage(cvGetSize(image), 8, 1);
	//cvCvtColor(image, gray, CV_RGB2GRAY);
	//cvShowImage("Gray image-0", gray);
	//cvThreshold(gray, gray, 0.0, 255.0, CV_THRESH_BINARY_INV | CV_THRESH_OTSU);

    while(1)
    {
        cvGrabFrame(pCapture);

        // cvRetrieveFrame returns an internal buffer; it must not be released.
        image = cvRetrieveFrame(pCapture);

		int i = 0;

        // Lazily allocate the gray buffer once, then reuse it every frame.
        if(NULL == gray)
            gray = cvCreateImage(cvGetSize(image), 8, 1);
        cvCvtColor(image, gray, CV_RGB2GRAY);

        FindCircle(gray);
#if 0
        // ---- disabled: blob labeling / marker corner-detection pipeline ----
        cvThreshold(gray, gray, 0.0, 255.0, CV_THRESH_BINARY_INV | CV_THRESH_OTSU);

        if(NULL == labeled)
    		labeled = cvCreateImage(cvGetSize(image), 8, 3);
		cvCvtColor(gray, labeled, CV_GRAY2BGR);

		blob.SetParam(gray, 100);
		blob.DoLabeling();

		// De-Noising: keep blobs between 30x30 and 80% of the frame.
		{
			int nMaxWidth = gray->width * 8 / 10;
			int nMaxHeight = gray->height * 8 / 10;

			blob.BlobSmallSizeConstraint(30, 30);
			blob.BlobBigSizeConstraint(nMaxWidth, nMaxHeight);
		}

		for(i=0 ; i<blob.m_nBlobs ; i++)
		{
			CvPoint pt1 = cvPoint(blob.m_recBlobs[i].x,
									blob.m_recBlobs[i].y);
			CvPoint pt2 = cvPoint(pt1.x + blob.m_recBlobs[i].width,
									pt1.y + blob.m_recBlobs[i].height);

            {
                int                 j = 0;
                IplImage            *sub_gray = NULL;
    			CBlobLabeling       inner;
    			int nSubMinWidth	= 0;
    			int nSubMinHeight	= 0;
    			int nSubMaxWidth	= 0;
    			int nSubMaxHeight	= 0;
                int Flag = 0;

    			// Set the image ROI to the current blob's bounding box
    			cvSetImageROI( gray, blob.m_recBlobs[i] );

    			sub_gray = cvCreateImage( cvSize( blob.m_recBlobs[i].width, blob.m_recBlobs[i].height ), 8, 1 );

    			cvThreshold( gray, sub_gray, 1, 255, CV_THRESH_BINARY_INV );

    			// Reset the ROI
    			cvResetImageROI( gray );

    			////////////////////////////
    			// Labeling (inner blobs within this blob)
    			inner.SetParam( sub_gray, 100 );

    			inner.DoLabeling();

    			nSubMinWidth	= sub_gray->width * 2 / 10;
    			nSubMinHeight	= sub_gray->height * 2 / 10;
    			nSubMaxWidth	= sub_gray->width * 9 / 10;
    			nSubMaxHeight	= sub_gray->height * 9 / 10;

    			inner.BlobSmallSizeConstraint( nSubMinWidth, nSubMinHeight );
    			inner.BlobBigSizeConstraint( nSubMaxWidth, nSubMaxHeight );

    			for(j=0; j < inner.m_nBlobs; j++ )
    			{
    				int nThick = 5;

    				// Skip inner blobs closer than nThick px to the sub-image border.
    				if(		inner.m_recBlobs[j].x < nThick
    					||	inner.m_recBlobs[j].y < nThick
    					||	( sub_gray->width - (inner.m_recBlobs[j].x + inner.m_recBlobs[j].width) ) < nThick
    					||	( sub_gray->height - (inner.m_recBlobs[j].y + inner.m_recBlobs[j].height) ) < nThick )	continue;

    				CvPoint	s_pt1 = cvPoint(	pt1.x + inner.m_recBlobs[j].x,
    											pt1.y + inner.m_recBlobs[j].y );
    				CvPoint s_pt2 = cvPoint(	s_pt1.x + inner.m_recBlobs[j].width,
    											s_pt1.y + inner.m_recBlobs[j].height );

    				// green
    				CvScalar green = cvScalar( 0, 255, 0 );
                    CvScalar yellow = cvScalar( 255, 255, 0 );

    				cvDrawRect( image, s_pt1, s_pt2, green, 2 );

                    Flag = 1;
    			}

    			cvReleaseImage(&sub_gray);

                // At least one inner blob survived: treat this as a marker candidate.
                if(1 == Flag)
                {
                    CvMemStorage    *pStorage = cvCreateMemStorage(0);
                    CvSeq           *pContours = 0;
                    int             nTmpWidth = blob.m_recBlobs[i].width;
                    int             nTmpHeight = blob.m_recBlobs[i].height;
                    IplImage        *pMarker = cvCreateImage(cvSize(nTmpWidth, nTmpHeight), 8, 1);
    				CvScalar        green = cvScalar( 0, 255, 0 );
                    CvScalar        yellow = cvScalar( 255, 255, 0 );

                    blob.GetBlobImage(pMarker, i);

                    cvFindContours(pMarker, pStorage, &pContours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

                    cvDrawContours(image, pContours, yellow, green, -1, 1, CV_AA, cvPoint(blob.m_recBlobs[i].x, blob.m_recBlobs[i].y));

                    {
           				CvPoint		corner[4];

        				FindFourCorner(corner, pContours);

                        DrawingContourLine(i, corner);

                        //DrawingVideotoBlob(i, corner);

                        cvReleaseMemStorage(&pStorage);
                        cvReleaseImage(&pMarker);
                    }
                }
    		}

			//CvScalar color = cvScalar(0, 0, 255);

			//cvDrawRect(labeled, pt1, pt2, color, 2);
		}

		cvShowImage("Gray image-1", image);
		cvShowImage("Gray image-2", labeled);

        cvReleaseImage(&labeled);
        cvReleaseImage(&gray);

      	cvWaitKey(1);
      #endif
	}

	cvWaitKey(0);

	// NOTE(review): unreachable after while(1); `image` also came from
	// cvRetrieveFrame and should not be released here.
	cvReleaseImage(&image);
	cvReleaseImage(&gray);


	return;
}

