import com.sun.jna.Pointer;

import name.audet.samuel.javacv.*;
import static name.audet.samuel.javacv.jna.cxcore.*;
import static name.audet.samuel.javacv.jna.cv.*;

/**
 * Minimal JavaCV webcam demo: grabs frames from camera 0, runs a Haar-cascade
 * face detector on a grayscale copy, draws a red rectangle around each hit,
 * and shows the result in a CanvasFrame until the window is closed.
 *
 * Usage: java Test2 [path-to-haarcascade-xml]
 */
public class Test2 {
    public static void main(String[] args) throws Exception {
        // Cascade path is overridable via the first command-line argument.
        String cascadeName = args.length > 0 ? args[0]
                : "/Users/sami/opencv/opencv/data/haarcascades/haarcascade_frontalface_default.xml";

        // Redirect native OpenCV errors into Java exceptions so the JVM does
        // not simply crash with no warning on an OpenCV error.
        // JavaCvErrorCallback may be subclassed for finer control of exceptions.
        new JavaCvErrorCallback().redirectError();

        // CanvasFrame is a JFrame containing a hardware-accelerated Canvas.
        // It can also switch into full-screen mode when called with a screenNumber.
        CanvasFrame frame = new CanvasFrame("Some Title");
        frame.setCanvasSize(300, 300);

        // OpenCVFrameGrabber uses highgui; other more versatile FrameGrabbers
        // include DC1394FrameGrabber, FlyCaptureFrameGrabber, and FFmpegFrameGrabber.
        FrameGrabber grabber = new OpenCVFrameGrabber(0);
        grabber.start();

        try {
            // Grab one frame up front so we can size the grayscale work buffer.
            IplImage grabbedImage = grabber.grab();
            IplImage grayImage = IplImage.create(
                    grabbedImage.width, grabbedImage.height, IPL_DEPTH_8U, 1);

            // We can "cast" the Pointer returned by cvLoad by wrapping it in an
            // instance of the desired class.
            CvHaarClassifierCascade cascade =
                    new CvHaarClassifierCascade(cvLoad(cascadeName));

            // Objects allocated with a create*() or clone() factory method are
            // automatically garbage collected, but may also explicitly be freed
            // with the release() method.
            CvMemStorage storage = CvMemStorage.create();

            while (frame.isVisible() && (grabbedImage = grabber.grab()) != null) {
                // The Haar detector needs a grayscale image.
                cvCvtColor(grabbedImage, grayImage, CV_BGR2GRAY);
                CvSeq faces = cvHaarDetectObjects(grayImage, cascade, storage,
                        1.1, 3, 0 /*CV_HAAR_DO_CANNY_PRUNING*/);
                for (int i = 0; i < faces.total; i++) {
                    CvRect r = new CvRect(cvGetSeqElem(faces, i));
                    cvRectangle(grabbedImage, cvPoint(r.x, r.y),
                            cvPoint(r.x + r.width, r.y + r.height),
                            CvScalar.RED, 1, CV_AA, 0);
                }
                frame.showImage(grabbedImage);

                // BUG FIX: this call was commented out. Without clearing, each
                // frame's detection results accumulate in the native storage
                // and memory grows without bound for as long as the loop runs.
                cvClearMemStorage(storage);
            }
        } finally {
            // BUG FIX: previously commented out, leaking the capture device
            // and window. The finally block also covers exceptions mid-loop.
            grabber.stop();
            frame.dispose();
        }
    }
}