/*=+--+=#=+--     Unmanned Aerial System Management Software      --+=#=+--+=#*\
|          Copyright (C) 2011 Regents of the University of Colorado.           |
|                             All Rights Reserved.                             |

     This program is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License version 2 as
     published by the Free Software Foundation.

     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.

     You should have received a copy of the GNU General Public License
     along with this program.  If not, see <http://www.gnu.org/licenses/>.

            Jack Elston                       Cory Dixon                        
|           elstonj@colorado.edu              dixonc@colorado.edu              |
|                                                                              |
\*=+--+=#=+--                 --+=#=+--+=#=+--                    --+=#=+--+=#*/

/***********************************************************************
*
* FILENAME:
* camera.cxx
*
* PURPOSE:
*  Provide video positioning system
*
* CREATED:
* 05/2008 by Cory Dixon
*
***********************************************************************/

#include <iostream>

#include <errno.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <assert.h>
#include <malloc.h>

#include <dirent.h>
#include <unistd.h>

#include "camera.h"

using namespace std;

inline void enumerate_menu (int fd, struct v4l2_queryctrl &queryctrl);
inline int camdev_size_set(int val, int min, int max, char *s);
inline int xioctl(int fd, int request, void * arg);
inline int doesDirExist(char* path);
/*String timeStr( char *format = "%a-%b-%d-%Y_%H-%M-%S" );*/


Camera::Camera( unsigned int myId, const char *dev, unsigned int w, unsigned int h ) : 
   devName(dev)
{
	// Construct a camera bound to V4L2 device node 'dev'. No device I/O is
	// performed here; callers must invoke open() and init() before capturing.
	// A width/height of 0 means "use the driver maximum" (resolved in init()).
	id = myId;
	fd = -1;            // device not open yet

	width = w;
	height = h;

	frameCount = 0;
	frame_ind = 0;

	// driver-reported size limits; filled in by set_defaultCrop()
	maxwidth = 0;
	minwidth = 0;
	maxheight = 0;
	minheight = 0;

	buffers = NULL;
	n_buffers = 0;

	inputMethod = INPUT_METHOD_MMAP;
	buf_type    = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	video_std   = V4L2_STD_NTSC_M;

	useCam = true;
	saveVideo = false;

	ts_last.stamp();
	ts_fps.stamp();

	fps   = 0;
	fps_d = 0;

	// image planes; allocated lazily in set_size() / the processing code
	img_bgr    = NULL;
	img_rgb    = NULL;
	gdkImg     = NULL;
	img_bg_acc = NULL;  // BUGFIX: was redundantly assigned NULL twice
	img_bg     = NULL;
	img_fg     = NULL;
	img_gray   = NULL;
	fg_binary  = NULL;
	img_bg_mat = NULL;
	useBackgroundSub = false;
	getBackgroundImg = false;

	bin_thresh = 75;
	led_thresh = 250;
	dilate     = 2;

	video_writer = NULL;

#ifdef USE_FFMPEG_SWS
	img_convert_ctx = NULL;
#endif

	deinterlaceVideo = false;
	undistortVideo   = true;

	// camera calibration data; allocated on the first set*() call
	intrinsic    = NULL;
	distortion   = NULL;
	translation  = NULL;
	rotation     = NULL;

	img_deint = NULL; // most recent deinterlaced image
	img_frame = NULL; // most recent image in buffer with YUV format
}

void Camera::setIntrinsic( float intrin[] )
{
	if( !intrinsic ) {
		intrinsic = cvCreateMat( 3, 3, CV_32FC1 );
	}

	//cout << "Intrinsic coef vector: ";
	for(int i=0; i<9; i++) {
		intrinsic->data.fl[i] = intrin[i];
		//cout << intrinsic->data.fl[i] << " ";
	}
	//cout << endl;
}
void Camera::setDistortion( float dist[] )
{
	if( !distortion )
		distortion = cvCreateMat( 4, 1, CV_32FC1 );

	//cout << "Distortion coef vector: ";
	for(int i=0; i<4; i++) {
		distortion->data.fl[i] = dist[i];
		//cout << distortion->data.fl[i] << " ";
	}
	//cout << endl;
}
void Camera::setTranslation( float t[] )
{
	if( !translation )
		translation = cvCreateMat( 3, 1, CV_32FC1 );

	//cout << "Translation vector: ";
	for(int i=0; i<3; i++) {
		translation->data.fl[i] = t[i];
		//cout << translation->data.fl[i] << " ";
	}
	//cout << endl;
}
void Camera::setRotation( float rot[] )
{
	if( !rotation )
		rotation = cvCreateMat( 3, 1, CV_32FC1 );

	//cout << "Rodrigues Rotation: ";
	for(int i=0; i<3; i++) {
		rotation->data.fl[i] = rot[i];
		//cout << rotation->data.fl[i] << " ";
	}
	//cout << endl;
}

Camera::~Camera( )
{
	// Release everything this object owns: the device fd, the video writer,
	// the capture buffers, the ffmpeg frames/contexts, the OpenCV images
	// and the calibration matrices.
	if( fd >= 0 )
		close();

	if( video_writer != NULL ) 
		cvReleaseVideoWriter(&video_writer);

	free_buffers();

	if( img_deint != NULL )
		av_free(img_deint);

	if( img_frame != NULL )
		av_free(img_frame);

#ifdef USE_FFMPEG_SWS
	if( img_convert_ctx != NULL )
		sws_freeContext(img_convert_ctx);
#endif

	if( img_bgr != NULL )
		cvReleaseImage( &img_bgr );
	// BUGFIX: img_rgb (allocated in set_size) was previously never
	// released here, leaking one full frame per Camera instance.
	if( img_rgb != NULL )
		cvReleaseImage( &img_rgb );
	if( gdkImg )
		gdk_pixbuf_unref( gdkImg );
	if( img_bg_acc != NULL )
		cvReleaseImage( &img_bg_acc );
	if( img_bg != NULL )
		cvReleaseImage( &img_bg );
	if( img_fg != NULL )
		cvReleaseImage( &img_fg );
	if( fg_binary != NULL )
		cvReleaseImage( &fg_binary );
	if( img_gray != NULL )
		cvReleaseImage( &img_gray );

	if( intrinsic != NULL )
		cvReleaseMat(&intrinsic);
	if( distortion != NULL )
		cvReleaseMat(&distortion);
	if( translation != NULL )
		cvReleaseMat(&translation);
	if( rotation != NULL )
		cvReleaseMat(&rotation);
}

void Camera::free_buffers()
{
	// Release the capture buffers according to the I/O method in use, then
	// reset the bookkeeping so repeated calls are safe -- init() calls this
	// before re-allocating and the destructor calls it again afterwards.
	if( buffers != NULL ) {
		unsigned int i;
		switch (inputMethod) {
			case INPUT_METHOD_READ:
				free (buffers[0].start);
				break;

			case INPUT_METHOD_MMAP:
				for (i = 0; i < n_buffers; ++i)
					if (-1 == munmap (buffers[i].start, buffers[i].length))
						perror("munmap");
				break;

			case INPUT_METHOD_USERPTR:
				for (i = 0; i < n_buffers; ++i)
					free (buffers[i].start);
				break;
		}

		free(buffers);
		// BUGFIX: leaving stale pointers here caused a double free/munmap
		// when free_buffers() ran more than once.
		buffers = NULL;
		n_buffers = 0;
	}
}

int Camera::open(const char* dev)
{
	struct stat st; 

	if( dev != NULL )
		devName = dev;

	cout << "Opening camera " << id << ": " << devName << " ==>" << endl;

	if (-1 == stat (devName.c_str(), &st)) {
		printf ("\tCannot identify camera: %s\n", strerror(errno));
		return -1;
	}

	if (!S_ISCHR (st.st_mode)) {
		printf ("\tis not a device\n");
		return -1;
	}

	//fd = ::open(devName, O_RDWR | O_NONBLOCK, 0);
	fd = ::open(devName.c_str(), O_RDWR, 0);

	if (fd < 0) {
		printf("\tUnable to open (%s)\n", strerror(errno));
		return -1;
	}

	return fd;
}

int Camera::close()
{
	// Close the underlying device if it is open; always resets fd to -1.
	// Returns ::close()'s result, or -1 when the device was not open.
	const bool wasOpen = (fd >= 0);
	int result = -1;
	if( wasOpen )
		result = ::close(fd);

	printf("\tClosed camera %i\n", id);
	fd = -1;
	return result;
}

int Camera::init()
{
	// (Re)initialize the capture pipeline: force NTSC-M on input channel 0,
	// reset cropping, free any previously allocated buffers and allocate new
	// ones for the configured I/O method. Always returns 1; individual ioctl
	// failures are only logged.

	// first we need to guarantee the state of the camera
	video_std = V4L2_STD_NTSC_M;
	if( xioctl(fd, VIDIOC_S_STD, &video_std) < 0)
		printf("ioctl(VIDIOC_S_STD) (set video standard) failed: %s\n", strerror(errno));

	channel = 0;
	if( xioctl(fd, VIDIOC_S_INPUT, &channel) < 0)
		printf("ioctl(VIDIOC_S_INPUT) (video channel select) failed: %s\n", strerror(errno));

	set_fps(29.97); // doesn't matter for bttv
	set_defaultCrop();

	// if we have been called before, then we need to free our buffers
	free_buffers();

	// width/height of 0 means "use the driver maximum" discovered by
	// set_defaultCrop() above
	if( width == 0 || height == 0 )
		set_size(maxwidth, maxheight);

	switch (inputMethod) {
		case INPUT_METHOD_READ:
			init_read ();
			break;

		case INPUT_METHOD_MMAP:
			init_mmap ();
			break;

		case INPUT_METHOD_USERPTR:
			init_userp ();
			break;
	}

	// init font for drawing on windows
	double fontScale = 0.5, fontShear = 0;
	int fontThickness = 1, lineType = 8; //CV_AA;
	cvInitFont( &font , CV_FONT_VECTOR0, fontScale, fontScale, fontShear, fontThickness, lineType );

	return 1;
}

int Camera::set_defaultCrop()
{
	// Query the driver's cropping capabilities, record the maximum capture
	// size (maxwidth/maxheight) and reset the crop rectangle to the driver
	// default. Returns 1 on success, -1 on any ioctl failure.

	/* Select video input, video standard and tune here. */
	CLEAR (cropcap);
	cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	if (0 == xioctl (fd, VIDIOC_CROPCAP, &cropcap)) {

		//maxwidth = cropcap.bounds.width - cropcap.bounds.left;
		//maxheight = cropcap.bounds.height - cropcap.bounds.top;
		maxwidth = cropcap.bounds.width;
		maxheight = cropcap.bounds.height;

/*
		printf("left=%i width=%i top=%i height=%i\n",  cropcap.bounds.left, cropcap.bounds.width,  
				cropcap.bounds.top, cropcap.bounds.height);
		printf("maxW=%i maxH=%i\n", cropcap.bounds.width - cropcap.bounds.left, 
			cropcap.bounds.height - cropcap.bounds.top);
*/

		crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		crop.c = cropcap.defrect; /* reset to default rectangle */
		if (-1 == xioctl (fd, VIDIOC_S_CROP, &crop)) {
			switch (errno) {
			case EINVAL:
				/* Cropping not supported. */
				printf("!!!! Camera::set_defaultCrop - Cropping not supported !!!!\n");
				break;
			default:
				printf("!!!! Camera::set_defaultCrop - unknown cropping error !!!!\n");
				/* Errors ignored. */
				break;
			}
			return -1;
		}
	} else {	
		printf("Camera::set_defaultCrop - ioctl(VIDIOC_CROPCAP) (get cropping capabilities) failed: %s\n", strerror(errno));
		return -1;
	}
	return 1;
}

// note: the bttv driver doesn't do anything with the FPS; this value is
// currently only used when saving the video file
int Camera::set_fps(float f)
{
	// Record the desired frame rate; the value is consumed when a video
	// file is written, not by the capture driver itself.
	fps_d = f;
	return 1;
}

int Camera::set_size(int w, int h)
{
	// Set the capture resolution (0,0 selects the driver maximum), program
	// the pixel format into the driver, and (re)allocate every size-dependent
	// resource: the ffmpeg frames, the BGR/RGB IplImages, the GdkPixbuf view
	// and (optionally) the libswscale conversion context.
	// Returns 1 on success, -1 if VIDIOC_S_FMT fails.

	if( maxwidth == 0 )
		set_defaultCrop();

	if( w == 0 || h == 0 ) {
		width = maxwidth;
		height = maxheight;
	} else {
		width = w;
		height = h;
	}

	// with new width and height, we need to re-initialize the camera
	//init();

	CLEAR (fmt);
	fmt.type			    = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width       = width; 
	fmt.fmt.pix.height      = height;
	fmt.fmt.pix.pixelformat = PIXFMT_V4L2; 
	fmt.fmt.pix.field       = V4L2_FIELD_ANY; //V4L2_FIELD_INTERLACED;

	if (-1 == xioctl (fd, VIDIOC_S_FMT, &fmt)) {
		printf("ioctl(VIDIOC_S_FMT) (set video format) failed: %s\n", strerror(errno));
		return -1;
	}

	// driver-computed size in bytes of one captured frame
	buffer_size = fmt.fmt.pix.sizeimage;

	if( img_deint != NULL )
		av_free(img_deint);
	img_deint = avcodec_alloc_frame();

	if( img_frame != NULL )
		av_free(img_frame);
	img_frame = avcodec_alloc_frame();


/*
	if( deint_buffer != NULL ) 
		free(deint_buffer);
	// we add four bytes to because for some reason the MMX decode operation
	// has a 4 byte alignment issue. It is not a problem, but this just adds
	// a safe byte alignment
	deint_buffer = (uint8_t*)malloc(buffer_size + 4); 
	tmp = avpicture_fill((AVPicture *)img_deint, deint_buffer, PIXFMT_AV, width, height);
	if(tmp < 0 )
		printf("avpicture_fill: error filling dst\n");
*/

	if( img_bgr != NULL )
		cvReleaseImage( &img_bgr );
	img_bgr = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	if( img_rgb != NULL )
		cvReleaseImage( &img_rgb );
	img_rgb = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	if( gdkImg )
		gdk_pixbuf_unref( gdkImg );

	// convert iplimage to gtk, do not delete this variable, disp_img,
	// as the memory is allocated in cam_img
	// NOTE: the pixbuf wraps img_rgb's pixel data without copying, so
	// img_rgb must outlive gdkImg.
	gdkImg = gdk_pixbuf_new_from_data(
	      (guchar*)(img_rgb->imageData),
			GDK_COLORSPACE_RGB,
			FALSE,
			8,
			img_bgr->width,
			img_bgr->height,
			img_bgr->widthStep,
			NULL,
			NULL);


#ifdef USE_FFMPEG_SWS
	// setup the image conversion context required for ffmpeg libswscale
	if( img_convert_ctx != NULL ) {
		sws_freeContext(img_convert_ctx);
		img_convert_ctx = NULL;
	}

	if(img_convert_ctx == NULL) {
		img_convert_ctx = sws_getContext(width, height, PIXFMT_AV, 
						 width, height, PIX_FMT_BGR24, 
						 SWS_BICUBIC, NULL, NULL, NULL);
		if(img_convert_ctx == NULL) {
			fprintf(stderr, "cannot initialize the conversion context!\n");
			exit(EXIT_FAILURE);
		}
	}
#endif

	cout << "\tSize: width=" << fmt.fmt.pix.width << " height=" << fmt.fmt.pix.height << endl;

	return 1;
}

int Camera::start_capture()
{
	// Queue every capture buffer with the driver, then turn streaming on.
	// Nothing needs to happen for read() based capture.
	// Returns 1 on success, -1 on any ioctl failure.
	// (Cleanup: removed an unused outer v4l2_buffer and an 'i' initialized
	// from frame_ind that was immediately overwritten by the loops.)
	unsigned int i;

	switch (inputMethod) {
		case INPUT_METHOD_READ:
			// Nothing to do
			return 1;

		case INPUT_METHOD_MMAP:
			for (i = 0; i < n_buffers; ++i) {
				struct v4l2_buffer buf;
				CLEAR (buf);

				buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
				buf.memory = V4L2_MEMORY_MMAP;
				buf.index  = i;

				if (-1 == xioctl (fd, VIDIOC_QBUF, &buf)) {
					printf("ioctl(VIDIOC_QBUF) (queue buffer) failed: %s\n", strerror(errno));
					return -1;
				}
			}
			break;

		case INPUT_METHOD_USERPTR:
			for (i = 0; i < n_buffers; ++i) {
				struct v4l2_buffer buf;
				CLEAR (buf);

				buf.type      = V4L2_BUF_TYPE_VIDEO_CAPTURE;
				buf.memory    = V4L2_MEMORY_USERPTR;
				buf.index     = i;
				buf.m.userptr = (unsigned long) buffers[i].start;
				buf.length    = buffers[i].length;

				if (-1 == xioctl (fd, VIDIOC_QBUF, &buf)) {
					printf("ioctl(VIDIOC_QBUF) (queue buffer) failed: %s\n", strerror(errno));
					return -1;
				}
			}
			break;
	}

	// start streaming
	buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	if (-1 == xioctl (fd, VIDIOC_STREAMON, &buf_type)) {
		printf("ioctl(VIDIOC_STREAMON) (start video streaming) failed: %s\n", strerror(errno));
		return -1;
	}

	return 1;
}

int Camera::stop_capture()
{
	// Turn streaming off for the mmap/userptr methods; read() based capture
	// has no streaming state. Returns 1 on success, -1 on ioctl failure.
	switch (inputMethod) {
		case INPUT_METHOD_READ:
			/* Nothing to do. */
			return 1;  // fixed stray double semicolon

		case INPUT_METHOD_MMAP:
		case INPUT_METHOD_USERPTR:
			if (-1 == xioctl (fd, VIDIOC_STREAMOFF, &buf_type)) {
				printf("ioctl(VIDIOC_STREAMOFF) (turn streaming off) failed: %s\n", strerror(errno));
				return -1;
			}
			break;
	}

	return 1;
}

void Camera::cap_dump()
{
	// Diagnostic dump to stdout of everything we can query from the device:
	// driver capabilities, inputs, supported standards, pixel formats and
	// controls. NOTE(review): enumerating the standards below switches the
	// active video standard as a side effect and does not restore it --
	// callers should re-run init() afterwards if they intend to capture.
	if( fd < 0 ) {
		printf("Camera device not open\n");
		return;
	}

	if (xioctl(fd, VIDIOC_QUERYCAP, &capability) != 0)
		printf("ioctl(VIDIOC_QUERYCAP) (get video capabilites) failed: %s\n", strerror(errno));

	printf("\n\n----------------------------------------------------------------\n");
	printf ("OpenCV version: %s (%d.%d.%d)\n", CV_VERSION,
		CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

	printf("\nCamera %i (%s)\n", id, devName.c_str());
	printf("------------------------------\n");

	printf("  Card: %s\n", capability.card);
	printf("  Driver: %s\n", capability.driver);
	printf("  Version: %u.%u.%u\n",
			(capability.version >> 16) & 0xFF,
			(capability.version >> 8) & 0xFF,
			capability.version & 0xFF);

	printf("\n");
	printf("Capability info\n");
	printf("------------------------------\n");
	printf("  Can %scapture video\n", (capability.capabilities & V4L2_CAP_VIDEO_CAPTURE) ? "" : "not ");
	printf("  Can %soutput video\n", (capability.capabilities & V4L2_CAP_VIDEO_OUTPUT) ? "" : "not ");
	printf("  Can %soverlay video \n", (capability.capabilities & V4L2_CAP_VIDEO_OVERLAY) ? "" : "not ");
	printf("  Is %sraw VBI capture device\n", (capability.capabilities & V4L2_CAP_VBI_CAPTURE) ? "a " : "not ");
	printf("  Is %sraw VBI output device\n", (capability.capabilities & V4L2_CAP_VBI_OUTPUT) ? "a " : "not ");
	printf("  Is %ssliced VBI capture device\n", (capability.capabilities & V4L2_CAP_SLICED_VBI_CAPTURE) ? "a " : "not ");
	// BUGFIX: format string was "%sliced", printing "Is a liced VBI ..."
	printf("  Is %ssliced VBI output device\n", (capability.capabilities & V4L2_CAP_SLICED_VBI_OUTPUT) ? "a " : "not ");
	printf("  Is %sRDS data capture device\n", (capability.capabilities & V4L2_CAP_RDS_CAPTURE) ? "a " : "not ");
	printf("  Can %sdo video output overlay \n", (capability.capabilities & V4L2_CAP_VIDEO_OUTPUT_OVERLAY) ? "" : "not ");
	printf("  %s a tuner\n", (capability.capabilities & V4L2_CAP_TUNER) ? "Has" : "Doesn't have");
	printf("  %s audio\n", (capability.capabilities & V4L2_CAP_AUDIO) ? "Has" : "Doesn't have");
	printf("  %s radio\n", (capability.capabilities & V4L2_CAP_RADIO) ? "Has" : "Doesn't have");
	printf("  Can %sdo read/write systemcalls\n", (capability.capabilities & V4L2_CAP_READWRITE) ? "" : "not ");
	printf("  Can %sdo async I/O\n", (capability.capabilities & V4L2_CAP_ASYNCIO) ? "" : "not ");
	printf("  Can %sdo streaming I/O\n", (capability.capabilities & V4L2_CAP_STREAMING) ? "" : "not ");

	// legacy V4L1 capability query (bttv still answers it)
	memset (&vidcap, 0, sizeof (vidcap));
	if( xioctl(fd, VIDIOCGCAP, &vidcap) < 0)
		printf("ioctl(VIDIOCGCAP) (get video capabilites) failed: %s\n", strerror(errno));

	printf("  Can%s receive teletext\n", (vidcap.type & VID_TYPE_TELETEXT) ? "" : "not");
	printf("  Overlay is %schromakeyed\n", (vidcap.type & VID_TYPE_CHROMAKEY) ? "" : "not ");
	printf("  Overlay clipping is %ssupported\n", (vidcap.type & VID_TYPE_CLIPPING) ? "" : "not ");
	printf("  Overlay %s frame buffer mem\n", (vidcap.type & VID_TYPE_FRAMERAM) ? "overwrites" : "doesn't overwrite");
	printf("  Hardware image scaling %ssupported\n", (vidcap.type & VID_TYPE_SCALES) ? "" : "not ");
	printf("  Captures in %s\n", (vidcap.type & VID_TYPE_MONOCHROME) ? "grayscale only" : "color");
	printf("  Can %scapture sub-area of image\n", (vidcap.type & VID_TYPE_SUBCAPTURE) ? "" : "not ");
	printf("  Can %sdecode MPEG streams\n", (vidcap.type & VID_TYPE_MPEG_DECODER) ? "" : "not ");
	printf("  Can %sencode MPEG streams\n", (vidcap.type & VID_TYPE_MPEG_ENCODER) ? "" : "not ");
	printf("  Can %sdecode MJPEG streams\n", (vidcap.type & VID_TYPE_MJPEG_DECODER) ? "" : "not ");
	printf("  Can %sencode MJPEG streams\n", (vidcap.type & VID_TYPE_MJPEG_ENCODER) ? "" : "not ");

	printf("\n");
	printf("Available video inputs\n");
	printf("------------------------------\n");
	struct v4l2_input input;
	input.index = 0;
	while (0 == xioctl (fd, VIDIOC_ENUMINPUT, &input)) {
		printf ("  %s: is a ", input.name);
		if( input.type == V4L2_INPUT_TYPE_TUNER )
			printf("tuner");
		else if( input.type == V4L2_INPUT_TYPE_CAMERA )
			printf("camera");

		printf(", its status is: ");
		if( input.status == 0 )
			printf("OK");
		else {
			if( input.status & V4L2_IN_ST_NO_POWER ) printf("has no power, ");
			if( input.status & V4L2_IN_ST_NO_SIGNAL ) printf("has no signal, ");
			if( input.status & V4L2_IN_ST_NO_H_LOCK ) printf("has no horizontal sync lock");
		}
		printf("\n");

		input.index++;
	}
	// the enumeration loop ends with EINVAL when the index runs past the
	// last input; any other errno is a real failure
	if( errno != EINVAL )
		printf("ioctl(VIDIOC_ENUMINPUT) (get video inputs) failed: %s\n", strerror(errno));

	printf("\n");
	printf("Available video capture formats\n");
	printf("------------------------------\n");
	struct v4l2_standard standard;
	standard.index = 0;

	while (0 == xioctl (fd, VIDIOC_ENUMSTD, &standard)) {
		printf ("  %s:\n", standard.name);
		standard.index++;

		// select each standard in turn so the per-standard frame rate and
		// size limits can be queried
		video_std = standard.id;
		if( xioctl(fd, VIDIOC_S_STD, &video_std) < 0)
			printf("ioctl(VIDIOC_S_STD) (set video standard) failed: %s\n", strerror(errno));
		else {
			streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			if( xioctl(fd, VIDIOC_G_PARM, &streamparm) < 0 )
				printf("ioctl(VIDIOC_G_PARM) (get video capabilites) failed: %s\n", strerror(errno));
			else {
				 printf("    time/frame = %i/%i (%f)\n", 
						streamparm.parm.capture.timeperframe.numerator,
						streamparm.parm.capture.timeperframe.denominator, 
						streamparm.parm.capture.timeperframe.numerator / 
						(float)(streamparm.parm.capture.timeperframe.denominator) );
			}

			memset (&vidcap, 0, sizeof (vidcap));
			if( xioctl(fd, VIDIOCGCAP, &vidcap) < 0)
				printf("ioctl(VIDIOCGCAP) (get video capabilites) failed: %s\n", strerror(errno));
			else {
				printf("    frame size\n");
				printf("      Min: %i x %i\n", vidcap.minwidth, vidcap.minheight);
				printf("      Max: %i x %i\n", vidcap.maxwidth, vidcap.maxheight);

			}
		}
	}

	printf("\n");
	printf("Available video output formats\n");
	printf("------------------------------\n");
	struct v4l2_fmtdesc fmtdesc;
	fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmtdesc.index = 0;

	while(0 == xioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc) ) {
		printf ("  %s\n", fmtdesc.description);

/**********************************
 * this is not implemented in the BTTV driver *
 **********************************
		printf("  frame grabbing sizes \n");
		printf("  --------------------\n");

		struct v4l2_frmsizeenum sizeenum;
		memset (&sizeenum, 0, sizeof (sizeenum));
		sizeenum.pixel_format = fmtdesc.pixelformat;
		sizeenum.index = 0;

		printf("   frame sizing is ");
		while(0 == ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &sizeenum) ) {
			if( sizeenum.type == V4L2_FRMSIZE_TYPE_DISCRETE )
				printf("discrete\n");
			else if( sizeenum.type == V4L2_FRMSIZE_TYPE_CONTINUOUS )
				printf("continuous\n");
			else if( sizeenum.type == V4L2_FRMSIZE_TYPE_STEPWISE )
				printf("step-wise\n");

			//printf("    Min: %ix%i\n", sizeenum.min_width, sizeenum.min_height);
			//printf("    Max: %ix%i\n", sizeenum.max_width, sizeenum.max_height);
			sizeenum.index++;
		}
		if( errno != EINVAL )
			printf("ioctl(VIDIOC_ENUM_FRAMESIZES) (get video capabilites) failed: %s\n", strerror(errno));
			
*/

		fmtdesc.index++;
	}

	struct v4l2_queryctrl queryctrl;
	__u32 ctrlId;  // renamed from 'id' -- it shadowed the member Camera::id

	printf("\n");
	printf("Available device controls\n");
	printf("------------------------------\n");

	// standard controls
	for (ctrlId = V4L2_CID_BASE; ctrlId < V4L2_CID_LASTP1; ctrlId++) {
		queryctrl.id = ctrlId;
		if (0 == xioctl (fd, VIDIOC_QUERYCTRL, &queryctrl)) {
			if (queryctrl.flags & V4L2_CTRL_FLAG_DISABLED)
				continue;

			printf ("  %s id=%i\n", queryctrl.name, queryctrl.id);

			if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
				enumerate_menu(fd,queryctrl);
		} else {
			/* ignore errors from going through list */
			break;
		}
	}

	// driver-private controls: probe until the first failure
	for (ctrlId = V4L2_CID_PRIVATE_BASE;; ctrlId++) {
		queryctrl.id = ctrlId;
		if (0 == xioctl (fd, VIDIOC_QUERYCTRL, &queryctrl)) {
			if (queryctrl.flags & V4L2_CTRL_FLAG_DISABLED)
				continue;

			printf ("  %s\n", queryctrl.name);

			if (queryctrl.type == V4L2_CTRL_TYPE_MENU)
				enumerate_menu(fd,queryctrl);
		} else {
			/* ignore errors from going through list */
			break;
		}
	}

}
void Camera::init_read()
{
	// Allocate the single buffer used by read() based capture. Aborts the
	// process if either allocation fails.
	buffers = (Camera::buffer*)calloc (1, sizeof (*buffers));
	if (buffers == NULL) {
		printf ("Out of memory\n");
		exit (EXIT_FAILURE);
	}

	buffers[0].length = buffer_size;
	buffers[0].start  = malloc(buffer_size);
	if (buffers[0].start == NULL) {
		printf ("Out of memory\n");
		exit (EXIT_FAILURE);
	}
}

void Camera::init_mmap(void)
{
	// Request 4 driver-allocated capture buffers and mmap each one into our
	// address space. The driver may grant fewer than requested (req.count is
	// updated in place); we require at least 2. Aborts the process when
	// memory mapping is unsupported or allocation fails.
	struct v4l2_requestbuffers req;

	CLEAR (req);

	req.count	= 4;
	req.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory	= V4L2_MEMORY_MMAP;

	if (-1 == xioctl (fd, VIDIOC_REQBUFS, &req)) {
		if (EINVAL == errno) {
			printf ("%s does not support memory mapping\n", devName.c_str());
			exit (EXIT_FAILURE);
		} else {
			printf("ioctl(VIDIOC_REQBUFS) (request buffs) failed: %s\n", strerror(errno));
		}
	}

	if (req.count < 2) {
		printf ("Insufficient buffer memory on %s\n", devName.c_str());
		exit (EXIT_FAILURE);
	}

	buffers = (Camera::buffer*)calloc (req.count, sizeof (*buffers));

	if (!buffers) {
		printf ("Out of memory\n");
		exit (EXIT_FAILURE);
	}

	// query each buffer's offset/length and map it; n_buffers ends up as
	// the number of successfully enumerated buffers
	for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
		struct v4l2_buffer buf;

		CLEAR (buf);

		buf.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory	= V4L2_MEMORY_MMAP;
		buf.index	= n_buffers;

		if (-1 == xioctl (fd, VIDIOC_QUERYBUF, &buf))
			printf("ioctl(VIDIOC_QUERYBUF) (query buffs) failed: %s\n", strerror(errno));

		buffers[n_buffers].length = buf.length;
		buffers[n_buffers].start =
			mmap (NULL /* start anywhere */,
			      buf.length,
			      PROT_READ | PROT_WRITE /* required */,
			      MAP_SHARED /* recommended */,
			      fd, buf.m.offset);

		if (MAP_FAILED == buffers[n_buffers].start)
			printf("mmap failed: %s\n", strerror(errno));
	}
}

void Camera::init_userp()
{
	// Allocate 4 page-aligned user-space buffers for USERPTR capture;
	// buffer_size is rounded up to a whole number of pages first.
	// Aborts the process if the driver lacks user-pointer support or
	// an allocation fails.
	struct v4l2_requestbuffers req;
	unsigned int page_size;

	page_size = getpagesize ();
	// round buffer_size up to the next page-size multiple
	buffer_size = (buffer_size + page_size - 1) & ~(page_size - 1);

	CLEAR (req);

	req.count	= 4;
	req.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory	= V4L2_MEMORY_USERPTR;

	if (-1 == xioctl (fd, VIDIOC_REQBUFS, &req)) {
		if (EINVAL == errno) {
			printf ("%s does not support user pointer i/o\n", devName.c_str());
			exit (EXIT_FAILURE);
		} else {
			printf("ioctl(VIDIOC_REQBUFS) (request buffs) failed: %s\n", strerror(errno));
		}
	}

	// NOTE(review): the driver may adjust req.count; the code below assumes
	// exactly 4 buffers were granted -- verify against the V4L2 spec.
	buffers = (Camera::buffer*)calloc(4, sizeof (*buffers));

	if (!buffers) {
		printf ("Out of memory\n");
		exit (EXIT_FAILURE);
	}

	for (n_buffers = 0; n_buffers < 4; ++n_buffers) {
		buffers[n_buffers].length = buffer_size;
		buffers[n_buffers].start = memalign (/* boundary */ page_size,
						     buffer_size);

		if (!buffers[n_buffers].start) {
			printf ("Out of memory\n");
			exit (EXIT_FAILURE);
		}
	}
}

int Camera::trigger_capture(void)
{
	// Re-queue the buffer at frame_ind with the driver so it can be filled
	// again, then advance frame_ind. Returns 1 on success, -1 on failure.
	struct v4l2_buffer buf;
	CLEAR (buf);

	buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.index  = frame_ind;

	// BUGFIX: was "frame_ind = (++frame_ind) % n_buffers;" which modifies
	// frame_ind twice without sequencing -- undefined behavior in C/C++.
	frame_ind = (frame_ind + 1) % n_buffers;

	switch (inputMethod) {
		case INPUT_METHOD_READ:
			// read() based capture has no queued buffers
			return 1;

		case INPUT_METHOD_MMAP:
			buf.memory = V4L2_MEMORY_MMAP;
			break;

		case INPUT_METHOD_USERPTR:
			buf.memory    = V4L2_MEMORY_USERPTR;
			buf.m.userptr = (unsigned long) buffers[frame_ind].start;
			buf.length    = buffers[frame_ind].length;
			break;
	}

	// requeue next frame
	if (-1 == xioctl (fd, VIDIOC_QBUF, &buf)) {
		printf("Camera::trigger_capture -- ioctl(VIDIOC_QBUF) (queue buffer) failed: %s\n", strerror(errno));
		return -1;
	}

	return 1;
}

int Camera::read_frame(void)
{
	// Retrieve one frame using the configured I/O method and record which
	// buffer now holds it (img_ind). Returns 1 on success, 0 when no frame
	// is ready (EAGAIN), -1 on error. Also refreshes the measured FPS every
	// 10 frames.
	struct v4l2_buffer buf;
	unsigned int i;

	switch (inputMethod) {
	case INPUT_METHOD_READ:
		if (-1 == read (fd, buffers[0].start, buffers[0].length)) {
			switch (errno) {
			case EAGAIN:
				return 0;

			case EIO:
				/* Could ignore EIO, see spec. */
				/* fall through */
			default:
				printf("read failed: %s\n", strerror(errno));
				return -1;
			}
		}

		// read() capture always fills the single buffer 0
		img_ind = 0;

		break;

	case INPUT_METHOD_MMAP:
		CLEAR (buf);

		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;

		if (-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {
			switch (errno) {
			case EAGAIN:
				printf("ioctl(VIDIOC_DQBUF) no active buffer to retrive image\n");
				return 0;

			case EIO:
				/* Could ignore EIO, see spec. */
				/* fall through into the default error path below */
				printf("ioctl(VIDIOC_DQBUF) (de-queue buffer) failed with EIO: %s\n", strerror(errno));

			default:
				printf("ioctl(VIDIOC_DQBUF) (de-queue buffer) un-caught failure: %s\n", strerror(errno));
				return -1;
			}
		}

		assert (buf.index < n_buffers);
		// the driver reports which mmap'ed buffer it filled
		img_ind = buf.index;

		//printf("read image index = %i\n", buf.index);

		break;

	case INPUT_METHOD_USERPTR:
		CLEAR (buf);

		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_USERPTR;

		if (-1 == xioctl (fd, VIDIOC_DQBUF, &buf)) {
			switch (errno) {
			case EAGAIN:
				printf("ioctl(VIDIOC_DQBUF) no active buffer to retrive image\n");
				return 0;

			case EIO:
				/* Could ignore EIO, see spec. */
				/* fall through into the default error path below */
				printf("ioctl(VIDIOC_DQBUF) (de-queue buffer) failed with EIO: %s\n", strerror(errno));

			default:
				printf("ioctl(VIDIOC_DQBUF) (de-queue buffer) failed: %s\n", strerror(errno));
				return -1;
			}
		}

		// map the returned user pointer back to our buffer index
		for (i = 0; i < n_buffers; ++i)
			if (buf.m.userptr == (unsigned long) buffers[i].start && buf.length == buffers[i].length)
				break;

		assert (i < n_buffers);
		img_ind = i;

		break;
	}
	


	// mark read time of image
	ts_last.stamp();

	frameCount++;
	if( frameCount % 10 == 0 ) {
		// average FPS over the last 10 frames; SEC2MICRO converts the
		// timestamp difference to seconds
		fps = 10.0 / (float)(ts_last - ts_fps) * SEC2MICRO;
		ts_fps = ts_last;

		//cout << "frameCount=" << frameCount << " FPS=" << fps << endl;
	}

	return 1;
}

void Camera::process_image()
{
	// Convert the most recently captured buffer (buffers[img_ind]) into
	// img_bgr -- optionally deinterlacing and undistorting on the way --
	// then stamp the frame-counter text and refresh the RGB copy that
	// backs the GdkPixbuf display image.
#if (PIXFMT_AV != PIX_FMT_BGR24)
	int tmp;
	// wrap the raw capture buffer in an AVPicture (no pixel copy)
	tmp = avpicture_fill((AVPicture *)img_frame, (uint8_t*)(buffers[img_ind].start), 
										 PIXFMT_AV, width, height);
	if(tmp < 0 )
		printf("avpicture_fill: error filling img_frame\n");

 #ifdef CAM_RUN_DEINTERLACE
	// NOTE(review): img_deint is pointed at the SAME capture buffer as
	// img_frame, so deinterlace() runs in place -- confirm this is intended.
	tmp = avpicture_fill((AVPicture *)img_deint, (uint8_t*)(buffers[img_ind].start), 
										 PIXFMT_AV, width, height);
	if(tmp < 0 )
		printf("avpicture_fill: error filling img_deint\n");

	// deinterlace image in buffer
	if( deinterlaceVideo )
		deinterlace();
 #endif
#endif

	// fill the bgr img
#if (PIXFMT_AV == PIX_FMT_BGR24)
	memcpy( img_bgr->imageData, buffers[img_ind].start, img_bgr->imageSize); 
#else
	fill_bgr_img();
#endif

	// undistort image
	if( undistortVideo )
		undistortImg();

	// put text in bgr image
	// NOTE(review): sprintf into videoText is unbounded -- verify the
	// destination is large enough, or switch to snprintf.
	sprintf(videoText, "Frame #=%i   FPS=%.2f", frameCount, fps);
	cvPutText( img_bgr, videoText, cvPoint(10,20), &font, CV_RGB(0,0,0));

	// convert BGR to RGB (img_rgb's data backs the GdkPixbuf from set_size)
	cvCvtColor( img_bgr, img_rgb, CV_BGR2RGB);
}

void Camera::fill_bgr_img()
{
	// Convert the captured frame (img_frame, PIXFMT_AV) into img_bgr's
	// pixel buffer using libswscale or the legacy img_convert, depending
	// on the build configuration.
	// NOTE(review): an AVFrame header is allocated and freed on every call;
	// it could be hoisted into the object if this shows up in profiling.
	int tmp; 

	// destination buffer is in img_bgr
	AVFrame *dst = (AVFrame *)avcodec_alloc_frame();
	tmp = avpicture_fill((AVPicture *)dst, (uint8_t*)img_bgr->imageData, PIX_FMT_BGR24, width, height);
	if(tmp < 0 )
		printf("avpicture_fill: error filling frame\n");

	// convert image to bgr
#ifdef USE_FFMPEG_SWS
	tmp = sws_scale(img_convert_ctx, img_frame->data, img_frame->linesize, 
			 0, height, dst->data, dst->linesize);
	if(tmp <= 0 )
		printf("sws_scale: error converting frame\n");
				 
#elif defined(USE_FFMPEG_CONVERT)
	tmp = img_convert((AVPicture *)dst, PIX_FMT_BGR24, (AVPicture *)img_frame, PIXFMT_AV, 
			  width, height ); 
	if(tmp < 0 )
		printf("img_convert: error converting frame\n");
#endif

	// frees only the AVFrame header; the pixels belong to img_bgr
	av_free( dst );
}

// Accessor: non-owning pointer to the foreground image (may be NULL).
IplImage *const Camera::get_fg_img()
{
	return img_fg;    // foreground image
}

// Accessor: non-owning pointer to the binary foreground image (may be NULL).
IplImage *const Camera::get_fg_bin()
{
	return fg_binary;    // binary foreground image (was mislabeled "foreground image")
}

// Accessor: non-owning pointer to the background image (may be NULL).
IplImage *const Camera::get_bg_img()
{
	return img_bg;    // background image (was mislabeled "foreground image")
}

// Accessor: non-owning pointer to the most recent BGR frame (may be NULL).
IplImage *const Camera::get_bgr_img()
{
	return img_bgr; // bgr image
}

GdkPixbuf * Camera::get_pixbuf()
{
	// Non-owning pointer to the display pixbuf; its pixel data is backed by
	// img_rgb (see set_size), so the caller must not keep it past the
	// Camera's lifetime.
	return gdkImg;
	//return gdk_pixbuf_copy(gdkImg);
}

IplImage * Camera::get_img()
{
	// Return a freshly allocated copy of the raw capture buffer as an
	// IplImage (1 channel for grey capture, 3 otherwise). The CALLER owns
	// the returned image and must release it with cvReleaseImage.
	IplImage * tmpImg;
	if( V4L2_PIX_FMT_GREY == PIXFMT_V4L2 )
		tmpImg = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	else
		tmpImg = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	// copy image
	//cvCopyImage( img, tmpImg);
	memcpy( tmpImg->imageData, buffers[img_ind].start, tmpImg->imageSize);

	return tmpImg;
}

// Motion-adaptive deinterlacer for YUV420P frames: copies all three planes
// to the destination, then applies a 5-tap vertical filter to luma pixels
// on odd lines, but only where a motion test says the fields differ.
// Falls back to ffmpeg's avpicture_deinterlace() for any other format,
// for in-place operation, or when width/height are not multiples of 4.
// Returns 0 on success (or whatever the fallback returns).
int my_avpicture_deinterlace (AVPicture *dstPict, const AVPicture *srcPict, int pix_fmt, int width, int height)
{
	int i, x, y;
	
	// We only handle YUV420 with different source and destination. Call the original function in
	// all other cases. 
	if ((width & 3) != 0 || (height & 3) != 0 || srcPict == dstPict || pix_fmt != PIX_FMT_YUV420P)
		return avpicture_deinterlace (dstPict, srcPict, pix_fmt, width, height);
		
		
	// plane 0 = luma (full resolution), planes 1,2 = chroma (half resolution)
	for (i = 0; i < 3; i++)
	{
		// Get pointers to source and destination data and the distance between consecutive lines.
		uint8_t *src = srcPict->data[i];
		uint8_t *dst = dstPict->data[i];
		int src_wrap = srcPict->linesize[i];
		int dst_wrap = dstPict->linesize[i];
		
		// halve the dimensions once when we reach the first chroma plane
		if (i == 1)
		{
			height /= 2;
			width /= 2;
		}
		
		// First copy everything. As a special case, use one single memcpy to improve speed if there
		// is no gap between lines.
		if (src_wrap == width && dst_wrap == width)
		{
			memcpy (dst, src, height * width);
		}
		else
		{
			for (y = 0; y < height; y++, src += src_wrap, dst += dst_wrap)
				memcpy (dst, src, width);
		}
		
		// only the luma plane gets the motion-adaptive filter below
		if (i >= 1)
			continue;
		
		// rewind... actually src/dst now point one past the plane; step to the
		// second line of the copied region (odd lines are filtered)
		src += src_wrap;
		dst += dst_wrap;

		for (y = 0; y < height; y += 2, src += 2*src_wrap, dst += 2*dst_wrap)
		{
			// Make pointers to the previous and next two source lines; special case for the first
			// and last lines to stay inside existing data.
			uint8_t *src_m2 = (y == 0 ? src - src_wrap : src - 2*src_wrap);
			uint8_t *src_m1 = src - src_wrap;
			uint8_t *src_p1 = src + (y == height - 2 ? 0 : src_wrap);
			uint8_t *src_p2 = src + (y == height - 2 ? 0 : 2*src_wrap);

			for (x = 0; x < width; x += 1)
			{
				int even1 = src_m1 [x];
				int even2 = src_p1 [x];
				int odd1 = src [x];
				int odd2 = src_p2 [x];

				if (abs (even2 - even1) + abs (odd2 - odd1) <= abs (odd1 - even1) + abs (odd2 - even2) - 4)
				{
					// Pixels that are two lines apart are much close together than pixels on
					// consecutive lines: This means we have movement and should apply the tap
					// filter. 
					// weights (-1, 4, 2, 4, -1)/8, rounded and clamped to [0,255]
					int sum = -src_m2 [x] + (src_m1 [x] << 2) + (src [x] << 1) + (src_p1 [x] << 2) - src_p2 [x];
					dst [x] = sum < 0 ? 0 : sum >= 2040 ? 255 : (sum + 4) >> 3;
//					dst [x] = dst [x - dst_wrap] = 0;
				}
			}
		}
	}
	
	return 0;
}


void Camera::deinterlace()
{
	// Deinterlace img_frame into img_deint, using either ffmpeg's
	// avpicture_deinterlace or the local my_avpicture_deinterlace depending
	// on the build flags. A no-op warning when CAM_RUN_DEINTERLACE is off.
#ifndef CAM_RUN_DEINTERLACE
	printf("Camera::deinterlace() - the camera code was not compiled to deinterlace video,\n");
	printf("			recompile camera.cxx with CAM_RUN_DEINTERLACE defined in camera.h\n");
#else
	int tmp;
 #ifdef USE_AVPICTURE_DEINTERLACE
	tmp = avpicture_deinterlace((AVPicture *)img_deint, (AVPicture *)img_frame,
										 PIXFMT_AV, width, height);
 #else
	tmp = my_avpicture_deinterlace ((AVPicture *)img_deint, (AVPicture *)img_frame,
					PIXFMT_AV, width, height);
 #endif
	if(tmp < 0 )
		printf("Camera::deinterlace() - error deinterlacing frame\n");
#endif
}

int Camera::saveSnapshot(const char *filePath, const char *fileName)
{
	// Save the current BGR frame (img_bgr) to disk as a still image.
	// filePath/fileName may be NULL, in which case the default pictures
	// directory and an auto-generated "Cam_<id>_<timestamp>.jpeg" name
	// are used.  Returns the result of cvSaveImage().
	String pictFilePath;
	String pictFileName;

	if( filePath )
		pictFilePath = filePath;
	else 
		pictFilePath = SYSCONF_DIR "pictures/";

	if (fileName)
		pictFileName = fileName;
	else{
		// Build a name from the camera id and the current timestamp.
		char tmpFileName[30];
		snprintf(tmpFileName, sizeof(tmpFileName), "Cam_%i_", id);
		pictFileName = tmpFileName + timeStr() + String(".jpeg");
	}					

	// Create the destination directory if it is missing.  Use mode 0755:
	// a directory needs the execute bit to be traversable, so the old
	// 0666 produced a directory the process could not actually use.
	int dirExists = doesDirExist(pictFilePath.c_str());
	if ( !dirExists ){
		cout << "\tWARNING: Directory does not exist " << pictFilePath << endl;
		cout << "\tDirectory being created" << endl;
		if( mkdir(pictFilePath.c_str(), 0755) < 0 )
			cout << "\tERROR: could not create directory " << pictFilePath << endl;
	}
	
	String pictFilePathName = pictFilePath + pictFileName;

	cout << "\tsaving picture file: " << pictFilePathName << endl;

	int val = cvSaveImage(pictFilePathName.c_str(), img_bgr);
	return val;
}

int Camera::init_videoFile(const char *filePath, const char *fileName)
{
	// Open a new video file for recording, releasing any previously open
	// writer first.  filePath/fileName may be NULL; defaults are the
	// movies directory and "Cam_<id>_<timestamp>.mpeg".  Exits the
	// process if the writer cannot be created; returns 1 on success.
	int isColor = true;
	if( video_writer != NULL )
		cvReleaseVideoWriter(&video_writer);

	String videoFilePath;
	String videoFileName;

	if( filePath)
		videoFilePath = filePath;
	else 
		videoFilePath = SYSCONF_DIR "movies/";

	if (fileName)
		videoFileName = fileName;
	else{
		// Build a name from the camera id and the current timestamp.
		char tmpFileName[30];
		snprintf(tmpFileName, sizeof(tmpFileName), "Cam_%i_", id);
		String ext = ".mpeg";

		videoFileName = tmpFileName + timeStr() + ext;
	}

	cout << "\tcreating video file: " << videoFilePath << videoFileName << endl;
	// CV_FOURCC('P','I','M','1') is MPEG-1 
	// CV_FOURCC('M','J','P','G') is motion-jpeg 
	
	// for now, use the frame rate of the camera
	int fourCC = CV_FOURCC_DEFAULT;
	//fps_d = fps;
	
	// Create the destination directory if it is missing.  Use mode 0755:
	// a directory needs the execute bit to be traversable, so the old
	// 0666 produced a directory the process could not actually use.
	int dirExists = doesDirExist(videoFilePath.c_str());
	if (!dirExists)
	{
		cout << "\tWARNING: Directory does not exist " << videoFilePath << endl;
		cout << "\tDirectory being created" << endl;
		if( mkdir(videoFilePath.c_str(), 0755) < 0 )
			cout << "\tERROR: could not create directory " << videoFilePath << endl;
	}

	String videoFilePathName = videoFilePath + videoFileName;
	video_writer = cvCreateVideoWriter(videoFilePathName.c_str(), fourCC,
					(double)fps_d,cvSize(width,height), isColor);
	if( video_writer == NULL ) {
		printf("Camera::init_videoFile() -- could not create video writer\n");
		exit(EXIT_FAILURE);
	}
	return 1;
}

int Camera::write_videoFrame()
{
	// Append the current BGR frame to the open video file.
	// Returns 0 when no video file is open, otherwise the
	// cvWriteFrame() result.
	if( video_writer == NULL )
		return 0;
	return cvWriteFrame(video_writer, img_bgr);
}

void Camera::close_videoFile()
{
	// Release the video writer if one is open and mark it closed.
	if( video_writer == NULL )
		return;

	cvReleaseVideoWriter(&video_writer);
	video_writer = NULL;
	cout << "video file closed" << endl;
}

//
// Undistort camera image, not to be used in real time for it takes
// significant CPU
//
void Camera::undistortImg(IplImage* t)
{
	// Remove lens distortion using the calibrated intrinsic/distortion
	// parameters.  When t is NULL the correction is applied in place to
	// img_bgr (through a temporary clone); otherwise the corrected image
	// is written into t and img_bgr is left untouched.
	if( intrinsic == NULL || distortion == NULL ) {
		cout << "Camera::undistortImg -- intrinsic and distortion values do not exist" << endl;
		return;
	}

	if( t != NULL ) {
		cvUndistort2(img_bgr, t, intrinsic, distortion);
	} else {
		IplImage *scratch = cvCloneImage( img_bgr );
		cvUndistort2(scratch, img_bgr, intrinsic, distortion);
		cvReleaseImage( &scratch );
	}
}

// 
// Compensates for radial and tangential distortion. Model From Oulu university.
// 
// INPUT: imgPoints: distorted (normalized) point coordinates in the image plane (2xN matrix)
// 
// OUTPUT: points: undistorted (normalized) point coordinates in the image plane (2xN matrix)
// 
// Method: Iterative method for compensation.
// 
// NOTE: This compensation has to be done after the subtraction
//       of the principal point, and division by the focal length.
void Camera::undistort_points(Matrix imgPoints, Matrix &points)
{
	// Radial distortion coefficients k1, k2 and tangential p1, p2 from
	// the calibration data; no 6th-order radial term is stored, so k3
	// is fixed at zero.
	float k1 = distortion->data.fl[0];
	float k2 = distortion->data.fl[1];
	float k3 = 0; 
	float p1 = distortion->data.fl[2];
	float p2 = distortion->data.fl[3];

	// Intrinsics read from the 3x3 camera matrix (row-major):
	// fu, fv = focal lengths, (uo, vo) = principal point,
	// alpha = skew normalized by fu.
	float fu = intrinsic->data.fl[0];
	float alpha = intrinsic->data.fl[1] / fu; 
	float uo = intrinsic->data.fl[2]; 
	float fv = intrinsic->data.fl[4];
	float vo = intrinsic->data.fl[5];

	/*
	cout << "\n== Camera Calibration Parameters -- " << endl
	     << "\tk1=" << k1 << " k2=" << k2 << " p1=" << p1 << " p2=" << p2 << endl
	     << "\tfu=" << fu << " fv=" << fv << " uo=" << uo << " vo=" << vo << endl
		  << "\talpha=" << alpha << endl;
	*/

	// Undistort each column (one image point) independently.
	int numPoints = imgPoints.NumCol();
	for(int i=0; i < numPoints; i++){

		Matrix xd = imgPoints.getCol(i); // distorted pixel points 

		// first: Subtract principal point, and divide by the focal length
		xd(0) = (xd(0) - uo) / fu;
		xd(1) = (xd(1) - vo) / fv;

		// second: undo skew
		xd(0) = xd(0) - alpha * xd(1);

		//cout << "shifted, normalized, and unskewed: " << endl << xd << endl;

		// use OULU algorithm, in 20 steps error should be < 1e-10
		Matrix x = xd; // initial guess
		Matrix delta_x(2,1);

		// now work on undistorting using oulu algorithm
		double r_2;
		double k_radial;

		// Fixed-point iteration: given the current undistorted estimate x,
		// compute the distortion it would produce and invert it.
		for(int kk=0; kk < 20; kk++ ) {

			// r_2: squared radius of x (assumes Matrix::Norm2() returns the
			// squared 2-norm -- TODO confirm against the Matrix class).
			r_2 = x.Norm2();
			k_radial =  1 + k1 * r_2 + k2 * pow(r_2,2) + k3 * pow(r_2,3);

			// Tangential distortion terms (Oulu/Brown model).
			delta_x(0) = 2*p1*x(0)*x(1) + p2*(r_2 + 2*pow(x(0),2));
			delta_x(1) = p1 * (r_2 + 2*pow(x(1),2)) + 2*p2*x(0)*x(1);

			// Invert: x = (xd - delta_x) / k_radial.
			x = (xd - delta_x);
			x /= k_radial;
		}

		points.setCol(i, x);
	}
	//cout << "Undistorted Points [px]: " << endl << points << endl;
}

void Camera::getWorldPoints(Matrix imgPoints, Matrix &worldPoints)
{	
	// Back-project image points (2xN) to world coordinates (3xN) using the
	// calibrated extrinsics, assuming all points lie on a plane at the
	// camera's Z translation (i.e. the camera looks perpendicular at the
	// floor).  Results are written into worldPoints.
	if( worldPoints.NumRow() < 3 )  {
		cout << "Camera::getWorldPoints -- worldPoints is not a matrix of 3-element cols" << endl;
		return;
	}
	if( imgPoints.NumRow() < 2 )  {
		cout << "Camera::getWorldPoints -- imgPoints is not a matrix of 2-element cols" << endl;
		return;
	}
	if( imgPoints.NumCol() != worldPoints.NumCol() ) {
		cout << "Camera::getWorldPoints -- number of cols in worldPoints doesn't equal imgPoints" << endl;
		return;
	}
	int N = imgPoints.NumCol();

	Matrix tmpPoints = imgPoints;

	// first, undistort the imgPoints
	undistort_points(imgPoints, tmpPoints);

	// multiply by camera Zo, this assumes that the camera is perpendicular
	// to the plane (floor)
	tmpPoints *= translation->data.fl[2];

	// form rotation matrix from Rodrigues
	// NOTE(review): if the rotation vector is all zeros, theta == 0 and the
	// division below divides by zero -- verify calibration always yields a
	// non-zero rotation.
	Matrix rod(3,1, rotation->data.fl);
	float theta = rod.Norm();
	rod /= theta; 
	//cout << "Rodrigues: theta=" << theta << " unit vector: " << endl << rod << endl;

	// Rodrigues formula: R = cos(t)I + sin(t)[r]x + (1-cos(t)) r r^T
	Matrix rot = rod.Skew();
	Matrix I(3,3); I.Identity();

	rot = cos(theta)*I + sin(theta)*rot + (1-cos(theta)) * rod * ~rod;
	//cout << "Rotation Matrix: " << endl << rot << endl;

	// form translation matrix of translation vectors
	Matrix t(3, N);
	t.setRow(0, translation->data.fl[0]);  // 1.5
	t.setRow(1, translation->data.fl[1]);  // 1.5
	t.setRow(2, translation->data.fl[2]);
	//cout << "Translation matrix of vectors: " << endl << t << endl;

	// now form the points in the camera optical axis
	worldPoints.setRow(0, tmpPoints.getRow(0));
	worldPoints.setRow(1, tmpPoints.getRow(1));
	worldPoints.setRow(2, translation->data.fl[2]);
	//cout << "Camera Frame Points: " << endl << worldPoints << endl;

	// rotate and translate optical axis coordinates to world coordinates
	worldPoints = rot * (worldPoints - t);

	//cout << "World Frame Points [m]: " << endl << worldPoints << endl;

}
void Camera::getImgPoints(Matrix worldPoints, Matrix &imgPoints)
{
	// Project 3-D world points (3xN) onto the image plane (2xN) using the
	// camera's rotation, translation, intrinsic and distortion parameters.
	// The projected pixel coordinates are written into imgPoints.
	if( worldPoints.NumRow() < 3 )  {
		cout << "Camera::getImgPoints -- worldPoints is not a matrix of 3-element cols" << endl;
		return;
	}
	if( imgPoints.NumRow() < 2 )  {
		cout << "Camera::getImgPoints -- imgPoints is not a matrix of 2-element cols" << endl;
		return;
	}
	if( imgPoints.NumCol() != worldPoints.NumCol() ) {
		cout << "Camera::getImgPoints -- number of cols in worldPoints doesn't equal imgPoints" << endl;
		return;
	}
	const int N = worldPoints.NumCol();

	CvMat *object_points = cvCreateMat( 1, N, CV_64FC3 );
	CvMat *image_points  = cvCreateMat( 1, N, CV_64FC2 );

	// Pack the Matrix columns into the interleaved (x,y,z) CvMat layout.
	double *obj = object_points->data.db;
	for( int c = 0; c < N; c++ ) {
		obj[3*c]     = worldPoints(0,c);
		obj[3*c + 1] = worldPoints(1,c);
		obj[3*c + 2] = worldPoints(2,c);
	}

	// projects object points to the view plane using
	// the specified extrinsic, intrinsic, rotation and translation camera parameters 
	cvProjectPoints2( object_points, rotation, translation, 
			  intrinsic, distortion, image_points);

	// Unpack the projected (u,v) pairs back into the output Matrix.
	const double *img = image_points->data.db;
	for( int c = 0; c < N; c++ ) {
		imgPoints(0,c) = img[2*c];
		imgPoints(1,c) = img[2*c + 1];
	}

	cvReleaseMat(&object_points);
	cvReleaseMat(&image_points);
}

int Camera::saveBackground(String file)
{
	// Write the captured background image (img_bg) to the given file.
	// Returns -1 when no background has been captured, otherwise the
	// cvSaveImage() result.
	if( img_bg == NULL ) {
		cout << "Camera::saveBackground - no background image to save to file" << endl;
		return -1;
	}
	cout << "Saving: " << file << endl;

	// NOTE: removed leftover debug code that opened a HighGUI window,
	// displayed img_bgr (the live frame, not the background being saved)
	// and blocked the caller for 5 seconds with cvWaitKey(5000).

	int val = cvSaveImage(file.c_str(), img_bg);
	return val;
}

int Camera::loadBackground(String file)
{
	// Load a background image from disk for background subtraction.
	// The image must match the camera resolution and have 3 channels.
	// Returns 1 on success, 0 on failure (img_bg is NULL on failure).
	cout << "\tBackground: "<< file;

	if( img_bg != NULL )
		cvReleaseImage( &img_bg );

	// Force a 3-channel (color) load.
	img_bg = cvLoadImage(file.c_str(),1);
	if( img_bg == NULL ){
		cout << " -- could not load image file" << endl;
		return 0;
	}

	// Reject images that do not match the camera's frame geometry.
	bool geometryOk = img_bg->width == (int)width
		       && img_bg->height == (int)height
		       && img_bg->nChannels == 3;
	if( !geometryOk ){
		cout << " -- image is the wrong size" << endl;
		cvReleaseImage( &img_bg );
		img_bg = NULL;
		return 0;
	}

	cout << endl;
	return 1;
}

void Camera::setCaptureBackground(bool val) 
{ 
	// Enable or disable background capture.  On enable, lazily allocate
	// the floating-point accumulator and the 8-bit background image and
	// reset both to zero so a fresh capture starts clean.
	getBackgroundImg = val; 
	if( !val )
		return;

	if( img_bg_acc == NULL )
		img_bg_acc = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 3);
	if( img_bg == NULL )
		img_bg = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	cvZero(img_bg_acc);
	cvZero(img_bg);
}

void Camera::setUseBackground(bool val) 
{ 
	// Enable or disable background subtraction.  On enable, lazily
	// allocate the working images it needs: the foreground image, the
	// single-channel foreground mask, and a grayscale scratch image.
	useBackgroundSub = val; 
	if( !val )
		return;

	//if( img_bg != NULL )
		//img_bg = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	if( img_fg == NULL )
		img_fg = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);

	if( fg_binary == NULL )
		fg_binary = cvCreateImage(cvSize(width,height), IPL_DEPTH_8U, 1);

	if( img_gray == NULL )
		img_gray = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
}


String Camera::timeStr( char *format ) {
	// Format the current local time with the given strftime() format
	// string (the default is declared in camera.h).  strftime() leaves
	// the buffer contents undefined when it returns 0, so guard against
	// that by returning an empty string instead of garbage.
	char buf[ 256 ];
	time_t tt = ::time(NULL);
	if( strftime(buf, sizeof(buf), format, localtime(&tt)) == 0 )
		buf[0] = '\0';
	return String(buf);
}


void Camera::devDelete()
{
	// Device-teardown hook; currently only prints a trace message.
	cout << "Printing from 'Camera.cxx'";
}

/****************************************************************
*    Helper functions
****************************************************************/
int doesDirExist(char* dirPath)
{
	// Returns 1 if dirPath names a directory we can open, 0 otherwise.
	// Fix: the original called closedir(NULL) when opendir() failed,
	// which is undefined behavior per POSIX -- only close on success.
	DIR * tmpDirect = opendir(dirPath);
	if (tmpDirect == NULL)
		return 0;

	closedir(tmpDirect);
	return 1;
}

void enumerate_menu (int fd, struct v4l2_queryctrl &queryctrl)
{
	// Print the name of every menu entry of a V4L2 menu-type control by
	// querying indices from the control's minimum through its maximum.
	struct v4l2_querymenu querymenu;
	memset (&querymenu, 0, sizeof (querymenu));
	querymenu.id = queryctrl.id;

	printf ("  Menu items:\n");

	querymenu.index = queryctrl.minimum;
	while (querymenu.index <= (unsigned)queryctrl.maximum) {
		if (xioctl (fd, VIDIOC_QUERYMENU, &querymenu) == 0)
			printf ("  %s\n", querymenu.name);
		else
			perror ("VIDIOC_QUERYMENU");
		querymenu.index++;
	}
}

int camdev_size_set(int val, int min, int max, char *s)
{
	// Resolve a requested capture dimension against driver limits.
	// The sentinel 0 selects the maximum, -1 selects the minimum.
	// Out-of-range requests are rejected with a diagnostic (s names
	// the dimension) and 0 is returned; otherwise val is returned.
	switch (val) {
	case 0:
		return max;
	case -1:
		return min;
	default:
		break;
	}

	if (min <= val && val <= max)
		return val;

	if (val < min)
		printf("Invalid %s according to driver (%i < %i)\n", s, val, min);
	else
		printf("Invalid %s according to driver (%i > %i)\n", s, val, max);
	return 0;
}

int xioctl(int fd, int request, void * arg)
{
	// ioctl() wrapper that transparently retries when the call is
	// interrupted by a signal (EINTR); any other result is returned
	// to the caller unchanged.
	for (;;) {
		int rc = ioctl(fd, request, arg);
		if (rc != -1 || errno != EINTR)
			return rc;
	}
}


