/*
* frame.cc -- utilities for processing digital video frames
* Copyright (C) 2000 Arne Schirmacher <arne@schirmacher.de>
* Copyright (C) 2001-2007 Dan Dennedy <dan@dennedy.org>
* Copyright (C) 2007 Stéphane Brunner <stephane.brunner@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/

/** Code for handling raw DV frame data
 
    These methods are for handling the raw DV frame data. It contains methods for 
    getting info and retrieving the audio data.
 
    \file frame.cc
*/

#ifdef HAVE_LIBDV
#include <libdv/dv.h>
#endif

// C++ includes

#include <string>
#include <iostream>
#include <sstream>
#include <iomanip>
#include <deque>

using std::ostringstream;
using std::setw;
using std::setfill;
using std::hex;
using std::dec;
using std::deque;
using std::cerr;
using std::endl;

// C includes

#include <pthread.h>
#include <math.h>
#include <sys/time.h>

// local includes
#include "frame.h"

/// Default-construct video metadata: zero dimensions, NTSC (non-PAL) flag.
VideoInfo::VideoInfo()
	: width( 0 )
	, height( 0 )
	, isPAL( false )
{
}

#ifndef HAVE_LIBDV
// Out-of-class definitions of Frame's static lookup tables (declared in
// frame.h).  The `static` keyword must NOT be repeated here: repeating it
// on the definition of a class static data member is ill-formed C++ and
// is rejected by conforming compilers.
bool Frame::maps_initialized = false;
int Frame::palmap_ch1[ 2000 ];
int Frame::palmap_ch2[ 2000 ];
int Frame::palmap_2ch1[ 2000 ];
int Frame::palmap_2ch2[ 2000 ];
int Frame::ntscmap_ch1[ 2000 ];
int Frame::ntscmap_ch2[ 2000 ];
int Frame::ntscmap_2ch1[ 2000 ];
int Frame::ntscmap_2ch2[ 2000 ];
short Frame::compmap[ 4096 ];
#endif

// Static globals for encoder, not accessed by more than one thread
// Serializes libavcodec open/close and encoder setup (see Frame ctor/dtor
// and CreateEncoder).
static pthread_mutex_t avcodec_mutex = PTHREAD_MUTEX_INITIALIZER;
#if defined(HAVE_LIBAVCODEC)
// Shared DV muxer context, lazily built by CreateEncoder.
static AVFormatContext *avformatEncoder = NULL;
// Reusable packet for encoded DV frames (allocated in CreateEncoder).
static AVPacket avpacketEncoder;
// Tracks whether av_write_header has been called on avformatEncoder.
static bool isEncoderHeaderWritten = false;
#if defined(HAVE_SWSCALE)
// Cached RGB32 -> encoder-pix_fmt conversion context.
static struct SwsContext *imgConvertEncoderCtx = NULL;
#endif
#endif
// Scratch image buffer used during encoding (allocated in CreateEncoder).
static uint8_t *tempImage = NULL;

/** constructor
 
    All Frame objects share a set of lookup maps,
    which are initialized once (we are using a variant of the Singleton pattern). 
 
*/

Frame::Frame() : bytesInFrame( 0 )
{
#if defined(HAVE_LIBAVCODEC)
	// libavcodec global registration and codec open are not thread-safe
	// in this API generation, hence the mutex.
	pthread_mutex_lock( &avcodec_mutex );
	av_register_all();
	libavcodec = avcodec_alloc_context();
//	libavcodec->thread_count = 2;
	avcodec_open( libavcodec, avcodec_find_decoder( CODEC_ID_DVVIDEO ) );
	pthread_mutex_unlock( &avcodec_mutex );
	// 144000 bytes = one PAL DV frame, the larger of the two frame sizes.
	data = ( unsigned char* ) av_mallocz( 144000 );
#if defined(HAVE_SWSCALE)
	imgConvertRgbCtx = NULL;
	imgConvertYuvCtx = NULL;
#endif
#else
	data = ( unsigned char* ) calloc( 1, 144000 );
#endif

#ifdef HAVE_LIBDV
	decoder = dv_decoder_new( FALSE,  FALSE,  FALSE );
	decoder->audio->arg_audio_emphasis = 2;
	dv_set_audio_correction ( decoder, DV_AUDIO_CORRECT_AVERAGE );
	dv_set_error_log( decoder, NULL );
	encoder = NULL;
#else

	// Without libdv we build our own audio shuffling maps once, shared by
	// all Frame instances (maps_initialized acts as the singleton guard).
	if ( maps_initialized == false )
	{

		// PAL: 1944 audio samples per frame spread over 12 DIF sequences.
		// The formulas below compute, for each sample index n, the byte
		// offset of that sample within the raw DV frame.
		// NOTE(review): derived from the DV audio shuffling scheme
		// (IEC 61834 / SMPTE 314M) -- confirm against the spec before
		// modifying any of this arithmetic.
		for ( int n = 0; n < 1944; ++n )
		{
			int sequence1 = ( ( n / 3 ) + 2 * ( n % 3 ) ) % 6;
			int sequence2 = sequence1 + 6;
			int block = 3 * ( n % 3 ) + ( ( n % 54 ) / 18 );

			block = 6 + block * 16;
			{
				register int byte = 8 + 2 * ( n / 54 );
				palmap_ch1[ n ] = sequence1 * 150 * 80 + block * 80 + byte;
				palmap_ch2[ n ] = sequence2 * 150 * 80 + block * 80 + byte;
				byte += ( n / 54 );
				palmap_2ch1[ n ] = sequence1 * 150 * 80 + block * 80 + byte;
				palmap_2ch2[ n ] = sequence2 * 150 * 80 + block * 80 + byte;
			}
		}
		// NTSC: 1620 audio samples per frame spread over 10 DIF sequences.
		for ( int n = 0; n < 1620; ++n )
		{
			int sequence1 = ( ( n / 3 ) + 2 * ( n % 3 ) ) % 5;
			int sequence2 = sequence1 + 5;
			int block = 3 * ( n % 3 ) + ( ( n % 45 ) / 15 );

			block = 6 + block * 16;
			{
				register int byte = 8 + 2 * ( n / 45 );
				ntscmap_ch1[ n ] = sequence1 * 150 * 80 + block * 80 + byte;
				ntscmap_ch2[ n ] = sequence2 * 150 * 80 + block * 80 + byte;
				byte += ( n / 45 );
				ntscmap_2ch1[ n ] = sequence1 * 150 * 80 + block * 80 + byte;
				ntscmap_2ch2[ n ] = sequence2 * 150 * 80 + block * 80 + byte;
			}
		}
		// 12-bit non-linear (companded) audio to 16-bit linear expansion
		// table; the top half (0x800..0xfff) mirrors the bottom half
		// negated, giving a signed decode.
		for ( int y = 0x700; y <= 0x7ff; ++y )
			compmap[ y ] = ( y - 0x600 ) << 6;
		for ( int y = 0x600; y <= 0x6ff; ++y )
			compmap[ y ] = ( y - 0x500 ) << 5;
		for ( int y = 0x500; y <= 0x5ff; ++y )
			compmap[ y ] = ( y - 0x400 ) << 4;
		for ( int y = 0x400; y <= 0x4ff; ++y )
			compmap[ y ] = ( y - 0x300 ) << 3;
		for ( int y = 0x300; y <= 0x3ff; ++y )
			compmap[ y ] = ( y - 0x200 ) << 2;
		for ( int y = 0x200; y <= 0x2ff; ++y )
			compmap[ y ] = ( y - 0x100 ) << 1;
		for ( int y = 0x000; y <= 0x1ff; ++y )
			compmap[ y ] = y;
		for ( int y = 0x800; y <= 0xfff; ++y )
			compmap[ y ] = -1 - compmap[ 0xfff - y ];
		maps_initialized = true;
	}
#endif
}


/** destructor
 
    Releases the decoder contexts and the frame buffer.  The shared
    encoder state (avformatEncoder etc.) is deliberately NOT torn down
    here -- see the commented-out block below -- because it is a
    process-wide singleton shared by all Frame instances.
*/
Frame::~Frame()
{
#if defined(HAVE_LIBAVCODEC)
	// avcodec_close/av_free are serialized like the open in the ctor.
	pthread_mutex_lock( &avcodec_mutex );
	avcodec_close( libavcodec );
	av_free( libavcodec );
/*	if ( avformatEncoder )
	{
		av_write_trailer( avformatEncoder );
		av_destruct_packet( &avpacketEncoder );
		avcodec_close( avformatEncoder->streams[0]->codec );
		av_free( avformatEncoder->streams[0]->codec );
		av_free( avformatEncoder->streams[0] );
		av_free( avformatEncoder );
	}*/
	pthread_mutex_unlock( &avcodec_mutex );
	// data was allocated with av_mallocz in this configuration.
	av_free( data );
#if defined(HAVE_SWSCALE)
	if ( imgConvertRgbCtx )
		sws_freeContext( imgConvertRgbCtx );
	if ( imgConvertYuvCtx )
		sws_freeContext( imgConvertYuvCtx );
#endif
#else
	// data was allocated with calloc in this configuration.
	free( data );
#endif

#ifdef HAVE_LIBDV
	dv_decoder_free( decoder );
	if ( encoder )
		dv_encoder_free( encoder );
#endif
}


/** assignment operator
 
    Copies the frame payload from another Frame and re-parses the header
    metadata.  The copy is clamped to 144000 bytes (the PAL frame size,
    and the allocation size of the data buffer) to prevent overrun.
 
    \param rhs the frame to copy from
    \return reference to this frame */
Frame& Frame::operator=( const Frame& rhs )
{
	// Guard against self-assignment: memcpy with fully overlapping
	// (identical) source and destination is undefined behavior.
	if ( this != &rhs )
	{
		bytesInFrame = rhs.GetFrameSize();
		if ( bytesInFrame > 144000 )
			bytesInFrame = 144000;
		memcpy( data, rhs.data, bytesInFrame );
		ExtractHeader();
	}
	return *this;
}


/** gets a subcode data packet
 
    This function returns a SSYB packet from the subcode data section.
 
    \param packNum the SSYB package id to return
    \param pack a reference to the variable where the result is stored
    \return true for success, false if no pack could be found */

bool Frame::GetSSYBPack( int packNum, Pack &pack ) const
{
#ifdef HAVE_LIBDV
	// libdv fills bytes 1..4; byte 0 is the pack id itself.
	pack.data[ 0 ] = packNum;
	dv_get_ssyb_pack( decoder, packNum, &pack.data[ 1 ] );
	return true;
#else
	/* number of DIF sequences is different for PAL and NTSC */

	int seqCount = IsPAL() ? 12 : 10;

	/* process all DIF sequences */

	for ( int i = 0; i < seqCount; ++i )
	{

		/* there are two DIF blocks in the subcode section */

		for ( int j = 0; j < 2; ++j )
		{

			/* each block has 6 packets */

			for ( int k = 0; k < 6; ++k )
			{

				/* calculate address: 150 DIF blocks per sequence, 80 bytes
				per DIF block, subcode blocks start at block 1, block and
				packet have 3 bytes header, packet is 8 bytes long
				(including header) */

				const unsigned char *s = &data[ i * 150 * 80 + 1 * 80 + j * 80 + 3 + k * 8 + 3 ];
				// printf("ssyb %d: %2.2x %2.2x %2.2x %2.2x %2.2x\n",
				// j * 6 + k, s[0], s[1], s[2], s[3], s[4]);
				// A pack's first byte is its id; copy the 5-byte pack on match.
				if ( s[ 0 ] == packNum )
				{
					//					printf("GetSSYBPack[%x]: sequence %d, block %d, packet %d\n", packNum,i,j,k);
					pack.data[ 0 ] = s[ 0 ];
					pack.data[ 1 ] = s[ 1 ];
					pack.data[ 2 ] = s[ 2 ];
					pack.data[ 3 ] = s[ 3 ];
					pack.data[ 4 ] = s[ 4 ];
					return true;
				}
			}
		}
	}
	return false;
#endif
}


/** gets a video auxiliary data packet
 
    Every DIF block in the video auxiliary data section contains 15
    video auxiliary data packets, for a total of 45 VAUX packets. As
    the position of a VAUX packet is fixed, we could directly look it
    up, but I choose to walk through all data as with the other
    routines.
 
    \param packNum the VAUX package id to return
    \param pack a reference to the variable where the result is stored
    \return true for success, false if no pack could be found */
bool Frame::GetVAUXPack( int packNum, Pack &pack ) const
{
#ifdef HAVE_LIBDV
	// libdv fills bytes 1..4; byte 0 is the pack id itself.
	pack.data[ 0 ] = packNum;
	dv_get_vaux_pack( decoder, packNum, &pack.data[ 1 ] );
	//cerr << "VAUX: 0x"
	//<< setw(2) << setfill('0') << hex << (int) pack.data[0]
	//<< setw(2) << setfill('0') << hex << (int) pack.data[1]
	//<< setw(2) << setfill('0') << hex << (int) pack.data[2]
	//<< setw(2) << setfill('0') << hex << (int) pack.data[3]
	//<< setw(2) << setfill('0') << hex << (int) pack.data[4]
	//<< endl;
	return true;

#else
	/* number of DIF sequences is different for PAL and NTSC */

	int seqCount = IsPAL() ? 12 : 10;

	/* process all DIF sequences */

	for ( int i = 0; i < seqCount; ++i )
	{

		/* there are three DIF blocks in the VAUX section */

		for ( int j = 0; j < 3; ++j )
		{

			/* each block has 15 packets */

			for ( int k = 0; k < 15; ++k )
			{

				/* calculate address: 150 DIF blocks per sequence, 80 bytes
				per DIF block, vaux blocks start at block 3, block has 3
				bytes header, packets have no header and are 5 bytes
				long. */

				const unsigned char *s = &data[ i * 150 * 80 + 3 * 80 + j * 80 + 3 + k * 5 ];
				//printf("vaux %d: %2.2x %2.2x %2.2x %2.2x %2.2x\n",
				//	j * 15 + k, s[0],  s[1],  s[2],  s[3],  s[4]);
				// A pack's first byte is its id; copy the 5-byte pack on match.
				if ( s[ 0 ] == packNum )
				{
					pack.data[ 0 ] = s[ 0 ];
					pack.data[ 1 ] = s[ 1 ];
					pack.data[ 2 ] = s[ 2 ];
					pack.data[ 3 ] = s[ 3 ];
					pack.data[ 4 ] = s[ 4 ];
					return true;
				}
			}
		}
	}
	return false;
#endif
}


/** gets an audio auxiliary data packet
 
    Every DIF block in the audio section contains 5 bytes audio
    auxiliary data and 72 bytes of audio data.  The function searches
    through all DIF blocks although AAUX packets are only allowed in
    certain defined DIF blocks.
 
    \param packNum the AAUX package id to return
    \param pack a reference to the variable where the result is stored
    \return true for success, false if no pack could be found */
bool Frame::GetAAUXPack( int packNum, Pack &pack ) const
{
#ifdef HAVE_LIBDV
	// libdv keeps the four well-known AAUX packs pre-parsed; serve those
	// directly when asked for one of them.
	bool done = false;
	switch ( packNum )
	{
	case 0x50:
		memcpy( pack.data, &decoder->audio->aaux_as, 5 );
		done = true;
		break;

	case 0x51:
		memcpy( pack.data, &decoder->audio->aaux_asc, 5 );
		done = true;
		break;

	case 0x52:
		memcpy( pack.data, &decoder->audio->aaux_as1, 5 );
		done = true;
		break;

	case 0x53:
		memcpy( pack.data, &decoder->audio->aaux_asc1, 5 );
		done = true;
		break;
	}
	if ( done )
		return true;
#endif

	/* number of DIF sequences is different for PAL and NTSC */

	int seqCount = IsPAL() ? 12 : 10;

	/* process all DIF sequences */

	for ( int i = 0; i < seqCount; ++i )
	{

		/* there are nine audio DIF blocks */
		for ( int j = 0; j < 9; ++j )
		{

			/* calculate address: 150 DIF blocks per sequence, 80 bytes
			   per DIF block, audio blocks start at every 16th beginning
			   with block 6, block has 3 bytes header, followed by one
			   packet. */

			const unsigned char *s = &data[ i * 150 * 80 + 6 * 80 + j * 16 * 80 + 3 ];
			// A pack's first byte is its id; copy the 5-byte pack on match.
			if ( s[ 0 ] == packNum )
			{
				// printf("aaux %d: %2.2x %2.2x %2.2x %2.2x %2.2x\n",
				// j, s[0], s[1], s[2], s[3], s[4]);
				pack.data[ 0 ] = s[ 0 ];
				pack.data[ 1 ] = s[ 1 ];
				pack.data[ 2 ] = s[ 2 ];
				pack.data[ 3 ] = s[ 3 ];
				pack.data[ 4 ] = s[ 4 ];
				return true;
			}
		}
	}
	return false;
}

/** gets the size of the frame
 
    A PAL DV frame occupies 144000 bytes, an NTSC frame 120000 bytes.
 
    \return the length of the frame in bytes */

int Frame::GetFrameSize( void ) const
{
	if ( IsPAL() )
		return 144000;
	return 120000;
}


/** gets the frame rate of the video
 
    PAL runs at exactly 25 fps; NTSC at 30000/1001 (approx. 29.97) fps.
 
    \return frames per second */

float Frame::GetFrameRate( void ) const
{
	if ( IsPAL() )
		return 25.0;
	return 30000.0 / 1001.0;
}


/** checks whether the frame is in PAL or NTSC format
 
    Reads the DSF flag (bit 7 of header byte 3); when libdv is
    available, its own detection is consulted as a fallback.
 
    \todo function can't handle "empty" frame
    \return true for PAL frame, false for a NTSC frame
*/

bool Frame::IsPAL( void ) const
{
	// DSF flag: bit 7 of the fourth byte of the frame.
	bool pal = ( data[ 3 ] & 0x80 ) != 0;

#ifdef HAVE_LIBDV
	if ( !pal )
		pal = dv_is_PAL( decoder );
#endif
	return pal;
}


/** checks whether this frame is the first in a new recording
 
    To determine this, the function looks at the recStartPoint bit in
    AAUX pack 51.
 
    \return true if this frame is the start of a new recording */

bool Frame::IsNewRecording() const
{
#ifdef HAVE_LIBDV
	return dv_is_new_recording( decoder, data );
#else
	Pack aauxSourceControl;

	/* if the pack cannot be found, report "no new recording" */
	if ( !GetAAUXPack( 0x51, aauxSourceControl ) )
		return false;

	/* the rec start point bit is active-low: zero marks a start */
	return ( aauxSourceControl.data[ 2 ] & 0x80 ) == 0;
#endif
}


/** checks whether this frame is playing at normal speed
 
    To determine this, the function looks at the speed bit in
    AAUX pack 51 (via libdv when available).
 
    \return true if this frame is playing at normal speed 
*/

bool Frame::IsNormalSpeed() const
{
#ifdef HAVE_LIBDV
	/* don't do audio if speed is not 1 */
	return dv_is_normal_speed( decoder );
#else
	/* without libdv we cannot read the speed bit; assume normal speed.
	   (The previous code unconditionally returned before an unreachable
	   second return statement; restructured with #else for clarity.) */
	return true;
#endif
}


/** check whether we have received as many bytes as expected for this frame
 
    \return true if this frame is complete, false otherwise */

bool Frame::IsComplete( void ) const
{
	// Complete once the byte count reaches the nominal PAL/NTSC frame size.
	return !( bytesInFrame < GetFrameSize() );
}


/// Keep only the upper (even-line) field: duplicate each even scanline
/// into the odd scanline directly below it, in place.
void Frame::GetUpperField( AVPicture * image, int bpp )
{
	const int rowBytes = GetWidth( ) * bpp;
	const int rows = GetHeight( );
	uint8_t *base = ( uint8_t * ) image->data[0];

	for ( int row = 0; row < rows; row += 2 )
		memcpy( base + rowBytes * ( row + 1 ), base + rowBytes * row, rowBytes );
}


/// Keep only the lower (odd-line) field: duplicate each odd scanline
/// into the even scanline directly above it, in place.
void Frame::GetLowerField( AVPicture * image, int bpp )
{
	const int rowBytes = GetWidth( ) * bpp;
	const int rows = GetHeight( );
	uint8_t *base = ( uint8_t * ) image->data[0];

	for ( int row = 0; row < rows; row += 2 )
		memcpy( base + rowBytes * row, base + rowBytes * ( row + 1 ), rowBytes );
}


/* Linear Blend deinterlace filter - C version contributed by Rogerio Brito.

   Builds the destination image (pdst) line by line from the source
   image (psrc).  Each interior destination line i is the weighted
   average of source lines i-1, i and i+1, with line i carrying weight
   2 (i.e. (above + 2*current + below) / 4).

   Remarks:
   * each destination line is independent of previously written lines
     only when pdst != psrc; callers in this file pass the same picture
     for both, matching the original in-place behavior;
   * the first and last lines cannot be blended and are copied verbatim.
*/
void Frame::Deinterlace( AVPicture *pdst, AVPicture *psrc, int stride, int height )
{
	uint8_t *dst = pdst->data[0];
	uint8_t *above = psrc->data[0];   /* line y-1 */
	uint8_t *cur = above + stride;    /* line y   */
	uint8_t *below = cur + stride;    /* line y+1 */

	/* first line: no line above, copy unchanged */
	memcpy( dst, above, stride );
	dst += stride;

	for ( int y = 1; y < height - 1; ++y )
	{
		/* blend: (above + 2*current + below) / 4 */
		for ( int x = 0; x < stride; ++x )
			dst[ x ] = ( above[ x ] + ( cur[ x ] << 1 ) + below[ x ] ) >> 2;

		/* slide the three-line window down one row */
		above = cur;
		cur = below;
		below += stride;
		dst += stride;
	}

	/* last line: no line below, copy the line above it */
	memcpy( dst, above, stride );
}


/** Get the frame aspect ratio.
 
    Indicates whether the frame aspect ratio is normal (4:3) or wide (16:9),
    based on the display mode field of VAUX pack 0x61.
 
    \return true if the frame is wide (16:9), false if unknown or normal.
*/
bool Frame::IsWide( void ) const
{
	// Not using libdv due to libdv bug setting widescreen properly when encoding metadata
	// on SMPTE DV as encoded by ffmpeg.
	// return dv_format_wide( decoder ) > 0;
	Pack pack;
	if ( !GetVAUXPack( 0x61, pack ) )
		return false;

	const int displayMode = pack.data[2] & 0x7;
	return displayMode == 0x2 || displayMode == 0x7;
}


/** Get the frame image width.
 
    DV frames are always 720 pixels wide, for both PAL and NTSC.
 
    \return the width in pixels.
*/
int Frame::GetWidth()
{
	const int dvFrameWidth = 720;
	return dvFrameWidth;
}


/** Get the frame image height.
 
    576 lines for PAL, 480 lines for NTSC.
 
    \return the height in pixels.
*/
int Frame::GetHeight()
{
	if ( IsPAL() )
		return 576;
	return 480;
}


#ifdef HAVE_LIBDV

/** Parse the DV frame header and metadata packs.
 
    Refreshes the libdv decoder's view of this frame's raw bytes; the
    header is parsed before the packs.  Called after the frame data
    changes (see operator= and EncodeRGB). */
void Frame::ExtractHeader( void )
{
	dv_parse_header( decoder, data );
	dv_parse_packs( decoder, data );
}

/** Extract the frame image as packed RGB.
 
    When libavcodec is available and \a fast is false, decodes with
    libavcodec and converts to RGB32; otherwise uses libdv's full-frame
    RGB24 decode.
 
    \param dest picture whose data[0]/linesize[0] receive the pixels
    \param fast true selects the quicker libdv path
    \return 0 always */
int Frame::ExtractRGB( AVPicture *dest, bool fast )
{
#if defined(HAVE_LIBAVCODEC)
	if (!fast) {
		AVFrame *frame = avcodec_alloc_frame();
		int got_picture;
	
		avcodec_decode_video( libavcodec, frame, &got_picture, data, GetFrameSize() ); // 10ms
		if ( got_picture )
		{
#if defined(HAVE_SWSCALE)
			if ( !imgConvertRgbCtx ) {
				// NOTE(review): this cached context is shared with ExtractBGR,
				// which requests BGR24 -- whichever runs first wins.  Verify
				// callers never mix RGB and BGR extraction on one Frame.
				imgConvertRgbCtx = sws_getContext( libavcodec->width, libavcodec->height, libavcodec->pix_fmt,
					GetWidth(), GetHeight(), PIX_FMT_RGB32, SWS_FAST_BILINEAR, NULL, NULL, NULL ); // 0ms
			}
			sws_scale( imgConvertRgbCtx, frame->data, frame->linesize, 0, libavcodec->height, dest->data, dest->linesize ); // 6ms
#else
			// Fixed: dest is already an AVPicture*; the previous &dest passed
			// an AVPicture** to img_convert, which does not compile.
			img_convert( dest, PIX_FMT_RGB32, reinterpret_cast<AVPicture*>( frame ), libavcodec->pix_fmt, GetWidth(), GetHeight() );
#endif
		}
		av_free( frame );
	}
	else {
#endif
		unsigned char *pixels[ 3 ];
		int pitches[ 3 ];
	
		// libdv writes packed RGB24: one plane, 3 bytes/pixel, 720 pixels/line.
		pixels[ 0 ] = ( unsigned char* ) dest->data[0];
		pixels[ 1 ] = NULL;
		pixels[ 2 ] = NULL;
	
		pitches[ 0 ] = 720 * 3;
		pitches[ 1 ] = 0;
		pitches[ 2 ] = 0;

		dv_decode_full_frame( decoder, data, e_dv_color_rgb, pixels, pitches ); // 9ms
#if defined(HAVE_LIBAVCODEC)
	}
#endif
	return 0;
}

/** Extract the frame image as packed BGR.
 
    When libavcodec is available and \a fast is false, decodes with
    libavcodec and converts to BGR24; otherwise uses libdv's full-frame
    RGB24 decode.
 
    Fixed: a local, uninitialized `AVPicture dest;` shadowed the output
    parameter, so the conversion wrote through garbage pointers and the
    caller's buffer was never filled.  The conversion now targets the
    caller-supplied picture.
 
    \param dest picture whose data[0]/linesize[0] receive the pixels
    \param fast true selects the quicker libdv path
    \return 0 always */
int Frame::ExtractBGR( AVPicture *dest, bool fast )
{
#if defined(HAVE_LIBAVCODEC)
	if (!fast) {
		AVFrame *frame = avcodec_alloc_frame();
		int got_picture;
	
		avcodec_decode_video( libavcodec, frame, &got_picture, data, GetFrameSize() ); // 8.5ms
		if ( got_picture )
		{
#if defined(HAVE_SWSCALE)
			if ( !imgConvertRgbCtx ) {
				// NOTE(review): cached context shared with ExtractRGB (RGB32);
				// whichever runs first wins.  Verify callers never mix the two.
				imgConvertRgbCtx = sws_getContext( libavcodec->width, libavcodec->height, libavcodec->pix_fmt,
					GetWidth(), GetHeight(), PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL ); // 0ms
			}
			sws_scale( imgConvertRgbCtx, frame->data, frame->linesize, 0, libavcodec->height, dest->data, dest->linesize ); // 5.5ms
#else
			img_convert( dest, PIX_FMT_BGR24, reinterpret_cast<AVPicture*>( frame ), libavcodec->pix_fmt, GetWidth(), GetHeight() );
#endif
		}
		av_free( frame );
	}
	else {
#endif
		unsigned char *pixels[ 3 ];
		int pitches[ 3 ];
	
		// libdv writes packed RGB24: one plane, 3 bytes/pixel, 720 pixels/line.
		pixels[ 0 ] = ( unsigned char* ) dest->data[0];
		pixels[ 1 ] = NULL;
		pixels[ 2 ] = NULL;
	
		pitches[ 0 ] = 720 * 3;
		pitches[ 1 ] = 0;
		pitches[ 2 ] = 0;

		dv_decode_full_frame( decoder, data, e_dv_color_rgb, pixels, pitches ); // 9ms
#if defined(HAVE_LIBAVCODEC)
	}
#endif
	return 0;
}

/// Extract RGB and post-process for preview: full linear-blend
/// deinterlace on the slow path, single-field duplication on the fast path.
int Frame::ExtractPreviewRGB(AVPicture *dest, bool fast )
{
	ExtractRGB( dest, fast ); // 16ms
	if ( fast )
		GetLowerField( dest, 3 ); // 1.6ms
	else
		Deinterlace( dest, dest, GetWidth() * 3, GetHeight() ); // 5ms
	return 0;
}

/// Extract BGR and post-process for preview: full linear-blend
/// deinterlace on the slow path, single-field duplication on the fast path.
int Frame::ExtractPreviewBGR( AVPicture *dest, bool fast )
{
	ExtractBGR( dest, fast ); // 14ms
	if ( fast )
		GetLowerField( dest, 3 ); // 1.6ms
	else
		Deinterlace( dest, dest, GetWidth() * 3, GetHeight() ); // 10ms
	return 0;
}

/** Extract the frame image as packed YUV 4:2:2.
 
    When libavcodec is available and \a fast is false, decodes with
    libavcodec and converts to packed YUV422; otherwise uses libdv's
    full-frame YUV decode.
 
    Fixed: the conversion previously targeted a local, uninitialized
    `AVPicture output;`, writing through garbage pointers while the
    caller's \a dest was never filled.  It now writes to \a dest.
 
    \param dest picture whose data[0]/linesize[0] receive the pixels
    \param fast true selects the quicker libdv path
    \return 0 always */
int Frame::ExtractYUV( AVPicture *dest, bool fast )
{
#if defined(HAVE_LIBAVCODEC)
	if (!fast) {
		AVFrame *frame = avcodec_alloc_frame();
		int got_picture;
	
		avcodec_decode_video( libavcodec, frame, &got_picture, data, GetFrameSize() );
		if ( got_picture )
		{
#if defined(HAVE_SWSCALE)
			if ( !imgConvertYuvCtx )
				imgConvertYuvCtx = sws_getContext( libavcodec->width, libavcodec->height, libavcodec->pix_fmt,
					GetWidth(), GetHeight(), PIX_FMT_YUV422, SWS_FAST_BILINEAR, NULL, NULL, NULL );
			sws_scale( imgConvertYuvCtx, frame->data, frame->linesize, 0, libavcodec->height, dest->data, dest->linesize );
#else
			img_convert( dest, PIX_FMT_YUV422, (AVPicture *)frame, libavcodec->pix_fmt, GetWidth(), GetHeight() );
#endif
		}
		av_free( frame );
	}
	else {
#endif
		unsigned char *pixels[ 3 ];
		int pitches[ 3 ];
	
		// libdv writes packed YUY2: one plane, 2 bytes per pixel.
		pixels[ 0 ] = ( unsigned char* ) dest->data[0];
		pitches[ 0 ] = decoder->width * 2;
	
		dv_decode_full_frame( decoder, data, e_dv_color_yuv, pixels, pitches );
#if defined(HAVE_LIBAVCODEC)
	}
#endif

	return 0;
}

/** Extract the frame image as planar YUV 4:2:0 (I420).
 
    With libavcodec the decoded planes are copied/repacked directly into
    the three output planes; without it, libdv first produces packed
    YUY2 into \a yuv, which is then demultiplexed into planes.
 
    \param yuv scratch buffer for packed YUV (used only on the libdv path;
           assumed large enough for width*height*2 bytes -- TODO confirm
           with callers)
    \param output the three destination planes: output[0]=Y (width*height),
           output[1]=Cb, output[2]=Cr (quarter size each)
    \return 0 always */
int Frame::ExtractYUV420( uint8_t *yuv, uint8_t *output[ 3 ] )
{
#if defined(HAVE_LIBAVCODEC)
	AVFrame *frame = avcodec_alloc_frame();
	int got_picture;

	avcodec_decode_video( libavcodec, frame, &got_picture, data, GetFrameSize() );

	int width = GetWidth(), height = GetHeight();

	if ( libavcodec->pix_fmt == PIX_FMT_YUV420P )   // PAL
	{
		// Source is already 4:2:0 -- straight plane copies, honoring the
		// decoder's per-plane linesize.
		int h2 = height / 2;
		int w2 = width / 2;

		uint8_t *Y = output[ 0 ];
		uint8_t *Cb = output[ 1 ];
		uint8_t *Cr = output[ 2 ];
		uint8_t *fY = frame->data[ 0 ];
		uint8_t *fCb = frame->data[ 1 ];
		uint8_t *fCr = frame->data[ 2 ];

		for ( int y = 0; y < height; y ++ )
		{
			memcpy( Y, fY, width );
			Y += width;
			fY += frame->linesize[ 0 ];
			// Chroma planes are half height: only copy for the first h2 rows.
			if ( y < h2 )
			{
				memcpy( Cb, fCb, w2 );
				memcpy( Cr, fCr, w2 );
				Cb += w2;
				Cr += w2;
				fCb += frame->linesize[ 1 ];
				fCr += frame->linesize[ 2 ];
			}
		}
	}
	else // libavcodec.pix_fmt == PIX_FMT_YUV411P // NTSC
	{
		// Source is 4:1:1 (full-height chroma, quarter-width).  Repack to
		// 4:2:0 by doubling each chroma sample horizontally and averaging
		// nothing vertically: every other pair of chroma rows is merged by
		// interleaving samples from two consecutive source rows.
		int w4 = width / 4;

		uint8_t *Y = output[ 0 ];
		uint8_t *Cb = output[ 1 ];
		uint8_t *Cr = output[ 2 ];
		uint8_t *fY = frame->data[ 0 ];
		uint8_t *fCb = frame->data[ 1 ];
		uint8_t *fCr = frame->data[ 2 ];

		for ( int y = 0; y < height; y ++ )
		{
			memcpy( Y, fY, width );
			Y += width;
			fY += frame->linesize[ 0 ];
			if ( y % 2 == 0 )
			{
				for ( int x = 0; x < w4; x ++ )
				{
					*Cb ++ = fCb[ x ];
					*Cb ++ = fCb[ frame->linesize[ 1 ] + x ];
					*Cr ++ = fCr[ x ];
					*Cr ++ = fCr[ frame->linesize[ 2 ] + x ];
				}
				fCb += 2 * frame->linesize[ 1 ];
				fCr += 2 * frame->linesize[ 2 ];
			}
		}
	}
	av_free( frame );
#else
	unsigned char *pixels[ 3 ];
	int pitches[ 3 ];
	int width = GetWidth(), height = GetHeight();

	// Decode packed YUY2 into the caller-provided scratch buffer first.
	pixels[ 0 ] = ( unsigned char* ) yuv;
	pitches[ 0 ] = decoder->width * 2;

	dv_decode_full_frame( decoder, data, e_dv_color_yuv, pixels, pitches );

	int w2 = width / 2;
	uint8_t *y = output[ 0 ];
	uint8_t *cb = output[ 1 ];
	uint8_t *cr = output[ 2 ];
	uint8_t *p = yuv;

	for ( int i = 0; i < height; i += 2 )
	{
		/* process two scanlines (one from each field, interleaved) */
		for ( int j = 0; j < w2; j++ )
		{
			/* packed YUV 422 is: Y[i] U[i] Y[i+1] V[i] */
			*( y++ ) = *( p++ );
			*( cb++ ) = *( p++ );
			*( y++ ) = *( p++ );
			*( cr++ ) = *( p++ );
		}
		/* process next two scanlines (one from each field, interleaved) */
		for ( int j = 0; j < w2; j++ )
		{
			/* skip every second line for U and V */
			*( y++ ) = *( p++ );
			p++;
			*( y++ ) = *( p++ );
			p++;
		}
	}
#endif
	return 0;
}

/** Extract packed YUV 4:2:2 and post-process for preview.
 
    Slow path runs the full linear-blend deinterlace; fast path simply
    duplicates the lower field.
 
    Fixed: Deinterlace was called with a stride of GetWidth() * 3, but
    packed YUV 4:2:2 is 2 bytes per pixel (GetLowerField below correctly
    uses 2), so the filter overran every line by a third of its width.
 
    \param dest picture holding the packed YUV pixels (in/out)
    \param fast true selects the cheaper single-field path
    \return 0 always */
int Frame::ExtractPreviewYUV( AVPicture *dest, bool fast )
{
	ExtractYUV( dest, fast );
	if (!fast) {
		Deinterlace( dest, dest, GetWidth() * 2, GetHeight() );
	}
	else {
		GetLowerField( dest, 2 );
	}
	return 0;
}

bool Frame::CreateEncoder( bool isPAL, bool isWide )
{
	pthread_mutex_lock( &avcodec_mutex );
#if defined(HAVE_LIBAVCODEC)
	if ( avformatEncoder == NULL )
	{
		avformatEncoder = av_alloc_format_context();
		if ( avformatEncoder )
		{
			avformatEncoder->oformat = guess_format( "dv", NULL, NULL );
			av_new_stream( avformatEncoder, 0 );
			AVCodecContext *avcodecEncoder = avformatEncoder->streams[0]->codec;
			avcodecEncoder->width = FRAME_MAX_WIDTH;
			avcodecEncoder->height = isPAL ? 576 : 480;
			if ( isPAL )
			{
				if ( isWide )
					avcodecEncoder->sample_aspect_ratio = av_d2q( 17.0 / avcodecEncoder->width * avcodecEncoder->height / 10.0, 255 );
				else
 					avcodecEncoder->sample_aspect_ratio = isWide ? ( AVRational ){ 118, 81 } : ( AVRational ){ 59, 54 };
			}
			else
			{
				if ( isWide )
					avcodecEncoder->sample_aspect_ratio = av_d2q( 17.0 / avcodecEncoder->width * avcodecEncoder->height / 10.0, 255 );
				else
 					avcodecEncoder->sample_aspect_ratio = isWide ? ( AVRational ){ 40, 33 } : ( AVRational ){ 10, 11 };
			}
			avcodecEncoder->thread_count = 2;
			avcodecEncoder->time_base= isPAL ? ( AVRational ){ 1, 25 } : ( AVRational ){ 1001, 30000 };
			avcodecEncoder->pix_fmt = isPAL ? PIX_FMT_YUV420P : PIX_FMT_YUV411P;
			avcodecEncoder->flags |= CODEC_FLAG_INTERLACED_DCT;
			av_set_parameters( avformatEncoder, NULL );
			avcodec_open( avcodecEncoder, avcodec_find_encoder( CODEC_ID_DVVIDEO ) );
			av_new_packet( &avpacketEncoder, 144000 );
			tempImage = ( uint8_t* ) av_malloc(
				avpicture_get_size( avcodecEncoder->pix_fmt, avcodecEncoder->width, avcodecEncoder->height ) );
#if defined(HAVE_SWSCALE)
			imgConvertEncoderCtx = sws_getContext( avcodecEncoder->width, avcodecEncoder->height, PIX_FMT_RGB32,
				avcodecEncoder->width, avcodecEncoder->height, avcodecEncoder->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
#endif
		}
		else
		{
			pthread_mutex_unlock( &avcodec_mutex );
			return false;
		}
	}
#endif
	if ( encoder == NULL )
	{
			encoder = dv_encoder_new( FALSE, FALSE, FALSE );
		if ( encoder )
		{
			encoder->isPAL = isPAL;
			encoder->is16x9 = isWide;
			encoder->vlc_encode_passes = 3;
			encoder->static_qno = 0;
			encoder->force_dct = DV_DCT_AUTO;

			tempImage = new uint8_t[ FRAME_MAX_WIDTH * FRAME_MAX_HEIGHT * 4 ];
		}
	}
	pthread_mutex_unlock( &avcodec_mutex );
	return ( encoder != NULL );
}

/// Return the number of audio samples for the given frequency at the
/// given iteration of the 5-frame NTSC audio cycle (via libdv), or 0 if
/// no encoder could be created.
int Frame::CalculateNumberSamples( int frequency, int iteration )
{
	if ( !CreateEncoder( IsPAL(), IsWide() ) )
		return 0;
	return dv_calculate_samples( encoder, frequency, iteration );
}

/** Encode an RGB32 image into this frame's DV data.
 
    With libavcodec: converts the RGB input to the encoder pixel format,
    encodes one DV video frame, and remuxes it through the shared DV
    muxer into this frame's data buffer, then re-parses the header.
    Without libavcodec: encodes directly with libdv.
 
    \param rgb packed RGB32 pixels at the encoder's width/height
           (assumed -- TODO confirm callers always supply RGB32, as
           both the avpicture_fill and the fallback path imply) */
void Frame::EncodeRGB( uint8_t *rgb )
{
	if ( CreateEncoder( IsPAL(), IsWide() ) )
	{
#if defined(HAVE_LIBAVCODEC)
		AVFrame *input = avcodec_alloc_frame();
		AVFrame *output = avcodec_alloc_frame();

		if ( input && output )
		{
			AVCodecContext *avcodecEncoder = avformatEncoder->streams[0]->codec;
			int width = avcodecEncoder->width;
			int height = avcodecEncoder->height;
			// PAL (>480 lines) frames are 144000 bytes, NTSC 120000.
			size_t size = height > 480 ? 144000 : 120000;

			// Convert color space
			// output borrows the shared tempImage scratch buffer.
			avpicture_fill( ( AVPicture* )output, tempImage, avcodecEncoder->pix_fmt, width, height );
			avpicture_fill( ( AVPicture* )input, rgb, PIX_FMT_RGB32, width, height );
#if defined(HAVE_SWSCALE)
			sws_scale( imgConvertEncoderCtx, input->data, input->linesize, 0, height,
				output->data, output->linesize);
#else
			img_convert( ( AVPicture* )output, avcodecEncoder->pix_fmt, ( AVPicture* )input, PIX_FMT_RGB32, width, height );
#endif

			// Encode
			// Mux directly into this frame's data buffer via a memory ByteIOContext;
			// the stream header is written once, lazily, before the first frame.
			bytesInFrame = avcodec_encode_video( avcodecEncoder, avpacketEncoder.data, size, output );
			url_open_buf( &avformatEncoder->pb, data, bytesInFrame, URL_WRONLY );
			avpacketEncoder.size = bytesInFrame;
			if ( !isEncoderHeaderWritten )
			{
				av_write_header( avformatEncoder );
				isEncoderHeaderWritten = true;
			}
			av_write_frame( avformatEncoder, &avpacketEncoder );
			url_close_buf( &avformatEncoder->pb );

			// Update this frame's metadata
			ExtractHeader();

			// Force the output to be IEC 61834
			data[4] &= 0xf8;

			av_free( output );
			av_free( input );
		}
#else
			dv_encode_full_frame( encoder, &rgb, e_dv_color_rgb, data );
#endif
	}
}

#endif
