#include "Stdafx.h"
#include "VideoDecoder.h"
#include <errno.h>
/********************************************************************************************/
#if defined(__cplusplus)
extern "C" {
#endif
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#include <ffmpeg/libavcodec/avcodec.h>
#include <ffmpeg/avformat.h>
#include <ffmpeg/swscale.h>
#if defined(__cplusplus)
}
#endif
/********************************************************************************************/
#include <windows.h>
#include  <math.h>
/********************************************************************************************/
// Constructor: zeroes all decoder state and performs FFmpeg's process-wide
// registration. No stream is opened here -- callers must call Initialize()
// before Decode().
CVideoDecoder::CVideoDecoder() : 
	m_pFormatCtx(NULL),
	m_pScale_context(NULL),
	m_pFrame(NULL),
	m_pFrameRGB(NULL),
	m_pDIFrame(NULL),
	m_pCodecCtx(NULL),
	m_bFlip(0),
	m_nVideoStream(-1),
	m_bActivateDInterlace(0),
	m_nDestWidth(0),
	m_nDestHeight(0)
{
	// Legacy FFmpeg global initialization (these calls were required once per
	// process in old releases; they are idempotent there, so running them per
	// instance is redundant but harmless).
	av_register_all();
	avcodec_init();
	avcodec_register_all();
}
/********************************************************************************************/
// Destructor is intentionally empty: nothing is released here. All resources
// (frames, codec, format context, scale context) are freed by Terminate(),
// which callers apparently must invoke explicitly -- TODO confirm with callers.
CVideoDecoder::~CVideoDecoder()
{
}
/********************************************************************************************/
// Read callback handed to init_put_byte() in CVideoDecoder::Initialize().
// This decoder pushes compressed data into the IO buffer itself (see
// CreatePacket), so the pull-style callback never supplies any bytes and
// always reports that 0 bytes were read.
int readPacket(void* opaque, uint8_t* buf, int buf_size)
{
	// Deliberate no-op; parameters are required by the callback signature only.
	(void)opaque;
	(void)buf;
	(void)buf_size;
	return 0;
}
/********************************************************************************************/
// Vertically flips the picture in place by swapping scanlines top<->bottom
// using a single line-sized scratch buffer.
//
// @param avctx   Unused; kept for interface compatibility with callers.
// @param picture Picture whose plane 0 is flipped (linesize[0] bytes per row).
// @param nHeight Number of rows in the picture.
void flip(AVCodecContext *avctx, AVPicture * picture, unsigned long nHeight)
{
	unsigned char* s; 
	unsigned char* d; 
	uint8_t* b;               // scratch buffer holding one scanline during the swap
	unsigned long nModHeight;

	(void)avctx; // not used

	// Index of the bottom-most line that takes part in the swap. For odd
	// heights this is the last line; for even heights the very last line is
	// left untouched.
	// NOTE(review): skipping the bottom line on even heights may be a
	// deliberate field-parity choice, but it is unconfirmed -- verify before
	// changing the formula.
	nModHeight = ((nHeight - 1) / 2) * 2; 
	b = (uint8_t*)malloc(picture->linesize[0]);
	// BUG FIX: the allocation was previously used without a NULL check; on
	// out-of-memory we now leave the picture unflipped instead of crashing.
	if (NULL == b)
		return;
	for (s= picture->data[0], d = picture->data[0] + picture->linesize[0] * (nModHeight); 
		  s < d; s+= picture->linesize[0], d-= picture->linesize[0]) 
	{ 
		memcpy(b, s, picture->linesize[0]); 
		memcpy(s, d, picture->linesize[0]); 
		memcpy(d, b, picture->linesize[0]); 
	} 
	free(b);
}
/********************************************************************************************/
#define MAKE_BUFFER_SIZE(nWidth, nHeight) (max((1024 *1024), (nWidth * nHeight)))
/**
 * Opens an FFmpeg input stream for the given container/elementary format and
 * prepares decode + RGB conversion buffers.
 *
 * @param szFormat            FFmpeg short name of the input format.
 * @param iWidth, iHeight     Expected frame size; also sizes the IO buffer
 *                            (at least 1 MB, see MAKE_BUFFER_SIZE).
 * @param bFlip               Non-zero: Decode() flips RGB output vertically.
 * @param bActivateDInterlace Non-zero: Decode() may deinterlace large frames.
 * @return true on success; false on any failure (partial state is released).
 */
bool CVideoDecoder::Initialize(const char* szFormat, int iWidth, int iHeight, int bFlip, int bActivateDInterlace)
{
   unsigned int         i;
   AVFormatParameters   ap;
   AVCodec*             pCodec;
   ByteIOContext *      pByteIOCxt;
   int                  numBytes;
   uint8_t*             pBuffer;
   unsigned long        nBufferSize = MAKE_BUFFER_SIZE(iWidth, iHeight);
   unsigned char*       pByteIOCxtBuf = (unsigned char*)malloc(sizeof(unsigned char) * nBufferSize);

   // BUG FIX: the IO buffer allocation was used without a NULL check.
   if (NULL == pByteIOCxtBuf)
      return false;

   memset(pByteIOCxtBuf, 0, nBufferSize);
   memset(&ap, 0, sizeof(AVFormatParameters));

   m_nDestHeight = 0;
   m_nDestWidth = 0;

   m_bFirstFrame = true; // We are waiting to the first frame

   ap.prealloced_context = 1;

   AVInputFormat* pInputFmt = av_find_input_format(szFormat);
   if (pInputFmt == NULL)
   {
      free(pByteIOCxtBuf);
      return false;
   }

   // Hand our push-style buffer to FFmpeg. readPacket() never pulls data;
   // CreatePacket() writes compressed input into this buffer directly.
   pByteIOCxt = (ByteIOContext*)av_mallocz(sizeof(ByteIOContext));
   // BUG FIX: av_mallocz() can fail; it was previously used unchecked.
   if (NULL == pByteIOCxt)
   {
      free(pByteIOCxtBuf);
      return false;
   }
   init_put_byte(pByteIOCxt, pByteIOCxtBuf, nBufferSize, 0, this, readPacket, NULL, NULL);
   pByteIOCxt->is_streamed = 1;

   m_pFormatCtx = av_alloc_format_context();

   // Open the stream. the 4th argument is the filename, which we ignore.
   if (av_open_input_stream(&m_pFormatCtx, pByteIOCxt, "", pInputFmt, &ap) < 0)
   {
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      return false;
   }

   m_pFormatCtx->iformat->flags = AVFMT_NOFILE;

   // Find the first video stream
   for (i = 0; i < m_pFormatCtx->nb_streams; i++)
   {
      if (m_pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
      {
         m_nVideoStream = i;
         break;
      }
   }

   if (m_nVideoStream == -1)
   {
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      return false; // Didn't find a video stream
   }

   // Get a pointer to the codec context for the video stream
   // (owned by the format context -- do not free separately).
   m_pCodecCtx = m_pFormatCtx->streams[m_nVideoStream]->codec;

   // Find the decoder for the video stream
   pCodec = avcodec_find_decoder(m_pCodecCtx->codec_id);
   if (pCodec == NULL)
   {
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      return false; // Codec not found
   }

   if (avcodec_open(m_pCodecCtx, pCodec) < 0)
   {
      avcodec_close(m_pCodecCtx);
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      return false; // Could not open codec
   }

   // Allocate video frame (decoded YUV output)
   m_pFrame = avcodec_alloc_frame();
   if (NULL == m_pFrame)
   {
      avcodec_close(m_pCodecCtx);
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      return false; // Could not allocate frame
   }

   // Allocate an AVFrame structure for the RGB conversion target
   m_pFrameRGB = avcodec_alloc_frame();
   if (m_pFrameRGB == NULL)
   {
      // BUG FIX: m_pFrame used to leak on this path.
      av_free(m_pFrame);
      m_pFrame = NULL;
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      avcodec_close(m_pCodecCtx);
      return false;
   }

   // Determine required buffer size and allocate buffer
   m_pCodecCtx->width = iWidth;
   m_pCodecCtx->height = iHeight;
   numBytes = avpicture_get_size(PIX_FMT_RGB32, m_pCodecCtx->width, m_pCodecCtx->height);
   pBuffer = (uint8_t*)malloc(sizeof(uint8_t) * numBytes);
   // BUG FIX: this allocation failure was silently ignored, leaving
   // m_pFrameRGB with NULL data planes.
   if (NULL == pBuffer)
   {
      av_free(m_pFrameRGB);
      m_pFrameRGB = NULL;
      av_free(m_pFrame);
      m_pFrame = NULL;
      avcodec_close(m_pCodecCtx);
      free(pByteIOCxtBuf);
      av_free(pByteIOCxt);
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
      return false;
   }

   // Assign appropriate parts of buffer to image planes in pFrameRGB
   avpicture_fill((AVPicture *)m_pFrameRGB, pBuffer, PIX_FMT_RGB32, m_pCodecCtx->width, m_pCodecCtx->height);
   m_bFlip = bFlip;
   m_bActivateDInterlace = bActivateDInterlace;
   return true;
}
/********************************************************************************************/
/**
 * Releases everything Initialize()/Decode() allocated and resets all state.
 *
 * @param bCodecOpen true if avcodec_open() succeeded, so the codec context
 *                   must be closed here; false to skip avcodec_close().
 * @return always true.
 */
bool CVideoDecoder::Terminate(bool bCodecOpen)
{
   // Free the RGB image: the pixel buffer was malloc'ed by us, the frame
   // struct by avcodec_alloc_frame().
   if (m_pFrameRGB)
   {
      uint8_t   *buffer = m_pFrameRGB->data[0];
      free(buffer);
      av_free(m_pFrameRGB);
      m_pFrameRGB = NULL;
   }

   // Free the YUV frame (its data planes are owned by the codec)
   if (m_pFrame)
      av_free(m_pFrame);
   m_pFrame = NULL;

   // Free the deinterlace scratch frame (lazily allocated in Decode)
   if (m_pDIFrame)
   {
      uint8_t   *buffer = m_pDIFrame->data[0];
      free(buffer);
      av_free(m_pDIFrame);
      m_pDIFrame = NULL;
   }

   // Close the codec (the context itself belongs to the format context)
   if (bCodecOpen && m_pCodecCtx)
      avcodec_close(m_pCodecCtx);
   m_pCodecCtx = NULL;

   // Close the video stream and its IO context
   if (m_pFormatCtx)
   {
      ByteIOContext* pByteIoCxt = m_pFormatCtx->pb;

      // BUG FIX: pb was dereferenced without a NULL check.
      if (pByteIoCxt)
      {
         if (pByteIoCxt->buffer)
            free(pByteIoCxt->buffer); // malloc'ed in Initialize -> plain free()

         av_free(pByteIoCxt);
      }
      av_close_input_file(m_pFormatCtx);
      m_pFormatCtx = NULL;
   }

   if (m_pScale_context)
   {
      sws_freeContext(m_pScale_context);
      m_pScale_context = NULL;
   }

   m_nDestHeight = 0;
   m_nDestWidth = 0;

   m_bFirstFrame = false;

   return true;
}
/********************************************************************************************/
/**
 * Decodes one compressed frame and (optionally) converts it to RGB32.
 *
 * @param pInBuffer        Compressed input frame.
 * @param nInBufSize       Size of pInBuffer in bytes.
 * @param pOutBuffer       Destination for RGB32 pixels (used only when
 *                         bDecodeToRGB is true and a frame was finished).
 * @param nOutBufSize      Capacity of pOutBuffer in bytes.
 * @param nDestWidth/nDestHeight  Requested output size; 0 means "native size".
 * @param stride           Output row stride in bytes.
 * @param bDecodeToRGB     false: decode only, skip RGB conversion/copy.
 * @param pbFrameFinished  Out: non-zero when a complete frame was produced.
 * @return true on success (including some decode-error cases, see below);
 *         false on hard failure.
 *
 * NOTE(review): this function caches the scale context and RGB frame across
 * calls keyed on (m_nDestWidth, m_nDestHeight); the exact statement order in
 * the rebuild paths is load-bearing, so only comments were added here.
 */
bool CVideoDecoder::Decode(const unsigned char* pInBuffer, unsigned long nInBufSize, 
									unsigned char* pOutBuffer, unsigned long nOutBufSize, 
									unsigned long nDestWidth, unsigned long nDestHeight, 
									int stride, bool bDecodeToRGB, int *pbFrameFinished)
{
	AVPacket       packet;
   int            bFrameFinished = false;
   int            nReturn;
   unsigned int   nSize, nWidth, nHeight;
   uint8_t*       pBuffer;
   int            numBytes;

   // Deinterlcae picture
   bool           bUseFFMpegDInterlace = false;
   bool           bUseLinearDInterlace = false;
   AVPicture      *pDIPicture = NULL;
   // Timing instrumentation (only consumed by the commented-out trace below)
   unsigned long  nStartTick = GetTickCount(), nCopyPass=0, nImgConvertTick =0, nScaleTick;
   unsigned long  nScalePass = 0, nImgConvertPass =0, nFlipTick =0, nFlipPass =0;
   unsigned long  nDecodeTick =0, nDecodePass =0;
   int            nBytesDecode, nTotalBytesDecode, nBytesLeft;
   bool           bFlush = false;

   memset(&packet, 0, sizeof(AVPacket));

   // Not initialized (or already terminated)
   if(NULL == m_pCodecCtx){
       return false;
   }

   /*char szLog[256];
   GetCompositeRawH264Log(pInBuffer, nInBufSize, szLog, 256);

   ODFEI_0("CVideoDecoderWrapper:: frame info %s\n", szLog);*/
   // Wrap the caller's buffer in an AVPacket routed through our IO context
   nReturn = CreatePacket(pInBuffer, nInBufSize, &packet);
   if (nReturn < 0)
   {
      return false;
   }

   if(packet.stream_index == m_nVideoStream)
   {

      //static int tOkCounter = 0, tFailCounter = 0;

      // Decode video frame
      nDecodeTick = GetTickCount();
      // Detect mid-stream resolution growth: the cached buffers are sized for
      // the old height, so bail out rather than overflow them.
      int nTestHeight = m_pCodecCtx->height;
      nBytesDecode = avcodec_decode_video(m_pCodecCtx, m_pFrame, &bFrameFinished, packet.data, packet.size);
      if( nTestHeight < m_pCodecCtx->height){
         //ODME_0("CVideoDecoderWrapper::Decode - wrong size\n");
         av_free_packet(&packet);
         return false;
      }    

      // Decoder consumed the packet but produced no frame yet, or consumed it
      // only partially -- retry until everything is consumed or an error hits.
      if(bFrameFinished == false || (nBytesDecode != packet.size)){
         // Check if decode some bytes
         if(nBytesDecode == packet.size){
            // Assume frame is OK, but decode again (ffmpeg request)
            nBytesDecode = avcodec_decode_video(m_pCodecCtx, m_pFrame, &bFrameFinished, NULL, 0);      
            if(nBytesDecode == 0 && !bFrameFinished){
               bFrameFinished = true;
            }
         } else {   
            // See if we need to decode more 
            nTotalBytesDecode = nBytesDecode;
            nBytesLeft =  packet.size - nTotalBytesDecode;
            while(nTotalBytesDecode < packet.size){
               // Do the rest 
               nBytesDecode = avcodec_decode_video(m_pCodecCtx, m_pFrame, &bFrameFinished, &(packet.data[nTotalBytesDecode]), nBytesLeft);      
               if(nBytesDecode <= 0){
                  // That's it 
                  if(nBytesDecode < 0){
                     // Error 
//                     ODME_0("CVideoDecoderWrapper::Decode - decode fail\n");
                     av_free_packet(&packet);
                     return false;
                  } 
                  break;
               } 
               nTotalBytesDecode += nBytesDecode;
               nBytesLeft =  packet.size - nTotalBytesDecode;
            }
         }
      }
      nDecodePass = GetTickCount() - nDecodeTick;

      // Now check if decode success

      // Do we have error
      // NOTE(review): a decode error here is deliberately reported as SUCCESS
      // (return true) with *pbFrameFinished telling the caller whether a frame
      // exists -- confirm callers rely on this before changing it.
      if(nBytesDecode < 0){
         // Tell the use all is well
         av_free_packet(&packet);
         *pbFrameFinished = bFrameFinished;
         return true; 
      } 

      // Now we need to decide if need to flush 
      // If we have dest width check with previous
      if(nDestWidth){
         if((m_nDestWidth != nDestWidth) || (m_nDestHeight != nDestHeight)){
            bFlush = true;
         }
      } else {
         // Check if first frame
         if(m_bFirstFrame){
            bFlush = true;
            // And not any more
            m_bFirstFrame = false;
         }
      }

      if(bFlush){       
#define MPEG_START_OF_SEQUENCE         0xB0010000
#define MPEG_START_OF_SEQUENCE_HEADER  0xB3010000
         
#define MPEG_VIDEO_OBJECT_START_CODE   0x01010000
#define MPEG_VIDEO_LAYER_START_CODE    0x20010000
         //// Check if have start of sequence
         // NOTE(review): these probes read up to pInBuffer+133 without
         // checking nInBufSize -- out-of-bounds read for inputs shorter than
         // 134 bytes. Needs a size guard.
         if( (MPEG_START_OF_SEQUENCE == *(unsigned long*)(pInBuffer)) ||
             (MPEG_START_OF_SEQUENCE == *(unsigned long*)(pInBuffer+130)) ){
            // Start of new sequence, fluse 
            // Flush YUV frame 
            // For almog - remove if problematic 
            if(MPEG_START_OF_SEQUENCE_HEADER ==  *(unsigned long*)(pInBuffer + 30)){
               // don't flush 
            } else {
               // flush 
               avcodec_flush_buffers(m_pCodecCtx);              
            }
         } else {
            // Maybe we have start of video layer
            if(MPEG_VIDEO_OBJECT_START_CODE == *(unsigned long*)(pInBuffer)){
               if(MPEG_VIDEO_LAYER_START_CODE == *(unsigned long*)(pInBuffer + 4)){
                  avcodec_flush_buffers(m_pCodecCtx);
               }
            }
         }
      }

      //// Do we need to convert to RGB
      if(!bDecodeToRGB){
         // Tell the use all is well
         av_free_packet(&packet);
         *pbFrameFinished = bFrameFinished;
         return true;
      }


      // Did we get a video frame?
      if(bFrameFinished)
      {
         AVPicture* pPicture = (AVPicture *)m_pFrame;
         AVPicture* pRGBPicture = (AVPicture *)m_pFrameRGB;

         // Pick a deinterlace strategy for large (presumed interlaced) frames:
         // no scaling -> cheap linear filter on the RGB output;
         // scaling    -> FFmpeg's deinterlacer on the YUV frame before scale.
         if(m_bActivateDInterlace){
            if((m_pCodecCtx->width >= 640) && (m_pCodecCtx->height >= 330/*640/2*/)){
               // If dest is zero, than dest is larger than height -> Activte linear  D-Interlace
               // if dest is more than 320 (field) ->activate FFMpeg Deinterlace
               if((!nDestHeight)){
                  // We dont need to scale use linear deinterlace 
                  bUseLinearDInterlace = true;
               } else {
                  // Use FFMpeg Deintercale (better picture when scale)                  
                  bUseFFMpegDInterlace = true;
                  if(NULL == m_pDIFrame){
                     m_pDIFrame = avcodec_alloc_frame();
                  }
                  pDIPicture = (AVPicture *)m_pDIFrame;
               }
            }
         }

         int result;
         //Convert the image from its native format to RGB       
         if(nDestWidth && nDestHeight){
            // We have dest size, can we scale
            if( ((int)nDestWidth <= m_pCodecCtx->width) && ((int)nDestHeight <= m_pCodecCtx->height )){
               // Check if we need to deinterlace before scale
               if(m_pScale_context == NULL || (m_nDestWidth != nDestWidth) ||  (m_nDestHeight != nDestHeight)){
                  // Source size and destination size are different (e.g. we didn't create dest size as source)
                  // Go scale 
                  // Destination size changed: rebuild the RGB frame and the
                  // scale context for the new size.
                  m_nDestWidth = nDestWidth;
                  m_nDestHeight = nDestHeight;
                  if(m_pFrameRGB){
                     pBuffer = m_pFrameRGB->data[0];
                     free(pBuffer);
                     av_free(m_pFrameRGB);
                  }
                  m_pFrameRGB = avcodec_alloc_frame();
                  if(m_pFrameRGB){
                     pRGBPicture = (AVPicture *)m_pFrameRGB;
                     numBytes = avpicture_get_size(PIX_FMT_RGB32, nDestWidth, nDestHeight);
                     pBuffer = (uint8_t*)malloc(sizeof(uint8_t) * numBytes);
                      avpicture_fill((AVPicture *)m_pFrameRGB, pBuffer, PIX_FMT_RGB32, nDestWidth, nDestHeight);
							 if(m_pScale_context){
								sws_freeContext(m_pScale_context);
							 }
                      m_pScale_context = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height,
                         m_pCodecCtx->pix_fmt,
                         nDestWidth/*m_pCodecCtx->width*/,
                         nDestHeight/*m_pCodecCtx->height*/,
                         PIX_FMT_RGB32, 0, NULL, NULL, NULL);
                   } else {
                      av_free_packet(&packet);
                      return false;
                   }
                }
                // Source and dest are same size check if need to use FFMpeg deinterlcae 
                nWidth = nDestWidth;
                nHeight =  nDestHeight; 
                // First dinterlace and then scale
                if(bUseFFMpegDInterlace){
                   // Call FFMpeg function and then scale 
                   // Lazily allocate the deinterlace scratch buffer at the
                   // source size (freed in Terminate).
                   if(pDIPicture){
                      pBuffer = pDIPicture->data[0];
                      if(NULL == pBuffer){
                         numBytes = avpicture_get_size(m_pCodecCtx->pix_fmt, m_pCodecCtx->width, m_pCodecCtx->height);                     
                         pBuffer = (uint8_t*)malloc(sizeof(uint8_t) * numBytes);
                         result = avpicture_fill(pDIPicture, pBuffer, m_pCodecCtx->pix_fmt, m_pCodecCtx->width, m_pCodecCtx->height);
                      }
                   } else {
                      // NOTE(review): pDIPicture is NULL in this branch, yet
                      // avpicture_fill dereferences it -- this path looks
                      // unreachable/crashing; confirm.
                      numBytes = avpicture_get_size(m_pCodecCtx->pix_fmt, m_pCodecCtx->width, m_pCodecCtx->height);                     
                      pBuffer = (uint8_t*)malloc(sizeof(uint8_t) * numBytes);
                      result = avpicture_fill(pDIPicture, pBuffer, m_pCodecCtx->pix_fmt, m_pCodecCtx->width, m_pCodecCtx->height);
                   }
                   result = avpicture_deinterlace((AVPicture*)pDIPicture, (const AVPicture*)pPicture,
                                                       m_pCodecCtx->pix_fmt, m_pCodecCtx->width, m_pCodecCtx->height);

                   // Now scale
                   // NOTE(review): sws_scale's 4th/5th args are srcSliceY and
                   // srcSliceH; passing nDestWidth/nDestHeight here (instead
                   // of 0 and the source height, as done at the no-scale path
                   // below) looks wrong -- confirm against the swscale docs.
                   nScaleTick = GetTickCount();
                   int r = sws_scale(m_pScale_context, pDIPicture->data, pDIPicture->linesize,
                                        nDestWidth, nDestHeight, m_pFrameRGB->data,
                                        m_pFrameRGB->linesize);
                   nScalePass = GetTickCount() - nScaleTick;
                } else {
                   // Just scale
                   nScaleTick = GetTickCount();
                   int r = sws_scale(m_pScale_context, m_pFrame->data,  m_pFrame->linesize,
                                     nDestWidth, nDestHeight, m_pFrameRGB->data,
                                     m_pFrameRGB->linesize);
                   nScalePass = GetTickCount() - nScaleTick;
                }
             } else {
                // Dest bigger than source or we need to strach
                nImgConvertTick = GetTickCount();
					 nWidth = m_pCodecCtx->width;
                nHeight = m_pCodecCtx->height; 
					 if(m_pCodecCtx->height < nDestHeight){                
						nHeight = nDestHeight; 	
						// Upscale height only: rebuild RGB frame + scale context at
						// (source width x dest height).
						if(nDestHeight != m_nDestHeight){
							if(m_pFrameRGB){
								pBuffer = m_pFrameRGB->data[0];
								free(pBuffer);
								av_free(m_pFrameRGB);
							}
							m_pFrameRGB = avcodec_alloc_frame();
							if(m_pFrameRGB){
								pRGBPicture = (AVPicture *)m_pFrameRGB;
								numBytes = avpicture_get_size(PIX_FMT_RGB32, m_pCodecCtx->width, nDestHeight);
								pBuffer = (uint8_t*)malloc(sizeof(uint8_t) * numBytes);
								avpicture_fill((AVPicture *)m_pFrameRGB, pBuffer, PIX_FMT_RGB32, m_pCodecCtx->width, nDestHeight);
								if(m_pScale_context){
									sws_freeContext(m_pScale_context);
								}
								m_pScale_context = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height,
									m_pCodecCtx->pix_fmt,
									m_pCodecCtx->width/*m_pCodecCtx->width*/,
									nDestHeight/*m_pCodecCtx->height*/,
									PIX_FMT_RGB32, 0, NULL, NULL, NULL);
							} else {
								av_free_packet(&packet);
								return false;
							}
						}
					 } else {
						 nHeight = m_pCodecCtx->height; 
					 }

					 
                if(m_pScale_context == NULL)
                {
                   m_pScale_context = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height,
                      m_pCodecCtx->pix_fmt,
                      m_pCodecCtx->width,
                      nHeight,
                      PIX_FMT_RGB32, 0, NULL, NULL, NULL);
                }

                int r = sws_scale(m_pScale_context, m_pFrame->data,  m_pFrame->linesize,
                   m_pCodecCtx->width, nHeight, m_pFrameRGB->data, m_pFrameRGB->linesize);

                //img_convert(pRGBPicture, PIX_FMT_RGB32, pPicture, m_pCodecCtx->pix_fmt, nWidth, nHeight);               
                nImgConvertPass = GetTickCount() - nImgConvertTick;
					 m_nDestHeight = nHeight;
             }
          } else {
             nImgConvertTick = GetTickCount();
             // No scale 
             nWidth = m_pCodecCtx->width;
             nHeight = m_pCodecCtx->height; 

             if(m_pScale_context == NULL)
             {
                m_pScale_context = sws_getContext(m_pCodecCtx->width, m_pCodecCtx->height,
                   m_pCodecCtx->pix_fmt,
                   m_pCodecCtx->width,
                   m_pCodecCtx->height,
                   PIX_FMT_RGB32, 0, NULL, NULL, NULL);
             }

             int r = sws_scale(m_pScale_context, m_pFrame->data,  m_pFrame->linesize,
                0, m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);

             //img_convert(pRGBPicture, PIX_FMT_RGB32, pPicture, m_pCodecCtx->pix_fmt, nWidth, nHeight);                        
             nImgConvertPass = GetTickCount() - nImgConvertTick;
          }
          nFlipTick = GetTickCount();
          // Now flip
          if(m_bFlip)
             flip(m_pCodecCtx, pRGBPicture, nHeight);

          nFlipPass = GetTickCount() - nFlipTick;

          // Copy the RGB32 result to the caller, honoring the output stride
          nSize = avpicture_get_size(PIX_FMT_RGB32, nWidth, nHeight);
          if(nSize <= nOutBufSize)
          {
             if(bUseLinearDInterlace){
                LinearDeinterlace(m_pFrameRGB->data[0], nSize, nWidth, nHeight, stride);
             }

             unsigned long nCopyTick = GetTickCount();
             if(nWidth*4 == stride)
                memcpy(pOutBuffer, m_pFrameRGB->data[0], nSize);
             else
             {
                // Row-by-row copy when the caller's stride has padding
                unsigned char* pOutPosition = pOutBuffer;
                uint8_t* pInputPosition = m_pFrameRGB->data[0];
                for(unsigned int i=0; i<nHeight; i++)
                {
                   memcpy(pOutPosition, pInputPosition, nWidth*4);
                   pOutPosition += stride;
                   pInputPosition += nWidth*4;
                }
             }
             nCopyPass = GetTickCount() - nCopyTick;                  
          }
          else
          {
             av_free_packet(&packet);
             return false;
          }
       }/* else {
          av_free_packet(&packet);
          *pbFrameFinished = false;
          return false;
       }*/
   }

   m_nDestHeight = nDestHeight;
   m_nDestWidth = nDestWidth;
   
   // NOTE(review): pOutBuffer is read here even on paths that never wrote to
   // it (no frame finished, or bDecodeToRGB false never reaches here, but the
   // "no frame" path does) -- possible uninitialized read; confirm.
   if((pOutBuffer[0] == 130) || (pOutBuffer[1] == 130)){
//      ODME_0("CVideoDecoderWrapper::Decode - warning gray frame\n");
   }
  
   av_free_packet(&packet);

   *pbFrameFinished = bFrameFinished;
   //ODFEI_0("CVideoDecoderWrapper::Decode: buffer size = %d, decode frame = %d, scale = %d, ImgConvert = %d, Flip = %d, Copy = %d  ,Total = %d \n", nInBufSize, nDecodePass, nScalePass, nImgConvertPass , nFlipPass, nCopyPass, GetTickCount() -  nStartTick);
   return true;
}
/********************************************************************************************/
/**
 * Wraps the caller's compressed buffer in an AVPacket, routing the bytes
 * through the format context's IO buffer (which readPacket() never fills).
 *
 * @param pInBuffer  Compressed input bytes.
 * @param nInBufSize Size of pInBuffer; must fit inside the IO buffer.
 * @param pPacket    Out: packet to fill (allocated here via av_new_packet).
 * @return number of payload bytes in the packet on success,
 *         or a negative AVERROR code on failure.
 */
int CVideoDecoder::CreatePacket(const unsigned char* pInBuffer, unsigned long nInBufSize,
											AVPacket* pPacket)
{
	int            nBufferSize;
	int            len;
	
	// BUG FIX: this used to "return false" (0). Zero is not negative, so the
	// caller's "nReturn < 0" check let an empty packet through to the decoder.
	if(NULL == m_pCodecCtx || m_pFormatCtx == NULL){
		return AVERROR(EIO);
	}
	
	ByteIOContext* pByteIoCxt = m_pFormatCtx->pb;

	nBufferSize = MAKE_BUFFER_SIZE(m_pCodecCtx->width, m_pCodecCtx->height);

	// Reject empty input, missing input, or input larger than the IO buffer
	if(nInBufSize <= 0 || pInBuffer == NULL || pByteIoCxt->buffer_size < (int)nInBufSize)
		return AVERROR(EIO);

	if (av_new_packet(pPacket, nBufferSize) < 0)
		return AVERROR(EIO);

	pPacket->pos= url_ftell(pByteIoCxt);
	pPacket->stream_index = 0;

	// If the IO buffer has been fully consumed, refill it with the new input.
	// NOTE(review): if stale bytes remain (len != 0) the new input is silently
	// ignored for this call -- confirm this is the intended framing behavior.
	len = (int)(pByteIoCxt->buf_end - pByteIoCxt->buf_ptr);
	if (len == 0) 
	{
		memcpy(pByteIoCxt->buffer, pInBuffer, nInBufSize);

		pByteIoCxt->pos += nInBufSize;
		pByteIoCxt->buf_ptr = pByteIoCxt->buffer;
		pByteIoCxt->buf_end = pByteIoCxt->buffer + nInBufSize ;

		len = (int)(pByteIoCxt->buf_end - pByteIoCxt->buf_ptr);
	}
	// Clamp to the packet's capacity
	if (len > nBufferSize)
		len = nBufferSize;

	memcpy(pPacket->data, pByteIoCxt->buf_ptr, len);
	pByteIoCxt->buf_ptr += len;

	if (len <= 0) 
	{
		av_free_packet(pPacket);
		return AVERROR(EIO);
	}
	
	pPacket->size = len;
	return len;
}
/********************************************************************************************/
/**
 * Cheap linear deinterlace over an RGB32 buffer: for every even scanline pair
 * (row j, row j+2), the odd line in between (row j+1) is replaced by the
 * per-channel average of its neighbors when the neighbors are similar.
 *
 * @param pBufferIn   RGB32 pixel buffer (4 bytes per pixel: R,G,B,X order as
 *                    written by the RGB conversion -- channel naming here
 *                    follows byte offsets 0/1/2).
 * @param nBufferSize Unused; kept for interface compatibility.
 * @param nWidth      Pixels per row to process.
 * @param nHeight     Number of rows in the buffer.
 * @param nStride     Row stride in bytes (assumed a multiple of 4 -- the
 *                    original offset arithmetic truncates otherwise; behavior
 *                    preserved).
 */
void CVideoDecoder::LinearDeinterlace(unsigned char *pBufferIn, unsigned long nBufferSize, 
														   unsigned long nWidth, unsigned long nHeight, unsigned long nStride)
{
#define PIX_THRESHOLD_32_90  0x40

   (void)nBufferSize; // size is implied by nHeight/nStride

   // Byte offsets to the next row and to the row after it, replicating the
   // original pixel-count arithmetic ((nStride>>2)*4 and (nStride>>1)*4).
   const unsigned long nNextRowOfs = (nStride >> 2) * 4;
   const unsigned long nTwoRowsOfs = (nStride >> 1) * 4;

   // Threshold for "fields are similar enough to blend"
   const unsigned int nThold = PIX_THRESHOLD_32_90;

   // BUG FIX: the original loop condition "j < nHeight - 2" underflowed for
   // nHeight < 2 (unsigned), running (nearly) forever and overrunning the
   // buffer. "j + 2 < nHeight" is equivalent for all valid heights and safe.
   for (unsigned long j = 0; j + 2 < nHeight; j += 2) // Only even lines are checked
   {
      unsigned char *pRow = pBufferIn + j * nStride;
      for (unsigned long i = 0; i < nWidth; i++)
      {
         unsigned char *pTop = pRow + (i * 4);               // even line j
         unsigned char *pMid = pRow + (i * 4) + nNextRowOfs; // odd line j+1 (updated)
         unsigned char *pBot = pRow + (i * 4) + nTwoRowsOfs; // even line j+2

         // Per-channel absolute difference between the two even lines
         unsigned char nAbsR = abs(pBot[0] - pTop[0]);
         unsigned char nAbsG = abs(pBot[1] - pTop[1]);
         unsigned char nAbsB = abs(pBot[2] - pTop[2]);

         // NOTE(review): blending when ANY channel difference is BELOW the
         // threshold (|| and <) looks inverted for motion-adaptive
         // deinterlacing, but is preserved exactly from the original.
         if ((nAbsR < nThold) || (nAbsG < nThold) || (nAbsB < nThold))
         {
            pMid[0] = (unsigned char)((pBot[0] + pTop[0]) >> 1);
            pMid[1] = (unsigned char)((pBot[1] + pTop[1]) >> 1);
            pMid[2] = (unsigned char)((pBot[2] + pTop[2]) >> 1);
         }
      }
   }
}