#include "VideoEncode.h"
#include <iostream>


// Construct an idle encoder: no capture buffer allocated, recording
// disabled, frame counter at zero. All real setup happens in init().
VideoEncode::VideoEncode()
  : pBits(NULL)
  , m_recording(false)
  , frameno(0)
{
}


void VideoEncode::init(char *filename,int w, int h)
{
  unsigned long lImageSize;
  

  av_register_all();
  // Get the viewport dimensions
  glGetIntegerv(GL_VIEWPORT, iViewport);
      
  // How big is the image going to be (targas are tightly packed)
  lImageSize = iViewport[2] * 3 * iViewport[3];	
      
  // Allocate block. If this doesn't work, go home
  pBits = (GLbyte *)malloc(lImageSize);
  codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);//(CODEC_ID_H264);
  if (!codec) {
    std::cerr << "codec not found" << std::endl;
    exit(1);
  }
  c= avcodec_alloc_context();
  picture= avcodec_alloc_frame();
  pictYUV= avcodec_alloc_frame();
  /* put sample parameters */
  c->bit_rate = 400000;
  /* resolution must be a multiple of two */
  c->width = w;
  c->height = h;
  /* frames per second */
  c->time_base= (AVRational){1,25};
  c->gop_size = 10; /* emit one intra frame every ten frames */
  c->max_b_frames=1;
  c->pix_fmt = PIX_FMT_YUV420P;
  /* open it */
  if (avcodec_open(c, codec) < 0) {
    std::cerr << "could not open codec" << std::endl;
    exit(1);
  }
  f = fopen(filename, "wb");
  if (!f) {
    std::cerr << "could not open "<< filename << std::endl;
    exit(1);
  }
  
   /* alloc image and output buffer */
  outbuf_size = 100000;
  outbuf = (uint8_t *)malloc(outbuf_size);
  size =iViewport[2]*iViewport[3];

  picture->data[0] = (uint8_t*)pBits+iViewport[2]*3*(iViewport[3]-1);
  picture->data[1] = picture->data[0] + size;
  picture->data[2] = picture->data[1] + size;
  picture->linesize[0] =-3*iViewport[2];
  picture->linesize[1] =-3*iViewport[2];
  picture->linesize[2] =-3*iViewport[2];
  
  size = w*h;
  pictYUV_buf = (uint8_t*)malloc((size * 3) / 2); /* size for YUV 420 */
  pictYUV->data[0] = pictYUV_buf;
  pictYUV->data[1] = pictYUV->data[0] + size;
  pictYUV->data[2] = pictYUV->data[1] + size / 4;
  pictYUV->linesize[0] = w;
  pictYUV->linesize[1] = w / 2;
  pictYUV->linesize[2] = w / 2;
//  int w = c->width;
//  int h = c->height;
  img_convert_ctx = sws_getContext(iViewport[2], iViewport[3],
                        PIX_FMT_BGR24, 
                        w, h, PIX_FMT_YUV420P, SWS_BICUBIC, 
                        NULL, NULL, NULL);
  if(img_convert_ctx == NULL) {
    std::cerr <<  "Cannot initialize the conversion context!" << std::endl;
    exit(1);
  }
  m_recording=true;
  frameno=0;
}


// Capture the current front buffer, convert it to YUV420 and push one
// frame through the encoder, appending the compressed bytes to the
// output file. Does nothing unless init() has been called.
void VideoEncode::recFrame(void)
{
  if (!m_recording) return;

  // Request tightly packed rows with no offsets, matching how the
  // capture buffer was sized in init().
  glPixelStorei(GL_PACK_ALIGNMENT, 1);
  glPixelStorei(GL_PACK_ROW_LENGTH, 0);
  glPixelStorei(GL_PACK_SKIP_ROWS, 0);
  glPixelStorei(GL_PACK_SKIP_PIXELS, 0);

  // Save the current read-buffer setting, read BGR pixels from the
  // front buffer, then restore the previous setting.
  glGetIntegerv(GL_READ_BUFFER, (GLint *)&lastBuffer);
  glReadBuffer(GL_FRONT);
  glReadPixels(0, 0, iViewport[2], iViewport[3], GL_BGR_EXT, GL_UNSIGNED_BYTE, pBits);
  glReadBuffer(lastBuffer);

  // Convert the captured image (bottom-up BGR, handled by the negative
  // strides set up in init()) to YUV420 at the encoder resolution.
  sws_scale(img_convert_ctx, picture->data,
              picture->linesize, 0,
              iViewport[3],
              pictYUV->data, pictYUV->linesize);

  out_size = avcodec_encode_video(c, outbuf, outbuf_size, pictYUV);
  std::cout << "encoding frame " << ++frameno << " (size="<<out_size <<")" << std::endl;
  // avcodec_encode_video returns a negative value on error; the old
  // unconditional fwrite converted that to an enormous size_t count.
  if (out_size > 0)
    fwrite(outbuf, 1, out_size, f);
}

// Flush the encoder's delayed frames, write the MPEG sequence end
// code, close the output file and release every resource acquired in
// init(). Safe to call when not recording.
void VideoEncode::stop(void)
{
  // Guard against stop() before init() or a second stop(): without
  // this the code double-frees the buffers and double-fcloses f.
  if (!m_recording) return;

  /* get the delayed frames */
  for(; out_size; ++frameno) {
    out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
    // A negative return is an encoder error; the original loop would
    // spin forever on it and pass the negative count to fwrite.
    if (out_size < 0)
      break;
    std::cout << "write frame "<<frameno<<" (size="<<out_size <<")"<<std::endl;
    fwrite(outbuf, 1, out_size, f);
  }

  /* add sequence end code to have a real mpeg file */
  outbuf[0] = 0x00;
  outbuf[1] = 0x00;
  outbuf[2] = 0x01;
  outbuf[3] = 0xb7;
  fwrite(outbuf, 1, 4, f);
  fclose(f);

  free(pBits);
  free(pictYUV_buf);
  free(outbuf);
  pBits = NULL;   // restore the constructor invariant (NULL when idle)

  sws_freeContext(img_convert_ctx);
  avcodec_close(c);
  av_free(c);
  av_free(picture);
  av_free(pictYUV);
  m_recording=false;
}


// Targa (TGA) on-disk file header. Only used locally by GrabScreenTGA.
#pragma pack(1)   // the header is byte-packed on disk: no padding allowed
typedef struct
{
    GLbyte	identsize;              // Size of ID field that follows header (0)
    GLbyte	colorMapType;           // 0 = None, 1 = paletted
    GLbyte	imageType;              // 0 = none, 1 = indexed, 2 = rgb, 3 = grey, +8=rle
    unsigned short	colorMapStart;          // First colour map entry
    unsigned short	colorMapLength;         // Number of colors
    unsigned char 	colorMapBits;   // bits per palette entry
    unsigned short	xstart;                 // image x origin
    unsigned short	ystart;                 // image y origin
    unsigned short	width;                  // width in pixels
    unsigned short	height;                 // height in pixels
    GLbyte	bits;                   // bits per pixel (8 16, 24, 32)
    GLbyte	descriptor;             // image descriptor
} TGAHEADER;
#pragma pack(8)   // NOTE(review): forces packing to 8 rather than restoring the previous setting — confirm intended


////////////////////////////////////////////////////////////////////
// Capture the current viewport and save it as a targa file.
// Be sure to call SwapBuffers for double-buffered contexts, or
// glFinish for single-buffered contexts, before calling this function.
// Returns 0 if an error occurs, or 1 on success.
// Does not work on the iPhone
// Alkis - Code lifted from "Source code for the OpenGL SuperBible, 5th Edition and GLTools"
// GLTools.cpp
// http://code.google.com/p/oglsuperbible5/

int VideoEncode::GrabScreenTGA(const char *szFileName)
{
    FILE *pFile;                // File pointer
    TGAHEADER tgaHeader;		// TGA file header
    unsigned long lImageSize;   // Size in bytes of image
    GLbyte	*pBits = NULL;      // Pointer to bits
    GLint iViewport[4];         // Viewport in pixels
    GLenum lastBuffer;          // Storage for the current read buffer setting
    
    // Get the viewport dimensions
    glGetIntegerv(GL_VIEWPORT, iViewport);
	
    // How big is the image going to be (targas are tightly packed)
    lImageSize = iViewport[2] * 3 * iViewport[3];	
	
    // Allocate block. If this doesn't work, go home
    pBits = (GLbyte *)malloc(lImageSize);
    if(pBits == NULL)
        return 0;
	
    // Read bits from color buffer
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);
    glPixelStorei(GL_PACK_SKIP_ROWS, 0);
    glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
    
    // Get the current read buffer setting and save it. Switch to
    // the front buffer and do the read operation. Finally, restore
    // the read buffer state
    glGetIntegerv(GL_READ_BUFFER, (GLint *)&lastBuffer);
    glReadBuffer(GL_FRONT);
    glReadPixels(0, 0, iViewport[2], iViewport[3], GL_BGR_EXT, GL_UNSIGNED_BYTE, pBits);
    glReadBuffer(lastBuffer);
    
    // Initialize the Targa header
    tgaHeader.identsize = 0;
    tgaHeader.colorMapType = 0;
    tgaHeader.imageType = 2;
    tgaHeader.colorMapStart = 0;
    tgaHeader.colorMapLength = 0;
    tgaHeader.colorMapBits = 0;
    tgaHeader.xstart = 0;
    tgaHeader.ystart = 0;
    tgaHeader.width = iViewport[2];
    tgaHeader.height = iViewport[3];
    tgaHeader.bits = 24;
    tgaHeader.descriptor = 0;
    
    // Do byte swap for big vs little endian
#ifdef __APPLE__
    LITTLE_ENDIAN_WORD(&tgaHeader.colorMapStart);
    LITTLE_ENDIAN_WORD(&tgaHeader.colorMapLength);
    LITTLE_ENDIAN_WORD(&tgaHeader.xstart);
    LITTLE_ENDIAN_WORD(&tgaHeader.ystart);
    LITTLE_ENDIAN_WORD(&tgaHeader.width);
    LITTLE_ENDIAN_WORD(&tgaHeader.height);
#endif
    
    // Attempt to open the file
    pFile = fopen(szFileName, "wb");
    if(pFile == NULL)
    {
        free(pBits);    // Free buffer and return error
        return 0;
    }
	
    // Write the header
    fwrite(&tgaHeader, sizeof(TGAHEADER), 1, pFile);
    
    // Write the image data
    fwrite(pBits, lImageSize, 1, pFile);
	
    // Free temporary buffer and close the file
    free(pBits);    
    fclose(pFile);
    
    // Success!
    return 1;
}

#include <jpeglib.h>
// Based on GrabScreenTGA
int VideoEncode::GrabScreenJPG(const char *szFileName,int quality)
{
    FILE *pFile;                // File pointer
    unsigned long lImageSize;   // Size in bytes of image
    GLbyte	*pBits = NULL;      // Pointer to bits
    GLint iViewport[4];         // Viewport in pixels
    GLenum lastBuffer;          // Storage for the current read buffer setting
    
    // Get the viewport dimensions
    glGetIntegerv(GL_VIEWPORT, iViewport);
	
    // How big is the image going to be (targas are tightly packed)
    lImageSize = iViewport[2] * 3 * iViewport[3];	
	
    // Allocate block. If this doesn't work, go home
    pBits = (GLbyte *)malloc(lImageSize);
    if(pBits == NULL)
        return 0;
	
    // Read bits from color buffer
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);
    glPixelStorei(GL_PACK_SKIP_ROWS, 0);
    glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
    
    // Get the current read buffer setting and save it. Switch to
    // the front buffer and do the read operation. Finally, restore
    // the read buffer state
    glGetIntegerv(GL_READ_BUFFER, (GLint *)&lastBuffer);
    glReadBuffer(GL_FRONT);
    glReadPixels(0, 0, iViewport[2], iViewport[3], GL_BGR_EXT, GL_UNSIGNED_BYTE, pBits);
    glReadBuffer(lastBuffer);
    

    // Attempt to open the file
    FILE *outfile = fopen(szFileName, "wb");
    if(outfile == NULL)
    {
      free(pBits);    // Free buffer and return error
      return 0;
    }
    
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;

    JSAMPROW scanline[1];

    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    jpeg_stdio_dest(&cinfo, outfile);

    int width=iViewport[2];
    int height=iViewport[3];
    
    cinfo.image_width = width;
    cinfo.image_height = height;
    cinfo.input_components = 3;
    cinfo.in_color_space = JCS_RGB;

    jpeg_set_defaults(&cinfo);
    jpeg_set_quality(&cinfo, quality, TRUE);
    jpeg_start_compress(&cinfo, TRUE);

    typedef unsigned char ubyte;
    ubyte t;
    while (cinfo.next_scanline < (unsigned int) height) 
    {
        scanline[0] = ((JSAMPLE*)pBits) + 3 * width * (height-(cinfo.next_scanline+1));
	
	// the image is in BGR format, we'll switch it to RGB
	ubyte *pB = (ubyte *)scanline[0];
	for(int xx=0; xx<width; ++xx) {
	  t=*(pB+3*xx); 		//save blue
	  *(pB+3*xx)=*(pB+2+3*xx);	//blue gets written by red
	  *(pB+2+3*xx) = t; 		//red is written by blue
	}
        jpeg_write_scanlines(&cinfo, scanline, 1);
    }
    jpeg_finish_compress(&cinfo);
    jpeg_destroy_compress(&cinfo);

    free(pBits);    
    fclose(outfile);    
    // Success!
    return 1;
}

