/*
 *  video_recorder.c
 *  libCV
 *
 *  Created by Gregory Koch on 08/03/11.
 *  Copyright 2011 IFlyBotIV. All rights reserved.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "video_recorder.h"

static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* video output */

/*AVFrame *picture, *tmp_picture;
 uint8_t *video_outbuf;
 int frame_count, video_outbuf_size;*/

/* add a video output stream */
/* Create and configure a new video output stream on the muxer context.
 * Exits the process on allocation failure (consistent with the rest of
 * this file's error handling). Returns the newly created stream. */
static AVStream *add_video_stream(video_record_t *video_record, int codec_id, cvSize_t size, int frame_rate)
{
    AVStream *stream = av_new_stream(video_record->oc, 0);
    AVCodecContext *enc;

    if (!stream) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    enc = stream->codec;
    enc->codec_type = CODEC_TYPE_VIDEO;
    enc->codec_id = codec_id;

    /* sample parameters */
    enc->bit_rate = 400000;

    /* resolution must be a multiple of two */
    enc->width  = size.width;
    enc->height = size.height;

    /* time base: the fundamental unit of time (in seconds) in which frame
       timestamps are expressed. For fixed-fps content the timebase is
       1/framerate and timestamps increment by exactly 1. */
    enc->time_base.num = 1;
    enc->time_base.den = frame_rate;

    /* emit one intra frame at most every twelve frames */
    enc->gop_size = 12;
    enc->pix_fmt = STREAM_PIX_FMT;

    switch (enc->codec_id) {
    case CODEC_ID_MPEG2VIDEO:
        /* just for testing, we also add B frames */
        enc->max_b_frames = 2;
        break;
    case CODEC_ID_MPEG1VIDEO:
        /* Needed to avoid macroblocks in which some coeffs overflow.
           This does not happen with normal video; it only happens here
           because the motion of the chroma plane does not match the
           luma plane. */
        enc->mb_decision = 2;
        break;
    default:
        break;
    }

    /* some formats want stream headers to be separate */
    if (video_record->oc->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return stream;
}

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;
	
    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

/* Find and open the encoder for the stream's codec, allocate the encode
 * output buffer (for non-raw formats) and the reusable picture frame.
 * Exits the process on any failure. */
static void open_video(video_record_t *video_record)
{
    AVCodec *codec;
    AVCodecContext *c;
	
    c = video_record->video_st->codec;
	
    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }
	
    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }
	
    video_record->video_outbuf = NULL;
    if (!(video_record->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* buffers passed into lav* can be allocated any way you prefer,
		 as long as they're aligned enough for the architecture, and
		 they're freed appropriately (such as using av_free for buffers
		 allocated with av_malloc) */
        video_record->video_outbuf_size = 200000;
        video_record->video_outbuf = av_malloc(video_record->video_outbuf_size);
        /* FIX: the allocation was previously unchecked; a NULL buffer
           would later be handed to avcodec_encode_video() */
        if (!video_record->video_outbuf) {
            fprintf(stderr, "Could not allocate video output buffer\n");
            exit(1);
        }
    }
	
    /* allocate the encoded raw picture */
    video_record->picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!video_record->picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }
}

/* Convert one input image to the codec's pixel format and write it to the
 * output file (raw copy for AVFMT_RAWPICTURE formats, encoded otherwise).
 * Exits the process on unsupported image type or write error.
 *
 * FIX: the three per-type branches were identical except for the source
 * pixel format (now a single switch), and the unsupported-type branch
 * printed the wrong error message ("Cannot initialize the conversion
 * context"). */
static void write_video_frame(video_record_t *video_record, image_t img)
{
	const unsigned char* srcData[4] = {img.data, NULL, NULL, NULL};
	int line_size[4] = {img.stride, 0, 0, 0};
    int out_size, ret;
    AVCodecContext *c;
    int src_pix_fmt;
    static struct SwsContext *img_convert_ctx;
	
    c = video_record->video_st->codec;
	
	/* map the input image type onto a libswscale source pixel format */
	switch (img.type) {
	case IMG_RGB24:
		src_pix_fmt = PIX_FMT_RGB24;
		break;
	case IMG_BGR24:
		src_pix_fmt = PIX_FMT_BGR24;
		break;
	case IMG_GREYSCALE8:
		src_pix_fmt = PIX_FMT_GRAY8;
		break;
	default:
		fprintf(stderr, "Unsupported image type for video recording\n");
		exit(1);
	}
	
	/* the conversion context is created once, from the FIRST frame's
	 * format/size, and reused for all later frames.
	 * NOTE(review): a later frame with a different type or size would
	 * silently reuse the stale context — confirm all frames in a
	 * recording share one format */
	if (img_convert_ctx == NULL) {
		img_convert_ctx = sws_getContext(img.size.width, img.size.height,
										 src_pix_fmt,
										 c->width, c->height,
										 c->pix_fmt,
										 sws_flags, NULL, NULL, NULL);
		if (img_convert_ctx == NULL) {
			fprintf(stderr, "Cannot initialize the conversion context\n");
			exit(1);
		}
	}
	
	sws_scale(img_convert_ctx, srcData, line_size,
              0, c->height, video_record->picture->data, video_record->picture->linesize);
	
	
    if (video_record->oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case: the muxer takes the AVPicture directly */
        AVPacket pkt;
        av_init_packet(&pkt);
		
        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= video_record->video_st->index;
        pkt.data= (uint8_t *)video_record->picture;
        pkt.size= sizeof(AVPicture);
		
        ret = av_interleaved_write_frame(video_record->oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_record->video_outbuf, 
										video_record->video_outbuf_size, video_record->picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);
			
            /* rescale the codec-timebase pts to the stream timebase */
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, 
									  video_record->video_st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= video_record->video_st->index;
            pkt.data= video_record->video_outbuf;
            pkt.size= out_size;
			
            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(video_record->oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
}

/* Close the stream's codec and release the encode picture (frame struct
 * plus its pixel buffer) and the output buffer allocated in open_video. */
static void close_video(video_record_t *video_record)
{
    avcodec_close(video_record->video_st->codec);

    /* data[0] is the av_malloc'd pixel buffer from alloc_picture */
    av_free(video_record->picture->data[0]);
    av_free(video_record->picture);

    av_free(video_record->video_outbuf);
}



/* Create a video recording session: guess the container format from the
 * file name (falling back to MPEG), set up the muxer context and video
 * stream, open the codec, open the output file and write the header.
 * Exits the process on any failure. The caller owns the returned handle
 * and must release it with close_video_record().
 *
 * FIX: the fallback-format notice went to stdout while every other
 * diagnostic in this file uses stderr, and the av_write_header() return
 * value was ignored. */
video_record_t *open_video_record(char *filename, cvSize_t size, int frame_rate)
{
	video_record_t *video_record;
	AVOutputFormat *fmt;
    
	
	video_record = malloc(sizeof(video_record_t));
	if(!video_record)
	{
		perror("libCV : malloc error");
		exit(1);
	}
	
	/* initialize libavcodec, and register all codecs and formats */
    av_register_all();
	
	/* auto detect the output format from the name. default is
	 mpeg. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        fprintf(stderr, "Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }
	
    /* allocate the output media context */
    video_record->oc = avformat_alloc_context();
    if (!video_record->oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    video_record->oc->oformat = fmt;
    snprintf(video_record->oc->filename, sizeof(video_record->oc->filename), "%s", filename);
	
	/* add the video stream using the format's default codec and
	 initialize the codec */
    video_record->video_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_record->video_st = add_video_stream(video_record, fmt->video_codec, size, frame_rate);
    }
	
    /* set the output parameters (must be done even if no
	 parameters). */
    if (av_set_parameters(video_record->oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }
	
    dump_format(video_record->oc, 0, filename, 1);
	
    /* now that all the parameters are set, we can open the video codec
	 and allocate the necessary encode buffers */
    if (video_record->video_st)
        open_video(video_record);
	
    /* open the output file, if the format needs one */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&video_record->oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }
	
	/* write the stream header, if any */
    if (av_write_header(video_record->oc) < 0) {
        fprintf(stderr, "Could not write stream header for '%s'\n", filename);
        exit(1);
    }
	
	return video_record;
}

/* Append one image to the recording as the next video frame.
 * Thin public wrapper around write_video_frame(); exits the process on
 * conversion or write errors (see write_video_frame). */
void add_frame2video_record(video_record_t *video_record, image_t img)
{	
	/* only a video stream exists in this recorder; no audio is written */
	write_video_frame(video_record, img);
}

/* Finalize and tear down a recording session: write the trailer, close
 * the codec, free the streams, close the output file and release the
 * handle. video_record must not be used afterwards.
 *
 * FIX: url_fclose() was called unconditionally, but open_video_record()
 * only opens the file when the format lacks AVFMT_NOFILE — closing an
 * unopened pb would fault for AVFMT_NOFILE formats. */
void close_video_record(video_record_t *video_record)
{
	unsigned int i;
    
	/* write the trailer, if any.  the trailer must be written
     * before you close the CodecContexts open when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed on av_codec_close() */
    av_write_trailer(video_record->oc);
	
    /* close each codec */
    if (video_record->video_st)
        close_video(video_record);
	
    /* free the streams */
    for(i = 0; i < video_record->oc->nb_streams; i++) {
        av_freep(&video_record->oc->streams[i]->codec);
        av_freep(&video_record->oc->streams[i]);
    }
	
	/* close the output file, but only if one was opened */
	if (!(video_record->oc->oformat->flags & AVFMT_NOFILE))
		url_fclose(video_record->oc->pb);
	
    /* free the muxer context */
    av_free(video_record->oc);
	
	free(video_record);
}

