/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s):
 *
 * Partial Copyright (c) 2006 Peter Schlaile
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/blenkernel/intern/writeffmpeg.c
 *  \ingroup bke
 */

#ifdef WITH_FFMPEG
#include <string.h>
#include <stdio.h>

#include <stdlib.h>

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavutil/rational.h>
#include <libavutil/samplefmt.h>
#include <libswscale/swscale.h>

#include "MEM_guardedalloc.h"

#include "DNA_scene_types.h"

#include "BLI_blenlib.h"

#ifdef WITH_AUDASPACE
#  include AUD_DEVICE_H
#  include AUD_SPECIAL_H
#endif

#include "BLI_utildefines.h"
#include "BLI_threads.h"

#include "BKE_global.h"
#include "BKE_idprop.h"
#include "BKE_main.h"
#include "BKE_report.h"
#include "BKE_sound.h"
#include "BKE_writeffmpeg.h"

#include "IMB_imbuf.h"

#include "ffmpeg_compat.h"

/* State for one in-progress FFmpeg render job (one video stream, optionally
 * one audio stream, and the buffers used while encoding). */
typedef struct FFMpegContext {
	int ffmpeg_type;           /* Container/format selector, an FFMPEG_* value (see get_file_extensions()). */
	int ffmpeg_codec;          /* Requested video codec id. */
	int ffmpeg_audio_codec;    /* Requested audio codec id; AV_CODEC_ID_NONE disables audio. */
	int ffmpeg_video_bitrate;  /* Video bitrate in kbit/s (multiplied by 1000 when applied). */
	int ffmpeg_audio_bitrate;  /* Audio bitrate in kbit/s (multiplied by 1000 when applied). */
	int ffmpeg_gop_size;       /* Keyframe interval passed to the encoder. */
	int ffmpeg_max_b_frames;   /* Max consecutive B-frames (only applied when FFMPEG_USE_MAX_B_FRAMES is set). */
	int ffmpeg_autosplit;      /* Non-zero: start a new file when output exceeds FFMPEG_AUTOSPLIT_SIZE. */
	int ffmpeg_autosplit_count; /* Suffix counter for autosplit output files ("_000", "_001", ...). */
	bool ffmpeg_preview;       /* Render the preview frame range (psfra..pefra) instead of sfra..efra. */

	int ffmpeg_crf;  /* set to 0 to not use CRF mode; we have another flag for lossless anyway. */
	int ffmpeg_preset; /* see FFMpegPreset */

	AVFormatContext *outfile;      /* Muxer handle for the output file. */
	AVCodecContext *video_codec;   /* Open video encoder, or NULL. */
	AVCodecContext *audio_codec;   /* Open audio encoder, or NULL. */
	AVStream *video_stream;
	AVStream *audio_stream;
	AVFrame *current_frame; /* Image frame in output pixel format. */

	/* Image frame in Blender's own pixel format, may need conversion to the output pixel format. */
	AVFrame *img_convert_frame;
	struct SwsContext *img_convert_ctx; /* RGBA -> output pixel format converter, or NULL when unneeded. */

	uint8_t *audio_input_buffer;        /* Interleaved samples read from the mixdown device. */
	uint8_t *audio_deinterleave_buffer; /* Scratch buffer for planar codecs; swapped with the input buffer. */
	int audio_input_samples;            /* Samples per encode call (codec frame size). */
	double audio_time;                  /* Seconds of audio submitted to the encoder so far. */
	bool audio_deinterleave;            /* True when the codec wants planar (non-interleaved) samples. */
	int audio_sample_size;              /* Bytes per sample for the codec's sample format. */

#ifdef WITH_AUDASPACE
	AUD_Device *audio_mixdown_device;   /* Audaspace mixdown source; freed in end_ffmpeg_impl(). */
#endif
} FFMpegContext;

/* Autosplit threshold in bytes: when the output file grows past this,
 * a new file with an incremented "_NNN" suffix is started. */
#define FFMPEG_AUTOSPLIT_SIZE 2000000000

/* Debug printf, active only when FFmpeg debugging is enabled (G_DEBUG_FFMPEG). */
#define PRINT if (G.debug & G_DEBUG_FFMPEG) printf

static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value);
static void ffmpeg_filepath_get(FFMpegContext *context, char *string, struct RenderData *rd, bool preview, const char *suffix);

/* Free a picture allocated by alloc_picture(): the pixel buffer was taken
 * from guardedalloc, the frame struct itself from av_frame_alloc(). */
static void delete_picture(AVFrame *f)
{
	if (f == NULL) {
		return;
	}
	if (f->data[0] != NULL) {
		MEM_freeN(f->data[0]);
	}
	av_free(f);
}

/* Return non-zero when we prefer float samples for this codec (if supported). */
static int request_float_audio_buffer(int codec_id)
{
	switch (codec_id) {
		case AV_CODEC_ID_AAC:
		case AV_CODEC_ID_AC3:
		case AV_CODEC_ID_VORBIS:
			return 1;
		default:
			return 0;
	}
}

#ifdef WITH_AUDASPACE

/* Read one buffer of mixed-down audio from Audaspace, encode it, and mux the
 * resulting packet(s) into the output file.
 * Returns 0 on success, -1 on failure. */
static int write_audio_frame(FFMpegContext *context)
{
	AVFrame *frame = NULL;
	AVCodecContext *c = context->audio_codec;

	AUD_Device_read(context->audio_mixdown_device, context->audio_input_buffer, context->audio_input_samples);
	context->audio_time += (double) context->audio_input_samples / (double) c->sample_rate;

	frame = av_frame_alloc();
	if (frame == NULL) {
		/* Allocation failure; nothing to clean up yet. */
		return -1;
	}
	frame->pts = context->audio_time / av_q2d(c->time_base);
	frame->nb_samples = context->audio_input_samples;
	frame->format = c->sample_fmt;
	frame->channel_layout = c->channel_layout;

	if (context->audio_deinterleave) {
		/* The codec wants planar samples: split the interleaved input into
		 * per-channel planes, then swap the two buffers so the deinterleaved
		 * data sits in audio_input_buffer for the fill call below. */
		int channel, i;
		uint8_t *temp;

		for (channel = 0; channel < c->channels; channel++) {
			for (i = 0; i < frame->nb_samples; i++) {
				memcpy(context->audio_deinterleave_buffer + (i + channel * frame->nb_samples) * context->audio_sample_size,
					   context->audio_input_buffer + (c->channels * i + channel) * context->audio_sample_size, context->audio_sample_size);
			}
		}

		temp = context->audio_deinterleave_buffer;
		context->audio_deinterleave_buffer = context->audio_input_buffer;
		context->audio_input_buffer = temp;
	}

	avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, context->audio_input_buffer,
	                         context->audio_input_samples * c->channels * context->audio_sample_size, 1);

	int success = 0;

	int ret = avcodec_send_frame(c, frame);
	if (ret < 0) {
		/* Can't send frame to encoder. This shouldn't happen. */
		fprintf(stderr, "Can't send audio frame: %s\n", av_err2str(ret));
		success = -1;
	}

	AVPacket *pkt = av_packet_alloc();

	while (ret >= 0) {

		ret = avcodec_receive_packet(c, pkt);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
			break;
		}
		if (ret < 0) {
			fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
			success = -1;
			/* Fix: previously fell through and muxed a packet that was never
			 * produced by the encoder. */
			break;
		}

		/* av_packet_rescale_ts() converts pts, dts AND duration in one go;
		 * the old extra av_rescale_q() on the duration rescaled it twice. */
		av_packet_rescale_ts(pkt, c->time_base, context->audio_stream->time_base);

		pkt->stream_index = context->audio_stream->index;

		pkt->flags |= AV_PKT_FLAG_KEY;

		int write_ret = av_interleaved_write_frame(context->outfile, pkt);
		if (write_ret != 0) {
			fprintf(stderr, "Error writing audio packet: %s\n", av_err2str(write_ret));
			success = -1;
			break;
		}
	}

	av_packet_free(&pkt);
	av_frame_free(&frame);

	return success;
}
#endif // #ifdef WITH_AUDASPACE

/* Allocate an AVFrame plus a pixel buffer of the given format and size.
 * The buffer comes from guardedalloc; free the result with delete_picture().
 * Returns NULL on allocation failure. */
static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
	AVFrame *f;
	uint8_t *buf;
	int size;

	/* allocate space for the struct */
	f = av_frame_alloc();
	if (!f) {
		return NULL;
	}
	size = av_image_get_buffer_size(pix_fmt, width, height, 1);
	/* allocate the actual picture buffer */
	buf = MEM_mallocN(size, "AVFrame buffer");
	if (!buf) {
		/* Fix: `f` came from av_frame_alloc(), so it must be released with
		 * av_frame_free(), not the C library's free(). */
		av_frame_free(&f);
		return NULL;
	}

	av_image_fill_arrays(f->data, f->linesize, buf, pix_fmt, width, height, 1);
	f->format = pix_fmt;
	f->width = width;
	f->height = height;

	return f;
}

/* Get the valid file extensions for the requested container format.
 * The first entry is always the one fed to av_guess_format(); the list is
 * NULL-terminated. Returns NULL for unknown formats. */
static const char **get_file_extensions(int format)
{
	static const char *ext_dv[] = { ".dv", NULL };
	static const char *ext_mpeg1[] = { ".mpg", ".mpeg", NULL };
	static const char *ext_mpeg2[] = { ".dvd", ".vob", ".mpg", ".mpeg", NULL };
	static const char *ext_mpeg4[] = { ".mp4", ".mpg", ".mpeg", NULL };
	static const char *ext_avi[] = { ".avi", NULL };
	static const char *ext_mov[] = { ".mov", NULL };
	static const char *ext_flv[] = { ".flv", NULL };
	static const char *ext_mkv[] = { ".mkv", NULL };
	static const char *ext_ogg[] = { ".ogv", ".ogg", NULL };

	switch (format) {
		case FFMPEG_DV:
			return ext_dv;
		case FFMPEG_MPEG1:
			return ext_mpeg1;
		case FFMPEG_MPEG2:
			return ext_mpeg2;
		case FFMPEG_MPEG4:
			return ext_mpeg4;
		case FFMPEG_AVI:
			return ext_avi;
		case FFMPEG_H264:
		case FFMPEG_XVID:
			/* FIXME: avi for now... */
			return ext_avi;
		case FFMPEG_MOV:
			return ext_mov;
		case FFMPEG_FLV:
			return ext_flv;
		case FFMPEG_MKV:
			return ext_mkv;
		case FFMPEG_OGG:
			return ext_ogg;
		default:
			return NULL;
	}
}

/* Encode one video frame and mux the resulting packet(s) into the output file.
 * `cfra` becomes the frame's pts (frame index relative to the start frame).
 * Returns 1 on success, -1 on failure. `reports` is currently unused but kept
 * for interface compatibility. */
static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, ReportList *reports)
{
	int ret, success = 1;
	AVPacket *packet = av_packet_alloc();

	AVCodecContext *c = context->video_codec;

	frame->pts = cfra;

	ret = avcodec_send_frame(c, frame);
	if (ret < 0) {
		/* Can't send frame to encoder. This shouldn't happen. */
		fprintf(stderr, "Can't send video frame: %s\n", av_err2str(ret));
		success = -1;
	}

	while (ret >= 0) {
		ret = avcodec_receive_packet(c, packet);

		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
			/* No more packets available. */
			break;
		}
		if (ret < 0) {
			fprintf(stderr, "Error encoding frame: %s\n", av_err2str(ret));
			/* Fix: the error was previously swallowed — the caller still saw
			 * success because `success` was left at 1. */
			success = -1;
			break;
		}

		packet->stream_index = context->video_stream->index;
		av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);
		if (av_interleaved_write_frame(context->outfile, packet) != 0) {
			success = -1;
			break;
		}
	}

	/* Fix: failure is signalled by -1, which is truthy — the old `!success`
	 * test could never fire. */
	if (success < 0) {
		PRINT("Error writing frame: %s\n", av_err2str(ret));
	}

	av_packet_free(&packet);

	return success;
}

/* Copy Blender's RGBA render result into an AVFrame, flipping it vertically
 * and converting to the encoder's pixel format when necessary.
 * Returns context->current_frame, ready to be sent to the video encoder. */
static AVFrame *generate_video_frame(FFMpegContext *context, const uint8_t *pixels)
{
	AVCodecParameters *codec = context->video_stream->codecpar;
	int height = codec->height;
	AVFrame *rgb_frame;

	if (context->img_convert_frame != NULL) {
		/* Pixel format conversion is needed. */
		rgb_frame = context->img_convert_frame;
	}
	else {
		/* The output pixel format is Blender's internal pixel format. */
		rgb_frame = context->current_frame;
	}

	/* Copy the Blender pixels into the FFmpeg datastructure, taking care of endianness and flipping
	 * the image vertically. */
	/* NOTE(review): the source rows are assumed to be exactly `linesize` bytes
	 * apart, i.e. the input buffer has no row padding and matches the frame's
	 * stride — TODO confirm for widths where FFmpeg pads the linesize. */
	int linesize = rgb_frame->linesize[0];
	for (int y = 0; y < height; y++) {
		/* Write rows bottom-up: Blender stores images bottom-to-top, FFmpeg top-to-bottom. */
		uint8_t *target = rgb_frame->data[0] + linesize * (height - y - 1);
		const uint8_t *src = pixels + linesize * y;

#  if ENDIAN_ORDER == L_ENDIAN
		memcpy(target, src, linesize);

#  elif ENDIAN_ORDER == B_ENDIAN
		/* Swap the byte order of each 4-byte pixel while copying. */
		const uint8_t *end = src + linesize;
		while (src != end) {
			target[3] = src[0];
			target[2] = src[1];
			target[1] = src[2];
			target[0] = src[3];

			target += 4;
			src += 4;
		}
#  else
#    error ENDIAN_ORDER should either be L_ENDIAN or B_ENDIAN.
#  endif
	}

	/* Convert to the output pixel format, if it's different than Blender's internal one. */
	if (context->img_convert_frame != NULL) {
		BLI_assert(context->img_convert_ctx != NULL);
		sws_scale(context->img_convert_ctx,
							(const uint8_t *const *)rgb_frame->data,
							rgb_frame->linesize,
							0,
							codec->height,
							context->current_frame->data,
							context->current_frame->linesize);
	}

	return context->current_frame;
}

/* Prepare a video stream for the output file: create the stream, configure and
 * open the encoder, and allocate the frames/converter used while encoding.
 * On failure returns NULL; `error` receives the encoder's last error message
 * when one is available (empty string otherwise). */
static AVStream *alloc_video_stream(FFMpegContext *context, RenderData *rd, int codec_id, AVFormatContext *of,
                                    int rectx, int recty, char *error, int error_size)
{
	AVStream *st;
	AVCodec *codec;
	AVDictionary *opts = NULL;

	error[0] = '\0';

	st = avformat_new_stream(of, NULL);
	if (!st) return NULL;
	st->id = 0;

	/* Set up the codec context */

	context->video_codec = avcodec_alloc_context3(NULL);
	AVCodecContext *c = context->video_codec;
	if (c == NULL) {
		return NULL;
	}
	c->codec_id = codec_id;
	c->codec_type = AVMEDIA_TYPE_VIDEO;

	/* Get some values from the current render settings */

	c->width = rectx;
	c->height = recty;

	/* FIXME: Really bad hack (tm) for NTSC support */
	if (context->ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
		/* 29.97 fps expressed exactly as 2997/100. */
		c->time_base.den = 2997;
		c->time_base.num = 100;
	}
	else if ((float) ((int) rd->frs_sec_base) == rd->frs_sec_base) {
		/* Integral frame-rate base: use it directly. */
		c->time_base.den = rd->frs_sec;
		c->time_base.num = (int) rd->frs_sec_base;
	}
	else {
		/* Fractional base: scale both terms so the ratio fits in integers.
		 * Cast explicitly — the product is computed as a double. */
		c->time_base.den = rd->frs_sec * 100000;
		c->time_base.num = (int) (((double) rd->frs_sec_base) * 100000);
	}

	c->gop_size = context->ffmpeg_gop_size;
	c->max_b_frames = context->ffmpeg_max_b_frames;

	if (context->ffmpeg_crf >= 0) {
		/* Constant-rate-factor mode: quality target instead of bitrate. */
		ffmpeg_dict_set_int(&opts, "crf", context->ffmpeg_crf);
	}
	else {
		c->bit_rate = context->ffmpeg_video_bitrate * 1000;
		c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
		c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
		c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;
	}

	if (context->ffmpeg_preset) {
		/* Map the Blender preset enum to the encoder's preset name (x264-style). */
		char const *preset_name;
		switch (context->ffmpeg_preset) {
			case FFM_PRESET_ULTRAFAST: preset_name = "ultrafast"; break;
			case FFM_PRESET_SUPERFAST: preset_name = "superfast"; break;
			case FFM_PRESET_VERYFAST: preset_name = "veryfast"; break;
			case FFM_PRESET_FASTER: preset_name = "faster"; break;
			case FFM_PRESET_FAST: preset_name = "fast"; break;
			case FFM_PRESET_MEDIUM: preset_name = "medium"; break;
			case FFM_PRESET_SLOW: preset_name = "slow"; break;
			case FFM_PRESET_SLOWER: preset_name = "slower"; break;
			case FFM_PRESET_VERYSLOW: preset_name = "veryslow"; break;
			default:
				printf("Unknown preset number %i, ignoring.\n", context->ffmpeg_preset);
				preset_name = NULL;
		}
		if (preset_name != NULL) {
			av_dict_set(&opts, "preset", preset_name, 0);
		}
	}

#if 0
	/* this options are not set in ffmpeg.c and leads to artifacts with MPEG-4
	 * see #33586: Encoding to mpeg4 makes first frame(s) blocky
	 */
	c->rc_initial_buffer_occupancy = rd->ffcodecdata.rc_buffer_size * 3 / 4;
	c->rc_buffer_aggressivity = 1.0;
#endif

	codec = avcodec_find_encoder(c->codec_id);
	if (!codec) {
		/* Fix: free the options dictionary (was leaked on this path) and clear
		 * the dangling context pointer so end_ffmpeg_impl() doesn't free the
		 * codec context a second time. */
		av_dict_free(&opts);
		avcodec_free_context(&c);
		context->video_codec = NULL;
		return NULL;
	}

	/* Be sure to use the correct pixel format(e.g. RGB, YUV) */

	if (codec->pix_fmts) {
		c->pix_fmt = codec->pix_fmts[0];
	}
	else {
		/* makes HuffYUV happy ... */
		c->pix_fmt = AV_PIX_FMT_YUV422P;
	}

	if (context->ffmpeg_type == FFMPEG_XVID) {
		/* arghhhh ... */
		c->pix_fmt = AV_PIX_FMT_YUV420P;
		c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
	}

	if (codec_id == AV_CODEC_ID_H264) {
		/* correct wrong default ffmpeg param which crash x264 */
		c->qmin = 10;
		c->qmax = 51;
	}

	/* Keep lossless encodes in the RGB domain. */
	if (codec_id == AV_CODEC_ID_HUFFYUV) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = AV_PIX_FMT_BGRA;
		}
		else {
			c->pix_fmt = AV_PIX_FMT_RGB32;
		}
	}

	if (codec_id == AV_CODEC_ID_DNXHD) {
		if (rd->ffcodecdata.flags & FFMPEG_LOSSLESS_OUTPUT) {
			/* Set the block decision algorithm to be of the highest quality ("rd" == 2). */
			c->mb_decision = 2;
		}
	}

	if (codec_id == AV_CODEC_ID_FFV1) {
		c->pix_fmt = AV_PIX_FMT_RGB32;
	}

	if (codec_id == AV_CODEC_ID_QTRLE) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = AV_PIX_FMT_ARGB;
		}
	}

	if (codec_id == AV_CODEC_ID_PNG) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = AV_PIX_FMT_RGBA;
		}
	}

	if ((of->oformat->flags & AVFMT_GLOBALHEADER)
#if 0
	    || STREQ(of->oformat->name, "mp4")
	    || STREQ(of->oformat->name, "mov")
	    || STREQ(of->oformat->name, "3gp")
#endif
	    )
	{
		PRINT("Using global header\n");
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}

	/* Determine whether we are encoding interlaced material or not */
	if (rd->mode & R_FIELDS) {
		PRINT("Encoding interlaced video\n");
		c->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
		c->flags |= AV_CODEC_FLAG_INTERLACED_ME;
	}

	/* xasp & yasp got float lately... */

	st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(((double) rd->xasp / (double) rd->yasp), 255);
	st->avg_frame_rate = av_inv_q(c->time_base);

	if (avcodec_open2(c, codec, &opts) < 0) {
		BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
		av_dict_free(&opts);
		/* Fix: clear the dangling pointer, see the codec-not-found path above. */
		avcodec_free_context(&c);
		context->video_codec = NULL;
		return NULL;
	}
	av_dict_free(&opts);

	/* FFmpeg expects its data in the output pixel format. */
	context->current_frame = alloc_picture(c->pix_fmt, c->width, c->height);

	if (c->pix_fmt == AV_PIX_FMT_RGBA) {
		/* Output pixel format is the same we use internally, no conversion necessary. */
		context->img_convert_frame = NULL;
		context->img_convert_ctx = NULL;
	}
	else {
		/* Output pixel format is different, allocate frame for conversion. */
		context->img_convert_frame = alloc_picture(AV_PIX_FMT_RGBA, c->width, c->height);
		context->img_convert_ctx = sws_getContext(c->width,
		                                          c->height,
		                                          AV_PIX_FMT_RGBA,
		                                          c->width,
		                                          c->height,
		                                          c->pix_fmt,
		                                          SWS_BICUBIC,
		                                          NULL,
		                                          NULL,
		                                          NULL);
	}

	/* Publish the encoder settings on the stream for the muxer. */
	avcodec_parameters_from_context(st->codecpar, c);

	return st;
}

/* Prepare an audio stream for the output file: create the stream, configure
 * and open the encoder, and allocate the sample buffers.
 * On failure returns NULL with context->audio_codec cleared; `error` receives
 * the encoder's last error message when available. */
static AVStream *alloc_audio_stream(FFMpegContext *context, RenderData *rd, int codec_id, AVFormatContext *of, char *error, int error_size)
{
	AVStream *st;
	const AVCodec *codec;

	error[0] = '\0';

	st = avformat_new_stream(of, NULL);
	if (!st) return NULL;
	st->id = 1;

	codec = avcodec_find_encoder(codec_id);
	if (!codec) {
		fprintf(stderr, "Couldn't find valid audio codec\n");
		context->audio_codec = NULL;
		return NULL;
	}

	context->audio_codec = avcodec_alloc_context3(codec);
	AVCodecContext *c = context->audio_codec;
	c->thread_count = BLI_system_thread_count();
	c->thread_type = FF_THREAD_SLICE;

	c->sample_rate = rd->ffcodecdata.audio_mixrate;
	c->bit_rate = context->ffmpeg_audio_bitrate * 1000;
	c->sample_fmt = AV_SAMPLE_FMT_S16;
	c->channels = rd->ffcodecdata.audio_channels;

	/* Map the Blender channel count to an FFmpeg channel layout. */
	switch (rd->ffcodecdata.audio_channels) {
		case AUD_CHANNELS_MONO:
			c->channel_layout = AV_CH_LAYOUT_MONO;
			break;
		case AUD_CHANNELS_STEREO:
			c->channel_layout = AV_CH_LAYOUT_STEREO;
			break;
		case AUD_CHANNELS_SURROUND4:
			c->channel_layout = AV_CH_LAYOUT_QUAD;
			break;
		case AUD_CHANNELS_SURROUND51:
			c->channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
			break;
		case AUD_CHANNELS_SURROUND71:
			c->channel_layout = AV_CH_LAYOUT_7POINT1;
			break;
		default:
			/* Unrecognized channel count: leave the layout unset. */
			break;
	}

	if (request_float_audio_buffer(codec_id)) {
		/* mainly for AAC codec which is experimental */
		c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
		c->sample_fmt = AV_SAMPLE_FMT_FLT;
	}

	if (codec->sample_fmts) {
		/* check if the preferred sample format for this codec is supported.
		 * this is because, depending on the version of libav, and with the whole ffmpeg/libav fork situation,
		 * you have various implementations around. float samples in particular are not always supported.
		 */
		const enum AVSampleFormat *p = codec->sample_fmts;
		for (; *p != -1; p++) {
			if (*p == c->sample_fmt) {
				break;
			}
		}
		if (*p == -1) {
			/* sample format incompatible with codec. Defaulting to a format known to work */
			c->sample_fmt = codec->sample_fmts[0];
		}
	}

	if (codec->supported_samplerates) {
		/* Pick the supported sample rate closest to the requested mix rate. */
		const int *p = codec->supported_samplerates;
		int best = 0;
		int best_dist = INT_MAX;
		for (; *p; p++) {
			int dist = abs(c->sample_rate - *p);
			if (dist < best_dist) {
				best_dist = dist;
				best = *p;
			}
		}
		/* best is the closest supported sample rate (same as selected if best_dist == 0) */
		c->sample_rate = best;
	}

	if (of->oformat->flags & AVFMT_GLOBALHEADER) {
		c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}

	if (avcodec_open2(c, codec, NULL) < 0) {
		//XXX error("Couldn't initialize audio codec");
		BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
		avcodec_free_context(&c);
		context->audio_codec = NULL;
		return NULL;
	}

	/* need to prevent floating point exception when using vorbis audio codec,
	 * initialize this value in the same way as it's done in FFmpeg itself (sergey) */
	c->time_base.num = 1;
	c->time_base.den = c->sample_rate;

	if (c->frame_size == 0) {
		/* Used to be `if ((c->codec_id >= CODEC_ID_PCM_S16LE) && (c->codec_id <= CODEC_ID_PCM_DVD))`;
		 * not sure if that is needed anymore, so let's try out if there are any
		 * complaints regarding some ffmpeg versions users might have.
		 * (Braced now: the body was separated from the `if` by comment lines.) */
		context->audio_input_samples = AV_INPUT_BUFFER_MIN_SIZE * 8 / c->bits_per_coded_sample / c->channels;
	}
	else {
		context->audio_input_samples = c->frame_size;
	}

	/* Planar sample formats need deinterleaving before encoding. */
	context->audio_deinterleave = av_sample_fmt_is_planar(c->sample_fmt);

	context->audio_sample_size = av_get_bytes_per_sample(c->sample_fmt);

	context->audio_input_buffer = (uint8_t *) av_malloc(context->audio_input_samples * c->channels * context->audio_sample_size);

	if (context->audio_deinterleave)
		context->audio_deinterleave_buffer = (uint8_t *) av_malloc(context->audio_input_samples * c->channels * context->audio_sample_size);

	context->audio_time = 0.0f;

	/* Publish the encoder settings on the stream for the muxer. */
	avcodec_parameters_from_context(st->codecpar, c);

	return st;
}
/* essential functions -- start, append, end */

/* Store an integer option in an AVDictionary, formatted as a decimal string
 * (av_dict_set only accepts string values). */
static void ffmpeg_dict_set_int(AVDictionary **dict, const char *key, int value)
{
	char value_str[32];

	BLI_snprintf(value_str, sizeof(value_str), "%d", value);
	av_dict_set(dict, key, value_str, 0);
}

/* Open the output file and set up all encoding state on `context`:
 * copies the render settings, picks the container and codecs, allocates the
 * video/audio streams, opens the file and writes the media header.
 * Returns 1 on success, 0 on failure (errors reported via `reports`). */
static int start_ffmpeg_impl(FFMpegContext *context, struct RenderData *rd, int rectx, int recty, const char *suffix, ReportList *reports)
{
	/* Handle to the output file */
	AVFormatContext *of;
	const AVOutputFormat *fmt;
	char name[FILE_MAX], error[1024];
	const char **exts;

	/* Cache the render settings on the context. */
	context->ffmpeg_type = rd->ffcodecdata.type;
	context->ffmpeg_codec = rd->ffcodecdata.codec;
	context->ffmpeg_audio_codec = rd->ffcodecdata.audio_codec;
	context->ffmpeg_video_bitrate = rd->ffcodecdata.video_bitrate;
	context->ffmpeg_audio_bitrate = rd->ffcodecdata.audio_bitrate;
	context->ffmpeg_gop_size = rd->ffcodecdata.gop_size;
	context->ffmpeg_autosplit = rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT;
	context->ffmpeg_crf = rd->ffcodecdata.constant_rate_factor;
	context->ffmpeg_preset = rd->ffcodecdata.ffmpeg_preset;

	if ((rd->ffcodecdata.flags & FFMPEG_USE_MAX_B_FRAMES) != 0) {
		/* NOTE(review): when the flag is unset, ffmpeg_max_b_frames keeps its
		 * previous value — presumably the context is zero-initialized by its
		 * allocator; confirm at the allocation site. */
		context->ffmpeg_max_b_frames = rd->ffcodecdata.max_b_frames;
	}

	/* Determine the correct filename */
	ffmpeg_filepath_get(context, name, rd, context->ffmpeg_preview, suffix);
	PRINT("Starting output to %s(ffmpeg)...\n"
	        "  Using type=%d, codec=%d, audio_codec=%d,\n"
	        "  video_bitrate=%d, audio_bitrate=%d,\n"
	        "  gop_size=%d, autosplit=%d\n"
	        "  render width=%d, render height=%d\n",
	        name, context->ffmpeg_type, context->ffmpeg_codec, context->ffmpeg_audio_codec,
	        context->ffmpeg_video_bitrate, context->ffmpeg_audio_bitrate,
	        context->ffmpeg_gop_size, context->ffmpeg_autosplit, rectx, recty);

	/* Sanity checks for the output file extensions. */
	exts = get_file_extensions(context->ffmpeg_type);
	if (!exts) {
		BKE_report(reports, RPT_ERROR, "No valid formats found");
		return 0;
	}

	/* Let FFmpeg deduce the container from the preferred extension. */
	fmt = av_guess_format(NULL, exts[0], NULL);
	if (!fmt) {
		BKE_report(reports, RPT_ERROR, "No valid formats found");
		return 0;
	}

	of = avformat_alloc_context();
	if (!of) {
		BKE_report(reports, RPT_ERROR, "Can't allocate ffmpeg format context");
		return 0;
	}

	enum AVCodecID audio_codec = context->ffmpeg_audio_codec;
	enum AVCodecID video_codec = context->ffmpeg_codec;

	/* `url` is owned (and later freed) by the format context. */
	of->url = av_strdup(name);
	/* Check if we need to force change the codec because of file type codec restrictions */
	switch (context->ffmpeg_type) {
		case FFMPEG_OGG:
			video_codec = AV_CODEC_ID_THEORA;
			break;
		case FFMPEG_DV:
			video_codec = AV_CODEC_ID_DVVIDEO;
			break;
		case FFMPEG_MPEG1:
			video_codec = AV_CODEC_ID_MPEG1VIDEO;
			break;
		case FFMPEG_MPEG2:
			video_codec = AV_CODEC_ID_MPEG2VIDEO;
			break;
		case FFMPEG_H264:
			video_codec = AV_CODEC_ID_H264;
			break;
		case FFMPEG_XVID:
			video_codec = AV_CODEC_ID_MPEG4;
			break;
		case FFMPEG_FLV:
			video_codec = AV_CODEC_ID_FLV1;
			break;
		default:
			/* These containers are not restricted to any specific codec types.
			 * Currently we expect these to be .avi, .mov, .mkv, and .mp4.
			 */
			video_codec = context->ffmpeg_codec;
			break;
	}

	/* Returns after this must 'goto fail;' */

	of->oformat = fmt;

	/* DV has fixed frame geometry; reject anything else up front. */
	if (video_codec == AV_CODEC_ID_DVVIDEO) {
		if (rectx != 720) {
			BKE_report(reports, RPT_ERROR, "Render width has to be 720 pixels for DV!");
			goto fail;
		}
		if (rd->frs_sec != 25 && recty != 480) {
			BKE_report(reports, RPT_ERROR, "Render height has to be 480 pixels for DV-NTSC!");
			goto fail;
		}
		if (rd->frs_sec == 25 && recty != 576) {
			BKE_report(reports, RPT_ERROR, "Render height has to be 576 pixels for DV-PAL!");
			goto fail;
		}
	}

	if (context->ffmpeg_type == FFMPEG_DV) {
		audio_codec = AV_CODEC_ID_PCM_S16LE;
		if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE && rd->ffcodecdata.audio_mixrate != 48000 && rd->ffcodecdata.audio_channels != 2) {
			BKE_report(reports, RPT_ERROR, "FFMPEG only supports 48khz / stereo audio for DV!");
			goto fail;
		}
	}

	if (video_codec != AV_CODEC_ID_NONE) {
		context->video_stream = alloc_video_stream(context, rd, video_codec, of, rectx, recty, error, sizeof(error));
		PRINT("alloc video stream %p\n", context->video_stream);
		if (!context->video_stream) {
			if (error[0]) {
				BKE_report(reports, RPT_ERROR, error);
				PRINT("Video stream error: %s\n", error);
			}
			else {
				BKE_report(reports, RPT_ERROR, "Error initializing video stream");
				PRINT("Error initializing video stream");
			}
			goto fail;
		}
	}

	if (context->ffmpeg_audio_codec != AV_CODEC_ID_NONE) {
		context->audio_stream = alloc_audio_stream(context, rd, audio_codec, of, error, sizeof(error));
		if (!context->audio_stream) {
			if (error[0]) {
				BKE_report(reports, RPT_ERROR, error);
				PRINT("Audio stream error: %s\n", error);
			}
			else {
				BKE_report(reports, RPT_ERROR, "Error initializing audio stream");
				PRINT("Error initializing audio stream");
			}
			goto fail;
		}
	}
	/* Some containers (AVFMT_NOFILE) manage their own I/O; only open a file otherwise. */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&of->pb, name, AVIO_FLAG_WRITE) < 0) {
			BKE_report(reports, RPT_ERROR, "Could not open file for writing");
			PRINT("Could not open file for writing\n");
			goto fail;
		}
	}

	int ret = avformat_write_header(of, NULL);
	if (ret < 0) {
		BKE_report(reports, RPT_ERROR, "Could not initialize streams, probably unsupported codec combination");
		PRINT("Could not write media header: %s\n", av_err2str(ret));
		goto fail;
	}

	context->outfile = of;
	av_dump_format(of, 0, name, 1);

	return 1;


fail:
	/* Unified cleanup: close the file (if opened), drop stream pointers and
	 * free the format context. The streams themselves are owned by `of`. */
	if (of->pb) {
		avio_close(of->pb);
	}

	if (context->video_stream) {
		context->video_stream = NULL;
	}

	if (context->audio_stream) {
		context->audio_stream = NULL;
	}

	avformat_free_context(of);
	return 0;
}

/**
 * Writes any delayed frames still buffered in the video encoder. Called
 * right before closing the encoder.
 *
 * An encoder may use both past and future frames to predict inter-frames
 * (H.264 B-frames, for example), so it can emit packets in a different order
 * than the frames were submitted, and may hold several frames back until
 * enough context is available. These delayed frames must be drained before
 * the stream is closed: we enter draining mode by sending a NULL frame to
 * avcodec_send_frame() and then pull the remaining packets with
 * avcodec_receive_packet() until it reports EOF.
 */
static void flush_ffmpeg(FFMpegContext *context)
{
	AVCodecContext *c = context->video_codec;
	AVPacket *packet = av_packet_alloc();

	/* NULL frame switches the encoder into draining mode. */
	avcodec_send_frame(c, NULL);

	for (;;) {
		const int ret = avcodec_receive_packet(c, packet);

		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
			/* Encoder fully drained. */
			break;
		}
		if (ret < 0) {
			fprintf(stderr, "Error encoding delayed frame: %s\n", av_err2str(ret));
			break;
		}

		packet->stream_index = context->video_stream->index;
		av_packet_rescale_ts(packet, c->time_base, context->video_stream->time_base);

		const int write_ret = av_interleaved_write_frame(context->outfile, packet);
		if (write_ret != 0) {
			fprintf(stderr, "Error writing delayed frame: %s\n", av_err2str(write_ret));
			break;
		}
	}

	av_packet_free(&packet);
}

/* **********************************************************************
 * * public interface
 * ********************************************************************** */

/* Get the output filename -- similar to the other output formats.
 * Builds an absolute path from rd->pic, appends the autosplit counter
 * (when `context` is non-NULL and autosplit is enabled), the frame range,
 * and a container extension when R_EXTENSION is set. */
static void ffmpeg_filepath_get(FFMpegContext *context, char *string, RenderData *rd, bool preview, const char *suffix)
{
	char autosplit[20];

	const char **exts = get_file_extensions(rd->ffcodecdata.type);
	const char **fe = exts;
	int sfra, efra;

	if (!string || !exts) return;

	/* Preview renders use the preview frame range. */
	if (preview) {
		sfra = rd->psfra;
		efra = rd->pefra;
	}
	else {
		sfra = rd->sfra;
		efra = rd->efra;
	}

	strcpy(string, rd->pic);
	BLI_path_abs(string, G.main->name);

	BLI_make_existing_file(string);

	autosplit[0] = '\0';

	if ((rd->ffcodecdata.flags & FFMPEG_AUTOSPLIT_OUTPUT) != 0) {
		if (context) {
			sprintf(autosplit, "_%03d", context->ffmpeg_autosplit_count);
		}
	}

	if (rd->scemode & R_EXTENSION) {
		/* Look for a known extension already present on the path. Guard the
		 * length so we never form a pointer before the start of the string
		 * (undefined behavior when the path is shorter than the extension). */
		const size_t string_len = strlen(string);
		while (*fe) {
			const size_t ext_len = strlen(*fe);
			if (string_len >= ext_len && BLI_strcasecmp(string + string_len - ext_len, *fe) == 0) {
				break;
			}
			fe++;
		}

		if (*fe == NULL) {
			/* No known extension: append autosplit marker, frame range and
			 * the format's default extension. */
			strcat(string, autosplit);

			BLI_path_frame_range(string, sfra, efra, 4);
			strcat(string, *exts);
		}
		else {
			/* Known extension found: re-insert the autosplit marker before it. */
			*(string + string_len - strlen(*fe)) = '\0';
			strcat(string, autosplit);
			strcat(string, *fe);
		}
	}
	else {
		if (BLI_path_frame_check_chars(string)) {
			BLI_path_frame_range(string, sfra, efra, 4);
		}

		strcat(string, autosplit);
	}

	BLI_path_suffix(string, FILE_MAX, suffix, "");
}

/* Public wrapper: compute the output file path for the given render settings.
 * Passes a NULL context, so no autosplit counter suffix is appended. */
void BKE_ffmpeg_filepath_get(char *string, RenderData *rd, bool preview, const char *suffix)
{
	ffmpeg_filepath_get(NULL, string, rd, preview, suffix);
}

/* Public entry point: open the output file and, when an audio stream was
 * created, set up the Audaspace mixdown device matching the encoder's
 * sample format.
 * Returns 1 on success, 0 on failure; -31415 is a sentinel returned when
 * the encoder picked a sample format we cannot feed from Audaspace. */
int BKE_ffmpeg_start(void *context_v, struct Scene *scene, RenderData *rd, int rectx, int recty,
                     ReportList *reports, bool preview, const char *suffix)
{
	int success;
	FFMpegContext *context = context_v;

	context->ffmpeg_autosplit_count = 0;
	context->ffmpeg_preview = preview;

	success = start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
#ifdef WITH_AUDASPACE
	/* audio_stream is NULL when start_ffmpeg_impl failed or audio is disabled. */
	if (context->audio_stream) {
		AVCodecContext *c = context->audio_codec;

		AUD_DeviceSpecs specs;
		specs.channels = c->channels;

		/* Map the (packed view of the) encoder sample format to Audaspace's. */
		switch (av_get_packed_sample_fmt(c->sample_fmt)) {
			case AV_SAMPLE_FMT_U8:
				specs.format = AUD_FORMAT_U8;
				break;
			case AV_SAMPLE_FMT_S16:
				specs.format = AUD_FORMAT_S16;
				break;
			case AV_SAMPLE_FMT_S32:
				specs.format = AUD_FORMAT_S32;
				break;
			case AV_SAMPLE_FMT_FLT:
				specs.format = AUD_FORMAT_FLOAT32;
				break;
			case AV_SAMPLE_FMT_DBL:
				specs.format = AUD_FORMAT_FLOAT64;
				break;
			default:
				/* NOTE(review): this sentinel leaks the already-opened output
				 * file/streams and skips mixdown creation — presumably callers
				 * treat any value != 1 as failure; confirm at the call sites. */
				return -31415;
		}

		specs.rate = rd->ffcodecdata.audio_mixrate;
		context->audio_mixdown_device = BKE_sound_mixdown(scene, specs, preview ? rd->psfra : rd->sfra, rd->ffcodecdata.audio_volume);
	}
#endif
	return success;
}

static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit);

#ifdef WITH_AUDASPACE
/* Encode and mux audio until the audio clock reaches `to_pts` (seconds),
 * or until write_audio_frame() reports an error. No-op without a stream. */
static void write_audio_frames(FFMpegContext *context, double to_pts)
{
	if (context->audio_stream == NULL) {
		return;
	}

	while (context->audio_time < to_pts) {
		if (write_audio_frame(context)) {
			break;
		}
	}
}
#endif

/* Public entry point: append one rendered frame (and the matching span of
 * audio) to the output file. Handles autosplitting into a new file when the
 * current one exceeds FFMPEG_AUTOSPLIT_SIZE.
 * Returns non-zero on success, 0 on failure. */
int BKE_ffmpeg_append(void *context_v, RenderData *rd, int start_frame, int frame, int *pixels,
                      int rectx, int recty, const char *suffix, ReportList *reports)
{
	FFMpegContext *context = context_v;
	AVFrame *avframe;
	int success = 1;

	PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty);

/* why is this done before writing the video frame and again at end_ffmpeg? */
//	write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

	if (context->video_stream) {
		avframe = generate_video_frame(context, (unsigned char *)pixels);
		/* Fix: write_video_frame() returns -1 on failure, which is truthy —
		 * the old `(avframe && write_video_frame(...))` reported success on
		 * encode/write errors. Test the return value explicitly. */
		success = (avframe != NULL) &&
		          (write_video_frame(context, frame - start_frame, avframe, reports) > 0);

		if (context->ffmpeg_autosplit) {
			if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
				/* Close the current file and start a fresh one with the next suffix. */
				end_ffmpeg_impl(context, true);
				context->ffmpeg_autosplit_count++;
				success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
			}
		}
	}

#ifdef WITH_AUDASPACE
	write_audio_frames(context, (frame - start_frame) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
#endif
	return success;
}

/* Flush the encoders, write the container trailer, and release everything the
 * context owns. With `is_autosplit` true the Audaspace mixdown device is kept
 * alive so the next split file can keep reading from it. */
static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
{
	PRINT("Closing ffmpeg...\n");

#if 0
	if (context->audio_stream) { /* SEE UPPER */
		write_audio_frames(context);
	}
#endif

#ifdef WITH_AUDASPACE
	if (is_autosplit == false) {
		if (context->audio_mixdown_device) {
			AUD_Device_free(context->audio_mixdown_device);
			context->audio_mixdown_device = NULL;
		}
	}
#endif

	/* Drain any delayed packets before writing the trailer. */
	if (context->video_stream) {
		PRINT("Flushing delayed frames...\n");
		flush_ffmpeg(context);
	}

	if (context->outfile) {
		av_write_trailer(context->outfile);
	}

	/* Close the video codec */

	/* Streams are owned by the format context; just drop our pointers. */
	if (context->video_stream != NULL) {
		PRINT("zero video stream %p\n", context->video_stream);
		context->video_stream = NULL;
	}

	if (context->audio_stream != NULL) {
		context->audio_stream = NULL;
	}

	/* free the temp buffer */
	if (context->current_frame != NULL) {
		delete_picture(context->current_frame);
		context->current_frame = NULL;
	}
	/* Close the file before freeing the format context that owns of->pb. */
	if (context->outfile != NULL && context->outfile->oformat) {
		if (!(context->outfile->oformat->flags & AVFMT_NOFILE)) {
			avio_close(context->outfile->pb);
		}
	}

	if (context->video_codec != NULL) {
		avcodec_free_context(&context->video_codec);
		context->video_codec = NULL;
	}
	if (context->audio_codec != NULL) {
		avcodec_free_context(&context->audio_codec);
		context->audio_codec = NULL;
	}

	if (context->img_convert_frame != NULL) {
		delete_picture(context->img_convert_frame);
		context->img_convert_frame = NULL;
	}

	if (context->outfile != NULL) {
		avformat_free_context(context->outfile);
		context->outfile = NULL;
	}

	if (context->audio_input_buffer != NULL) {
		av_free(context->audio_input_buffer);
		context->audio_input_buffer = NULL;
	}

	if (context->audio_deinterleave_buffer != NULL) {
		av_free(context->audio_deinterleave_buffer);
		context->audio_deinterleave_buffer = NULL;
	}

	if (context->img_convert_ctx != NULL) {
		sws_freeContext(context->img_convert_ctx);
		context->img_convert_ctx = NULL;
	}
}

void BKE_ffmpeg_end(void *context_v)
{
	FFMpegContext *context = context_v;
	end_ffmpeg_impl(context, false);
}

void BKE_ffmpeg_preset_set(RenderData *rd, int preset)
{
	bool is_ntsc = (rd->frs_sec != 25);

	switch (preset) {
		case FFMPEG_PRESET_VCD:
			rd->ffcodecdata.type = FFMPEG_MPEG1;
			rd->ffcodecdata.video_bitrate = 1150;
			rd->xsch = 352;
			rd->ysch = is_ntsc ? 240 : 288;
			rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
			rd->ffcodecdata.rc_max_rate = 1150;
			rd->ffcodecdata.rc_min_rate = 1150;
			rd->ffcodecdata.rc_buffer_size = 40 * 8;
			rd->ffcodecdata.mux_packet_size = 2324;
			rd->ffcodecdata.mux_rate = 2352 * 75 * 8;
			break;

		case FFMPEG_PRESET_SVCD:
			rd->ffcodecdata.type = FFMPEG_MPEG2;
			rd->ffcodecdata.video_bitrate = 2040;
			rd->xsch = 480;
			rd->ysch = is_ntsc ? 480 : 576;
			rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
			rd->ffcodecdata.rc_max_rate = 2516;
			rd->ffcodecdata.rc_min_rate = 0;
			rd->ffcodecdata.rc_buffer_size = 224 * 8;
			rd->ffcodecdata.mux_packet_size = 2324;
			rd->ffcodecdata.mux_rate = 0;
			break;

		case FFMPEG_PRESET_DVD:
			rd->ffcodecdata.type = FFMPEG_MPEG2;
			rd->ffcodecdata.video_bitrate = 6000;

			/* Don't set resolution, see [#21351]
			 * rd->xsch = 720;
			 * rd->ysch = is_ntsc ? 480 : 576; */

			rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
			rd->ffcodecdata.rc_max_rate = 9000;
			rd->ffcodecdata.rc_min_rate = 0;
			rd->ffcodecdata.rc_buffer_size = 224 * 8;
			rd->ffcodecdata.mux_packet_size = 2048;
			rd->ffcodecdata.mux_rate = 10080000;
			break;

		case FFMPEG_PRESET_DV:
			rd->ffcodecdata.type = FFMPEG_DV;
			rd->xsch = 720;
			rd->ysch = is_ntsc ? 480 : 576;
			break;

		case FFMPEG_PRESET_H264:
			rd->ffcodecdata.type = FFMPEG_AVI;
			rd->ffcodecdata.codec = AV_CODEC_ID_H264;
			rd->ffcodecdata.video_bitrate = 6000;
			rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
			rd->ffcodecdata.rc_max_rate = 9000;
			rd->ffcodecdata.rc_min_rate = 0;
			rd->ffcodecdata.rc_buffer_size = 224 * 8;
			rd->ffcodecdata.mux_packet_size = 2048;
			rd->ffcodecdata.mux_rate = 10080000;

			break;

		case FFMPEG_PRESET_THEORA:
		case FFMPEG_PRESET_XVID:
			if (preset == FFMPEG_PRESET_XVID) {
				rd->ffcodecdata.type = FFMPEG_AVI;
				rd->ffcodecdata.codec = AV_CODEC_ID_MPEG4;
			}
			else if (preset == FFMPEG_PRESET_THEORA) {
				rd->ffcodecdata.type = FFMPEG_OGG; // XXX broken
				rd->ffcodecdata.codec = AV_CODEC_ID_THEORA;
			}

			rd->ffcodecdata.video_bitrate = 6000;
			rd->ffcodecdata.gop_size = is_ntsc ? 18 : 15;
			rd->ffcodecdata.rc_max_rate = 9000;
			rd->ffcodecdata.rc_min_rate = 0;
			rd->ffcodecdata.rc_buffer_size = 224 * 8;
			rd->ffcodecdata.mux_packet_size = 2048;
			rd->ffcodecdata.mux_rate = 10080000;
			break;
	}
}

/* Keep the FFmpeg codec settings in \a rd consistent with the image type
 * selected in \a imf, applying a preset when the current settings don't
 * match (or are uninitialized for R_IMF_IMTYPE_FFMPEG). */
void BKE_ffmpeg_image_type_verify(RenderData *rd, ImageFormatData *imf)
{
	int audio = 0;

	switch (imf->imtype) {
		case R_IMF_IMTYPE_FFMPEG:
			/* Uninitialized or nonsensical settings: fall back to H.264/MKV. */
			if (rd->ffcodecdata.type <= 0 ||
			    rd->ffcodecdata.codec <= 0 ||
			    rd->ffcodecdata.audio_codec <= 0 ||
			    rd->ffcodecdata.video_bitrate <= 1)
			{
				BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
				rd->ffcodecdata.constant_rate_factor = FFM_CRF_MEDIUM;
				rd->ffcodecdata.ffmpeg_preset = FFM_PRESET_MEDIUM;
				rd->ffcodecdata.type = FFMPEG_MKV;
			}
			/* OGG output is broken, swap it out. */
			if (rd->ffcodecdata.type == FFMPEG_OGG) {
				rd->ffcodecdata.type = FFMPEG_MPEG2;
			}
			audio = 1;
			break;

		case R_IMF_IMTYPE_H264:
			if (rd->ffcodecdata.codec != AV_CODEC_ID_H264) {
				BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_H264);
				audio = 1;
			}
			break;

		case R_IMF_IMTYPE_XVID:
			if (rd->ffcodecdata.codec != AV_CODEC_ID_MPEG4) {
				BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_XVID);
				audio = 1;
			}
			break;

		case R_IMF_IMTYPE_THEORA:
			if (rd->ffcodecdata.codec != AV_CODEC_ID_THEORA) {
				BKE_ffmpeg_preset_set(rd, FFMPEG_PRESET_THEORA);
				audio = 1;
			}
			break;
	}

	/* Give the audio settings sane defaults when a preset was applied. */
	if (audio && rd->ffcodecdata.audio_codec < 0) {
		rd->ffcodecdata.audio_codec = AV_CODEC_ID_NONE;
		rd->ffcodecdata.audio_bitrate = 128;
	}
}

/* True when the currently selected codec can encode an alpha channel. */
bool BKE_ffmpeg_alpha_channel_is_supported(RenderData *rd)
{
	switch (rd->ffcodecdata.codec) {
		case AV_CODEC_ID_FFV1:
		case AV_CODEC_ID_QTRLE:
		case AV_CODEC_ID_PNG:
		case AV_CODEC_ID_VP9:
		case AV_CODEC_ID_HUFFYUV:
			return true;
		default:
			return false;
	}
}

/* Allocate a zero-initialized FFMpegContext with conservative defaults.
 * The caller owns the result and releases it with BKE_ffmpeg_context_free(). */
void *BKE_ffmpeg_context_create(void)
{
	/* MEM_callocN zero-fills, so every field not set below starts at 0/NULL/false
	 * (including ffmpeg_autosplit, ffmpeg_autosplit_count and ffmpeg_preview). */
	FFMpegContext *context = MEM_callocN(sizeof(FFMpegContext), "new ffmpeg context");

	context->ffmpeg_codec = AV_CODEC_ID_MPEG4;
	context->ffmpeg_audio_codec = AV_CODEC_ID_NONE;
	context->ffmpeg_video_bitrate = 1150;
	context->ffmpeg_audio_bitrate = 128;
	context->ffmpeg_gop_size = 12;

	return context;
}

void BKE_ffmpeg_context_free(void *context_v)
{
	FFMpegContext *context = context_v;
	if (context) {
		MEM_freeN(context);
	}
}

#endif /* WITH_FFMPEG */
