#include "XXfmUtility.h"

#define CouldNotAlloc AVERROR(ENOMEM)

int xxfmUtility_duration(const char *path){
    av_register_all();
    AVFormatContext *context = NULL;
    int ret = avformat_open_input(&context,path,NULL,NULL);
    if(ret < 0){
        return ret;
    }

    AVStream *stream = xxfmUtility_findVideoStream(context);
    if(NULL == stream){
        avformat_close_input(&context);
        return 0;
    }

    int duration = stream->duration * 1000 / stream->time_base.den;
    avformat_close_input(&context);
    return duration;
}

/*
 * Allocates and opens a decoder context matching `stream`'s codec
 * parameters. On success stores the opened context in *codecContext and
 * returns 0. Returns -1 when no decoder exists for the stream's codec ID,
 * AVERROR(ENOMEM) when allocation fails, or a negative AVERROR code from
 * the parameter copy / open steps. The caller owns the returned context
 * and must release it with avcodec_free_context().
 */
int xxfmUtility_openDecodec(AVCodecContext **codecContext, AVStream *stream){
    enum AVCodecID codecID = stream->codecpar->codec_id;
    AVCodec *codec = avcodec_find_decoder(codecID);
    if(NULL == codec){
        return -1;
    }

    AVCodecContext *context = avcodec_alloc_context3(codec);
    if(NULL == context){
        return CouldNotAlloc;
    }

    int ret = avcodec_parameters_to_context(context, stream->codecpar);
    if(ret < 0){
        avcodec_free_context(&context);
        return ret;
    }
    /* Fill in a default channel layout for audio streams. Read the channel
     * count from the freshly populated context rather than the deprecated
     * stream->codec accessor the original used. */
    if (context->codec_type == AVMEDIA_TYPE_AUDIO) {
        context->channel_layout = av_get_default_channel_layout(context->channels);
    }

    /* After avcodec_parameters_to_context the packet timebase does not need
     * to be set again (av_codec_set_pkt_timebase left out intentionally). */
    /* BUG FIX: the original invoked avcodec_open2 twice in a row — once to
     * capture `ret` and again inside the if-condition. Opening an already
     * opened context is invalid; call it exactly once. */
    ret = avcodec_open2(context, codec, NULL);
    if(0 != ret){
        avcodec_free_context(&context);
        return ret;
    }

    *codecContext = context;
    return 0;
}
/*
int xxfmUtility_decode(AVCodecContext *decodeContext, AVFrame *frame, AVPacket *packet){
    if(avcodec_send_packet(decodeContext, packet) < 0){
        return -1;
    }

    int ret = 0;
    while (ret >= 0) {
        ret = avcodec_receive_frame(decodeContext, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        else if (ret < 0) {
            return -1;
        }
        else{

        }
    }

}
*/
/*
 * Allocates and opens a video encoder for `codecID`, configured with the
 * given pixel format, dimensions, frame rate, bit rate and GOP size.
 * On success stores the opened context in *codecContext and returns 0.
 * Returns -1 when the encoder is missing or does not support `format`,
 * AVERROR(ENOMEM) when allocation fails, or a negative AVERROR code from
 * avcodec_open2. The caller owns the returned context.
 */
int xxfmUtility_openVideoEncodec(AVCodecContext **codecContext, enum AVCodecID codecID, enum AVPixelFormat format, int width, int height, int framerate, int bitrate, int gop){
    AVCodec *codec = avcodec_find_encoder(codecID);
    if(NULL == codec){
        return -1;
    }

    /* Check that the encoder supports the requested pixel format; pix_fmts
     * is an AV_PIX_FMT_NONE-terminated list. A NULL list means the set of
     * supported formats is unknown, which we treat as "supported", matching
     * the original behavior. */
    if (NULL != codec->pix_fmts) {
        int support = 0;
        for (const enum AVPixelFormat *p = codec->pix_fmts; AV_PIX_FMT_NONE != *p; ++p) {
            if (format == *p) {
                support = 1;
                break;
            }
        }
        if (0 == support) {
            /* BUG FIX: the original returned 0 (the success code) here
             * without ever assigning *codecContext, so the caller would
             * proceed with an uninitialized context pointer. Report the
             * unsupported format as a failure instead. */
            return -1;
        }
    }

    AVCodecContext *context = avcodec_alloc_context3(codec);
    if(NULL == context){
        return CouldNotAlloc; /* consistent with xxfmUtility_openDecodec */
    }

    /* Bit rate */
    context->bit_rate = bitrate;
    /* Resolution */
    context->width = width;
    context->height = height;
    /* 90 kHz timebase (the conventional MPEG clock); the actual frame rate
     * is carried separately in `framerate`. */
    context->time_base = (AVRational) { 1, 90000 };
    context->framerate = (AVRational) { framerate, 1 };
    /* Keyframe interval */
    context->gop_size = gop;
    context->pix_fmt = format;

    //if (context->flags & AVFMT_GLOBALHEADER)
    //    context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    if (codec->id == AV_CODEC_ID_H264) {
        av_opt_set(context->priv_data, "preset", "superfast", 0); /* encoding speed */
        av_opt_set(context->priv_data, "tune", "zerolatency", 0); /* real-time encoding */
        av_opt_set(context->priv_data, "profile", "baseline", 0);

        context->slices = 1;
        context->slice_count = 1;
        context->thread_count = 1;
        context->thread_type = FF_THREAD_FRAME;
    }

    int ret = avcodec_open2(context, codec, NULL);
    if (ret < 0){
        /* NOTE: dropped the unused av_err2str() temporary the original
         * captured here; `ret` already carries the error. */
        avcodec_free_context(&context);
        return ret;
    }

    *codecContext = context;
    return 0;
}
/*
 * Allocates and opens an audio encoder for `codecID` with the requested
 * sample format, sample rate and channel count. The channel layout is
 * derived from the channel count via av_get_default_channel_layout.
 * On success stores the opened context in *codecContext and returns 0;
 * returns -1 on any failure. The caller owns the returned context.
 */
int xxfmUtility_openAudioEncode(AVCodecContext **codecContext, enum AVCodecID codecID, enum AVSampleFormat format, int sampleRate, int channel) {
	AVCodec *encoder = avcodec_find_encoder(codecID);
	if (NULL == encoder) {
		return -1;
	}

	AVCodecContext *ctx = avcodec_alloc_context3(encoder);
	if (NULL == ctx) {
		return -1;
	}

	ctx->sample_fmt = format;
	ctx->time_base = av_make_q(1, sampleRate);
	ctx->sample_rate = sampleRate;
	ctx->channels = channel;
	ctx->channel_layout = av_get_default_channel_layout(channel);

	if (avcodec_open2(ctx, encoder, NULL) < 0) {
		avcodec_free_context(&ctx);
		return -1;
	}

	*codecContext = ctx;
	return 0;
}
/* Returns the first video stream in `formatContext`, or NULL if none exists. */
AVStream* xxfmUtility_findVideoStream(AVFormatContext *formatContext){
    AVStream *found = NULL;
    for(unsigned int i = 0; i < formatContext->nb_streams && NULL == found; ++i){
        AVStream *candidate = formatContext->streams[i];
        if(candidate->codecpar->codec_type == AVMEDIA_TYPE_VIDEO){
            found = candidate;
        }
    }
    return found;
}
/* Returns the first audio stream in `formatContext`, or NULL if none exists. */
AVStream* xxfmUtility_findAudioStream(AVFormatContext *formatContext){
    AVStream *found = NULL;
    for(unsigned int i = 0; i < formatContext->nb_streams && NULL == found; ++i){
        AVStream *candidate = formatContext->streams[i];
        if(candidate->codecpar->codec_type == AVMEDIA_TYPE_AUDIO){
            found = candidate;
        }
    }
    return found;
}

/* Rescales all four timestamp fields of `frame` in place from time base
 * `from` to time base `to`.
 * NOTE(review): pkt_pts is deprecated/removed in newer FFmpeg releases —
 * this code targets an older libavutil; confirm before upgrading. */
void xxfmUtility_transformTimeBase(AVFrame *frame, AVRational from, AVRational to){
    int64_t *stamps[] = { &frame->pts, &frame->pkt_pts, &frame->pkt_dts, &frame->pkt_duration };
    for (size_t i = 0; i < sizeof stamps / sizeof stamps[0]; ++i) {
        *stamps[i] = av_rescale_q(*stamps[i], from, to);
    }
}
/* Copies the four timestamp fields of `fromFrame` into `toFrame`,
 * rescaling each from time base `from` to time base `to`. `fromFrame`
 * itself is left untouched. */
void xxfmUtility_transformTimeBaseFromAnotherFrame(AVFrame *fromFrame,AVRational from, AVFrame *toFrame, AVRational to){
    const int64_t *src[] = { &fromFrame->pts, &fromFrame->pkt_pts, &fromFrame->pkt_dts, &fromFrame->pkt_duration };
    int64_t *dst[] = { &toFrame->pts, &toFrame->pkt_pts, &toFrame->pkt_dts, &toFrame->pkt_duration };
    for (size_t i = 0; i < sizeof src / sizeof src[0]; ++i) {
        *dst[i] = av_rescale_q(*src[i], from, to);
    }
}

/*
 * Allocates an audio AVFrame with the given sample format, sample count and
 * channel layout, including its data buffers. On success stores the frame
 * in *frame and returns 0; on failure *frame is NULL and the error code
 * from av_frame_get_buffer (or AVERROR(ENOMEM)) is returned.
 */
int xxfmUtility_initAudioFrame(AVFrame **frame, enum AVSampleFormat format, int nb_samples, int channel_layout) {
	AVFrame *allocated = av_frame_alloc();
	if (NULL == allocated) {
		*frame = NULL;
		return CouldNotAlloc;
	}

	allocated->format = format;
	allocated->nb_samples = nb_samples;
	allocated->channel_layout = channel_layout;

	/* align=0 lets libavutil pick a suitable buffer alignment. */
	int ret = av_frame_get_buffer(allocated, 0);
	if (0 != ret) {
		av_frame_free(&allocated); /* leaves allocated == NULL */
	}
	*frame = allocated;
	return ret;
}
/*
 * Allocates a video AVFrame with the given pixel format and dimensions,
 * including its data buffers. On success stores the frame in *frame and
 * returns 0; on failure *frame is NULL and the error code from
 * av_frame_get_buffer (or AVERROR(ENOMEM)) is returned.
 */
int xxfmUtility_initVideoFrame(AVFrame **frame, enum AVPixelFormat format, int width, int height) {
	AVFrame *allocated = av_frame_alloc();
	if (NULL == allocated) {
		*frame = NULL;
		return CouldNotAlloc;
	}

	allocated->width = width;
	allocated->height = height;
	allocated->format = format;

	/* align=0 lets libavutil pick a suitable buffer alignment. */
	int ret = av_frame_get_buffer(allocated, 0);
	if (0 != ret) {
		av_frame_free(&allocated); /* leaves allocated == NULL */
	}
	*frame = allocated;
	return ret;
}
/*
 * Allocates *packet and, when `data` is non-NULL with a positive `length`,
 * copies the bytes into a freshly allocated buffer owned by the packet.
 * Returns 0 on success or a negative AVERROR code; on failure *packet is
 * freed and reset to NULL.
 */
int xxfmUtility_initPacket(AVPacket **packet, const char *data, int length){
	if (!(*packet = av_packet_alloc())) {
		return AVERROR(ENOMEM);
	}
	av_init_packet(*packet);
	if (NULL != data && length > 0) {
		/* BUG FIX: av_packet_from_data requires the buffer to carry
		 * AV_INPUT_BUFFER_PADDING_SIZE zeroed bytes past the payload; the
		 * original allocated exactly `length` bytes. */
		uint8_t *buffer = av_malloc((size_t)length + AV_INPUT_BUFFER_PADDING_SIZE);
		if (NULL == buffer) {
			/* BUG FIX: the original memcpy'd into the unchecked av_malloc
			 * result, dereferencing NULL on allocation failure. */
			av_packet_free(packet);
			return AVERROR(ENOMEM);
		}
		memcpy(buffer, data, (size_t)length);
		memset(buffer + length, 0, AV_INPUT_BUFFER_PADDING_SIZE);
		int ret = av_packet_from_data(*packet, buffer, length);
		if (ret < 0) {
			av_free(buffer);
			av_packet_free(packet); /* don't leak the packet on failure */
		}
		return ret;
	}
	return 0;
}
/* Allocates an audio FIFO matching the codec context's sample format and
 * channel count, with an initial capacity of one sample (the FIFO grows as
 * samples are written). Returns 0 on success, AVERROR(ENOMEM) otherwise. */
int xxfmUtility_initFifo(AVAudioFifo **fifo, AVCodecContext *codecContext) {
	*fifo = av_audio_fifo_alloc(codecContext->sample_fmt, codecContext->channels, 1);
	return (NULL == *fifo) ? AVERROR(ENOMEM) : 0;
}
/*
 * Creates and initializes a libswresample context that converts audio from
 * `input`'s sample format/rate/channels to `output`'s. Channel layouts are
 * derived from the channel counts. On success stores the context in *swr
 * and returns 0; otherwise returns AVERROR(ENOMEM) or the swr_init error.
 */
int xxfmUtility_initResampler(struct SwrContext **swr, AVCodecContext *input, AVCodecContext *output) {
	SwrContext *resampler = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(output->channels), output->sample_fmt, output->sample_rate,
		av_get_default_channel_layout(input->channels), input->sample_fmt, input->sample_rate,
		0, NULL);
	if (NULL == resampler) {
		return AVERROR(ENOMEM);
	}

	int error = swr_init(resampler);
	if (error < 0) {
		swr_free(&resampler);
		return error;
	}

	*swr = resampler;
	return 0;
}
/*
 * Creates a libswscale context that converts frames from `before`'s
 * dimensions/pixel format to `after`'s, using bicubic scaling.
 * On success stores the context in *sws and returns 0; otherwise returns
 * AVERROR(ENOMEM).
 */
int xxfmUtility_initRescale(struct SwsContext **sws, AVCodecContext *before, AVCodecContext *after) {
	struct SwsContext *scaler = sws_getContext(
		before->width, before->height, before->pix_fmt,
		after->width, after->height, after->pix_fmt,
		SWS_BICUBIC, NULL, NULL, NULL);
	if (NULL == scaler) {
		return AVERROR(ENOMEM);
	}
	*sws = scaler;
	return 0;
}

/*
 * Reads the next packet from `formatContext` and decodes it into *output.
 * Any frame previously stored in *output is released first. Returns 0 on
 * success; on failure returns a negative AVERROR code and resets *output
 * to NULL. Note that a single packet may legitimately produce no frame yet
 * (avcodec_receive_frame returns AVERROR(EAGAIN)); this function reports
 * that as a failure, so callers are expected to call again.
 */
int xxfmUtility_readAndDecode(AVFormatContext *formatContext, AVCodecContext *codecContext, AVFrame **output) {
	if (NULL != *output) {
		av_frame_free(output);
		*output = NULL;
	}

	int error = 0;
	AVPacket *packet = av_packet_alloc();
	*output = av_frame_alloc();
	/* BUG FIX: the original never checked these allocations and would have
	 * passed NULL into av_read_frame / avcodec_receive_frame on OOM. */
	if (NULL == packet || NULL == *output) {
		error = AVERROR(ENOMEM);
		goto cleanup;
	}

	error = av_read_frame(formatContext, packet);
	if (0 != error) {
		goto cleanup;
	}

	error = avcodec_send_packet(codecContext, packet);
	if (error < 0) {
		goto cleanup;
	}
	error = avcodec_receive_frame(codecContext, *output);
	if (error < 0) {
		goto cleanup;
	}

cleanup:
	if (NULL != packet) av_packet_free(&packet);
	if (0 != error) {
		av_frame_free(output); /* safe when *output is already NULL */
		*output = NULL;
	}
	return error;
}
/*
 * Pushes one frame into the encoder and pulls one packet back out.
 * Returns 0 on success or the first failing avcodec_* error code; note
 * that AVERROR(EAGAIN) from avcodec_receive_packet (encoder needs more
 * input) is surfaced to the caller unchanged.
 */
int xxfmUtility_encode(AVFrame *frame, AVCodecContext *codecContext, AVPacket *output) {
	int error = avcodec_send_frame(codecContext, frame);
	if (0 == error) {
		error = avcodec_receive_packet(codecContext, output);
	}
	return error;
}
/*
 * Encodes one audio frame (or flushes the encoder when `frame` is NULL),
 * stamping the frame with a running sample-count PTS. Sets *isEncoded to 1
 * only when a complete packet was written to `output`. Pass isFirst=1 on
 * the first frame of a session to reset the PTS counter.
 * Returns 0 on success/"need more data"/"fully flushed", or a negative
 * AVERROR code on a real failure.
 * NOTE: the PTS counter is static, so only one audio encoding session can
 * be active at a time; this is not thread-safe.
 */
int xxfmUtility_encodeAudioFrame(AVFrame *frame, AVCodecContext *codecContext, AVPacket *output, int *isEncoded, int isFirst) {
	/* Running presentation timestamp, measured in samples. */
	static int pts = 0;
	if (isFirst) {
		pts = 0;
	}

	*isEncoded = 0;

	/* Stamp the outgoing frame and advance the counter. */
	if (NULL != frame) {
		frame->pts = pts;
		frame->pkt_pts = pts;
		frame->pkt_dts = pts;
		/* FIXME: with multiple channels this may need to become
		 * pts += frame->nb_samples / codecContext->channels; */
		pts += frame->nb_samples;
	}

	int error = avcodec_send_frame(codecContext, frame);
	if (AVERROR_EOF == error) {
		/* Encoder was already flushed; nothing left buffered. */
		return 0;
	}
	if (error < 0) {
		return error;
	}

	error = avcodec_receive_packet(codecContext, output);
	if (AVERROR(EAGAIN) == error || AVERROR_EOF == error) {
		/* EAGAIN: encoder needs more input before it can emit a packet.
		 * EOF: the final packet has already been produced. */
		return 0;
	}
	if (error < 0) {
		return error;
	}

	*isEncoded = 1;
	return 0;
}
