﻿#include "MLProgressbar.h"
#include "MLRender.h"

#include <float.h>

// Construct a renderer that muxes an H.264 video (plus optional audio)
// into an MP4 file at `path`.
//
// path             output file path (also passed to the muxer context)
// frameRate        frames per second; rounded to the nearest integer for
//                  the encoder time base
// frame_size_x/y   output frame dimensions in pixels
// totalFrames      total number of video frames that will be rendered
// quality          video bitrate knob: bit_rate = quality * 1024 bits/s
//                  (i.e. roughly kbit/s)
// audio_processor  supplies pre-encoded audio packets; if it has packets,
//                  an audio stream is added to the container
//
// Throws const char* messages on any FFmpeg setup failure.
MLRender::MLRender(
	const std::string path,
	double frameRate,
	int frame_size_x, 
	int frame_size_y,
	int totalFrames,
	int quality,
	AudioProcessor& audio_processor
) : _path(path),
	_frame_rate(frameRate),
	_frame_size_x(frame_size_x), 
	_frame_size_y(frame_size_y),
	_total_frames(totalFrames), 
	_audio_processor(audio_processor) {
	av_log_set_level(AV_LOG_WARNING);
	avformat_network_init();
	// Allocate an MP4 output (muxer) context for the target path.
	avformat_alloc_output_context2(&fmt_ctx, NULL, "mp4", path.c_str());
	if (!fmt_ctx) {
		throw "无法创建输出上下文。";
	}

	// Find an H.264 encoder (hardware NVENC preferred, software fallback).
	const AVCodec* codec = _find_encoder();

	// Create the video stream in the container.
	stream = avformat_new_stream(fmt_ctx, NULL);
	if (!stream) {
		throw "无法创建视频流。";
	}

	codec_ctx = avcodec_alloc_context3(codec);
	if (!codec_ctx) {
		throw "无法创建编码器上下文。";
	}

	// Only add an audio stream when the processor actually has packets.
	if (audio_processor.has_packet()) {
		audio_stream = avformat_new_stream(fmt_ctx, NULL);
		if (!audio_stream) {
			throw "无法创建音频流。";
		}
		audio_processor.set_stream(audio_stream, fmt_ctx);
		_sample_rate = audio_processor.sample_rate();
	}

	// Configure encoder parameters.
	codec_ctx->width = frame_size_x;
	codec_ctx->height = frame_size_y;
	codec_ctx->time_base = AVRational{ 1, (int)rint(_frame_rate) };
	codec_ctx->framerate = AVRational{ (int)rint(_frame_rate), 1 };
	codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
	codec_ctx->bit_rate = quality * 1024;
	codec_ctx->sample_aspect_ratio = AVRational{ 1, 1 };
	stream->time_base = codec_ctx->time_base;
	// MP4 requires global headers (extradata) rather than in-band headers.
	if (fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
		codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}

	// Open the encoder.
	if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
		throw "无法打开编码器。";
	}

	// Copy the encoder parameters into the stream so the muxer can write
	// a correct header.
	avcodec_parameters_from_context(stream->codecpar, codec_ctx);
}

// Pick the H.264 encoder to use: prefer NVIDIA's hardware encoder
// (h264_nvenc) when the build provides it, otherwise fall back to the
// default software H.264 encoder. Throws if no H.264 encoder exists.
//
// Fix: the original performed a redundant avcodec_find_encoder() lookup
// whose result was immediately shadowed and discarded.
const AVCodec* MLRender::_find_encoder() {
	const AVCodec* codec_nvenc = avcodec_find_encoder_by_name("h264_nvenc");
	if (codec_nvenc) {
		return codec_nvenc;
	}
	const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec) {
		throw "找不到H.264编码器。";
	}
	std::cout << "[INFO]未找到H264_NVENC编码器，将使用普通H.264编码器。" << std::endl;
	return codec;
}

// Render all objects into the output video.
//
// Spawns `encoding_threads_num` encoder workers plus one writer thread,
// then produces RGB frame buffers on the calling thread and hands them
// to the workers through a mutex-protected queue with backpressure.
//
// objects  scene objects; each contributes pixels for every frame index.
//
// Fix: the original sampled frame_queue.size() without holding
// queue_mutex while worker threads push/pop under it — a data race on
// std::queue. The size is now always read under the lock.
void MLRender::render(std::vector<MLObj*>& objects) {
	FrameWriter writer = Utils::initFrameWriter(_frame_size_x, _frame_size_y);

	// Slot table of encoded frames indexed by frame number; the writer
	// thread consumes slots in order once workers publish them (non-null).
	frames = new AVFrame * [_total_frames];
	memset(frames, 0, _total_frames * sizeof(AVFrame*));
	for (int i = 0; i < encoding_threads_num; i++) {
		encoding_threads.emplace_back(std::thread(&MLRender::encodingThreadFunc, this, i));
	}

	// Start the writer thread (audio-aware variant if audio is present).
	std::cout << "渲染视频中……" << std::endl;
	encoding_finished = false;
	bool using_audio = _audio_processor.has_packet();
	if (using_audio) {
		encoding_main_thread = std::thread(&MLRender::writerThreadFuncWithAudio, this);
	}
	else {
		encoding_main_thread = std::thread(&MLRender::writerThreadFunc, this);
	}

	// Thread-safe snapshot of the pending-queue length.
	auto queued = [this]() {
		std::lock_guard<std::mutex> lock(queue_mutex);
		return frame_queue.size();
	};

	// Produce frame data on this thread.
	for (size_t i = 0; i < (size_t)_total_frames; i++) {
		MLFrame ml_frame(_frame_size_x, _frame_size_y);
		for (auto& obj : objects) {
			obj->get_frame(i, ml_frame);
		}
		uint8_t* frame_mat = ml_frame.getFrame(writer);
		// Backpressure with hysteresis: above 32 pending frames, pause
		// until the workers drain the queue below 16.
		if (queued() > 32) {
			while (queued() > 16) {
				std::this_thread::yield();
			}
		}
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			frame_queue.push(FrameData{ frame_mat, i });
		}

		ml_frame.clear();
	}

	// Signal the workers that no more frames will arrive.
	encoding_finished = true;
	queue_cv.notify_one();

	for (auto& t : encoding_threads) {
		if (t.joinable()) {
			t.join();
		}
	}

	// Wait for the writer thread to finish muxing and cleanup.
	if (encoding_main_thread.joinable()) {
		encoding_main_thread.join();
	}
}

// Encoder worker body: pops RGB frames off the shared queue, converts and
// timestamps each into a freshly allocated AVFrame, and publishes it into
// the `frames` slot table for the writer thread.
//
// All real work happens while holding queue_mutex: the lock serializes
// access to the queue, the shared sws_ctx (sws_scale on a shared context
// is not thread-safe), next_video_pts, and not_writen_frame_count.
//
// Fixes: the original allocated an AVFrame (with buffers) on *every*
// spin iteration and freed it again whenever the queue was empty or the
// writer was behind; it also spun on the mutex without yielding. The
// frame is now allocated only when a queue entry was actually popped,
// and idle iterations yield the CPU.
void MLRender::encodingThreadFunc(int index) {
	(void)index; // worker id, currently unused
	while (true) {
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			// Back off while more than 32 encoded frames await the writer.
			if (not_writen_frame_count <= 32) {
				// No more producers and nothing queued: we are done.
				if (encoding_finished && frame_queue.empty()) break;
				if (!frame_queue.empty()) {
					FrameData frame_data = frame_queue.front();
					frame_queue.pop();
					// Allocate the destination frame only now that there
					// is actual work to do.
					AVFrame* frame = av_frame_alloc();
					frame->format = codec_ctx->pix_fmt;
					frame->width = codec_ctx->width;
					frame->height = codec_ctx->height;
					av_frame_get_buffer(frame, 0);
					_encode_frame(frame_data.data, frame);
					delete[] frame_data.data;
					// Publish the frame for the writer thread.
					frames[frame_data.count] = frame;
					not_writen_frame_count++;
					continue;
				}
			}
		}
		// Queue empty or backpressured — don't burn a core on the mutex.
		std::this_thread::yield();
	}
}

void MLRender::writerThreadFunc() {
	sws_ctx = sws_getContext(
		_frame_size_x, _frame_size_y, AV_PIX_FMT_RGB24,
		_frame_size_x, _frame_size_y, AV_PIX_FMT_YUV420P,
		SWS_BILINEAR, NULL, NULL, NULL
	);

	if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
		if (avio_open(&fmt_ctx->pb, _path.c_str(), AVIO_FLAG_WRITE) < 0) {
			std::cerr << _path.c_str() << std::endl;
			throw "无法打开输出文件。";
		}
	}

	if (avformat_write_header(fmt_ctx, NULL) < 0) {
		throw "写入文件头失败。";
	}
	
	size_t writen_count = 0;
	MLProgressbar progressbar(_total_frames, "渲染进度");
	// 接收并写入数据包
	AVPacket* pkt = av_packet_alloc();
	while (writen_count < _total_frames) {
		while (!frames[writen_count]) std::this_thread::yield();
		// 发送帧到视频编码器
		if (avcodec_send_frame(codec_ctx, frames[writen_count]) < 0) {
			throw "发送帧到编码器失败。";
		}
		av_frame_free(&frames[writen_count]);
		while (avcodec_receive_packet(codec_ctx, pkt) == 0) {
			av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
			pkt->stream_index = stream->index;

			if (av_interleaved_write_frame(fmt_ctx, pkt) < 0) {
				throw "写入视频包失败。";
			}
			av_packet_unref(pkt);
		}
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			not_writen_frame_count--;
		}
		writen_count++;
		progressbar.update();
	}

	encoding_finished = true;

	// 刷新编码器
	avcodec_send_frame(codec_ctx, NULL);
	while (avcodec_receive_packet(codec_ctx, pkt) == 0) {
		av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
		pkt->stream_index = stream->index;
		av_interleaved_write_frame(fmt_ctx, pkt);
		av_packet_unref(pkt);
	}
	av_packet_free(&pkt);

	// 写尾部并清理资源
	av_write_trailer(fmt_ctx);
	sws_freeContext(sws_ctx);
	avcodec_free_context(&codec_ctx);
	if (fmt_ctx && !(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
		avio_closep(&fmt_ctx->pb);
	}
	avformat_free_context(fmt_ctx);
	delete[] frames;
}

void MLRender::writerThreadFuncWithAudio() {
	// 创建帧和转换上下文

	sws_ctx = sws_getContext(
		_frame_size_x, _frame_size_y, AV_PIX_FMT_RGB24,
		_frame_size_x, _frame_size_y, AV_PIX_FMT_YUV420P,
		SWS_BILINEAR, NULL, NULL, NULL
	);

	// 打开输出文件
	if (!(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
		if (avio_open(&fmt_ctx->pb, _path.c_str(), AVIO_FLAG_WRITE) < 0) {
			throw "无法打开输出文件。";
		}
	}

	// 写入文件头
	if (avformat_write_header(fmt_ctx, NULL) < 0) {
		throw "写入文件头失败。";
	}

	auto audio_queue = _audio_processor.get_packets();
	size_t writen_count = 0;

	// 获取时间基信息
	AVRational video_time_base = stream->time_base;
	AVRational audio_codec_time_base = { 1, _sample_rate };  // 编码器时间基
	AVRational audio_stream_time_base = audio_stream->time_base;

	AVPacket* video_pkt = av_packet_alloc();
	int ret;

	MLProgressbar progressbar(_total_frames, "渲染进度");

	// 接收并写入数据包
	AVPacket* pkt = av_packet_alloc();
	AVPacket* audio_pkt = av_packet_alloc();
	bool frame_available;
	int pts;
	while (writen_count < _total_frames) {
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			frame_available = frames[writen_count] != 0;
		}
		if (!frame_available) continue;
		// 发送帧到视频编码器
		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			if (avcodec_send_frame(codec_ctx, frames[writen_count]) < 0) {
				throw "发送帧到编码器失败。";
			}
			pts = frames[writen_count]->pts;
			av_frame_free(&frames[writen_count]);
		}
		// 处理所有早于当前视频帧的音频包
		while (!audio_queue.empty()) {
			auto& audio_pair = audio_queue.front();
			const int64_t audio_pts_orig = audio_pair.second;

			// 使用原始时间基比较
			int cmp = av_compare_ts(
				pts, codec_ctx->time_base,    // 当前视频帧原始PTS
				audio_pts_orig, { 1, _sample_rate }  // 音频原始PTS
			);

			if (cmp > 0) {  // 音频PTS < 视频PTS
				av_packet_ref(audio_pkt, &audio_pair.first);

				// 时间基转换
				av_packet_rescale_ts(
					audio_pkt,
					{ 1, _sample_rate },  // 编码器时间基
					audio_stream->time_base   // 流时间基
				);
				audio_pkt->stream_index = audio_stream->index;

				if (av_interleaved_write_frame(fmt_ctx, audio_pkt) < 0) {
					throw "写入音频包失败。";
				}
				audio_queue.pop();
			}
			else {
				break;
			}
		}

		// 处理视频包
		while ((ret = avcodec_receive_packet(codec_ctx, video_pkt)) == 0) {
			if (video_pkt->flags & AV_PKT_FLAG_KEY) {
				// 设置流参数（仅需一次）
				if (!stream->codecpar->extradata) {
					stream->codecpar->extradata = (uint8_t*)av_mallocz(
						codec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
					stream->codecpar->extradata_size = codec_ctx->extradata_size;
					memcpy(stream->codecpar->extradata, codec_ctx->extradata, codec_ctx->extradata_size);
				}
			}
			av_packet_rescale_ts(video_pkt, codec_ctx->time_base, stream->time_base);
			video_pkt->stream_index = stream->index;

			if (av_interleaved_write_frame(fmt_ctx, video_pkt) < 0) {
				throw "写入视频包失败";
			}
		}


		// 处理可能的错误状态
		if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
			throw "视频编码错误: " + std::to_string(ret);
		}

		{
			std::lock_guard<std::mutex> lock(queue_mutex);
			not_writen_frame_count--;
		}
		writen_count++;
		progressbar.update();
	}
	av_packet_free(&audio_pkt);

	encoding_finished = true;

	_flush_remaining_video_packets(audio_queue, video_time_base, audio_codec_time_base, audio_stream_time_base);

	// 刷新视频编码器
	_flush_video_encoder();

	// 处理剩余音频包
	_flush_remaining_audio(audio_queue);

	// 写入文件尾
	av_write_trailer(fmt_ctx);

	// 清理资源
	for (size_t i = 0; i < _total_frames; i++) {
		av_frame_free(&frames[i]);
	}
	sws_freeContext(sws_ctx);
	avcodec_free_context(&codec_ctx);
	if (fmt_ctx && !(fmt_ctx->oformat->flags & AVFMT_NOFILE)) {
		avio_closep(&fmt_ctx->pb);
	}
	avformat_free_context(fmt_ctx);
	_audio_processor.clear();
	delete[] frames;
}

void MLRender::_encode_frame(const uint8_t* src_data, AVFrame* frame) {
	// 图像格式转换
	const uint8_t* src_slices[] = { src_data, NULL, NULL, NULL };
	int src_stride[] = { _frame_size_x * 3, 0, 0, 0 };

	sws_scale(
		sws_ctx,
		src_slices,
		src_stride,
		0,
		_frame_size_y,
		frame->data,
		frame->linesize
	);

	// 设置视频帧时间戳
	frame->pts = next_video_pts;
	next_video_pts += av_rescale_q(1,
		AVRational{ 1, (int)rint(_frame_rate) },  // 帧率
		codec_ctx->time_base
	);

	if (frame->pts == 0) {
		frame->flags |= AV_FRAME_FLAG_KEY;
		frame->pict_type = AV_PICTURE_TYPE_I;
	}
}

void MLRender::_flush_video_encoder() {
	// 发送空帧刷新编码器
	avcodec_send_frame(codec_ctx, nullptr);

	AVPacket* pkt = av_packet_alloc();
	while (avcodec_receive_packet(codec_ctx, pkt) >= 0) {
		av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
		pkt->stream_index = stream->index;

		if (av_interleaved_write_frame(fmt_ctx, pkt) < 0) {
			throw "写入剩余视频包失败。";
		}
		av_packet_unref(pkt);
	}
	av_packet_free(&pkt);
}

// Mux every audio packet still queued after all video has been written.
//
// Fix: removed an unused function-local `static int64_t last_audio_dts`
// (which also carried a thread-safe-init guard) and an unused
// `audio_time_base` local — both dead code.
//
// NOTE(review): unlike the interleaved path, packets here are written
// without any time-base rescale — confirm AudioProcessor delivers them
// already in the audio stream's time base.
void MLRender::_flush_remaining_audio(std::queue<std::pair<AVPacket, int64_t>>& packets) {
	while (!packets.empty()) {
		auto& audio_pair = packets.front();
		AVPacket audio_pkt = audio_pair.first;

		// Route the packet to the audio stream.
		audio_pkt.stream_index = audio_stream->index;

		if (av_interleaved_write_frame(fmt_ctx, &audio_pkt) < 0) {
			throw "写入剩余音频包失败。";
		}

		av_packet_unref(&audio_pkt);
		packets.pop();
	}
}

// Drain the video encoder while keeping A/V interleaving: for each video
// packet the encoder still produces, first write every queued audio packet
// whose PTS (converted to the video time base) is not later than the video
// packet's PTS, then write the video packet itself.
//
// packets                 remaining audio packets paired with their PTS in
//                         the audio encoder time base
// video_time_base         the video stream's time base
// audio_codec_time_base   {1, sample_rate} — time base of the queued PTS
// audio_stream_time_base  the audio stream's time base (rescale target)
//
// Throws const char* on mux failures.
void MLRender::_flush_remaining_video_packets(
	std::queue<std::pair<AVPacket, int64_t>>& packets,
	AVRational video_time_base,
	AVRational audio_codec_time_base,
	AVRational audio_stream_time_base
) {
	AVPacket* pkt = av_packet_alloc();

	// Send a null frame to put the encoder in drain mode.
	avcodec_send_frame(codec_ctx, nullptr);

	// Process every packet the encoder still has buffered.
	while (avcodec_receive_packet(codec_ctx, pkt) == 0) {
		av_packet_rescale_ts(pkt, codec_ctx->time_base, video_time_base);
		pkt->stream_index = stream->index;
		int64_t current_video_pts = pkt->pts;

		// Write all audio that precedes this video packet.
		while (!packets.empty()) {
			auto& audio_pair = packets.front();
			AVPacket audio_pkt = audio_pair.first;
			int64_t audio_pts_orig = audio_pair.second;

			// Convert the audio PTS into the video time base so the two
			// can be compared directly.
			int64_t audio_pts_video_time = av_rescale_q(
				audio_pts_orig,
				audio_codec_time_base,
				video_time_base
			);

			if (audio_pts_video_time <= current_video_pts) {
				av_packet_rescale_ts(
					&audio_pkt,
					audio_codec_time_base,
					audio_stream_time_base
				);
				audio_pkt.stream_index = audio_stream->index;

				if (av_interleaved_write_frame(fmt_ctx, &audio_pkt) < 0) {
					throw "写入音频包失败。";
				}
				av_packet_unref(&audio_pkt);
				packets.pop();
			}
			else {
				break;
			}
		}

		if (av_interleaved_write_frame(fmt_ctx, pkt) < 0) {
			throw "写入刷新视频包失败";
		}
		av_packet_unref(pkt);
	}
	av_packet_free(&pkt);
}
