#include "RtmpPushAudioUseFFmpegWindow.h"

RtmpPushAudioUseFFmpegWindow::RtmpPushAudioUseFFmpegWindow(QWidget* parent)
	: QWidget(parent)
{
	// Fixed-size window with a single button that kicks off the
	// capture -> encode -> RTMP-push pipeline in startRecord().
	resize(QSize(480, 480));
	setWindowTitle("利用Qt录制音频");
	setWindowIcon(QIcon("images/opencv.png"));

	auto* recordButton = new Button(this);
	recordButton->setText("开始录制音频");
	// Begin recording as soon as the user clicks.
	connect(recordButton, &Button::clicked, [this]() {
		startRecord();
	});
}

void RtmpPushAudioUseFFmpegWindow::startRecord() {
	qDebug() << "开始录制音频";
	//1.QT录制音频
	//2.ffmpeg将录制的音频重采样并编码，因为qt采集到的s16格式，aac需要的格式是FLTP
	//3.将编码后的数据通过av_interleaved_write_frame推送的rtmp服务器

	int sampleRate = 44100;//采样率，每秒采集多少次
	int channels = 2;//声道数，此处是双声道
	int sampleByte = 2;//采样位深，占2字节
	AVSampleFormat inSampleFmt = AV_SAMPLE_FMT_S16;//s16 原始采集的位深
	AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_FLTP;//float aac需要
	QAudioFormat fmt;
	fmt.setSampleRate(sampleRate);
	fmt.setChannelCount(channels);
	fmt.setSampleSize(sampleByte * 8);
	fmt.setCodec("audio/pcm");
	fmt.setByteOrder(QAudioFormat::LittleEndian);//字节序为小端模式，ffmpeg统一采用的是小端模式
	fmt.setSampleType(QAudioFormat::UnSignedInt);
	QAudioDeviceInfo info = QAudioDeviceInfo::defaultInputDevice();//获取默认的采集音频设备
	if (!info.isFormatSupported(fmt)) {
		qDebug() << "Audio format not supported!";
		fmt = info.nearestFormat(fmt);
	}
	QAudioInput* input = new QAudioInput(fmt);
	//开始录制音频
	QIODevice* io = input->start();


	//注册网络协议，让ffmpeg拥有网络的能力
	avformat_network_init();
	//推流地址
	char* outUrl = "rtmp://124.223.218.248:1935/live/test";

	//初始化音频重采样上下文
	SwrContext* asc = NULL;
	asc = swr_alloc_set_opts(
		asc,
		av_get_default_channel_layout(channels),
		outSampleFmt, sampleByte,
		av_get_default_channel_layout(channels),
		inSampleFmt, sampleRate,
		0, 0
	);
	if (!asc) {
		qDebug() << "swr_alloc_set_opts failed";
	}

	int ret = swr_init(asc);
	if (ret != 0) {
		char err[1024] = { 0 };
		av_strerror(ret, err, sizeof(err) - 1);
		qDebug() << err;
	}

	qDebug() << "音频重采样上下文初始化成功！";

	//音频重采样输出空间分配
	AVFrame* pcm = av_frame_alloc();
	pcm->format = outSampleFmt;
	pcm->channels = channels;
	pcm->channel_layout = av_get_default_channel_layout(channels);
	pcm->nb_samples = 1024;//一帧音频，一通道的采用数量，如果是两通道则*2
	ret = av_frame_get_buffer(pcm, 32);//真正给AVFrame中的内容分配空间
	if (ret != 0) {
		char err[1024] = { 0 };
		av_strerror(ret, err, sizeof(err) - 1);
		qDebug() << err;
	}

	//初始化音频编码器（根据id寻找编码器）
	const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
	if (!codec) {
		qDebug() << "avcodec_find_encoder failed";
	}
	//初始化音频编码器上下文
	AVCodecContext* ac = avcodec_alloc_context3(codec);
	if (!ac) {
		qDebug() << "avcodec_alloc_context3 failed!";
	}
	qDebug() << "编码器初始化成功";
	ac->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;//初始化全局的头，和视频初始化的时候一样样的。
	ac->thread_count = 8;//8线程解码
	ac->bit_rate = 40000;//比特率
	ac->sample_rate = sampleRate;//采样率
	ac->sample_fmt = AV_SAMPLE_FMT_FLTP;//采样格式
	ac->channels = channels;//声道数
	ac->channel_layout = av_get_default_channel_layout(channels);//声道布局


	//打开音频编码器
	ret = avcodec_open2(ac, NULL, NULL);
	if (ret != 0) {
		char err[1024] = { 0 };
		av_strerror(ret, err, sizeof(err) - 1);
		qDebug() << err;
	}

	qDebug() << "编码器打开成功";

	//创建输出封装上下文
	AVFormatContext* ic = NULL;
	ret = avformat_alloc_output_context2(&ic, 0, "flv", outUrl);
	if (ret != 0) {
		char err[1024] = { 0 };
		av_strerror(ret, err, sizeof(err) - 1);
		qDebug() << err;
	}
	//添加音频流
	AVStream* as = avformat_new_stream(ic, NULL);
	if (!as) {
		qDebug() << "创建流失败";
	}
	as->codecpar->codec_tag = 0;
	//从编码器复制参数
	avcodec_parameters_from_context(as->codecpar, ac);

	//打开网络io
	ret = avio_open(&ic->pb, outUrl, AVIO_FLAG_WRITE);
	if (ret != 0) {
		char err[1024] = { 0 };
		av_strerror(ret, err, sizeof(err) - 1);
		qDebug() << err;
	}

	//发送封装头
	ret = avformat_write_header(ic, NULL);
	if (ret != 0) {
		char err[1024] = { 0 };
		av_strerror(ret, err, sizeof(err) - 1);
		qDebug() << err;
	}
	//一次读取一帧音频字节数
	int readSize = pcm->nb_samples * channels * sampleByte;
	char* buf = new char[readSize];
	int apts = 0;
	AVPacket pkt = { 0 };
	for (;;) {
		//一次读取一帧音频
		if (input->bytesReady() < readSize) {
			QThread::msleep(1);
			continue;
		}
		int size = 0;
		while (size != readSize) {
			int len = io->read(buf + size, readSize - size);
			if (len < 0) {
				break;
			}
			size += len;
		}
		if (size != readSize)continue;


		//已经读取了一帧数据，对数据进行重采样
		const uint8_t* indata[AV_NUM_DATA_POINTERS] = { 0 };
		indata[0] = (uint8_t*)buf;
		int len = swr_convert(asc, pcm->data, pcm->nb_samples,//一帧数据的字节数量
			indata, pcm->nb_samples);
		pcm->pts = apts;
		apts += av_rescale_q(pcm->nb_samples, { 1,sampleRate }, ac->time_base);

		//编码
		ret = avcodec_send_frame(ac, pcm);
		if (ret != 0)continue;
		av_packet_unref(&pkt);//释放AVPacket内部空间
		ret = avcodec_receive_packet(ac, &pkt);
		if (ret != 0)continue;
		qDebug() << "pkt.size=" << pkt.size << flush;
		//时间基转换
		pkt.pts = av_rescale_q(pkt.pts, ac->time_base, as->time_base);
		pkt.dts = av_rescale_q(pkt.dts, ac->time_base, as->time_base);
		pkt.duration = av_rescale_q(pkt.duration, ac->time_base, as->time_base);
		//推流
		ret = av_interleaved_write_frame(ic, &pkt);
		if (ret == 0)
		{
			qDebug() << "#" << flush;
		}
	}
	delete buf;


}

// Nothing to release explicitly; Qt's parent-child ownership handles the
// child widgets created in the constructor.
RtmpPushAudioUseFFmpegWindow::~RtmpPushAudioUseFFmpegWindow() = default;
