﻿#include "RtmpPusher.h"
#include <Windows.h>
// Default constructor. All real setup (device open, encoder init, muxer
// init) is deferred to InitPusher(); members are declared in RtmpPusher.h.
CRtmpPusher::CRtmpPusher() 
{
}
// Destructor. NOTE(review): none of the FFmpeg resources acquired in
// InitPusher() (ifmt_ctx_a, ofmt_ctx, codec contexts, sws/swr contexts,
// fifo, converted_input_samples, out_buffer, pFrameYUV, dec_pkt) are
// released here — they leak when the object is destroyed. TODO: add an
// explicit Close()/cleanup path and call it from here.
CRtmpPusher::~CRtmpPusher() 
{
}
/**
 * Convert a wide (UTF-16) string to a newly allocated UTF-8 string.
 *
 * @param w  NUL-terminated wide string (e.g. a DirectShow device name).
 * @return   Buffer allocated with av_malloc(); caller must release it with
 *           av_free(). NULL if the conversion or the allocation fails.
 */
char *dup_wchar_to_utf8(const wchar_t *w)
{
	char *s = NULL;
	// First call with a NULL output buffer queries the required size
	// (including the terminating NUL, because cchWideChar is -1).
	int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	// BUGFIX: WideCharToMultiByte returns 0 on failure; the original code
	// then requested av_malloc(0) and returned an unusable buffer.
	if (l <= 0)
		return NULL;
	s = (char *)av_malloc(l);
	if (s)
		WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
	return s;
}
bool CRtmpPusher::InitPusher() 
{
	av_register_all();
	//Register Device
	avdevice_register_all();
	avformat_network_init();

	//查找输入方式
	AVInputFormat* pAudioInputFmt = av_find_input_format("dshow");
	if (!pAudioInputFmt)
	{
		// g_bIsTerminateThread = true;
		return false;
	}

	ifmt = av_find_input_format("dshow");
	// Set device params
	AVDictionary *device_param = 0;
	//if not setting rtbufsize, error messages will be shown in cmd, but you can still watch or record the stream correctly in most time
	//setting rtbufsize will erase those error messages, however, larger rtbufsize will bring latency
	av_dict_set(&device_param, "rtbufsize", "10M", 0);

	//Set own audio device's name
	char* pDevName = dup_wchar_to_utf8(L"audio=麦克风 (Realtek High Definition Audio)");
	if (avformat_open_input(&ifmt_ctx_a, pDevName, ifmt, &device_param) != 0) {

		printf("Couldn't open input audio stream.（无法打开输入流）\n");
		return -1;
	}

	//input audio initialize
	if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0)
	{
		printf("Couldn't find audio stream information.（无法获取流信息）\n");
		return -1;
	}
	audioindex = -1;
	for (unsigned int i = 0; i < ifmt_ctx_a->nb_streams; i++) {
		if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
			audioindex = i;
			break;
		}
	}

	if (audioindex == -1)
	{
		printf("Couldn't find a audio stream.（没有找到视频流）\n");
		return -1;
	}
	if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0)
	{
		printf("Could not open audio codec.（无法打开解码器）\n");
		return -1;
	}
	const char* outPath = "rtmp://localhost/live/stream";
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", outPath);
	//output video encoder initialize
	pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!pCodec) {
		printf("Can not find output video encoder! (没有找到合适的编码器！)\n");
		return -1;
	}
	pCodecCtx = avcodec_alloc_context3(pCodec);
	pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
	pCodecCtx->width = 640;//  ifmt_ctx->streams[videoindex]->codec->width;
	pCodecCtx->height = 480; // ifmt_ctx->streams[videoindex]->codec->height;
	pCodecCtx->time_base.num = 1;
	pCodecCtx->time_base.den = 25;
	pCodecCtx->bit_rate = 420000;
	pCodecCtx->gop_size = 250;
	/* Some formats want stream headers to be separate. */
	if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
		pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

	//H264 codec param
	//pCodecCtx->me_range = 16;
	//pCodecCtx->max_qdiff = 4;
	//pCodecCtx->qcompress = 0.6;
	pCodecCtx->qmin = 10;
	pCodecCtx->qmax = 51;
	//Optional Param
	pCodecCtx->max_b_frames = 0;
	// Set H264 preset and tune
	AVDictionary *param = nullptr;
	av_dict_set(&param, "profile", "high", 0);
	av_dict_set(&param, "preset", "fast", 0);
	av_dict_set(&param, "tune", "zerolatency", 0);

	if (avcodec_open2(pCodecCtx, pCodec, &param) < 0) {
		printf("Failed to open output video encoder! (编码器打开失败！)\n");
		return -1;
	}

	//Add a new stream to output,should be called by the user before avformat_write_header() for muxing
	video_st = avformat_new_stream(ofmt_ctx, pCodec);
	if (video_st == NULL) {
		return -1;
	}
	video_st->time_base.num = 1;
	video_st->time_base.den = 25;
	video_st->codec = pCodecCtx;


	//output audio encoder initialize
	pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
	if (!pCodec_a) {
		printf("Can not find output audio encoder! (没有找到合适的编码器！)\n");
		return -1;
	}
	pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
	pCodecCtx_a->channels = 2;
	pCodecCtx_a->channel_layout = av_get_default_channel_layout(2);
	pCodecCtx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate;
	pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];
	pCodecCtx_a->bit_rate = 64000;
	pCodecCtx_a->time_base.num = 1;
	pCodecCtx_a->time_base.den = pCodecCtx_a->sample_rate;
	/** Allow the use of the experimental AAC encoder */
	pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
	/* Some formats want stream headers to be separate. */
	if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
		pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;

	if (avcodec_open2(pCodecCtx_a, pCodec_a, NULL) < 0) {
		printf("Failed to open ouput audio encoder! (编码器打开失败！)\n");
		return -1;
	}

	//Add a new stream to output,should be called by the user before avformat_write_header() for muxing
	audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);
	if (audio_st == NULL) {
		return -1;
	}
	audio_st->time_base.num = 1;
	audio_st->time_base.den = pCodecCtx_a->sample_rate;
	audio_st->codec = pCodecCtx_a;

	//Open output URL,set before avformat_write_header() for muxing
	if (avio_open(&ofmt_ctx->pb, outPath, AVIO_FLAG_READ_WRITE) < 0) {
		printf("Failed to open output file! (输出文件打开失败！)\n");
		return -1;
	}

	//Show some Information
	//	av_dump_format(ofmt_ctx, 0, out_path, 1);

	//Write File Header
	avformat_write_header(ofmt_ctx, NULL);

	//prepare before decode and encode
	dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));


	//camera data may has a pix fmt of RGB or sth else,convert it to YUV420
	// 			img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,
	// 				ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	img_convert_ctx = sws_getContext(640, 480,
		AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	// Initialize the resampler to be able to convert audio sample formats
	aud_convert_ctx = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(pCodecCtx_a->channels),
		pCodecCtx_a->sample_fmt,
		pCodecCtx_a->sample_rate,
		av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels),
		ifmt_ctx_a->streams[audioindex]->codec->sample_fmt,
		ifmt_ctx_a->streams[audioindex]->codec->sample_rate,
		0, NULL);

	/**
	* Perform a sanity check so that the number of converted samples is
	* not greater than the number of samples to be converted.
	* If the sample rates differ, this case has to be handled differently
	*/
	//av_assert0(pCodecCtx_a->sample_rate == ifmt_ctx_a->streams[audioindex]->codec->sample_rate);

	swr_init(aud_convert_ctx);

	//Initialize the buffer to store YUV frames to be encoded.
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

	//Initialize the FIFO buffer to store audio samples to be encoded. 
	// AVAudioFifo *fifo = NULL;
	fifo = av_audio_fifo_alloc(pCodecCtx_a->sample_fmt, pCodecCtx_a->channels, 1);

	//Initialize the buffer to store converted samples to be encoded.
	// uint8_t **converted_input_samples = NULL;
	/**
	* Allocate as many pointers as there are audio channels.
	* Each pointer will later point to the audio samples of the corresponding
	* channels (although it may be NULL for interleaved formats).
	*/
	if (!(converted_input_samples = (uint8_t**)calloc(pCodecCtx_a->channels,
		sizeof(**converted_input_samples)))) {
		printf("Could not allocate converted input sample pointers\n");
		return AVERROR(ENOMEM);
	}


	printf("\n --------call started----------\n");
	start_time = av_gettime();

	printf("RtmpPusher initialed\n");
	return true;
}
void CRtmpPusher::Frame() 
{
	int64_t start_time = av_gettime();
	//AVInputFormat *ifmt = av_find_input_format("vfwcap");
	//if (avformat_open_input(&ifmt_ctx, 0, ifmt, NULL) != 0) {
	//	printf("Couldn't open input stream.\n");
	//	return;
	//}
	const int output_frame_size = pCodecCtx_a->frame_size;
	while (1) {
		AVFrame *input_frame = av_frame_alloc();
		if (!input_frame)
		{
			av_frame_free(&input_frame);
			ret = AVERROR(ENOMEM);
			return;
		}

		AVPacket input_packet;
		av_init_packet(&input_packet);
		input_packet.data = NULL;
		input_packet.size = 0;
		if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0) {
			/** If we are at the end of the file, flush the decoder below. */
			if (ret == AVERROR_EOF) {
				encode_audio = 0;
			}
			else {
				printf("Could not read audio frame\n");
				av_free_packet(&input_packet);
				return;
			}
		}
		if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame,
			&dec_got_frame_a, &input_packet)) < 0) {
			printf("Could not decode audio frame\n");
			av_free_packet(&input_packet);
			return;
		}
		av_packet_unref(&input_packet);
		av_free_packet(&input_packet);

		if (dec_got_frame_a) {
			if ((ret = av_samples_alloc(converted_input_samples, NULL,
				pCodecCtx_a->channels,
				input_frame->nb_samples,
				pCodecCtx_a->sample_fmt, 0)) < 0) {
				printf("Could not allocate converted input samples\n");
				av_freep(&(*converted_input_samples)[0]);
				free(*converted_input_samples);
				return;
			}

			if ((ret = swr_convert(aud_convert_ctx,
				converted_input_samples, input_frame->nb_samples,
				(const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) {
				printf("Could not convert input samples\n");
				return;
			}

			if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0) {
				printf("Could not reallocate FIFO\n");
				return;
			}

			/** Store the new samples in the FIFO buffer. */
			if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
				input_frame->nb_samples) < input_frame->nb_samples) {
				printf("Could not write data to FIFO\n");
				return;
			}
		}
		av_frame_free(&input_frame);

		if (av_audio_fifo_size(fifo) >= output_frame_size) {
			/**
			* Take one frame worth of audio samples from the FIFO buffer,
			* encode it and write it to the output file.
			*/
			/** Temporary storage of the output samples of the frame written to the file. */
			AVFrame *output_frame = av_frame_alloc();
			if (!output_frame) {
				ret = AVERROR(ENOMEM);
				av_frame_free(&output_frame);
				return;
			}

			/**
			* Use the maximum number of possible samples per frame.
			* If there is less than the maximum possible frame size in the FIFO
			* buffer use this number. Otherwise, use the maximum possible frame size
			*/
			const int frame_size = FFMIN(av_audio_fifo_size(fifo), pCodecCtx_a->frame_size);

			/** Initialize temporary storage for one output frame. */
			/**
			* Set the frame's parameters, especially its size and format.
			* av_frame_get_buffer needs this to allocate memory for the
			* audio samples of the frame.
			* Default channel layouts based on the number of channels
			* are assumed for simplicity.
			*/
			output_frame->nb_samples = frame_size;
			output_frame->channel_layout = pCodecCtx_a->channel_layout;
			output_frame->format = pCodecCtx_a->sample_fmt;
			output_frame->sample_rate = pCodecCtx_a->sample_rate;

			/**
			* Allocate the samples of the created frame. This call will make
			* sure that the audio frame can hold as many samples as specified.
			*/
			if ((ret = av_frame_get_buffer(output_frame, 0)) < 0) {
				printf("Could not allocate output frame samples\n");
				av_frame_free(&output_frame);
				return;
			}

			/**
			* Read as many samples from the FIFO buffer as required to fill the frame.
			* The samples are stored in the frame temporarily.
			*/
			if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
				printf("Could not read data from FIFO\n");
				av_frame_free(&output_frame);
				return;
			}

			/** Encode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket output_packet;
			av_init_packet(&output_packet);
			output_packet.data = NULL;
			output_packet.size = 0;

			/** Set a timestamp based on the sample rate for the container. */
			if (output_frame) {
				nb_samples += output_frame->nb_samples;
			}

			/**
			* Encode the audio frame and store it in the temporary packet.
			* The output audio stream encoder is used to do this.
			*/
			if ((ret = avcodec_encode_audio2(pCodecCtx_a, &output_packet,
				output_frame, &enc_got_frame_a)) < 0) {
				printf("Could not encode frame\n");
				av_packet_unref(&output_packet);
				av_frame_free(&output_frame);
				return;
			}

			/** Write one audio frame from the temporary packet to the output file. */
			if (enc_got_frame_a) {

				output_packet.stream_index = 1;

				AVRational time_base = ofmt_ctx->streams[1]->time_base;
				AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1};  
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  //内部时间戳  

				output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
				output_packet.dts = output_packet.pts;
				output_packet.duration = output_frame->nb_samples;

				//printf("audio pts : %d\n", output_packet.pts);
				aud_next_pts = nb_samples * calc_duration;

				int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q);
				int64_t now_time = av_gettime() - start_time;
				if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time) < vid_next_pts))
					av_usleep(pts_time - now_time);

				int rett = -88;
				rett = av_interleaved_write_frame(ofmt_ctx, &output_packet);
				if (rett == 0)
					printf("voice data is sended to rtmp server\n");

				av_packet_unref(&output_packet);
				av_free_packet(&output_packet);
			}
			av_frame_free(&output_frame);
		}

	}
}