#include "StdAfx.h"
#include "MediaFile.h"


//-----------------------------------------------------------------------------
//	Construct an empty voice with numBuffers XAudio2 buffers; no file is
//	opened yet. Streaming voices (two or more buffers) start their worker.
MediaFile::MediaFile(Audio *audio, DWORD numBuffers)
:	BufferedVoice		(audio, numBuffers)
{
//	Prepare the demuxer lock, then reset every member to "no file loaded".
	InitializeCriticalSectionAndSpinCount(&file_access, 800);
	close();

//	More than one buffer means streaming playback: spin up the thread that
//	keeps the buffers topped up ("FillBufferThread").
	const bool streaming = buffers.size() >= 2;
	if(streaming)
		createFillBufferThread();
}

//-----------------------------------------------------------------------------
//	Construct a voice and immediately open the given media file.
MediaFile::MediaFile(Audio *audio, const std::wstring &filename, DWORD numBuffers, float vol, bool block_buffers)
:	BufferedVoice		(audio, numBuffers, vol, block_buffers)
{
//	Prepare the demuxer lock before any file access, then load the file.
	InitializeCriticalSectionAndSpinCount(&file_access, 800);
	open(filename);

//	Streaming voices (two or more buffers) need the fill-buffer worker.
	const bool streaming = buffers.size() >= 2;
	if(streaming)
		createFillBufferThread();
}

//-----------------------------------------------------------------------------
//	Build a non-streaming (single buffer) voice directly from raw PCM data
//	held in memory. The data is copied into audio_buffer, so the caller keeps
//	ownership of pcmData.
MediaFile::MediaFile(Audio *audio, const uint8_t *pcmData, size_t totalBytes,
							WORD channels, WORD bitsPerSample, DWORD sampleRate)
:	BufferedVoice		(audio, 1)
{
	InitializeCriticalSectionAndSpinCount(&file_access, 800);
	close();

	audio_buffer.resize(totalBytes);
//	Guard the copy: &audio_buffer[0] on an empty buffer and
//	memcpy(dst, NULL, 0) are both undefined behaviour.
	if(pcmData != NULL && totalBytes > 0)
		memcpy(&audio_buffer[0], pcmData, totalBytes);

	initialize(channels, bitsPerSample, sampleRate);
}

//-----------------------------------------------------------------------------
//	Stop playback, shut down the streaming thread (if any) and release all
//	file/decoder resources before the base class tears the voice down.
MediaFile::~MediaFile()
{
	stop();

//	Only streaming voices (more than one buffer) own a fill-buffer thread.
	if(buffers.size() > 1)
		exitFillBufferThread();

	close();
	DeleteCriticalSection(&file_access);
}	// fixed: removed stray ';' after the function body

//-----------------------------------------------------------------------------
//	Release all per-file resources and reset every member to its
//	"no file loaded" state. Safe to call repeatedly (also from constructors).
void MediaFile::close()
{
//	Free the RGB frames (slots 1..n-1; slot 0 is the decoder's own frame,
//	never allocated by us — see open()) and the YUV->RGB conversion context.
//	Fixed: removed the unused local 'AVCodecContext *codec'.
	if(video_buffer.m() && video_stream)
	{
		for(int i = 1; i < video_buffer.m(); i++)
			avpicture_free((AVPicture*)&video_buffer[i]);

		sws_freeContext(video_to_rgb);
	}
	
	audio_stream_index	= -1;
	video_stream_index	= -1;
	audio_stream			= NULL;
	video_stream			= NULL;
//	video_src_frame		= 0;				// video_buffer[0] is actually the RGB frame, so video_src_frame = 0 is invalid
//	video_dst_frame		= 1;				// video_buffer[1] will become the first buffered YUV frame
	video_to_rgb			= NULL;
	picture_number			= 0;
	audio_frame_size		= 0;
	eof						= false;
	audio_buffer.free();
	video_buffer.free();

	file.close();
}

//-----------------------------------------------------------------------------
//	Open a media file: locate its audio/video streams, initialize the voice
//	for the audio format and set up the decode buffers.
//	A single-buffer voice is decoded completely here; a streaming voice only
//	gets its XAudio2 buffers pointed into audio_buffer and is decoded later
//	by fillBuffer().
void MediaFile::open(const std::wstring &filename)
{
	stop();
	close();
//	NOTE(review): wchar_t -> char by per-character narrowing; non-ASCII paths
//	will be mangled — confirm callers only pass ASCII filenames.
	file.open(std::string(filename.begin(),filename.end()).c_str());
	audio_stream_index = file.getStream(AVMEDIA_TYPE_AUDIO);
	video_stream_index = file.getStream(AVMEDIA_TYPE_VIDEO);

//----
//	Setup audio
	if(audio_stream_index >= 0)
	{
	//	mp3 has 1152 samples per frame, aac 1024...
		audio_stream						= file.getStream(audio_stream_index);
		const AVCodecContext	&specs	= *audio_stream->codec;
		const int	bitsPerSample		= av_get_bits_per_sample_fmt(specs.sample_fmt),
						sampleSize			= specs.channels * bitsPerSample/8,	// bytes per multi-channel sample
						frameSize			= specs.frame_size * sampleSize,		// bytes per codec frame
						numBuffers			= buffers.size();
		audio_frame_size					= specs.frame_size;

		initialize(specs.channels, bitsPerSample, specs.sample_rate);

	//----
	//	Decode file right away?
		if(numBuffers == 1)
		{
		//	Estimate final size: the larger of the duration-based and the
		//	frame-count-based estimate, plus one max frame of headroom so the
		//	decoder never writes past the end.
			size_t initialSize = max(	size_t(sampleSize * specs.sample_rate * audio_stream->duration * audio_stream->time_base.num / audio_stream->time_base.den),
												size_t(frameSize * audio_stream->nb_frames)	);
			audio_buffer.resize(initialSize + AVCODEC_MAX_AUDIO_FRAME_SIZE);
			int totalSize	= 0;

		// Read until EOF
			while(file.bufferFrame() == 0)
			{
				if(file.streamBufferedPackets(audio_stream_index))
				{
				//	Resize necessary? Grow by at least 1.5x to keep the number
				//	of reallocations logarithmic.
					int reserve = totalSize + frameSize + AVCODEC_MAX_AUDIO_FRAME_SIZE;
					if(audio_buffer.m() < reserve)
						audio_buffer.resize(max(3*reserve/2, 2*audio_buffer.m()) + AVCODEC_MAX_AUDIO_FRAME_SIZE);

				// Decode audio frame
					totalSize += file.decodeAudio((int16_t*)&audio_buffer[totalSize], audio_buffer.m()-totalSize);
				}
			}

		//	Shrink to the decoded size and hand the whole thing to XAudio2 as
		//	one constant buffer.
			audio_buffer.resize(totalSize);
			buffers[0].pAudioData = audio_buffer;
			buffers[0].AudioBytes = totalSize;
			buffers[0].Flags = XAUDIO2_END_OF_STREAM;
		}
		else
		{
		//----
		// Prepare the audio buffers: each buffer gets a fixed-size slice of
		// audio_buffer, spaced AVCODEC_MAX_AUDIO_FRAME_SIZE apart so a full
		// decoded frame always fits; AudioBytes is the expected payload only.
			const int totalSize = numBuffers * AVCODEC_MAX_AUDIO_FRAME_SIZE;
//			const int totalSize = numBuffers * frameSize + AVCODEC_MAX_AUDIO_FRAME_SIZE;
			if(audio_buffer.m() != totalSize)
			{
				audio_buffer.resize(totalSize);
//				memset(&buffers[0], 0, numBuffers*sizeof(XAUDIO2_BUFFER));
				for(int i = 0; i < numBuffers; i++)
				{
					buffers[i].pAudioData = &audio_buffer[i*AVCODEC_MAX_AUDIO_FRAME_SIZE];
//					buffers[i].pAudioData = &audio_buffer[i*frameSize];
//					buffers[i].AudioBytes = AVCODEC_MAX_AUDIO_FRAME_SIZE;
					buffers[i].AudioBytes = frameSize;
				}
			}
		}

	}//if(audio_stream_index >= 0)

//----
//	Setup video
	if(video_stream_index >= 0)
	{
		video_stream						= file.getStream(video_stream_index);
		const AVCodecContext	&specs	= *video_stream->codec;

		video_buffer.resize(1+buffers.size());				// 1 * YUV frame + numBuffers * RGB frames

		for(int i = 0; i < video_buffer.m(); i++)
			avcodec_get_frame_defaults(&video_buffer[i]);

	//	allocate the RGB picture (slot 0 stays unallocated: the decoder
	//	fills it with its own YUV data — close() frees slots 1..n-1 only)
		for(int i = 1; i < video_buffer.m(); i++)
			avpicture_alloc((AVPicture*)&video_buffer[i], PIX_FMT_RGB24, specs.width, specs.height);

	//	allocate the YUV -> RGB conversion context (SWS_POINT = fastest,
	//	lowest-quality scaler; LANCZOS alternative kept below)
		video_to_rgb = sws_getContext(specs.width, specs.height, specs.pix_fmt, specs.width, specs.height, PIX_FMT_RGB24, SWS_POINT,   NULL, NULL, NULL);
//		video_to_rgb = sws_getContext(specs.width, specs.height, specs.pix_fmt, specs.width, specs.height, PIX_FMT_RGB24, SWS_LANCZOS, NULL, NULL, NULL);

	}//if(video_stream_index >= 0)

}//MediaFile::open

/*
//-----------------------------------------------------------------------------
unsigned MediaFile::getCurrentSample() const
{
	if(audio_frame_size == 0)
		return 0;

	UINT64	n = playedSamples(),		// current sample
				f = audio_frame_size,	// samples per frame
				i = n / f,					// current frame
				j = n % f;					// current sample in frame

	i %= buffers.size();
	return unsigned(i * f + j);
}
*/

//-----------------------------------------------------------------------------
//	Decode the next audio frame into XAudio2 buffer i.
//	Returns true when the buffer holds valid data, false when no audio stream
//	is open, i is out of range, or no more data could be decoded.
bool MediaFile::fillBuffer(unsigned i)
{
	const unsigned numBuffers = buffers.size();
	if(i == 0 && numBuffers == 1)
		return true;							//	a single constant buffer, already decoded in the constructor.

//	Fixed: was 'audio_stream_index < NULL' (pointer constant); -1 means
//	"no audio stream" (see close()).
	if(audio_stream_index < 0)
		return false;

//	Read the file (under the lock shared with the video path) until at least
//	two audio packets are queued, so the decoder below never starves.
	bool finished = false;
	EnterCriticalSection(&file_access);
	while(!finished && file.streamBufferedPackets(audio_stream_index) < 2)
		finished = file.bufferFrame() != 0;
	LeaveCriticalSection(&file_access);

	if(finished)
		eof = true;

	if(i < buffers.size())
	{
	//	Decode the audio frame straight into the buffer's slice of
	//	audio_buffer; each slice is AVCODEC_MAX_AUDIO_FRAME_SIZE bytes
	//	(see open()), so a full frame always fits.
		int bytes = file.decodeAudio((int16_t*)buffers[i].pAudioData, AVCODEC_MAX_AUDIO_FRAME_SIZE);

		buffers[i].AudioBytes = bytes;
		buffers[i].Flags = finished ? XAUDIO2_END_OF_STREAM : 0;

		return bytes > 0;
	}

	return false;
}

//-----------------------------------------------------------------------------
//	Decode one video frame and convert it into a free RGB slot of video_buffer.
//	Slot 0 is the decoder's native frame; slots 1..n-1 hold converted RGB
//	frames, where pts < 0 marks a slot as free (see getNextFrame's consumers).
//	Returns true when a new frame was buffered, false when no slot is free or
//	the end of the file was reached.
bool MediaFile::bufferFrame()
{
	if(video_stream == NULL)
		return false;

//	Search for free frame
	for(int i = 1; i < video_buffer.m(); i++)
	{
		AVFrame &dst = video_buffer[i];
		if(dst.pts < 0)
		{
		//	Demux packets (under the file lock, shared with the audio path)
		//	until one complete video frame has been decoded into slot 0.
			bool finished = false;
			EnterCriticalSection(&file_access);
			while(!finished && (file.streamBufferedPackets(video_stream_index) == 0 || file.decodeVideo(&video_buffer[0]) != 1))
				finished = file.bufferFrame() != 0;
			LeaveCriticalSection(&file_access);

			if(finished)
			{
				eof = true;
				return false;
			}

		//	Copy and convert to RGB, then stamp the slot with the source pts
		//	and a monotonically increasing display number for ordering.
			sws_scale(video_to_rgb, video_buffer[0].data, video_buffer[0].linesize, 0, video_stream->codec->height, dst.data, dst.linesize);
			dst.pts = video_buffer[0].pts;
			dst.coded_picture_number = video_buffer[0].coded_picture_number;
			dst.display_picture_number = picture_number++;
			return true;
		}
	}

	return false;
}

//-----------------------------------------------------------------------------
//	Count the decoded RGB frames currently waiting to be displayed.
//	A negative pts (e.g. AV_NOPTS_VALUE) marks a slot as free; slot 0 is the
//	decoder's own frame and is never counted.
int MediaFile::bufferedFrames() const
{
	int count = 0;
	for(int slot = 1; slot < video_buffer.m(); slot++)
	{
		if(video_buffer[slot].pts < 0)
			continue;
		count++;
	}
	return count;
}

//-----------------------------------------------------------------------------
//	Return the buffered frame that should be shown next — the valid slot
//	(pts >= 0) with the smallest display_picture_number — or NULL when no
//	frame is available and none could be decoded on demand.
AVFrame *MediaFile::getNextFrame()
{
	if(video_stream == NULL)
		return NULL;

//	Nothing buffered yet? Try to decode a single frame on demand.
	if(bufferedFrames() == 0 && bufferFrame() == false)
		return NULL;

//	Select the valid frame with the lowest display_picture_number.
	int64_t	lowest	= std::numeric_limits<int64_t>::max();
	int		best		= 0;
	for(int slot = 1; slot < video_buffer.m(); slot++)
	{
		const AVFrame &candidate = video_buffer[slot];
	// skip invalid frames, e.g. pts == AV_NOPTS_VALUE
		if(candidate.pts >= 0 && lowest > candidate.display_picture_number)
		{
			lowest	= candidate.display_picture_number;
			best		= slot;
		}
	}

	return best != 0 ? &video_buffer[best] : NULL;
}


//-----------------------------------------------------------------------------
//	Seek to time_in_ms. With seekSafe == false (or when there is no audio
//	stream) the demuxer seeks directly. With seekSafe == true the audio is
//	decoded and discarded until the target position is reached — slower, but
//	avoids demuxer seek inaccuracies.
void MediaFile::seek(int64_t time_in_ms, bool seekSafe)
{
	if(time_in_ms <= 0)
		return;

	if(seekSafe == false || audio_stream_index == -1)
	{
		file.seek(time_in_ms, audio_stream_index);
	}
	else
	{
	//	Target position in bytes, rounded down by one frame so we don't
	//	overshoot the requested time.
	//	NOTE(review): totalSize starts from playedSamples() but accumulates
	//	byte counts from decodeAudio(); confirm playedSamples() returns bytes
	//	(or that the mismatch is intentional) — looks like mixed units.
		int64_t	totalSize	= playedSamples(),
					sampleSize	= channels * bitsPerSample/8,
					frameSize	= sampleSize * audio_frame_size,
					limit			= sampleSize * int64_t(sampleRate) * time_in_ms / 1000ll - frameSize;

	// Read until EOF
		while(file.bufferFrame() == 0)
		{
			if(file.streamBufferedPackets(audio_stream_index))
			{
			// Decode and discard audio frame
				totalSize += file.decodeAudio((int16_t*)&audio_buffer[0], audio_buffer.m());

				if(totalSize >= limit)
					return;
			}
		}

	}

}
