﻿#include <crtdbg.h>
#include <string.h>
#include "stream.h"
extern "C" {
#include "util.h"
};


CStreamQueue::CStreamQueue()
{
	// Start out empty: zero bytes buffered, zero packets, no list nodes.
	m_queuedata.nb_packets = 0;
	m_queuedata.size = 0;
	m_queuedata.first_pkt = NULL;
	m_queuedata.last_pkt = NULL;
}

CStreamQueue::~CStreamQueue()
{
	// FIX: packets still queued at destruction used to leak. Each node owns
	// a duplicated payload (see av_dup_packet in PutdataToQueue), so both
	// the packet data and the list node itself must be released.
	Enter();
	AVPacketList *node = m_queuedata.first_pkt;
	while (node != NULL)
	{
		AVPacketList *next = node->next;
		av_free_packet(&node->pkt);
		av_free(node);
		node = next;
	}
	m_queuedata.first_pkt = NULL;
	m_queuedata.last_pkt = NULL;
	m_queuedata.nb_packets = 0;
	m_queuedata.size = 0;
	Leave();
}

// Appends one packet to the queue (producer side).
// The packet payload is duplicated, so the caller may reuse *packet afterwards.
// Returns false when the queue is at its byte limit or on allocation failure;
// the caller is expected to retry (see CStream::ReadFrame).
bool	CStreamQueue::PutdataToQueue(AVPacket *packet)
{
	AVPacketList *pkt1;

	// FIX: m_queuedata.size is mutated by the consumer thread under the lock
	// (GetdataFromQueue), so it must also be read under the lock here.
	// The limit stays "soft": a concurrent producer could still enqueue
	// between this check and the append below, same as before.
	Enter();
	bool full = (m_queuedata.size >= m_limit);
	Leave();
	if (full)
		return false;

	// Give the packet its own payload so it outlives the demuxer's buffer.
	if (av_dup_packet(packet) < 0)
	{
		_ASSERT(0);
		return false;
	}

	pkt1 = (AVPacketList*)av_malloc(sizeof(AVPacketList));
	if (!pkt1)
	{
		_ASSERT(0);
		return false;
	}

	pkt1->pkt = *packet;
	pkt1->next = NULL;

	Enter();

	// Append to the tail of the singly linked list.
	if (!m_queuedata.last_pkt)
		m_queuedata.first_pkt = pkt1;
	else
		m_queuedata.last_pkt->next = pkt1;

	m_queuedata.last_pkt = pkt1;
	m_queuedata.nb_packets++;
	m_queuedata.size += pkt1->pkt.size;

	Leave();

	return true;
}

// Pops the oldest packet from the queue (consumer side).
// On success *packet receives ownership of the payload and the byte size is
// returned; returns 0 when the queue is empty.
int		CStreamQueue::GetdataFromQueue(AVPacket *packet)
{
	int bytes = 0;

	Enter();
	AVPacketList *head = m_queuedata.first_pkt;
	if (head != NULL)
	{
		// Unlink the head node; clear the tail too when the list empties.
		m_queuedata.first_pkt = head->next;
		if (m_queuedata.first_pkt == NULL)
			m_queuedata.last_pkt = NULL;

		m_queuedata.nb_packets--;
		m_queuedata.size -= head->pkt.size;

		// Hand the payload to the caller, then discard the node shell.
		*packet = head->pkt;
		bytes = packet->size;
		av_free(head);
	}
	Leave();

	return bytes;
}

//////////////////////////////////////////////////////////////////////////

CStream::CStream()
{
	// FIX: zero out lazily-created handles so the destructor can safely
	// tear them down even when Open() was never called or failed early.
	// (They were previously left uninitialized.)
	m_pFormatCtx = NULL;
	m_pVideoCodecCtx = NULL;
	m_pAudioCodecCtx = NULL;
	m_pVideoCodec = NULL;
	m_pAudioCodec = NULL;
	m_pFrame = NULL;
	m_video_st = NULL;
	m_audio_st = NULL;
	memset(&m_AvPacket, 0, sizeof(m_AvPacket));

	m_Videoqueue.SetLimit(LIMIT_VIDEO_QUEUESIZE);
	m_Audioqueue.SetLimit(LIMIT_AUDIO_QUEUESIZE);

	m_videostream = -1;
	m_audiostream = -1;

	// Register all codecs and container formats with FFmpeg.
	av_register_all();
}


CStream::~CStream()
{
	// FIX: guard teardown — Open() may have failed part-way, leaving some
	// handles unset. (Guards are effective once the constructor NULLs the
	// members; with garbage pointers this is no worse than before.)
	av_free_packet(&m_AvPacket);
	if (m_pFrame)
		av_free(m_pFrame);
	if (m_pVideoCodecCtx)
		avcodec_close(m_pVideoCodecCtx);
	// FIX: the audio codec context is opened in OpenCodec() but was never closed.
	if (m_pAudioCodecCtx)
		avcodec_close(m_pAudioCodecCtx);
	if (m_pFormatCtx)
		av_close_input_file(m_pFormatCtx);
}


// Opens the media file, probes its streams, sets up the decoders and the
// decode frame, then starts the demuxer thread.
// Returns false on any setup failure.
bool	CStream::Open(char *file)
{
	// Open the container/demuxer.
	if (0 != av_open_input_file(&m_pFormatCtx, file, NULL, 0, NULL))
	{
		_ASSERT(0);
		return false;
	}

	// Probe stream information.
	if (av_find_stream_info(m_pFormatCtx) < 0)
	{
		_ASSERT(0);
		return false;
	}

	// Locate the video / audio streams.
	if (!SearchStreams())
	{
		_ASSERT(0);
		return false;
	}

	// Find and open the decoders.
	if (!OpenCodec())
	{
		_ASSERT(0);
		return false;
	}

	OpenFrame();

	// Kick off the demuxer thread (Run -> ReadFrame).
	Start();
	return true;
}


void	CStream::Close()
{
	// NOTE(review): intentionally empty — all teardown currently happens in
	// the destructor; nothing is released here.
}

void	CStream::OpenFrame()
{
	// Allocate video frame
	m_pFrame = avcodec_alloc_frame();
}

// Thread entry point: pumps demuxed packets into the video/audio queues
// until av_read_frame() reports end-of-stream or an error.
void	CStream::Run()
{
	ReadFrame();
}

int		CStream::ReadFrame()
{
	int result = 0;
	
	while(av_read_frame(m_pFormatCtx, &m_AvPacket) >= 0) 
	{
		// 이번 패킷이 video stream 인가??
		if(m_AvPacket.stream_index == m_videostream) 
		{
			bool r = false;
			while(!r)
			{
				r = m_Videoqueue.PutdataToQueue(&m_AvPacket);
				if( r == false )
				{
					_TRACE(_T("full video\n"));
					Sleep(10);

				}
			}
		}
		else if(m_AvPacket.stream_index == m_audiostream) 
		{
			// Audio stream 인가??
			bool r = false;
			while(!r)
			{
				r = m_Audioqueue.PutdataToQueue(&m_AvPacket);
				if( r == false )
				{
					_TRACE(_T("full audio\n"));
					Sleep(10);
				}
			}
		}
		else
			av_free_packet(&m_AvPacket);
	}

	return result;
}

// 코덱을 얻는다
bool	CStream::OpenCodec()
{
	// Video Codec
	if( m_videostream != -1 )
	{
		// Video codec context를 얻는다
		m_pVideoCodecCtx = m_pFormatCtx->streams[m_videostream]->codec;

		// 스트림의 Video 코덱을 찾아본다 ( m_pCodecCtx->codec_id : CODEC_ID_H264 같은애들 )
		m_pVideoCodec = avcodec_find_decoder(m_pVideoCodecCtx->codec_id);
		if(m_pVideoCodec == NULL) 
		{
			//fprintf(stderr, "Unsupported codec!\n");
			return false; // Codec not found
		}

		// Video codec을 얻는다
		if(avcodec_open(m_pVideoCodecCtx, m_pVideoCodec) < 0)
		{
			_ASSERT(0);
			return false;
		}

		int i = 0;
		while( codec_info[i].codec != 0x0badf00d )
		{
			if( codec_info[i].codec == m_pVideoCodecCtx->codec_id)
			{
				m_videocodecid = m_pVideoCodecCtx->codec_id;
				strcpy(m_videocodecname, codec_info[i].codecname);
			}
			i++;
		}

		// Codec info
		m_streamWidth = m_pVideoCodecCtx->width;
		m_streamHeight = m_pVideoCodecCtx->height;
		m_streamFormat = m_pVideoCodecCtx->pix_fmt;
	}

	// Audio COdec
	if( m_audiostream != -1 )
	{
		// Audio codec context를 얻는다
		m_pAudioCodecCtx = m_pFormatCtx->streams[m_audiostream]->codec;

		// 스트림의 Audio 코덱을 찾아본다 ( m_pCodecCtx->codec_id : CODEC_ID_MP3 같은애들 )
		m_pAudioCodec = avcodec_find_decoder(m_pAudioCodecCtx->codec_id);
		if(m_pAudioCodec == NULL) 
		{
			//fprintf(stderr, "Unsupported codec!\n");
			return false; // Codec not found
		}

		// Audio codec을 얻는다
		if(avcodec_open(m_pAudioCodecCtx, m_pAudioCodec) < 0)
		{
			_ASSERT(0);
			return false;
		}

		int i = 0;
		while( codec_info[i].codec != 0x0badf00d )
		{
			if( codec_info[i].codec == m_pAudioCodecCtx->codec_id)
			{
				m_audiocodecid = m_pAudioCodecCtx->codec_id;
				strcpy(m_audiocodecname, codec_info[i].codecname);
			}
			i++;
		}

		m_audioFreq = m_pAudioCodecCtx->sample_rate;
		m_audioFormat = m_pAudioCodecCtx->sample_fmt;// SAMPLE_FMT_U8;// AUDIO_S16SYS;
		m_audioChannels = m_pAudioCodecCtx->channels;
		m_audioSamples = 1024;
	}



	return true;
}

// Scans the demuxed streams and records the index/handle of the first video
// and first audio stream found. A video stream is mandatory; audio is
// optional. Returns false when no video stream exists.
bool	CStream::SearchStreams()
{
	// Find the first video stream.
	m_videostream = -1;
	for(unsigned int idx = 0; idx < m_pFormatCtx->nb_streams; ++idx)
	{
		if(m_pFormatCtx->streams[idx]->codec->codec_type != CODEC_TYPE_VIDEO)
			continue;

		m_videostream = idx;
		m_video_st = m_pFormatCtx->streams[idx];

		// Seed the frame-timing state used for video pacing.
		m_frame_timer = (double)av_gettime() / 1000000.0;
		m_frame_last_delay = 40e-3;
		break;
	}

	// Find the first audio stream.
	m_audiostream = -1;
	for(unsigned int idx = 0; idx < m_pFormatCtx->nb_streams; ++idx)
	{
		if(m_pFormatCtx->streams[idx]->codec->codec_type != CODEC_TYPE_AUDIO)
			continue;

		m_audiostream = idx;
		m_audio_st = m_pFormatCtx->streams[idx];
		break;
	}

	// For now: no video stream means failure.
	if( m_videostream == -1 ) 
	{
		_ASSERT(0);
		return false;
	}

	return true;
}


/*
int img_convert(AVPicture* dst, PixelFormat dst_pix_fmt, AVPicture* src, PixelFormat pix_fmt, int width, int height) 
{ 
	int av_log = av_log_get_level(); 
	av_log_set_level(AV_LOG_QUIET); 
	SwsContext *img_convert_ctx = sws_getContext(width, height, pix_fmt, width, height, dst_pix_fmt, SWS_BICUBIC, NULL, NULL, NULL); 
	int result = sws_scale(img_convert_ctx, src->data, src->linesize, 0, height, dst->data, dst->linesize); 
	sws_freeContext(img_convert_ctx); 
	av_log_set_level(av_log); 
	return result; 
}
*/


// Pops one video packet, decodes it, and converts the decoded frame into the
// SDL YUV overlay (note: overlay planes 1/2 are swapped — YV12 U/V order).
// Sleeps briefly and returns when no packet is queued yet.
void	CStream::RenderFrameToOverlay(SDL_Overlay *overlay)
{
	AVPacket packet;
	int frameFinished;

	// Nothing queued yet: back off so the caller's loop doesn't spin hot.
	if( m_Videoqueue.GetdataFromQueue(&packet) <= 0)
	{
		Sleep(100);
		return;
	}

	avcodec_decode_video(m_pVideoCodecCtx, m_pFrame, &frameFinished, packet.data, packet.size);

	double pts;
	#define INT64_C(val) val##i64   // MSVC lacks C99 INT64_C, needed by AV_NOPTS_VALUE

	if(packet.dts != AV_NOPTS_VALUE)
	{
		pts = packet.dts;
	}
	else
	{
		pts = 0;
	}

	// NOTE(review): pts is computed but not used anywhere yet — kept for the
	// intended A/V sync work; confirm before removing.
	pts *= av_q2d(m_video_st->time_base);

	// FIX: the packet payload was duplicated when queued and was never
	// released here, leaking memory on every frame. The decoder has consumed
	// packet.data above, so it is safe to free before any return path.
	av_free_packet(&packet);

	if(!frameFinished) 
	{
		return;
	}

	// Map the overlay planes (U and V swapped for YV12 layout).
	AVPicture pict;
	pict.data[0] = overlay->pixels[0];
	pict.data[1] = overlay->pixels[2];
	pict.data[2] = overlay->pixels[1];

	pict.linesize[0] = overlay->pitches[0];
	pict.linesize[1] = overlay->pitches[2];
	pict.linesize[2] = overlay->pitches[1];

	AVPicture* src = (AVPicture *)m_pFrame;

	// Silence libav logging around the conversion.
	int av_log = av_log_get_level(); 
	av_log_set_level(AV_LOG_QUIET); 

	SwsContext *img_convert_ctx = sws_getContext(m_streamWidth, m_streamHeight, m_streamFormat, 
		m_streamWidth, m_streamHeight, 
		PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL); 
		//PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

	int result = sws_scale(img_convert_ctx, src->data, src->linesize, 0, m_streamHeight, pict.data, pict.linesize); 
	sws_freeContext(img_convert_ctx); 
	av_log_set_level(av_log); 
	
}

//////////////////////////////////////////////////////////////////////////


// Decodes queued audio packets into audio_buf (capacity buf_size bytes) and
// returns the number of bytes produced for one decoded chunk. Blocks
// (sleeping in 100ms steps) while the audio queue is empty.
// NOTE(review): the static locals carry partially-consumed packet state
// across calls, so this is non-reentrant — it must be driven by exactly one
// audio thread (presumably the SDL audio callback; confirm against caller).
int		CStream::DecodeAudio(uint8_t *audio_buf, int buf_size)
{
	static AVPacket pkt;                    // packet currently being drained
	static uint8_t *audio_pkt_data = NULL;  // read cursor inside pkt.data
	static int audio_pkt_size = 0;          // bytes of pkt not yet consumed

	int len1, data_size;

	for(;;) 
	{
		// Drain whatever remains of the current packet first.
		while(audio_pkt_size > 0) 
		{
			// avcodec_decode_audio2: data_size is in/out — in: buffer
			// capacity, out: bytes of PCM actually produced.
			data_size = buf_size;
			len1 = avcodec_decode_audio2(m_pAudioCodecCtx, (int16_t *)audio_buf, &data_size, 
				audio_pkt_data, audio_pkt_size);
			if(len1 < 0) 
			{
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			// Advance past the compressed bytes the decoder consumed.
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			if(data_size <= 0) 
			{
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			return data_size;
		}
		// Release the previous packet's payload before fetching the next.
		if(pkt.data)
			av_free_packet(&pkt);

// 		if(quit) 
// 		{
// 			return -1;
// 		}

		// Queue empty: wait a bit and try again.
		if( m_Audioqueue.GetdataFromQueue(&pkt) <= 0) 
		{
			Sleep(100);
			continue;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
}