#include "video_player.h"
#include <cassert>
#include <iostream>

#define SCREEN_WIDTH 800
#define SCREEN_HEIGHT 600
#define VIDEO_BITRATE 64000
#define AUDIO_BITRATE 64000
#define AUDIO_SAMPLERATE 44100

// Shared with the free-function FFmpeg/SDL callbacks (decodeInterruptCallback,
// our_get_buffer), which have fixed C signatures and cannot reach instance
// members. Set in initializeFFMPEG(); only one player instance is supported.
VideoPlayer::VideoState *VideoPlayer::global_video_state = 0;
// pts of the packet currently being decoded; our_get_buffer() stamps it into
// each frame's opaque field so the decode loop can recover presentation times.
uint64_t VideoPlayer::global_video_pkt_pts = AV_NOPTS_VALUE;

/**
 *
 */
/**
 * Builds the player around an already-created SDL surface and immediately
 * initializes the FFmpeg decoders and SDL audio device.
 *
 * @param surface  Target SDL surface for video output; not owned by this class.
 */
VideoPlayer::VideoPlayer(SDL_Surface *surface)
	: displayThread_(*this), surface_(surface) {
	initializeFFMPEG();
}

/**
 *
 */
/**
 * Releases the codec contexts, the software-scaler context and the SDL audio
 * device. Does not free surface_, which is owned by the caller.
 */
VideoPlayer::~VideoPlayer() {
	cleanUpFFMPEG();
}

/**
 *
 */
/**
 * One-time setup, called from the constructor:
 *  - allocates and opens the MPEG-2 video decoder and the MP2 audio decoder
 *    with hard-coded stream parameters (see the macros at the top of file),
 *  - installs the global decode-interrupt callback,
 *  - opens the SDL audio device and unpauses it,
 *  - seeds the audio/video synchronization state,
 *  - hooks our pts-tracking get/release_buffer callbacks into the video codec.
 *
 * Failures in codec setup abort via assert(); an SDL audio failure is only
 * logged (playback then proceeds without sound).
 */
void VideoPlayer::initializeFFMPEG() {
	int retval;

	// create the video decoder context
	videoState.videoDecoderContext_ = avcodec_alloc_context();
	assert(videoState.videoDecoderContext_ != NULL && "videoDecoderContext_ is NULL!");

	// put sample parameters
	videoState.videoDecoderContext_->bit_rate = VIDEO_BITRATE;
	videoState.videoDecoderContext_->width = SCREEN_WIDTH;
	videoState.videoDecoderContext_->height = SCREEN_HEIGHT;
	// BUGFIX: "{1/25}" performed integer division (== 0), producing an invalid
	// 0/0 rational. AVRational is {numerator, denominator}: 1/25 s per tick.
	videoState.videoDecoderContext_->time_base = (AVRational){1, 25};
	videoState.videoDecoderContext_->gop_size = 10;
	videoState.videoDecoderContext_->max_b_frames = 1;
	videoState.videoDecoderContext_->pix_fmt = PIX_FMT_YUV420P;
	videoState.videoDecoderContext_->codec_id = CODEC_ID_MPEG2VIDEO;
	videoState.videoDecoderContext_->codec_type = CODEC_TYPE_VIDEO;

	// find the decoder for the video stream
	videoState.videoDecoder_ = avcodec_find_decoder(videoState.videoDecoderContext_->codec_id);
	assert(videoState.videoDecoder_ != 0 && "could not find decoder - videoDecoder_ pointer is null!");

	// open decoder
	retval = avcodec_open(videoState.videoDecoderContext_, videoState.videoDecoder_);
	assert(retval >= 0 && "video decoder could not be opened - videoDecoder_ pointer is null!");

	// create the audio decoder context
	videoState.audioDecoderContext_ = avcodec_alloc_context();
	assert(videoState.audioDecoderContext_ != NULL && "videoState.audioDecoderContext_ is NULL!");

	// put sample parameters (same BUGFIX as above: {1, 25}, not {1/25})
	videoState.audioDecoderContext_->time_base = (AVRational){1, 25};
	videoState.audioDecoderContext_->codec_id = CODEC_ID_MP2;
	videoState.audioDecoderContext_->codec_type = CODEC_TYPE_AUDIO;
	videoState.audioDecoderContext_->bit_rate = AUDIO_BITRATE;
	videoState.audioDecoderContext_->sample_rate = AUDIO_SAMPLERATE;
	videoState.audioDecoderContext_->channels = 1;
	videoState.audioDecoderContext_->sample_fmt = SAMPLE_FMT_S16;

	// find the decoder for the audio stream
	videoState.audioDecoder_ = avcodec_find_decoder(videoState.audioDecoderContext_->codec_id);
	assert(videoState.audioDecoder_ != 0 && "could not find decoder - audioDecoder_ pointer is null!");

	// open decoder
	retval = avcodec_open(videoState.audioDecoderContext_, videoState.audioDecoder_);
	assert(retval >= 0 && "audio decoder could not be opened - audioDecoder_ pointer is null!");

	global_video_state = &videoState;
	// will interrupt blocking functions if we quit!
	url_set_interrupt_cb(VideoPlayerCallbacks::decodeInterruptCallback);

	SDL_AudioSpec wanted_spec, spec;
	// set audio settings from codec info
	wanted_spec.freq = videoState.audioDecoderContext_->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = videoState.audioDecoderContext_->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = VideoPlayerCallbacks::audioCallback;
	wanted_spec.userdata = &videoState;
	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		// NOTE(review): failure is only logged; spec then stays uninitialized
		// and audioHwBufferSize_ below reads garbage — consider zeroing spec
		// or bailing out here. Kept as-is to preserve existing behavior.
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
	}
	videoState.audioHwBufferSize_ = spec.size;

	videoState.audioBuferSize_ = 0;
	videoState.audioBufferIndex_ = 0;
	// averaging filter for audio sync
	videoState.audioDiffAvgCoef_ = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
	videoState.audioDiffAvgCount_ = 0;
	// correct audio only if larger error than this
	videoState.audioDiffThreshold_ = 2.0 * SDL_AUDIO_BUFFER_SIZE / videoState.audioDecoderContext_->sample_rate;
	memset(&videoState.audioPacket_, 0, sizeof(videoState.audioPacket_));
	SDL_PauseAudio(0);

	videoState.frameTimer_ = (double) av_gettime() / 1000000.0;
	videoState.frameLastDelay_ = 40e-3; // assume 25 fps until measured
	videoState.videoCurrentPtsTime_ = av_gettime();
	// intercept frame buffer management so each decoded frame carries the pts
	// of the packet that started it (see our_get_buffer/our_release_buffer)
	videoState.videoDecoderContext_->get_buffer = our_get_buffer;
	videoState.videoDecoderContext_->release_buffer = our_release_buffer;
}

/**
 *
 */
/**
 * Tears down what initializeFFMPEG() created: both codec contexts, the
 * lazily-created swscale context (sws_freeContext accepts NULL), and the SDL
 * audio device.
 *
 * NOTE(review): the audio device is closed last, so the audio callback may
 * still run while audioDecoderContext_ is being closed — verify SDL_CloseAudio
 * should not come first.
 */
void VideoPlayer::cleanUpFFMPEG() {
	avcodec_close(videoState.videoDecoderContext_);
	avcodec_close(videoState.audioDecoderContext_);
	sws_freeContext(videoState.imageConvertContext_);
	SDL_CloseAudio();
}

/**
 *
 */
/**
 * Entry point for playback: clears the quit flag and enters the blocking
 * event loop in play(). Returns only after stop() has been requested.
 */
void VideoPlayer::run() {
	videoState.quit_ = 0;
	//videoState.reset();
	play();
}

/**
 *
 */
/**
 * Requests playback termination. The flag is polled by the event loop, the
 * display thread, the decode-interrupt callback and the queue waits.
 */
void VideoPlayer::stop() {
	videoState.quit_ = 1;
}

/**
 *
 */
/**
 * Blocking playback loop. Schedules the first refresh, launches the display
 * (decode) thread, then services SDL events until quit_ is raised. On exit it
 * wakes every blocked consumer/producer and joins the display thread.
 */
void VideoPlayer::play() {
	// kick off the refresh timer and the decoding thread
	scheduleRefresh(40);
	displayThread_.start();

	// pump SDL events until someone calls stop()
	while (videoState.quit_ != 1) {
		SDL_WaitEvent(&event_);
		if (event_.type == FF_ALLOC_EVENT) {
			// overlay (re)allocation must happen on this (main) thread
			allocPicture();
		} else if (event_.type == FF_REFRESH_EVENT) {
			videoRefreshTimer(event_.user.data1);
		}
		// all other events are ignored
	}

	// shut down: unblock anything waiting on the queues, then join
	videoState.quit_ = 1;
	videoState.videoQueue_.stopWaiting();
	videoState.audioQueue_.stopWaiting();
	videoState.queuePictureStop();
	displayThread_.join();

	std::cout << "papa" << std::endl;
}

/**
 *
 */
void VideoPlayer::allocPicture() {
	VideoPicture &vp = videoState.PictureQueue_[videoState.pictureQueueWIndex_];

	if (vp.bmp)
		SDL_FreeYUVOverlay(vp.bmp);
	// Allocate a place to put our YUV image on that screen
	vp.bmp = SDL_CreateYUVOverlay(videoState.videoDecoderContext_->width, videoState.videoDecoderContext_->height, SDL_YV12_OVERLAY, surface_);
	vp.width = videoState.videoDecoderContext_->width;
	vp.height = videoState.videoDecoderContext_->height;

	{
		Lock l(videoState.pictureQueueMutex_);
		vp.allocated = 1;
		videoState.pictureQueueCondition_.notify();
	}
}

/**
 *
 */
/**
 * Handles one FF_REFRESH_EVENT: shows the next queued picture, computes the
 * delay to the following frame (syncing video to the master clock unless
 * video itself is master) and schedules the next refresh.
 *
 * @param userdata  The VideoState pointer passed to SDL_AddTimer — in this
 *                  player always &videoState (see scheduleRefresh), which is
 *                  why the mix of `is->` and direct member access below works.
 */
void VideoPlayer::videoRefreshTimer(void *userdata) {
	VideoState *is = (VideoState *) userdata;
	VideoPicture *vp;
	double actual_delay, delay, sync_threshold, ref_clock, diff;

	if (is->pictureQueueSize_ == 0) {
		// nothing decoded yet — poll again very soon
		scheduleRefresh(1);
	} else {
		vp = &is->PictureQueue_[is->pictureQueueRIndex_];

		is->videoCurrentPts_ = vp->pts;
		is->videoCurrentPtsTime_ = av_gettime();

		delay = vp->pts - is->frameLastPts_; // the pts from last time
		if (delay <= 0 || delay >= 1.0) {
			// if incorrect delay, use previous one
			delay = is->frameLastDelay_;
		}
		// save for next time
		is->frameLastDelay_ = delay;
		is->frameLastPts_ = vp->pts;

		// update delay to sync to audio if not master source
		if (is->avSyncType_ != AV_SYNC_VIDEO_MASTER) {
			ref_clock = is->getMasterClock();
			diff = vp->pts - ref_clock;

			// Skip or repeat the frame. Take delay into account
			// FFPlay still doesn't "know if this is the best guess."
			sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay
					: AV_SYNC_THRESHOLD;
			if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
				if (diff <= -sync_threshold) {
					delay = 0; // behind the clock: show ASAP
				} else if (diff >= sync_threshold) {
					delay = 2 * delay; // ahead of the clock: hold the frame
				}
			}
		}

		is->frameTimer_ += delay;
		// compute the REAL delay against the wall clock
		actual_delay = is->frameTimer_ - (av_gettime() / 1000000.0);
		if (actual_delay < 0.010) {
			// BUGFIX: this clamp was commented out. Without it a negative
			// actual_delay is narrowed to a negative int and then converted
			// to SDL's Uint32 timer interval — scheduling the next refresh
			// ~49 days in the future and freezing playback. (Skipping the
			// picture entirely would be even better.)
			actual_delay = 0.010;
		}
		scheduleRefresh((int) (actual_delay * 1000 + 0.5));

		// show the picture!
		videoDisplay();

		// update queue for next picture!
		if (++is->pictureQueueRIndex_ == VIDEO_PICTURE_QUEUE_SIZE) {
			is->pictureQueueRIndex_ = 0;
		}
		{
			Lock l(is->pictureQueueMutex_);
			is->pictureQueueSize_--;
			is->pictureQueueCondition_.notify();
		}
	}
}

/**
 *
 */
void VideoPlayer::videoDisplay() {
	SDL_Rect rect;
	VideoPicture *vp;
	float aspect_ratio;
	int w, h, x, y;

	vp = &(videoState.PictureQueue_[videoState.pictureQueueRIndex_]);
	if (vp->bmp) {
		if (videoState.videoDecoderContext_->sample_aspect_ratio.num == 0) {
			aspect_ratio = 0;
		} else {
			aspect_ratio = av_q2d(videoState.videoDecoderContext_->sample_aspect_ratio) * videoState.videoDecoderContext_->width / videoState.videoDecoderContext_->height;
		}
		if (aspect_ratio <= 0.0) {
			aspect_ratio = (float) videoState.videoDecoderContext_->width / (float) videoState.videoDecoderContext_->height;
		}
		h = surface_->h;
		w = ((int) rint(h * aspect_ratio)) & -3;
		if (w > surface_->w) {
			w = surface_->w;
			h = ((int) rint(w / aspect_ratio)) & -3;
		}
		x = (surface_->w - w) / 2;
		y = (surface_->h - h) / 2;

		rect.x = x;
		rect.y = y;
		rect.w = w;
		rect.h = h;
		SDL_DisplayYUVOverlay(vp->bmp, &rect);
	}
}

/**
 *
 */
/**
 * Arms a one-shot SDL timer that will push an FF_REFRESH_EVENT after `delay`
 * milliseconds (see sdlRefreshTimerCallback, which returns 0 = don't repeat).
 *
 * @param delay  Delay in milliseconds before the next video refresh.
 */
void VideoPlayer::scheduleRefresh(int delay) {
	// schedule a video refresh in 'delay' ms
	SDL_AddTimer(delay, VideoPlayerCallbacks::sdlRefreshTimerCallback, &videoState);
}

/**
 *
 */
/**
 * SDL timer callback: converts the timer tick into an FF_REFRESH_EVENT on the
 * main event queue so the refresh happens on the event-loop thread.
 *
 * @param interval  Unused; the timer is one-shot.
 * @param data      VideoState pointer, forwarded as event.user.data1.
 * @return 0 to cancel the timer (a new one is armed by scheduleRefresh).
 */
Uint32 VideoPlayer::VideoPlayerCallbacks::sdlRefreshTimerCallback(Uint32 interval, void *data) {
	SDL_Event event;
	event.type = FF_REFRESH_EVENT;
	event.user.data1 = data;
	SDL_PushEvent(&event);
	return 0; // 0 means stop timer
}

/**
 *
 */
/**
 * FFmpeg interrupt callback (installed via url_set_interrupt_cb): returning
 * non-zero aborts blocking I/O inside FFmpeg once quit has been requested.
 */
int VideoPlayer::VideoPlayerCallbacks::decodeInterruptCallback() {
	return (global_video_state && global_video_state->quit_);
}

/**
 *
 */
/**
 * SDL audio callback: fills `stream` with exactly `len` bytes of decoded PCM.
 * Pulls new frames from audioDecodeFrame() whenever the internal buffer is
 * exhausted; on a decode error it substitutes 1024 bytes of silence so the
 * device keeps running.
 *
 * @param userdata  VideoState pointer supplied at SDL_OpenAudio time.
 * @param stream    Destination hardware buffer.
 * @param len       Number of bytes SDL expects to be written.
 */
void VideoPlayer::VideoPlayerCallbacks::audioCallback(void *userdata, Uint8 *stream, int len) {
	VideoState *is = (VideoState *) userdata;
	double pts;

	while (len > 0) {
		// refill the staging buffer when it has been fully consumed
		if (is->audioBufferIndex_ >= is->audioBuferSize_) {
			int decoded = is->audioDecodeFrame(is->audioBuffer_, sizeof(is->audioBuffer_), &pts);
			if (decoded < 0) {
				// decode failed: emit a short burst of silence instead
				is->audioBuferSize_ = 1024;
				memset(is->audioBuffer_, 0, is->audioBuferSize_);
			} else {
				is->audioBuferSize_ = is->synchronizeAudio(
						(int16_t *) is->audioBuffer_, decoded, pts);
			}
			is->audioBufferIndex_ = 0;
		}

		// copy out as much as SDL still wants, capped by what we have
		int chunk = is->audioBuferSize_ - is->audioBufferIndex_;
		if (chunk > len)
			chunk = len;
		memcpy(stream, (uint8_t *) is->audioBuffer_ + is->audioBufferIndex_, chunk);
		len -= chunk;
		stream += chunk;
		is->audioBufferIndex_ += chunk;
	}
}

/**
 *
 */
/**
 * Wraps raw video bytes in an AVPacket and hands them to the video queue.
 *
 * The packet's data pointer aliases the caller's vector — this assumes the
 * queue's put() duplicates the payload before returning (TODO confirm).
 *
 * @param data  Encoded video payload; an empty payload is ignored.
 * @param pts   Presentation timestamp to attach to the packet.
 */
void VideoPlayer::enqueueVideoPacket(const std::vector<char> &data, int pts) {
	// BUGFIX: data.at(0) below throws std::out_of_range for an empty payload;
	// there is nothing useful to enqueue in that case anyway.
	if (data.empty())
		return;

	// crude back-pressure: if we are overcrowded, wait a bit
	if (videoState.videoQueue_.size() > MAX_VIDEOQ_SIZE)
		SDL_Delay(10);

	// enqueue video packet
	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.size = data.size();
	pkt.pts = pts;
	pkt.data = (unsigned char*)&data.at(0);
	videoState.videoQueue_.put(&pkt);
}

/**
 *
 */
/**
 * Wraps raw audio bytes in an AVPacket and hands them to the audio queue.
 *
 * The packet's data pointer aliases the caller's vector — this assumes the
 * queue's put() duplicates the payload before returning (TODO confirm).
 * NOTE(review): unlike enqueueVideoPacket, `pts` is not copied into the
 * packet, so audioDecodeFrame will see AV_NOPTS_VALUE — verify intent.
 *
 * @param data  Encoded audio payload; an empty payload is ignored.
 * @param pts   Presentation timestamp (currently unused, see note above).
 */
void VideoPlayer::enqueueAudioPacket(const std::vector<char> &data, int pts) {
	// BUGFIX: data.at(0) below throws std::out_of_range for an empty payload;
	// there is nothing useful to enqueue in that case anyway.
	if (data.empty())
		return;

	// crude back-pressure: if we are overcrowded, wait a bit
	if (videoState.audioQueue_.size() > MAX_AUDIOQ_SIZE)
		SDL_Delay(10);

	// enqueue audio packet
	AVPacket pkt;
	av_init_packet(&pkt);
	pkt.size = data.size();
	pkt.data = (unsigned char*)&data.at(0);
	videoState.audioQueue_.put(&pkt);
}

/**
 *
 */
/**
 * Decode-thread main loop: pulls packets from the video queue, decodes them,
 * recovers a presentation timestamp (frame opaque pts from our_get_buffer,
 * falling back to the packet dts) and pushes finished frames into the
 * picture queue. Exits when quit_ is set or a queue operation fails.
 */
void VideoPlayer::DisplayVideo::run() {
	VideoState *is = &outer_.videoState;
	AVPacket pkt1, *packet = &pkt1;
	int len1, frameFinished;
	AVFrame *pFrame;
	double pts;

	pFrame = avcodec_alloc_frame();

	for (;;) {
		if (is->quit_)
			break;

		// means we quit getting packets
		if (is->videoQueue_.get(packet) < 0)
			break;
		pts = 0;

		// save global pts to be stored in pFrame in first call
		global_video_pkt_pts = packet->pts;

		// decode video frame
		len1 = avcodec_decode_video2(is->videoDecoderContext_, pFrame, &frameFinished, packet);
		// BUGFIX: the old "(unsigned) packet->dts" casts truncated the 64-bit
		// timestamp to 32 bits; the low word of AV_NOPTS_VALUE is 0, so the
		// "no pts" comparison could never match. Compare at full 64-bit width.
		if ((uint64_t) packet->dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE) {
			pts = *(uint64_t *) pFrame->opaque;
		} else if ((uint64_t) packet->dts != AV_NOPTS_VALUE) {
			pts = packet->dts;
		} else {
			pts = 0;
		}
		// convert from stream ticks to seconds
		pts *= av_q2d(is->videoDecoderContext_->time_base);

		// did we get a video frame?
		if (frameFinished) {
			pts = is->synchronizeVideo(pFrame, pts);
			if (is->queuePicture(pFrame, pts) < 0) {
				// BUGFIX: release the packet before bailing out, otherwise
				// its payload leaks on this exit path
				av_free_packet(packet);
				break;
			}
		}
		av_free_packet(packet);
	}
	av_free(pFrame);

	std::cout << "leaving display thread" << std::endl;
}

/**
 * Estimates the audio presentation time: the clock maintained by the audio
 * decode path, minus the portion of the staging buffer not yet played
 * (bytes remaining / bytes-per-second at 16-bit samples per channel).
 *
 * @return Audio clock position in seconds.
 */
double VideoPlayer::VideoState::getAudioClock() {
	// CONSISTENCY FIX: was qualified "VideoPlayer::VideoPlayer::VideoState::",
	// a redundant injected-class-name repetition unlike every sibling method.
	double pts;
	int hw_buf_size, bytes_per_sec, n;

	pts = audioClock_; // maintained in the audio thread
	hw_buf_size = audioBuferSize_ - audioBufferIndex_;
	bytes_per_sec = 0;
	n = audioDecoderContext_->channels * 2; // 2 bytes per 16-bit sample
	bytes_per_sec = audioDecoderContext_->sample_rate * n;
	if (bytes_per_sec)
		pts -= (double) hw_buf_size / bytes_per_sec;
	return pts;
}

/**
 *
 */
/**
 * Estimates the video presentation time: the pts of the frame currently on
 * screen plus the wall-clock time elapsed since it was shown.
 *
 * @return Video clock position in seconds.
 */
double VideoPlayer::VideoState::getVideoClock() {
	const double elapsed = (av_gettime() - videoCurrentPtsTime_) / 1000000.0;
	return videoCurrentPts_ + elapsed;
}

/**
 *
 */
/**
 * External reference clock: simply the system monotonic time in seconds.
 *
 * @return Wall-clock time in seconds since av_gettime()'s epoch.
 */
double VideoPlayer::VideoState::getExternalClock() {
	const double microseconds = av_gettime();
	return microseconds / 1000000.0;
}

/**
 *
 */
/**
 * Dispatches to the clock selected by avSyncType_: the video clock, the
 * audio clock, or (for any other value) the external wall clock.
 *
 * @return Master clock position in seconds.
 */
double VideoPlayer::VideoState::getMasterClock() {
	switch (avSyncType_) {
	case AV_SYNC_VIDEO_MASTER:
		return getVideoClock();
	case AV_SYNC_AUDIO_MASTER:
		return getAudioClock();
	default:
		return getExternalClock();
	}
}

/**
 *
 */
/**
 * Nudges the audio buffer toward the master clock when audio is not master:
 * keeps an exponentially-weighted average of the clock difference and, once
 * it exceeds audioDiffThreshold_, shrinks the buffer (drop samples) or grows
 * it (repeat the final sample) by at most SAMPLE_CORRECTION_PERCENT_MAX.
 *
 * @param samples       16-bit PCM buffer to adjust in place.
 * @param samples_size  Size of the buffer in bytes.
 * @param pts           Presentation time of the buffer (currently unused).
 * @return The (possibly adjusted) buffer size in bytes.
 */
int VideoPlayer::VideoState::synchronizeAudio(short *samples, int samples_size, double pts) {
	int n;
	double ref_clock;

	n = 2 * audioDecoderContext_->channels; // bytes per multi-channel sample

	if (avSyncType_ != AV_SYNC_AUDIO_MASTER) {
		double diff, avg_diff;
		int wanted_size, min_size, max_size;

		ref_clock = getMasterClock();
		diff = getAudioClock() - ref_clock;

		// NOTE(review): this compares the signed diff, not fabs(diff), so a
		// large negative drift never resets the averager — matches the
		// reference tutorial but looks suspicious; verify.
		if (diff < AV_NOSYNC_THRESHOLD) {
			// accumulate the diffs
			audioDiffCum_ = diff + audioDiffAvgCoef_ * audioDiffCum_;
			if (audioDiffAvgCount_ < AUDIO_DIFF_AVG_NB) {
				audioDiffAvgCount_++;
			} else {
				avg_diff = audioDiffCum_ * (1.0 - audioDiffAvgCoef_);
				if (fabs(avg_diff) >= audioDiffThreshold_) {
					wanted_size = samples_size + ((int) (diff * audioDecoderContext_->sample_rate) * n);
					min_size = samples_size * ((100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100);
					max_size = samples_size * ((100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100);
					if (wanted_size < min_size) {
						wanted_size = min_size;
					} else if (wanted_size > max_size) {
						wanted_size = max_size;
					}
					if (wanted_size < samples_size) {
						// remove samples
						samples_size = wanted_size;
					} else if (wanted_size > samples_size) {
						uint8_t *samples_end, *q;
						int nb;
						// add samples by copying final sample
						// BUGFIX: was (samples_size - wanted_size), which is
						// negative in this branch, so the copy loop below
						// never executed and no samples were ever added.
						nb = (wanted_size - samples_size);
						samples_end = (uint8_t *) samples + samples_size - n;
						q = samples_end + n;
						while (nb > 0) {
							memcpy(q, samples_end, n);
							q += n;
							nb -= n;
						}
						samples_size = wanted_size;
					}
				}
			}
		} else {
			// difference is TOO big; reset diff stuff
			audioDiffAvgCount_ = 0;
			audioDiffCum_ = 0;
		}
	}

	return samples_size;
}

/**
 *
 */
/**
 * Decodes audio into `audio_buf`, pulling packets from the audio queue as
 * needed and advancing audioClock_ by the duration of the decoded data.
 *
 * @param audio_buf  Destination PCM buffer.
 * @param buf_size   Capacity of audio_buf in bytes.
 * @param pts_ptr    Out: presentation time of the returned data.
 * @return Number of bytes decoded, or -1 on quit / queue shutdown.
 */
int VideoPlayer::VideoState::audioDecodeFrame(uint8_t *audio_buf, int buf_size, double *pts_ptr) {
	int len1, data_size, n;
	AVPacket *pkt = &audioPacket_;
	double pts;

	for (;;) {
		// drain the packet we are currently decoding
		while (audioPacketSize_ > 0) {
			data_size = buf_size;
			len1 = avcodec_decode_audio3(audioDecoderContext_, (int16_t *) audio_buf, &data_size, &audioPacket_);
			if (len1 < 0) {
				// if error, skip the rest of this packet
				audioPacketSize_ = 0;
				break;
			}
			audioPackageData_ += len1;
			audioPacketSize_ -= len1;
			if (data_size <= 0) {
				// no data yet, get more frames
				continue;
			}
			pts = audioClock_;
			*pts_ptr = pts;
			n = 2 * audioDecoderContext_->channels; // bytes per sample frame
			audioClock_ += (double) data_size / (double) (n * audioDecoderContext_->sample_rate);

			// we have data, return it and come back for more later
			return data_size;
		}
		if (pkt->data)
			av_free_packet(pkt);

		if (quit_) {
			return -1;
		}
		// next packet (blocking; returns < 0 on queue shutdown)
		if (audioQueue_.get(pkt) < 0) {
			return -1;
		}
		audioPackageData_ = pkt->data;
		audioPacketSize_ = pkt->size;
		// if the packet carries a pts, resync the audio clock to it.
		// BUGFIX: the old "(unsigned) pkt->pts" cast truncated the 64-bit
		// timestamp, so AV_NOPTS_VALUE (low 32 bits == 0) was never detected;
		// compare at full 64-bit width instead.
		if ((uint64_t) pkt->pts != AV_NOPTS_VALUE) {
			audioClock_ = av_q2d(audioDecoderContext_->time_base) * pkt->pts;
		}
	}
}

/**
 *
 */
/**
 * Returns the playback clocks and timers to their initial values and clears
 * the quit flag, so the state can be reused for another playback run.
 * (Queue contents and decoder contexts are intentionally left untouched.)
 */
void VideoPlayer::VideoState::reset() {
	// clocks
	externalClock_ = 0;
	externalClockTime_ = 0;
	audioClock_ = 0;
	videoClock_ = 0;
	videoCurrentPts_ = 0;
	videoCurrentPtsTime_ = 0;
	// frame timing
	frameTimer_ = 0;
	frameLastPts_ = 0;
	frameLastDelay_ = 0;
	// control
	quit_ = 0;
}

/**
 *
 */
/**
 * Zero-initializes all scalar playback state and selects the default A/V
 * sync source. Decoder contexts are created later in initializeFFMPEG().
 *
 * NOTE(review): audioPacket_ and the PictureQueue_ entries (notably each
 * slot's bmp pointer, which allocPicture() tests) are not initialized here —
 * confirm VideoPicture/AVPacket have their own zeroing, or add it.
 */
VideoPlayer::VideoState::VideoState() {
	imageConvertContext_ = 0;
	// CLEANUP: avSyncType_ was assigned twice (0 here, the real default at
	// the end of the ctor); keep the single meaningful assignment.
	avSyncType_ = DEFAULT_AV_SYNC_TYPE;
	externalClock_ = 0;
	externalClockTime_ = 0;
	audioClock_ = 0;
	audioBuferSize_ = 0;
	audioBufferIndex_ = 0;
	audioPackageData_ = 0;
	audioPacketSize_ = 0;
	audioHwBufferSize_ = 0;
	audioDiffCum_ = 0;
	audioDiffAvgCoef_ = 0;
	audioDiffThreshold_ = 0;
	audioDiffAvgCount_ = 0;
	frameTimer_ = 0;
	frameLastPts_ = 0;
	frameLastDelay_ = 0;
	videoClock_ = 0;
	videoCurrentPts_ = 0;
	videoCurrentPtsTime_ = 0;
	pictureQueueSize_ = 0;
	pictureQueueRIndex_ = 0;
	pictureQueueWIndex_ = 0;
	quit_ = 0;
	audioDecoderContext_ = 0;
	audioDecoder_ = 0;
	videoDecoderContext_ = 0;
	videoDecoder_ = 0;
}

/**
 *
 */
/**
 * Reconciles a frame's pts with the running video clock: a frame with a real
 * pts resets the clock; a frame without one inherits the clock's value. The
 * clock then advances by one frame duration, stretched for repeated frames
 * (each repeat_pict adds half an extra frame, per FFmpeg convention).
 *
 * @param src_frame  Decoded frame (repeat_pict is consulted).
 * @param pts        Frame pts in seconds, or 0 when unknown.
 * @return The pts actually assigned to this frame.
 */
double VideoPlayer::VideoState::synchronizeVideo(AVFrame *src_frame, double pts) {
	if (pts != 0) {
		// trust the frame's own timestamp and resync the clock to it
		videoClock_ = pts;
	} else {
		// no timestamp: extrapolate from the running clock
		pts = videoClock_;
	}

	// advance the clock by this frame's display duration
	double frame_delay = av_q2d(videoDecoderContext_->time_base);
	frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
	videoClock_ += frame_delay;

	return pts;
}

/**
 *
 */
/**
 * Wakes the decode thread if it is blocked in queuePicture()'s condition
 * waits so it can observe quit_ and exit. Called during shutdown in play().
 */
void VideoPlayer::VideoState::queuePictureStop() {
	Lock l(pictureQueueMutex_);
	pictureQueueCondition_.notify();
}

/**
 *
 */
/**
 * Converts a decoded frame to YV12 and places it in the next picture-queue
 * slot, blocking while the queue is full or while the main thread (re)creates
 * the slot's overlay. Runs on the decode thread.
 *
 * @param pFrame  Decoded source frame.
 * @param pts     Presentation time (seconds) to store with the picture.
 * @return 0 on success, -1 when quit_ was raised while waiting.
 */
int VideoPlayer::VideoState::queuePicture(AVFrame *pFrame, double pts) {
	VideoPicture *vp;
	int dst_pix_fmt;
	AVPicture pict;

	// wait until we have space for a new pic
	{
		Lock l(pictureQueueMutex_);
		while (pictureQueueSize_ >= VIDEO_PICTURE_QUEUE_SIZE && !quit_)
			pictureQueueCondition_.wait(pictureQueueMutex_);
	}

	if (quit_)
		return -1;

	// windex is set to 0 initially
	vp = &PictureQueue_[pictureQueueWIndex_];

	// allocate or resize the buffer!
	if (!vp->bmp || vp->width != videoDecoderContext_->width || vp->height != videoDecoderContext_->height) {
		SDL_Event event;

		vp->allocated = 0;
		// we have to do it in the main thread (SDL overlay creation)
		event.type = FF_ALLOC_EVENT;
		event.user.data1 = this;
		SDL_PushEvent(&event);

		// wait until we have a picture allocated (allocPicture notifies)
		{
			Lock l(pictureQueueMutex_);
			while (!vp->allocated && !quit_)
				pictureQueueCondition_.wait(pictureQueueMutex_);
		}
		if (quit_) {
			return -1;
		}
	}

	// We have a place to put our picture on the queue
	// If we are skipping a frame, do we set this to null
	// but still return vp->allocated = 1?
	if (vp->bmp) {
		SDL_LockYUVOverlay(vp->bmp);

		dst_pix_fmt = PIX_FMT_YUV420P;

		// point pict at the queue; SDL's YV12 layout stores the V plane
		// before U, hence the swapped 1/2 plane indices vs FFmpeg's YUV420P
		pict.data[0] = vp->bmp->pixels[0];
		pict.data[1] = vp->bmp->pixels[2];
		pict.data[2] = vp->bmp->pixels[1];

		pict.linesize[0] = vp->bmp->pitches[0];
		pict.linesize[1] = vp->bmp->pitches[2];
		pict.linesize[2] = vp->bmp->pitches[1];

		// convert the image into YUV format that SDL uses;
		// the scaler context is created lazily on first use and reused
		if (imageConvertContext_ == NULL) {
			int w = videoDecoderContext_->width;
			int h = videoDecoderContext_->height;
			imageConvertContext_ = sws_getContext(w, h, videoDecoderContext_->pix_fmt, w, h, (PixelFormat) dst_pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
			assert(imageConvertContext_ != NULL && "cannot initialize the conversion context!");
		}
		sws_scale(imageConvertContext_, pFrame->data, pFrame->linesize, 0, videoDecoderContext_->height, pict.data, pict.linesize);

		SDL_UnlockYUVOverlay(vp->bmp);
		vp->pts = pts;

		// now we inform our display thread that we have a pic ready
		if (++pictureQueueWIndex_ == VIDEO_PICTURE_QUEUE_SIZE) {
			pictureQueueWIndex_ = 0;
		}

		{
			Lock l(pictureQueueMutex_);
			pictureQueueSize_++;
		}
	}

	return 0;
}

/**
 *
 */
/**
 * Codec release_buffer hook: frees the pts value attached by our_get_buffer
 * before delegating to FFmpeg's default buffer release.
 */
void VideoPlayer::our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
	if (pic)
		av_freep(&pic->opaque);
	avcodec_default_release_buffer(c, pic);
}

/**
 *
 */
/**
 * Codec get_buffer hook: after the default allocation, attaches the pts of
 * the packet currently being decoded (global_video_pkt_pts) to the frame's
 * opaque field so the decode loop can recover per-frame timestamps.
 *
 * NOTE(review): the opaque pts is allocated and set even when the default
 * get_buffer failed (ret < 0), and av_malloc's result is not NULL-checked —
 * consider guarding both.
 */
int VideoPlayer::our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
	int ret = avcodec_default_get_buffer(c, pic);
	uint64_t *pts = (uint64_t*) av_malloc(sizeof(uint64_t));
	*pts = global_video_pkt_pts;
	pic->opaque = pts;
	return ret;
}
