/**********************************************************************************
 ***
 ***    Screencasting Teaching System C++ Library
 ***    Copyright (C) 2011  Jiri Novak <jiri.novak@petriny.net>
 ***                        Wadi Jalil Maluf <wadijm@gmail.com>
 ***
 ***    This file is part of the Screencasting Teaching System C++ Library.
 ***
 ***    This library is free software; you can redistribute it and/or modify
 ***    it under the terms of the GNU General Public License as published by
 ***    the Free Software Foundation; either version 3 of the License, or
 ***    (at your option) any later version.
 ***
 ***    This library is distributed in the hope that it will be useful,
 ***    but WITHOUT ANY WARRANTY; without even the implied warranty of
 ***    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 ***    GNU General Public License for more details.
 ***
 ***    You should have received a copy of the GNU General Public License
 ***    along with this library; if not, write to the Free Software
 ***    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 ***
 *********************************************************************************/

#include "capture_workspace_x11_alsa_impl.h"
#include "../serverservice.h"
#include <cassert>
#include <iostream>

#define SCREEN_WIDTH 800
#define SCREEN_HEIGHT 600
#define VIDEO_BITRATE 64000
#define X11_DISPLAY ":0.0"
#define AUDIO_SAMPLERATE 44100
#define AUDIO_BITRATE 80000
#define ALSA_DEVICE "default"

/**
 * Constructs the workspace and registers all FFmpeg devices, filters and
 * codecs so the x11grab and ALSA input devices can be opened later.
 *
 * Registration mutates FFmpeg's library-global tables, so it is serialized
 * with the same mutex that guards capture initialization and clean-up.
 * NOTE(review): the initializer-list order must match the member
 * declaration order in the header (not visible here) — do not reorder.
 */
CaptureWorkspaceX11AlsaImpl::CaptureWorkspaceX11AlsaImpl() :
	logger_(log4cxx::Logger::getLogger("client.media.capture_workspace_x11_alsa_impl")),
	capturing_(false), activeLesson_(0),
	videoThread_(*this), audioThread_(*this) {
	//////////////////////////////////////////
	// register devices, filters and codecs //
	//////////////////////////////////////////

	Lock l(initializeAndCleanUpFFMPEGmutex_);
	avdevice_register_all();
	av_register_all();
}

/**
 * NOTE(review): does not stop a running screencast; callers are expected
 * to invoke stopScreencast() before destruction, otherwise the capture
 * threads may keep referencing this object — verify against the owning
 * code.
 */
CaptureWorkspaceX11AlsaImpl::~CaptureWorkspaceX11AlsaImpl() {
}

/**
 * Starts streaming the screencast of the given lesson.
 *
 * @param lesson lesson the captured data is associated with; must not be
 *        NULL.  It is stored before the capturing flag is raised so the
 *        capture threads observe a valid pointer.
 *
 * If a capture is already running, only the active lesson pointer is
 * swapped; the running threads keep streaming.
 *
 * NOTE(review): capturing_ is written here and read by the capture
 * threads without synchronization — presumably intended as a simple
 * stop flag; consider making it atomic (requires a header change).
 */
void CaptureWorkspaceX11AlsaImpl::startScreencast(Lesson *lesson) {
	assert(lesson != 0 && "lesson pointer is NULL!");
	this->activeLesson_ = lesson;

	if (!capturing_) {
		this->capturing_ = true;
		videoThread_.start();
		audioThread_.start();
	}
}

/**
 * Stops the screencast: clears the capturing flag (which makes both
 * capture loops exit after their current iteration) and then blocks
 * until the video and audio threads have finished.
 *
 * NOTE(review): if no screencast was ever started this joins threads
 * that never ran — confirm the Thread implementation tolerates joining
 * an unstarted thread.
 */
void CaptureWorkspaceX11AlsaImpl::stopScreencast() {
	capturing_ = false;
	videoThread_.join();
	audioThread_.join();
}

/**
 * Video capture thread bound to its owning workspace.  No FFmpeg state
 * is touched here; initVideoCapture() performs the lazy setup on the
 * capture thread itself.
 *
 * @param outer workspace owning the shared logger, mutex and flags
 */
CaptureWorkspaceX11AlsaImpl::CaptureVideoX11::CaptureVideoX11(CaptureWorkspaceX11AlsaImpl &outer)
	: initialized(false), outer_(outer) {
}

/**
 * Releases the FFmpeg video capture resources, but only when
 * initVideoCapture() has actually run.
 */
CaptureWorkspaceX11AlsaImpl::CaptureVideoX11::~CaptureVideoX11() {
	if (!initialized)
		return;

	LOG4CXX_INFO(outer_.logger_, "cleaning up video capturing...");
	cleanUp();
}

/**
 * Lazily sets up the whole FFmpeg video pipeline: opens the x11grab
 * input device, a decoder for the grabbed frames, an MPEG-2 encoder for
 * the outgoing stream, the frame buffers and the BGRA->YUV420P scaler.
 *
 * Runs under the shared FFmpeg mutex because the legacy open/close APIs
 * touch library-global state.
 *
 * Fix over the previous revision: the initialized flag is now actually
 * raised on success, so the destructor's conditional cleanUp() runs and
 * the FFmpeg contexts/buffers are no longer leaked.
 */
void CaptureWorkspaceX11AlsaImpl::CaptureVideoX11::initVideoCapture() {
	/////////////////////
	// INPUT - X11GRAB //
	/////////////////////

	Lock l(outer_.initializeAndCleanUpFFMPEGmutex_);

	int retval;

	// file format
	videoInputFormat_ = av_find_input_format("x11grab");
	assert(videoInputFormat_ != NULL && "input video format unrecognized - videoInputFormat pointer is NULL!");

	// format context
	videoInputFormatContext_ = avformat_alloc_context();
	assert(videoInputFormatContext_ != NULL && "input video format context could not be allocated - videoInputContext pointer is NULL!");

	// grab geometry and frame rate of the X11 screen region
	AVFormatParameters videoInputFormatParameters, *vip = &videoInputFormatParameters;
	memset(vip, 0, sizeof(*vip));
	vip->prealloced_context = 1;	// reuse the context allocated above
	vip->time_base = (AVRational) {1,25};	// 25 fps
	vip->width = SCREEN_WIDTH;
	vip->height = SCREEN_HEIGHT;

	// open device
	retval = av_open_input_file(&videoInputFormatContext_, X11_DISPLAY, videoInputFormat_, 0, vip);
	assert(retval >= 0 && "input video device could not be opened - retval < 0!");

	// retrieve stream information
	retval = av_find_stream_info(videoInputFormatContext_);
	assert(retval >= 0 && "could not retrieve input video stream info - retval < 0!");

	// get a pointer to the codec context for the video stream (x11grab exposes a single stream)
	videoInputDecoderContext_ = videoInputFormatContext_->streams[0]->codec;
	assert(videoInputDecoderContext_ != NULL && "videoInputDecoderContext is NULL!");

	// find the decoder for the video stream
	videoInputDecoder_ = avcodec_find_decoder(videoInputDecoderContext_->codec_id);
	assert(videoInputDecoder_ != NULL && "videoInputDecoder is NULL!");

	// open the decoder
	retval = avcodec_open(videoInputDecoderContext_, videoInputDecoder_);
	assert(retval >= 0 && "video input decoder could not be opened - retval < 0!");

	// create the encoder context
	videoInputEncoderContext_ = avcodec_alloc_context();
	assert(videoInputEncoderContext_ != NULL && "videoInputEncoderContext is NULL!");

	// put sample parameters; geometry and timing are copied from the grabbed stream
	videoInputEncoderContext_->bit_rate = VIDEO_BITRATE;
	videoInputEncoderContext_->width = videoInputDecoderContext_->width;
	videoInputEncoderContext_->height = videoInputDecoderContext_->height;
	videoInputEncoderContext_->time_base = videoInputDecoderContext_->time_base;
	videoInputEncoderContext_->gop_size = 10;	// one intra frame every 10 frames
	videoInputEncoderContext_->max_b_frames = 1;
	videoInputEncoderContext_->pix_fmt = PIX_FMT_YUV420P;
	videoInputEncoderContext_->codec_id = CODEC_ID_MPEG2VIDEO;
	videoInputEncoderContext_->codec_type = CODEC_TYPE_VIDEO;

	// find the encoder
	videoInputEncoder_ = avcodec_find_encoder(videoInputEncoderContext_->codec_id);
	assert(videoInputEncoder_ != NULL && "videoInputEncoder is NULL!");

	// open the codec
	retval = avcodec_open(videoInputEncoderContext_, videoInputEncoder_);
	assert(retval >= 0 && "could not open encoder codec - retval < 0!");

	// allocate necessary video frames
	pFrame_ = avcodec_alloc_frame();
	pFrameYUV_ = avcodec_alloc_frame();
	assert(pFrame_ != NULL && "video frame could not be allocated - pFrame is NULL!");
	assert(pFrameYUV_ != NULL && "video frame could not be allocated - pFrameYUV is NULL!");

	// calculate the bytes needed for one YUV picture and create the shared
	// output buffer; the 10x headroom presumably covers encoder output that
	// exceeds one raw picture — TODO confirm the factor is really needed
	nbytes_ = avpicture_get_size(PIX_FMT_YUV420P, videoInputEncoderContext_->width, videoInputEncoderContext_->height);
	outbuffer_ = (uint8_t*) av_malloc(nbytes_ * 10);
	assert(outbuffer_ != NULL && "output buffer could not be allocated - outbuffer is NULL!");

	// assign appropriate parts of buffer to image planes in pFrameYUV
	avpicture_fill((AVPicture *) pFrameYUV_, outbuffer_, PIX_FMT_YUV420P, videoInputEncoderContext_->width, videoInputEncoderContext_->height);

	// create convert context (x11grab delivers BGRA, the encoder wants YUV420P)
	img_convert_ctx_ = sws_getContext(videoInputDecoderContext_->width, videoInputDecoderContext_->height, PIX_FMT_BGRA, videoInputEncoderContext_->width, videoInputEncoderContext_->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	assert(img_convert_ctx_ != NULL && "scaler context could not be created - img_convert_ctx is NULL!");

	// remember that setup succeeded so the destructor runs cleanUp()
	initialized = true;
}

/**
 * Thread entry point of the video capture: performs the one-time FFmpeg
 * setup on first invocation, then pumps frames until the outer
 * workspace clears its capturing flag.
 */
void CaptureWorkspaceX11AlsaImpl::CaptureVideoX11::run() {
	if (!initialized) {
		LOG4CXX_INFO(outer_.logger_, "initializing the video capturing...");
		initVideoCapture();
	}

	LOG4CXX_INFO(outer_.logger_, "starting the video transcoding and sending...");
	transcode();
}

/**
 *
 */
void CaptureWorkspaceX11AlsaImpl::CaptureVideoX11::transcode() {
	/////////////////
	// TRANSCODING //
	/////////////////

	int frameFinished, out_size, ii;

	// transcode
	while (outer_.capturing_) {
		AVPacket packet;

		// decode
		av_read_frame(videoInputFormatContext_, &packet);
		avcodec_decode_video2(videoInputDecoderContext_, pFrame_, &frameFinished, &packet);

		// if we get the complete frame
		if (frameFinished) {
			// convert it to YUV
			sws_scale(img_convert_ctx_, pFrame_->data, pFrame_->linesize, 0, videoInputEncoderContext_->height, pFrameYUV_->data, pFrameYUV_->linesize);

			// encode it
			LOG4CXX_TRACE(outer_.logger_, "Encoding nbytes: " << nbytes_);
			out_size = avcodec_encode_video(videoInputEncoderContext_, outbuffer_, nbytes_, pFrameYUV_);

			// if it worked, add syncronization and send throught socket
			if (out_size > 0) {
				AVPacket packetOut;
				av_init_packet(&packetOut);

				if ((unsigned) videoInputEncoderContext_->coded_frame->pts != AV_NOPTS_VALUE)
					packetOut.pts = av_rescale_q(videoInputEncoderContext_->coded_frame->pts, videoInputEncoderContext_->time_base, videoInputEncoderContext_->time_base);
				if (videoInputEncoderContext_->coded_frame->key_frame)
					packetOut.flags |= AV_PKT_FLAG_KEY;
				//packetOut.stream_index = 0;
				packetOut.data = outbuffer_;
				packetOut.size = out_size;

				// send it through socket
				LOG4CXX_TRACE(outer_.logger_, "Sending VIDEO AVPacket with pts: " << packetOut.pts << " and size: " << packetOut.size);
				ServerService::getInstance().sendScreencastData(outer_.activeLesson_, packetOut.data, packetOut.size, packetOut.pts);

				av_free_packet(&packetOut);
			}
		}

		av_free_packet(&packet);
		ii++;
	}
}

/**
 *
 */
void CaptureWorkspaceX11AlsaImpl::CaptureVideoX11::cleanUp() {
	Lock l(outer_.initializeAndCleanUpFFMPEGmutex_);
	av_free(outbuffer_);
	av_free(pFrame_);
	av_free(pFrameYUV_);
	sws_freeContext(img_convert_ctx_);
	avcodec_close(videoInputDecoderContext_);
	avcodec_close(videoInputEncoderContext_);
	av_close_input_file(videoInputFormatContext_);
}

/**
 * Audio capture thread bound to its owning workspace.  No FFmpeg state
 * is touched here; initAudioCapture() performs the lazy setup on the
 * capture thread itself.
 *
 * @param outer workspace owning the shared logger, mutex and flags
 */
CaptureWorkspaceX11AlsaImpl::CaptureAudioAlsa::CaptureAudioAlsa(CaptureWorkspaceX11AlsaImpl &outer)
	: initialized_(false), outer_(outer) {
}

/**
 * Releases the FFmpeg audio capture resources, but only when
 * initAudioCapture() has actually run.
 */
CaptureWorkspaceX11AlsaImpl::CaptureAudioAlsa::~CaptureAudioAlsa() {
	if (!initialized_)
		return;

	LOG4CXX_INFO(outer_.logger_, "cleaning up audio capturing...");
	cleanUp();
}

/**
 *
 */
void CaptureWorkspaceX11AlsaImpl::CaptureAudioAlsa::initAudioCapture() {
	//////////////////
	// INPUT - ALSA //
	//////////////////

	Lock l(outer_.initializeAndCleanUpFFMPEGmutex_);

	int retval;

	// file format
	audioInputFormat_ = av_find_input_format("alsa");
	assert(audioInputFormat_ != NULL && "input audio format unrecognized - audioInputFormat_ pointer is NULL!");

	// format context
	audioInputFormatContext_ = avformat_alloc_context();
	assert(audioInputFormatContext_ != NULL && "input audio format context could not be allocated - audioInputContext_ pointer is NULL!");

	AVFormatParameters audioInputFormatParameters, *aip = &audioInputFormatParameters;
	memset(aip, 0, sizeof(*aip));
	aip->prealloced_context = 1;
	aip->sample_rate = AUDIO_SAMPLERATE;
	aip->channels = 1;

	// open device
	retval = av_open_input_file(&audioInputFormatContext_, ALSA_DEVICE, audioInputFormat_, 0, aip);
	assert(retval >= 0 && "input audio device could not be opened - retval < 0!");

	// retreive stream information
	retval = av_find_stream_info(audioInputFormatContext_);
	assert(retval >= 0 && "could not retrieve input audio stream info - retval < 0!");

	// get a pointer to the codec context for the video stream
	audioInputDecoderContext_ = audioInputFormatContext_->streams[0]->codec;
	assert(audioInputDecoderContext_ != NULL && "audioInputDecoderContext is NULL!");

	// find the decoder for the video stream
	audioInputDecoder_ = avcodec_find_decoder(audioInputDecoderContext_->codec_id);
	assert(audioInputDecoder_ != NULL && "audioInputDecoder is NULL!");

	// open the decoder
	retval = avcodec_open(audioInputDecoderContext_, audioInputDecoder_);
	assert(retval >= 0 && "audio input decoder could not be opened - retval < 0!");

	// create the encoder context
	audioInputEncoderContext_ = avcodec_alloc_context();
	assert(audioInputEncoderContext_ != NULL && "audioInputEncoderContext is NULL!");

	// put sample parameters
	audioInputEncoderContext_->codec_id = CODEC_ID_MP2;
	audioInputEncoderContext_->codec_type = CODEC_TYPE_AUDIO;
	audioInputEncoderContext_->bit_rate = AUDIO_BITRATE;
	audioInputEncoderContext_->sample_rate = AUDIO_SAMPLERATE;
	audioInputEncoderContext_->channels = 1;
	audioInputEncoderContext_->sample_fmt = SAMPLE_FMT_S16;

	// find the encoder
	audioInputEncoder_ = avcodec_find_encoder(audioInputEncoderContext_->codec_id);
	assert(audioInputEncoder_ != NULL && "audioInputEncoder is NULL!");

	// open the codec
	retval = avcodec_open(audioInputEncoderContext_, audioInputEncoder_);
	assert(retval >= 0 && "could not open encoder codec - retval < 0!");

	// alocate buffers
    audioOutbufSize_ = 10000; //AVCODEC_MAX_AUDIO_FRAME_SIZE;
    audioOutbuf_ = (uint8_t*) av_malloc(audioOutbufSize_);

    // ugly hack for PCM codecs (will be removed ASAP with new PCM
    // support to compute the input frame size in samples
    if (audioInputEncoderContext_->frame_size <= 1) {
        audioInputFrameSize_ = audioOutbufSize_ / audioInputEncoderContext_->channels;
        switch(audioInputEncoderContext_->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audioInputFrameSize_ >>= 1;
            break;
        default:
            break;
        }
    } else {
        audioInputFrameSize_ = audioInputEncoderContext_->frame_size;
    }

    audioInputFrameSize_ = audioInputEncoderContext_->frame_size;
    samples_ = (int16_t*) av_malloc(audioInputFrameSize_ * 2 * audioInputEncoderContext_->channels);
}

/**
 * Thread entry point of the audio capture: performs the one-time FFmpeg
 * setup on first invocation, then pumps audio until the outer workspace
 * clears its capturing flag.
 */
void CaptureWorkspaceX11AlsaImpl::CaptureAudioAlsa::run() {
	if (!initialized_) {
		LOG4CXX_INFO(outer_.logger_, "initializing the audio capturing...");
		initAudioCapture();
	}

	LOG4CXX_INFO(outer_.logger_, "starting the audio transcoding and sending...");
	transcode();
}

/**
 *
 */
void CaptureWorkspaceX11AlsaImpl::CaptureAudioAlsa::cleanUp() {
	Lock l(outer_.initializeAndCleanUpFFMPEGmutex_);
    av_free(samples_);
    av_free(audioOutbuf_);
	avcodec_close(audioInputDecoderContext_);
	avcodec_close(audioInputEncoderContext_);
	av_close_input_file(audioInputFormatContext_);
}

/**
 *
 */
void CaptureWorkspaceX11AlsaImpl::CaptureAudioAlsa::transcode() {
	/////////////////
	// TRANSCODING //
	/////////////////

	while (outer_.capturing_) {
		AVPacket pkt;

		// decode
		int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
		av_read_frame(audioInputFormatContext_, &pkt);
		avcodec_decode_audio3(audioInputDecoderContext_, samples_, &out_size, &pkt);
		av_free_packet(&pkt);

		// encode it
	    av_init_packet(&pkt);

	    pkt.size = avcodec_encode_audio(audioInputEncoderContext_, audioOutbuf_, audioOutbufSize_, samples_);

	    //if ((unsigned) audioInputEncoderContext_->coded_frame->pts != AV_NOPTS_VALUE)
	      //pkt.pts= av_rescale_q(audioInputEncoderContext_->coded_frame->pts, audioInputEncoderContext_->time_base, audioInputEncoderContext_->time_base);
	    //pkt.flags |= PKT_FLAG_KEY;
	    //pkt.stream_index = 1;

	    pkt.pts = 0;
	    pkt.data = audioOutbuf_;

		// send it through socket
		LOG4CXX_TRACE(outer_.logger_, "Sending AUDIO AVPacket with pts: " << pkt.pts << " and size: " << pkt.size);
		ServerService::getInstance().sendScreencastAudioData(outer_.activeLesson_, pkt.data, pkt.size, pkt.pts);

		av_free_packet(&pkt);
	}
}
