/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 * 
 * modified by hualong 2018/4/11
 * appsink class function:
 * @ create a gstreamer pipeline with an appsink at the end
 * @ expose the appsink buffer for downstream operations, such as OpenCV/CUDA processing

 */

#include "gstAppsink.h"  // declaration the appsink user class
#include "gstUtility.h"  // gstreamer-init  printf message

#include <gst/gst.h>
#include <gst/app/gstappsink.h>

#include <sstream> 
#include <unistd.h>
#include <string.h>
// qt4函数 用于线程同步
#include <QMutex>
#include <QWaitCondition>

// CUDA memory allocation helpers -- used to grab the appsink buffer for processing
#include "cudaMappedMemory.h"
#include "cudaYUV.h"
#include "cudaRGB.h"



// constructor 构造函数
// 复位参数 清空缓存
gstAppsink::gstAppsink()
{	
	// pipeline handles start out unset until init() builds the pipeline
	mAppSink    = NULL;
	mPipeline   = NULL;
	mBus        = NULL;
	mV4L2Device = -1;	// -1 selects the onboard CSI camera by default
	
	// camera geometry -- filled in by Create() and refined by checkBuffer()
	mWidth  = 0;
	mHeight = 0;
	mDepth  = 0;
	mSize   = 0;
	
	// thread-sync primitives: wait() atomically unlocks mWaitMutex and blocks
	// until checkBuffer() signals; mRingMutex serializes access to the
	// ringbuffer bookkeeping shared between the streaming and capture threads
	mWaitEvent = new QWaitCondition();
	mWaitMutex = new QMutex();
	mRingMutex = new QMutex();
	
	mLatestRGBA       = 0;
	mLatestRingbuffer = 0;
	mLatestRetrieved  = false;	// whether the most recent frame was already handed out
	
	// clear every ringbuffer slot
	uint32_t n = 0;
	
	while( n < NUM_RINGBUFFERS )
	{
		mRingbufferCPU[n] = NULL;
		mRingbufferGPU[n] = NULL;
		mRGBA[n]          = NULL;
		++n;
	}
}


// destructor	析构函数
// destructor
// Releases the gstreamer objects acquired in init() and the Qt sync
// primitives allocated in the constructor (previously all leaked).
gstAppsink::~gstAppsink()
{
	// drop the appsink reference taken via gst_bin_get_by_name()
	if( mAppSink != NULL )
	{
		gst_object_unref(mAppSink);
		mAppSink = NULL;
	}

	// release the pipeline bus
	if( mBus != NULL )
	{
		gst_object_unref(mBus);
		mBus = NULL;
	}

	// make sure the pipeline is stopped before releasing it
	if( mPipeline != NULL )
	{
		gst_element_set_state(mPipeline, GST_STATE_NULL);
		gst_object_unref(mPipeline);
		mPipeline = NULL;
	}

	// free the thread-sync objects new'd in the constructor
	delete mWaitEvent;
	delete mWaitMutex;
	delete mRingMutex;

	// NOTE(review): mRingbufferCPU/GPU (cudaAllocMapped) and mRGBA
	// (cudaMalloc/cudaAllocMapped) are still not freed here -- the matching
	// free helper isn't visible in this file; TODO confirm and release them.
}

/* ******************************************************************
// Create 
// 创建 gst pipeline 
// get appsink 
// config appsink 
// set appsink  signal callbakc functon
 @ -----use of the listed below function------
 @ gstreamerInit in gstUtility.h
 @ init  the gstreamer pipeline
******************************************************************* */
// Create
// Factory: initializes gstreamer, constructs a gstAppsink, and builds the
// pipeline via init().
// @param width        requested frame width in pixels
// @param height       requested frame height in pixels
// @param v4l2_device  /dev/video index for a USB camera, or < 0 for the onboard CSI camera
// @returns a ready-to-Open() instance, or NULL on failure
gstAppsink* gstAppsink::Create( uint32_t width, uint32_t height, int v4l2_device )
{
	if( !gstreamerInit() )
	{
		printf(LOG_GSTREAMER "failed to initialize gstreamer API\n");
		return NULL;
	}
	
	gstAppsink* cam = new gstAppsink();
	
	if( !cam )
		return NULL;
	
	cam->mV4L2Device = v4l2_device;
	cam->mWidth      = width;
	cam->mHeight     = height;
	cam->mDepth      = cam->onboardCamera() ? 12 : 24;	// NV12 (12 bpp) or RGB (24 bpp)
	cam->mSize       = (width * height * cam->mDepth) / 8;	// frame size in bytes

	if( !cam->init() )
	{
		printf(LOG_GSTREAMER "failed to init the gstreamer pipeline with appsink\n");
		delete cam;	// BUGFIX: previously leaked the instance on init() failure
		return NULL;
	}
	
	return cam;
}


gstAppsink* gstAppsink::Create( int v4l2_device )
{
	// convenience overload -- forwards with the default resolution (1280x720)
	return gstAppsink::Create( DefaultWidth, DefaultHeight, v4l2_device );
}



// buildLaunchStr  gst-launch pipeline
// buildLaunchStr
// Compose the gst-launch style pipeline description that init() passes to
// gst_parse_launch().  The sink element is always named "mysink" so it can
// be looked up afterwards.
bool gstAppsink::buildLaunchStr()
{
	std::ostringstream ss;

	if( onboardCamera() )
	{
		// onboard CSI camera: nvcamerasrc (NV12) -> nvvidconv -> appsink
	#if NV_TENSORRT_MAJOR > 1	// JetPack 3.1 uses a different flip-method
		const int flipMethod = 0;
	#else
		const int flipMethod = 2;
	#endif

		ss << "nvcamerasrc fpsRange=\"30.0 30.0\" ! video/x-raw(memory:NVMM), width=(int)"
		   << mWidth << ", height=(int)" << mHeight
		   << ", format=(string)NV12 ! nvvidconv flip-method=" << flipMethod << " ! "
		   << "video/x-raw ! appsink name=mysink";
	}
	else
	{
		// USB camera: v4l2src (RGB) -> videoconvert -> appsink
		ss << "v4l2src device=/dev/video" << mV4L2Device << " ! "
		   << "video/x-raw, width=(int)" << mWidth << ", height=(int)" << mHeight << ", "
		   << "format=RGB ! videoconvert ! video/x-raw, format=RGB ! videoconvert !"
		   << "appsink name=mysink";
	}

	mLaunchStr = ss.str();

	printf(LOG_GSTREAMER "gstreamer decoder pipeline string:\n");
	printf("%s\n", mLaunchStr.c_str());
	return true;
}


/* **************************************************************************
// init   initialize the gstreamer pipeline with the end of appsink

 @ buildLaunchStr
 @ gst_parse_launch
 @ GST_APP_SINK
 @ gst_app_sink_set_callbacks
********************************************************************** */
bool gstAppsink::init()
{
	GError* err = NULL;

	// build pipeline string
	if( !buildLaunchStr() )
	{
		printf(LOG_GSTREAMER "gstreamer decoder failed to build pipeline string\n");
		return false;
	}

	// launch pipeline -- parse the gst-launch style description into a pipeline element
	mPipeline = gst_parse_launch(mLaunchStr.c_str(), &err);

	if( err != NULL )
	{
		printf(LOG_GSTREAMER "gstreamer decoder failed to create pipeline\n");
		printf(LOG_GSTREAMER "   (%s)\n", err->message);
		g_error_free(err);
		return false;
	}

	GstPipeline* pipeline = GST_PIPELINE(mPipeline);

	if( !pipeline )
	{
		printf(LOG_GSTREAMER "gstreamer failed to cast GstElement into GstPipeline\n");
		return false;
	}	

	// retrieve pipeline bus (used later by checkMsgBus/Open to poll messages)
	/*GstBus**/ mBus = gst_pipeline_get_bus(pipeline);

	if( !mBus )
	{
		printf(LOG_GSTREAMER "gstreamer failed to retrieve GstBus from pipeline\n");
		return false;
	}

	// add watch for messages (disabled when we poll the bus ourselves, instead of gmainloop)
	//gst_bus_add_watch(mBus, (GstBusFunc)gst_message_print, NULL);

	// get the appsink element declared as "appsink name=mysink" in the launch string
	GstElement* appsinkElement = gst_bin_get_by_name(GST_BIN(pipeline), "mysink");
	GstAppSink* appsink = GST_APP_SINK(appsinkElement);

	if( !appsinkElement || !appsink) // confirm the pipeline's sink really is an appsink
	{
		printf(LOG_GSTREAMER "gstreamer failed to retrieve AppSink element from pipeline\n");
		return false;
	}
	
	mAppSink = appsink;
	
	// setup callbacks -- the set of callback functions to install on the appsink
	GstAppSinkCallbacks cb;
	memset(&cb, 0, sizeof(GstAppSinkCallbacks));
	
	cb.eos         = onEOS;		// invoked at end-of-stream
	cb.new_preroll = onPreroll; // invoked on a new preroll sample (PAUSED state)
	cb.new_sample  = onBuffer;  // invoked for every new sample -- this drives frame capture
	
	gst_app_sink_set_callbacks(mAppSink, &cb, (void*)this, NULL);
	// each eos/new-preroll/new-sample fires the callback; this replaces using signals
	return true;
}


// gst_call_back的三种信号
// omEOS
// onPreroll
// onBuffer
void gstAppsink::onEOS(_GstAppSink* sink, void* user_data)
{
	// end-of-stream notification from the appsink -- just log it
	printf(LOG_GSTREAMER "gstreamer decoder onEOS\n");
}


// onPreroll
// onPreroll -- called for the preroll sample when the pipeline pauses; logged only
GstFlowReturn gstAppsink::onPreroll(_GstAppSink* sink, void* user_data)
{
	printf(LOG_GSTREAMER "gstreamer decoder onPreroll\n");
	return GST_FLOW_OK;
}


// onBuffer  
// Called when a new sample is available. This callback is called from the streaming thread. 
// onBuffer  
// Called from the gstreamer streaming thread whenever a new sample is available.
// user_data carries the gstAppsink instance registered in init().
GstFlowReturn gstAppsink::onBuffer(_GstAppSink* sink, void* user_data)
{
	gstAppsink* dec = (gstAppsink*)user_data;

	if( dec != NULL )
	{
		dec->checkBuffer();	// map the media sample and copy it into the ringbuffer
		dec->checkMsgBus();	// drain the gst message bus
	}

	return GST_FLOW_OK;
}



// Open
// Open
// Transition the pipeline to GST_STATE_PLAYING so frames start flowing.
// @returns false if the state change failed outright, true otherwise
bool gstAppsink::Open()
{
	printf(LOG_GSTREAMER "gstreamer transitioning pipeline to GST_STATE_PLAYING\n");
	
	const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_PLAYING);

	switch( result )
	{
		case GST_STATE_CHANGE_ASYNC:
		{
			// the state change completes asynchronously -- wait up to 5 seconds
			// for either the async-done notification or an error on the bus
			GstMessage* asyncMsg = gst_bus_timed_pop_filtered(mBus, 5 * GST_SECOND, 
			                          (GstMessageType)(GST_MESSAGE_ASYNC_DONE|GST_MESSAGE_ERROR)); 

			if( asyncMsg != NULL )
			{
				gst_message_print(mBus, asyncMsg, this);
				gst_message_unref(asyncMsg);
			}
			else
				printf(LOG_GSTREAMER "gstreamer NULL message after transitioning pipeline to PLAYING...\n");

			break;
		}

		case GST_STATE_CHANGE_SUCCESS:
			break;

		default:
			printf(LOG_GSTREAMER "gstreamer failed to set pipeline state to PLAYING (error %u)\n", result);
			return false;
	}

	checkMsgBus();
	usleep(100*1000);	// give the pipeline ~100ms for the state change to settle
	checkMsgBus();

	return true;
}
	

// Close
void gstAppsink::Close()
{
	// stop pipeline
	printf(LOG_GSTREAMER "gstreamer transitioning pipeline to GST_STATE_NULL\n");

	const GstStateChangeReturn result = gst_element_set_state(mPipeline, GST_STATE_NULL);

	if( result != GST_STATE_CHANGE_SUCCESS )
		printf(LOG_GSTREAMER "gstreamer failed to set pipeline state to PLAYING (error %u)\n", result);

	usleep(250*1000);
}


	

//给video sample的引用计数--
#define release_return { gst_sample_unref(gstSample); return; }



// checkBuffer  将gstreamer appsink的gstbuffe数据 映射出来
void gstAppsink::checkBuffer()
{
	if( !mAppSink )
		return;

// block waiting for the buffer
//return samples when the appsink is in the PLAYING state
//所有的rendered buffers将被放到一个队列中，应用程序已自己的数量处理sample，
//当程序不能足够快地处理sample，队列buffer将会消耗大量内存，尤其在处理raw video frame时。
	GstSample* gstSample = gst_app_sink_pull_sample(mAppSink);
	
	if( !gstSample )
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_app_sink_pull_sample() returned NULL...\n");
		return;
	}
	
	//Get the buffer associated with sample
	GstBuffer* gstBuffer = gst_sample_get_buffer(gstSample);//获取media sample相关的缓冲区
	
	if( !gstBuffer )
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_sample_get_buffer() returned NULL...\n");
		return;
	}
	
	// retrieve
	GstMapInfo map; 
// fills info with the GstMapInfo of all merged memory blocks in buffer
	if(	!gst_buffer_map(gstBuffer, &map, GST_MAP_READ) ) //gstBuffer-->map
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_buffer_map() failed...\n");
		return;
	}
	
	//gst_util_dump_mem(map.data, map.size); 

	void* gstData = map.data; //GST_BUFFER_DATA(gstBuffer);
	const uint32_t gstSize = map.size; //GST_BUFFER_SIZE(gstBuffer);
	
	if( !gstData )
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_buffer had NULL data pointer...\n");
		release_return;
	}
	
	// retrieve caps
	GstCaps* gstCaps = gst_sample_get_caps(gstSample);
	
	if( !gstCaps )
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_buffer had NULL caps...\n");
		release_return;
	}
	
	GstStructure* gstCapsStruct = gst_caps_get_structure(gstCaps, 0);
	
	if( !gstCapsStruct )
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_caps had NULL structure...\n");
		release_return;
	}
	
	// get width & height of the buffer
	int width  = 0;
	int height = 0;
	
	if( !gst_structure_get_int(gstCapsStruct, "width", &width) ||
		!gst_structure_get_int(gstCapsStruct, "height", &height) )
	{
		printf(LOG_GSTREAMER "gstreamer camera -- gst_caps missing width/height...\n");
		release_return;
	}
	
	if( width < 1 || height < 1 )
		release_return;
	
	mWidth  = width;
	mHeight = height;
	mDepth  = (gstSize * 8) / (width * height);  //不太理解 mDepth
	mSize   = gstSize;
	
	//printf(LOG_GSTREAMER "gstreamer camera recieved %ix%i frame (%u bytes, %u bpp)\n", width, height, gstSize, mDepth);
	
	// make sure ringbuffer is allocated
	if( !mRingbufferCPU[0] ) //在creat时就创建了，如果没创建buffe,在这里重新创建
	{
		for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ )
		{
			if( !cudaAllocMapped(&mRingbufferCPU[n], &mRingbufferGPU[n], gstSize) )
				printf(LOG_CUDA "gstreamer camera -- failed to allocate ringbuffer %u  (size=%u)\n", n, gstSize);
		}
		
		printf(LOG_CUDA "gstreamer camera -- allocated %u ringbuffers, %u bytes each\n", NUM_RINGBUFFERS, gstSize);
	}
	
	// copy to next ringbuffer  count++ 16循环
	const uint32_t nextRingbuffer = (mLatestRingbuffer + 1) % NUM_RINGBUFFERS;		
	
	//printf(LOG_GSTREAMER "gstreamer camera -- using ringbuffer #%u for next frame\n", nextRingbuffer);
	memcpy(mRingbufferCPU[nextRingbuffer], gstData, gstSize); //终于找到你，memcpy gstdata到buffer
	gst_buffer_unmap(gstBuffer, &map); 
	//gst_buffer_unref(gstBuffer);
	gst_sample_unref(gstSample);
	
	
	// update and signal sleeping threads
	mRingMutex->lock();
	mLatestRingbuffer = nextRingbuffer;
	mLatestRetrieved  = false;
	mRingMutex->unlock();
	mWaitEvent->wakeAll();
}


// checkMsgBus
void gstAppsink::checkMsgBus()
{
	while(true)
	{
		GstMessage* msg = gst_bus_pop(mBus);

		if( !msg )
			break;

		gst_message_print(mBus, msg, this);
		gst_message_unref(msg);
	}
}




// Capture   把ringbuffer中的数据取出来，供appsink后面的非gstreamer部分使用
// cpu  供CPU使用的指向
// cuda 供GPU使用的指向
// timeout  等待线程同步，超时时间
// Capture
// Hand the most recent ringbuffer frame to the non-gstreamer side of the app.
// @param cpu      if non-NULL, receives the CPU-mapped pointer to the frame
// @param cuda     if non-NULL, receives the GPU pointer to the same frame
// @param timeout  max milliseconds to wait for a new frame
// @returns false on timeout or if the latest frame was already retrieved
bool gstAppsink::Capture( void** cpu, void** cuda, unsigned long timeout )
{
	// sleep until checkBuffer() signals a fresh frame (or the timeout elapses)
	mWaitMutex->lock();
	const bool signalled = mWaitEvent->wait(mWaitMutex, timeout);
	mWaitMutex->unlock();

	if( !signalled )
		return false;

	// snapshot the latest ringbuffer index and mark it as consumed
	mRingMutex->lock();
	const uint32_t latest   = mLatestRingbuffer;
	const bool alreadyTaken = mLatestRetrieved;
	mLatestRetrieved = true;
	mRingMutex->unlock();

	// don't hand out the same frame twice
	if( alreadyTaken )
		return false;

	if( cpu != NULL )
		*cpu = mRingbufferCPU[latest];

	if( cuda != NULL )
		*cuda = mRingbufferGPU[latest];

	return true;
}



/* ***********use of the gstreamer appsink buffer***********************************************
// Takes in captured YUV-NV12 CUDA image, converts to float4 RGBA (with pixel intensity 0-255)
// Set zeroCopy to true if you need to access ConvertRGBA from CPU, otherwise it will be CUDA only.
//@ input 		带转换的image数据指针
//@ output 		转换成RGBA后的数据指针
//@zeroCopy		如果CPU 访问，zeroCopy= ture    如果仅仅CUDA访问，zeroCopy=false
// ConvertRGBA  捕获的YUV-NV12  转换为RGBA(实际就是分配 RGBA内存buffer)
******************************************************************************  */
// ConvertRGBA
// Convert a captured frame to float4 RGBA (pixel intensity 0-255) on the GPU.
// @param input    captured image (NV12 for the onboard camera, RGB for V4L2/USB)
// @param output   receives a pointer into the internal RGBA ring of NUM_RINGBUFFERS buffers
// @param zeroCopy true if the CPU must be able to access the output; false for CUDA-only buffers
// @returns true on success, false on bad args, allocation failure, or CUDA conversion error
bool gstAppsink::ConvertRGBA( void* input, void** output, bool zeroCopy )
{
	if( !input || !output )
		return false;
	
	// lazily allocate the RGBA ringbuffers on first use
	if( !mRGBA[0] )
	{
		const size_t size = mWidth * mHeight * sizeof(float4);

		for( uint32_t n=0; n < NUM_RINGBUFFERS; n++ )
		{
			if( zeroCopy )	// CPU access required -> shared CPU/GPU (zeroCopy) memory
			{
				void* cpuPtr = NULL;
				void* gpuPtr = NULL;

				if( !cudaAllocMapped(&cpuPtr, &gpuPtr, size) )
				{
					// BUGFIX: format string was malformed ('%ux%xu')
					printf(LOG_CUDA "gstAppsink -- failed to allocate zeroCopy memory for %ux%u RGBA texture\n", mWidth, mHeight);
					return false;
				}

				if( cpuPtr != gpuPtr )	// not UVA-shared CPU/GPU memory
				{
					printf(LOG_CUDA "gstAppsink -- zeroCopy memory has different pointers, please use a UVA-compatible GPU\n");
					return false;
				}

				mRGBA[n] = gpuPtr;	// same address is valid on both CPU and GPU
			}
			else	// CPU access not needed -> device-only CUDA memory
			{
				if( CUDA_FAILED(cudaMalloc(&mRGBA[n], size)) )
				{
					printf(LOG_CUDA "gstAppsink -- failed to allocate memory for %ux%u RGBA texture\n", mWidth, mHeight);
					return false;
				}
			}
			// NOTE(review): if allocation fails partway through, slots already
			// assigned stay in mRGBA[] while later slots remain NULL -- a later
			// call would skip allocation (mRGBA[0] non-NULL); TODO confirm callers
			// treat a false return as fatal.
		}
		
		printf(LOG_CUDA "gstreamer camera -- allocated %u RGBA ringbuffers\n", NUM_RINGBUFFERS);
	}
	
	if( onboardCamera() )
	{
		// onboard camera frames are NV12
		if( CUDA_FAILED(cudaNV12ToRGBAf((uint8_t*)input, (float4*)mRGBA[mLatestRGBA], mWidth, mHeight)) )
			return false;
	}
	else
	{
		// USB webcam frames are RGB
		if( CUDA_FAILED(cudaRGBToRGBAf((uchar3*)input, (float4*)mRGBA[mLatestRGBA], mWidth, mHeight)) )
			return false;
	}
	
	*output     = mRGBA[mLatestRGBA];
	mLatestRGBA = (mLatestRGBA + 1) % NUM_RINGBUFFERS;	// advance the ring for the next frame
	return true;
}