/* filename : ffdeck.9_???mutilout???.cpp
 * - 개선 사항 -
 * 1. queue bug fix
 * 2. encoding 된 data의 queue를 video와 audio의 pts를 비교하여 작은 pts부터 처리하도록 변경
 * 3. 마지막에 encode queue(video와 audio)에 있는 것을 모두 읽어들여 interleave하도록 하였음
 * 4. video의 avstream->avcodeccontext->timebase를 수정하여 CBR(고정 bitrate)로 설정하였음.
 * 5. 변수명 "mux_" 모두 제거, global 변수 prefix "g_" 등 변수이름 변경하였음.
 * 6. queue에 retainCount를 사용하여 queue를 관리하기
 * 7. pool를 실시간으로 할당하는것이 아니라 초기에 일정크기로 미리 할당해놓고 처리하는 기능 추가
 * 8. KBS2 TV의 설치되어있는 DeckLink에서는 frame이 늦게 나와서 audio pts 계산이 잘못 되는 문제 해결
 * 9. jpeg thumbnail을 av_interleaved_write_frame()를 이용하여 저장하기
 * 10. audioPacket(IDeckLinkAudioInputPacket)이 NULL이 발생하더라고 pts를 조절하고 data에 "0"을 채워넣도록 수정하였음
 *
 * - 문제점 -
 * 1. video의 avstream->avcodeccontext의 option중에 aspect를 설정하면 프로그램이 종료된다.
 * 2. 인코딩이나, video frame 및 audio frame의 memory copy의 처리가 오래걸리면
 *    HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived()함수가 호출(Callback)되지 못하여
 *    Video frame 이나 Audio frame을 받지 못하는 상황이 발생할 수 있다.
 *    이것은 실시간으로 스트리밍 하지 않는 thread를 쉬게 함으로써 문제를 해결하고,
 *    memory copy를 thread로 처리함으로써 문제를 해결할 수 있을것으로 판단된다.
 *
 * - 계획된 작업 -
 * 1. frame의 size가 크거나 bitrate가 커서 encoding 처리가 오래 걸리를 thread에게 cpu를 조금더 할당하도록 보완하기
 *    보완방법은 FFDeckLinkQueue의 개수를 확인하여 frame의 size가 상대적으로 작은 thead가 쉬는 코드를 삽입할 것이다.
 * 2. audio는 AVFIFO를 사용하기
 * 3. qscale 0.5
 * 4. queue를 실시간으로 할당하는것이 아니라 초기에 일정크기로 미리 할당해놓고 처리하는 기능 추가
 * 5. 하나의 out file이 아니라 여러가지(size, bitrate 등...) out file로 가능하도록 처리하기
 * 6. bitstream filter(bsf) 넣어서 rtsp에서 error나는 것 해결하기
 * 7. 90k tbn(timebase of AVStream), 59.94k tbc(timebase of AVCodecContext), 29.97tbr(r_frame_rate of AVStream)
 *    나올 수 있도록 수정하기
 * 8. 메시지큐 사용해보기
 *    메시지 큐 생성
 *    key_id = msgget((key_t)8888, IPC_CREAT|0666);
 *
 * - 이름짓는 규칙 (작명 규칙) -
 * 1.변수명 규칙
 * 변수는 기본적으로 소문자로 시작하는것으로 하고 prefix 다음에는 대문자로 시작한다.
 * 전역 변수 prefix : "g" 대문자로 시작
 * 멤버 변수 prefix : "m"  대문자로 시작
 * DeckLink관련 변수 prefix : (전역|멤버) "DL_" 소문자로시작
 * 지역 변수 : prefix 없음   소문자로 시작
 *
 * 2.함수명 규칙
 * 소문자로 시작, 되도록 동사로 시작
 *
 * 3.클래스 명 규칙
 * 대문자로 시작
 *   객체(instance)는 소문자로 시작
 *
 * 4.구조체 type 명 규칙
 * 대문자로 시작
 *   구조체 변수는 소문자로 시작
 *
 */
/* -LICENSE-START-
** Copyright (c) 2009 Blackmagic Design
** Copyright (c) 2011 Luca Barbato
**    with additions/fixes from Christian Hoffmann, 2012
**
** Permission is hereby granted, free of charge, to any person or organization
** obtaining a copy of the software and accompanying documentation covered by
** this license (the "Software") to use, reproduce, display, distribute,
** execute, and transmit the Software, and to prepare derivative works of the
** Software, and to permit third-parties to whom the Software is furnished to
** do so, all subject to the following:
**
** The copyright notices in the Software and this entire statement, including
** the above license grant, this restriction and the following disclaimer,
** must be included in all copies of the Software, in whole or in part, and
** all derivative works of the Software, unless such copies or derivative
** works are solely in the form of machine-executable object code generated by
** a source language processor.
**
** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
** SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
** FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
** ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
** DEALINGS IN THE SOFTWARE.
** -LICENSE-END-
*/

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <pthread.h>
#include <unistd.h>
#include <fcntl.h>

#include "DeckLinkAPI.h"
#include "Capture.h"
#include "compat.h"
/* "compat.h"안에는
 * DECKLINK_SET_VIDEO_CONNECTION(x)와 DECKLINK_SET_AUDIO_CONNECTION(x)를 정의되어있다.
 * 그런데 deckLinkConfiguration변수를 이용하여 정의하였다.
 * 이 변수명을 바꾸고 싶어 아래와 같이 재 정의(#define) 하였다.
 */
#define deckLinkConfiguration gDL_configuration

extern "C" {
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavutil/opt.h>
#include <ctype.h>
#include <libavutil/avstring.h>
#include <libavutil/parseutils.h>
}
#include "myLog.h"
#include "FFDeckLinkBuffer.h"
#include "FFDeckLinkPool.h"

/* Global DeckLink device handles; filled in during capture initialization. */
IDeckLink						*gDecklink = NULL;
IDeckLinkIterator				*gDL_iterator = NULL;
IDeckLinkInput					*gDL_input = NULL;
IDeckLinkDisplayModeIterator	*gDL_displayModeIterator = NULL;
IDeckLinkDisplayMode			*gDL_displayMode = NULL;
//IDeckLinkDeckControl*			gDL_control = NULL;
IDeckLinkConfiguration          *gDL_configuration   =   NULL;
//IBMDStreamingDeviceInput*		gStreamingDeviceInput = NULL;
//BMDStreamingDeviceMode        gStreamingDeviceMode = 0;
//BMDVideoConnection             gVideoConnector = 0;
//BMDDisplayMode					gDisplayMode = 0;

/* Mutex/condition pair the main thread sleeps on until capture is told to stop. */
pthread_mutex_t   gSleepMutex;
pthread_cond_t   gSleepCond;
int   gDropped = 0,            // frames dropped (no input signal detected)
	  gVideoModeIndex = -1,    // selected DeckLink display-mode index (-1 = not chosen)
	  gAudioChannels = 2,      // capture audio channel count
	  gAudioSampleDepth = 16;  // capture audio bits per sample
bool   gVerbose = false;       // verbose logging switch
int64_t   gMaxFrames = -1,     // stop after this many frames (-1 = no limit)
		  gFrameCount = 0;     // frames received so far in VideoInputFrameArrived()
/* MemoryLimit
 * Upper bounds for the pre-allocated pools/queues.
 * The defaults below mean "effectively unlimited".
 */
typedef struct MemoryLimit{
	uint64_t   sizeLimit;//total byte limit (default: unlimited)
	uint16_t   nbAudioPool;//max audio pool entries (default: unlimited)
	uint16_t   nbVideoPool;//max video pool entries (default: unlimited)
	uint16_t   nbAudioQueue;//max audio queue entries (default: unlimited)
	uint16_t   nbVideoQueue;//max video queue entries (default: unlimited)
}MemoryLimit;
MemoryLimit gMaxMem = {ULONG_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
//const char   *gAudioOutputFileName = NULL;
//const char   *gAudioOutputFileName = NULL;

/* FFDeckLinkQueue
 * Circular, singly linked queue of audio/video AVPackets.
 * It is initialized with 2 pre-allocated nodes and grows on demand.
 * Once grown it never shrinks until program exit: nodes are deliberately
 * kept allocated so their memory can be reused for later packets.
 */
typedef struct FFDeckLinkQueue{
	/* pFirst
	 * Pointer to the first AVPacketList node of the ring.
	 */
	AVPacketList   *pFirst;

	/* pHead
	 * Next AVPacketList node to be read.
	 * pTail
	 * Last AVPacketList node that currently holds data.
	 */
	AVPacketList   *pHead, *pTail;

	/* nbTotal
	 * Total number of allocated AVPacketList nodes in the ring.
	 */
	unsigned int   nbTotal;

	/* nbUse
	 * Number of nodes currently holding data.
	 */
	unsigned int   nbUse;

	/* size
	 * Total number of payload/node bytes currently accounted for.
	 */
	unsigned long long   size;

	/* mutex
	 * Mutex protecting all queue state
	 * (lock/unlock around every access, plus signal/wait).
	 */
	pthread_mutex_t   mutex;

	/* cond
	 * Condition variable used to wake consumers waiting for a packet.
	 */
	pthread_cond_t   cond;
}FFDeckLinkQueue;

/* STREAM_PIX_FMT_IN, STREAM_PIX_FMT_OUT
 * Pixel format of the video stream produced by the DeckLink card: UYVY422.
 * Pixel format of the encoded output video stream: YUV420P.
 */
#define STREAM_PIX_FMT_IN    		PIX_FMT_UYVY422
#define STREAM_PIX_FMT_OUT   		PIX_FMT_YUV420P

/* AUDIO_STREAM_INDEX, VIDEO_STREAM_INDEX
 * Stream index numbers assigned to the AVPackets built from the
 * DeckLink audio and video capture data.
 */
#define AUDIO_STREAM_INDEX 			0
#define VIDEO_STREAM_INDEX 			1

/* OutputStream
 * Book-keeping for one output AVStream (audio or video).
 */
typedef struct OutputStream{
	/* file_index
	 * Index of the file this stream is written to;
	 * equals the index into gOutputFiles.
	 */
	int   file_index;

	/* source_index
	 * AVPacket.stream_index of the packets produced from the DeckLink input.
	 * Always 0 or 1:
	 * decklinkPkt.stream_index (InputStream index) AUDIO_STREAM_INDEX or VIDEO_STREAM_INDEX
	 */
	int   source_index;

	/* st
	 * Pointer to the actual AVStream.
	 */
	AVStream   *st;

	/* pictureOfDeckLink
	 * AVFrame holding the raw DeckLink video in the UYVY422 pixel format.
	 * Allocated for video streams only (left NULL for audio).
	 * Allocated once and reused for every frame; must be freed at shutdown.
	 */
	AVFrame   *pictureOfDeckLink;

	/* frame
	 * AVFrame holding pictureOfDeckLink converted from UYVY422 to YUV420P.
	 * Allocated for video streams only (left NULL for audio).
	 * Allocated once and reused; must be freed at shutdown.
	 */
	AVFrame   *frame;			//video(AVPicture) or audio(AVFrame); holds data ready for encoding

	/* imgSwsCtx
	 * swscale context for the UYVY422 -> YUV420P pixel-format conversion.
	 * Allocated for video streams only (left NULL for audio).
	 * Allocated once and reused; must be freed at shutdown.
	 */
	SwsContext *imgSwsCtx;

	/* rawPacketQueue
	 * Queue of raw DeckLink packets, still in UYVY422 form
	 * (before any pixel-format conversion).
	 * Initialized with 2 pre-allocated AVPacket nodes.
	 */
    FFDeckLinkQueue   rawPacketQueue;

    /* encodedPacketQueue
     * Queue of encoded AVPackets (after the YUV420P conversion and encode).
	 * Initialized with 2 pre-allocated AVPacket nodes.
     */
	FFDeckLinkQueue   encodedPacketQueue;

	/* pthEnc
	 * Thread id of the encodeAudioFromStream / encodeVideoFromStream thread
	 * that encodes this OutputStream (avcodec_encode_audio2 / _video2).
	 * Used for pthread_join at shutdown.
	 */
    pthread_t   pthEnc;
}OutputStream;

/* OutputFile
 * Book-keeping for one output file.
 */
typedef struct OutputFile {
	/* filename
	 * Output file name (currently unused).
	 */
	//char   filename[256];

	/* ctx
	 * AVFormatContext describing the output file.
	 */
    AVFormatContext   *ctx;

    /* output_streams
     * The audio and video OutputStreams belonging to this file.
     */
    OutputStream   output_streams[2];

    /* videoResolution
     * Resolution of the video output stream (num = width, den = height).
     */
    AVRational   videoResolution;

    //const AVRational g_outVideoTimeBase = {1, 1000};

    /* pthInter
     * Thread id of the interleaveToFile thread for this file;
	 * used for pthread_join at shutdown.
     */
    pthread_t   pthInter;
//    AVRational videoTimeBase;
//    AVDictionary *opts;

    /* video_disable
     * Non-zero to exclude video from this output file.
     */
    int video_disable;

    /* options
     * Video-encoding option string;
     * sets qmin, qmax, b, bt, maxrate, minrate, bufsize, level.
     */
    const char   *options;
} OutputFile;

/* gCurl
 * curl command string used to report the capture start time to the server.
 */
char gCurl[256] = {0};

/* gCurlPath
 * URL used when reporting the time to the server.
 */
char gCurlPath[256] = {0};

/* gDL_audioTimeBase
 * DeckLink audio sample rate (time base).
 * Configured via EnableAudioInput.
 */
AVRational gDL_audioTimeBase = {1, 48000};

/* gDL_videoFrameRate
 * DeckLink video frame rate.
 * Expected to be 29.97 = 30000/1001; re-set from the value
 * returned by GetFrameRate.
 */
AVRational gDL_videoFrameRate = {30000, 1001};

/* gDL_videoResolution
 * DeckLink video resolution (num = width, den = height).
 * Expected to be 1920x1080; re-set from IDeckLinkDisplayMode.
 */
AVRational gDL_videoResolution = {1920, 1080};

/* gStartVideoFrameTime
 * pts of the first video frame received from the DeckLink (-1 = not yet seen).
 */
BMDTimeValue gStartVideoFrameTime = -1;

/* gStartAudioFrameTime
 * pts of the first audio packet received from the DeckLink (-1 = not yet seen).
 */
BMDTimeValue gStartAudioFrameTime = -1;

//bool gIsStoppingEnc = false;
//bool gIsStoppingInterleave = false;

/* gRawIOfile
 * Whether to bypass the DeckLink and read/write raw data
 * through the file system instead.
 */
char gRawIOfile = 0;//0: Not raw file, 1: IN, rawfile read, 2: OUT, rawfile write

/* DEFAULT_AUDIO_SAMPLE_SIZE
 * The ffmpeg audio encoder requires frames of exactly 1024 samples.
 */
#define DEFAULT_AUDIO_SAMPLE_SIZE 	1024

/* MAX_NBOUTPUTFILES
 * Maximum number of simultaneous output files.
 */
#define MAX_NBOUTPUTFILES 8

/* gOutputFiles
 * Array of all output file descriptors.
 */
OutputFile gOutputFiles[MAX_NBOUTPUTFILES];

/* gNbOutputFiles
 * Number of output files currently in use.
 */
int   gNbOutputFiles   = 0;

/* gDL_buf
 * FIFO that smooths the DeckLink audio packets — whose size fluctuates
 * between e.g. 6408 and 6404 bytes — into fixed 4096-byte chunks.
 * Also tracks size, pts and duration of the buffered data.
 */
FFDeckLinkBuffer gDL_buf;//class

/* gDL_audioPool
 * Pre-allocated memory pool used to copy the DeckLink audio stream,
 * so no per-packet allocation is needed.
 * Circular queue, FIFO ordering.
 * Because several output files read the same data, each entry's
 * retainCount is initialized to the number of output streams.
 */
FFDeckLinkPool gDL_audioPool;//class

/* gDL_videoPool
 * Pre-allocated memory pool used to copy the DeckLink video stream,
 * so no per-frame allocation is needed.
 * Circular queue, FIFO ordering.
 * Because several output files read the same data, each entry's
 * retainCount is initialized to the number of output streams.
 */
FFDeckLinkPool gDL_videoPool;//class

/* Constructor: start with a reference count of 0 and create the mutex guarding it. */
DeckLinkCaptureDelegate::DeckLinkCaptureDelegate() : m_refCount(0)
{
    pthread_mutex_init(&m_mutex, NULL);
}

/* Destructor: release the reference-count mutex. */
DeckLinkCaptureDelegate::~DeckLinkCaptureDelegate()
{
    pthread_mutex_destroy(&m_mutex);
}

/* AddRef
 * COM-style reference counting: increment m_refCount under the delegate's
 * mutex and return the new count.
 *
 * Fix: the new count is captured while the mutex is still held. The original
 * re-read m_refCount after unlocking, so a concurrent AddRef()/Release()
 * could make the returned value inconsistent with this call's increment.
 */
ULONG DeckLinkCaptureDelegate::AddRef(void)
{
    int refCount = 0;

    pthread_mutex_lock(&m_mutex);
        refCount = ++m_refCount;
    pthread_mutex_unlock(&m_mutex);

    return (ULONG)refCount;
}

/* Release
 * COM-style reference counting: decrement m_refCount under the delegate's
 * mutex; when it reaches zero, destroy the delegate and return 0.
 *
 * Fix: the decremented count is captured while the mutex is held and the
 * zero check uses that captured value. The original re-read m_refCount
 * after unlocking, so two threads releasing concurrently could both
 * observe 0 (double delete, then use of the freed mutex) or neither
 * observe 0 (object leaked).
 */
ULONG DeckLinkCaptureDelegate::Release(void)
{
    int refCount = 0;

    pthread_mutex_lock(&m_mutex);
        refCount = --m_refCount;
    pthread_mutex_unlock(&m_mutex);

    if (refCount == 0)
    {
        delete this;
        return 0;
    }

    return (ULONG)refCount;
}

/* Input-format-change callback; this capture runs with a fixed mode, so the
 * notification is acknowledged and otherwise ignored. */
HRESULT DeckLinkCaptureDelegate::VideoInputFormatChanged(BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode, BMDDetectedVideoInputFormatFlags)
{
    return S_OK;
}

/* static AVStream *addStreamOfAudio(AVFormatContext *ctx, AVCodec **codec, enum AVCodecID codec_id)
 * Parameters:
 *        ctx; output file context the new AVStream is added to
 *  [out] codec; receives the encoder found for codec_id
 *        codec_id; audio codec ID (AV_CODEC_ID_AAC)
 * Returns:
 *    AVStream*; the AVStream newly added to ctx
 *
 * Adds an audio AVStream to ctx and configures its codec context:
 * S16 samples, 98 kbps, 44.1 kHz, stereo. Exits on any failure.
 */
static AVStream* addStreamOfAudio(AVFormatContext *ctx, AVCodec **codec, enum AVCodecID codec_id)
{
    AVStream *stream = NULL;
    AVCodecContext *enc = NULL;

    /* locate the audio encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (*codec == NULL)
    {
        logF("codec not found\n");
        exit(1);
    }

    stream = avformat_new_stream(ctx, *codec);
    if (stream == NULL)
    {
        logF("Could not allocation avstream.\n");
        exit(1);
    }

    /* configure the encoder parameters */
    enc = stream->codec;
    enc->codec_id       = codec_id;
    enc->thread_count   = 0;
    enc->sample_fmt     = AV_SAMPLE_FMT_S16;
    enc->bit_rate       = 98000;//128000;
    //enc->sample_rate  = gDL_audioTimeBase.den;//48K
    enc->sample_rate    = 44100;// 48000Hz 44100Hz
    enc->channels       = 2;
    enc->channel_layout = AV_CH_LAYOUT_STEREO;

    /* some formats want stream headers to be separate */
    if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return stream;
}

/* static void openCodecOfAudio(OutputStream *ost, AVCodec *codec)
 * Parameters:
 *      ost; OutputStream holding the audio AVStream
 *    codec; encoder pointer (AAC)
 * Returns:
 *     void;
 *
 * Opens the AAC codec and allocates the AVFrame that will hold the
 * audio samples to be encoded. Exits on any failure.
 */
static void openCodecOfAudio(OutputStream *ost, AVCodec *codec)
{
    int ret = avcodec_open2(ost->st->codec, codec, NULL);

    if (ret < 0)
    {
        logF("Could not open audio codec = %s\n", av_err2str(ret));
        exit(1);
    }

    ost->frame = avcodec_alloc_frame();
    if (ost->frame == NULL)
    {
    	logF("Could not alloc frame.\n");
    	exit(1);
		//return AVERROR(ENOMEM);
	}
}

/* static AVStream *addStreamOfVideo(OutputFile *of, AVCodec **codec, enum AVCodecID codec_id)
 * Parameters:
 *              of; OutputFile whose context receives the new AVStream
 *  [out]    codec; receives the encoder found for codec_id
 *        codec_id; video codec ID (AV_CODEC_ID_H264)
 * Returns:
 *       AVStream*; the AVStream newly added to of->ctx
 *
 * Adds a video AVStream to of->ctx, applies the per-file encoder options
 * string (of->options), and sets resolution, time base (1/1000), GOP size
 * and pixel format. Exits on any failure.
 */
static AVStream *addStreamOfVideo(OutputFile *of, AVCodec **codec, enum AVCodecID codec_id)
{
    AVCodecContext *c = NULL;
    AVStream *st = NULL;
    int ret = 0;

    /* find the video encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec))
    {
        logF("video codec not found\n");
        exit(1);
    }

    st = avformat_new_stream(of->ctx, *codec);
    if (!st)
    {
        logF("Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;

    /* reset the codec context to the encoder's defaults before applying options */
    if((ret = avcodec_get_context_defaults3(c, *codec)) < 0)
	{
    	logI("fail avcodec_get_context_defaults3 = %s\n", av_err2str(ret));
    	exit(1);
	}


    c->codec_id = codec_id;

    c->thread_count = 16;

	/* apply the "key=value:key=value" encoder options configured for this file */
	logI("Setting options string '%s'\n", of->options);
	if ((ret = av_set_options_string(c, of->options, "=", ":")) < 0)
	{
		logF("Error setting options string: '%s' because %s\n", of->options, av_err2str(ret));
		exit(1);
	}

    /* Resolution must be a multiple of two. */
    c->width    = of->videoResolution.num;
    c->height   = of->videoResolution.den;
    c->time_base.num = 1;//g_outVideoTimeBase.num;//1;//frameRateDuration;//1001
    c->time_base.den = 1000;//g_outVideoTimeBase.den;//1000;//frameRateScale;//30000
    c->gop_size      = 60;//12;
    c->pix_fmt       = STREAM_PIX_FMT_OUT;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
         * This does not happen with normal video, it just happens here as
         * the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
    }
    /* Some formats want stream headers to be separate. */
    if (of->ctx->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

/* static AVStream *addStreamOfJPEG(OutputFile *of, AVCodec **codec, enum AVCodecID codec_id)
 * Parameters:
 *        of; OutputFile whose context receives the new AVStream
 *  [out] codec; receives the encoder found for codec_id
 *        codec_id; thumbnail codec ID (AV_CODEC_ID_MJPEG)
 * Returns:
 *    AVStream; the AVStream newly added to of->ctx
 *
 * Adds an MJPEG (thumbnail) AVStream to of->ctx and configures resolution,
 * time base (1/1) and the JPEG pixel format. Exits on any failure.
 */
static AVStream *addStreamOfJPEG(OutputFile *of, AVCodec **codec, enum AVCodecID codec_id)
{
	AVCodecContext *c = NULL;
	AVStream *st = NULL;
	int ret = 0;

	/* find the video encoder */
	*codec = avcodec_find_encoder(codec_id);
	if (!(*codec))
	{
		logF("codec not found\n");
		exit(1);
	}

	st = avformat_new_stream(of->ctx, *codec);
	if (!st)
	{
		logF("Could not alloc stream\n");
		exit(1);
	}

	c = st->codec;

	/* reset the codec context to the encoder's defaults */
	if((ret = avcodec_get_context_defaults3(c, *codec)) < 0)
	{
		logI("fail avcodec_get_context_defaults3 = %s\n", av_err2str(ret));
		exit(1);
	}

	c->codec_id = codec_id;

	/* Resolution must be a multiple of two. */
	c->time_base.num = 1;
	c->time_base.den = 1;
	c->width    = of->videoResolution.num;
	c->height   = of->videoResolution.den;
	c->pix_fmt	= PIX_FMT_YUVJ420P;//full-range YUV used by the JPEG encoder

/* static AVFrame *allocPicture(enum PixelFormat pix_fmt, int width, int height)
 * Parameters:
 *    pix_fmt; pixel format of the picture buffer
 *    width;  picture width
 *    height; picture height
 * Returns:
 *    AVFrame; allocated frame with an attached image buffer,
 *             or NULL if either allocation failed.
 *             The caller owns the result and must free it later.
 *
 * Allocates an AVFrame plus a picture buffer matching pix_fmt/WxH.
 */
static AVFrame *allocPicture(enum PixelFormat pix_fmt, int width, int height)
{
    AVFrame *frame = avcodec_alloc_frame();

    if (frame == NULL)
        return NULL;

    /* On buffer-allocation failure, release the frame and return NULL. */
    if (avpicture_alloc((AVPicture *)frame, pix_fmt, width, height) < 0)
        av_freep(&frame);

    return frame;
}

/* static void openCodecOfVideo(OutputStream *ost, AVCodec *codec)
 * Parameters:
 *    ost; OutputStream holding the video AVStream
 *    codec; encoder pointer (h264, mjpeg)
 * Returns:
 *     void;
 *
 * Opens the video codec and allocates the AVFrames used per frame:
 *   - ost->pictureOfDeckLink for the raw UYVY422 DeckLink picture,
 *   - ost->frame for the YUV420P picture handed to the encoder,
 * plus the swscale context converting between the two.
 * Exits on any failure.
 */
static void openCodecOfVideo(OutputStream *ost, AVCodec *codec)
{
    AVCodecContext *c = NULL;

    c = ost->st->codec;

    /* open the codec */
    if (avcodec_open2(c, codec, NULL) < 0)
    {
        logF("could not open codec\n");
        exit(1);
    }

    /* Allocate the Deck Link raw picture. */
    if(NULL == (ost->pictureOfDeckLink = allocPicture(STREAM_PIX_FMT_IN, gDL_videoResolution.num, gDL_videoResolution.den)))
    {
    	logF("Could not allocate pictureOfDeckLink.\n");
    	exit(1);
    }

    /* Allocate the pix_fmt converted raw picture. (STREAM_PIX_FMT_IN->STREAM_PIX_FMT_OUT)*/
    if(NULL == (ost->frame = allocPicture(c->pix_fmt, c->width, c->height)))
    {
        logF("Could not allocate frame that is converted from raw picture.\n");
        exit(1);
    }

    /* as we only generate a YUV420P picture, we must convert it to the codec pixel format if needed */
    ost->imgSwsCtx = sws_getContext(gDL_videoResolution.num, gDL_videoResolution.den, STREAM_PIX_FMT_IN,
    		c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
}

/*-----------------------------------------------------------------------------------*/

/* static void initFFDeckLinkQueue(FFDeckLinkQueue *q)
 * Parameters:
 *    q; pointer to the FFDeckLinkQueue to initialize
 * Returns:
 *    void;
 *
 * Zeroes the queue, creates its mutex/condition variable and pre-allocates
 * a two-node ring (two nodes from the start, for thread safety).
 * Exits on allocation failure.
 */
static void initFFDeckLinkQueue(FFDeckLinkQueue *q)
{
	AVPacketList *first = NULL;
	AVPacketList *second = NULL;

	memset(q, 0, sizeof(FFDeckLinkQueue));
	pthread_mutex_init(&q->mutex, NULL);
	pthread_cond_init(&q->cond, NULL);

	first = (AVPacketList *)av_malloc(sizeof(AVPacketList));
	if(first == NULL)
	{
		logF("Could not allocation AVPacketList.\n");
		exit(1);
	}
	second = (AVPacketList *)av_malloc(sizeof(AVPacketList));
	if(second == NULL)
	{
		logF("Could not allocation AVPacketList.\n");
		exit(1);
	}

	/* Link the two nodes into a ring: first -> second -> first. */
	q->pFirst = first;
	first->next = second;
	second->next = first;
	q->nbTotal = 2;
	q->size = sizeof(*first) + sizeof(*second);
}

/* static void avpacket_queue_flush(FFDeckLinkQueue *q)
 * Parameters:
 *    q; pointer to the FFDeckLinkQueue to release
 * Return:
 *    void;
 *
 * Frees every AVPacketList node of the ring, resets all counters and
 * destroys the mutex and condition variable. Packet payloads are
 * pool-owned and are not freed here.
 */
static void avpacket_queue_flush(FFDeckLinkQueue *q)
{
    AVPacketList *node = NULL;

    logD("FFDeckLinkQueue %p release total %d\n", q, q->nbTotal);
	pthread_mutex_lock(&q->mutex);
	node = q->pFirst;
	while (q->nbTotal > 0)
	{
		AVPacketList *next = node->next;
		av_freep(&node);
		node = next;
		q->nbTotal--;
	}
	logD("FFDecklinkQueue %p release complete total\n", q);

	q->pFirst = NULL;
	q->pHead = NULL;
	q->pTail = NULL;
	q->nbTotal = 0;
	q->nbUse = 0;
	q->size = 0;
	pthread_mutex_unlock(&q->mutex);
	pthread_mutex_destroy(&q->mutex);
	pthread_cond_destroy(&q->cond);
}

/* static int getAVPacketInFFDeckLinkQueue(FFDeckLinkQueue *q, AVPacket *pkt, int block)
 * Parameters:
 *    q;  queue to take the next AVPacket from
 *          pkt(out): receives a copy of q->pHead's AVPacket.
 *                    The AVPacket struct must already exist; its data buffer
 *                    must NOT be allocated by the caller (it aliases pool memory).
 *          block: when the queue is empty, wait until it is filled (block)
 *                 or return immediately (unblock)
 *                 1 = block, 0 = unblock
 * Returns:
 *    0 = a packet was obtained normally
 *    1 = the queue is empty (only possible with block == 0)
 *    2 = the final packet (NULL-data sentinel) was obtained
 * By default this waits on q->cond until a packet is pushed into the queue
 * and returns once one is available.
 * Relies on the queue mutex for all state access.
 */
static int getAVPacketInFFDeckLinkQueue(FFDeckLinkQueue *q, AVPacket *pkt, int block)
{
    int ret = 0;
    pthread_mutex_lock(&q->mutex);//add oos.

    while(true)
    {
		if(q->pHead)
    		// a video or audio AVPacket is available
		{
			*pkt = q->pHead->pkt;
			if(pkt->data == NULL)
			{
				// NULL data marks the final packet -> return 2
				ret = 2;
			}
			else
			{
				ret = 0;
			}
			q->size -= q->pHead->pkt.size;
			q->nbUse--;

			av_init_packet(&q->pHead->pkt);//the data pointer is deliberately NOT reset to NULL (buffer is reused)

			// advance pHead; when the last filled node was consumed, mark the ring empty
			if(q->pHead != q->pTail)
				q->pHead = q->pHead->next;
			else
				q->pHead = q->pTail = NULL;
			break;
		}
		else if (!block)
		{
			ret = 1;
			break;
		}
		else
		{
			//logD("all AVPacket read. Wait for push ... %d/%d %p\n", q->nbUse, q->nbTotal, q);// all packets consumed; wait until one is pushed ...
			pthread_cond_wait(&q->cond, &q->mutex);

			//logD("Have pushed AVPacket %p\n", q);
		}
    }
    pthread_mutex_unlock(&q->mutex);//add oos.
    return ret;
}

/* static void putAVPacketInFFDeckLinkQueue(FFDeckLinkQueue *q, AVPacket pkt)
 * Parameters:
 *      q; FFDeckLinkQueue to append to
 *  [in]pkt; AVPacket to store in the queue
 * Returns:
 *    void
 * Appends one AVPacket to the circular FIFO queue and signals any waiting
 * consumer. The ring starts with 2 nodes and grows by one node whenever no
 * free node is left; there is currently no upper bound. One slot is always
 * kept empty (the ring is never completely filled) for thread safety.
 *
 * Fix: the result of av_malloc() in the grow path is now checked; the
 * original dereferenced a NULL pointer on allocation failure instead of
 * failing through the file's standard logF+exit path.
 */
static void putAVPacketInFFDeckLinkQueue(FFDeckLinkQueue *q, AVPacket pkt)
{
	AVPacketList *pktList = NULL;
	int i = -1;

	i = pkt.stream_index;

	pthread_mutex_lock(&q->mutex);

	if(q->pTail == NULL)
		/* every node has been consumed: reuse the ring from the beginning */
	{
		pktList = q->pFirst;
		pktList->pkt = pkt;
		q->pHead = q->pFirst;
		q->pTail = q->pFirst;
		//logI("(%d) first memory reusing nb %d/%d Q %p\n", i, q->nbUse + 1, q->nbTotal, q);
	}
	else if(q->pTail->next->next == q->pHead)
		/* no free node left (one slot is always kept empty for thread safety):
		 * grow the ring by one freshly allocated node */
	{
		pktList = (AVPacketList *)av_malloc(sizeof(AVPacketList));
		if (pktList == NULL)
		{
			logF("Could not allocation AVPacketList.\n");
			exit(1);
		}
		pktList->next = q->pTail->next;
		q->pTail->next = pktList;
		q->pTail = pktList;
		q->nbTotal++;
		q->size += sizeof(*pktList);
		pktList->pkt = pkt;
		//logI("(%d) add alloc memory nb %d/%d Q %p\n", i, q->nbUse +1 , q->nbTotal, q);
	}
	else if(q->pTail->next != q->pHead)
		/* a pre-allocated free node exists: reuse it */
	{
		if (!q->pTail->next)
		{
			logF("q->pTail->next is NULL\n");
			exit(1);
		}
		q->pTail = q->pTail->next;
		q->pTail->pkt = pkt;
		//logI("(%d) memory reusing nb %d/%d Q %p\n", i, q->nbUse + 1, q->nbTotal, q);
	}
	else
	{
		logF("(%d) memory nb is broken. %d/%d Q %p\n", i, q->nbUse, q->nbTotal, q);
		exit(1);
	}

	q->size += pkt.size;
	q->nbUse++;

	pthread_cond_signal(&q->cond);
	pthread_mutex_unlock(&q->mutex);
}

#define THREAD_MEMCOPY
#ifdef THREAD_MEMCOPY
/* gMemMutex, gMemCond
 * Per-worker mutex/condition pairs used to wake the four DL_memcpy threads.
 */
pthread_mutex_t   gMemMutex[4];
pthread_cond_t   gMemCond[4];
/* gMemCpyThIndex
 * Next worker slot to hand out; each DL_memcpy thread claims the current
 * value (index into gDstBuf etc.) and increments it.
 */
int gMemCpyThIndex = 0;
/* gDstBuf, gSrcBuf, gNumBuf
 * Destination, source and byte count of each quarter of the split frame.
 */
uint8_t *gDstBuf[4];
uint8_t *gSrcBuf[4];
size_t gNumBuf[4];
/* gIsCopy
 * Set to 1 by a worker once its quarter has been memcpy'd.
 */
char gIsCopy[4];
/* DL_memcpy
 * Worker thread that copies one quarter of the DeckLink video frame.
 * Waits on its condition variable; on wake, copies gSrcBuf[thi] ->
 * gDstBuf[thi] (gNumBuf[thi] bytes) and raises gIsCopy[thi] so the
 * producer can proceed. A NULL gDstBuf[thi] is the shutdown signal.
 *
 * NOTE(review): gMemCpyThIndex is read and incremented without any lock;
 * if two workers start concurrently they may claim the same slot —
 * confirm the threads are started strictly one at a time.
 * NOTE(review): the wait has no predicate, so a signal sent while the
 * worker is not yet blocked in pthread_cond_wait() is lost and the
 * producer then spins forever on gIsCopy. A "work pending" flag checked
 * under the mutex would close this window.
 */
static void* DL_memcpy(void *)
{
	int thi = gMemCpyThIndex;
	gMemCpyThIndex++;
	while(1)
	{
		pthread_mutex_lock(&gMemMutex[thi]);
		pthread_cond_wait(&gMemCond[thi], &gMemMutex[thi]);
		pthread_mutex_unlock(&gMemMutex[thi]);
		if(gDstBuf[thi] != NULL)
			memcpy(gDstBuf[thi], gSrcBuf[thi], gNumBuf[thi]);
		else
			break;
		gIsCopy[thi] = 1;
	}
	return NULL;
}
#endif

/* HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioPacket)
 * Callback invoked by the DeckLink driver about 29.97 times per second.
 * If other threads hog the CPU this callback may be skipped, and it must
 * return within 1/29.97 s for the next invocation to happen on time.
 * When an invocation was missed, the next call can receive NULL for
 * videoFrame and/or audioPacket.
 * Video: the IDeckLinkVideoInputFrame bytes are copied (by the DL_memcpy
 *        worker threads) and pushed onto each output's rawPacketQueue.
 * Audio: the IDeckLinkAudioInputPacket bytes are pushed into gDL_buf and
 *        read back in fixed 4096-byte chunks that are pushed onto each
 *        output's rawPacketQueue.
 */
HRESULT DeckLinkCaptureDelegate::VideoInputFrameArrived(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioPacket)
//void debugFunc(IDeckLinkVideoInputFrame* videoFrame, IDeckLinkAudioInputPacket* audioPacket)
{
    void *videoFrameBytes = NULL, *audioFrameBytes = NULL;
    BMDTimeValue videoFrameTime = 0, audioFrameTime = 0;
    BMDTimeValue videoFrameDuration = 1;
    AVPacket decklinkPkt = {0};
	int i = 0;

    int64_t timer_start = 0;
    int64_t debugtimer[8] = {0}; int debug_i= 0;
    char debugStr[250] = {0};

    gFrameCount++;
    timer_start = av_gettime();

    // Handle Video Frame
    if (videoFrame)// && !(gFrameCount % 100 == 99))
    {
        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource)
        {
            logF("Frame received (#%lu) - No input signal detected - Frames dropped %u\n", gFrameCount, ++gDropped);
            return E_FAIL;
        }
        debugtimer[debug_i++] = av_gettime() - timer_start;//timer check 0
        videoFrame->GetBytes(&videoFrameBytes);
		videoFrame->GetStreamTime(&videoFrameTime, &videoFrameDuration, gDL_videoFrameRate.num);//AV_TIME_BASE=1000000
		if(gStartVideoFrameTime == -1)
		{
			// first video frame: remember its pts and build the curl command
			// that reports the capture start time to the server
			gStartVideoFrameTime = videoFrameTime;
			sprintf(gCurl, "curl %s%ld", gCurlPath, av_gettime());
		}
		//logI("videoFrameTime %ld videoFrameDuration %ld\n", videoFrameTime, videoFrameDuration);
		av_init_packet(&decklinkPkt);
		decklinkPkt.stream_index = VIDEO_STREAM_INDEX;
		decklinkPkt.data = gDL_videoPool.get();
		decklinkPkt.size = videoFrame->GetRowBytes() * videoFrame->GetHeight();
		decklinkPkt.pts = videoFrameTime - gStartVideoFrameTime;
		decklinkPkt.duration = videoFrameDuration;

		debugtimer[debug_i++] = av_gettime() - timer_start;//timer check 1
// Split the raw image into 4 chunks and memcpy them concurrently on worker threads.
#ifdef THREAD_MEMCOPY
		gDstBuf[0] = decklinkPkt.data;
		gSrcBuf[0] = (uint8_t*)videoFrameBytes;
		gNumBuf[0] = decklinkPkt.size/4;

		gDstBuf[1] = gDstBuf[0] + decklinkPkt.size/4;
		gSrcBuf[1] = gSrcBuf[0] + decklinkPkt.size/4;
		gNumBuf[1] = decklinkPkt.size/4;

		gDstBuf[2] = gDstBuf[1] + decklinkPkt.size/4;
		gSrcBuf[2] = gSrcBuf[1] + decklinkPkt.size/4;
		gNumBuf[2] = decklinkPkt.size/4;

		gDstBuf[3] = gDstBuf[2] + decklinkPkt.size/4;
		gSrcBuf[3] = gSrcBuf[2] + decklinkPkt.size/4;
		gNumBuf[3] = decklinkPkt.size/4;


		pthread_cond_signal(&gMemCond[0]);
		pthread_cond_signal(&gMemCond[1]);
		pthread_cond_signal(&gMemCond[2]);
		pthread_cond_signal(&gMemCond[3]);

		// spin until all four workers have finished their quarter
		while(gIsCopy[0] == 0 || gIsCopy[1] == 0 || gIsCopy[2] == 0 || gIsCopy[3] == 0)
		{
			usleep(0);
		}
		gIsCopy[0] = gIsCopy[1] = gIsCopy[2] = gIsCopy[3] = 0;
#else
		memcpy(decklinkPkt.data, (uint8_t*)videoFrameBytes, decklinkPkt.size);
#endif
		debugtimer[debug_i++] = av_gettime() - timer_start;//timer check 2
		for(i = 0; i < gNbOutputFiles; i++)
		{
			if(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st)
				putAVPacketInFFDeckLinkQueue(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].rawPacketQueue, decklinkPkt);
		}
    }
    else
    {
		logF("videoFrame NULL %ld\n", gFrameCount);
		//exit(1);
	}

	// Handle Audio Frame
    if(audioPacket)// && !(gFrameCount % 100 == 99)
    {
    	audioPacket->GetPacketTime(&audioFrameTime, gDL_audioTimeBase.den);//AV_TIME_BASE
    	if(gStartAudioFrameTime == -1)
    		gStartAudioFrameTime = audioFrameTime;
    	audioPacket->GetBytes(&audioFrameBytes);
    	debugtimer[debug_i++] = av_gettime() - timer_start;//timer check 3
    	//logD("audioFrameTime %ld scount %ld\n", audioFrameTime - gStartAudioFrameTime, audioPacket->GetSampleFrameCount());
    	//FFDeckLinkBuffer::push(uint8_t *buf, int size, int64_t pts, int duration)
		if(gDL_buf.push((uint8_t*)audioFrameBytes,  //*buf
				audioPacket->GetSampleFrameCount() * gAudioChannels * (gAudioSampleDepth / 8), //size
				audioFrameTime - gStartAudioFrameTime, //pts
				audioPacket->GetSampleFrameCount())) // duration
		{
			logF("gDL_buf push error\n");
			abort();
		}
		debugtimer[debug_i++] = av_gettime() - timer_start;//timer check 4

		for(int loopi = 0; loopi < 3*10; loopi++)//can absorb roughly 10 consecutive NULL audioPacket callbacks
		{
			uint8_t *audioSample = NULL;
			int64_t mypts = 0;

			//FFDeckLinkBuffer::pop(uint8_t **oBuf, const uint16_t size = 4096)
			mypts = gDL_buf.pop(&audioSample);
			if(audioSample == NULL)
				break;
			else
			{
				av_init_packet(&decklinkPkt);
				decklinkPkt.stream_index = AUDIO_STREAM_INDEX;
				decklinkPkt.data = gDL_audioPool.get();
				decklinkPkt.size = 4096;
				decklinkPkt.pts = mypts;//aframecnt * 1024;// timebase 48000
				decklinkPkt.duration = 1024;

				memcpy(decklinkPkt.data, (uint8_t*)audioSample, decklinkPkt.size);
				for(i = 0; i< gNbOutputFiles; i++)
				{
					logD("pts %ld put raw audio pkt key %d size %d dts %ld\n",
							decklinkPkt.pts, decklinkPkt.flags, decklinkPkt.size, decklinkPkt.dts);
					if(gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st)
						putAVPacketInFFDeckLinkQueue(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].rawPacketQueue, decklinkPkt);
				}
			}
		}
		debugtimer[debug_i++] = av_gettime() - timer_start;//timer check
    }
    else
    {
    	logF("audioPacket NULL %ld\n", gFrameCount);
    	//exit(1);
    }

//    for(int i = 0 ; i < debug_i ; i++)
//    {
//    	sprintf(debugStr + strlen(debugStr), "%10lds:%d ", debugtimer[i], i);
//    }
//    printf("\n");

    if(gMaxFrames > 0 && gFrameCount >= gMaxFrames)
    {
    	logI("max frame capture %d\n", gFrameCount >= gMaxFrames);
        pthread_cond_signal(&gSleepCond);
    }
    debugtimer[debug_i++] = av_gettime() - timer_start;//timer check

/* NOTE(review): '#ifndef DEBUG' compiles this debug dump when DEBUG is NOT
 * defined — confirm the polarity is intended (looks inverted). */
#ifndef DEBUG

    char *pDebugStr = debugStr;
    for(i = 0; i < 1; i++)
    {
		sprintf(pDebugStr + strlen(pDebugStr), "[%d]%3d/%3d avPool, %d/%d avRQ, %d/%2d avEQ, %7lds:%d debugtime",
				i,
				gDL_audioPool.mUseNB,
				gDL_videoPool.mUseNB,
				gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].rawPacketQueue.nbUse,
				gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].rawPacketQueue.nbUse,
				gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].encodedPacketQueue.nbUse,
				gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].encodedPacketQueue.nbUse,
				debugtimer[debug_i-1],
				debug_i-1);
		pDebugStr += strlen(pDebugStr);
    }
#endif

   	fprintf(stderr, "[frame:%04ld(%03ld)%03.1fs]\n%s\n", gFrameCount, (videoFrameTime-gStartVideoFrameTime)/videoFrameDuration, (float)videoFrameTime/gDL_videoFrameRate.num, debugStr);
    return S_OK;
}

/* static void* encodeAudioFromStream(void *stream)
 * Parameters:
 *    stream; pointer to the OutputStream holding the audio AVStream to encode
 * Returns:
 *    void*; always NULL
 *
 * Audio encoding sequence
 * 1. Take an AVPacket from rawPacketQueue.
 * 2. Fill an AVFrame from the AVPacket.
 * 3. Encode the AVFrame to obtain an encoded AVPacket.
 * 4. Put the encoded AVPacket into encodedPacketQueue.
 *
 * After the last encode, an AVPacket with NULL data is pushed into
 * encodedPacketQueue as an end-of-stream sentinel.
 * (Note: this comment documents encodeAudioFromStream below; the helper
 *  alloc_samples_array_and_data happens to sit between them.)
 */
/* Allocate the plane-pointer array plus the sample buffers it points at
 * (mirrors FFmpeg's resampling_audio example helper).
 * Parameters:
 *    data;        out: receives the newly allocated plane-pointer array
 *    linesize;    out: aligned size in bytes of each plane
 *    nb_channels; number of channels
 *    nb_samples;  number of samples per channel
 *    sample_fmt;  sample format (planar formats get one plane per channel)
 *    align;       buffer alignment (0 = default)
 * Returns:
 *    >= 0 on success (av_samples_alloc result), negative AVERROR on failure.
 *    On failure *data is freed and reset to NULL (the old code leaked the
 *    pointer array when av_samples_alloc failed). On success the caller owns
 *    both (*data)[0] and *data.
 */
int alloc_samples_array_and_data(uint8_t ***data, int *linesize, int nb_channels,
                                    int nb_samples, enum AVSampleFormat sample_fmt, int align)
{
    int nb_planes = av_sample_fmt_is_planar(sample_fmt) ? nb_channels : 1;
    int ret;

    /* one uint8_t* per plane; sizeof(**data) == sizeof(uint8_t *) */
    *data = (uint8_t**)av_malloc(sizeof(**data) * nb_planes);
    if (!*data)
        return AVERROR(ENOMEM);
    ret = av_samples_alloc(*data, linesize, nb_channels,
                           nb_samples, sample_fmt, align);
    if (ret < 0)
        av_freep(data); /* don't leak the pointer array on failure */
    return ret;
}

/* static void* encodeAudioFromStream(void *stream)
 * Parameters:
 *    stream; pointer to the OutputStream holding the audio AVStream to encode
 * Returns:
 *    void*; always NULL
 *
 * Audio encoding sequence:
 * 1. Take a raw AVPacket (S16 stereo 48 kHz from DeckLink) from rawPacketQueue.
 * 2. Resample it (48 kHz -> 44.1 kHz) into a scratch buffer and fill an AVFrame.
 * 3. Encode the AVFrame into an AVPacket.
 * 4. Push the encoded AVPacket into encodedPacketQueue.
 *
 * After the loop a NULL-data AVPacket is pushed into encodedPacketQueue as an
 * end-of-stream sentinel for the interleaver.
 *
 * NOTE(review): the loop currently OVERWRITES the resampled samples with data
 * fread() from "audio.s16le" (a leftover debugging aid) — confirm whether
 * that is still intended before shipping.
 */
static void* encodeAudioFromStream(void *stream)
{
	OutputStream *ost = (OutputStream *)stream;
	AVCodecContext *encCtx = NULL;
	AVPacket rawPkt = {0};
	AVPacket oPkt = {0};
	int got_packet = 0;
	int ret = 0;
	int block = 1;	//blocking queue pop

	int src_rate = 48000, dst_rate = 44100;	//DeckLink rate -> output rate
	int src_nb_samples = 1024;	//samples per raw packet (capture side pushes 1024-sample chunks)
	int dst_bufsize = 0;		//was read uninitialized in the log below; now defined (never computed)
	static struct SwrContext *swr_ctx = NULL;
	int t = 0;			//debug: loop iteration counter
	uint8_t *rsBuf = NULL;		//per-iteration resample/encode buffer (1024 S16 stereo frames)

	//S16 stereo 48 kHz -> S16 stereo 44.1 kHz resampler
	swr_ctx = swr_alloc_set_opts(swr_ctx, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  dst_rate,
			AV_CH_LAYOUT_STEREO,  AV_SAMPLE_FMT_S16,  src_rate,
			0, 0);

	if (swr_init(swr_ctx) < 0) {
		logF("Failed to initialize the resampling context\n");
		return NULL;
	}

	if(!ost->frame)
	{
		logF("Not yet audio alloc frame\n");
		exit(1);
	}

	encCtx = ost->st->codec;

	FILE *tf = fopen ("audio.s16le","rb");
	if (!tf)	//previously unchecked: a missing file crashed in fread() below
	{
		logF("Could not open audio.s16le\n");
		return NULL;
	}

	while (0 == (ret = getAVPacketInFFDeckLinkQueue(&ost->rawPacketQueue, &rawPkt, block))) //ret is 0 or 1 or 2
	{
		//NULL data marks the last (sentinel) packet
		if(rawPkt.data == NULL)
			break;

		/* Resample into rsBuf. The buffer holds 1024 stereo S16 frames
		 * (1024*4 bytes), so the output capacity passed to swr_convert()
		 * must be 1024 samples; the previous value of 4096 overstated the
		 * capacity and allowed a heap overflow. */
		rsBuf = (uint8_t *)malloc(1024*4);
		ret = swr_convert(swr_ctx, (uint8_t**)&rsBuf, 1024, (const uint8_t **)&rawPkt.data, 1024);
		if (ret < 0) {
			fprintf(stderr, "Error while converting\n");
			free(rsBuf);	//previously leaked on this error path
			fclose(tf);	//previously left open on this error path
			return NULL;
		}
		logI("t:%d in:%d out:%d dst_bufsize %d\n", t++, src_nb_samples, ret, dst_bufsize);

		//48 kHz ticks -> audio stream time_base
		ost->frame->pts = rawPkt.pts * ost->st->time_base.den / gDL_audioTimeBase.den;

		av_init_packet(&oPkt);
		oPkt.data = NULL;
		oPkt.size = 0;

		//940 samples per encoded frame (44.1k side of the 48k->44.1k ratio, as in the original)
		int rsize = 940*4;
		encCtx->frame_size = 940;
		//debug override: feed samples from the capture file instead of the resampled data
		rsize = fread(rsBuf, 1, rsize, tf);

		ost->frame->data[0] = rsBuf;
		ost->frame->nb_samples = encCtx->frame_size;
		ost->frame->linesize[0] = rsize;

		if(rsize <= 0 )
		{
			//EOF on the debug file: stop encoding.
			//(The old code leaked rsBuf and the pool slot on this path.)
			free(rsBuf);
			gDL_audioPool.release(rawPkt.data);
			break;
		}
		if((ret = avcodec_encode_audio2(encCtx, &oPkt, ost->frame, &got_packet)) < 0)
		{
			logF("[error]avcodec_encode_audio2 = %s\n", av_err2str(ret));
			exit(1);
		}
		free(rsBuf);
		ost->frame->data[0] = NULL;	//don't leave a dangling pointer in the reused frame

		//the raw buffer belongs to the pool; hand it back for reuse (do not free it)
		gDL_audioPool.release(rawPkt.data);

		usleep(0);	//yield so the capture callback thread is not starved

		if(got_packet)
		{
			oPkt.stream_index = ost->st->index;
			if(oPkt.pts == AV_NOPTS_VALUE)
				oPkt.pts = oPkt.dts = 0;

			putAVPacketInFFDeckLinkQueue(&ost->encodedPacketQueue, oPkt);
		}
		else
		{
			logD("no put audio pkt encoded\n");
		}
	}

	fclose(tf);
	//push the end-of-stream sentinel (NULL data) for the interleaver
	av_init_packet(&oPkt);
	oPkt.data = NULL;
	oPkt.size = 0;
	oPkt.stream_index = ost->st->index;
	putAVPacketInFFDeckLinkQueue(&ost->encodedPacketQueue, oPkt);
	return NULL;
}

/* static void* encodeVideoFromStream(void *stream)
 * Parameters:
 *    stream; pointer to the OutputStream holding the video AVStream to encode
 * Returns:
 *    void*; always NULL
 *
 * Description:
 *   Video encoding sequence
 *      1. Take a raw AVPacket from rawPacketQueue.
 *      2. Fill an AVPicture from the raw AVPacket.
 *      3. Convert the AVPicture pixel format (uyvy422 -> yuv420p) into an AVFrame.
 *      4. Encode the AVFrame to obtain an encoded AVPacket.
 *      5. Put the encoded AVPacket into encodedPacketQueue.
 *
 *   After the last encode, an AVPacket with NULL data is pushed into
 *   encodedPacketQueue as an end-of-stream sentinel.
 */
static void* encodeVideoFromStream(void *stream)
{
	OutputStream *ost = (OutputStream *)stream;
	AVCodecContext *encCtx = NULL;
    AVPacket rawPkt = {0};
	AVPacket oPkt = {0};
    int got_packet = 0;
    int ret = 0;
	int block = 1;			//blocking queue pop
	int64_t videoPts = 0;	//raw-packet pts rescaled to AV_TIME_BASE (jpeg path only)
	int64_t jpegPts = 0;	//pts of the next jpeg frame to emit, in AV_TIME_BASE

	encCtx = ost->st->codec;

	if (!ost->imgSwsCtx)
	{
		logF("Cannot initialize the conversion context\n");
		exit(1);
	}

	if(!ost->frame)
	{
		logF("Not yet alloc frame\n");
		exit(1);
	}

    while (0 == (ret = getAVPacketInFFDeckLinkQueue(&ost->rawPacketQueue, &rawPkt, block))) //ret is 0 or 1.
    {
    	if(rawPkt.data == NULL)
		//NULL data marks the last (sentinel) packet
    		break;

		if(encCtx->codec_id == AV_CODEC_ID_MJPEG)
		//jpeg thumbnail output: encode at the (slower) jpeg rate by dropping
		//raw frames whose capture time has not yet reached the next jpeg pts
		{
//			logD("video pts  %ld jpeg pts %ld\n",
//					av_rescale_q(rawPkt.pts, (AVRational){1,gDL_videoFrameRate.num}, AV_TIME_BASE_Q),
//					av_rescale_q(encCtx->frame_number, encCtx->time_base, AV_TIME_BASE_Q));
			videoPts = av_rescale_q(rawPkt.pts, (AVRational){1,gDL_videoFrameRate.num}, AV_TIME_BASE_Q);
			jpegPts = av_rescale_q(encCtx->frame_number, encCtx->time_base, AV_TIME_BASE_Q);

			if(videoPts < jpegPts)
			{
				//skip this frame; its buffer goes straight back to the pool
				gDL_videoPool.release(rawPkt.data);
				continue;
			}

		}
    	//logI("get (%d) memory nb %d/%d %p\n", st_i, q->nbUse, q->nbTotal, q);

    	av_init_packet(&oPkt);
    	oPkt.data = NULL;
    	oPkt.size = 0;

		//wrap the raw uyvy422 bytes, convert into ost->frame (yuv420p), then
		//return the raw buffer to the pool — the frame owns its own planes
		avpicture_fill((AVPicture*)ost->pictureOfDeckLink, rawPkt.data, STREAM_PIX_FMT_IN,
				gDL_videoResolution.num, gDL_videoResolution.den);
		sws_scale(ost->imgSwsCtx, (const uint8_t * const *)ost->pictureOfDeckLink->data, ost->pictureOfDeckLink->linesize,
				0, gDL_videoResolution.den, ost->frame->data, ost->frame->linesize);
		gDL_videoPool.release(rawPkt.data);
		//RC = deck.....();
		//logI("vRC = %d", RC);
		//gDL_videoPool.status();
		//NOTE(review): 30 appears to hard-code the frame rate; presumably this
		//should be derived from gDL_videoFrameRate / the stream time_base — confirm
		ost->frame->pts = rawPkt.pts / 30 ;//* g_VidoeST->time_base.den / encCtx->time_base.den;
		//av_rescale_q(rawPkt.pts, g_VidoeST->time_base, encCtx->time_base);
		//logI("video pts %ld->%ld\n", rawPkt.pts, ost->frame->pts);
		if((ret = avcodec_encode_video2(encCtx, &oPkt, ost->frame, &got_packet)) < 0)
		{
			logF("failure avcodec_encode_video2 = %s\n", av_err2str(ret));
			exit(1);
		}
		//logI("Video Frame key %d size %d pts %ld dts %ld base.num %d\n",
		//		oPkt.flags, oPkt.size, oPkt.pts, oPkt.dts, ost->st->time_base.num);
    	usleep(0); //yield so the capture callback thread is not starved

    	if(got_packet) // got_packet
		{
//			oPkt.stream_index = rawPkt.stream_index;
    		oPkt.stream_index = ost->st->index;
			if(oPkt.pts == AV_NOPTS_VALUE)// && ost->st->index == rawPkt.stream_index)
				oPkt.pts = oPkt.dts = 0;

			//hand the encoded packet to the interleaver thread
			putAVPacketInFFDeckLinkQueue(&ost->encodedPacketQueue, oPkt);
			//logI("interleaved %s pts %ld\n", oPkt.stream_index==g_VidoeST->index?"video":"audio", oPkt.pts);
			//av_interleaved_write_frame(s, &oPkt);
			//av_free_packet(&oPkt);
		}
    	else{
    		logD("52q NO\n");
    	}

    	//rawPkt was consumed by the encode step; its data is NOT cleared here
    	//because the buffer memory is reused via the pool.
    	//av_init_packet(&rawPkt);
    }

    //push the end-of-stream sentinel (NULL data) for the interleaver
    av_init_packet(&oPkt);
    oPkt.data = NULL;
    oPkt.size = 0;
    oPkt.stream_index = ost->st->index;
    putAVPacketInFFDeckLinkQueue(&ost->encodedPacketQueue, oPkt);
    return NULL;
}

/* static void* interleaveToFile(void *outfile)
 * Parameters:
 *    outfile; 출력파일의 정보를 갖고있는 OutputFile pointer
 * Returns:
 *    void*; 항상 NULL
 *
 * 1. 각각 Audio와 Video의 encodedPacketQueue에서 aPkt,vPkt을 얻는다.
 * 2. pts를 비교하여 작은 AVPacket 부터 av_interleaved_write_frame() 한다.
 * 3. encodedPacketQueue에서 모든 AVPacket을 얻으면 encoding 자체 queue에 있는
 *    AVPacket을  av_interleaved_write_frame() 한다.
 */
static void* interleaveToFile(void *outfile)
{
	OutputFile *of = (OutputFile *)outfile;
	AVPacket vPkt = {0};
    AVPacket aPkt = {0};
    AVPacket *minPkt = NULL;
    AVPacket lastPkt = {0};
    int64_t vPts = AV_NOPTS_VALUE;
    int64_t aPts = AV_NOPTS_VALUE;
    bool isEndVideo = false;
	bool isEndAudio = false;
    int got_lpacket = 0;
	int block = 1;
	int ret = 0;

	if(of->output_streams[AUDIO_STREAM_INDEX].st == NULL)
	{
		aPts = INT_MAX;
		isEndAudio = true;
	}

	if(of->output_streams[VIDEO_STREAM_INDEX].st == NULL)
	{
		vPts = INT_MAX;
		isEndVideo = true;
	}

	//encodedPacketQueue에서 video와 audio를 모두 가져올때 까지
	while(true)
	{
		if( !isEndAudio && (minPkt == &aPkt || minPkt == NULL))
		{
			av_init_packet(&aPkt);
			aPkt.data = NULL;
			aPkt.size = 0;
			if (0 == (ret = getAVPacketInFFDeckLinkQueue(&of->output_streams[AUDIO_STREAM_INDEX].encodedPacketQueue, &aPkt, block))) //ret is 0 or 1 or 2.
			{
				if(aPkt.data == NULL)
				{
					aPts = INT_MAX;
					isEndAudio = true;
					continue;
				}
				aPts = av_rescale_q(aPkt.pts, of->output_streams[AUDIO_STREAM_INDEX].st->time_base, AV_TIME_BASE_Q);
			}
			else
			{
				aPts = INT_MAX;
				isEndAudio = true;
			}
		}
		if( !isEndVideo && (minPkt == &vPkt || minPkt == NULL))
		{
			av_init_packet(&vPkt);
			vPkt.data = NULL;
			vPkt.size = 0;
			if (0 == (ret = getAVPacketInFFDeckLinkQueue(&of->output_streams[VIDEO_STREAM_INDEX].encodedPacketQueue, &vPkt, block))) //ret is 0 or 1.
			{
				if(vPkt.data == NULL)
				{
					vPts = INT_MAX;
					isEndVideo = true;
					continue;
				}
				vPts = av_rescale_q(vPkt.pts, of->output_streams[VIDEO_STREAM_INDEX].st->time_base, AV_TIME_BASE_Q);
			}
			else
			{
				vPts = INT_MAX;
				isEndVideo = true;
			}
		}

		if(isEndVideo && isEndAudio)
			break;

		logI("compare pts %ld%s/%ld%s\n", aPts, aPts==INT_MAX?"(audio END)":"", vPts, vPts==INT_MAX?"(video END)":"");
		if(vPts < aPts)
			minPkt = &vPkt;
		else
			minPkt = &aPkt;

		av_interleaved_write_frame(of->ctx, minPkt);
//		if(minPkt == &aPkt)
//			free(aPkt.data);
		av_free_packet(minPkt);
	}

	//encode queue에 남은것 모두 처리하기
	if(of->output_streams[VIDEO_STREAM_INDEX].st)
		isEndVideo = false;
	else
		isEndVideo = true;

	if(of->output_streams[AUDIO_STREAM_INDEX].st)
		isEndAudio = false;
	else
		isEndAudio = true;

	while(!isEndVideo || !isEndAudio)
	{
		logI("POP in last encode queue.\n");
		av_init_packet(&lastPkt);
		lastPkt.data = NULL;
		lastPkt.size = 0;
		got_lpacket = 0;

		if(!isEndVideo)
		{
			if((ret = avcodec_encode_video2(of->output_streams[VIDEO_STREAM_INDEX].st->codec, &lastPkt, NULL, &got_lpacket)) < 0)
			{
				logI("Video Frame key %d size %d pts %ld dts %ld got_lpacket %d\n",
						lastPkt.flags, lastPkt.size, lastPkt.pts, lastPkt.dts, got_lpacket);
				lastPkt.stream_index = 1;
				if(got_lpacket)
				{
					logD("last interleave encode video ret %d\n", ret);
					av_interleaved_write_frame(of->ctx, &lastPkt);
				}
				else
				{
					logD("Not got %d end avcodec_encode_video2\n", got_lpacket);
					isEndVideo = true;
				}
			}
			else
			{
				logD("fail avcodec_encode_video2 = %s\n", av_err2str(ret));
				isEndVideo = true;
			}
		}
		if(!isEndAudio)
		{
			if((ret = avcodec_encode_audio2(of->output_streams[AUDIO_STREAM_INDEX].st->codec, &lastPkt, NULL, &got_lpacket)) < 0)
			{
				logI("audio Frame key %d size %d pts %ld dts %ld got_lpacket %d\n",
						lastPkt.flags, lastPkt.size, lastPkt.pts, lastPkt.dts, got_lpacket);
				lastPkt.stream_index = 0;
				if(got_lpacket)
				{
					logD("last interleave encode audio ret %d\n", ret);
					av_interleaved_write_frame(of->ctx, &lastPkt);
				}
				else
				{
					logD("Not got %d end avcodec_encode_audio2\n", got_lpacket);
					isEndAudio = true;
				}
			}
			else
			{
				logD("fail avcodec_encode_audio2 = %s\n", av_err2str(ret));
				isEndAudio = true;
			}
		}
	}
	logI("end of %p\n", of);
	return NULL;
}

/* static void* sendtime(void *)
 * Thread function. Notifies the Wowza server of the encoding start time
 * (as unix time): polls every 500 ms until another thread has filled gCurl
 * with a "curl ..." command line, runs it once, then exits.
 *
 * NOTE(review): gCurl is written by another thread without synchronization;
 * this relies on the "curl" prefix becoming visible last — confirm.
 * SECURITY: system() runs gCurl through the shell; gCurl must never contain
 * untrusted input.
 */
static void* sendtime(void *)
{
	while(true)
	{
		usleep(500000);//500ms
		if(strncmp(gCurl, "curl", 4) == 0) //idiomatic prefix test (was char-by-char)
		{
			logI("%s\n",gCurl);
			if(system(gCurl) == -1) //previously ignored: report a failed fork/exec
				logI("sendtime: system() failed\n");
			break;
		}
	}
	return NULL;
}

/* static void print_output_modes (IDeckLink* deckLink)
 * Parameters:
 *    deckLink; the IDeckLink whose output modes should be listed
 * Returns:
 *    void;
 *
 * Prints every display mode the card's output interface supports
 * (index, name, resolution and frame rate) to stdout.
 */
static void print_output_modes (IDeckLink* deckLink)
{
    IDeckLinkOutput*              output    = NULL;
    IDeckLinkDisplayModeIterator* modeIter  = NULL;
    IDeckLinkDisplayMode*         mode      = NULL;
    HRESULT                       result;
    int                           modeIndex = 0;

    // Query the card for its output interface, then walk its display modes.
    result = deckLink->QueryInterface(IID_IDeckLinkOutput, (void**)&output);
    if (result == S_OK)
    {
        result = output->GetDisplayModeIterator(&modeIter);
        if (result == S_OK)
        {
            fprintf(stdout, "Supported video output display modes and pixel formats:\n");
            while (modeIter->Next(&mode) == S_OK)
            {
                char *modeName = NULL;

                if (mode->GetName((const char **) &modeName) == S_OK)
                {
                    BMDTimeValue frameRateDuration;
                    BMDTimeScale frameRateScale;

                    // Fetch the mode's frame rate as scale/duration.
                    mode->GetFrameRate(&frameRateDuration, &frameRateScale);
                    fprintf(stdout, "        %2d:   %-20s \t %ld x %ld \t %7g FPS\n",
                            modeIndex++, modeName, mode->GetWidth(), mode->GetHeight(),
                            (double)frameRateScale / (double)frameRateDuration);

                    free(modeName);
                }
                // Release each IDeckLinkDisplayMode to avoid leaking it.
                mode->Release();
            }
        }
        else
        {
            logF("Could not obtain the video output display mode iterator - result = %08x\n", result);
        }
    }
    else
    {
        logF("Could not obtain the IDeckLinkOutput interface - result = %08x\n", result);
    }

    // Release whatever interfaces were obtained above.
    if (modeIter != NULL)
    {
        modeIter->Release();
    }
    if (output != NULL)
    {
        output->Release();
    }
}

/* int usage(int status)
 * Parameters:
 *    status; exit code passed to exit()
 * Returns:
 *    status; (unreachable — exit(status) is called first)
 *
 * Prints program usage: enumerates the DeckLink cards in the system with
 * their supported display modes, then the option summary, and terminates.
 */
static int usage(int status)
{
    HRESULT            result = 0;
    IDeckLinkIterator *deckLinkIterator = NULL;
    IDeckLink*         deckLink = NULL;
    int                numDevices = 0;

    fprintf(stderr,
        "Usage: bmdcapture -m <mode id> [OPTIONS]\n"
        "\n"
        "    -m <mode id>:\n"
    );

    // Create an IDeckLinkIterator object to enumerate all DeckLink cards in the system
    deckLinkIterator = CreateDeckLinkIteratorInstance();
    if (deckLinkIterator == NULL)
    {
        logF("A DeckLink iterator could not be created.  The DeckLink drivers may not be installed.\n");
        return 1;
    }

    // Enumerate all cards in this system
    while (deckLinkIterator->Next(&deckLink) == S_OK)
    {
        char *		deviceNameString = NULL;

        // Increment the total number of DeckLink cards found
        numDevices++;
        if (numDevices > 1)
        {
            printf("\n\n");
        }

        // *** Print the model name of the DeckLink card
        result = deckLink->GetModelName((const char **) &deviceNameString);
        if (result == S_OK)
        {
            printf("=============== %s (-C %d )===============\n\n", deviceNameString, numDevices-1);
            free(deviceNameString);
        }

        print_output_modes(deckLink);
        // Release the IDeckLink instance when we've finished with it to prevent leaks
        deckLink->Release();
    }
    deckLinkIterator->Release();

    // If no DeckLink cards were found in the system, inform the user
    if (numDevices == 0)
    {
        printf("No Blackmagic Design devices were found.\n");
    }
    printf("\n");

    /* NOTE(review): the -o/-O descriptions below appear swapped relative to
     * the implementation (parsingArg: case 'o' opens the output file, case
     * 'O' parses the option string; the example command at the bottom agrees
     * with the code, not with this text) — confirm and fix the help text. */
    fprintf(stderr,
        "    -v                   Be verbose (report each 25 frames)\n"
		"    -c <channels>        Audio Channels (2, 8 or 16 - default is 2)\n"
		"    -s <depth>           Audio Sample Depth (16 or 32 - default is 16)\n"
		"    -n --frames <frames>    Number of frames to capture (default is unlimited)\n"
		"    -M --memory <memlimit>  Maximum queue size in GB (default is 1 GB)\n"
		"    -C <num>             number of card to be used\n"
		"    -A <audio-in>        Audio input:\n"
		"                         1: Analog (RCA)\n"
		"                         2: Embedded audio (HDMI/SDI)\n"
		"    -V <video-in>        Video input:\n"
		"                         1: Composite\n"
		"                         2: Component\n"
		"                         3: HDMI\n"
		"                         4: SDI\n"
		"    -m <videoModeIndex>  Video Mode Index\n"
		"    -R --raw     <rawIOfile>       Read video data from raw file(a.raw, v.raw). Not use Capture board.\n"
		"    -S --size    <video_size>      output file size WidthxHeight.\n"
		"    -o --options <video_option>    output file option qulity:bitrate:bufsize:profile_leve\n"
		"    -O --out     <output_filename> surport file format:flv,jpeg,rtmp://\n"
		"    -u --url     <url>   UTC time sending URL\n"
		"    -? or -h or --help             help\n"
        "Capture video and audio to a file. Raw video and audio can be sent to a pipe to avconv or vlc e.g.:\n"
        "\n"
        "ffdeck    -v -m 9 -A 2 -V 4 -n 300 -M 1000 \
         -S 1280x720 -o ffdeck720p.flv -S 1280x720 -O qmin=0:qmax=69:b=1500K:bt=1500K:maxrate=1500K:minrate=1500K:bufsize=1835K:level=31"
    );

    exit(status);

    return status; // unreachable; kept to satisfy the int return type
}

/* static void parsingArg(int argc, char *argv[])
 * Parameters:
 *    argc; argument count
 *    argv; argument values array
 * Returns:
 *    void;
 *
 * Parses the command line options, configures the selected DeckLink card
 * (audio/video connections, display mode, audio input) and reads back the
 * capture parameters: video width/height/frame rate and the audio sample
 * rate. abort()s on any invalid option or DeckLink failure.
 */
static void parsingArg(int argc, char *argv[])
{
	int   displayModeCount   =   0,
	      aconnection        =   0,
	      vconnection        =   0,
	      camera             =   0,
	      width              =   0,
	      height             =   0,
	      i				     =   0,
	      ch 			     =   0;
	HRESULT   result   =   0;
	pthread_t   th_sendtime   =   0;
	BMDTimeValue   timeValue   =   0,
				   timeScale   =   0;
	BMDDisplayMode   selectedDisplayMode   =   bmdModeHD720p50;
	bool   foundDisplayMode   =   false; //tracks whether -m matched a mode (see below)
	DeckLinkCaptureDelegate   *delegate   =   NULL;
	struct option long_options[]   =
	{
			/* These options set a flag. */
			{"memory",  required_argument, 0, 'M'},
			{"raw",     required_argument, 0, 'R'},
			{"size",    required_argument, 0, 'S'},
			{"options", required_argument, 0, 'O'},
			{"out",     required_argument, 0, 'o'},
			{"frames",  required_argument, 0, 'n'},
			{"url",     required_argument, 0, 'u'},
			{"vn",      no_argument,       0, 0}, //disable video
			{"help",    no_argument,       0, 'h'},
			{0,         0,                 0, 0}
	 };
	int option_index = 0;/* getopt_long stores the option index here. */

	//initialize gOutputFiles
	for(i = 0; i < MAX_NBOUTPUTFILES; i++)
	{
		memset(&gOutputFiles[i], 0, sizeof(OutputFile));
	}

    while ((ch = getopt_long (argc, argv, "vc:s:n:M:C:A:V:N:m:S:O:o:R:?h", long_options, &option_index)) != -1)
	{
		switch (ch)
		{
		case 0:
			/* If this option set a flag, do nothing else now. */
			logD("option %s\n", long_options[option_index].name);
			if (optarg)
				printf(" with arg %s", optarg);

			if(av_strstart(long_options[option_index].name, "vn", NULL))
			{
				logD ("option --vn. set disable video.\n");
				gOutputFiles[gNbOutputFiles].video_disable = 1;
				break;
			}

			logF("Not supported --%s option\n", long_options[option_index].name);
			abort();
			break;
		case 'v':
			logD ("option -v\n");
			gVerbose = true;
			break;
		case 'c':
			gAudioChannels = atoi(optarg);
			if (gAudioChannels != 2 &&
				gAudioChannels != 8 &&
				gAudioChannels != 16)
			{
				logF("Invalid argument: Audio Channels must be either 2, 8 or 16\n");
				abort();
			}
			break;
		case 's':
			gAudioSampleDepth = atoi(optarg);
			if (gAudioSampleDepth != 16 && gAudioSampleDepth != 32)
			{
				logF("Invalid argument: Audio Sample Depth must be either 16 bits or 32 bits\n");
				abort();
			}
			break;
		case 'n'://frames
			logD("option -n with value `%s' gMaxFrames\n", optarg);
			gMaxFrames = atoi(optarg);
			break;
		case 'M'://memory
			logD("option -M with value `%s' gNbMemoryLimit\n", optarg);
			if (strstr(optarg, ":"))
			{
				//"aPool:vPool:aQueue:vQueue" — explicit per-pool/queue element counts
				char *pch = NULL;
				if((pch = strtok (optarg,":")) != NULL)
					gMaxMem.nbAudioPool = atoi(pch);

				if((pch = strtok (NULL,":")) != NULL)
					gMaxMem.nbVideoPool = atoi(pch);

				if((pch = strtok (NULL,":")) != NULL)
					gMaxMem.nbAudioQueue = atoi(pch);

				if((pch = strtok (NULL,":")) != NULL)
					gMaxMem.nbVideoQueue = atoi(pch);
			}
			else
			{
				gMaxMem.sizeLimit = atoi(optarg);
			}
			break;
		case 'C':
			logD("option -C with value `%s' camera\n", optarg);
			 camera = atoi(optarg);
			 break;
		case 'A':
			logD("option -A with value `%s' aconnection\n", optarg);
			aconnection = atoi(optarg);
			 break;
		case 'V':
			logD("option -V with value `%s' vconnection\n", optarg);
			vconnection = atoi(optarg);
			break;
		case 'm':
			logD("option -m with value `%s' gVideoModeIndex\n", optarg);
		 	gVideoModeIndex = atoi(optarg);
			break;
		case 'R':
			logD("option -R with value `%s' gRawIOfile\n", optarg);
			//NOTE(review): usage describes -R as a raw *filename*, but it is
			//parsed with atoi() here — confirm gRawIOfile's intended type.
			gRawIOfile = atoi(optarg);
			break;
		case 'S'://size (applies to the NEXT output file, gOutputFiles[gNbOutputFiles])
			logD("option -S with value `%s'\n", optarg);
			if(gNbOutputFiles >= MAX_NBOUTPUTFILES) //guard the slot before writing it
			{
				logF("Cann't excess output files. Max number output files is %d.\n", MAX_NBOUTPUTFILES);
				abort();
			}
			if (optarg && av_parse_video_size(&width, &height, optarg) < 0)
			{
				av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", optarg);
				abort();
			}
			gOutputFiles[gNbOutputFiles].videoResolution = (AVRational){width, height};
			break;
		case 'O'://options (applies to the NEXT output file)
			logD("option -O with value `%s'\n", optarg);
			if(gNbOutputFiles >= MAX_NBOUTPUTFILES) //guard the slot before writing it
			{
				logF("Cann't excess output files. Max number output files is %d.\n", MAX_NBOUTPUTFILES);
				abort();
			}
			gOutputFiles[gNbOutputFiles].options = optarg;
			break;
		case 'o'://output file/url
			logD("option -o with value `%s'\n", optarg);
			/* The old code checked `gNbOutputFiles > MAX_NBOUTPUTFILES` only
			 * AFTER using gOutputFiles[gNbOutputFiles], so the first option
			 * past the limit wrote out of bounds. Check before use instead. */
			if(gNbOutputFiles >= MAX_NBOUTPUTFILES)
			{
				logF("Cann't excess output files. Max number output files is %d.\n", MAX_NBOUTPUTFILES);
				abort();
			}
			//allocate the AVFormatContext for this output
			if(av_strstart(optarg, "rtmp://", NULL))
			{
				avformat_alloc_output_context2(&gOutputFiles[gNbOutputFiles].ctx, NULL, "flv", optarg);
				gMaxFrames = -1; //streaming: capture until interrupted
				//spawn the thread that tells the rtmp server the encoding start time
				if (!th_sendtime && pthread_create(&th_sendtime, NULL, sendtime, NULL))
				{
					logF("Could not pthread_create.\n");
					abort();
				}
			}
			else if (strstr(optarg, ".flv"))
			{
				avformat_alloc_output_context2(&gOutputFiles[gNbOutputFiles].ctx, NULL, "flv", optarg);
			}
			else if (strstr(optarg, ".mp4"))
			{
				avformat_alloc_output_context2(&gOutputFiles[gNbOutputFiles].ctx, NULL, "mp4", optarg);
			}
			else if (strstr(optarg, ".jpeg") || strstr(optarg, ".jpg") || strstr(optarg, ".JPG") || strstr(optarg, ".JPEG"))
			{
				avformat_alloc_output_context2(&gOutputFiles[gNbOutputFiles].ctx, NULL, "image2", optarg);
			}
			else
			{
				logF("Not support file:%s\n", optarg);
				abort();
			}

			if (!gOutputFiles[gNbOutputFiles].ctx)
			{
				logF("Error avformat_alloc_output_context2\n");
				abort();
			}
			gNbOutputFiles++;
			break;
		case 'u'://url
			logD("option -u with value `%s'\n", optarg);
			gCurlPath = optarg;
			break;
		case '?':
		case 'h':
			usage(0);
			break;
		 default:
			logD("unsupported option with value `%s'\n", optarg);
			//NOTE(review): option_index is only meaningful after a long option;
			//for an unknown short option this reads a stale entry.
			if (long_options[option_index].flag != 0)
			{
			   logF("flag %d\n", *long_options[option_index].flag);
			}

			logF("Not supported --%s option\n", optarg);
			abort();
		   break;
		 }
	}

    /* Print any remaining command line arguments (not options). */
	if (optind < argc)
	{
		printf ("non-option ARGV-elements: ");
		while (optind < argc)
			printf ("%s ", argv[optind++]);
		putchar ('\n');
		abort();
	}

	gDL_iterator = CreateDeckLinkIteratorInstance();
	if (!gDL_iterator)
	{
		logF("This application requires the DeckLink drivers installed.\n");
		abort();
	}

	/* Connect to the requested DeckLink instance (0-based -C index).
	 * NOTE(review): IDeckLink references skipped over here are not Release()d. */
	i = 0;
	do {
		result = gDL_iterator->Next(&gDecklink);
	} while (i++ < camera);

	if (result != S_OK)
	{
		logF("No DeckLink PCI cards found.\n");
		abort();
	}

	if (gDecklink->QueryInterface(IID_IDeckLinkInput, (void**)&gDL_input) != S_OK)
		abort();

	result = gDecklink->QueryInterface(IID_IDeckLinkConfiguration, (void**)&gDL_configuration);
	if (result != S_OK)
	{
		logF("Could not obtain the IDeckLinkConfiguration interface - result = %08x\n", result);
		abort();
	}

	result = S_OK;
	switch (aconnection)
	{
		case 1:
			result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAnalog);
			break;
		case 2:
			result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionEmbedded);
			break;
		default:
			// do not change it
			break;
	}
	if (result != S_OK)
	{
		logF("Failed to set audio input - result = %08x\n", result);
		abort();
	}

	result = S_OK;
	switch (vconnection)
	{
		case 1:
			result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComposite);
			break;
		case 2:
			result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComponent);
			break;
		case 3:
			result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionHDMI);
			break;
		case 4:
			result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSDI);
			break;
		default:
			// do not change it
			break;
	}
	if (result != S_OK)
	{
		logF("Failed to set video input - result %08x\n", result);
		abort();
	 }

	delegate = new DeckLinkCaptureDelegate();
	gDL_input->SetCallback(delegate);

	// Obtain an IDeckLinkDisplayModeIterator to enumerate the display modes supported on output
	result = gDL_input->GetDisplayModeIterator(&gDL_displayModeIterator);
	if (result != S_OK)
	{
		logF("Could not obtain the video output display mode iterator - result = %08x\n", result);
		abort();
	}

	if (gVideoModeIndex < 0)
	{
		logE("No video mode specified\n");
		usage(0);
	}

	while (gDL_displayModeIterator->Next(&gDL_displayMode) == S_OK)
	{
		if (gVideoModeIndex == displayModeCount)
		{
			selectedDisplayMode = gDL_displayMode->GetDisplayMode();
			foundDisplayMode = true;
			break;
		}
		displayModeCount++;
		gDL_displayMode->Release();
	}

	/* BMDDisplayMode is an unsigned FourCC, so the old `selectedDisplayMode = -1`
	 * sentinel could never satisfy `<= 0` and an out-of-range -m index went
	 * undetected; track the match explicitly instead. */
	if (!foundDisplayMode)
	{
		logF("Invalid mode %d specified\n", gVideoModeIndex);
		abort();
	}

	result = gDL_input->EnableVideoInput(selectedDisplayMode, bmdFormat8BitYUV, 0);
	if (result != S_OK)
	{
		logF("Failed to enable video input. Is another application using the card?\n");
		abort();
	}

	result = gDL_input->EnableAudioInput(bmdAudioSampleRate48kHz, gAudioSampleDepth, gAudioChannels);
	if (result == S_OK)
	{
		gDL_audioTimeBase = (AVRational){1, 48000};
	}
	else
	{
		abort();
	}

	gDL_videoResolution = (AVRational){gDL_displayMode->GetWidth(), gDL_displayMode->GetHeight()};
	gDL_displayMode->GetFrameRate(&timeValue, &timeScale);//    1001  /  30000
	gDL_videoFrameRate = (AVRational){(int) timeScale, (int) timeValue};
}

/* static void myexit(void)
 * atexit() handler used for debugging: converts any exit() into abort() so a
 * core dump / debugger captures the call stack at the point of termination.
 */
static void myexit(void)
{
	/* abort() never returns. The old `return exit(255);` after it was dead
	 * code — and calling exit() from an atexit handler is undefined behavior
	 * anyway — so it has been removed. */
	abort();
}

/* main()
 * Program entry point: sets up the memcpy worker threads (optional),
 * registers FFmpeg, creates one audio/video encoder stream pair plus an
 * interleave thread per requested output file, starts DeckLink capture,
 * blocks until a shutdown signal, then drains/joins all threads and
 * releases every resource through the common `fail:` cleanup path.
 *
 * Returns 0 on clean shutdown, 1 on any failure.
 * NOTE(review): atexit(myexit) means returning from main() still runs the
 * abort() handler — apparently an intentional debugging aid; confirm.
 */
int main(int argc, char *argv[])
{
    int	  exitStatus    =   1,	/* pessimistic default; cleared only after a clean shutdown */
		  i             =   0,
		  nbOutputStreamAudio = 0,	/* number of audio encoder streams actually opened */
	      nbOutputStreamVideo = 0;	/* number of video encoder streams actually opened */
    AVCodec   *video_codec   =   NULL,
    		  *audio_codec   =   NULL;
    AVPacket   lastPkt   =   {0};	/* empty packet used as an EOF marker for encoder threads */
    HRESULT   result   =   0;
    AVOutputFormat   *oformat   =   NULL;

#ifdef THREAD_MEMCOPY
    pthread_t   th_mem[4]   =   {0};

	/* One mutex/condvar pair and one DL_memcpy worker thread per slot.
	 * (Was four copy-pasted blocks; collapsed into loops.) */
	for (i = 0; i < 4; ++i)
	{
		pthread_mutex_init(&gMemMutex[i], NULL);
		pthread_cond_init(&gMemCond[i], NULL);
	}
	for (i = 0; i < 4; ++i)
	{
		if (pthread_create(&th_mem[i], NULL, DL_memcpy, NULL))
		{
			logF("Could not pthread_create.th_mem[%d] \n", i);
			goto fail;
		}
	}
#endif

    atexit(myexit);

    printf("ffdeck start\n");

//#define LOG_FILE
#ifdef LOG_FILE
    freopen ("ffdeck.log","w",stderr);
#endif

	avcodec_register_all();
    av_register_all();
    avformat_network_init();

    av_log_set_level(AV_LOG_VERBOSE);//AV_LOG_QUIET//AV_LOG_FATAL//AV_LOG_ERROR//AV_LOG_WARNING//AV_LOG_INFO//AV_LOG_VERBOSE//AV_LOG_DEBUG

    parsingArg(argc, argv);

    if(gNbOutputFiles == 0)
    {
    	logF("At least one output file must be specified.(-o <filename> or --out <filename>)\n");
    	goto fail;
    }

    /* The per-output video timebase may need tuning later, e.g.
     *   gOutputFiles[n].videoTimeBase = (AVRational){1,1000};
     * left at the defaults for now. */

	for (i = 0; i < gNbOutputFiles; ++i)
	{
		audio_codec = NULL;
		video_codec = NULL;
		oformat = gOutputFiles[i].ctx->oformat;

		gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].file_index = i;
		gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].file_index = i;

		/* ---- add audio stream ---- */
		if (oformat->audio_codec != AV_CODEC_ID_NONE)
		{
			gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].source_index = AUDIO_STREAM_INDEX;
			gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st = addStreamOfAudio(gOutputFiles[i].ctx, &audio_codec, AV_CODEC_ID_AAC);
			if (gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st)
			{
				nbOutputStreamAudio++;
				//open audio codec
				openCodecOfAudio(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX], audio_codec);
				//initialize raw packet queue and encoded packet queue
				initFFDeckLinkQueue(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].rawPacketQueue);
				initFFDeckLinkQueue(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].encodedPacketQueue);
				//create audio encode thread
				if (pthread_create(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].pthEnc,
						NULL, encodeAudioFromStream,
						&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX] ))
				{
					logF("Could not pthread_create. [%d]encodeAudioFromStream \n", i);
					goto fail;
				}
			}
		}
		else
		{
			/* BUG FIX: this branch previously cleared
			 * output_streams[VIDEO_STREAM_INDEX].source_index — a copy-paste
			 * error; it must mark the AUDIO stream as absent. */
			gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].source_index = -1;
		}

		/* ---- add video stream ---- */
		if ((oformat->video_codec != AV_CODEC_ID_NONE) && (gOutputFiles[i].video_disable == 0))
		{
			gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].source_index = VIDEO_STREAM_INDEX;
			if (oformat->video_codec == AV_CODEC_ID_MJPEG)
				gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st = addStreamOfJPEG(gOutputFiles + i, &video_codec, oformat->video_codec);
			else
				gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st = addStreamOfVideo(gOutputFiles + i, &video_codec, AV_CODEC_ID_H264);
			if (gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st)
			{
				nbOutputStreamVideo++;
				//open video codec
				openCodecOfVideo(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX], video_codec);
				//initialize raw packet queue and encoded packet queue
				initFFDeckLinkQueue(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].rawPacketQueue);
				initFFDeckLinkQueue(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].encodedPacketQueue);
				//create video encode thread
				if (pthread_create(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].pthEnc,
						NULL, encodeVideoFromStream,
						&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX]))
				{
					logF("Could not pthread_create. [%d]encodeVideoFromStream \n", i);
					goto fail;
				}
			}
		}
		else
		{
			gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].source_index = -1;
		}

		if (!(oformat->flags & AVFMT_NOFILE))
		{
			/* open the output file */
			if (avio_open2(&gOutputFiles[i].ctx->pb, gOutputFiles[i].ctx->filename, AVIO_FLAG_WRITE, NULL, NULL) < 0)
			{
				/* BUG FIX: previously logged the never-assigned local
				 * oFilename (always empty) and returned 1 without running
				 * the common cleanup; log the real name and goto fail. */
				logF("Could not open '%s'\n", gOutputFiles[i].ctx->filename);
				goto fail;
			}
		}

		/* BUG FIX: return value was ignored; a failed header write left the
		 * muxer in an unusable state. */
		if (avformat_write_header(gOutputFiles[i].ctx, NULL) < 0)
		{
			logF("Could not write header for '%s'\n", gOutputFiles[i].ctx->filename);
			goto fail;
		}

		av_dump_format(gOutputFiles[i].ctx, i, gOutputFiles[i].ctx->filename, 1);

		//create interleave thread.
		if (pthread_create(&gOutputFiles[i].pthInter, NULL, interleaveToFile, gOutputFiles + i))
		{
			logF("Could not pthread_create. [%d]interleaveToFile \n", i);
			goto fail;
		}
	}

    pthread_mutex_init(&gSleepMutex, NULL);
	pthread_cond_init(&gSleepCond, NULL);

	gDL_audioPool.init(gMaxMem.nbAudioPool, DEFAULT_AUDIO_SAMPLE_SIZE*4);//1024*4=4096
	gDL_audioPool.setInitRetainCount(nbOutputStreamAudio);
	gDL_audioPool.status();
	gDL_audioPool.isvideo = 0;//temp

	gDL_videoPool.init(gMaxMem.nbVideoPool, gDL_videoResolution.num*gDL_videoResolution.den*2);//1920*1080*2=4147200
	gDL_videoPool.setInitRetainCount(nbOutputStreamVideo);
	gDL_videoPool.status();
	gDL_videoPool.isvideo = 1;//temp

	result = gDL_input->StartStreams();
	if (result != S_OK)
		goto fail;
    // Block main thread until signal occurs
    pthread_mutex_lock(&gSleepMutex);
    pthread_cond_wait(&gSleepCond, &gSleepMutex);
    pthread_mutex_unlock(&gSleepMutex);
    logI("\nStopping Capture...\n");
    result = gDL_input->StopStreams();
	if (result != S_OK)
		goto fail;
	logI("\nStopped Capture\n");

#ifdef THREAD_MEMCOPY
	gDstBuf[0] = gDstBuf[1] = gDstBuf[2] = gDstBuf[3] = NULL;//signals the DL_memcpy threads to terminate
#endif

	//push a final NULL packet into each queue so the encoder threads drain and exit
	av_init_packet(&lastPkt);
	lastPkt.data = NULL;
	lastPkt.size = 0;
	for(i = 0; i < gNbOutputFiles; i++)
	{
		//put audio last null packet
		lastPkt.stream_index = AUDIO_STREAM_INDEX;
		if(gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st)
			putAVPacketInFFDeckLinkQueue(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].rawPacketQueue, lastPkt);
		//put video last null packet
		lastPkt.stream_index = VIDEO_STREAM_INDEX;
		if(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st)
			putAVPacketInFFDeckLinkQueue(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].rawPacketQueue, lastPkt);
	}

	logI("Wait until to complete encoding thread\n");
	for(i = 0; i < gNbOutputFiles; i++)
	{
		if(gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st)
			pthread_join(gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].pthEnc, NULL);//(void **)&status
		if(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st)
			pthread_join(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].pthEnc, NULL);//(void **)&status
	}
	logI("Finished to encoding.\n");
	logI("Wait until to complete interleave thread\n");
	for(i = 0; i < gNbOutputFiles; i++)
	{
		pthread_join(gOutputFiles[i].pthInter, NULL);///(void **)&status
	}
	logI("Finished to interleaving.\n");

	for (i = 0; i < gNbOutputFiles; ++i)
	{
	    // write the trailer if needed and close file
		av_write_trailer(gOutputFiles[i].ctx);
		// close audio encoder
		if (gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st)
			avcodec_close(gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st->codec);
		// close video encoder
		if (gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st)
			avcodec_close(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st->codec);
	}

	/* BUG FIX: exitStatus was cleared BEFORE StartStreams(), so any failure
	 * after that point jumped to fail: yet still returned success. It is now
	 * cleared only once the full shutdown sequence has completed. */
	exitStatus = 0;

fail:

#ifdef LOG_FILE
	fclose (stderr);//close the log file
#endif

#ifdef THREAD_MEMCOPY
	for (i = 0; i < 4; ++i)
		pthread_cond_destroy(&gMemCond[i]);
#endif

	gDL_audioPool.flush();
	gDL_videoPool.flush();

	for (i = 0; i < gNbOutputFiles; ++i)
	{
		// close the output file
		// NOTE(review): for AVFMT_NOFILE formats the context is never freed
		// here — possible leak; verify before changing the cleanup shape.
		if (!(gOutputFiles[i].ctx->oformat->flags & AVFMT_NOFILE) && gOutputFiles[i].ctx->pb)
		{
			avio_close(gOutputFiles[i].ctx->pb);

			avformat_free_context(gOutputFiles[i].ctx);
			gOutputFiles[i].ctx = NULL;
		}

		//free audio memory
		if (gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].st)
		{
			avpacket_queue_flush(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].rawPacketQueue);
			avpacket_queue_flush(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].encodedPacketQueue);
			avcodec_free_frame(&gOutputFiles[i].output_streams[AUDIO_STREAM_INDEX].frame);
		}
		//free video memory
		if (gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].st)
		{
			if(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].imgSwsCtx)
				sws_freeContext(gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].imgSwsCtx);
			avpacket_queue_flush(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].rawPacketQueue);
			avpacket_queue_flush(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].encodedPacketQueue);
			avcodec_free_frame(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].frame);
			//only video free
			if (gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].pictureOfDeckLink->data[0])
			{
				//av_freep(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].pictureOfDeckLink->data[0]);
				avcodec_free_frame(&gOutputFiles[i].output_streams[VIDEO_STREAM_INDEX].pictureOfDeckLink);
			}
		}
	}

    if (gDL_displayModeIterator)
    {
        gDL_displayModeIterator->Release();
        gDL_displayModeIterator = NULL;
    }

    if (gDL_input)
    {
        gDL_input->Release();
        gDL_input = NULL;
    }

    if (gDecklink)
    {
        gDecklink->Release();
        gDecklink = NULL;
    }

    if (gDL_iterator)
    	gDL_iterator->Release();

	/* BUG FIX: the success banner was printed unconditionally (even after a
	 * goto fail), followed by a stray duplicate "ffdeck start" printf. */
	if (exitStatus == 0)
		logF("Congratulation! ffdeck complete.\n");
    return exitStatus;
}
