/*****************************************************************************

INTEL CORPORATION PROPRIETARY INFORMATION
This software is supplied under the terms of a license agreement or
nondisclosure agreement with Intel Corporation and may not be copied
or disclosed except in accordance with the terms of that agreement.
Copyright(c) 2005-2014 Intel Corporation. All Rights Reserved.

*****************************************************************************/

/*
Build-time switches:
ENC_AVC            encode the stream as H.264 (AVC)
ENC_HEVC           encode the stream as HEVC (H.265)
ENABLE_FORCE_IDR   test forcing selected frames to be encoded as IDR frames
ENABLE_INSERT_SEI  test inserting a custom (user-data-unregistered) SEI message
*/
//#define ENC_AVC
#define ENC_HEVC
#define ENABLE_FORCE_IDR
#define ENABLE_INSERT_SEI

#include <vector>
#include <unordered_map>
#include <algorithm>
#include <numeric>

#include "common_utils.h"
#include "cmd_options.h"

// Print the command-line help text for this sample.
// The format string receives the program name from the parser context.
static void usage(CmdOptionsCtx* ctx)
{
    const char* const helpFmt =
        "Encodes INPUT and optionally writes OUTPUT. If INPUT is not specified\n"
        "simulates input with empty frames filled with the color.\n"
        "\n"
        "Usage: %s [options] [INPUT] [OUTPUT]\n";
    printf(helpFmt, ctx->program);
}

int main(int argc, char** argv)
{
    mfxStatus sts = MFX_ERR_NONE;
    bool bEnableInput;  // if true, removes all YUV file reading (which is replaced by pre-initialized surface data). Workload runs for 1000 frames.
    bool bEnableOutput; // if true, removes all output bitsteam file writing and printing the progress
    CmdOptions options;

    // =====================================================================
    // Intel Media SDK encode pipeline setup
    // - In this example we are encoding an AVC (H.264) stream
    // - Video memory surfaces are used
    // - Configure pipeline for low latency
    //

    //1. Read options from the command line (if any is given)
    memset(&options, 0, sizeof(CmdOptions));
    options.ctx.options = OPTIONS_ENCODE | OPTION_MEASURE_LATENCY;
    options.ctx.usage = usage;
    // Set default values:
    options.values.impl = MFX_IMPL_AUTO_ANY;
    options.values.MeasureLatency = true;

    // here we parse options
    ParseOptions(argc, argv, &options);

    if (!options.values.Width || !options.values.Height) {
        printf("error: input video geometry not set (mandatory)\n");
        return -1;
    }
    if (!options.values.Bitrate) {
        printf("error: bitrate not set (mandatory)\n");
        return -1;
    }
    if (!options.values.FrameRateN || !options.values.FrameRateD) {
        printf("error: framerate not set (mandatory)\n");
        return -1;
    }

    bEnableInput = (options.values.SourceName[0] != '\0');
    bEnableOutput = (options.values.SinkName[0] != '\0');
    // Open input YV12 YUV file
    FILE* fSource = NULL;
    if (bEnableInput) {
        MSDK_FOPEN(fSource, options.values.SourceName, "rb");
        MSDK_CHECK_POINTER(fSource, MFX_ERR_NULL_PTR);
    }

	//fSource = NULL;

    // Create output elementary stream (ES) H.264 file
    FILE* fSink = NULL;
    if (bEnableOutput) {
        MSDK_FOPEN(fSink, options.values.SinkName, "wb");
        MSDK_CHECK_POINTER(fSink, MFX_ERR_NULL_PTR);
    }

    //2. Initialize Intel Media SDK session
    // - MFX_IMPL_AUTO_ANY selects HW acceleration if available (on any adapter)
    mfxIMPL impl = options.values.impl;
    mfxVersion ver = { {0, 1} };
    MFXVideoSession session;

    mfxFrameAllocator mfxAllocator;

	//这里需要初始化外部d3d11, 同时根据igfx/dgfx设置修改一下impl参数, 模拟ALVR传进来的D3D11 device和device context
	//impl可以根据external_CreateD3D11Device()里的代码示例在这里修改
	external_CreateD3D11Device(impl, options.values.bPrefferdGfx, options.values.bPrefferiGfx );

	//创建从RGBA文件里读进来的RGBA texture2D buffer, 模拟显示设备传进来的Texture2D buffer
	//直接加载RGB4buffer以后，不需要额外创建RGBA buffer
	//external_CreateRGBAStagingTexture(options.values.Width,options.values.Height);

    sts = Initialize(impl, ver, &session, &mfxAllocator);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

	mfxVersion ret_ver = { {0, 0} };
	session.QueryVersion(&ret_ver);
	printf("MFX Version: %d.%d\n", ret_ver.Major, ret_ver.Minor);

    //3. Initialize encoder parameters
    // - In this example we are encoding an AVC (H.264) stream
    mfxVideoParam mfxEncParams;
    memset(&mfxEncParams, 0, sizeof(mfxEncParams));
	//设置H264还是H265编码

#ifdef ENC_AVC
	mfxEncParams.mfx.CodecId = MFX_CODEC_AVC; // MFX_CODEC_HEVC; // MFX_CODEC_HEVC; // MFX_CODEC_AVC;
#endif
#ifdef ENC_HEVC
	mfxEncParams.mfx.CodecId = MFX_CODEC_HEVC;
#endif

	//编码画质的设置, best_speed编码速度最快，但是画质最差
	mfxEncParams.mfx.TargetUsage = MFX_TARGETUSAGE_BEST_SPEED; // MFX_TARGETUSAGE_BEST_SPEED; // MFX_TARGETUSAGE_BALANCED;
	printf("TargetUsage = %d\n", mfxEncParams.mfx.TargetUsage);

	//mfxEncParams.mfx.TargetKbps是16bit，所以最大码率只能设置65535Kbps (64Mbps), 如果大于这个码率，需要结合BRCParamMultiplier一起设置
	//比如要编码120Mbps的码率，需要设置 mfxEncParams.mfx.TargetKbps = 60000; mfxEncParams.mfx.BRCParamMultiplier = 2;
	//这样最终编码码率为 TargetKbps * BRCParamMultiplier = 120000Kbps (即120Mbps)
	mfxEncParams.mfx.TargetKbps = options.values.Bitrate;
	//mfxEncParams.mfx.BRCParamMultiplier = xxx;

	mfxEncParams.mfx.RateControlMethod = MFX_RATECONTROL_CBR; // MFX_RATECONTROL_VBR;
    mfxEncParams.mfx.FrameInfo.FrameRateExtN = options.values.FrameRateN;
    mfxEncParams.mfx.FrameInfo.FrameRateExtD = options.values.FrameRateD;
	//修改这里，直接让encoder接收RGBA的数据, 只有ARC显卡支持RGBA帧的直接输入
	mfxEncParams.mfx.FrameInfo.FourCC = MFX_FOURCC_RGB4; // MFX_FOURCC_NV12;MFX_FOURCC_A2RGB10


	mfxEncParams.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV444; // MFX_CHROMAFORMAT_YUV420;
    mfxEncParams.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
    mfxEncParams.mfx.FrameInfo.CropX = 0;
    mfxEncParams.mfx.FrameInfo.CropY = 0;
    mfxEncParams.mfx.FrameInfo.CropW = options.values.Width;
    mfxEncParams.mfx.FrameInfo.CropH = options.values.Height;

	//默认先8bit编码
	mfxEncParams.mfx.FrameInfo.BitDepthChroma = 8;
	mfxEncParams.mfx.FrameInfo.BitDepthLuma = 8;

	if (true == options.values.c10bit)
	{
		printf("Enc HEVC 10bit stream\n");
		mfxEncParams.mfx.CodecProfile = MFX_PROFILE_HEVC_MAIN10; //Parameter for HEVC 10bit
		mfxEncParams.mfx.CodecLevel = MFX_LEVEL_HEVC_51; //Parameter for HEVC 10bit
		mfxEncParams.mfx.FrameInfo.FourCC = MFX_FOURCC_A2RGB10; // MFX_FOURCC_NV12;MFX_FOURCC_A2RGB10
		mfxEncParams.mfx.FrameInfo.BitDepthChroma = 10;
		mfxEncParams.mfx.FrameInfo.BitDepthLuma = 10;
	}

	//输出编码文件buffer的大小，开一个能放1秒编码的buffer
	mfxEncParams.mfx.BufferSizeInKB = mfxEncParams.mfx.TargetKbps / 8;

    // Width must be a multiple of 16
    // Height must be a multiple of 16 in case of frame picture and a multiple of 32 in case of field picture
    mfxEncParams.mfx.FrameInfo.Width = MSDK_ALIGN16(options.values.Width);
    mfxEncParams.mfx.FrameInfo.Height =
        (MFX_PICSTRUCT_PROGRESSIVE == mfxEncParams.mfx.FrameInfo.PicStruct) ?
        MSDK_ALIGN16(options.values.Height) :
        MSDK_ALIGN32(options.values.Height);

    mfxEncParams.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY;

    //4. Configuration for low latency
	//这里设置GopRefDist = 1只编I/P帧
	//AsyncDepth=1不要缓冲buffer
	//NumRefFrame = 1 表示P帧只参考前一帧
	//这3项负责低延迟编码的设置
    mfxEncParams.AsyncDepth = 1;    //1 is best for low latency
    mfxEncParams.mfx.GopRefDist = 1;        //1 is best for low latency, I and P frames only
	mfxEncParams.mfx.NumRefFrame = 1;

	//mfxEncParams.mfx.GopPicSize = 5;		//测试设置每个GOP里面有5帧

	std::vector<mfxExtBuffer*> m_EncExtParams;

#if 1
	mfxExtCodingOption3 extendedCodingOptions3;
	memset(&extendedCodingOptions3, 0, sizeof(extendedCodingOptions3));
	extendedCodingOptions3.Header.BufferId = MFX_EXTBUFF_CODING_OPTION3;
	extendedCodingOptions3.Header.BufferSz = sizeof(extendedCodingOptions3);

	extendedCodingOptions3.ScenarioInfo = MFX_SCENARIO_REMOTE_GAMING;	//设置成REMOTE_GAMING模式可以让GPU频率一直在最高 某些特定硬件和场景下会提高编码性能

	m_EncExtParams.push_back((mfxExtBuffer *)&extendedCodingOptions3);
#endif

#ifdef ENC_HEVC
	//HEVC编码使用tiled encoding选项，使用双路硬件编码器
	mfxExtHEVCTiles extendedHEVCTiles;
	memset(&extendedHEVCTiles, 0, sizeof(extendedHEVCTiles));
	extendedHEVCTiles.Header.BufferId = MFX_EXTBUFF_HEVC_TILES;
	extendedHEVCTiles.Header.BufferSz = sizeof(extendedHEVCTiles);
	extendedHEVCTiles.NumTileRows = 1;
	extendedHEVCTiles.NumTileColumns = 2;  //这里设置成2 使用2个编码器同时编码

	m_EncExtParams.push_back((mfxExtBuffer *)&extendedHEVCTiles);
#endif

	mfxEncParams.ExtParam = &m_EncExtParams[0];
	mfxEncParams.NumExtParam = m_EncExtParams.size();

    // ---

    //5. Create Media SDK encoder
    MFXVideoENCODE mfxENC(session);

    // Validate video encode parameters (optional)
    // - In this example the validation result is written to same structure
    // - MFX_WRN_INCOMPATIBLE_VIDEO_PARAM is returned if some of the video parameters are not supported,
    //   instead the encoder will select suitable parameters closest matching the requested configuration
    sts = mfxENC.Query(&mfxEncParams, &mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_INCOMPATIBLE_VIDEO_PARAM);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Query number of required surfaces for encoder
    mfxFrameAllocRequest EncRequest;
    memset(&EncRequest, 0, sizeof(EncRequest));
    sts = mfxENC.QueryIOSurf(&mfxEncParams, &EncRequest);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    EncRequest.Type |= WILL_WRITE; // This line is only required for Windows DirectX11 to ensure that surfaces can be written to by the application

    //6. Allocate required surfaces
    mfxFrameAllocResponse mfxResponse;
    sts = mfxAllocator.Alloc(mfxAllocator.pthis, &EncRequest, &mfxResponse);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxU16 nEncSurfNum = mfxResponse.NumFrameActual;

    // Allocate surface headers (mfxFrameSurface1) for encoder
    mfxFrameSurface1** pmfxSurfaces = new mfxFrameSurface1 *[nEncSurfNum];
    MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < nEncSurfNum; i++) {
        pmfxSurfaces[i] = new mfxFrameSurface1;
        memset(pmfxSurfaces[i], 0, sizeof(mfxFrameSurface1));
        memcpy(&(pmfxSurfaces[i]->Info), &(mfxEncParams.mfx.FrameInfo), sizeof(mfxFrameInfo));
        pmfxSurfaces[i]->Data.MemId = mfxResponse.mids[i];
        if (bEnableInput) {
            ClearYUVSurfaceVMem(pmfxSurfaces[i]->Data.MemId);
        }
    }

    //7. Initialize the Media SDK encoder
    sts = mfxENC.Init(&mfxEncParams);
    MSDK_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // Retrieve video parameters selected by encoder.
    // - BufferSizeInKB parameter is required to set bit stream buffer size
#if 0
    mfxVideoParam par;
    memset(&par, 0, sizeof(par));
    sts = mfxENC.GetVideoParam(&par);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
#else
	//mfxEncParams里面分配了coding_option2的buffer, 如果用上面重新创建的mfxVideoParam,获取不到coding_option2的内容
	sts = mfxENC.GetVideoParam(&mfxEncParams);
	MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
#endif

    //8. Prepare Media SDK bit stream buffer
    mfxBitstream mfxBS;
    memset(&mfxBS, 0, sizeof(mfxBS));
    mfxBS.MaxLength = mfxEncParams.mfx.BufferSizeInKB * 1000;
    mfxBS.Data = new mfxU8[mfxBS.MaxLength];
    MSDK_CHECK_POINTER(mfxBS.Data, MFX_ERR_MEMORY_ALLOC);

    // ===================================
    //9. Start encoding the frames
    //

    mfxTime tStart, tEnd;
    mfxGetTime(&tStart);

    std::unordered_map < mfxU64, mfxTime > encInTimeMap;
    std::unordered_map < mfxU64, mfxTime > encOutTimeMap;
    mfxU64 currentTimeStamp = 0; // key value (in hash) to store timestamps

    if (options.values.MeasureLatency) {
        // Record e2e latency for first 1000 frames
        // - Store timing in map (for speed), where unique timestamp value is the key
        encInTimeMap.rehash(1000);
        encOutTimeMap.rehash(1000);
    }

    int nEncSurfIdx = 0;
    mfxSyncPoint syncp;
    mfxU32 nFrame = 0;

    //
    // Stage 1: Main encoding loop
    //
    while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_DATA == sts) {
        nEncSurfIdx = GetFreeSurfaceIndex(pmfxSurfaces, nEncSurfNum);   // Find free frame surface
        MSDK_CHECK_ERROR(MFX_ERR_NOT_FOUND, nEncSurfIdx, MFX_ERR_MEMORY_ALLOC);

#if 1
		//Lock->LoadFrame->Unlock的整个过程是CPU把文件里的数据写入staging buffer, 再从staging buffer放到只能GPU访问的buffer里
		//对于ALVR的场景，对于上层传过来的d3d11Texture2D对象，可以直接用D3d11DeviceContext->CopyResource()拷贝到只能GPU访问的buffer里，不需要map/unmap
        // Surface locking required when read/write video surfaces
        sts = mfxAllocator.Lock(mfxAllocator.pthis, pmfxSurfaces[nEncSurfIdx]->Data.MemId, &(pmfxSurfaces[nEncSurfIdx]->Data));
        MSDK_BREAK_ON_ERROR(sts);

#if 0
		//加载YUV帧
        sts = LoadRawFrame(pmfxSurfaces[nEncSurfIdx], fSource);
#elseif 0
		//加载NV12帧
		sts = LoadRawNV12Frame(pmfxSurfaces[nEncSurfIdx], fSource);
#else
		if (false == options.values.c10bit)
		{
			//直接加载8bit RGB4buffer
			sts = LoadRawRGBFrame(pmfxSurfaces[nEncSurfIdx], fSource);
		}
		else
		{
			//直接加载 A2R10G10B10buffer
			sts = LoadRawGBR10BitleFrame(pmfxSurfaces[nEncSurfIdx], fSource);
		}
#endif
		
        MSDK_BREAK_ON_ERROR(sts);

        sts = mfxAllocator.Unlock(mfxAllocator.pthis, pmfxSurfaces[nEncSurfIdx]->Data.MemId, &(pmfxSurfaces[nEncSurfIdx]->Data));
        MSDK_BREAK_ON_ERROR(sts);
#else
		//加载RGBA帧，从fSource->m_pRGBATexture
		//VPP转换RGBA->NV12 从m_pRGBATexture->pmfxSurfaces[nEncSurfIdx]， NV12帧用于编码
		sts = external_LoadRawRGBAFrame(pmfxSurfaces[nEncSurfIdx]->Data.MemId, fSource);
		MSDK_BREAK_ON_ERROR(sts);
#endif

		mfxEncodeCtrl EncodeCtrl;
		memset(&EncodeCtrl, 0, sizeof(mfxEncodeCtrl));

		//如果需要修改分辨率和码率，请参考Video Conferencing features of Intel Media Software Development Kit.pdf
		static int frame_count = 0;


        for (;;) {
            if (options.values.MeasureLatency && (encInTimeMap.size() < 1000)) {
                mfxGetTime(&encInTimeMap[currentTimeStamp]);
                pmfxSurfaces[nEncSurfIdx]->Data.TimeStamp = currentTimeStamp;
            }

			//如果要把当前帧编码成I frame
			//EncodeCtrl.FrameType =
			//	MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF | MFX_FRAMETYPE_IDR;

			frame_count++;

			memset(&EncodeCtrl, 0, sizeof(mfxEncodeCtrl));

#ifdef ENABLE_FORCE_IDR
			//测试force IDR
			if ((frame_count % 10) == 0)
			{ //这里强制IDR的参数有点不一样，264要设MFX_FRAMETYPE_I, HEVC要设MFX_FRAMETYPE_IDR
				EncodeCtrl.FrameType = MFX_FRAMETYPE_I | MFX_FRAMETYPE_REF | MFX_FRAMETYPE_IDR;
			}
#endif // ENABLE_FORCE_IDR


#ifdef ENABLE_INSERT_SEI
			char m_seiData[100];
			mfxPayload m_mySEIPayload;

			//测试SEI
			//char m_seiData[100];
			//mfxPayload m_mySEIPayload;
			memset(&m_mySEIPayload, 0, sizeof(mfxPayload));

			int i;
			for (i = 0; i < 16; i++)
			{
				//set 16byte UUID
				m_seiData[i] = i;
			}
			sprintf(m_seiData + 16, "frame counter = %08d\n", frame_count);

			m_mySEIPayload.Type = 5; //user data unregister
			m_mySEIPayload.BufSize = 0x29;		//UUID 16bytes + string length "frame counter = %08d\n"
			m_mySEIPayload.NumBit = m_mySEIPayload.BufSize * 8;
			m_mySEIPayload.Data = (mfxU8 *)m_seiData;

			mfxPayload * m_payloads[1];
			m_payloads[0] = &m_mySEIPayload;

			EncodeCtrl.Payload = (mfxPayload **)&m_payloads[0];
			EncodeCtrl.NumPayload = 1;

#endif // ENABLE_INSERT_SEI

				
			// Encode a frame asychronously (returns immediately)
            sts = mfxENC.EncodeFrameAsync(&EncodeCtrl, pmfxSurfaces[nEncSurfIdx], &mfxBS, &syncp);

			//printf("** frame_count = %d, sts = %d\n", frame_count,sts);

			// Since no splitter, artificial timestamp is used
            if (options.values.MeasureLatency && (encInTimeMap.size() < 1000)) {
                currentTimeStamp++;
            }

            if (MFX_ERR_NONE < sts && !syncp) {     // Repeat the call if warning and no output
                if (MFX_WRN_DEVICE_BUSY == sts)
                    MSDK_SLEEP(1);  // Wait if device is busy, then repeat the same call
            } else if (MFX_ERR_NONE < sts && syncp) {
                sts = MFX_ERR_NONE;     // Ignore warnings if output is available
                break;
            } else if (MFX_ERR_NOT_ENOUGH_BUFFER == sts) {
                // Allocate more bitstream buffer memory here if needed...
                break;
            } else
                break;
        }
		
        if (MFX_ERR_NONE == sts) {
            sts = session.SyncOperation(syncp, 60000);   // Synchronize. Wait until encoded frame is ready
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            if (options.values.MeasureLatency && (encOutTimeMap.size() < 1000)) {
                mfxGetTime(&encOutTimeMap[mfxBS.TimeStamp]);

                /*printf( "finished encoding frame %d Type: %c latency: %f ms\n",
                        (int)mfxBS.TimeStamp,
                        mfxFrameTypeString(mfxBS.FrameType),
                        TimeDiffMsec(encOutTimeMap[mfxBS.TimeStamp], encInTimeMap[mfxBS.TimeStamp])); */
            }

            ++nFrame;
            if (bEnableOutput) {
                sts = WriteBitStreamFrame(&mfxBS, fSink);
                MSDK_BREAK_ON_ERROR(sts);

                printf("Frame number: %d\r", nFrame);
            }
            else mfxBS.DataLength = 0;
        }
    }

    // MFX_ERR_MORE_DATA means that the input file has ended, need to go to buffering loop, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    //
    // Stage 2: Retrieve the buffered encoded frames
    //
	//这部分对应解码结束阶段，外部像素文件已经全送完了，EncodeFrameAsync不再往里送frame buffer, syncOperation会把残留在gpu里的几帧剩余图像取出来，结束
    while (MFX_ERR_NONE <= sts) {
        for (;;) {
            if (options.values.MeasureLatency && (encInTimeMap.size() < 1000)) {
                mfxGetTime(&encInTimeMap[currentTimeStamp]);
                pmfxSurfaces[nEncSurfIdx]->Data.TimeStamp = currentTimeStamp;
            }

            // Encode a frame asychronously (returns immediately)
            sts = mfxENC.EncodeFrameAsync(NULL, NULL, &mfxBS, &syncp);

            // Since no splitter, artificial timestamp is used
            if (options.values.MeasureLatency && (encInTimeMap.size() < 1000)) {
                currentTimeStamp++;
            }

            if (MFX_ERR_NONE < sts && !syncp) {     // Repeat the call if warning and no output
                if (MFX_WRN_DEVICE_BUSY == sts)
                    MSDK_SLEEP(1);  // Wait if device is busy, then repeat the same call
            } else if (MFX_ERR_NONE < sts && syncp) {
                sts = MFX_ERR_NONE;     // Ignore warnings if output is available
                break;
            } else
                break;
        }

        if (MFX_ERR_NONE == sts) {
            sts = session.SyncOperation(syncp, 60000);   // Synchronize. Wait until encoded frame is ready
            MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            if (options.values.MeasureLatency && (encOutTimeMap.size() < 1000)) {
                mfxGetTime(&encOutTimeMap[mfxBS.TimeStamp]);

                /*printf("finished encoding frame %d   latency: %f ms\n",
                       (int)mfxBS.TimeStamp,
                       TimeDiffMsec(encOutTimeMap[mfxBS.TimeStamp], encInTimeMap[mfxBS.TimeStamp])); */
            }

            ++nFrame;
            if (bEnableOutput) {
                sts = WriteBitStreamFrame(&mfxBS, fSink);
                MSDK_BREAK_ON_ERROR(sts);

                printf("Frame number: %d\r", nFrame);
            }
            else mfxBS.DataLength = 0;
        }

    }

    // MFX_ERR_MORE_DATA indicates that there are no more buffered frames, exit in case of other errors
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    mfxGetTime(&tEnd);
    double elapsed = TimeDiffMsec(tEnd, tStart) / 1000;
    double fps = ((double)nFrame / elapsed);
    printf("\nExecution time: %3.2f s (%3.2f fps)\n", elapsed, fps);

    //10. Print out the latency measurement
    if (options.values.MeasureLatency) {
        std::vector < double >frameLatencies;

        // Store all frame latencies in vector
        for (std::unordered_map < mfxU64, mfxTime >::iterator it = encInTimeMap.begin(); it != encInTimeMap.end(); ++it) {
            int tsKey = (*it).first;
            mfxTime startTime = (*it).second;
            mfxTime endTime = encOutTimeMap[tsKey];
            double frameLatency = TimeDiffMsec(endTime, startTime);
            if (frameLatency > 0)
                frameLatencies.push_back(frameLatency);
            //printf("[%4d]: %.2f\n", tsKey, frameLatency);
        }
        // Calculate overall latency metrics
        printf("Latency: AVG=%5.1f ms, MAX=%5.1f ms, MIN=%5.1f ms\n",
               std::accumulate(frameLatencies.begin(), frameLatencies.end(), 0.0) / frameLatencies.size(),
               *std::max_element(frameLatencies.begin(), frameLatencies.end()),
               *std::min_element(frameLatencies.begin(), frameLatencies.end()));
    }

    // ===================================================================
    //11. Clean up resources
    //  - It is recommended to close Media SDK components first, before releasing allocated surfaces, since
    //    some surfaces may still be locked by internal Media SDK resources.

    mfxENC.Close();
    // session closed automatically on destruction

    for (int i = 0; i < nEncSurfNum; i++)
        delete pmfxSurfaces[i];
    MSDK_SAFE_DELETE_ARRAY(pmfxSurfaces);
    MSDK_SAFE_DELETE_ARRAY(mfxBS.Data);

    mfxAllocator.Free(mfxAllocator.pthis, &mfxResponse);
	//释放自己创建的RGBA staging buffer和RGBA buffer
	//直接加载RGB4buffer以后，不需要创建额外RGBA buffer, 不需要释放
	//external_ReleaseRGBAStagingTexture();


    if (fSource) fclose(fSource);
    if (fSink) fclose(fSink);

    Release();
	//注意: 新加的所有D3D11相关的变量资源(在common_directx11.cpp顶部)都是使用了智能指针，理论上会自动释放
	//也可以在这里手工释放所有资源
	//ReleaseD3D11Device()

    return 0;
}
