////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2017-2018 Qualcomm Technologies, Inc.
// All Rights Reserved.
// Confidential and Proprietary - Qualcomm Technologies, Inc.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// @file  camxsession.cpp
/// @brief Definitions for Session class
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/// @todo (CAMX-2499): Will move this to the OsUtil library later
#define USE_COLOR_METADATA


#include "chi.h"

#include "camxincs.h"
#include "camxmem.h"
#include "camxmemspy.h"
#include "camxthreadmanager.h"
#include "camxdeferredrequestqueue.h"
#include "camxhal3defaultrequest.h"
#include "camxhal3stream.h"
#include "camxhal3metadatautil.h"
#include "camxhal3queue.h"
#include "camxhwcontext.h"
#include "camxchi.h"
#include "camxpipeline.h"
#include "camxnode.h"
#include "camxsession.h"
#include "camxvendortags.h"
#include "qdMetaData.h"

CAMX_NAMESPACE_BEGIN

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Constant Definitions
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static const UINT32 MaxContentLightLevel                = 1000;         ///< Maximum content light level
static const UINT32 MaxFrameAverageLightLevel           = 200;          ///< Maximum frame average light level
static const UINT32 MaxDisplayLuminance                 = 1000;         ///< Maximum Luminance in cd/m^2
static const UINT32 MinDisplayLuminance                 = 50;           ///< Minimum Luminance in 1/10000 cd/m^2
static const UINT32 PrimariesRGB[3][2]                  = {{34000, 16000}, {13250, 34500}, {7500, 3000}};
static const UINT32 PrimariesWhitePoint[2]              = {15635, 16450};
static const UINT32 LivePendingRequestTimeoutDefault    = 800;          ///< Default live pending request time out
static const UINT32 LivePendingRequestTimeOutExtendor   = 100;          ///< Live pending request timeout extendor

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::~Session
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
Session::~Session()
{
    // Intentionally empty: all teardown (pipelines, queues, locks, result
    // holders) is performed in Session::Destroy(), which ends by invoking
    // "CAMX_DELETE this" and therefore this destructor.
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::WaitTillAllResultsAvailable
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::WaitTillAllResultsAvailable(
    BOOL useTimeOut)
{
    // Blocks the caller until the pending result-holder list drains (i.e. until
    // m_pWaitAllResultsAvailable is signaled), optionally bounded by the
    // waitAllResultsTimeout static setting.
    //
    // @param useTimeOut  TRUE to use TimedWait with the configured timeout;
    //                    FALSE to wait indefinitely.
    // @return TRUE if the timed wait expired without all results arriving;
    //         FALSE if all results became available (or none were pending).
    CamxResult  result      = CamxResultSuccess;
    BOOL        timedOut    = FALSE;

    m_pResultLock->Lock();
    // Only wait if there is at least one outstanding result; the condition
    // variable is waited on with m_pResultLock held, which the wait releases
    // atomically and re-acquires before returning.
    if (0 != m_resultHolderList.NumNodes())
    {
        // Head/tail holders are logged to show the sequence-id range we are
        // waiting on (oldest pending through newest pending).
        SessionResultHolder* pSessionResultHolderHead =
            reinterpret_cast<SessionResultHolder*>(m_resultHolderList.Head()->pData);
        SessionResultHolder* pSessionResultHolderTail =
            reinterpret_cast<SessionResultHolder*>(m_resultHolderList.Tail()->pData);

        CAMX_LOG_PERF_INFO(CamxLogGroupCore, "Waiting for all results  minResult:%d  maxRequest:%d",
            pSessionResultHolderHead->resultHolders[0].sequenceId,
            pSessionResultHolderTail->resultHolders[pSessionResultHolderTail->numResults - 1].sequenceId);


        /// @note This time out is a precaution to handle hang conditions (we expect it to retire in timely manner)
        if (TRUE == useTimeOut)
        {
            CAMX_LOG_INFO(CamxLogGroupCore,
                          "Entering TimedWait(%d ms)",
                          HwEnvironment::GetInstance()->GetStaticSettings()->waitAllResultsTimeout);

            result = m_pWaitAllResultsAvailable->TimedWait(
                m_pResultLock->GetNativeHandle(),
                HwEnvironment::GetInstance()->GetStaticSettings()->waitAllResultsTimeout);

            // Any non-success here (typically a timeout) is reported to the
            // caller via the TRUE return value so it can escalate (see Flush).
            if (CamxResultSuccess != result)
            {
                timedOut = TRUE;
                CAMX_LOG_ERROR(CamxLogGroupCore,
                               "TimedWait for results timed out with error %s!",
                               CamxResultStrings[result]);
            }
            else
            {
                CAMX_LOG_INFO(CamxLogGroupCore, "TimedWait returned with success");
            }
        }
        else
        {
            // Untimed path: wait forever; a failure here is logged but does not
            // set timedOut since no timeout semantics apply.
            CAMX_LOG_INFO(CamxLogGroupCore, "Entering Wait(forever)");
            result = m_pWaitAllResultsAvailable->Wait(m_pResultLock->GetNativeHandle());
            if (CamxResultSuccess != result)
            {
                CAMX_LOG_ERROR(CamxLogGroupCore,
                               "Wait for results returned with error %s!",
                               CamxResultStrings[result]);

            }
            else
            {
                CAMX_LOG_INFO(CamxLogGroupCore, "Wait returned with success");
            }
        }
    }

    m_pResultLock->Unlock();

    return timedOut;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::Flush
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::Flush(
    BOOL isForced)
{
    // Flushes the session, either forcibly (immediately discard pending work)
    // or gracefully (wait for pending results and escalate only on timeout).
    //
    // @param isForced  TRUE to tear down pending requests without waiting;
    //                  FALSE to wait for results first.
    // @return TRUE if the graceful wait timed out (FALSE always for forced
    //         flush and for an uninitialized session).
    BOOL timeout = FALSE;

    // Nothing to flush if initialization never completed.
    if (FALSE == m_sesssionInitComplete)
    {
        return FALSE;
    }

    m_pFlushLock->Lock();
    // Mark the session as flushing so other paths can detect it.
    CamxAtomicStoreU8(&m_aFlushingSession, TRUE);

    if (TRUE == isForced)
    {
        CAMX_LOG_INFO(CamxLogGroupCore, "Forced flush is called");

        BackupAndEmptyHALRequestQueueForFlush();
        HandleFlushForInflightRequests();
        HandleFlushForHALQueueRequests();
        m_pResultLock->Lock();
        // Drain and free every node in the pending result-holder list.
        // NOTE(review): pData is cast to ResultHolder* here, whereas
        // WaitTillAllResultsAvailable casts the same pData to
        // SessionResultHolder* — confirm which type pData actually holds.
        // Also note only the list node is freed; pData itself is not freed
        // here — presumably released elsewhere; verify no leak.
        LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();
        while (NULL != pNode)
        {
            ResultHolder* pResultHolder = reinterpret_cast<ResultHolder*>(pNode->pData);
            if (NULL != pResultHolder)
            {
                CAMX_LOG_INFO(CamxLogGroupHAL, "Destroying result holder for Sequence ID %d",
                    pResultHolder->sequenceId);
            }

            // Advance before removing/freeing the current node.
            LightweightDoublyLinkedListNode* pNodeProcessed = pNode;
            pNode = m_resultHolderList.NextNode(pNode);
            m_resultHolderList.RemoveNode(pNodeProcessed);
            CAMX_FREE(pNodeProcessed);
            pNodeProcessed = NULL;
        }

        m_pDeferredRequestQueue->DeferredRequestQueueFlush();

        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Flushing outstanding requests via CSLFlush");
        m_pChiContext->GetHwContext()->Flush(TRUE);

        // Sleep while results come back as errors
        m_pWaitAllResultsAvailable->TimedWait(m_pResultLock->GetNativeHandle(), 50);
        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Destroying Slept 50ms");
        m_pChiContext->GetHwContext()->Flush(FALSE);
        m_pResultLock->Unlock();

        for (UINT32 i = 0; i < m_numPipelines; i++)
        {
            // Setting this to invalid so next request will be 0 and
            // operation will continue without any dependancy issues.
            m_pipelineData[i].pPipeline->Flush();
            m_requestBatchId[i] = CamxInvalidRequestId;
        }

        CAMX_LOG_INFO(CamxLogGroupCore, "Forced Flush is done");
    }
    else
    {
        CAMX_LOG_INFO(CamxLogGroupCore, "flush is happening for session %p; we will wait till all results are available", this);

        // We have to wait till all result are back or signaled for flush before returning back to the application
        timeout = WaitTillAllResultsAvailable(TRUE);

        if (TRUE == timeout)
        {
            // Graceful wait failed: escalate by flushing the HW context and
            // waiting once more before cleaning up inflight/HAL-queue requests.
            BOOL inflighttimeout = FALSE;

            BackupAndEmptyHALRequestQueueForFlush();

            m_pChiContext->GetHwContext()->Flush(TRUE);
            inflighttimeout = WaitTillAllResultsAvailable(TRUE);

            m_pDeferredRequestQueue->DeferredRequestQueueFlush();

            if (TRUE == inflighttimeout)
            {
                HandleFlushForInflightRequests();
            }
            m_pChiContext->GetHwContext()->Flush(FALSE);

            for (UINT32 i = 0; i < m_numPipelines; i++)
            {
                // Setting this to invalid so next request will be 0 and
                // operation will continue without any dependancy issues.
                m_pipelineData[i].pPipeline->Flush();
                m_requestBatchId[i] = CamxInvalidRequestId;
            }

            HandleFlushForHALQueueRequests();

        }
        else
        {
            // All results arrived in time; only the deferred queue needs flushing.
            m_pDeferredRequestQueue->DeferredRequestQueueFlush();
        }
    }

    CamxAtomicStoreU8(&m_aFlushingSession, FALSE);

    CAMX_LOG_INFO(CamxLogGroupCore, "Flush is done");

    m_pFlushLock->Unlock();

    return timeout;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::Destroy
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::Destroy(
    BOOL isForced)
{
    // Tears down the session: flushes (or force-discards) pending work, then
    // releases all owned resources in dependency order, and finally deletes
    // this object. The caller must not use the Session pointer afterwards.
    //
    // @param isForced  TRUE to discard pending requests without a graceful
    //                  flush; FALSE to Flush(FALSE) first and abort on timeout.
    CAMX_ENTRYEXIT(CamxLogGroupCore);

    CAMX_LOG_INFO(CamxLogGroupCore, "Destroying session %p", this);

    BOOL timedOut = FALSE;
    if (TRUE == isForced)
    {
        CAMX_LOG_INFO(CamxLogGroupCore, "Destroying Forced");

        BackupAndEmptyHALRequestQueueForFlush();
        HandleFlushForHALQueueRequests();
        m_pResultLock->Lock();
        // Drain the pending result-holder list, freeing each list node.
        // NOTE(review): as in Flush(), pData is cast to ResultHolder* here but
        // to SessionResultHolder* in WaitTillAllResultsAvailable — confirm the
        // actual stored type; pData itself is not freed in this loop.
        LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();
        while (NULL != pNode)
        {
            ResultHolder* pResultHolder = reinterpret_cast<ResultHolder*>(pNode->pData);
            if (NULL != pResultHolder)
            {
                CAMX_LOG_VERBOSE(CamxLogGroupHAL, "Destroying result holder for Sequence ID %d",
                              pResultHolder->sequenceId);
            }

            LightweightDoublyLinkedListNode* pNodeProcessed = pNode;
            pNode = m_resultHolderList.NextNode(pNode);
            m_resultHolderList.RemoveNode(pNodeProcessed);
            CAMX_FREE(pNodeProcessed);
            pNodeProcessed = NULL;
        }
        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Flushing outstanding requests via CSLFlush");
        m_pChiContext->GetHwContext()->Flush(TRUE);

        // Sleep while results come back as errors
        m_pWaitAllResultsAvailable->TimedWait(m_pResultLock->GetNativeHandle(), 50);
        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Destroying Slept 50ms");
        m_pChiContext->GetHwContext()->Flush(FALSE);
        m_pResultLock->Unlock();
        // Forced destroy always takes the "might be in error condition" path below.
        timedOut = TRUE;
    }
    else
    {
        // Graceful path: a timed-out flush is treated as fatal (SIGABRT) so a
        // hang produces a crash dump rather than undefined teardown.
        timedOut = Flush(FALSE);
        if (TRUE == timedOut)
        {
            CAMX_LOG_ERROR(CamxLogGroupCore, "Timed out in Flush raising sigabrt");
            raise(SIGABRT);
        }
    }
    if (TRUE == timedOut)
    {
        // We might be in error condition
        HAL3Module::GetInstance()->SetDropCallbacks();
    }

    this->UnregisterThreadJobCallback();

    // Due to drain logic we had better destroy anything that has a job registered first (including DRQ)
    if (InvalidJobHandle != m_hNodeJobHandle)
    {
        // The wrapper name must match the one used at registration time
        // (see FinalizePipeline / FinalizeDeferPipeline).
        CHAR wrapperName[FILENAME_MAX];
        OsUtils::SNPrintF(&wrapperName[0], sizeof(wrapperName), "NodeCommonThreadJobFamily%p", this);
        m_pThreadManager->UnregisterJobFamily(NodeThreadJobFamilySessionCb, wrapperName, m_hNodeJobHandle);
        m_hNodeJobHandle = InvalidJobHandle;
    }

    /// @todo (CAMX-1797) Temporary workaround - Need to figure out the right place
    for (UINT i = 0; i < m_numPipelines; i++)
    {
        if (NULL != m_pipelineData[i].pPipeline)
        {
            // Stream off and unlink before Destroy() further below.
            m_pipelineData[i].pPipeline->StreamOff(CHIDeactivateModeDefault);
            m_pipelineData[i].pPipeline->Unlink();
        }
    }

    if (NULL != m_pDeferredRequestQueue)
    {
        m_pDeferredRequestQueue->Destroy();
        m_pDeferredRequestQueue = NULL;
    }

    // We should have no more result waiting for this session, so we are good to move all the nodes
    // we allocated
    m_pResultLock->Lock();
    // Free any remaining result-holder nodes along with their payloads.
    LightweightDoublyLinkedListNode* pNode = m_resultHolderList.RemoveFromHead();
    while (NULL != pNode)
    {
        CAMX_ASSERT(NULL != pNode->pData);
        if (NULL != pNode->pData)
        {
            CAMX_FREE(pNode->pData);
            pNode->pData = NULL;
        }
        CAMX_FREE(pNode);
        pNode = m_resultHolderList.RemoveFromHead();
    }
    m_pResultLock->Unlock();

    if (NULL != m_pLivePendingRequestsLock)
    {
        m_pLivePendingRequestsLock->Destroy();
        m_pLivePendingRequestsLock = NULL;
    }

    if (NULL != m_pWaitLivePendingRequests)
    {
        m_pWaitLivePendingRequests->Destroy();
        m_pWaitLivePendingRequests = NULL;
    }

    for (UINT i = 0; i < m_numPipelines; i++)
    {
        if (NULL != m_pipelineData[i].pPipeline)
        {
            m_pipelineData[i].pPipeline->Destroy();
            m_pipelineData[i].pPipeline = NULL;
        }
    }

    // Release the per-result output-buffer arrays before the result array itself.
    for (UINT32 i = 0; i < m_requestQueueDepth * m_numPipelines; i++)
    {
        if (NULL != m_pCaptureResult[i].pOutputBuffers)
        {
            // NOWHINE CP036a: exception
            CAMX_FREE(const_cast<ChiStreamBuffer*>(m_pCaptureResult[i].pOutputBuffers));
            m_pCaptureResult[i].pOutputBuffers = NULL;
        }
    }

    if (NULL != m_pCaptureResult)
    {
        CAMX_FREE(m_pCaptureResult);
        m_pCaptureResult = NULL;
    }

    if (NULL != m_pRequestQueue)
    {
        m_pRequestQueue->Destroy();
        m_pRequestQueue = NULL;
    }

    if (NULL != m_pFlushRequestQueue)
    {
        m_pFlushRequestQueue->Destroy();
        m_pFlushRequestQueue = NULL;
    }

    if (NULL != m_pResultLock)
    {
        m_pResultLock->Destroy();
        m_pResultLock = NULL;
    }

    if (NULL != m_pRequestLock)
    {
        m_pRequestLock->Destroy();
        m_pRequestLock = NULL;
    }

    if (NULL != m_pFlushLock)
    {
        m_pFlushLock->Destroy();
        m_pFlushLock = NULL;
    }

    if (NULL != m_pStreamOnOffLock)
    {
        m_pStreamOnOffLock->Destroy();
        m_pStreamOnOffLock = NULL;
    }

    if (NULL != m_pPerFrameDebugDataPool)
    {
        m_pPerFrameDebugDataPool->Destroy();
        m_pPerFrameDebugDataPool = NULL;
    }

    if (NULL != m_pWaitAllResultsAvailable)
    {
        m_pWaitAllResultsAvailable->Destroy();
        m_pWaitAllResultsAvailable = NULL;
    }

    m_sesssionInitComplete = FALSE;

    // Self-delete; the Session object is invalid from this point on.
    CAMX_DELETE this;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::FinalizeDeferPipeline
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Finalize the (deferred) pipeline using data held by this session
CamxResult Session::FinalizeDeferPipeline(
    UINT32 pipelineIndex)
{
    // Finalizes a pipeline whose finalization was deferred at session-create
    // time, using data captured in this session's state (mirrors
    // Session::FinalizePipeline, which uses SessionCreateData instead).
    //
    // @param pipelineIndex  Index into m_pipelineData; must be < m_numPipelines.
    // @return CamxResultSuccess on success, or the failure code from job-family
    //         registration / pipeline finalization.
    CAMX_ENTRYEXIT(CamxLogGroupCore);
    CamxResult result = CamxResultSuccess;
    CAMX_ASSERT(pipelineIndex < m_numPipelines);

    const StaticSettings* pStaticSettings = HwEnvironment::GetInstance()->GetStaticSettings();

    // Reset the batch id so the next request for this pipeline starts clean.
    m_requestBatchId[pipelineIndex]           = CamxInvalidRequestId;

    // Populate the finalize data from session state.
    FinalizeInitializationData finalizeInitializationData = { 0 };

    finalizeInitializationData.pHwContext                 = m_pChiContext->GetHwContext();
    finalizeInitializationData.pDeferredRequestQueue      = m_pDeferredRequestQueue;
    finalizeInitializationData.pDebugDataPool             = m_pPerFrameDebugDataPool;
    finalizeInitializationData.pSession                   = this;
    finalizeInitializationData.pThreadManager             = m_pThreadManager;
    finalizeInitializationData.usecaseNumBatchedFrames    = m_usecaseNumBatchedFrames;
    finalizeInitializationData.enableQTimer               = pStaticSettings->enableQTimer;
    finalizeInitializationData.numSessionPipelines        = m_numPipelines;
    finalizeInitializationData.pSensorModeInfo            =
        &(m_pipelineData[pipelineIndex].pPipelineDescriptor->inputData[0].sensorInfo.sensorMode);
    finalizeInitializationData.resourcePolicy             =
        m_pipelineData[pipelineIndex].pipelineFinalizeData.pipelineResourcePolicy;

    // Lazily register the node job family (shared across all pipelines of this
    // session); the wrapper name must match the one used in Destroy() for
    // unregistration.
    if (InvalidJobHandle == m_hNodeJobHandle)
    {
        CHAR wrapperName[FILENAME_MAX];
        OsUtils::SNPrintF(&wrapperName[0], sizeof(wrapperName), "NodeCommonThreadJobFamily%p", this);
        result = m_pThreadManager->RegisterJobFamily(NodeThreadJobFamilySessionCb,
                                                     wrapperName,
                                                     NULL,
                                                     JobPriority::Normal,
                                                     TRUE,
                                                     &m_hNodeJobHandle);
    }

    if (CamxResultSuccess == result)
    {
        finalizeInitializationData.hThreadJobFamilyHandle     = m_hNodeJobHandle;

        result = m_pipelineData[pipelineIndex].pPipeline->FinalizePipeline(&finalizeInitializationData);
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::FinalizePipeline
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::FinalizePipeline(
    SessionCreateData* pCreateData,
    UINT32             pipelineIndex,
    BIT                enableQTimer)
{
    // Finalizes one pipeline at session-create time using SessionCreateData,
    // unless the pipeline asked for deferred finalization — in which case only
    // the resource policy is recorded for later use by FinalizeDeferPipeline.
    //
    // @param pCreateData    Session creation data (must not be NULL).
    // @param pipelineIndex  Index of the pipeline to finalize.
    // @param enableQTimer   QTimer enable flag to pass through to the pipeline.
    // @return CamxResultSuccess on success, or the failure code from job-family
    //         registration / pipeline finalization.
    CAMX_ENTRYEXIT_NAME(CamxLogGroupCore, "SessionFinalizePipeline");
    CamxResult result = CamxResultSuccess;

    CAMX_ASSERT(NULL != pCreateData);

    if (FALSE == pCreateData->pPipelineInfo[pipelineIndex].isDeferFinalizeNeeded)
    {
        // Populate finalizeInitializationData from the creation data.
        FinalizeInitializationData finalizeInitializationData = { 0 };

        finalizeInitializationData.pHwContext                 = pCreateData->pHwContext;
        finalizeInitializationData.pDeferredRequestQueue      = m_pDeferredRequestQueue;
        finalizeInitializationData.pDebugDataPool             = m_pPerFrameDebugDataPool;
        finalizeInitializationData.pSession                   = this;
        finalizeInitializationData.pThreadManager             = m_pThreadManager;
        finalizeInitializationData.usecaseNumBatchedFrames    = m_usecaseNumBatchedFrames;
        finalizeInitializationData.enableQTimer               = enableQTimer;
        finalizeInitializationData.numSessionPipelines        = pCreateData->numPipelines;
        finalizeInitializationData.pSensorModeInfo            =
            pCreateData->pPipelineInfo[pipelineIndex].pipelineInputInfo.sensorInfo.pSensorModeInfo;
        finalizeInitializationData.resourcePolicy             =
            pCreateData->pPipelineInfo[pipelineIndex].pipelineResourcePolicy;

        // If the node job handle is invalid, register the job family lazily.
        // The wrapper name must match the one used in Destroy() for
        // unregistration.
        if (InvalidJobHandle == m_hNodeJobHandle)
        {
            CHAR wrapperName[FILENAME_MAX];
            OsUtils::SNPrintF(&wrapperName[0], sizeof(wrapperName), "NodeCommonThreadJobFamily%p", this);
            // Register the job family
            result = m_pThreadManager->RegisterJobFamily(NodeThreadJobFamilySessionCb,
                                                         wrapperName,
                                                         NULL,
                                                         JobPriority::Normal,
                                                         TRUE,
                                                         &m_hNodeJobHandle);
        }

        if (CamxResultSuccess == result)
        {
            finalizeInitializationData.hThreadJobFamilyHandle     = m_hNodeJobHandle;
            CAMX_LOG_INFO(CamxLogGroupCore, "Finalize for pipeline %d !", pipelineIndex);
            result = m_pipelineData[pipelineIndex].pPipeline->FinalizePipeline(&finalizeInitializationData);
        }
    }
    else
    {
        // Deferred case: record only the resource policy; the rest happens in
        // FinalizeDeferPipeline.
        CAMX_LOG_INFO(CamxLogGroupCore, "deferred pipeline %d finalization!", pipelineIndex);
        m_pipelineData[pipelineIndex].pipelineFinalizeData.pipelineResourcePolicy =
            pCreateData->pPipelineInfo[pipelineIndex].pipelineResourcePolicy;
    }
    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetNumInputSensors
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
UINT32 Session::GetNumInputSensors(
    SessionCreateData* pSessionCreateData)
{
    // Counts how many pipelines in the creation data are fed by a sensor
    // (as opposed to input buffers).
    UINT32 sensorInputCount = 0;

    for (UINT pipeline = 0; pipeline < pSessionCreateData->numPipelines; pipeline++)
    {
        const ChiPipelineInputInfo* pInputInfo =
            &pSessionCreateData->pPipelineInfo[pipeline].pipelineInputInfo;

        if (TRUE == pInputInfo->isInputSensor)
        {
            sensorInputCount++;
        }
    }

    return sensorInputCount;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetSensorSyncMode
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
SensorSyncMode Session::GetSensorSyncMode(
    UINT32 cameraID)

{
    // Determines the sensor hardware-sync mode for the given camera by reading
    // the "sensorsyncmodeconfig" vendor tag from the camera's static metadata.
    // Returns NoSync unless HW sync is enabled in static settings, the session
    // has more than one input sensor, and the tag holds a valid config.
    //
    // @param cameraID  Camera whose static metadata pool is consulted.
    // @return The configured SensorSyncMode, or NoSync.
    SensorSyncMode syncMode  = NoSync;
    CamxResult result        = CamxResultSuccess;

    // read hw sync mode from static meta
    const StaticSettings* pStaticSettings = HwEnvironment::GetInstance()->GetStaticSettings();
    if ((TRUE == pStaticSettings->enableSensorHWSync) && (m_numInputSensors > 1))
    {
        UINT32  metaTag                           = 0;
        VOID*   pSyncMode                         = NULL;
        SensorSyncModeMetadata* pSensorSyncConfig = NULL;
        MetadataSlot* pStaticMetadataSlot         = m_pChiContext->GetStaticMetadataPool(cameraID)->GetSlot(0);

        CAMX_ASSERT(pStaticMetadataSlot != NULL);

        // Resolve the vendor tag id at runtime; it is not a compile-time constant.
        result = VendorTagManager::QueryVendorTagLocation("com.qti.chi.multicamerasensorconfig",
            "sensorsyncmodeconfig", &metaTag);

        if ( CamxResultSuccess == result)
        {
            // Read the tag under the slot's read lock; Unlock below releases it.
            pStaticMetadataSlot->ReadLock();
            pStaticMetadataSlot->GetMetadataByTag(metaTag, &pSyncMode);
            if (pSyncMode != NULL)
            {
                pSensorSyncConfig = reinterpret_cast<SensorSyncModeMetadata*>(pSyncMode);
                // Only honor the configured mode if the metadata is marked valid.
                if (pSensorSyncConfig->isValid)
                {
                    syncMode = pSensorSyncConfig->sensorSyncMode;
                }
                CAMX_LOG_INFO(CamxLogGroupCore, "Get Multi Camera hardware sync mode:%d",
                    syncMode);
            }
            else
            {
                CAMX_LOG_WARN(CamxLogGroupCore, "Get Multi Camera hardware sync metadata failed");
                syncMode = NoSync;
            }
            pStaticMetadataSlot->Unlock();
        }
        else
        {
            CAMX_LOG_WARN(CamxLogGroupCore, "No sensor sync tag found!");
            syncMode = NoSync;
        }
    }
    else
    {
        CAMX_LOG_INFO(CamxLogGroupCore, "Disable sensor hardware sync");
        syncMode = NoSync;
    }

    return syncMode;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::NodeThreadJobFamilySessionCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID* Session::NodeThreadJobFamilySessionCb(
    VOID* pCbData)
{
    // Session-level trampoline for node jobs: validates the payload and hands
    // it straight to the Node layer's common job handler.
    CAMX_ASSERT(NULL != pCbData);

    VOID* pResult = Node::NodeThreadJobFamilyCb(pCbData);

    return pResult;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::Initialize
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::Initialize(
    SessionCreateData* pCreateData)
{
    CamxResult result                    = CamxResultSuccess;
    UINT32     additionalNeededRequests  = 0;
    UINT32     numMetadataSlots          = DefaultPerFramePoolWindowSize;

    BOOL requireExtraHalRequest = FALSE;

    CAMX_ASSERT(NULL != pCreateData);
    CAMX_ASSERT(NULL != pCreateData->pThreadManager);

    Utils::Memcpy(&m_chiCallBacks, pCreateData->pChiAppCallBacks, sizeof(ChiCallBacks));

    const StaticSettings* pStaticSettings = HwEnvironment::GetInstance()->GetStaticSettings();

    m_pThreadManager            = pCreateData->pThreadManager;
    m_pChiContext               = pCreateData->pChiContext;
    m_pPrivateCbData            = pCreateData->pPrivateCbData;
    m_sequenceId                = 0;
    m_syncSequenceId            = 0;
    m_numRealtimePipelines      = 0;
    m_numMetadataResults        = pStaticSettings->numMetadataResults;

    m_hNodeJobHandle                        = InvalidJobHandle;
    m_numInputSensors                       = GetNumInputSensors(pCreateData);
    m_numPipelines                          = pCreateData->numPipelines;
    m_recordingEndOfStreamTagId             = 0;
    m_setVideoPerfModeFlag                  = FALSE;
    m_isSyncedLink                          = FALSE;
    m_sesssionInitComplete                  = FALSE;
    m_recordingEndOfStreamRequestIdTagId    = 0;

    for (UINT i = 0; i < pCreateData->numPipelines; i++)
    {
        ChiPipelineInfo*      pPipelineInfo       = &pCreateData->pPipelineInfo[i];
        PipelineDescriptor*   pPipelineDescriptor = reinterpret_cast<PipelineDescriptor*>(pPipelineInfo->hPipelineDescriptor);
        ChiPipelineInputInfo* pPipelineInput      = &pPipelineInfo->pipelineInputInfo;

        // 将PipelineDescriptor传递给m_pipelineData
        m_pipelineData[i].pPipelineDescriptor = pPipelineDescriptor;
        /// @todo (CAMX-3119) remove IsTorchWidgetSession setting below and handle this in generic way.
        m_isTorchWidgetSession                |= pPipelineDescriptor->flags.isTorchWidget;

        // Consume the input buffer info for offline pipelines and update the pipeline descriptor with that information
        // 若管道输入为输入传感器
        if (FALSE == pPipelineInput->isInputSensor)
        {
            // 从ChiPipelineInputInfo中取出ChiInputBufferInfo
            ChiInputBufferInfo* pChiInputBufferInfo = &pPipelineInput->inputBufferInfo;

            // 将输入缓存数量传递给pPipelineDescriptor
            pPipelineDescriptor->numInputs = pChiInputBufferInfo->numInputBuffers;

            for (UINT input = 0; input < pChiInputBufferInfo->numInputBuffers; input++)
            {
                // 从pChiInputBufferInfo提取ChiPortBufferDescriptor
                const ChiPortBufferDescriptor* pBufferDescriptor = &pChiInputBufferInfo->pInputBufferDescriptors[input];
                // 从pBufferDescriptor中提取ChiStream
                ChiStream*                     pChiStream        = pBufferDescriptor->pStream;
                // 将ChiStream强转为Camera3Stream
                Camera3Stream*                 pHAL3Stream       = reinterpret_cast<Camera3Stream*>(pChiStream);
                // 将ChiStream->pPrivateInfo强转为ChiStreamWrapper
                ChiStreamWrapper*              pChiStreamWrapper =
                    reinterpret_cast<ChiStreamWrapper*>(pChiStream->pPrivateInfo);

                // 若流类型为输入流
                // // isOwner为true
                if (ChiStreamTypeInput == pChiStream->streamType)
                {
                    // 根据pChiStream->format去匹配format
                    Format selectedFormat = m_pChiContext->SelectFormat(pChiStream,
                                                    pBufferDescriptor->bIsOverrideImplDefinedWithRaw);

                    // 调用构造函数
                    pChiStreamWrapper = CAMX_NEW ChiStreamWrapper(pHAL3Stream, input, selectedFormat);

                    // 若创建ChiStreamWrapper成功
                    if (NULL != pChiStreamWrapper)
                    {
                        /// @todo (CAMX-1512) Session can contain all the created Wrappers that it can clean up when destroyed
                        // The wrapper is created by this session
                        BOOL isOwner = TRUE;

                        // 设置pChiStreamWrapper的一些属性
                        m_pChiContext->SetChiStreamInfo(pChiStreamWrapper, pPipelineDescriptor->numBatchedFrames);

                        // 将pChiStreamWrapper赋值回pChiStream
                        pChiStream->pPrivateInfo                                  = pChiStreamWrapper;

                        // pPipelineDescriptor->inputData[input].isWrapperOwner      = isOwner;

                        // 将node匹配的pBufferDescriptor设置给pPipelineDescriptor
                        m_pChiContext->SetPipelineDescriptorInputStream(pPipelineDescriptor, pBufferDescriptor, isOwner);
                        // 将pChiStreamWrapper设置portId
                        pChiStreamWrapper->SetPortId(pBufferDescriptor->nodePort.nodePortId);
                        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Input %d, portId %d wd ht %d x %d wrapper %x stream %x",
                            input, pBufferDescriptor->nodePort.nodePortId, pChiStream->width, pChiStream->height,
                            pChiStreamWrapper, pChiStream);
                    }
                    else
                    {
                        result = CamxResultENoMemory;
                        CAMX_LOG_ERROR(CamxLogGroupCore, "Out of memory!");
                        break;
                    }
                }
                // isOwner为false
                else
                {
                    if (NULL == pChiStreamWrapper)
                    {
                        CAMX_LOG_ERROR(CamxLogGroupCore, "ChiStreamWrapper cannot be NULL!");
                        result = CamxResultEFailed;
                        break;
                    }

                    /// @todo (CAMX-1512) Session can contain all the created Wrappers that it can clean up when destroyed
                    // The wrapper was created by some other session and this session is simply using it as an input
                    BOOL isOwner = FALSE;

                    // pPipelineDescriptor->inputData[input].isWrapperOwner      = isOwner;

                    m_pChiContext->SetPipelineDescriptorInputStream(pPipelineDescriptor, pBufferDescriptor, isOwner);

                    // Need to save only input port information
                    if (ChiStreamTypeBidirectional == pChiStream->streamType)
                    {
                        pChiStreamWrapper->SetPortId(pBufferDescriptor->nodePort.nodePortId);
                        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Bidirectional %d, portId %d wd ht %d x %d wrapper %x stream %x",
                            input, pBufferDescriptor->nodePort.nodePortId, pChiStream->width, pChiStream->height,
                            pChiStreamWrapper, pChiStream);
                    }
                }
            }
        }
        else
        {
            // 提取pPipelineDescriptor的第一个inputData.sensorInfo
            SensorInfo* pSensorInfo = &pPipelineDescriptor->inputData[0].sensorInfo;
            
            pSensorInfo->cameraId = pPipelineInput->sensorInfo.cameraId;
            // 将pPipelineInput->sensorInfo.pSensorModeInfo拷贝给pSensorInfo->sensorMode
            Utils::Memcpy(&pSensorInfo->sensorMode, pPipelineInput->sensorInfo.pSensorModeInfo, sizeof(ChiSensorModeInfo));
            if (60 <= (pSensorInfo->sensorMode.frameRate / pSensorInfo->sensorMode.batchedFrames))
            {
                // Extra HAL request is needed when effective frame rate is 60FPS or more.
                requireExtraHalRequest = TRUE;
            }
        }
    }

    // 构造pDeferredCreateData
    DeferredRequestQueueCreateData pDeferredCreateData;
    pDeferredCreateData.numPipelines   = pCreateData->numPipelines;
    pDeferredCreateData.pThreadManager = m_pThreadManager;

    // 遍历pCreateData的每个pPipelineInfo
    for (UINT i = 0; i < pCreateData->numPipelines; i++)
    {
        ChiPipelineInfo*    pPipelineInfo       = &pCreateData->pPipelineInfo[i];
        // 从ChiPipelineInfo中提取PipelineDescriptor
        PipelineDescriptor* pPipelineDescriptor = reinterpret_cast<PipelineDescriptor*>(pPipelineInfo->hPipelineDescriptor);

        // 保存pPipelineDescriptor
        m_pipelineData[i].pPipelineDescriptor             = pPipelineDescriptor;
        // 创建pipeline
        // TODO
        m_pipelineData[i].pPipeline                       = m_pChiContext->CreatePipelineFromDesc(pPipelineDescriptor, i);
        // 将创建好的pPipeline传递给pPipelineInfo
        pPipelineInfo->pipelineOutputInfo.hPipelineHandle = m_pipelineData[i].pPipeline;

        // 从元数据中获取帧延迟
        UINT32 frameDelay = m_pipelineData[i].pPipeline->DetermineFrameDelay();

        if (0 != frameDelay)
        {
            // 遍历pPipelineDescriptor的所有输出流
            for (UINT output = 0; output < pPipelineDescriptor->numOutputs; output++)
            {
                ChiStreamWrapper* pChiStreamWrapper = pPipelineDescriptor->outputData[output].pOutputStreamWrapper;
                // After successfully creating the pipeline, need to set the maximum num of native buffers for the stream
                // 若为视频流，计算最大缓存并设置给输出流
                if (TRUE == pChiStreamWrapper->IsVideoStream())
                {
                    UINT32 maxNumBuffers = (pStaticSettings->maxHalRequests + frameDelay) *
                                           (pPipelineDescriptor->numBatchedFrames);
                    pChiStreamWrapper->SetNativeMaxNumBuffers(maxNumBuffers);
                }
            }
        }

        // 若帧延迟大于additionalNeededRequests，则更新additionalNeededRequests
        if (frameDelay > additionalNeededRequests)
        {
            additionalNeededRequests = frameDelay;
        }
    }

    // 元数据的位置
    numMetadataSlots = DefaultPerFramePoolWindowSize + (additionalNeededRequests * 2);

    // 若批帧大于1
    if (pCreateData->usecaseNumBatchedFrames > 1)
    {
        // 设置元数据的位置
        numMetadataSlots = DefaultPerFramePoolWindowSize * 2;
    }
    CAMX_ASSERT(numMetadataSlots <= MaxPerFramePoolWindowSize);

    // 遍历pCreateData的所有ChiPipelineInfo
    for (UINT i = 0; i < pCreateData->numPipelines; i++)
    {
        ChiPipelineInfo* pPipelineInfo = &pCreateData->pPipelineInfo[i];

        // 初始化MetadataPools，创建各类池
        if (TRUE == m_pipelineData[i].pPipeline->IsRealTime())
        {
            m_pipelineData[i].pPipeline->InitializeMetadataPools(numMetadataSlots);
        }
        else
        {
            m_pipelineData[i].pPipeline->InitializeMetadataPools(DefaultRequestQueueDepth);
        }

        // 若为输入传感器
        if (pPipelineInfo->pipelineInputInfo.isInputSensor)
        {
            // 给m_pipelineData[i].pPipeline设置m_sensorSyncMode
            UINT32 cameraID = pPipelineInfo->pipelineInputInfo.sensorInfo.cameraId;
            m_pipelineData[i].pPipeline->SetSyncMode(GetSensorSyncMode(cameraID));
        }
        
        if (NULL == m_pipelineData[i].pPipeline)
        {
            result = CamxResultENoMemory;
            break;
        }

        // 传递m_pMainPool给pDeferredCreateData.pMainPools[i]
        pDeferredCreateData.pMainPools[i]     = m_pipelineData[i].pPipeline->GetPerFramePool(PoolType::PerFrameResult);

        // 若为输入传感器
        if (TRUE == pPipelineInfo->pipelineInputInfo.isInputSensor)
        {
            // 保存所有realtime的pipeline id
            // Save realtime pipeline ids, which will be used to find intra pipeline id of multi-camera use case.
            m_realtimePipelineIds[m_numRealtimePipelines] = m_pipelineData[i].pPipeline->GetPipelineId();
            m_numRealtimePipelines++;
        }
    }

    // 若上面的代码执行成功
    if (CamxResultSuccess == result)
    {
        // 更新pDeferredCreateData.requestQueueDepth
        UINT32 currentRequestDepth            = DefaultRequestQueueDepth + additionalNeededRequests;
        pDeferredCreateData.requestQueueDepth = currentRequestDepth;

        // 计算m_requestQueueDepth
        m_requestQueueDepth    = currentRequestDepth * pCreateData->usecaseNumBatchedFrames;
        // 创建HAL3Queue并初始化
        m_pRequestQueue        = HAL3Queue::Create(sizeof(SessionCaptureRequest), RequestQueueDepth, CreatedAs::Empty);
        m_pFlushRequestQueue = HAL3Queue::Create(sizeof(SessionCaptureRequest), RequestQueueDepth, CreatedAs::Empty);


        // 更新m_usecaseNumBatchedFrames
        m_usecaseNumBatchedFrames  = pCreateData->usecaseNumBatchedFrames;
        /// @todo (CAMX-2876) The 8 limit is artificial, and based on the frame number remapping array (m_fwFrameNumberMap)
        CAMX_ASSERT(m_usecaseNumBatchedFrames < 8);

        // 更新m_numPipelines
        m_numPipelines             = pCreateData->numPipelines;
        /// @todo (CAMX-1512) Metadata pools needs to be per pipeline
        // 创建MetadataPool：Debug-data request data pool
        m_pPerFrameDebugDataPool   = MetadataPool::Create(PoolType::PerFrameDebugData,
                                                          UINT_MAX,
                                                          m_pThreadManager,
                                                          numMetadataSlots);
        // 创建锁、请求队列、条件
        m_pResultLock              = Mutex::Create("SessionResultLock");
        m_pRequestLock             = Mutex::Create("SessionRequestLock");
        m_pFlushLock               = Mutex::Create("SessionFlushLock");
        m_pStreamOnOffLock         = Mutex::Create("SessionStreamOnOffLock");
        m_pDeferredRequestQueue    = DeferredRequestQueue::Create(&pDeferredCreateData);
        m_pWaitAllResultsAvailable = Condition::Create("SessionWaitAllResultsAvailable");
        // 创建一个ChiCaptureResult数组
        m_pCaptureResult           = static_cast<ChiCaptureResult*>(
                                         CAMX_CALLOC(m_requestQueueDepth * m_usecaseNumBatchedFrames *
                                                     m_numPipelines * sizeof(ChiCaptureResult)));

        // 创建条件和锁
        m_pWaitLivePendingRequests = Condition::Create("WaitInFlightRequests");
        m_pLivePendingRequestsLock = Mutex::Create("InFlightRequests");
        m_livePendingRequests = 0;

        if (TRUE == requireExtraHalRequest)
        {
            additionalNeededRequests += 1;
        }

        // 计算待处理的请求数
        m_maxLivePendingRequests = (pStaticSettings->maxHalRequests + additionalNeededRequests) *
            m_usecaseNumBatchedFrames;
        // 计算默认待处理请求数
        m_defaultMaxLivePendingRequests = (pStaticSettings->maxHalRequests + additionalNeededRequests);

        // 若m_pCaptureResult分配空间成功
        if (NULL != m_pCaptureResult)
        {
            // 遍历m_pCaptureResult数组
            for (UINT32 i = 0; i < m_requestQueueDepth * m_numPipelines * m_usecaseNumBatchedFrames; i++)
            {
                // 给m_pCaptureResult[i].pOutputBuffers分配空间
                m_pCaptureResult[i].pOutputBuffers =
                    static_cast<ChiStreamBuffer*>(CAMX_CALLOC(MaxNumOutputBuffers * sizeof(ChiStreamBuffer)));

                if (NULL == m_pCaptureResult[i].pOutputBuffers)
                {
                    CAMX_LOG_ERROR(CamxLogGroupCore, "Out of memory");
                    result = CamxResultENoMemory;
                    break;
                }
            }
        }

        if (CamxResultSuccess == result)
        {
            if ((NULL == m_pRequestQueue)            ||
                (NULL == m_pFlushRequestQueue)       ||
                (NULL == m_pPerFrameDebugDataPool)   ||
                (NULL == m_pResultLock)              ||
                (NULL == m_pRequestLock)             ||
                (NULL == m_pStreamOnOffLock)         ||
                (NULL == m_pDeferredRequestQueue)    ||
                (NULL == m_pCaptureResult)           ||
                (NULL == m_pWaitLivePendingRequests) ||
                (NULL == m_pLivePendingRequestsLock) ||
                (NULL == m_pWaitAllResultsAvailable))
            {
                CAMX_LOG_ERROR(CamxLogGroupCore, "Out of memory");
                result = CamxResultENoMemory;
            }
            // 若上面的代码执行成功并且内存分配成功
            else
            {
                m_aFlushingSession      = FALSE;

                if (CamxResultSuccess == result)
                {
                    // Publish ChiSensorModeInfo structure to Vendor tags
                    // 获取sensor_mode_info
                    UINT32             metaTag         = 0;
                    MetadataPool*      pPool           = NULL;
                    MetadataSlot*      pSlot           = NULL;
                    ChiSensorModeInfo* pSensorModeInfo = NULL;
                    result = VendorTagManager::QueryVendorTagLocation("org.codeaurora.qcamera3.sensor_meta_data",
                                                                      "sensor_mode_info",
                                                                      &metaTag);
                    CAMX_ASSERT_MESSAGE(CamxResultSuccess == result, "Failed to get vendor tag: sensor_mode_info");

                    // 遍历m_pipelineData[i]
                    // 提取m_pUsecasePool以及m_pUsecasePool中位置0的MetadataSlot
                    // 取出m_pipelineData[i].pPipelineDescriptor的第一个input数据的传感器模式对象
                    for (UINT i = 0; i < m_numPipelines; i++)
                    {
                        pPool           = m_pipelineData[i].pPipeline->GetPerFramePool(PoolType::PerUsecase);
                        pSlot           = pPool->GetSlot(0);
                        pSensorModeInfo = &m_pipelineData[i].pPipelineDescriptor->inputData[0].sensorInfo.sensorMode;

                        // 计算tag
                        metaTag |= UsecaseMetadataSectionMask;
                        // 根据tag设置元数据
                        pSlot->SetMetadataByTag(metaTag,
                                                static_cast<VOID*>(pSensorModeInfo),
                                                sizeof(ChiSensorModeInfo));
                        pSlot->WriteLock();
                        pSlot->PublishMetadataList(&metaTag, 1);
                        pSlot->Unlock();
                    }
                }

                // TODO
                // 结束pipeline的初始化
                result = InitializeNewPipelines(pCreateData);

            }
        }
    }

    CamxResult resultCode = VendorTagManager::QueryVendorTagLocation("org.codeaurora.qcamera3.sensor_meta_data",
                                                                     "sensor_mode_index",
                                                                     &m_vendorTagSensorModeIndex);
    if (CamxResultSuccess != resultCode)
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL,
                        "Failed to find org.codeaurora.qcamera3.sensor_meta_data.sensor_mode_index, resultCode=%s",
                       CamxResultStrings[resultCode]);
    }

    resultCode = VendorTagManager::QueryVendorTagLocation("org.quic.camera.qtimer", "timestamp", &m_vendorTagIndexTimestamp);
    if (CamxResultSuccess != resultCode)
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL,
                       "Failed to find org.quic.camera.qtimer.timestamp, encoder will fallback to system time, resultCode=%s",
                       CamxResultStrings[resultCode]);
    }

    resultCode = VendorTagManager::QueryVendorTagLocation("org.quic.camera.recording", "endOfStream",
                                                          &m_recordingEndOfStreamTagId);
    if (CamxResultSuccess != resultCode)
    {
        m_recordingEndOfStreamTagId = 0;
        CAMX_LOG_ERROR(CamxLogGroupHAL,
                       "Failed to find org.quic.camera.recording.endOfStream, resultCode=%s", CamxResultStrings[resultCode]);
    }

    resultCode = VendorTagManager::QueryVendorTagLocation("org.quic.camera.recording", "endOfStreamRequestId",
                                                          &m_recordingEndOfStreamRequestIdTagId);
    if (CamxResultSuccess != resultCode)
    {
        m_recordingEndOfStreamRequestIdTagId = 0;
        CAMX_LOG_ERROR(CamxLogGroupHAL,
                       "Failed to find org.quic.camera.recording.endOfStreamRequestId, resultCode=%s",
                       CamxResultStrings[resultCode]);
    }

    if (CamxResultSuccess == result)
    {
        m_sesssionInitComplete = TRUE;
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::InitializeNewPipelines
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::InitializeNewPipelines(
    SessionCreateData* pCreateData)
{
    CAMX_ASSERT(NULL != pCreateData);
    CAMX_ASSERT(m_usecaseNumBatchedFrames == pCreateData->usecaseNumBatchedFrames);

    CamxResult            result    = CamxResultSuccess;
    const StaticSettings* pSettings = HwEnvironment::GetInstance()->GetStaticSettings();

    // Reset every per-pipeline batch request id before finalization begins
    for (UINT32 pipeline = 0; pipeline < m_numPipelines; pipeline++)
    {
        m_requestBatchId[pipeline] = CamxInvalidRequestId;
    }

    // Finalize each pipeline in order; stop as soon as one of them fails
    UINT pipelineIndex = 0;

    while ((pipelineIndex < pCreateData->numPipelines) && (CamxResultSuccess == result))
    {
        result = FinalizePipeline(pCreateData,
                                  pipelineIndex,
                                  pSettings->enableQTimer);
        pipelineIndex++;
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::NotifyResult
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::NotifyResult(
    ResultsData* pResultsData)
{
    CAMX_ASSERT(NULL != pResultsData);

    // Dispatch the callback payload to the handler matching its type;
    // unknown types are silently ignored.
    if (CbType::Error == pResultsData->type)
    {
        HandleErrorCb(&pResultsData->cbPayload.error, pResultsData->pipelineIndex, pResultsData->pPrivData);
    }
    else if (CbType::Async == pResultsData->type)
    {
        HandleAsyncCb(&pResultsData->cbPayload.async, pResultsData->pPrivData);
    }
    else if (CbType::Metadata == pResultsData->type)
    {
        HandleMetadataCb(&pResultsData->cbPayload.metadata, pResultsData->pPrivData);
    }
    else if (CbType::EarlyMetadata == pResultsData->type)
    {
        HandleEarlyMetadataCb(&pResultsData->cbPayload.metadata, pResultsData->pPrivData);
    }
    else if (CbType::Buffer == pResultsData->type)
    {
        HandleBufferCb(&pResultsData->cbPayload.buffer, pResultsData->pipelineIndex,
                       pResultsData->pPrivData);
    }
    else if (CbType::SOF == pResultsData->type)
    {
        HandleSOFCb(&pResultsData->cbPayload.sof);
    }
}


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::StreamOn
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::StreamOn(
    CHIPIPELINEHANDLE hPipelineDescriptor)
{
    UINT32        index  = 0;
    CamxResult    result = CamxResultSuccess;
    CSLLinkHandle hLinkHandles[MaxRealTimePipelines];

    // The input handle does not directly index m_pipelineData, so search for the
    // pipeline whose descriptor matches the given handle.
    for (index = 0; index < m_numPipelines; index++)
    {
        if (hPipelineDescriptor == m_pipelineData[index].pPipelineDescriptor)
        {
            // found corresponding pipeline can use index to get to it
            break;
        }
    }

    CAMX_ASSERT(index < m_numPipelines);

    // Guard against an unknown descriptor: the assert above compiles out in release
    // builds, and without this check m_pipelineData would be indexed out of bounds.
    if (index >= m_numPipelines)
    {
        CAMX_LOG_ERROR(CamxLogGroupCore, "No pipeline found for the given descriptor");
        return CamxResultEInvalidArg;
    }

    Pipeline* pPipeline = m_pipelineData[index].pPipeline;

    // Only act if the pipeline exists and is not already streaming
    if ((NULL != pPipeline) && (!pPipeline->IsStreamedOn()))
    {
        PipelineCreateStatus pipelineStatus = pPipeline->GetPipelineCreateStatus();

        // Finalize the deferred pipeline on demand if its creation has not completed yet
        if (PipelineCreateStatus::FINALIZED != pipelineStatus)
        {
            result = FinalizeDeferPipeline(index);
            pipelineStatus = pPipeline->GetPipelineCreateStatus();
            CAMX_LOG_DEBUG(CamxLogGroupCore, "FinalizeDeferPipeline result: %d pipelineStatus: %d",
                result, pipelineStatus);
        }

        if (CamxResultSuccess != result)
        {
            CAMX_LOG_ERROR(CamxLogGroupCore, "FinalizeDeferPipeline() unsuccessful, Session StreamOn() is failed !!");
        }
        else
        {
            // Serialize stream on/off transitions across the session
            m_pStreamOnOffLock->Lock();
            if (PipelineCreateStatus::FINALIZED == pipelineStatus)
            {
                pPipeline->StreamOn();
            }
            m_pStreamOnOffLock->Unlock();

            CAMX_LOG_INFO(CamxLogGroupCore, "m_numRealtimePipelines %d m_isSyncedLink %d",
                m_numRealtimePipelines, m_isSyncedLink);

            // CSL Link only if its multicamera usecase && not yet linked
            if ((MaxRealTimePipelines == m_numRealtimePipelines) && (FALSE == m_isSyncedLink))
            {
                CamxResult resultSync            = CamxResultEFailed;
                UINT32     realTimePipelineIndex = 0;

                // Collect the CSL link handle of every real-time pipeline
                for (UINT i = 0; i < m_numPipelines; i++)
                {
                    Pipeline* pPipelineObject = m_pipelineData[i].pPipeline;

                    if (NULL == pPipelineObject)
                    {
                        // Skip empty slots; dereferencing them below would crash
                        // (previously the handle fetch ran even for a NULL pipeline)
                        continue;
                    }

                    if (FALSE == pPipelineObject->IsRealTime())
                    {
                        CAMX_LOG_VERBOSE(CamxLogGroupCore, "Not a Real time pipeline %d", i);
                        continue;
                    }

                    // A zero CSL link handle means the link is not established yet
                    if (0 == *pPipelineObject->GetCSLLink())
                    {
                        CAMX_LOG_WARN(CamxLogGroupCore, "Link is not yet created for pipeline %d", i);
                        resultSync = CamxResultEFailed;
                        break;
                    }

                    // Save the valid CSL link handle; sync succeeds only when every
                    // real-time pipeline contributed a valid handle
                    hLinkHandles[realTimePipelineIndex++] = *pPipelineObject->GetCSLLink();
                    resultSync = CamxResultSuccess;
                }

                // Sync link only both the handles are non zero
                if (CamxResultSuccess == resultSync)
                {
                    resultSync = CSLSyncLinks(m_pChiContext->GetHwContext()->GetCSLSession(),
                        hLinkHandles, MaxRealTimePipelines,
                        hLinkHandles[0], CSLSyncLinkModeNoSync);

                    if (CamxResultSuccess == resultSync)
                    {
                        m_isSyncedLink = TRUE;
                        CAMX_LOG_INFO(CamxLogGroupCore, "CSLSyncLinks Success! links (%d, %d)",
                            hLinkHandles[0], hLinkHandles[1]);
                    }
                    else
                    {
                        CAMX_LOG_ERROR(CamxLogGroupCore, "CSLSyncLinks Failed! links (%d, %d)",
                            hLinkHandles[0], hLinkHandles[1]);
                    }
                }
            }
        }
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::StreamOff
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::StreamOff(
    CHIPIPELINEHANDLE           hPipelineDescriptor,
    CHIDEACTIVATEPIPELINEMODE   modeBitmask)
{
    UINT32     index  = 0;
    CamxResult result = CamxResultSuccess;

    // The input handle does not directly index m_pipelineData, so search for the
    // pipeline whose descriptor matches the given handle.
    for (index = 0; index < m_numPipelines; index++)
    {
        if (hPipelineDescriptor == m_pipelineData[index].pPipelineDescriptor)
        {
            // found corresponding pipeline can use index to get to it
            break;
        }
    }

    CAMX_ASSERT(index < m_numPipelines);

    // Guard against an unknown descriptor: the assert above compiles out in release
    // builds, and without this check m_pipelineData would be indexed out of bounds.
    if (index >= m_numPipelines)
    {
        CAMX_LOG_ERROR(CamxLogGroupCore, "No pipeline found for the given descriptor");
        return CamxResultEInvalidArg;
    }

    Pipeline* pPipeline = m_pipelineData[index].pPipeline;

    if (NULL != pPipeline)
    {
        // Drain all outstanding results first, then stream off under the same lock
        // that serializes stream on/off transitions across the session.
        WaitTillAllResultsAvailable(TRUE);
        m_pStreamOnOffLock->Lock();
        result = pPipeline->StreamOff(modeBitmask);
        m_pStreamOnOffLock->Unlock();
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessCaptureRequest
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::ProcessCaptureRequest(
    const ChiPipelineRequest* pPipelineRequests)
{
    CamxResult  result      = CamxResultEFailed;
    CamxResult  resultWait  = CamxResultSuccess;

    UINT        numRequests = pPipelineRequests->numRequests;
    UINT32      indexs[MaxPipelinesPerSession];


    const StaticSettings*   pStaticSettings = m_pChiContext->GetStaticSettings();

    // 若m_aFlushingSession为true
    if (TRUE == GetFlushSessionStatus())
    {
        return result;
    }

    m_pLivePendingRequestsLock->Lock();
    m_livePendingRequests++;

    for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
    {
        CAMX_LOG_VERBOSE(CamxLogGroupCore, "ProcessCaptureRequest received for framework framenumber %llu, num outputs %d",
                         pPipelineRequests->pCaptureRequests[requestIndex].frameNumber,
                         pPipelineRequests->pCaptureRequests[requestIndex].numOutputs);
    }

    // m_livePendingRequests 实时挂起的请求数
    while (m_livePendingRequests >= m_maxLivePendingRequests)
    {
        UINT waitTime = LivePendingRequestTimeoutDefault;
        if (m_sequenceId < m_maxLivePendingRequests * 2)
        {
            waitTime = LivePendingRequestTimeoutDefault + (m_maxLivePendingRequests * LivePendingRequestTimeOutExtendor);
        }
        resultWait = m_pWaitLivePendingRequests->TimedWait(m_pLivePendingRequestsLock->GetNativeHandle(), waitTime);

        if (CamxResultSuccess != resultWait)
        {
            break;
        }
    }

    m_pLivePendingRequestsLock->Unlock();

    if (CamxResultSuccess != resultWait)
    {
        CAMX_LOG_INFO(CamxLogGroupCore, "Lets do a Reset");
        return CamxResultETimeout;
    }

    // Prepare info for each request on each pipeline
    // 获取所有管道的索引
    for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
    {
        // input pipelineIndex not really match the index recorded by Session, so use GetPipelineIndex to get corresponding
        // pipeline index.
        indexs[requestIndex] = GetPipelineIndex(pPipelineRequests->pCaptureRequests[requestIndex].hPipelineHandle);
        CAMX_ASSERT(indexs[requestIndex] < m_numPipelines);
    }

    // Block process request while stream on in progress
    m_pStreamOnOffLock->Lock();

    ChiCaptureRequest requests[MaxPipelinesPerSession];
    // 将pPipelineRequests->numRequests传递给m_captureRequest.numRequests
    m_captureRequest.numRequests = numRequests;

    if (MaxRealTimePipelines > m_numRealtimePipelines)
    {
        // In single camera use case, one CHI request should have only one request per pipeline so that incoming requests will
        // not be more than m_requestQueueDepth and the only exception is in Dual Camera use case to have two requests
        if (2 <= numRequests)
        {
            CAMX_LOG_ERROR(CamxLogGroupCore, "In batch mode, number of pipeline requests are more than 1");
        }

        CAMX_ASSERT(2 > numRequests);
    }

    // 遍历所有管道的请求
    for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
    {
        // 从pPipelineRequests中取出ChiCaptureRequest
        const ChiCaptureRequest* pCaptureRequest    = &(pPipelineRequests->pCaptureRequests[requestIndex]);
        // 取出对应管道的索引
        UINT32             pipelinIndex             = indexs[requestIndex];
        // 根据索引从m_pipelineData中取出管道
        Pipeline*          pPipeline                = m_pipelineData[pipelinIndex].pPipeline;
        // 初始化元数据池
        MetadataPool*      pPerFrameInputPool       = NULL;
        MetadataPool*      pPerFrameResultPool      = NULL;
        MetadataPool*      pPerFrameInternalPool    = NULL;
        MetadataPool*      pPerFrameEarlyResultPool = NULL;
        MetadataPool*      pPerUsecasePool          = NULL;
        MetadataPool*      pPerFrameEmptyMetadata   = NULL;
        
        // 若管道不为空 && 管道创建未完毕
        if ((NULL != pPipeline) &&
            ( PipelineCreateStatus::FINALIZED != pPipeline->GetPipelineCreateStatus()))
        {
            // 使用会话中的数据去初始化管道
            FinalizeDeferPipeline(pipelinIndex);
        }

        // 根据枚举switch获取pPipeline中的成员变量
        if (NULL != pPipeline)
        {
            pPerFrameInputPool       = pPipeline->GetPerFramePool(PoolType::PerFrameInput);
            pPerFrameResultPool      = pPipeline->GetPerFramePool(PoolType::PerFrameResult);
            pPerFrameInternalPool    = pPipeline->GetPerFramePool(PoolType::PerFrameInternal);
            pPerFrameEarlyResultPool = pPipeline->GetPerFramePool(PoolType::PerFrameResultEarly);
            pPerUsecasePool          = pPipeline->GetPerFramePool(PoolType::PerUsecase);
            pPerFrameEmptyMetadata   = pPipeline->GetPerFramePool(PoolType::PerFrameEmptyMetadata);
        }

        // 若上一个分支全部执行成功
        if ((NULL != pPerFrameEarlyResultPool) &&
            (NULL != pPerFrameInputPool)       &&
            (NULL != pPerFrameResultPool)      &&
            (NULL != pPerFrameInternalPool)    &&
            (NULL != pPerUsecasePool))
        {
            // Replace the incoming frameNumber with m_sequenceId to protect against sparse input frameNumbers
            // 将从pPipelineRequests中取出的ChiCaptureRequest传递给requests数组
            CamX::Utils::Memcpy(&requests[requestIndex], pCaptureRequest, sizeof(ChiCaptureRequest));

            // 根据计数器更新请求数组元素的帧编号
            requests[requestIndex].frameNumber = m_sequenceId;
            // 计数器递增
            m_sequenceId++;

            // 检测是否可以继续请求
            result = CanRequestProceed(&requests[requestIndex]);

            // 如果CanRequestProceed检测为可以继续请求
            if (CamxResultSuccess == result)
            {
                // 检测是否为输入池设置了默认值
                if (FALSE == m_inputDefaultsInitialized)
                {
                    // NOWHINE CP036a: The 'getters' for metadata because of being a sorted hashmap can modify the object
                    // 准备获取元数据所需的参数
                    Metadata*              pMetadata    = const_cast<Metadata*>(requests[requestIndex].pMetadata);
                    BYTE*                  pIntent      = NULL;
                    Camera3RequestTemplate templateType = RequestTemplatePreview;

                    /// 获取元数据
                    HAL3MetadataUtil::GetMetadata(pMetadata, ControlCaptureIntent, reinterpret_cast<VOID**>(&pIntent));

                    // 若获取元数据失败
                    if ((pIntent == NULL) || (0 == *pIntent))
                    {
                        // 给templateType赋初值
                        templateType = RequestTemplatePreview;
                    }
                    else
                    {
                        // 将元数据强转为Camera3RequestTemplate templateType
                        templateType = static_cast<Camera3RequestTemplate>(*pIntent);
                    }

                    // NOWHINE CP036a: The 'getters' for metadata because of being a sorted hashmap can modify the object
                    // 构造默认请求设置
                    Metadata* pDefaults = const_cast<Metadata*>(
                                            HAL3DefaultRequest::ConstructDefaultRequestSettings(
                                                m_pipelineData[pipelinIndex].pPipeline->GetCameraId(),
                                                templateType));

                    CAMX_ASSERT(NULL != pDefaults);

                    if (NULL != pDefaults)
                    {
                        // 根据元数据去更新配置
                        pPerFrameInputPool->SetDefaults(pDefaults);
                    }
                    // 已为输入池设置了默认值
                    m_inputDefaultsInitialized = TRUE;
                }

                // 等待
                result = WaitOnAcquireFence(&requests[requestIndex]);

                if (CamxResultSuccess == result)
                {
                    // Finally copy and enqueue the request and fire the threadpool
                    // 最后复制请求并将其排入队列，然后启动线程池

                    // m_batchedFrameIndex of respective pipelines should be less than m_usecaseNumBatchedFrames
                    CAMX_ASSERT(m_batchedFrameIndex[pipelinIndex] < m_usecaseNumBatchedFrames);

                    // m_batchedFrameIndex 0 implies a new requestId must be generated - irrespective of batching ON/OFF status
                    if (0 == m_batchedFrameIndex[pipelinIndex])
                    {
                        // 初始化会话的捕捉请求
                        m_requestBatchId[pipelinIndex]++;
                        Utils::Memset(&m_captureRequest.requests[requestIndex], 0, sizeof(CaptureRequest));
                        m_captureRequest.requests[requestIndex].requestId = m_requestBatchId[pipelinIndex];
                        m_captureRequest.requests[requestIndex].pMultiRequestData =
                            &m_requestSyncData[(m_syncSequenceId) % MaxQueueDepth];

                        // 使m_requestBatchId[pipelinIndex]无效
                        pPerFrameInputPool->Invalidate(m_requestBatchId[pipelinIndex]);
                        pPerFrameResultPool->Invalidate(m_requestBatchId[pipelinIndex]);
                        pPerFrameEarlyResultPool->Invalidate(m_requestBatchId[pipelinIndex]);
                        pPerFrameInternalPool->Invalidate(m_requestBatchId[pipelinIndex]);
                        pPerFrameEmptyMetadata->Invalidate(m_requestBatchId[pipelinIndex]);

                        // 更新m_requestBatchId[pipelinIndex]
                        pPerFrameInputPool->UpdateRequestId(m_requestBatchId[pipelinIndex]);
                        pPerFrameResultPool->UpdateRequestId(m_requestBatchId[pipelinIndex]);
                        pPerFrameEarlyResultPool->UpdateRequestId(m_requestBatchId[pipelinIndex]);
                        pPerFrameInternalPool->UpdateRequestId(m_requestBatchId[pipelinIndex]);
                        m_pPerFrameDebugDataPool->UpdateRequestId(m_requestBatchId[pipelinIndex]);
                        pPerFrameEmptyMetadata->UpdateRequestId(m_requestBatchId[pipelinIndex]);

                        // 缓存稀疏的元数据
                        pPerFrameInputPool->UpdateSparse(m_requestBatchId[pipelinIndex],
                        // NOWHINE CP036a: The 'getters' for metadata because of being a sorted hashmap can modify the object
                                                         const_cast<Metadata*>(requests[requestIndex].pMetadata));

                        if ((TRUE == pStaticSettings->logMetaEnable) && (requests[requestIndex].pMetadata))
                        {
                            CAMX_LOG_META("+----------------------------------------------------");
                            CAMX_LOG_META("| Input metadata for request: %lld", m_requestBatchId[pipelinIndex]);
                            CAMX_LOG_META("|     %d entries",
                                HAL3MetadataUtil::GetMetadataEntryCount(requests[requestIndex].pMetadata));
                            CAMX_LOG_META("+----------------------------------------------------");

                            // 打印元数据
                            HAL3MetadataUtil::DumpMetadata(requests[requestIndex].pMetadata);
                        }

                        // 根据m_requestBatchId[pipelinIndex]获取元数据的位置
                        MetadataSlot* pMetadataSlot       = pPerFrameInputPool->GetSlot(m_requestBatchId[pipelinIndex]);
                        MetadataSlot* pResultMetadataSlot = pPerFrameResultPool->GetSlot(m_requestBatchId[pipelinIndex]);
                        MetadataSlot* pUsecasePoolSlot    = pPerUsecasePool->GetSlot(0);

                        if (pMetadataSlot != NULL)
                        {
                            if (CamxResultSuccess == result)
                            {
                                // m_batchedFrameIndex of 0 implies batching may be switched ON/OFF starting from this frame
                                // 判断m_usecaseNumBatchedFrames > 1
                                if (TRUE == IsUsecaseBatchingEnabled())
                                {
                                    RangeINT32* pFPSRange = NULL;

                                    // 根据tag获取元数据
                                    pMetadataSlot->GetMetadataByTag(ControlAETargetFpsRange,
                                                                    reinterpret_cast<VOID**>(&pFPSRange));

                                    // Must have been filled by GetMetadataByTag()
                                    CAMX_ASSERT(NULL != pFPSRange);

                                    BOOL hasBatchingModeChanged = FALSE;

                                    if ((NULL != pFPSRange) && (pFPSRange->min == pFPSRange->max))
                                    {
                                        // 若用例未启用批处理
                                        if (FALSE == m_isRequestBatchingOn)
                                        {
                                            hasBatchingModeChanged = TRUE;
                                        }

                                        m_isRequestBatchingOn = TRUE;
                                    }
                                    else
                                    {
                                        // 若用例启用了批处理
                                        if (TRUE == m_isRequestBatchingOn)
                                        {
                                            hasBatchingModeChanged = TRUE;
                                        }
                                        // 批处理开关设置为false
                                        m_isRequestBatchingOn = FALSE;
                                    }

                                    // If batching mode changes from ON to OFF or OFF to ON we need to dynamically adjust
                                    // m_requestQueueDepth - because m_requestQueueDepth is different with batching ON or OFF
                                    // With batching OFF it is RequestQueueDepth and with ON it is
                                    // "RequestQueueDepth * usecaseNumBatchedFrames"
                                    // 若批处理模式改变了
                                    if (TRUE == hasBatchingModeChanged)
                                    {
                                        // Before changing m_requestQueueDepth, we need to make sure:
                                        // 1. All the current pending requests are processed by the Pipeline
                                        // 2. All the results for all those processed requests are sent back to the framework
                                        //
                                        // (1) is done by waiting for the request queue to become empty
                                        // (2) is done by waiting on a condition variable that is signaled when all results are
                                        //     sent back to the framework
                                        // waiting for the request queue to become empty
                                        m_pRequestQueue->WaitEmpty();

                                        Flush(FALSE);

                                        // The request and result queues are completely empty at this point, and this function
                                        // is the only thing that can add to the request queue.  Safe to change
                                        // m_requestQueueDepth at this point
                                        if (TRUE == m_isRequestBatchingOn)
                                        {
                                            // 根据批处理帧数修改m_requestQueueDepth、m_maxLivePendingRequests
                                            m_requestQueueDepth = DefaultRequestQueueDepth * m_usecaseNumBatchedFrames;
                                            m_maxLivePendingRequests =
                                                    m_defaultMaxLivePendingRequests * m_usecaseNumBatchedFrames;
                                        }
                                        // 若未启用批处理，则仍设为默认值
                                        else
                                        {
                                            m_requestQueueDepth = DefaultRequestQueueDepth;
                                            m_maxLivePendingRequests = m_defaultMaxLivePendingRequests;
                                        }
                                    }
                                    // 若批处理模式未改变
                                    else
                                    {
                                        // Need to set default value if batch mode is enabled but request batching is off.
                                        // In this case we have only preview reqest.
                                        // 若用例未启用批处理，则仍设为默认值
                                        if (FALSE == m_isRequestBatchingOn)
                                        {
                                            m_requestQueueDepth = DefaultRequestQueueDepth;
                                            m_maxLivePendingRequests = m_defaultMaxLivePendingRequests;
                                        }
                                    }
                                }

                                // 若记录的流末尾标志不为0 && 记录的流请求ID标志不为0
                                // 获取元数据中的记录的流末尾
                                if ((0 != m_recordingEndOfStreamTagId) && (0 != m_recordingEndOfStreamRequestIdTagId))
                                {
                                    UINT8* pRecordingEndOfStream = NULL;

                                    pMetadataSlot->GetMetadataByTag(m_recordingEndOfStreamTagId,
                                                                    reinterpret_cast<VOID**>(&pRecordingEndOfStream));

                                    // 若获取元数据成功
                                    if ((FALSE == pStaticSettings->disableDRQPreemptionOnStopRecord) &&
                                        ((NULL != pRecordingEndOfStream) && (0 != *pRecordingEndOfStream)))
                                    {
                                        UINT64 requestId = m_requestBatchId[pipelinIndex];
                                        CAMX_LOG_INFO(CamxLogGroupCore, "Recording stopped on reqId %llu", requestId);

                                        UINT32 endOfStreamRequestIdTag = m_recordingEndOfStreamRequestIdTagId |
                                                                         UsecaseMetadataSectionMask;

                                        // 设置元数据
                                        pUsecasePoolSlot->SetMetadataByTag(endOfStreamRequestIdTag,
                                                                           static_cast<VOID*>(&requestId),
                                                                           sizeof(requestId));

                                        // 通知打印元数据列表
                                        pUsecasePoolSlot->PublishMetadataList(&endOfStreamRequestIdTag, 1);

                                        // 设置视频性能模式为true
                                        m_setVideoPerfModeFlag = TRUE;
                                        // 设置m_isPreemptDependencyEnabled为true
                                        m_pDeferredRequestQueue->SetPreemptDependencyFlag(TRUE);
                                        // 从延迟节点订阅列表中调度并删除所有已完成的订阅服务器
                                        m_pDeferredRequestQueue->DispatchReadyNodes();
                                    }
                                    else
                                    {
                                        // 设置视频性能模式为false
                                        m_setVideoPerfModeFlag = FALSE;
                                        // 设置m_isPreemptDependencyEnabled为false
                                        m_pDeferredRequestQueue->SetPreemptDependencyFlag(FALSE);
                                    }
                                }
                                else
                                {
                                    CAMX_LOG_INFO(CamxLogGroupCore, "No stop recording vendor tags");
                                }

                                ControlCaptureIntentValues*          pCaptureIntent          = NULL;
                                
                                // 获取元数据
                                pMetadataSlot->GetMetadataByTag(ControlCaptureIntent,
                                                                reinterpret_cast<VOID**>(&pCaptureIntent));

                                // Update dynamic pipeline depth metadata which is required in capture result.
                                // 更新管道深度元数据
                                pResultMetadataSlot->SetMetadataByTag(RequestPipelineDepth,
                                                                      static_cast<VOID*>(&(m_requestQueueDepth)),
                                                                      1);

                                // 设置元数据
                                if (NULL != pCaptureIntent)
                                {
                                    // Copy Intent to result
                                    result = pResultMetadataSlot->SetMetadataByTag(ControlCaptureIntent, pCaptureIntent, 1);
                                }
                            }
                            else
                            {
                                CAMX_LOG_ERROR(CamxLogGroupHAL, "Couldn't copy request metadata!");
                            }
                        }
                        else
                        {
                            CAMX_LOG_ERROR(CamxLogGroupHAL,
                                            "Couldn't get metadata slot for request id: %d",
                                            requests[requestIndex].frameNumber);

                            result = CamxResultEFailed;
                        }

                        // Get the per frame sensor mode index
                        UINT* pSensorModeIndex  = NULL;
                        MainPropertyBlob* pBlob = NULL;

                        if (m_vendorTagSensorModeIndex > 0)
                        {
                            if (NULL != pMetadataSlot)
                            {
                                // 从元数据中获取传感器模式的索引
                                pMetadataSlot->GetMetadataByTag(m_vendorTagSensorModeIndex,
                                    reinterpret_cast<VOID**>(&pSensorModeIndex));
                            }

                            CAMX_ASSERT(NULL != pSensorModeIndex);

                            if (NULL != pSensorModeIndex)
                            {
                                pResultMetadataSlot->WriteLock();
                                
                                // 获取binary larger object
                                pResultMetadataSlot->GetPropertyBlob(reinterpret_cast<VOID**>(&pBlob));
                                // 若blob获取成功
                                if (NULL != pBlob)
                                {
                                    pStaticSettings =
                                        HwEnvironment::GetInstance()->GetStaticSettings();

                                    if (TRUE == pStaticSettings->perFrameSensorMode)
                                    {
                                        // 更新blob的数据
                                        pBlob->mSensorCurrentMode = *pSensorModeIndex;
                                        // 通知
                                        pResultMetadataSlot->PublishProperty(PropertyIDSensorCurrentMode);
                                    }
                                }

                                pResultMetadataSlot->Unlock();
                            }
                        }
                    }

                    if (CamxResultSuccess == result)
                    {
                        // 定义流以及HAL3Stream
                        ChiStreamWrapper*   pChiStreamWrapper   = NULL;
                        ChiStream*          pChiStream          = NULL;

                        /// Adding 1 to avoid 0 as 0 is flagged as invalid
                        // 给帧编号 + 1
                        UINT64 cslsyncid = pCaptureRequest->frameNumber + 1;
                        // 通过请求索引获取CaptureRequest
                        CaptureRequest* pRequest = &(m_captureRequest.requests[requestIndex]);
                        // 通过管道索引获取批帧编号
                        UINT batchedFrameIndex = m_batchedFrameIndex[pipelinIndex];

                        // 给pRequest赋值
                        pRequest->streamBuffers[batchedFrameIndex].sequenceId =
                            static_cast<UINT32>(requests[requestIndex].frameNumber);
                        pRequest->streamBuffers[batchedFrameIndex].originalFrameworkNumber =
                            pCaptureRequest->frameNumber;
                        pRequest->CSLSyncID = cslsyncid;
                        pRequest->streamBuffers[batchedFrameIndex].numInputBuffers =
                            requests[requestIndex].numInputs;
                        pRequest->pPrivData = reinterpret_cast<CbPrivateData *>(pCaptureRequest->pPrivData);

                        // 遍历所有请求的输入
                        for (UINT i = 0; i < requests[requestIndex].numInputs; i++)
                        {
                            /// @todo (CAMX-1015): Avoid this memcpy.
                            // 将requests的输入缓存拷贝给pRequest
                            Utils::Memcpy(&pRequest->streamBuffers[batchedFrameIndex].inputBufferInfo[i].inputBuffer,
                                            &requests[requestIndex].pInputBuffers[i],
                                            sizeof(ChiStreamBuffer));
                            // Below check is ideally not required but to avoid regressions making it applicable to only MFNR
                            if (requests[requestIndex].numInputs > 1)
                            {
                                // 将pRequest的输入流强转为ChiStream
                                pChiStream = reinterpret_cast<ChiStream*>(
                                    pRequest->streamBuffers[batchedFrameIndex].inputBufferInfo[i].inputBuffer.pStream);
                                CAMX_ASSERT(NULL != pChiStream);
                                // 获取ChiStream的pPrivateInfo
                                pChiStreamWrapper = reinterpret_cast<ChiStreamWrapper*>(pChiStream->pPrivateInfo);

                                // 更新pRequest的节点ID
                                pRequest->streamBuffers[batchedFrameIndex].inputBufferInfo[i].portId =
                                    pChiStreamWrapper->GetPortId();
                                CAMX_LOG_VERBOSE(CamxLogGroupCore,
                                    "input buffers #%d, port %d, dim %d x %d wrapper %x, stream %x",
                                    i, pRequest->streamBuffers[batchedFrameIndex].inputBufferInfo[i].portId,
                                    pChiStream->width, pChiStream->height, pChiStreamWrapper, pChiStream);
                            }
                        }

                        if (CamxResultSuccess == result)
                        {
                            /// @todo (CAMX-1797) Delete this
                            // 更新pRequest->pipelineIndex
                            pRequest->pipelineIndex    = pipelinIndex;

                            CAMX_LOG_INFO(CamxLogGroupHAL, "Submit to pipeline index: %d / number of pipelines: %d"
                                " batched index %d",
                                pRequest->pipelineIndex,
                                m_numPipelines, m_batchedFrameIndex[pipelinIndex]);

                            CAMX_ASSERT(requests[requestIndex].numOutputs <= MaxOutputBuffers);

                            // 同requests[requestIndex].numOutputs去更新pRequest的输出缓存数量
                            pRequest->streamBuffers[m_batchedFrameIndex[pipelinIndex]].numOutputBuffers =
                                requests[requestIndex].numOutputs;

                            // 将requests的所有输出缓存拷贝给pRequest->streamBuffers的输出缓存
                            for (UINT i = 0; i < requests[requestIndex].numOutputs; i++)
                            {
                                /// @todo (CAMX-1015): Avoid this memcpy.
                                Utils::Memcpy(&pRequest->streamBuffers[m_batchedFrameIndex[pipelinIndex]].outputBuffers[i],
                                              &requests[requestIndex].pOutputBuffers[i],
                                              sizeof(ChiStreamBuffer));
                            }

                            // Increment batch index only if batch mode is on
                            // 如果用例开启了批处理
                            if (TRUE == m_isRequestBatchingOn)
                            {
                                // 运行的帧批数+1
                                m_batchedFrameIndex[pipelinIndex]++;
                                // 将帧批数赋给pRequest
                                pRequest->numBatchedFrames = m_batchedFrameIndex[pipelinIndex];
                            }
                            else
                            {
                                // 反之初始化帧批数相关变量
                                m_batchedFrameIndex[pipelinIndex] = 0;
                                pRequest->numBatchedFrames = 1;
                            }

                        }
                    }

                    if (CamxResultSuccess == result)
                    {
                        // Fill Color Metadata for output buffer
                        // 填充元数据后设置给pCaptureRequest
                        result = SetPerStreamColorMetadata(pCaptureRequest, pPerFrameInputPool, m_requestBatchId[pipelinIndex]);
                    }
                }
                else
                {
                    CAMX_LOG_ERROR(CamxLogGroupHAL, "Acquire fence failed for request");
                }
            }
            else
            {
                CAMX_LOG_INFO(CamxLogGroupHAL, "Session unable to process request because of device state");
            }
        }
        else
        {
            CAMX_LOG_ERROR(CamxLogGroupHAL, "PerFrame MetadataPool is NULL");
            result = CamxResultEInvalidPointer;
        }
    }

    if (CamxResultSuccess == result)
    {
        // Once we batch all the frames according to usecaseNumBatchedFrames we enqueue the capture request.
        // For non-batch mode m_usecaseNumBatchedFrames is 1 so we enqueue every request. If batching is ON
        // we enqueue the batched capture request only after m_usecaseBatchSize number of requests have been
        // received
        BOOL batchFrameReady = TRUE;
        // 检查所有管道的批帧数是否全部相同
        for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
        {
            UINT32 pipelinIndex = indexs[requestIndex];

            if (m_batchedFrameIndex[pipelinIndex] != m_usecaseNumBatchedFrames)
            {
                batchFrameReady = FALSE;
                break; // batch frame number must be same for all the pipelines in same session
            }
        }

        // 如果未启用批处理 || 所有管道的批帧数是否全部相同
        if ((FALSE == m_isRequestBatchingOn) || (TRUE == batchFrameReady))
        {   
            // 等待
            result = m_pRequestQueue->EnqueueWait(&m_captureRequest);

            if (CamxResultSuccess == result)
            {
                // Check for good conditions once more, if enqueue had to wait
                for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
                {   
                    // 检查所有请求是否可以继续，查看卡在了哪个请求
                    result = CanRequestProceed(&requests[requestIndex]);
                    if (CamxResultSuccess != result)
                    {
                        break;
                    }
                }
            }

            if (CamxResultSuccess == result)
            {
                for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
                {   
                    // 获取管道请求的捕捉请求指针
                    const ChiCaptureRequest* pCaptureRequest = &(pPipelineRequests->pCaptureRequests[requestIndex]);
                    CAMX_LOG_INFO(CamxLogGroupHAL,
                        "Added Sequence ID %lld framework framenumber %lld to request queue and launched job with"
                        "request id %llu",
                        requests[requestIndex].frameNumber, pCaptureRequest->frameNumber,
                        m_requestBatchId[indexs[requestIndex]]);
                }

                VOID* pData[] = {this, NULL};
                // 开启线程
                result        = m_pThreadManager->PostJob(m_hJobFamilyHandle,
                                                          NULL,
                                                          &pData[0],
                                                          FALSE,
                                                          FALSE);
            }
            else
            {
                CAMX_LOG_WARN(CamxLogGroupHAL, "Session unable to process request because of device state");
            }

            // 批帧数设为0
            for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
            {
                m_batchedFrameIndex[indexs[requestIndex]] = 0;
            }
        }
    }

    m_pStreamOnOffLock->Unlock();
    CAMX_ASSERT(CamxResultSuccess == result);

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessResultEarlyMetadata
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::ProcessResultEarlyMetadata(
    ResultHolder* pResultHolder,
    UINT*         pNumResults)
{
    // Index of the framework capture result entry being filled for this holder
    const UINT resultIndex       = *pNumResults;
    BOOL       earlyMetadataSent = FALSE;

    // A pending metadata error means no more metadata will be delivered for this frame:
    // notify the framework once and clear the outstanding metadata count.
    if ((NULL != pResultHolder->pMetadataError) && (0 != pResultHolder->pendingMetadataCount))
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL, "Metadata error for request: %d", pResultHolder->sequenceId);

        pResultHolder->pMetadataError->pPrivData = static_cast<CHIPRIVDATA*>(pResultHolder->pPrivData);
        DispatchNotify(pResultHolder->pMetadataError);
        pResultHolder->pendingMetadataCount      = 0;
    }

    // There was no metadata error; metadata is sent back in FIFO order, so only publish
    // when this sequence id is the next one allowed to return metadata.
    if ((NULL == pResultHolder->pMetadataError) && (TRUE == MetadataReadyToFly(pResultHolder->sequenceId)))
    {
        // The early metadata, when present, always occupies slot [1] of the holder
        if (NULL != pResultHolder->pMetadata[1])
        {
            ChiCaptureResult* pResult = &m_pCaptureResult[resultIndex];

            // Hand the early metadata over to the capture result for the framework
            pResult->pResultMetadata = pResultHolder->pMetadata[1];

            // One fewer metadata payload outstanding for this frame
            pResultHolder->pendingMetadataCount--;

            pResult->numPartialMetadata = 1;
            pResult->frameworkFrameNum  = GetFrameworkFrameNumber(pResultHolder->sequenceId);

            // Ownership of the early metadata now belongs to the capture result
            pResultHolder->pMetadata[1] = NULL;
            earlyMetadataSent           = TRUE;

            CAMX_LOG_INFO(CamxLogGroupHAL, "Finalized early metadata result for Sequence ID %d mapped to framework id %d",
                pResultHolder->sequenceId, pResult->frameworkFrameNum);
        }
    }

    return earlyMetadataSent;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessResultMetadata
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::ProcessResultMetadata(
    ResultHolder* pResultHolder,
    UINT*         pNumResults)
{
    // Index of the framework capture result entry being filled for this holder
    const UINT resultIndex   = *pNumResults;
    BOOL       metadataReady = FALSE;

    // A pending metadata error means no more metadata will be delivered for this frame:
    // notify the framework once and clear the outstanding metadata count.
    if ((NULL != pResultHolder->pMetadataError) && (0 != pResultHolder->pendingMetadataCount))
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL, "Metadata error for sequenceId: %d", pResultHolder->sequenceId);

        pResultHolder->pMetadataError->pPrivData = static_cast<CHIPRIVDATA*>(pResultHolder->pPrivData);
        DispatchNotify(pResultHolder->pMetadataError);
        pResultHolder->pendingMetadataCount      = 0;
    }

    // There was no metadata error; metadata is sent back in FIFO order, so only publish
    // when this sequence id is the next one allowed to return metadata.
    if ((NULL == pResultHolder->pMetadataError) && (TRUE == MetadataReadyToFly(pResultHolder->sequenceId)))
    {
        // The main metadata always occupies slot [0] of the holder
        if (NULL != pResultHolder->pMetadata[0])
        {
            /// @todo (CAMX-271) - Handle more than one (>1) partial metadata in pipeline/HAL -
            ///                    When we handle metadata in pipeline, we need to decide how we
            ///                    want to break the slot metadata into multiple result metadata
            ///                    components, as per the contract in MaxPartialMetadataHAL
            ///                    (i.e. android.request.partialResultCount)
            ChiCaptureResult* pResult = &m_pCaptureResult[resultIndex];

            // Hand the main metadata over to the capture result for the framework
            pResult->pResultMetadata = pResultHolder->pMetadata[0];

            // One fewer metadata payload outstanding for this frame
            pResultHolder->pendingMetadataCount--;

            pResult->numPartialMetadata = m_numMetadataResults;
            pResult->frameworkFrameNum  = GetFrameworkFrameNumber(pResultHolder->sequenceId);

            // Ownership of the main metadata now belongs to the capture result
            pResultHolder->pMetadata[0] = NULL;
            metadataReady               = TRUE;

            CAMX_LOG_INFO(CamxLogGroupHAL, "Finalized metadata result for Sequence ID %d mapped to framework id %d",
                pResultHolder->sequenceId, pResult->frameworkFrameNum);
        }
    }

    return metadataReady;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessResultBuffers
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::ProcessResultBuffers(
    ResultHolder* pResultHolder,
    BOOL          metadataAvailable,
    UINT*         pNumResults)
{
    // Collects completed (or errored) output buffers from pResultHolder into the framework
    // capture result at index *pNumResults. Returns TRUE if at least one buffer was added.
    BOOL gotResult = FALSE;
    // Index of the capture result entry currently being assembled
    UINT currentResult = *pNumResults;

    // Need to have processed the metadata before we can send any buffer(s) back
    if ((TRUE == metadataAvailable) || (0 == pResultHolder->pendingMetadataCount))
    {
        // Dispatch buffers with both OK and Error status
        UINT numBuffersOut = 0;

        // Walk every output buffer slot held by this result holder
        for (UINT32 bufIndex = 0; bufIndex < MaxNumOutputBuffers; bufIndex++)
        {
            /// @todo (CAMX-3119) remove IsTorchWidgetSession check below and handle this in generic way.
            if (TRUE == IsTorchWidgetSession())
            {
                // Invalidate the pStream for torch, as there is no actual buffer with torch.
                pResultHolder->bufferHolder[bufIndex].pStream = NULL;
            }

            // Only consider slots that still hold a valid buffer
            if ((NULL != pResultHolder->bufferHolder[bufIndex].pBuffer) &&
                (TRUE == pResultHolder->bufferHolder[bufIndex].valid))
            {
                // The buffer's stream must be set, and this sequence id must be the next one
                // allowed to return a buffer on that stream (per-stream FIFO ordering)
                if ((NULL != pResultHolder->bufferHolder[bufIndex].pStream) &&
                    (TRUE == BufferReadyToFly(pResultHolder->sequenceId, pResultHolder->bufferHolder[bufIndex].pStream)))
                {
                    // Capture result entry being populated for the framework
                    ChiCaptureResult* pResult           = &m_pCaptureResult[currentResult];
                    // The stream wrapper is stored in the stream's private info
                    ChiStreamWrapper* pChiStreamWrapper =
                        static_cast<ChiStreamWrapper*>(pResultHolder->bufferHolder[bufIndex].pStream->pPrivateInfo);

                    // Map the internal sequence id back to the framework frame number
                    pResult->frameworkFrameNum = GetFrameworkFrameNumber(pResultHolder->sequenceId);

                    /// @todo (CAMX-1797) Rethink this way of keeping track of which is the next expected frame number
                    ///                   for the stream
                    // Advance the stream to its next expected result frame
                    pChiStreamWrapper->MoveToNextExpectedResultFrame();

                    // Next free output-buffer slot in the capture result
                    ChiStreamBuffer* pStreamBuffer =
                        // NOWHINE CP036a: Google API requires const type
                        const_cast<ChiStreamBuffer*>(&pResult->pOutputBuffers[pResult->numOutputBuffers]);

                    // Copy the holder's buffer into the capture result sent to the framework
                    Utils::Memcpy(pStreamBuffer, pResultHolder->bufferHolder[bufIndex].pBuffer, sizeof(ChiStreamBuffer));

                    pResult->numOutputBuffers++;
                    // At least one buffer is going back in this result
                    gotResult = TRUE;

                    // Invalidate the stream, will help in determining FIFO order for next result
                    pResultHolder->bufferHolder[bufIndex].pStream = NULL;
                    pResultHolder->bufferHolder[bufIndex].valid = FALSE;

                    // Need to use this local since buffers may not all come back right away
                    numBuffersOut++;

                    // A buffer-error notification is attached to this slot: dispatch it now
                    if (NULL != pResultHolder->bufferHolder[bufIndex].pBufferError)
                    {
                        // Got a buffer in an error state so dispatch error
                        pResultHolder->bufferHolder[bufIndex].error = TRUE;
                        pResultHolder->bufferHolder[bufIndex].pBufferError->pPrivData =
                                                  static_cast<CHIPRIVDATA*>(pResultHolder->pPrivData);

                        CAMX_LOG_ERROR(CamxLogGroupCore, "Result buffer for sequence ID=%d in error state.",
                                       pResultHolder->sequenceId);
                        DispatchNotify(pResultHolder->bufferHolder[bufIndex].pBufferError);
                    }

                    if (numBuffersOut == pResultHolder->numOutBuffers)
                    {
                        // We got the number of output buffers that we were expecting
                        // For the input buffer, we should release the input buffer fence
                        // once we have all reprocess output buffers.
                        if ((0    != pResultHolder->numInBuffers) &&
                            (NULL != pResultHolder->inputbufferHolder[0].pBuffer))
                        {
                            // Attach the (single) input buffer to the capture result
                            pResult->pInputBuffer = pResultHolder->inputbufferHolder[0].pBuffer;
                            ChiStreamBuffer* pStreamInputBuffer =
                                // NOWHINE CP036a: Google API requires const type
                                const_cast<ChiStreamBuffer*>(pResult->pInputBuffer);

                            // Driver no longer owns this and app will take ownership
                            pStreamInputBuffer->releaseFence = -1;
                        }
                        break;
                    }
                }
            }
        }
    }
    else
    {
        // Metadata not yet handled for this frame; buffers must wait
        gotResult = FALSE;
    }

    return gotResult;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessResults
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::ProcessResults()
{
    CamxResult result = CamxResultSuccess;

    // This can race with InjectResult running on another thread; if the lock is contended,
    // skip this pass and flag the session so result processing is rescheduled.
    result = m_pResultLock->TryLock();
    if (CamxResultSuccess != result)
    {
        CAMX_LOG_PERF_WARN(CamxLogGroupCore, "m_pResultLock failed, schedule this thread for reprocessing");
        CamxAtomicStore32(&m_aCheckResults, TRUE);
        return CamxResultSuccess;
    }

    // Clear the essential fields of the framework capture results so no stale data is reported
    const UINT32 maxResults = m_requestQueueDepth * m_numPipelines;
    for (UINT32 index = 0; index < maxResults; index++)
    {
        m_pCaptureResult[index].frameworkFrameNum  = 0;
        m_pCaptureResult[index].numOutputBuffers   = 0;
        m_pCaptureResult[index].numPartialMetadata = 0;
        m_pCaptureResult[index].pResultMetadata    = NULL;
        m_pCaptureResult[index].pInputBuffer       = NULL;
    }

    UINT32 resultCount = 0;

    // Walk every pending result holder and collect those whose metadata and/or buffers are ready
    for (LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();
         NULL != pNode;
         pNode = m_resultHolderList.NextNode(pNode))
    {
        BOOL earlyMetaReady = FALSE;
        BOOL metaReady      = FALSE;
        BOOL buffersReady   = FALSE;

        CAMX_ASSERT(NULL != pNode->pData);

        if (NULL != pNode->pData)
        {
            SessionResultHolder* pSessionHolder = reinterpret_cast<SessionResultHolder*>(pNode->pData);

            // One result holder per pipeline request within this session result
            for (UINT32 holderIndex = 0; holderIndex < pSessionHolder->numResults; holderIndex++)
            {
                ResultHolder* pHolder = &(pSessionHolder->resultHolders[holderIndex]);
                if (NULL != pHolder)
                {
                    // Partial (early) metadata only applies when the configuration reports
                    // more than one metadata result per request
                    if (1 < m_numMetadataResults)
                    {
                        earlyMetaReady = ProcessResultEarlyMetadata(pHolder, &resultCount);
                    }

                    // If early metadata became available before anything else, stop here for this
                    // holder and only send the early metadata back this pass
                    if (FALSE == earlyMetaReady)
                    {
                        metaReady    = ProcessResultMetadata(pHolder, &resultCount);
                        buffersReady = ProcessResultBuffers(pHolder, metaReady, &resultCount);
                        CAMX_LOG_INFO(CamxLogGroupHAL,
                                "Processing result for Sequence ID %d and metadataReady %d bufferReady = %d",
                                pHolder->sequenceId, metaReady, buffersReady);
                    }

                    // Anything ready contributes one framework result entry
                    if ((TRUE == metaReady) || (TRUE == buffersReady) || (TRUE == earlyMetaReady))
                    {
                        m_pCaptureResult[resultCount].pPrivData =
                                static_cast<CHIPRIVDATA *>(pHolder->pPrivData);

                        resultCount++;
                        buffersReady = FALSE;
                        metaReady    = FALSE;
                    }
                }
            }
        }
    }

    if (0 < resultCount)
    {
        // Dispatch everything that became ready in this pass to the framework
        DispatchResults(&m_pCaptureResult[0], resultCount);
    }

    // Error results can advance the minimum expected result without dispatching anything,
    // so always try to advance after a processing pass
    AdvanceMinExpectedResult(FALSE);

    m_pResultLock->Unlock();

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessRequest
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::ProcessRequest()
{
    CamxResult              result          = CamxResultSuccess;
    SessionCaptureRequest*  pSessionRequest = NULL;

    // Initialize a result holder expected for the result coming out of this request.
    // This information will be used in the result notification path.

    // This should only ever be called from threadpool, should never be reentrant, and nothing else grabs the request lock.
    // If there is contention on this lock something very bad happened.
    result = m_pRequestLock->TryLock();
    if (CamxResultSuccess != result)
    {
        // Should never happen...return control back to the threadpool and this will eventually get called again
        CAMX_LOG_ERROR(CamxLogGroupCore, "Could not grab m_pRequestLock...undefined behavior possible");
        return CamxResultETryAgain;
    }

    pSessionRequest = static_cast<SessionCaptureRequest*>(m_pRequestQueue->Dequeue());

    if (NULL != pSessionRequest)
    {
        // If the session request contains multiple pipeline requests, the pipelines need to be
        // synchronized and so every per-pipeline request must carry the same batched frame count.
        UINT32 numBatchedFrames = pSessionRequest->requests[0].numBatchedFrames;
        for (UINT requestIndex = 0; requestIndex < pSessionRequest->numRequests; requestIndex++)
        {
            if (numBatchedFrames != pSessionRequest->requests[requestIndex].numBatchedFrames)
            {
                CAMX_LOG_ERROR(CamxLogGroupCore,
                    "batch frame number are different in different pipline request");
                m_pRequestLock->Unlock();
                return CamxResultEInvalidArg;
            }
        }

        const SettingsManager* pSettingManager = HwEnvironment::GetInstance()->GetSettingsManager();

        if (TRUE == pSettingManager->GetStaticSettings()->dynamicPropertiesEnabled)
        {
            // NOWHINE CP036a: We're actually poking into updating the settings dynamically so we do want to do this
            const_cast<SettingsManager*>(pSettingManager)->UpdateOverrideProperties();
        }

        // One result node and one session result holder per batched frame, shared by all pipeline
        // requests within this session request. These arrays hold the pointers only temporarily;
        // ownership of each node/holder transfers to m_resultHolderList on success.
        LightweightDoublyLinkedListNode** ppResultNodes         = NULL;
        SessionResultHolder**             ppSessionResultHolder = NULL;

        // Stop filling result holders as soon as any allocation or setup step fails
        for (UINT requestIndex = 0;
             (requestIndex < pSessionRequest->numRequests) && (CamxResultSuccess == result);
             requestIndex++)
        {
            CaptureRequest* pRequest = &(pSessionRequest->requests[requestIndex]);
            CAMX_ASSERT(pRequest->numBatchedFrames > 0);

            if (NULL == ppResultNodes)
            {
                ppResultNodes = reinterpret_cast<LightweightDoublyLinkedListNode**>(
                    CAMX_CALLOC(numBatchedFrames * sizeof(LightweightDoublyLinkedListNode*)));

                if (NULL != ppResultNodes)
                {
                    Utils::Memset(ppResultNodes, 0x0, numBatchedFrames * sizeof(LightweightDoublyLinkedListNode*));
                }
            }

            if (NULL == ppSessionResultHolder)
            {
                ppSessionResultHolder = reinterpret_cast<SessionResultHolder**>(
                    CAMX_CALLOC(numBatchedFrames * sizeof(SessionResultHolder*)));

                if (NULL != ppSessionResultHolder)
                {
                    Utils::Memset(ppSessionResultHolder, 0x0, numBatchedFrames * sizeof(SessionResultHolder*));
                }
            }

            if ((NULL == ppResultNodes) || (NULL == ppSessionResultHolder))
            {
                // Flag the failure so the request is not submitted without result bookkeeping
                CAMX_LOG_ERROR(CamxLogGroupCore, "Out of memory");
                result = CamxResultENoMemory;
            }
            else
            {
                // Add sequence id to framework frame number mapping after CheckRequestProcessingRate() = TRUE.
                // This makes sure a new process request does not override an old result that has not been
                // sent to the framework yet.
                for (UINT32 i = 0; i < pRequest->numBatchedFrames; i++)
                {
                    UINT32 sequenceId = pRequest->streamBuffers[i].sequenceId;

                    m_fwFrameNumberMap[sequenceId % MaxQueueDepth] = pRequest->streamBuffers[i].originalFrameworkNumber;

                    CAMX_LOG_INFO(CamxLogGroupHAL,
                        "Sequence ID %d assigned for framework frameNumber %llu, RequestId: %llu and m_requestQueueDepth %d",
                        sequenceId, pRequest->streamBuffers[i].originalFrameworkNumber,
                        pRequest->requestId, m_requestQueueDepth);
                }

                for (UINT batchIndex = 0; batchIndex < pRequest->numBatchedFrames; batchIndex++)
                {
                    UINT32 sequenceId = pRequest->streamBuffers[batchIndex].sequenceId;

                    CAMX_TRACE_MESSAGE_F(CamxLogGroupHAL, "ProcessRequest: RequestId: %llu sequenceId: %u",
                        pRequest->requestId, sequenceId);

                    // Lazily allocate the per-frame node (shared across the pipeline requests)
                    LightweightDoublyLinkedListNode* pNode = ppResultNodes[batchIndex];
                    if (NULL == pNode)
                    {
                        pNode = reinterpret_cast<LightweightDoublyLinkedListNode*>
                            (CAMX_CALLOC(sizeof(LightweightDoublyLinkedListNode)));
                        ppResultNodes[batchIndex] = pNode;
                    }

                    // Lazily allocate the per-frame session result holder
                    SessionResultHolder* pSessionResultHolder = ppSessionResultHolder[batchIndex];
                    if (NULL == pSessionResultHolder)
                    {
                        pSessionResultHolder = reinterpret_cast<SessionResultHolder*>
                            (CAMX_CALLOC(sizeof(SessionResultHolder)));
                        ppSessionResultHolder[batchIndex] = pSessionResultHolder;
                    }

                    if ((NULL == pNode) ||
                        (NULL == pSessionResultHolder))
                    {
                        CAMX_LOG_ERROR(CamxLogGroupCore, "Out of memory");
                        result = CamxResultENoMemory;

                        // Free whichever of the pair was allocated AND clear its array slot too,
                        // so later cleanup/insertion never sees a dangling pointer
                        if (NULL != pNode)
                        {
                            CAMX_FREE(pNode);
                            pNode                     = NULL;
                            ppResultNodes[batchIndex] = NULL;
                        }

                        if (NULL != pSessionResultHolder)
                        {
                            CAMX_FREE(pSessionResultHolder);
                            pSessionResultHolder              = NULL;
                            ppSessionResultHolder[batchIndex] = NULL;
                        }
                    }

                    if (CamxResultSuccess == result)
                    {
                        // Populate the result holder slot belonging to this pipeline request
                        ResultHolder* pHolder = &(pSessionResultHolder->resultHolders[requestIndex]);
                        Utils::Memset(pHolder, 0x0, sizeof(ResultHolder));
                        pHolder->sequenceId           = sequenceId;
                        pHolder->numOutBuffers        = pRequest->streamBuffers[batchIndex].numOutputBuffers;
                        pHolder->numInBuffers         = pRequest->streamBuffers[batchIndex].numInputBuffers;
                        pHolder->pendingMetadataCount = m_numMetadataResults;
                        pHolder->pPrivData            = pRequest->pPrivData;

                        // We may not get a result metadata for reprocess requests.
                        // This logic may need to be expanded for multi-camera CHI override scenarios,
                        // as to designate what pipelines are exactly offline.
                        if (pRequest->pipelineIndex > 0)
                        {
                            pHolder->tentativeMetadata = TRUE;
                        }

                        // Stage each requested output buffer in the holder, keyed by stream index
                        for (UINT32 buffer = 0; buffer < pHolder->numOutBuffers; buffer++)
                        {
                            UINT32 streamIndex = GetStreamIndex(reinterpret_cast<ChiStream*>(
                                pRequest->streamBuffers[batchIndex].outputBuffers[buffer].pStream));

                            if (streamIndex < MaxNumOutputBuffers)
                            {
                                pHolder->bufferHolder[streamIndex].pBuffer = GetResultStreamBuffer();

                                Utils::Memcpy(pHolder->bufferHolder[streamIndex].pBuffer,
                                              &(pRequest->streamBuffers[batchIndex].outputBuffers[buffer]),
                                              sizeof(ChiStreamBuffer));

                                // Flag for buffer is invalid or not
                                pHolder->bufferHolder[streamIndex].valid = FALSE;

                                pHolder->bufferHolder[streamIndex].pStream = reinterpret_cast<ChiStream*>(
                                    pRequest->streamBuffers[batchIndex].outputBuffers[buffer].pStream);

                                ChiStreamWrapper* pChiStreamWrapper = static_cast<ChiStreamWrapper*>(
                                    pRequest->streamBuffers[batchIndex].outputBuffers[buffer].pStream->pPrivateInfo);

                                // Record the sequence id in which this stream was enabled
                                pChiStreamWrapper->AddEnabledInFrame(pRequest->streamBuffers[batchIndex].sequenceId);
                            }
                            else
                            {
                                // Out-of-range stream index (log text fixed; this branch means >=)
                                CAMX_LOG_ERROR(CamxLogGroupCore, "stream index = %d >= MaxNumOutputBuffers = %d",
                                               streamIndex, MaxNumOutputBuffers);
                            }
                        }

                        // Stage the input (reprocess) buffers and create their fences
                        for (UINT32 buffer = 0; buffer < pHolder->numInBuffers; buffer++)
                        {
                            UINT32 streamIndex =
                                GetStreamIndex(
                                    reinterpret_cast<ChiStream*>(
                                        pRequest->streamBuffers[batchIndex].inputBufferInfo[buffer].inputBuffer.pStream));

                            /// @todo (CAMX-1797) Kernel currently requires us to pass a fence always even if we dont need it.
                            ///                   Fix that and also need to handle input fence mechanism
                            CSLFence* phFence = &pRequest->streamBuffers[batchIndex].inputBufferInfo[buffer].fence;

                            result = CSLCreatePrivateFence("InputBufferFence", phFence);

                            if (CamxResultSuccess != result)
                            {
                                CAMX_LOG_ERROR(CamxLogGroupCore, "process request failed : result %d", result);
                                break;
                            }

                            if (streamIndex < MaxNumInputBuffers)
                            {
                                pHolder->inputbufferHolder[streamIndex].pStream =
                                    reinterpret_cast<ChiStream*>(
                                        pRequest->streamBuffers[batchIndex].inputBufferInfo[buffer].inputBuffer.pStream);

                                // Copy the input buffer into a driver-owned result buffer
                                pHolder->inputbufferHolder[streamIndex].pBuffer = GetResultStreamBuffer();
                                Utils::Memcpy(pHolder->inputbufferHolder[streamIndex].pBuffer,
                                              &(pRequest->streamBuffers[batchIndex].inputBufferInfo[buffer].inputBuffer),
                                              sizeof(ChiStreamBuffer));
                            }
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
        }

        if (CamxResultSuccess == result)
        {
            if ((NULL != ppResultNodes) && (NULL != ppSessionResultHolder))
            {
                // Hand each per-frame holder over to the result list; the result processing path
                // owns (and eventually frees) the node and holder from here on
                for (UINT batchIndex = 0; batchIndex < numBatchedFrames; batchIndex++)
                {
                    LightweightDoublyLinkedListNode* pNode                  = ppResultNodes[batchIndex];
                    SessionResultHolder*             pSessionResultHolder   = ppSessionResultHolder[batchIndex];
                    pSessionResultHolder->numResults = pSessionRequest->numRequests;
                    pNode->pData = pSessionResultHolder;
                    m_pResultLock->Lock();
                    m_resultHolderList.InsertToTail(pNode);
                    m_pResultLock->Unlock();
                }
            }
        }
        else
        {
            // Nothing was queued; release any per-frame allocations that did succeed so they do not leak
            for (UINT batchIndex = 0; batchIndex < numBatchedFrames; batchIndex++)
            {
                if ((NULL != ppResultNodes) && (NULL != ppResultNodes[batchIndex]))
                {
                    CAMX_FREE(ppResultNodes[batchIndex]);
                    ppResultNodes[batchIndex] = NULL;
                }

                if ((NULL != ppSessionResultHolder) && (NULL != ppSessionResultHolder[batchIndex]))
                {
                    CAMX_FREE(ppSessionResultHolder[batchIndex]);
                    ppSessionResultHolder[batchIndex] = NULL;
                }
            }
        }

        // De-allocate only the arrays of pointers; on success the actual nodes and session result
        // holders are freed later in the result processing path
        if (NULL != ppResultNodes)
        {
            CAMX_FREE(ppResultNodes);
            ppResultNodes = NULL;
        }
        if (NULL != ppSessionResultHolder)
        {
            CAMX_FREE(ppSessionResultHolder);
            ppSessionResultHolder = NULL;
        }
    }
    m_pRequestLock->Unlock();

    // Only submit to the pipelines when the result bookkeeping above was fully set up;
    // submitting without holders would strand the request's buffers
    if ((NULL != pSessionRequest) && (CamxResultSuccess == result))
    {
        // Open a CSL request for every pipeline request before submitting any of them
        for (UINT requestIndex = 0; requestIndex < pSessionRequest->numRequests; requestIndex++)
        {
            CaptureRequest* pRequest = &(pSessionRequest->requests[requestIndex]);

            result = m_pipelineData[pRequest->pipelineIndex].pPipeline->OpenRequest(pRequest->requestId, pRequest->CSLSyncID);
            CAMX_LOG_VERBOSE(CamxLogGroupCore, "pipeline[%d] OpenRequest called for request id = %llu",
                    pRequest->pipelineIndex, pRequest->requestId);
            if (CamxResultSuccess != result)
            {
                CAMX_LOG_ERROR(CamxLogGroupCore, "pipeline[%d] OpenRequest failed for request id = %llu",
                    pRequest->pipelineIndex, pRequest->requestId);
            }
        }

        for (UINT requestIndex = 0; requestIndex < pSessionRequest->numRequests; requestIndex++)
        {
            CaptureRequest* pRequest = &(pSessionRequest->requests[requestIndex]);

            // Pipeline to process this request
            PipelineProcessRequestData pipelineProcessRequestData = { 0 };
            pipelineProcessRequestData.pCaptureRequest            = pRequest;

            // Fill in which streams are active for each batched frame of this request
            DetermineActiveStreams(&pipelineProcessRequestData);

            for (UINT batchIndex = 0; batchIndex < pRequest->numBatchedFrames; batchIndex++)
            {
                CAMX_ASSERT(pipelineProcessRequestData.perBatchedFrameInfo[batchIndex].activeStreamIdMask > 0);
            }

            // Trigger the fences on the input buffers so reprocess inputs are immediately consumable
            for (UINT batchIndex = 0; batchIndex < pRequest->numBatchedFrames; batchIndex++)
            {
                UINT32 numInputBuffers = pRequest->streamBuffers[batchIndex].numInputBuffers;

                for (UINT32 buffer = 0; buffer < numInputBuffers; buffer++)
                {
                    if (CSLInvalidHandle != pRequest->streamBuffers[batchIndex].inputBufferInfo[buffer].fence)
                    {
                        CSLFenceSignal(pRequest->streamBuffers[batchIndex].inputBufferInfo[buffer].fence,
                                       CSLFenceResultSuccess);
                    }
                }
            }

            /// @todo (CAMX-1512) Handle multiple pipelines per session
            result = m_pipelineData[pRequest->pipelineIndex].pPipeline->ProcessRequest(&pipelineProcessRequestData);

            if (CamxResultSuccess != result)
            {
                CAMX_LOG_ERROR(CamxLogGroupCore, "pipeline[%d] ProcessRequest failed", pRequest->pipelineIndex);
            }
        }
    }

    if (NULL != pSessionRequest)
    {
        // The dequeued session request is always returned to the queue's pool, success or not
        m_pRequestQueue->Release(pSessionRequest);
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleErrorCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleErrorCb(
    CbPayloadError* pError,
    UINT            pipelineIndex,
    VOID*           pPrivData)
{
    VOID*                 pErrorPayloads[2] = {0};
    ChiMessageDescriptor* pMessage          = GetNotifyMessageDescriptor();
    PipelineOutputData*   pPipelineOutput   = NULL;
    ChiStreamBuffer*      pErrorBuffer      = NULL;

    CAMX_ASSERT(NULL != pMessage);

    // Fields common to every error notification
    pMessage->messageType                            = ChiMessageTypeError;
    pMessage->message.errorMessage.frameworkFrameNum = GetFrameworkFrameNumber(pError->sequenceId);
    pMessage->message.errorMessage.errorMessageCode  = static_cast<ChiErrorMessageCode>(pError->code);
    pMessage->pPrivData                              = static_cast<CHIPRIVDATA *>(pPrivData);

    switch (pError->code)
    {
        case MessageCodeDevice:
        {
            /// @todo (CAMX-3266) Finalize error fence callbacks for on the flight results, we would depend on flush
            ///                implementation under normal conditions.
            ///                Yet, a device in error state might not be responsive; decide how to handle
            CAMX_LOG_ERROR(CamxLogGroupCore, "Device is in error condition!");

            // Set error state (block processing of capture requests and results), block Callbacks including torch
            // notify that would cause a segfault if device is resetting, etc.
            SetDeviceInError(TRUE);
            HAL3Module::GetInstance()->SetDropCallbacks();

            pMessage->message.errorMessage.pErrorStream = NULL; // No stream applicable

            // Dispatch it immediately
            DispatchNotify(pMessage);

            // Trigger if caller was blocked on ProcessCaptureRequest
            CAMX_LOG_INFO(CamxLogGroupHAL, "Canceling HALQUEUE");
            m_pRequestQueue->CancelWait();

            // We expect close to be called at this point
            break;
        }

        case MessageCodeRequest:
        {
            pMessage->message.errorMessage.pErrorStream = NULL; // No stream applicable

            // Dispatch it immediately
            DispatchNotify(pMessage);

            // Needed to ensure that no other result gets notified for this frame
            InjectResult(ResultType::RequestError, pMessage, pError->sequenceId, pPrivData);
            break;
        }

        case MessageCodeResult:
        {
            pMessage->message.errorMessage.pErrorStream = NULL; // No stream applicable

            // Notification will be dispatched along with other results
            InjectResult(ResultType::MetadataError, pMessage, pError->sequenceId, pPrivData);
            break;
        }

        case MessageCodeBuffer:
        {
            pErrorBuffer = GetResultStreamBuffer();

            CAMX_ASSERT(NULL != pErrorBuffer);

            pPipelineOutput = &m_pipelineData[pipelineIndex].pPipelineDescriptor->outputData[pError->streamId];

            // Mark the buffer as failed before handing it back through the result path
            pErrorBuffer->pStream      =
                reinterpret_cast<ChiStream*>(pPipelineOutput->pOutputStreamWrapper->GetNativeStream());
            pErrorBuffer->phBuffer     = pError->phBuffer;
            pErrorBuffer->bufferStatus = BufferStatusError;
            pErrorBuffer->releaseFence = -1;

            if (TRUE == isVideoStream(pErrorBuffer->pStream->grallocUsage))
            {
                SetPerFrameVTTimestampMetadata(*pError->phBuffer,
                    GetIntraPipelinePerFramePool(PoolType::PerFrameResult, pipelineIndex),
                    pError->sequenceId + 1); // request ID starts from 1
            }

            pMessage->message.errorMessage.pErrorStream =
                reinterpret_cast<ChiStream*>(pPipelineOutput->pOutputStreamWrapper->GetNativeStream());

            pErrorPayloads[0] = pErrorBuffer;
            pErrorPayloads[1] = pMessage;

            // Notification will be dispatched along with other results
            InjectResult(ResultType::BufferError, pErrorPayloads, pError->sequenceId, pPrivData);
            break;
        }

        default:
            break;
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleAsyncCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleAsyncCb(
    CbPayloadAsync* pAsync,
    VOID*           pPrivData)
{
    ChiMessageDescriptor* pShutterMessage = GetNotifyMessageDescriptor();

    CAMX_ASSERT(NULL != pShutterMessage);

    // Async callbacks from the pipeline are always reported to CHI as shutter notifications
    pShutterMessage->messageType                              = ChiMessageTypeShutter;
    pShutterMessage->pPrivData                                = static_cast<CHIPRIVDATA *>(pPrivData);
    pShutterMessage->message.shutterMessage.frameworkFrameNum = GetFrameworkFrameNumber(pAsync->sequenceId);
    pShutterMessage->message.shutterMessage.timestamp         = pAsync->timestamp;

    // Shutter notifications go out immediately rather than being batched with results
    DispatchNotify(pShutterMessage);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleSOFCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleSOFCb(
    CbPayloadSof* pSof)
{
    ChiMessageDescriptor* pSofMessage = GetNotifyMessageDescriptor();

    CAMX_ASSERT(NULL != pSofMessage);

    // Report the start-of-frame event to CHI as a SOF message
    pSofMessage->messageType                                  = ChiMessageTypeSof;
    pSofMessage->message.sofMessage.timestamp                 = pSof->timestamp;
    pSofMessage->message.sofMessage.sofId                     = pSof->frameNum;
    pSofMessage->message.sofMessage.bIsFrameworkFrameNumValid = pSof->bIsSequenceIdValid;

    // Only a valid sequence id can be mapped back to a framework frame number
    if (pSof->bIsSequenceIdValid)
    {
        pSofMessage->message.sofMessage.frameworkFrameNum = GetFrameworkFrameNumber(pSof->sequenceId);
    }

    // SOF messages are sent immediately rather than batched with results
    DispatchNotify(pSofMessage);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleMetadataCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleMetadataCb(
    CbPayloadMetadata* pMetadata,
    VOID*              pPrivData)
{
    // Hand the metadata pointer straight through; the same pMetadata->pMetadata object is
    // consumed as-is all the way up to the framework (no copy is made here)
    VOID* const  pMetaPayload = pMetadata->pMetadata;
    const UINT32 sequenceId   = pMetadata->sequenceId;

    InjectResult(ResultType::MetadataOK, pMetaPayload, sequenceId, pPrivData);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleEarlyMetadataCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleEarlyMetadataCb(
    CbPayloadMetadata* pMetadata,
    VOID*              pPrivData)
{
    // Same pass-through as HandleMetadataCb, but tagged as an early (partial) metadata result;
    // pMetadata->pMetadata is used as-is all the way up to the framework
    VOID* const  pMetaPayload = pMetadata->pMetadata;
    const UINT32 sequenceId   = pMetadata->sequenceId;

    InjectResult(ResultType::EarlyMetadataOK, pMetaPayload, sequenceId, pPrivData);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleBufferCb
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleBufferCb(
    CbPayloadBuffer* pPayload,
    UINT             pipelineIndex,
    VOID*            pPrivData)
{
    PipelineOutputData* pOutputData =
        &m_pipelineData[pipelineIndex].pPipelineDescriptor->outputData[pPayload->streamId];

    // Build the successful result buffer that will be injected into the result holder
    ChiStreamBuffer resultBuffer = { 0 };

    resultBuffer.pStream      = reinterpret_cast<ChiStream*>(pOutputData->pOutputStreamWrapper->GetNativeStream());
    resultBuffer.phBuffer     = pPayload->phBuffer;
    resultBuffer.bufferStatus = BufferStatusOK;
    resultBuffer.releaseFence = -1; // For the moment

    if (TRUE == isVideoStream(resultBuffer.pStream->grallocUsage))
    {
        // Video buffers carry a per-frame VT timestamp in their private metadata
        SetPerFrameVTTimestampMetadata(*pPayload->phBuffer,
            GetIntraPipelinePerFramePool(PoolType::PerFrameResult, pipelineIndex),
            pPayload->sequenceId + 1); // request ID starts from 1

        if ((FALSE == m_pChiContext->GetStaticSettings()->disableVideoPerfModeSetting) &&
           (TRUE == m_setVideoPerfModeFlag))
        {
            SetPerFrameVideoPerfModeMetadata(*pPayload->phBuffer);
        }
    }

    InjectResult(ResultType::BufferOK, &resultBuffer, pPayload->sequenceId, pPrivData);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::InjectResult
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Records a per-sequence result (metadata, buffer, or error) into the matching ResultHolder under
// m_pResultLock, then wakes the HAL worker thread if the framework-notify criteria are met.
// Returns CamxResultSuccess when the result was recorded (or silently dropped because the holder
// was already flushed), CamxResultEFailed when the device/request is in an error state.
CamxResult Session::InjectResult(
    ResultType  resultType,     // Kind of result being injected (metadata/buffer, OK/error variants)
    VOID*       pPayload,       // Payload; concrete type depends on resultType (see casts below)
    UINT32      sequenceId,     // Sequence id identifying the capture this result belongs to
    VOID*       pPrivData)      // Opaque private data stored on the holder for later result dispatch
{
    CamxResult result = CamxResultSuccess;

    m_pResultLock->Lock();

    ResultHolder* pHolder = GetResultHolderBySequenceId(sequenceId);

    // A missing holder is not fatal: the request may have been flushed already, so the
    // result is simply dropped and success is reported to the caller.
    if (NULL == pHolder)
    {
        CAMX_LOG_ERROR(CamxLogGroupCore,
                       "Result holder NULL for seqId: %d, this request may be flushed out already.",
                       sequenceId);

        m_pResultLock->Unlock();
        return CamxResultSuccess;
    }

    CAMX_ASSERT(FALSE == GetDeviceInError());
    CAMX_ASSERT(FALSE == pHolder->addNoMore);

    // Bail out if device/request is in error
    if ((TRUE == GetDeviceInError()) || (TRUE == pHolder->addNoMore))
    {
        CAMX_LOG_ERROR(CamxLogGroupCore, "Cannot inject results");
        m_pResultLock->Unlock();

        return CamxResultEFailed;
    }
    // Dispatch on the result type; each branch stores its payload in the appropriate holder slot.
    if (ResultType::RequestError == resultType)
    {
        CAMX_LOG_ERROR(CamxLogGroupCore, "Reporting a request error to the framework");
        pHolder->addNoMore = TRUE;      // Cannot accept any more result for this frame num
        ClearAllPendingItems(pHolder);
    }
    else if (ResultType::MetadataError == resultType)
    {
        // Payload is the error notification message for the metadata of this sequence
        pHolder->pMetadataError = static_cast<ChiMessageDescriptor*>(pPayload);
    }
    else if (ResultType::MetadataOK == resultType)
    {
        /// @todo (CAMX-271) - Handle more than one (>1) partial metadata in pipeline/HAL -
        ///                    When we handle metadata in pipeline, we need to decide how we
        ///                    want to break the slot metadata into multiple result metadata
        ///                    components, as per the contract in MaxPartialMetadataHAL
        ///                    (i.e. android.request.partialResultCount)
        CAMX_LOG_INFO(CamxLogGroupHAL, "Inject result added metadata in result holder for sequence ID : %d", sequenceId);

        // Slot 0 holds the final metadata; metadataCbIndex counts metadata callbacks received
        pHolder->pMetadata[0] = static_cast<Metadata*>(pPayload);
        pHolder->metadataCbIndex++;
    }
    else if (ResultType::EarlyMetadataOK == resultType)
    {
        CAMX_LOG_INFO(CamxLogGroupHAL, "Inject result added early metadata in result holder for sequence ID : %d", sequenceId);
        // Slot 1 holds the early (partial) metadata
        pHolder->pMetadata[1] = static_cast<Metadata*>(pPayload);
        pHolder->metadataCbIndex++;

    }
    else if (ResultType::BufferError == resultType)
    {
        // Payload is a two-element array: [0] = the errored ChiStreamBuffer, [1] = the error message
        VOID**           ppPayloads = static_cast<VOID**>(pPayload);
        ChiStreamBuffer* pBuffer    = static_cast<ChiStreamBuffer*>(ppPayloads[0]);
        ChiStream*       pStream    = pBuffer->pStream;

        UINT32 streamIndex = GetStreamIndex(pStream);

        CAMX_LOG_ERROR(CamxLogGroupCore, "Reporting a buffer error to the framework for streamIndex %u sequenceId : %d",
                       streamIndex, sequenceId);

        if (MaxNumOutputBuffers != streamIndex)
        {
            pHolder->bufferHolder[streamIndex].pBuffer      = pBuffer;
            pHolder->bufferHolder[streamIndex].pBufferError = static_cast<ChiMessageDescriptor*>(ppPayloads[1]);
        }
    }
    else if (ResultType::BufferOK == resultType)
    {
        ChiStreamBuffer* pBuffer = static_cast<ChiStreamBuffer*>(pPayload);
        ChiStream*       pStream = pBuffer->pStream;

        UINT32 streamIndex = GetStreamIndex(pStream);

        if (MaxNumOutputBuffers != streamIndex)
        {
            CAMX_LOG_INFO(CamxLogGroupHAL,
                          "Inject result added request %d output buffer in result holder:Stream %p Fmt: %d W: %d H: %d",
                          sequenceId,
                          pStream,
                          pStream->format,
                          pStream->width,
                          pStream->height);

            // Only accept the buffer if it matches the stream/handle already registered in the
            // holder's slot; a mismatch indicates a routing error and is logged, not stored.
            if (pHolder->bufferHolder[streamIndex].pBuffer->pStream == pStream &&
                pHolder->bufferHolder[streamIndex].pBuffer->phBuffer == pBuffer->phBuffer)
            {
                // Caller's ChiStreamBuffer is stack-allocated, so copy it into the holder
                Utils::Memcpy(pHolder->bufferHolder[streamIndex].pBuffer,
                              pBuffer,
                              sizeof(ChiStreamBuffer));
                pHolder->bufferHolder[streamIndex].valid = TRUE;
            }
            else
            {
                CAMX_LOG_ERROR(CamxLogGroupCore, "Session %p: Sequence ID %d: Result bufferHolder[%d] pStream = %p, "
                               "phBuffer = %p does not match buffer Cb pStream = %p, phBuffer = %p",
                               this,
                               sequenceId,
                               streamIndex,
                               pHolder->bufferHolder[streamIndex].pBuffer->pStream,
                               pHolder->bufferHolder[streamIndex].pBuffer->phBuffer,
                               pStream,
                               pBuffer->phBuffer);
            }
        }
    }

    pHolder->pPrivData      = pPrivData;

    // Make the result holder slot live, it's ok to write it more than once
    pHolder->isAlive = TRUE;

    m_pResultLock->Unlock();

    // NOTE(review): pHolder is read below after m_pResultLock has been released; this appears
    // to rely on holders only being freed by the same worker path — confirm lifetime guarantees.
    if (TRUE == MeetFrameworkNotifyCriteria(pHolder))
    {
        // Worker thread needs to check results
        CamxAtomicStore32(&m_aCheckResults, TRUE);


        if (m_pThreadManager->GetJobCount(m_hJobFamilyHandle) <= 1)
        {
            // Check current HAL worker thread job count. If count is 0 or 1, it means we are in below two situations.
            // 1. Possible last request comes in and no job will posted by request anymore.
            // 2. Request is processed too fast and paused.
            // Both cases means there might not be any HALWorker jobs pending to consume the result.
            // And since we have a result that needs to be processed by the HALWorker, it needs to post a job now.

            VOID* pData[] = { this, NULL };
            result        = m_pThreadManager->PostJob(m_hJobFamilyHandle, NULL, &pData[0], FALSE, FALSE);
        }
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ClearAllPendingItems
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::ClearAllPendingItems(
    ResultHolder* pHolder
    ) const
{
    // The request behind this holder was in error: no result for this frame will be deemed
    // valid, so drop every pending deliverable and let the next result proceed.
    pHolder->pendingMetadataCount = 0;

    // Detach the stream from every buffer slot so the holder is considered fully consumed
    UINT32 bufferIndex = 0;
    while (bufferIndex < MaxNumOutputBuffers)
    {
        pHolder->bufferHolder[bufferIndex].pStream = NULL;
        bufferIndex++;
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::MeetFrameworkNotifyCriteria
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::MeetFrameworkNotifyCriteria(
    ResultHolder* pHolder)
{
    CAMX_UNREFERENCED_PARAM(pHolder);

    // Every injected result currently qualifies: we ensure that we have got at least as many
    // metadata as HAL is expected to send in a result.
    /// @todo (CAMX-1797) For offline pipelines this is not necessarily valid
    //
    // This hook is deliberately left open for optimization ---
    // We may want to wake the worker up only when we have a batch of results, OR
    // We have output buffers from specific streams in the result, OR
    // Move the burden of checking FIFO eligibility from ProcessResults to here
    return TRUE;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::MetadataReadyToFly
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Decides whether the metadata for the given sequence id may be dispatched to the framework.
// Fix: the original dereferenced m_resultHolderList.Head()->pData unconditionally; the head can
// be NULL (see the warning path in AdvanceMinExpectedResult), so guard it and treat an empty
// list as "nothing pending, good to go".
BOOL Session::MetadataReadyToFly(
    UINT32 sequenceId)
{
    BOOL canDispatch = TRUE;

    LightweightDoublyLinkedListNode* pHeadNode = m_resultHolderList.Head();

    // Nothing pending (or inconsistent list head), good to go
    if ((NULL == pHeadNode) || (NULL == pHeadNode->pData))
    {
        return canDispatch;
    }

    SessionResultHolder* pSessionResultHolder = reinterpret_cast<SessionResultHolder*>(pHeadNode->pData);
    CamxResult           result               = CamxResultEFailed;

    for (UINT32 i = 0; i < pSessionResultHolder->numResults; i++)
    {
        ResultHolder* pHolder = &pSessionResultHolder->resultHolders[i];

        if (sequenceId == pHolder->sequenceId)
        {
            // This sequence is at the head of the queue: dispatch only once at least one
            // metadata callback has been recorded for it
            canDispatch = (pHolder->metadataCbIndex == 0) ? FALSE : TRUE;
            result      = CamxResultSuccess;
            break;
        }
    }

    if (CamxResultSuccess != result)
    {
        // This sequence is not at the head; consult the previous frame's holder instead
        ResultHolder* pLastFrameHolder = GetResultHolderBySequenceId(sequenceId - 1);

        // If we find a previous result AND its metadata count is the same as num partials, then we likely
        // still have processing to do. If pendingMetadataCount != numPartial, it means we've gotten some metadata back
        if ((NULL != pLastFrameHolder) && (pLastFrameHolder->pendingMetadataCount == m_numMetadataResults))
        {
            canDispatch = FALSE;
        }
    }

    return canDispatch;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::BufferReadyToFly
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::BufferReadyToFly(
    UINT32      sequenceId,
    ChiStream*  pStream)
{
    // Buffers must be returned to the framework in per-stream FIFO order: the stream wrapper
    // tracks which sequence id it expects to emit next.
    ChiStreamWrapper* pWrapper = static_cast<ChiStreamWrapper*>(pStream->pPrivateInfo);

    return (TRUE == pWrapper->IsNextExpectedResultFrame(sequenceId)) ? TRUE : FALSE;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::AdvanceMinExpectedResult
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Walks the result holder list from the oldest entry, popping every entry whose results have all
// been delivered (or unconditionally when flushing), and stops at the first incomplete entry.
// Fixes: the "can't advance" logs previously read the sequence id via
// reinterpret_cast<ResultHolder*>(pNode->pData), which misinterprets a SessionResultHolder as a
// ResultHolder (layout-dependent); use the in-scope pHolder instead. Chinese comments translated.
VOID Session::AdvanceMinExpectedResult(
    BOOL flush)
{
    /// @note This function can only be called from Session::ProcessRequest with m_pResultLock taken.  If it is called from
    ///       anywhere else, a deadlock is possible
    BOOL canAdvance          = TRUE;    // Whether the minimum expected result may be advanced
    BOOL moveNext            = TRUE;    // Whether to step to the next node (vs re-read head after removal)

    LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();
    if ((NULL == pNode) && (0 < m_resultHolderList.NumNodes()))
    {
        CAMX_LOG_WARN(CamxLogGroupCore,
                "Session %p: Warning: Result Holder HEAD is NULL with size %d",
                this, m_resultHolderList.NumNodes());
    }

    // Iterate over every result holder node, oldest first
    while (NULL != pNode)
    {
        SessionResultHolder* pSessionResultHolder = reinterpret_cast<SessionResultHolder*>(pNode->pData);
        // If there are multiple pipeline result map to same framework number
        // e.g. In dual camera both wide and tele pipeline need to generate output for same framework request
        // all the pipeline result need to be sent before it can advance.
        for (UINT i = 0 ; i < pSessionResultHolder->numResults; i++)
        {
            ResultHolder* pHolder = &pSessionResultHolder->resultHolders[i];

            if (NULL != pHolder)
            {
                if (FALSE == flush)
                {
                    // Do not advance the minimum result if there is still pending metadata buffer.
                    if (pHolder->pendingMetadataCount > 0)
                    {
                        CAMX_LOG_INFO(CamxLogGroupCore,
                                      "Session %p: Can't advance because of metadata for id: %d",
                                      this,
                                      pHolder->sequenceId);
                        canAdvance = FALSE;
                    }

                    // No pending metadata; check the output buffers next
                    if (TRUE == canAdvance)
                    {
                        // Make sure all of the pStreams have been consumed
                        // from the result bufferHolder, if any are NOT NULL it means
                        // we haven't finished with the request and still have more
                        // results to return
                        for (UINT32 j = 0; j < MaxNumOutputBuffers; j++)
                        {
                            if (NULL != pHolder->bufferHolder[j].pStream)
                            {
                                CAMX_LOG_INFO(CamxLogGroupCore,
                                              "Session %p: Can't advance because of buffer[%d] for id: %d",
                                              this,
                                              j,
                                              pHolder->sequenceId);
                                canAdvance = FALSE;
                                break;
                            }
                        }
                    }
                }

                // Something in this holder (metadata or buffer) is still outstanding;
                // this node blocks the queue, so stop checking further holders.
                if (FALSE == canAdvance)
                {
                    break;
                }
            }
            else
            {
                CAMX_LOG_ERROR(CamxLogGroupCore, "Session %p: pHolder is NULL while trying to advance min result.", this);
            }
        }

        // Every holder in this node is complete; pop it and move forward
        if (TRUE == canAdvance)
        {
            for (UINT i = 0 ; i < pSessionResultHolder->numResults; i++)
            {
                ResultHolder* pHolder = &pSessionResultHolder->resultHolders[i];
                // Making it this far for a given pHolder means it's ready to be popped off the
                // list and we can move forward.

                // If DRQ debugging is enabled dump it out.
                m_pDeferredRequestQueue->Dump(pHolder->sequenceId);
                // Add some trace events
                CAMX_TRACE_ASYNC_END_F(CamxLogGroupHAL, pHolder->sequenceId, "HAL3: RequestTrace");
                CAMX_LOG_INFO(CamxLogGroupCore,
                              "Session %p: Results processed for sequenceId: %d",
                              this, pHolder->sequenceId);
            }

            // Since we've finished the request, free the payload and remove the node from the list
            CAMX_FREE(pNode->pData);
            pNode->pData = NULL;

            m_resultHolderList.RemoveNode(pNode);

            CAMX_FREE(pNode);
            pNode    = NULL;
            moveNext = FALSE;

            // If we've gotten any buffer ready for this result then we can accept another
            m_pLivePendingRequestsLock->Lock();
            if (m_livePendingRequests > 0)
            {
                m_livePendingRequests--;
                // If we aren't flushing (aka draining) accept a new request
                if (FALSE == CamxAtomicLoadU8(&m_aFlushingPipeline))
                {
                    m_pWaitLivePendingRequests->Signal();
                }
            }
            m_pLivePendingRequestsLock->Unlock();
            if (0 == m_resultHolderList.NumNodes())
            {
                CAMX_LOG_INFO(CamxLogGroupCore,
                              "Session %p: All results are available, waking up anything waiting on a flush",
                              this);
                m_pWaitAllResultsAvailable->Signal();
            }
        }
        else
        {
            // The oldest incomplete entry blocks everything behind it; stop walking
            break;
        }

        if (TRUE == moveNext)
        {
            pNode = m_resultHolderList.NextNode(pNode);
        }
        else
        {
            // The current node was removed; restart from the (new) head of the list
            pNode = m_resultHolderList.Head();
        }
    }

    // Log the queue state: either the [min, max] pending range, or empty
    if ((0 != m_resultHolderList.NumNodes()) &&
        ((NULL != m_resultHolderList.Head()) &&
        (NULL != m_resultHolderList.Tail())))
    {
        SessionResultHolder* pSessionResultHolderHead =
                reinterpret_cast<SessionResultHolder*>(m_resultHolderList.Head()->pData);
        SessionResultHolder* pSessionResultHolderTail =
                reinterpret_cast<SessionResultHolder*>(m_resultHolderList.Tail()->pData);

        CAMX_LOG_INFO(CamxLogGroupCore,
                      "Session %p: Results processed, current queue state: minResult = %d, "
                      "maxResult = %d, and numResultsPending: %d",
                      this,
                      pSessionResultHolderHead->resultHolders[0].sequenceId,
                      pSessionResultHolderTail->resultHolders[pSessionResultHolderTail->numResults - 1].sequenceId,
                      m_resultHolderList.NumNodes());
    }
    else
    {
        CAMX_LOG_INFO(CamxLogGroupCore, "Session %p: All results processed, current queue state is empty.", this);

        // Now that we've emptied the queue, signal saying we're ready for a new request if they exist
        m_pWaitLivePendingRequests->Signal();

        // If flush and process_capture_request were racing, it's possible that we accepted the job
        // and added it to the HALQueue right as we were starting flush, in which case we've got a request
        // hanging out in the HALQueue that now needs to be processed, so this kicks the queue
        if (FALSE == m_pRequestQueue->IsEmpty())
        {
            VOID* pData[] = { this, NULL };
            m_pThreadManager->PostJob(m_hJobFamilyHandle, NULL, &pData[0], FALSE, FALSE);
        }
    }

    // Optional FPS accounting: emit an FPS log roughly once per second
    if ((TRUE == canAdvance) && (TRUE == m_pChiContext->GetStaticSettings()->enableFPSLog))
    {
        UINT64 currentTime = OsUtils::GetNanoSeconds();
        UINT64 elapsedTime = currentTime - m_lastFPSCountTime;

        m_currentFrameCount++;

        if (elapsedTime > NanoSecondsPerSecond)
        {
            FLOAT fps = static_cast<FLOAT>(m_currentFrameCount) * (NanoSecondsPerSecond / elapsedTime);
            CAMX_LOG_PERF_INFO(CamxLogGroupCore, "FPS: %0.2f", fps);

            // Reset the window for the next FPS sample
            m_currentFrameCount = 0;
            m_lastFPSCountTime  = currentTime;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::DetermineActiveStreams
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::DetermineActiveStreams(
    PipelineProcessRequestData*  pPipelineProcessRequestData
    ) const
{
    const CaptureRequest* pRequest = pPipelineProcessRequestData->pCaptureRequest;

    // For every batched frame: derive the active-stream bitmask from the request's output
    // buffers and record each buffer handle under its stream index.
    for (UINT frame = 0; frame < pRequest->numBatchedFrames; frame++)
    {
        PerBatchedFrameInfo* pFrameInfo = &pPipelineProcessRequestData->perBatchedFrameInfo[frame];

        pFrameInfo->sequenceId         = pRequest->streamBuffers[frame].sequenceId;
        pFrameInfo->activeStreamIdMask = 0;

        for (UINT buffer = 0; buffer < pRequest->streamBuffers[frame].numOutputBuffers; buffer++)
        {
            const ChiStreamBuffer* pOutputBuffer =
                reinterpret_cast<const ChiStreamBuffer*>(&pRequest->streamBuffers[frame].outputBuffers[buffer]);

            // The stream's private info points back to the owning wrapper, which knows the stream index
            const ChiStreamWrapper* pWrapper = static_cast<ChiStreamWrapper*>(pOutputBuffer->pStream->pPrivateInfo);

            UINT streamId = pWrapper->GetStreamIndex();
            UINT prevMask = pFrameInfo->activeStreamIdMask;

            pFrameInfo->phBuffers[streamId]    = reinterpret_cast<BufferHandle*>(pOutputBuffer->phBuffer);
            pFrameInfo->activeStreamIdMask    |= Utils::BitSet(prevMask, streamId);
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::FlushRequests
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::FlushRequests(
    BOOL isForced)
{
    // Delegate to the session-level flush; this entry point itself always reports success.
    Flush(isForced);

    return CamxResultSuccess;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::ProcessRequestWhileFlushing
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::ProcessRequestWhileFlushing(
    const ChiCaptureRequest* pRequest)
{
    CAMX_UNREFERENCED_PARAM(pRequest);

    // Accepting a request mid-flush is not supported; log loudly and report busy.
    CAMX_NOT_IMPLEMENTED();

    CAMX_LOG_ERROR(CamxLogGroupCore, "ProcessCaptureRequest called during flush...undefined behavior possible");

    return CamxResultEBusy;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::UpdateMultiRequestSyncData
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Refreshes the per-combo request-ID bookkeeping used to keep multiple realtime pipelines
// (e.g. dual camera) 3A-synchronized for the same framework request.
// Fix: corrected the misspelled error log ("unkown" -> "unknown") and comment typos.
VOID Session::UpdateMultiRequestSyncData(
    const ChiPipelineRequest* pRequest)
{
    UINT32 pipelineIndex  = InvalidPipelineIndex;
    UINT64 comboID        = m_syncSequenceId + 1;                   // Next combo to populate
    UINT32 comboIndex     = comboID % MaxQueueDepth;                // Ring-buffer slot for the new combo
    UINT32 lastComboIndex = (comboID - 1) % MaxQueueDepth;          // Slot holding the previous combo


    // Only relevant when multiple realtime pipeline requests share one session
    if (m_numInputSensors >= 2)
    {
        // Carry forward the last combo's request IDs as the starting point
        for (UINT i = 0; i < m_numPipelines; i++)
        {
            m_requestSyncData[comboIndex].requestID[i]
                = m_requestSyncData[lastComboIndex].requestID[i];
        }

        // Bump the request ID of every pipeline actually addressed by this request
        for (UINT i = 0; i < pRequest->numRequests; i++)
        {
            if (0 != pRequest->pCaptureRequests[i].hPipelineHandle)
            {
                pipelineIndex = GetPipelineIndex(pRequest->pCaptureRequests[i].hPipelineHandle);

                if (pipelineIndex != InvalidPipelineIndex)
                {
                    m_requestSyncData[comboIndex].requestID[pipelineIndex]
                        = m_requestSyncData[lastComboIndex].requestID[pipelineIndex] + 1;
                }
                else
                {
                    CAMX_LOG_ERROR(CamxLogGroupHAL, "unknown pipeline handle request!");
                }
            }
        }

        // Only when the number of requests is two or more does the combo need 3A sync
        if (pRequest->numRequests >= 2)
        {
            m_requestSyncData[comboIndex].isMultiRequest = TRUE;
        }
        else
        {
            m_requestSyncData[comboIndex].isMultiRequest = FALSE;
        }
        m_syncSequenceId ++;
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::CanRequestProceed
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::CanRequestProceed(
    const ChiCaptureRequest* pRequest)
{
    // A request cannot proceed once the device has signalled a fatal error
    if (TRUE == static_cast<BOOL>(CamxAtomicLoad32(&m_aDeviceInError)))
    {
        return CamxResultEFailed;
    }

    // While flushing, requests are routed to the (unsupported) flush-time path
    if (TRUE == GetFlushSessionStatus())
    {
        return ProcessRequestWhileFlushing(pRequest);
    }

    return CamxResultSuccess;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::CheckValidInputRequest
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Validates an incoming capture request. Currently the only live check is the NULL-settings rule
// below; the commented-out section is an unfinished sketch of full stream-configuration
// validation (see the @todo) and is retained as a reference for that work.
CamxResult Session::CheckValidInputRequest(
    const ChiCaptureRequest* pCaptureRequest
    ) const
{
    //// HAL interface requires -EINVAL (EInvalidArg) if the input request is malformed
    CamxResult        result = CamxResultSuccess;

    /// @todo (CAMX-1797) Add validation

    // BOOL              foundMatch        = FALSE;
    // ChiStreamWrapper* pChiStreamWrapper = NULL;
    //
    // CAMX_ASSERT(NULL != pPipelineRequest);
    //
    // for (UINT i = 0; i < pPipelineRequest->numRequests; i++)
    // {
    //     const ChiCaptureRequest* pRequest            = &pPipelineRequest->pCaptureRequests[i];
    //     PipelineDescriptor*      pPipelineDescriptor = GetPipelineDescriptor(pRequest->hPipelineHandle);
    //
    //     if ((NULL != pRequest) && (pRequest->numOutputs > 0))
    //     {
    //         for (UINT32 buffer = 0; buffer < pRequest->numOutputs; buffer++)
    //         {
    //             if (NULL != pRequest->pOutputBuffers)
    //             {
    //                 foundMatch        = FALSE;
    //                 pChiStreamWrapper =
    //                     reinterpret_cast<ChiStreamWrapper*>(pRequest->pOutputBuffers[buffer].pStream->pPrivateInfo);
    //
    //                 for (UINT32 stream = 0; stream < phPipelineHandle->numStreams; stream++)
    //                 {
    //                     if (pChiStreamWrapper == phPipelineHandle->ppChiStreamWrappers[stream])
    //                     {
    //                         foundMatch = TRUE;
    //                         break;
    //                     }
    //                 }
    //
    //                 if (FALSE == foundMatch)
    //                 {
    //                     CAMX_LOG_ERROR(CamxLogGroupHAL, "Not a valid request, o/p stream not configured!");
    //                     break;
    //                 }
    //             }
    //         }
    //
    //         if (TRUE == foundMatch)
    //         {
    //             for (UINT32 buffer = 0; buffer < pRequest->numInputs; buffer++)
    //             {
    //                 if (NULL != pRequest->pInputBuffers)
    //                 {
    //                     foundMatch        = FALSE;
    //                     pChiStreamWrapper =
    //                         reinterpret_cast<ChiStreamWrapper*>(pRequest->pInputBuffers[buffer].pStream->pPrivateInfo);
    //
    //                     for (UINT32 stream = 0; stream < phPipelineHandle->numStreams; stream++)
    //                     {
    //                         if (pChiStreamWrapper == phPipelineHandle->ppChiStreamWrappers[stream])
    //                         {
    //                             foundMatch = TRUE;
    //                             break;
    //                         }
    //                     }
    //
    //                     if (FALSE == foundMatch)
    //                     {
    //                         CAMX_LOG_ERROR(CamxLogGroupHAL, "Not a valid request, i/p stream not configured!");
    //                         break;
    //                     }
    //                 }
    //             }
    //         }
    //
    //         if (TRUE == foundMatch)
    //         {
    //             result = CamxResultSuccess;
    //
    //             if (NULL != pRequest->pMetadata)
    //             {
    //                 if (pRequest->numInputs > 0)
    //                 {
    //                     // result = check for valid reprocess settings
    //                     /// @todo (CAMX-356): Check validity of request
    //                 }
    //                 else
    //                 {
    //                     // result = check for valid capture settings
    //                     /// @todo (CAMX-356): Check validity of request
    //                 }
    //             }
    //         }
    //     }
    // }

    const Metadata* pMetadata = static_cast<const Metadata*>(pCaptureRequest->pMetadata);

    // NULL settings means to use the settings from the previous capture request, however that is not
    // possible if this is the first request after configure_streams() according to HAL documentation of
    // camera_metadata_t.settings.
    // m_sequenceId == 0 indicates no request has been accepted yet on this session.
    if ((NULL == pMetadata) && (0 == m_sequenceId))
    {
        result = CamxResultEInvalidArg;
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::WaitOnAcquireFence
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Waits for (and then closes) every output buffer's acquire fence in the request.
// Fix: the original broke out of the loop on a wait failure BEFORE calling OsUtils::Close(),
// leaking the failing fence FD. The fence is now closed before the early exit.
CamxResult Session::WaitOnAcquireFence(
    const ChiCaptureRequest* pRequest)
{
    CamxResult result = CamxResultSuccess;
#if ANDROID
    for (UINT i = 0; i < pRequest->numOutputs; i++)
    {
        NativeFence acquireFence = pRequest->pOutputBuffers[i].acquireFence;

        if (InvalidNativeFence != acquireFence)
        {
            CAMX_TRACE_SYNC_BEGIN_F(CamxLogGroupSync, "Waiting on Acquire Fence");
            /// @todo (CAMX-2491) Define constant for HalFenceTimeout
            result = OsUtils::NativeFenceWait(acquireFence, 5000);
            CAMX_TRACE_SYNC_END(CamxLogGroupSync);
        }

        // Close unconditionally (matches the original behavior for invalid fences), and before
        // any early exit so a wait timeout/failure does not leak the fence FD
        OsUtils::Close(acquireFence);

        if (CamxResultSuccess != result)
        {
            break;
        }
    }
#else
    CAMX_UNREFERENCED_PARAM(pRequest);
#endif // ANDROID

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetIntraPipelinePerFramePool
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
MetadataPool* Session::GetIntraPipelinePerFramePool(
    PoolType poolType,
    UINT     pipelineId)
{
    CAMX_ASSERT(pipelineId < m_numPipelines);

    // Forward the pool lookup to the pipeline that owns it
    Pipeline* pPipeline = m_pipelineData[pipelineId].pPipeline;

    return pPipeline->GetPerFramePool(poolType);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetIntraRealtimePipelineId
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::GetIntraRealtimePipelineId(
    UINT  inputPipelineId,
    UINT* pIntraPipelineId)
{
    CAMX_ASSERT(inputPipelineId < m_numPipelines);

    // Intra-pipeline results only make sense when at least two realtime pipelines exist
    if (m_numRealtimePipelines <= 1)
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL, "Less than two realtime pipelines, no intra result");
        return CamxResultEFailed;
    }

    // Pick the first realtime pipeline that is not the input pipeline.
    // need revisit here if want support realtime sensors more than two.
    for (UINT i = 0; i < m_numRealtimePipelines; i++)
    {
        if (inputPipelineId != m_realtimePipelineIds[i])
        {
            *pIntraPipelineId = m_realtimePipelineIds[i];
            return CamxResultSuccess;
        }
    }

    CAMX_LOG_ERROR(CamxLogGroupHAL, "Failed to get intra pipeline id for input %d", inputPipelineId);

    return CamxResultEFailed;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetStreamIndex
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CAMX_INLINE UINT32 Session::GetStreamIndex(
    ChiStream* pStream
    ) const
{
    // The owning ChiStreamWrapper is stashed in the stream's private info field;
    // recover it and return the wrapper's stream index.
    ChiStreamWrapper* pWrapper = static_cast<ChiStreamWrapper*>(pStream->pPrivateInfo);

    return pWrapper->GetStreamIndex();
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::UnregisterThreadJobCallback
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::UnregisterThreadJobCallback()
{
    // Intentionally a no-op: this implementation holds no thread-job callback state to
    // tear down. Kept so the method exists for callers; do not remove.
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetCurrentSequenceId
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
UINT32 Session::GetCurrentSequenceId()
{
    // Accessor for this session's current internal sequence id counter.
    return m_sequenceId;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::GetCurrentPipelineRequestId
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
UINT64 Session::GetCurrentPipelineRequestId(
    UINT pipelineIndex)
{
    // pipelineIndex must be a valid index into this session's pipelines.
    CAMX_ASSERT(pipelineIndex < m_numPipelines);
    // Per-pipeline request batch id tracked by the session for the given pipeline.
    return m_requestBatchId[pipelineIndex];
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::IsPipelineRealTime
//
// @brief  Look up the pipeline matching the given descriptor handle and report whether it is a realtime pipeline.
//
// @param  hPipelineDescriptor  Pipeline descriptor handle used to locate the pipeline within this session.
//
// @return TRUE if the matching pipeline is realtime; FALSE if it is offline or the descriptor is not found.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BOOL Session::IsPipelineRealTime(
    CHIPIPELINEHANDLE hPipelineDescriptor)
{
    BOOL   isRealTime = FALSE;
    UINT32 index      = 0;

    // The caller's pipeline index does not necessarily match the index recorded by this Session,
    // so search by descriptor handle to find the corresponding pipeline.
    for (index = 0; index < m_numPipelines; index++)
    {
        if (hPipelineDescriptor == m_pipelineData[index].pPipelineDescriptor)
        {
            // Found the corresponding pipeline; index now addresses it
            break;
        }
    }

    CAMX_ASSERT(index < m_numPipelines);

    // Guard the not-found case explicitly: with asserts compiled out, falling through with
    // index == m_numPipelines would read past the end of m_pipelineData (undefined behavior).
    if (index < m_numPipelines)
    {
        Pipeline* pPipeline = m_pipelineData[index].pPipeline;
        CAMX_ASSERT(NULL != pPipeline);

        if (NULL != pPipeline)
        {
            isRealTime = pPipeline->IsRealTime();
        }
    }
    else
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL, "Pipeline descriptor %p not found in this session", hPipelineDescriptor);
    }

    return isRealTime;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::QueryStreamHDRMode
//
// @brief  Latch the HDR mode for a stream, once, from the HDRVideoMode vendor tag in the input metadata.
//
// @param  pStream     Stream whose HDR mode is being resolved
// @param  pInputPool  Input metadata pool to read the vendor tag from
// @param  requestId   Request id selecting the metadata slot
//
// @return CamxResultSuccess, or the failure code from the vendor tag location query.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::QueryStreamHDRMode(
    HAL3Stream*              pStream,
    MetadataPool*            pInputPool,
    UINT64                   requestId)
{
    CamxResult result = CamxResultSuccess;

    // The HDR mode is resolved exactly once, when the first capture request arrives;
    // afterwards GetHDRMode() is no longer HDRModeMax and this function does nothing.
    if (HDRModeMax == pStream->GetHDRMode())
    {
        UINT32 hdrVideoModeTag = 0;
        UINT8  requestedMode   = HDRModeNone;
        Format streamFormat    = pStream->GetInternalFormat();

        result = VendorTagManager::QueryVendorTagLocation("org.quic.camera2.streamconfigs", "HDRVideoMode", &hdrVideoModeTag);

        if (CamxResultSuccess == result)
        {
            MetadataSlot* pSlot = pInputPool->GetSlot(requestId);

            if (TRUE == pSlot->IsPublished(UnitType::Metadata, hdrVideoModeTag))
            {
                VOID* pTagValue = NULL;

                pSlot->GetMetadataByTag(hdrVideoModeTag, &pTagValue);
                CAMX_ASSERT(pTagValue != NULL);

                if (NULL != pTagValue)
                {
                    requestedMode = *(static_cast<UINT8*>(pTagValue));

                    // Reject out-of-range values and fall back to no-HDR.
                    if (requestedMode >= HDRModeMax)
                    {
                        CAMX_LOG_ERROR(CamxLogGroupHAL, "Invalid HDR mode :%d sent", requestedMode);
                        requestedMode = HDRModeNone;
                    }
                }
            }
        }

        // Only 10-bit formats can carry the requested HDR mode; any other format is forced to SDR.
        if ((Format::UBWCTP10 == streamFormat) || (Format::P010 == streamFormat))
        {
            pStream->SetHDRMode(requestedMode);
        }
        else
        {
            pStream->SetHDRMode(HDRModeNone);
        }

        CAMX_LOG_VERBOSE(CamxLogGroupHAL, "Set stream HDR mode %d", pStream->GetHDRMode());
    }

    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::SetPerStreamColorMetadata
//
// @brief  For every output buffer in the request, resolve the stream's HDR mode and stamp the corresponding color
//         metadata (primaries/transfer/matrix plus HDR SEI info) onto the gralloc buffer handle via setMetaData.
//
// @param  pRequest    Capture request whose output buffers are to be annotated
// @param  pInputPool  Input metadata pool, used to resolve the HDR mode vendor tag
// @param  requestId   Request id selecting the metadata slot
//
// @return CamxResultSuccess (failures of the HDR mode query are not propagated).
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CamxResult Session::SetPerStreamColorMetadata(
    const ChiCaptureRequest* pRequest,
    MetadataPool*            pInputPool,
    UINT64                   requestId)
{
    CHISTREAM*                pStream        = NULL;
    struct private_handle_t*  phBufferHandle = NULL;
    CamxResult                result         = CamxResultSuccess;

    CAMX_LOG_VERBOSE(CamxLogGroupHAL, "Request with outbuffer num %d", pRequest->numOutputs);

    for (UINT32 i = 0; i < pRequest->numOutputs; i++)
    {
        // Declared inside the loop so each stream starts from zero-initialized metadata.
        // Previously a single struct was reused across iterations and the default (SDR) case
        // only overwrote four fields, leaking the SEI/mastering-display settings of an earlier
        // HDR stream into subsequent SDR streams of the same request.
        ColorMetaData bufferMetadata = {};

        pStream = pRequest->pOutputBuffers[i].pStream;

        HAL3Stream*  pHAL3Stream = static_cast<HAL3Stream*>(pStream->pPrivateInfo);

        CAMX_ASSERT(pHAL3Stream != NULL);
        QueryStreamHDRMode(pHAL3Stream, pInputPool, requestId);

        switch (pHAL3Stream->GetHDRMode())
        {
            case HDRModeHLG:
                CAMX_LOG_VERBOSE(CamxLogGroupHAL, "Set HDR mode HLG for req %llu", requestId);

                // In HLG mode, UBWC 10bit-TP
                bufferMetadata.colorPrimaries           = ColorPrimaries_BT2020;
                bufferMetadata.range                    = Range_Full;
                bufferMetadata.transfer                 = Transfer_HLG;
                bufferMetadata.matrixCoefficients       = MatrixCoEff_BT2020;

                bufferMetadata.contentLightLevel.lightLevelSEIEnabled    = TRUE;
                bufferMetadata.contentLightLevel.maxContentLightLevel    = MaxContentLightLevel;
                bufferMetadata.contentLightLevel.minPicAverageLightLevel = MaxFrameAverageLightLevel;

                bufferMetadata.masteringDisplayInfo.colorVolumeSEIEnabled   = TRUE;
                bufferMetadata.masteringDisplayInfo.maxDisplayLuminance     = MaxDisplayLuminance;
                bufferMetadata.masteringDisplayInfo.minDisplayLuminance     = MinDisplayLuminance;

                Utils::Memcpy(&bufferMetadata.masteringDisplayInfo.primaries.rgbPrimaries, PrimariesRGB,
                    sizeof(bufferMetadata.masteringDisplayInfo.primaries.rgbPrimaries));
                Utils::Memcpy(&bufferMetadata.masteringDisplayInfo.primaries.whitePoint, PrimariesWhitePoint,
                    sizeof(bufferMetadata.masteringDisplayInfo.primaries.whitePoint));
                break;
            case HDRModeHDR10:
                CAMX_LOG_VERBOSE(CamxLogGroupHAL, "Set HDR mode HDR10 for req %llu", requestId);

                bufferMetadata.colorPrimaries       = ColorPrimaries_BT2020;
                bufferMetadata.range                = Range_Full;
                bufferMetadata.transfer             = Transfer_SMPTE_ST2084;
                bufferMetadata.matrixCoefficients   = MatrixCoEff_BT2020;

                bufferMetadata.contentLightLevel.lightLevelSEIEnabled    = TRUE;
                bufferMetadata.contentLightLevel.maxContentLightLevel    = MaxContentLightLevel;
                bufferMetadata.contentLightLevel.minPicAverageLightLevel = MaxFrameAverageLightLevel;

                bufferMetadata.masteringDisplayInfo.colorVolumeSEIEnabled   = TRUE;
                bufferMetadata.masteringDisplayInfo.maxDisplayLuminance     = MaxDisplayLuminance;
                bufferMetadata.masteringDisplayInfo.minDisplayLuminance     = MinDisplayLuminance;

                Utils::Memcpy(&bufferMetadata.masteringDisplayInfo.primaries.rgbPrimaries, PrimariesRGB,
                    sizeof(bufferMetadata.masteringDisplayInfo.primaries.rgbPrimaries));
                Utils::Memcpy(&bufferMetadata.masteringDisplayInfo.primaries.whitePoint, PrimariesWhitePoint,
                    sizeof(bufferMetadata.masteringDisplayInfo.primaries.whitePoint));
                break;
            default:
                CAMX_LOG_VERBOSE(CamxLogGroupHAL, "Set HDR mode default for req %llu", requestId);

                // default Color Metadata (BT.601 SDR); remaining fields stay zeroed from the
                // per-iteration initialization above.
                bufferMetadata.colorPrimaries        = ColorPrimaries_BT601_6_625;
                bufferMetadata.range                 = Range_Full;
                bufferMetadata.transfer              = Transfer_SMPTE_170M;
                bufferMetadata.matrixCoefficients    = MatrixCoEff_BT601_6_625;
                break;
        }

        /// @todo (CAMX-2499): Decouple the camx core dependency from the android display API.
        Camera3StreamBuffer* pBuffers = reinterpret_cast<Camera3StreamBuffer*>(pRequest->pOutputBuffers);
        if (NULL != pBuffers[i].phBuffer)
        {
            // NOWHINE CP036a: exception
            phBufferHandle = const_cast<struct private_handle_t*>(
                reinterpret_cast<const struct private_handle_t*>(
                    *pBuffers[i].phBuffer));
            setMetaData(phBufferHandle, COLOR_METADATA, &bufferMetadata);
        }
    }
    return result;
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::SetPerFrameVTTimestampMetadata
//
// @brief  Read the VT timestamp vendor tag for the request and stamp it onto the native buffer handle so the
//         video encoder can use it; logs an error (encoder falls back to system time) if the tag is absent.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::SetPerFrameVTTimestampMetadata(
    const NativeHandle* phNativeBufferHandle,
    MetadataPool*       pPool,
    UINT64              requestId)
{
    UINT64* pTimestamp = NULL;

    CAMX_ASSERT(NULL != pPool);

    MetadataSlot* pSlot = pPool->GetSlot(requestId);

    CAMX_ASSERT(NULL != pSlot);

    pSlot->GetMetadataByTag(m_vendorTagIndexTimestamp, reinterpret_cast<VOID**>(&pTimestamp));

    if (NULL == pTimestamp)
    {
        CAMX_LOG_ERROR(CamxLogGroupHAL,
                        "Failed to retrieve VT timestamp for requestId=%llu, encoder will fallback to system time",
                        requestId);
    }
    else
    {
        CAMX_LOG_VERBOSE(CamxLogGroupHAL, "PerFrame Metadata VT timestamp=%llu", *pTimestamp);

        // NOWHINE CP036a: Google API requires const types
        struct private_handle_t* phBufferHandle = const_cast<struct private_handle_t*>(
                reinterpret_cast<const struct private_handle_t*>(phNativeBufferHandle));

        setMetaData(phBufferHandle, SET_VT_TIMESTAMP, pTimestamp);
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::SetPerFrameVideoPerfModeMetadata
//
// @brief  Enable video performance mode (value 1) on the given native buffer handle via the display metadata API.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::SetPerFrameVideoPerfModeMetadata(
    const NativeHandle* phNativeBufferHandle)
{
    UINT32 videoPerfMode = 1;

    // NOWHINE CP036a: Google API requires const types
    struct private_handle_t* phBufferHandle = const_cast<struct private_handle_t*>(
        reinterpret_cast<const struct private_handle_t*>(phNativeBufferHandle));

    CAMX_LOG_INFO(CamxLogGroupCore, "Set Video Perf mode %d", videoPerfMode);

    setMetaData(phBufferHandle, SET_VIDEO_PERF_MODE, &videoPerfMode);
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::DumpResultState
//
// @brief  Write a snapshot of the pending result-holder list to the given file descriptor for post-mortem debugging.
//
// @param  fd      File descriptor to log to
// @param  indent  Base indentation level for the dump output
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::DumpResultState(
    INT     fd,
    UINT32  indent)
{
    /// @note Accessing with a TryLock since this is intended to be a post-mortem log.  If we try to enforce the lock, there's a
    ///       reasonable chance the post-mortem will deadlock. Failed locks will be noted.
    CamxResult result = m_pResultLock->TryLock();

    if (CamxResultSuccess != result)
    {
        // Proceed without the lock; the dump may race with result processing, hence the warning.
        CAMX_LOG_TO_FILE(fd, indent + 3, "WARNING: Lock failed with status: %d.  Results may not be completely accurate",
            result);
    }

    LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();
    ResultHolder*                    pResultHolder = NULL;
    CAMX_LOG_TO_FILE(fd, indent, "+---------------------------------+");
    CAMX_LOG_TO_FILE(fd, indent, "+ Result holder:");
    CAMX_LOG_TO_FILE(fd, indent + 3, "Num entries in holder list: %d", m_resultHolderList.NumNodes());

    // Walk every node still awaiting results and dump its buffer/metadata bookkeeping.
    while (NULL != pNode)
    {
        if (NULL != pNode->pData)
        {
            // NOTE(review): pData is cast to ResultHolder* here, but HandleFlushForInflightRequests
            // casts the same list's pData to SessionResultHolder* — confirm which layout is current.
            pResultHolder = reinterpret_cast<ResultHolder*>(pNode->pData);

            if (NULL != pResultHolder)
            {
                CAMX_LOG_TO_FILE(fd, indent + 4, "sequenceId: %d  isAlive: %d  addNoMore: %d",
                    pResultHolder->sequenceId, pResultHolder->isAlive, pResultHolder->addNoMore);
                CAMX_LOG_TO_FILE(fd, indent + 5, "numOutBuffers: %d  numInBuffers: %d  pendingMetadataCount: %d",
                    pResultHolder->numOutBuffers, pResultHolder->numInBuffers, pResultHolder->pendingMetadataCount);
                // Per-output-buffer state (stream, buffer pointer, error flag)
                for (UINT i = 0; i < pResultHolder->numOutBuffers; i++)
                {
                    CAMX_LOG_TO_FILE(fd, indent + 6, "bufferHolder[%d]", i);
                    CAMX_LOG_TO_FILE(fd, indent + 7, "pStream: %p  pBuffer: %p  error: %d",
                        pResultHolder->bufferHolder[i].pStream,
                        pResultHolder->bufferHolder[i].pBuffer,
                        pResultHolder->bufferHolder[i].error);
                }
                // Per-input-buffer state
                for (UINT i = 0; i < pResultHolder->numInBuffers; i++)
                {
                    CAMX_LOG_TO_FILE(fd, indent + 6, "inputbufferHolder[%d]", i);
                    CAMX_LOG_TO_FILE(fd, indent + 7, "pStream: %p  pBuffer: %p",
                        pResultHolder->inputbufferHolder[i].pStream,
                        pResultHolder->inputbufferHolder[i].pBuffer);
                }
            }
        }

        // Get the next result holder and see what's going on with it
        pNode = m_resultHolderList.NextNode(pNode);
    }

    // Only release the lock if the TryLock above actually acquired it.
    if (CamxResultSuccess == result)
    {
        m_pResultLock->Unlock();
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::DumpState
//
// @brief  Dump the full session state (counters, flags, pipelines, request queue, result holders, DRQ) to fd
//         for post-mortem debugging.
//
// @param  fd      File descriptor to log to
// @param  indent  Base indentation level for the dump output
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::DumpState(
    INT     fd,
    UINT32  indent)
{
    CAMX_LOG_TO_FILE(fd, indent, "+------------------------------------------------------------------+");
    CAMX_LOG_TO_FILE(fd, indent, "+ Session state:");
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_numPipelines: %d", m_numPipelines);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_numRealtimePipelines: %d", m_numRealtimePipelines);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_numMetadataResults: %d", m_numMetadataResults);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_aDeviceInError: %d", CamxAtomicLoad32(&m_aDeviceInError));
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_aFlushStatus: %d", CamxAtomicLoad32(&m_aFlushStatus));
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_aFlushingPipeline: %d", CamxAtomicLoadU8(&m_aFlushingPipeline));
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_isTorchWidgetSession: %d", m_isTorchWidgetSession);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_isRequestBatchingOn: %d", m_isRequestBatchingOn);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_usecaseNumBatchedFrames: %d", m_usecaseNumBatchedFrames);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_livePendingRequests: %d", m_livePendingRequests);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_maxLivePendingRequests: %d", m_maxLivePendingRequests);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_sequenceId: %d", m_sequenceId);

    // m_requestBatchId is a per-pipeline array (see GetCurrentPipelineRequestId); print each
    // entry instead of passing the array itself to %llu, which logged the pointer value.
    for (UINT i = 0; i < m_numPipelines; i++)
    {
        CAMX_LOG_TO_FILE(fd, indent + 3, "m_requestBatchId[%u]: %llu", i, m_requestBatchId[i]);
    }

    CAMX_LOG_TO_FILE(fd, indent + 3, "m_batchedFrameIndex: %d", m_batchedFrameIndex);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_syncSequenceId: %d", m_syncSequenceId);
    CAMX_LOG_TO_FILE(fd, indent + 3, "m_fwFrameNumberMap[%d]:", m_requestQueueDepth);
    for (UINT i = 0; i < m_requestQueueDepth; i++)
    {
        CAMX_LOG_TO_FILE(fd, indent + 4, "[%d] = %lld", i, m_fwFrameNumberMap[i]);
    }
    CAMX_LOG_TO_FILE(fd, indent, "+------------------------------------------------------------------+");

    // Guard against an empty result-holder list: the previous code dereferenced Head()
    // unconditionally (crash when nothing is pending) and reinterpret-cast the list NODE
    // itself rather than its pData payload.
    LightweightDoublyLinkedListNode* pHeadNode = m_resultHolderList.Head();

    if ((NULL != pHeadNode) && (NULL != pHeadNode->pData))
    {
        CAMX_LOG_TO_FILE(fd, indent, "+ Stuck on Request Id: %d",
                        reinterpret_cast<ResultHolder*>(pHeadNode->pData)->sequenceId);
    }
    else
    {
        CAMX_LOG_TO_FILE(fd, indent, "+ No pending results");
    }
    CAMX_LOG_TO_FILE(fd, indent, "+------------------------------------------------------------------+");

    // Dump pipeline info for this session
    CAMX_LOG_TO_FILE(fd, indent, "+ Pipelines:");
    for (UINT i = 0; i < m_numPipelines; i++)
    {
        CAMX_LOG_TO_FILE(fd, indent + 2, "+------------------------------------------------------------------+");
        CAMX_LOG_TO_FILE(fd, indent + 3, "Pipeline[%d]: %p", i, m_pipelineData[i].pPipeline);
        m_pipelineData[i].pPipeline->DumpState(fd, indent + 4);
        CAMX_LOG_TO_FILE(fd, indent + 2, "+------------------------------------------------------------------+");
    }

    CAMX_LOG_TO_FILE(fd, indent, "+------------------------------------------------------------------+");

    // Dump the session's request queue
    m_pRequestQueue->DumpState(fd, indent + 3);

    // Dump the result holder list
    DumpResultState(fd, indent);

    // Dump this session's DRQ
    m_pDeferredRequestQueue->DumpState(fd, indent);
}


////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleFlushForHALQueueRequests
//
// @brief  During flush, drain every request backed up in m_pFlushRequestQueue and fabricate error notifies plus
//         error-status buffer results for each batched frame, so the framework gets all its buffers back.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleFlushForHALQueueRequests()
{
    UINT32                  numOfResults = 0;
    SessionCaptureRequest*  pSessionRequests = NULL;
    ChiCaptureResult*       pCaptureResult = NULL;

    if (TRUE == m_pFlushRequestQueue->IsEmpty())
    {
        CAMX_LOG_VERBOSE(CamxLogGroupHAL, "HAL Session queue empty");
    }

    // Drain the flush queue one SessionCaptureRequest at a time.
    while (FALSE == m_pFlushRequestQueue->IsEmpty())
    {
        pSessionRequests = static_cast<SessionCaptureRequest*>(m_pFlushRequestQueue->Dequeue());
        if (NULL == pSessionRequests)
        {
            CAMX_LOG_ERROR(CamxLogGroupHAL, "pCaptureResult is NULL");
            break;
        }

        for (UINT32 request = 0; request < pSessionRequests->numRequests; request++)
        {
            CaptureRequest* pRequest = static_cast<CaptureRequest*>(&pSessionRequests->requests[request]);

            // NOTE(review): pRequest is the address of an array element and can never be NULL;
            // the else/break below is dead code.
            if (NULL != pRequest)
            {
                // NOTE(review): streamBuffers is indexed with the outer 'request' loop variable here,
                // while every use below indexes it with the batched-frame index 'i' — confirm this
                // should not be streamBuffers[0] (possible out-of-range read when numRequests exceeds
                // the streamBuffers capacity).
                CAMX_LOG_INFO(CamxLogGroupHAL, "in HandleFlushForHALQueueRequests, processing request id %llu",
                    pRequest->streamBuffers[request].originalFrameworkNumber);

                numOfResults = 0;
                // One ChiCaptureResult per possible batched frame for this request.
                pCaptureResult = static_cast<ChiCaptureResult*>(CAMX_CALLOC(MaxBatchedFrames * sizeof(ChiCaptureResult)));
                if (NULL == pCaptureResult)
                {
                    CAMX_LOG_VERBOSE(CamxLogGroupHAL, "pCaptureResult is NULL");
                    break;
                }

                // Pre-allocate input/output buffer arrays for every slot.
                // NOTE(review): these inner CAMX_CALLOCs are not NULL-checked before use below.
                for (UINT32 n = 0; n < MaxBatchedFrames; n++)
                {
                    pCaptureResult[n].frameworkFrameNum  = 0;
                    pCaptureResult[n].numOutputBuffers   = 0;
                    pCaptureResult[n].numPartialMetadata = 0;
                    pCaptureResult[n].pResultMetadata    = NULL;
                    pCaptureResult[n].pInputBuffer       =
                        static_cast<ChiStreamBuffer*>(CAMX_CALLOC(MaxNumInputBuffers * sizeof(ChiStreamBuffer)));
                    pCaptureResult[n].pOutputBuffers     =
                        static_cast<ChiStreamBuffer*>(CAMX_CALLOC(MaxNumOutputBuffers * sizeof(ChiStreamBuffer)));
                }

                // For each batched frame: send a request-error notify, then copy all of its
                // output/input buffers into the fabricated result with error status.
                for (UINT32 i = 0; i < pRequest->numBatchedFrames; i++)
                {
                    ChiMessageDescriptor* pNotify = GetNotifyMessageDescriptor();
                    pNotify->messageType = ChiMessageTypeError;
                    pNotify->message.errorMessage.frameworkFrameNum =
                        static_cast<UINT32>(pRequest->streamBuffers[i].originalFrameworkNumber);
                    pNotify->message.errorMessage.errorMessageCode  = static_cast<ChiErrorMessageCode>(MessageCodeRequest);
                    pNotify->pPrivData = reinterpret_cast<CHIPRIVDATA *>(pRequest->pPrivData);

                    pNotify->message.errorMessage.pErrorStream = NULL;
                    // No stream applicable
                    // Dispatch it immediately
                    DispatchNotify(pNotify);

                    // Return every output buffer with an error status and no pending fence.
                    for (UINT32 buffer = 0; buffer < pRequest->streamBuffers[i].numOutputBuffers; buffer++)
                    {
                        CAMX_LOG_INFO(CamxLogGroupHAL, "Returning error for pending buffers - %llu",
                            pRequest->streamBuffers[i].originalFrameworkNumber);

                        pRequest->streamBuffers[i].outputBuffers[buffer].releaseFence = -1;
                        pRequest->streamBuffers[i].outputBuffers[buffer].bufferStatus = BufferStatusError;

                        ChiStreamBuffer* pStreamBuffer =
                            // NOWHINE CP036a: Google API requires const type
                            const_cast<ChiStreamBuffer*>(&pCaptureResult[i].pOutputBuffers[buffer]);
                        Utils::Memcpy(pStreamBuffer,
                                     &pRequest->streamBuffers[i].outputBuffers[buffer],
                                     sizeof(ChiStreamBuffer));
                    }

                    // Return every input buffer the same way.
                    for (UINT32 buffer = 0; buffer < pRequest->streamBuffers[i].numInputBuffers; buffer++)
                    {
                        CAMX_LOG_INFO(CamxLogGroupHAL, "Returning error for pending input buffers - %llu",
                            pRequest->streamBuffers[i].originalFrameworkNumber);

                        pRequest->streamBuffers[i].inputBufferInfo[buffer].inputBuffer.releaseFence = -1;
                        pRequest->streamBuffers[i].inputBufferInfo[buffer].inputBuffer.bufferStatus = BufferStatusError;

                        ChiStreamBuffer* pStreamBuffer =
                            // NOWHINE CP036a: Google API requires const type
                            const_cast<ChiStreamBuffer*>(&pCaptureResult[i].pInputBuffer[buffer]);
                        Utils::Memcpy(pStreamBuffer,
                                     &pRequest->streamBuffers[i].inputBufferInfo[buffer].inputBuffer,
                                     sizeof(ChiStreamBuffer));
                    }

                    // Finalize the fabricated result entry for this batched frame.
                    pCaptureResult[i].frameworkFrameNum  =
                        static_cast<UINT32>(pRequest->streamBuffers[i].originalFrameworkNumber);
                    pCaptureResult[i].numOutputBuffers   = pRequest->streamBuffers[i].numOutputBuffers;
                    pCaptureResult[i].pResultMetadata    = NULL;
                    pCaptureResult[i].numPartialMetadata = 1;
                    pCaptureResult[i].pPrivData          = reinterpret_cast<CHIPRIVDATA *>(pRequest->pPrivData);
                    numOfResults++;
                }

                // Hand all fabricated results to the framework in one shot.
                DispatchResults(&pCaptureResult[0], numOfResults);


                // Free the per-slot buffer arrays and then the result array itself.
                for (UINT32 j = 0; j < MaxBatchedFrames; j++)
                {
                    if (NULL != pCaptureResult[j].pOutputBuffers)
                    {
                        // NOWHINE CP036a: Google API requires const type
                        CAMX_FREE(const_cast<ChiStreamBuffer*>(pCaptureResult[j].pOutputBuffers));
                        pCaptureResult[j].pOutputBuffers = NULL;
                    }
                    if (NULL != pCaptureResult[j].pInputBuffer)
                    {
                        // NOWHINE CP036a: Google API requires const type
                        CAMX_FREE(const_cast<ChiStreamBuffer*>(pCaptureResult[j].pInputBuffer));
                        pCaptureResult[j].pInputBuffer = NULL;
                    }
                }

                CAMX_FREE(pCaptureResult);
                pCaptureResult = NULL;

            }
            else
            {
                break;
            }

        }

        // Return the drained request container to its queue's free pool.
        m_pFlushRequestQueue->Release(pSessionRequests);
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::HandleFlushForInflightRequests
//
// @brief  During flush, walk the in-flight result-holder list under m_pResultLock, send a request-error notify for
//         each pending result, and return all of its buffers/metadata to the framework with error status.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::HandleFlushForInflightRequests()
{
    UINT32        numResults          = 0;
    ResultHolder* pResultHolder       = NULL;

    m_pResultLock->Lock();

    CAMX_LOG_VERBOSE(CamxLogGroupCore, "Enter HandleFlushForInflightRequests");

    // Reset the essential fields of framework results, so that not to be taken in stale
    for (UINT i = 0; i < m_requestQueueDepth * m_numPipelines; i++)
    {
        m_pCaptureResult[i].frameworkFrameNum  = 0;
        m_pCaptureResult[i].numOutputBuffers   = 0;
        m_pCaptureResult[i].numPartialMetadata = 0;
        m_pCaptureResult[i].pResultMetadata    = NULL;
        m_pCaptureResult[i].pInputBuffer       = NULL;
    }

    // Log the sequence-id range still pending (head = oldest, tail = newest) before cleanup.
    if (0 != m_resultHolderList.NumNodes())
    {
        SessionResultHolder* pSessionResultHolderHead =
            reinterpret_cast<SessionResultHolder*>(m_resultHolderList.Head()->pData);
        SessionResultHolder* pSessionResultHolderTail =
            reinterpret_cast<SessionResultHolder*>(m_resultHolderList.Tail()->pData);

        CAMX_LOG_INFO(CamxLogGroupCore, "Waiting for all results  minResult:%d  maxRequest:%d before cleanup",
                      pSessionResultHolderHead->resultHolders[0].sequenceId,
                      pSessionResultHolderTail->resultHolders[pSessionResultHolderTail->numResults - 1].sequenceId);
    }
    else
    {
        CAMX_LOG_INFO(CamxLogGroupHAL, "No nodes in resultQueue before clean up");
    }

    LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();

    // Walk every pending result holder and convert it into an error result.
    while (NULL != pNode)
    {
        CAMX_ASSERT(NULL != pNode->pData);

        if (NULL != pNode->pData)
        {
            SessionResultHolder* pSessionResultHolder = reinterpret_cast<SessionResultHolder*>(pNode->pData);
            for (UINT32 i = 0; i < pSessionResultHolder->numResults; i++)
            {
                pResultHolder = &pSessionResultHolder->resultHolders[i];
                UINT32 numOutputBuffers = 0;

                // NOTE(review): pResultHolder is the address of an array element and can never be
                // NULL; this check is always true.
                if (NULL != pResultHolder)
                {
                    CAMX_LOG_INFO(CamxLogGroupHAL, "Processing result for Sequence ID %d and m_requestQueueDepth %d",
                                  pResultHolder->sequenceId, m_requestQueueDepth);
                    // First, notify the framework that this request errored out.
                    ChiMessageDescriptor* pNotify = GetNotifyMessageDescriptor();

                    pNotify->messageType                            = ChiMessageTypeError;
                    pNotify->message.errorMessage.frameworkFrameNum = GetFrameworkFrameNumber(pResultHolder->sequenceId);
                    pNotify->message.errorMessage.errorMessageCode  = static_cast<ChiErrorMessageCode>(MessageCodeRequest);
                    pNotify->pPrivData                              = static_cast<CHIPRIVDATA *>(pResultHolder->pPrivData);

                    pNotify->message.errorMessage.pErrorStream = NULL; // No stream applicable
                    CAMX_LOG_INFO(CamxLogGroupHAL,
                                    "notifying request  error for framework frame number %d with Private Data=%p",
                                    pNotify->message.errorMessage.frameworkFrameNum,
                                    pNotify->pPrivData);
                    // Dispatch it immediately
                    DispatchNotify(pNotify);

                    // Hand whatever metadata exists (slot 0) to the result and clear the holder's
                    // reference so it is not returned twice.
                    m_pCaptureResult[numResults].pResultMetadata = pResultHolder->pMetadata[0];

                    pResultHolder->pendingMetadataCount = 0;
                    m_pCaptureResult[numResults].numPartialMetadata = 1;
                    m_pCaptureResult[numResults].frameworkFrameNum = GetFrameworkFrameNumber(pResultHolder->sequenceId);
                    pResultHolder->pMetadata[0] = NULL;
                    CAMX_LOG_INFO(CamxLogGroupHAL,
                            "ProcessResultMetadata Finalized result for Sequence ID %d mapped to framework id %d",
                            pResultHolder->sequenceId, m_pCaptureResult[numResults].frameworkFrameNum);

                    // Scan all possible output buffer slots; copy each present buffer into the
                    // result with error status and a cleared release fence.
                    for (UINT32 buffer = 0; buffer < MaxNumOutputBuffers; buffer++)
                    {
                        if (NULL != pResultHolder->bufferHolder[buffer].pBuffer)
                        {
                            CAMX_LOG_INFO(CamxLogGroupHAL,
                                "Result holder buffers inside for = %d stream = %p, ADDR=%p ,"
                                "buffer handle = %p NUM of out buffer =%d",
                                buffer,
                                pResultHolder->bufferHolder[buffer].pBuffer->pStream,
                                pResultHolder->bufferHolder[buffer].pBuffer,
                                pResultHolder->bufferHolder[buffer].pBuffer->phBuffer,
                                pResultHolder->numOutBuffers);
                            pResultHolder->bufferHolder[buffer].pBuffer->releaseFence = -1;
                            pResultHolder->bufferHolder[buffer].pBuffer->bufferStatus = BufferStatusError;

                            ChiStreamBuffer* pStreamBuffer =
                                    // NOWHINE CP036a: Google API requires const type
                                    const_cast<ChiStreamBuffer*>
                                (&m_pCaptureResult[numResults].pOutputBuffers[numOutputBuffers]);
                            Utils::Memcpy(pStreamBuffer, pResultHolder->bufferHolder[buffer].pBuffer,
                                    sizeof(ChiStreamBuffer));

                            pResultHolder->bufferHolder[buffer].pStream = NULL;
                            numOutputBuffers++;
                        }

                        // On the final slot, also release the (single) input buffer, if any.
                        if (buffer == MaxNumOutputBuffers - 1)
                        {
                            // We got the number of output buffers that we were expecting
                            // For the input buffer, we should release input buffer fence
                            // once we have all reporocess outputs buffers.
                            if ((0    != pResultHolder->numInBuffers) &&
                                (NULL != pResultHolder->inputbufferHolder[0].pBuffer))
                            {
                                m_pCaptureResult[numResults].pInputBuffer = pResultHolder->inputbufferHolder[0].pBuffer;

                                ChiStreamBuffer* pStreamInputBuffer =
                                    // NOWHINE CP036a: Google API requires const type
                                    const_cast<ChiStreamBuffer*>(m_pCaptureResult[numResults].pInputBuffer);

                                // Driver no longer owns this and app will take ownership
                                pStreamInputBuffer->releaseFence = -1;
                            }
                        }
                    }

                    // NOTE(review): numOutputBuffers (buffers actually copied above) is discarded in
                    // favor of pResultHolder->numOutBuffers here — confirm they always agree.
                    m_pCaptureResult[numResults].frameworkFrameNum  = GetFrameworkFrameNumber(pResultHolder->sequenceId);
                    m_pCaptureResult[numResults].numOutputBuffers   = pResultHolder->numOutBuffers;
                    m_pCaptureResult[numResults].numPartialMetadata = 1;
                    m_pCaptureResult[numResults].pPrivData          = static_cast<CHIPRIVDATA *>(pResultHolder->pPrivData);

                    numResults++;
                }
            }
        }

        // Get the next result holder and see what's going on with it
        pNode = m_resultHolderList.NextNode(pNode);
    }

    if (numResults > 0)
    {
        // Finally dispatch all the results to the Framework
        DispatchResults(&m_pCaptureResult[0], numResults);
        AdvanceMinExpectedResult(TRUE);
    }

    m_pResultLock->Unlock();
}

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Session::BackupAndEmptyHALRequestQueueForFlush
//
// @brief  Move every pending request from the HAL request queue into the flush request queue (so flush handling can
//         fabricate error results for them), leaving the HAL queue empty. Runs under m_pRequestLock.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
VOID Session::BackupAndEmptyHALRequestQueueForFlush()
{
    m_pRequestLock->Lock();

    // Dequeue until the HAL queue yields nothing; each dequeued request is parked on the
    // flush queue and its slot in the HAL queue is released back to the free pool.
    for (SessionCaptureRequest* pSessionRequests = static_cast<SessionCaptureRequest*>(m_pRequestQueue->Dequeue());
         NULL != pSessionRequests;
         pSessionRequests = static_cast<SessionCaptureRequest*>(m_pRequestQueue->Dequeue()))
    {
        m_pFlushRequestQueue->EnqueueWait(pSessionRequests);
        m_pRequestQueue->Release(pSessionRequests);
    }

    m_pRequestLock->Unlock();
}
CAMX_NAMESPACE_END
