/*
 * FreeRTOS Kernel V10.5.1
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 */

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */


/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED             ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX             ( ( int8_t ) 127 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
 * pcTail members are used as pointers into the queue storage area.  When the
 * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
 * not necessary, and the pcHead pointer is set to NULL to indicate that the
 * structure instead holds a pointer to the mutex holder (if any).  Map alternative
 * names to the pcHead and structure member to ensure the readability of the code
 * is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
 * a union as their usage is mutually exclusive dependent on what the queue is
 * being used for. */
#define uxQueueType               pcHead
#define queueQUEUE_IS_MUTEX       NULL

typedef struct QueuePointers
{
    int8_t * pcTail;     /* Points to the byte at the end of the queue storage area (one byte past the last valid item slot). */
    int8_t * pcReadFrom; /* Points to the last place a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData
{
    TaskHandle_t xMutexHolder;          /* The handle of the task that currently holds the mutex, or NULL. */
    UBaseType_t uxRecursiveCallCount;   /* Number of times a recursive mutex has been 'taken' by its holder without being given back. */
} SemaphoreData_t;

/* Semaphores do not actually store or copy data, so have an item size of
 * zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ((UBaseType_t)0)
#define queueMUTEX_GIVE_BLOCK_TIME          ((TickType_t)0U)

#if (configUSE_PREEMPTION == 0)

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define queueYIELD_IF_USING_PREEMPTION()
#else
    #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * 队列管理结构
 */
typedef struct QueueDefinition
{
    int8_t *pcHead;                        /* Points to the beginning of the queue storage area. */
    int8_t *pcWriteTo;                     /* Points to the next free place in the storage area. */

    union
    {
        QueuePointers_t xQueue;             /* Data required exclusively when this structure is used as a queue. */
        SemaphoreData_t xSemaphore;         /* Data required exclusively when this structure is used as a semaphore or mutex. */
    } u;

    List_t xTasksWaitingToSend;             /* List of tasks blocked waiting to post onto this queue (queue full).  Stored in priority order. */
    List_t xTasksWaitingToReceive;          /* List of tasks blocked waiting to read from this queue (queue empty).  Stored in priority order. */

    volatile UBaseType_t uxMessagesWaiting; /* The number of items currently in the queue. */
    UBaseType_t uxLength;                   /* The length of the queue: the number of items it can hold, not the number of bytes. */
    UBaseType_t uxItemSize;                 /* The size, in bytes, of each item the queue holds. */

    volatile int8_t cRxLock;                /* Receive lock.  queueUNLOCKED when unlocked; otherwise counts items removed from the queue while locked. */
    volatile int8_t cTxLock;                /* Transmit lock.  queueUNLOCKED when unlocked; otherwise counts items added to the queue while locked. */

#if (configUSE_QUEUE_SETS == 1)
    struct QueueDefinition *pxQueueSetContainer;    /* The queue set this queue is (optionally) a member of. */
#endif

#if (configUSE_TRACE_FACILITY == 1)
    UBaseType_t uxQueueNumber;              /* Identifier assigned for kernel-aware debuggers/trace tools. */
    uint8_t ucQueueType;                    /* Records the type this queue was created as (queue, mutex, semaphore, ...). */
#endif
} xQUEUE;

/**
 * The old xQUEUE name is maintained above then typedefed to the new Queue_t
 * name below to enable the use of older kernel aware debuggers.
 */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if (configQUEUE_REGISTRY_SIZE > 0)

/* The type stored within the queue registry array.  This allows a name
 * to be assigned to each queue making kernel aware debugging a little
 * more user friendly. */
    /* One entry of the queue registry: a human-readable name paired with the
     * handle it describes, for the benefit of kernel-aware debuggers. */
    typedef struct QUEUE_REGISTRY_ITEM
    {
        const char * pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
        QueueHandle_t xHandle;
    } xQueueRegistryItem;

/* The old xQueueRegistryItem name is maintained above then typedefed to the
 * new xQueueRegistryItem name below to enable the use of older kernel aware
 * debuggers. */
    typedef xQueueRegistryItem QueueRegistryItem_t;

/* The queue registry is simply an array of QueueRegistryItem_t structures.
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */
    PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

#endif /* configQUEUE_REGISTRY_SIZE */

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue in unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue(Queue_t *const pxQueue) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty(const Queue_t *pxQueue) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE;
 */
static BaseType_t prvIsQueueFull(const Queue_t *pxQueue) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue(Queue_t *const pxQueue,
                                      const void *pvItemToQueue,
                                      const BaseType_t xPosition) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue(Queue_t *const pxQueue,
                                  void *const pvBuffer) PRIVILEGED_FUNCTION;

#if (configUSE_QUEUE_SETS == 1)

/*
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
    static BaseType_t prvNotifyQueueSetContainer(const Queue_t *const pxQueue) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue(const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t *pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t *pxNewQueue) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if ( configUSE_MUTEXES == 1 )
    static void prvInitialiseMutex(Queue_t * pxNewQueue) PRIVILEGED_FUNCTION;
#endif

#if (configUSE_MUTEXES == 1)

/*
 * If a task waiting for a mutex causes the mutex holder to inherit a
 * priority, but the waiting task times out, then the holder should
 * disinherit the priority - but only down to the highest priority of any
 * other tasks that are waiting for the same mutex.  This function returns
 * that priority.
 */
    static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue(pxQueue)                            \
    taskENTER_CRITICAL();                                \
    {                                                    \
        if ((pxQueue)->cRxLock == queueUNLOCKED)          \
        {                                                \
            (pxQueue)->cRxLock = queueLOCKED_UNMODIFIED; \
        }                                                \
        if ((pxQueue)->cTxLock == queueUNLOCKED)          \
        {                                                \
            (pxQueue)->cTxLock = queueLOCKED_UNMODIFIED; \
        }                                                \
    }                                                    \
    taskEXIT_CRITICAL()

/*
 * Macro to increment cTxLock member of the queue data structure. It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
    {                                                                         \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
        if ( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
        {                                                                     \
            configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
            ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
        }                                                                     \
    }

/*
 * Macro to increment cRxLock member of the queue data structure. It is
 * capped at the number of tasks in the system as we cannot unblock more
 * tasks than the number of tasks in the system.
 */
#define prvIncrementQueueRxLock(pxQueue, cRxLock)                       \
    {                                                                   \
        const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();   \
        if ((UBaseType_t) (cRxLock) < uxNumberOfTasks)                   \
        {                                                               \
            configASSERT((cRxLock) != queueINT8_MAX);                   \
            (pxQueue)->cRxLock = (int8_t) ((cRxLock) + (int8_t)1);      \
        }                                                               \
    }

/**
 * Reset a queue to its empty state.
 *
 * @param xQueue    The queue being reset.
 * @param xNewQueue pdTRUE when called during initial creation (the event
 *                  lists must be initialised); pdFALSE when resetting an
 *                  existing queue (a blocked sender may need unblocking).
 * @return pdPASS on success, pdFAIL if the queue's length is zero or the
 *         length * item-size product would overflow.
 */
BaseType_t xQueueGenericReset(QueueHandle_t xQueue, BaseType_t xNewQueue)
{
    BaseType_t xReturn = pdPASS;
    Queue_t * const pxQueue = xQueue;

    configASSERT(pxQueue);

    if ((pxQueue != NULL) &&
        (pxQueue->uxLength >= 1U) &&
        /* Check for multiplication overflow. */
        ((SIZE_MAX / pxQueue->uxLength) >= pxQueue->uxItemSize))
    {
        taskENTER_CRITICAL();
        {
            pxQueue->u.xQueue.pcTail = pxQueue->pcHead + (pxQueue->uxLength * pxQueue->uxItemSize);
            pxQueue->uxMessagesWaiting = (UBaseType_t) 0U;
            /* Reset the write and read positions within the storage area.
             * pcReadFrom is parked on the LAST item slot - presumably it is
             * advanced before use by the read path (not visible here). */
            pxQueue->pcWriteTo = pxQueue->pcHead;
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ((pxQueue->uxLength - 1U ) * pxQueue->uxItemSize);
            pxQueue->cRxLock = queueUNLOCKED;
            pxQueue->cTxLock = queueUNLOCKED;

            /* Resetting an existing queue, or initialising a new one? */
            if (xNewQueue == pdFALSE)
            {
                /* If there are tasks blocked waiting to read from the queue, then
                 * the tasks will remain blocked as after this function exits the queue
                 * will still be empty.  If there are tasks blocked waiting to write to
                 * the queue, then one should be unblocked as after this function exits
                 * it will be possible to write to it. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE)
                {
                    /* Unblock one task that was waiting to send; yield if it
                     * has a higher priority than the running task. */
                    if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) != pdFALSE)
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                }
            }
            else
            {
                /**
                 * Ensure the event queues start in the correct state.
                 * A brand new queue: initialise both event lists.
                 */
                vListInitialise(&(pxQueue->xTasksWaitingToSend));
                vListInitialise(&(pxQueue->xTasksWaitingToReceive));
            }
        }
        taskEXIT_CRITICAL();
    }
    else
    {
        xReturn = pdFAIL;
    }

    configASSERT(xReturn != pdFAIL);

    /* A value is returned for calling semantic consistency with previous
     * versions.
     */
    return xReturn;
}

/**
 * xQueueGenericCreate: 创建队列
 * @uxQueueLength:  队列元素个数
 * @uxItemSize:     队列的元素的长度
 * @ucQueueType:    队列类型
*/
QueueHandle_t xQueueGenericCreate(const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   const uint8_t ucQueueType)
{
    Queue_t *pxNewQueue = NULL;
    size_t xQueueSizeInBytes;
    uint8_t *pucQueueStorage;

    if ((uxQueueLength > (UBaseType_t)0) &&
        /* Check for multiplication overflow. */
        ((SIZE_MAX / uxQueueLength) >= uxItemSize) &&
        /* Check for addition overflow. */
        ((SIZE_MAX - sizeof(Queue_t)) >= (uxQueueLength * uxItemSize)))
    {
        /**
         * Allocate enough space to hold the maximum number of items that
         * can be in the queue at any time.  It is valid for uxItemSize to be
         * zero in the case the queue is used as a semaphore.
         * 计算整个队列队列(管理结构和存放消息)的长度
         */
        xQueueSizeInBytes = (size_t)(uxQueueLength * uxItemSize);
        /**
         * Allocate the queue and storage area.  Justification for MISRA
         * deviation as follows:  pvPortMalloc() always ensures returned memory
         * blocks are aligned per the requirements of the MCU stack.  In this case
         * pvPortMalloc() must return a pointer that is guaranteed to meet the
         * alignment requirements of the Queue_t structure - which in this case
         * is an int8_t *.  Therefore, whenever the stack alignment requirements
         * are greater than or equal to the pointer to char requirements the cast
         * is safe.  In other cases alignment requirements are not strict (one or
         * two bytes).
         */
        pxNewQueue = (Queue_t *)pvPortMalloc(sizeof(Queue_t) + xQueueSizeInBytes);
        if (pxNewQueue != NULL)
        {
            /**
             * Jump past the queue structure to find the location of the queue
             * storage area.
             * 队列存放消息的内存紧挨在Queue_t管理结构的后面
             */
            pucQueueStorage = (uint8_t *)pxNewQueue;
            pucQueueStorage += sizeof(Queue_t);
            prvInitialiseNewQueue(uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue);
        }
        else
        {
            traceQUEUE_CREATE_FAILED(ucQueueType);
        }
    }
    else
    {
        configASSERT( pxNewQueue );
    }
    return pxNewQueue;
}

/**
 * @uxQueueLength:      队列中元素个数
 * @uxItemSize:         队列元素的长度
 * @pucQueueStorage:    队列存放消息的起始位置
 * @ucQueueType:        队列的类型
 * @pxNewQueue:         队列管理结构
 */
/**
 * Fill in the members of a freshly allocated Queue_t.
 *
 * @param uxQueueLength    Number of items the queue can hold.
 * @param uxItemSize       Size of each item in bytes (zero for semaphores).
 * @param pucQueueStorage  Start of the item storage area.
 * @param ucQueueType      Queue type identifier (trace builds only).
 * @param pxNewQueue       The queue control structure to initialise.
 */
static void prvInitialiseNewQueue(const UBaseType_t uxQueueLength,
                                   const UBaseType_t uxItemSize,
                                   uint8_t * pucQueueStorage,
                                   const uint8_t ucQueueType,
                                   Queue_t * pxNewQueue)
{
    /* Silence unused-parameter warnings when configUSE_TRACE_FACILITY is 0. */
    (void)ucQueueType;

    /* With an item size of zero (e.g. a semaphore) no storage area exists,
     * but pcHead must not be NULL because NULL is the key marking the
     * structure as a mutex.  Point it at the structure itself as a benign
     * non-NULL value that is known to be within the memory map. */
    pxNewQueue->pcHead = (uxItemSize == (UBaseType_t)0)
                             ? (int8_t *)pxNewQueue
                             : (int8_t *)pucQueueStorage;

    /* Record the queue dimensions, then let the generic reset establish all
     * remaining members (pointers, counters, locks and event lists). */
    pxNewQueue->uxLength = uxQueueLength;
    pxNewQueue->uxItemSize = uxItemSize;
    (void)xQueueGenericReset(pxNewQueue, pdTRUE);

    #if (configUSE_TRACE_FACILITY == 1)
    {
        pxNewQueue->ucQueueType = ucQueueType;
    }
    #endif /* configUSE_TRACE_FACILITY */

    #if (configUSE_QUEUE_SETS == 1)
    {
        pxNewQueue->pxQueueSetContainer = NULL;
    }
    #endif /* configUSE_QUEUE_SETS */

    traceQUEUE_CREATE(pxNewQueue);
}

#if (configUSE_MUTEXES == 1)

/**
 * Re-purpose a generic queue as a mutex.  Tolerates a NULL handle (the
 * creation may have failed).
 */
static void prvInitialiseMutex(Queue_t * pxNewQueue)
{
    if (pxNewQueue == NULL)
    {
        traceCREATE_MUTEX_FAILED();
        return;
    }

    /* The queue create function already set every member correctly for a
     * generic queue; overwrite only those that differ for a mutex - in
     * particular the information required for priority inheritance. */
    pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
    pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

    /* In case this is a recursive mutex. */
    pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;

    traceCREATE_MUTEX(pxNewQueue);

    /* Post a single item so the mutex starts in the 'available' state. */
    (void)xQueueGenericSend(pxNewQueue, NULL, (TickType_t )0U, queueSEND_TO_BACK);
}

/**
 * 创建互斥量队列
 * @ucQueueType: 队列类型
 */
QueueHandle_t xQueueCreateMutex(const uint8_t ucQueueType)
{
    QueueHandle_t xNewQueue;
    const UBaseType_t uxMutexLength = (UBaseType_t)1;
    const UBaseType_t uxMutexSize = (UBaseType_t)0;

    xNewQueue = xQueueGenericCreate(uxMutexLength, uxMutexSize, ucQueueType);
    prvInitialiseMutex((Queue_t *)xNewQueue);

    return xNewQueue;
}

#endif /* configUSE_MUTEXES */

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

    /**
     * Return the handle of the task holding the mutex, or NULL if the mutex
     * is not held or the handle is not a mutex.
     */
    TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
    {
        TaskHandle_t xHolder;
        Queue_t * const pxQueue = ( Queue_t * ) xSemaphore;

        configASSERT( xSemaphore );

        /* Called by xSemaphoreGetMutexHolder(); should not be called
         * directly.  Note: this is a good way of determining whether the
         * CALLING task is the holder, but not a good way of identifying the
         * holder, because the holder may change between the critical section
         * below exiting and this function returning. */
        taskENTER_CRITICAL();
        {
            xHolder = ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
                          ? pxQueue->u.xSemaphore.xMutexHolder
                          : NULL;
        }
        taskEXIT_CRITICAL();

        return xHolder;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

    /**
     * ISR-safe variant of xQueueGetMutexHolder().  Mutexes cannot be used in
     * interrupt service routines, so the holder cannot change while an ISR
     * runs - no critical section is required.
     */
    TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
    {
        Queue_t * const pxQueue = ( Queue_t * ) xSemaphore;
        TaskHandle_t xHolder = NULL;

        configASSERT( xSemaphore );

        if ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
        {
            xHolder = pxQueue->u.xSemaphore.xMutexHolder;
        }

        return xHolder;
    } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

    /**
     * Give (release) a recursive mutex.  Only the task that holds the mutex
     * may give it; the mutex is really released only when the recursive call
     * count unwinds to zero.
     *
     * @return pdPASS if the caller held the mutex, pdFAIL otherwise.
     */
    BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
    {
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;
        BaseType_t xResult;

        configASSERT( pxMutex );

        /* If this task holds the mutex then xMutexHolder cannot change under
         * us.  If it does not, xMutexHolder can never coincidentally equal
         * this task's handle, so the comparison is safe without mutual
         * exclusion. */
        if ( pxMutex->u.xSemaphore.xMutexHolder != xTaskGetCurrentTaskHandle() )
        {
            /* The mutex cannot be given because the calling task is not the
             * holder. */
            xResult = pdFAIL;
            traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
        }
        else
        {
            traceGIVE_MUTEX_RECURSIVE( pxMutex );

            /* uxRecursiveCallCount cannot be zero while this task is the
             * holder, so no underflow check is required.  Only the holder
             * modifies the count, so no mutual exclusion is required. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

            /* When the nesting unwinds to zero, return the mutex for real;
             * this automatically unblocks any task waiting for it. */
            if ( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
            {
                ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
            }

            xResult = pdPASS;
        }

        return xResult;
    }

    /**
     * Take (obtain) a recursive mutex.  If the calling task already holds
     * the mutex the nesting count is simply incremented; otherwise an
     * ordinary semaphore take is attempted, blocking for up to xTicksToWait.
     *
     * @return pdPASS if the mutex was obtained, pdFAIL on timeout.
     */
    BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
                                         TickType_t xTicksToWait )
    {
        Queue_t * const pxMutex = ( Queue_t * ) xMutex;
        BaseType_t xResult;

        configASSERT( pxMutex );

        /* See xQueueGiveMutexRecursive() for why no mutual exclusion is
         * needed around the holder comparison. */

        traceTAKE_MUTEX_RECURSIVE( pxMutex );

        if ( xTaskGetCurrentTaskHandle() == pxMutex->u.xSemaphore.xMutexHolder )
        {
            /* Already held by this task - just deepen the nesting. */
            ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            xResult = pdPASS;
        }
        else
        {
            /* Not held by this task; the caller may enter the Blocked state
             * inside this call. */
            xResult = xQueueSemaphoreTake( pxMutex, xTicksToWait );

            if ( xResult == pdFAIL )
            {
                traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
            }
            else
            {
                /* pdPASS means the mutex was obtained - count this first
                 * level of nesting. */
                ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
            }
        }

        return xResult;
    }

#endif /* configUSE_RECURSIVE_MUTEXES */


#if (configUSE_COUNTING_SEMAPHORES == 1)

/**
 * @uxMaxCount: 最大计数值
 * @uxInitialCount: 初始计数值
 */
/**
 * Create a counting semaphore.
 *
 * @param uxMaxCount      Highest count value the semaphore can reach (> 0).
 * @param uxInitialCount  Count assigned at creation (<= uxMaxCount).
 * @return Handle to the semaphore, or NULL on invalid arguments or
 *         allocation failure.
 */
QueueHandle_t xQueueCreateCountingSemaphore(const UBaseType_t uxMaxCount,
                                             const UBaseType_t uxInitialCount)
{
    QueueHandle_t xSemaphore = NULL;

    /* A zero maximum, or an initial count above the maximum, is invalid. */
    if ((uxMaxCount == 0) || (uxInitialCount > uxMaxCount))
    {
        configASSERT(xSemaphore);
    }
    else
    {
        /* A counting semaphore is a queue whose length is the maximum count
         * and whose items carry no data. */
        xSemaphore = xQueueGenericCreate(uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE);

        if (xSemaphore == NULL)
        {
            traceCREATE_COUNTING_SEMAPHORE_FAILED();
        }
        else
        {
            /* Seed the count: each (empty) item in the queue represents one
             * available token. */
            ((Queue_t *)xSemaphore)->uxMessagesWaiting = uxInitialCount;
            traceCREATE_COUNTING_SEMAPHORE();
        }
    }

    return xSemaphore;
}

#endif

/**
 * Post an item to a queue, optionally blocking until space is available.
 *
 * @param xQueue        The queue to send to.
 * @param pvItemToQueue Pointer to the item to copy into the queue; may be
 *                      NULL only when the queue's item size is zero.
 * @param xTicksToWait  Maximum number of ticks to block waiting for space.
 * @param xCopyPosition queueSEND_TO_BACK, queueSEND_TO_FRONT or
 *                      queueOVERWRITE (the latter only on length-1 queues).
 * @return pdPASS on success, errQUEUE_FULL if the queue stayed full for the
 *         whole of xTicksToWait.
 */
BaseType_t xQueueGenericSend(QueueHandle_t xQueue,
                              const void * const pvItemToQueue,
                              TickType_t xTicksToWait,
                              const BaseType_t xCopyPosition)
{
    BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(!((pvItemToQueue == NULL) && (pxQueue->uxItemSize != (UBaseType_t )0U)));
    configASSERT(!((xCopyPosition == queueOVERWRITE) && (pxQueue->uxLength != 1)));

    /* Cannot block if the scheduler is suspended. */
    #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(!((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) && (xTicksToWait != 0)));
    }
    #endif

    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            /**
             * Is there room on the queue now?  The running task must be the
             * highest priority task wanting to access the queue.  If the head item
             * in the queue is to be overwritten then it does not matter if the
             * queue is full.
             */
            if ((pxQueue->uxMessagesWaiting < pxQueue->uxLength) || (xCopyPosition == queueOVERWRITE))
            {
                traceQUEUE_SEND(pxQueue);

                #if (configUSE_QUEUE_SETS == 1)
                {
                    const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

                    /* Copy the item into the queue storage area. */
                    xYieldRequired = prvCopyDataToQueue(pxQueue, pvItemToQueue, xCopyPosition);

                    if (pxQueue->pxQueueSetContainer != NULL)
                    {
                        if ((xCopyPosition == queueOVERWRITE) && (uxPreviousMessagesWaiting != (UBaseType_t)0))
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE)
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock. A context switch is required. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                    }
                    else
                    {
                        /**
                         * If there was a task waiting (Blocked) for data to
                         * arrive on the queue then unblock it now.
                         */
                        if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                        {
                            /* Remove the blocked task from the event list. */
                            if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                            {
                                /**
                                 * The unblocked task has a priority higher than
                                 * our own so yield immediately.  Yes it is ok to
                                 * do this from within the critical section - the
                                 * kernel takes care of that.  The woken task will
                                 * read the item from the queue when it runs.
                                 */
                                queueYIELD_IF_USING_PREEMPTION();
                            }
                        }
                        else if (xYieldRequired != pdFALSE)
                        {
                            /**
                             * This path is a special case that will only get
                             * executed if the task was holding multiple mutexes
                             * and the mutexes were given back in an order that is
                             * different to that in which they were taken, so a
                             * yield is requested explicitly.
                             */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    xYieldRequired = prvCopyDataToQueue(pxQueue, pvItemToQueue, xCopyPosition);

                    /* If there was a task waiting for data to arrive on the
                     * queue then unblock it now. */
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                    {
                        if ( xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                        {
                            /* The unblocked task has a priority higher than
                             * our own so yield immediately.  Yes it is ok to do
                             * this from within the critical section - the kernel
                             * takes care of that. */
                            queueYIELD_IF_USING_PREEMPTION();
                        }
                    }
                    else if (xYieldRequired != pdFALSE)
                    {
                        /* This path is a special case that will only get
                         * executed if the task was holding multiple mutexes and
                         * the mutexes were given back in an order that is
                         * different to that in which they were taken. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                }
                #endif /* configUSE_QUEUE_SETS */

                taskEXIT_CRITICAL();
                /* The item was queued successfully - return immediately. */
                return pdPASS;
            }
            else    /* The queue is full. */
            {
                /* Caller does not want to block. */
                if (xTicksToWait == (TickType_t)0)
                {
                    /* The queue was full and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();

                    /* Return to the original privilege level before exiting
                     * the function.
                     */
                    traceQUEUE_SEND_FAILED(pxQueue);
                    return errQUEUE_FULL;
                }
                else if (xEntryTimeSet == pdFALSE)
                {
                    /* The queue was full and a block time was specified so
                     * configure the timeout structure. */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited.  Suspend the scheduler
         * while deciding whether to block.
         */
        vTaskSuspendAll();
        /* Lock the queue so ISRs only record, rather than act on, events. */
        prvLockQueue(pxQueue);

        /**
         * Update the timeout state to see if the caller-specified block time
         * has expired yet.
         */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE)
        {
            /* Not timed out.  If the queue is still full, block on it. */
            if (prvIsQueueFull(pxQueue) != pdFALSE)
            {
                traceBLOCKING_ON_QUEUE_SEND(pxQueue);
                /* Queue still full - place this task on the list of tasks
                 * waiting to send to the queue. */
                vTaskPlaceOnEventList(&(pxQueue->xTasksWaitingToSend), xTicksToWait);

                /* Unlocking the queue means queue events can effect the
                 * event list. It is possible that interrupts occurring now
                 * remove this task from the event list again - but as the
                 * scheduler is suspended the task will go onto the pending
                 * ready list instead of the actual ready list.
                 */
                prvUnlockQueue(pxQueue);

                /* Resuming the scheduler will move tasks from the pending
                 * ready list into the ready list - so it is feasible that this
                 * task is already in the ready list before it yields - in which
                 * case the yield will not cause a context switch unless there
                 * is also a higher priority task in the pending ready list. */
                if (xTaskResumeAll() == pdFALSE)
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* Space appeared while the critical section was exited and
                 * the block time has not expired - loop and try again. */
                prvUnlockQueue(pxQueue);
                (void)xTaskResumeAll();
            }
        }
        else
        {
            /*
             * The block time has expired - give up and report failure.
             */
            prvUnlockQueue(pxQueue);
            (void)xTaskResumeAll();

            traceQUEUE_SEND_FAILED(pxQueue);
            return errQUEUE_FULL;
        }
    }
}


/**
 * Interrupt-safe version of xQueueGenericSend().  Never blocks; if the queue
 * is full (and xCopyPosition is not queueOVERWRITE) it fails immediately.
 *
 * @param xQueue                     The queue to send to.
 * @param pvItemToQueue              Pointer to the item to copy; may be NULL
 *                                   only if the queue's item size is zero.
 * @param pxHigherPriorityTaskWoken  Optional out parameter, set to pdTRUE if
 *                                   the post unblocked a task of higher
 *                                   priority than the one interrupted (the ISR
 *                                   should then request a context switch).
 * @param xCopyPosition              queueSEND_TO_BACK, queueSEND_TO_FRONT or
 *                                   queueOVERWRITE.
 * @return pdPASS on success, errQUEUE_FULL if there was no room.
 */
BaseType_t xQueueGenericSendFromISR(QueueHandle_t xQueue,
                                     const void * const pvItemToQueue,
                                     BaseType_t * const pxHigherPriorityTaskWoken,
                                     const BaseType_t xCopyPosition)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(!((pvItemToQueue == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));
    configASSERT(!((xCopyPosition == queueOVERWRITE) && (pxQueue->uxLength != 1)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    /* Similar to xQueueGenericSend, except without blocking if there is no room
     * in the queue.  Also don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */
    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        if ((pxQueue->uxMessagesWaiting < pxQueue->uxLength) || (xCopyPosition == queueOVERWRITE))
        {
            const int8_t cTxLock = pxQueue->cTxLock;
            const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

            traceQUEUE_SEND_FROM_ISR(pxQueue);

            /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
             *  semaphore or mutex.  That means prvCopyDataToQueue() cannot result
             *  in a task disinheriting a priority and prvCopyDataToQueue() can be
             *  called here even though the disinherit function does not check if
             *  the scheduler is suspended before accessing the ready lists. */
            (void)prvCopyDataToQueue(pxQueue, pvItemToQueue, xCopyPosition);

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if (cTxLock == queueUNLOCKED)
            {
                #if (configUSE_QUEUE_SETS == 1)
                {
                    if (pxQueue->pxQueueSetContainer != NULL)
                    {
                        if ((xCopyPosition == queueOVERWRITE) && (uxPreviousMessagesWaiting != (UBaseType_t)0))
                        {
                            /* Do not notify the queue set as an existing item
                             * was overwritten in the queue so the number of items
                             * in the queue has not changed. */
                            mtCOVERAGE_TEST_MARKER();
                        }
                        else if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE)
                        {
                            /* The queue is a member of a queue set, and posting
                             * to the queue set caused a higher priority task to
                             * unblock.  A context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL)
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }
                        }
                    }
                    else
                    {
                        if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                        {
                            if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                            {
                                /* The task waiting has a higher priority so
                                 *  record that a context switch is required. */
                                if (pxHigherPriorityTaskWoken != NULL)
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                            }

                        }

                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                    {
                        if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL)
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }

                        }
                    }

                    /* Not used in this path. */
                    (void) uxPreviousMessagesWaiting;
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock(pxQueue, cTxLock);
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue);
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}


/**
 * Interrupt-safe "give" for semaphores (queues whose item size is 0).
 * Increments the count without copying any data.  Never blocks.
 *
 * @param xQueue                     The semaphore's underlying queue.
 * @param pxHigherPriorityTaskWoken  Optional out parameter, set to pdTRUE if
 *                                   the give unblocked a higher priority task
 *                                   (the ISR should then request a context
 *                                   switch).
 * @return pdPASS on success, errQUEUE_FULL if the count is already at its
 *         maximum.
 */
BaseType_t xQueueGiveFromISR(QueueHandle_t xQueue,
                              BaseType_t * const pxHigherPriorityTaskWoken)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
     * item size is 0.  Don't directly wake a task that was blocked on a queue
     * read, instead return a flag to say whether a context switch is required or
     * not (i.e. has a task with a higher priority than us been woken by this
     * post). */

    configASSERT(pxQueue);

    /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
     * if the item size is not 0. */
    configASSERT(pxQueue->uxItemSize == 0);

    /* Normally a mutex would not be given from an interrupt, especially if
     * there is a mutex holder, as priority inheritance makes no sense for an
     * interrupts, only tasks. */
    configASSERT(!((pxQueue->uxQueueType == queueQUEUE_IS_MUTEX) && (pxQueue->u.xSemaphore.xMutexHolder != NULL)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* When the queue is used to implement a semaphore no data is ever
         * moved through the queue but it is still valid to see if the queue 'has
         * space'. */
        if (uxMessagesWaiting < pxQueue->uxLength)
        {
            const int8_t cTxLock = pxQueue->cTxLock;

            traceQUEUE_SEND_FROM_ISR(pxQueue);

            /* A task can only have an inherited priority if it is a mutex
             * holder - and if there is a mutex holder then the mutex cannot be
             * given from an ISR.  As this is the ISR version of the function it
             * can be assumed there is no mutex holder and no need to determine if
             * priority disinheritance is needed.  Simply increase the count of
             * messages (semaphores) available. */
            pxQueue->uxMessagesWaiting = uxMessagesWaiting + (UBaseType_t)1;

            /* The event list is not altered if the queue is locked.  This will
             * be done when the queue is unlocked later. */
            if (cTxLock == queueUNLOCKED)
            {
                #if (configUSE_QUEUE_SETS == 1)
                {
                    if (pxQueue->pxQueueSetContainer != NULL)
                    {
                        if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE)
                        {
                            /* The semaphore is a member of a queue set, and
                             * posting to the queue set caused a higher priority
                             * task to unblock.  A context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL)
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }

                        }

                    }
                    else
                    {
                        if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                        {
                            if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                            {
                                /* The task waiting has a higher priority so
                                 *  record that a context switch is required. */
                                if (pxHigherPriorityTaskWoken != NULL)
                                {
                                    *pxHigherPriorityTaskWoken = pdTRUE;
                                }
                            }

                        }

                    }
                }
                #else /* configUSE_QUEUE_SETS */
                {
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                    {
                        if (xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            if (pxHigherPriorityTaskWoken != NULL)
                            {
                                *pxHigherPriorityTaskWoken = pdTRUE;
                            }

                        }

                    }
                }
                #endif /* configUSE_QUEUE_SETS */
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was posted while it was locked. */
                prvIncrementQueueTxLock(pxQueue, cTxLock);
            }

            xReturn = pdPASS;
        }
        else
        {
            traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue);
            xReturn = errQUEUE_FULL;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}

/**
 * Receive an item from a queue, optionally blocking until one is available.
 *
 * @param xQueue       The queue to receive from.
 * @param pvBuffer     Buffer the received item is copied into; may be NULL
 *                     only when the queue's item size is zero.
 * @param xTicksToWait Maximum number of ticks to block waiting for data.
 * @return pdPASS on success, errQUEUE_EMPTY if the queue stayed empty for
 *         the whole of xTicksToWait.
 */
BaseType_t xQueueReceive(QueueHandle_t xQueue, void * const pvBuffer,
                          TickType_t xTicksToWait)
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT((pxQueue));

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer). */
    configASSERT(!(((pvBuffer) == NULL) && ((pxQueue)->uxItemSize != (UBaseType_t) 0U)));

    /* Cannot block if the scheduler is suspended. */
    #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(!((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) && (xTicksToWait != 0)));
    }
    #endif


    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /**
             * Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue.
             */
            if (uxMessagesWaiting > (UBaseType_t)0)
            {
                /* Data available, remove one item. */
                prvCopyDataFromQueue(pxQueue, pvBuffer);
                traceQUEUE_RECEIVE(pxQueue);
                /* One fewer message is now stored in the queue. */
                pxQueue->uxMessagesWaiting = uxMessagesWaiting - (UBaseType_t)1;

                /**
                 * There is now space in the queue, were any tasks waiting to
                 * post to the queue?  If so, unblock the highest priority waiting
                 * task.  A non-empty send-waiting list means the queue was full;
                 * this read freed one slot, so one blocked sender can proceed.
                 */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE)
                {
                    if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) != pdFALSE)
                    {
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                /* The queue is empty and the caller does not want to wait. */
                if (xTicksToWait == (TickType_t)0)
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED(pxQueue);
                    return errQUEUE_EMPTY;
                }
                else if (xEntryTimeSet == pdFALSE)
                {
                    /**
                     * The queue was empty and a block time was specified so
                     * configure the timeout structure.
                     */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /**
         * Update the timeout state to see if it has expired yet.
         */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE)
        {
            /* The timeout has not expired.  If the queue is still empty place
             * the task on the list of tasks waiting to receive from the queue. */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE)
            {
                traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue);
                /* Block the current task on the receive-waiting event list. */
                vTaskPlaceOnEventList(&(pxQueue->xTasksWaitingToReceive), xTicksToWait);
                prvUnlockQueue(pxQueue);

                if (xTaskResumeAll() == pdFALSE)
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /**
                 * The queue contains data again.  Loop back to try and read the
                 * data
                 */
                prvUnlockQueue(pxQueue);
                (void)xTaskResumeAll();
            }
        }
        else
        {
            /* Timed out.  If there is no data in the queue exit, otherwise loop
             * back and attempt to read the data. */
            prvUnlockQueue(pxQueue);
            (void)xTaskResumeAll();

            if (prvIsQueueEmpty(pxQueue) != pdFALSE)
            {
                traceQUEUE_RECEIVE_FAILED(pxQueue);
                return errQUEUE_EMPTY;
            }
        }
    }
}

/**
 * Take (obtain) a semaphore.  Semaphores are queues whose item size is 0.
 * @xQueue:         The semaphore's underlying queue.
 * @xTicksToWait:   Maximum number of ticks to block waiting for the semaphore.
 */
/**
 * Take (obtain) a semaphore or mutex.
 *
 * Semaphores are implemented as queues with an item size of zero, so the
 * number of messages waiting in the queue IS the semaphore's count value.
 * For mutexes (pcHead == queueQUEUE_IS_MUTEX) this function additionally
 * records the holder and performs priority inheritance while blocking.
 *
 * @param xQueue        Handle of the semaphore's underlying queue.
 * @param xTicksToWait  Maximum number of ticks to block waiting for the
 *                      semaphore to become available (0 = do not block).
 *
 * @return pdPASS if the semaphore was obtained, errQUEUE_EMPTY on timeout.
 */
BaseType_t xQueueSemaphoreTake(QueueHandle_t xQueue, TickType_t xTicksToWait)
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    Queue_t * const pxQueue = xQueue;

#if (configUSE_MUTEXES == 1)
    /* Set if blocking on a mutex caused the holder to inherit this task's
     * priority; used to disinherit again on timeout. */
    BaseType_t xInheritanceOccurred = pdFALSE;
#endif

    /* Check the queue pointer is not NULL. */
    configASSERT((pxQueue));

    /* Check this really is a semaphore, in which case the item size will be
     * 0. */
    configASSERT(pxQueue->uxItemSize == 0);

    /* Cannot block if the scheduler is suspended. */
    #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(!((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) && (xTicksToWait != 0)));
    }
    #endif

    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            /* Semaphores are queues with an item size of 0, and where the
             * number of messages in the queue is the semaphore's count value. */
            const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;

            /**
             * Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue.
             * If the count is non-zero the semaphore is available and can be
             * taken immediately without blocking.
             */
            if (uxSemaphoreCount > (UBaseType_t)0)
            {
                traceQUEUE_RECEIVE(pxQueue);

                /* Semaphores are queues with a data size of zero and where the
                 * messages waiting is the semaphore's count.  Reduce the count
                 * to take the semaphore.
                 */
                pxQueue->uxMessagesWaiting = uxSemaphoreCount - (UBaseType_t)1;

                #if (configUSE_MUTEXES == 1)
                {
                    if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX)
                    {
                        /**
                         * Record the information required to implement
                         * priority inheritance should it become necessary.
                         * Store the TCB of the task that now holds the mutex.
                         */
                        pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
                    }
                }
                #endif /* configUSE_MUTEXES */

                /**
                 * Check to see if other tasks are blocked waiting to give the
                 * semaphore, and if so, unblock the highest priority such task.
                 */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE)
                {
                    if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) != pdFALSE)
                    {
                        /* The unblocked task has a higher priority - yield. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else    /* The semaphore count is zero - nothing to take. */
            {
                /* Caller does not want to block - return immediately. */
                if (xTicksToWait == (TickType_t)0)
                {
                    /* The semaphore count was 0 and no block time is specified
                     * (or the block time has expired) so exit now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_RECEIVE_FAILED(pxQueue);
                    return errQUEUE_EMPTY;
                }
                else if (xEntryTimeSet == pdFALSE)
                {
                    /*
                     * The semaphore count was 0 and a block time was specified
                     * so configure the timeout structure ready to block.
                     * Records the time at which blocking started.
                     */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can give to and take from the semaphore
         * now the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /**
         * Update the timeout state to see if it has expired yet.
         */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE)
        {
            /* A block time is specified and not expired.  If the semaphore
             * count is 0 then enter the Blocked state to wait for a semaphore to
             * become available.  As semaphores are implemented with queues the
             * queue being empty is equivalent to the semaphore count being 0.
             */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE)
            {
                traceBLOCKING_ON_QUEUE_RECEIVE(pxQueue);

                #if (configUSE_MUTEXES == 1)
                {
                    if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX)
                    {
                        taskENTER_CRITICAL();
                        {
                            /* Raise the mutex holder's priority to this
                             * task's priority if it is currently lower. */
                            xInheritanceOccurred = xTaskPriorityInherit(pxQueue->u.xSemaphore.xMutexHolder);
                        }
                        taskEXIT_CRITICAL();
                    }
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Place the current task on the xTasksWaitingToReceive event
                 * list so it blocks until the semaphore is given. */
                vTaskPlaceOnEventList(&(pxQueue->xTasksWaitingToReceive), xTicksToWait);
                prvUnlockQueue(pxQueue);

                if (xTaskResumeAll() == pdFALSE)
                {
                    /* Resuming the scheduler did not yield - do it here. */
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* There was no timeout and the semaphore count was not 0, so
                 * attempt to take the semaphore again. */
                prvUnlockQueue(pxQueue);
                (void) xTaskResumeAll();
            }
        }
        else
        {
            /* The block time expired while waiting. */
            prvUnlockQueue(pxQueue);
            (void) xTaskResumeAll();

            /* If the semaphore count is 0 exit now as the timeout has
             * expired.  Otherwise return to attempt to take the semaphore that is
             * known to be available.  As semaphores are implemented by queues the
             * queue being empty is equivalent to the semaphore count being 0.
             */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE)
            {
                #if (configUSE_MUTEXES == 1)
                {
                    /* xInheritanceOccurred could only have be set if
                     * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
                     * test the mutex type again to check it is actually a mutex.
                     */
                    if (xInheritanceOccurred != pdFALSE)
                    {
                        taskENTER_CRITICAL();
                        {
                            UBaseType_t uxHighestWaitingPriority;

                            /* This task blocking on the mutex caused another
                             * task to inherit this task's priority.  Now this task
                             * has timed out the priority should be disinherited
                             * again, but only as low as the next highest priority
                             * task that is waiting for the same mutex.
                             */
                            uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout(pxQueue);
                            vTaskPriorityDisinheritAfterTimeout(pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority);
                        }
                        taskEXIT_CRITICAL();
                    }
                }
                #endif /* configUSE_MUTEXES */

                traceQUEUE_RECEIVE_FAILED(pxQueue);
                return errQUEUE_EMPTY;
            }
        }
    }
}

/**
 * 从队列中拷贝消息，但是消息不会出队
 */
/**
 * Copy the item at the head of the queue into pvBuffer WITHOUT removing it
 * from the queue.  Optionally blocks until data arrives or the timeout
 * expires.
 *
 * @param xQueue        Handle of the queue to peek.
 * @param pvBuffer      Buffer the item is copied into; may only be NULL when
 *                      the queue's item size is zero.
 * @param xTicksToWait  Maximum number of ticks to block waiting for data.
 *
 * @return pdPASS if an item was copied, errQUEUE_EMPTY on timeout.
 */
BaseType_t xQueuePeek(QueueHandle_t xQueue, void * const pvBuffer,
                       TickType_t xTicksToWait)
{
    BaseType_t xEntryTimeSet = pdFALSE;
    TimeOut_t xTimeOut;
    int8_t * pcOriginalReadPosition;
    Queue_t * const pxQueue = xQueue;

    /* Check the pointer is not NULL. */
    configASSERT((pxQueue));

    /* The buffer into which data is received can only be NULL if the data size
     * is zero (so no data is copied into the buffer. */
    configASSERT(!(((pvBuffer) == NULL) && ((pxQueue)->uxItemSize != (UBaseType_t) 0U)));

    /* Cannot block if the scheduler is suspended. */
    #if ((INCLUDE_xTaskGetSchedulerState == 1) || (configUSE_TIMERS == 1))
    {
        configASSERT(!((xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED) && (xTicksToWait != 0)));
    }
    #endif

    /*lint -save -e904  This function relaxes the coding standard somewhat to
     * allow return statements within the function itself.  This is done in the
     * interest of execution time efficiency. */
    for( ; ; )
    {
        taskENTER_CRITICAL();
        {
            const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

            /* Is there data in the queue now?  To be running the calling task
             * must be the highest priority task wanting to access the queue. */
            if (uxMessagesWaiting > (UBaseType_t)0)
            {
                /* Remember the read position so it can be reset after the data
                 * is read from the queue as this function is only peeking the
                 * data, not removing it.
                 */
                pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;

                prvCopyDataFromQueue(pxQueue, pvBuffer);
                traceQUEUE_PEEK(pxQueue);

                /**
                 * The data is not being removed, so reset the read pointer to
                 * the position saved before the copy.
                 */
                pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;

                /* The data is being left in the queue, so see if there are
                 * any other tasks waiting for the data. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                {
                    if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                    {
                        /* The task waiting has a higher priority than this task. */
                        queueYIELD_IF_USING_PREEMPTION();
                    }
                }

                taskEXIT_CRITICAL();
                return pdPASS;
            }
            else
            {
                if (xTicksToWait == (TickType_t)0)
                {
                    /* The queue was empty and no block time is specified (or
                     * the block time has expired) so leave now. */
                    taskEXIT_CRITICAL();
                    traceQUEUE_PEEK_FAILED(pxQueue);
                    return errQUEUE_EMPTY;
                }
                else if (xEntryTimeSet == pdFALSE)
                {
                    /* The queue was empty and a block time was specified so
                     * configure the timeout structure ready to enter the blocked
                     * state. */
                    vTaskInternalSetTimeOutState(&xTimeOut);
                    xEntryTimeSet = pdTRUE;
                }
            }
        }
        taskEXIT_CRITICAL();

        /* Interrupts and other tasks can send to and receive from the queue
         * now that the critical section has been exited. */

        vTaskSuspendAll();
        prvLockQueue(pxQueue);

        /**
         * Update the timeout state to see if it has expired yet.
         */
        if (xTaskCheckForTimeOut(&xTimeOut, &xTicksToWait) == pdFALSE)
        {
            /* Timeout has not expired yet, check to see if there is data in the
            * queue now, and if not enter the Blocked state to wait for data. */
            if (prvIsQueueEmpty(pxQueue) != pdFALSE)
            {
                traceBLOCKING_ON_QUEUE_PEEK(pxQueue);
                vTaskPlaceOnEventList(&(pxQueue->xTasksWaitingToReceive), xTicksToWait);
                prvUnlockQueue(pxQueue);

                if (xTaskResumeAll() == pdFALSE)
                {
                    portYIELD_WITHIN_API();
                }
            }
            else
            {
                /* There is data in the queue now, so don't enter the blocked
                 * state, instead return to try and obtain the data. */
                prvUnlockQueue(pxQueue);
                (void) xTaskResumeAll();
            }
        }
        else
        {
            /* The timeout has expired.  If there is still no data in the queue
             * exit, otherwise go back and try to read the data again. */
            prvUnlockQueue(pxQueue);
            (void) xTaskResumeAll();

            if (prvIsQueueEmpty(pxQueue) != pdFALSE)
            {
                traceQUEUE_PEEK_FAILED(pxQueue);
                return errQUEUE_EMPTY;
            }
        }
    }
}


/**
 * Receive (remove) an item from a queue from within an ISR.  Never blocks.
 *
 * @param xQueue                     Handle of the queue to receive from.
 * @param pvBuffer                   Buffer the item is copied into; may only
 *                                   be NULL if the item size is zero.
 * @param pxHigherPriorityTaskWoken  Optional - set to pdTRUE if receiving
 *                                   unblocked a task of higher priority than
 *                                   the interrupted task, in which case a
 *                                   context switch should be requested before
 *                                   the ISR exits.
 *
 * @return pdPASS if an item was received, pdFAIL if the queue was empty.
 */
BaseType_t xQueueReceiveFromISR(QueueHandle_t xQueue,
                                 void * const pvBuffer,
                                 BaseType_t * const pxHigherPriorityTaskWoken)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    Queue_t * const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(!((pvBuffer == NULL) && (pxQueue->uxItemSize != (UBaseType_t)0U)));

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

        /* Cannot block in an ISR, so check there is data available. */
        if (uxMessagesWaiting > (UBaseType_t)0)
        {
            const int8_t cRxLock = pxQueue->cRxLock;

            traceQUEUE_RECEIVE_FROM_ISR(pxQueue);

            prvCopyDataFromQueue(pxQueue, pvBuffer);
            pxQueue->uxMessagesWaiting = uxMessagesWaiting - (UBaseType_t)1;

            /* If the queue is locked the event list will not be modified.
             * Instead update the lock count so the task that unlocks the queue
             * will know that an ISR has removed data while the queue was
             * locked. */
            if (cRxLock == queueUNLOCKED)
            {
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE)
                {
                    if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) != pdFALSE)
                    {
                        /* The task waiting has a higher priority than us so
                         * force a context switch. */
                        if (pxHigherPriorityTaskWoken != NULL)
                        {
                            *pxHigherPriorityTaskWoken = pdTRUE;
                        }
                    }
                }
            }
            else
            {
                /* Increment the lock count so the task that unlocks the queue
                 * knows that data was removed while it was locked. */
                prvIncrementQueueRxLock(pxQueue, cRxLock);
            }

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_RECEIVE_FROM_ISR_FAILED(pxQueue);
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}

/**
 * Copy the item at the head of the queue into pvBuffer WITHOUT removing it,
 * from within an ISR.  Never blocks.  Cannot be used on semaphores (item
 * size must be non-zero).
 *
 * @param xQueue    Handle of the queue to peek.
 * @param pvBuffer  Buffer the item is copied into.
 *
 * @return pdPASS if an item was copied, pdFAIL if the queue was empty.
 */
BaseType_t xQueuePeekFromISR(QueueHandle_t xQueue, void * const pvBuffer)
{
    BaseType_t xReturn;
    UBaseType_t uxSavedInterruptStatus;
    int8_t * pcOriginalReadPosition;
    Queue_t * const pxQueue = xQueue;

    configASSERT(pxQueue);
    configASSERT(!((pvBuffer == NULL ) && (pxQueue->uxItemSize != (UBaseType_t) 0U)));
    configASSERT(pxQueue->uxItemSize != 0); /* Can't peek a semaphore. */

    /* RTOS ports that support interrupt nesting have the concept of a maximum
     * system call (or maximum API call) interrupt priority.  Interrupts that are
     * above the maximum system call priority are kept permanently enabled, even
     * when the RTOS kernel is in a critical section, but cannot make any calls to
     * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
     * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has been
     * assigned a priority above the configured maximum system call priority.
     * Only FreeRTOS functions that end in FromISR can be called from interrupts
     * that have been assigned a priority at or (logically) below the maximum
     * system call interrupt priority.  FreeRTOS maintains a separate interrupt
     * safe API to ensure interrupt entry is as fast and as simple as possible.
     * More information (albeit Cortex-M specific) is provided on the following
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Cannot block in an ISR, so check there is data available. */
        if ( pxQueue->uxMessagesWaiting > (UBaseType_t)0)
        {
            traceQUEUE_PEEK_FROM_ISR(pxQueue);

            /* Remember the read position so it can be reset as nothing is
             * actually being removed from the queue. */
            pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
            prvCopyDataFromQueue(pxQueue, pvBuffer);
            pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;

            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
            traceQUEUE_PEEK_FROM_ISR_FAILED(pxQueue);
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);

    return xReturn;
}


/**
 * Return the number of items currently held in the queue.  Reads the count
 * inside a critical section so the value is consistent.
 */
UBaseType_t uxQueueMessagesWaiting(const QueueHandle_t xQueue)
{
    UBaseType_t uxNumberOfItems;

    configASSERT(xQueue);

    taskENTER_CRITICAL();
    uxNumberOfItems = ((Queue_t *)xQueue)->uxMessagesWaiting;
    taskEXIT_CRITICAL();

    return uxNumberOfItems;
}

/**
 * Return the number of free item slots remaining in the queue, i.e. the
 * queue length minus the number of items currently stored.
 */
UBaseType_t uxQueueSpacesAvailable(const QueueHandle_t xQueue)
{
    UBaseType_t uxFreeSlots;
    Queue_t * const pxQ = xQueue;

    configASSERT(pxQ);

    taskENTER_CRITICAL();
    uxFreeSlots = pxQ->uxLength - pxQ->uxMessagesWaiting;
    taskEXIT_CRITICAL();

    return uxFreeSlots;
}

/**
 * Return the number of items currently held in the queue.  ISR-safe variant:
 * reads the count without entering a critical section.
 */
UBaseType_t uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)
{
    Queue_t * const pxQ = xQueue;

    configASSERT(pxQ);

    return pxQ->uxMessagesWaiting;
}

/**
 * Delete a queue, removing it from the queue registry (if the registry is
 * enabled) and returning its memory to the heap.  This build only supports
 * dynamically allocated queues, so the storage is always freed.
 */
void vQueueDelete(QueueHandle_t xQueue)
{
    Queue_t * const pxQ = xQueue;

    configASSERT(pxQ);
    traceQUEUE_DELETE(pxQ);

#if (configQUEUE_REGISTRY_SIZE > 0)
    /* Remove the queue's registry entry so the slot can be reused. */
    vQueueUnregisterQueue(pxQ);
#endif

    /* The queue can only have been allocated dynamically - free it again. */
    vPortFree(pxQ);
}


#if (configUSE_TRACE_FACILITY == 1)

    /* Return the trace number previously assigned to the queue. */
    UBaseType_t uxQueueGetQueueNumber(QueueHandle_t xQueue)
    {
        return ((Queue_t *)xQueue)->uxQueueNumber;
    }

    /* Assign a trace number to the queue for use by trace tools. */
    void vQueueSetQueueNumber( QueueHandle_t xQueue,
                               UBaseType_t uxQueueNumber )
    {
        ((Queue_t *)xQueue)->uxQueueNumber = uxQueueNumber;
    }

    /* Return the type (queue/mutex/semaphore/...) recorded at creation. */
    uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
    {
        return ((Queue_t *)xQueue)->ucQueueType;
    }

#endif /* configUSE_TRACE_FACILITY */


#if (configUSE_MUTEXES == 1)

/**
 * Return the highest priority among the tasks blocked waiting to receive
 * from (i.e. take) the mutex, or tskIDLE_PRIORITY if no task is waiting.
 *
 * If a task waiting for a mutex causes the holder to inherit a priority but
 * then times out, the holder must disinherit - though only down to the
 * highest priority of the remaining waiters, which this function computes.
 * Event lists are ordered by priority, so only the head entry is examined;
 * its item value encodes configMAX_PRIORITIES minus the task priority.
 */
static UBaseType_t prvGetDisinheritPriorityAfterTimeout(const Queue_t * const pxQueue)
{
    UBaseType_t uxPriorityOfHighestWaiter = tskIDLE_PRIORITY;

    if (listCURRENT_LIST_LENGTH(&(pxQueue->xTasksWaitingToReceive)) > 0U)
    {
        const UBaseType_t uxHeadValue =
            (UBaseType_t) listGET_ITEM_VALUE_OF_HEAD_ENTRY(&(pxQueue->xTasksWaitingToReceive));

        uxPriorityOfHighestWaiter = (UBaseType_t)configMAX_PRIORITIES - uxHeadValue;
    }

    return uxPriorityOfHighestWaiter;
}

#endif /* configUSE_MUTEXES */


/**
 * Copy an item into the queue's storage area, or - for a mutex give - release
 * the mutex.  Must be called from within a critical section.
 *
 * @param pxQueue        The queue being written to.
 * @param pvItemToQueue  The item to copy (ignored when uxItemSize is 0).
 * @param xPosition      queueSEND_TO_BACK, queueSEND_TO_FRONT or
 *                       queueOVERWRITE.
 *
 * @return For a mutex, the result of xTaskPriorityDisinherit() (pdTRUE if a
 *         context switch is required); pdFALSE otherwise.
 */
static BaseType_t prvCopyDataToQueue(Queue_t *const pxQueue,
                                      const void *pvItemToQueue,
                                      const BaseType_t xPosition)
{
    BaseType_t xReturn = pdFALSE;
    UBaseType_t uxMessagesWaiting;

    /* This function is called from a critical section. */

    uxMessagesWaiting = pxQueue->uxMessagesWaiting;

    if (pxQueue->uxItemSize == (UBaseType_t)0)
    {
        #if (configUSE_MUTEXES == 1)
        {
            if (pxQueue->uxQueueType == queueQUEUE_IS_MUTEX)
            {
                /* The mutex is no longer being held. */
                xReturn = xTaskPriorityDisinherit(pxQueue->u.xSemaphore.xMutexHolder);
                pxQueue->u.xSemaphore.xMutexHolder = NULL;
            }
        }
        #endif /* configUSE_MUTEXES */
    }
    else if (xPosition == queueSEND_TO_BACK)
    {
        /* Copy the item to the current write position at the back. */
        (void) memcpy((void *) pxQueue->pcWriteTo, pvItemToQueue, (size_t)pxQueue->uxItemSize);
        pxQueue->pcWriteTo += pxQueue->uxItemSize;

        /* The storage area is a ring buffer: wrap the write pointer back to
         * the head once it passes the end of the storage area. */
        if (pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail)
        {
            pxQueue->pcWriteTo = pxQueue->pcHead;
        }
    }
    else
    {
        /* Send-to-front (or overwrite): copy to the current read position,
         * then move the read pointer back one slot, wrapping if needed. */
        (void)memcpy((void *)pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, (size_t)pxQueue->uxItemSize);
        pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;

        if (pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead)
        {
            pxQueue->u.xQueue.pcReadFrom = (pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize);
        }

        /* Overwrite mode replaces the existing front item rather than adding. */
        if (xPosition == queueOVERWRITE)
        {
            if (uxMessagesWaiting > (UBaseType_t)0)
            {
                /**
                 * An item is not being added but overwritten, so subtract
                 * one from the recorded number of items in the queue so when
                 * one is added again below the number of recorded items remains
                 * correct.
                 */
                --uxMessagesWaiting;
            }
        }
    }

    pxQueue->uxMessagesWaiting = uxMessagesWaiting + (UBaseType_t)1;

    return xReturn;
}

/**
 * Copy the item at the queue's current read position into pvBuffer.  Does
 * nothing when the item size is zero (semaphores carry no data).  Must be
 * called from a critical section or with interrupts masked.
 */
static void prvCopyDataFromQueue(Queue_t * const pxQueue, void * const pvBuffer)
{
    if (pxQueue->uxItemSize != (UBaseType_t)0)
    {
        /* Advance to the next item before copying - pcReadFrom points at the
         * item that was read previously. */
        pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;

        /* The storage area is a ring buffer: wrap the read pointer back to
         * the head once it passes the end of the storage area. */
        if (pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail)
        {
            pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
        }

        (void)memcpy((void *)pvBuffer, (void *)pxQueue->u.xQueue.pcReadFrom, (size_t)pxQueue->uxItemSize);
    }
}


/**
 * Unlock a queue that was locked with prvLockQueue().
 *
 * While the queue was locked, ISRs may have added or removed items but were
 * not permitted to touch the event lists; instead they incremented the
 * cTxLock / cRxLock counts.  This function replays those deferred event-list
 * updates - unblocking waiting tasks as required - and then marks both locks
 * as queueUNLOCKED.
 *
 * THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.
 *
 * Bug fix: the queue-set conditional previously read
 * "#if (onfigUSE_QUEUE_SETS == 1)" - the missing 'c' made the symbol
 * undefined, so the preprocessor always evaluated it as 0 and compiled the
 * non-queue-set branch even when configUSE_QUEUE_SETS was enabled, silently
 * breaking queue-set notification for data posted while the queue was locked.
 */
static void prvUnlockQueue(Queue_t * const pxQueue)
{
    /* The lock counts contains the number of extra data items placed or
     * removed from the queue while the queue was locked.  When a queue is
     * locked items can be added or removed, but the event lists cannot be
     * updated. */
    taskENTER_CRITICAL();
    {
        int8_t cTxLock = pxQueue->cTxLock;

        /* See if data was added to the queue while it was locked. */
        while(cTxLock > queueLOCKED_UNMODIFIED)
        {
            /* Data was posted while the queue was locked.  Are any tasks
             * blocked waiting for data to become available? */
            #if (configUSE_QUEUE_SETS == 1)
            {
                if (pxQueue->pxQueueSetContainer != NULL)
                {
                    if (prvNotifyQueueSetContainer(pxQueue) != pdFALSE)
                    {
                        /* The queue is a member of a queue set, and posting to
                         * the queue set caused a higher priority task to unblock.
                         * A context switch is required. */
                        vTaskMissedYield();
                    }
                }
                else
                {
                    /* Tasks that are removed from the event list will get
                     * added to the pending ready list as the scheduler is still
                     * suspended. */
                    if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                    {
                        if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                        {
                            /* The task waiting has a higher priority so record that a
                             * context switch is required. */
                            vTaskMissedYield();
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
            #else /* configUSE_QUEUE_SETS */
            {
                /* Tasks that are removed from the event list will get added to
                 * the pending ready list as the scheduler is still suspended. */
                if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToReceive)) == pdFALSE)
                {
                    /* Remove the task from the event list; it is re-added to
                     * the (pending) ready list. */
                    if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToReceive)) != pdFALSE)
                    {
                        /* The task waiting has a higher priority so record that
                         * a context switch is required. */
                        vTaskMissedYield();
                    }
                }
                else
                {
                    break;
                }
            }
            #endif /* configUSE_QUEUE_SETS */

            --cTxLock;
        }

        pxQueue->cTxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();

    /* Do the same for the Rx lock. */
    taskENTER_CRITICAL();
    {
        int8_t cRxLock = pxQueue->cRxLock;

        while(cRxLock > queueLOCKED_UNMODIFIED)
        {
            if (listLIST_IS_EMPTY(&(pxQueue->xTasksWaitingToSend)) == pdFALSE)
            {
                if (xTaskRemoveFromEventList(&(pxQueue->xTasksWaitingToSend)) != pdFALSE)
                {
                    vTaskMissedYield();
                }

                --cRxLock;
            }
            else
            {
                break;
            }
        }

        pxQueue->cRxLock = queueUNLOCKED;
    }
    taskEXIT_CRITICAL();
}

/**
 * Determine whether the queue currently holds no items.
 *
 * @return pdTRUE if the queue is empty, pdFALSE otherwise.
 */
static BaseType_t prvIsQueueEmpty(const Queue_t * pxQueue)
{
    BaseType_t xIsEmpty = pdFALSE;

    taskENTER_CRITICAL();
    {
        /* An item count of zero means the queue is empty. */
        if (pxQueue->uxMessagesWaiting == (UBaseType_t)0)
        {
            xIsEmpty = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    return xIsEmpty;
}

/**
 * ISR-safe check for an empty queue: no critical section is entered.
 *
 * @return pdTRUE if the queue holds no items, pdFALSE otherwise.
 */
BaseType_t xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)
{
    const Queue_t * pxQ = xQueue;

    configASSERT(pxQ);

    return (pxQ->uxMessagesWaiting == (UBaseType_t)0) ? pdTRUE : pdFALSE;
}

/**
 * Determine whether the queue currently holds its maximum number of items.
 *
 * @return pdTRUE if the queue is full, pdFALSE otherwise.
 */
static BaseType_t prvIsQueueFull(const Queue_t *pxQueue)
{
    BaseType_t xIsFull = pdFALSE;

    taskENTER_CRITICAL();
    {
        /* Full when the stored item count has reached the queue's length. */
        if (pxQueue->uxMessagesWaiting == pxQueue->uxLength)
        {
            xIsFull = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    return xIsFull;
}


/**
 * ISR-safe check for a full queue: no critical section is entered.
 *
 * @return pdTRUE if the queue holds its maximum number of items, pdFALSE
 *         otherwise.
 */
BaseType_t xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)
{
    const Queue_t * pxQ = xQueue;

    configASSERT(pxQ);

    return (pxQ->uxMessagesWaiting == pxQ->uxLength) ? pdTRUE : pdFALSE;
}

#if (configQUEUE_REGISTRY_SIZE > 0)

/**
 * Add (or update) an entry for xQueue in the xQueueRegistry array.
 *
 * If the queue is already registered its existing slot is reused; otherwise
 * the first free slot (pcQueueName == NULL) is taken.  Nothing happens when
 * pcQueueName is NULL or the registry is full.
 */
void vQueueAddToRegistry(QueueHandle_t xQueue, const char *pcQueueName)
{
    QueueRegistryItem_t * pxSlot = NULL;

    configASSERT(xQueue);

    if (pcQueueName != NULL)
    {
        UBaseType_t uxIndex;

        /* Scan the registry: prefer the queue's existing entry; otherwise
         * remember the first free slot seen (a NULL name marks a free slot). */
        for (uxIndex = (UBaseType_t)0U; uxIndex < (UBaseType_t)configQUEUE_REGISTRY_SIZE; uxIndex++)
        {
            if (xQueueRegistry[uxIndex].xHandle == xQueue)
            {
                /* Already registered - overwrite this entry. */
                pxSlot = &(xQueueRegistry[uxIndex]);
                break;
            }

            if ((pxSlot == NULL) && (xQueueRegistry[uxIndex].pcQueueName == NULL))
            {
                pxSlot = &(xQueueRegistry[uxIndex]);
            }
        }
    }

    if (pxSlot != NULL)
    {
        /* Store the information on this queue. */
        pxSlot->pcQueueName = pcQueueName;
        pxSlot->xHandle = xQueue;
        traceQUEUE_REGISTRY_ADD(xQueue, pcQueueName);
    }
}

/**
 * Look up the registered name of a queue.
 *
 * @return The name stored in the registry for xQueue, or NULL if the queue
 *         is not registered.  NOTE: the registry is searched without locking,
 *         so concurrent add/remove operations are not protected against.
 */
const char * pcQueueGetName(QueueHandle_t xQueue)
{
    UBaseType_t uxIndex;

    configASSERT(xQueue);

    for (uxIndex = (UBaseType_t)0U; uxIndex < (UBaseType_t)configQUEUE_REGISTRY_SIZE; uxIndex++)
    {
        if (xQueueRegistry[uxIndex].xHandle == xQueue)
        {
            return xQueueRegistry[uxIndex].pcQueueName;
        }
    }

    return NULL;
}

/**
 * Remove xQueue's entry from the xQueueRegistry array, if present.
 *
 * Both the name and the handle are cleared: a NULL name marks the slot as
 * free, and clearing the handle ensures the same queue cannot appear twice
 * if it is later re-registered.
 */
void vQueueUnregisterQueue(QueueHandle_t xQueue)
{
    UBaseType_t uxIndex;

    configASSERT( xQueue );

    for (uxIndex = (UBaseType_t)0U; uxIndex < (UBaseType_t)configQUEUE_REGISTRY_SIZE; uxIndex++)
    {
        if (xQueueRegistry[uxIndex].xHandle != xQueue)
        {
            continue;
        }

        /* Mark the slot free and forget the handle. */
        xQueueRegistry[uxIndex].pcQueueName = NULL;
        xQueueRegistry[uxIndex].xHandle = (QueueHandle_t)0;
        break;
    }
}

#endif /* configQUEUE_REGISTRY_SIZE */


#if ( configUSE_TIMERS == 1 )

/* Place the calling task on the queue's "waiting to receive" event list if
 * the queue is currently empty.  Kernel-internal helper (hence 'Restricted');
 * not part of the public API.  NOTE(review): the comments below indicate it
 * must be called with the scheduler locked and NOT from a critical section. */
void vQueueWaitForMessageRestricted(QueueHandle_t xQueue,
                                     TickType_t xTicksToWait,
                                     const BaseType_t xWaitIndefinitely)
{
    Queue_t * const pxQueue = xQueue;
    /* This function should not be called by application code hence the
     * 'Restricted' in its name.  It is not part of the public API.  It is
     * designed for use by kernel code, and has special calling requirements.
     * It can result in vListInsert() being called on a list that can only
     * possibly ever have one item in it, so the list will be fast, but even
     * so it should be called with the scheduler locked and not from a critical
     * section. */
    /* Only do anything if there are no messages in the queue.  This function
     *  will not actually cause the task to block, just place it on a blocked
     *  list.  It will not block until the scheduler is unlocked - at which
     *  time a yield will be performed.  If an item is added to the queue while
     *  the queue is locked, and the calling task blocks on the queue, then the
     *  calling task will be immediately unblocked when the queue is unlocked. */
    prvLockQueue(pxQueue);
    if (pxQueue->uxMessagesWaiting == (UBaseType_t) 0U)
    {
        /* There is nothing in the queue, block for the specified period. */
        vTaskPlaceOnEventListRestricted(&(pxQueue->xTasksWaitingToReceive), xTicksToWait, xWaitIndefinitely);
    }
    prvUnlockQueue(pxQueue);
}

#endif /* configUSE_TIMERS */


#if (configUSE_QUEUE_SETS == 1)

/**
 * Create a queue set.  A queue set is implemented as an ordinary queue whose
 * items are the handles of member queues/semaphores that contain data, hence
 * the item size of sizeof( Queue_t * ).
 */
QueueSetHandle_t xQueueCreateSet(const UBaseType_t uxEventQueueLength)
{
    return xQueueGenericCreate(uxEventQueueLength,
                               (UBaseType_t) sizeof(Queue_t *),
                               queueQUEUE_TYPE_SET);
}

/**
 * Add a queue or semaphore to a queue set.
 *
 * Returns pdPASS on success, or pdFAIL if the member already belongs to a
 * set, or already holds data (in which case the set would never learn of
 * those pre-existing items).
 */
BaseType_t xQueueAddToSet(QueueSetMemberHandle_t xQueueOrSemaphore,
                           QueueSetHandle_t xQueueSet)
{
    BaseType_t xReturn = pdFAIL;
    Queue_t * const pxMember = (Queue_t *) xQueueOrSemaphore;

    taskENTER_CRITICAL();
    {
        if (pxMember->pxQueueSetContainer != NULL)
        {
            /* Cannot add a queue/semaphore to more than one queue set. */
        }
        else if (pxMember->uxMessagesWaiting != (UBaseType_t) 0)
        {
            /* Cannot add a queue/semaphore to a queue set if there are
             * already items in the queue/semaphore. */
        }
        else
        {
            pxMember->pxQueueSetContainer = xQueueSet;
            xReturn = pdPASS;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}

/**
 * Remove a queue or semaphore from a queue set.
 *
 * Returns pdPASS on success, or pdFAIL if the member is not part of the
 * given set, or is not empty (the set would still hold pending events for
 * it, which would be dangerous).
 */
BaseType_t xQueueRemoveFromSet(QueueSetMemberHandle_t xQueueOrSemaphore,
                                QueueSetHandle_t xQueueSet)
{
    BaseType_t xReturn = pdFAIL;
    Queue_t * const pxMember = (Queue_t *) xQueueOrSemaphore;

    if (pxMember->pxQueueSetContainer != xQueueSet)
    {
        /* The queue was not a member of the set. */
    }
    else if (pxMember->uxMessagesWaiting != (UBaseType_t) 0)
    {
        /* It is dangerous to remove a queue from a set when the queue is
         * not empty because the queue set will still hold pending events
         * for the queue. */
    }
    else
    {
        taskENTER_CRITICAL();
        {
            /* The queue is no longer contained in the set. */
            pxMember->pxQueueSetContainer = NULL;
        }
        taskEXIT_CRITICAL();
        xReturn = pdPASS;
    }

    return xReturn;
}

/**
 * Block for up to xTicksToWait until a member of the set contains data,
 * then return that member's handle, or NULL on timeout.
 */
QueueSetMemberHandle_t xQueueSelectFromSet(QueueSetHandle_t xQueueSet,
                                            TickType_t const xTicksToWait)
{
    QueueSetMemberHandle_t xMemberWithData = NULL;

    /* The set's event queue stores the handles of members that contain data,
     * so receiving from it yields the next ready member (if any). */
    (void) xQueueReceive((QueueHandle_t) xQueueSet, &xMemberWithData, xTicksToWait);

    return xMemberWithData;
}

/**
 * ISR-safe version of xQueueSelectFromSet(): return the handle of a member
 * of the set that contains data, or NULL if none is ready.  Never blocks.
 */
QueueSetMemberHandle_t xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet)
{
    QueueSetMemberHandle_t xMemberWithData = NULL;

    /* No higher-priority-task-woken reporting is needed here, so NULL is
     * passed for the pxHigherPriorityTaskWoken parameter. */
    (void) xQueueReceiveFromISR((QueueHandle_t) xQueueSet, &xMemberWithData, NULL);

    return xMemberWithData;
}

/* Post the handle of pxQueue (a set member that just received data) into the
 * queue set that contains it, and wake a task blocked on the set if there is
 * one.  Returns pdTRUE if the woken task has a higher priority (a context
 * switch should be requested), otherwise pdFALSE. */
static BaseType_t prvNotifyQueueSetContainer(const Queue_t *const pxQueue)
{
    Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
    BaseType_t xReturn = pdFALSE;
    /* This function must be called form a critical section. */
    /* The following line is not reachable in unit tests because every call
     * to prvNotifyQueueSetContainer is preceded by a check that
     * pxQueueSetContainer != NULL */
    configASSERT(pxQueueSetContainer); /* LCOV_EXCL_BR_LINE */
    configASSERT(pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength);
    if (pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength)
    {
        /* Capture the set's tx lock state before modifying the set. */
        const int8_t cTxLock = pxQueueSetContainer->cTxLock;
        traceQUEUE_SET_SEND(pxQueueSetContainer);
        /* The data copied is the handle of the queue that contains data. */
        xReturn = prvCopyDataToQueue(pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK);
        if (cTxLock == queueUNLOCKED)
        {
            /* The set is not locked: a waiting receiver (if any) can be
             * unblocked immediately. */
            if (listLIST_IS_EMPTY(&(pxQueueSetContainer->xTasksWaitingToReceive)) == pdFALSE)
            {
                if (xTaskRemoveFromEventList(&(pxQueueSetContainer->xTasksWaitingToReceive)) != pdFALSE)
                {
                    /* The task waiting has a higher priority. */
                    xReturn = pdTRUE;
                }
            }
        }
        else
        {
            /* The set is locked: record the send so the unblocking is
             * performed when the set's queue is unlocked. */
            prvIncrementQueueTxLock(pxQueueSetContainer, cTxLock);
        }
    }
    return xReturn;
}

#endif /* configUSE_QUEUE_SETS */
