/*
	File:    	APSRTPJitterBuffer.c
	Package: 	Apple CarPlay Communication Plug-in.
	Abstract: 	n/a
	Version: 	450.14

	Disclaimer: IMPORTANT: This Apple software is supplied to you, by Apple Inc. ("Apple"), in your
	capacity as a current, and in good standing, Licensee in the MFi Licensing Program. Use of this
	Apple software is governed by and subject to the terms and conditions of your MFi License,
	including, but not limited to, the restrictions specified in the provision entitled ”Public
	Software”, and is further subject to your agreement to the following additional terms, and your
	agreement that the use, installation, modification or redistribution of this Apple software
	constitutes acceptance of these additional terms. If you do not agree with these additional terms,
	please do not use, install, modify or redistribute this Apple software.

	Subject to all of these terms and in consideration of your agreement to abide by them, Apple grants
	you, for as long as you are a current and in good-standing MFi Licensee, a personal, non-exclusive
	license, under Apple's copyrights in this original Apple software (the "Apple Software"), to use,
	reproduce, and modify the Apple Software in source form, and to use, reproduce, modify, and
	redistribute the Apple Software, with or without modifications, in binary form. While you may not
	redistribute the Apple Software in source form, should you redistribute the Apple Software in binary
	form, you must retain this notice and the following text and disclaimers in all such redistributions
	of the Apple Software. Neither the name, trademarks, service marks, or logos of Apple Inc. may be
	used to endorse or promote products derived from the Apple Software without specific prior written
	permission from Apple. Except as expressly stated in this notice, no other rights or licenses,
	express or implied, are granted by Apple herein, including but not limited to any patent rights that
	may be infringed by your derivative works or by other works in which the Apple Software may be
	incorporated.

	Unless you explicitly state otherwise, if you provide any ideas, suggestions, recommendations, bug
	fixes or enhancements to Apple in connection with this software (“Feedback”), you hereby grant to
	Apple a non-exclusive, fully paid-up, perpetual, irrevocable, worldwide license to make, use,
	reproduce, incorporate, modify, display, perform, sell, make or have made derivative works of,
	distribute (directly or indirectly) and sublicense, such Feedback in connection with Apple products
	and services. Providing this Feedback is voluntary, but if you do provide Feedback to Apple, you
	acknowledge and agree that Apple may exercise the license granted above without the payment of
	royalties or further consideration to Participant.

	The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO WARRANTIES, EXPRESS OR
	IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY
	AND FITNESS FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR
	IN COMBINATION WITH YOUR PRODUCTS.

	IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES
	(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
	PROFITS; OR BUSINESS INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION
	AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, TORT
	(INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
	POSSIBILITY OF SUCH DAMAGE.

	Copyright (C) 2017 Apple Inc. All Rights Reserved. Not to be used or disclosed without permission from Apple.
*/

#include "APSRTPJitterBuffer.h"
#include "AirPlayCommon.h"

#include CF_RUNTIME_HEADER
#include LIBDISPATCH_HEADER

#include "AudioConverter.h"
#include "CommonServices.h"
#include "DebugServices.h"
#include "LogUtils.h"
#include "MiscUtils.h"
#include "ThreadUtils.h"
#include "TickUtils.h"
#include "SimpleQueue.h"


//===========================================================================================================================
//	Logging
//===========================================================================================================================
#if 0
#pragma mark ==Logging==
#endif

// Log category for this module plus a convenience macro that logs against it.
ulog_define( APSRTPJitterBuffer, kLogLevelNotice, kLogFlags_Default, "APSRTPJitterBuffer", NULL );
#define ap_ulog( LEVEL, ... )					ulog( &log_category_from_name( APSRTPJitterBuffer ), ( LEVEL ), __VA_ARGS__ )

//===========================================================================================================================
//	Macros
//===========================================================================================================================
#if 0
#pragma mark ==Macros==
#endif

// Payload bytes of a saved RTP packet: total length minus the RTP header.
// Bug fix: the previous form computed (len - kRTPHeaderSize) for ANY nonzero len, so a malformed
// packet shorter than the header underflowed to a huge uint32_t. Also parenthesize the argument.
#define PAYLOAD_SIZE(PKT)		( (uint32_t)( ( (PKT)->len > kRTPHeaderSize ) ? ( (PKT)->len - kRTPHeaderSize ) : 0 ) )

#define kAPSRTPJitterBuffer_PacketCountMultiplier	2
#define kAPSRTPJitterBuffer_SizeMultiplier			20 //45 //20


//===========================================================================================================================
//	Internals
//===========================================================================================================================
#if 0
#pragma mark ==Structs==
#endif

// Private state for an APSRTPJitterBuffer CF object.  Three cooperating contexts touch this structure:
// the Discard/PutPacket APIs (network side), the optional decode thread, and the Read API (renderer
// side).  The per-field comments note which context reads and which writes each field.
struct APSRTPJitterBufferPrivate
{
	CFRuntimeBase						base;									// CFRuntimeBase: must be first

	const char *						label;									// Label to use when logging

	APSRTPSavedPacket *					packets;								// Used for allocation tracking only
	uint32_t							packetCount;							// Number of allocated packets
	SimpleQueueRef						freeQueue;								// Reader = GetPacket API, Writer = Decode thread
	SimpleQueueRef						busyQueue;								// Reader = Decode thread, Writer = Discard/PutPacket API
	dispatch_semaphore_t				packetsAvailableSemaphore;				// Waiter = Decode thread, Signaller = Discard/PutPacket API

	AudioStreamBasicDescription			inputFormat;							// Format of enqueued packets
	AudioStreamBasicDescription			outputFormat;							// Format of read packets
	uint32_t							maxSamplesPerPacket;					// Maximum number of samples per RTP packet
	uint32_t							targetSizeSamples;						// Target buffer size in samples
	uint32_t							targetSizeMS;							// Target buffer size in milliseconds

	Boolean								legacyBufferingMode;					// True if client requested legacy buffering mode (false for fixed latency)
	Boolean								legacyActivelyBuffering;				// True when we are buffering in legacy mode
	uint64_t							legacyBufferingStartTicks;				// Time (upticks) when we entered buffering mode
	Boolean								legacyBufferingDiscardExcess;			// True if excess data should be discarded on the next read
	uint32_t							legacyBufferingDiscardThresholdSamples;	// Buffered sample count at which a discard excess should be triggered
	int32_t								legacyBufferingDiscardTargetSamples;	// Buffered sample count to discard down to

	MirroredRingBuffer					outputRingBuffer;						// Ring buffer for decoded samples
	uint32_t							outputRingBufferSizeBytes;				// Size of the ring buffer in bytes
	uint32_t							outputBufferReadSampleTime;				// Current read time in samples, Writer = Read API
	uint32_t							outputBufferWriteSampleTime;			// Current write time in samples, Writer = Decode thread
	int32_t								outputBufferWriteSampleTimeOffset;		// Offset applied to force buffering to be at least targetSize.

	uint32_t							busyQueueNextExpectedSampleTime;		// Expected sample time of the head packet, Reader/Writer = Decode thread
	uint32_t							busyQueueLastSampleTime;				// Sample time one past the end of the most recently enqueued packet, Writer = PutPacket API

	AudioConverterRef					decoder;								// Decoder instance if input and output formats differ
	pthread_t							decodeThread;							// Thread on which decode or copying occurs
	pthread_t *							decodeThreadPtr;						// Non-NULL when decodeThread is valid (doubles as a "thread started" flag)
	Boolean								decodeStarted;							// True once the first packet has been decoded / copied
	Boolean								decodeDone;								// Decode thread termination indicator
	Boolean								inUnderRun;								// Under-run condition indicator

	//Statistics
	APSRTPJitterBufferStatistics		stats;
};

// Per-call context handed to the AudioConverter input callbacks while decoding/concealing one packet.
typedef struct
{
	APSRTPJitterBufferRef				jitterBuffer;							// Jitter buffer
	APSRTPSavedPacket *					packet;									// Packet to decode (NULL when concealing a missing packet)
	AudioStreamPacketDescription		packetDesription;						// Description of packet to decode (NOTE: field name typo "Desription" is kept -- both input callbacks reference it)
	Boolean								packetConsumed;							// True on output if packet was consumed by the input proc

} APSRTPJitterBufferDecodeContext;


#if 0
#pragma mark ==Functions==
#endif

static void	_APSRTPJitterBufferFinalize( CFTypeRef inCF );

//===========================================================================================================================
//	Utilities
//===========================================================================================================================

//Note: These work only when inBuffer is defined and valid! Just to make code a little cleaner.
// (They expand against a local/parameter literally named "inBuffer" and use the OUTPUT format's bytes-per-frame.)

#define SAMPLES_TO_BYTES( SAMPLES )			( (SAMPLES) * inBuffer->outputFormat.mBytesPerFrame )
#define BYTES_TO_SAMPLES( BYTES )			( (BYTES) / inBuffer->outputFormat.mBytesPerFrame )

// Converts a sample count to milliseconds using the stream's sample rate.
// Bug fix: the previous 32-bit form could overflow on "inSampleCount * 1000" for large counts, and
// dividing the signed product by a uint32_t promoted the whole expression to unsigned, producing
// garbage for negative sample counts.  Widen to int64_t before multiplying/dividing.
static inline int32_t _APSRTPJitterBufferSamplesToMS( int32_t inSampleCount, const AudioStreamBasicDescription *inDescription )
{
	return (int32_t)( ( (int64_t) inSampleCount * 1000 ) / (int64_t)(uint32_t) inDescription->mSampleRate );
}

// Converts milliseconds to a sample count using the stream's sample rate.
// Bug fix: the previous form multiplied a signed int by a uint32_t, silently switching to unsigned
// arithmetic (garbage for negative inputs) and overflowing for durations beyond ~48s at 44.1 kHz.
// Widen to int64_t for the intermediate product.
static inline int32_t _APSRTPJitterBufferMSToSamples( int32_t inMilliseconds, const AudioStreamBasicDescription *inDescription )
{
	return (int32_t)( ( (int64_t) inMilliseconds * (int64_t)(uint32_t) inDescription->mSampleRate ) / 1000 );
}



//===========================================================================================================================
//	_APSRTPJitterBufferDecodeThreadNeeded
//===========================================================================================================================

static Boolean _APSRTPJitterBufferDecodeThreadNeeded( APSRTPJitterBufferRef inBuffer )
{
	// A decode thread (and audio converter) is only required when the input format must be
	// transcoded into the output format; identical format IDs mean a straight PCM copy.
	Boolean const formatsMatch = ( inBuffer->inputFormat.mFormatID == inBuffer->outputFormat.mFormatID );

	return( !formatsMatch );
}

//===========================================================================================================================
//	_APSRTPJitterBufferConfigurePacketQueues
//===========================================================================================================================

static OSStatus _APSRTPJitterBufferConfigurePacketQueues( APSRTPJitterBufferRef inBuffer )
{
	OSStatus		err	= kNoErr;
	uint32_t		i;

	// Determine the largest number of samples a single RTP packet can carry.
	// PCM packets are assumed to fill the maximum UDP payload; compressed packets carry exactly one
	// encoded packet's worth of frames.

	if( inBuffer->inputFormat.mFormatID == kAudioFormatLinearPCM )
	{
		inBuffer->maxSamplesPerPacket = kAirTunesMaxPayloadSizeUDP / inBuffer->inputFormat.mBytesPerPacket;
	}
	else
	{
		inBuffer->maxSamplesPerPacket = inBuffer->inputFormat.mFramesPerPacket;
	}
	require_action( inBuffer->maxSamplesPerPacket > 0, bail, err = kParamErr );

	// A packet queue is only useful when a decode thread exists; otherwise a single packet suffices.
	// With a decode thread, size the pool at kAPSRTPJitterBuffer_PacketCountMultiplier times the target
	// buffer size (headroom for jitter bursts), plus one packet to round up the integer division.

	if( _APSRTPJitterBufferDecodeThreadNeeded( inBuffer ) )
	{
		uint32_t const targetPoolSamples = kAPSRTPJitterBuffer_PacketCountMultiplier * inBuffer->targetSizeSamples;

		inBuffer->packetCount = ( targetPoolSamples / inBuffer->maxSamplesPerPacket ) + 1;
	}
	else
	{
		inBuffer->packetCount = 1;
	}
	require_action( inBuffer->packetCount > 0, bail, err = kParamErr );

	// Allocate the packet pool.

	inBuffer->packets = calloc( inBuffer->packetCount, sizeof( *inBuffer->packets ) );
	require_action( inBuffer->packets, bail, err = kNoMemoryErr );

	ap_ulog( kLogLevelInfo, "'%s' Allocated %u packets\n", inBuffer->label, inBuffer->packetCount );

	// Create the busy/free queues and the semaphore the decode thread blocks on.

	err = SimpleQueueCreate( NULL, (int32_t) inBuffer->packetCount, &inBuffer->busyQueue );
	require_noerr( err, bail );

	err = SimpleQueueCreate( NULL, (int32_t) inBuffer->packetCount, &inBuffer->freeQueue );
	require_noerr( err, bail );

	inBuffer->packetsAvailableSemaphore = dispatch_semaphore_create( 0 );
	require_action( inBuffer->packetsAvailableSemaphore, bail, err = kNoMemoryErr );

	// Every packet starts out on the free queue.

	for( i = 0; i < inBuffer->packetCount; ++i )
	{
		SimpleQueueEnqueue( inBuffer->freeQueue, &inBuffer->packets[ i ] );
	}

bail:
	return( err );
}

//===========================================================================================================================
//	_APSRTPJitterBufferConfigureOutput
//===========================================================================================================================

static void* _APSRTPJitterBufferDecodeThreadEntry( void *inCtx );

// Validates the input/output format pairing, allocates the decoded-sample ring buffer, and -- when
// transcoding is required -- creates the AudioConverter and starts the decode thread.  Must run after
// _APSRTPJitterBufferConfigurePacketQueues (the decode thread consumes the queues built there).
static OSStatus _APSRTPJitterBufferConfigureOutput( APSRTPJitterBufferRef inBuffer )
{
	OSStatus		err				= kNoErr;

	// Sample rate and channel count must match: the converter only transcodes the packet format,
	// it does not resample or remap channels.

	require_action( (uint64_t)inBuffer->inputFormat.mSampleRate == (uint64_t)inBuffer->outputFormat.mSampleRate, bail, err = kParamErr );
	require_action( inBuffer->inputFormat.mChannelsPerFrame == inBuffer->outputFormat.mChannelsPerFrame, bail, err = kParamErr );
	// Either the format ID must be different (transcode case) or the descriptions must be byte-identical (copy case)

	require_action( inBuffer->inputFormat.mFormatID != inBuffer->outputFormat.mFormatID ||
		memcmp( &inBuffer->inputFormat, &inBuffer->outputFormat, sizeof( inBuffer->inputFormat ) ) == 0, bail, err = kParamErr );
	// Allocate the output ring buffer (Using kAPSRTPJitterBuffer_SizeMultiplier to allow headroom for jitter bursts)

	inBuffer->outputRingBufferSizeBytes = (kAPSRTPJitterBuffer_SizeMultiplier * SAMPLES_TO_BYTES( inBuffer->targetSizeSamples ) );
	err = MirroredRingBufferInit( &inBuffer->outputRingBuffer, inBuffer->outputRingBufferSizeBytes, true );
	require_noerr( err, bail );

	ap_ulog( kLogLevelInfo, "'%s' Allocated ring buffer of size: %u bytes / %u samples\n",
		inBuffer->label, inBuffer->outputRingBufferSizeBytes, BYTES_TO_SAMPLES( inBuffer->outputRingBufferSizeBytes ) );

	// Set up an audio converter and its decode thread when transcoding is needed.
	// NOTE(review): pthread_create() returns an errno-style int which is stored into an OSStatus; any
	// nonzero value still reads as failure, so the require_noerr() check remains effective.

	if( _APSRTPJitterBufferDecodeThreadNeeded(inBuffer) )
	{
		err = AudioConverterNew( &inBuffer->inputFormat, &inBuffer->outputFormat, &inBuffer->decoder );
		require_noerr( err, bail );

		// Set up the decode thread; decodeThreadPtr is only set afterwards, so it doubles as a
		// "thread is valid and must be joined" flag for _APSRTPJitterBufferFinalize.
		err = pthread_create( &inBuffer->decodeThread, NULL, _APSRTPJitterBufferDecodeThreadEntry, inBuffer );
		require_noerr( err, bail );
		inBuffer->decodeThreadPtr = &inBuffer->decodeThread;
	}

bail:
	return( err );
}

static void _APSRTPJitterBufferGetTypeID( void *inCtx )
{
	static const CFRuntimeClass		kAPSRTPJitterBufferClass =
	{
		0,											// version
		"APSRTPJitterBuffer",						// className
		NULL,										// init
		NULL,										// copy
		_APSRTPJitterBufferFinalize,				// finalize
		NULL,										// equal -- NULL means pointer equality.
		NULL,										// hash  -- NULL means pointer hash.
		NULL,										// copyFormattingDesc
		NULL,										// copyDebugDesc
		NULL,										// reclaim
		NULL										// refcount
	};

	*( (CFTypeID*) inCtx ) = _CFRuntimeRegisterClass( &kAPSRTPJitterBufferClass );
}

static CFTypeID APSRTPJitterBufferGetTypeID( void )
{
	// Lazily register the CF class on first use; dispatch_once makes this thread-safe.
	static dispatch_once_t		sRegisterOnce	= 0;
	static CFTypeID				sTypeID			= _kCFRuntimeNotATypeID;

	dispatch_once_f( &sRegisterOnce, &sTypeID, _APSRTPJitterBufferGetTypeID );
	return( sTypeID );
}

//===========================================================================================================================
//	APSRTPJitterBufferCreate
//===========================================================================================================================

APSRTPJitterBufferRef
	APSRTPJitterBufferCreate(
		CFAllocatorRef							inAllocator,
		const AudioStreamBasicDescription *		inInputFormat,
		const AudioStreamBasicDescription *		inOutputFormat,
		uint32_t								inTargetSizeMS,
		APSRTPJitterBufferOptions				inOptions,
		const char *							inLoggingLabel,
		OSStatus *								outErr )
{
	OSStatus					err	= kNoErr;
	size_t						len	= 0;
	APSRTPJitterBufferRef		me	= NULL;

	(void)inAllocator;	// Instances are always created with the default allocator.

	require_action( inInputFormat && inOutputFormat, bail, err = kParamErr );
	require_action( inOutputFormat->mFormatID == kAudioFormatLinearPCM, bail, err = kParamErr );
	require_action( inTargetSizeMS > 0, bail, err = kParamErr );

	// Create the CF instance and zero everything after the CFRuntimeBase header.

	len = sizeof( *me ) - sizeof( me->base );
	me = (APSRTPJitterBufferRef) _CFRuntimeCreateInstance( NULL, APSRTPJitterBufferGetTypeID(), (CFIndex) len, NULL );
	require_action( me, bail, err = kNoMemoryErr );
	memset( ( (uint8_t *) me ) + sizeof( me->base ), 0, len );

	me->inputFormat = *inInputFormat;
	me->outputFormat = *inOutputFormat;
	me->targetSizeMS = inTargetSizeMS;
	me->targetSizeSamples = _APSRTPJitterBufferMSToSamples( inTargetSizeMS, &me->outputFormat );
	me->inUnderRun = false;
	me->outputBufferWriteSampleTimeOffset = 0;

	if( inOptions & kAPSRTPJitterBufferOption_LegacyBuffering )
	{
		me->legacyBufferingMode = true;
		me->legacyActivelyBuffering = true;
	}

	//Initialize statistics

	me->stats.latePacketCount	= 0;
	me->stats.lostPacketCount	= 0;
	me->stats.overrunCount		= 0;
	me->stats.underrunCount		= 0;

	me->label = inLoggingLabel;

	ap_ulog( kLogLevelNotice,
		"Creating jitter buffer '%s' for input format: %x, target size: %u ms / %u samples, legacy buffering mode: %s\n",
		inLoggingLabel, inInputFormat->mFormatID, inTargetSizeMS, me->targetSizeSamples, me->legacyBufferingMode ? "Y" : "N" );

	err = _APSRTPJitterBufferConfigurePacketQueues( me );
	require_noerr( err, bail );
	err = _APSRTPJitterBufferConfigureOutput( me );
	require_noerr( err, bail );
	if( me->legacyBufferingMode )
	{
		// These values are based on the old "RTPJitterBuffer" implementation,
		//  which we are trying to preserve the behavior of with "legacyBufferingMode"
		//  N.B. - These calculations are dependent upon "ConfigurePacketQueues" and "ConfigureOutput"
		//  having already been called (they use the ring buffer size computed there).
        // 0.9
		me->legacyBufferingDiscardThresholdSamples = (uint32_t) ( 0.9 * me->outputRingBufferSizeBytes / me->outputFormat.mBytesPerFrame );
		me->legacyBufferingDiscardTargetSamples = (int32_t)_APSRTPJitterBufferMSToSamples( 10 * inTargetSizeMS, &me->outputFormat );
		check( me->legacyBufferingDiscardThresholdSamples > 0 && me->legacyBufferingDiscardTargetSamples > 0 );

		// Bug fix: these fields are uint32_t / int32_t, so use %u / %d.  The previous %lu specifiers are
		// undefined behavior on LP64 targets, where long is 64 bits wide.
		ap_ulog( kLogLevelNotice, "'%s' Setting discard threshold / target sample counts to: %u / %d\n", me->label,
			me->legacyBufferingDiscardThresholdSamples, me->legacyBufferingDiscardTargetSamples );
	}

bail:
	// NOTE(review): on failure "me" may be returned non-NULL (possibly partially constructed) with *outErr
	// set; callers appear responsible for releasing it -- confirm the finalizer tolerates partial setup.
	if( outErr )
	{
		*outErr = err;
	}

	return( me );
}

//===========================================================================================================================
//	_APSRTPJitterBufferFinalize
//===========================================================================================================================

//===========================================================================================================================
//	_APSRTPJitterBufferFinalize
//
//	CF finalizer: stops the decode thread, logs statistics, and tears down the converter, ring buffer,
//	packet pool, queues, and semaphore.  Must tolerate a partially constructed instance, since
//	APSRTPJitterBufferCreate can fail midway and still hand the object back to the caller to release.
//===========================================================================================================================

static void	_APSRTPJitterBufferFinalize( CFTypeRef inCF )
{
	APSRTPJitterBufferRef const		me = (APSRTPJitterBufferRef) inCF;

	// Stop the decode thread (decodeThreadPtr is only non-NULL once the thread was successfully created)

	if( me->decodeThreadPtr )
	{
		me->decodeDone = true;
		dispatch_semaphore_signal( me->packetsAvailableSemaphore );
		pthread_join( me->decodeThread, NULL );
		me->decodeThreadPtr = NULL;
	}

	// Log statistics

	ap_ulog( kLogLevelNotice, "'%s' Statistics:\n\tUnderruns: %u\n\tOverruns: %u\n\tGaps: %u\n",
		me->label, me->stats.underrunCount, me->stats.overrunCount, me->stats.lostPacketCount );

	// Clean up the audio converter

	AudioConverterForget( &me->decoder );

	// Clean up the output buffer

	MirroredRingBufferFree( &me->outputRingBuffer );

	// Clean up the packets / queues.
	// Bug fix: creation can fail before the queues/semaphore are allocated, and CFRelease( NULL )
	// crashes, so NULL-check each object before releasing it.

	ForgetMem( &me->packets );
	if( me->busyQueue )
	{
		CFRelease( me->busyQueue );
		me->busyQueue = NULL;
	}
	if( me->freeQueue )
	{
		CFRelease( me->freeQueue );
		me->freeQueue = NULL;
	}
	if( me->packetsAvailableSemaphore )
	{
		dispatch_forget( &me->packetsAvailableSemaphore );
	}
}



//===========================================================================================================================
//	_APSRTPJitterBufferDecodeInputCallback
//	See <http://developer.apple.com/library/mac/#qa/qa2001/qa1317.html> for AudioConverterFillComplexBuffer callback details.
//===========================================================================================================================

// AudioConverter input proc: supplies the RTP payload of the saved packet (one compressed packet per
// call) and marks it consumed.  Returns kUnderrunErr when no packet (or an empty one) is available.
// See <http://developer.apple.com/library/mac/#qa/qa2001/qa1317.html> for callback details.
static OSStatus
	_APSRTPJitterBufferDecodeInputCallback(
		AudioConverterRef					inAudioConverter,
		UInt32 *							ioNumberDataPackets,
		AudioBufferList *					ioData,
		AudioStreamPacketDescription **		outDataPacketDescription,
		void *								inCtx )
{
	OSStatus								err			= kNoErr;
	Boolean									provided	= false;	// True once ioData has actually been filled in.
	APSRTPJitterBufferDecodeContext *		ctx			= (APSRTPJitterBufferDecodeContext*) inCtx;

	(void) inAudioConverter;

	require_quiet( *ioNumberDataPackets > 0, bail );
	require_action_quiet( ctx->packet && ctx->packet->len > 0, bail, err = kUnderrunErr );

	// Point the converter directly at the packet's RTP payload.

	ioData->mBuffers[ 0 ].mData = ctx->packet->pkt.rtp.payload;
	ioData->mBuffers[ 0 ].mDataByteSize = (UInt32) PAYLOAD_SIZE(ctx->packet);
	ioData->mBuffers[ 0 ].mNumberChannels = ctx->jitterBuffer->inputFormat.mChannelsPerFrame;

	if( outDataPacketDescription )
	{
		ctx->packetDesription.mStartOffset = 0;
		ctx->packetDesription.mVariableFramesInPacket = 0;
		ctx->packetDesription.mDataByteSize = (UInt32) PAYLOAD_SIZE(ctx->packet);
		*outDataPacketDescription = &ctx->packetDesription;
	}

	// Zero the length so a subsequent invocation in the same decode pass reports underrun instead of
	// feeding the same payload twice.

	ctx->packet->len = 0;
	ctx->packetConsumed = true;
	provided = true;

bail:
	// Bug fix: only claim a packet was supplied when ioData was actually filled.  The previous
	// "( err == kNoErr ) ? 1 : 0" reported 1 packet on the zero-packets-requested early-out
	// (err was still kNoErr there), leaving ioData unset.
	*ioNumberDataPackets = provided ? 1 : 0;

	return( err );
}

//===========================================================================================================================
//	_APSRTPJitterBufferNullDecodeInputCallback
//===========================================================================================================================

// AudioConverter input proc used for packet-loss concealment: supplies a zero-length "packet" so the
// decoder generates concealed output for a missing packet.  ctx->packet must be NULL in this mode.
static OSStatus
	_APSRTPJitterBufferNullDecodeInputCallback(
		AudioConverterRef					inAudioConverter,
		UInt32 *							ioNumberDataPackets,
		AudioBufferList *					ioData,
		AudioStreamPacketDescription **		outDataPacketDescription,
		void *								inCtx )
{
	OSStatus								err			= kNoErr;
	Boolean									provided	= false;	// True once ioData has actually been filled in.
	APSRTPJitterBufferDecodeContext *		ctx			= (APSRTPJitterBufferDecodeContext*) inCtx;

	(void) inAudioConverter;

	require_quiet( *ioNumberDataPackets > 0, bail );
	require_action_quiet( ctx->packet == NULL, bail, err = kUnderrunErr );

	// Inject null data in place of missing packets
	// (static so the pointer remains valid after this callback returns; byte size is 0 so it is never read)

	static short missingData[2];

	ioData->mBuffers[ 0 ].mData = missingData;
	ioData->mBuffers[ 0 ].mDataByteSize = 0;
	ioData->mBuffers[ 0 ].mNumberChannels = ctx->jitterBuffer->inputFormat.mChannelsPerFrame;

	if( outDataPacketDescription )
	{
		ctx->packetDesription.mStartOffset = 0;
		ctx->packetDesription.mVariableFramesInPacket = 0;
		ctx->packetDesription.mDataByteSize = 0;
		*outDataPacketDescription = &ctx->packetDesription;
	}

	ctx->packetConsumed = true;
	provided = true;

bail:
	// Bug fix: only claim a packet was supplied when ioData was actually filled.  The previous
	// "( err == kNoErr ) ? 1 : 0" reported 1 packet on the zero-packets-requested early-out.
	*ioNumberDataPackets = provided ? 1 : 0;

	return( err );
}

//===========================================================================================================================
//	_APSRTPJitterBufferCheckOverrun - Checks to see if the buffer overruns
//===========================================================================================================================

static void
	_APSRTPJitterBufferCheckOverrun(
		APSRTPJitterBufferRef		inBuffer,
		uint32_t					inSamplesToWrite )
{
	// Diagnostic only: if the pending write will not fit in the output ring buffer's free space,
	// log it and bump the overrun counter.  The write itself is not prevented here.

	uint32_t const freeSamples = BYTES_TO_SAMPLES( MirroredRingBufferGetBytesFree( &inBuffer->outputRingBuffer ) );

	if( inSamplesToWrite > freeSamples )
	{
		// Snapshot the pointers into locals so the async logs report a consistent view.
		uint32_t const readTime		= inBuffer->outputBufferReadSampleTime;
		uint32_t const writeTime	= inBuffer->outputBufferWriteSampleTime;

		ap_ulog( kLogLevelError, "'%s' Overrun occurred\n", inBuffer->label );
		ap_ulog( kLogLevelTrace, "--- rPtr: %u, wPtr: %u, diff: %u, actual space: %u\n", readTime, writeTime, (writeTime-readTime), freeSamples );
		++inBuffer->stats.overrunCount;
	}
}

//===========================================================================================================================
//	APSRTPJitterBufferCheckUnderrun - Check if we are running dry and print a log, if so.
//===========================================================================================================================

static void
	APSRTPJitterBufferCheckUnderrun(
		APSRTPJitterBufferRef		inBuffer,
		uint32_t					inSampleTime,
		uint32_t					inSamplesRequested )
{
	// Snapshot shared state once up front; other contexts advance these concurrently.
	Boolean const		started		= inBuffer->decodeStarted;
	uint32_t const		readTime	= inBuffer->outputBufferReadSampleTime;
	uint32_t const		writeTime	= inBuffer->outputBufferWriteSampleTime;
	Boolean const		runningDry	= started && ( inSampleTime + inSamplesRequested >= writeTime );

	if( runningDry )
	{
		// Entering an underrun: log and count it once per underrun episode.
		if( !inBuffer->inUnderRun )
		{
			ap_ulog( kLogLevelNotice, "'%s' Jitter buffer underrun; reading %u samples at inSampleTime %u; ReadPtr %d; Write Ptr %u; Buffer size %d\n",
				inBuffer->label, inSamplesRequested, inSampleTime, readTime, writeTime, writeTime - readTime );
			inBuffer->inUnderRun = true;

			++inBuffer->stats.underrunCount;
		}
	}
	else if( inBuffer->inUnderRun )
	{
		// Recovered: log the exit once and clear the condition.
		ap_ulog( kLogLevelNotice, "'%s' Jitter buffer exit underrun; reading %u samples at time %u; Write Ptr %u, Buffer size %d\n",
			inBuffer->label, inSamplesRequested, inSampleTime, writeTime, writeTime - readTime );
		inBuffer->inUnderRun = false;
	}
}

//===========================================================================================================================
//	_AdvanceReadPointersBySamples - Move all read pointers ahead
//===========================================================================================================================

static void _AdvanceReadPointersBySamples(
	APSRTPJitterBufferRef		inBuffer,
	uint32_t					inNumSamples )
{
	// Consume inNumSamples from the output ring buffer, then bump the read sample clock to match.

	uint32_t const bytesConsumed = SAMPLES_TO_BYTES( inNumSamples );

	MirroredRingBufferReadAdvance( &inBuffer->outputRingBuffer, bytesConsumed );
	inBuffer->outputBufferReadSampleTime += inNumSamples;
}

//===========================================================================================================================
//	_AdvanceWritePointerSamples - Moves all write pointers forward
//===========================================================================================================================

static void _AdvanceWritePointerSamples( APSRTPJitterBufferRef inBuffer, uint32_t samplesToWrite )
{
	// Bump the write sample clock first, then publish the corresponding bytes into the ring buffer.

	uint32_t const bytesProduced = SAMPLES_TO_BYTES( samplesToWrite );

	inBuffer->outputBufferWriteSampleTime += samplesToWrite;
	MirroredRingBufferWriteAdvance( &inBuffer->outputRingBuffer, bytesProduced );
}

//===========================================================================================================================
//	_APSRTPJitterBufferDecodeOneUnit - Decodes and writes one packet, using the given callback
//===========================================================================================================================

// Decodes (or conceals) exactly one input packet's worth of audio into the output ring buffer through
// the AudioConverter, pulling input via the supplied callback.  inPacket may be NULL when concealing a
// missing packet (pair with the NULL-decode callback).  On a hard decode error the output is filled
// with silence instead so the output clock keeps advancing.  Returns the number of INPUT samples
// consumed (always one packet's worth), not the number of output samples produced.
static uint32_t _APSRTPJitterBufferDecodeOneUnit(
	APSRTPJitterBufferRef inBuffer,
	AudioConverterComplexInputDataProc	inDataCallBack,
	APSRTPSavedPacket *inPacket )
{
	OSStatus err  = kNoErr;
	uint32_t							samplesFilled		= 0;
	AudioBufferList						bufferList			= { 0, { { 0, 0, NULL } } };
	APSRTPJitterBufferDecodeContext		decodeContext		= { inBuffer, inPacket, { 0, 0, 0 }, false };


	// Initialize bufferList to point directly at the ring buffer's write position (decode in place).

	bufferList.mBuffers[ 0 ].mData				= MirroredRingBufferGetWritePtr( &inBuffer->outputRingBuffer );
	bufferList.mBuffers[ 0 ].mDataByteSize		= SAMPLES_TO_BYTES( inBuffer->inputFormat.mFramesPerPacket );
	bufferList.mBuffers[ 0 ].mNumberChannels	= inBuffer->outputFormat.mChannelsPerFrame;
	bufferList.mNumberBuffers					= 1;

	// Initialize output counter. Also tells the converter the output capacity, in samples.

	samplesFilled = inBuffer->inputFormat.mFramesPerPacket;

	// Actual decode call; the converter pulls input through inDataCallBack (see callbacks above).

	err = AudioConverterFillComplexBuffer( inBuffer->decoder, inDataCallBack,
				&decodeContext, (UInt32*) &samplesFilled, &bufferList, NULL );

	if( err && err != kUnderrunErr)
	{
		// Hard error: fall back to memset silence for the full packet duration.
		samplesFilled = inBuffer->inputFormat.mFramesPerPacket;
		memset( bufferList.mBuffers[ 0 ].mData, 0, SAMPLES_TO_BYTES( samplesFilled ) );

		check( !decodeContext.packetConsumed );
		ap_ulog( kLogLevelError,
			"'%s' Writing %u samples of silence (instead of concealed audio) due to decode error %#m\n",
			inBuffer->label, samplesFilled, err );
	}
	else
	{
		// Success, or a benign underrun (e.g. the input callback ran out of data after consuming the packet).
		check( decodeContext.packetConsumed );
		if( decodeContext.packet )
		{
			check( decodeContext.packet->len == 0 );
		}

		if( err )
		{
			ap_ulog( kLogLevelError, "'%s' Error occurred during decode: %#m (%u samples produced)\n",
				inBuffer->label, err, samplesFilled );
		}

		ap_ulog( kLogLevelVerbose, "'%s' Writing %u samples of concealed audio\n", inBuffer->label, samplesFilled );
	}

	_AdvanceWritePointerSamples( inBuffer, samplesFilled );

	// Return samplesConsumed - this is from the input, not the output of the decode call above
	// (Accounts for any internal buffering inside the converter)
	return inBuffer->inputFormat.mFramesPerPacket;

}


//===========================================================================================================================
//	_APSRTPJitterBufferWriteZeros - Writes given number of samples worth of zeros into the ring buffer.
//===========================================================================================================================

// Writes inNumZeroSamples samples of silence into the output ring buffer and advances the write pointer.
static void _APSRTPJitterBufferWriteZeros( APSRTPJitterBufferRef inBuffer, uint32_t inNumZeroSamples )
{
	if( inNumZeroSamples > 0 )
	{
		// Buffering calculations need to handled better; this ensures that there are no crashes.
		// The memset is clamped to the ring buffer size so a huge gap cannot write past the mirrored region.
		// NOTE(review): the write pointer below is still advanced by the FULL (unclamped) inNumZeroSamples,
		// so when the clamp kicks in, the advanced region beyond bytesToFill is not actually zeroed --
		// appears intentional per the comment above, but confirm the sample-time accounting stays sane.

		uint32_t bytesToFill = Min( SAMPLES_TO_BYTES( inNumZeroSamples ), inBuffer->outputRingBufferSizeBytes ) ;

		memset( MirroredRingBufferGetWritePtr( &inBuffer->outputRingBuffer ), 0, bytesToFill );
		ap_ulog( kLogLevelVerbose, "'%s' Writing %u samples of silence\n", inBuffer->label, inNumZeroSamples );

		_AdvanceWritePointerSamples( inBuffer, inNumZeroSamples );
	}
}

//===========================================================================================================================
//	_APSRTPJitterBufferDecodePacketSamples
//===========================================================================================================================

// Conceals any gap preceding inPacket (whole missing packets through the decoder, any remainder as
// silence) and then decodes inPacket into the output ring buffer.  Returns the total number of INPUT
// samples accounted for (gap + packet); returns 0 when inPacket is NULL.
static uint32_t _APSRTPJitterBufferDecodePacketSamples( APSRTPJitterBufferRef inBuffer, uint32_t inMissingOutputSamplesCount, APSRTPSavedPacket *inPacket )
{
	uint32_t							missingInputPacketCount			= 0;
	uint32_t							extraMissingOutputSamplesCount	= 0;
	uint32_t							inputSamplesConsumed			= 0;
	uint32_t							packetSampleCount				= 0;

	require_quiet( inPacket, bail );

	packetSampleCount = inBuffer->inputFormat.mFramesPerPacket;

	_APSRTPJitterBufferCheckOverrun( inBuffer, inMissingOutputSamplesCount + packetSampleCount );

	// Split the gap into whole missing input packets plus a sub-packet remainder.

	missingInputPacketCount = inMissingOutputSamplesCount / inBuffer->inputFormat.mFramesPerPacket;
	extraMissingOutputSamplesCount = inMissingOutputSamplesCount % inBuffer->inputFormat.mFramesPerPacket;

	while( missingInputPacketCount > 0 )
	{
		// Fill one packet worth of zeros using the decoder. This results in a smoother break in the audio.
		// Use the NULL decode callback for this.

		inputSamplesConsumed += _APSRTPJitterBufferDecodeOneUnit( inBuffer, _APSRTPJitterBufferNullDecodeInputCallback, NULL );

		--missingInputPacketCount;
	}

	// Handle any remaining missing output samples (i.e. if the gap was not a multiple of the input format's frames per packet)

	if( extraMissingOutputSamplesCount > 0 )
	{
		ap_ulog( kLogLevelError,
			"'%s' Writing %u samples of silence (instead of concealed audio) due to non-multiple of frames per packet\n",
			inBuffer->label, extraMissingOutputSamplesCount );

		// Bug fix: _APSRTPJitterBufferWriteZeros() already advances the write pointer internally, so the
		// extra _AdvanceWritePointerSamples() call that used to follow it here double-advanced the write
		// clock and ring buffer past unwritten data.  (The PCM copy path calls WriteZeros without a
		// separate advance, which is the correct pattern.)
		_APSRTPJitterBufferWriteZeros( inBuffer, extraMissingOutputSamplesCount );

		inputSamplesConsumed += extraMissingOutputSamplesCount;
	}

	// Decode the packet itself into the output ring buffer.

	inputSamplesConsumed += _APSRTPJitterBufferDecodeOneUnit( inBuffer, _APSRTPJitterBufferDecodeInputCallback, inPacket );

bail:
	return( inputSamplesConsumed );
}

//===========================================================================================================================
//	_APSRTPJitterBufferCopyPacketPCMSamples
//===========================================================================================================================

//
// Copies one saved PCM packet's payload into the output ring buffer, first writing silence for any
// gap of inMissingOutputSamplesCount samples. Packets that arrive entirely behind the read pointer
// are dropped; partially-late packets are trimmed and the usable tail is copied. In all cases the
// write pointer is advanced past the packet so subsequent packets line up. Returns the number of
// input samples consumed, or 0 when inPacket is NULL.
//
static uint32_t _APSRTPJitterBufferCopyPacketPCMSamples( APSRTPJitterBufferRef inBuffer, uint32_t inMissingOutputSamplesCount, APSRTPSavedPacket *inPacket )
{
	uint32_t inputSamplesConsumed	= 0;
	uint8_t* payLoad				= NULL;
	uint32_t payLoadSamples			= 0;
	Boolean	 dataAvailable			= true;
	uint32_t readSampleTime			= inBuffer->outputBufferReadSampleTime; //Record this once for local usage.

	require_quiet( inPacket, bail );

	payLoad			= inPacket->pkt.rtp.payload;
	payLoadSamples	= BYTES_TO_SAMPLES( PAYLOAD_SIZE( inPacket ) );

	//Check for overruns

	_APSRTPJitterBufferCheckOverrun( inBuffer, inMissingOutputSamplesCount + payLoadSamples );

	// Handle missing packets

	if( inMissingOutputSamplesCount > 0 )
	{
		ap_ulog( kLogLevelNotice,
			"'%s' Writing %u samples of silence due to gap\n", inBuffer->label, inMissingOutputSamplesCount );

		// NOTE(review): unlike _APSRTPJitterBufferDecodePacketSamples, the write pointer is not
		// explicitly advanced here after writing zeros — confirm _APSRTPJitterBufferWriteZeros
		// advances it internally, otherwise the gap silence is overwritten by the payload below.
		_APSRTPJitterBufferWriteZeros( inBuffer, inMissingOutputSamplesCount );

		inputSamplesConsumed += inMissingOutputSamplesCount;
	}


	//Handle late packets.

	if( inBuffer->outputBufferWriteSampleTime < readSampleTime )
	{
		//Very late packet - read time has gone beyond the end of the packet.
		if( inBuffer->outputBufferWriteSampleTime + payLoadSamples < readSampleTime)
		{
			// Nothing from this packet is usable; the write pointer is still advanced below.
			dataAvailable = false;

			ap_ulog( kLogLevelTrace, "'%s' Dropping packet @ st %u; advancing write pointer to %u\n",
																inBuffer->label, inPacket->pkt.rtp.header.ts, inBuffer->outputBufferWriteSampleTime + payLoadSamples);
		}
		//Slightly late packet - Some part of the packet is late, but the rest is usable.
		else
		{
			uint32_t lateSamples = readSampleTime - inBuffer->outputBufferWriteSampleTime;

			//Advance ring buffer by the partial size.
			//There is no need to set this data to 0 or anything - the read pointer has already gone past this point, we are just catching up.
			_AdvanceWritePointerSamples( inBuffer, lateSamples );

			ap_ulog( kLogLevelNotice, "'%s' Dropping %d samples of packet @ st %u; advancing write pointer to %u\n",
																inBuffer->label, lateSamples, inPacket->pkt.rtp.header.ts, inBuffer->outputBufferWriteSampleTime);

			//Write the rest of the stuff into the RingBuffer.
			payLoad					+= SAMPLES_TO_BYTES( lateSamples );
			payLoadSamples			-= lateSamples;
			inputSamplesConsumed	+= lateSamples;
		}
	}

	//This is true only if there is valid data to be copied into the ring buffer.
	//In cases where we are dropping data, we will simply advance the write pointer without bothering to copy data in.
	if( dataAvailable )
	{
		ap_ulog( kLogLevelTrace, "'%s' Writing %u samples of pcm audio; wPtr %u, rPtr %u\n", inBuffer->label, payLoadSamples,
															inBuffer->outputBufferWriteSampleTime,  readSampleTime );

		memcpy( MirroredRingBufferGetWritePtr( &inBuffer->outputRingBuffer ), payLoad, SAMPLES_TO_BYTES( payLoadSamples ) );
	}

	//Advance the write pointer accordingly.
	_AdvanceWritePointerSamples( inBuffer, payLoadSamples );
	inputSamplesConsumed += payLoadSamples;

bail:
	return( inputSamplesConsumed );
}

//===========================================================================================================================
//	_APSRTPJitterBufferInitializeWritePointers
//	Initializes write-related pointers.
//===========================================================================================================================

//
// Initializes the write-side pointers when the first packet arrives. In legacy mode the write
// pointer starts at the current read position; otherwise a timestamp offset is computed so the
// buffer starts at its target depth, and the gap between the read pointer and the first write
// position is pre-filled with silence.
//
static void _APSRTPJitterBufferInitializeWritePointers( APSRTPJitterBufferRef inBuffer, APSRTPSavedPacket *inPacket, uint32_t inReadSampleTime )
{
	uint32_t gapInBytes  = 0;

	// It is no longer safe to modify the read pointer, as the read pointer gets updated by the read thread always.
	// To ensure that the ring buffer starts off with reasonable buffering, adjust the write pointer according to the
	// current position of the read pointer. Calculate an offset that will be applied to future timestamps so that
	// the incoming timestamps look continuous.

	if( inBuffer->legacyBufferingMode )
	{
		inBuffer->outputBufferWriteSampleTime 		= inReadSampleTime;
		inBuffer->outputBufferWriteSampleTimeOffset = inBuffer->outputBufferWriteSampleTime - inPacket->pkt.rtp.header.ts;
		inBuffer->busyQueueNextExpectedSampleTime	= inBuffer->outputBufferWriteSampleTime;
	}
	else
	{
		// If the first packet would land less than one target buffer ahead of the read pointer,
		// shift all future timestamps forward so the buffer starts at its target depth.
		if( (int32_t)(inPacket->pkt.rtp.header.ts - inReadSampleTime) < (int32_t)inBuffer->targetSizeSamples )
		{
			inBuffer->outputBufferWriteSampleTimeOffset = inReadSampleTime + inBuffer->targetSizeSamples - inPacket->pkt.rtp.header.ts;
		}
		else
		{
			inBuffer->outputBufferWriteSampleTimeOffset = 0;
		}

		// (Removed a dead store of outputBufferWriteSampleTime that was unconditionally
		// overwritten here with no intervening read.)
		inBuffer->outputBufferWriteSampleTime		= inPacket->pkt.rtp.header.ts + inBuffer->outputBufferWriteSampleTimeOffset;
		inBuffer->busyQueueNextExpectedSampleTime	= inBuffer->outputBufferWriteSampleTime;

		//Update ring buffer to be filled with zeros for this gap.
		gapInBytes = SAMPLES_TO_BYTES( inBuffer->outputBufferWriteSampleTime - inReadSampleTime );

		if( gapInBytes > 0)
		{
			memset( MirroredRingBufferGetWritePtr(&inBuffer->outputRingBuffer), 0,  MIN( gapInBytes, inBuffer->outputRingBufferSizeBytes ) );
			MirroredRingBufferWriteAdvance( &inBuffer->outputRingBuffer, gapInBytes);
		}
	}

	// Bug fix: label is a plain C string — use %s. The previous %@ is a CoreFoundation-style
	// specifier that a printf-style formatter would mishandle; every other log here uses %s.
	ap_ulog( kLogLevelTrace, "'%s' Decode started with packet at timestamp: %u\n",
		inBuffer->label, inPacket->pkt.rtp.header.ts );
}

//===========================================================================================================================
//	_APSRTPJitterBufferWritePacket
//===========================================================================================================================

//
// Consumes one packet from the busy queue: applies the timestamp offset, rejects out-of-order
// packets, detects and logs gaps, then decodes (encoded input) or copies (PCM input) the samples
// into the output ring buffer. The packet is always returned to the free queue, even when
// skipped — only this (decode-thread) context may enqueue onto the free queue.
//
static void _APSRTPJitterBufferWritePacket(void* inCtx, APSRTPSavedPacket *packet)
{
	APSRTPJitterBufferRef		inBuffer	= (APSRTPJitterBufferRef) inCtx;

	if( !inBuffer->decodeDone )
	{
		uint32_t	missingOutputSamplesCount	= 0;
		uint32_t	inputSamplesConsumed		= 0;
		uint32_t	pcmFrameCount				= 0;
		uint32_t	readSampleTime				= inBuffer->outputBufferReadSampleTime;

		// Zero length packets should be skipped as invalid (e.g. they came from APSRTPJitterBufferDiscardPacket)
		require( packet, skipPacket );
		require_quiet( packet->len > 0, skipPacket );

		// Set initial sample times
		if( !inBuffer->decodeStarted )
		{
			_APSRTPJitterBufferInitializeWritePointers( inBuffer, packet, readSampleTime );
		}

		// Add the offset to the incoming timestamp

		packet->pkt.rtp.header.ts += inBuffer->outputBufferWriteSampleTimeOffset;

		// Sanity check the incoming packet timestamp

		require_action_quiet( packet->pkt.rtp.header.ts >= inBuffer->busyQueueNextExpectedSampleTime, skipPacket,
			ap_ulog( kLogLevelNotice, "Skipping out of order packet\n" ) );

		// Check for missing packets

		if( packet->pkt.rtp.header.ts != inBuffer->busyQueueNextExpectedSampleTime )
		{
			uint32_t expected = inBuffer->busyQueueNextExpectedSampleTime;
			uint32_t received = packet->pkt.rtp.header.ts;

			missingOutputSamplesCount = packet->pkt.rtp.header.ts - inBuffer->busyQueueNextExpectedSampleTime;

			ap_ulog( kLogLevelNotice, "'%s' Gap detected: %u samples (%u ms); expected %u, received %u\n",
				inBuffer->label, missingOutputSamplesCount, _APSRTPJitterBufferSamplesToMS( missingOutputSamplesCount, &inBuffer->outputFormat ),
				expected, received );

			++inBuffer->stats.lostPacketCount;
		}

		// Decode / Copy the packet's sample data, pre-filling missing output packets if necessary

		if( inBuffer->decoder )
		{
			inputSamplesConsumed = _APSRTPJitterBufferDecodePacketSamples( inBuffer, missingOutputSamplesCount, packet );
		}
		else
		{
			inputSamplesConsumed = _APSRTPJitterBufferCopyPacketPCMSamples( inBuffer, missingOutputSamplesCount, packet );
		}

		// Advance "busyQueueNextExpectedSampleTime" based on how much input we consumed
		inBuffer->busyQueueNextExpectedSampleTime += inputSamplesConsumed;

		// In legacy buffering mode, check that we aren't getting close to overrunning our output buffer

		if( inBuffer->legacyBufferingMode && !inBuffer->legacyBufferingDiscardExcess )
		{
			int32_t	enqueuedSampleCount	= (int32_t)(inBuffer->busyQueueLastSampleTime - inBuffer->outputBufferReadSampleTime);


			// If we've exceeded out buffer threshold, trigger a discard of excess samples

			if( enqueuedSampleCount >= (int32_t)inBuffer->legacyBufferingDiscardThresholdSamples )
			{
			    ap_ulog( kLogLevelNotice,
			 		"'%s' Discard ????? BusyQueueLastSampleTime: %u - OutputBufferReadSampleTime: %u = enqueuedSampleCount:%u , legacyBufferingDiscardThresholdSamples = %u \n",
					inBuffer->label, inBuffer->busyQueueLastSampleTime,inBuffer->outputBufferReadSampleTime, enqueuedSampleCount, inBuffer->legacyBufferingDiscardThresholdSamples );

				++inBuffer->stats.overrunCount;
				inBuffer->legacyBufferingDiscardExcess = true;
			}
		}

		// Indicate that decoding has started after we've written a packet

		if( !inBuffer->decodeStarted )
		{
			if( inBuffer->legacyActivelyBuffering )
			{
				// If legacy buffering is active, indicate the start time as "now" minus the duration of the packet we just decoded
				// NOTE(review): pcmFrameCount is never assigned in this function, so this always
				// subtracts 0 ms; likely intended to be inputSamplesConsumed — confirm.
				inBuffer->legacyBufferingStartTicks =
					UpTicks() - MillisecondsToUpTicks( _APSRTPJitterBufferSamplesToMS( pcmFrameCount, &inBuffer->outputFormat ) );
			}

			inBuffer->decodeStarted = true;
		}

	skipPacket:

		// Return the packet to the free queue
		if( packet ) SimpleQueueEnqueue( inBuffer->freeQueue, packet );
	}
}



//===========================================================================================================================
//	_APSRTPJitterBufferDecodeThreadEntry
//===========================================================================================================================

static void* _APSRTPJitterBufferDecodeThreadEntry( void *inCtx )
{
	APSRTPJitterBufferRef		inBuffer	= (APSRTPJitterBufferRef) inCtx;
	APSRTPSavedPacket *			packet		= NULL;


	SetCurrentThreadPriority( kAirPlayThreadPriority_AudioDecoder );

	ap_ulog( kLogLevelTrace, "'%s' Decode thread starting\n", inBuffer->label );

	while( !inBuffer->decodeDone )
	{
		// Wait for a packet to become available or termination to be signalled

		dispatch_semaphore_wait( inBuffer->packetsAvailableSemaphore, DISPATCH_TIME_FOREVER );
		if( inBuffer->decodeDone )
		{
			break;
		}

		packet = (APSRTPSavedPacket*) SimpleQueueDequeue( inBuffer->busyQueue );

		_APSRTPJitterBufferWritePacket(inBuffer, packet);
	}

	ap_ulog( kLogLevelTrace, "'%s' Decode thread ending\n", inBuffer->label );

	return( NULL );
}

//===========================================================================================================================
//	APSRTPJitterBufferGetEmptyPacket
//===========================================================================================================================

//
// Pulls an empty packet from the free queue for the caller to fill. Returns NULL (and reports
// kNoMemoryErr through outErr, when provided) if the free queue is empty. An empty queue is a
// fairly common occurrence, so no logging or asserting is done on that path.
//
APSRTPSavedPacket* APSRTPJitterBufferGetEmptyPacket( APSRTPJitterBufferRef inBuffer, OSStatus *outErr )
{
	APSRTPSavedPacket * const		freePacket = (APSRTPSavedPacket*) SimpleQueueDequeue( inBuffer->freeQueue );

	if( outErr )
	{
		*outErr = freePacket ? kNoErr : kNoMemoryErr;
	}

	return( freePacket );
}

//===========================================================================================================================
//	APSRTPJitterBufferDiscardPacket
//===========================================================================================================================

//
// Discards a packet the caller no longer wants. To maintain consistency, only the decode thread
// may write to the free queue, so the packet is marked invalid (zero length) and routed through
// the busy queue for the decode thread to recycle.
//
void APSRTPJitterBufferDiscardPacket( APSRTPJitterBufferRef inBuffer, APSRTPSavedPacket* inPacket )
{
	if( !inPacket ) return;

	inPacket->len = 0;
	SimpleQueueEnqueue( inBuffer->busyQueue, inPacket );
	dispatch_semaphore_signal( inBuffer->packetsAvailableSemaphore );
}

//===========================================================================================================================
//	APSRTPJitterBufferEnqueuePacket
//===========================================================================================================================

//
// Enqueues a filled packet for processing. Records the sample time just past the end of the
// packet (for buffered-size accounting), then either hands the packet to the decode thread or,
// when no decode thread exists, writes it into the ring buffer inline.
//
void APSRTPJitterBufferEnqueuePacket( APSRTPJitterBufferRef inBuffer, APSRTPSavedPacket* inPacket )
{
	uint32_t	packetSampleCount;

	if( !inPacket ) return;

	// For PCM the packet duration comes from the payload size; for encoded formats every packet
	// decodes to a fixed number of frames.
	packetSampleCount = ( inBuffer->inputFormat.mFormatID == kAudioFormatLinearPCM )
		? ( PAYLOAD_SIZE( inPacket ) / inBuffer->inputFormat.mBytesPerFrame )
		: inBuffer->inputFormat.mFramesPerPacket;

	inBuffer->busyQueueLastSampleTime = inPacket->pkt.rtp.header.ts + packetSampleCount;

	// If a decode thread is waiting for packets, send the packet to it; otherwise write directly.
	if( inBuffer->decodeThreadPtr )
	{
		SimpleQueueEnqueue( inBuffer->busyQueue, inPacket );
		dispatch_semaphore_signal( inBuffer->packetsAvailableSemaphore );
	}
	else
	{
		_APSRTPJitterBufferWritePacket( inBuffer, inPacket );
	}
}

//===========================================================================================================================
//	APSRTPJitterBufferLegacyEvaluateBufferedSize - Legacy mode: Evaluate if enough data has been buffered, and take action.
//===========================================================================================================================

//
// Legacy mode only: while actively buffering, estimates how much audio has accumulated and exits
// buffering mode once either the estimate has caught up past the target or enough wall-clock time
// has elapsed. Exiting also arms a discard of any excess that built up during buffering.
//
static void
	APSRTPJitterBufferLegacyEvaluateBufferedSize( APSRTPJitterBufferRef		inBuffer )
{
	uint64_t		nowTicks				= UpTicks();
	uint64_t		elapsedBufferingTimeMS	= 0;
	int32_t			estimatedBufferedTimeMS	= 0;	// Signed: can go negative if the read pointer is ahead.

	if( inBuffer->legacyActivelyBuffering )
	{
		elapsedBufferingTimeMS = UpTicksToMilliseconds( nowTicks - inBuffer->legacyBufferingStartTicks );
		estimatedBufferedTimeMS = _APSRTPJitterBufferSamplesToMS(  ( inBuffer->busyQueueLastSampleTime + inBuffer->outputBufferWriteSampleTimeOffset ) - inBuffer->outputBufferReadSampleTime,
										&inBuffer->outputFormat );

		// If we've caught up due to a burst of late packets or we've been buffering for a while, exit buffering mode

		if( estimatedBufferedTimeMS >= (int32_t)( elapsedBufferingTimeMS + inBuffer->targetSizeMS ) || elapsedBufferingTimeMS >= inBuffer->targetSizeMS )
		{
			// We may have received a spike of traffic after having entered buffering mode, so trim

			inBuffer->legacyBufferingDiscardExcess = true;

			// Exit buffering mode

			inBuffer->legacyActivelyBuffering = false;
			inBuffer->legacyBufferingStartTicks = 0;

			// Bug fix: estimatedBufferedTimeMS is int32_t — log it with %d (was %u).
			ap_ulog( kLogLevelNotice,
				"'%s' Exiting legacy buffering mode (%d ms buffered)\n", inBuffer->label, estimatedBufferedTimeMS );
		}
	}
}

//===========================================================================================================================
//	_APSRTPJitterBufferLegacyDiscardExcess - Legacy mode: If too much data has been buffered, discard the excess.
//===========================================================================================================================

//
// Legacy mode only: when legacyBufferingDiscardExcess is set, trims buffered audio back toward
// legacyBufferingDiscardTargetSamples by advancing the read pointers. Only samples already in the
// ring buffer can be discarded here; anything still on the busy queue is handled on later calls,
// so the discard flag stays set until the total drops to the target.
//
static void
	_APSRTPJitterBufferLegacyDiscardExcess(
		APSRTPJitterBufferRef		inBuffer,
		uint32_t					inCurrentWriteSampleTime )
{
	// Total buffered audio = ring buffer contents plus packets still on the busy queue.
	int32_t	totalSampleCount		= (int32_t)( inBuffer->busyQueueLastSampleTime + inBuffer->outputBufferWriteSampleTimeOffset - inBuffer->outputBufferReadSampleTime );
	// Samples actually present in the ring buffer right now (write pointer minus read pointer).
	int32_t	availableSampleCount	= (int32_t)(inCurrentWriteSampleTime - inBuffer->outputBufferReadSampleTime);
	int32_t	excessSampleCount		= 0;

	// We may not actually be over target

	if( totalSampleCount > inBuffer->legacyBufferingDiscardTargetSamples )
	{
		// NOTE(review): totalSampleCount/excessSampleCount are int32_t but logged with %u — %d
		// would be the matching specifier.
		ap_ulog( kLogLevelNotice, "'%s' Discard excess requested with totalSampleCount: %u, target: %u\n",
			inBuffer->label, totalSampleCount, inBuffer->legacyBufferingDiscardTargetSamples );

		// Discard from the ring buffer only (since only the decode thread can write to the busy queue)

		excessSampleCount = Min(totalSampleCount -  inBuffer->legacyBufferingDiscardTargetSamples, availableSampleCount );

		_AdvanceReadPointersBySamples( inBuffer, excessSampleCount );

		ap_ulog( kLogLevelNotice, "'%s' Discarding %u samples of excess pcm audio\n",
				inBuffer->label, excessSampleCount );
	}

	// We may not have discarded enough depending on what was in the ring buffer vs. the busy queue

	if( ( totalSampleCount - excessSampleCount ) <= inBuffer->legacyBufferingDiscardTargetSamples )
	{
		inBuffer->legacyBufferingDiscardExcess = false;
	}
}

//===========================================================================================================================
//	APSRTPJitterBufferRead
//===========================================================================================================================

//
// Reader-side entry point: fills inReadBuffer with inReadBufferLen bytes of audio for sample time
// inSampleTime. The request is satisfied from up to three regions, in order: prepended silence
// (request precedes buffered data), decoded samples from the ring buffer, and appended silence
// (underrun / request beyond the write pointer). In legacy buffering mode the requested sample
// time is ignored and data is always read from the head of the buffer. Always returns kNoErr.
//
OSStatus
	APSRTPJitterBufferRead(
		APSRTPJitterBufferRef		inBuffer,
		uint32_t					inSampleTime,
		void *						inReadBuffer,
		size_t						inReadBufferLen )
{
	OSStatus		err							= kNoErr;
	uint32_t		sampleTime					= inSampleTime;
	uint32_t		samplesRequested			= 0;
	uint32_t		samplesOfSilenceToPrepend	= 0;
	uint32_t		samplesToRead				= 0;
	uint32_t		samplesOfSilenceToAppend	= 0;
	uint8_t *		sampleBuffer				= inReadBuffer;

	// Read these value once and store it locally so that it doesn't change for the duration of this call
	uint32_t		currentWriteSampleTime		= inBuffer->outputBufferWriteSampleTime;
	Boolean			decodeStarted				= inBuffer->decodeStarted;

	samplesRequested = ( uint32_t ) BYTES_TO_SAMPLES ( inReadBufferLen  );

	// Bug fix: inReadBufferLen is size_t — log it with %zu (was %u, a mismatch on LP64).
	ap_ulog( kLogLevelChatty, "'%s' Read request for %zu bytes (%u samples) of pcm audio\n",
		inBuffer->label, inReadBufferLen, samplesRequested );

	if( !inBuffer->legacyBufferingMode )
	{
		APSRTPJitterBufferCheckUnderrun( inBuffer, inSampleTime, samplesRequested );
	}


	// Figure out what we have available to read

	// If no packets have arrived yet, just send silence

	if( !decodeStarted )
	{
		samplesOfSilenceToPrepend = 0;
		samplesToRead = 0;
		inBuffer->outputBufferReadSampleTime = inSampleTime;
	}
	// Packets have arrived.
	else
	{
		if( inBuffer->legacyBufferingMode )
		{
			APSRTPJitterBufferLegacyEvaluateBufferedSize( inBuffer );

			// Legacy buffering mode is active. So, send zeros for now.
			if( inBuffer->legacyActivelyBuffering )
			{
				samplesOfSilenceToPrepend = 0;
				samplesToRead = 0;
			}
			// Legacy buffering is not active.
			else if( inBuffer->legacyBufferingDiscardExcess )
			{
				// If discard excess has been set, bring our output buffer down to our target

				_APSRTPJitterBufferLegacyDiscardExcess( inBuffer, currentWriteSampleTime );
			}

			// In legacy buffering mode, we always read from the head of what we have buffered
			sampleTime = inBuffer->outputBufferReadSampleTime;
		}


		// See where the requested time lands in relation to what's in the ring buffer

		if( sampleTime < inBuffer->outputBufferReadSampleTime )
		{
			// If the request preceeds what we have data for, prepend silence

			samplesOfSilenceToPrepend = Min( (uint32_t) ( inBuffer->outputBufferReadSampleTime - sampleTime ), samplesRequested );
		}
		else if( sampleTime > inBuffer->outputBufferReadSampleTime )
		{
			uint32_t samplesToSkip = sampleTime - inBuffer->outputBufferReadSampleTime;

			// If the request follows the start of what we have data for, skip ahead
			ap_ulog( kLogLevelTrace, "'%s' Skipping %u bytes (%u samples) of decoded audio\n",
				inBuffer->label, SAMPLES_TO_BYTES( samplesToSkip ), samplesToSkip );

			//Note: Earlier code here limited the sampleTime to never go beyond the write pointer.
			//However, with the current implementation, the write pointer will catch up by dropping
			//late packets. This means that the sampleTime is free to exceed the write pointer.
			//Note that we will never enter this if statement in the legacy mode - sampleTime is always forced to readSampleTime.

			_AdvanceReadPointersBySamples( inBuffer, samplesToSkip );
		}


		// If the request overlaps the data we have available, figure out how much we can fulfill
		// In legacy buffering mode, if we are actively buffering, we have already set samplesToRead to be 0 above. So no need to determine it again here.

		if( !inBuffer->legacyActivelyBuffering && ( samplesOfSilenceToPrepend < samplesRequested ) )
		{
			// Is data available in the ring buffer ?
			if( currentWriteSampleTime >= inBuffer->outputBufferReadSampleTime)
			{
				samplesToRead = Min( (uint32_t) ( currentWriteSampleTime - inBuffer->outputBufferReadSampleTime ),
									samplesRequested - samplesOfSilenceToPrepend ) ;

				// Don't read more data than is available
				samplesToRead = Min( samplesToRead, BYTES_TO_SAMPLES( MirroredRingBufferGetBytesUsed( &inBuffer->outputRingBuffer) ) );
			}
			//Read pointer has gone past the write pointer. There are no samples available to be read.
			else
			{
				samplesToRead = 0;
			}
		}
	} //Decode started case


	// If the request exceeds what we have data for, append silence and enter buffering state if legacy buffering mode is enabled

	samplesOfSilenceToAppend = samplesRequested - ( samplesOfSilenceToPrepend + samplesToRead );

	///---------------------

	// Fill the buffer

	if( samplesOfSilenceToPrepend > 0 )
	{
		memset( sampleBuffer, 0, SAMPLES_TO_BYTES( samplesOfSilenceToPrepend ) );
		sampleBuffer += SAMPLES_TO_BYTES( samplesOfSilenceToPrepend );
	}

	if( samplesToRead > 0 )
	{
		uint32_t bytesToRead	= SAMPLES_TO_BYTES( samplesToRead );

		// Bug fix: sample times are uint32_t — log them with %u (was %lx, which expects
		// unsigned long and desynchronizes the varargs on LP64).
		ap_ulog( kLogLevelTrace, "'%s' Reading %u bytes (%u samples) of decoded audio; wPtr %u, rPtr %u\n",
			inBuffer->label, bytesToRead, samplesToRead,
			currentWriteSampleTime, inBuffer->outputBufferReadSampleTime );

		memcpy( sampleBuffer, MirroredRingBufferGetReadPtr( &inBuffer->outputRingBuffer ), bytesToRead );
		sampleBuffer += bytesToRead;

		_AdvanceReadPointersBySamples( inBuffer, samplesToRead );
	}

	if( samplesOfSilenceToAppend )
	{
		if( inBuffer->legacyBufferingMode )
		{
			if( !inBuffer->legacyActivelyBuffering )
			{
				ap_ulog( kLogLevelNotice,
					"'%s' Entering legacy buffering mode (%u ms underrun) - %u samples enqueued, %u packets enqueued\n",
					inBuffer->label, _APSRTPJitterBufferSamplesToMS( samplesOfSilenceToAppend, &inBuffer->outputFormat ),
					BYTES_TO_SAMPLES( MirroredRingBufferGetBytesUsed( &inBuffer->outputRingBuffer ) ),
					SimpleQueueGetCount( inBuffer->busyQueue ) );

				inBuffer->legacyActivelyBuffering = true;
				inBuffer->legacyBufferingStartTicks = UpTicks();

				ap_ulog( kLogLevelInfo, "'%s' Underrun occurred\n", inBuffer->label );
				++inBuffer->stats.underrunCount;
			}
		}
		else
		{
			if( decodeStarted )
			{
				//We need to advance the read head. This might move the read head ahead of the write head!
				//There's another thread moving the write head. We will let that thread catch up when necessary.

				MirroredRingBufferReadAdvance(&inBuffer->outputRingBuffer, SAMPLES_TO_BYTES( samplesOfSilenceToAppend ) );
			}

			inBuffer->outputBufferReadSampleTime += samplesOfSilenceToAppend;
		}

		memset( sampleBuffer, 0, SAMPLES_TO_BYTES( samplesOfSilenceToAppend ) );
	}

	return( err );
}


/*!	@function	APSRTPJitterBufferGetStats
	@abstract	Extracts statistics - number of late samples, number of gaps and number of skips.
 */

/*!	@function	APSRTPJitterBufferGetStats
	@abstract	Copies the jitter buffer's statistics (late samples, gaps, skips) into outStats.
	@result		kNoErr on success; kBadReferenceErr when either pointer is NULL.
 */
OSStatus APSRTPJitterBufferGetStats(
	APSRTPJitterBufferRef		inJitterBuffer,
	APSRTPJitterBufferStatistics * outStats )
{
	OSStatus		status = kNoErr;

	require_action( inJitterBuffer, exit, status = kBadReferenceErr );
	require_action( outStats, exit, status = kBadReferenceErr );

	// Struct copy of the current counters.
	*outStats = inJitterBuffer->stats;

exit:
	return status;
}




