/*
	File:    	AudioConverter_FDKAAC_OPUS.c
	Package: 	Apple CarPlay Communication Plug-in.
	Abstract: 	n/a 
	Version: 	n/a
	
	Disclaimer: IMPORTANT: This Apple software is supplied to you, by Apple Inc. ("Apple"), in your
	capacity as a current, and in good standing, Licensee in the MFi Licensing Program. Use of this
	Apple software is governed by and subject to the terms and conditions of your MFi License,
	including, but not limited to, the restrictions specified in the provision entitled ”Public 
	Software”, and is further subject to your agreement to the following additional terms, and your 
	agreement that the use, installation, modification or redistribution of this Apple software
	constitutes acceptance of these additional terms. If you do not agree with these additional terms,
	please do not use, install, modify or redistribute this Apple software.
	
	Subject to all of these terms and in consideration of your agreement to abide by them, Apple grants
	you, for as long as you are a current and in good-standing MFi Licensee, a personal, non-exclusive 
	license, under Apple's copyrights in this original Apple software (the "Apple Software"), to use, 
	reproduce, and modify the Apple Software in source form, and to use, reproduce, modify, and 
	redistribute the Apple Software, with or without modifications, in binary form. While you may not 
	redistribute the Apple Software in source form, should you redistribute the Apple Software in binary
	form, you must retain this notice and the following text and disclaimers in all such redistributions
	of the Apple Software. Neither the name, trademarks, service marks, or logos of Apple Inc. may be
	used to endorse or promote products derived from the Apple Software without specific prior written
	permission from Apple. Except as expressly stated in this notice, no other rights or licenses, 
	express or implied, are granted by Apple herein, including but not limited to any patent rights that
	may be infringed by your derivative works or by other works in which the Apple Software may be 
	incorporated.  
	
	Unless you explicitly state otherwise, if you provide any ideas, suggestions, recommendations, bug 
	fixes or enhancements to Apple in connection with this software (“Feedback”), you hereby grant to
	Apple a non-exclusive, fully paid-up, perpetual, irrevocable, worldwide license to make, use, 
	reproduce, incorporate, modify, display, perform, sell, make or have made derivative works of,
	distribute (directly or indirectly) and sublicense, such Feedback in connection with Apple products 
	and services. Providing this Feedback is voluntary, but if you do provide Feedback to Apple, you 
	acknowledge and agree that Apple may exercise the license granted above without the payment of 
	royalties or further consideration to Participant.
	
	The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO WARRANTIES, EXPRESS OR 
	IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY 
	AND FITNESS FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR
	IN COMBINATION WITH YOUR PRODUCTS.
	
	IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES 
	(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
	PROFITS; OR BUSINESS INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION 
	AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, TORT
	(INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE 
	POSSIBILITY OF SUCH DAMAGE.
	
	Copyright (C) 2014-2021 Apple Inc. All Rights Reserved. Not to be used or disclosed without permission from Apple.

	This sample audio converter utilizes Fraunhofer FDK AAC Codec Library and the Opus Interactive Audio Codec.
*/

#include "AudioConverter.h"

#include <DebugServices.h>
#include <AirPlayCommon.h>
#include <genericStds.h>

#if( !defined( AUDIOCONVERTER_ENABLE_AACELD ) )
	#define AUDIOCONVERTER_ENABLE_AACELD 1
#endif

#if( !defined( AUDIOCONVERTER_ENABLE_AAC_LC ) )
	#define AUDIOCONVERTER_ENABLE_AAC_LC 1
#endif

#if( !defined( AUDIOCONVERTER_ENABLE_OPUS ) )
	#define AUDIOCONVERTER_ENABLE_OPUS 1
#endif

#include "aacdecoder_lib.h"

#if( AUDIOCONVERTER_ENABLE_AACELD )
#include "aacenc_lib.h"
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
//#include <opus/opus.h>
#include "opus.h"
#endif

ulog_define( AudioConverter, kLogLevelNotice, kLogFlags_Default, "AudioConverter", NULL );
#define ac_dlog( LEVEL, ... ) dlogc( &log_category_from_name( AudioConverter ), (LEVEL), __VA_ARGS__ )
#define ac_ulog( LEVEL, ... ) ulog( &log_category_from_name( AudioConverter ), (LEVEL), __VA_ARGS__)

//===========================================================================================================================
//	Internals
//===========================================================================================================================

// Private converter state shared by all codec paths (AAC-LC, AAC-ELD, Opus).
typedef struct AudioConverterPrivate * AudioConverterPrivateRef;
struct AudioConverterPrivate
{
	uint32_t sourceFormatID;			// Four-char code of the source (input) audio format.
	uint32_t destFormatID;				// Four-char code of the destination (output) audio format.
	uint32_t sampleRate;				// Sample rate in Hz; used to pick encoder bitrates in the property setters.
	uint32_t channels;					// Channel count; encode paths compute bytes-per-frame as channels * sizeof(uint16_t).
	void * nativeCodecRef;				// Underlying codec handle: FDK AAC encoder/decoder or Opus encoder/decoder, depending on format.
	uint32_t savedBufferSizeInSamples;	// Allocated capacity of savedInputBuffer, in sample frames.
	uint32_t savedSampleCount;			// Number of PCM frames currently buffered in savedInputBuffer awaiting encode.
	void * savedInputBuffer;			// realloc()'d carry-over buffer holding PCM left over between encode calls.
};

#define kAudioConverterEncodeBitRate 0x62726174 //'brat'

#if( AUDIOCONVERTER_ENABLE_OPUS )
#define kAudioMillisecondsPerPerPacket_Opus     20
#endif

//===========================================================================================================================
//	GetSamplingFrequencyIndex
//
//	Maps a sample rate in Hz to its AAC sampling-frequency index (the position
//	in the standard AAC rate table). Returns -1 when the rate is not in the table.
//===========================================================================================================================
static int GetSamplingFrequencyIndex( const uint32_t inFrequency ) {
	static const unsigned kSamplingFrequencies[] = {
		96000, 88200, 64000, 48000,
		44100, 32000, 24000, 22050,
		16000, 12000, 11025, 8000,
		7350
	};
	const size_t kTableCount = sizeof( kSamplingFrequencies ) / sizeof( kSamplingFrequencies[ 0 ] );
	size_t idx;

	// The table is tiny, so a simple linear scan is plenty.
	for( idx = 0; idx < kTableCount; idx++ ) {
		if( kSamplingFrequencies[ idx ] == inFrequency ) {
			return (int) idx;
		}
	}
	return -1;
}

//===========================================================================================================================
//	AAC-LC
//===========================================================================================================================

//	The AAC-LC decoder currently exposes no settable properties, so every
//	property ID is rejected. Parameters are unused but keep the signature
//	consistent with the other codec property setters in this file.
//	FIX: removed the unreachable trailing "return kNoErr" — the switch's
//	default case returned on every path, so that statement was dead code.
static OSStatus _AudioConverterSetPropertyAACDecode( AudioConverterPrivateRef const me, AudioConverterPropertyID inPropertyID, uint32_t inSize, const void * inData )
{
	(void)me;
	(void)inSize;
	(void)inData;
	(void)inPropertyID;

	return kUnsupportedErr;
}

//	Decodes one AAC-LC packet: pulls a single compressed packet from the input
//	callback, feeds it to the FDK decoder, and writes one frame of PCM
//	(kAudioSamplesPerPacket_AAC_LC sample frames) into outOutputData.
//
//	FIXES vs. original:
//	- aacDecoder_Fill() failure was noted but then silently overwritten by the
//	  subsequent DecodeFrame call; we now bail out on Fill failure (matching
//	  the AAC-ELD decode path).
//	- The AAC_DEC_NOT_ENOUGH_BITS check was nested inside IS_DECODE_ERROR(),
//	  where it could never match (NOT_ENOUGH_BITS is a sync-layer code, not a
//	  decode-layer code in FDK); it is now checked independently.
//	- OSStatus and FDK AAC_DECODER_ERROR codes are no longer conflated in one
//	  variable, and kNoErr is returned explicitly on success.
static OSStatus _AudioConverterFillComplexBufferAACDecode(AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;
	AAC_DECODER_ERROR					aacErr;
	AudioBufferList						bufferList;
	uint32_t							packetCount;
	AudioStreamPacketDescription *		packetDesc;
	UCHAR *								aacInputBuffers[ 1 ];
	UINT								aacInputBufferSizes[ 1 ];
	UINT								numValidBytes[ 1 ];

	// The caller must provide room for a full packet of decoded samples.
	if( *ioOutputDataPacketSize < kAudioSamplesPerPacket_AAC_LC ) {
		return kSizeErr;
	}

	packetCount = 1;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &packetCount, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	aacInputBuffers[ 0 ]     = (UCHAR *) bufferList.mBuffers[ 0 ].mData;
	aacInputBufferSizes[ 0 ] = bufferList.mBuffers[ 0 ].mDataByteSize;
	numValidBytes[ 0 ]       = bufferList.mBuffers[ 0 ].mDataByteSize;

	// Hand the compressed packet to the decoder's internal input buffer.
	aacErr = aacDecoder_Fill( me->nativeCodecRef, aacInputBuffers, aacInputBufferSizes, numValidBytes );
	if( aacErr != AAC_DEC_OK ) {
		err = kUnknownErr;
		goto exit;
	}

	aacErr = aacDecoder_DecodeFrame( me->nativeCodecRef, outOutputData->mBuffers[ 0 ].mData, outOutputData->mBuffers[ 0 ].mDataByteSize, 0 );
	if( aacErr == AAC_DEC_NOT_ENOUGH_BITS ) {
		// The packet did not contain enough data for a full frame.
		err = kSizeErr;
		goto exit;
	}
	if( IS_DECODE_ERROR( aacErr ) ) {
		err = kUnknownErr;
		goto exit;
	}

	if( outPacketDescription ) {
		outPacketDescription->mStartOffset = 0;
		outPacketDescription->mVariableFramesInPacket = 0; // Constant at kAudioSamplesPerPacket_AAC_LC
		outPacketDescription->mDataByteSize = kAudioSamplesPerPacket_AAC_LC * sizeof(uint16_t) * me->channels;
	}
	err = kNoErr;
	*ioOutputDataPacketSize = kAudioSamplesPerPacket_AAC_LC;
exit:
	return err;
}

//===========================================================================================================================
//	AAC-LC
//===========================================================================================================================
#if( AUDIOCONVERTER_ENABLE_AAC_LC )



//	Applies encoder properties to the AAC-LC (high-latency) encoder. Only
//	kAudioCodecPropertyPacketSizeLimitForVBR is supported: it selects the
//	high-latency bitrate from the configured sample rate and pushes it into
//	the FDK encoder.
//
//	FIX: the original first computed a low-latency bitrate from a sample-rate
//	tier table and then unconditionally overwrote it with the high-latency
//	value — those dead stores are removed. The only effective behavior of the
//	tier chain was rejecting rates above 48 kHz, which is preserved. Also
//	removed the stale "(void)me" (me is used).
static OSStatus _AudioConverterSetPropertyAACEncode( AudioConverterPrivateRef const me, AudioConverterPropertyID	inPropertyID, uint32_t inSize, const void * inData )
{
	(void)inData;

	switch ( inPropertyID ) {
		case kAudioCodecPropertyPacketSizeLimitForVBR:
		{
			uint32_t bitrate;

			if( inSize != sizeof( uint32_t ) )
				return kSizeErr;

			// Sample rates above 48 kHz are not supported by this encoder setup.
			if( me->sampleRate > 48000 )
				return kUnknownErr;

			// Halve the high-latency bitrate for rates at or below 44.1 kHz.
			if( me->sampleRate <= 44100 )
				bitrate = ( kAirPlayAudioBitrateHighLatency / 2 );
			else
				bitrate = kAirPlayAudioBitrateHighLatency;

			if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_BITRATE, bitrate ) != AACENC_OK ) {
				return kUnknownErr;
			}
			return kNoErr;
		}
		default:
			return kUnsupportedErr;
	}
}


//	Encodes AAC-LC. Pulls PCM from the input callback, carries partial packets
//	across calls in me->savedInputBuffer, and encodes at most ONE full packet
//	(kAudioSamplesPerPacket_AAC_LC frames) per invocation via aacEncEncode().
//	On return, *ioOutputDataPacketSize is the number of encoded packets (0 or 1)
//	and outOutputData->mBuffers[0].mDataByteSize is the bitstream byte count.
static OSStatus _AudioConverterFillComplexBufferAACEncode( AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;

	AudioBufferList						bufferList;
	uint32_t							inputSamplesAvailable;	// sample frames delivered by the input callback
	AudioStreamPacketDescription *		packetDesc;

	uint32_t outputPacketSizeMax = *ioOutputDataPacketSize;	// caller's capacity, in packets

	// FDK encoder buffer descriptors: one PCM input buffer, one bitstream output buffer.
	int in_identifier = IN_AUDIO_DATA;
	int in_size, in_elem_size;
	int out_identifier = OUT_BITSTREAM_DATA;
	int out_size, out_elem_size;
	void *in_ptr, *out_ptr;

	AACENC_BufDesc in_buf, out_buf;
	AACENC_InArgs in_args;
	AACENC_OutArgs out_args;

	memset(&in_buf, 0, sizeof(in_buf));
	memset(&in_args, 0, sizeof(in_args));
	memset(&out_args, 0, sizeof(out_args));

	// Require at least 1 output packet
	if( outputPacketSizeMax < 1 ) {
		return kSizeErr;
	}
	bufferList.mNumberBuffers = 1;

	// just use a large number since the callback will cap it
	inputSamplesAvailable = kAudioSamplesPerPacket_AAC_LC;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &inputSamplesAvailable, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	size_t bytesPerFrame = me->channels * sizeof( uint16_t );	// 16-bit PCM assumed — TODO confirm
	uint32_t outputPacketNum = 0;
	uint8_t * outputBuffer = outOutputData->mBuffers[ outputPacketNum ].mData;	// NOTE(review): unused in this variant
	size_t outputOffset = 0;
	size_t outputBufferByteSize = outOutputData->mBuffers[ outputPacketNum ].mDataByteSize;	// NOTE(review): unused in this variant
	void * inputSamples = bufferList.mBuffers[ 0 ].mData;
//	ac_ulog( kLogLevelError, "aacEncoder me->savedSampleCount = %d  ",me->savedSampleCount);
//	ac_ulog( kLogLevelError, "aacEncoder me->inputSamplesAvailable = %d  ",inputSamplesAvailable);
//	ac_ulog( kLogLevelError, "aacEncoder me->totalSampleCount = %d  ",( me->savedSampleCount + inputSamplesAvailable ));

	// Buffer samples if there is not enough for encode.  This encoder has an internal buffer
	// as well if we don't want to buffer ourselves.
	if( me->savedSampleCount + inputSamplesAvailable < kAudioSamplesPerPacket_AAC_LC ) {
		goto copyRemainingSamples;
	}




	uint32_t totalSampleCount = ( me->savedSampleCount + inputSamplesAvailable );	// NOTE(review): computed but never used
	// Staging buffer for exactly one packet of PCM.
	// NOTE(review): assumes kAudioSamplesPerPacket_AAC_LC * bytesPerFrame <= 4096 — confirm.
	uint8_t pcmBuffer [ 4096 ];
	memset(pcmBuffer, 0, 4096);
	uint32_t storeSampleCount = 0;	// frames staged into pcmBuffer so far
	// Drain previously-saved frames into the staging buffer first (FIFO order).
	if( me->savedSampleCount > 0 )
	{
		if( me->savedSampleCount == kAudioSamplesPerPacket_AAC_LC )
		{
			// Exactly one packet saved: consume all of it.
			memcpy(pcmBuffer,me->savedInputBuffer,( me->savedSampleCount * bytesPerFrame));
			storeSampleCount += kAudioSamplesPerPacket_AAC_LC;
			me->savedSampleCount -= kAudioSamplesPerPacket_AAC_LC;
		}
		else  if( me->savedSampleCount > kAudioSamplesPerPacket_AAC_LC)
		{
			// More than one packet saved: consume one packet, then slide the
			// remainder to the front of the saved buffer.
			uint32_t savedSampleBytesTotal  = ( me->savedSampleCount * bytesPerFrame);

			memcpy(pcmBuffer,me->savedInputBuffer,( kAudioSamplesPerPacket_AAC_LC * bytesPerFrame));
			storeSampleCount += kAudioSamplesPerPacket_AAC_LC;
			me->savedSampleCount -= kAudioSamplesPerPacket_AAC_LC;
			uint32_t savedSampleBytesConsumed  = ( kAudioSamplesPerPacket_AAC_LC * bytesPerFrame);
			if (me->savedSampleCount > 0) {
				// Regions overlap, hence memmove rather than memcpy.
				memmove( (void *)me->savedInputBuffer, (void *)((uint8_t *)me->savedInputBuffer + savedSampleBytesConsumed), savedSampleBytesTotal - savedSampleBytesConsumed);
			}

		}
		else  if( me->savedSampleCount < kAudioSamplesPerPacket_AAC_LC)
		{
			// Less than a packet saved: take everything; the rest of the packet
			// is topped up from fresh input below.
			memcpy(pcmBuffer,me->savedInputBuffer,( me->savedSampleCount * bytesPerFrame));
			storeSampleCount += me->savedSampleCount;
			me->savedSampleCount -= me->savedSampleCount;

		}

	}

	size_t inputOffset = 0;
	// Top up the staging buffer from the fresh callback data to reach exactly one packet.
	uint32_t oweSampleCount = kAudioSamplesPerPacket_AAC_LC - storeSampleCount;
	if( storeSampleCount < kAudioSamplesPerPacket_AAC_LC && inputSamplesAvailable >= oweSampleCount)
	{
		uint32_t inputSamplesAvailableSize = bufferList.mBuffers[ 0 ].mDataByteSize - inputOffset;	// NOTE(review): unused
		memcpy(pcmBuffer+( storeSampleCount * bytesPerFrame),inputSamples,( oweSampleCount * bytesPerFrame));

					// consumed samples: advance past what was copied
		size_t inputSamplesAvailableBytesConsumed = oweSampleCount * bytesPerFrame;
		inputOffset += inputSamplesAvailableBytesConsumed;
		inputSamples = (uint8_t *)inputSamples + inputSamplesAvailableBytesConsumed;
		inputSamplesAvailable -= oweSampleCount;
		storeSampleCount += oweSampleCount;


	}
//	ac_ulog( kLogLevelError, "aacEncoder storeSampleCount = %d  ",storeSampleCount);
	// Start encoding (only when exactly one full packet has been staged).
	if( storeSampleCount ==  kAudioSamplesPerPacket_AAC_LC ) {
		// NOTE(review): FDK counts individual samples across all channels; the
		// literal "* 2" assumes stereo — confirm against me->channels.
		in_args.numInSamples = storeSampleCount * 2;
		in_ptr = pcmBuffer;
		in_size = kAudioSamplesPerPacket_AAC_LC * bytesPerFrame;
		in_elem_size = 2;	// bytes per 16-bit PCM sample
		in_buf.numBufs = 1;
		in_buf.bufs = &in_ptr;
		in_buf.bufSizes = &in_size;
		in_buf.bufferIdentifiers = &in_identifier;
		in_buf.bufElSizes = &in_elem_size;

		out_ptr = (uint8_t *)outOutputData->mBuffers[0].mData + outputOffset;
		out_size = outOutputData->mBuffers[0].mDataByteSize - outputOffset;
		out_elem_size = 1;
		out_buf.numBufs = 1;
		out_buf.bufs = &out_ptr;
		out_buf.bufferIdentifiers = &out_identifier;
		out_buf.bufSizes = &out_size;
		out_buf.bufElSizes = &out_elem_size;

		err = aacEncEncode( me->nativeCodecRef, &in_buf, &out_buf, &in_args, &out_args );
//		ac_ulog( kLogLevelError, "aacEncEncode err = %d  ",err);
//		ac_ulog( kLogLevelError, "aacEncEncode out_args.numInSamples = %d  ",out_args.numInSamples);
//		ac_ulog( kLogLevelError, "aacEncEncode out_args.numOutBytes = %d  ",out_args.numOutBytes);
		if( out_args.numInSamples == 0 ) {
			// nothing consumed, break out of here
			// NOTE(review): this early return skips the leftover-sample save below.
			ac_ulog( kLogLevelError, "aacEncEncode nothing consumed, break out of here.");
			return kUnknownErr;
		}
		if( out_args.numOutBytes == 0 ) {
			// Input was consumed but no bitstream produced yet (e.g. encoder priming); warn only.
			ac_ulog( kLogLevelError, "aacEncEncode nothing consumed, out_args.numOutBytes = 0.");
		}

		if( out_args.numOutBytes > 0 ) {
			// bitstream outputted, always 1 full packet
			if( outPacketDescription ) {
				outPacketDescription[ outputPacketNum ].mStartOffset = outputOffset;
				outPacketDescription[ outputPacketNum ].mVariableFramesInPacket = 0; // Constant at kAudioSamplesPerPacket_AAC_LC
				outPacketDescription[ outputPacketNum ].mDataByteSize = out_args.numOutBytes;
			}
			outputPacketNum++;
			outputOffset += out_args.numOutBytes;
		}
		if ( outputPacketNum >= outputPacketSizeMax ) {
			goto copyRemainingSamples;
		}
	}

copyRemainingSamples:
	// Save any unconsumed input frames for the next call, growing the saved
	// buffer if its capacity is insufficient.
	if( inputSamplesAvailable > 0 )
	{
		uint32_t newSavedSamples = me->savedSampleCount + inputSamplesAvailable;
		uint32_t newBufferSizeInSamples = newSavedSamples;
		if( me->savedBufferSizeInSamples < newBufferSizeInSamples )
		{
			void * newBuffer = realloc( me->savedInputBuffer, newBufferSizeInSamples * bytesPerFrame );
			if( !newBuffer )
			{
				err = kNoMemoryErr;
				goto exit;
			}
			me->savedInputBuffer = newBuffer;
			me->savedBufferSizeInSamples = newBufferSizeInSamples;
		}
		memcpy( &((uint8_t*)me->savedInputBuffer)[ me->savedSampleCount * bytesPerFrame ], inputSamples, inputSamplesAvailable * bytesPerFrame );
		me->savedSampleCount = newSavedSamples;
	}

	*ioOutputDataPacketSize = outputPacketNum;
	outOutputData->mBuffers[0].mDataByteSize = outputOffset;
exit:
	return err;
}

#endif //AUDIOCONVERTER_ENABLE_AAC_LC

//===========================================================================================================================
//	AAC-ELD
//===========================================================================================================================
#if( AUDIOCONVERTER_ENABLE_AACELD )

//	The AAC-ELD decoder exposes no settable properties; every request is
//	rejected. Parameters are unused but keep the setter-signature symmetry
//	with the other codecs in this file.
static OSStatus _AudioConverterSetPropertyAACELDDecode( AudioConverterPrivateRef const me, AudioConverterPropertyID	inPropertyID, uint32_t inSize, const void * inData )
{
	(void)me;
	(void)inPropertyID;
	(void)inSize;
	(void)inData;

	return kUnsupportedErr;
}

//	Applies encoder properties to the AAC-ELD encoder. Only the VBR packet
//	size limit property is handled: it selects a low-latency bitrate tier
//	from the configured sample rate and pushes it into the FDK encoder.
static OSStatus _AudioConverterSetPropertyAACELDEncode( AudioConverterPrivateRef const me, AudioConverterPropertyID	inPropertyID, uint32_t inSize, const void * inData )
{
	(void)inData;

	if( inPropertyID != kAudioCodecPropertyPacketSizeLimitForVBR ) {
		return kUnsupportedErr;
	}
	if( inSize != sizeof( uint32_t ) ) {
		return kSizeErr;
	}

	// Pick the low-latency bitrate tier for the configured sample rate.
	uint32_t bitrate;
	if( me->sampleRate <= 24000 ) {
		bitrate = kAirPlayAudioBitrateLowLatencyUpTo24KHz;
	} else if( me->sampleRate <= 32000 ) {
		bitrate = kAirPlayAudioBitrateLowLatencyUpTo32KHz;
	} else if( me->sampleRate <= 48000 ) {
		bitrate = kAirPlayAudioBitrateLowLatencyUpTo48KHz;
	} else {
		// Sample rates above 48 kHz are not supported.
		return kUnknownErr;
	}

	return ( aacEncoder_SetParam( me->nativeCodecRef, AACENC_BITRATE, bitrate ) == AACENC_OK ) ? kNoErr : kUnknownErr;
}

//	Decodes one AAC-ELD packet: pulls one compressed packet from the input
//	callback, feeds it entirely to the FDK decoder, and writes one frame of
//	PCM (kAudioSamplesPerPacket_AAC_ELD sample frames) into outOutputData.
//
//	FIX: the original set kNoErr only when outPacketDescription was non-NULL;
//	for callers passing NULL the raw FDK decoder status escaped as the
//	OSStatus return value. Success now always yields kNoErr.
static OSStatus _AudioConverterFillComplexBufferAACELDDecode(AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;
	AudioBufferList						bufferList;
	uint32_t							packetCount;
	AudioStreamPacketDescription *		packetDesc;
	UCHAR *								aacInputBuffer;
	UINT								aacInputBufferSize;
	UINT								numValidBytes;

	// The caller must provide room for a full packet of decoded samples.
	if( *ioOutputDataPacketSize < kAudioSamplesPerPacket_AAC_ELD ) {
		return kSizeErr;
	}

	packetCount = 1;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &packetCount, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	aacInputBuffer     = (UCHAR *) bufferList.mBuffers[ 0 ].mData;
	aacInputBufferSize = bufferList.mBuffers[ 0 ].mDataByteSize;
	numValidBytes      = bufferList.mBuffers[ 0 ].mDataByteSize;

	// Fill the decoder's internal input buffer. numValidBytes comes back as
	// the count NOT yet consumed, so anything non-zero means the decoder did
	// not swallow the whole packet — treat that as a failure.
	if( ( aacDecoder_Fill( me->nativeCodecRef, &aacInputBuffer, &aacInputBufferSize, &numValidBytes ) != AAC_DEC_OK ) || ( numValidBytes != 0 ) ) {
		err = kUnknownErr;
		goto exit;
	}

	// NOTE(review): FDK's aacDecoder_DecodeFrame() takes the output size in
	// INT_PCM samples in current releases; mDataByteSize is a byte count —
	// confirm against the bundled aacdecoder_lib.h.
	err = aacDecoder_DecodeFrame( me->nativeCodecRef, (INT_PCM *)(outOutputData->mBuffers[ 0 ].mData), outOutputData->mBuffers[ 0 ].mDataByteSize, 0 );
	if( IS_DECODE_ERROR( err ) ) {
		err = kUnknownErr;
		goto exit;
	}

	if( outPacketDescription ) {
		outPacketDescription[ 0 ].mStartOffset = 0;
		outPacketDescription[ 0 ].mVariableFramesInPacket = 0; // Constant at kAudioSamplesPerPacket_AAC_ELD
		outPacketDescription[ 0 ].mDataByteSize = kAudioSamplesPerPacket_AAC_ELD * sizeof(uint16_t) * me->channels;
	}
	err = kNoErr;
	*ioOutputDataPacketSize = kAudioSamplesPerPacket_AAC_ELD;
exit:
	return err;
}

//	Encodes AAC-ELD ("high" variant). Pulls PCM from the input callback,
//	carries partial packets across calls in me->savedInputBuffer, and encodes
//	at most ONE full packet (kAudioSamplesPerPacket_AAC_ELD frames) per call
//	via aacEncEncode(). Structure mirrors the AAC-LC encode path above, with
//	the ELD packet size and a smaller staging buffer.
static OSStatus _AudioConverterFillComplexBufferAACELDHighEncode( AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;

	AudioBufferList						bufferList;
	uint32_t							inputSamplesAvailable;	// sample frames delivered by the input callback
	AudioStreamPacketDescription *		packetDesc;

	uint32_t outputPacketSizeMax = *ioOutputDataPacketSize;	// caller's capacity, in packets

	// FDK encoder buffer descriptors: one PCM input buffer, one bitstream output buffer.
	int in_identifier = IN_AUDIO_DATA;
	int in_size, in_elem_size;
	int out_identifier = OUT_BITSTREAM_DATA;
	int out_size, out_elem_size;
	void *in_ptr, *out_ptr;

	AACENC_BufDesc in_buf, out_buf;
	AACENC_InArgs in_args;
	AACENC_OutArgs out_args;

	memset(&in_buf, 0, sizeof(in_buf));
	memset(&in_args, 0, sizeof(in_args));
	memset(&out_args, 0, sizeof(out_args));

	// Require at least 1 output packet
	if( outputPacketSizeMax < 1 ) {
		return kSizeErr;
	}
	bufferList.mNumberBuffers = 1;

	// just use a large number since the callback will cap it
	inputSamplesAvailable = kAudioSamplesPerPacket_AAC_ELD;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &inputSamplesAvailable, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	size_t bytesPerFrame = me->channels * sizeof( uint16_t );	// 16-bit PCM assumed — TODO confirm
	uint32_t outputPacketNum = 0;
	uint8_t * outputBuffer = outOutputData->mBuffers[ outputPacketNum ].mData;	// NOTE(review): unused in this variant
	size_t outputOffset = 0;
	size_t outputBufferByteSize = outOutputData->mBuffers[ outputPacketNum ].mDataByteSize;	// NOTE(review): unused in this variant
	void * inputSamples = bufferList.mBuffers[ 0 ].mData;
//	ac_ulog( kLogLevelError, "aacEncoder me->savedSampleCount = %d  ",me->savedSampleCount);
//	ac_ulog( kLogLevelError, "aacEncoder me->inputSamplesAvailable = %d  ",inputSamplesAvailable);
//	ac_ulog( kLogLevelError, "aacEncoder me->totalSampleCount = %d  ",( me->savedSampleCount + inputSamplesAvailable ));

	// Buffer samples if there is not enough for encode.  This encoder has an internal buffer
	// as well if we don't want to buffer ourselves.
	if( me->savedSampleCount + inputSamplesAvailable < kAudioSamplesPerPacket_AAC_ELD ) {
		goto copyRemainingSamples;
	}




	uint32_t totalSampleCount = ( me->savedSampleCount + inputSamplesAvailable );	// NOTE(review): computed but never used
	// Staging buffer for exactly one packet of PCM.
	// NOTE(review): assumes kAudioSamplesPerPacket_AAC_ELD * bytesPerFrame <= 1920 — confirm.
	uint8_t pcmBuffer [ 1920 ];
	memset(pcmBuffer, 0, 1920);
	uint32_t storeSampleCount = 0;	// frames staged into pcmBuffer so far
	// Drain previously-saved frames into the staging buffer first (FIFO order).
	if( me->savedSampleCount > 0 )
	{
		if( me->savedSampleCount == kAudioSamplesPerPacket_AAC_ELD )
		{
			// Exactly one packet saved: consume all of it.
			memcpy(pcmBuffer,me->savedInputBuffer,( me->savedSampleCount * bytesPerFrame));
			storeSampleCount += kAudioSamplesPerPacket_AAC_ELD;
			me->savedSampleCount -= kAudioSamplesPerPacket_AAC_ELD;
		}
		else  if( me->savedSampleCount > kAudioSamplesPerPacket_AAC_ELD)
		{
			// More than one packet saved: consume one packet, then slide the
			// remainder to the front of the saved buffer.
			uint32_t savedSampleBytesTotal  = ( me->savedSampleCount * bytesPerFrame);

			memcpy(pcmBuffer,me->savedInputBuffer,( kAudioSamplesPerPacket_AAC_ELD * bytesPerFrame));
			storeSampleCount += kAudioSamplesPerPacket_AAC_ELD;
			me->savedSampleCount -= kAudioSamplesPerPacket_AAC_ELD;
			uint32_t savedSampleBytesConsumed  = ( kAudioSamplesPerPacket_AAC_ELD * bytesPerFrame);
			if (me->savedSampleCount > 0) {
				// Regions overlap, hence memmove rather than memcpy.
				memmove( (void *)me->savedInputBuffer, (void *)((uint8_t *)me->savedInputBuffer + savedSampleBytesConsumed), savedSampleBytesTotal - savedSampleBytesConsumed);
			}

		}
		else  if( me->savedSampleCount < kAudioSamplesPerPacket_AAC_ELD)
		{
			// Less than a packet saved: take everything; the rest of the packet
			// is topped up from fresh input below.
			memcpy(pcmBuffer,me->savedInputBuffer,( me->savedSampleCount * bytesPerFrame));
			storeSampleCount += me->savedSampleCount;
			me->savedSampleCount -= me->savedSampleCount;

		}

	}

	size_t inputOffset = 0;
	// Top up the staging buffer from the fresh callback data to reach exactly one packet.
	uint32_t oweSampleCount = kAudioSamplesPerPacket_AAC_ELD - storeSampleCount;
	if( storeSampleCount < kAudioSamplesPerPacket_AAC_ELD && inputSamplesAvailable >= oweSampleCount)
	{
		uint32_t inputSamplesAvailableSize = bufferList.mBuffers[ 0 ].mDataByteSize - inputOffset;	// NOTE(review): unused
		memcpy(pcmBuffer+( storeSampleCount * bytesPerFrame),inputSamples,( oweSampleCount * bytesPerFrame));

					// consumed samples: advance past what was copied
		size_t inputSamplesAvailableBytesConsumed = oweSampleCount * bytesPerFrame;
		inputOffset += inputSamplesAvailableBytesConsumed;
		inputSamples = (uint8_t *)inputSamples + inputSamplesAvailableBytesConsumed;
		inputSamplesAvailable -= oweSampleCount;
		storeSampleCount += oweSampleCount;


	}
//	ac_ulog( kLogLevelError, "aacEncoder storeSampleCount = %d  ",storeSampleCount);
	// Start encoding (only when exactly one full packet has been staged).
	if( storeSampleCount ==  kAudioSamplesPerPacket_AAC_ELD ) {
		// NOTE(review): FDK counts individual samples across all channels; the
		// literal "* 2" assumes stereo — confirm against me->channels.
		in_args.numInSamples = storeSampleCount * 2;
		in_ptr = pcmBuffer;
		in_size = kAudioSamplesPerPacket_AAC_ELD * bytesPerFrame;
		in_elem_size = 2;	// bytes per 16-bit PCM sample
		in_buf.numBufs = 1;
		in_buf.bufs = &in_ptr;
		in_buf.bufSizes = &in_size;
		in_buf.bufferIdentifiers = &in_identifier;
		in_buf.bufElSizes = &in_elem_size;

		out_ptr = (uint8_t *)outOutputData->mBuffers[0].mData + outputOffset;
		out_size = outOutputData->mBuffers[0].mDataByteSize - outputOffset;
		out_elem_size = 1;
		out_buf.numBufs = 1;
		out_buf.bufs = &out_ptr;
		out_buf.bufferIdentifiers = &out_identifier;
		out_buf.bufSizes = &out_size;
		out_buf.bufElSizes = &out_elem_size;

		err = aacEncEncode( me->nativeCodecRef, &in_buf, &out_buf, &in_args, &out_args );
//		ac_ulog( kLogLevelError, "aacEncEncode err = %d  ",err);
//		ac_ulog( kLogLevelError, "aacEncEncode out_args.numInSamples = %d  ",out_args.numInSamples);
//		ac_ulog( kLogLevelError, "aacEncEncode out_args.numOutBytes = %d  ",out_args.numOutBytes);
		if( out_args.numInSamples == 0 ) {
			// nothing consumed, break out of here
			// NOTE(review): this early return skips the leftover-sample save below.
			ac_ulog( kLogLevelError, "aacEncEncode nothing consumed, break out of here.");
			return kUnknownErr;
		}
		if( out_args.numOutBytes == 0 ) {
			// Input was consumed but no bitstream produced yet (e.g. encoder priming); warn only.
			ac_ulog( kLogLevelError, "aacEncEncode nothing consumed, out_args.numOutBytes = 0.");
		}

		if( out_args.numOutBytes > 0 ) {
			// bitstream outputted, always 1 full packet
			if( outPacketDescription ) {
				outPacketDescription[ outputPacketNum ].mStartOffset = outputOffset;
				outPacketDescription[ outputPacketNum ].mVariableFramesInPacket = 0; // Constant at kAudioSamplesPerPacket_AAC_ELD
				outPacketDescription[ outputPacketNum ].mDataByteSize = out_args.numOutBytes;
			}
			outputPacketNum++;
			outputOffset += out_args.numOutBytes;
		}
		if ( outputPacketNum >= outputPacketSizeMax ) {
			goto copyRemainingSamples;
		}
	}

copyRemainingSamples:
	// Save any unconsumed input frames for the next call, growing the saved
	// buffer if its capacity is insufficient.
	if( inputSamplesAvailable > 0 )
	{
		uint32_t newSavedSamples = me->savedSampleCount + inputSamplesAvailable;
		uint32_t newBufferSizeInSamples = newSavedSamples;
		if( me->savedBufferSizeInSamples < newBufferSizeInSamples )
		{
			void * newBuffer = realloc( me->savedInputBuffer, newBufferSizeInSamples * bytesPerFrame );
			if( !newBuffer )
			{
				err = kNoMemoryErr;
				goto exit;
			}
			me->savedInputBuffer = newBuffer;
			me->savedBufferSizeInSamples = newBufferSizeInSamples;
		}
		memcpy( &((uint8_t*)me->savedInputBuffer)[ me->savedSampleCount * bytesPerFrame ], inputSamples, inputSamplesAvailable * bytesPerFrame );
		me->savedSampleCount = newSavedSamples;
	}

	*ioOutputDataPacketSize = outputPacketNum;
	outOutputData->mBuffers[0].mDataByteSize = outputOffset;
exit:
	return err;
}

//	Encodes AAC-ELD (low-latency variant). Two-phase strategy: first drain any
//	PCM frames saved from previous calls (me->savedInputBuffer), then feed the
//	fresh callback data, producing as many packets as fit in the caller's
//	output capacity. Unconsumed input frames are saved for the next call.
static OSStatus _AudioConverterFillComplexBufferAACELDEncode( AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;

	AudioBufferList						bufferList;
	uint32_t							inputSamplesAvailable;	// sample frames delivered by the input callback
	AudioStreamPacketDescription *		packetDesc;

	uint32_t outputPacketSizeMax = *ioOutputDataPacketSize;	// caller's capacity, in packets

	// FDK encoder buffer descriptors: one PCM input buffer, one bitstream output buffer.
	int in_identifier = IN_AUDIO_DATA;
	int in_size, in_elem_size;
	int out_identifier = OUT_BITSTREAM_DATA;
	int out_size, out_elem_size;
	void *in_ptr, *out_ptr;

	AACENC_BufDesc in_buf, out_buf;
	AACENC_InArgs in_args;
	AACENC_OutArgs out_args;

	memset(&in_buf, 0, sizeof(in_buf));
	memset(&in_args, 0, sizeof(in_args));
	memset(&out_args, 0, sizeof(out_args));

	// Require at least 1 output packet
	if( outputPacketSizeMax < 1 ) {
		return kSizeErr;
	}
	bufferList.mNumberBuffers = 1;

	// just use a large number since the callback will cap it
	inputSamplesAvailable = kAudioSamplesPerPacket_AAC_ELD;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &inputSamplesAvailable, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	size_t bytesPerFrame = me->channels * sizeof( uint16_t );	// 16-bit PCM assumed — TODO confirm
	uint32_t outputPacketNum = 0;
	uint8_t * outputBuffer = outOutputData->mBuffers[ outputPacketNum ].mData;
	size_t outputOffset = 0;
	size_t outputBufferByteSize = outOutputData->mBuffers[ outputPacketNum ].mDataByteSize;
	void * inputSamples = bufferList.mBuffers[ 0 ].mData;

	// Buffer samples if there is not enough for encode.  This encoder has an internal buffer
	// as well if we don't want to buffer ourselves.
	if( me->savedSampleCount + inputSamplesAvailable < kAudioSamplesPerPacket_AAC_ELD ) {
		goto copyRemainingSamples;
	}

	// Phase 1: feed the carried-over frames from previous calls to the encoder
	// until it has consumed them all.
	while( me->savedSampleCount ) {
		// NOTE(review): numInSamples is passed as frames here; FDK counts
		// individual samples across channels — confirm for multichannel input.
		in_args.numInSamples = me->savedSampleCount;
		in_size = me->savedSampleCount * bytesPerFrame;
		in_ptr = me->savedInputBuffer;
		in_elem_size = 2;	// bytes per 16-bit PCM sample
		in_buf.numBufs = 1;
		in_buf.bufs = &in_ptr;
		in_buf.bufSizes = &in_size;
		in_buf.bufferIdentifiers = &in_identifier;
		in_buf.bufElSizes = &in_elem_size;

		out_ptr = outputBuffer + outputOffset;
		out_size = outputBufferByteSize - outputOffset;
		out_elem_size = 1;
		out_buf.numBufs = 1;
		out_buf.bufs = &out_ptr;
		out_buf.bufferIdentifiers = &out_identifier;
		out_buf.bufSizes = &out_size;
		out_buf.bufElSizes = &out_elem_size;

		err = aacEncEncode( me->nativeCodecRef, &in_buf, &out_buf, &in_args, &out_args );
		if( err ) {
			// NOTE(review): returning here (and below) skips the
			// leftover-sample save at copyRemainingSamples.
			return kUnknownErr;
		}
		if( out_args.numInSamples == 0 ) {
			// nothing consumed, break out of here
			return kUnknownErr;
		} else {
			// Remove the consumed frames from the front of the saved buffer.
			size_t bytesConsumed = out_args.numInSamples * bytesPerFrame;
			me->savedSampleCount -= out_args.numInSamples;
			if (bytesConsumed != (size_t)in_size) {
				// Overlapping regions, hence memmove.
				memmove( (void *)me->savedInputBuffer, (void *)((uint8_t *)me->savedInputBuffer + bytesConsumed), in_size - bytesConsumed);
			}
		}
		if( out_args.numOutBytes > 0 ) {
			// bitstream outputted, always 1 full packet
			if( outPacketDescription ) {
				outPacketDescription[ outputPacketNum ].mStartOffset = outputOffset;
				outPacketDescription[ outputPacketNum ].mVariableFramesInPacket = 0; // Constant at kAudioSamplesPerPacket_AAC_ELD
				outPacketDescription[ outputPacketNum ].mDataByteSize = out_args.numOutBytes;
			}
			outputPacketNum++;
			outputOffset += out_args.numOutBytes;
		}
		if ( outputPacketNum >= outputPacketSizeMax ) {
			// Caller's packet capacity reached; save whatever input remains.
			goto copyRemainingSamples;
		}
	}

	size_t inputOffset = 0;

	// Phase 2: feed the fresh callback data to the encoder.
	while( inputSamplesAvailable ) {
		in_args.numInSamples = inputSamplesAvailable;
		in_ptr = inputSamples;
		in_size = bufferList.mBuffers[ 0 ].mDataByteSize - inputOffset;
		in_elem_size = 2;	// bytes per 16-bit PCM sample
		in_buf.numBufs = 1;
		in_buf.bufs = &in_ptr;
		in_buf.bufSizes = &in_size;
		in_buf.bufferIdentifiers = &in_identifier;
		in_buf.bufElSizes = &in_elem_size;

		out_ptr = (uint8_t *)outOutputData->mBuffers[0].mData + outputOffset;
		out_size = outOutputData->mBuffers[0].mDataByteSize - outputOffset;
		out_elem_size = 1;
		out_buf.numBufs = 1;
		out_buf.bufs = &out_ptr;
		out_buf.bufferIdentifiers = &out_identifier;
		out_buf.bufSizes = &out_size;
		out_buf.bufElSizes = &out_elem_size;

		// NOTE(review): unlike the loop above, the return value of
		// aacEncEncode() is not checked here — only numInSamples is inspected.
		err = aacEncEncode( me->nativeCodecRef, &in_buf, &out_buf, &in_args, &out_args );
		if( out_args.numInSamples == 0 ) {
			// nothing consumed, break out of here
			return kUnknownErr;
		} else {
			// consumed samples: advance past what the encoder swallowed
			size_t bytesConsumed = out_args.numInSamples * bytesPerFrame;
			inputOffset += bytesConsumed;
			inputSamples = (uint8_t *)inputSamples + bytesConsumed;
			inputSamplesAvailable -= out_args.numInSamples;
		}
		if( out_args.numOutBytes > 0 ) {
			// bitstream outputted, always 1 full packet
			if( outPacketDescription ) {
				outPacketDescription[ outputPacketNum ].mStartOffset = outputOffset;
				outPacketDescription[ outputPacketNum ].mVariableFramesInPacket = 0; // Constant at kAudioSamplesPerPacket_AAC_ELD
				outPacketDescription[ outputPacketNum ].mDataByteSize = out_args.numOutBytes;
			}
			outputPacketNum++;
			outputOffset += out_args.numOutBytes;
		}
		if ( outputPacketNum >= outputPacketSizeMax ) {
			goto copyRemainingSamples;
		}
	}

copyRemainingSamples:
	// Save any unconsumed input frames for the next call, growing the saved
	// buffer if its capacity is insufficient.
	if( inputSamplesAvailable > 0 )
	{
		uint32_t newSavedSamples = me->savedSampleCount + inputSamplesAvailable;
		uint32_t newBufferSizeInSamples = newSavedSamples;
		if( me->savedBufferSizeInSamples < newBufferSizeInSamples )
		{
			void * newBuffer = realloc( me->savedInputBuffer, newBufferSizeInSamples * bytesPerFrame );
			if( !newBuffer )
			{
				err = kNoMemoryErr;
				goto exit;
			}
			me->savedInputBuffer = newBuffer;
			me->savedBufferSizeInSamples = newBufferSizeInSamples;
		}
		memcpy( &((uint8_t*)me->savedInputBuffer)[ me->savedSampleCount * bytesPerFrame ], inputSamples, inputSamplesAvailable * bytesPerFrame );
		me->savedSampleCount = newSavedSamples;
	}

	*ioOutputDataPacketSize = outputPacketNum;
	outOutputData->mBuffers[0].mDataByteSize = outputOffset;
exit:
	return err;
}

#endif //AUDIOCONVERTER_ENABLE_AACELD

//===========================================================================================================================
//	OPUS
//===========================================================================================================================
#if( AUDIOCONVERTER_ENABLE_OPUS )

//===========================================================================================================================
//	_AudioConverterSetPropertyOpusDecode
//
//	The Opus decode path currently exposes no settable properties, so every request is rejected.
//===========================================================================================================================
static OSStatus _AudioConverterSetPropertyOpusDecode( AudioConverterPrivateRef const me, AudioConverterPropertyID	inPropertyID, uint32_t inSize, const void * inData )
{
	// No argument is inspected today; silence unused-parameter warnings.
	(void)me;
	(void)inPropertyID;
	(void)inSize;
	(void)inData;
	return kUnsupportedErr;
}

//===========================================================================================================================
//	_AudioConverterSetPropertyOpusEncode
//
//	Applies encoder-side properties to the Opus encoder.
//	Returns kSizeErr on a bad payload size, kUnsupportedErr for unknown properties, kUnknownErr on codec failure.
//===========================================================================================================================
static OSStatus _AudioConverterSetPropertyOpusEncode( AudioConverterPrivateRef const me, AudioConverterPropertyID inPropertyID, uint32_t inSize, const void * inData )
{
	switch ( inPropertyID ) {
		case kAudioCodecPropertyPacketSizeLimitForVBR:
		{
			uint32_t bitrate;

			if( inSize != sizeof( uint32_t ) )
				return kSizeErr;

			// The caller's payload is intentionally not used here; the low-latency bitrate
			// is derived from the converter's configured sample rate.
			if( me->sampleRate <= 24000 ) {
				bitrate = kAirPlayAudioBitrateLowLatencyUpTo24KHz;
			} else if( me->sampleRate <= 32000 ) {
				bitrate = kAirPlayAudioBitrateLowLatencyUpTo32KHz;
			} else if( me->sampleRate <= 48000 ) {
				bitrate = kAirPlayAudioBitrateLowLatencyUpTo48KHz;
			} else {
				return kUnknownErr;
			}

			// BUGFIX: the opus_encoder_ctl() result was previously ignored; surface failures.
			if( opus_encoder_ctl( me->nativeCodecRef, OPUS_SET_BITRATE( bitrate ) ) != OPUS_OK )
				return kUnknownErr;
			return kNoErr;
		}
		case kAudioConverterEncodeBitRate:
			if( inSize != sizeof( uint32_t ) )
				return kSizeErr;
			// BUGFIX: check the ctl result here as well.
			if( opus_encoder_ctl( me->nativeCodecRef, OPUS_SET_BITRATE( *(const uint32_t *)inData ) ) != OPUS_OK )
				return kUnknownErr;
			return kNoErr;
		default:
			return kUnsupportedErr;
	}
}

//===========================================================================================================================
//	_AudioConverterFillComplexBufferOpusDecode
//
//	Pulls exactly one compressed Opus packet from the caller's input proc and decodes it into the caller's PCM buffer
//	(interleaved opus_int16). On success, *ioOutputDataPacketSize is the number of PCM frames produced.
//===========================================================================================================================
static OSStatus _AudioConverterFillComplexBufferOpusDecode( AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const		me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;
	AudioBufferList						bufferList;
	uint32_t							packetCount;
	AudioStreamPacketDescription *		packetDesc;

	// Request a single compressed packet from the caller.
	bufferList.mNumberBuffers = 1;
	packetCount = 1;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &packetCount, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	size_t bytesPerFrame = me->channels * sizeof( opus_int16 );
	// Capacity of the caller's output buffer, expressed in PCM frames.
	int frameSize = outOutputData->mBuffers[ 0 ].mDataByteSize / bytesPerFrame;

	// opus_decode returns the number of PCM frames produced, or a negative opus error code.
	int result = opus_decode( me->nativeCodecRef, bufferList.mBuffers[ 0 ].mData, bufferList.mBuffers[ 0 ].mDataByteSize, outOutputData->mBuffers[ 0 ].mData, frameSize, 0 );

	if( result == OPUS_BUFFER_TOO_SMALL ) {
		*ioOutputDataPacketSize = 0;	// BUGFIX: don't leave the caller with a stale packet count on failure.
		return kSizeErr;
	}
	if( result <= 0 ) {
		*ioOutputDataPacketSize = 0;	// BUGFIX: ditto for generic decode failures.
		return kUnknownErr;
	}
	outOutputData->mBuffers[ 0 ].mDataByteSize = result * bytesPerFrame;
	if( outPacketDescription ) {
		outPacketDescription->mStartOffset = 0;
		outPacketDescription->mVariableFramesInPacket = result;
		outPacketDescription->mDataByteSize = outOutputData->mBuffers[ 0 ].mDataByteSize;
	}
	*ioOutputDataPacketSize = result;
exit:
	return err;
}

//===========================================================================================================================
//	_AudioConverterFillComplexBufferOpusEncode
//
//	Encodes interleaved opus_int16 PCM into Opus packets of exactly 20 ms each. Input that does not fill a whole
//	20 ms frame is stashed in me->savedInputBuffer and prepended on the next call. On return,
//	*ioOutputDataPacketSize is the number of packets produced and outOutputData->mBuffers[0] holds the
//	concatenated packet bytes; outPacketDescription (if non-NULL) receives one entry per packet.
//===========================================================================================================================
static OSStatus _AudioConverterFillComplexBufferOpusEncode( AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc, void * inInputDataProcUserData, uint32_t * ioOutputDataPacketSize, AudioBufferList * outOutputData, AudioStreamPacketDescription * outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	OSStatus							err;

	AudioBufferList						bufferList;
	uint32_t							inputSamplesAvailable;
	AudioStreamPacketDescription *		packetDesc;
	uint32_t outputPacketSize = 0;	// Count of packets emitted so far (despite the name, this is a count, not a byte size).

	uint32_t outputPacketSizeMax = *ioOutputDataPacketSize;	// Caller's packet capacity.
	if( outputPacketSizeMax < 1 )
		return kSizeErr;
	size_t bytesPerFrame = me->channels * sizeof( opus_int16 );
	int framesIn20ms = me->sampleRate / 50;	// Samples per 20 ms Opus frame.

	// Ask the caller for (roughly) one frame of input; the proc may deliver a different amount.
	bufferList.mNumberBuffers = 1;
	inputSamplesAvailable = framesIn20ms;
	packetDesc  = NULL;
	err = inInputDataProc( inConverter, &inputSamplesAvailable, &bufferList, &packetDesc, inInputDataProcUserData );
	require_noerr_quiet( err, exit );

	void * inputSamples = bufferList.mBuffers[ 0 ].mData;
	uint8_t * outputBuffer = outOutputData->mBuffers[ outputPacketSize ].mData;
	size_t outputOffset = 0;
	size_t outputBufferByteSize = outOutputData->mBuffers[ outputPacketSize ].mDataByteSize;

	// If we have saved samples from a previous call, top them up to 20 ms worth, then compress.
	while( me->savedSampleCount )
	{
		int frameSize = me->savedSampleCount;
		// Using 20 ms frame size.
		if( frameSize >= framesIn20ms )
			frameSize = framesIn20ms;
		else if ( frameSize + (int)inputSamplesAvailable >= framesIn20ms )
		{
			// Top the saved buffer up to a full frame using fresh input.
			// NOTE(review): this assumes savedInputBuffer has room for a full 20 ms frame — the
			// save path below rounds its allocation up to a 20 ms multiple, which provides that room.
			uint32_t inputToCopy = framesIn20ms - frameSize;
			frameSize = framesIn20ms;
			memcpy( &((uint8_t*)me->savedInputBuffer)[ me->savedSampleCount * bytesPerFrame ], inputSamples, inputToCopy * bytesPerFrame );
			inputSamples = (void*)( (uint8_t*)inputSamples + inputToCopy * bytesPerFrame );
			inputSamplesAvailable -= inputToCopy;
			me->savedSampleCount = framesIn20ms;
		}
		else
			break;	// Still under one frame even with the new input; save the remainder for next time.
		
		opus_int32 result = opus_encode( me->nativeCodecRef, me->savedInputBuffer, frameSize, &outputBuffer[ outputOffset ], outputBufferByteSize - outputOffset );
		
		if( result == OPUS_BUFFER_TOO_SMALL ) {
			if ( outputPacketSize < 1 )
				err = kSizeErr;//output buffer couldn't even hold 1 compressed frame
			break;
		}
		if( result <= 0 ) {
			// NOTE(review): this break exits only this loop; the second loop below still runs with
			// err already set — confirm whether encoding should stop entirely on the first failure.
			err = kUnknownErr;
			break;
		}
		
		if( outPacketDescription ) {
			outPacketDescription[ outputPacketSize ].mStartOffset = outputOffset;
			outPacketDescription[ outputPacketSize ].mVariableFramesInPacket = frameSize;
			outPacketDescription[ outputPacketSize ].mDataByteSize = result;
		}
		outputPacketSize++;
		outputOffset += result;
		outOutputData->mBuffers[ 0 ].mDataByteSize = outputOffset;	// Keep the byte count in sync after each packet.
		me->savedSampleCount -= frameSize;
		// Shift any leftover saved samples down to the front of the carry-over buffer.
		memmove( me->savedInputBuffer, &((uint8_t*)me->savedInputBuffer)[ frameSize * bytesPerFrame ], me->savedSampleCount * bytesPerFrame );
		if ( outputPacketSize >= outputPacketSizeMax )
			goto copyRemainingSamples;
	}

	// Encode whole 20 ms frames directly out of the caller's input buffer.
	while( outputPacketSize < outputPacketSizeMax ) {
		int frameSize = inputSamplesAvailable;
		// Using 20 ms frame size.
		if( frameSize >= framesIn20ms )
			frameSize = framesIn20ms;
		else
			break;	// Less than one full frame left; save it below.
		
		opus_int32 result = opus_encode( me->nativeCodecRef, inputSamples, frameSize, &outputBuffer[ outputOffset ], outputBufferByteSize - outputOffset );

		if( result == OPUS_BUFFER_TOO_SMALL ) {
			if ( outputPacketSize < 1 )
				err = kSizeErr;//output buffer couldn't even hold 1 compressed frame
			break;
		}
		if( result <= 0 ) {
			err = kUnknownErr;
			break;
		}

		if( outPacketDescription ) {
			outPacketDescription[ outputPacketSize ].mStartOffset = outputOffset;
			outPacketDescription[ outputPacketSize ].mVariableFramesInPacket = frameSize;
			outPacketDescription[ outputPacketSize ].mDataByteSize = result;
		}
		outputPacketSize++;
		outputOffset += result;
		outOutputData->mBuffers[ 0 ].mDataByteSize = outputOffset;
		inputSamples = (void*)( (uint8_t*)inputSamples + frameSize * bytesPerFrame );
		inputSamplesAvailable -= frameSize;
	}

copyRemainingSamples:
	if( inputSamplesAvailable > 0 )
	{
		// Save any input samples that were not consumed this pass; they are prepended on the next call.
		uint32_t newSavedSamples = me->savedSampleCount + inputSamplesAvailable;
		uint32_t newBufferSizeInSamples = (newSavedSamples + framesIn20ms - 1) / framesIn20ms * framesIn20ms;//Round up to 20ms so we can add some more samples next pass
		if( me->savedBufferSizeInSamples < newBufferSizeInSamples )
		{
			void * newBuffer = realloc( me->savedInputBuffer, newBufferSizeInSamples * bytesPerFrame );
			if( !newBuffer )
			{
				err = kNoMemoryErr;
				goto exit;
			}
			me->savedInputBuffer = newBuffer;
			me->savedBufferSizeInSamples = newBufferSizeInSamples;
		}
		memcpy( &((uint8_t*)me->savedInputBuffer)[ me->savedSampleCount * bytesPerFrame ], inputSamples, inputSamplesAvailable * bytesPerFrame );
		me->savedSampleCount = newSavedSamples;
	}
exit:
	*ioOutputDataPacketSize = outputPacketSize;
	return err;
}

#endif //AUDIOCONVERTER_ENABLE_OPUS

//===========================================================================================================================
//	AudioConverterNew
//===========================================================================================================================

// Creates a converter for one of the supported codec pairs:
//   AAC-LC/AAC-ELD/Opus -> PCM (decode), or PCM -> AAC-LC/AAC-ELD/Opus (encode).
// Sample rate and channel count must match between source and destination (no SRC, no mixing).
// On success, *outAudioConverter receives the new converter; on failure, all partially-created
// codec state is disposed and an error is returned.
OSStatus AudioConverterNew( const AudioStreamBasicDescription *	inSourceFormat, const AudioStreamBasicDescription *	inDestinationFormat, AudioConverterRef * outAudioConverter )
{
	OSStatus						err;
	AudioConverterPrivateRef		me;

	//Sample rate conversion and mixing are not supported
	if( inDestinationFormat->mSampleRate != inSourceFormat->mSampleRate )
		return kUnsupportedErr;
	if( inDestinationFormat->mChannelsPerFrame != inSourceFormat->mChannelsPerFrame )
		return kUnsupportedErr;
	
	me = (AudioConverterPrivateRef) calloc( 1, sizeof( *me ) );
	require_action( me, exit, err = kNoMemoryErr );

	me->sourceFormatID = inSourceFormat->mFormatID;
	me->destFormatID = inDestinationFormat->mFormatID;
	me->sampleRate = inDestinationFormat->mSampleRate;
	me->channels = inDestinationFormat->mChannelsPerFrame;

	switch( me->sourceFormatID ) {

		case kAudioFormatMPEG4AAC:	// AAC-LC -> PCM decoder
		{
			require_action_quiet( inDestinationFormat->mFormatID == kAudioFormatLinearPCM, exit, err = kUnsupportedErr );

			UCHAR asc[2];
			int objectType, frequencyIndex, channelConfig;

			UCHAR* configBuffers[ 1 ];
			UINT configBufferSizes[ 1 ] = { 0 };

			objectType = AOT_AAC_LC;
			channelConfig = inDestinationFormat->mChannelsPerFrame;
			frequencyIndex = GetSamplingFrequencyIndex( inDestinationFormat->mSampleRate );
			if( ( channelConfig != 1 && channelConfig != 2 ) || frequencyIndex == -1 ) {
				// Only support 1/0/0 or 2/0/0 channel configurations
				err = kUnknownErr;
				goto exit;
			}

			/*
			 * Audio Specific Config:
			 * 5 bits for object type
			 * 4 bits for sampling rate
			 * 4 bit for channel
			 * 1 bit for frame length flag: 0=1024 sample or 1=960 sample
			 * 1 bit for depends on core coder
			 * 1 bit for extension flag
			*/

			// Silence compiler warning
			TO_LITTLE_ENDIAN( 0 );

			asc[ 0 ] = ( objectType << 3 ) | ( ( frequencyIndex & 0x0E ) >> 1 );
			asc[ 1 ] = ( ( frequencyIndex & 0x01 ) << 7 ) | ( ( channelConfig & 0x0F) << 3 );
			configBuffers[ 0 ] = asc;
			configBufferSizes[ 0 ] = sizeof(asc);

			me->nativeCodecRef = aacDecoder_Open( TT_MP4_RAW, 1 );
			require_action( me->nativeCodecRef, exit, err = kUnknownErr );

			err = aacDecoder_ConfigRaw( me->nativeCodecRef, configBuffers, configBufferSizes);
			// BUGFIX: map the FDK decoder status to an OSStatus (the AAC-ELD case already did this);
			// previously a raw AAC_DECODER_ERROR value could leak out of this API.
			if( err != AAC_DEC_OK ) {
				err = kUnknownErr;
			}
			break;
		}

#if( AUDIOCONVERTER_ENABLE_AACELD )
		case kAudioFormatMPEG4AAC_ELD:	// AAC-ELD -> PCM decoder
		{
			require_action_quiet( inDestinationFormat->mFormatID == kAudioFormatLinearPCM, exit, err = kUnsupportedErr );

			UCHAR asc[4];
			int objectType, frequencyIndex, channelConfig;

			UCHAR* configBuffers[ 1 ];
			UINT configBufferSizes[ 1 ] = { 0 };

			objectType = AOT_ER_AAC_ELD;
			channelConfig = inDestinationFormat->mChannelsPerFrame;
			frequencyIndex = GetSamplingFrequencyIndex( inDestinationFormat->mSampleRate );

			if( ( channelConfig != 1 && channelConfig != 2 ) || frequencyIndex == -1 ) {
				// Only support 1/0/0 or 2/0/0 channel configurations
				err = kUnknownErr;
				goto exit;
			}

			/*
			 * Audio Specific Config:
			 * 5 bits for object type = 11111 = Extension field
			 * 6 bits for extended object type = type - 32
			 * 4 bits for sampling rate index
			 * 4 bit for channel configuration
			 * ELD Specific Config
			 * 1 bit for frame length flag: 1=480, 0=512
			 * 1 bit Section Data Resilience Flag
			 * 1 bit Scale factor Data Resilience Flag
			 * 1 bit Spectral Data Resilience Flag
			 * 1 bit Sbr Present flag (Data follows if present)
			 * 4 bits ELD Extended config (0000=Termination)
			*/
			objectType -= 32;
			asc[ 0 ] = ( 0x1F << 3 ) | ( ( objectType & 0x38 ) >> 3 );
			asc[ 1 ] = ( ( objectType & 0x07 ) << 5 ) | ( ( frequencyIndex & 0x0F ) << 1 ) | ( ( channelConfig & 0x08) >> 3 );
			asc[ 2 ] = ( ( channelConfig & 0x07 ) << 5 ) | ( 1 << 4 );
			asc[ 3 ] = 0; // Terminate config
			configBuffers[ 0 ] = asc;
			configBufferSizes[ 0 ] = sizeof(asc);

			me->nativeCodecRef = aacDecoder_Open( TT_MP4_RAW, 1 );
			require_action( me->nativeCodecRef, exit, err = kUnknownErr );

			err = aacDecoder_ConfigRaw( me->nativeCodecRef, configBuffers, configBufferSizes);
			if( err != AAC_DEC_OK ) {
				err = kUnknownErr;
			}
			break;
		}
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
		case kAudioFormatOpus:	// Opus -> PCM decoder
			require_action_quiet( inDestinationFormat->mFormatID == kAudioFormatLinearPCM, exit, err = kUnsupportedErr );
			// Opus only supports these sample rates.
			switch( inSourceFormat->mSampleRate ) {
				case 8000:
				case 12000:
				case 16000:
				case 24000:
				case 48000:
					break;
				default:
					err = kUnsupportedErr;
					goto exit;
			}
			me->nativeCodecRef = opus_decoder_create( inSourceFormat->mSampleRate, inSourceFormat->mChannelsPerFrame, &err );
			if( err || !me->nativeCodecRef ) {
				err = kUnknownErr;
				goto exit;
			}
			break;
#endif

		case kAudioFormatLinearPCM:	// PCM input -> one of the encoders below
			if( 0 ) {
				// Dead anchor so the #if'd "else if" chain below stays well-formed for any feature combination.
				err = kUnsupportedErr;
				goto exit;
			}

#if( AUDIOCONVERTER_ENABLE_AACELD )
			else if( inDestinationFormat->mFormatID == kAudioFormatMPEG4AAC_ELD )
			{
				int mode = 0;
				// 0x01: AAC Module
				err = aacEncOpen( (struct AACENCODER **)(&me->nativeCodecRef), 0x01, inSourceFormat->mChannelsPerFrame );
				if( err || !me->nativeCodecRef ) {
					err = kUnknownErr;
					goto exit;
				}
				while( 1 ) {
					err = kUnknownErr;
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_AOT, AOT_ER_AAC_ELD ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_SBR_MODE, 0 ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_SAMPLERATE, inDestinationFormat->mSampleRate ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_GRANULE_LENGTH, kAudioSamplesPerPacket_AAC_ELD ) != AACENC_OK ) {
						break;
					}
					switch (inDestinationFormat->mChannelsPerFrame) {
						case 1: mode = MODE_1; break;
						case 2: mode = MODE_2; break;
						default:
							// mode stays 0; the CHANNELMODE call below is expected to reject it.
							break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_CHANNELMODE, mode ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_CHANNELORDER, 1 ) != AACENC_OK ) {
						break;
					}
					// 0 = Raw access units
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_TRANSMUX, 0 ) != AACENC_OK ) {
						break;
					}
					err = kNoErr;
					break;
				}
				// BUGFIX: removed the unconditional "err = kNoErr" that followed this loop; a failed
				// aacEncoder_SetParam must propagate instead of being silently ignored.
				break;
			}
#endif

#if( AUDIOCONVERTER_ENABLE_AAC_LC )
			else if( inDestinationFormat->mFormatID == kAudioFormatMPEG4AAC )
			{
				int mode = 0;
				// 0x01: AAC Module
				err = aacEncOpen( (struct AACENCODER **)(&me->nativeCodecRef), 0x01, inSourceFormat->mChannelsPerFrame );
				if( err || !me->nativeCodecRef ) {
					err = kUnknownErr;
					goto exit;
				}
				while( 1 ) {
					err = kUnknownErr;
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_AOT, AOT_AAC_LC ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_SBR_MODE, 0 ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_SAMPLERATE, inDestinationFormat->mSampleRate ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_GRANULE_LENGTH, kAudioSamplesPerPacket_AAC_LC ) != AACENC_OK ) {
						break;
					}
					switch (inDestinationFormat->mChannelsPerFrame) {
						case 1: mode = MODE_1; break;
						case 2: mode = MODE_2; break;
						default:
							// mode stays 0; the CHANNELMODE call below is expected to reject it.
							break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_CHANNELMODE, mode ) != AACENC_OK ) {
						break;
					}
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_CHANNELORDER, 1 ) != AACENC_OK ) {
						break;
					}
					// 0 = Raw access units  TT_MP4_ADTS  TT_MP4_RAW
					if( aacEncoder_SetParam( me->nativeCodecRef, AACENC_TRANSMUX, TT_MP4_RAW ) != AACENC_OK ) {
						break;
					}
					err = kNoErr;
					break;
				}
				// BUGFIX: removed the unconditional "err = kNoErr" that followed this loop (see ELD case).
				break;
			}
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
			else if( inDestinationFormat->mFormatID == kAudioFormatOpus )
			{
				// Opus only supports these sample rates.
				switch( inDestinationFormat->mSampleRate ) {
					case 8000:
					case 12000:
					case 16000:
					case 24000:
					case 48000:
						break;
					default:
						err = kUnsupportedErr;
						goto exit;
				}
				me->nativeCodecRef = opus_encoder_create( inDestinationFormat->mSampleRate, inDestinationFormat->mChannelsPerFrame, OPUS_APPLICATION_VOIP, &err );
				if( err || !me->nativeCodecRef ) {
					err = kUnknownErr;
					goto exit;
				}
				err = kNoErr;
				break;
			}
#endif
			else
			{
				err = kUnsupportedErr;
				goto exit;
			}
		default:
			err = kUnsupportedErr;
			goto exit;
	}

	// BUGFIX: on any codec-setup failure above, dispose the half-built converter instead of
	// handing it to the caller alongside a failure status.
	if( err )
		goto exit;

	*outAudioConverter = (AudioConverterRef) me;
	me = NULL;
	
exit:
	if( me ) AudioConverterDispose( (AudioConverterRef) me );
	return( err );
}

//===========================================================================================================================
//	AudioConverterDispose
//===========================================================================================================================

// Destroys the converter, closing whichever codec instance AudioConverterNew created
// (selected by the same source/destination format pair) and releasing the carry-over buffer.
// Always returns kNoErr.
OSStatus AudioConverterDispose( AudioConverterRef inConverter )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;

	if( me->nativeCodecRef ) {
		switch( me->sourceFormatID ) {
			case kAudioFormatMPEG4AAC:
				aacDecoder_Close( me->nativeCodecRef );
				break;

#if( AUDIOCONVERTER_ENABLE_AACELD )
			case kAudioFormatMPEG4AAC_ELD:
				aacDecoder_Close( me->nativeCodecRef );
				break;
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
			case kAudioFormatOpus:
				opus_decoder_destroy( me->nativeCodecRef );
				break;
#endif
			case kAudioFormatLinearPCM:
				// Encoding direction: pick the encoder from the destination format.
				if( 0 ) {}

#if( AUDIOCONVERTER_ENABLE_AACELD )
				else if( me->destFormatID == kAudioFormatMPEG4AAC_ELD )
				{
					aacEncClose( (HANDLE_AACENCODER *) &me->nativeCodecRef );
				}
#endif

#if( AUDIOCONVERTER_ENABLE_AAC_LC )
				// BUGFIX: the AAC-LC encoder created in AudioConverterNew was never closed here,
				// leaking the encoder handle on every PCM -> AAC-LC converter teardown.
				else if( me->destFormatID == kAudioFormatMPEG4AAC )
				{
					aacEncClose( (HANDLE_AACENCODER *) &me->nativeCodecRef );
				}
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
				else if( me->destFormatID == kAudioFormatOpus )
				{
					opus_encoder_destroy( me->nativeCodecRef );
				}
#endif
				break;

			default:
				// Unknown format: nothing to close.
				break;
		}
	}
	free( me->savedInputBuffer );	// free(NULL) is a no-op, so no guard is needed.
	free( me );
	return( kNoErr );
}

//===========================================================================================================================
//	AudioConverterReset
//===========================================================================================================================

// Flushes the AAC-LC decoder's internal state so a stream discontinuity doesn't bleed stale
// audio into new data. For all other codec configurations this is currently a no-op returning kNoErr.
// NOTE(review): the AAC-ELD decode path is not flushed here — confirm whether
// kAudioFormatMPEG4AAC_ELD should receive the same treatment.
OSStatus AudioConverterReset( AudioConverterRef inConverter )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;
	AAC_DECODER_ERROR decoderErr = AAC_DEC_OK;

	if( me->sourceFormatID == kAudioFormatMPEG4AAC )
	{
		INT_PCM buffer[4096] = { 0 };
		const UINT flags = AACDEC_FLUSH | AACDEC_INTR | AACDEC_CLRHIST;

		// BUGFIX: guard against a missing codec instance, consistent with AudioConverterSetProperty
		// and AudioConverterFillComplexBuffer; previously a NULL handle would have been passed
		// straight into the FDK decoder calls below.
		if( !me->nativeCodecRef )
			return kStateErr;

		/*
		 * To completely flush the buffer we must clear all the buffers, then force the internals of the fdk to
		 * flush, resync and clear it's history.

		 * Explanations from the fdk header files.
		 *    AACDEC_FLUSH: Flush filterbanks (output delayed audio)
		 *    AACDEC_INTR: Signal an input bit stream data discontinuity. Resync any internals as necessary
		 *    AACDEC_CLRHIST: Clear all signal delay lines and history buffers
		 */

		decoderErr = aacDecoder_SetParam( me->nativeCodecRef, AAC_TPDEC_CLEAR_BUFFER, 1 );
		if( decoderErr != AAC_DEC_OK )
		{
			ac_ulog( kLogLevelError, "aacDecoder_SetParam failed to AAC_TPDEC_CLEAR_BUFFER. [%u]\n", decoderErr );
		}

		// Decode a dummy frame with the flush flags set to force the filterbanks to drain.
		decoderErr = aacDecoder_DecodeFrame( me->nativeCodecRef, buffer, sizeof(buffer)/sizeof(INT_PCM), flags );
		if( decoderErr != AAC_DEC_OK )
		{
			ac_ulog( kLogLevelError, "aacDecoder_DecodeFrame failed to flush, resync and clear history. [%u] \n", decoderErr );
		}
	}
	return( decoderErr == AAC_DEC_OK ? kNoErr : kUnknownErr );
}

//===========================================================================================================================
//	AudioConverterSetProperty
//===========================================================================================================================

// Routes a property set to the handler for the active codec: decode handlers are chosen by the
// source format; for PCM input (encoding), the handler is chosen by the destination format.
// Returns kStateErr when no codec instance exists, kUnsupportedErr for unhandled format pairs.
OSStatus AudioConverterSetProperty( AudioConverterRef inConverter, AudioConverterPropertyID	inPropertyID, uint32_t inSize, const void * inData )
{
	AudioConverterPrivateRef const		me = (AudioConverterPrivateRef) inConverter;

	if( !me->nativeCodecRef )
		return kStateErr;

	switch( me->sourceFormatID ) {
		case kAudioFormatMPEG4AAC:
			return _AudioConverterSetPropertyAACDecode( me, inPropertyID, inSize, inData );

#if( AUDIOCONVERTER_ENABLE_AACELD )
		case kAudioFormatMPEG4AAC_ELD:
			return _AudioConverterSetPropertyAACELDDecode( me, inPropertyID, inSize, inData );
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
		case kAudioFormatOpus:
			return _AudioConverterSetPropertyOpusDecode( me, inPropertyID, inSize, inData );
#endif

		case kAudioFormatLinearPCM:
			// PCM input means we are encoding; dispatch on the destination format.
#if( AUDIOCONVERTER_ENABLE_AACELD )
			if( me->destFormatID == kAudioFormatMPEG4AAC_ELD )
				return _AudioConverterSetPropertyAACELDEncode( me, inPropertyID, inSize, inData );
#endif
#if( AUDIOCONVERTER_ENABLE_AAC_LC )
			if( me->destFormatID == kAudioFormatMPEG4AAC )
				return _AudioConverterSetPropertyAACEncode( me, inPropertyID, inSize, inData );
#endif
#if( AUDIOCONVERTER_ENABLE_OPUS )
			if( me->destFormatID == kAudioFormatOpus )
				return _AudioConverterSetPropertyOpusEncode( me, inPropertyID, inSize, inData );
#endif
			return kUnsupportedErr;

		default:
			return kUnsupportedErr;
	}
}

//===========================================================================================================================
//	AudioConverterFillComplexBuffer
//===========================================================================================================================

// Runs one conversion pass: dispatches to the decode handler matching the source format,
// or — for PCM input — to the encode handler matching the destination format.
// Returns kStateErr when no codec instance exists, kUnsupportedErr for unhandled format pairs.
OSStatus AudioConverterFillComplexBuffer( AudioConverterRef inConverter, AudioConverterComplexInputDataProc inInputDataProc,
		void *								inInputDataProcUserData,
		uint32_t *							ioOutputDataPacketSize,
		AudioBufferList *					outOutputData,
		AudioStreamPacketDescription *		outPacketDescription )
{
	AudioConverterPrivateRef const me = (AudioConverterPrivateRef) inConverter;

	if( !me->nativeCodecRef )
		return kStateErr;

	switch ( me->sourceFormatID ) {
		case kAudioFormatMPEG4AAC:
			// AAC-LC -> PCM
			return _AudioConverterFillComplexBufferAACDecode( inConverter, inInputDataProc, inInputDataProcUserData, ioOutputDataPacketSize, outOutputData, outPacketDescription );

#if( AUDIOCONVERTER_ENABLE_AACELD )
		case kAudioFormatMPEG4AAC_ELD:
			// AAC-ELD -> PCM
			return _AudioConverterFillComplexBufferAACELDDecode( inConverter, inInputDataProc, inInputDataProcUserData, ioOutputDataPacketSize, outOutputData, outPacketDescription );
#endif

#if( AUDIOCONVERTER_ENABLE_OPUS )
		case kAudioFormatOpus:
			// Opus -> PCM
			return _AudioConverterFillComplexBufferOpusDecode( inConverter, inInputDataProc, inInputDataProcUserData, ioOutputDataPacketSize, outOutputData, outPacketDescription );
#endif

		case kAudioFormatLinearPCM:
			// PCM input means we are encoding; dispatch on the destination format.
#if( AUDIOCONVERTER_ENABLE_AACELD )
			if( me->destFormatID == kAudioFormatMPEG4AAC_ELD )
				return _AudioConverterFillComplexBufferAACELDHighEncode( inConverter, inInputDataProc, inInputDataProcUserData, ioOutputDataPacketSize, outOutputData, outPacketDescription );
#endif
#if( AUDIOCONVERTER_ENABLE_AAC_LC )
			if( me->destFormatID == kAudioFormatMPEG4AAC )
				return _AudioConverterFillComplexBufferAACEncode( inConverter, inInputDataProc, inInputDataProcUserData, ioOutputDataPacketSize, outOutputData, outPacketDescription );
#endif
#if( AUDIOCONVERTER_ENABLE_OPUS )
			if( me->destFormatID == kAudioFormatOpus )
				return _AudioConverterFillComplexBufferOpusEncode( inConverter, inInputDataProc, inInputDataProcUserData, ioOutputDataPacketSize, outOutputData, outPacketDescription );
#endif
			return kUnsupportedErr;

		default:
			return kUnsupportedErr;
	}
}

