/*
    File:       AudioUtilsStub.c
    Package:    Apple CarPlay Communication Plug-in.
    Abstract:   n/a
    Version:    450.14

    Disclaimer: IMPORTANT: This Apple software is supplied to you, by Apple Inc. ("Apple"), in your
    capacity as a current, and in good standing, Licensee in the MFi Licensing Program. Use of this
    Apple software is governed by and subject to the terms and conditions of your MFi License,
    including, but not limited to, the restrictions specified in the provision entitled ”Public
    Software”, and is further subject to your agreement to the following additional terms, and your
    agreement that the use, installation, modification or redistribution of this Apple software
    constitutes acceptance of these additional terms. If you do not agree with these additional terms,
    please do not use, install, modify or redistribute this Apple software.

    Subject to all of these terms and in consideration of your agreement to abide by them, Apple grants
    you, for as long as you are a current and in good-standing MFi Licensee, a personal, non-exclusive
    license, under Apple's copyrights in this original Apple software (the "Apple Software"), to use,
    reproduce, and modify the Apple Software in source form, and to use, reproduce, modify, and
    redistribute the Apple Software, with or without modifications, in binary form. While you may not
    redistribute the Apple Software in source form, should you redistribute the Apple Software in binary
    form, you must retain this notice and the following text and disclaimers in all such redistributions
    of the Apple Software. Neither the name, trademarks, service marks, or logos of Apple Inc. may be
    used to endorse or promote products derived from the Apple Software without specific prior written
    permission from Apple. Except as expressly stated in this notice, no other rights or licenses,
    express or implied, are granted by Apple herein, including but not limited to any patent rights that
    may be infringed by your derivative works or by other works in which the Apple Software may be
    incorporated.

    Unless you explicitly state otherwise, if you provide any ideas, suggestions, recommendations, bug
    fixes or enhancements to Apple in connection with this software (“Feedback”), you hereby grant to
    Apple a non-exclusive, fully paid-up, perpetual, irrevocable, worldwide license to make, use,
    reproduce, incorporate, modify, display, perform, sell, make or have made derivative works of,
    distribute (directly or indirectly) and sublicense, such Feedback in connection with Apple products
    and services. Providing this Feedback is voluntary, but if you do provide Feedback to Apple, you
    acknowledge and agree that Apple may exercise the license granted above without the payment of
    royalties or further consideration to Participant.

    The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO WARRANTIES, EXPRESS OR
    IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY
    AND FITNESS FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR
    IN COMBINATION WITH YOUR PRODUCTS.

    IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES
    (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    PROFITS; OR BUSINESS INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION
    AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT, TORT
    (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.

    Copyright (C) 2012-2015 Apple Inc. All Rights Reserved. Not to be used or disclosed without permission from Apple.
*/

#include "AudioUtils.h"
#include "ThreadUtils.h"
#include <errno.h>
#include <stdlib.h>
#include "AirPlayReceiverSessionAudio.h"

#include "CommonServices.h"


#include "TickUtils.h"

#include CF_HEADER
#include LIBDISPATCH_HEADER

#if( !AUDIO_STREAM_DLL )
#include CF_RUNTIME_HEADER
#endif

//===========================================================================================================================
//  AudioStream
//===========================================================================================================================

#if( AUDIO_STREAM_DLL )
typedef struct AudioStreamImp*          AudioStreamImpRef;
struct AudioStreamImp
#else
typedef struct AudioStreamPrivate*      AudioStreamImpRef;
struct AudioStreamPrivate
#endif
{
#if( !AUDIO_STREAM_DLL )
    CFRuntimeBase                   base;                   // CF type info. Must be first.
#endif
    Boolean
    prepared;               // True if AudioStreamPrepare has been called (and stop hasn't yet).

    void*                           delegateContext;        // Context for the session delegate
    AudioStreamInputCallback_f      inputCallbackPtr;       // Function to call to write input audio.
    void*                           inputCallbackCtx;       // Context to pass to audio input callback function.
    AudioStreamOutputCallback_f     outputCallbackPtr;      // Function to call to read audio to output.
    void*                           outputCallbackCtx;      // Context to pass to audio output callback function.
    Boolean                         input;                  // Enable audio input.
    AudioStreamBasicDescription     format;                 // Format of the audio data.
    uint32_t                        preferredLatencyMics;   // Max latency the app can tolerate.
    uint32_t                        streamType;             // AirPlay Stream type (e.g. main, alt).
    Boolean                         stop;                   // True if audio should stop.

    uint64_t                        direction;              // Direction flags; set via kAudioStreamProperty_Direction, tested with _AudioDirectionHasInput().
    char                            audioType[64];          // Audio type string (e.g. "telephony", "speechRecognition", "alert").

    struct timespec                 inputTimeSpec;          // Scratch timespec for input-side clock_gettime() reads.
    uint64_t                        inputHostTime;          // Host (system) time, i.e. when the packet entered the RTP send buffer.
    uint64_t                        inputSampleTime;        // Time on the audio sample clock, typically the RTP header timestamp.


    uint8_t*                        outputBuffer;           // NOTE(review): declared but never referenced in this file — confirm before removing.
    struct timespec                 outputTimeSpec;         // Scratch timespec for output-side clock_gettime() reads.
    uint64_t                        outputHostTime;         // Host time of the most recent output chunk.
    uint64_t                        outputSampleTime;       // Running sample-time counter for output audio.

    size_t                          numFrames;              // NOTE(review): not referenced in this file.
    size_t                          bufferSize;             // NOTE(review): not referenced in this file.
    float                           timeSeconds;            // NOTE(review): not referenced in this file.

};

// Resolve an AudioStreamRef to its implementation struct.  In the DLL build the implementation
// lives in the stream's context pointer; otherwise the stream IS the implementation.
#if( AUDIO_STREAM_DLL )
#define _AudioStreamGetImp( STREAM )        ( (AudioStreamImpRef) AudioStreamGetContext( (STREAM) ) )
#else
#define _AudioStreamGetImp( STREAM )        (STREAM)
#endif

#if( !AUDIO_STREAM_DLL )
// Forward declarations for the CF runtime callbacks defined later in this file.
static void     _AudioStreamGetTypeID(void* inContext);
static void     _AudioStreamFinalize(CFTypeRef inCF);
#endif

#if( !AUDIO_STREAM_DLL )
// One-time registration state and class description for the AudioStream CF type.
static dispatch_once_t          gAudioStreamInitOnce = 0;
static CFTypeID                 gAudioStreamTypeID = _kCFRuntimeNotATypeID;
static const CFRuntimeClass     kAudioStreamClass = {
    0,                      // version
    "AudioStream",          // className
    NULL,                   // init
    NULL,                   // copy
    _AudioStreamFinalize,   // finalize
    NULL,                   // equal -- NULL means pointer equality.
    NULL,                   // hash  -- NULL means pointer hash.
    NULL,                   // copyFormattingDesc
    NULL,                   // copyDebugDesc
    NULL,                   // reclaim
    NULL                    // refcount
};
#endif

//===========================================================================================================================
//  Logging
//===========================================================================================================================

ulog_define(AudioStream, kLogLevelTrace, kLogFlags_Default, "AudioStream", NULL);
#define as_dlog( LEVEL, ... )       dlogc( &log_category_from_name( AudioStream ), (LEVEL), __VA_ARGS__ )
#define as_ulog( LEVEL, ... )       ulog( &log_category_from_name( AudioStream ), (LEVEL), __VA_ARGS__ )

#if( !AUDIO_STREAM_DLL )
//===========================================================================================================================
//  AudioStreamGetTypeID
//===========================================================================================================================

CFTypeID    AudioStreamGetTypeID(void) {
    // Lazily register the AudioStream class with the CF runtime exactly once, then return its type ID.
    dispatch_once_f(&gAudioStreamInitOnce, NULL, _AudioStreamGetTypeID);
    return gAudioStreamTypeID;
}

static void _AudioStreamGetTypeID(void* inContext) {
    // dispatch_once_f worker: performs the one-time CF class registration.  Context is unused.
    (void) inContext;
    gAudioStreamTypeID = _CFRuntimeRegisterClass(&kAudioStreamClass);
    check(gAudioStreamTypeID != _kCFRuntimeNotATypeID);
}

//===========================================================================================================================
//  AudioStreamCreate
//===========================================================================================================================

OSStatus    AudioStreamCreate(AudioStreamRef* outStream) {
    OSStatus            err;
    AudioStreamRef      me;
    size_t              extraLen;
    as_ulog(kLogLevelNotice, "AudioStreamCreate with AudioUtilsStub.c\n");
    // Allocate the CF instance with enough extra storage for the private fields beyond the CFRuntimeBase header.
    extraLen = sizeof(*me) - sizeof(me->base);
    me = (AudioStreamRef) _CFRuntimeCreateInstance(NULL, AudioStreamGetTypeID(), (CFIndex) extraLen, NULL);
    require_action(me, exit, err = kNoMemoryErr);
    // Zero everything after the base header; the extra bytes are not guaranteed to be cleared.
    memset(((uint8_t*) me) + sizeof(me->base), 0, extraLen);

    // $$$ TODO: Other initialization goes here.
    // This function is only called when AudioUtils is compiled into the AirPlay library.

    // Transfer ownership to the caller; NULL the local so the exit path does not release it.
    *outStream = me;
    me = NULL;
    err = kNoErr;

exit:
    CFReleaseNullSafe(me);
    return (err);
}

//===========================================================================================================================
//  _AudioStreamFinalize
//===========================================================================================================================

static void _AudioStreamFinalize(CFTypeRef inCF) {
    AudioStreamRef const        me = (AudioStreamRef) inCF;

    as_ulog(kLogLevelNotice, "AudioStreamFinalize with AudioUtilsStub.c\n");

    // $$$ TODO: Last chance to free any resources allocated by this object.
    // Invoked by the CF runtime when an AudioStream's retain count reaches zero
    // (only in builds where AudioUtils is compiled into the AirPlay library).
    (void) me;
}
#endif // !AUDIO_STREAM_DLL

#if( AUDIO_STREAM_DLL )
//===========================================================================================================================
//  AudioStreamInitialize
//===========================================================================================================================

OSStatus    AudioStreamInitialize(AudioStreamRef inStream) {
    AudioStreamImpRef       imp;
    OSStatus                err;

    // A non-NULL context means the stream was already initialized; refuse to do it twice.
    require_action(AudioStreamGetContext(inStream) == NULL, exit, err = kAlreadyInitializedErr);

    imp = (AudioStreamImpRef) calloc(1, sizeof(*imp));
    require_action(imp, exit, err = kNoMemoryErr);

    // $$$ TODO: Other initialization goes here.
    // Called (instead of AudioStreamCreate()) when AudioUtils is built as a standalone shared object
    // loaded dynamically by AirPlay at runtime, so the initialization code should mirror
    // AudioStreamCreate().

    AudioStreamSetContext(inStream, imp);
    err = kNoErr;

exit:
    return (err);
}

//===========================================================================================================================
//  AudioStreamFinalize
//===========================================================================================================================

void    AudioStreamFinalize(AudioStreamRef inStream) {
    AudioStreamImpRef const     imp = _AudioStreamGetImp(inStream);

    // Nothing to tear down if the stream was never initialized.
    if (imp == NULL) {
        return;
    }

    // $$$ TODO: Last chance to free any resources allocated by this object.
    // Called (instead of _AudioStreamFinalize()) when AudioUtils is built as a standalone shared
    // object loaded dynamically by AirPlay at runtime; invoked automatically when the retain
    // count of an AudioStream object goes to zero.

    free(imp);
    AudioStreamSetContext(inStream, NULL);
}
#endif

// Most-recently registered input/output streams.  The AudioStream*ProcessData() entry points
// are called by the platform without a usable stream argument, so they look the stream up here.
static AudioStreamRef gAudioInputStreamRef  = NULL;
static AudioStreamRef gAudioOutputStreamRef = NULL;
//===========================================================================================================================
//  AudioStreamSetInputCallback
//===========================================================================================================================

// Registers (or clears) the function used to deliver captured input audio, and records the
// stream in gAudioInputStreamRef so AudioStreamInputProcessData() can find it.
void    AudioStreamSetInputCallback(AudioStreamRef inStream, AudioStreamInputCallback_f inFunc, void* inContext) {
    AudioStreamImpRef const     me = _AudioStreamGetImp(inStream);

    me->inputCallbackPtr = inFunc;
    me->inputCallbackCtx = inContext;

    // Fix: clear the global when the callback is unregistered (inFunc == NULL) so it cannot
    // dangle and be dereferenced by AudioStreamInputProcessData() after the stream is released.
    gAudioInputStreamRef = inFunc ? inStream : NULL;
}

//===========================================================================================================================
//  AudioStreamSetOutputCallback
//===========================================================================================================================

// Registers (or clears) the function used to pull audio for output, and records the stream
// in gAudioOutputStreamRef so AudioStreamOutputProcessData() can find it.
void    AudioStreamSetOutputCallback(AudioStreamRef inStream, AudioStreamOutputCallback_f inFunc, void* inContext) {
    AudioStreamImpRef const     me = _AudioStreamGetImp(inStream);

    me->outputCallbackPtr = inFunc;
    me->outputCallbackCtx = inContext;

    // Fix: clear the global when the callback is unregistered (inFunc == NULL) so it cannot
    // dangle and be dereferenced by AudioStreamOutputProcessData() after the stream is released.
    gAudioOutputStreamRef = inFunc ? inStream : NULL;
}

//===========================================================================================================================
//  _AudioStreamCopyProperty
//===========================================================================================================================

// Returns a retained CF object for the requested property, or NULL.
// Note: the stub branches (AudioType, ThreadName, ThreadPriority) currently return NULL with
// kNoErr; unknown properties return NULL with kNotHandledErr.
CFTypeRef
_AudioStreamCopyProperty(
    CFTypeRef       inObject,
    CFStringRef     inProperty,
    OSStatus*       outErr) {
    AudioStreamImpRef const     me = _AudioStreamGetImp((AudioStreamRef) inObject);
    OSStatus                    err;
    CFTypeRef                   value = NULL;

    if (0) {}

    // AudioType
    else if (CFEqual(inProperty, kAudioStreamProperty_AudioType)) {
        // $$$ TODO: Return the current audio type.
    }

    // Format

    else if (CFEqual(inProperty, kAudioStreamProperty_Format)) {
        // Raw copy of the AudioStreamBasicDescription wrapped in CFData.
        value = CFDataCreate(NULL, (const uint8_t*) &me->format, sizeof(me->format));
        require_action(value, exit, err = kNoMemoryErr);
    }

    // Input

    else if (CFEqual(inProperty, kAudioStreamProperty_Input)) {
        value = me->input ? kCFBooleanTrue : kCFBooleanFalse;
        CFRetain(value);    // caller owns the returned reference.
    }

    // PreferredLatency

    else if (CFEqual(inProperty, kAudioStreamProperty_PreferredLatency)) {
        value = CFNumberCreateInt64(me->preferredLatencyMics);
        require_action(value, exit, err = kNoMemoryErr);
    }

    // StreamType

    else if (CFEqual(inProperty, kAudioStreamProperty_StreamType)) {
        value = CFNumberCreateInt64(me->streamType);
        require_action(value, exit, err = kNoMemoryErr);
    }

    // ThreadName

    else if (CFEqual(inProperty, kAudioStreamProperty_ThreadName)) {
        // $$$ TODO: If your implementation uses a helper thread, return its name here.
    }

    // ThreadPriority

    else if (CFEqual(inProperty, kAudioStreamProperty_ThreadPriority)) {
        // $$$ TODO: If your implementation uses a helper thread, return its priority here.
    }

    // Other

    else {
        err = kNotHandledErr;
        goto exit;
    }
    err = kNoErr;

exit:
    if (outErr) {
        *outErr = err;
    }
    return (value);
}

//===========================================================================================================================
//  _AudioStreamSetProperty
//===========================================================================================================================

// Sets one property on the stream from a CF value.  Returns kStateErr once the stream has been
// prepared (properties are locked), kNotHandledErr for unknown properties.
OSStatus
_AudioStreamSetProperty(
    CFTypeRef       inObject,
    CFStringRef     inProperty,
    CFTypeRef       inValue) {
    // Fix: declare as AudioStreamImpRef (the type _AudioStreamGetImp returns); the original
    // AudioStreamRef declaration only compiled because the two types coincide in non-DLL builds.
    AudioStreamImpRef const     me = _AudioStreamGetImp((AudioStreamRef) inObject);
    OSStatus                    err;

    // Properties may only be set before AudioStreamPrepare is called.

    require_action(!me->prepared, exit, err = kStateErr);

    if (0) {}

    // AudioType

    else if (CFEqual(inProperty, kAudioStreamProperty_AudioType)) {
        // $$$ TODO: Use the audio type to enable certain types of audio processing.
        // For example, if the audio type is "telephony", echo cancellation should be enabled;
        // if the audio type is "speech recognition", non-linear processing algorithms should be disabled.
        CFGetCString(inValue, me->audioType, sizeof(me->audioType));

    }

    // Format

    else if (CFEqual(inProperty, kAudioStreamProperty_Format)) {
        CFGetData(inValue, &me->format, sizeof(me->format), NULL, &err);
        require_noerr(err, exit);
    }

    // Input

    else if (CFEqual(inProperty, kAudioStreamProperty_Input)) {
        me->input = CFGetBoolean(inValue, NULL);
    }

    // PreferredLatency

    else if (CFEqual(inProperty, kAudioStreamProperty_PreferredLatency)) {
        me->preferredLatencyMics = (uint32_t) CFGetInt64(inValue, &err);
        require_noerr(err, exit);
    }

    // StreamType

    else if (CFEqual(inProperty, kAudioStreamProperty_StreamType)) {
        me->streamType = (uint32_t) CFGetInt64(inValue, &err);
        require_noerr(err, exit);
    }

    else if (CFEqual(inProperty, kAudioStreamProperty_Direction)) {
        me->direction = CFGetInt64(inValue, NULL);
    }

    // ThreadName

    else if (CFEqual(inProperty, kAudioStreamProperty_ThreadName)) {
        // $$$ TODO: If your implementation uses a helper thread, set the name of the thread to the string passed in
        // to this property.  See SetThreadName().

        char  inBuf[ 64 ];
        CFGetCString(inValue, inBuf, sizeof(inBuf));
        SetThreadName(inBuf);
    }

    // ThreadPriority

    else if (CFEqual(inProperty, kAudioStreamProperty_ThreadPriority)) {
        // $$$ TODO: If your implementation uses a helper thread, set the priority of the thread to the value passed in
        // to this property.  See SetCurrentThreadPriority().

        // Fix: the original local was named "inProperty", shadowing the parameter of the same name.
        int64_t threadPriority = CFGetInt64(inValue, &err);
        require_noerr(err, exit);
        (void) threadPriority;  // unused until a helper thread exists.
    }
    // Other

    else {
        err = kNotHandledErr;
        goto exit;
    }
    err = kNoErr;

exit:
    return (err);
}

//===========================================================================================================================
//  AudioStreamSetFormat
//===========================================================================================================================

void AudioStreamSetFormat(AudioStreamRef inStream, const AudioStreamBasicDescription* inFormat) {
    // Copy the caller's format description into the stream's private state.
    AudioStreamImpRef const     imp = _AudioStreamGetImp(inStream);
    imp->format = *inFormat;
}

//===========================================================================================================================
//  AudioStreamSetDelegateContext
//===========================================================================================================================

void AudioStreamSetDelegateContext(AudioStreamRef inStream, void* inContext) {
    // Store the session-delegate context pointer; ownership stays with the caller.
    AudioStreamImpRef const     imp = _AudioStreamGetImp(inStream);
    imp->delegateContext = inContext;
}

#if 0
#pragma mark -
#endif

//===========================================================================================================================
//  AudioStreamPrepare
//===========================================================================================================================

// Locks the stream's configuration (properties can no longer be set — see _AudioStreamSetProperty)
// and derives the input flag from the negotiated direction.  This stub cannot fail.
OSStatus    AudioStreamPrepare(AudioStreamRef inStream) {
    AudioStreamImpRef const     me = _AudioStreamGetImp(inStream);

    me->prepared = true;

    // Enable capture when the direction flags include an input leg.
    if (_AudioDirectionHasInput(me->direction)) {
        me->input = true;
    }

    // Cleanup: the original's "if (err) AudioStreamStop(...)" branch was unreachable
    // (err was unconditionally kNoErr just above it) and has been removed.
    return (kNoErr);
}

//===========================================================================================================================
//  AudioStreamStart
//===========================================================================================================================

// Starts audio processing: maps the negotiated audio type string to a platform stream type/handle
// and notifies the session delegate.  Prepares the stream first if the caller skipped
// AudioStreamPrepare().
OSStatus    AudioStreamStart(AudioStreamRef inStream) {
    // Fix: declare as AudioStreamImpRef (type returned by _AudioStreamGetImp).
    AudioStreamImpRef const             me = _AudioStreamGetImp(inStream);
    OSStatus                            err;
    CarplayAudioStreamType              audioType = AudioStreamMedia;
    int                                 handle = AudioStreamMedia;
    AirPlayAudioStreamPlatformContext*  platform;

    as_ulog(kLogLevelNotice, "AudioStreamStart with AudioUtilsStub.c\n");

    if (!me->prepared) {
        err = AudioStreamPrepare(inStream);
        as_ulog(kLogLevelNotice, "### AudioStreamPrepare err = %d \n", (int) err);
        require_noerr(err, exit);
    }

    // Fix: mSampleRate is a Float64 and direction is a uint64_t; the original passed them to %d,
    // which is undefined behavior.  Arguments are now cast to match the conversion specifiers.
    as_ulog(kLogLevelNotice,
            "### AudioStreamPrepare done sSampleRate = %d,mChannelsPerFrame = %u, mBitsPerChannel = %u,audiotype = %s, me->direction = %llu, me->streamType = %u, me->input = %d \n",
            (int) me->format.mSampleRate,
            (unsigned int) me->format.mChannelsPerFrame,
            (unsigned int) me->format.mBitsPerChannel,
            me->audioType,
            (unsigned long long) me->direction,
            me->streamType,
            (int) me->input);

    // Start threads to process audio.
    me->stop = false;
    platform = (AirPlayAudioStreamPlatformContext*) me->inputCallbackCtx;
    if (me->streamType == kAudioStreamType_MainAudio || me->streamType == kAudioStreamType_MainHighAudio) {
        // Map the audio type string onto the platform stream type/handle.
        // Keep this mapping in sync with AudioStreamStop().
        if (strcmp(me->audioType, "speechRecognition") == 0) {
            audioType = AudioStreamRecognition;
            handle = AudioStreamRecognition;
        }
        else if (strcmp(me->audioType, "telephony") == 0) {
            audioType = AudioStreamTelephony;
            handle = AudioStreamTelephony;
        }
        else if (strcmp(me->audioType, "alert") == 0) {
            audioType = AudioStreamAlert;
            handle = AudioStreamAlert;
        }
        else {
            // media
            audioType = AudioStreamMedia;
            handle = AudioStreamMedia;
        }
        if (platform != NULL && platform->session != NULL) {
            platform->session->delegate.audioStreamStart_f(me->audioType, handle, audioType, me->format.mSampleRate, me->format.mBitsPerChannel,
                                                           me->format.mChannelsPerFrame, inStream);
        }
    }
    else if (me->streamType == kAudioStreamType_AltAudio) {
        as_ulog(kLogLevelNotice, "### me->streamType == kAudioStreamType_AltAudio ");
        audioType = AudioStreamAlert;
        handle = AudioStreamAlert;
        if (platform != NULL && platform->session != NULL) {
            platform->session->delegate.audioStreamStart_f(me->audioType, handle, audioType, me->format.mSampleRate, me->format.mBitsPerChannel,
                                                           me->format.mChannelsPerFrame, inStream);
        }
    }
    else if (me->streamType == kAudioStreamType_AuxOutAudio) {
        as_ulog(kLogLevelNotice, "### me->streamType == kAudioStreamType_AuxOutAudio ");
        // $$$ TODO: Start the aux-out stream (template call was commented out in the original).
    }
    else {
        as_ulog(kLogLevelNotice, "#### AudioStreamStart audioType = %s, direction = %llu  \n", me->audioType, (unsigned long long) me->direction);
    }
    err = kNoErr;

exit:
    if (err) {
        AudioStreamStop(inStream, false);
    }
    return (err);
}


// Pulls output (render) audio data from the head unit (car side).
void AudioStreamOutputProcessData(int handle, void* inbuffer, int len, int frames, uint64_t timestamp, void* inStream) {

//	AudioStreamRef const        me = _AudioStreamGetImp(inStream);
    AudioStreamRef const        me = _AudioStreamGetImp(gAudioOutputStreamRef);
    OSStatus                    err;

    if ( me == NULL)
    {
    	return;
    }
    if (!me->prepared) {
        err = AudioStreamPrepare(me);
    }

    if (me->stop) {
        return;
    }

    clock_gettime(CLOCK_MONOTONIC, &(me->outputTimeSpec));
    me->outputHostTime = me->outputTimeSpec.tv_nsec + me->outputTimeSpec.tv_sec * 100000;
    me->outputSampleTime += len / 4;// 2, 3.5, 4, 4.5, 5
    if( me->outputCallbackPtr != NULL ){
    	me->outputCallbackPtr(me->outputSampleTime, me->outputHostTime, inbuffer, len, me->outputCallbackCtx);
    }
}


// Sends captured (input) audio data to the head unit (car side).
// Pushes captured audio from the platform into the registered input stream's callback.
// The inStream argument is ignored; the stream registered via AudioStreamSetInputCallback()
// (gAudioInputStreamRef) is used instead, as in the original.
void AudioStreamInputProcessData(int handle, void* buffer, int len, int numFrames, uint64_t timestamp, void* inStream) {
    AudioStreamImpRef const     me = _AudioStreamGetImp(gAudioInputStreamRef);

    (void) handle;
    (void) len;         // Payload size is derived from numFrames * mBytesPerFrame below.
    (void) timestamp;
    (void) inStream;

    if (me == NULL) {
        return;
    }
    if (me->stop || !me->prepared) {
        as_ulog(kLogLevelNotice, "### AudioStreamInputProc inStream stop \n");
        return;
    }

    // Only observable when no callback is registered; otherwise overwritten below.
    me->inputHostTime = UpMilliseconds();

    // Cleanup: the original's (buffer == NULL || len == 0) branch and its else branch were
    // byte-identical, so the duplicated code is collapsed into a single path.
    if (me->inputCallbackPtr != NULL) {
        clock_gettime(CLOCK_MONOTONIC, &(me->inputTimeSpec));
        me->inputHostTime = (uint64_t) me->inputTimeSpec.tv_nsec + (uint64_t) me->inputTimeSpec.tv_sec * 1000000000;
        me->inputSampleTime += numFrames;
        me->inputCallbackPtr(me->inputSampleTime, me->inputHostTime, buffer, numFrames * me->format.mBytesPerFrame, me->inputCallbackCtx);
    }
}


//===========================================================================================================================
//  AudioStreamStop
//===========================================================================================================================

// Stops audio processing: maps the audio type to the platform stream type/handle, notifies the
// session delegate, and returns the stream to its pre-Prepare state.
void AudioStreamStop(AudioStreamRef inStream, Boolean inDrain) {
    // Fix: declare as AudioStreamImpRef (type returned by _AudioStreamGetImp).
    AudioStreamImpRef const             me = _AudioStreamGetImp(inStream);
    int                                 handle = AudioStreamMedia;
    CarplayAudioStreamType              audioType = AudioStreamMedia;
    AirPlayAudioStreamPlatformContext*  platform;

    as_ulog(kLogLevelNotice, "AudioStreamStop with AudioUtilsStub.c\n");

    // $$$ TODO: This is where the audio processing chain should be stopped, and the audio processing chain torn down.
    // When AudioStreamStop() returns, the object should return to the state similar to before AudioStreamPrepare()
    // was called, so this function is responsible for undoing any resource allocation performed in AudioStreamPrepare().
    (void) inDrain;

    // Robustness fix: guard against a missing implementation, consistent with the
    // AudioStream*ProcessData() paths (the original dereferenced me unconditionally).
    if (me == NULL) {
        return;
    }

    platform = (AirPlayAudioStreamPlatformContext*) me->inputCallbackCtx;
    if (me->streamType == kAudioStreamType_MainAudio || me->streamType == kAudioStreamType_MainHighAudio) {
        // Same audio-type mapping as AudioStreamStart(); keep the two in sync.
        if (strcmp(me->audioType, "speechRecognition") == 0) {
            audioType = AudioStreamRecognition;
            handle = AudioStreamRecognition;
        }
        else if (strcmp(me->audioType, "telephony") == 0) {
            audioType = AudioStreamTelephony;
            handle = AudioStreamTelephony;
        }
        else if (strcmp(me->audioType, "alert") == 0) {
            audioType = AudioStreamAlert;
            handle = AudioStreamAlert;
        }
        else {
            // media
            audioType = AudioStreamMedia;
            handle = AudioStreamMedia;
        }
        if (platform != NULL && platform->session != NULL) {
            platform->session->delegate.audioStreamStop_f(handle, audioType);
        }
    }
    else if (me->streamType == kAudioStreamType_AltAudio) {
        audioType = AudioStreamAlert;
        handle = AudioStreamAlert;
        if (platform != NULL && platform->session != NULL) {
            platform->session->delegate.audioStreamStop_f(handle, audioType);
        }
    }
    else if (me->streamType == kAudioStreamType_AuxOutAudio) {
        // $$$ TODO: Stop the aux-out stream (template call was commented out in the original).
    }

    me->prepared = false;
}

//===========================================================================================================================
//  AudioStreamSetVocoderSampleRate
//===========================================================================================================================

void    AudioStreamSetVocoderSampleRate(AudioStreamRef inStream, uint32_t inVocoderSampleRate) {
    // $$$ TODO: React to the updated vocoder sample rate.  This stub takes no action.
    AudioStreamImpRef const     imp = _AudioStreamGetImp(inStream);
    (void) imp;
    (void) inVocoderSampleRate;
}

//===========================================================================================================================
//  AudioStreamUpdateState
//===========================================================================================================================

void    AudioStreamUpdateState(AudioStreamRef inStream, AirPlayStreamState inState) {
    // $$$ TODO: For Aux In Audio to determine if audio is being locally buffered or streamed.
    (void) inStream;
    (void) inState;
}

#if 0
#pragma mark -
#endif

