//
//  AudioController.m
//  TengineTwo
//
//  Created by StandardUser on 11/03/2012.
//  Copyright (c) 2012 __MyCompanyName__. All rights reserved.
//
//  todo: this needs a lot of cleanup
//
//  help from:
//  http://blog.andre-michelle.com/2009/pitch-mp3/
//  http://timbolstad.com/2010/03/14/core-audio-getting-started/
//  http://stackoverflow.com/questions/8533143/decoding-mp3-files-by-extaudiofileopenurl
//
//  I also ordered:
//  http://my.safaribooksonline.com/book/audio/9780321636973
//


#import "AudioController.h"

// Audio Stream Descriptions

// Native iPhone sample rate of 44.1kHz, same as a CD.
const Float64 kGraphSampleRate = 44100.0;
// Current playback rate (1.0 = normal speed), clamped to [0.01, 2.0] by
// -setPlaybackRate:. File-scope so the C render callback can read it without
// an object reference; exposed via the playbackRate accessors.
// NOTE(review): not declared static, so it has external linkage — consider
// making both globals static to keep them file-private.
float rate = 1.0;
// Handle to the source audio file opened in -init; read by renderInput().
ExtAudioFileRef extAFRef;

@implementation AudioController

// Designated initializer: opens the bundled test.mp3 via the Extended Audio
// File API and configures it to deliver mono 16-bit signed linear PCM to the
// render callback. Returns self even on audio-setup failure (errors are
// logged); the original commented-out `return err` would have returned an
// OSStatus from init, which is invalid.
- (id)init {
    self = [super init];
    if (self) {

        rate = 1.0;

        // Locate the bundled source file. pathForResource: returns nil when
        // the resource is missing, and fileURLWithPath: raises on nil — guard.
        NSString *filePath = [[NSBundle mainBundle] pathForResource:@"test" ofType:@"mp3"];
        if (filePath == nil) {
            NSLog(@"AudioController init: test.mp3 not found in main bundle");
            return self;
        }
        NSURL *audioURL = [NSURL fileURLWithPath:filePath];

        OSStatus err = ExtAudioFileOpenURL((CFURLRef)audioURL, &extAFRef);
        if (err) {
            NSLog(@"AudioController init: ExtAudioFileOpenURL failed (%d)", (int)err);
            return self;
        }

        // Query the file's native format; its sample rate is reused below so
        // ExtAudioFile does no rate conversion, only decode + channel/depth.
        AudioStreamBasicDescription fileFormat;
        UInt32 propSize = sizeof(fileFormat);
        memset(&fileFormat, 0, sizeof(AudioStreamBasicDescription));
        err = ExtAudioFileGetProperty(extAFRef, kExtAudioFileProperty_FileDataFormat, &propSize, &fileFormat);
        if (err) {
            NSLog(@"AudioController init: get FileDataFormat failed (%d)", (int)err);
            return self;
        }

        // Client format: mono, packed signed 16-bit linear PCM
        // (AudioSampleType == SInt16), one frame per packet.
        AudioStreamBasicDescription clientFormat;
        memset(&clientFormat, 0, sizeof(AudioStreamBasicDescription));
        clientFormat.mFormatID           = kAudioFormatLinearPCM;
        clientFormat.mSampleRate         = fileFormat.mSampleRate;
        clientFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        clientFormat.mChannelsPerFrame   = 1;
        clientFormat.mBitsPerChannel     = sizeof(AudioSampleType) * 8;
        clientFormat.mFramesPerPacket    = 1;
        clientFormat.mBytesPerFrame      = (clientFormat.mBitsPerChannel / 8) * clientFormat.mChannelsPerFrame;
        clientFormat.mBytesPerPacket     = clientFormat.mFramesPerPacket * clientFormat.mBytesPerFrame;
        clientFormat.mReserved           = 0;

        err = ExtAudioFileSetProperty(extAFRef, kExtAudioFileProperty_ClientDataFormat, sizeof(clientFormat), &clientFormat);
        if (err) {
            NSLog(@"AudioController init: set ClientDataFormat failed (%d)", (int)err);
        }
    }
    return self;
}

// The render callback is a plain C function (kept out of the class for
// real-time safety), so the playback rate lives in a file-scope global.
// These accessors are the Objective-C face of that global.
- (float)playbackRate
{
    float currentRate = rate;
    return currentRate;
}
// Clamp the requested rate into the supported range [0.01, 2.0] before
// publishing it to the render callback's file-scope global.
- (void)setPlaybackRate:(float)value
{
    float clamped = value;
    if (clamped < 0.01) {
        clamped = 0.01;
    } else if (clamped > 2.0) {
        clamped = 2.0;
    }
    rate = clamped;
}

// Starts rendering: kicks off the AUGraph's render thread, which begins
// pulling audio through renderInput. Logs on failure (the original computed
// the result and discarded it).
- (void)startAUGraph
{
	OSStatus result = AUGraphStart(mGraph);
	if (result != noErr) {
		NSLog(@"AUGraphStart failed: %d", (int)result);
	}
}

// Stops rendering if the graph is currently running. The original trusted
// isRunning even when AUGraphIsRunning itself failed, and discarded both
// results; both are now checked and logged.
- (void)stopAUGraph
{
    Boolean isRunning = false;

    // Check to see if the graph is running.
    OSStatus result = AUGraphIsRunning(mGraph, &isRunning);
    if (result != noErr) {
        NSLog(@"AUGraphIsRunning failed: %d", (int)result);
        return;
    }
    // If the graph is running, stop it.
    if (isRunning) {
        result = AUGraphStop(mGraph);
        if (result != noErr) {
            NSLog(@"AUGraphStop failed: %d", (int)result);
        }
    }
}

#pragma mark - Callback function to fill buffer

// Audio render callback: decodes frames from the source file and resamples
// them by linear interpolation to implement the variable playback rate.
// Pitching up skips source frames; pitching down reuses them; the lerp
// between neighbouring frames smooths the output either way.
// Runs on the real-time audio thread: keep work minimal.
// NOTE(review): the malloc/free (and NSLog) per callback are not real-time
// safe; a preallocated scratch buffer should replace them.
static OSStatus renderInput(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData)
{
	// Destination buffer supplied by the mixer (mono AudioSampleType == SInt16).
	AudioSampleType *outA = (AudioSampleType *)ioData->mBuffers[0].mData;

    // To emit inNumberFrames output frames at the current rate we must read
    // rate * inNumberFrames source frames. +1 so the interpolation's
    // ceilf() neighbour of the last output frame is always in bounds
    // (the original sized the buffer without this headroom and could read
    // one frame past the allocation).
    UInt32 framesToRead = (UInt32)ceilf(inNumberFrames * rate) + 1;
    UInt32 dataSize = framesToRead * sizeof(SInt16);
    SInt16 *data = (SInt16 *)malloc(dataSize);
    if (data == NULL) {
        // Out of memory: emit silence rather than crashing the audio thread.
        memset(outA, 0, inNumberFrames * sizeof(AudioSampleType));
        return noErr;
    }

    // Temporary single-buffer list wrapping the scratch allocation.
    AudioBufferList bufList;
    bufList.mNumberBuffers = 1;
    bufList.mBuffers[0].mNumberChannels = 1;
    bufList.mBuffers[0].mData = data;
    bufList.mBuffers[0].mDataByteSize = dataSize;

    // ExtAudioFileRead's second parameter is an in/out *frame* count (the
    // original passed a byte count). On return it holds the frames actually
    // decoded, which is fewer than requested at end of file.
    UInt32 framesRead = framesToRead;
    OSStatus err = ExtAudioFileRead(extAFRef, &framesRead, &bufList);
    if (err) {
        NSLog(@"renderInput: ExtAudioFileRead failed (%d)", (int)err);
        framesRead = 0;
    }
    // Zero-fill whatever was not read so the lerp below never consumes
    // uninitialized memory on a short read / EOF.
    if (framesRead < framesToRead) {
        memset(data + framesRead, 0, (framesToRead - framesRead) * sizeof(SInt16));
    }

    for (UInt32 i = 0; i < inNumberFrames; i++)
    {
        float frameFloat = i * rate;

        int previousFrame = (int)floorf(frameFloat);
        int nextFrame = (int)ceilf(frameFloat);
        float lerp = frameFloat - previousFrame;

        outA[i] = (SInt16)(data[previousFrame] + (data[nextFrame] - data[previousFrame]) * lerp);
    }

    free(data);

    return noErr;
}

#pragma mark - Graph of units is defined here

// Builds the processing graph: a single-bus MultiChannelMixer node feeding a
// RemoteIO (hardware) output node. The mixer's one input bus is driven by the
// renderInput callback. The graph is initialized here but not started; use
// -startAUGraph / -stopAUGraph to control rendering.
// NOTE(review): none of the OSStatus results collected below are checked.
- (void)initializeAUGraph
{
	//************************************************************
	//*** Setup the AUGraph, add AUNodes, and make connections ***
	//************************************************************
	// Error checking result
	OSStatus result = noErr;
    
	// create a new AUGraph
	result = NewAUGraph(&mGraph);
    
    // AUNodes represent AudioUnits on the AUGraph and provide an
	// easy means for connecting audioUnits together.
    AUNode outputNode;
	AUNode mixerNode;
    
    // Create AudioComponentDescriptions for the AUs we want in the graph
    // mixer component
	AudioComponentDescription mixer_desc;
	mixer_desc.componentType = kAudioUnitType_Mixer;
	mixer_desc.componentSubType = kAudioUnitSubType_MultiChannelMixer;
	mixer_desc.componentFlags = 0;
	mixer_desc.componentFlagsMask = 0;
	mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    
	//  output component (RemoteIO is the hardware-output unit on iOS)
	AudioComponentDescription output_desc;
	output_desc.componentType = kAudioUnitType_Output;
	output_desc.componentSubType = kAudioUnitSubType_RemoteIO;
	output_desc.componentFlags = 0;
	output_desc.componentFlagsMask = 0;
	output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;
    
    // Add nodes to the graph to hold our AudioUnits,
	// You pass in a reference to the  AudioComponentDescription
	// and get back an  AudioUnit
	result = AUGraphAddNode(mGraph, &output_desc, &outputNode);
	result = AUGraphAddNode(mGraph, &mixer_desc, &mixerNode );
    
	// Now we can manage connections using nodes in the graph.
    // Connect the mixer node's output to the output node's input
	result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, outputNode, 0);
    
    // open the graph AudioUnits are open but not initialized (no resource allocation occurs here)
	result = AUGraphOpen(mGraph);
    
	// Get a link to the mixer AU so we can talk to it later
	result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
    
	//************************************************************
	//*** Make connections to the mixer unit's inputs ***
	//************************************************************
    // Set the number of input busses on the Mixer Unit
	// Right now we are only doing a single bus.
	UInt32 numbuses = 1;
	UInt32 size = sizeof(numbuses);
    result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &numbuses, size);
    
	//CAStreamBasicDescription desc;
    AudioStreamBasicDescription desc;
    
	// Loop through and setup a callback for each source you want to send to the mixer.
	// Right now we are only doing a single bus so we could do without the loop.
	for (int i = 0; i < numbuses; ++i) {
        
		// Setup render callback struct
		// This struct describes the function that will be called
		// to provide a buffer of audio samples for the mixer unit.
		AURenderCallbackStruct renderCallbackStruct;
		renderCallbackStruct.inputProc = &renderInput;
		// NOTE(review): renderInput currently ignores its refCon and reads the
		// file-scope globals instead; self is passed here for future use.
		renderCallbackStruct.inputProcRefCon = self;
        
        // Set a callback for the specified node's specified input
        result = AUGraphSetNodeInputCallback(mGraph, mixerNode, i, &renderCallbackStruct);
        
		// Get a CAStreamBasicDescription from the mixer bus.
        size = sizeof(desc);
		result = AudioUnitGetProperty(  mMixer,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      i,
                                      &desc,
                                      &size);
		// Initializes the structure to 0 to ensure there are no spurious values.
		// NOTE(review): this memset wipes the format just fetched above, making
		// the fetch a no-op. Harmless — every field used is assigned below and
		// mReserved stays 0 — but the get/memset order looks inverted relative
		// to the Apple sample this derives from.
		memset (&desc, 0, sizeof (desc));        						
        
		// Make modifications to the CAStreamBasicDescription
		// We're going to use 16 bit Signed Ints because they're easier to deal with
		// The Mixer unit will accept either 16 bit signed integers or
		// 32 bit 8.24 fixed point integers.
		desc.mSampleRate        = kGraphSampleRate; // set sample rate
		desc.mFormatID          = kAudioFormatLinearPCM;
		desc.mFormatFlags       = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
		desc.mBitsPerChannel    = sizeof(AudioSampleType) * 8; // AudioSampleType == 16 bit signed ints
		desc.mChannelsPerFrame  = 1;
		desc.mFramesPerPacket   = 1;
		desc.mBytesPerFrame     = ( desc.mBitsPerChannel / 8 ) * desc.mChannelsPerFrame;
		desc.mBytesPerPacket    = desc.mBytesPerFrame * desc.mFramesPerPacket;
        
		//printf("Mixer file format: "); desc.Print();
		// Apply the modified CAStreamBasicDescription to the mixer input bus
		result = AudioUnitSetProperty(  mMixer,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      i,
                                      &desc,
                                      sizeof(desc));
	}
	// Apply the CAStreamBasicDescription to the mixer output bus
	result = AudioUnitSetProperty(	 mMixer,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desc,
                                  sizeof(desc));
    
	//************************************************************
	//*** Setup the audio output stream ***
	//************************************************************
    
	// Get a CAStreamBasicDescription from the output Audio Unit
    result = AudioUnitGetProperty(  mMixer,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desc,
                                  &size);
    
	// Initializes the structure to 0 to ensure there are no spurious values.
	// NOTE(review): as above, this wipes the format just fetched.
	memset (&desc, 0, sizeof (desc));
    
	// Make modifications to the CAStreamBasicDescription
	// AUCanonical on the iPhone is the 8.24 integer format that is native to the iPhone.
	// The Mixer unit does the format shifting for you.
	//desc.SetAUCanonical(1, true);
    //straight c implementation
    
    // Build the canonical AU format field-by-field (locals mirror the ASBD
    // member names from the C++ CAStreamBasicDescription helper).
    UInt32 mFormatID = kAudioFormatLinearPCM;
    UInt32 mFormatFlags = kAudioFormatFlagsCanonical;
    UInt32 mChannelsPerFrame = 1;
    UInt32 mFramesPerPacket = 1;
    UInt32 mBitsPerChannel = 8 * (UInt32)sizeof(AudioUnitSampleType);
    UInt32 mBytesPerFrame = (mBitsPerChannel / 8) * mChannelsPerFrame ;
    UInt32 mBytesPerPacket = mFramesPerPacket * mBytesPerFrame;
    
    desc.mFormatID = mFormatID;
    desc.mFormatFlags = mFormatFlags;
    desc.mChannelsPerFrame = mChannelsPerFrame;
    desc.mFramesPerPacket = mFramesPerPacket;
    desc.mBitsPerChannel = mBitsPerChannel;
    desc.mBytesPerFrame = mBytesPerFrame;
    desc.mBytesPerPacket = mBytesPerPacket;
    
	desc.mSampleRate = kGraphSampleRate;
    
    // Apply the modified CAStreamBasicDescription to the output Audio Unit
	result = AudioUnitSetProperty(  mMixer,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desc,
                                  sizeof(desc));
    
    // Once everything is set up call initialize to validate connections
	result = AUGraphInitialize(mGraph);
}

// Clean up: tear down the processing graph and close the source audio file.
// (Pre-ARC code: [super dealloc] is required.)
- (void)dealloc {

    DisposeAUGraph(mGraph);
    // The ExtAudioFile opened in -init was previously leaked; dispose it.
    if (extAFRef) {
        ExtAudioFileDispose(extAFRef);
        extAFRef = NULL;
    }
    [super dealloc];
}

@end