//
//  AudioEngine.m
//  MantuPlay
//
//  Created by Markus Sintonen on 7.10.2009.
//  Copyright 2009 __MyCompanyName__. All rights reserved.
//

#import <AudioToolbox/AudioServices.h>
#include <unistd.h>
#import "AudioEngine.h"
#import "Track.h"
#import "DebuggingMacros.h"
#import "AudioEngineHelpers.h"

@implementation AudioEngine

@synthesize audioFormat, audioFile, audioQueue, audioBuffer, captureBuffer, currentPacket, numPacketsToRead, audioPacketDescriptions, audioQueueFlushed, audioQueueDone, reqFrames, ts;

SINGLETON_IMPLEMENTATION(AudioEngine, sharedAudioEngine)

// AudioOutputUnit pull delegate: fills frameBuffer with exactly numberOfFrames
// frames taken from the decoded-audio FIFO (frameBufferQueue), spin-waiting in
// 1 ms sleeps until the decode thread has produced enough data.
// NOTE(review): blocking/sleeping here runs on whatever thread the output unit
// calls back on — if that is the realtime render thread, a starved decoder
// will glitch audibly; confirm this is acceptable for this design.
-(void)audioOutputUnit:(id)audioOutputUnit fillFrameBuffer:(SInt32*)frameBuffer withNumberOfFrames:(UInt32)numberOfFrames {
	UInt32 requestFrames = numberOfFrames;
	unsigned int gotFrames;
	// Spin until the queue's front buffer yields at least some frames.
	while(1) {
		if(frameBufferQueue->NextFramesFromBuffer(frameBuffer, requestFrames, &gotFrames)) break;
		else usleep(1000);
	}
	// Short read: the front buffer ran out mid-request. Pop it, then pull the
	// remaining frames from the next buffer (appending after the frames we got).
	if(gotFrames != requestFrames) {
		requestFrames = requestFrames - gotFrames;
		while(!frameBufferQueue->PopFrameBuffer()) { usleep(1000); }
		// assumes a single additional buffer always covers the remainder, i.e.
		// each queued buffer holds >= numberOfFrames frames — TODO confirm
		// (frameBufferQueue is created with reqFrames-sized buffers elsewhere).
		while(1) {
			if(frameBufferQueue->NextFramesFromBuffer(frameBuffer + gotFrames, requestFrames, &gotFrames)) break;
			else usleep(1000);
		}
	}
	//equalizer->process(frameBuffer, numberOfFrames, 2);
}

// AudioQueue output callback: refills a drained queue buffer with the next run
// of packets from the audio file and re-enqueues it. When the file is
// exhausted (0 packets read), flushes the queue once, asks it to stop after
// draining, and marks the engine done. inUserData is the AudioEngine instance
// passed to AudioQueueNewOutput. Also invoked directly once at startup to
// prime the queue (see setupTrackPlayerWithTrackPath:).
static void AudioQueueBufferCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer) {
	AudioEngine *audioEngine = (AudioEngine*)inUserData;
	// Late callbacks after EOF are ignored.
	if (audioEngine.audioQueueDone) return;
	
	UInt32 numBytes;
	UInt32 nPackets = audioEngine.numPacketsToRead;
	OSStatus status = AudioFileReadPackets(audioEngine.audioFile,      // The audio file from which packets of audio data are to be read.
                                           FALSE,                   // Set to true to cache the data. Otherwise, set to false.
                                           &numBytes,               // On output, a pointer to the number of bytes actually returned.
                                           audioEngine.audioPacketDescriptions,    // A pointer to an array of packet descriptions that have been allocated.
                                           audioEngine.currentPacket,  // The packet index of the first packet you want to be returned.
                                           &nPackets,               // On input, a pointer to the number of packets to read. On output, the number of packets actually read.
                                           inCompleteAQBuffer->mAudioData); // A pointer to user-allocated memory.
	ERRCHECK(status);
    
    // we have some data
	if (nPackets > 0) {
		inCompleteAQBuffer->mAudioDataByteSize = numBytes;
        
		status = AudioQueueEnqueueBuffer(inAQ,                                  // The audio queue that owns the audio queue buffer.
                                         inCompleteAQBuffer,                    // The audio queue buffer to add to the buffer queue.
                                         (audioEngine.audioPacketDescriptions ? nPackets : 0), // The number of packets of audio data in the inBuffer parameter. See Docs.
                                         audioEngine.audioPacketDescriptions);                 // An array of packet descriptions. Or NULL. See Docs.
		ERRCHECK(status);
        
		audioEngine.currentPacket += nPackets;
	} else {
        // **** This ensures that we flush the queue when done -- ensures you get all the data out ****
        // Flush must happen before the stop request so buffered-but-unplayed
        // packets are rendered; the flag guards against flushing twice.
        if (!audioEngine.audioQueueFlushed) {
			status = AudioQueueFlush(audioEngine.audioQueue);
            ERRCHECK(status);
            
			audioEngine.audioQueueFlushed = YES;
		}
		
		// NO = asynchronous stop: the queue keeps playing until drained.
		status = AudioQueueStop(audioEngine.audioQueue, NO);
		ERRCHECK(status);
        
		// reading nPackets == 0 is our EOF condition
		audioEngine.audioQueueDone = YES;
	}
}

// Background decode loop, run on its own NSThread (never returns): repeatedly
// offline-renders audio from the AudioQueue into captureBuffer and pushes the
// resulting frames onto frameBufferQueue for the output-unit callback to
// consume. Backs off 1 ms whenever the FIFO has no room.
-(void)decodeFrames {
	for (;;) {
		usleep(1000);
		if (!frameBufferQueue->ReadyToPushFrameBuffer()) continue;

		OSStatus renderStatus = AudioQueueOfflineRender(audioQueue, &ts, captureBuffer, reqFrames);
		ERRCHECK(renderStatus);

		SInt32 *renderedFrames = (SInt32*)captureBuffer->mAudioData;
		unsigned int renderedCount = captureBuffer->mAudioDataByteSize / [audioOutputUnit audioFormat].mBytesPerFrame;

		//equalizer->process(renderedFrames, renderedCount, 2);

		// Advance the render timestamp only when the push actually succeeded,
		// so a failed push retries the same span of audio.
		if (frameBufferQueue->PushFrameBuffer(renderedFrames, renderedCount)) {
			ts.mSampleTime += renderedCount;
		}
	}
}

// Designated initializer: chains to super, then configures the audio session
// and output unit via setupAudioEngine.
-(id)init {
	self = [super init];
	if (self != nil) {
		[self setupAudioEngine];
	}
	return self;
}

// One-time engine setup: initializes the audio session, selects the playback
// category, hints the hardware IO buffer size, activates the session, and
// creates the output unit that pulls rendered frames from this engine.
// Fix: every AudioSession call returns an OSStatus that was previously
// ignored — now routed through ERRCHECK like the rest of the file.
-(void)setupAudioEngine {
	OSStatus status = AudioSessionInitialize(NULL, NULL, NULL, NULL);
	ERRCHECK(status);
	
	// Media-playback category: audio keeps playing with the ring/silent
	// switch muted and mixes like a music player.
	UInt32 sessionCategory = kAudioSessionCategory_MediaPlayback;
	status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
	ERRCHECK(status);
	
	// Request a ~50 ms hardware IO buffer; this is a hint and the system may
	// grant a different duration. The property expects a Float32.
	Float32 aBufferLength = 0.05f; // In seconds
	status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(aBufferLength), &aBufferLength);
	ERRCHECK(status);
	
	status = AudioSessionSetActive(YES);
	ERRCHECK(status);
	
	// The output unit calls back into audioOutputUnit:fillFrameBuffer:... to
	// pull decoded frames.
	audioOutputUnit = [[AudioOutputUnit alloc] initWithDelegate:self];
}

// Opens the track at trackPath and builds the offline-render pipeline:
//   AudioFile -> AudioQueue (offline render) -> frameBufferQueue -> output unit
// Steps: open the file, create an output queue in the file's native format,
// size and allocate the enqueue + capture buffers, forward the magic cookie,
// switch the queue to offline rendering in the output unit's PCM format,
// prime the queue, synchronously pre-fill the frame FIFO, then spawn the
// decode thread and start the output unit.
// Fixes: the AudioQueueNewOutput result was never ERRCHECKed; the local
// `audioFormat` shadowed the synthesized property of the same name; the inner
// render loop shadowed the outer `result`.
-(void)setupTrackPlayerWithTrackPath:(NSString*)trackPath {
	//self.trackQueue = queue;
	
	updateAudioEngine = YES;
	
	audioQueueFlushed = NO;
	audioQueueDone = NO;
	
	currentTrackPlaying = 0;
	
	OSStatus status;
	
	// 0x01 == kAudioFileReadPermission (read-only open).
	status = AudioFileOpenURL((CFURLRef)[NSURL fileURLWithPath:trackPath isDirectory:NO], 0x01, 0, &audioFile);
	ERRCHECK(status);
	
	// The file's native (possibly compressed) data format drives the queue.
	AudioStreamBasicDescription audioFileDataFormat;
	UInt32 size = sizeof(audioFileDataFormat);
	status = AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &size, &audioFileDataFormat);
	ERRCHECK(status);
	
	status = AudioQueueNewOutput(&audioFileDataFormat, AudioQueueBufferCallback, self, NULL, NULL, 0, &audioQueue);
	ERRCHECK(status); // previously unchecked — a failure here invalidates everything below
	
	// first check to see what the max size of a packet is - if it is bigger
	// than our allocation default size, that needs to become larger
	UInt32 maxPacketSize;
	size = sizeof(maxPacketSize);
	status = AudioFileGetProperty(audioFile, kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize);
	ERRCHECK(status);
	
	// adjust buffer size to represent about a second of audio based on this format
	UInt32 bufferByteSize;
	CalculateBytesForTime(audioFileDataFormat, maxPacketSize, 1.0, &bufferByteSize, &numPacketsToRead);
	NSLog(@"Buffer Byte Size: %d, Num Packets to Read: %d", (int)bufferByteSize, (int)numPacketsToRead);
	
	// VBR formats need per-packet descriptions when enqueuing; CBR (e.g.
	// linear PCM) must pass NULL.
	BOOL isFormatVBR = (audioFileDataFormat.mBytesPerPacket == 0 || audioFileDataFormat.mFramesPerPacket == 0);
	if (isFormatVBR) {
		audioPacketDescriptions = (AudioStreamPacketDescription*)malloc(numPacketsToRead*sizeof(AudioStreamPacketDescription));
	} else {
		audioPacketDescriptions = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)
	}
	
	// if the file has a magic cookie, we should get it and set it on the AQ
	size = sizeof(UInt32);
	OSStatus result = AudioFileGetPropertyInfo (audioFile, kAudioFilePropertyMagicCookieData, &size, NULL);
	if (!result && size) {
		char* cookie = (char*)malloc(size*sizeof(char));		
		ERRCHECK(AudioFileGetProperty (audioFile, kAudioFilePropertyMagicCookieData, &size, cookie));
		ERRCHECK(AudioQueueSetProperty(audioQueue, kAudioQueueProperty_MagicCookie, cookie, size));
		free(cookie);
	}
	
	// Offline render output is stereo.
	AudioChannelLayout acl;
    acl.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;
    acl.mChannelBitmap = 0;
    acl.mNumberChannelDescriptions = 0;
	
	result = AudioQueueAllocateBuffer(audioQueue, bufferByteSize, &audioBuffer);
	ERRCHECK(result);
	
	// Render into the output unit's PCM format (renamed from `audioFormat`,
	// which shadowed the synthesized property of the same name).
	AudioStreamBasicDescription offlineRenderFormat = audioOutputUnit.audioFormat;
	result = AudioQueueSetOfflineRenderFormat(audioQueue, &offlineRenderFormat, &acl);
	ERRCHECK(result);
	
	// allocate the capture buffer, just keep it at half the size of the enqueue buffer
	// we don't ever want to pull any faster than we can push data in for render
	// this 2:1 ratio keeps the AQ Offline Render happy
	const UInt32 captureBufferByteSize = bufferByteSize / 2;
	reqFrames = captureBufferByteSize / [audioOutputUnit audioFormat].mBytesPerFrame;
	
	EqualizerSettings *eqSettings = new EqualizerSettings();
	equalizer = new Equalizer(eqSettings, [audioOutputUnit audioFormat].mSampleRate);
	
	NSLog(@"Max amount of frames to request: %d capture buffer size: %d", (int)reqFrames, (int)captureBufferByteSize);
	
	result = AudioQueueAllocateBuffer(audioQueue, captureBufferByteSize, &captureBuffer);
	ERRCHECK(result);
	
	result = AudioQueueStart(audioQueue, NULL);
	ERRCHECK(result);
	
	ts.mFlags = kAudioTimeStampSampleTimeValid;
	ts.mSampleTime = 0;
	
	// A zero-frame render call is required to prime offline rendering.
	result = AudioQueueOfflineRender(audioQueue, &ts, captureBuffer, 0);
	ERRCHECK(result);

	// we need to enqueue a buffer after the queue has started
	AudioQueueBufferCallback(self, audioQueue, audioBuffer);
		
	frameBufferQueue = new AudioFrameBufferQueue(10, reqFrames);

	// Pre-fill the FIFO until it refuses a push, so playback starts with a
	// full cushion of decoded audio.
	while(1) {
		result = AudioQueueOfflineRender(audioQueue, &ts, captureBuffer, reqFrames);
		ERRCHECK(result);
		unsigned int frameCount = captureBuffer->mAudioDataByteSize / offlineRenderFormat.mBytesPerFrame;
		SInt32 *frameBuffer = (SInt32*)captureBuffer->mAudioData;
		
		//equalizer->process(frameBuffer, frameCount, 2);
		
		if(frameBufferQueue->PushFrameBuffer(frameBuffer, frameCount))
			ts.mSampleTime += frameCount;
		else
			break;
	}
	
	[NSThread detachNewThreadSelector:@selector(decodeFrames) toTarget:self withObject:nil];
	NSLog(@"AudioQueue inited");
	
	[audioOutputUnit start];
}

// TODO(review): transport control not implemented — intentionally a no-op.
-(void)play {
	
}
// TODO(review): transport control not implemented — intentionally a no-op.
-(void)pause {
	
}

// Returns the current playback position in seconds.
// NOTE(review): this method references `impl`, `getQueueTime(...)`,
// `_wasStarted`, `_mediaStartSampleTime`, `_mediaSampleTime` and `_asbd`,
// none of which are declared anywhere visible in this file — it appears to be
// pasted from a different audio-engine implementation and likely does not
// compile against this class. Verify where `impl` is declared (AudioEngine.h?)
// or rewrite against this class's AudioQueue/timestamp state.
-(NSTimeInterval)currentTime {
	NSTimeInterval time = 0.;
	// Serialize against concurrent position updates.
	@synchronized(self)
	{
		if (impl->_wasStarted) {
			// Queue time is relative to queue start; offset by where in the
			// media playback began, then convert frames -> seconds.
			double queueTime = getQueueTime(impl);
			time = (queueTime + impl->_mediaStartSampleTime) / impl->_asbd.mSampleRate;
		} else {
			time =  impl->_mediaSampleTime / impl->_asbd.mSampleRate;
		}
	}
	return time;
}

// Stores the engine delegate. Plain assignment (no retain) — the usual
// non-owning delegate convention under MRC; the caller must keep the delegate
// alive and clear it before the delegate deallocates.
-(void)setDelegate:(id<AudioEngineDelegate>)audioEngineDelegate {
	delegate = audioEngineDelegate;
}

// Tears down everything setupAudioEngine / setupTrackPlayerWithTrackPath:
// built: output unit, audio queue + buffers, packet descriptions, audio file,
// equalizer and frame FIFO, then deactivates the session.
// Fixes: the original was not safe to call twice (dealloc calls it — second
// call double-released audioOutputUnit and double-freed the queue and packet
// descriptions) and crashed if invoked before a track was ever loaded
// (audioQueue NULL). Every handle is now NULL-guarded and cleared after
// teardown so repeated calls are no-ops.
-(void)unloadAudioEngine {
	[audioOutputUnit stop];        // messaging nil is a no-op on a repeat call
	[audioOutputUnit release];
	audioOutputUnit = nil;
	
	if (audioQueue) {
		OSStatus status = AudioQueueStop(audioQueue, YES); // YES = stop immediately
		ERRCHECK(status);
		status = AudioQueueFreeBuffer(audioQueue, captureBuffer);
		ERRCHECK(status);
		status = AudioQueueFreeBuffer(audioQueue, audioBuffer);
		ERRCHECK(status);
		status = AudioQueueDispose(audioQueue, YES);
		ERRCHECK(status);
		audioQueue = NULL;
		captureBuffer = NULL;
		audioBuffer = NULL;
	}
	
	free(audioPacketDescriptions); // free(NULL) is a defined no-op
	audioPacketDescriptions = NULL;
	
	if (audioFile) {
		OSStatus status = AudioFileClose(audioFile);
		ERRCHECK(status);
		audioFile = NULL;
	}
	
	delete equalizer;              // delete NULL is a defined no-op
	equalizer = NULL;
	delete frameBufferQueue;
	frameBufferQueue = NULL;
	
	AudioSessionSetActive(NO);
}

// MRC dealloc: releases all engine resources, then chains to super
// (required under manual reference counting).
- (void)dealloc {
	[self unloadAudioEngine];
	//[trackQueue release];
    [super dealloc];
}
@end
