//////////////////////////////////////////////////////////////////////////////
//
//  Created by Philip Mulcahy on 20/01/2012.
//  Copyright (c) 2012 Philip Mulcahy. All rights reserved.
//
//  This file is part of the note-recog library.
//
//  note-recog is free software: you can redistribute it and/or modify
//  it under the terms of version 3 of the GNU Lesser General Public License 
//  as published by the Free Software Foundation.
//
//  note-recog is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
//  GNU General Public License for more details.
//
//  You should have received a copy of the GNU General Public License
//  along with note-recog.  If not, see <http://www.gnu.org/licenses/>.
//
//////////////////////////////////////////////////////////////////////////////

#include "Listener.h"
#include <iostream>
#include <AVFoundation/AVFoundation.h>
#include <stdlib.h>
#include "NoteEvent.h"
#include "Slice.h"
#include "Condenser.h"
#include <fstream>
#include "TestData.h"
#include "Util.h"
#include "Spectrum.h"

using namespace std;

LOG_DEFINE(Listener);

// C-style trampoline registered with the remote-IO unit.  inRefCon carries
// the Listener instance (set in Listener's constructor), so simply forward
// the render notification to its member callback().
OSStatus callbackWrapper(void                        *inRefCon,
                         AudioUnitRenderActionFlags  *ioActionFlags,
                         const AudioTimeStamp        *inTimeStamp,
                         UInt32                       inBusNumber,
                         UInt32                       inNumberFrames,
                         AudioBufferList             *ioData)
{
    Listener* const listener = static_cast<Listener*>(inRefCon);
    return listener->callback(
        ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, ioData);
}

float setupAudioSessionAndReturnActualSampleRate( float desiredSampleRate );

AudioBufferList * createAudioBufferList(size_t bytesPerSample); 

// The single process-wide Listener, constructed eagerly during static
// initialization (its constructor activates the audio session and builds
// the AUGraph).
Listener Listener::_instance;

// Returns the process-wide Listener singleton.
Listener& Listener::instance()
{
    return _instance;
}

// Builds the mono, packed, signed-integer linear-PCM stream description the
// Listener asks the remote-IO unit to deliver.
//
//   bytesPerSample : size of one sample in bytes (e.g. sizeof(SInt16)).
//   sampleRate     : sample rate in Hz actually granted by the audio session.
//
// Returns the fully populated description by value.
static AudioStreamBasicDescription createUpStreamFormat(
                                        const size_t bytesPerSample,
                                        const float sampleRate
                                        ) {
    // BUG FIX: zero-initialize so fields this function does not set
    // explicitly (notably mReserved) are not stack garbage when the struct
    // is later passed to AudioUnitSetProperty.
    AudioStreamBasicDescription streamFormat = {};
    streamFormat.mFormatID = kAudioFormatLinearPCM;
    streamFormat.mFormatFlags =
        kAudioFormatFlagIsSignedInteger |
        kAudioFormatFlagIsPacked;
    streamFormat.mBitsPerChannel = static_cast<UInt32>(8 * bytesPerSample);
    streamFormat.mFramesPerPacket = static_cast<UInt32>(1);
    streamFormat.mChannelsPerFrame = 1; // mono capture
    streamFormat.mBytesPerPacket = static_cast<UInt32>(bytesPerSample * streamFormat.mFramesPerPacket);
    streamFormat.mBytesPerFrame = static_cast<UInt32>(bytesPerSample * streamFormat.mChannelsPerFrame);
    streamFormat.mSampleRate = sampleRate;
    return streamFormat;
}

// Constructs the singleton Listener: activates the audio session (which
// fixes the real sample rate), sizes the capture buffers, then builds a
// one-node AUGraph around the remote-IO unit configured for input with a
// custom render callback and self-managed buffers.
// NOTE: initializer-list order matters — _sampleRate must be established
// before _freqResolution and _streamFormat, which are computed from it.
Listener::Listener() :
  _bytesPerSample(sizeof(SInt16)),
  _bytesPerOutputSample(sizeof(float)),
  _sampleRate(setupAudioSessionAndReturnActualSampleRate(22050.f)),
  _bufferCapacity(1024),
  _freqResolution(_sampleRate/_bufferCapacity),
  _nOver2(_bufferCapacity/2),
  _shortBuffer( _bufferCapacity ),
  _floatBuffer( _bufferCapacity ),
  _bufferList( createAudioBufferList( _bytesPerSample ) ),
  _streamFormat(createUpStreamFormat( _bytesPerSample, _sampleRate ) ),
  _frameCount(0),
  _condenser(0),
  _fft( new FastFourierTransform(
                                 _shortBuffer,
                                 _sampleRate) )
{
    LOG_INFO("Listener() starting");
    
    // check _bufferCapacity is a power of 2
    // (the FFT requires it; the assert round-trips _nOver2 through log2f)
    const int log2n = (log2f(_bufferCapacity/2));
    assert(1 << log2n == _nOver2);
    
    // Write position (in samples) within _shortBuffer for the next
    // callback's data.
    _index = 0;
    
    // Describe the remote-IO audio unit we want the graph to instantiate.
    AudioComponentDescription ioUnitDescription;
    ioUnitDescription.componentType = kAudioUnitType_Output;
    ioUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    ioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    ioUnitDescription.componentFlags = 0;
    ioUnitDescription.componentFlagsMask = 0;
    
    Util::check(NewAUGraph(&_processingGraph));
    
    AUNode ioNode;
    Util::check(AUGraphAddNode(_processingGraph, &ioUnitDescription, &ioNode));
    
    Util::check(AUGraphOpen(_processingGraph)); // indirectly performs audio unit instantiation
    
    // Fetch the instantiated remote-IO unit so it can be configured below.
    Util::check(AUGraphNodeInfo(_processingGraph, ioNode, NULL, &_ioUnit));
    
    AURenderCallbackStruct callbackStruct = {0};
    
    // Enable the microphone side of the remote-IO unit; disable speaker
    // output (this app only listens).
    const UInt32 enableInput = 1; 
    const UInt32 enableOutput = 0;
    
    // Route render notifications to callbackWrapper, which forwards to
    // this->callback().
    callbackStruct.inputProc = callbackWrapper;
    callbackStruct.inputProcRefCon = this;
    
    Util::check(AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_EnableIO,
                               kAudioUnitScope_Input,
                               kInputBus, &enableInput, sizeof(enableInput)));
    
    Util::check(AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_EnableIO,
                               kAudioUnitScope_Output,
                               kOutputBus, &enableOutput, sizeof(enableOutput)));
    
    // NOTE(review): the callback and stream formats are set on the
    // opposite-seeming bus/scope pairs; this mirrors the usual RemoteIO
    // input-path convention (output scope of the input bus is where mic
    // data emerges) — confirm against the kInputBus/kOutputBus constants
    // declared in the header.
    Util::check(AudioUnitSetProperty(_ioUnit, kAudioOutputUnitProperty_SetInputCallback,
                               kAudioUnitScope_Input,
                               kOutputBus, &callbackStruct, sizeof(callbackStruct)));
    
    Util::check(AudioUnitSetProperty(_ioUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Output,
                               kInputBus, &_streamFormat, sizeof(_streamFormat)));
    
    Util::check(AudioUnitSetProperty(_ioUnit, kAudioUnitProperty_StreamFormat,
                               kAudioUnitScope_Input,
                               kOutputBus, &_streamFormat, sizeof(_streamFormat)));
    
    // Disable system buffer allocation. We're going to do it ourselves.
    // (_bufferList, created above, receives the rendered samples.)
    const UInt32 flag = 0;
    Util::check(AudioUnitSetProperty(_ioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
                               kAudioUnitScope_Output,
                               kInputBus, &flag, sizeof(flag)));
    
    Util::check(AUGraphInitialize(_processingGraph));
}

// Configures and activates the shared AVAudioSession for recording, asking
// for desiredSampleRate, and returns the rate the system actually granted
// (which may differ from the request — callers must use the returned value).
// NOTE(review): messaging a nil NSError (when a call succeeds) yields 0,
// so Util::check(err.code) passes on success; the BOOL result of each call
// is ignored — presumably acceptable here, but worth confirming.
float setupAudioSessionAndReturnActualSampleRate( float desiredSampleRate )
{
    NSError	*err = nil;
    
    AVAudioSession *session = [AVAudioSession sharedInstance];
    
    // Request the preferred rate before activation so it can take effect.
    [session setPreferredSampleRate:desiredSampleRate error:&err];
    Util::check(err.code);
    
    [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&err];
    Util::check(err.code);
    
    [session setActive:YES error:&err];
    Util::check(err.code);
    
    // The rate the hardware actually runs at.
    return session.sampleRate;
}

// Allocates a single-buffer AudioBufferList holding 512 mono samples of
// bytesPerSample bytes each, zero-filled.  The caller owns both
// allocations (free(bl->mBuffers[0].mData); free(bl);).
//
// BUG FIX: the previous code allocated only sizeof(AudioBuffer) bytes,
// which is smaller than AudioBufferList (mNumberBuffers plus one embedded
// AudioBuffer), so writing bl->mNumberBuffers / bl->mBuffers[0] overran
// the heap allocation.
AudioBufferList * createAudioBufferList(size_t bytesPerSample)
{
    const size_t framesPerBuffer = 512;

    AudioBufferList * bl =
        static_cast<AudioBufferList *>(malloc(sizeof(AudioBufferList)));
    
    bl->mNumberBuffers = 1;
    bl->mBuffers[0].mNumberChannels = 1;
    bl->mBuffers[0].mDataByteSize =
        static_cast<UInt32>(framesPerBuffer * bytesPerSample);
    bl->mBuffers[0].mData = calloc(framesPerBuffer, bytesPerSample);
    
    return bl;
}

// Destructor: releases everything the constructor / start() allocated.
// BUG FIX: previously only _fft was freed — _condenser (if stop() was
// never called), _bufferList plus its sample storage, and the AUGraph all
// leaked.
Listener::~Listener()
{
    LOG_INFO( "Listener::~Listener()" << endl << *this);

    delete _fft;

    delete _condenser; // non-null when start() ran without a matching stop()
    _condenser = 0;

    if (_bufferList)
    {
        free(_bufferList->mBuffers[0].mData); // calloc'd in createAudioBufferList
        free(_bufferList);                    // malloc'd list header
    }

    DisposeAUGraph(_processingGraph); // created by NewAUGraph in the ctor
}

// Begins audio capture.  Condensed note events derived from the incoming
// slices are delivered to 'consumer' via a fresh Condenser.
void Listener::start(IConsumer * consumer)
{
    LOG_INFO("Listener::start() starting:" << endl << *this);

    // Discard any condenser left over from an earlier run, then bind a
    // new one to the supplied consumer.
    delete _condenser;
    _condenser = new Condenser(consumer);

    // Timestamps produced in callback() are relative to this moment.
    _startTime = CACurrentMediaTime();

    Util::check(AUGraphStart(_processingGraph));
}

// Halts audio capture by stopping the processing graph, then releases the
// condenser created by start().  Safe to call when no condenser exists
// (delete on null is a no-op).
void Listener::stop()
{
    LOG_INFO("stop() " << endl << *this);
    
    Util::check(AUGraphStop(_processingGraph));
    
    delete _condenser;
    _condenser = 0;
}

// Input callback invoked by the remote-IO unit whenever a batch of
// microphone frames is available.  Renders the frames into _bufferList,
// appends them to _shortBuffer, and once the buffer fills, runs the FFT
// and submits a Slice to the condenser, keeping the newest half of the
// samples so consecutive analysis windows overlap by 50%.
// Returns noErr (0); render failures are trapped by Util::check.
OSStatus Listener::callback(
                             AudioUnitRenderActionFlags 	*ioActionFlags, 
                             const AudioTimeStamp			*inTimeStamp, 
                             UInt32 						inBusNumber, 
                             UInt32 						inNumberFrames, 
                             AudioBufferList				*ioData)
{
    const UInt32 bus1 = 1; // bus 1 is the remote-IO input (microphone) element
    	
    Util::check(AudioUnitRender(
                          _ioUnit, 
                          ioActionFlags, 
                          inTimeStamp, 
                          bus1, 
                          inNumberFrames, 
                          _bufferList));

    // Capture time of this batch, relative to start().
    const float time = CACurrentMediaTime() - _startTime;
    
    // Append the freshly rendered 16-bit samples to the accumulation buffer.
    memcpy(
           &_shortBuffer.front() + _index,
           _bufferList->mBuffers[0].mData, 
           inNumberFrames*sizeof(SInt16)
           );
    
    _frameCount += inNumberFrames;
        
    _index += inNumberFrames;
    
    const bool bufferFull = (_index + 1) >= _bufferCapacity;
    if (bufferFull)
    {
        _index = _bufferCapacity / 2;
        
        // Slide the newest half of the samples to the front so the next
        // window overlaps this one by 50%.
        // BUG FIX: memcpy takes a BYTE count; the buffer holds SInt16
        // samples, so the length must be scaled by sizeof(SInt16) —
        // previously only half of the intended samples were moved.
        memcpy(&_shortBuffer.front(),
               &_shortBuffer.front() + _index,
               (_bufferCapacity / 2) * sizeof(SInt16));
        
        const Spectrum spec = _fft->calculate();
        
        const Slice slice(
                          _frameCount,
                          time,
                          spec,
                          _shortBuffer);
        
        _condenser->submit(slice);
    }
    
    return 0;
}

// Streams a multi-line diagnostic dump of the Listener's configuration
// (used by the LOG_* macros throughout this file).
ostream& operator<<(ostream& os, const Listener& in)
{
    os << "Listener(" << endl;
    os << "  _sampleRate           : " << in._sampleRate << endl;
    os << "  _bufferCapacity       : " << in._bufferCapacity << endl;
    os << "  _bytesPerSample       : " << in._bytesPerSample << endl;
    os << "  _bytesPerOutputSample : " << in._bytesPerOutputSample << endl;
    os << "  _inputABSD : " << in._streamFormat << endl;
    os << ")";

    return os;
}

// Streams the key AudioStreamBasicDescription fields on one line for
// diagnostics.
// BUG FIX: the ':' separator was missing after most labels, producing
// unreadable output such as "bytes/frame2" — each label now ends in ':'.
ostream& operator<<(
                    ostream& os,
                    const AudioStreamBasicDescription& desc)
{
    os 
    << "ASBD("
    << "bits/channel:" << desc.mBitsPerChannel
    << ",bytes/frame:" << desc.mBytesPerFrame
    << ",bytes/packet:" << desc.mBytesPerPacket
    << ",channels/frame:" << desc.mChannelsPerFrame
    << ",frames/packet:" << desc.mFramesPerPacket
    << ",sampleRate:" << desc.mSampleRate
    << ")";
    
    return os;
}

