#import "ask_decode.h"
#import "format.h"
#import "utils.h"

#import <unistd.h>
#import <AudioUnit/AudioUnit.h>
#import <AudioToolbox/AudioToolbox.h>

// The AUHAL (hardware I/O) audio unit, shared between main() and the
// input render callback.
AudioUnit unit;

// Frames per hardware I/O buffer, queried from the device in main().
UInt32 numsamples;
// Buffer list handed to AudioUnitRender(). AudioBufferList declares
// mBuffers[1]; the trailing AudioBuffer member reserves contiguous room
// for a second entry. NOTE(review): only mBuffers[0] is ever used here
// (mono input), so the extra member looks like defensive padding — confirm.
struct
{
	AudioBufferList list;
	AudioBuffer buffer;
} compbuf;

// State of the ASK (amplitude-shift-keying) demodulator; fed from the
// render callback, polled from main().
struct ask_decoder ask;

// Input render callback: the AUHAL calls this whenever captured audio is
// available. Pulls the frames into compbuf via AudioUnitRender(), then
// feeds the 16-bit samples to the ASK decoder. Returns the render status.
OSStatus AudioInputProc(void *inRefCon,AudioUnitRenderActionFlags *actionflags,const AudioTimeStamp *timestamp,UInt32 busnum,UInt32 numframes,AudioBufferList *data)
{
	OSStatus status=AudioUnitRender(unit,actionflags,timestamp,busnum,numframes,&compbuf.list);
	if(status!=noErr) return status;

	provide_ask_input(&ask,(int16_t *)compbuf.list.mBuffers[0].mData,numsamples);

	return noErr;
}

// Print a one-line, carriage-return-updated progress indicator for the
// transfer so far, clamped to 100%.
//
// BUG FIX: file_size() can return 0 or a negative value while the file
// header has not yet been fully decoded (main() explicitly waits for it
// to become non-negative at one point), and the original divided by it
// unconditionally — a potential division by zero. Report 0% until a
// positive size is known.
void print_progress()
{
	u_int8_t *bytes=ask_data_bytes(&ask);
	int length=ask_data_length(&ask);
	int size=file_size(bytes,length);

	int percent=0;
	if(size>0) percent=100*length/size;
	if(percent>100) percent=100;

	fprintf(stderr,"\rReceiving (%d%%)...",percent); fflush(stderr);
}

// Reception is over either when the decoder has lost synchronization or
// when the decoded bytes form a complete file. Returns 1 or 0.
int is_file_received()
{
	// Preserve short-circuit order: a lost sync means we stop regardless
	// of how much data arrived.
	if(is_ask_sync_lost(&ask)) return 1;

	return is_file_complete(ask_data_bytes(&ask),ask_data_length(&ask))!=0;
}

int main(int argc,char **argv)
{
	if(argc!=2&&argc!=3)
	{
		fprintf(stderr,"Usage: %s [input device name] file.dat\n",argv[0]);
		exit(1);
	}

	if(argc==3)
	{
		fprintf(stderr,"Selecting devices not yet implemented.\n");
		exit(1);
	}

	Component component=NULL;
	UInt32 param;
	
	ComponentDescription description;
	description.componentType=kAudioUnitType_Output;
	description.componentSubType=kAudioUnitSubType_HALOutput;
	description.componentManufacturer=kAudioUnitManufacturer_Apple;
	description.componentFlags=0;
	description.componentFlagsMask=0;
	if(component=FindNextComponent(NULL,&description))
	{
		if(OpenAComponent(component,&unit)!=noErr)
		{
			fprintf(stderr,"Couldn't open audio component.\n");
			exit(5);
		}
	}

	// Configure the AudioOutputUnit
	// You must enable the Audio Unit (AUHAL) for input and output for the same  device.
	// When using AudioUnitSetProperty the 4th parameter in the method
	// refers to an AudioUnitElement.  When using an AudioOutputUnit
	// for input the element will be '1' and the output element will be '0'.	
	
	// Enable input on the AUHAL
	param=1;
	if(AudioUnitSetProperty(unit,kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Input,1,&param,sizeof(UInt32))==noErr);
	{
		// Disable Output on the AUHAL
		param=0;
		if(AudioUnitSetProperty(unit,kAudioOutputUnitProperty_EnableIO,kAudioUnitScope_Output,0,&param,sizeof(UInt32))!=noErr)
		{
			fprintf(stderr,"Couldn't configure audio component.\n");
			exit(5);
		}
	}

	// Select the default input device
	AudioDeviceID devid;
	param=sizeof(AudioDeviceID);
	if(AudioHardwareGetProperty(kAudioHardwarePropertyDefaultInputDevice,&param,&devid)!=noErr)
	{
		fprintf(stderr,"Couldn't configure audio component.\n");
		exit(5);
	}
	
	// Set the current device to the default input unit.
	if(AudioUnitSetProperty(unit,kAudioOutputUnitProperty_CurrentDevice,kAudioUnitScope_Global,0,&devid,sizeof(AudioDeviceID))!=noErr)
	{
		fprintf(stderr,"Couldn't configure audio component.\n");
		exit(5);
	}
	
	// Setup render callback
	// This will be called when the AUHAL has input data
	AURenderCallbackStruct callback;
	callback.inputProc=AudioInputProc;
	if(AudioUnitSetProperty(unit,kAudioOutputUnitProperty_SetInputCallback,kAudioUnitScope_Global,0,&callback,sizeof(AURenderCallbackStruct))!=noErr)
	{
		fprintf(stderr,"Couldn't configure audio component.\n");
		exit(5);
	}
	
	// get hardware device format
	AudioStreamBasicDescription	devformat;
	param=sizeof(AudioStreamBasicDescription);
	if(AudioUnitGetProperty(unit,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Input,1,&devformat,&param)!=noErr)
	{
		fprintf(stderr,"Couldn't configure audio component.\n");
		exit(5);
	}

	// Twiddle the format to our liking
	AudioStreamBasicDescription	actualformat;
	actualformat.mChannelsPerFrame=1;
	actualformat.mSampleRate=devformat.mSampleRate;
	actualformat.mFormatID=kAudioFormatLinearPCM;
	actualformat.mFormatFlags=kAudioFormatFlagIsSignedInteger|kAudioFormatFlagIsPacked|kAudioFormatFlagsNativeEndian;
	actualformat.mBitsPerChannel=16;
	actualformat.mBytesPerFrame=actualformat.mBitsPerChannel/8;
	actualformat.mFramesPerPacket=1;
	actualformat.mBytesPerPacket=actualformat.mBytesPerFrame;

	// Set the AudioOutputUnit output data format
	if(AudioUnitSetProperty(unit,kAudioUnitProperty_StreamFormat,kAudioUnitScope_Output,1,&actualformat,sizeof(AudioStreamBasicDescription))!=noErr)
	{
		fprintf(stderr,"Couldn't configure audio component.\n");
		exit(5);
	}
	
	// Get the number of frames in the IO buffer(s)
	param=sizeof(UInt32);
	if(AudioUnitGetProperty(unit,kAudioDevicePropertyBufferFrameSize,kAudioUnitScope_Global,0,&numsamples,&param)!=noErr)
	{
		fprintf(stderr,"Couldn't configure audio component.\n");
		exit(5);
	}
	
	// Initialize the AU
	if(AudioUnitInitialize(unit)!=noErr)
	{
		fprintf(stderr,"Couldn't initialize audio component.\n");
		exit(5);
	}

	int16_t buf[numsamples];

	compbuf.list.mNumberBuffers=1;
	compbuf.list.mBuffers[0].mNumberChannels=1;
	compbuf.list.mBuffers[0].mDataByteSize=sizeof(buf);
	compbuf.list.mBuffers[0].mData=buf;

	init_ask_decoder(&ask,(int)devformat.mSampleRate);

	AudioOutputUnitStart(unit);

	fprintf(stderr,"Waiting for a transmission...\n");

	while(!is_ask_sync_found(&ask)) usleep(25000);

	fprintf(stderr,"Receiving..."); fflush(stderr);

	while(!is_file_received()&&file_size(ask_data_bytes(&ask),ask_data_length(&ask))<0) usleep(25000);

	while(!is_file_received())
	{
		print_progress(&ask);
		usleep(100000);
	}

	print_progress();
	fprintf(stderr,"\n");

	u_int8_t *bytes=ask_data_bytes(&ask);
	int length=ask_data_length(&ask);

	check_errors(bytes,length,NULL,0);
	fputs(describe_file(bytes,length),stderr);

	FILE *out=open_file(argc==2?argv[1]:argv[2],"wb",stdout,"Couldn't create file \"%s\".\n");
	fwrite(bytes,1,file_size(bytes,length),out);
	if(out!=stdout) fclose(out);

	destroy_ask_decoder(&ask);
}
