//
//  ViewController.m
//  Wi-Fi Party macOS
//
//  Created by 许煜恒 on 2022/9/21.
//

#import "ViewController.h"


// AudioConverterComplexInputDataProc: supplies the captured source buffers to
// the converter during AudioConverterFillComplexBuffer.
// inUserData is the source AudioBufferList extracted from the sample buffer.
// NOTE(review): this re-supplies the same source data on every invocation; it
// assumes the converter asks for input only once per fill call — confirm if
// output ever spans multiple input requests.
OSStatus audioConverterDataProvider(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription * _Nullable *outDataPacketDescription, void *inUserData){
    AudioBufferList *sourceAbl=(AudioBufferList*)inUserData;
    int copy_buffer_count=MIN(*ioNumberDataPackets,sourceAbl->mNumberBuffers),copied_packets=0;
    // Bug fix: report only the buffers we actually fill. Previously
    // mNumberBuffers was copied from the source even when copy_buffer_count
    // was smaller, leaving trailing buffers of ioData uninitialized.
    ioData->mNumberBuffers=copy_buffer_count;
    for(int i=0;i<copy_buffer_count;i++){
        ioData->mBuffers[i]=sourceAbl->mBuffers[i];
        // Source is assumed to be 4 bytes per packet (Float32 mono) —
        // TODO(review): confirm against the stream's actual ASBD.
        copied_packets+=sourceAbl->mBuffers[i].mDataByteSize/4;
    }
    *ioNumberDataPackets=copied_packets;
    if(outDataPacketDescription!=NULL){
        // Bug fix: the converter may request packet descriptions even for PCM;
        // linear PCM has none, so explicitly hand back NULL instead of leaving
        // the out-parameter unset.
        *outDataPacketDescription=NULL;
    }
    
    return noErr;
}


@implementation ViewController
- (IBAction)singBoxChecked:(NSButton*)sender {
//    if(sender.state==NSControlStateValueOn){
//        startSinging(self->queues.recordQueue);
//    }else{
//        stopSinging(self->queues.recordQueue);
//    }
    if(sender.state==NSControlStateValueOn){
        startSinging(self->unit);
    }else{
        stopSinging(self->unit);
    }
}

// Starts capturing system/app audio via ScreenCaptureKit. A throttled video
// output is also attached because SCStream logs warnings without one.
- (IBAction)appAudioBoxChecked:(NSButton*)sender {
    [SCShareableContent getShareableContentWithCompletionHandler:^(SCShareableContent *content, NSError *err){
        // Bug fix: the fetch's own error/content were never checked before use.
        if (err != nil) {
            NSLog(@"Error fetching shareable content: %@", [err localizedDescription]);
            return;
        }
        if (content.displays.count == 0) {
            NSLog(@"No display available for capture");
            return;
        }
        SCDisplay *display = content.displays[0];
        SCContentFilter *filter = [[SCContentFilter alloc] initWithDisplay:display excludingWindows:@[]];

        SCStreamConfiguration *streamConfig = [[SCStreamConfiguration alloc] init];
        streamConfig.capturesAudio = true;
        streamConfig.channelCount = 1;
        streamConfig.sampleRate = 44100;
        // Bug fix: this was assigned FALSE and then true a few lines apart;
        // keep the single, final intent — exclude our own playback so captured
        // audio is not looped back into the mix.
        streamConfig.excludesCurrentProcessAudio = true;
        streamConfig.width = display.width * NSScreen.mainScreen.backingScaleFactor;
        streamConfig.height = display.height * NSScreen.mainScreen.backingScaleFactor;
        // Video is unused; throttle to 1 fps to minimize overhead.
        streamConfig.minimumFrameInterval = CMTimeMake(1, 1);
        streamConfig.queueDepth = 5;

        self->screenCaptureStream = [[SCStream alloc] initWithFilter:filter configuration:streamConfig delegate:self];
        NSError *e = nil;
        dispatch_queue_t videoDispatchQueue = dispatch_queue_create("com.ken.wifi-party-macos.videoDispatchQueue", NULL);
        dispatch_queue_t audioDispatchQueue = dispatch_queue_create("com.ken.wifi-party-macos.audioDispatchQueue", NULL);
        // Bug fix: the original tested the completion handler's `err` (which
        // belongs to the shareable-content fetch) instead of the result of
        // addStreamOutput:. Check the BOOL return value, per Cocoa convention.
        if (![self->screenCaptureStream addStreamOutput:self type:SCStreamOutputTypeAudio sampleHandlerQueue:audioDispatchQueue error:&e]) {
            NSLog(@"Error adding audio capture: %@", [e localizedDescription]);
        }
        // We don't need video, but without a video output SCStream emits warnings.
        if (![self->screenCaptureStream addStreamOutput:self type:SCStreamOutputTypeScreen sampleHandlerQueue:videoDispatchQueue error:&e]) {
            NSLog(@"Error adding video capture: %@", [e localizedDescription]);
        }

        [self->screenCaptureStream startCaptureWithCompletionHandler:^(NSError * _Nullable error) {
            NSLog(@"Called start completion handler");
            if (error) {
                NSLog(@"%@", [error localizedDescription]);
            }
        }];
    }];
}


// Requests microphone access, wires up the UDP receiver, per-track channels,
// and the mixer, starts the audio unit, and runs the receive loop.
- (void)viewDidLoad {
    [super viewDidLoad];
    if([AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeAudio]==AVAuthorizationStatusNotDetermined){
        [AVCaptureDevice requestAccessForMediaType:AVMediaTypeAudio completionHandler:^(BOOL granted) {

        }];
    }
    // Snapshot of our own interface addresses, used to drop packets we sent.
    struct ifaddrs *self_addrs=get_selfaddrs();
    
    int s=socket(AF_INET, SOCK_DGRAM, 0);
    
    int flag=1;
    //Reuse address (So the in-app recording could bind the same addr)
    if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR,&flag,sizeof(flag))!=0){
        printf("Failed to enable address reuse\n");
    }
    
    // These objects live for the lifetime of the view controller; they are
    // intentionally never deleted.
    Receiver *r=new Receiver(s);
    Trackreceiver *music_receiver=new Trackreceiver();
    Trackreceiver *vocal_receiver=new Trackreceiver();
    Mixer<Trackreceiver> *sound_mixer=new Mixer<Trackreceiver>();
    sound_mixer->addSource(music_receiver);
    sound_mixer->addSource(vocal_receiver);
    // Music channel: drop loopback packets that originated from this machine.
    Channel *music_channel=new Channel(music,channel_unreliable,1,s,[=](Packet *p){
        bool isFromSelf=false;
        for(struct ifaddrs *i=self_addrs;i!=NULL;i=i->ifa_next){
            // Bug fix: ifa_addr can be NULL (some interfaces) and may not be
            // IPv4; guard before casting to sockaddr_in.
            if(i->ifa_addr==NULL||i->ifa_addr->sa_family!=AF_INET){
                continue;
            }
            if(((struct sockaddr_in*)i->ifa_addr)->sin_addr.s_addr==p->from_addr->sin_addr.s_addr){
                isFromSelf=true;
                break;
            }
        }
        if(!isFromSelf){
            music_receiver->receiveSound(p);
        }
        return true;
    });
    self->musicChannel=music_channel;
    Channel *vocal_channel=new Channel(vocal,channel_unreliable,1,s,[=](Packet *p){
        vocal_receiver->receiveSound(p);
        return true;
    });
    r->addChannel(music_channel);
    r->addChannel(vocal_channel);
    
    //Unit Test
    Channel *test_channel=new Channel(3,channel_unreliable,1,s,[=](Packet *p){
        NSLog(@"%s",(char*)p->content.addr);
        return true;
    });
    r->addChannel(test_channel);
    
    self->unit=startAudioUnit(sound_mixer, music_receiver, vocal_channel);
    // Bug fix: receive_forever() never returns; calling it directly here
    // blocked the main thread inside viewDidLoad, freezing the UI. Run the
    // receive loop on a background queue instead.
    dispatch_async(dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0), ^{
        r->receive_forever();
    });

    // Do any additional setup after loading the view.
}


// Standard NSViewController template override; this controller keeps no
// state derived from representedObject, so only super is invoked.
- (void)setRepresentedObject:(id)representedObject {
    [super setRepresentedObject:representedObject];

    // Update the view, if already loaded.
}

// SCStreamOutput callback: converts captured audio to 8-bit mono 44.1 kHz
// packed PCM and sends it over the music channel. Video frames are discarded
// (the video output exists only to silence ScreenCaptureKit warnings).
-(void) stream:(SCStream *)stream didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer ofType:(SCStreamOutputType)type{
    if(!CMSampleBufferIsValid(sampleBuffer)){
        NSLog(@"Got invalid buffer");
        return;
    }
    
    switch(type){
        case SCStreamOutputTypeAudio:{
            AudioBufferList sourceAudioBufferList;
            CMBlockBufferRef blockBuffer=NULL;
            // Bug fix: the return status was ignored; on failure the buffer
            // list is garbage and CFRelease(NULL) below would crash.
            OSStatus ablStatus=CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &sourceAudioBufferList, sizeof(AudioBufferList), NULL, NULL, 0, &blockBuffer);
            if(ablStatus!=noErr||blockBuffer==NULL){
                NSLog(@"Failed to get audio buffer list: %d",(int)ablStatus);
                return;
            }
            CMFormatDescriptionRef fmt=CMSampleBufferGetFormatDescription(sampleBuffer);
            const AudioStreamBasicDescription *streamFormat=CMAudioFormatDescriptionGetStreamBasicDescription(fmt);
            
            // Lazily create the converter to 8-bit mono 44.1 kHz packed PCM.
            // NOTE(review): assumes the source format never changes mid-stream
            // and that this callback only runs on the single audio queue.
            static AudioConverterRef converter=NULL;
            if(converter==NULL){
                AudioStreamBasicDescription format={0};
                format.mBitsPerChannel=8;
                format.mChannelsPerFrame=1;
                format.mFramesPerPacket=1;
                format.mBytesPerFrame=1;
                format.mBytesPerPacket=1;
                format.mSampleRate=44100;
                format.mFormatID=kAudioFormatLinearPCM;
                format.mFormatFlags=kLinearPCMFormatFlagIsPacked;
                OSStatus ret=AudioConverterNew(streamFormat, &format, &converter);
                check_int(ret, "Failed to create audio converter");
            }
            // Output packet estimate: source bytes scaled by the sample-rate
            // ratio, +1 for rounding (source assumed 4 bytes/packet — see
            // audioConverterDataProvider).
            UInt32 packet_count=sourceAudioBufferList.mBuffers[0].mDataByteSize*(Float64(44100)/streamFormat->mSampleRate)+1;
            AudioBufferList *convertedAudio=(AudioBufferList*)malloc(offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * 1));
            convertedAudio->mNumberBuffers=1;
            convertedAudio->mBuffers[0].mDataByteSize=packet_count*1; // 1 byte per output packet
            convertedAudio->mBuffers[0].mData=malloc(packet_count*1);
            AudioStreamPacketDescription pd[packet_count];
            OSStatus ret=AudioConverterFillComplexBuffer(converter, audioConverterDataProvider, &sourceAudioBufferList, &packet_count, convertedAudio, pd);
            check_int(ret, "Failed to convert audio format");
            // Bug fix: only send when conversion succeeded; previously the
            // (possibly bogus) buffer was sent unconditionally. Also removed a
            // dead per-frame loop whose body was entirely commented out.
            if(ret==noErr){
                self->musicChannel->send(convertedAudio->mBuffers[0].mData, convertedAudio->mBuffers[0].mDataByteSize, 0);
            }
            free(convertedAudio->mBuffers[0].mData);
            free(convertedAudio);
            CFRelease(blockBuffer);
            break;
        }
        case SCStreamOutputTypeScreen:{
            // Video frames are intentionally ignored.
            break;
        }
        default:{
            NSLog(@"Received unexpected stream type");
            break;
        }
    }
}

// SCStreamDelegate: the capture stream stopped (user revoked permission,
// display configuration change, or an internal error).
-(void)stream:(SCStream *)stream didStopWithError:(NSError *)error{
    // Bug fix: the error parameter was silently dropped; log it for diagnosis.
    if(error){
        NSLog(@"Stream stopped with error: %@",[error localizedDescription]);
    }else{
        NSLog(@"Stream stopped");
    }
    // TODO: update the state of the app-audio checkbox to reflect the stop.
}


@end
