//
//  AudioRecorder.m
//  audio_data
//
//  Created by os on 2/29/24.
//

#import "AudioRecorder.h"
#import <AVFoundation/AVFoundation.h>


@interface AudioRecorder()<AVCaptureAudioDataOutputSampleBufferDelegate>

@property (nonatomic, strong) AVCaptureSession *captureSession;

@end


@implementation AudioRecorder

/// Designated initializer: configures the capture pipeline immediately so the
/// recorder is ready for `startRecord` as soon as it is created.
- (instancetype)init {
    if ((self = [super init])) {
        [self setupCaptureSession];
    }
    return self;
}

/// Builds the capture pipeline: microphone input -> AVCaptureSession -> audio
/// data output delivering sample buffers to this object on a private queue.
/// Called once from `init`; bails out (with a log) if no mic input is available.
- (void)setupCaptureSession {
    NSError *error = nil;

    self.captureSession = [[AVCaptureSession alloc] init];

    // Default audio capture device (the microphone).
    AVCaptureDevice *microphone = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];

    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:microphone error:&error];
    if (!input) {
        NSLog(@"Error creating capture device input: %@", error.localizedDescription);
        return;
    }

    if ([self.captureSession canAddInput:input]) {
        [self.captureSession addInput:input];
    }

    // Request 44.1 kHz mono. These are *preferences*: the system may not honor
    // them, so check the BOOL return (not the error pointer) and log failures
    // instead of silently discarding them.
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    NSError *sessionError = nil;
    if (![audioSession setPreferredSampleRate:44100.0 error:&sessionError]) {
        NSLog(@"Failed to set preferred sample rate: %@", sessionError.localizedDescription);
    }
    sessionError = nil;
    if (![audioSession setPreferredInputNumberOfChannels:1 error:&sessionError]) {
        NSLog(@"Failed to set preferred input channel count: %@", sessionError.localizedDescription);
    }

    // Deliver sample buffers on a dedicated serial queue (delegate callbacks
    // must not run on the main thread).
    AVCaptureAudioDataOutput *output = [[AVCaptureAudioDataOutput alloc] init];
    dispatch_queue_t audioQueue = dispatch_queue_create("com.example.audioQueue", DISPATCH_QUEUE_SERIAL);
    [output setSampleBufferDelegate:self queue:audioQueue];

    if ([self.captureSession canAddOutput:output]) {
        [self.captureSession addOutput:output];
    }
}

/// AVCaptureAudioDataOutputSampleBufferDelegate callback, invoked on the
/// private audio queue for each captured buffer. Copies the raw PCM bytes out
/// of the sample buffer and forwards them to the Flutter event sink on the
/// main thread.
- (void)captureOutput:(AVCaptureOutput *)output didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // CMSampleBufferGetDataBuffer may return NULL (e.g. the data is not ready
    // or not stored in a block buffer); passing NULL to
    // CMBlockBufferGetDataPointer would be undefined behavior.
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    if (blockBuffer == NULL) {
        return;
    }

    size_t lengthAtOffset = 0;
    size_t totalLength = 0;
    char *dataPointer = NULL;
    OSStatus status = CMBlockBufferGetDataPointer(blockBuffer, 0, &lengthAtOffset, &totalLength, &dataPointer);
    if (status != kCMBlockBufferNoErr) {
        return;
    }

    // dataPointer is only guaranteed to cover lengthAtOffset contiguous bytes.
    // If the block buffer is non-contiguous, copy all totalLength bytes via
    // CMBlockBufferCopyDataBytes instead of reading past the contiguous run.
    NSData *audioData = nil;
    if (lengthAtOffset == totalLength) {
        audioData = [NSData dataWithBytes:dataPointer length:totalLength];
    } else {
        NSMutableData *contiguous = [NSMutableData dataWithLength:totalLength];
        if (CMBlockBufferCopyDataBytes(blockBuffer, 0, totalLength, contiguous.mutableBytes) != kCMBlockBufferNoErr) {
            return;
        }
        audioData = contiguous;
    }

    // Wrap the bytes as a Uint8List for the Flutter side.
    FlutterStandardTypedData *typedData = [FlutterStandardTypedData typedDataWithBytes:audioData];

    // Event sinks must be invoked on the main thread. Unlike messaging nil,
    // calling a nil block crashes — guard in case no listener is attached yet.
    dispatch_async(dispatch_get_main_queue(), ^{
        if (self.sink) {
            self.sink(typedData);
        }
    });
}

/// Begins capturing audio. `-[AVCaptureSession startRunning]` blocks until the
/// session is up, so it is dispatched to a background queue to keep the main
/// thread responsive.
- (void)startRecord {
    dispatch_queue_t backgroundQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_async(backgroundQueue, ^{
        [self.captureSession startRunning];
    });
}
/// Stops capturing audio. Like starting, stopping the session is a blocking
/// call, so it runs off the main thread.
- (void)endRecord {
    dispatch_queue_t backgroundQueue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_async(backgroundQueue, ^{
        [self.captureSession stopRunning];
    });
}

@end
