//
//  AudioRecorder.m
//  AudioUnitDemo
//
//  Created by ilongge on 2021/9/1.
//

#import "AudioRecorder.h"
#import "AudioRecorderFileManager.h"
#import "OSStatusCheck.h"

#import <AudioUnit/AudioUnit.h>
#import <AVFoundation/AVFoundation.h>

@interface AudioRecorderDescription()
/**
 Actual audio sample rate granted by the audio session
 (may differ from the preferred value).
 */
@property (nonatomic, assign) AudioRecorderSampleRate sampleRate;

/**
 Actual number of recording (input) channels.
 */
@property (nonatomic, assign) AudioRecorderChannel inputChannels;

/**
 Actual number of playback (output) channels.
 */
@property (nonatomic, assign) AudioRecorderChannel ouputChannels;

/**
 Actual recording callback interval (I/O buffer duration) in seconds.
 */
@property (nonatomic, assign) NSTimeInterval IOBufferDuration;
@end


@implementation AudioRecorderDescription


#pragma mark ************** settings

/**
 Stores the preferred sample rate, forwards it to the shared audio session,
 and records the rate the session actually granted in `_sampleRate`.
 */
- (void)setPreferredSampleRate:(AudioRecorderSampleRate)preferredSampleRate
{
    _preferredSampleRate = preferredSampleRate;
    
    NSError *error;
    BOOL success = [[AVAudioSession sharedInstance] setPreferredSampleRate:_preferredSampleRate
                                                                     error:&error];
    // The session reports the rate actually in effect, which may differ
    // from the preferred one.
    _sampleRate = [AVAudioSession sharedInstance].sampleRate;
    if (!success) {
        // Fix: log the error object (it was previously dropped).
        NSLog(@"error setting preferredSampleRate %@", error);
    }
}

/**
 @return The sample rate actually granted by the audio session.
 */
- (AudioRecorderSampleRate)sampleRate
{
    return _sampleRate;
}

/**
 Stores the preferred input channel count and forwards it to the session.
 On failure, falls back to the channel count the session reports.
 */
- (void)setPreferredInputChannels:(AudioRecorderChannel)preferredInputChannels
{
    _preferredInputChannels = preferredInputChannels;
    
    NSError *error;
    BOOL success = [[AVAudioSession sharedInstance] setPreferredInputNumberOfChannels:_preferredInputChannels
                                                                                error:&error];
    if (success) {
        _inputChannels = _preferredInputChannels;
    }
    else {
        // Mirror whatever the hardware actually provides.
        // NOTE(review): _inputChannels stays unchanged when the session
        // reports a channel count other than 1 or 2.
        NSInteger actualChannels = [AVAudioSession sharedInstance].inputNumberOfChannels;
        if (actualChannels == 1) {
            _inputChannels = AudioRecorderChannel_Mono;
        }
        else if (actualChannels == 2) {
            _inputChannels = AudioRecorderChannel_Stereo;
        }
        NSLog(@"error setting preferredInputChannels %@", error);
    }
}

/**
 @return The input channel count actually in effect.
 */
- (AudioRecorderChannel)inputChannels
{
    return _inputChannels;
}

/**
 Stores the preferred output channel count and forwards it to the session.
 On failure, falls back to the channel count the session reports.
 */
- (void)setPreferredOuputChannels:(AudioRecorderChannel)preferredOuputChannels
{
    _preferredOuputChannels = preferredOuputChannels;
    
    NSError *error;
    BOOL success = [[AVAudioSession sharedInstance] setPreferredOutputNumberOfChannels:_preferredOuputChannels
                                                                                 error:&error];
    if (success) {
        _ouputChannels = _preferredOuputChannels;
    }
    else {
        // Mirror whatever the hardware actually provides.
        // NOTE(review): _ouputChannels stays unchanged when the session
        // reports a channel count other than 1 or 2.
        NSInteger actualChannels = [AVAudioSession sharedInstance].outputNumberOfChannels;
        if (actualChannels == 1) {
            _ouputChannels = AudioRecorderChannel_Mono;
        }
        else if (actualChannels == 2) {
            _ouputChannels = AudioRecorderChannel_Stereo;
        }
        NSLog(@"error setting preferredOuputChannels %@", error);
    }
}

/**
 @return The output channel count actually in effect.
 */
- (AudioRecorderChannel)ouputChannels
{
    return _ouputChannels;
}

/**
 Stores the preferred I/O buffer duration and forwards it to the session.
 A smaller buffer yields lower latency at the cost of more frequent callbacks.
 */
- (void)setPreferredIOBufferDuration:(NSTimeInterval)preferredIOBufferDuration
{
    _preferredIOBufferDuration = preferredIOBufferDuration;
    
    NSError *error;
    BOOL success = [[AVAudioSession sharedInstance] setPreferredIOBufferDuration:_preferredIOBufferDuration
                                                                           error:&error];
    // The session reports the duration actually in effect.
    _IOBufferDuration = [AVAudioSession sharedInstance].IOBufferDuration;
    if (!success) {
        NSLog(@"error setting preferredIOBufferDuration %@", error);
    }
}

/**
 @return The I/O buffer duration actually in effect, in seconds.
 */
- (NSTimeInterval)IOBufferDuration
{
    return _IOBufferDuration;
}

/**
 Stores the preferred disk buffer size (KB) and clamps the effective size
 to the [64, 1024] KB range.
 NOTE(review): the parameter is declared NSTimeInterval in the header even
 though it represents a size in KB — confirm the intended type.
 */
- (void)setPreferredDiskBufferSize:(NSTimeInterval)preferredDiskBufferSize
{
    _preferredDiskBufferSize = preferredDiskBufferSize;
    _diskBufferSize = MIN(MAX(_preferredDiskBufferSize, 64), 1024);
}

/**
 @return The effective disk buffer size in KB.
 */
- (NSInteger)diskBufferSize
{
    return _diskBufferSize;
}

/**
 Factory for a description with default settings: 44.1 kHz, mono input and
 output, 1 ms I/O buffer, 256 KB disk buffer, monitoring disabled.
 */
+ (instancetype)defaultRecorderDescription
{
    // `[self alloc]` (not the hard-coded class) so subclasses get instances
    // of their own type.
    AudioRecorderDescription *recorderDescription = [[self alloc] init];
    recorderDescription.preferredSampleRate = AudioRecorderSampleRate_44100;
    recorderDescription.preferredInputChannels = AudioRecorderChannel_Mono;
    recorderDescription.preferredOuputChannels = AudioRecorderChannel_Mono;
    recorderDescription.preferredIOBufferDuration = 0.001;
    recorderDescription.enableDiskBuffer = YES;
    // Fix: route through the preferred setter so the [64, 1024] clamp applies
    // and preferredDiskBufferSize is recorded as well.
    recorderDescription.preferredDiskBufferSize = 256;
    recorderDescription.enablePlayWhenRecord = NO;
    
    return recorderDescription;
}

/**
 Multi-line debug description listing preferred vs. actual settings.
 */
- (NSString *)description
{
    NSMutableString *description = [NSMutableString stringWithFormat:@"\n<%@ %p>\n", [self class] , self];
    [description appendFormat:@"{\n"];
    
    [description appendFormat:@"\t      preferredSampleRate : %u Hz,\n", (unsigned int)_preferredSampleRate];
    [description appendFormat:@"\t               sampleRate : %u Hz,\n", (unsigned int)_sampleRate];
    
    // Explicit (int) casts: passing enum/NSInteger values straight to %d in
    // a varargs call is not portable on 64-bit.
    [description appendFormat:@"\t   preferredInputChannels : %d channel,\n", (int)_preferredInputChannels];
    [description appendFormat:@"\t            inputChannels : %d channel,\n", (int)_inputChannels];
    
    [description appendFormat:@"\t   preferredOuputChannels : %d channel,\n", (int)_preferredOuputChannels];
    [description appendFormat:@"\t            ouputChannels : %d channel,\n", (int)_ouputChannels];
    
    [description appendFormat:@"\tpreferredIOBufferDuration : %lf second,\n", _preferredIOBufferDuration];
    [description appendFormat:@"\t         IOBufferDuration : %lf second,\n", _IOBufferDuration];
    
    [description appendFormat:@"\t  preferredDiskBufferSize : %ld KB,\n", (long)_preferredDiskBufferSize];
    [description appendFormat:@"\t           diskBufferSize : %ld KB,\n", (long)_diskBufferSize];
    
    [description appendFormat:@"\t         enableDiskBuffer : %@,\n", _enableDiskBuffer?@"YES":@"NO"];
    // Fix: this row previously repeated the "diskBufferSize" label.
    [description appendFormat:@"\t     enablePlayWhenRecord : %@,\n", _enablePlayWhenRecord?@"YES":@"NO"];
    
    [description appendFormat:@"}\n"];
    return description.copy;
}

@end


@interface AudioRecorder()
{
    // PCM stream format description (sample rate, channels, bit depth).
    AudioStreamBasicDescription _audioASBD;
    // Component description used to locate the remote I/O audio unit.
    AudioComponentDescription _audioACD;
    // Audio processing graph that hosts the I/O unit.
    AUGraph _audioGraph;
    // Node wrapping the remote I/O unit inside the graph.
    AUNode _audioIONode;
    // The remote I/O audio unit itself.
    AudioUnit _audioIOUnit;
    // Destination audio file for the recording.
    ExtAudioFileRef _audioFileRef;
}
// Shared audio session.
@property (nonatomic, strong) AVAudioSession *audioSession;
// Whether recording is currently in progress.
@property (nonatomic, assign) BOOL isRecording;
// In-memory accumulation buffer for captured PCM data.
@property (nonatomic, strong) NSMutableData *voiceBuffer0;
// Snapshot of buffer 0 taken when it fills up, handed to the file writer.
@property (nonatomic, strong) NSData *voiceBuffer1;

// Lazily-created helper that owns the ExtAudioFile read/write operations.
@property (nonatomic, strong) AudioRecorderFileManager *fileManager;

@end

@implementation AudioRecorder

/**
 Initializes a recorder with the default recorder description and builds
 the whole recording pipeline immediately.
 */
- (instancetype)init
{
    self = [super init];
    if (self) {
        
        _recorderDescription = [AudioRecorderDescription defaultRecorderDescription];
        
        _voiceBuffer0 = [[NSMutableData alloc] init];
        _voiceBuffer1 = [[NSData alloc] init];
        NSLog(@"\n录音参数：%@", _recorderDescription);
        [self configAudioRecorderPrepareRecord];
    }
    return self;
}

/**
 Initializes a recorder with the given description; falls back to the
 default description when nil is passed.
 */
-(instancetype)initWithPreferredRecorderDescription:(AudioRecorderDescription *)preferredRecorderDescription
{
    if (self = [super init]) {
        if (preferredRecorderDescription == nil) {
            preferredRecorderDescription = [AudioRecorderDescription defaultRecorderDescription];
        }
        _recorderDescription = preferredRecorderDescription;
        
        _voiceBuffer0 = [[NSMutableData alloc] init];
        _voiceBuffer1 = [[NSData alloc] init];
        NSLog(@"\n录音参数：%@", _recorderDescription);
        [self configAudioRecorderPrepareRecord];
    }
    return self;
}

#pragma mark ************** 公开方法
/**
 Builds the recording pipeline: audio session, component/stream
 descriptions, AUGraph, remote I/O unit, and render callback.
 The order matters — the descriptions must exist before the graph node is
 added, and the graph is initialized/updated last.
 */

- (void)configAudioRecorderPrepareRecord{
    [self initializeAudioSession];
    [self initializeAudioACD];
    [self initializeAudioASBD];
    [self creatAuGraph];
    [self creatAudioUnitAndAddNode];
    [self configAudioUnit];
    [self setAuGraphCallBack];
    [self updateAUGraph];
}

#pragma mark ************** 录音
/**
 Starts recording: creates the destination audio file for the current
 stream format, then starts the graph. No-op when already recording.
 */
- (void)startAudioRecord
{
    if (self.isRecording) {
        return;
    }
    // Create the destination audio file matching the current stream format.
    
    _audioFileRef = [self.fileManager createExtAudioFileWithAsbd:_audioASBD];
    
    if(_audioFileRef) {
        // Start pulling audio through the graph.
        OSStatus status = AUGraphStart(_audioGraph);
        
        CheckStatus(status, @"开始录音", YES);
        
        self.isRecording = YES;
    }
    else{
        NSLog(@"创建音频文件失败");
    }
    
}

/**
 Pauses recording by stopping the graph; the audio file stays open so
 recording can be resumed with -continueAudioRecord.
 */
- (void)pauseAudioRecord {
    
    if (self.isRecording == NO) {
        return;
    }
    OSStatus status = AUGraphStop(_audioGraph);
    
    CheckStatus(status, @"暂停录音", YES);
    
    self.isRecording = NO;
}

/**
 Resumes a previously paused recording. Requires the audio file created by
 -startAudioRecord to still be open.
 */
- (void)continueAudioRecord {
    
    if (self.isRecording) {
        return;
    }
    
    if(_audioFileRef) {
        // Restart the graph; data keeps appending to the same file.
        OSStatus status = AUGraphStart(_audioGraph);
        
        CheckStatus(status, @"继续录音", YES);
        
        self.isRecording = YES;
    }
    else{
        NSLog(@"获取上次录音音频文件失败");
    }
}

/**
 Stops recording: flushes the in-memory buffer to disk, stops the graph,
 and closes the audio file.
 @return The path of the finished recording file.
 */
- (NSString *)stopAudioRecord
{
    self.isRecording = NO;
    // Flush whatever remains in the buffer to the file.
    // NOTE(review): isRecording was just set to NO above, so the
    // `self.isRecording == NO` expression below is always YES.
    [self sendVoiceDataToBuffer:[NSData data]
                      isEndFile:self.isRecording == NO];
    
    OSStatus status = AUGraphStop(_audioGraph);
    
    CheckStatus(status, @"结束录音", YES);
    
    NSString *recordFilePath = [self.fileManager closeFile:_audioFileRef];
    
    return recordFilePath;
}

/**
 Plays the audio file at the given path.
 NOTE(review): not implemented yet.
 */
+ (void)playAudioAt:(NSString *)audioPath
{
    
}


#pragma mark **************

/**
 Configures the shared AVAudioSession for play-and-record with mixing,
 Bluetooth, and speaker options (AirPlay/A2DP options require iOS 10+).
 NOTE(review): the error pointer is checked instead of the method's BOOL
 return value, and the session is never activated here — confirm that
 setActive: happens elsewhere.
 */
- (void)initializeAudioSession{
    // Obtain the shared audio session (only configured once).
    NSError *error;
    if (_audioSession == nil) {
        _audioSession = [AVAudioSession sharedInstance];
        // Set the session category and options.
        if (@available(iOS 10.0, *)) {
            [_audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
                           withOptions:AVAudioSessionCategoryOptionMixWithOthers
             | AVAudioSessionCategoryOptionAllowBluetooth
             | AVAudioSessionCategoryOptionDefaultToSpeaker
             | AVAudioSessionCategoryOptionAllowAirPlay
             | AVAudioSessionCategoryOptionAllowBluetoothA2DP
                                 error:&error];
        } else {
            [_audioSession setCategory:AVAudioSessionCategoryPlayAndRecord
                           withOptions:AVAudioSessionCategoryOptionMixWithOthers
             | AVAudioSessionCategoryOptionAllowBluetooth
             | AVAudioSessionCategoryOptionDefaultToSpeaker
                                 error:&error];
        }
        
        if (error) {
            NSLog(@"setCategory Error");
        }
    }
    
}

/**
 Creates the AUGraph and opens it (opening instantiates the graph's
 internal resources, but does not initialize it yet).
 */
- (void)creatAuGraph{
    
    NewAUGraph(&_audioGraph);
    // Open the AUGraph to complete its internal setup.
    AUGraphOpen(_audioGraph);
}
/**
 Adds the remote I/O node (described by _audioACD) to the graph and
 retrieves the underlying AudioUnit from it.
 */
- (void)creatAudioUnitAndAddNode{
    OSStatus status;
    // Add a node for the remote I/O component.
    status = AUGraphAddNode(_audioGraph,
                            &_audioACD,
                            &_audioIONode);
    CheckStatus(status, @"添加Node", YES);
    // Fetch the AudioUnit wrapped by the node.
    status = AUGraphNodeInfo(_audioGraph,
                             _audioIONode,
                             NULL,
                             &_audioIOUnit);
    CheckStatus(status, @"获取NodeInfo", YES);
}

/**
 Enables microphone capture on the I/O unit and applies the stream format;
 additionally wires up the speaker path when play-while-record is enabled.
 */
- (void)configAudioUnit{
    
    [self activeMicrophoneInputWithAudioUnit:_audioIOUnit];
    
    [self activeMicrophoneOutputWithAudioUnit:_audioIOUnit
                                      andASBD:_audioASBD];
    
    if (_recorderDescription.enablePlayWhenRecord) {
        
        [self activeSpeakerOutputWithAudioUnit:_audioIOUnit];
        
        [self activeSpeakerInputWithAudioUnit:_audioIOUnit
                                      andASBD:_audioASBD];
    }
}

/**
 Installs inputCallBackFun as the render callback that feeds input
 element 0 of the I/O node, passing self as the callback context.
 */
- (void)setAuGraphCallBack
{
    AudioUnitElement element0 = 0;
    
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = inputCallBackFun;
    callbackStruct.inputProcRefCon = (__bridge void *) self;
    OSStatus status = AUGraphSetNodeInputCallback(_audioGraph,
                                                  _audioIONode,
                                                  element0,
                                                  &callbackStruct);
    CheckStatus(status, @"麦克风回调", YES);
}

/**
 Initializes and updates the AUGraph — must run after all nodes,
 properties, and callbacks are configured.
 */
- (void)updateAUGraph {
    OSStatus status = noErr;
    status = AUGraphInitialize(_audioGraph);
    CheckStatus(status, @"初始化AUG", YES);
    status = AUGraphUpdate(_audioGraph, NULL);
    CheckStatus(status, @"更新AUG", YES);
}
/**
 Enables speaker output: sets EnableIO on the output scope of element 0
 (the hardware output bus of the remote I/O unit).
 */
- (void)activeSpeakerOutputWithAudioUnit:(AudioUnit)audioUnit{
    OSStatus status = noErr;
    UInt32 oneFlag = 1;
    // Element 0: the output (speaker) bus.
    UInt32 element0 = 0;
    // Enable I/O toward the speaker.
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  element0,
                                  &oneFlag,
                                  sizeof(oneFlag));
    CheckStatus(status, @"调用扬声器输出", YES);
}

/**
 Applies the PCM stream format to the input scope of element 0 — the
 format of the data the app supplies for playback.
 */
- (void)activeSpeakerInputWithAudioUnit:(AudioUnit)audioUnit
                                andASBD:(AudioStreamBasicDescription)asbd{
    OSStatus status = noErr;
    // Element 0: the output (speaker) bus.
    UInt32 element0 = 0;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  element0,
                                  &asbd,
                                  sizeof(asbd));
    CheckStatus(status, @"启用扬声器输入", YES);
}

/**
 Enables microphone capture: sets EnableIO on the input scope of element 1
 (the hardware input bus of the remote I/O unit).
 */
- (void)activeMicrophoneInputWithAudioUnit:(AudioUnit)audioUnit {
    OSStatus status = noErr;
    UInt32 oneFlag = 1;
    // Element 1: the input (microphone) bus.
    UInt32 element1 = 1;
    // Enable I/O from the microphone.
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  element1,
                                  &oneFlag,
                                  sizeof(oneFlag));
    CheckStatus(status, @"启用麦克风输入", YES);
}

/**
 Applies the PCM stream format to the output scope of element 1 — the
 format of the data the microphone delivers to the app.
 */
- (void)activeMicrophoneOutputWithAudioUnit:(AudioUnit)audioUnit
                                    andASBD:(AudioStreamBasicDescription)asbd{
    OSStatus status = noErr;
    // Element 1: the input (microphone) bus.
    UInt32 element1 = 1;
    // Set the format the microphone data is delivered in.
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  element1,
                                  &asbd,
                                  sizeof(asbd));
    CheckStatus(status, @"调用麦克风输出", YES);
}

/**
 Populates the AudioComponentDescription used to create the I/O unit:
 Apple's RemoteIO output unit. Only runs once (componentType == 0 guard).
 */
- (void)initializeAudioACD{
    if (_audioACD.componentType == 0) {
        _audioACD.componentType = kAudioUnitType_Output;              // output-type unit
        _audioACD.componentSubType = kAudioUnitSubType_RemoteIO;      // remote I/O subtype
        _audioACD.componentManufacturer = kAudioUnitManufacturer_Apple;// manufacturer
        _audioACD.componentFlags = 0;
        _audioACD.componentFlagsMask = 0;
    }
}

/**
 Populates the AudioStreamBasicDescription for 16-bit signed-integer,
 packed linear PCM at the session's actual sample rate and channel count.
 Only runs once (mSampleRate == 0 guard).
 */
- (void)initializeAudioASBD{
    if (_audioASBD.mSampleRate == 0) {
        // Codec format: linear PCM.
        _audioASBD.mFormatID = kAudioFormatLinearPCM;
        // Format flags: 16-bit signed-integer samples, packed (interleaved).
        // NOTE(review): the original comment mentioned float / non-interleaved,
        // but these flags actually select signed-integer interleaved data.
        _audioASBD.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        // Sample rate (the rate the session actually granted).
        _audioASBD.mSampleRate = self.recorderDescription.sampleRate;
        // Channels per frame.
        _audioASBD.mChannelsPerFrame = self.recorderDescription.inputChannels;
        // Bit depth per channel.
        _audioASBD.mBitsPerChannel = 16;
        // Frames per packet (always 1 for uncompressed PCM).
        _audioASBD.mFramesPerPacket = 1;
        // With mFramesPerPacket == 1, mBytesPerPacket equals mBytesPerFrame.
        // For interleaved data, bytes-per-frame is bytesPerSample * channels;
        // for non-interleaved it would be bytesPerSample alone.
        _audioASBD.mBytesPerFrame = (_audioASBD.mBitsPerChannel / 8) * _audioASBD.mChannelsPerFrame; // bytes per frame
        // Bytes per packet — same note as above regarding interleaving.
        _audioASBD.mBytesPerPacket = _audioASBD.mBytesPerFrame;
    }
}

/**
 Render callback invoked by the graph when a buffer of samples is needed.
 Copies (and for stereo, de-interleaves) the captured PCM into an NSData
 and forwards it to the disk buffer.
 */
static OSStatus inputCallBackFun(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList * __nullable ioData)
{
    // The recorder instance passed as the callback context.
    AudioRecorder *recorder = (__bridge AudioRecorder *)(inRefCon);
    if (recorder.recorderDescription.isEnablePlayWhenRecord) {
        // Pull the microphone samples (bus 1) into ioData so they are
        // also routed to the speaker.
        AudioUnitRender(recorder->_audioIOUnit,
                        ioActionFlags,
                        inTimeStamp,
                        1,
                        inNumberFrames,
                        ioData);
    }
    // NOTE(review): when play-while-record is disabled, AudioUnitRender is
    // never called, so ioData below may not contain fresh microphone
    // samples — confirm whether this path is intended.
    // Data captured in this callback pass.
    NSMutableData *voiceData;
    // Mono: copy buffer 0 as-is.
    if (recorder.recorderDescription.inputChannels == AudioRecorderChannel_Mono) {
        
        AudioBuffer buffer = ioData->mBuffers[0];
        voiceData = [NSMutableData dataWithData:[NSData dataWithBytes:buffer.mData length:buffer.mDataByteSize]];
    }
    // Stereo — unverified.
    else if(recorder.recorderDescription.inputChannels == AudioRecorderChannel_Stereo)
    {
        NSData *data = [NSData dataWithBytes:ioData->mBuffers[0].mData
                                      length:ioData->mBuffers[0].mDataByteSize];
        
        NSMutableData *leftData = [NSMutableData dataWithCapacity:0];
        NSMutableData *rightData = [NSMutableData dataWithCapacity:0];
        // Split left/right channels: 16-bit interleaved frames are 4 bytes
        // each (2 bytes left, 2 bytes right).
        for (int i = 0; i < data.length; i+=4) {
            [leftData appendData:[data subdataWithRange:NSMakeRange(i, 2)]];
            [rightData appendData:[data subdataWithRange:NSMakeRange(i+2, 2)]];
        }
        
        voiceData = [[NSMutableData alloc] initWithData:leftData];
        [voiceData appendData:rightData];
    }
    
    [recorder sendVoiceDataToBuffer:voiceData
                          isEndFile:recorder.isRecording == NO];
    
    return noErr;
}
/**
 Appends captured samples to the in-memory buffer and flushes to disk when
 the buffer reaches diskBufferSize KB (or at end-of-file). Buffering
 reduces file-write frequency.
 */
- (void)sendVoiceDataToBuffer:(NSData *)voiceData isEndFile:(BOOL)isEndfile
{
    if (voiceData) {
        // Append to the accumulation buffer.
        [self.voiceBuffer0 appendData:voiceData];
        // Flush when the buffer is full or the file is being finalized.
        if (self.voiceBuffer0.length >= self.recorderDescription.diskBufferSize * 1024 || isEndfile) {
            self.voiceBuffer1 = [NSData dataWithData:self.voiceBuffer0];
            // Clear the accumulation buffer.
            [self.voiceBuffer0 resetBytesInRange:NSMakeRange(0, self.voiceBuffer0.length)];
            [self.voiceBuffer0 setLength:0];
            [self.fileManager writeData:self.voiceBuffer1];
            self.voiceBuffer1 = nil;
            if (isEndfile) {
                NSLog(@"文件结尾");
            }
        }
    }
}
#pragma mark ************** lazy Load
// Lazily creates the file manager on first access.
-(AudioRecorderFileManager *)fileManager
{
    if (_fileManager == nil) {
        _fileManager = [[AudioRecorderFileManager alloc] init];
    }
    return _fileManager;
}

@end
