//
//
//  WCLRecordEngine.m
//  WCL
//
//  Created by WangZhenyu on 2020/12/4.
//

#import "WCLRecordEngine.h"
#import "WCLRecordEncoder.h"
#import <AVFoundation/AVFoundation.h>
#import <Photos/Photos.h>
#import "TYLevelPairs.h"
#import "TYMeterTable.h"
#import "CustomRecordModel.h"
#import "FileManager.h"
#import <lame/lame.h>
#import "LocalRecordModel.h"

#define QUEUE_BUFFER_COUNT 3
#define QUEUE_BUFFER_SIZE 640
#define PCM_FRAME_BYTE_SIZE 640

@interface WCLRecordEngine ()<AVCaptureAudioDataOutputSampleBufferDelegate> {
    CMTime _timeOffset;//accumulated pause offset applied to sample timestamps
    CMTime _lastVideo;//timestamp of the last video sample file seen
    CMTime _lastAudio;//timestamp of the last audio sample file seen
    
    int _channels;//audio channel count
    Float64 _samplerate;//audio sample rate
    
    AudioQueueRef mQueue;//AudioQueue used for level metering / raw PCM taps

}

@property (strong, nonatomic) WCLRecordEncoder           *recordAudioEncoder;//recording encoder
@property (strong, nonatomic) LocalRecordModel           *localModel;//model describing the current recording

@property (strong, nonatomic) AVCaptureSession           *recordSession;//capture session
@property (strong, nonatomic) AVCaptureDeviceInput       *audioMicInput;//microphone input
// BUGFIX: dispatch objects do not conform to NSCopying, so a synthesized
// `copy` setter would throw if ever invoked; `strong` is the correct
// ownership qualifier for dispatch_queue_t under ARC.
@property (strong, nonatomic) dispatch_queue_t           captureQueue;//serial recording queue
@property (strong, nonatomic) AVCaptureConnection        *audioConnection;//audio recording connection
@property (strong, nonatomic) AVCaptureAudioDataOutput   *audioOutput;//audio output

@property (atomic, assign) BOOL isCapturing;//currently recording
@property (atomic, assign) BOOL isPaused;//recording is paused
@property (atomic, assign) BOOL discont;//recording was interrupted since the last buffer
@property (atomic, assign) CMTime startTime;//time the recording started
@property (atomic, assign) CGFloat currentRecordTime;//elapsed recording time (seconds)
@property (nonatomic, strong) TYMeterTable *meterTable;

@property(nonatomic,strong) NSMutableData *bufferedVoiceData;//partial PCM frame accumulator
@property(nonatomic,assign,readwrite) NSUInteger currentVoiceVolume;//mapped mic volume, roughly 0..100

@implementation WCLRecordEngine

// Teardown: stop the capture session and dispose the audio queue before
// dropping strong references. Order matters — the session must be
// stopped while its inputs/outputs are still alive.
- (void)dealloc {
    
    [_recordSession stopRunning];
    [self _disposeAudioQueue];
    _captureQueue     = nil;
    _recordSession    = nil;
    _audioOutput      = nil;
    _audioConnection  = nil;
    _recordAudioEncoder = nil;
    _localModel = nil;
}

// Designated initializer: seeds the maximum record length and an empty
// PCM accumulation buffer.
- (instancetype)init {
    self = [super init];
    if (self == nil) {
        return nil;
    }
    self.maxRecordTime = MAX_INPUT;
    self.bufferedVoiceData = [NSMutableData data];
    return self;
}

#pragma mark - 公开的方法
//启动录制功能
// Resets all recording state and starts the capture session off the
// calling thread (-startRunning blocks until the session is live).
- (void)startUp {
    self.startTime = CMTimeMake(0, 0);
    self.isCapturing = NO;
    self.isPaused = NO;
    self.discont = NO;
    dispatch_queue_t background = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
    dispatch_async(background, ^{
        [self.recordSession startRunning];
    });
}

// Snapshot of the first audio channel's power levels, both raw (dB) and
// mapped through the meter table to linear values.
- (TYLevelPairs *)levels {
    AVCaptureAudioChannel *firstChannel = self.audioConnection.audioChannels.firstObject;
    float average = firstChannel.averagePowerLevel;
    float peak    = firstChannel.peakHoldLevel;
    return [TYLevelPairs levelsWithLevel:[self.meterTable valueForPower:average]
                               peakLevel:[self.meterTable valueForPower:peak]
                                avgPower:average
                               peakPower:peak];
}

//关闭录制功能
// Stops the session, finishes the encoder, and deletes the partial
// recording file. Used for abandoning a recording (vs. -stopCaptureHandler:).
- (void)shutdown {
    self.isPaused = NO;
    // Previously assigned twice and mixed direct-ivar (`_startTime`) with
    // property access; use the (atomic) property consistently.
    self.isCapturing = NO;
    self.startTime = CMTimeMake(0, 0);
    if (_recordSession) {
        [_recordSession stopRunning];
    }
    [_recordAudioEncoder finishWithCompletionHandler:^{
        self->_recordAudioEncoder.isCanWrite = NO;
        self.recordAudioEncoder = nil;
        //删除录制文件
        [[FileManager sharedManager] removeObject:self.audioPath];
    }];
}

//开始录制
// Arms a fresh capture: drops any stale encoder/model, clears pause and
// offset state, and resets the PCM accumulation buffer. No-op if a
// capture is already in flight.
- (void)startCapture {
    @synchronized(self) {
        if (self.isCapturing) {
            return;
        }
        self.recordAudioEncoder = nil;
        self.localModel = nil;
        self.isPaused = NO;
        self.discont = NO;
        _timeOffset = CMTimeMake(0, 0);
        self.isCapturing = YES;
        self.bufferedVoiceData = [NSMutableData data];
    }
}
//暂停录制
// Marks the stream paused; `discont` tells the next delivered buffer to
// fold the pause gap into the timestamp offset.
- (void)pauseCapture {
    @synchronized(self) {
        if (!self.isCapturing) {
            return;
        }
        self.isPaused = YES;
        self.discont = YES;
    }
}
//继续录制
// Clears the pause flag so buffers flow to the encoder again.
- (void)resumeCapture {
    @synchronized(self) {
        if (!self.isPaused) {
            return;
        }
        self.isPaused = NO;
    }
}

//停止录制
// Finishes the encoder on the capture queue, converts wav -> mp3 when the
// record model asks for mp3, hands the final path + model to |handler|,
// optionally persists the record, then resets all recording state.
- (void) stopCaptureHandler:(void (^)(NSString *audioPath, LocalRecordModel *model))handler {
    if ([self.delegate respondsToSelector:@selector(recordStopPlayer)]) {
        [self.delegate recordStopPlayer];
    }
    @synchronized(self) {
        if (!self.isCapturing) {
            return;
        }
        self.isCapturing = NO;
        self.bufferedVoiceData = nil;
        // BUGFIX: use the lazy accessor — the raw ivar may still be nil
        // here, and dispatch_async on a NULL queue crashes.
        dispatch_async(self.captureQueue, ^{
            [self.recordAudioEncoder finishWithCompletionHandler:^{
                if ([self.recordModel.name isEqualToString:@"mp3"]) {
                    // mp3 is recorded as wav first, then converted.
                    NSString *outPath = [[FileManager sharedManager] getLocalFilePath:[NSString stringWithFormat:@"%@.mp3", self.localModel.recordId]];
                    [self convenrtToMp3WithResult:self.audioPath outPath:outPath completed:^(NSString *successPath) {
                        self.localModel.fileName = [outPath lastPathComponent];
                        if (handler) {
                            handler(successPath, self.localModel);
                        }
                        [self p_finishStopCapture];
                    }];
                } else {
                    if (self.typeStr) {
                        self.localModel.nickName = [NSString stringWithFormat:@"%@-%@", self.typeStr, self.localModel.nickName];
                    }
                    if (handler) {
                        handler(self.audioPath, self.localModel);
                    }
                    [self p_finishStopCapture];
                }
            }];
        });
    }
}

// Shared teardown after a stop: optionally persists the record model,
// then releases the encoder/model and zeroes the timing state.
// (Was duplicated verbatim in both branches above.)
- (void)p_finishStopCapture {
    if (self.needSave) {
        [UserManger createFileModel:self.localModel];
    }
    self.recordAudioEncoder.isCanWrite = NO;
    self.recordAudioEncoder = nil;
    self.localModel = nil;
    self.isCapturing = NO;
    self.startTime = CMTimeMake(0, 0);
    self.currentRecordTime = 0;
}

// Stops and disposes the AudioQueue used for level metering / PCM taps.
- (void)stopVoiceQueue{
    [self _stopAudioQueue];
    [self _disposeAudioQueue];
}

#pragma mark - set、get方法
//捕获视频的会话
// Lazily builds the capture session wired with the microphone input and
// the audio data output.
- (AVCaptureSession *)recordSession {
    if (_recordSession != nil) {
        return _recordSession;
    }
    AVCaptureSession *session = [[AVCaptureSession alloc] init];
    // Attach the microphone input.
    if ([session canAddInput:self.audioMicInput]) {
        [session addInput:self.audioMicInput];
    }
    // Attach the audio data output.
    if ([session canAddOutput:self.audioOutput]) {
        [session addOutput:self.audioOutput];
    }
    _recordSession = session;
    return _recordSession;
}

//麦克风输入
// Lazy accessor for the default microphone wrapped in a device input.
- (AVCaptureDeviceInput *)audioMicInput {
    if (_audioMicInput == nil) {
        AVCaptureDevice *mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
        NSError *error = nil;
        _audioMicInput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:&error];
        // BUGFIX: check the returned object rather than the error pointer —
        // Cocoa only guarantees *error is meaningful on failure.
        if (_audioMicInput == nil) {
            NSLog(@"获取麦克风失败~");
        }
    }
    return _audioMicInput;
}

//音频输出
// Lazily creates the audio data output, delivering sample buffers to
// this object on the serial capture queue.
- (AVCaptureAudioDataOutput *)audioOutput {
    if (_audioOutput != nil) {
        return _audioOutput;
    }
    AVCaptureAudioDataOutput *output = [[AVCaptureAudioDataOutput alloc] init];
    [output setSampleBufferDelegate:self queue:self.captureQueue];
    _audioOutput = output;
    return _audioOutput;
}

//音频连接
// Lazy accessor for the audio connection of the data output.
- (AVCaptureConnection *)audioConnection {
    if (_audioConnection != nil) {
        return _audioConnection;
    }
    _audioConnection = [self.audioOutput connectionWithMediaType:AVMediaTypeAudio];
    return _audioConnection;
}

//录制的队列
// Lazy serial queue on which sample buffers are delivered and encoded.
- (dispatch_queue_t)captureQueue {
    if (_captureQueue != nil) {
        return _captureQueue;
    }
    _captureQueue = dispatch_queue_create("com.fanhantech.DepressionAuxiliary.capture", DISPATCH_QUEUE_SERIAL);
    return _captureQueue;
}

// Lazy meter table used to map dB power values to linear levels.
- (TYMeterTable *)meterTable {
    if (_meterTable != nil) {
        return _meterTable;
    }
    _meterTable = [[TYMeterTable alloc] init];
    return _meterTable;
}

//获得音频存放地址
// Fixed scratch path for the m4a cache file in the temp directory.
- (NSString *)getAudioCachePath {
    return [NSTemporaryDirectory() stringByAppendingPathComponent:@"test.m4a"];
}

// Builds an upload filename of the form "<type>_<HHmmss>.<fileType>"
// using the current wall-clock time.
- (NSString *)getUploadFile_type:(NSString *)type fileType:(NSString *)fileType {
    // Cleanup: removed a stray empty statement and a pointless
    // NSDate -> epoch-interval -> NSDate round trip (HHmmss granularity
    // is unaffected).
    NSDateFormatter *formatter = [[NSDateFormatter alloc] init];
    formatter.dateFormat = @"HHmmss";
    NSString *timeStr = [formatter stringFromDate:[NSDate date]];
    return [NSString stringWithFormat:@"%@_%@.%@", type, timeStr, fileType];
}

#pragma mark - 写入数据
// AVCaptureAudioDataOutput delegate: lazily creates the encoder from the
// first buffer's format, compensates timestamps for pause gaps, reports
// progress to the delegate, and forwards each buffer to the encoder.
- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    @synchronized(self) {
        if (!self.isCapturing  || self.isPaused) {
            return;
        }
        // 初始化编码器，当有音频参数时创建编码器
        // (Cleanup: removed the dead `isVideo` flag — it was set to NO
        // unconditionally, making its return branch unreachable.)
        if (self.recordAudioEncoder == nil) {
            CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
            [self setAudioFormat:fmt];
            //如果是mp3格式，需要先录制wav后转mp3
            if ([self.recordModel.name isEqual:@"mp3"]) {
                self.localModel = [[LocalRecordModel alloc] initRecord:@"wav"];
            } else {
                self.localModel = [[LocalRecordModel alloc] initRecord:self.recordModel.name];
            }

            self.audioPath = [[FileManager sharedManager] getLocalFilePath:self.localModel.fileName];
            self.recordAudioEncoder = [WCLRecordEncoder encoderForAudioPath:self.audioPath model:self.recordModel];
            [self _createAudioQueue];
            [self _startAudioQueue];
        }
        // First buffer after a pause: fold the gap into _timeOffset so the
        // written timestamps stay continuous.
        if (self.discont) {
            self.discont = NO;
            CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
            CMTime last = _lastAudio;
            if (last.flags & kCMTimeFlags_Valid) {
                if (_timeOffset.flags & kCMTimeFlags_Valid) {
                    pts = CMTimeSubtract(pts, _timeOffset);
                }
                CMTime offset = CMTimeSubtract(pts, last);
                if (_timeOffset.value == 0) {
                    _timeOffset = offset;
                }else {
                    _timeOffset = CMTimeAdd(_timeOffset, offset);
                }
            }
            _lastVideo.flags = 0;
            _lastAudio.flags = 0;
        }
        // Take ownership while we (possibly) retime and encode the buffer.
        CFRetain(sampleBuffer);
        if (_timeOffset.value > 0) {
            CFRelease(sampleBuffer);
            // Replace with an owned, retimed copy (+1 from adjustTime:by:).
            sampleBuffer = [self adjustTime:sampleBuffer by:_timeOffset];
        }
        // Remember this buffer's end timestamp for the next pause calculation.
        CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        CMTime dur = CMSampleBufferGetDuration(sampleBuffer);
        if (dur.value > 0) {
            pts = CMTimeAdd(pts, dur);
        }
        _lastAudio = pts;
    }
    CMTime dur = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
    if (self.startTime.value == 0) {
        self.startTime = dur;
    }
    CMTime sub = CMTimeSubtract(dur, self.startTime);
    self.currentRecordTime = CMTimeGetSeconds(sub);
    if (self.currentRecordTime > self.maxRecordTime) {
        // Past the limit: report once (within a 0.1s window) and drop the frame.
        if (self.currentRecordTime - self.maxRecordTime < 0.1) {
            if ([self.delegate respondsToSelector:@selector(recordProgress:)]) {
                dispatch_async(dispatch_get_main_queue(), ^{
                    [self.delegate recordProgress:self.currentRecordTime];
                });
            }
        }
        // BUGFIX: this early return previously leaked the sample buffer
        // retained above (one leak per frame past maxRecordTime).
        CFRelease(sampleBuffer);
        return;
    }
    if ([self.delegate respondsToSelector:@selector(recordProgress:)]) {
        dispatch_async(dispatch_get_main_queue(), ^{
            [self.delegate recordProgress:self.currentRecordTime];
        });
    }
    // 进行数据编码
    [self.recordAudioEncoder encodeFrame:sampleBuffer];
    
    CFRelease(sampleBuffer);
}

//设置音频格式
// Caches the channel count and sample rate from the stream's format
// description.
- (void)setAudioFormat:(CMFormatDescriptionRef)fmt {
    const AudioStreamBasicDescription *asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt);
    // BUGFIX: the API returns NULL for a non-audio (or NULL) format
    // description; guard before dereferencing.
    if (asbd == NULL) {
        return;
    }
    _samplerate = asbd->mSampleRate;
    _channels = asbd->mChannelsPerFrame;
}

//调整媒体数据的时间
// Returns a new, caller-owned (+1) sample buffer whose timing info has
// been shifted back by |offset|.
- (CMSampleBufferRef)adjustTime:(CMSampleBufferRef)sample by:(CMTime)offset {
    CMItemCount count = 0;
    CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count);
    CMSampleTimingInfo* pInfo = malloc(sizeof(CMSampleTimingInfo) * count);
    // ROBUSTNESS: on allocation failure, fall back to an unshifted owned
    // copy so the caller's CFRelease stays balanced.
    if (pInfo == NULL) {
        CFRetain(sample);
        return sample;
    }
    CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count);
    for (CMItemCount i = 0; i < count; i++) {
        pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset);
        pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset);
    }
    // BUGFIX: `sout` was uninitialized — if the Create call failed the
    // function returned an indeterminate pointer.
    CMSampleBufferRef sout = NULL;
    CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout);
    free(pInfo);
    return sout;
}

#pragma mark - AVCaptureMetadataOutputObjectsDelegate
// Metadata callback — currently a no-op; the pause check short-circuits
// any future metadata handling while paused.
- (void)captureOutput:(AVCaptureOutput *)output didOutputMetadataObjects:(NSArray<__kindof AVMetadataObject *> *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
    if (self.isPaused) return;
}


#pragma mark - Internal implementations

// Samples the queue's level meter on roughly every 4th call and maps the
// average power (dB) into _currentVoiceVolume via (avg+50)*2.
-(void)_updateCurrentVoiceVolume{
    if (mQueue) {
        //FIXME - delay calculate the volume
        // NOTE(review): `static` means this counter is shared across all
        // engine instances — confirm a single engine is ever live at once.
        static int skipFrame = 0;
        if(skipFrame++ == 3){
            skipFrame = 0;
            // To read multiple channels an array of meter states is needed;
            // here only a single channel's result is read and displayed.
            UInt32 data_sz = sizeof(AudioQueueLevelMeterState);
            AudioQueueLevelMeterState levelMeter;
            OSErr status = AudioQueueGetProperty(mQueue, kAudioQueueProperty_CurrentLevelMeterDB, &levelMeter, &data_sz);
            if (status == noErr) {
                _currentVoiceVolume = (levelMeter.mAveragePower+50)*2;
            }
        }
    }
}

// AudioQueue input callback: accumulates incoming audio into fixed-size
// PCM frames via -_bufferPCMFrame: and forwards complete frames to the
// engine, then re-enqueues the buffer so capture continues.
// NOTE(review): when not capturing the buffer is NOT re-enqueued, so the
// queue starves after QUEUE_BUFFER_COUNT callbacks — confirm this is the
// intended way to quiesce the queue before it is stopped.
static void inputBufferHandler(void *                          inUserData,
                               AudioQueueRef                   inAQ,
                               AudioQueueBufferRef             inBuffer,
                               const AudioTimeStamp *          inStartTime,
                               UInt32                          inNumberPacketDescriptions,
                               const AudioStreamPacketDescription *inPacketDescs){
    @autoreleasepool {

        WCLRecordEngine *recorder = (__bridge WCLRecordEngine*) inUserData;
        if(recorder.isCapturing){
            // The AudioQueueBuffer size is not always the preset 640 bytes,
            // so buffering is required before a full frame is handed out.
            NSData *frame = [recorder _bufferPCMFrame:inBuffer];
            if(frame){
                [recorder _handleVoiceFrame:frame];
            }
            AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
        }else{
            NSLog(@"WARN: - recorder is stopped, ignoring the callback data %d bytes",(int)inBuffer->mAudioDataByteSize);
        }
    }
}

/*
 * Allocate the AudioQueue used to tap raw PCM frames for metering.
 * Returns YES if a queue already exists or was created successfully.
 */
-(BOOL) _createAudioQueue{
    @synchronized(self){
        if(mQueue != NULL){
            return YES;
        }
        // Build the input format from the record model.
        // NOTE(review): for non-LinearPCM format IDs most ASBD fields stay
        // zero, which AudioQueueNewInput will likely reject — confirm only
        // PCM recordings reach this path.
        AudioStreamBasicDescription format;
        memset(&format, 0, sizeof(format));
        format.mFormatID = (int)self.recordModel.formatIdKey;
        format.mSampleRate = self.recordModel.rate;
        format.mChannelsPerFrame = (int)self.recordModel.channels;
        if (self.recordModel.formatIdKey == kAudioFormatLinearPCM) {
            // 16-bit signed, packed, interleaved PCM.
            format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
            format.mBitsPerChannel = 16;
            format.mBytesPerPacket =  (format.mBitsPerChannel >> 3) * format.mChannelsPerFrame;
            format.mBytesPerFrame = format.mBytesPerPacket;
            format.mFramesPerPacket = 1;
        }

        // Create the input queue; callbacks fire on an internal thread.
        OSStatus result = AudioQueueNewInput(&format, inputBufferHandler, (__bridge void * _Nullable)(self), NULL, NULL, 0, &mQueue);
        if (result != noErr) {
            mQueue = NULL;
            return NO;
        }
        AudioQueueSetParameter(mQueue, kAudioQueueParam_Volume, 1.0f);

        return YES;
    }
}

// Tears down the AudioQueue created by -_createAudioQueue.
// CONSISTENCY: -_createAudioQueue guards mQueue with @synchronized(self);
// dispose must take the same lock or create/dispose can race.
-(void) _disposeAudioQueue{
    @synchronized(self){
        if(mQueue == NULL){
            return;
        }
        AudioQueueDispose(mQueue, true);
        mQueue = NULL;
    }
}

// Allocates and enqueues the capture buffers, starts the queue, and
// enables level metering. Returns NO (after disposing the queue) on any
// AudioToolbox error.
-(BOOL) _startAudioQueue{
    NSAssert(mQueue,@"mQueue is null");

    OSStatus result = noErr;

    // buffers
    AudioQueueBufferRef queueBuffer;
    for (int i = 0; i < QUEUE_BUFFER_COUNT; ++i) {
        queueBuffer = NULL;
        // BUGFIX: the `!= noErr` comparison was inside the assignment's
        // parentheses, so `result` held the boolean 0/1 instead of the
        // OSStatus, and the logged error code was always 1.
        if ((result = AudioQueueAllocateBuffer(mQueue, QUEUE_BUFFER_SIZE, &queueBuffer)) != noErr) {
            NSLog(@"AudioQueueAllocateBuffer error %d", (int)result);
            [self _disposeAudioQueue];
            return NO;
        }
        if((result = AudioQueueEnqueueBuffer(mQueue, queueBuffer, 0, NULL)) != noErr) {
            NSLog(@"AudioQueueEnqueueBuffer error %d", (int)result);
            [self _disposeAudioQueue];
            return NO;
        }
    }

    if ((result = AudioQueueStart(mQueue, NULL)) != noErr) {
        NSLog(@"AudioQueueStart error %d",(int)result);
        [self _disposeAudioQueue];
        return NO;
    }

    // Enable metering so -_updateCurrentVoiceVolume can read power levels.
    UInt32 val = 1;
    AudioQueueSetProperty(mQueue, kAudioQueueProperty_EnableLevelMetering, &val, sizeof(UInt32));

    return YES;
}

// Stops the audio queue synchronously and deactivates the audio session.
// NOTE(review): AudioSessionSetActive is a long-deprecated C API —
// consider migrating to AVAudioSession's -setActive:error:.
-(void) _stopAudioQueue{
    if(mQueue == NULL){
        return;
    }
    AudioQueueStop(mQueue, true);
    AudioSessionSetActive(NO);
}


/*
 * The frame length delivered by AudioQueue is not fixed; buffer incoming
 * bytes until a full 640-byte frame has accumulated, then return it.
 * 640 bytes = 320 samples @ 16-bit = 20ms
 */
// Returns one complete PCM_FRAME_BYTE_SIZE frame, or nil while the
// accumulator is still filling. Any surplus bytes are carried over into
// the next frame.
- (NSData*) _bufferPCMFrame:(AudioQueueBufferRef)aqBuffer{
    NSAssert(_bufferedVoiceData != nil,@"_bufferVoiceData is nil" );

    // Room left before the current frame is complete.
    NSInteger nBufferSpaceLeft = PCM_FRAME_BYTE_SIZE - _bufferedVoiceData.length;

    NSInteger nBytesReceived = aqBuffer->mAudioDataByteSize;
    NSInteger nBytesToCopy = nBufferSpaceLeft >= nBytesReceived ?nBytesReceived:nBytesLeft >= 0 ? nBufferSpaceLeft : nBufferSpaceLeft;
    NSInteger nBytesLeft = nBytesReceived - nBytesToCopy;

    [_bufferedVoiceData appendBytes:aqBuffer->mAudioData length:nBytesToCopy];

    if(_bufferedVoiceData.length == PCM_FRAME_BYTE_SIZE){
        // buffer is full
        NSData *frame = [NSData dataWithData:_bufferedVoiceData];
        // reset the buffer
        _bufferedVoiceData.length = 0;

        // save the left partial data
        // (void* arithmetic here is a clang/GNU extension: byte-sized steps)
        if(nBytesLeft > 0){
            [_bufferedVoiceData appendBytes:(aqBuffer->mAudioData + nBytesToCopy) length:nBytesLeft];
        }
        return frame;
    }

    return nil;
}

// Per-frame hook: refreshes the volume meter, then forwards the PCM
// frame to the delegate when it implements -voiceRecorded:.
-(void) _handleVoiceFrame:(NSData*)voiceFrame {
    [self _updateCurrentVoiceVolume];
    // respondsToSelector: on nil answers NO, so no separate nil check is needed.
    if ([self.delegate respondsToSelector:@selector(voiceRecorded:)]) {
        [self.delegate voiceRecorded:voiceFrame];
    }
}

// Converts the recorded PCM/wav file at |originalPath| into an mp3 at
// |outPath| using LAME, then deletes the source file and invokes
// |completed| with the output path (always, even on failure — preserved
// from the original @finally semantics).
// NOTE(review): 4096 bytes are skipped as "header"; a canonical wav
// header is 44 bytes — confirm the recorder really writes 4KB of preamble.
- (void)convenrtToMp3WithResult:(NSString *)originalPath outPath:(NSString *)outPath completed:(void (^)(NSString *successPath))completed{
    [[NSFileManager defaultManager] removeItemAtPath:outPath error:nil];
    FILE *pcm = NULL;
    FILE *mp3 = NULL;
    lame_t lame = NULL;
    @try {
        pcm = fopen([originalPath cStringUsingEncoding:1], "rb");//source file
        mp3 = fopen([outPath cStringUsingEncoding:1], "wb");//destination file
        // BUGFIX: fopen results were never checked — passing a NULL FILE*
        // to fseek/fread/fwrite crashes, and @catch does not catch that.
        if (pcm == NULL || mp3 == NULL) {
            NSLog(@"convert to mp3 failed: cannot open input or output file");
            return; // @finally below still runs and reports completion
        }
        fseek(pcm, 4*1024, SEEK_CUR);                                   //skip file header
        const int PCM_SIZE = 8192;
        const int MP3_SIZE = 8192;
        short int pcm_buffer[PCM_SIZE*2];
        unsigned char mp3_buffer[MP3_SIZE];
        lame = lame_init();
        lame_set_num_channels (lame, (int)self.recordModel.channels); // 1 = mono, default 2 = stereo
        lame_set_in_samplerate(lame, self.recordModel.rate);
        lame_set_brate (lame, 16);
        lame_set_mode (lame, 3); // 3 == MONO in the MPEG_mode enum
        lame_set_VBR(lame, vbr_default);
        lame_set_quality (lame, 2); /* 2=high  5=medium  7=low quality */
        lame_init_params(lame);
        int read, write;
        do {
            read = (int)fread(pcm_buffer, 2*sizeof(short int), PCM_SIZE, pcm);
            if (read == 0)
                write = lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
            else
                write = lame_encode_buffer_interleaved(lame, pcm_buffer, read, mp3_buffer, MP3_SIZE);
            
            fwrite(mp3_buffer, write, 1, mp3);
            
        } while (read != 0);
    }
    @catch (NSException *exception) {
         NSLog(@"%@",[exception description]);
    }
    @finally {
        // Close everything here so files/encoder are released even when an
        // exception was thrown mid-conversion.
        if (lame) lame_close(lame);
        if (mp3)  fclose(mp3);
        if (pcm)  fclose(pcm);
        [[NSFileManager defaultManager] removeItemAtPath:originalPath error:nil];
        if (completed) {
            completed(outPath);
        }
    }
}
@end
