//
//  AudioEncodeViewController.m
//  Pods
//
//  Created by ilongge on 2023/8/16.
//

#import "AudioEncodeViewController.h"
 
@interface AudioEncodeViewController ()
{
    // Bundle filename of the source audio (e.g. "西海情歌.mp3") and the
    // full path of the encoded .aac output under Documents/.
    NSString *_inputAudioFile;
    NSString *_outputAudioFile;
    
    // Demux/decode side: format context, decoder + its context, the audio
    // stream index/stream, and the reusable frame decoded samples land in.
    AVFormatContext *_inputFormatContext;
    AVCodecContext *_inputAudioCodecContext;
    const struct AVCodec *_inputAudioCodec;
    int _inputAudioStreamIndex;
    AVStream *_inputAudioStream;
    AVFrame *_inputAudioFrame;
    
    // Mux/encode side: output format context, AAC encoder + its context,
    // the output stream index/stream, and the reusable frame fed to the
    // encoder (sized to the encoder's frame_size).
    AVFormatContext *_outputFormatContext;
    AVCodecContext *_outputAudioCodecContext;
    const struct AVCodec *_outputAudioCodec;
    int _outputAudioStreamIndex;
    AVStream *_outputAudioStream;
    AVFrame *_outputAudioFrame;
    
    // Destination (post-resample) audio parameters.
    int _dstAudioChannels;
    int _dstAudioSampleRate;
    enum AVSampleFormat _dstAudioSampleFmt;
    
    // Resampler, destination channel layout, running encoder pts (in
    // samples), and a FIFO that rechunks resampled samples into
    // encoder-frame-sized blocks.
    SwrContext *_swrContext;
    AVChannelLayout _outLayout;
    int64_t _pts;
    AVAudioFifo *_fifo;
    
    // Counters for the "N frames -> M frames" summary printed at the end.
    int64_t _origin_nb_samples;
    int64_t _output_nb_samples;
    int64_t _origin_frame_count;
    int64_t _output_frame_count;
    
    // Serial queue the whole decode→resample→encode pipeline runs on.
    dispatch_queue_t _decode_queue;
}
// On-screen log console and transcode progress bar.
@property (weak, nonatomic) IBOutlet UITextView *consoleText;
@property (weak, nonatomic) IBOutlet UIProgressView *progressView;
@end

@implementation AudioEncodeViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    self.title = @"音频编码";
    _inputAudioFile = @"西海情歌.mp3";
    _decode_queue = dispatch_queue_create("com.ilongge.queue.resample", DISPATCH_QUEUE_SERIAL);
    self.progressView.progress = 0;
    // Build the pipeline in dependency order: destination parameters first,
    // then demuxer, decoder, resampler (needs the input stream), output
    // muxer, and finally the encoder (needs the output format context).
    [self initDstParameter];
    [self initInputFormatContext];
    [self initDecoder];
    [self createSwrContext];
    [self initOutputFormatContext];
    [self initEncoder];
}

/// Configures the destination (post-resample) audio parameters:
/// stereo, 48 kHz, planar float — the native input format of the AAC encoder.
- (void)initDstParameter
{
    _dstAudioChannels = 2;
    _dstAudioSampleRate = 48000;
    _dstAudioSampleFmt = AV_SAMPLE_FMT_FLTP;
    // BUG FIX: the old code only handled 1 and 2 channels explicitly and for
    // any other count set just nb_channels, leaving _outLayout.order and the
    // mask uninitialized. av_channel_layout_default() produces the canonical
    // layout for ANY channel count (mono for 1, stereo for 2, etc.).
    av_channel_layout_default(&_outLayout, _dstAudioChannels);
}

/// Opens the bundled input file and probes its stream info, then dumps the
/// format description to the on-screen console.
- (void)initInputFormatContext
{
    NSString *pathString = [[NSBundle mainBundle] pathForResource:_inputAudioFile ofType:nil];
    if (pathString == nil) {
        // BUG FIX: "exsit" typo and missing newline in the diagnostic.
        printf("file not exist\n");
        return;
    }
    /// Open the audio file and obtain the demuxer context.
    int ret = avformat_open_input(&_inputFormatContext, pathString.UTF8String, NULL, NULL);
    if (ret != 0) {
        printf("avformat_open_input : %d\n", ret);
        return;
    }
    ret = avformat_find_stream_info(_inputFormatContext, NULL);
    if (ret != 0) {
        printf("avformat_find_stream_info : %d\n", ret);
        return;
    }
    {
        [self printMessage:@"输入文件:" breakline:YES];
        [self printMessage:pathString breakline:YES];
        NSString *info_string = [FFMpegKitTool av_dump_format:_inputFormatContext streamIndex:0 url:pathString is_output:NO];
        [self printMessage:info_string breakline:YES];
    }
}

/// Locates the best audio stream and opens a decoder for it.
/// Must run after initInputFormatContext.
- (void)initDecoder
{
    _inputAudioStreamIndex = -1;
    /// Find the best audio stream.
    _inputAudioStreamIndex = av_find_best_stream(_inputFormatContext, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (_inputAudioStreamIndex < 0) {
        NSLog(@"av_find_best_stream : %d", _inputAudioStreamIndex);
        return;
    }
    /// Grab the audio stream.
    _inputAudioStream = _inputFormatContext->streams[_inputAudioStreamIndex];
    /// Look up the decoder for the stream's codec.
    _inputAudioCodec = avcodec_find_decoder(_inputAudioStream->codecpar->codec_id);
    if (_inputAudioCodec == NULL) {
        NSLog(@"avcodec_find_decoder failed for codec_id %d", _inputAudioStream->codecpar->codec_id);
        return;
    }
    /// Allocate the decoder context.
    _inputAudioCodecContext = avcodec_alloc_context3(_inputAudioCodec);
    if (_inputAudioCodecContext == NULL) {
        NSLog(@"avcodec_alloc_context3 failed");
        return;
    }
    // BUG FIX: the decoder context must be primed with the stream's
    // parameters (sample rate, channel layout, extradata, …) BEFORE
    // avcodec_open2, otherwise it is opened with defaults.
    int ret = avcodec_parameters_to_context(_inputAudioCodecContext, _inputAudioStream->codecpar);
    if (ret < 0) {
        NSLog(@"avcodec_parameters_to_context : %d", ret);
        return;
    }
    /// Open the decoder.
    ret = avcodec_open2(_inputAudioCodecContext, _inputAudioCodec, NULL);
    if (ret != 0) {
        NSLog(@"avcodec_open2 : %d", ret);
        return;
    }
    /// Allocate the reusable frame decoded samples are received into.
    _inputAudioFrame = av_frame_alloc();
    {
        [self printMessage:@"配置解码器:" breakline:YES];
    }
}

/// Builds and initializes the resampler that converts from the input
/// stream's layout/format/rate to the configured destination parameters.
/// Must run after initDecoder (needs _inputAudioStream) and initDstParameter.
- (void)createSwrContext
{
    if (_inputAudioStream != NULL) {
        // Input-side parameters, taken from the stream's codec parameters.
        AVChannelLayout inLayout = _inputAudioStream->codecpar->ch_layout;
        // codecpar->format is declared int; cast to the sample-format enum.
        enum AVSampleFormat sample_fmt = (enum AVSampleFormat)_inputAudioStream->codecpar->format;
        int sample_rate = _inputAudioStream->codecpar->sample_rate;
        // Allocate and configure the resampling context.
        _swrContext = NULL;
        int ret = swr_alloc_set_opts2(&_swrContext,
                                      &_outLayout,
                                      _dstAudioSampleFmt,
                                      _dstAudioSampleRate,
                                      &inLayout,
                                      sample_fmt,
                                      sample_rate,
                                      0,
                                      NULL);
        if (ret != 0) {
            NSLog(@"swr_alloc_set_opts2 : %d", ret);
            swr_free(&_swrContext);
        }
        // BUG FIX: swr_alloc_set_opts2() only sets the options; the context
        // must be initialized with swr_init() before it is used.
        else if ((ret = swr_init(_swrContext)) < 0) {
            NSLog(@"swr_init : %d", ret);
            swr_free(&_swrContext);
        }
    }
    else{
        printf("_inputAudioStream is NULL\n");
    }
    {
        [self printMessage:@"\n创建SwrContext" breakline:YES];
    }
}

/// Prepares the output muxer context for "<input>_Encode.aac" in Documents/.
- (void)initOutputFormatContext
{
    _outputAudioFile = [NSString stringWithFormat:@"%@/Documents/%@_Encode.aac", NSHomeDirectory(), _inputAudioFile];
    // BUG FIX: the old check was `if (_outputAudioFile == nil)`, which can
    // never be true (stringWithFormat with valid arguments never returns
    // nil), so the create branch was dead — and would have passed a nil path
    // to createFileAtPath anyway. Create the file when it is missing instead.
    if (![[NSFileManager defaultManager] fileExistsAtPath:_outputAudioFile]) {
        [[NSFileManager defaultManager] createFileAtPath:_outputAudioFile contents:nil attributes:nil];
    }
    /// Allocate the output context and guess the muxer from the .aac suffix
    /// (raw ADTS).
    _outputFormatContext = avformat_alloc_context();
    const AVOutputFormat *outputFormat = av_guess_format(NULL, _outputAudioFile.UTF8String, NULL);
    if (outputFormat) {
        _outputFormatContext->oformat = outputFormat;
    }
    else {
        printf("av_guess_format failed\n");
    }
    {
        [self printMessage:@"输出文件:" breakline:YES];
        [self printMessage:_outputAudioFile breakline:YES];
    }
}

/// Configures and opens the AAC encoder, creates the output stream, opens
/// the output AVIO, and allocates the encoder-side frame and sample FIFO.
/// Must run after initDstParameter and initOutputFormatContext.
- (void)initEncoder
{
    const AVCodec *encodeCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (encodeCodec == NULL) {
        printf("avcodec_find_encoder(AAC) failed\n");
        return;
    }
    
    _outputAudioCodecContext = avcodec_alloc_context3(encodeCodec);
    _outputAudioCodecContext->sample_fmt = _dstAudioSampleFmt;
    _outputAudioCodecContext->sample_rate = _dstAudioSampleRate;
    _outputAudioCodecContext->ch_layout = _outLayout;
    _outputAudioCodecContext->bit_rate = 128000;
    _outputAudioCodecContext->profile = FF_PROFILE_AAC_MAIN;
    // BUG FIX: only request global headers when the muxer actually needs them
    // (raw ADTS does not), and OR the flag in instead of overwriting the
    // whole flags field.
    if (_outputFormatContext->oformat != NULL &&
        (_outputFormatContext->oformat->flags & AVFMT_GLOBALHEADER)) {
        _outputAudioCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    
    int ret = avcodec_open2(_outputAudioCodecContext, encodeCodec, NULL);
    if (ret < 0) {
        char error[1024];
        av_strerror(ret, error, 1024);
        printf("%s\n", error);
        return;
    }
    _outputAudioStream = avformat_new_stream(_outputFormatContext, encodeCodec);
    if (_outputAudioStream == NULL) {
        printf("avformat_new_stream ERROR\n");
        return;
    }
    _outputAudioStreamIndex = _outputAudioStream->index;
    // Audio pts are counted in samples, so 1/sample_rate is the natural base.
    _outputAudioStream->time_base = (AVRational){1, _dstAudioSampleRate};
    // Copy the encoder configuration onto the stream.
    ret = avcodec_parameters_from_context(_outputAudioStream->codecpar, _outputAudioCodecContext);
    if (ret < 0) {
        printf("avcodec_parameters_from_context ERROR\n");
        return;
    }
    // BUG FIX: dump AFTER the stream exists and is configured — the old call
    // ran before avformat_new_stream and printed an empty context.
    av_dump_format(_outputFormatContext, 0, _outputAudioFile.UTF8String, 1);
    ret = avio_open(&_outputFormatContext->pb, _outputAudioFile.UTF8String, AVIO_FLAG_WRITE);
    if (ret < 0) {
        printf("avio_open ERROR\n");
        return;
    }
    // Reusable frame fed to the encoder; sized to the encoder's frame_size
    // (1024 samples for AAC).
    _outputAudioFrame = av_frame_alloc();
    _outputAudioFrame->nb_samples = _outputAudioCodecContext->frame_size;
    _outputAudioFrame->sample_rate = _outputAudioCodecContext->sample_rate;
    _outputAudioFrame->ch_layout = _outputAudioCodecContext->ch_layout;
    _outputAudioFrame->format = _outputAudioCodecContext->sample_fmt;
    ret = av_frame_get_buffer(_outputAudioFrame, 0);
    if (ret < 0) {
        printf("av_frame_get_buffer ERROR\n");
        return;
    }
    
    // FIFO that rechunks arbitrary-sized resampled frames into
    // frame_size-sample blocks for the encoder.
    _fifo = av_audio_fifo_alloc(_outputAudioCodecContext->sample_fmt,
                                _outputAudioCodecContext->ch_layout.nb_channels,
                                _outputAudioCodecContext->frame_size);
    if (_fifo == NULL) {
        printf("av_audio_fifo_alloc ERROR\n");
        return;
    }
    {
        [self printMessage:@"配置编码器:" breakline:YES];
        [self printMessage:[NSString stringWithFormat:@"\tChannel: %d", _outputAudioCodecContext->ch_layout.nb_channels] breakline:YES];
        [self printMessage:[NSString stringWithFormat:@"\tSampleRate: %d", _outputAudioCodecContext->sample_rate] breakline:YES];
        [self printMessage:[NSString stringWithFormat:@"\tSampleFmt: %d", _outputAudioCodecContext->sample_fmt] breakline:YES];
    }
}

/// Disables the button for the duration of the transcode.
/// BUG FIX: audioEncode is asynchronous, so the old code re-enabled the
/// button immediately, making the guard ineffective. _decode_queue is
/// serial, so a block enqueued after audioEncode's work runs once the
/// transcode has finished; re-enable from there (on the main thread).
- (IBAction)startDecodeAction:(UIButton *)sender {
    sender.userInteractionEnabled = NO;
    [self audioEncode];
    dispatch_async(_decode_queue, ^{
        dispatch_async(dispatch_get_main_queue(), ^{
            sender.userInteractionEnabled = YES;
        });
    });
}

/// Runs the whole decode → resample → encode pipeline on the serial
/// _decode_queue: reads packets, decodes frames, resamples them, pushes the
/// samples through the FIFO into the encoder, then flushes the encoder and
/// finalizes the output file.
- (void)audioEncode
{
    dispatch_async(_decode_queue, ^{
        self->_pts = 0;
        int ret;
        ret = avformat_write_header(self->_outputFormatContext, NULL);
        if (ret < 0) {
            printf("文件头写入失败");
            return;
        }
        self->_origin_nb_samples = 0;
        self->_output_nb_samples = 0;
        self->_origin_frame_count = 0;
        self->_output_frame_count = 0;
        // Rough estimate of the total number of input frames, used only for
        // the progress bar: duration is in AV_TIME_BASE units (µs), so
        // µs → seconds (/1e6) → samples (×rate) → frames (/frame_size).
        float totalFrames = self->_inputFormatContext->duration * self->_inputAudioStream->codecpar->sample_rate / self->_inputAudioStream->codecpar->frame_size / 1000.0 / 1000.0;
        {
            [self printMessage:@"开始解码" breakline:YES];
            [self printMessage:@"开始重采样" breakline:YES];
            [self printMessage:@"开始编码" breakline:YES];
            [self printMessage:@"" breakline:YES];
        }
        AVPacket *inputPacket = av_packet_alloc();
        AVPacket *outputPacket = av_packet_alloc();
        while (true) {
            ret = av_read_frame(self->_inputFormatContext, inputPacket);
            if (ret != 0) {
                // End of file (or read error): drain whatever is left in the
                // FIFO by calling encdec in flush mode with an empty frame
                // that only carries the destination audio parameters.
                AVFrame *flushFrame = av_frame_alloc();
                flushFrame->ch_layout = self->_outLayout;
                flushFrame->sample_rate = self->_dstAudioSampleRate;
                flushFrame->format = self->_dstAudioSampleFmt;
                [self encdec:flushFrame outputPacket:outputPacket flush:1];
                // BUG FIX: this frame used to leak.
                av_frame_free(&flushFrame);
                av_packet_unref(inputPacket);
                break;
            }
            if (inputPacket->stream_index != self->_inputAudioStreamIndex) {
                av_packet_unref(inputPacket);
                continue;
            }
            ret = avcodec_send_packet(self->_inputAudioCodecContext, inputPacket);
            av_packet_unref(inputPacket);
            while (ret >= 0) {
                ret = avcodec_receive_frame(self->_inputAudioCodecContext, self->_inputAudioFrame);
                if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
                    break;
                }
                if (ret < 0) {
                    av_frame_unref(self->_inputAudioFrame);
                    break;
                }
                // Throttled progress update on the main thread (only redraw
                // when the bar would move by more than 1%).
                dispatch_sync(dispatch_get_main_queue(), ^{
                    float progress = self->_origin_frame_count / totalFrames;
                    float delta = progress - self.progressView.progress;
                    if (delta > 0.01) {
                        self.progressView.progress = progress;
                        [self printMessage:@"." breakline:NO];
                    }
                });
                self->_origin_nb_samples += self->_inputAudioFrame->nb_samples;
                self->_origin_frame_count += 1;
                // Resample first, then push through the FIFO/encoder.
                AVFrame *frame = [self audioConvert:self->_inputAudioFrame
                                            chanels:self->_dstAudioChannels
                                          sampleFmt:self->_dstAudioSampleFmt
                                         sampleRate:self->_dstAudioSampleRate];
                [self encdec:frame outputPacket:outputPacket flush:0];
                // Done with the resampled frame.
                av_frame_free(&frame);
            }
        }
        // BUG FIX: flush the ENCODER by sending a NULL frame and writing out
        // the remaining packets. The old code sent _outputAudioFrame to the
        // *decoder* context and discarded every packet it received.
        ret = avcodec_send_frame(self->_outputAudioCodecContext, NULL);
        if (ret < 0) {
            printf("flush fail");
        }
        while (ret >= 0) {
            ret = avcodec_receive_packet(self->_outputAudioCodecContext, outputPacket);
            if (ret < 0) {
                break; // AVERROR_EOF once the encoder is fully drained.
            }
            outputPacket->stream_index = self->_outputAudioStreamIndex;
            av_write_frame(self->_outputFormatContext, outputPacket);
            av_packet_unref(outputPacket);
        }
        printf("%lld Frames -> %lld Frames, %lld Samples -> %lld Samples\n",
               self->_origin_frame_count,
               self->_output_frame_count,
               self->_origin_nb_samples,
               self->_output_nb_samples);
        ret = av_write_trailer(self->_outputFormatContext);
        if (ret < 0) {
            printf("文件尾写入失败");
            return;
        }
        // NOTE(review): the original subtracted one before displaying the
        // input frame count; kept to preserve the displayed numbers — verify
        // whether this off-by-one adjustment is actually intended.
        self->_origin_frame_count -= 1;
        {
            [self printMessage:@"解码完成" breakline:YES];
            [self printMessage:@"重采样完成" breakline:YES];
            [self printMessage:@"编码完成" breakline:YES];
            [self printMessage:@(self->_origin_frame_count).description breakline:YES];
            [self printMessage:@"frames" breakline:NO];
            [self printMessage:@"->" breakline:NO];
            [self printMessage:@(self->_output_frame_count).description breakline:NO];
            [self printMessage:@"frames" breakline:NO];
        }
        av_packet_free(&inputPacket);
        av_packet_free(&outputPacket);
        // Rewind the demuxer so the action can be triggered again.
        av_seek_frame(self->_inputFormatContext, self->_inputAudioStreamIndex, 0, 0);
    });
}

/// Resamples a decoded frame into the destination channel layout, sample
/// format, and sample rate. Returns a newly allocated frame the caller must
/// free with av_frame_free(), or NULL on failure.
- (AVFrame *)audioConvert:(AVFrame *)inFrame
                  chanels:(int)dstChannels
                sampleFmt:(enum AVSampleFormat)dstSampleFmt
               sampleRate:(int)dstSampleRate
{
    // Allocate the destination frame and describe the target parameters;
    // swr_convert_frame() allocates its data buffers as needed.
    AVFrame *resampled = av_frame_alloc();
    resampled->ch_layout = _outLayout;
    resampled->sample_rate = dstSampleRate;
    resampled->format = dstSampleFmt;
    int status = swr_convert_frame(_swrContext, resampled, inFrame);
    if (status < 0) {
        NSLog(@"swr_convert_frame %d", status);
        av_frame_free(&resampled);
        return NULL;
    }
    // Carry the source timestamp across unchanged.
    resampled->pts = inFrame->pts;
    return resampled;
}
/// Buffers the resampled frame's samples in the FIFO and encodes/writes as
/// many full encoder frames as are available. With flush=1 the FIFO is
/// drained completely, including a final partial frame.
/// Returns NO on a FIFO error, YES otherwise.
- (BOOL)encdec:(AVFrame *)inFrame outputPacket:(AVPacket *)outputPacket flush:(int)flush
{
    if (inFrame == NULL) {
        printf("swr frame failed\n");
        return NO;
    }
    int cache_size = av_audio_fifo_size(_fifo);
    int ret = av_audio_fifo_realloc(_fifo, cache_size+inFrame->nb_samples);
    if (ret < 0) {
        printf("av_audio_fifo_realloc failed\n");
        return NO;
    }
    
    ret = av_audio_fifo_write(_fifo,
                              (void **)(inFrame->data),
                              inFrame->nb_samples);
    if (ret < 0) {
        printf("av_audio_fifo_write failed\n");
        return NO;
    }
    // BUG FIX: `>` left a frame's worth of samples sitting in the FIFO; `>=`
    // encodes as soon as a complete frame is available.
    while ( ( flush == 1 && av_audio_fifo_size(_fifo) > 0 )
           ||
           ( flush == 0 && av_audio_fifo_size(_fifo) >= _outputAudioCodecContext->frame_size )) {
        ret = av_audio_fifo_read(_fifo,
                                 (void **)(_outputAudioFrame->data),
                                 _outputAudioCodecContext->frame_size);
        if (ret < 0) {
            printf("av_audio_fifo_read failed\n");
            break;
        }
        // BUG FIX: the final flush read may return fewer samples than
        // frame_size; encode only what was actually read instead of stale
        // buffer contents.
        _outputAudioFrame->nb_samples = ret;
        // BUG FIX: stamp the pts of the frame's FIRST sample (starting at 0),
        // then advance by the frame's length.
        _outputAudioFrame->pts = _pts;
        _pts += _outputAudioFrame->nb_samples;
        ret = avcodec_send_frame(_outputAudioCodecContext, _outputAudioFrame);
        if (ret < 0) {
            printf("avcodec_send_frame failed\n");
            break;
        }
        _output_frame_count += 1;
        _output_nb_samples += _outputAudioFrame->nb_samples;
        // Pull every packet the encoder has ready and write it to the muxer.
        while (true) {
            ret = avcodec_receive_packet(_outputAudioCodecContext, outputPacket);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                // Encoder needs more input (or is finished) — not an error.
                break;
            }
            else if (ret < 0) {
                printf("avcodec_receive_packet fail\n");
                break;
            }
            else {
                outputPacket->stream_index = _outputAudioStreamIndex;
                ret = av_write_frame(_outputFormatContext, outputPacket);
                if (ret < 0) {
                    printf("av_write_frame fail\n");
                    break;
                }
            }
        }
    }
    return YES;
}

/// Appends a message to the on-screen console (and mirrors it to stdout),
/// separating it from existing text with a newline or a space, then scrolls
/// the text view to the bottom if needed. Safe to call from any thread.
- (void)printMessage:(NSString *)message breakline:(BOOL)breakline
{
    dispatch_async(dispatch_get_main_queue(), ^{
        NSMutableString *buffer = [[NSMutableString alloc] init];
        NSString *existing = self.consoleText.text;
        if (existing.length) {
            [buffer appendString:existing];
            // Separate from previous content only when there is any.
            if (breakline) {
                [buffer appendString:@"\n"];
                printf("\n");
            }
            else {
                [buffer appendString:@" "];
                printf(" ");
            }
        }
        if (message.length) {
            [buffer appendString:message];
            printf("%s", message.UTF8String);
        }
        self.consoleText.text = buffer;
        // Scroll so the newest line is visible.
        CGFloat bottomOffset = self.consoleText.contentSize.height - self.consoleText.frame.size.height;
        if (bottomOffset > 0 && bottomOffset > self.consoleText.contentOffset.y) {
            [self.consoleText setContentOffset:CGPointMake(0, bottomOffset) animated:NO];
        }
    });
}
/// Releases all FFmpeg resources owned by the controller.
- (void)dealloc
{
    if (_inputFormatContext != NULL) {
        avformat_close_input(&_inputFormatContext);
    }
    // BUG FIX: avformat_close_input() is for demuxers. An output (muxer)
    // context must have its AVIO closed and then be freed explicitly.
    if (_outputFormatContext != NULL) {
        if (_outputFormatContext->pb != NULL) {
            avio_closep(&_outputFormatContext->pb);
        }
        avformat_free_context(_outputFormatContext);
        _outputFormatContext = NULL;
    }
    
    if (_inputAudioCodecContext != NULL) {
        avcodec_free_context(&_inputAudioCodecContext);
    }
    if (_outputAudioCodecContext != NULL) {
        avcodec_free_context(&_outputAudioCodecContext);
    }
    // BUG FIX: these were previously leaked on teardown.
    if (_inputAudioFrame != NULL) {
        av_frame_free(&_inputAudioFrame);
    }
    if (_outputAudioFrame != NULL) {
        av_frame_free(&_outputAudioFrame);
    }
    if (_swrContext != NULL) {
        swr_free(&_swrContext);
    }
    if (_fifo != NULL) {
        av_audio_fifo_free(_fifo);
        _fifo = NULL;
    }
    printf("%s\n", __func__);
}
@end
