﻿//文件指定编译编码UTF-8
#pragma execution_character_set("utf-8")

/**
*    @file:    audioconverter.cpp
*    @brief:   Audio conversion helpers: wraps raw PCM data in a WAV header
*              (pcmToWav) and transcodes media files to PCM WAV via FFmpeg
*              (AudioConverter::convertToWav).
*    @author:  sanshi522@163.com
*    @date:    2024-11-28  15:58
*/
#include "audioconverter.h"

#include <cstdint>
#include <cstring>

#include <QDataStream>
#include <QDebug>
#include <QFile>


// WAV 文件头结构
struct RiffHeader {
    char riff[4];        // "RIFF"
    uint32_t fileSize;   // 文件大小（不包括前8个字节）
    char wave[4];        // "WAVE"
};

struct FmtChunk {
    char fmt[4];         // "fmt "
    uint32_t chunkSize;  // 块大小（通常是16）
    uint16_t formatTag;  // 格式标签（通常是1，表示PCM）
    uint16_t channels;   // 通道数
    uint32_t sampleRate; // 采样率
    uint32_t byteRate;   // 每秒字节数
    uint16_t blockAlign; // 每个样本块的字节数
    uint16_t bitsPerSample; // 每个样本的位数
};

struct DataChunk {
    char data[4];        // "data"
    uint32_t dataSize;   // 数据大小
};

AudioConverter::AudioConverter(QObject *parent)
    : QObject(parent)
{
}

AudioConverter::~AudioConverter()
{
    // 清理资源
    //    if (inputFormatContext) {
    //        avformat_close_input(&inputFormatContext);
    //    avformat_free_context(inputFormatContext);
    //    }
    //    if (outputFormatContext && outputFormatContext->pb) {
    //        avio_closep(&outputFormatContext->pb);
    //    }
    //    if (outputFormatContext) {
    //        avformat_free_context(outputFormatContext);
    //    }
    //    if (audioCodecContext) {
    //        avcodec_free_context(&audioCodecContext);
    //    }
    //    if (outAudioCodecContext) {
    //        avcodec_free_context(&outAudioCodecContext);
    //    }
    //    if (swrCtx) {
    //        swr_free(&swrCtx);
    //    }
}

bool pcmToWav(const QString &inputFile, const QString &outputFile, int sampleRate, int channelNum, int bitDepth)
{
    int blockSize = 1024 * 1024;
    // 打开输入文件
    QFile in(inputFile);
    if (!in.open(QIODevice::ReadOnly)) {
        qDebug() << "Could not open input file:" << inputFile;
        return false;
    }

    // 创建输出文件
    QFile out(outputFile);
    if (!out.open(QIODevice::WriteOnly)) {
        qDebug() << "Could not open output file:" << outputFile;
        in.close();
        return false;
    }

    // 计算其他参数
    uint32_t byteRate = sampleRate * channelNum * (bitDepth / 8);
    uint16_t blockAlign = channelNum * (bitDepth / 8);

    // 写入 RIFF 头
    RiffHeader riffHeader;
    riffHeader.riff[0] = 'R';
    riffHeader.riff[1] = 'I';
    riffHeader.riff[2] = 'F';
    riffHeader.riff[3] = 'F';
    riffHeader.fileSize = 0; // 先设置为0，最后再更新
    riffHeader.wave[0] = 'W';
    riffHeader.wave[1] = 'A';
    riffHeader.wave[2] = 'V';
    riffHeader.wave[3] = 'E';

    QDataStream outStream(&out);
    outStream.writeRawData(reinterpret_cast<char*>(&riffHeader), sizeof(RiffHeader));

    // 写入 fmt 块
    FmtChunk fmtChunk;
    fmtChunk.fmt[0] = 'f';
    fmtChunk.fmt[1] = 'm';
    fmtChunk.fmt[2] = 't';
    fmtChunk.fmt[3] = ' ';
    fmtChunk.chunkSize = 16;
    fmtChunk.formatTag = 1; // PCM
    fmtChunk.channels = static_cast<uint16_t>(channelNum);
    fmtChunk.sampleRate = static_cast<uint32_t>(sampleRate);
    fmtChunk.byteRate = byteRate;
    fmtChunk.blockAlign = blockAlign;
    fmtChunk.bitsPerSample = static_cast<uint16_t>(bitDepth);

    outStream.writeRawData(reinterpret_cast<char*>(&fmtChunk), sizeof(FmtChunk));

    // 写入 data 块
    DataChunk dataChunk;
    dataChunk.data[0] = 'd';
    dataChunk.data[1] = 'a';
    dataChunk.data[2] = 't';
    dataChunk.data[3] = 'a';
    dataChunk.dataSize = 0; // 先设置为0，最后再更新

    outStream.writeRawData(reinterpret_cast<char*>(&dataChunk), sizeof(DataChunk));

    // 分块读取 PCM 数据并写入输出文件
    QByteArray buffer(blockSize, 0);
    uint32_t totalDataSize = 0;

    while (true) {
        qint64 bytesRead = in.read(buffer.data(), blockSize);
        if (bytesRead <= 0) {
            break;
        }
        outStream.writeRawData(buffer.data(), bytesRead);
        totalDataSize += bytesRead;
    }

    // 更新文件大小和数据大小
    out.seek(4);
    riffHeader.fileSize = totalDataSize + 36; // 36是WAV头的大小
    outStream.writeRawData(reinterpret_cast<char*>(&riffHeader.fileSize), sizeof(riffHeader.fileSize));

    out.seek(40);
    dataChunk.dataSize = totalDataSize;
    outStream.writeRawData(reinterpret_cast<char*>(&dataChunk.dataSize), sizeof(dataChunk.dataSize));

    // 关闭文件
    in.close();
    out.close();

    return true;
}

bool AudioConverter::convertToWav(const QString &inputFile, const QString &outputFile)
{
    AVFormatContext *inputFormatContext = nullptr;
    AVCodecContext *audioCodecContext = nullptr;
    AVFormatContext *outputFormatContext = nullptr;
    AVCodecContext *outAudioCodecContext = nullptr;
    SwrContext *swrCtx = nullptr;
    AVFrame *audioFrame = nullptr;
    AVFrame *convertedFrame = nullptr;
    AVPacket *inputPacket = nullptr;
    AVPacket *audioOutputPacket = nullptr;

    // 打开输入文件
    if (avformat_open_input(&inputFormatContext, inputFile.toStdString().c_str(), nullptr, nullptr) != 0) {
        qDebug() << "Could not open input file.";
        return false;
    }

    // 获取输入流信息
    if (avformat_find_stream_info(inputFormatContext, nullptr) < 0) {
        qDebug() << "Failed to retrieve input stream information.";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        return false;

    }

    int audioStreamIndex = -1;
    for (unsigned int i = 0; i < inputFormatContext->nb_streams; i++) {
        if (inputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStreamIndex = i;
            break;
        }
    }

    if (audioStreamIndex == -1) {
        qDebug() << "No audio stream found.";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        return false;

    }

    // 找到解码器
    const AVCodec *decoder = avcodec_find_decoder(inputFormatContext->streams[audioStreamIndex]->codecpar->codec_id);
    if (!decoder) {
        qDebug() << "Unsupported codec!";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        return false;
    }

    // 初始化解码器上下文
    audioCodecContext = avcodec_alloc_context3(decoder);
    if (!audioCodecContext) {
        qDebug() << "创建音频编码器上下文失败";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        return false;
    }

    //音频流参数填充上下文
    if (avcodec_parameters_to_context(audioCodecContext, inputFormatContext->streams[audioStreamIndex]->codecpar) < 0) {
        qDebug() << "创建音频编码器上下文失败";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        return false;
    }
    if (avcodec_open2(audioCodecContext, decoder, nullptr) < 0) {
        qDebug() << "打开音频编码器失败";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        return false;
    }


    // 初始化输出格式
    avformat_alloc_output_context2(&outputFormatContext, nullptr, "wav", outputFile.toStdString().c_str());
    if (!outputFormatContext) {
        qDebug() << "无法分配输出上下文。";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        return false;
    }

    // 添加音频流到输出上下文
    AVStream *outStream = avformat_new_stream(outputFormatContext, nullptr);
    if (!outStream) {
        qDebug() << "分配输出流失败。";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        return false;
    }

    // 设置音频编码器
    const AVCodec *encoder = avcodec_find_encoder(AV_CODEC_ID_PCM_S16LE);
    if (!encoder) {
        qDebug() << "设置音频编码器失败!";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        return false;
    }

    // 初始化音频编码器上下文
    outAudioCodecContext = avcodec_alloc_context3(encoder);
    if (!outAudioCodecContext) {
        qDebug() << "无法分配音频编码器上下文。";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        return false;
    }
    //音频编码器参数
    outAudioCodecContext->sample_rate = 48000;
    outAudioCodecContext->ch_layout = audioCodecContext->ch_layout;
    outAudioCodecContext->time_base = audioCodecContext->time_base;
    outAudioCodecContext->sample_fmt = AV_SAMPLE_FMT_S16;

    if (avcodec_parameters_from_context(outStream->codecpar, outAudioCodecContext) < 0) {
        qDebug() << "将编解码器参数复制到输出流失败。";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        return false;
    }

    // 打开音频编码器
    if (avcodec_open2(outAudioCodecContext, encoder, nullptr) < 0) {
        qDebug() << "打开编解码器失败。";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        avcodec_free_context(&outAudioCodecContext);
        return false;
    }

    // 检查编码器的默认帧大小
    if (outAudioCodecContext->frame_size == 0) {
        outAudioCodecContext->frame_size = audioCodecContext->frame_size;  // 常见的帧大小
    }

    AVChannelLayout *out_ch_layout = &outAudioCodecContext->ch_layout;
    AVChannelLayout *in_ch_layout = &audioCodecContext->ch_layout;

    // 初始化重采样上下文
    swrCtx = swr_alloc();
    swr_alloc_set_opts2(&swrCtx,
                        out_ch_layout, outAudioCodecContext->sample_fmt, outAudioCodecContext->sample_rate,
                        in_ch_layout, audioCodecContext->sample_fmt, audioCodecContext->sample_rate,
                        0, nullptr);
    if (swr_init(swrCtx) < 0) {
        qDebug() << "初始化重采样上下文失败";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        avcodec_free_context(&outAudioCodecContext);
        swr_free(&swrCtx);
        return false;
    }

    // 写入文件头
    if (avio_open(&outputFormatContext->pb, outputFile.toStdString().c_str(), AVIO_FLAG_WRITE) < 0) {
        qDebug() << "无法打开输出文件。";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        avcodec_free_context(&outAudioCodecContext);
        swr_free(&swrCtx);
        return false;
    }

    if (avformat_write_header(outputFormatContext, nullptr) < 0) {
        qDebug() << "无法写入输出文件头";
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext);
        avcodec_free_context(&audioCodecContext);
        avformat_free_context(outputFormatContext);
        avcodec_free_context(&outAudioCodecContext);
        swr_free(&swrCtx);
        return false;
    }

    // 解码和编码音频帧
    inputPacket  = av_packet_alloc();
    audioOutputPacket = av_packet_alloc();
    audioFrame  = av_frame_alloc();
    convertedFrame = av_frame_alloc();
    //    if(!inputPacket || !audioFrame )
    int ret = 0;
    int64_t next_pts = 0;
    while (av_read_frame(inputFormatContext, inputPacket ) >= 0) {
        if (inputPacket ->stream_index == audioStreamIndex) {
            // 确保包的时间戳是有效的
            if (inputPacket->pts == AV_NOPTS_VALUE) {
                // 如果时间戳无效，可以尝试基于上一个有效的时间戳和帧大小来计算
                AVRational timeBase = inputFormatContext->streams[audioStreamIndex]->time_base;
                AVRational codecTimeBase = audioCodecContext->time_base;

                // 如果这是第一个包，我们可以假设其 PTS 为 0 或者基于前一个 PTS 计算
                if (next_pts == 0) {
                    inputPacket->pts = 0;
                    inputPacket->dts = 0;
                } else {
                    // 根据样本数和采样率计算下一个 PTS
                    int64_t samples_per_packet = av_get_audio_frame_duration(audioCodecContext, inputPacket->size);
                    if (samples_per_packet <= 0) {
                        samples_per_packet = audioCodecContext->frame_size;
                    }
                    next_pts += samples_per_packet;
                    inputPacket->pts = av_rescale_q(next_pts - samples_per_packet, codecTimeBase, timeBase);
                    inputPacket->dts = inputPacket->pts;
                }
            } else {
                // 如果时间戳有效，更新 next_pts
                next_pts = av_rescale_q_rnd(inputPacket->pts,
                                            inputFormatContext->streams[audioStreamIndex]->time_base,
                                            audioCodecContext->time_base,
                                            static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            }
            // 音频流处理
            ret = avcodec_send_packet(audioCodecContext, inputPacket);
            if (ret < 0) {
                break;
            }

            while (ret >= 0) {
                ret = avcodec_receive_frame(audioCodecContext, audioFrame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    break;
                }else if (ret < 0) {
                    avformat_close_input(&inputFormatContext);
                    avformat_free_context(inputFormatContext);
                    avcodec_free_context(&audioCodecContext);
                    avformat_free_context(outputFormatContext);
                    avcodec_free_context(&outAudioCodecContext);
                    av_frame_free(&audioFrame);
                    av_packet_free(&inputPacket);
                    av_packet_free(&audioOutputPacket);
                    return false;
                }
                // 确保帧的时间戳是有效的
                if (audioFrame->pts == AV_NOPTS_VALUE) {
                    audioFrame->pts = next_pts;
                }
                next_pts += audioFrame->nb_samples;

                // 重采样
                int outSamples = av_rescale_rnd(swr_get_delay(swrCtx, audioCodecContext->sample_rate) + audioFrame->nb_samples,
                                                outAudioCodecContext->sample_rate, audioCodecContext->sample_rate, AV_ROUND_UP);

                convertedFrame->format = outAudioCodecContext->sample_fmt;
                convertedFrame->ch_layout = outAudioCodecContext->ch_layout;
                convertedFrame->sample_rate = outAudioCodecContext->sample_rate;
                convertedFrame->nb_samples = outSamples;

                if (av_frame_get_buffer(convertedFrame, 0) < 0) {
                    qDebug() << "分配转换帧缓冲区错误。";
                    break;
                }

                if (swr_convert_frame(swrCtx, convertedFrame, audioFrame) < 0) {
                    qDebug() << "转换帧错误。";
                    break;
                }

                // 发送转换后的帧给编码器
                ret = avcodec_send_frame(outAudioCodecContext, convertedFrame);
                if (ret < 0) {
                    qDebug() << "发送用于编码的帧错误：" << ret;
                    avformat_close_input(&inputFormatContext);
                    avformat_free_context(inputFormatContext);
                    avcodec_free_context(&audioCodecContext);
                    avformat_free_context(outputFormatContext);
                    avcodec_free_context(&outAudioCodecContext);
                    av_frame_free(&audioFrame);
                    av_packet_free(&inputPacket);
                    av_packet_free(&audioOutputPacket);
                    return false;
                }
                // 接收编码后的包并写入输出文件
                while (true) {
                    ret = avcodec_receive_packet(outAudioCodecContext, audioOutputPacket);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                        break;
                    } else if (ret < 0) {
                        qDebug() << "接收编码后的包失败";
                        break;
                    }

                    av_packet_rescale_ts(audioOutputPacket, audioCodecContext->time_base, outStream->time_base);
                    audioOutputPacket->stream_index = outStream->index;

                    // 写入音频包到输出文件
                    ret = av_interleaved_write_frame(outputFormatContext, audioOutputPacket);
                    if (ret < 0) {
                        qDebug() << "写入音频包失败：" << ret;
                        break;
                    }
                    av_packet_unref(audioOutputPacket);
                }
                //                if (ret2 < 0) {
                //                    qDebug() << "ret2 < 0 3";
                //                    break;
                //                }
                av_frame_unref(audioFrame);
                av_frame_unref(convertedFrame);
            }
        }
        av_packet_unref(inputPacket);
    }

    // 写入文件尾
    av_write_trailer(outputFormatContext);

    // 释放资源
    av_frame_free(&audioFrame);
    av_packet_free(&inputPacket);
    av_packet_free(&audioOutputPacket);

    avcodec_free_context(&audioCodecContext);
    avcodec_free_context(&outAudioCodecContext);

    //    avformat_close_input(&inputFormatContext);
    //    avformat_free_context(inputFormatContext);

    // 关闭输入文件IO上下文
    if (inputFormatContext) {
        avio_closep(&inputFormatContext->pb); // 确保关闭输入文件IO上下文
        avformat_close_input(&inputFormatContext);
        avformat_free_context(inputFormatContext); // 直接传递指针，不要使用 &
    }

    // 关闭输出文件IO上下文
    if (outputFormatContext) {
        avio_closep(&outputFormatContext->pb); // 确保关闭输出IO上下文
        avformat_close_input(&outputFormatContext);
        avformat_free_context(outputFormatContext); // 直接传递指针，不要使用 &
    }

    swr_free(&swrCtx);
    return true;
}
