//
// Created by 伍泉 on 2024/6/7.
//

#include "FFmpegWrapper.h"
#include "log.h"

// Opens the media file at `srcPath` and stores the demuxer context in `*in_fmt_ctx`.
// Returns 0 on success or a negative AVERROR code (already logged) on failure.
static int openInputFile(const char *srcPath, AVFormatContext **in_fmt_ctx) {
    const int ret = avformat_open_input(in_fmt_ctx, srcPath, nullptr, nullptr);
    if (ret < 0)
        LOGD("open path error %s", av_err2str(ret));
    return ret;
}

// Locates the "best" stream of the requested media `type` in `in_fmt_ctx`.
// On success stores the stream index in `*index` and returns it (>= 0);
// on failure logs and returns the negative AVERROR code, leaving `*index` untouched.
static int findBestStream(AVFormatContext *in_fmt_ctx, enum AVMediaType type, int *index) {
    const int stream = av_find_best_stream(in_fmt_ctx, type, -1, -1, nullptr, 0);
    if (stream >= 0) {
        *index = stream;
    } else {
        LOGD("Could not find %s stream: %s", av_get_media_type_string(type), av_err2str(stream));
    }
    return stream;
}

static int createOutputContext(const char *dstPath, AVFormatContext **out_fmt_ctx, const AVOutputFormat **out_fmt) {
    *out_fmt_ctx = avformat_alloc_context();
    *out_fmt = av_guess_format(nullptr, dstPath, nullptr);
    if (!*out_fmt) {
        LOGD("Could not guess file format");
        return AVERROR_UNKNOWN;
    }
    (*out_fmt_ctx)->oformat = *out_fmt;
    return 0;
}

// Creates a new stream on `out_fmt_ctx` (returned via `*out_stream`) and
// copies `in_codecpar` into it for stream-copy remuxing. Returns 0 on
// success or a negative AVERROR code (logged) on failure.
static int createStreamAndCopyCodecParams(AVFormatContext *out_fmt_ctx, AVStream **out_stream, AVCodecParameters *in_codecpar) {
    AVStream *stream = avformat_new_stream(out_fmt_ctx, nullptr);
    *out_stream = stream;
    if (stream == nullptr) {
        LOGD("Failed to create output stream");
        return AVERROR_UNKNOWN;
    }

    const int ret = avcodec_parameters_copy(stream->codecpar, in_codecpar);
    if (ret < 0)
        LOGD("avcodec_parameters_copy error: %s", av_err2str(ret));
    // Clear the codec tag so the output muxer picks its own valid tag.
    stream->codecpar->codec_tag = 0;
    return ret;
}

// Opens `dstPath` for writing and attaches the AVIO handle to the output
// context. Returns 0 on success or a negative AVERROR code (logged).
static int initializeOutputFile(AVFormatContext *out_fmt_ctx, const char *dstPath) {
    const int ret = avio_open(&out_fmt_ctx->pb, dstPath, AVIO_FLAG_WRITE);
    if (ret < 0)
        LOGD("avio_open error: %s", av_err2str(ret));
    return ret;
}

// Rescales a packet's timestamps from the input stream's time base to the
// output stream's, and retargets it at output stream index 0 (each output
// file produced by decode() holds exactly one stream).
static void writePacketAndRescale(AVPacket *pkt, AVStream *in_stream, AVStream *out_stream) {
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base,
                                static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    // Rescale the original dts rather than cloning pts: with B-frames
    // dts != pts, and forcing dts = pts yields non-monotonic timestamps
    // that muxers reject.
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base,
                                static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
    pkt->pos = -1; // unknown byte position in the new file
    pkt->stream_index = 0;
}

// Sends `frame` to the encoder and drains every packet it produces, writing
// the raw packet bytes to `outfile`. Pass frame == nullptr to flush the
// encoder at end of stream. Errors are logged and the function returns
// early (best-effort, no error propagation to the caller).
static void encodeFrame(AVCodecContext *avCodecContext, AVFrame *frame, AVPacket *pkt, FILE *outfile) {
    int ret;

    if (frame) LOGD("Send frame %lli\n", frame->pts);

    /* send the frame to the encoder */
    ret = avcodec_send_frame(avCodecContext, frame);
    if (ret < 0) {
        LOGD("Error sending a frame for encoding\n");
        return;
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(avCodecContext, pkt);
        // EAGAIN: encoder needs more input; EOF: fully flushed.
        // Neither is an error, so just stop draining.
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            LOGD("Error during encoding\n");
            return;
        }

        LOGD("Write packet %lli (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}

// Encodes 100 frames of random noise as AAC and writes the raw bitstream to
// `outputFilename`. Returns 0 on success, -1 on any failure (logged).
// NOTE(review): output is a raw AAC stream with no container/ADTS framing —
// confirm downstream consumers expect that.
static int encodeAudio(const char *outputFilename) {
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!codec) {
        LOGD("Could not find encoder\n");
        return -1;
    }

    AVCodecContext *avCodecContext = avcodec_alloc_context3(codec);
    if (!avCodecContext) {
        LOGD("Could not allocate audio codec context\n");
        return -1;
    }

    avCodecContext->bit_rate = 64000;
    avCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP; // planar float, required by the native AAC encoder
    avCodecContext->sample_rate = 44100;
    avCodecContext->channel_layout = AV_CH_LAYOUT_STEREO;
    avCodecContext->channels = av_get_channel_layout_nb_channels(avCodecContext->channel_layout);

    if (avcodec_open2(avCodecContext, codec, nullptr) < 0) {
        LOGD("Could not open codec\n");
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    FILE *outfile = fopen(outputFilename, "wb");
    if (!outfile) {
        LOGD("Could not open output file %s\n", outputFilename);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        LOGD("Could not allocate packet\n");
        fclose(outfile);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        LOGD("Could not allocate audio frame\n");
        av_packet_free(&pkt);
        fclose(outfile);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    // frame_size is fixed by the opened encoder (1024 samples for AAC).
    frame->nb_samples = avCodecContext->frame_size;
    frame->format = avCodecContext->sample_fmt;
    frame->channel_layout = avCodecContext->channel_layout;

    if (av_frame_get_buffer(frame, 0) < 0) {
        LOGD("Could not allocate audio data buffers\n");
        av_frame_free(&frame);
        av_packet_free(&pkt);
        fclose(outfile);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    for (int i = 0; i < 100; i++) {
        if (av_frame_make_writable(frame) < 0)
            break;

        // AV_SAMPLE_FMT_FLTP has one *float* plane per channel. Write real
        // float samples in [-1, 1); the previous code stored raw random
        // bytes into the float planes, which the encoder would interpret
        // as garbage (possibly NaN/Inf) floats and only filled a quarter
        // of each plane.
        auto *left = reinterpret_cast<float *>(frame->data[0]);
        auto *right = reinterpret_cast<float *>(frame->data[1]);
        for (int j = 0; j < frame->nb_samples; j++) {
            const float sample =
                    static_cast<float>(rand()) / static_cast<float>(RAND_MAX) * 2.0f - 1.0f;
            left[j] = sample;
            right[j] = sample;
        }

        // pts is counted in samples (time base 1/sample_rate), so each frame
        // advances by nb_samples — not by 1 as before.
        frame->pts = static_cast<int64_t>(i) * frame->nb_samples;
        encodeFrame(avCodecContext, frame, pkt, outfile);
    }

    // Flush the encoder with a null frame to drain buffered packets.
    encodeFrame(avCodecContext, nullptr, pkt, outfile);

    fclose(outfile);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&avCodecContext);

    return 0;
}

// Encodes 25 synthetic YUV420P frames (a moving gradient test pattern) to
// `outputFilename` as a raw H.264 elementary stream. Returns 0 on success,
// -1 on failure (logged).
// NOTE(review): `inputFilename` is accepted but never read — presumably kept
// for interface symmetry with the audio path; confirm before relying on it.
static int encodeVideo(const char *inputFilename, const char *outputFilename) {
    const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        LOGD("Could not find encoder\n");
        return -1;
    }

    AVCodecContext *avCodecContext = avcodec_alloc_context3(codec);
    if (!avCodecContext) {
        LOGD("Could not allocate video codec context\n");
        return -1;
    }

    // Fixed CIF-sized 25 fps test configuration.
    avCodecContext->bit_rate = 400000;
    avCodecContext->width = 352;
    avCodecContext->height = 288;
    avCodecContext->time_base = {1, 25};
    avCodecContext->framerate = {25, 1};
    avCodecContext->gop_size = 10;
    avCodecContext->max_b_frames = 1;
    avCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec->id == AV_CODEC_ID_H264) {
        // x264-specific option: trade encode speed for compression.
        AVDictionary *opts = nullptr;
        av_dict_set(&opts, "preset", "slow", 0);
        int ret = avcodec_open2(avCodecContext, codec, &opts);
        if (ret < 0) {
            av_dict_free(&opts);
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));
            LOGD("Could not open H264 codec: %s\n", errbuf);
            avcodec_free_context(&avCodecContext);
            return -1;
        }
        av_dict_free(&opts);
    } else {
        if (avcodec_open2(avCodecContext, codec, nullptr) < 0) {
            LOGD("Could not open codec\n");
            avcodec_free_context(&avCodecContext);
            return -1;
        }
    }

    FILE *outfile = fopen(outputFilename, "wb");
    if (!outfile) {
        LOGD("Could not open output file %s\n", outputFilename);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        LOGD("Could not allocate packet\n");
        fclose(outfile);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        LOGD("Could not allocate video frame\n");
        av_packet_free(&pkt);
        fclose(outfile);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    frame->format = avCodecContext->pix_fmt;
    frame->width = avCodecContext->width;
    frame->height = avCodecContext->height;

    // 32-byte alignment for SIMD-friendly plane strides.
    if (av_frame_get_buffer(frame, 32) < 0) {
        LOGD("Could not allocate the video frame data\n");
        av_frame_free(&frame);
        av_packet_free(&pkt);
        fclose(outfile);
        avcodec_free_context(&avCodecContext);
        return -1;
    }

    for (int i = 0; i < 25; i++) {
        // The encoder may still hold references to the frame's buffers.
        if (av_frame_make_writable(frame) < 0)
            break;

        // Luma plane: full resolution gradient that shifts each frame.
        for (int y = 0; y < avCodecContext->height; y++) {
            for (int x = 0; x < avCodecContext->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }
        // Chroma planes: quarter resolution (YUV 4:2:0 subsampling).
        for (int y = 0; y < avCodecContext->height / 2; y++) {
            for (int x = 0; x < avCodecContext->width / 2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        frame->pts = i;
        encodeFrame(avCodecContext, frame, pkt, outfile);
    }

    // Flush the encoder (drains delayed/B frames).
    encodeFrame(avCodecContext, nullptr, pkt, outfile);

    fclose(outfile);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&avCodecContext);

    return 0;
}

// Logs a packet's timestamps both as raw ticks and as seconds (converted via
// the owning stream's time base), tagged with `tag` so interleaved in/out
// packets can be told apart in the log.
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag) {
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    LOGD("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
         tag,
         av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
         av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
         av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
         pkt->stream_index);
}


// Default constructor: the wrapper holds no per-instance state.
FFmpegWrapper::FFmpegWrapper() = default;

// Default destructor: nothing to release; FFmpeg resources are scoped to
// the individual member functions.
FFmpegWrapper::~FFmpegWrapper() = default;

// One-time global setup: initializes FFmpeg's network layer so network
// protocols (e.g. http/rtmp URLs) can be opened later.
void FFmpegWrapper::initFFmpeg() {
    avformat_network_init();
}

// Global teardown: undoes initFFmpeg()'s network initialization.
void FFmpegWrapper::cleanup() {
    avformat_network_deinit();
}


// Opens `filename`, probes its streams, and returns a ready demuxer context.
// Returns nullptr on any failure (logged). On open failure FFmpeg frees the
// user-supplied context itself; on probe failure we close it explicitly.
AVFormatContext* FFmpegWrapper::openInputFile(const char *filename) {
    AVFormatContext *ctx = avformat_alloc_context();

    if (avformat_open_input(&ctx, filename, nullptr, nullptr) != 0) {
        LOGD("Could not open input file %s\n", filename);
        return nullptr;
    }

    if (avformat_find_stream_info(ctx, nullptr) < 0) {
        LOGD("Could not find stream information %s\n", filename);
        avformat_close_input(&ctx);
        return nullptr;
    }

    return ctx;
}


// Allocates a codec context for `codec`, fills it from `params`, and opens
// it. Returns the ready-to-use context, or nullptr on any failure (logged);
// the partially-built context is freed before returning nullptr.
AVCodecContext* FFmpegWrapper::createCodecContext(AVCodecParameters *params, const AVCodec *codec) {
    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (ctx == nullptr) {
        LOGD("Could not allocate codec context\n");
        return nullptr;
    }

    if (avcodec_parameters_to_context(ctx, params) < 0) {
        LOGD("Could not copy codec parameters to context\n");
        avcodec_free_context(&ctx);
        return nullptr;
    }

    if (avcodec_open2(ctx, codec, nullptr) < 0) {
        LOGD( "Could not open codec\n");
        avcodec_free_context(&ctx);
        return nullptr;
    }

    return ctx;
}

// Writes one decoded frame to the matching output file:
//  - YUV420P video frames -> `videoFile` as raw planar YUV (Y, then U, V),
//  - supported audio frames -> `audioFile` as interleaved raw PCM.
// Returns false for unsupported formats or when the sample size cannot be
// determined; true otherwise.
bool FFmpegWrapper::writeDecodedFrameToFile(AVFrame *frame, FILE *videoFile, FILE *audioFile) {
    LOGD("write video/audio frame, format: %d\n", frame->format);

    if (videoFile && frame->format == AV_PIX_FMT_YUV420P) {
        // Copy row by row to strip the linesize padding.
        for (int i = 0; i < frame->height; i++) {
            fwrite(frame->data[0] + i * frame->linesize[0], 1, frame->width, videoFile);
        }
        for (int i = 0; i < frame->height / 2; i++) {
            fwrite(frame->data[1] + i * frame->linesize[1], 1, frame->width / 2, videoFile);
        }
        for (int i = 0; i < frame->height / 2; i++) {
            fwrite(frame->data[2] + i * frame->linesize[2], 1, frame->width / 2, videoFile);
        }
        LOGD("write video frame\n");
    } else if (audioFile && (frame->format == AV_SAMPLE_FMT_FLTP ||
                             frame->format == AV_SAMPLE_FMT_S16 ||
                             frame->format == AV_SAMPLE_FMT_S16P ||
                             frame->format == AV_SAMPLE_FMT_FLT ||
                             frame->format == AV_SAMPLE_FMT_S32)) {
        int data_size = av_get_bytes_per_sample((AVSampleFormat)frame->format);
        if (data_size < 0) {
            LOGD("Failed to calculate data size\n");
            return false;
        }

        if (av_sample_fmt_is_planar((AVSampleFormat)frame->format)) {
            // Planar (FLTP/S16P): one plane per channel; interleave on write.
            for (int i = 0; i < frame->nb_samples; i++) {
                for (int ch = 0; ch < frame->channels; ch++) {
                    fwrite(frame->data[ch] + data_size * i, 1, data_size, audioFile);
                }
            }
        } else {
            // Packed (S16/FLT/S32): every channel is already interleaved in
            // data[0]; data[1..] are NULL, so the old per-channel indexing
            // into data[ch] read invalid pointers for multi-channel audio.
            fwrite(frame->data[0], 1,
                   (size_t)data_size * frame->nb_samples * frame->channels, audioFile);
        }
        LOGD("write audio frame\n");
    } else {
        LOGD("Unsupported format: %d\n", frame->format);
        return false;
    }
    return true;
}

bool FFmpegWrapper::openOutputFile(const char *filename, AVFormatContext **outputFormatContext) {
    if (avformat_alloc_output_context2(outputFormatContext, nullptr, nullptr, filename) < 0) {
        LOGD("Could not create output format context\n");
        return false;
    }

    if (!((*outputFormatContext)->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&(*outputFormatContext)->pb, filename, AVIO_FLAG_WRITE) < 0) {
            LOGD("Could not open output file %s\n", filename);
            return false;
        }
    }

    return true;
}

// Releases the output-side resources of a conversion: closes the output AVIO
// handle (unless the format writes no file), then frees the format context
// and both codec contexts. All parameters may be null.
// `outStream` is owned by `formatContext` and freed with it; the parameter
// itself is currently unused.
// NOTE: avcodec_free_context() is given the address of our *local* pointer
// copies, so the caller's pointers are left dangling and must not be reused.
void FFmpegWrapper::closeFileResources(AVFormatContext *formatContext, AVCodecContext *decoderContext, AVCodecContext *encoderContext, AVStream *outStream) {
    if (formatContext && !(formatContext->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&formatContext->pb);
    }
    if (formatContext) avformat_free_context(formatContext);
    if (encoderContext) avcodec_free_context(&encoderContext);
    if (decoderContext) avcodec_free_context(&decoderContext);
}

bool FFmpegWrapper::decode(const char *inputFilename, const char *videoOutputFilename, const char *audioOutputFilename) {
    int ret;
    AVFormatContext *in_fmt_ctx = nullptr;
    AVCodecParameters *in_codecpar = nullptr;
    AVFormatContext *out_audio_fmt_ctx = nullptr;
    AVFormatContext *out_video_fmt_ctx = nullptr;
    const AVOutputFormat *out_audio_fmt = nullptr;
    const AVOutputFormat *out_video_fmt = nullptr;
    AVStream *out_audio_stream = nullptr;
    AVStream *out_video_stream = nullptr;
    AVStream *in_stream = nullptr;
    AVPacket pkt;

    int audio_index, video_index;

    ret = reinterpret_cast<int>(::openInputFile(inputFilename, &in_fmt_ctx));
    if (ret < 0) goto end;

    ret = findBestStream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, &audio_index);
    if (ret < 0) goto end;

    ret = findBestStream(in_fmt_ctx, AVMEDIA_TYPE_VIDEO, &video_index);
    if (ret < 0) goto end;

    in_stream = in_fmt_ctx->streams[audio_index];
    in_codecpar = in_stream->codecpar;
    if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
        LOGD("The Codec type is invalid! about audio");
        goto end;
    }

    ret = createOutputContext(audioOutputFilename, &out_audio_fmt_ctx, &out_audio_fmt);
    if (ret < 0) goto end;

    ret = createStreamAndCopyCodecParams(out_audio_fmt_ctx, &out_audio_stream, in_codecpar);
    if (ret < 0) goto end;

    ret = initializeOutputFile(out_audio_fmt_ctx, audioOutputFilename);
    if (ret < 0) goto end;

    in_stream = in_fmt_ctx->streams[video_index];
    in_codecpar = in_stream->codecpar;
    if (in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
        LOGD("The Codec type is invalid! about video");
        goto end;
    }

    ret = createOutputContext(videoOutputFilename, &out_video_fmt_ctx, &out_video_fmt);
    if (ret < 0) goto end;

    ret = createStreamAndCopyCodecParams(out_video_fmt_ctx, &out_video_stream, in_codecpar);
    if (ret < 0) goto end;

    ret = initializeOutputFile(out_video_fmt_ctx, videoOutputFilename);
    if (ret < 0) goto end;

    av_init_packet(&pkt);
    pkt.data = nullptr;
    pkt.size = 0;

    ret = avformat_write_header(out_audio_fmt_ctx, nullptr);
    if (ret < 0) {
        LOGD("avformat_write_header (audio) error: %s", av_err2str(ret));
        goto end;
    }

    ret = avformat_write_header(out_video_fmt_ctx, nullptr);
    if (ret < 0) {
        LOGD("avformat_write_header (video) error: %s", av_err2str(ret));
        goto end;
    }

    while (av_read_frame(in_fmt_ctx, &pkt) == 0) {
        if (pkt.stream_index == audio_index) {
            writePacketAndRescale(&pkt, in_fmt_ctx->streams[audio_index], out_audio_stream);
            av_interleaved_write_frame(out_audio_fmt_ctx, &pkt);
            LOGD("audio frame write %lld", pkt.pts);
            av_packet_unref(&pkt);
        } else if (pkt.stream_index == video_index) {
            writePacketAndRescale(&pkt, in_fmt_ctx->streams[video_index], out_video_stream);
            av_interleaved_write_frame(out_video_fmt_ctx, &pkt);
            LOGD("video frame write %lld", pkt.pts);
            av_packet_unref(&pkt);
        }
    }

    av_write_trailer(out_audio_fmt_ctx);
    av_write_trailer(out_video_fmt_ctx);

    end:
    if (in_fmt_ctx) avformat_close_input(&in_fmt_ctx);
    if (out_audio_fmt_ctx) {
        if (out_audio_fmt_ctx->pb) avio_close(out_audio_fmt_ctx->pb);
        avformat_free_context(out_audio_fmt_ctx);
    }
    if (out_video_fmt_ctx) {
        if (out_video_fmt_ctx->pb) avio_close(out_video_fmt_ctx->pb);
        avformat_free_context(out_video_fmt_ctx);
    }

    return ret >= 0;
}

// Runs the synthetic video and/or audio encoders against `outputFilename`.
// An input filename that is null or empty skips the corresponding encoder.
// Returns false as soon as either encoder fails.
bool FFmpegWrapper::encode(const char *videoInputFilename, const char *audioInputFilename, const char *outputFilename) {
    const bool haveVideo = videoInputFilename != nullptr && videoInputFilename[0] != '\0';
    const bool haveAudio = audioInputFilename != nullptr && audioInputFilename[0] != '\0';

    if (haveVideo && encodeVideo(videoInputFilename, outputFilename) < 0)
        return false;

    if (haveAudio && encodeAudio(outputFilename) < 0)
        return false;

    return true;
}

bool FFmpegWrapper::convertAudioToAAC(const char *inputFilename, const char *outputFilename) {
    AVFormatContext *inputFormatContext = nullptr;
    AVFormatContext *outputFormatContext = nullptr;
    AVCodecContext *audioDecoderContext = nullptr;
    AVCodecContext *audioEncoderContext = nullptr;
    AVStream *audioOutStream = nullptr;
    int audioStreamIndex = -1;
    AVPacket *packet = nullptr;
    AVFrame *frame = nullptr;

    inputFormatContext = openInputFile(inputFilename);
    if (!inputFormatContext) return false;

    // 查找音频流
    for (unsigned int i = 0; i < inputFormatContext->nb_streams; i++) {
        if (inputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioStreamIndex = i;
            break;
        }
    }

    if (audioStreamIndex == -1) {
        LOGD("Could not find audio stream in input file\n");
        return false;
    }

    // 创建音频解码器上下文
    AVCodecParameters *audioParams = inputFormatContext->streams[audioStreamIndex]->codecpar;
    const AVCodec *audioDecoder = avcodec_find_decoder(audioParams->codec_id);
    if (!audioDecoder) {
        LOGD("Could not find audio decoder\n");
        return false;
    }

    audioDecoderContext = createCodecContext(audioParams, audioDecoder);
    if (!audioDecoderContext) return false;

    // 打开输出文件
    if (!openOutputFile(outputFilename, &outputFormatContext)) return false;

    // 创建音频编码器上下文
    const AVCodec *audioEncoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!audioEncoder) {
        LOGD( "Could not find audio encoder\n");
        goto end;
    }

    audioEncoderContext = avcodec_alloc_context3(audioEncoder);
    if (!audioEncoderContext) {
        LOGD( "Could not allocate audio encoder context\n");
        goto end;
    }

    audioEncoderContext->bit_rate = 64000;
    audioEncoderContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
    audioEncoderContext->channel_layout = audioDecoderContext->channel_layout;
    audioEncoderContext->sample_rate = audioDecoderContext->sample_rate;
    audioEncoderContext->channels = av_get_channel_layout_nb_channels(audioEncoderContext->channel_layout);
    audioEncoderContext->time_base = (AVRational){1, audioEncoderContext->sample_rate};

    if (avcodec_open2(audioEncoderContext, audioEncoder, nullptr) < 0) {
        LOGD( "Could not open audio encoder\n");
        goto end;
    }

    // 创建音频输出流
    audioOutStream = avformat_new_stream(outputFormatContext, nullptr);
    if (!audioOutStream) {
        LOGD( "Failed to allocate audio output stream\n");
        goto end;
    }

    if (avcodec_parameters_from_context(audioOutStream->codecpar, audioEncoderContext) < 0) {
        LOGD( "Failed to copy audio encoder parameters to output stream\n");
        goto end;
    }

    // 写入文件头
    if (avformat_write_header(outputFormatContext, nullptr) < 0) {
        LOGD( "Error occurred when opening output file\n");
        goto end;
    }

    // 编码音频帧
    packet = av_packet_alloc();
    frame = av_frame_alloc();

    while (av_read_frame(inputFormatContext, packet) >= 0) {
        if (packet->stream_index == audioStreamIndex) {
            if (avcodec_send_packet(audioDecoderContext, packet) == 0) {
                while (avcodec_receive_frame(audioDecoderContext, frame) == 0) {
                    if (avcodec_send_frame(audioEncoderContext, frame) == 0) {
                        while (avcodec_receive_packet(audioEncoderContext, packet) == 0) {
                            packet->stream_index = audioOutStream->index;
                            av_packet_rescale_ts(packet, audioEncoderContext->time_base, audioOutStream->time_base);
                            av_interleaved_write_frame(outputFormatContext, packet);
                            av_packet_unref(packet);
                        }
                    }
                    av_frame_unref(frame);
                }
            }
            av_packet_unref(packet);
        }
    }

    av_write_trailer(outputFormatContext);

    end:
    if (packet) av_packet_free(&packet);
    if (frame) av_frame_free(&frame);
    closeFileResources(outputFormatContext, audioDecoderContext, audioEncoderContext, audioOutStream);
    if (inputFormatContext) avformat_close_input(&inputFormatContext);

    return true;
}

bool FFmpegWrapper::convertVideoToH264(const char *inputFilename, const char *outputFilename) {
    AVFormatContext *inputFormatContext = nullptr;
    AVFormatContext *outputFormatContext = nullptr;
    AVCodecContext *videoDecoderContext = nullptr;
    AVCodecContext *videoEncoderContext = nullptr;
    AVStream *videoOutStream = nullptr;
    int videoStreamIndex = -1;
    AVPacket *packet = nullptr;
    AVFrame *frame = nullptr;

    inputFormatContext = openInputFile(inputFilename);
    if (!inputFormatContext) return false;

    // 查找视频流
    for (unsigned int i = 0; i < inputFormatContext->nb_streams; i++) {
        if (inputFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStreamIndex = i;
            break;
        }
    }

    if (videoStreamIndex == -1) {
        LOGD( "Could not find video stream in input file\n");
        return false;
    }

    // 创建视频解码器上下文
    AVCodecParameters *videoParams = inputFormatContext->streams[videoStreamIndex]->codecpar;
    const AVCodec *videoDecoder = avcodec_find_decoder(videoParams->codec_id);
    if (!videoDecoder) {
        LOGD( "Could not find video decoder\n");
        return false;
    }

    videoDecoderContext = createCodecContext(videoParams, videoDecoder);
    if (!videoDecoderContext) return false;

    // 打开输出文件
    if (!openOutputFile(outputFilename, &outputFormatContext)) return false;

    // 创建视频编码器上下文
    const AVCodec *videoEncoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!videoEncoder) {
        LOGD( "Could not find video encoder\n");
        goto end;
    }

    videoEncoderContext = avcodec_alloc_context3(videoEncoder);
    if (!videoEncoderContext) {
        LOGD( "Could not allocate video encoder context\n");
        goto end;
    }

    videoEncoderContext->bit_rate = 400000;
    videoEncoderContext->width = videoDecoderContext->width;
    videoEncoderContext->height = videoDecoderContext->height;
    videoEncoderContext->time_base = videoDecoderContext->time_base;
    videoEncoderContext->framerate = videoDecoderContext->framerate;
    videoEncoderContext->gop_size = 10;
    videoEncoderContext->max_b_frames = 1;
    videoEncoderContext->pix_fmt = AV_PIX_FMT_YUV420P;

    if (avcodec_open2(videoEncoderContext, videoEncoder, nullptr) < 0) {
        LOGD( "Could not open video encoder\n");
        goto end;
    }

    // 创建视频输出流
    videoOutStream = avformat_new_stream(outputFormatContext, nullptr);
    if (!videoOutStream) {
        LOGD( "Failed to allocate video output stream\n");
        goto end;
    }

    if (avcodec_parameters_from_context(videoOutStream->codecpar, videoEncoderContext) < 0) {
        LOGD( "Failed to copy video encoder parameters to output stream\n");
        goto end;
    }

    // 写入文件头
    if (avformat_write_header(outputFormatContext, nullptr) < 0) {
        LOGD( "Error occurred when opening output file\n");
        goto end;
    }

    // 编码视频帧
    packet = av_packet_alloc();
    frame = av_frame_alloc();

    while (av_read_frame(inputFormatContext, packet) >= 0) {
        if (packet->stream_index == videoStreamIndex) {
            if (avcodec_send_packet(videoDecoderContext, packet) == 0) {
                while (avcodec_receive_frame(videoDecoderContext, frame) == 0) {
                    if (avcodec_send_frame(videoEncoderContext, frame) == 0) {
                        while (avcodec_receive_packet(videoEncoderContext, packet) == 0) {
                            packet->stream_index = videoOutStream->index;
                            av_packet_rescale_ts(packet, videoEncoderContext->time_base, videoOutStream->time_base);
                            av_interleaved_write_frame(outputFormatContext, packet);
                            av_packet_unref(packet);
                        }
                    }
                    av_frame_unref(frame);
                }
            }
            av_packet_unref(packet);
        }
    }

    av_write_trailer(outputFormatContext);

    end:
    if (packet) av_packet_free(&packet);
    if (frame) av_frame_free(&frame);
    closeFileResources(outputFormatContext, videoDecoderContext, videoEncoderContext, videoOutStream);
    if (inputFormatContext) avformat_close_input(&inputFormatContext);

    return true;
}

// Sends one packet to the video decoder and drains all resulting frames,
// saving every 20th decoded frame as a raw YUV file named
// "<filename>-<frame_number>.yuv". Errors are logged and the function
// returns early (best-effort, no error propagation).
void FFmpegWrapper::decodeVideo(AVCodecContext *avCodecContext, AVFrame *avFrame, AVPacket *pkt, const char *filename) {
    char buf[1024];
    int ret = avcodec_send_packet(avCodecContext, pkt);

    if (ret < 0) {
        LOGD("Error sending a packet for decoding: %s\n", av_err2str(ret));
        return;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(avCodecContext, avFrame);
        // EAGAIN: decoder needs more input; EOF: fully drained. Not errors.
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return;
        } else if (ret < 0) {
            LOGD("Error during decoding\n");
            return;
        }

        // Sample every 20th frame to keep the output volume manageable.
        if (avCodecContext->frame_number % 20 == 0) {
            LOGD("Saving frame %3d\n", avCodecContext->frame_number);
            snprintf(buf, sizeof(buf), "%s-%d.yuv", filename, avCodecContext->frame_number);
            saveYUV(avFrame, buf);
        }
    }
}


// Sends one packet to the audio decoder, drains all resulting frames, and
// appends the samples to `outfile` as interleaved raw PCM. Errors are
// logged and the function returns early (best-effort).
void FFmpegWrapper::decodeAudio(AVCodecContext *avCodecContext, AVPacket *pkt, AVFrame *avFrame,
                                FILE *outfile) {
    int i, ch;
    int ret, data_size;

    /* send the packet with the compressed data to the decoder */
    ret = avcodec_send_packet(avCodecContext, pkt);
    if (ret < 0) {
        LOGD("Error sending a packet for decoding: %s\n",
             av_err2str(ret));
        return;
    }

    /* read all the output frames (in general there may be any number of them */
    while (ret >= 0) {
        // Decode one frame into avFrame.
        ret = avcodec_receive_frame(avCodecContext, avFrame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            LOGD("Error during decoding\n");
            return;
        }

        // Bytes per single sample of one channel for this sample format;
        // every channel contributes data_size bytes per sample.
        data_size = av_get_bytes_per_sample(avCodecContext->sample_fmt);
        if (data_size < 0) {
            /* This should not occur, checking just for paranoia */
            LOGD("Failed to calculate data size\n");
            return;
        }

        // Interleave channels on write, so the file layout is:
        // sample1-ch1, sample1-ch2, sample2-ch1, sample2-ch2, ...
        // NOTE(review): indexing data[ch] assumes a *planar* sample format
        // (one plane per channel); for packed formats data[1] is NULL —
        // confirm this decoder always outputs planar samples.
        for (i = 0; i < avFrame->nb_samples; i++)
            for (ch = 0; ch < avCodecContext->channels; ch++)
                fwrite(avFrame->data[ch] + data_size * i, 1, data_size, outfile);
    }
}

// Dumps one YUV 4:2:0 frame to `filename` as raw planar YUV: the full-size
// Y plane followed by the quarter-size U and V planes, with linesize
// padding stripped row by row.
void FFmpegWrapper::saveYUV(AVFrame *avFrame, char *filename) {
    FILE *file = fopen(filename, "wb");
    if (file == nullptr) {
        LOGD("Could not open output file\n");
        return;
    }

    const int width = avFrame->width;
    const int height = avFrame->height;

    // Plane 0 is luma at full resolution; planes 1/2 are chroma at
    // half resolution in both dimensions (4:2:0 subsampling).
    const int planeWidths[3] = {width, width / 2, width / 2};
    const int planeHeights[3] = {height, height / 2, height / 2};

    for (int plane = 0; plane < 3; plane++) {
        for (int row = 0; row < planeHeights[plane]; row++) {
            fwrite(avFrame->data[plane] + row * avFrame->linesize[plane],
                   1, planeWidths[plane], file);
        }
    }

    fclose(file);
}

// Maps an AVSampleFormat to the format-name string understood by tools like
// ffplay, picking the big- or little-endian variant to match the host CPU.
// On success stores the name in `*fmt` and returns 0; on an unsupported
// format logs and returns -1 with `*fmt` set to nullptr.
int FFmpegWrapper::getSampleFmtFromName(const char **fmt, enum AVSampleFormat sample_fmt) {
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt;
        const char *fmt_be, *fmt_le;
    };
    static const sample_fmt_entry kEntries[] = {
            {AV_SAMPLE_FMT_U8,  "u8",    "u8"},
            {AV_SAMPLE_FMT_S16, "s16be", "s16le"},
            {AV_SAMPLE_FMT_S32, "s32be", "s32le"},
            {AV_SAMPLE_FMT_FLT, "f32be", "f32le"},
            {AV_SAMPLE_FMT_DBL, "f64be", "f64le"},
    };

    *fmt = nullptr;

    for (const sample_fmt_entry &entry : kEntries) {
        if (entry.sample_fmt == sample_fmt) {
            // AV_NE selects the big-endian name on BE hosts, LE otherwise.
            *fmt = AV_NE(entry.fmt_be, entry.fmt_le);
            return 0;
        }
    }

    LOGD("Sample format %s is not supported as output format\n", av_get_sample_fmt_name(sample_fmt));
    return -1;
}



