#include "FFmpegProcessor.h"
#include <QDebug>
#include <QVideoFrame>
#include <QImage>

// Process-wide accessor. Meyers singleton: the function-local static is
// initialized on first use and is thread-safe under C++11 magic statics.
FFmpegProcessor& FFmpegProcessor::instance() {
    static FFmpegProcessor singleton;
    return singleton;
}

// Constructor: brings up FFmpeg networking support and eagerly opens all four
// codec contexts (H.264 encode/decode, AAC encode/decode). Each init*() logs
// and leaves its context null on failure, so construction never throws; the
// encode/decode entry points check for null contexts before use.
FFmpegProcessor::FFmpegProcessor() {
    avformat_network_init();
    qDebug() << "初始化视频编码器...";  // "initializing video encoder..."
    initVideoEncoder();
    qDebug() << "初始化视频解码器...";  // "initializing video decoder..."
    initVideoDecoder();
    qDebug() << "初始化音频编码器...";  // "initializing audio encoder..."
    initAudioEncoder();
    qDebug() << "初始化音频解码器...";  // "initializing audio decoder..."
    initAudioDecoder();
    qDebug() << "FFmpegProcessor 初始化完成";  // "FFmpegProcessor init complete"
}

// Destructor: releases scaler/resampler and all codec contexts.
// sws_freeContext(), swr_free() and avcodec_free_context() all tolerate
// null, so partially-initialized instances tear down cleanly.
FFmpegProcessor::~FFmpegProcessor() {
    sws_freeContext(sws_ctx);
    swr_free(&swr_ctx);

    if(video_enc_ctx) avcodec_free_context(&video_enc_ctx);
    if(video_dec_ctx) avcodec_free_context(&video_dec_ctx);
    if(audio_enc_ctx) avcodec_free_context(&audio_enc_ctx);
    if(audio_dec_ctx) avcodec_free_context(&audio_dec_ctx);

    // FIX: pair the avformat_network_init() done in the constructor; the
    // original leaked the network-layer initialization.
    avformat_network_deinit();
}

void FFmpegProcessor::initVideoEncoder() {
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        qCritical() << "H.264 encoder not found";
        return;
    }

    video_enc_ctx = avcodec_alloc_context3(codec);
    if (!video_enc_ctx) {
        qCritical() << "Failed to allocate video encoder context";
        return;
    }

    video_enc_ctx->bit_rate = 400000;
    video_enc_ctx->width = 640;
    video_enc_ctx->height = 480;
    video_enc_ctx->time_base = {1, 25};
    video_enc_ctx->framerate = {25, 1};
    video_enc_ctx->gop_size = 10;
    video_enc_ctx->max_b_frames = 1;
    video_enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;

    av_opt_set(video_enc_ctx->priv_data, "preset", "fast", 0);

    if (avcodec_open2(video_enc_ctx, codec, nullptr) < 0) {
        qCritical() << "Failed to open video encoder";
        avcodec_free_context(&video_enc_ctx);
    }
}

void FFmpegProcessor::initVideoDecoder() {
    const AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        qCritical() << "H.264 decoder not found";
        return;
    }

    video_dec_ctx = avcodec_alloc_context3(codec);
    if (!video_dec_ctx) {
        qCritical() << "Failed to allocate video decoder context";
        return;
    }

    if (avcodec_open2(video_dec_ctx, codec, nullptr) < 0) {
        qCritical() << "Failed to open video decoder";
        avcodec_free_context(&video_dec_ctx);
    }
}

void FFmpegProcessor::initAudioEncoder() {
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!codec) {
        qCritical() << "AAC encoder not found";
        return;
    }

    audio_enc_ctx = avcodec_alloc_context3(codec);
    if (!audio_enc_ctx) {
        qCritical() << "Failed to allocate audio encoder context";
        return;
    }

    audio_enc_ctx->bit_rate = 64000;
    audio_enc_ctx->sample_rate = 44100;
    av_channel_layout_default(&audio_enc_ctx->ch_layout, 1); // Mono
    audio_enc_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    if (avcodec_open2(audio_enc_ctx, codec, nullptr) < 0) {
        qCritical() << "Failed to open audio encoder";
        avcodec_free_context(&audio_enc_ctx);
    }
}

void FFmpegProcessor::initAudioDecoder() {
    const AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_AAC);
    if (!codec) {
        qCritical() << "AAC decoder not found";
        return;
    }

    audio_dec_ctx = avcodec_alloc_context3(codec);
    if (!audio_dec_ctx) {
        qCritical() << "Failed to allocate audio decoder context";
        return;
    }

    av_channel_layout_default(&audio_dec_ctx->ch_layout, 1); // Mono
    audio_dec_ctx->sample_rate = 44100;
    audio_dec_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    if (avcodec_open2(audio_dec_ctx, codec, nullptr) < 0) {
        qCritical() << "Failed to open audio decoder";
        avcodec_free_context(&audio_dec_ctx);
    }
}

// Encode one QVideoFrame to H.264, returning the concatenated packet bytes
// produced so far (may be empty while the encoder is still buffering).
// The frame is scaled/converted from RGB32 to the encoder's fixed 640x480
// YUV420P geometry via a cached SwsContext.
// NOTE(review): assumes the mapped frame's plane 0 is RGB32 — confirm the
// capture pipeline always delivers that format; other formats would be
// misinterpreted, not rejected.
QByteArray FFmpegProcessor::encodeVideo(const QVideoFrame& frame) {
    QMutexLocker lock(&video_mutex);
    if (!video_enc_ctx) return {};

    // QVideoFrame is implicitly shared; map a shallow copy read-only.
    QVideoFrame mappedFrame = frame;
    if (!mappedFrame.map(QVideoFrame::ReadOnly)) {
        qWarning() << "Failed to map video frame";
        return {};
    }

    AVFrame* avframe = av_frame_alloc();
    if (!avframe) {
        mappedFrame.unmap();
        return {};
    }

    avframe->format = AV_PIX_FMT_YUV420P;
    avframe->width = video_enc_ctx->width;
    avframe->height = video_enc_ctx->height;

    // FIX: the original set pts = 0 on every frame; H.264 encoders require
    // strictly increasing timestamps and reject/garble repeated pts after
    // the first frame. The counter is serialized by video_mutex.
    static int64_t next_pts = 0;
    avframe->pts = next_pts++;

    if (av_frame_get_buffer(avframe, 32) < 0) {
        av_frame_free(&avframe);
        mappedFrame.unmap();
        return {};
    }

    const int src_width = static_cast<int>(mappedFrame.width());
    const int src_height = static_cast<int>(mappedFrame.height());

    // Reuse (or lazily rebuild) the conversion context for this geometry.
    sws_ctx = sws_getCachedContext(sws_ctx,
                                   src_width, src_height, AV_PIX_FMT_RGB32,
                                   avframe->width, avframe->height, AV_PIX_FMT_YUV420P,
                                   SWS_BILINEAR, nullptr, nullptr, nullptr);

    if (!sws_ctx) {
        av_frame_free(&avframe);
        mappedFrame.unmap();
        return {};
    }

    const uint8_t* data = mappedFrame.bits(0);
    const int linesize = mappedFrame.bytesPerLine(0);
    sws_scale(sws_ctx, &data, &linesize, 0, src_height,
              avframe->data, avframe->linesize);

    if (avcodec_send_frame(video_enc_ctx, avframe) < 0) {
        qWarning() << "Error sending video frame";
        av_frame_free(&avframe);
        mappedFrame.unmap();
        return {};
    }

    QByteArray result;
    AVPacket* pkt = av_packet_alloc();
    if (!pkt) {
        av_frame_free(&avframe);
        mappedFrame.unmap();
        return {};
    }

    // Drain every packet the encoder has ready; EAGAIN/EOF just means
    // "no more output for now", not an error.
    while (true) {
        int ret = avcodec_receive_packet(video_enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        }
        if (ret < 0) {
            qWarning() << "Error receiving packet";
            break;
        }

        result.append(reinterpret_cast<const char*>(pkt->data), pkt->size);
        av_packet_unref(pkt);
    }

    av_frame_free(&avframe);
    av_packet_free(&pkt);
    mappedFrame.unmap();
    return result;
}

// Decode one H.264 packet and return the first decoded picture as raw RGB32
// bytes. Returns an empty array for empty input, on any codec error, or when
// the decoder needs more data before it can emit a frame (AVERROR(EAGAIN)).
// NOTE(review): each row of the result is rgbFrame->linesize[0] bytes, which
// may exceed width*4 due to alignment padding — confirm the caller uses the
// stride (not width) when wrapping these bytes in a QImage.
QByteArray FFmpegProcessor::decodeVideo(const QByteArray& packet) {
    QMutexLocker lock(&video_mutex);
    if (!video_dec_ctx || packet.isEmpty()) return QByteArray();

    AVPacket* pkt = av_packet_alloc();
    if (!pkt) return QByteArray();

    // Borrow the QByteArray's buffer; pkt->buf stays null so av_packet_free
    // will not try to release bytes it does not own. `packet` must outlive
    // the send_packet call (it does — FFmpeg copies internally).
    pkt->data = reinterpret_cast<uint8_t*>(const_cast<char*>(packet.data()));
    pkt->size = static_cast<int>(packet.size());

    if (avcodec_send_packet(video_dec_ctx, pkt) < 0) {
        av_packet_free(&pkt);
        return QByteArray();
    }

    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        av_packet_free(&pkt);
        return QByteArray();
    }

    // Only the first available frame is taken; with B-frames a single packet
    // can, in principle, release more than one — later frames are picked up
    // by subsequent calls since the decoder context persists.
    int ret = avcodec_receive_frame(video_dec_ctx, frame);
    if (ret < 0) {
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return QByteArray();
    }

    // Allocate a destination frame for the RGB32 conversion.
    AVFrame* rgbFrame = av_frame_alloc();
    if (!rgbFrame) {
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return QByteArray();
    }

    rgbFrame->format = AV_PIX_FMT_RGB32;
    rgbFrame->width = frame->width;
    rgbFrame->height = frame->height;
    if (av_frame_get_buffer(rgbFrame, 32) < 0) {
        av_frame_free(&rgbFrame);
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return QByteArray();
    }

    // Cached context is shared with encodeVideo (both run under video_mutex);
    // geometry/format changes make sws_getCachedContext rebuild it.
    sws_ctx = sws_getCachedContext(sws_ctx,
                                   frame->width, frame->height, static_cast<AVPixelFormat>(frame->format),
                                   frame->width, frame->height, AV_PIX_FMT_RGB32,
                                   SWS_BILINEAR, nullptr, nullptr, nullptr);

    if (!sws_ctx) {
        av_frame_free(&rgbFrame);
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return QByteArray();
    }

    // Pixel-format conversion (typically YUV420P -> RGB32) at 1:1 scale.
    sws_scale(sws_ctx, frame->data, frame->linesize, 0, frame->height,
              rgbFrame->data, rgbFrame->linesize);

    // Copy the single packed RGB plane (including any stride padding).
    QByteArray result;
    result.resize(rgbFrame->height * rgbFrame->linesize[0]);
    memcpy(result.data(), rgbFrame->data[0], rgbFrame->height * rgbFrame->linesize[0]);

    av_frame_free(&rgbFrame);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return result;
}
// Encode `samples` mono FLTP samples from `pcm` to AAC, returning the packet
// bytes produced so far (may be empty while the encoder buffers).
// `pcm` must hold at least `samples` samples in the encoder's sample format
// (planar float, one plane since the layout is mono).
QByteArray FFmpegProcessor::encodeAudio(const QByteArray& pcm, int samples) {
    QMutexLocker lock(&audio_mutex);
    if (!audio_enc_ctx || pcm.isEmpty() || samples <= 0) return {};

    AVFrame* frame = av_frame_alloc();
    if (!frame) return {};

    frame->nb_samples = samples;
    frame->format = audio_enc_ctx->sample_fmt;
    av_channel_layout_copy(&frame->ch_layout, &audio_enc_ctx->ch_layout);

    if (av_frame_get_buffer(frame, 0) < 0) {
        av_frame_free(&frame);
        return {};
    }

    // FIX: the original memcpy'd pcm.size() bytes into a buffer sized for
    // `samples` samples — a heap overflow whenever the caller's buffer was
    // larger than samples * bytes_per_sample. Validate and copy exactly one
    // plane's worth instead.
    const int plane_bytes = samples * av_get_bytes_per_sample(audio_enc_ctx->sample_fmt);
    if (pcm.size() < plane_bytes) {
        qWarning() << "encodeAudio: PCM buffer too small for sample count";
        av_frame_free(&frame);
        return {};
    }
    memcpy(frame->data[0], pcm.constData(), plane_bytes);

    if (avcodec_send_frame(audio_enc_ctx, frame) < 0) {
        av_frame_free(&frame);
        return {};
    }

    QByteArray result;
    AVPacket* pkt = av_packet_alloc();
    if (!pkt) {
        av_frame_free(&frame);
        return {};
    }

    // Drain all packets the encoder has ready; EAGAIN/EOF ends the loop.
    while (true) {
        int ret = avcodec_receive_packet(audio_enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        }
        if (ret < 0) {
            qWarning() << "Error receiving audio packet";
            break;
        }

        result.append(reinterpret_cast<const char*>(pkt->data), pkt->size);
        av_packet_unref(pkt);
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    return result;
}

// Decode one AAC packet and resample the first decoded frame to mono
// 16-bit signed PCM at 44.1 kHz. Returns an empty array for empty input,
// on codec/resampler errors, or while the decoder buffers (EAGAIN).
QByteArray FFmpegProcessor::decodeAudio(const QByteArray& aac) {
    QMutexLocker lock(&audio_mutex);
    if (!audio_dec_ctx || aac.isEmpty()) return {};

    AVPacket* pkt = av_packet_alloc();
    if (!pkt) return {};

    // Borrow the QByteArray's buffer (pkt->buf stays null, so freeing the
    // packet does not touch bytes we don't own).
    pkt->data = reinterpret_cast<uint8_t*>(const_cast<char*>(aac.data()));
    pkt->size = static_cast<int>(aac.size());

    if (avcodec_send_packet(audio_dec_ctx, pkt) < 0) {
        av_packet_free(&pkt);
        return {};
    }

    AVFrame* frame = av_frame_alloc();
    if (!frame) {
        av_packet_free(&pkt);
        return {};
    }

    if (avcodec_receive_frame(audio_dec_ctx, frame) < 0) {
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return {};
    }

    // Per-call resampler: input layout/format/rate come from the decoded
    // frame itself, so this stays correct if the stream parameters change.
    SwrContext* resampler = nullptr;
    AVChannelLayout out_ch_layout;
    av_channel_layout_default(&out_ch_layout, 1); // Mono output

    // FIX: the original shallow-copied frame->ch_layout into a local struct
    // (unsafe for custom layouts that own heap data); pass the frame's
    // layout directly instead.
    int rc = swr_alloc_set_opts2(&resampler,
                                 &out_ch_layout, AV_SAMPLE_FMT_S16, 44100,
                                 &frame->ch_layout, static_cast<AVSampleFormat>(frame->format),
                                 frame->sample_rate, 0, nullptr);
    if (rc < 0 || swr_init(resampler) < 0) {
        av_channel_layout_uninit(&out_ch_layout);
        swr_free(&resampler);
        av_frame_free(&frame);
        av_packet_free(&pkt);
        return {};
    }

    // FIX: the original sized the output by the *input* sample count and
    // ignored swr_convert's return value — wrong whenever the input rate
    // differs from 44100 Hz. Size for the worst case, then shrink to the
    // number of samples actually produced.
    const int in_rate = frame->sample_rate > 0 ? frame->sample_rate : 44100;
    const int max_out_samples =
        static_cast<int>(static_cast<int64_t>(frame->nb_samples) * 44100 / in_rate) + 256;
    const int bytes_per_sample = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

    QByteArray pcm;
    pcm.resize(max_out_samples * bytes_per_sample); // mono: one plane

    uint8_t* out_planes[] = { reinterpret_cast<uint8_t*>(pcm.data()) };
    const int converted = swr_convert(resampler, out_planes, max_out_samples,
                                      const_cast<const uint8_t**>(frame->data),
                                      frame->nb_samples);
    if (converted < 0) {
        pcm.clear();
    } else {
        pcm.resize(converted * bytes_per_sample);
    }

    av_channel_layout_uninit(&out_ch_layout);
    swr_free(&resampler);
    av_frame_free(&frame);
    av_packet_free(&pkt);
    return pcm;
}
