#include "MjpegDecode.h"

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include "Util/logc.h"


namespace Media{

    /**
     * @brief Check whether a media file contains at least one audio stream.
     *
     * Opens the container with avformat_open_input, scans the stream list
     * for AVMEDIA_TYPE_AUDIO, and always closes the context before returning.
     *
     * @param file_path Path/URL accepted by avformat_open_input.
     * @return true if any stream's codecpar reports an audio codec type;
     *         false if the file cannot be opened or has no audio stream.
     */
    bool hasAudioStream(const char *file_path){
        AVFormatContext *input_ctx = NULL;
        int ret = avformat_open_input(&input_ctx, file_path, NULL, NULL);
        if (ret < 0) {
            errorf("Failed to open input file\n");
            return false;
        }

        bool found = false;
        // nb_streams is unsigned; use an unsigned index to avoid a
        // signed/unsigned comparison.
        for (unsigned int i = 0; i < input_ctx->nb_streams; i++) {
            if (input_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
                found = true;
                break;
            }
        }

        // Single cleanup path instead of closing on every return.
        avformat_close_input(&input_ctx);
        return found;
    }

    // AVFrame* convert_to_nv12(AVFrame* src_frame) {
    //     AVFrame* dst_frame = av_frame_alloc();
    //     if (!dst_frame) {
    //         return NULL;
    //     }

    //     // Set the output frame's pixel format and dimensions
    //     dst_frame->format = AV_PIX_FMT_NV12;
    //     dst_frame->width = src_frame->width;
    //     dst_frame->height = src_frame->height;

    //     // Allocate the output frame's buffers
    //     int ret = av_frame_get_buffer(dst_frame, 32); // 32 is the alignment; adjust as needed
    //     if (ret < 0) {
    //         av_frame_free(&dst_frame);
    //         return NULL;
    //     }

    //     // Set up the SwsContext used for the conversion (if needed)
    //     struct SwsContext* sws_ctx = sws_getContext(src_frame->width, src_frame->height, src_frame->format,
    //                                                 dst_frame->width, dst_frame->height, dst_frame->format,
    //                                                 0, NULL, NULL, NULL);
    //     if (!sws_ctx) {
    //         av_frame_free(&dst_frame);
    //         return NULL;
    //     }

    //     // Perform the conversion
    //     sws_scale(sws_ctx, src_frame->data, src_frame->linesize, 0, src_frame->height,
    //             dst_frame->data, dst_frame->linesize);

    //     // Release the SwsContext
    //     sws_freeContext(sws_ctx);

    //     return dst_frame;
    // }


    /**
     * @brief Compute the total byte size of a decoded YUV frame from its
     *        per-plane linesize values.
     *
     * The luma (or first) plane is always counted as linesize[0] * height;
     * chroma planes are added according to the vertical subsampling of the
     * pixel format. Formats not listed contribute only the first plane.
     *
     * @param frame Decoded frame with valid format/height/linesize fields.
     * @return Sum of the plane sizes in bytes.
     */
    int calculate_frame_size(AVFrame *frame) {
        // Every supported layout has a full-height first plane.
        int total = frame->linesize[0] * frame->height;

        switch (frame->format) {
        // Planar formats whose chroma planes span half the luma rows.
        case AV_PIX_FMT_YUV420P:
        case AV_PIX_FMT_YUVJ420P:
        case AV_PIX_FMT_YUV440P:
            total += frame->linesize[1] * frame->height / 2; // U plane
            total += frame->linesize[2] * frame->height / 2; // V plane
            break;

        // Planar formats whose chroma planes span the full luma height.
        case AV_PIX_FMT_YUV422P:
        case AV_PIX_FMT_YUVJ422P:
        case AV_PIX_FMT_YUV444P:
        case AV_PIX_FMT_YUVJ444P:
        case AV_PIX_FMT_YUV411P:
            total += frame->linesize[1] * frame->height; // U plane
            total += frame->linesize[2] * frame->height; // V plane
            break;

        // Semi-planar: one interleaved UV plane at half the luma rows.
        case AV_PIX_FMT_NV12:
        case AV_PIX_FMT_NV21:
            total += frame->linesize[1] * frame->height / 2; // UV plane
            break;

        // Semi-planar: one interleaved UV plane at full luma height.
        case AV_PIX_FMT_NV16:
            total += frame->linesize[1] * frame->height; // UV plane
            break;

        // Planar with quarter-height chroma.
        case AV_PIX_FMT_YUV410P:
            total += frame->linesize[1] * frame->height / 4; // U plane
            total += frame->linesize[2] * frame->height / 4; // V plane
            break;

        default:
            // Unknown format: only the first plane is counted.
            break;
        }

        return total;
    }

    // Construct the decoder: store the per-frame callback and immediately
    // open the MJPEG codec context via init().
    MjpegDecode::MjpegDecode(NV12FrameCallBack frameBack)
        : m_frameBack(frameBack)
    {
        init();
    }

    /**
     * @brief Feed one compressed MJPEG frame to the decoder and deliver each
     *        decoded picture to the registered callback.
     *
     * The packet borrows the caller's buffer (no copy); the caller keeps
     * ownership of `data`.
     *
     * @param data Pointer to one complete MJPEG-encoded frame.
     * @param size Size of `data` in bytes.
     */
    void MjpegDecode::PushMjpegFrame(uint8_t *data, int size){
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = data;   // borrowed, not owned by the packet
        pkt.size = size;

        AVFrame* frame = av_frame_alloc();
        if(!frame){
            errorf("Failed to allocate frame");
            av_packet_unref(&pkt); // keep cleanup consistent on every exit path
            return;
        }

        int ret = avcodec_send_packet(m_codec_ctx, &pkt);
        if (ret < 0) {
            errorf( "Error sending packet for decoding\n");
            goto end; // was an early return that skipped av_packet_unref
        }

        // One packet may yield zero or more frames; drain them all.
        while(ret >= 0){
            ret = avcodec_receive_frame(m_codec_ctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                break; // decoder needs more input / is flushed
            } else if (ret < 0) {
                errorf("Error while receiving a frame from the decoder\n");
                goto end;
            }

            frame->pts = frame->best_effort_timestamp;

            if(m_frameBack){
                // NOTE(review): this passes frame->data[0] with the size of
                // all planes combined, i.e. it assumes the decoder laid the
                // planes out contiguously after the first one. FFmpeg does
                // not guarantee that in general — confirm, or copy the
                // planes into a single buffer before invoking the callback.
                m_frameBack(frame->data[0], calculate_frame_size(frame),
                            frame->width, frame->height, 0);
            }

            // Release this frame's buffers before reusing it next iteration.
            av_frame_unref(frame);
        }
    end:
        av_frame_free(&frame);
        av_packet_unref(&pkt);
    }
    // Destructor: release the codec context allocated by init(), if any.
    // avcodec_free_context also resets m_codec_ctx to NULL.
    MjpegDecode::~MjpegDecode(){
        if(m_codec_ctx)
            avcodec_free_context(&m_codec_ctx);
    }

    bool MjpegDecode::init(){
        // 创建解码器上下文
        AVCodec* codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
        if (!codec) {
            errorf("Failed to find MJPEG decoder\n");
            return false;
        }

        m_codec_ctx = avcodec_alloc_context3(codec);
        if (!m_codec_ctx) {
            errorf("Failed to allocate decoder context\n");
            return false;
        }

        int ret = avcodec_open2(m_codec_ctx, codec, NULL);
        if (ret < 0) {
            errorf("Failed to open decoder\n");
            avcodec_free_context(&m_codec_ctx);
            return false;
        }
        return true;
    }


}