#include "ffmVideoReader.h"
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
#include <iostream>
#include <stdexcept>
#include <thread>

using namespace std;

// Default constructor: all real setup (demuxer, decoder, decode thread)
// happens in open().
FfmVideoReader::FfmVideoReader() {
}

// Destructor: stop and join the decode thread so it never outlives this
// object (close() is a no-op when no thread was started).
FfmVideoReader::~FfmVideoReader() {
    close();
}

void FfmVideoReader::decodeVideoStream() {
    // 初始化帧和包
    AVPacket* packet = av_packet_alloc();
    AVFrame* frame = av_frame_alloc();
    if (!packet || !frame) {
        std::cerr << "Could not allocate frame or packet.\n";
        throw ::runtime_error("Could not allocate frame or packet.\n");
    }

    // 这里假设你想处理视频输出
    SwsContext* swsContext = sws_getContext(width, height,
                                            codecContext_->pix_fmt,
                                            width, height,
                                            AV_PIX_FMT_RGB24, SWS_BILINEAR, nullptr, nullptr, nullptr);
    if (!swsContext) {
        std::cerr << "Could not initialize the conversion context.\n";
        throw ::runtime_error("Could not initialize the conversion context.\n");
    }

    string finish = "is finish";
    // 解码循环
    while (av_read_frame(formatContext_, packet) >= 0) {
        if (packet->stream_index == videoStreamIndex_) {
            avcodec_send_packet(codecContext_, packet);  // 发送数据到ffmepg，放到解码队列中
            if (avcodec_receive_frame(codecContext_, frame) == 0) {
                // 处理解码后的帧，例如显示或进一步处理
                shared_ptr<Tensor<uint8_t>> rgbPtr = make_shared<Tensor<uint8_t>>(1, height, width, 3, 0);
                uint8_t* rgb = rgbPtr->getData();
                uint8_t* data[AV_NUM_DATA_POINTERS] = {0};
                data[0] = rgb;
                int lines[AV_NUM_DATA_POINTERS] = {0};
                lines[0] = width * 3;
                sws_scale(swsContext, frame->data, frame->linesize, 0, height, data, lines);

                rgbDataQue_.Push(rgbPtr);
            }
        }

        av_packet_unref(packet);
        if (!isPlaying_) {
            finish = "is closed";
            break;
        }
    }
    printf("%s %s\n", videoPath_.c_str(), finish.c_str());
    // 清理
    sws_freeContext(swsContext);
    av_frame_free(&frame);
    av_packet_free(&packet);
    avcodec_close(codecContext_);
    avformat_close_input(&formatContext_);
    avcodec_free_context(&codecContext_);
    isPlaying_ = false;
}

void FfmVideoReader::open(const char* videoPath) {
    if (decodeTh_) {
        close();
    }
    videoPath_ = videoPath;
    if (avformat_open_input(&formatContext_, videoPath, nullptr, nullptr) != 0) {
        std::cerr << "Could not open input file: " << videoPath << endl;
        throw std::invalid_argument("Could not open input file.\n");
    }
    cout << "open " << videoPath << " success\n";
    // 获取流信息
    if (avformat_find_stream_info(formatContext_, nullptr) < 0) {
        std::cerr << "Failed to retrieve stream info.\n";
        throw std::invalid_argument("Failed to retrieve stream info.\n");
    }

    // 查找视频流
    for (unsigned int i = 0; i < formatContext_->nb_streams; ++i) {
        if (formatContext_->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStreamIndex_ = i;
            break;
        }
    }
    if (videoStreamIndex_ == -1) {
        std::cerr << "No video stream found.\n";
        throw std::invalid_argument("No video stream found.\n");
    }

    // 获取视频流的编解码参数
    AVCodecParameters* codecParams = formatContext_->streams[videoStreamIndex_]->codecpar;
    AVRational fps = formatContext_->streams[videoStreamIndex_]->avg_frame_rate;
    duration_ = fps.den * 1000 / fps.num;
    printf("fps %d / %d,duration: %d \n", fps.num, fps.den, duration_);
    // 打印视频流的颜色格式
    printf("Video color format: %s\n", av_get_pix_fmt_name((AVPixelFormat)codecParams->format));
#if 1
    AVCodec* codec = avcodec_find_decoder(formatContext_->streams[videoStreamIndex_]->codecpar->codec_id);
#else
    // 查找解码器
    AVCodec* codec = avcodec_find_decoder_by_name("h264_rkmpp");
    if (codec) {
        cout << "found h264_rkmpp decoder\n";
    } else {
        cout << "NOT found h264_rkmpp decoder\n";
        codec = avcodec_find_decoder(formatContext_->streams[videoStreamIndex_]->codecpar->codec_id);
        if (!codec) {
            std::cerr << "Decoder not found.\n";
            throw std::invalid_argument("Decoder not found.\n");
        }
    }

#endif
    cout << "decoder name: " << codec->name << endl;
    // 创建解码上下文
    codecContext_ = avcodec_alloc_context3(codec);
    if (!codecContext_) {
        std::cerr << "Could not allocate codec context.\n";
        throw ::runtime_error("Could not allocate codec context.\n");
    }
    if (avcodec_parameters_to_context(codecContext_, formatContext_->streams[videoStreamIndex_]->codecpar) < 0) {
        std::cerr << "Failed to copy codec parameters to codec context.\n";
        throw ::runtime_error("Failed to copy codec parameters to codec context.\n");
    }

    // 打开解码器
    if (avcodec_open2(codecContext_, codec, nullptr) < 0) {
        std::cerr << "Could not open codec.\n";
        throw ::runtime_error("Could not open codec.\n");
    }
    width = codecContext_->width;
    height = codecContext_->height;
    printf("%s: width %d,height %d\n", videoPath, width, height);
    isPlaying_ = true;
    decodeTh_ = new thread([this]() {
        decodeVideoStream();
    });
}

// Stop playback: signal the decode loop to exit, wait for it to finish its
// cleanup, and release the thread object. Safe to call when nothing is open.
void FfmVideoReader::close() {
    if (decodeTh_ == nullptr) {
        return;  // nothing to stop
    }
    isPlaying_ = false;   // decode loop polls this flag and breaks out
    decodeTh_->join();    // decodeVideoStream() frees the FFmpeg contexts on exit
    delete decodeTh_;
    decodeTh_ = nullptr;
}

// Pace file playback to real time by sleeping one frame period (duration_
// ms). Live sources pace themselves, so this is a no-op for them.
void FfmVideoReader::stay() {
    if (isLive_) {
        return;
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(duration_));
}

// Take the next decoded RGB frame. Returns nullptr only once playback has
// stopped AND the queue has drained; otherwise blocks/returns per the
// queue's Pop() semantics.
std::shared_ptr<Tensor<unsigned char>> FfmVideoReader::pop() {
    const bool drained = !isPlaying_ && rgbDataQue_.Empty();
    return drained ? nullptr : rgbDataQue_.Pop();
}
