#include "videopush.h"

VideoPush::VideoPush(QObject *parent)
: QThread(parent)
{
    // Discover the dshow camera device and input format
    // (fills the `video` open string and `inputFormat`).
    dshow();
    videoContext = avformat_alloc_context();

    // Capture options for the dshow device: 640x480 @ 30 fps,
    // with a 1 GB realtime buffer to avoid dropped frames.
    AVDictionary* options = nullptr;
    av_dict_set(&options, "video_size", "640x480", 0);
    av_dict_set(&options, "framerate", "30", 0);
    av_dict_set(&options, "rtbufsize", "1G", 0);

    // Open the camera device named by `video` ("video=<device name>").
    // NOTE: on failure avformat_open_input frees videoContext and nulls it.
    errnum = avformat_open_input(&videoContext, video.c_str(), inputFormat, &options);
    if (Error_detail("无法打开摄像头")){
        av_dict_free(&options);
        throw std::runtime_error("无法打开摄像头");
    }

    // Probe the opened input so stream parameters are populated.
    errnum = avformat_find_stream_info(videoContext, nullptr);
    if (Error_detail("找不到流详细")) {
        av_dict_free(&options);
        throw std::runtime_error("输入流找不到");
    }

    // Locate the video stream and open a matching decoder
    // (sets `decoder` and `video_stream_index`).
    set_decoder();
    av_dict_free(&options);
}

void VideoPush::set_encoder(const QString _url)
{
    url = _url;
    qDebug() << url;

    // 打开输出文件 (这里是RTMP URL)
    errnum = avformat_alloc_output_context2(&outputContext, nullptr, "flv", url.toStdString().c_str());
    if (Error_detail("无法创建输出上下文")) return ;

    // 添加视频流
    AVStream* out_stream_video = avformat_new_stream(outputContext, nullptr);
    if (!out_stream_video) {
        std::cerr << "无法分配输出流" << AVERROR_UNKNOWN << std::endl;
        return;
    }

    // 初始化编码器
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!codec) {
        std::cerr << "找不到编码器" << AVERROR_UNKNOWN << std::endl;
        return ;
    }
    encoder = avcodec_alloc_context3(codec);
    if (!encoder) {
        std::cerr << "无法分配视频编码器上下文" << AVERROR_UNKNOWN << std::endl;
        return ;
    }

    // 设置编码器参数
    encoder->bit_rate = 100000; // 400k bit/s
    encoder->width = 640;
    encoder->height = 480;
    encoder->time_base = AVRational{ 1, 30 }; // 15 fps
    encoder->framerate = AVRational{ 30, 1 };
    encoder->gop_size = 12; // key frame every 12 frames
    encoder->max_b_frames = 2;
    encoder->pix_fmt = AV_PIX_FMT_YUV420P;

    if (outputContext->oformat->flags & AVFMT_GLOBALHEADER)
        encoder->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // 打开编码器
    errnum = avcodec_open2(encoder, codec, nullptr);
    if (Error_detail("无法打开编码器")) return ;
    if (out_stream_video->codecpar == nullptr) return;
    errnum = avcodec_parameters_from_context(out_stream_video->codecpar, encoder);
    if (Error_detail("未能将编码器参数复制到输出流")) return ;

    out_stream_video->time_base = encoder->time_base;
}

void VideoPush::stop_push()
{
    // Stop the capture thread, flush the encoder, finalize the output
    // container and release every FFmpeg resource. Safe to call even if
    // set_encoder()/_VideoPush() never ran (all teardown is null-guarded).
    stop = false; // the capture loop tests `stop` and exits when it is false
    quit(); // ask the thread's event loop to exit
    wait(); // block until the thread has finished

    // Close the camera input. avformat_close_input both frees the context
    // and nulls the pointer, so no separate avformat_free_context is needed.
    if (videoContext) {
        avformat_close_input(&videoContext);
    }

    // Flush the encoder by sending a null frame, then drain and mux the
    // remaining delayed packets.
    if (encoder && outputContext && outPacket && avcodec_send_frame(encoder, nullptr) >= 0) {
        while (avcodec_receive_packet(encoder, outPacket) == 0) {
            outPacket->stream_index = 0;
            // Rescale timestamps from the encoder time base to the output
            // stream time base before muxing.
            outPacket->pts = av_rescale_q_rnd(outPacket->pts, encoder->time_base,
                                              outputContext->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            outPacket->dts = av_rescale_q_rnd(outPacket->dts, encoder->time_base,
                                              outputContext->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            outPacket->duration = av_rescale_q(outPacket->duration, encoder->time_base,
                                               outputContext->streams[0]->time_base);
            outPacket->pos = -1;

            av_interleaved_write_frame(outputContext, outPacket);
            av_packet_unref(outPacket);
        }
    }

    // Write the container trailer (only meaningful if a header was written).
    if (outputContext)
        av_write_trailer(outputContext);

    // Free frames and the reusable output packet (all *_free are null-safe).
    av_frame_free(&iFrame);
    av_frame_free(&oFrame);
    av_packet_free(&outPacket);

    // Release the software-scaler context.
    if (swsCtx) {
        sws_freeContext(swsCtx);
        swsCtx = nullptr;
    }

    // Close the output I/O and free the muxer context; null the pointer so
    // a later call cannot touch freed memory.
    if (outputContext) {
        if (!(outputContext->oformat->flags & AVFMT_NOFILE)) {
            avio_closep(&outputContext->pb);
        }
        avformat_free_context(outputContext);
        outputContext = nullptr;
    }

    // Free encoder and decoder contexts (null-safe, pointers are nulled).
    avcodec_free_context(&encoder);
    avcodec_free_context(&decoder);
}

void VideoPush::_VideoPush()
{
    if (!(outputContext->oformat->flags & AVFMT_NOFILE)) {
        errnum = avio_open(&outputContext->pb, url.toStdString().c_str(), AVIO_FLAG_WRITE);
        if (Error_detail("无法打开输出地址url")) return;
    }
    errnum = avformat_write_header(outputContext, nullptr);
    if (Error_detail("写入标头时发送错误")) return;

    // 视频帧分配
    iFrame = av_frame_alloc();
    // 分配iFrame缓冲区
    int numBytes = av_image_get_buffer_size(encoder->pix_fmt, encoder->width, encoder->height, 32);
    uint8_t* buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
    av_image_fill_arrays(iFrame->data, iFrame->linesize
                         , buffer, encoder->pix_fmt, encoder->width, encoder->height, 32);
    iFrame->width = encoder->width;
    iFrame->height = encoder->height;
    iFrame->format = encoder->pix_fmt;
    swsCtx = sws_getContext(decoder->width, decoder->height, decoder->pix_fmt,
                            encoder->width, encoder->height, encoder->pix_fmt,
                            SWS_BILINEAR, nullptr, nullptr, nullptr);

    oFrame = av_frame_alloc();//frame帧 框架
    AVPacket* pPacket = av_packet_alloc();//packet包装盒
    outPacket = av_packet_alloc();

    //av_read_frame返回流中的下一帧 并不保证是有效帧 返回的是压缩数据
    while (stop && av_read_frame(videoContext, pPacket) >= 0) {
        //确保是视频流的一帧 ->stream_index储存了相应的->nb_streams
        if (pPacket->stream_index != video_stream_index) continue;
        //将AVPacket送入解码器进行解码
        errnum = avcodec_send_packet(decoder, pPacket);
        if (Error_detail("将AVPacket放入编解码器时发送错误")) return;
        //从解码器获取解码后的AVFrame数据
        while (avcodec_receive_frame(decoder, oFrame) == 0) {//receive得到
            // qDebug() << "原始图片：" << decoder->frame_num;
            //处理解码后的帧 (oFrame) 保存为图片
            processing_frame();
        }
        av_packet_unref(pPacket);
    }
    av_packet_unref(pPacket);
    av_free(buffer);
}

void VideoPush::processing_frame()
{
    sws_scale(swsCtx, oFrame->data, oFrame->linesize, 0, decoder->height
              , iFrame->data, iFrame->linesize);
    iFrame->pts = av_rescale_q(oFrame->pts, videoContext->streams[video_stream_index]->time_base, encoder->time_base);


    if (avcodec_send_frame(encoder, iFrame) >= 0) {
        while (avcodec_receive_packet(encoder, outPacket) == 0) {
            outPacket->stream_index = 0;
            outPacket->pts = av_rescale_q_rnd(outPacket->pts, encoder->time_base
                                              , outputContext->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            outPacket->dts = av_rescale_q_rnd(outPacket->dts, encoder->time_base
                                              , outputContext->streams[0]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            outPacket->duration = av_rescale_q(outPacket->duration, encoder->time_base
                                               , outputContext->streams[0]->time_base);
            outPacket->pos = -1;

            // emit frameReady(new QImage((uchar*)iFrame->data[0], encoder->width, encoder->height, QImage::Format_RGB888)); // 发射信号，传递图像

            // qDebug() << "发送" << encoder->frame_num;
            av_interleaved_write_frame(outputContext, outPacket);
            av_packet_unref(outPacket);
        }
    }
    return;
}

void VideoPush::dshow()
{
    // Find the first dshow video-capture device and store its open string
    // ("video=<device name>") in `video`; also caches the dshow input format.
    video = "video=";
    inputFormat = av_find_input_format("dshow");
    if (!inputFormat) {
        std::cerr << "未找到dshow" << std::endl;
        return ;
    }

    AVDeviceInfoList* device_list = nullptr;
    errnum = avdevice_list_input_sources(inputFormat, nullptr, nullptr, &device_list);
    if (errnum <= 0) {
        std::cout << "没有找到合适的输入设备" << std::endl;
        return ;
    }

    // Take only the FIRST video device: appending several device names
    // would build an invalid "video=AB" open string. Bound the loop on
    // nb_devices rather than reusing errnum.
    for (int i = 0; i < device_list->nb_devices; ++i) {
        if (device_list->devices[i]->media_types != nullptr
            && *device_list->devices[i]->media_types == AVMEDIA_TYPE_VIDEO) {
            video += device_list->devices[i]->device_name;
            std::cout << "摄像头：" << video;
            break;
        }
    }

    avdevice_free_list_devices(&device_list);
}

void VideoPush::set_decoder()
{
    AVCodecParameters* pCodecParameters = nullptr;
    const AVCodec* pCodec = nullptr;
    video_stream_index = -1;

    for (unsigned int i = 0; i < videoContext->nb_streams; ++i) {
        AVCodecParameters* pLocalCodecParameters = videoContext->streams[i]->codecpar;//获得第一个流的编解码器信息
        const AVCodec* pLocalCodec = avcodec_find_decoder(pLocalCodecParameters->codec_id);
        if (pLocalCodec == nullptr) continue;

        //->codec_type编码的类型  media媒体 type类型 表示数据类型是视频
        if (pLocalCodecParameters->codec_type == AVMEDIA_TYPE_VIDEO) {//找到视频流进行处理
            //将视频流的索引和编解码器信息储存后退出循环
            video_stream_index = i;
            pCodec = pLocalCodec;
            pCodecParameters = pLocalCodecParameters;
            break;
        }
    }

    if (video_stream_index == -1) {
        printf("没有找到视频流\n");
        return;
    }

    decoder = avcodec_alloc_context3(pCodec);

    errnum = avcodec_parameters_to_context(decoder, pCodecParameters);
    if (Error_detail("无法初始化编码器上下文")) return;

    errnum = avcodec_open2(decoder, pCodec, nullptr);
    if (Error_detail("打开编码器时发送错误")) return;
}

bool VideoPush::Error_detail(const char *err)
{
    // Report the last FFmpeg status: when errnum is non-zero, log `err`
    // with the decoded FFmpeg error text and return true; errnum == 0
    // means success, so return false without logging.
    const bool failed = (errnum != 0);
    if (failed) {
        av_strerror(errnum, errbuf, sizeof(errbuf));
        qDebug() << err << ":  " << errbuf;
    }
    return failed;
}
