﻿#include "mediacapturer.h"
#include <QAudioOutput>
VideoCapturer::VideoCapturer()
{
    // Begin idle: no RTP sender attached yet and the capture loop disabled
    // until setRunState(true) is called.
    rtpSender = nullptr;
    isRun = false;
}

VideoCapturer::~VideoCapturer()
{
    // run() is a custom capture loop that polls isRun and never enters an
    // event loop, so quit() alone cannot stop it. Clear the flag first,
    // otherwise wait() can block forever on a still-running thread.
    isRun = false;
    quit();
    wait();
}

void VideoCapturer::setRunState(bool start)
{
    // The capture loop in run() polls this flag; clearing it lets a running
    // loop wind down on its own.
    isRun = start;

    // Only a transition to "running" needs the thread to be started.
    if (!start)
        return;

    this->start();
}

void VideoCapturer::run()
{
    if(rtpSender == nullptr)
    {
        qDebug() << "VideoCapturer: invalid rtpSender.";
        return;
    }

    AVFormatContext *avFormatContext = avformat_alloc_context();
    AVInputFormat *iFmt = NULL;
    int ret = 0;

#if 1
    // gdigrab捕捉桌面
    iFmt = av_find_input_format("gdigrab");
    ret = avformat_open_input(&avFormatContext, "desktop", iFmt, NULL);
#elif 0
    // dshow捕捉摄像头/桌面
    iFmt = av_find_input_format("dshow");
    ret = avformat_open_input(&avFormatContext, "video=screen-capture-recorder", iFmt, NULL);
#endif
    if (ret < 0)
    {
        qDebug() << "open input error" << getAVError(ret);
        return;
    }

    //设置Capture的分辨率和帧率
    //当使用usb摄像头时，很多只能固定帧率30，下方的options设置可能无效
    AVDictionary *options = NULL;
    av_dict_set(&options, "video_size", "1920x1080", 0); // 分辨率
    av_dict_set(&options, "framerate", "60", 0);

    //获取流信息
    ret = avformat_find_stream_info(avFormatContext, &options);
    if (ret < 0)
    {
        qDebug() << "find video stream info error";
        av_dict_free(&options);
        return;
    }

    av_dict_free(&options);

    //获取视频流
    int videoIndex = av_find_best_stream(avFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (videoIndex < 0)
    {
        qDebug() << "find video stream index error";
        return;
    }
    AVStream *videoStream = avFormatContext->streams[videoIndex];

    //对抓取到的屏幕进行解码的解码器
    AVCodec *captureDecoder = avcodec_find_decoder(videoStream->codecpar->codec_id);
    if (captureDecoder == NULL)
    {
        qDebug() << "video decoder not found";
        return;
    }

    AVCodecContext *captureCodecCtx = avcodec_alloc_context3(captureDecoder);
    if (!captureCodecCtx)
    {
        qDebug() << "avcodec_alloc_context3 error";
        return;
    }

    //复制参数
    ret = avcodec_parameters_to_context(captureCodecCtx, videoStream->codecpar);
    if (ret < 0)
    {
        qDebug() << "avcodec_parameters_to_context error" << getAVError(ret);
        return;
    }

    //打开解码器
    ret = avcodec_open2(captureCodecCtx, captureDecoder, NULL);
    if (ret < 0)
    {
        qDebug() << "open video codec error" << getAVError(ret);
        return;
    }

    //获取视频信息 注：USB设备或录屏当系统负载较多时很难达到真实帧率 可能下降较多
    int videoWidth = videoStream->codecpar->width;
    int videoHeight = videoStream->codecpar->height;
    int videoFPS = av_q2d(videoStream->avg_frame_rate);

    //用于调整帧率，实时传输不需要太高帧数
    // uint frameCount = 0;
    // bool needHalfFrame = false;
    // if(videoFPS >= 20)
    // {
    //     needHalfFrame = true;
    //     videoFPS = videoFPS * 0.5;
    // }

    // 这里有待删除，发送端不需要渲染
    videoOutput->setVideoParm(videoWidth, videoHeight, true);

    //设置RTP发送器的媒体信息
    rtpSender->info.width = videoWidth;
    rtpSender->info.height = videoHeight;
    rtpSender->info.fps = videoFPS;
    rtpSender->info.hasVideo = 1;
    rtpSender->setVideo = true;
    rtpSender->setTransType(true);

    //初始化视频格式转换器，这里转换成YUV格式，因为H264编码的原始数据就是YUV格式
    AVPixelFormat srcFormat = captureCodecCtx->pix_fmt; //经过打印，发现用gdigrab抓取屏幕的格式是: AV_PIX_FMT_BGRA
    // qDebug() << "captureCodecCtx->pix_fmt: " << srcFormat;
    AVPixelFormat dstFormat = AV_PIX_FMT_YUV420P;
    VideoSwser videoSwser;
    videoSwser.initSwsCtx(videoWidth, videoHeight, srcFormat, videoWidth, videoHeight, dstFormat);

    //设置编码器
    VideoEncoder videoEncoder;
    videoEncoder.initEncoder(videoWidth, videoHeight, dstFormat, videoFPS);

    qDebug() << "init VideoCapturer Sucess, video[" << videoWidth << "x" << videoHeight << "], frame rate:" << videoFPS << "fps.";


    AVPacket *videoPacket = av_packet_alloc();
    AVFrame *videoFrame = av_frame_alloc();
    while (isRun)
    {
        //摄像头不断读取视频帧到packet里
        int ret = av_read_frame(avFormatContext, videoPacket);
        if (ret == AVERROR_EOF)
            break;

        // //实时调整帧率
        // frameCount++;
        // if(needHalfFrame && (frameCount % 2 == 0))
        // {
        //     av_packet_unref(videoPacket);
        //     continue;
        // }

        //过滤掉非视频流
        if (videoPacket->stream_index != videoIndex)
        {
            av_packet_unref(videoPacket);
            continue;
        }

        //编码数据进行解码
        ret = avcodec_send_packet(captureCodecCtx, videoPacket);
        if (ret < 0)
        {
            qDebug() << "avcodec_send_packet video error" << getAVError(ret);
            av_packet_unref(videoPacket);
            continue;
        }

        //获得解码后的videoFrame
        ret = avcodec_receive_frame(captureCodecCtx, videoFrame);
        if (ret < 0)
        {
            qDebug() << "avcodec_receive_frame video error" << getAVError(ret);
            av_packet_unref(videoPacket);
            continue;
        }

        //获取转换后的帧，这个帧就是YUV的了
        AVFrame *swsFrame = videoSwser.getSwsFrame(videoFrame);

        // 这里有待删除，发送端不需要渲染
        videoOutput->startDisplay(swsFrame);

        //使用VideoEncoder，将videoFrame编码成H264的AVPacket，此时就能打包成RTP发出去了。
        AVPacket *packet = videoEncoder.getEncodePacket(swsFrame);
        if(packet)
        {
            ReadyPacket *readyPacket = new ReadyPacket();
            readyPacket->type = PacketVideoStart;
            readyPacket->packet = packet;

            //写入视频包到RtpPacketSender的packetList中，待RTP发送线程发送出去
            rtpSender->writePacket(readyPacket);
        }

        //释放解码帧
        av_frame_free(&swsFrame);
        av_frame_unref(videoFrame);

        //释放原始编码帧
        av_packet_unref(videoPacket);
    }


    // 这里有待删除
    videoOutput->stopDisplay();

    av_packet_free(&videoPacket); videoPacket = NULL;
    av_frame_free(&videoFrame); videoFrame = NULL;
    avcodec_free_context(&captureCodecCtx); captureCodecCtx = NULL;
    avformat_close_input(&avFormatContext); avFormatContext = NULL;
}

//*********************************************************************************************//

AudioCapturer::AudioCapturer()
{
    // Begin idle: no RTP sender attached yet and the capture loop disabled
    // until setRunState(true) is called.
    rtpSender = nullptr;
    isRun = false;
}

AudioCapturer::~AudioCapturer()
{
    // run() is a custom capture loop that polls isRun and never enters an
    // event loop, so quit() alone cannot stop it. Clear the flag first,
    // otherwise wait() can block forever on a still-running thread.
    isRun = false;
    quit();
    wait();
}

void AudioCapturer::setRunState(bool start)
{
    // The capture loop in run() polls this flag; clearing it lets a running
    // loop wind down on its own.
    isRun = start;

    // Only a transition to "running" needs the thread to be started.
    if (!start)
        return;

    this->start();
}

#if 0 // 用QT采集麦克风
void AudioCapturer::run()
{
    // Capture the microphone with Qt, resample the S16 PCM to FLTP, encode
    // to AAC and queue the packets on the RTP sender thread.
    if(rtpSender == nullptr)
    {
        qDebug() << "外部RTP发送者指针无效";
        return;
    }

    // Qt recording parameters: 44.1 kHz, stereo, 16-bit samples.
    int sampleRate = 44100;
    int channels = 2;
    int sampleByte = 2;
    AVSampleFormat inSampleFmt = AV_SAMPLE_FMT_S16;

    QAudioFormat recordFmt;
    recordFmt.setSampleRate(sampleRate);
    recordFmt.setChannelCount(channels);
    recordFmt.setSampleSize(sampleByte * 8);
    recordFmt.setCodec("audio/pcm");
    recordFmt.setByteOrder(QAudioFormat::LittleEndian);
    // AV_SAMPLE_FMT_S16 is *signed* 16-bit PCM; the old UnSignedInt setting
    // mismatched what the resampler was told it would receive.
    recordFmt.setSampleType(QAudioFormat::SignedInt);

    QAudioDeviceInfo info = QAudioDeviceInfo::defaultInputDevice();

    if (!info.isFormatSupported(recordFmt))
    {
        qDebug() << "Audio format not support!";
        recordFmt = info.nearestFormat(recordFmt);
    }
    QAudioInput *audioInput = new QAudioInput(recordFmt);

    // `ffmpeg -h encoder=aac`: the built-in AAC encoder only supports
    // AV_SAMPLE_FMT_FLTP. Most AAC encoders use planar layout, which improves
    // data-access locality and encoding speed.
    AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_FLTP;
    rtpSender->info.sampleRate = sampleRate;
    rtpSender->info.format = outSampleFmt;
    rtpSender->info.channel = channels;
    rtpSender->info.hasAudio = 1;
    rtpSender->setAudio = true;
    rtpSender->setTransType(false);

    // Resampler S16 -> FLTP.
    AudioSwrer audioSwrer;
    audioSwrer.initSwrCtx(channels, sampleRate, inSampleFmt, channels, sampleRate, outSampleFmt);

    // AAC encoder.
    AudioEncoder audioEncoder;
    audioEncoder.initEncoder(channels, sampleRate, outSampleFmt);

    // Bytes in one frame of raw PCM (1024 samples per channel).
    int pcmSize = av_get_bytes_per_sample((AVSampleFormat)inSampleFmt) * channels * 1024;
    char *pcmBuf = new char[pcmSize];

    // Start capturing; start() returns NULL on failure (e.g. device busy).
    QIODevice *audioIO = audioInput->start();
    if (!audioIO)
    {
        qDebug() << "QAudioInput start failed.";
        delete audioInput;
        delete[] pcmBuf;
        return;
    }
    qDebug() << "init AudioCapturer Success" << sampleRate << channels << outSampleFmt;

    while(isRun)
    {
        if (audioInput->bytesReady() >= pcmSize)
        {
            // Accumulate exactly one PCM frame. Break on <= 0 so a stalled
            // device cannot spin this inner loop forever.
            int size = 0;
            while (size != pcmSize)
            {
                int len = audioIO->read(pcmBuf + size, pcmSize - size);
                if (len <= 0)
                    break;
                size += len;
            }

            // Only feed complete frames downstream; a short read would hand
            // the resampler partially stale buffer contents.
            if (size == pcmSize)
            {
                // Resample; getSwrFrame may fail, so check before use
                // (the old code dereferenced swrFrame unconditionally).
                AVFrame *swrFrame = audioSwrer.getSwrFrame((uint8_t *)pcmBuf);
                if (swrFrame)
                {
                    // Encode to AAC and queue for the RTP sender thread.
                    AVPacket *packet = audioEncoder.getEncodePacket(swrFrame);
                    if(packet)
                    {
                        ReadyPacket *readyPacket = new ReadyPacket();
                        readyPacket->type = PacketAudio;
                        readyPacket->packet = packet;
                        rtpSender->writePacket(readyPacket);
                    }

                    av_frame_free(&swrFrame);
                }
            }
        }

        QThread::msleep(1);
    }

    // Release resources.
    audioInput->stop();
    audioIO->close();
    delete audioInput;

    delete[] pcmBuf;
    pcmBuf = nullptr;
}




#elif 1 // 用dshow采集系统音频
void AudioCapturer::run()
{
    if(rtpSender == nullptr)
    {
        qDebug() << "rtpSender is nullptr.";
        return;
    }

    AVFormatContext *pFormatCtx = avformat_alloc_context();
    AVInputFormat *iFmt = NULL;
    int ret = 0;

    // dshow采集系统音频
    iFmt = av_find_input_format("dshow");
    ret = avformat_open_input(&pFormatCtx, "audio=virtual-audio-capturer", iFmt, NULL);
    if(ret < 0)
    {
        qDebug() << "Could not open input device: " << getAVError(ret);
        return;
    }

    //获取流信息
    ret = avformat_find_stream_info(pFormatCtx, NULL);
    if(ret < 0)
    {
        qDebug() << "Could not find stream info.";
        return;
    }

    //获取音频流
    int audioIndex = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if(audioIndex < 0)
    {
        qDebug() << "Could not find audio stream index.";
        return;
    }
    AVStream* audioStream = pFormatCtx->streams[audioIndex];

    //对采集到的音频包进行解码的解码器
    AVCodec *pCodec = avcodec_find_decoder(audioStream->codecpar->codec_id);
    if(pCodec == NULL)
    {
        qDebug() << "Unsupported codec.";
        return;
    }
    AVCodecContext *pCodecCtx = avcodec_alloc_context3(pCodec);
    if(!pCodecCtx)
    {
        qDebug() << "avcodec_alloc_context3 error.";
        return;
    }

    //复制参数
    ret = avcodec_parameters_to_context(pCodecCtx, audioStream->codecpar);
    if(ret < 0)
    {
        qDebug() << "Could not copy codec context.";
        return;
    }

    //打开解码器
    ret = avcodec_open2(pCodecCtx, pCodec, NULL);
    if(ret < 0)
    {
        qDebug() << "Could not open codec.";
        return;
    }

    //设置音频参数
    int sampleRate = pCodecCtx->sample_rate;
    int channels = pCodecCtx->channels;
    AVSampleFormat inSampleFmt = pCodecCtx->sample_fmt;
    AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_FLTP;

    //设置RTP发送器的媒体信息
    rtpSender->info.sampleRate = sampleRate;
    rtpSender->info.format = outSampleFmt;
    rtpSender->info.channel = channels;
    rtpSender->info.hasAudio = 1;
    rtpSender->setAudio = true;
    rtpSender->setTransType(false);

    //设置重采样类
    AudioSwrer audioSwrer;
    audioSwrer.initSwrCtx(channels, sampleRate, inSampleFmt, channels, sampleRate, outSampleFmt);

    //初始化音频aac编码器
    AudioEncoder audioEncoder;
    audioEncoder.initEncoder(channels, sampleRate, outSampleFmt);

    qDebug() << "init AudioCapturer Sucess" << sampleRate << channels << outSampleFmt;

    AVPacket *packet = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();
    while (isRun)
    {
        //不断读取音频数据到packet中
        ret = av_read_frame(pFormatCtx, packet);
        if (ret == AVERROR_EOF)
            break;

        //过滤掉非音频流
        if(packet->stream_index != audioIndex)
        {
            av_packet_unref(packet);
            continue;
        }

        //对packet进行解码
        ret = avcodec_send_packet(pCodecCtx, packet);
        if(ret < 0)
        {
            qDebug() << "avcodec_send_packet audio error" << getAVError(ret);
            av_packet_unref(packet);
            continue;
        }

        //获取解码后的frame
        ret = avcodec_receive_frame(pCodecCtx, frame);
        if (ret < 0)
        {
            qDebug() << "avcodec_receive_frame audio error" << getAVError(ret);
            av_packet_unref(packet);
            continue;
        }

        // 重采样后进行编码处理
        AVFrame *swrFrame = audioSwrer.getSwrFrame(frame);

        qDebug() << swrFrame->sample_rate << swrFrame->format << swrFrame->channels; //采样率不对了

        if (swrFrame)
        {
            // 把swrFrame交给audioEncoder 编码为AAC
            AVPacket *encPacket = audioEncoder.getEncodePacket(swrFrame);
            if (encPacket)
            {
                ReadyPacket *readyPacket = new ReadyPacket();
                readyPacket->type = PacketAudio;
                readyPacket->packet = encPacket;

                //写入音频包到RtpPacketSender的packetList中，待RTP发送线程发送出去
                rtpSender->writePacket(readyPacket);
            }

            //释放解码帧
            av_frame_free(&swrFrame);
            av_frame_unref(frame);

            //释放原始包
            av_packet_unref(packet);
        }

        QThread::msleep(1);
    }

    //释放资源
    av_packet_free(&packet); packet = NULL;
    av_frame_free(&frame); frame = NULL;
    avcodec_free_context(&pCodecCtx); pCodecCtx = NULL;
    avformat_close_input(&pFormatCtx); pFormatCtx = NULL;
    avformat_free_context(pFormatCtx); pFormatCtx = NULL;
}

#endif


