﻿#include "../../includes/multimedia/cremotecamera.h"
#include "../../includes/QsLog/QsLog.h"
#include "../../includes/common/common.h"

#include <QDebug>
#include <QHash>
#include <QMouseEvent>

#ifdef ENABLE_REMOTECAMERA

// Instantiate the CVideoCapManager singleton storage.
initialiseSingleton(CVideoCapManager);

// Pixel format negotiated with the HW decoder: written in InitHarddecoding(),
// read by the get_hw_format() callback. File-scope, so shared by all instances.
static enum AVPixelFormat hw_pix_fmt;
// HW device context created in hw_decoder_init(). Also file-scope/shared.
static AVBufferRef* hw_device_ctx=NULL;

// Vertex-attribute slots used by the YUV-rendering shader program below.
#define ATTRIB_VERTEX 3
#define ATTRIB_TEXTURE 4

/// Returns the number of logical CPUs available, used to size codec
/// thread pools. Falls back to a conservative constant on platforms
/// without a query implemented.
static int XGetCpuNum()
{
#if defined WIN32 || defined _WIN32
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);

    return (int)sysinfo.dwNumberOfProcessors;
#elif defined __linux__
    return (int)sysconf(_SC_NPROCESSORS_ONLN);
#elif defined __APPLE__
    // BUG FIX: removed unused locals (numCPU/mib/len) left over from an
    // abandoned sysctl() implementation.
    // TODO: query sysctl(CTL_HW, HW_AVAILCPU) instead of hard-coding.
    return 2;
#else
    return 1;
#endif
}

#ifdef REMOTECAMERA_ENABLE_DARKNET
/// Loads class names (one per line) from the given darknet .names file.
/// Returns an empty vector when the file cannot be opened.
std::vector<std::string> objects_names_from_file(std::string const filename)
{
    std::vector<std::string> names;
    std::ifstream in(filename);
    if (in.is_open()) {
        std::string line;
        while (std::getline(in, line))
            names.push_back(line);
        std::cout << "object names loaded \n";
    }
    return names;
}

// Draws darknet detection results onto mat_img (modified in place; cv::Mat
// is passed by value but shares the underlying pixel buffer): one rectangle
// per detection, a filled label bar with the class name (plus " - <track_id>"
// when tracking is active), optional 3D coordinates, and FPS counters when
// both FPS arguments are >= 0.
void draw_boxes(cv::Mat mat_img, std::vector<bbox_t> result_vec, std::vector<std::string> obj_names,
    int current_det_fps = -1, int current_cap_fps = -1)
{
    // NOTE(review): this table is never read; obj_id_to_color() supplies colors.
    int const colors[6][3] = { { 1,0,1 },{ 0,0,1 },{ 0,1,1 },{ 0,1,0 },{ 1,1,0 },{ 1,0,0 } };

    for (auto &i : result_vec) {
        cv::Scalar color = obj_id_to_color(i.obj_id);
        cv::rectangle(mat_img, cv::Rect(i.x, i.y, i.w, i.h), color, 2);
        if (obj_names.size() > i.obj_id) {
            std::string obj_name = obj_names[i.obj_id];
            if (i.track_id > 0) obj_name += " - " + std::to_string(i.track_id);
            // The label bar must fit both the rendered text and the box width.
            cv::Size const text_size = getTextSize(obj_name, cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, 2, 0);
            int max_width = (text_size.width > i.w + 2) ? text_size.width : (i.w + 2);
            // NOTE(review): this line recomputes the same maximum — redundant.
            max_width = std::max(max_width, (int)i.w + 2);
            //max_width = std::max(max_width, 283);
            std::string coords_3d;
            // 3D coordinates are rendered only when z_3d is a real value.
            if (!std::isnan(i.z_3d)) {
                std::stringstream ss;
                ss << std::fixed << std::setprecision(2) << "x:" << i.x_3d << "m y:" << i.y_3d << "m z:" << i.z_3d << "m ";
                coords_3d = ss.str();
                cv::Size const text_size_3d = getTextSize(ss.str(), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, 1, 0);
                int const max_width_3d = (text_size_3d.width > i.w + 2) ? text_size_3d.width : (i.w + 2);
                if (max_width_3d > max_width) max_width = max_width_3d;
            }

            // Filled background bar above the detection box, clamped to the image.
            cv::rectangle(mat_img, cv::Point2f(std::max((int)i.x - 1, 0), std::max((int)i.y - 35, 0)),
                cv::Point2f(std::min((int)i.x + max_width, mat_img.cols - 1), std::min((int)i.y, mat_img.rows - 1)),
                color, CV_FILLED, 8, 0);
            putText(mat_img, obj_name, cv::Point2f(i.x, i.y - 16), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(0, 0, 0), 2);
            if(!coords_3d.empty()) putText(mat_img, coords_3d, cv::Point2f(i.x, i.y-1), cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(0, 0, 0), 1);
        }
    }
    // Overlay detection/capture FPS in the top-left corner when provided.
    if (current_det_fps >= 0 && current_cap_fps >= 0) {
        std::string fps_str = "FPS detection: " + std::to_string(current_det_fps) + "   FPS capture: " + std::to_string(current_cap_fps);
        putText(mat_img, fps_str, cv::Point2f(10, 20), cv::FONT_HERSHEY_COMPLEX_SMALL, 1.2, cv::Scalar(50, 255, 0), 2);
    }
}
#endif

/// Constructs an idle capture thread; all FFmpeg objects are created
/// lazily in InitHarddecoding()/InitSoftdecoding() when run() starts.
CVideoCapture::CVideoCapture(void)
    : m_isExit(false)
    , m_isPlaying(true)
    , m_isRtmpSend(false)
{
    pAVFormatContext = nullptr;
    pAVCodecContext  = nullptr;
    pAVCodec         = nullptr;
    pAVpacket        = nullptr;
    pyuvFrame        = nullptr;
    nv12Frame        = nullptr;
    pAVFrameRgb      = nullptr;
    pSwsContext      = nullptr;
    m_hw_device_ctx  = nullptr;
    m_outbuffer      = NULL;

    videoIndex = -1;  // video stream not found yet
    numBytes   = 0;

    // Decoded frames are converted into planar YUV420P for display/encoding.
    mpix_fmt = AV_PIX_FMT_YUV420P;

    m_isHarddecoding = false;
    m_isDisplayFrame = false;
}

/// Releases all FFmpeg resources owned by this capture object.
CVideoCapture::~CVideoCapture()
{
    if(m_outbuffer) av_free(m_outbuffer);
    if(pSwsContext) sws_freeContext(pSwsContext);
    if(pAVFrameRgb) av_frame_free(&pAVFrameRgb);
    if(pyuvFrame) av_frame_free(&pyuvFrame);
    if(nv12Frame) av_frame_free(&nv12Frame);
    if(pAVpacket) av_packet_free(&pAVpacket);
    if(pAVCodecContext)
    {
        // BUG FIX: the original called avcodec_close() AFTER
        // avcodec_free_context(), i.e. on a pointer that had already been
        // freed and reset to NULL. avcodec_free_context() already closes
        // the codec, so the extra close call is dropped.
        avcodec_free_context(&pAVCodecContext);
    }
    // NOTE(review): for a context opened with avformat_open_input(),
    // avformat_close_input() would also close the underlying IO; confirm
    // whether avformat_free_context() alone leaks the AVIOContext here.
    if(pAVFormatContext) avformat_free_context(pAVFormatContext);
    if(m_hw_device_ctx) av_buffer_unref(&m_hw_device_ctx);
}

/// 初始化硬解码
/// Initializes hardware (dxva2) decoding for the given stream URL.
/// Allocates the demuxer, probes the stream, picks the HW pixel format,
/// opens the decoder, and prepares an NV12 -> YUV420P converter plus the
/// output buffer. Returns true on success.
bool CVideoCapture::InitHarddecoding(QString url)
{
    enum AVHWDeviceType type;
    int i;

    // Allocate demuxer context, packet, and the three frames:
    // decoded HW surface, NV12 download target, final YUV420P frame.
    pAVFormatContext = avformat_alloc_context();
    // av_packet_alloc() already returns a zero-initialized packet; the
    // deprecated av_init_packet() call was removed.
    pAVpacket = av_packet_alloc();
    pyuvFrame  = av_frame_alloc();
    nv12Frame = av_frame_alloc();
    pAVFrameRgb  = av_frame_alloc();

    if(!pAVFormatContext || !pAVpacket || !pyuvFrame || !pAVFrameRgb || !nv12Frame)
    {
        QLOG_ERROR()<< "init_mdecode failed";
        return false;
    }

    type = av_hwdevice_find_type_by_name("dxva2");
    QLOG_INFO() << "AVHWDeviceType" << type;
    if (type == AV_HWDEVICE_TYPE_NONE) {
        // BUG FIX: the original message mentioned "h264_cuvid" although the
        // device requested above is "dxva2".
        QLOG_ERROR() << "Device type is not supported:" << "dxva2";
        QLOG_ERROR() << "Available device types:";
        while((type = av_hwdevice_iterate_types(type)) != AV_HWDEVICE_TYPE_NONE)
            QLOG_ERROR() <<  av_hwdevice_get_type_name(type);
        return false;
    }

    // Network/RTSP options (buffer size in bytes, timeouts in microseconds).
    AVDictionary *optionsDict = NULL;
    av_dict_set(&optionsDict, "buffer_size", "1024000", 0);
    av_dict_set(&optionsDict, "rtsp_transport", "tcp", 0);
    av_dict_set(&optionsDict, "rtmp_transport", "tcp", 0);
    av_dict_set(&optionsDict, "stimeout", "20000000", 0);
    av_dict_set(&optionsDict, "max_delay", "30000000", 0);

    /* open the input stream */
    int ret = avformat_open_input(&pAVFormatContext, url.toUtf8().data(), NULL, &optionsDict);
    // BUG FIX: the options dictionary (including any unconsumed entries
    // FFmpeg leaves in it) was leaked on every call.
    av_dict_free(&optionsDict);
    if (ret != 0) {
        QLOG_ERROR() << "Failed to avformat_open_input:" << ret;
        return false;
    }

    ret = avformat_find_stream_info(pAVFormatContext, 0);
    if(ret < 0)
    {
        QLOG_ERROR() << "Failed to avformat_find_stream_info(pAVCodecContext, 0)";
        return false;
    }

    av_dump_format(pAVFormatContext, 0, url.toUtf8().data(), 0); // log container info

    AVStream *tempstream=NULL;

    // Find the (last) video stream. nb_streams is unsigned, so the index is too.
    for(unsigned int index = 0; index < pAVFormatContext->nb_streams; index++)
    {
        if (pAVFormatContext->streams[index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoIndex = (int)index;
            tempstream = pAVFormatContext->streams[index];
        }
    }

    if(videoIndex == -1)
    {
        QLOG_ERROR() << "Failed to find video stream";
        return false;
    }

    pAVCodec = avcodec_find_decoder(tempstream->codecpar->codec_id);
    if(!pAVCodec)
    {
        QLOG_ERROR() << "Fialed to avcodec_find_decoder(pAVCodecContext->codec_id):";
        return false;
    }

    // Find a HW config of this decoder that supports the chosen device type.
    for (i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(pAVCodec, i);
        if (!config) {
            // BUG FIX: the original used the comma operator here, so the
            // device name was evaluated but never actually logged.
            QLOG_ERROR() << "Decoder does not support device type"
                         << av_hwdevice_get_type_name(type);
            return false;
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
                config->device_type == type) {
            hw_pix_fmt = config->pix_fmt;
            QLOG_INFO() << "hw_pix_fmt:" << hw_pix_fmt; // informational; was logged as ERROR
            break;
        }
    }

    if (!(pAVCodecContext = avcodec_alloc_context3(pAVCodec)))
        return false;

    if (avcodec_parameters_to_context(pAVCodecContext, tempstream->codecpar) < 0)
        return false;

    // Let FFmpeg negotiate the HW surface format via our callback.
    pAVCodecContext->get_format  = get_hw_format;

    if (hw_decoder_init(pAVCodecContext, type) < 0)
        return false;

    // Frames are downloaded from the GPU as NV12 and converted below.
    pAVCodecContext->pix_fmt=AV_PIX_FMT_NV12;
    pAVCodecContext->sw_pix_fmt=AV_PIX_FMT_YUV420P;

    if(avcodec_open2(pAVCodecContext, pAVCodec, NULL))
    {
        QLOG_ERROR() <<"Failed to avcodec_open2(pAVCodecContext, pAVCodec, pAVDictionary)";
        return false;
    }

    // NV12 (HW download format) -> YUV420P converter.
    pSwsContext = sws_getContext(pAVCodecContext->width,
                             pAVCodecContext->height,
                             AV_PIX_FMT_NV12,
                             pAVCodecContext->width,
                             pAVCodecContext->height,
                             AV_PIX_FMT_YUV420P,
                             SWS_BICUBIC,NULL,NULL,NULL);

    // Output buffer for the converted YUV420P frame.
    numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                        pAVCodecContext->width,
                                        pAVCodecContext->height,1);
    m_outbuffer = (unsigned char *)av_malloc(numBytes*sizeof(uchar));

    // Point pAVFrameRgb's planes into m_outbuffer.
    int res = av_image_fill_arrays(
                pAVFrameRgb->data,pAVFrameRgb->linesize,
                m_outbuffer,AV_PIX_FMT_YUV420P,
                pAVCodecContext->width,pAVCodecContext->height,1);
    if(res<0){
        QLOG_ERROR() << "Fill arrays failed.";
        return false;
    }

    m_isHarddecoding=true;

    return true;
}

/// 初始化软解码
/// Initializes software decoding for the given stream URL: opens the input,
/// finds the video stream and decoder, and prepares a converter from the
/// decoder's native pixel format into mpix_fmt (YUV420P). Returns true on
/// success.
bool CVideoCapture::InitSoftdecoding(QString url)
{
    int ret=0;

    // Allocate demuxer context, packet and frames.
    pAVFormatContext = avformat_alloc_context();
    pAVpacket = av_packet_alloc();
    pyuvFrame  = av_frame_alloc();
    nv12Frame = av_frame_alloc();
    pAVFrameRgb  = av_frame_alloc();

    if(!pAVFormatContext || !pAVpacket || !pyuvFrame || !pAVFrameRgb || !nv12Frame)
    {
        QLOG_ERROR()<< "init_mdecode failed";
        return false;
    }

    // Network/RTSP options (buffer size in bytes, timeouts in microseconds).
    AVDictionary *optionsDict = NULL;
    av_dict_set(&optionsDict, "buffer_size", "1024000", 0);
    av_dict_set(&optionsDict, "rtsp_transport", "tcp", 0);
    av_dict_set(&optionsDict, "rtmp_transport", "tcp", 0);
    av_dict_set(&optionsDict, "stimeout", "20000000", 0);
    av_dict_set(&optionsDict, "max_delay", "30000000", 0);

    // BUG FIX: the original ignored the `url` parameter and read the
    // `filename` member instead (callers happen to keep them equal).
    ret = avformat_open_input(&pAVFormatContext, url.toUtf8().data(), 0, &optionsDict);
    // BUG FIX: the options dictionary was leaked.
    av_dict_free(&optionsDict);
    if(ret)
    {
        QLOG_ERROR() << "Failed to avformat_open_input:" << ret;
        return false;
    }

    // Probe the stream for codec parameters.
    ret = avformat_find_stream_info(pAVFormatContext, 0);
    if(ret < 0)
    {
        QLOG_ERROR() << "Failed to avformat_find_stream_info(pAVCodecContext, 0)";
        return false;
    }

    av_dump_format(pAVFormatContext, 0, url.toUtf8().data(), 0); // log container info

    AVStream *tempstream=NULL;

    // Find the (last) video stream. nb_streams is unsigned, so the index is too.
    for(unsigned int index = 0; index < pAVFormatContext->nb_streams; index++)
    {
        if (pAVFormatContext->streams[index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoIndex = (int)index;
            tempstream = pAVFormatContext->streams[index];
        }
    }

    if(videoIndex == -1)
    {
        QLOG_ERROR() << "Failed to find video stream";
        return false;
    }

    // Look up and open the decoder for the stream's codec.
    pAVCodec = avcodec_find_decoder(tempstream->codecpar->codec_id);
    if(!pAVCodec)
    {
        // BUG FIX: the original logged pAVCodecContext->codec_id here, but
        // pAVCodecContext is still NULL at this point (null dereference on
        // the error path). Log the stream's codec id instead.
        QLOG_ERROR() << "Failed to avcodec_find_decoder, codec_id:" << tempstream->codecpar->codec_id;
        return false;
    }

    pAVCodecContext = avcodec_alloc_context3(pAVCodec);
    if(!pAVCodecContext)
        return false;
    // BUG FIX: the return value was previously ignored.
    if(avcodec_parameters_to_context(pAVCodecContext, tempstream->codecpar) < 0)
        return false;

    //pAVCodecContext->thread_count = XGetCpuNum();

    if(avcodec_open2(pAVCodecContext, pAVCodec, NULL))
    {
        QLOG_ERROR() <<"Failed to avcodec_open2(pAVCodecContext, pAVCodec, pAVDictionary)";
        return false;
    }

    QLOG_INFO()<<"video W x H:"<< pAVCodecContext->width << "x" << pAVCodecContext->height<<pAVCodecContext->pix_fmt;

    // Allocate the converted-frame buffer and point pAVFrameRgb at it.
    numBytes = av_image_get_buffer_size(mpix_fmt,
                                        pAVCodecContext->width,pAVCodecContext->height,1);
    m_outbuffer=(uchar*)av_malloc(numBytes);

    int res = av_image_fill_arrays(pAVFrameRgb->data,pAVFrameRgb->linesize,
                         m_outbuffer,mpix_fmt,pAVCodecContext->width,pAVCodecContext->height,1);
    if(res < 0){
        // BUG FIX: failure was previously silently ignored (the hard-decode
        // path already checked this result).
        QLOG_ERROR() << "Fill arrays failed.";
        return false;
    }

    // Converter from the decoder's native pixel format into mpix_fmt.
    pSwsContext = sws_getContext(pAVCodecContext->width,pAVCodecContext->height,pAVCodecContext->pix_fmt,
                                 pAVCodecContext->width,pAVCodecContext->height,mpix_fmt,
                                 SWS_BICUBIC,
                                 0,0,0
                                 );

    m_isHarddecoding = false;

    return true;
}

/// AVCodecContext::get_format callback: picks the HW surface format that
/// InitHarddecoding() stored in the file-scope hw_pix_fmt from the list
/// FFmpeg offers. Returns AV_PIX_FMT_NONE if it is not offered.
AVPixelFormat CVideoCapture::get_hw_format(AVCodecContext *ctx, const AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    // IDIOM FIX: compare against the named sentinel instead of bare -1.
    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }

    QLOG_ERROR() << "Failed to get HW surface format.";
    return AV_PIX_FMT_NONE;
}

// Creates a HW device context of the given type and attaches a reference to
// the decoder context. Returns 0 on success or a negative AVERROR code.
int CVideoCapture::hw_decoder_init(AVCodecContext *ctx, const AVHWDeviceType type)
{
    int err = 0;

    // The created buffer lands in the file-scope global `hw_device_ctx`.
    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
                                      NULL, NULL, 0)) < 0) {
        QLOG_ERROR() << "Failed to create specified HW device.";
        return err;
    }
    // NOTE(review): the member m_hw_device_ctx (unreffed in the destructor)
    // is never assigned anywhere visible in this file, so the reference held
    // by the global appears to leak and is shared by all instances — confirm
    // against the class declaration in the header.
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

    return err;
}

/**
 * @brief CVideoCapture::openUrl 打开一个网络摄像头
 * @param url 要打开的网络摄像头的地址
 * @return 如果摄像头打开成功返回真，否则返回假
 */
/**
 * @brief CVideoCapture::openUrl Records the network-camera address to open.
 * @param url            Stream address (e.g. rtsp://...)
 * @param isHarddecoding Use the hardware (dxva2) decode path when true.
 * @return true when the address is non-empty; the decoder itself is
 *         created later, when the thread starts (see run()).
 */
bool CVideoCapture::openUrl(QString url,bool isHarddecoding)
{
    // Reject empty addresses up front.
    if(url.isEmpty())
        return false;

    filename = url;
    m_isHarddecoding = isHarddecoding;
    m_isExit = false;

    return true;
}

void CVideoCapture::run()
{
    if(m_isHarddecoding)
        InitHarddecoding(filename);
    else
        InitSoftdecoding(filename);

    m_VideoSize = QSize(pAVCodecContext->width,pAVCodecContext->height);

    while(!m_isExit)
    {
        if(!m_isPlaying)
            continue;

        if(av_read_frame(pAVFormatContext,pAVpacket) >= 0 &&
                pAVpacket->stream_index == videoIndex)
        {
            int state = avcodec_send_packet(pAVCodecContext,pAVpacket);
            if(state>=0)
            {
                int ret;
                while((ret=avcodec_receive_frame(pAVCodecContext, pyuvFrame))>=0)
                {
                    if(m_isExit)
                        break;

                    if(m_isHarddecoding)
                    {
                        if(pyuvFrame->format==pAVCodecContext->pix_fmt){
                            if(ret = av_hwframe_transfer_data(nv12Frame,pyuvFrame,0)<0){
                                av_frame_unref(nv12Frame);
                                av_frame_unref(pyuvFrame);
                                continue;
                            }
                        }

                        sws_scale(pSwsContext,
                                  (const uint8_t* const*)nv12Frame->data,
                                  (const int*)nv12Frame->linesize,
                                  0,
                                  nv12Frame->height,
                                  pAVFrameRgb->data,pAVFrameRgb->linesize);
                    }
                    else
                    {
                        sws_scale(pSwsContext,                              //上面使用sws_getContext得到的结构体
                                  pyuvFrame->data, //解码后的数据
                                  pyuvFrame->linesize,                      //每个通道的行字节数,linesize和width不同。
                                  0,                                        //起始位置
                                  pAVCodecContext->height,                         //处理行数
                                  pAVFrameRgb->data,                       //目的buffer
                                  pAVFrameRgb->linesize
                                );
                    }

#ifdef REMOTECAMERA_ENABLE_DARKNET
            // Detector进行处理
            CVideoCapManager::getSingleton().DetectorprocessVideoFrame(this,
                                                                       (unsigned char *) m_outbuffer,
                                                                       pAVCodecContext->width,
                                                                       pAVCodecContext->height);
#endif

                    if(m_isDisplayFrame)
                    {
                        emit signal_processVideoFrame(m_outbuffer,
                                                      pAVCodecContext->width,
                                                      pAVCodecContext->height);
                    }

                    if(m_isRtmpSend)
                    {
                        emit signal_processVideoRtmpSend(pAVFrameRgb);
                    }

                    QThread::msleep(10);
                }
            }
            av_packet_unref(pAVpacket);
        }
    }
}

//////////////////////////////////////////////////////////////////////////////////

/// Constructs an idle RTMP sender; encoder and muxer state is built in
/// openUrl()/openUrl2().
CVideoCaptureRtmpSend::CVideoCaptureRtmpSend(QObject *parent)
    : QObject(parent)
{
    swsContext          = NULL;
    avFormatContext     = NULL;
    InvideoCodecContext = nullptr;
    OutvideoStream      = NULL;
    outVideoPacket      = {0};

    videoPts = 0;   // strictly increasing encode pts
    fps      = 25;  // fixed output frame rate
    vindex   = -1;  // output video stream index (unset)

    m_VideoCapture         = NULL;
    m_isDeleteVideoCapture = false; // true only when we new'ed the capture ourselves
}

/// Stops the capture thread, tears down the muxer/encoder, and releases
/// the capture object when this sender owns it.
CVideoCaptureRtmpSend::~CVideoCaptureRtmpSend()
{
    stop();

    // Delete the capture only if it was created by our own openUrl().
    if(m_isDeleteVideoCapture && m_VideoCapture)
    {
        delete m_VideoCapture;
        m_VideoCapture = NULL;
    }
}

/**
 * @brief CVideoCaptureRtmpSend::CloseVideoSend 关闭流推送
 */
void CVideoCaptureRtmpSend::CloseVideoSend(void)
{
    // Tear down scaler, encoder, pending packet and muxer state so that
    // openUrl2() can be run again.
    if (swsContext) {
        sws_freeContext(swsContext);
        swsContext = NULL;
    }
    if (InvideoCodecContext) {
        avcodec_free_context(&InvideoCodecContext);
    }
    videoPts = 0;
    av_packet_unref(&outVideoPacket);

    // NOTE(review): avFormatContext is an OUTPUT context created by
    // avformat_alloc_output_context2(). avformat_close_input() does free
    // it and close its pb, but the conventional teardown for an output is
    // av_write_trailer() + avio_closep(&pb) + avformat_free_context(), and
    // no av_write_trailer() is written anywhere — confirm this is intended
    // for the live FLV/RTMP stream.
    if (avFormatContext) {
        avformat_close_input(&avFormatContext);
        avFormatContext = NULL;
    }
    OutvideoStream = NULL; // owned by avFormatContext; just drop the pointer
}

/**
 * @brief CVideoCaptureRtmpSend::InitVideoCodec 初始化编码器
 * @param videoW 视频宽度
 * @param videoH 视频高度
 *
 * @return 如果视频编码器初始成功返回真， 否则返回假
 */
bool CVideoCaptureRtmpSend::InitVideoCodec(int videoW,int videoH)
{
    int ret = 0;

    // 找到编码器
    const AVCodec* videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!videoCodec) {
        QLOG_ERROR() << "InitVideoCodec error";
        return false;
    }

    // 创建编码器上下文
    InvideoCodecContext = avcodec_alloc_context3(videoCodec);
    if (!InvideoCodecContext) {
        QLOG_ERROR() << "InitVideoCodec error";
        return false;
    }

    // 配置编码器参数
    InvideoCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    InvideoCodecContext->codec_id = videoCodec->id;
    InvideoCodecContext->thread_count = XGetCpuNum();

    QLOG_INFO() << "InitVideoCodec get CUP NUM: " << InvideoCodecContext->thread_count;

    //压缩后每秒视频的bit位大小 50kB
    InvideoCodecContext->bit_rate = 10 *1024 * 1024; //
    InvideoCodecContext->width = videoW;
    InvideoCodecContext->height = videoH;
    InvideoCodecContext->time_base = {1, fps};
    InvideoCodecContext->framerate = {fps, 1};

    // 画面组的大小，多少帧一个关键帧
    InvideoCodecContext->gop_size = 10;
    InvideoCodecContext->max_b_frames = 0;
    InvideoCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

    // 打开编码器上下文
    ret = avcodec_open2(InvideoCodecContext, 0, 0);
    if (ret != 0) {
        QLOG_ERROR() << "avcodec_open2 error";
        return false;
    }

    QLOG_INFO() << "XMediaEncode InitVideoCodec finished";

    return true;
}

/// 开始工作
/// Wires the capture's RTMP-frame signal to this sender and starts the
/// capture thread. No-op when no capture has been attached yet.
void CVideoCaptureRtmpSend::start(void)
{
    if(m_VideoCapture == NULL)
        return;

    // Queued connection: frames cross from the capture thread into ours.
    connect(m_VideoCapture, SIGNAL(signal_processVideoRtmpSend(AVFrame *)),
            this, SLOT(processVideoRtmpSend(AVFrame *)),
            Qt::QueuedConnection);

    m_VideoCapture->start();
}

/// 停止工作
/// Detaches from the capture, asks its thread to exit, waits for it, and
/// then closes the encoder/muxer. No-op when no capture is attached.
void CVideoCaptureRtmpSend::stop(void)
{
    if(m_VideoCapture == NULL)
        return;

    disconnect(m_VideoCapture, SIGNAL(signal_processVideoRtmpSend(AVFrame *)),
               this, SLOT(processVideoRtmpSend(AVFrame *)));

    // Request thread exit and block until it has finished.
    m_VideoCapture->setIsExit(true);
    m_VideoCapture->wait();

    CloseVideoSend();
}

/**
 * @brief CVideoCaptureRtmpSend::openUrl2 Attaches an existing capture and
 *        sets up the H.264 encoder plus the FLV/RTMP muxer for decurl.
 * @param pVideoCapture capture providing frames (may be caller-owned)
 * @param decurl        rtmp push address
 * @return true on success
 */
bool CVideoCaptureRtmpSend::openUrl2(CVideoCapture *pVideoCapture,QString decurl)
{
    if(pVideoCapture == NULL || decurl.isEmpty())
        return false;

    m_VideoCapture = pVideoCapture;

    // Common failure path. BUG FIX: the original unconditionally deleted
    // m_VideoCapture here, even when the caller owned it —
    // CVideoCaptureWidget passes the address of a MEMBER object, so the
    // delete was undefined behaviour. Only delete a capture we created
    // ourselves (m_isDeleteVideoCapture set by openUrl()).
    auto fail = [this]() -> bool {
        if (m_isDeleteVideoCapture && m_VideoCapture)
            delete m_VideoCapture;
        m_VideoCapture = NULL;
        return false;
    };

    // Initialize the encoder with the capture's frame size.
    if(!this->InitVideoCodec(m_VideoCapture->getVideoSize().width(),
                         m_VideoCapture->getVideoSize().height()))
    {
        QLOG_ERROR() << "InitVideoCodec failed.";
        return fail();
    }

    // Only video is pushed.
    m_VideoCapture->setIsRtmpSend(true);

    // Output muxer context. BUG FIX: the original first called
    // avformat_alloc_context() and then let avformat_alloc_output_context2()
    // overwrite the pointer, leaking the first context.
    int ret = avformat_alloc_output_context2(&avFormatContext, 0, "flv", decurl.toLocal8Bit().data());
    if (ret < 0 || !avFormatContext) {
        QLOG_ERROR() << "avformat_alloc_output_context2 error.";
        return fail();
    }

    // Add the output video stream.
    AVStream* avStream = avformat_new_stream(avFormatContext, NULL);
    if (!avStream) {
        QLOG_ERROR() << "avformat_new_stream error";
        return fail();
    }

    avStream->codecpar->codec_tag = 0;

    // Copy the encoder parameters into the muxer stream.
    avcodec_parameters_from_context(avStream->codecpar, InvideoCodecContext);
    av_dump_format(avFormatContext, 0, decurl.toLocal8Bit().data(), 1);

    if (InvideoCodecContext->codec_type == AVMEDIA_TYPE_VIDEO) {
        this->OutvideoStream = avStream;
    }

    // Open the network IO and write the FLV header.
    // BUG FIX: the result was previously ignored.
    if (!SendMuxHead(decurl))
        return fail();

    vindex = avStream->index;

    return vindex >= 0;
}

/**
 * @brief CVideoCaptureRtmpSend::openUrl 打开推送网址
 * @param url 要打开的rtmp网址
 *
 * @return 如果网络打开成功返回真，否则返回假
 */
/**
 * @brief CVideoCaptureRtmpSend::openUrl Creates its own capture for srcurl
 *        and pushes it to decurl.
 * @param srcurl source camera address
 * @param decurl rtmp push address
 * @return true when both the capture and the push pipeline opened
 */
bool CVideoCaptureRtmpSend::openUrl(QString srcurl,QString decurl)
{
    if(srcurl.isEmpty() || decurl.isEmpty())
        return false;

    // We create the capture here, so the destructor must delete it.
    m_isDeleteVideoCapture = true;
    m_VideoCapture = new CVideoCapture();

    if(!m_VideoCapture->openUrl(srcurl))
    {
        QLOG_ERROR() << "open video capture fail.";
        delete m_VideoCapture;
        m_VideoCapture = NULL;
        return false;
    }

    // Hand over to the common path that wires the encoder and muxer.
    return openUrl2(m_VideoCapture, decurl);
}

/**
 * @brief CVideoCaptureRtmpSend::SendFrame RTMP推流
 * @param pack
 * @return
 */
/**
 * @brief CVideoCaptureRtmpSend::SendFrame Rescales the packet's timestamps
 *        from the encoder time base to the muxer time base and writes it to
 *        the RTMP output.
 * @param pack encoded packet (may be NULL; then nothing is sent)
 * @return true when the packet was written
 */
bool CVideoCaptureRtmpSend::SendFrame(AVPacket* pack)
{
    // Nothing to push for empty/invalid packets or before the stream exists.
    if (!pack || pack->size <= 0 || !pack->data || vindex < 0)
        return false;

    pack->stream_index = vindex;

    // Only the video stream is supported here.
    if (!OutvideoStream || !InvideoCodecContext ||
            pack->stream_index != OutvideoStream->index)
    {
        QLOG_ERROR() << "[Error] SendFrame type error！！！！ ";
        return false;
    }

    const AVRational stime = InvideoCodecContext->time_base;
    const AVRational dtime = OutvideoStream->time_base;

    // Rescale pts/dts/duration into the muxer's time base.
    pack->pts = av_rescale_q_rnd(pack->pts, stime, dtime, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pack->dts = av_rescale_q_rnd(pack->dts, stime, dtime, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pack->duration = av_rescale_q(pack->duration, stime, dtime);
    pack->pos = -1;

    const int ret = av_interleaved_write_frame(avFormatContext, pack);
    if (ret != 0)
    {
        QLOG_ERROR() << "SendFrame error:" << ret;
        return false;
    }

    return true;
}

/**
 * @brief CVideoCaptureRtmpSend::processVideoRtmpSend rtmp流推送
 * @param pFrame
 */
/**
 * @brief CVideoCaptureRtmpSend::processVideoRtmpSend Slot: encode one raw
 *        frame and push the resulting packet. SendFrame() tolerates the
 *        NULL packet EncodeVideo() returns on encoder errors/EAGAIN.
 * @param pFrame decoded YUV420P frame (ignored when NULL)
 */
void CVideoCaptureRtmpSend::processVideoRtmpSend(AVFrame *pFrame)
{
    if(pFrame != NULL)
        SendFrame(EncodeVideo(pFrame));
}

/**
 * @brief CVideoCaptureRtmpSend::SendMuxHead 打开RTMP网络IO，发送封装头MUX
 * @return
 */
/**
 * @brief CVideoCaptureRtmpSend::SendMuxHead Opens the RTMP network IO and
 *        writes the container (FLV) header.
 * @param url rtmp push address
 * @return true when the IO opened and the header was written
 */
bool CVideoCaptureRtmpSend::SendMuxHead(QString url)
{
    // Open the RTMP network output IO.
    int ret = avio_open(&avFormatContext->pb, url.toLocal8Bit().data(), AVIO_FLAG_WRITE);
    if (ret != 0)
    {
        QLOG_ERROR() << "avio_open error";
        return false;
    }

    // Write the container header.
    ret = avformat_write_header(avFormatContext, NULL);
    if (ret != 0)
    {
        QLOG_ERROR() << "avformat_write_header error";
        // BUG FIX: the freshly opened IO context was leaked on this path.
        avio_closep(&avFormatContext->pb);
        return false;
    }

    return true;
}

/**
 * 开始编码
 * @param frame
 * @return
 */
/**
 * Encodes one raw frame to H.264.
 * @param frame raw YUV420P frame (its pts is overwritten with a counter)
 * @return pointer to the internal reusable packet, or NULL when the encoder
 *         produced nothing (error, or packet not ready yet)
 */
AVPacket* CVideoCaptureRtmpSend::EncodeVideo(AVFrame* frame) {
    // Drop any payload left over from the previous call.
    av_packet_unref(&outVideoPacket);

    // H.264 requires a strictly increasing pts.
    frame->pts = videoPts++;

    // Push the raw frame into the encoder...
    int err = avcodec_send_frame(InvideoCodecContext, frame);
    if (err != 0) {
        QLOG_ERROR() << "EncodeVideo1:" << err;
        return NULL;
    }

    // ...and pull one compressed packet back out.
    err = avcodec_receive_packet(InvideoCodecContext, &outVideoPacket);
    if (err != 0 || outVideoPacket.size <= 0) {
        QLOG_ERROR() << "EncodeVideo2:" << err;
        return NULL;
    }

    return &outVideoPacket;
}

//////////////////////////////////////////////////////////////////////////////////

/// Constructs the video widget; GL objects are created in initializeGL()
/// and frame data arrives via processvideoframe().
CVideoCaptureWidget::CVideoCaptureWidget(QWidget *parent)
    : QOpenGLWidget(parent)
{
    m_Id = -1;

    // Latest-frame geometry and pixel pointer (set by processvideoframe()).
    m_nVideoW   = 0;
    m_nVideoH   = 0;
    m_outbuffer = NULL;

    // GL state, populated in initializeGL().
    textureUniformY = 0;
    textureUniformU = 0;
    textureUniformV = 0;
    id_y = 0;
    id_u = 0;
    id_v = 0;
    m_pTextureY = NULL;
    m_pTextureU = NULL;
    m_pTextureV = NULL;
    m_pVSHader = NULL;
    m_pFSHader = NULL;
    m_pShaderProgram = NULL;

    m_RtmpSendInitSuccessed = false;
    m_ChangeBig = false;
}

/// Shuts down the capture thread and releases GL resources.
CVideoCaptureWidget::~CVideoCaptureWidget()
{
    stop();
    // BUG FIX: stop() only pauses playback; the capture thread kept running
    // while this widget's members (including m_VideoCapture itself) were
    // destroyed. close() asks the thread to exit and waits for it.
    close();

    // BUG FIX: the texture objects were destroy()ed but never deleted
    // (they have no QObject parent), leaking the heap objects.
    if(m_pTextureY) { m_pTextureY->destroy(); delete m_pTextureY; m_pTextureY = NULL; }
    if(m_pTextureU) { m_pTextureU->destroy(); delete m_pTextureU; m_pTextureU = NULL; }
    if(m_pTextureV) { m_pTextureV->destroy(); delete m_pTextureV; m_pTextureV = NULL; }
    // Shaders are parented to this widget, so deleteLater()/parent cleanup
    // disposes of them.
    if(m_pVSHader) m_pVSHader->deleteLater();
    if(m_pFSHader) m_pFSHader->deleteLater();
    // BUG FIX: the program has no parent; deleteLater() never runs once the
    // event loop stops, so delete it directly.
    if(m_pShaderProgram) { delete m_pShaderProgram; m_pShaderProgram = NULL; }
}

/// 打开一个网络摄像头
/// Opens a network camera for display in this widget.
/// The capture is asked to emit decoded frames, which this widget renders.
bool CVideoCaptureWidget::openUrl(QString url,bool isHarddecoding)
{
    m_VideoCapture.setDisplayFrame(true);
    return m_VideoCapture.openUrl(url, isHarddecoding);
}

/// 是否启用rtmp推流
/// Enables RTMP pushing to decurl. The sender itself is initialized lazily
/// on the first decoded frame (see processvideoframe()), once the frame
/// size is known.
bool CVideoCaptureWidget::enableRtmp(QString decurl)
{
    if(decurl.isEmpty())
        return false;

    m_decurl = decurl;
    m_RtmpSendInitSuccessed = false;

    return true;
}

/// 处理视频帧
/// Slot: stores the latest decoded YUV420P frame for paintGL(), lazily
/// starts the RTMP sender when enabled, and schedules a repaint.
void CVideoCaptureWidget::processvideoframe(uint8_t *outbuffer,int videoW,int videoH)
{
    // Keep a pointer to the newest frame; paintGL() uploads it as textures.
    m_outbuffer = outbuffer;
    m_nVideoW = videoW;
    m_nVideoH = videoH;

    // Start pushing once a frame (and therefore a size) is available.
    if(!m_RtmpSendInitSuccessed && !m_decurl.isEmpty())
        m_RtmpSendInitSuccessed = m_VideoCaptureRtmpSend.openUrl2(&m_VideoCapture, m_decurl);

    update(); // request a repaint
}

/// 开始工作
/// Starts the capture thread and routes its decoded frames to this widget.
void CVideoCaptureWidget::open(void)
{
    // Queued connection: frames cross from the capture thread to the GUI thread.
    connect(&m_VideoCapture, SIGNAL(signal_processVideoFrame(uint8_t*,int,int)),
            this, SLOT(processvideoframe(uint8_t*,int,int)),
            Qt::QueuedConnection);

    m_VideoCapture.start();
}

/// 停止工作
/// Disconnects frame delivery, asks the capture thread to exit, and waits
/// until it has finished.
void CVideoCaptureWidget::close(void)
{
    disconnect(&m_VideoCapture, SIGNAL(signal_processVideoFrame(uint8_t*,int,int)),
               this, SLOT(processvideoframe(uint8_t*,int,int)));

    m_VideoCapture.setIsExit(true);
    m_VideoCapture.wait();
}

/// 开始播放视频
void CVideoCaptureWidget::play(void)
{
    // Resume frame delivery; the decode thread itself keeps running.
    m_VideoCapture.setIsPlaying(true);
}

/// 停止播放视频
void CVideoCaptureWidget::stop(void)
{
    // Pause playback only — the decode thread keeps running; call close()
    // to actually stop the thread.
    m_VideoCapture.setIsPlaying(false);
}

// Builds the GL pipeline used to render YUV420P frames: compiles a
// pass-through vertex shader and a YUV->RGB fragment shader, links them,
// sets up a full-screen quad, and creates one texture per Y/U/V plane.
void CVideoCaptureWidget::initializeGL()
{
    initializeOpenGLFunctions();
    glEnable(GL_DEPTH_TEST);
    // The modern OpenGL pipeline relies on shaders to process incoming data.
    // A shader is a small program written in the OpenGL Shading Language
    // (GLSL); see the GLSL specification for the language details.
    // Create the vertex shader object.
    m_pVSHader = new QOpenGLShader(QOpenGLShader::Vertex, this);
    // Vertex shader source: pass the position through unchanged and forward
    // the texture coordinate to the fragment stage.
    const char *vsrc = "attribute vec4 vertexIn; \
    attribute vec2 textureIn; \
    varying vec2 textureOut;  \
    void main(void)           \
    {                         \
        gl_Position = vertexIn; \
        textureOut = textureIn; \
    }";
    // Compile the vertex shader.
    bool bCompile = m_pVSHader->compileSourceCode(vsrc);
    if(!bCompile)
    {
        // NOTE(review): compile failures are silently ignored here and below.
    }
    // Create the fragment shader; it converts YUV to RGB on the GPU.
    m_pFSHader = new QOpenGLShader(QOpenGLShader::Fragment, this);
    // Fragment shader source: sample the Y/U/V planes and apply a
    // YUV -> RGB conversion matrix.
    const char *fsrc = "varying vec2 textureOut; \
    uniform sampler2D tex_y; \
    uniform sampler2D tex_u; \
    uniform sampler2D tex_v; \
    void main(void) \
    { \
        vec3 yuv; \
        vec3 rgb; \
        yuv.x = texture2D(tex_y, textureOut).r; \
        yuv.y = texture2D(tex_u, textureOut).r - 0.5; \
        yuv.z = texture2D(tex_v, textureOut).r - 0.5; \
        rgb = mat3( 1,       1,         1, \
                    0,       -0.39465,  2.03211, \
                    1.13983, -0.58060,  0) * yuv; \
        gl_FragColor = vec4(rgb, 1); \
    }";
    // Compile the fragment shader.
    bCompile = m_pFSHader->compileSourceCode(fsrc);
    if(!bCompile)
    {
    }
#define PROGRAM_VERTEX_ATTRIBUTE 0
#define PROGRAM_TEXCOORD_ATTRIBUTE 1
    // NOTE(review): the two macros above are defined but never used; the
    // attribute slots actually bound are ATTRIB_VERTEX/ATTRIB_TEXTURE.
    // Create the shader program container.
    m_pShaderProgram = new QOpenGLShaderProgram;
    // Attach the fragment shader to the program.
    m_pShaderProgram->addShader(m_pFSHader);
    // Attach the vertex shader to the program.
    m_pShaderProgram->addShader(m_pVSHader);
    // Bind attribute "vertexIn" (declared in the vertex shader source) to
    // slot ATTRIB_VERTEX.
    m_pShaderProgram->bindAttributeLocation("vertexIn", ATTRIB_VERTEX);
    // Bind attribute "textureIn" to slot ATTRIB_TEXTURE.
    m_pShaderProgram->bindAttributeLocation("textureIn", ATTRIB_TEXTURE);
    // Link all attached shaders.
    m_pShaderProgram->link();
    // Make the program current.
    m_pShaderProgram->bind();
    // Look up the sampler uniforms tex_y/tex_u/tex_v declared in the
    // fragment shader source.
    textureUniformY = m_pShaderProgram->uniformLocation("tex_y");
    textureUniformU =  m_pShaderProgram->uniformLocation("tex_u");
    textureUniformV =  m_pShaderProgram->uniformLocation("tex_v");
    // Full-screen quad vertices in normalized device coordinates.
    static const GLfloat vertexVertices[] = {
        -1.0f, -1.0f,
         1.0f, -1.0f,
         -1.0f, 1.0f,
         1.0f, 1.0f,
    };
    // Matching texture coordinates.
    static const GLfloat textureVertices[] = {
        0.0f,  1.0f,
        1.0f,  1.0f,
        0.0f,  0.0f,
        1.0f,  0.0f,
    };
    // Supply the vertex data for attribute ATTRIB_VERTEX.
    glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, vertexVertices);
    // Supply the texture-coordinate data for attribute ATTRIB_TEXTURE.
    glVertexAttribPointer(ATTRIB_TEXTURE, 2, GL_FLOAT, 0, 0, textureVertices);
    // Enable the vertex attribute array (disabled by default).
    glEnableVertexAttribArray(ATTRIB_VERTEX);
    // Enable the texture-coordinate attribute array.
    glEnableVertexAttribArray(ATTRIB_TEXTURE);
    // Create one texture object per Y/U/V plane.
    m_pTextureY = new QOpenGLTexture(QOpenGLTexture::Target2D);
    m_pTextureU = new QOpenGLTexture(QOpenGLTexture::Target2D);
    m_pTextureV = new QOpenGLTexture(QOpenGLTexture::Target2D);
    m_pTextureY->create();
    m_pTextureU->create();
    m_pTextureV->create();
    // Remember the GL texture id of the Y plane.
    id_y = m_pTextureY->textureId();
    // Remember the GL texture id of the U plane.
    id_u = m_pTextureU->textureId();
    // Remember the GL texture id of the V plane.
    id_v = m_pTextureV->textureId();
    glClearColor(0.3f,0.3f,0.3f,0.0f); // background color
    //qDebug("addr=%x id_y = %d id_u=%d id_v=%d\n", this, id_y, id_u, id_v);
}

void CVideoCaptureWidget::mouseDoubleClickEvent(QMouseEvent *event)
{
    // Only a left-button double click is forwarded to the manager;
    // any other button is ignored.
    if (event->button() != Qt::LeftButton)
        return;

    emit signalsMouseDoubleClickEvent(this->getID());
}

void CVideoCaptureWidget::resizeGL(int w, int h)
{
    // A zero height would later cause a divide-by-zero in aspect math,
    // so clamp it to 1 (matching the original behavior exactly: only 0
    // is rewritten, other values pass through untouched).
    const int viewportH = (h == 0) ? 1 : h;

    // Map the GL viewport onto the whole widget area.
    glViewport(0, 0, w, viewportH);
}

void CVideoCaptureWidget::paintGL()
{
    if(m_outbuffer == NULL)
        return;

    //加载y数据纹理
     //激活纹理单元GL_TEXTURE0
    glActiveTexture(GL_TEXTURE0);
    //使用来自y数据生成纹理
    glBindTexture(GL_TEXTURE_2D, id_y);
    //使用内存中m_pBufYuv420p数据创建真正的y数据纹理
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, m_nVideoW, m_nVideoH, 0, GL_RED, GL_UNSIGNED_BYTE, m_outbuffer);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    //加载u数据纹理
    glActiveTexture(GL_TEXTURE1);//激活纹理单元GL_TEXTURE1
    glBindTexture(GL_TEXTURE_2D, id_u);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, m_nVideoW/2, m_nVideoH/2, 0, GL_RED, GL_UNSIGNED_BYTE, (char*)m_outbuffer+m_nVideoW*m_nVideoH);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    //加载v数据纹理
    glActiveTexture(GL_TEXTURE2);//激活纹理单元GL_TEXTURE2
    glBindTexture(GL_TEXTURE_2D, id_v);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, m_nVideoW/2, m_nVideoH/2, 0, GL_RED, GL_UNSIGNED_BYTE, (char*)m_outbuffer+m_nVideoW*m_nVideoH*5/4);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    //指定y纹理要使用新值 只能用0,1,2等表示纹理单元的索引，这是opengl不人性化的地方
    //0对应纹理单元GL_TEXTURE0 1对应纹理单元GL_TEXTURE1 2对应纹理的单元
    glUniform1i(textureUniformY, 0);
    //指定u纹理要使用新值
    glUniform1i(textureUniformU, 1);
    //指定v纹理要使用新值
    glUniform1i(textureUniformV, 2);
    //使用顶点数组方式绘制图形
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}

//////////////////////////////////////////////////////////////////////////////////

CVideoCapManager::CVideoCapManager(QObject *parent)
    : QObject(parent)
{
#ifdef REMOTECAMERA_ENABLE_DARKNET
    // Detector state starts cleared; enableDetector() fills it in later.
    m_Detector = NULL;
    m_isLoadDetector = false;
    m_DetectorMaxTime = 0;
#endif
}

// Destructor: stop and release every capture widget and RTMP pusher,
// then tear down the detector. Captures are destroyed first — presumably
// because frame processing (DetectorprocessVideoFrame) dereferences
// m_Detector, so the detector must outlive them; confirm before reordering.
CVideoCapManager::~CVideoCapManager()
{
    deleteAllVideoCaptures();
    deleteAllVideoCapRtmp();

    cleanDetector();
}

/// 删除所有的摄像头
void CVideoCapManager::deleteAllVideoCaptures(void)
{
    for(int i=0;i<m_VideoCapManager.size();i++)
    {
        m_VideoCapManager[i]->close();
        m_VideoCapManager[i]->deleteLater();
    }

    m_VideoCapManager.clear();
}

/**
 * @brief CVideoCapManager::enableDetector Load and enable the darknet detector.
 * @param cfg_filename path to the darknet .cfg file
 * @param weight_filename path to the weights file
 * @param names_filename path to the class-names file
 * @param gpu_id GPU to run inference on (must be >= 0)
 * @param DetectorMaxTime detection interval; NOTE(review): documented as
 *        milliseconds, but DetectorprocessVideoFrame compares it against
 *        time(NULL), which is in seconds — confirm the intended unit.
 *
 * @return true if the detector was loaded; false on bad arguments or when
 *         darknet support is compiled out (the original returned true in
 *         that case, falsely reporting an active detector).
 */
bool CVideoCapManager::enableDetector(QString cfg_filename,
                                  QString weight_filename,
                                  QString names_filename,
                                  int gpu_id,
                                  int DetectorMaxTime)
{
    if(cfg_filename.isEmpty() ||
            weight_filename.isEmpty() ||
            names_filename.isEmpty() ||
            gpu_id < 0)
        return false;

#ifdef REMOTECAMERA_ENABLE_DARKNET
    // Release any previously loaded detector first so repeated calls
    // do not leak the old Detector instance.
    cleanDetector();

    m_Detector = new Detector(cfg_filename.toLocal8Bit().data(),
                          weight_filename.toLocal8Bit().data(),
                          gpu_id);

    objects_names = objects_names_from_file(names_filename.toLocal8Bit().data());
    m_isLoadDetector = true;
    m_DetectorMaxTime = DetectorMaxTime;

    return true;
#else
    Q_UNUSED(DetectorMaxTime);
    return false;
#endif
}

/**
 * @brief CVideoCapManager::cleanDetector Release the darknet detector
 *        and mark it as unloaded. Safe to call when nothing is loaded.
 */
void CVideoCapManager::cleanDetector(void)
{
#ifdef REMOTECAMERA_ENABLE_DARKNET
    delete m_Detector;   // deleting NULL is a well-defined no-op
    m_Detector = NULL;

    m_isLoadDetector = false;
#endif
}

/**
 * @brief CVideoCapManager::DetectorprocessVideoFrame Run (rate-limited)
 *        object detection on one I420 frame and draw the boxes into it.
 * @param videocapture the capture the frame belongs to (keys the per-capture
 *        detection state in m_videocapturebboxs)
 * @param data I420 frame buffer, videoW*videoH*3/2 bytes, modified in place
 * @param videoW frame width in pixels
 * @param videoH frame height in pixels
 */
void CVideoCapManager::DetectorprocessVideoFrame(CVideoCapture *videocapture,unsigned char *data,int videoW,int videoH)
{
#ifdef REMOTECAMERA_ENABLE_DARKNET
    if(!m_isLoadDetector || m_Detector == NULL ||
            data == NULL || videoW <= 0 || videoH <= 0 ||
            videocapture == NULL)
        return;

    QMutexLocker tempLocker(&m_DetectorMutex);

    // Wrap the caller's I420 buffer without copying: Y plane (videoH rows)
    // followed by U+V planes (videoH/2 rows), all videoW columns wide.
    cv::Mat myuv( videoH + videoH / 2, videoW, CV_8UC1, (unsigned char *) data);
    cv::Mat rgbFull, rgbHalf;
    cv::cvtColor(myuv, rgbFull, CV_YUV420p2RGB);
    // Detect on a half-resolution copy to cut inference cost.
    cv::resize(rgbFull, rgbHalf, Size(rgbFull.cols / 2, rgbFull.rows / 2), 0, 0, INTER_LINEAR);

    // BUG FIX: must be a reference. The original took a copy, so curTime was
    // never written back into the map and the detection branch never fired.
    tagVideoCaptureBbox &capState = m_videocapturebboxs[videocapture];

    if(capState.curTime == 0)
        capState.curTime = (quint64)time(NULL);

    // NOTE(review): time(NULL) ticks in seconds, so m_DetectorMaxTime acts as
    // a seconds interval here despite being documented as milliseconds.
    if((quint64)time(NULL) > capState.curTime + m_DetectorMaxTime)
    {
        capState.curTime = 0;

        auto det_image = m_Detector->mat_to_image_resize(rgbHalf);
        capState.bboxs = m_Detector->detect_resized(*det_image, rgbHalf.size().width, rgbHalf.size().height,0.4);
    }

    if(!capState.bboxs.empty())
        draw_boxes(rgbHalf, capState.bboxs, objects_names);

    // BUG FIX: scale the annotated image back to the caller's resolution
    // before converting to YUV. The original converted the half-size image
    // and then read far past the end of it in the memcpy below.
    cv::resize(rgbHalf, rgbFull, Size(videoW, videoH), 0, 0, INTER_LINEAR);

    // Mat -> YUV420. CV_RGB2YUV_YV12 / CV_RGB2YUV give different plane
    // orders; I420 matches the input layout above.
    cv::Mat yuvOut;
    cv::cvtColor(rgbFull, yuvOut, CV_RGB2YUV_I420);
    // BUG FIX: I420 is 3/2 bytes per pixel. The original wrote
    // `videoW * videoH * (3 / 2)`, where the parenthesized integer division
    // evaluates to 1 and silently dropped the chroma planes.
    memcpy( data, yuvOut.data, (size_t)videoW * videoH * 3 / 2 );
#else
    Q_UNUSED(videocapture);
    Q_UNUSED(data);
    Q_UNUSED(videoW);
    Q_UNUSED(videoH);
#endif
}

/// 设置指定ID的网络摄像头是否显示
void CVideoCapManager::showVideoCapture(int pId,bool isShow,bool isMyself)
{
    if(m_VideoCapManager.isEmpty())
        return;

    if(pId == -1)
    {
        for(int i=0;i<m_VideoCapManager.size();i++)
        {
            if(isShow) m_VideoCapManager[i]->show();
            else m_VideoCapManager[i]->hide();
        }

        return;
    }

    if(isMyself)
    {
        for(int i=0;i<m_VideoCapManager.size();i++)
        {
            if(m_VideoCapManager[i]->getID() == pId)
            {
                if(isShow) m_VideoCapManager[i]->show();
                else m_VideoCapManager[i]->hide();
            }
            else
            {
                if(isShow) m_VideoCapManager[i]->hide();
                else m_VideoCapManager[i]->show();
            }
        }
    }
    else
    {
        for(int i=0;i<m_VideoCapManager.size();i++)
        {
            if(m_VideoCapManager[i]->getID() == pId)
            {
                if(isShow) m_VideoCapManager[i]->hide();
                else m_VideoCapManager[i]->show();

                continue;
            }

            if(isShow) m_VideoCapManager[i]->show();
            else m_VideoCapManager[i]->hide();
        }
    }
}

/// Look up a capture widget by its ID.
/// Returns NULL for negative IDs or when no widget carries that ID.
CVideoCaptureWidget *CVideoCapManager::getVideoCapture(int pId)
{
    if(pId < 0)
        return NULL;

    for (CVideoCaptureWidget *capture : m_VideoCapManager)
    {
        if (capture->getID() == pId)
            return capture;
    }

    // Covers both "not found" and "list empty".
    return NULL;
}

/**
 * @brief CVideoCapManager::addVideoCapture Create a capture widget for a
 *        stream URL and register it with the manager.
 * @param parwidget parent widget for the new capture widget
 * @param capUrl network camera stream URL
 * @param isHarddecoding whether to request hardware decoding
 * @return the new widget, or NULL if the URL is empty or cannot be opened
 */
CVideoCaptureWidget *CVideoCapManager::addVideoCapture(QWidget *parwidget,QString capUrl,bool isHarddecoding)
{
    if(capUrl.isEmpty())
        return NULL;

    CVideoCaptureWidget *newVideoCapture = new CVideoCaptureWidget(parwidget);
    if(!newVideoCapture->openUrl(capUrl,isHarddecoding))
    {
        delete newVideoCapture;
        return NULL;
    }

    // BUG FIX: assign one past the largest ID in use. The original used
    // m_VideoCapManager.size(), which produces duplicate IDs once
    // delVideoCapture() has removed an entry from the middle of the list
    // (ID-based lookups like getVideoCapture would then be ambiguous).
    // When no deletions have occurred this yields exactly size(), so the
    // numbering is unchanged for existing callers.
    int newId = 0;
    for(int i=0;i<m_VideoCapManager.size();i++)
    {
        if(m_VideoCapManager[i]->getID() >= newId)
            newId = m_VideoCapManager[i]->getID() + 1;
    }

    newVideoCapture->setID(newId);
    m_VideoCapManager.push_back(newVideoCapture);

    connect(newVideoCapture, SIGNAL(signalsMouseDoubleClickEvent(int)),
                     this,
            SLOT(slotsMouseDoubleClickEvent(int)));

    return newVideoCapture;
}

/**
 * @brief CVideoCapManager::delVideoCapture Remove one capture widget from the
 *        manager, closing it and scheduling its deletion.
 * @param parwidget the capture widget to remove
 * @return true if the widget was found and removed; false if the argument is
 *         NULL, the list is empty, or the widget is not managed here (the
 *         original returned true even when nothing was removed).
 */
bool CVideoCapManager::delVideoCapture(CVideoCaptureWidget *parwidget)
{
    if(parwidget == NULL || m_VideoCapManager.isEmpty())
        return false;

    // QVector::indexOf replaces the deprecated qFind (removed in Qt 6).
    const int idx = m_VideoCapManager.indexOf(parwidget);
    if(idx == -1)
        return false;

    m_VideoCapManager[idx]->close();
    // deleteLater defers destruction to the event loop — safe for widgets.
    m_VideoCapManager[idx]->deleteLater();
    m_VideoCapManager.remove(idx);

    return true;
}

/**
 * @brief CVideoCapManager::addVideoCaptureRtmp Create an RTMP push session
 *        that reads from a camera URL and streams to an RTMP URL.
 * @param capUrl source camera stream URL
 * @param rtmpUrl destination RTMP URL
 * @return the new sender on success, NULL on empty arguments or open failure
 */
CVideoCaptureRtmpSend* CVideoCapManager::addVideoCaptureRtmp(QString capUrl,QString rtmpUrl)
{
    // Both endpoints are required.
    if(capUrl.isEmpty() || rtmpUrl.isEmpty())
        return NULL;

    CVideoCaptureRtmpSend *rtmpSender = new CVideoCaptureRtmpSend(this);
    if(rtmpSender->openUrl(capUrl,rtmpUrl))
    {
        m_VideoCapRtmpSendManager.push_back(rtmpSender);
        return rtmpSender;
    }

    // Could not open the pipeline — discard the half-built sender.
    delete rtmpSender;
    return NULL;
}

/**
 * @brief CVideoCapManager::delVideoCaptureRtmp Remove one RTMP push session,
 *        stopping it and scheduling its deletion.
 * @param caprtmp the sender to remove
 * @return true if the sender was found and removed; false if the argument is
 *         NULL, the list is empty, or the sender is not managed here (the
 *         original returned true even when nothing was removed).
 */
bool CVideoCapManager::delVideoCaptureRtmp(CVideoCaptureRtmpSend *caprtmp)
{
    if(caprtmp == NULL || m_VideoCapRtmpSendManager.isEmpty())
        return false;

    // QVector::indexOf replaces the deprecated qFind (removed in Qt 6).
    const int idx = m_VideoCapRtmpSendManager.indexOf(caprtmp);
    if(idx == -1)
        return false;

    m_VideoCapRtmpSendManager[idx]->stop();
    m_VideoCapRtmpSendManager[idx]->deleteLater();
    m_VideoCapRtmpSendManager.remove(idx);

    return true;
}

/**
 * @brief CVideoCapManager::deleteAllVideoCapRtmp 清除所有的网络摄像头推流
 */
void CVideoCapManager::deleteAllVideoCapRtmp(void)
{
    for(int i=0;i<m_VideoCapRtmpSendManager.size();i++)
    {
        m_VideoCapRtmpSendManager[i]->stop();
        m_VideoCapRtmpSendManager[i]->deleteLater();
    }

    m_VideoCapRtmpSendManager.clear();
}

void CVideoCapManager::slotsMouseDoubleClickEvent(int pId)
{
    CVideoCaptureWidget *pVideoCaptureWidget = NULL;
    int pVisibleWidgets = 0;

    for(int i=0;i<m_VideoCapManager.size();i++)
    {
        if(m_VideoCapManager[i]->getID() == pId)
        {
            pVideoCaptureWidget = m_VideoCapManager[i];
        }

        if(m_VideoCapManager[i]->isVisible())
           pVisibleWidgets+=1;
    }

    if(pVideoCaptureWidget)
    {
        if(pVisibleWidgets > 1)
            pVideoCaptureWidget->setChangeBig(false);
        else
            pVideoCaptureWidget->setChangeBig(true);
    }

    emit signalsMouseDoubleClickEvent(pId);
}

#endif
