#include "device.h"

#include <algorithm>
#include <cstdio>
#include <cstring>

// FFmpeg interrupt callback: abort blocking I/O once no progress has been
// made for more than 5 seconds. m_curIOTime is refreshed by the capture loop
// on every successful read, so the offset measures time since last progress.
// Returns 1 to abort the blocking FFmpeg call, 0 to keep waiting.
static int cbOpenInputTimeout(void *user)
{
    Device *device = static_cast<Device *>(user);
    if(device != nullptr)
    {
        // Default timeout: stop blocking after 5s (fix: removed the
        // redundant `msOffset = msOffset = ...` double assignment).
        int64_t msOffset = av_gettime_relative() / 1000 - device->m_curIOTime;
        if(msOffset > 5000)
        {
            string info = device->m_param.deviceName + ":" + device->m_param.app + device->m_param.stream;
            log_printf(LOG_LEV, "cbInputTimeout %s", info.c_str());
            return 1;
        }
    }

    return 0;
}

// Splitter callback: wraps one encoded H264/H265 NAL unit into an mk_frame,
// optionally dumps the raw bitstream to ./test.h264, and feeds the frame
// into the media source. The frame reference is always released here.
static void cbH264Frame(void *user_data, mk_h264_splitter splitter, const char *data, int size)
{
    Device *device = static_cast<Device *>(user_data);
    if(device == nullptr)
    {
        return;
    }

    // Stamp the frame with the current wall-clock time (ms).
    uint64_t dts = mk_util_get_current_millisecond();

    // Build the media frame with the configured codec.
    mk_frame frame;
    if(!device->m_param.isH264)
    {
        frame = mk_frame_create(MKCodecH265, dts, dts, data, size, NULL, NULL);
    }
    else
    {
        frame = mk_frame_create(MKCodecH264, dts, dts, data, size, NULL, NULL);
    }

    // Optional raw bitstream dump for debugging (disabled after 5 MiB).
    if(device->m_enableH264File)
    {
        if(nullptr == device->m_h264File)
        {
            device->m_h264File = fopen("./test.h264", "w+");
            if(nullptr == device->m_h264File)
            {
                // fix: this path used to `return`, dropping (and leaking) the
                // frame; now only the dump is disabled and streaming continues.
                device->m_enableH264File = false;
            }
            else
            {
                device->m_h264FileSize = 0;
            }
        }

        if(device->m_h264File && frame)
        {
            // fix: the first frame after opening the file was silently skipped.
            const char *frameData = mk_frame_get_data(frame);
            uint32_t frameSize = mk_frame_get_data_size(frame);
            fwrite(frameData, 1, frameSize, device->m_h264File);

            // Stop dumping once the size cap is reached.
            device->m_h264FileSize += frameSize;
            if(device->m_h264FileSize > 5 * 1024 * 1024)
            {
                fclose(device->m_h264File);
                device->m_h264File = nullptr;
                device->m_enableH264File = false;
            }
        }
    }

    // Feed the media source; always drop our frame reference
    // (fix: the frame leaked when m_media was null).
    if(frame)
    {
        if(device->m_media)
        {
            mk_media_input_frame(device->m_media, frame);
        }
        mk_frame_unref(frame);
    }
}

// Default constructor: all members use their in-class defaults.
Device::Device() = default;

// Stops the worker thread (which also releases the media source) and
// destroys the watermark filter. `delete` on nullptr is a no-op, so no
// explicit null check is needed.
Device::~Device()
{
    stopTask();

    delete m_videoFilter;
    m_videoFilter = nullptr;
}

// Copies the camera configuration into m_param and replaces any
// out-of-range value with a sane fallback. Also selects the run mode.
void Device::setDeviceParam(const STU_CAMERAINFO &info)
{
    m_param.inHeight = info.inHeight;
    m_param.inWidth = info.inWidth;
    m_param.inFPS = info.inFPS;
    m_param.deviceName = info.devName;
    m_param.outHeight = info.outHeight;
    m_param.outWidth = info.outWidth;
    m_param.outFPS = info.outFPS;
    m_param.bitRate = info.outBitrate;
    m_param.isH264 = info.outH264;
    m_param.stream = info.stream;
    m_param.app = info.app;
    m_param.record = info.record;

    // Returns `fallback` when `value` lies outside [lo, hi].
    auto sanitize = [](int value, int lo, int hi, int fallback) {
        return (value < lo || value > hi) ? fallback : value;
    };

    m_param.inFPS     = sanitize(m_param.inFPS,     10,  30,   25);
    m_param.inWidth   = sanitize(m_param.inWidth,   640, 1920, 640);
    m_param.inHeight  = sanitize(m_param.inHeight,  480, 1080, 480);
    m_param.outFPS    = sanitize(m_param.outFPS,    10,  30,   25);
    m_param.outWidth  = sanitize(m_param.outWidth,  640, 1920, 1280);
    m_param.outHeight = sanitize(m_param.outHeight, 480, 1080, 720);

    // Bitrate bounds, checked in this order:
    // below 512 kbps -> 1024 kbps; above 4096 kbps -> 2048 kbps.
    if(m_param.bitRate < 512000)
    {
        m_param.bitRate = 1024000;
    }
    if(m_param.bitRate > 4096000)
    {
        m_param.bitRate = 2048000;
    }

    // Run-mode selection (dispatched in Device::run).
    m_runWay = info.protocal;
}

// Starts the capture worker thread. Requires the media source and the
// frame splitter to exist already (see initMediaSource); at most one
// worker thread is created.
void Device::startTask()
{
    if(nullptr == m_media || nullptr == m_splitter)
    {
        log_printf(LOG_LEV, "m_media m_splitter not ready!");
        return;
    }

    m_bRun.store(true);

    if(nullptr == m_thread)
    {
        m_thread = new std::thread(&Device::run, this);
    }
}

// Signals the worker loop to stop, joins the thread, then releases the
// media source and the thread object.
void Device::stopTask()
{
    m_bRun.store(false);

    if(nullptr == m_thread)
    {
        return;
    }

    if(m_thread->joinable())
    {
        m_thread->join();
        log_printf(LOG_LEV, "Device thread finished");
    }

    releaseMediaSource();

    delete m_thread;
    m_thread = nullptr;
}

// Rebuilds the media source and restarts the capture thread after a
// failure. The log line records which device/app/stream is reconnecting.
// NOTE(review): assumes stopTask() already ran (startTask only spawns a
// thread when m_thread is null) — confirm against callers.
void Device::autoReconnect()
{
    log_printf(LOG_LEV, "autoReconnect %s:%s %s", m_param.deviceName.c_str(), m_param.app.c_str(), m_param.stream.c_str());
    initMediaSource();
    startTask();
}

// Creates the ZLMediaKit media source, registers the video track with the
// configured output geometry/codec, and creates the frame splitter that
// feeds cbH264Frame. Any previous source is released first.
void Device::initMediaSource()
{
    releaseMediaSource();

    // Media source identified by app/stream from the device config.
    m_media = mk_media_create("", m_param.app.c_str(), m_param.stream.c_str(), 0, 0, m_param.record);
    if(!m_media)
    {
        log_printf(LOG_LEV, "mk_media_create error");
        return;
    }

    // Video track description (fix: unset codec_args fields were previously
    // left uninitialized).
    codec_args v_args;
    memset(&v_args, 0, sizeof(v_args));
    v_args.video.fps = m_param.outFPS;
    v_args.video.height = m_param.outHeight;
    v_args.video.width = m_param.outWidth;
    mk_track v_track;
    if(!m_param.isH264)
    {
        v_track = mk_track_create(MKCodecH265, &v_args);
    }
    else
    {
        v_track = mk_track_create(MKCodecH264, &v_args);
    }

    mk_media_init_track(m_media, v_track);
    mk_media_init_complete(m_media);
    mk_track_unref(v_track);

    // Splitter that slices the encoded bitstream into frames
    // (third argument selects H265 splitting when configured).
    m_splitter = mk_h264_splitter_create(cbH264Frame, this, !m_param.isH264);
    if(!m_splitter)
    {
        log_printf(LOG_LEV, "mk_h264_splitter_create error");
        releaseMediaSource();   // fix: don't keep a half-initialized media source
        return;
    }

    log_printf(LOG_LEV, "initMediaSource Success! %d %d %d H264:%d", m_param.outFPS, m_param.outWidth, m_param.outHeight, m_param.isH264);
}

// Releases the media source and the frame splitter, logging once when
// there is actually something to release.
void Device::releaseMediaSource()
{
    const bool hasResources = (m_media != nullptr) || (m_splitter != nullptr);
    if(hasResources)
    {
        log_printf(LOG_LEV, "releaseMediaSource");
    }

    if(m_media != nullptr)
    {
        mk_media_release(m_media);
        m_media = nullptr;
    }

    if(m_splitter != nullptr)
    {
        mk_h264_splitter_release(m_splitter);
        m_splitter = nullptr;
    }
}

// 打开摄像头设备
// Opens the camera through FFmpeg's Video4Linux2 demuxer with the configured
// resolution/framerate and a 5s interrupt timeout (cbOpenInputTimeout).
// Returns an opened AVFormatContext with stream info, or nullptr on error.
AVFormatContext* Device::openCameraInFFmpeg(const char* device_name)
{
    AVFormatContext *fmt_ctx = avformat_alloc_context();
    AVDictionary* options = nullptr;

    // v4l2 capture options.
    std::string fps = std::to_string(m_param.inFPS);
    std::string w = std::to_string(m_param.inWidth);
    std::string h = std::to_string(m_param.inHeight);
    std::string videoSize = w + "x" + h;
    av_dict_set(&options, "framerate", fps.c_str(), 0);
    av_dict_set(&options, "video_size", videoSize.c_str(), 0);
    av_dict_set(&options, "probesize", "10M", 0);
    av_dict_set(&options, "rtbufsize", "4096000", 0);

    // Interrupt callback so blocking I/O can time out.
    m_curIOTime = av_gettime_relative() / 1000;
    fmt_ctx->interrupt_callback.callback = cbOpenInputTimeout;
    fmt_ctx->interrupt_callback.opaque = this;

    // Open the camera device. On failure avformat_open_input frees fmt_ctx
    // and sets it to NULL itself.
    AVInputFormat *iFmt = av_find_input_format("v4l2");
    int ret = avformat_open_input(&fmt_ctx, device_name, iFmt, &options);
    av_dict_free(&options);   // fix: unconsumed options were leaked
    if (ret < 0)
    {
        getFFmpegError(ret);
        log_printf(LOG_LEV, "avformat_open_input error %s", device_name);
        return nullptr;
    }

    // Probe stream information.
    ret = avformat_find_stream_info(fmt_ctx, nullptr);
    if (ret < 0)
    {
        getFFmpegError(ret);
        log_printf(LOG_LEV, "avformat_find_stream_info");
        avformat_close_input(&fmt_ctx);   // fix: fmt_ctx leaked on this path
        return nullptr;
    }

    return fmt_ctx;
}

int Device::openCameraInV4l2(const char *device_name)
{
    log_printf(LOG_LEV, "openCameraInV4l2 %s", device_name);

    // 打开设备
    m_videoFd = open(device_name, O_RDWR);
    if(m_videoFd < 0)
    {
        log_printf(LOG_LEV, "video open fail! %s", device_name);
        return -1;
    }

    //查看设备是否为视频采集设备
    struct v4l2_capability vcap;
    ioctl(m_videoFd, VIDIOC_QUERYCAP, &vcap);
    if (!(V4L2_CAP_VIDEO_CAPTURE & vcap.capabilities))
    {
        log_printf(LOG_LEV, "Not video capture device! %s", device_name);
        return -1;
    }

    // 枚举帧格式
    std::map<unsigned int, string> mapFormat;

    struct v4l2_fmtdesc fmtdesc;
    fmtdesc.index = 0;
    fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    while(ioctl(m_videoFd, VIDIOC_ENUM_FMT, &fmtdesc) == 0)
    {
        log_printf(LOG_LEV, "v4l2_format %d:%s",fmtdesc.index, fmtdesc.description);
        fmtdesc.index++;

        string value = (char *)fmtdesc.description;
        unsigned int key = (unsigned int)fmtdesc.pixelformat;
        mapFormat.insert(make_pair(key, value));
    }

    // 配置分辨率和格式 V4L2_PIX_FMT_YUYV V4L2_PIX_FMT_MJPEG
    int pixel_width = m_param.inWidth;
    int pixel_height = m_param.inHeight;
    int fps = m_param.inFPS;
    unsigned int pixelFormat = V4L2_PIX_FMT_YUYV;

    // 设置采集格式
    struct v4l2_format vfmt;
    vfmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vfmt.fmt.pix.width = pixel_width;
    vfmt.fmt.pix.height = pixel_height;
    vfmt.fmt.pix.pixelformat = pixelFormat;

    if(ioctl(m_videoFd, VIDIOC_S_FMT, &vfmt) < 0)
    {
        log_printf(LOG_LEV, "video设置格式失败");
        return -1;
    }
    if(ioctl(m_videoFd, VIDIOC_G_FMT, &vfmt) < 0)
    {
        log_printf(LOG_LEV, "video获取格式失败");
        return -1;
    }
    log_printf(LOG_LEV, "video当前格式:%s %d %d", mapFormat.at(vfmt.fmt.pix.pixelformat).c_str(), vfmt.fmt.pix.width, vfmt.fmt.pix.height);

    // 设置帧信息
    struct v4l2_streamparm streamparm;
    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if(V4L2_CAP_TIMEPERFRAME & streamparm.parm.capture.capability)
    {
        streamparm.parm.capture.timeperframe.numerator = 1;
        streamparm.parm.capture.timeperframe.denominator = fps;

        if(ioctl(m_videoFd, VIDIOC_S_PARM, &streamparm) < 0)
        {
            log_printf(LOG_LEV, "video设置帧率失败");
            return -1;
        }
    }
    if(ioctl(m_videoFd, VIDIOC_G_PARM, &streamparm) < 0)
    {
        log_printf(LOG_LEV, "video获取帧率失败");
        return -1;
    }
    log_printf(LOG_LEV, "video当前帧率 %d/%d", streamparm.parm.capture.timeperframe.numerator, streamparm.parm.capture.timeperframe.denominator);

    // 申请缓冲区空间
    struct v4l2_requestbuffers reqbuf;
    reqbuf.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    reqbuf.count = m_frameCount;
    reqbuf.memory = V4L2_MEMORY_MMAP;
    if(ioctl(m_videoFd, VIDIOC_REQBUFS, &reqbuf) < 0)
    {
        log_printf(LOG_LEV, "video申请缓冲区失败\n");
        return -1;
    }

    // 将帧缓冲映射到进程地址空间
    struct v4l2_buffer buf;
    buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;

    // 将每一帧对应的缓冲区的起始地址保存在m_userVideoBuf数组中，读取采集数据时，只需直接读取映射区即可
    for(buf.index=0; buf.index<m_frameCount; buf.index++)
    {
        ioctl(m_videoFd, VIDIOC_QUERYBUF, &buf);
        m_userBuf[buf.index] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, m_videoFd, buf.m.offset);
        m_userBufSize[buf.index] = buf.length;
        if(m_userBuf[buf.index] == MAP_FAILED)
        {
            log_printf(LOG_LEV, "video mmap failed\n");
            return -1;
        }

        // 入队操作
        if(ioctl(m_videoFd, VIDIOC_QBUF, &buf) < 0)
        {
            log_printf(LOG_LEV, "入队失败\n");
            return -1;
        }
    }

    return 0;
}

// 初始化解码器
// Builds and opens a decoder context for the best video stream in fmt_ctx.
// Also records the input geometry/format/fps in m_input* members.
// Returns an opened AVCodecContext (caller frees), or nullptr on error.
AVCodecContext *Device::createDecoder(AVFormatContext *fmt_ctx)
{
    if(!fmt_ctx)
    {
        return nullptr;
    }

    // Locate the video stream.
    int ret = 0;
    int videoIndex = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (videoIndex < 0)
    {
        getFFmpegError(videoIndex);   // fix: previously logged ret (always 0)
        log_printf(LOG_LEV, "av_find_best_stream");
        return nullptr;
    }

    // Record input stream info. Note: avg_frame_rate can undershoot the
    // real rate when the system is under load.
    AVStream *videoStream = fmt_ctx->streams[videoIndex];
    m_inputWidth = videoStream->codecpar->width;
    m_inputHeight = videoStream->codecpar->height;
    m_inputFormat = videoStream->codecpar->format;
    m_inputFPS = av_q2d(videoStream->avg_frame_rate);
    log_printf(LOG_LEV, "Device DecoderInfo %s %d %d %d %d", m_param.deviceName.c_str(), m_inputWidth, m_inputHeight, m_inputFPS, m_inputFormat);

    // Find a decoder for the stream's codec.
    AVCodec *videoDecoder = avcodec_find_decoder(videoStream->codecpar->codec_id);
    if (videoDecoder == NULL)
    {
        log_printf(LOG_LEV, "avcodec_find_decoder");
        return nullptr;
    }

    AVCodecContext *videoDeCodecCtx = avcodec_alloc_context3(videoDecoder);
    if (!videoDeCodecCtx)
    {
        log_printf(LOG_LEV, "avcodec_alloc_context3");
        return nullptr;
    }

    // Copy the stream parameters into the decoder context.
    ret = avcodec_parameters_to_context(videoDeCodecCtx, videoStream->codecpar);
    if (ret < 0)
    {
        getFFmpegError(ret);
        log_printf(LOG_LEV, "avcodec_parameters_to_context");
        avcodec_free_context(&videoDeCodecCtx);   // fix: context leaked here
        return nullptr;
    }

    // Open the decoder.
    ret = avcodec_open2(videoDeCodecCtx, videoDecoder, NULL);
    if (ret < 0)
    {
        getFFmpegError(ret);
        log_printf(LOG_LEV, "avcodec_open2");
        avcodec_free_context(&videoDeCodecCtx);   // fix: context leaked here
        return nullptr;
    }

    return videoDeCodecCtx;
}

// 初始化编码器
// Builds and opens the output encoder (H264 or H265 per config) with the
// configured geometry, GOP, thread count and bounded bitrate.
// Returns an opened AVCodecContext (caller frees), or nullptr on error.
AVCodecContext* Device::createEncoder()
{
    // Select the encoder by configured codec.
    AVCodecID codecID;
    string codecName;
    if(!m_param.isH264)
    {
        codecID = AV_CODEC_ID_H265;
        codecName = "H265";
    }
    else
    {
        codecID = AV_CODEC_ID_H264;
        codecName = "H264";
    }

    const AVCodec* codec = avcodec_find_encoder(codecID);
    if (!codec)
    {
        log_printf(LOG_LEV, "avcodec_find_encoder null %s", codecName.c_str());
        return nullptr;
    }

    // Allocate the encoder context.
    AVCodecContext* codec_ctx = avcodec_alloc_context3(codec);
    if (!codec_ctx)
    {
        log_printf(LOG_LEV, "avcodec_alloc_context3 null");
        return nullptr;
    }

    // Encoder parameters: output geometry, fps-based time base, 2s GOP.
    codec_ctx->width = m_param.outWidth;
    codec_ctx->height = m_param.outHeight;
    codec_ctx->time_base = {1, m_param.outFPS};
    codec_ctx->pix_fmt = m_param.pixFmt;
    codec_ctx->max_b_frames = m_param.bFrame;
    codec_ctx->thread_count = m_param.threadsCount;
    codec_ctx->gop_size = m_param.outFPS * 2;

    // Rate control: clamp the actual rate to 0.7x .. 1.8x of the target.
    int maxBitrate = m_param.bitRate * 1.8;
    int minBitrate = m_param.bitRate * 0.7;
    codec_ctx->bit_rate = m_param.bitRate;
    codec_ctx->rc_max_rate = maxBitrate;
    codec_ctx->rc_min_rate = minBitrate;
    codec_ctx->rc_buffer_size = m_param.bitRate;

    // Encoder private options (e.g. x264 preset/tune).
    AVDictionary* codec_options = nullptr;
    av_dict_set(&codec_options, "preset", m_param.preset.c_str(), 0);
    av_dict_set(&codec_options, "tune", m_param.tune.c_str(), 0);

    // Open the encoder; the dictionary must be freed on BOTH paths
    // (fix: it leaked on the error path before).
    int ret = avcodec_open2(codec_ctx, codec, &codec_options);
    av_dict_free(&codec_options);
    if (ret < 0)
    {
        getFFmpegError(ret);
        log_printf(LOG_LEV, "avcodec_open2 null");
        avcodec_free_context(&codec_ctx);   // fix: context leaked on failure
        return nullptr;
    }

    return codec_ctx;
}

// Converts a YUYV422 input frame to the I420 (YUV420P) output frame using
// libyuv, scaling when the two frames differ in size. The output frame
// must already have its buffers allocated. Returns 0 on success, -1 on error.
int Device::convert_and_scale_yuyv422_to_yuv420p(AVFrame *inFrame, AVFrame **outFrame)
{
    // fix: *outFrame was dereferenced without a null check.
    if(!(inFrame && outFrame && *outFrame))
    {
        return -1;
    }

    // Scaling is needed only when input and output dimensions differ.
    bool needScale = true;
    if((inFrame->width == (*outFrame)->width) && (inFrame->height == (*outFrame)->height))
    {
        needScale = false;
    }

    if(needScale)
    {
        // 1. Convert YUYV422 -> I420 into a temporary frame at input size
        //    (fix: allocation failures were previously ignored).
        AVFrame* tmpFrame = av_frame_alloc();
        if(!tmpFrame)
        {
            return -1;
        }
        tmpFrame->format = AV_PIX_FMT_YUV420P;
        tmpFrame->width = inFrame->width;
        tmpFrame->height = inFrame->height;
        if(av_frame_get_buffer(tmpFrame, 0) < 0)
        {
            av_frame_free(&tmpFrame);
            return -1;
        }

        libyuv::YUY2ToI420(
                    inFrame->data[0], inFrame->linesize[0],
                    tmpFrame->data[0], tmpFrame->linesize[0],
                    tmpFrame->data[1], tmpFrame->linesize[1],
                    tmpFrame->data[2], tmpFrame->linesize[2],
                    inFrame->width, inFrame->height);

        // 2. Scale I420 input resolution -> I420 output resolution.
        libyuv::I420Scale(
                    tmpFrame->data[0], tmpFrame->linesize[0],       // input Y
                    tmpFrame->data[1], tmpFrame->linesize[1],       // input U
                    tmpFrame->data[2], tmpFrame->linesize[2],       // input V
                    tmpFrame->width, tmpFrame->height,              // input size
                    (*outFrame)->data[0], (*outFrame)->linesize[0], // output Y
                    (*outFrame)->data[1], (*outFrame)->linesize[1], // output U
                    (*outFrame)->data[2], (*outFrame)->linesize[2], // output V
                    (*outFrame)->width, (*outFrame)->height,        // output size
                    libyuv::kFilterBilinear                         // scale filter
                    );

        av_frame_free(&tmpFrame);
    }
    else
    {
        // Same size: convert directly into the output frame.
        libyuv::YUY2ToI420(
                    inFrame->data[0], inFrame->linesize[0],
                    (*outFrame)->data[0], (*outFrame)->linesize[0],
                    (*outFrame)->data[1], (*outFrame)->linesize[1],
                    (*outFrame)->data[2], (*outFrame)->linesize[2],
                    inFrame->width, inFrame->height);
    }

    return 0;
}

// Worker-thread entry point: dispatches to the capture/encode pipeline
// selected by m_runWay (set from the config's `protocal` field).
void Device::run()
{
    switch(m_runWay)
    {
    case 10:
        runWay1();   // FFmpeg capture + software encode
        break;
    case 11:
        runWay2();   // raw V4L2 capture + software encode
        break;
    case 12:
        runWay3();   // same pipeline as runWay2
        break;
    case 13:
        runWay4();   // V4L2 capture, hardware path (incomplete)
        break;
    case 14:
        runWay5();   // not implemented
        break;
    default:
        break;
    }
}

void Device::runWay1()
{
    //开始读取设备数据
    AVFormatContext *fmt_ctx = openCameraInFFmpeg(m_param.deviceName.c_str());
    if(!fmt_ctx)
    {
        log_printf(LOG_LEV, "inputCtx is null");
        return;
    }

    //构造解码器
    AVCodecContext *decodeCtx = createDecoder(fmt_ctx);
    if(!decodeCtx)
    {
        avformat_close_input(&fmt_ctx);
        log_printf(LOG_LEV, "decodeCtx is null");
        return;
    }

    //构造编码器
    AVCodecContext *encodeCtx = createEncoder();
    if(!encodeCtx)
    {
        log_printf(LOG_LEV, "encodeCtx is null");
        return;
    }

    // 分配帧和包
    AVFrame *decodeFrame = av_frame_alloc();
    AVFrame *swsFrame = av_frame_alloc();
    AVPacket *outPkt = av_packet_alloc();

    // 设置帧参数
    swsFrame->format = encodeCtx->pix_fmt;
    swsFrame->width = encodeCtx->width;
    swsFrame->height = encodeCtx->height;
    av_frame_get_buffer(swsFrame, 0);
    av_frame_make_writable(swsFrame);

    // 初始化图像转换上下文 摄像头一般输出AV_PIX_FMT_YUYV422
    SwsContext *sws_ctx = sws_getContext(m_inputWidth, m_inputHeight, (AVPixelFormat)m_inputFormat,
                                         encodeCtx->width, encodeCtx->height, encodeCtx->pix_fmt,
                                         SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

    m_isRuning = false;

    //水印设置
    changeVideoFilter(getCurDatetime(), " ", 25, m_param.outHeight - 40);

    //开始取原始帧并编码送进媒体源
    while (m_bRun)
    {
        AVPacket input_pkt;
        int ret = av_read_frame(fmt_ctx, &input_pkt);
        if (ret < 0)
        {
            getFFmpegError(ret);
            log_printf(LOG_LEV, "av_read_frame error");
            break;
        }

        m_curIOTime = av_gettime_relative() / 1000;

        // 开始解码
//        uint64_t time1 = mk_util_get_current_millisecond();
        ret = avcodec_send_packet(decodeCtx, &input_pkt);
        if (ret < 0)
        {
            av_packet_unref(&input_pkt);
            continue;
        }

        ret = avcodec_receive_frame(decodeCtx, decodeFrame);
        if (ret < 0)
        {
            av_packet_unref(&input_pkt);
            continue;
        }
//        log_printf(LOG_LEV, "decodeFrame %d %d %d", decodeFrame->format, decodeFrame->width, decodeFrame->height);

        // 开始转换 为提升性能尤其arm平台采用libyuv
//        uint64_t time2 = mk_util_get_current_millisecond();
        if(0)
        {
            sws_scale(sws_ctx, (const uint8_t* const*)decodeFrame->data, decodeFrame->linesize, 0,
                               decodeFrame->height, swsFrame->data, swsFrame->linesize);
        }
        else
        {
            convert_and_scale_yuyv422_to_yuv420p(decodeFrame, &swsFrame);
        }
//        log_printf(LOG_LEV, "swsframe %d %d %d", swsFrame->format, swsFrame->width, swsFrame->height);

        // 开始滤镜
//        uint64_t time3 = mk_util_get_current_millisecond();
        bool filterRet = false;
        if(m_enableFilter)
        {
            //时间变化时更新 改为设置参数自动更新时间不需要判断
//            tm *cur = getCurDatetime();
//            if(cur->tm_sec != m_videoFilter->lastSec)
//            {
//                changeVideoFilter(cur, " ", 25, m_param.outHeight - 40);
//            }

            AVFrame *tmp = av_frame_clone(swsFrame);
            filterRet = m_videoFilter->FilteringFrame(tmp);
        }
//        log_printf(LOG_LEV, "filterFrame %d %d %d", m_videoFilter->filterFrame->format, m_videoFilter->filterFrame->width, m_videoFilter->filterFrame->height);

        // 开始编码
//        uint64_t time4 = mk_util_get_current_millisecond();
        if(filterRet)
        {
            ret = avcodec_send_frame(encodeCtx, m_videoFilter->filterFrame);
        }
        else
        {
            ret = avcodec_send_frame(encodeCtx, swsFrame);
        }
        if (ret< 0)
        {
            av_packet_unref(&input_pkt);
            continue;
        }

        while (avcodec_receive_packet(encodeCtx, outPkt) == 0)
        {
            //输入至媒体源
//            uint64_t time5 = mk_util_get_current_millisecond();
//            log_printf(LOG_LEV, "time=%d sws:%d filter:%d encode:%d offset=%d", time1, time2, time3, time4, time5-time1);

            mk_h264_splitter_input_data(m_splitter, (const char *)outPkt->data, outPkt->size);
            av_packet_unref(outPkt);
        }

        m_isRuning = true;

        av_frame_unref(decodeFrame);
        av_packet_unref(&input_pkt);
    }

    // 释放资源
    av_frame_free(&swsFrame);
    av_frame_free(&decodeFrame);
    av_packet_free(&outPkt);
    avcodec_free_context(&encodeCtx);
    avformat_close_input(&fmt_ctx);
    sws_freeContext(sws_ctx);

    m_bRun.store(false);
    m_isRuning = false;
    log_printf(LOG_LEV, "run stop");
}

void Device::runWay2()
{
    //打开设备
    int ret = openCameraInV4l2(m_param.deviceName.c_str());
    if(ret < 0)
    {
        log_printf(LOG_LEV, "openCamera fail!");
        return;
    }

    m_isRuning = false;

    // 开始采集
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(m_videoFd, VIDIOC_STREAMON, &type) < 0)
    {
        log_printf(LOG_LEV, "video开始采集失败");
        m_bRun.store(false);
    }

    //持续读取图像数据 使用select监听数据
    fd_set fds;
    struct timeval tv;
    tv.tv_sec  = 1;
    tv.tv_usec = 0;

    //构造编码器
    AVCodecContext *encodeCtx = createEncoder();
    if(!encodeCtx)
    {
        log_printf(LOG_LEV, "encodeCtx is null");
        return;
    }

    AVFrame *decodeFrame = av_frame_alloc();
    decodeFrame->format = AV_PIX_FMT_YUYV422;
    decodeFrame->width = m_param.inWidth;
    decodeFrame->height = m_param.inHeight;
    av_frame_get_buffer(decodeFrame, 0);
    av_frame_make_writable(decodeFrame);

    AVFrame *swsFrame = av_frame_alloc();
    swsFrame->format = encodeCtx->pix_fmt;
    swsFrame->width = encodeCtx->width;
    swsFrame->height = encodeCtx->height;
    av_frame_get_buffer(swsFrame, 0);
    av_frame_make_writable(swsFrame);

    AVPacket *outPkt = av_packet_alloc();

    while(m_bRun)
    {
        FD_ZERO(&fds);
        FD_SET(m_videoFd, &fds);
        int ret = select(m_videoFd+1, &fds, NULL, NULL, &tv);
        if(ret < 0)
        {
            log_printf(LOG_LEV, "select io error");
            break;
        }

        struct v4l2_buffer readbuffer;
        readbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        readbuffer.memory = V4L2_MEMORY_MMAP;
        if(ioctl(m_videoFd, VIDIOC_DQBUF, &readbuffer) < 0)
        {
            log_printf(LOG_LEV, "读取帧失败");
        }

        //帧数据处理 默认yuyv422
        const char *data = (const char *)m_userBuf[readbuffer.index];
        int length = readbuffer.length;

        //配置avframe
        memcpy(decodeFrame->data[0], data, length);
        convert_and_scale_yuyv422_to_yuv420p(decodeFrame, &swsFrame);

        avcodec_send_frame(encodeCtx, swsFrame);
        while (avcodec_receive_packet(encodeCtx, outPkt) == 0)
        {
            mk_h264_splitter_input_data(m_splitter, (const char *)outPkt->data, outPkt->size);
            av_packet_unref(outPkt);
        }

        if(ioctl(m_videoFd, VIDIOC_QBUF, &readbuffer) < 0)
        {
            log_printf(LOG_LEV, "再次入队失败");
        }

        m_isRuning = true;
    }

    // 停止采集
    if (ioctl(m_videoFd, VIDIOC_STREAMOFF, &type) < 0)
    {
        log_printf(LOG_LEV, "停止采集失败\n");
    }

    // 释放映射
    for(uint i=0; i<m_frameCount; i++)
    {
        munmap(m_userBuf[i], m_userBufSize[i]);
    }

    close(m_videoFd);
    m_bRun.store(false);
}

void Device::runWay3()
{
    //打开设备
    int ret = openCameraInV4l2(m_param.deviceName.c_str());
    if(ret < 0)
    {
        log_printf(LOG_LEV, "openCamera fail!");
        return;
    }

    m_isRuning = false;

    // 开始采集
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(m_videoFd, VIDIOC_STREAMON, &type) < 0)
    {
        log_printf(LOG_LEV, "video开始采集失败");
        m_bRun.store(false);
    }

    //持续读取图像数据 使用select监听数据
    fd_set fds;
    struct timeval tv;
    tv.tv_sec  = 1;
    tv.tv_usec = 0;

    //构造编码器
    AVCodecContext *encodeCtx = createEncoder();
    if(!encodeCtx)
    {
        log_printf(LOG_LEV, "encodeCtx is null");
        return;
    }

    AVFrame *decodeFrame = av_frame_alloc();
    decodeFrame->format = AV_PIX_FMT_YUYV422;
    decodeFrame->width = m_param.inWidth;
    decodeFrame->height = m_param.inHeight;
    av_frame_get_buffer(decodeFrame, 0);
    av_frame_make_writable(decodeFrame);

    AVFrame *swsFrame = av_frame_alloc();
    swsFrame->format = encodeCtx->pix_fmt;
    swsFrame->width = encodeCtx->width;
    swsFrame->height = encodeCtx->height;
    av_frame_get_buffer(swsFrame, 0);
    av_frame_make_writable(swsFrame);

    AVPacket *outPkt = av_packet_alloc();

    while(m_bRun)
    {
        FD_ZERO(&fds);
        FD_SET(m_videoFd, &fds);
        int ret = select(m_videoFd+1, &fds, NULL, NULL, &tv);
        if(ret < 0)
        {
            log_printf(LOG_LEV, "select io error");
            break;
        }

        struct v4l2_buffer readbuffer;
        readbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        readbuffer.memory = V4L2_MEMORY_MMAP;
        if(ioctl(m_videoFd, VIDIOC_DQBUF, &readbuffer) < 0)
        {
            log_printf(LOG_LEV, "读取帧失败");
        }

        //帧数据处理 默认yuyv422
        const char *data = (const char *)m_userBuf[readbuffer.index];
        int length = readbuffer.length;

        //配置avframe
        memcpy(decodeFrame->data[0], data, length);
        convert_and_scale_yuyv422_to_yuv420p(decodeFrame, &swsFrame);

        avcodec_send_frame(encodeCtx, swsFrame);
        while (avcodec_receive_packet(encodeCtx, outPkt) == 0)
        {
            mk_h264_splitter_input_data(m_splitter, (const char *)outPkt->data, outPkt->size);
            av_packet_unref(outPkt);
        }

        if(ioctl(m_videoFd, VIDIOC_QBUF, &readbuffer) < 0)
        {
            log_printf(LOG_LEV, "再次入队失败");
        }

        m_isRuning = true;
    }

    // 停止采集
    if (ioctl(m_videoFd, VIDIOC_STREAMOFF, &type) < 0)
    {
        log_printf(LOG_LEV, "停止采集失败\n");
    }

    // 释放映射
    for(uint i=0; i<m_frameCount; i++)
    {
        munmap(m_userBuf[i], m_userBufSize[i]);
    }

    close(m_videoFd);
    m_bRun.store(false);
}

void Device::runWay4()
{
    //打开设备
    int ret = openCameraInV4l2(m_param.deviceName.c_str());
    if(ret < 0)
    {
        log_printf(LOG_LEV, "openCamera fail!");
        return;
    }

    m_isRuning = false;

    // 开始采集
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    if (ioctl(m_videoFd, VIDIOC_STREAMON, &type) < 0)
    {
        log_printf(LOG_LEV, "video开始采集失败");
        m_bRun.store(false);
    }

    //持续读取图像数据 使用select监听数据
    fd_set fds;
    struct timeval tv;
    tv.tv_sec  = 1;
    tv.tv_usec = 0;

    //构造编码器


    AVPacket *outPkt = av_packet_alloc();

    while(m_bRun)
    {
        FD_ZERO(&fds);
        FD_SET(m_videoFd, &fds);
        int ret = select(m_videoFd+1, &fds, NULL, NULL, &tv);
        if(ret < 0)
        {
            log_printf(LOG_LEV, "select io error");
            break;
        }

        struct v4l2_buffer readbuffer;
        readbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        readbuffer.memory = V4L2_MEMORY_MMAP;
        if(ioctl(m_videoFd, VIDIOC_DQBUF, &readbuffer) < 0)
        {
            log_printf(LOG_LEV, "读取帧失败");
        }

        //帧数据处理 默认yuyv422
        const char *data = (const char *)m_userBuf[readbuffer.index];
        int length = readbuffer.length;

        //rga处理

        //mpp处理

        //输入编码帧
        mk_h264_splitter_input_data(m_splitter, (const char *)outPkt->data, outPkt->size);

        if(ioctl(m_videoFd, VIDIOC_QBUF, &readbuffer) < 0)
        {
            log_printf(LOG_LEV, "再次入队失败");
        }

        m_isRuning = true;
    }

    // 停止采集
    if (ioctl(m_videoFd, VIDIOC_STREAMOFF, &type) < 0)
    {
        log_printf(LOG_LEV, "停止采集失败\n");
    }

    // 释放映射
    for(uint i=0; i<m_frameCount; i++)
    {
        munmap(m_userBuf[i], m_userBufSize[i]);
    }

    close(m_videoFd);
    m_bRun.store(false);
}

// Run mode 14 (see Device::run): placeholder, not implemented yet.
void Device::runWay5()
{
    // TODO: implement this capture/encode pipeline.
}

// (Re)creates the drawtext watermark filter with the given text position,
// size, color and alpha, seeding it with the current date/time. The filter
// renders the timestamp itself via drawtext's localtime expansion.
// Returns false on invalid input, true otherwise.
bool Device::changeVideoFilter(tm *cur, const string &filterText, int fontLeft, int fontTop, int fontSize, char *fontColor, float fontAlpha)
{
    // fix: null inputs were dereferenced below.
    if (cur == nullptr || fontColor == nullptr)
    {
        return false;
    }

    // Drop the previous filter instance, if any.
    if (m_videoFilter)
    {
        delete m_videoFilter;
        m_videoFilter = nullptr;
    }

    m_videoFilter = new CFFVideoFilter();
    m_videoFilter->waterMarkText = filterText;
    m_videoFilter->nFilterFontSize = fontSize;
    // fix: strncpy does not terminate when the source fills the buffer;
    // force termination (assumes nFilterFontColor holds >= 32 bytes, the
    // same size the original copy used).
    strncpy(m_videoFilter->nFilterFontColor, fontColor, 32);
    m_videoFilter->nFilterFontColor[31] = '\0';
    m_videoFilter->nFilterFontAlpha = fontAlpha;
    m_videoFilter->nFilterFontLeft = fontLeft;
    m_videoFilter->nFilterFontTop = fontTop;

    // Seed the "last rendered" timestamp fields.
    m_videoFilter->lastYear = cur->tm_year + 1900;
    m_videoFilter->lastMonth = cur->tm_mon + 1;
    m_videoFilter->lastDay = cur->tm_mday;
    m_videoFilter->lastHour = cur->tm_hour;
    m_videoFilter->lastMinute = cur->tm_min;
    m_videoFilter->lastSec = cur->tm_sec;
    m_videoFilter->StartFilter(m_param.pixFmt, m_param.outWidth, m_param.outHeight, m_param.outFPS, m_videoFilter->nFilterFontSize, m_videoFilter->nFilterFontColor, m_videoFilter->nFilterFontAlpha, m_videoFilter->nFilterFontLeft, m_videoFilter->nFilterFontTop);
    return true;
}


//Add by ZXT
//Add by ZXT
// Default constructor: filter state is initialized lazily in StartFilter().
CFFVideoFilter::CFFVideoFilter() = default;

// Releases the filter graph and cached output frame, if the filter was started.
CFFVideoFilter::~CFFVideoFilter()
{
    StopFilter();
}

// Builds a buffer -> drawtext -> buffersink filter graph that stamps the
// current local time onto each frame. Idempotent: returns true immediately
// when already running. On any failure the partially built graph is freed
// (fix: it previously leaked, since bRunFlag never became true and
// StopFilter therefore never released it). Returns true on success.
bool CFFVideoFilter::StartFilter(AVPixelFormat nAVPixel, int nWidth, int nHeight, int nFrameRate,int nFontSize,char* FontColor,float FontAlpha,int FontLeft,int FontTop)
{
    if(bRunFlag)
        return true;

    m_nWidth  = nWidth;
    m_nHeight = nHeight;

    // drawtext watermark; the %{localtime} expansion keeps the timestamp
    // current, and colons must be escaped inside drawtext's text argument.
    // (fix: snprintf instead of sprintf to bound the write.)
    char szFilter[512] = {0};
    snprintf(szFilter, sizeof(szFilter),
             "drawtext=fontsize=%d:fontcolor=%s:alpha=%.1f:x=%d:y=%d:text='%%{localtime\\:%%Y-%%m-%%d %%T}'",
             nFontSize, FontColor, FontAlpha, FontLeft, FontTop);

    std::string filters_descr = szFilter;
    log_printf(LOG_LEV, "StartFilter1 filter:%s", filters_descr.c_str());

    // Image watermark alternative:
    //std::string filters_descr = "movie=./watermark/logo.png[watermark];[in][watermark]overlay=10:10[out]";

    enum AVPixelFormat pix_fmts[] = {nAVPixel, AV_PIX_FMT_NONE};
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");

    filterOutputs = avfilter_inout_alloc();
    filterInputs = avfilter_inout_alloc();
    filter_graph = avfilter_graph_alloc();

    // Common failure cleanup: release whatever was built so far.
    auto fail = [this]() {
        avfilter_inout_free(&filterInputs);
        avfilter_inout_free(&filterOutputs);
        avfilter_graph_free(&filter_graph);
        return false;
    };

    if (!filterOutputs || !filterInputs || !filter_graph)
    {
        fprintf(stderr, "avfilter inout/graph alloc error\n");
        return fail();
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    char args[512];
    snprintf(args, sizeof(args), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", nWidth, nHeight, nAVPixel, 1, nFrameRate, 1, 1);
    log_printf(LOG_LEV, "StartFilter2 args:%s", args);

    int ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "avfilter_graph_create_filter error\n");
        return fail();
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "avfilter_graph_create_filter error\n");
        return fail();
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        fprintf(stderr, "av_opt_set_int_list error\n");
        return fail();
    }

    /* Endpoints for the filter graph. */
    filterOutputs->name = av_strdup("in");
    filterOutputs->filter_ctx = buffersrc_ctx;
    filterOutputs->pad_idx = 0;
    filterOutputs->next = NULL;

    filterInputs->name = av_strdup("out");
    filterInputs->filter_ctx = buffersink_ctx;
    filterInputs->pad_idx = 0;
    filterInputs->next = NULL;

    ret = avfilter_graph_parse_ptr(filter_graph, filters_descr.c_str(), &filterInputs, &filterOutputs, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "avfilter_graph_parse_ptr error");
        return fail();
    }

    ret = avfilter_graph_config(filter_graph, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "avfilter_graph_config error\n");
        return fail();
    }

    // Output frame reused by FilteringFrame (fix: allocation now checked).
    filterFrame = av_frame_alloc();
    if (!filterFrame)
    {
        return fail();
    }

    bRunFlag = true;
    return true;
}

// Tears down the filter graph, the in/out descriptors and the cached output
// frame. Returns true if the filter was running and was stopped, false if
// there was nothing to stop. (The av*_free helpers already null the pointer
// they are given; the explicit assignments are kept for clarity.)
bool CFFVideoFilter::StopFilter()
{
    if(bRunFlag)
    {
        if (filterInputs != NULL)
        {
            avfilter_inout_free(&filterInputs);
            filterInputs = NULL;
        }
        if (filterOutputs != NULL)
        {
            avfilter_inout_free(&filterOutputs);
            filterOutputs = NULL;   // fix: previously reset filterInputs here (copy/paste bug)
        }
        if (filter_graph != NULL)
        {
            avfilter_graph_free(&filter_graph);
            filter_graph = NULL;
        }
        if (filterFrame != NULL)
        {
            av_frame_free(&filterFrame);
            filterFrame = NULL;
        }
        bRunFlag = false;
        return true;
    }
    return false;
}

// Pushes srcFrame through the filter graph and leaves the filtered result
// in the member filterFrame. Returns false when the filter is not running
// or the graph rejects/produces no frame. Note: av_buffersrc_add_frame
// takes ownership of srcFrame's data references.
bool CFFVideoFilter::FilteringFrame(AVFrame * srcFrame)
{
    if(!bRunFlag)
        return false;

    // Drop whatever the previous call left in the output frame.
    av_frame_unref(filterFrame);

    // Feed the source frame into the graph.
    int rc = av_buffersrc_add_frame(buffersrc_ctx, srcFrame);
    if (rc < 0)
    {
        fprintf(stderr, "Error while feeding the filtergraph\n");
        getFFmpegError(rc);
        return false;
    }

    // Pull the filtered frame out of the graph.
    rc = av_buffersink_get_frame(buffersink_ctx, filterFrame);
    if (rc < 0)
    {
        fprintf(stderr, "av_buffersink_get_frame error\n");
        getFFmpegError(rc);
        return false;
    }

    return true;
}

// Copies the filtered frame's planar YUV420 data into one contiguous buffer
// (full-size Y plane, then half-size U, then half-size V), stripping each
// plane's stride padding. pOutYUVData must hold at least
// m_nWidth * m_nHeight * 3 / 2 bytes.
// NOTE(review): i, nYUVPos, nYPos, nUPos and nVPos are class members used
// as loop state, so this method is not reentrant — confirm callers
// serialize access.
bool CFFVideoFilter::CopyYUVData(unsigned char* pOutYUVData)
{
    // Requires a started filter and a filtered frame with all three planes.
    if(!bRunFlag || filterFrame == NULL )
        return false ;

    if(filterFrame->data[0] == NULL || filterFrame->data[1] == NULL || filterFrame->data[2] == NULL)
        return false ;

      // Reset the running offsets (members, see note above).
      nYUVPos = 0;
      nYPos = 0;
      nUPos = 0;
      nVPos = 0;

      // Y plane: one row of m_nWidth bytes per line, advancing by the
      // frame's stride (linesize) on the source side.
      for (i = 0; i<m_nHeight; i++)
      {
          memcpy(pOutYUVData + nYUVPos, filterFrame->data[0] + nYPos, m_nWidth);
          nYPos += filterFrame->linesize[0];
          nYUVPos += m_nWidth;
      }

      // fill U data
      for (i = 0; i<m_nHeight / 2; i++)
      {
          memcpy(pOutYUVData + nYUVPos, filterFrame->data[1] + nUPos, m_nWidth / 2);
          nUPos += filterFrame->linesize[1];
          nYUVPos += m_nWidth / 2;
      }

      // fill V data
      for (i = 0; i<m_nHeight / 2; i++)
      {
          memcpy(pOutYUVData + nYUVPos, filterFrame->data[2] + nVPos, m_nWidth / 2);
          nVPos += filterFrame->linesize[2];
          nYUVPos += m_nWidth / 2;
      }
    return true;
}
