#include "decode_detect_video.h"
#include "decode_video.h"
#include "mock_detect.h"
#include "serialize.hpp"
#include "message_handler.h"
#include "loghelper.h"
#include "call_async.hpp"
#include "utility.hpp"
#include "messagebus.h"

#include <algorithm>
#include <atomic>
#include <fstream>
#include <sstream>
#include <thread>
#include <utility>

#include <boost/circular_buffer.hpp>

using namespace std;
using namespace RockLog;

// Item carried by the frame queue: one decoded frame plus the compressed
// packet it was produced from.
struct QueueItem_t
{
    uint32_t index = 0; // frame identifier (timestamp assigned by the decode callback)
    cv::Mat mat;        // decoded BGR frame handed to detection
    AVPacket pkt;       // ref'd compressed packet; the consumer must unref it
};

// Item held in the packet ring buffer (the source for the recorded clip).
struct RingBufItem_t
{
    uint32_t index = 0; // same identifier as the matching QueueItem_t
    AVPacket pkt;       // ref'd compressed packet; must be unref'd before being dropped
};

// Queue handing decoded frames from the decode callback to the detect thread.
moodycamel::BlockingConcurrentQueue<QueueItem_t> s_cvMatQueue;  // TODO: hold items via std::shared_ptr

static std::atomic_bool s_isStopWriteBuf(false); // true while the ring buffer is being drained/published
static std::atomic_bool s_isStartRecord(false);  // true once a detection has armed clip recording

static std::atomic_bool s_isStopDetectFrame(false);  // request the detect thread to stop
static bool s_isStopDecode = false;  // request the decode thread to stop
// NOTE(review): s_isStopDecode is shared between threads but is a plain bool —
// restart() spin-reads it while the decode thread writes it, which is formally
// a data race. Consider std::atomic_bool (depends on the parameter type of
// ffmpeg_video_decode — confirm before changing).

// Ring buffer of the most recent 600 compressed packets.
static boost::circular_buffer<RingBufItem_t> s_cbAVPacket(600);

// Default construction; all real setup happens later in init()/start().
// Defaulted rather than an empty body (modernize-use-equals-default).
DecodeDetectVideo::DecodeDetectVideo() = default;

// Record this worker's id and subscribe start() to camera-info messages on
// the global message bus.
void DecodeDetectVideo::init(const std::string &workerId)
{
    m_workerId = workerId;
    // bind to messagebus
    // NOTE(review): `*this` is passed by reference-to-object here — confirm
    // attach() stores a reference (not a copy), otherwise start() would run
    // against a detached copy of this worker.
    g_messagebus.attach<CameraInfo_t>(kRecvCameraInfo, &DecodeDetectVideo::start, *this);
}

// Bring the worker up for the given camera: load the detection model, then
// launch the detect and decode threads. If the worker is already running,
// tear it down and come back through start() with the new configuration.
void DecodeDetectVideo::start(CameraInfo_t t)
{
    // Already running — restart with the new camera info instead.
    if (m_started)
    {
        restart(t);
        return;
    }

    m_cameraInfo = t;

    // Load the OpenCV-based detection model; bail out if it cannot be read.
    if (initDetect(t.alg_param_file) != 0)
    {
        LOG(kInfo) << "--(!)Error loading face cascade\n";
        return;
    }

    startDetectFrame();
    startVideoDecode();
    m_started = true;
}

// Stop both worker threads, wait for each to acknowledge, then start again
// with the new camera configuration.
void DecodeDetectVideo::restart(CameraInfo_t t)
{
    m_started = false;
    // Ask both threads to stop. Each thread resets its own flag back to false
    // just before exiting, which is what releases the spin-wait below.
    s_isStopDetectFrame = true;
    s_isStopDecode = true;
    while (s_isStopDetectFrame || s_isStopDecode)
    {
        // NOTE(review): s_isStopDecode is a plain bool read in a loop while
        // another thread writes it — formally a data race; consider making it
        // std::atomic_bool like the other flags.
        Utility::usleep(1000);
    }
    start(t);
}

// Flatten the trajectory info (video metadata + all tracks) into a byte
// string for publishing. Field order here defines the wire format, so it
// must not change.
std::string DecodeDetectVideo::serializeTrajectoryInfo(const std::string &video_name)
{
    // Fill in the video-level metadata from the active camera first.
    m_info.video_name = video_name;
    m_info.video_width = m_cameraInfo.main_pixel_w;
    m_info.video_height = m_cameraInfo.main_pixel_h;
    m_info.video_framerate = m_cameraInfo.main_frame_rate;
    LOG(kInfo) << "serializeTrajectoryInfo video:" << m_info.video_name;

    std::stringstream stream;
    serialize(stream, m_info.video_name);
    serialize(stream, m_info.video_width);
    serialize(stream, m_info.video_height);
    serialize(stream, m_info.video_framerate);

    // Tracks follow the header, one after another.
    for (auto &trk : m_info.tracks)
    {
        serialize(stream, trk);
    }

    // LOG(kInfo)<< "m_info.tracks.size(): " << m_info.tracks.size();
    return stream.str();
}

void DecodeDetectVideo::publishVideoAndTracks()
{
    s_isStopWriteBuf = true;

    bool firstKeyFrame = false;
    std::stringstream oss;
    auto writeToVideo = [&](AVPacket &newPacket)
    {
        if (!firstKeyFrame)
        {
            if (newPacket.flags & AV_PKT_FLAG_KEY)
                firstKeyFrame = true;
            else
                return;
        }

        oss.write((char *)newPacket.data, newPacket.size);
        av_packet_unref(&newPacket);
    };
    int count = 0;
    auto it = std::find_if(s_cbAVPacket.begin(), s_cbAVPacket.end(), [this](const RingBufItem_t &t) { return t.index == this->m_info.tracks[0].f; });
    if (it != s_cbAVPacket.end())
    {
        LOG(kInfo) << " find! ";
        m_info.tracks[0].f = 0;
        int k = 1;
        for (; it != s_cbAVPacket.end(); it++)
        {
            if (it->index == m_info.tracks[k].f)
            {
                m_info.tracks[k].f = count;
                LOG(kInfo) << " m_info.tracks[k].f " << k << " " << m_info.tracks[k].f;
                k++;
            }
            writeToVideo(it->pkt);
            count++;
            // 记录帧数为kRecordVideoLenth*视频帧率
            if (count == kRecordVideoLenth * m_cameraInfo.main_frame_rate)
                break;
        }
    }

    s_cbAVPacket.clear();
    s_isStopWriteBuf = false;

    if (count < 2 * m_cameraInfo.main_frame_rate)
        return; // 少于一定帧数则放弃记录

    std::string outputFile = std::string("output_") + m_workerId + "_" + Utility::getTimeString() + ".h264";
    std::string tracksStr = serializeTrajectoryInfo(outputFile);
    MessageHandler::instance().publishVideoAndTracks(tracksStr, outputFile, oss.str());
    LOG(kInfo) << "[publishVideoAndTracks] tracksStr size: " << tracksStr.size() << ", outputFile:" << outputFile;

#ifdef WORKER_DEBUG
    //// for test //////
    std::ofstream f(outputFile, std::ofstream::binary);
    f << oss.str();
    f.close();
    //// for test //////
#endif

    TrajectoryInfo_t tmp; // reset m_info
    std::swap(this->m_info, tmp);
    oss.clear();
    s_isStartRecord = false;
}

// Spawn the detached detect thread: it pops decoded frames from s_cvMatQueue,
// mirrors their compressed packets into the ring buffer, runs detection on
// each frame, and — once recording is armed and enough frames have passed —
// schedules publishVideoAndTracks().
void DecodeDetectVideo::startDetectFrame()
{
    LOG(kInfo) << "[startDetectFrame]";
    std::thread([this]()
        {
            uint32_t count = 0;
            while (!s_isStopDetectFrame)
            {
                QueueItem_t frame;
                // Wait up to 100 ms for the next decoded frame so the stop
                // flag is re-checked regularly.
                if (!s_cvMatQueue.wait_dequeue_timed(frame, 100 * 1000))
                {
                    if (s_isStopDetectFrame)
                        break;
                    else
                        continue;
                }
                // LOG(kInfo)<< "[wait_dequeue]";

                // Mirror the compressed packet into the ring buffer so a clip
                // can be rebuilt later.
                // NOTE(review): s_cbAVPacket is also iterated and cleared by
                // publishVideoAndTracks() on another thread, gated only by
                // s_isStopWriteBuf — confirm the check-then-push window here
                // cannot race with that drain.
                if (!s_isStopWriteBuf)
                {
                    if (s_cbAVPacket.full())
                        av_packet_unref(&s_cbAVPacket.front().pkt); // release the oldest packet's buffers before it is overwritten
                    s_cbAVPacket.push_back({ frame.index, frame.pkt });
                }

                Track_t track;
                int detected = detect(frame.mat, track.rect);
                if (detected > 0) // a target was detected in this frame
                {
                    if (!s_isStartRecord)
                    {
                        LOG(kInfo) << "[DecodeDetectVideo] detected face, start record!";
                        s_isStartRecord = true;

                    }
                    if (!s_isStopWriteBuf) // keep tracks consistent with the ring-buffer contents
                    {
                        track.f = frame.index;
                        m_info.tracks.push_back(track);
                    }
                }
                if (s_isStartRecord)
                {
                    // Publish after kRecordVideoLenth * frame-rate frames.
                    if (count == kRecordVideoLenth * this->m_cameraInfo.main_frame_rate)
                    {
                        delay_call(500, true, &DecodeDetectVideo::publishVideoAndTracks, this); // publish 0.5 s later
                        count = 0;
                    }
                    else
                    {
                        count++;
                    }
                }
            }
            s_isStopDetectFrame = false;    // reset the flag on exit; restart() spin-waits on this
        }).detach();
}
#include <opencv2/imgproc.hpp>
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/hwcontext.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
}

void avframeToMat(const AVFrame * frame, cv::Mat& image)
{
    int width = frame->width;
    int height = frame->height;

    // Allocate the opencv mat and store its stride in a 1-element array
    if (image.rows != height || image.cols != width || image.type() != CV_8UC3) image = cv::Mat(height, width, CV_8UC3);
    int cvLinesizes[1];
    cvLinesizes[0] = image.step1();

    // Convert the colour format and write directly to the opencv matrix
    SwsContext* conversion = sws_getContext(width, height, (AVPixelFormat)frame->format, width, height, AVPixelFormat::AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    sws_scale(conversion, frame->data, frame->linesize, 0, height, &image.data, cvLinesizes);
    sws_freeContext(conversion);

}
// decode callback
static void callback(AVFrame *src_image, AVPacket *packet, int decode_type)
{
    if (s_isStopWriteBuf) // 假设事件不是频繁产生的，上一个事件在写发送时，暂停记录帧
        return;
    cv::Mat frame_bgr;
    avframeToMat(src_image, frame_bgr);

    AVPacket newPacket;
    av_packet_ref(&newPacket, packet);
    s_cvMatQueue.enqueue({ (uint32_t)Utility::getTimeStamp(), frame_bgr.clone(), newPacket });
}


// Launch the blocking ffmpeg decode loop on a detached worker thread; every
// decoded frame is delivered through callback().
// NOTE(review): the detached thread captures `this` — confirm the
// DecodeDetectVideo object outlives the thread.
void DecodeDetectVideo::startVideoDecode()
{
    std::thread([this]()
        {
            // s_isStopDecode is the stop flag handed to the decode loop; it is
            // reset here once the loop exits, which restart() spin-waits on.
            ffmpeg_video_decode(this->m_cameraInfo.main_stream, callback, this->m_decode_type, this->m_only_key_frame, s_isStopDecode, false);
            s_isStopDecode = false;
            LOG(kInfo) << "[ffmpeg_video_decode] finished! ";
        }).detach();
}
