#include "streamer.h"
#include <iostream>
#include <chrono>
#include <vector>
#include <cstring>
#include <algorithm>

// OpenCV and FFmpeg swscale
#include <opencv2/opencv.hpp>

#include "otl_string.h"
#include "stream_sei.h"
#include "utils.h"

extern "C" {
#include <libavutil/rational.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
}

// Simple overlay utilities for YUV420P frames
// Clamp integer v to the inclusive range [lo, hi].
static inline int clampi(int v, int lo, int hi) { return v < lo ? lo : (v > hi ? hi : v); }

/// Draw a hollow axis-aligned rectangle directly on the luma (Y) plane.
/// Coordinates are clamped into the plane and corners may be given in any
/// order. Only the Y plane is touched, so the border appears as a bright
/// (or dark) box without altering chroma.
/// @param y         pointer to the first byte of the Y plane
/// @param w,h       plane dimensions in pixels
/// @param linesize  stride of the Y plane in bytes (may exceed w)
/// @param x1,y1,x2,y2  rectangle corners in pixels
/// @param thickness border thickness in pixels
/// @param yval      luma value written for the border
static void draw_rect_y(uint8_t* y, int w, int h, int linesize, int x1, int y1, int x2, int y2, int thickness, uint8_t yval)
{
    // Guard degenerate input: with w <= 0 or h <= 0, clampi(v, 0, w - 1)
    // has hi < lo and yields -1, producing negative indices and
    // out-of-bounds writes below.
    if (y == nullptr || w <= 0 || h <= 0 || linesize <= 0 || thickness <= 0)
        return;

    x1 = clampi(x1, 0, w - 1); x2 = clampi(x2, 0, w - 1);
    y1 = clampi(y1, 0, h - 1); y2 = clampi(y2, 0, h - 1);
    if (x1 > x2) std::swap(x1, x2);
    if (y1 > y2) std::swap(y1, y2);
    for (int t = 0; t < thickness; ++t) {
        int yt1 = clampi(y1 + t, 0, h - 1);
        int yt2 = clampi(y2 - t, 0, h - 1);
        // top and bottom edges (full rectangle width)
        memset(y + yt1 * linesize + x1, yval, x2 - x1 + 1);
        memset(y + yt2 * linesize + x1, yval, x2 - x1 + 1);
        // left and right edges
        for (int yy = y1; yy <= y2; ++yy) {
            int yyy = clampi(yy, 0, h - 1);
            if (x1 + t < w) y[yyy * linesize + x1 + t] = yval;
            if (x2 - t >= 0) y[yyy * linesize + x2 - t] = yval;
        }
    }
}



// Bind the streamer to its device manager and create the FPS counter
// (5-sample sliding window, matching the stats reporting interval).
Streamer::Streamer(DeviceManagerPtr ptr)
    : m_fpsStat(enrigin::StatTool::create(5)),
      m_detectorManager(ptr)
{
}

// Ensure the pipeline is fully shut down (decoder closed, flush thread
// joined, pending packets drained) before members are destroyed.
Streamer::~Streamer() {
    stop();
}


/// Configure the pipeline from `config`: always creates the stream decoder,
/// optionally acquires the shared detector / inference pipe, and optionally
/// sets up an H.264 encoder for the re-encode output path.
/// Always returns true; open/stream failures surface later in start().
bool Streamer::init(const Config& config) {
    m_config = config;
    m_decoder = std::make_unique<enrigin::StreamDecoder>(config.decodeId);
    m_decoder->setObserver(this);


    if (m_config.detectEnabled) {
        // Detector and infer pipe are obtained per (device, model) from the
        // manager — presumably shared across streamers; verify with the manager.
        m_detector = m_detectorManager->getDetector(config.devId, config.modelPath);
        m_inferPipe = m_detectorManager->getInferPipe(config.devId, config.modelPath);
    }

    if (m_config.encodeEnabled) {
        // NOTE(review): resolution/bitrate are hard-coded (1280x720 @ 3 Mbps,
        // 30 fps) — consider promoting these to Config fields.
        std::string codecName = "h264";
        m_encoder = enrigin::CreateStreamEncoder(codecName);
        enrigin::EncodeParam p;
        p.codecName = codecName;
        p.width = 1280; p.height = 720;
        p.timeBase = {1, 90000};
        p.frameRate = {30, 1};
        p.pixFmt = AV_PIX_FMT_YUV420P;
        p.gopSize = 60;
        p.maxBFrames = 0;
        p.bitRate = 3'000'000;
        p.preferHardware = true;
        // NOTE(review): init() return value is ignored — confirm encoder
        // initialization cannot fail here, or propagate the failure.
        m_encoder->init(&p);
        // set encoder timing for PTS generation
        m_encTimeBase = m_encoder->getTimeBase();
        m_encFrameRate = p.frameRate;
        m_nextPts = 0;
    }

    return true;
}

/// Open the input stream, wire up decode/detect/output callbacks, and start
/// the pipeline. Returns false if already running or if the input stream
/// cannot be opened.
bool Streamer::start() {
    if (m_running) {
        return false;
    }

    // If we will encode, prepare the output sink early with explicit codec parameters from encoder
    if (m_config.encodeEnabled && m_output == nullptr && m_encoder) {
        const AVCodecParameters* cpar = m_encoder->getCodecParameters();
        if (cpar) {
            AVRational encTb = m_encoder->getTimeBase();
            m_output = std::make_unique<enrigin::FfmpegOutputer>();
            m_output->openOutputStreamWithCodec(m_config.outputUrl, cpar, encTb);
        }
    }

    // Decoder open options (consumed by openStream, freed below).
    AVDictionary *opts=NULL;
    //av_dict_set(&opts, "vf", "scale=640:640:force_original_aspect_ratio=decrease,pad=640:640:(ow-iw)/2:(oh-ih)/2,format=rgb24", 0);
    if (m_config.ppset_enabled)
    {
        av_dict_set(&opts, "pp_set", m_config.pp_str.c_str(), 0);
        av_dict_set(&opts, "pix_fmt", "bgr24", 0);
    }

    av_dict_set_int(&opts, "buffer_type", m_config.m_buffer_type, 0);

    if (m_decoder->openStream(m_config.inputUrl, true,  opts) != 0) {
        std::cout << "OpenStream " << m_config.inputUrl << " failed!" << std::endl;
        av_dict_free(&opts);  // fix: the dictionary leaked on this early-return path
        return false;
    }
    av_dict_free(&opts);

    //---- stream operations -----//
    m_decoder->setAvformatOpenedCallback([this](const AVFormatContext* ifmtCtx)
    {
        // For pass-through (no re-encode), derive output stream from input format
        if (!m_config.encodeEnabled) {
            if (m_output == nullptr) {
                m_output = std::make_unique<enrigin::FfmpegOutputer>();
                m_output->openOutputStream(m_config.outputUrl, ifmtCtx);
            }
        }
    });

    m_decoder->setAvformatClosedCallback([this]()
    {
        if (m_output != nullptr) {
            m_output->closeOutputStream();
        }
    });


    //------- Delegate callback --------//
    if (m_config.detectEnabled) {
        m_detector->set_detected_callback([this](FrameInfo& frameInfo)
        {
            // Undo the preprocessing scale so bboxes are in original-frame pixels.
            if (m_config.ppset_enabled)
            {
                for (enrigin::Bbox& b : frameInfo.detection.bboxes()) {
                    b.x1 = b.x1 / m_config.pp_scale;
                    b.y1 = b.y1 / m_config.pp_scale;
                    b.x2 = b.x2 / m_config.pp_scale;
                    b.y2 = b.y2 / m_config.pp_scale;
                }
            }

            if (m_config.tracker_enabled) {
                std::vector<Object> objects;
                // Convert frameInfo.detection -> tracker objects
                if (frameInfo.frame) {
                    for (const enrigin::Bbox& b : frameInfo.detection.bboxes()) {
                        float w = std::max(0.0f, b.x2 - b.x1);
                        float h = std::max(0.0f, b.y2 - b.y1);
                        if (w <= 0.0f || h <= 0.0f) continue;

                        Object o;
                        o.rect = cv::Rect_<float>(b.x1, b.y1, w, h);
                        o.label = b.classId;
                        o.prob = b.confidence;
                        objects.push_back(o);
                    }
                }
                if (objects.size() > 0) {
                    auto tracks = m_tracker.update(objects);

                    // Update back to frameInfo.detection using tracked boxes (tlbr in pixels)
                    frameInfo.detection.clear();
                    for (const auto& t : tracks) {
                        if (t.tlbr.size() >= 4) {
                            enrigin::Bbox nb;
                            nb.x1 = t.tlbr[0];
                            nb.y1 = t.tlbr[1];
                            nb.x2 = t.tlbr[2];
                            nb.y2 = t.tlbr[3];
                            nb.confidence = t.score; // use track score
                            nb.classId = t.class_id;
                            nb.trackId = t.track_id;
                            frameInfo.detection.push_back(nb);
                        }
                    }
                }
            }

            if (frameInfo.streamer)
            {
                if (frameInfo.streamer->m_config.encodeEnabled) {
                    // 1) Overlay detection bboxes on Y plane (YUV420P/YUVJ420P) before encoding
                    if (frameInfo.frame &&
                        (frameInfo.frame->format == AV_PIX_FMT_YUV420P || frameInfo.frame->format == AV_PIX_FMT_YUVJ420P) &&
                        !frameInfo.detection.bboxes().empty()) {
                        uint8_t* y = frameInfo.frame->data[0];
                        int ls = frameInfo.frame->linesize[0];
                        int W = frameInfo.frame->width;
                        int H = frameInfo.frame->height;
                        for (const enrigin::Bbox& b : frameInfo.detection.bboxes()) {
                            // If coords look normalized (<=1), scale to pixels
                            auto norm = (b.x2 <= 1.0f && b.y2 <= 1.0f);
                            int x1 = norm ? (int)(b.x1 * W) : (int)b.x1;
                            int y1 = norm ? (int)(b.y1 * H) : (int)b.y1;
                            int x2 = norm ? (int)(b.x2 * W) : (int)b.x2;
                            int y2 = norm ? (int)(b.y2 * H) : (int)b.y2;
                            draw_rect_y(y, W, H, ls, x1, y1, x2, y2, 2, 235);
                        }
                    }

                    // 2) Assign monotonically increasing PTS in encoder time base and encode
                    if (frameInfo.frame) {
                        // One frame duration expressed in the encoder time base.
                        int64_t step = av_rescale_q(1, av_inv_q(frameInfo.streamer->m_encFrameRate), frameInfo.streamer->m_encTimeBase);
                        if (step <= 0) step = 1;
                        if (frameInfo.frame->pts == AV_NOPTS_VALUE || frameInfo.frame->pts < frameInfo.streamer->m_nextPts) {
                            frameInfo.frame->pts = frameInfo.streamer->m_nextPts;
                        }
                        frameInfo.streamer->m_nextPts = frameInfo.frame->pts + step;
                    }
                    // Encode frame to packets via vector API
                    std::vector<AVPacket*> pkts;
                    auto ret = frameInfo.streamer->m_encoder->encode(frameInfo.frame, pkts);
                    if (ret == 0) {
                        // Lazily open the output if start() could not (codec params
                        // were not available yet at that point).
                        if (!frameInfo.streamer->m_output) {
                            const AVCodecParameters* cpar = frameInfo.streamer->m_encoder->getCodecParameters();
                            if (cpar) {
                                AVRational encTb = frameInfo.streamer->m_encoder->getTimeBase();
                                frameInfo.streamer->m_output = std::make_unique<enrigin::FfmpegOutputer>();
                                frameInfo.streamer->m_output->openOutputStreamWithCodec(frameInfo.streamer->m_config.outputUrl, cpar, encTb);
                            }
                        }
                        for (AVPacket* out : pkts) {
                            if (!out) continue;
                            if (out->stream_index < 0) out->stream_index = 0; // single-stream
                            // fix: m_output can still be null if codec parameters were
                            // unavailable above; guard the send but always free the packet.
                            if (frameInfo.streamer->m_output) {
                                frameInfo.streamer->m_output->inputPacket(out);
                            }
                            frameInfo.streamer->m_encoder->freePacket(out);
                        }
                    }
                    // When encoding path is used, we skip SEI injection (bbox already meant to be fused into image)
                    return;
                }

                // Non-encoding path: build and cache SEI NAL for later merge in onDecodedAVFrame
                std::lock_guard<std::mutex> lk(frameInfo.streamer->m_seiMutex);
                frameInfo.streamer->m_latestSeiNal.clear();
                if (frameInfo.detection.size() > 0) {
                    // Serialize detections and base64-encode them into a SEI payload.
                    auto detect_bbuf = frameInfo.detection.toByteBuffer();
                    auto base64_str = enrigin::base64Enc(detect_bbuf->data(), detect_bbuf->size());
                    AVCodecID codec_id = frameInfo.streamer->get_video_codec_id();
                    bool isAnnexb = !frameInfo.streamer->preferAVCC();
                    frameInfo.streamer->m_cachedCodec = codec_id;
                    frameInfo.streamer->m_cachedIsAnnexb = isAnnexb;

                    if (codec_id == AV_CODEC_ID_H264) {
                        auto packet_size = enrigin::h264SeiCalcPacketSize((uint32_t)base64_str.length(), isAnnexb, 4);
                        frameInfo.streamer->m_latestSeiNal.resize(packet_size);
                        int real_size = enrigin::h264SeiPacketWrite(frameInfo.streamer->m_latestSeiNal.data(), isAnnexb,
                                                                (uint8_t*)base64_str.data(), (uint32_t)base64_str.length());
                        frameInfo.streamer->m_latestSeiNal.resize(real_size);
                    } else if (codec_id == AV_CODEC_ID_H265) {
                        // H.265 SEI header is larger; oversize then trim to the real size.
                        auto base_size = enrigin::h264SeiCalcPacketSize((uint32_t)base64_str.length(), isAnnexb, 4);
                        frameInfo.streamer->m_latestSeiNal.resize(base_size + 16);
                        int real_size = enrigin::h265SeiPacketWrite(frameInfo.streamer->m_latestSeiNal.data(), isAnnexb,
                                                                (uint8_t*)base64_str.data(), (uint32_t)base64_str.length());
                        frameInfo.streamer->m_latestSeiNal.resize(real_size);
                    } else {
                        // Unknown codec: leave cache empty
                        frameInfo.streamer->m_latestSeiNal.clear();
                    }
                }
            }
        });
    }

    m_running = true;
    // Launch flush thread if using output delay buffering (non-encoding path only)
    if (m_config.outputDelayMs > 0) {
        m_flushThread = std::thread(&Streamer::flushLoop, this);
    }
    return true;
}

/// Shut the pipeline down: stop the decode loop, join the delayed-flush
/// thread, and free any packets still queued for output. Safe to call from
/// the destructor even if init()/start() were never invoked.
void Streamer::stop() {
    m_running = false;
    // fix: guard the decoder — stop() runs from the destructor and
    // m_decoder is only created in init().
    if (m_decoder) {
        m_decoder->closeStream();
    }
    // Wake and join flush thread
    if (m_flushThread.joinable()) {
        {
            // Acquire/release the queue mutex so the flush thread cannot miss
            // the notify between its empty-check and its wait.
            std::lock_guard<std::mutex> lk(m_queueMutex);
        }
        m_queueCv.notify_all();
        m_flushThread.join();
    }
    // Drain and free any pending packets without sending
    {
        std::lock_guard<std::mutex> lk(m_queueMutex);
        while (!m_pending.empty()) {
            AVPacket* p = m_pending.front().pkt;
            if (p) {
                av_packet_unref(p);
                av_packet_free(&p);
            }
            m_pending.pop_front();
        }
    }
}

// Return a consistent snapshot of the runtime statistics, refreshing the
// FPS field from the sliding-window counter under the stats lock.
Streamer::Stats Streamer::getStats() {
    std::lock_guard<std::mutex> guard(m_mutex);
    m_stats.fps = m_fpsStat->getSpeed();
    Stats snapshot = m_stats;
    return snapshot;
}

/// Return the inference pipe's statistics. When detection is disabled the
/// pipe was never created; return a default-constructed status instead of
/// dereferencing a null pointer (fix).
enrigin::PipeStatus Streamer::getPipeStatus()
{
    enrigin::PipeStatus stat{};
    if (m_inferPipe) {
        m_inferPipe->statis(&stat);
    }
    return stat;
}

/// Decoder callback: invoked for every decoded frame together with its
/// originating packet. Updates FPS stats, optionally forwards the frame to
/// the inference pipe (sub-sampled by detectStride), and forwards the
/// compressed packet to the output — either immediately (with the latest
/// cached SEI NAL prepended) or via the delayed-flush queue.
void Streamer::onDecodedAVFrame(const AVPacket* pkt, const AVFrame* pFrame) {
    //1. statistic
    m_fpsStat->update();

    if (m_config.detectEnabled) {
        if (pFrame->width == 0 || pFrame->height == 0)
        {
            std::cout << "ERROR: width=height=0\n" << std::endl;
        }
        // 2. Optionally post frame to detector queue (sub-sampling by detectStride)
        uint64_t idx = ++m_frameCounter;
        if (m_inferPipe && (m_config.detectStride <= 1 || (idx % (uint64_t)std::max(1, m_config.detectStride) == 0))) {
            FrameInfo frame;
            // Take refs on packet and frame so they outlive this callback while
            // queued in the infer pipe. NOTE(review): av_packet_ref also copies
            // props, so the preceding copy_props looks redundant — confirm.
            frame.pkt = av_packet_alloc();
            av_packet_copy_props(frame.pkt, pkt);
            av_packet_ref(frame.pkt, pkt);

            frame.frame = av_frame_alloc();
            av_frame_ref(frame.frame, pFrame);

            // Keep the streamer alive for the duration of async processing.
            frame.streamer = get_shared_ptr();
            frame.width = pFrame->width;
            frame.height = pFrame->height;

            m_inferPipe->push_frame(&frame);
        }

        // 3. Output path
        if (m_output) {
            if (m_config.outputDelayMs > 0) {
                // Enqueue packet for delayed flush; flushLoop() will merge SEI
                // and send it once outputDelayMs has elapsed.
                AVPacket* copy = av_packet_alloc();
                av_packet_copy_props(copy, pkt);
                av_packet_ref(copy, pkt);
                PendingPacket node;
                node.pkt = copy;
                node.ts = std::chrono::steady_clock::now();
                {
                    std::lock_guard<std::mutex> qlk(m_queueMutex);
                    m_pending.push_back(node);
                }
                m_queueCv.notify_one();
            } else {
                // Immediate: merge latest cached SEI if present
                std::vector<uint8_t> seiCopy;
                {
                    std::lock_guard<std::mutex> lk(m_seiMutex);
                    seiCopy = m_latestSeiNal; // copy to avoid holding lock during IO
                }
                if (!seiCopy.empty()) {
                    // Build a new packet: [SEI NAL][original packet data].
                    // Ownership of merged_buf passes to merged->buf, so the
                    // unref/free below releases everything exactly once.
                    AVPacket *merged = av_packet_alloc();
                    av_packet_copy_props(merged, pkt);
                    merged->stream_index = pkt->stream_index;
                    int merged_size = (int)seiCopy.size() + pkt->size;
                    AVBufferRef *merged_buf = av_buffer_alloc(merged_size);
                    memcpy(merged_buf->data, seiCopy.data(), seiCopy.size());
                    memcpy(merged_buf->data + seiCopy.size(), pkt->data, pkt->size);
                    merged->data = merged_buf->data;
                    merged->size = merged_size;
                    merged->buf = merged_buf;

                    m_output->inputPacket(merged);
                    av_packet_unref(merged);
                    av_packet_free(&merged);
                } else {
                    m_output->inputPacket(pkt);
                }
            }
        }
    } else {
        // 3. Detection disabled: directly output
        if (m_output)
            m_output->inputPacket(pkt);
    }
}

/// Worker thread for the delayed-output path. Packets queued by
/// onDecodedAVFrame sit in m_pending until outputDelayMs has elapsed,
/// giving the (asynchronous) detector time to publish an SEI NAL for them;
/// then each packet is sent with the latest cached SEI prepended.
/// Exits when m_running is cleared; stop() drains whatever is left.
void Streamer::flushLoop() {
    const auto delay = std::chrono::milliseconds(std::max(0, m_config.outputDelayMs));
    while (m_running) {
        PendingPacket item;
        bool hasItem = false;
        {
            std::unique_lock<std::mutex> ulk(m_queueMutex);
            if (m_pending.empty()) {
                // Short poll so a cleared m_running is noticed promptly even
                // without a notify.
                m_queueCv.wait_for(ulk, std::chrono::milliseconds(10));
            }
            if (!m_pending.empty()) {
                auto now = std::chrono::steady_clock::now();
                auto due = m_pending.front().ts + delay;
                if (now >= due) {
                    item = m_pending.front();
                    m_pending.pop_front();
                    hasItem = true;
                } else {
                    // sleep until due or wake by new item/stop
                    auto waitDur = due - now;
                    m_queueCv.wait_for(ulk, waitDur);
                }
            }
        }

        if (!hasItem) continue;

        // Merge latest cached SEI (if any) and send
        if (m_output && item.pkt) {
            std::vector<uint8_t> seiCopy;
            {
                // Copy under the lock; the actual IO below runs unlocked.
                std::lock_guard<std::mutex> lk(m_seiMutex);
                seiCopy = m_latestSeiNal;
            }
            if (!seiCopy.empty()) {
                // Build [SEI NAL][packet data]; merged->buf takes ownership of
                // merged_buf, so the unref/free pair releases it exactly once.
                AVPacket *merged = av_packet_alloc();
                av_packet_copy_props(merged, item.pkt);
                merged->stream_index = item.pkt->stream_index;
                int merged_size = (int)seiCopy.size() + item.pkt->size;
                AVBufferRef *merged_buf = av_buffer_alloc(merged_size);
                memcpy(merged_buf->data, seiCopy.data(), seiCopy.size());
                memcpy(merged_buf->data + seiCopy.size(), item.pkt->data, item.pkt->size);
                merged->data = merged_buf->data;
                merged->size = merged_size;
                merged->buf = merged_buf;

                m_output->inputPacket(merged);
                av_packet_unref(merged);
                av_packet_free(&merged);
            } else {
                m_output->inputPacket(item.pkt);
            }
        }

        // The queued packet was our reference; release it whether or not it
        // was sent.
        if (item.pkt) {
            av_packet_unref(item.pkt);
            av_packet_free(&item.pkt);
        }
    }
}
