#include "stream_encoder.h"
#include <libswscale/swscale.h>

#include <chrono>
#include <vector>
#include <unistd.h>

namespace enrigin {

// Log tag passed to the OTL_LOG* macros throughout this translation unit.
static const char* TAG = "StreamEncoder";

class FfmpegStreamEncoder final : public StreamEncoder {
public:
    FfmpegStreamEncoder(const std::string& codecName)
        : mCodecName(codecName) {
        mStart = std::chrono::steady_clock::now();
        mFrameCount.store(0);
        mForceIdr.store(true);
    }

    // Releases the codec context and cached codec parameters via close().
    ~FfmpegStreamEncoder() override {
        close();
    }

    /**
     * @brief Configure and open the encoder described by @p params.
     *
     * Resolves a concrete FFmpeg encoder name, fills the codec context,
     * adopts an external HW frames context when provided (zero-copy path),
     * optionally creates the VSV hw device/frames contexts, then calls
     * avcodec_open2().
     *
     * @param params encoder configuration; must be non-null.
     * @return 0 on success, a negative AVERROR code on failure.
     */
    int init(EncodeParam* params) override {
        if (!params) return AVERROR(EINVAL);
        mParams = *params;

        if (mParams.hwAccel == "vsv") // VSV HWAccel
        {
            // Point VCE_SHARED_LIB at libh2enc.so if the caller has not set it:
            // try the system install path first, then the per-user SDK dir.
            // (Presumably the VSV encoder plugin dlopens this path — confirm.)
            char* envVcdSo = getenv("VCE_SHARED_LIB");
            if (envVcdSo == nullptr) {
                std::string vce_lib_dir = "/opt/rivs/lib//libh2enc.so";
                if (access(vce_lib_dir.c_str(), F_OK) == 0) {
                    setenv("VCE_SHARED_LIB", vce_lib_dir.c_str(), 1);
                } else {
                    // BUGFIX: getenv("HOME") may return nullptr (e.g. daemon
                    // context); constructing std::string from a null pointer
                    // is undefined behavior, so guard it explicitly.
                    const char* home = getenv("HOME");
                    if (home != nullptr) {
                        vce_lib_dir = std::string(home) + "/enrigin_sdk/opt/rivs/lib/libh2enc.so";
                        setenv("VCE_SHARED_LIB", vce_lib_dir.c_str(), 1);
                    } else {
                        OTL_LOGE(TAG, "HOME not set; VCE_SHARED_LIB left unset");
                    }
                }
            }
        }

        const AVCodec* codec = resolveCodec(mParams.codecName);
        if (!codec) {
            OTL_LOGE(TAG, "codec not found for name=%s", mParams.codecName.c_str());
            return AVERROR_ENCODER_NOT_FOUND;
        }

        mCtx = avcodec_alloc_context3(codec);
        if (!mCtx) return AVERROR(ENOMEM);

        mCtx->codec_type = AVMEDIA_TYPE_VIDEO;
        mCtx->codec_id   = codec->id;
        mCtx->width      = mParams.width;
        mCtx->height     = mParams.height;
        // Default to a 90 kHz time base when the caller did not provide one.
        mCtx->time_base  = mParams.timeBase.num > 0 ? mParams.timeBase : (AVRational{1, 90000});
        if (mParams.frameRate.num > 0 && mParams.frameRate.den > 0) {
            mCtx->framerate = mParams.frameRate;
        }
        mCtx->pix_fmt    = mParams.pixFmt;
        if (mParams.bitRate > 0) mCtx->bit_rate = mParams.bitRate;
        if (mParams.gopSize > 0) mCtx->gop_size = mParams.gopSize;
        if (mParams.maxBFrames >= 0) mCtx->max_b_frames = mParams.maxBFrames;
        if (mParams.threadCount > 0) mCtx->thread_count = mParams.threadCount;

        // If an external HW frames context is provided (e.g., from decoder), adopt it.
        // This enables true zero-copy when feeding GPU frames to encoders like NVENC.
        if (mParams.externalHwFramesCtx) {
            AVHWFramesContext* frames_ctx = (AVHWFramesContext*)mParams.externalHwFramesCtx->data;
            if (frames_ctx) {
                // Set encoder to expect HW pixel format (e.g., AV_PIX_FMT_CUDA)
                mCtx->pix_fmt = frames_ctx->format;
                // Bind frames ctx and device ctx (each ref'd; freed with mCtx)
                mCtx->hw_frames_ctx = av_buffer_ref(mParams.externalHwFramesCtx);
                if (frames_ctx->device_ref) {
                    mCtx->hw_device_ctx = av_buffer_ref(frames_ctx->device_ref);
                }
                // Also set SW format and dimensions from frames context to avoid mismatch
                if (frames_ctx->sw_format != AV_PIX_FMT_NONE) {
                    mCtx->sw_pix_fmt = frames_ctx->sw_format;
                }
                if (frames_ctx->width > 0 && frames_ctx->height > 0) {
                    mCtx->width = frames_ctx->width;
                    mCtx->height = frames_ctx->height;
                }
                // When feeding HW frames directly, disable SW upload path
                mParams.autoUploadHw = false;
            }
        }

        // Common low-latency hints
#ifdef AV_CODEC_FLAG_GLOBAL_HEADER
        // leave global header decision to muxer; do not enforce here
#endif

        AVDictionary* opts = nullptr;
        if (mParams.options) {
            // clone external options but do not take ownership
            av_dict_copy(&opts, mParams.options, 0);
        }

        // If VSV path requested, initialize hw device and frames like sample
        if (mParams.hwPixelFormat != AV_PIX_FMT_NONE || mParams.cardId >= 0 || mParams.vpuId >= 0) {
            // VSV hardware doesn't support B-frames, force to 0
            mCtx->max_b_frames = 0;
            // Force pix_fmt to hw pixel format
            if (mParams.hwPixelFormat != AV_PIX_FMT_NONE) {
                mCtx->pix_fmt = mParams.hwPixelFormat;
            }
            // Prepare device options "enc=/dev/ecu{card}vid{vpu},mem=/dev/ecu{card},mapped_io=1"
            AVDictionary *devopts = nullptr;
            int cid = mParams.cardId >= 0 ? mParams.cardId : 0;
            int vid = mParams.vpuId >= 0 ? mParams.vpuId : 0;
            char buf[128];
            snprintf(buf, sizeof(buf), "enc=/dev/ecu%dvid%d,mem=/dev/ecu%d,mapped_io=1", cid, vid, cid);
            av_dict_parse_string(&devopts, buf, "=", ",", 0);
            AVBufferRef *hw_device_ctx = nullptr;
            enum AVHWDeviceType devType = av_hwdevice_find_type_by_name("vsv");
            if (devType != AV_HWDEVICE_TYPE_NONE) {
                if (av_hwdevice_ctx_create(&hw_device_ctx, devType, nullptr, devopts, 0) >= 0) {
                    // Context holds its own reference; local ref dropped below.
                    mCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
                    // setup hw frames ctx
                    AVBufferRef *hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
                    if (hw_frames_ref) {
                        auto *frames_ctx = (AVHWFramesContext*)(hw_frames_ref->data);
                        frames_ctx->format = mParams.hwPixelFormat != AV_PIX_FMT_NONE ? mParams.hwPixelFormat : mCtx->pix_fmt;
                        frames_ctx->sw_format = mParams.pixFmt;
                        frames_ctx->width = mParams.width;
                        frames_ctx->height = mParams.height;
                        frames_ctx->initial_pool_size = 20;
                        if (av_hwframe_ctx_init(hw_frames_ref) >= 0) {
                            mCtx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
                        }
                        av_buffer_unref(&hw_frames_ref);
                    }
                    av_buffer_unref(&hw_device_ctx);
                }
            }
            if (devopts) av_dict_free(&devopts);

            // Set encoder private parameters using av_opt_set BEFORE avcodec_open2
            // This matches the working encode_func.cpp example

            // Preset
            if (!mParams.preset.empty()) {
                av_opt_set(mCtx->priv_data, "preset", mParams.preset.c_str(), 0);
            } else {
                av_opt_set(mCtx->priv_data, "preset", "superfast", 0);
            }

            // GOP size - VSV only supports gop_size=1 (no B-frames)
            if (mParams.gopSize > 0) {
                char buf[32]; snprintf(buf, sizeof(buf), "%d", mParams.gopSize);
                av_opt_set(mCtx->priv_data, "gop_size", buf, 0);
            } else {
                av_opt_set(mCtx->priv_data, "gop_size", "1", 0);  // VSV default: gop_size=1
            }

            // Rate control mode: CBR when bitrate is set, VBR otherwise
            const char* rc_mode = "cbr";
            if (mParams.bitRate > 0) {
                rc_mode = "cbr";  // Use CBR for explicit bitrate
            } else {
                rc_mode = "vbr";  // Use VBR for quality-based encoding
                mCtx->bit_rate = 0;  // Clear bit_rate for VBR mode
            }
            av_opt_set(mCtx->priv_data, "rc_mode", rc_mode, 0);

            // Profile
            if (!mParams.profile.empty()) {
                av_opt_set(mCtx->priv_data, "profile", mParams.profile.c_str(), 0);
            } else {
                av_opt_set(mCtx->priv_data, "profile", "high", 0);
            }

            // Other VSV-specific parameters
            av_opt_set(mCtx->priv_data, "enable_cabac", "0", 0);
            av_opt_set(mCtx->priv_data, "level", "5.1", 0);

            // Note: roi_enable and rdo_level are NOT set in the working example
            // Setting them may cause issues
        }

        // Diagnostics for zero-copy
        if (mCtx->pix_fmt == AV_PIX_FMT_CUDA) {
            OTL_LOGI(TAG, "Using CUDA input: hw_frames_ctx=%p, hw_device_ctx=%p, sw_pix_fmt=%d, %dx%d",
                     (void*)mCtx->hw_frames_ctx, (void*)mCtx->hw_device_ctx, (int)mCtx->sw_pix_fmt, mCtx->width, mCtx->height);
        }


        int ret = avcodec_open2(mCtx, codec, &opts);
        if (ret < 0) {
            char err[AV_ERROR_MAX_STRING_SIZE];
            av_strerror(ret, err, sizeof(err));
            OTL_LOGE(TAG, "avcodec_open2 failed: %s", err);
            avcodec_free_context(&mCtx);
            av_dict_free(&opts);
            return ret;
        }
        av_dict_free(&opts);

        OTL_LOGI(TAG, "encoder opened: %s %dx%d pixfmt=%d qp=(%d,%d) br=%lld gop=%d b=%d tb=%d/%d fr=%d/%d",
                 mParams.codecName.c_str(), mCtx->width, mCtx->height, mCtx->pix_fmt, mCtx->qmin,mCtx->qmax,
                 (long long)mCtx->bit_rate, mCtx->gop_size, mCtx->max_b_frames,
                 mCtx->time_base.num, mCtx->time_base.den,
                 mCtx->framerate.num, mCtx->framerate.den);
        // Reset FPS accounting so getFps() measures from the (re)open point.
        mStart = std::chrono::steady_clock::now();
        mFrameCount.store(0);
        return 0;
    }

    /**
     * @brief Encode one frame and return at most one packet.
     *
     * Applies a pending keyframe request, optionally SW-scales/converts the
     * input, uploads to a HW frame when the encoder runs on a hw_frames_ctx,
     * then performs one send/receive cycle.
     *
     * @param frame input frame, or nullptr to start flushing the encoder.
     * @param p_pkt out: first available packet (caller frees via freePacket).
     * @param p_num out: 0 or 1 (number of packets returned).
     * @return 0 on success (including "no packet yet"), negative AVERROR otherwise.
     */
    int encode(AVFrame* frame, AVPacket** p_pkt, int* p_num) override {
        if (!mCtx) return AVERROR(EINVAL);
        if (!p_pkt || !p_num) return AVERROR(EINVAL);
        *p_pkt = nullptr; *p_num = 0;

        // apply on-demand keyframe request (one-shot flag set by requestKeyFrame)
        if (frame && mForceIdr.exchange(false)) {
            frame->pict_type = AV_PICTURE_TYPE_I;
            frame->key_frame = 1;
        }

        // Optional SW scale/convert before HW upload
        AVFrame* sw_converted = nullptr;
        auto need_sw_convert = [&](int w, int h)->bool{
            return frame && ( (frame->width != w) || (frame->height != h) || (mParams.allowSwScale && frame->format != mParams.preferredSwInput) );
        };

        if (frame && mParams.allowSwScale && need_sw_convert(mCtx->width, mCtx->height)) {
            // Perform SW scale/convert to desired input spec first
            sw_converted = av_frame_alloc();
            if (!sw_converted) return AVERROR(ENOMEM);
            sw_converted->format = (int)mParams.preferredSwInput;
            sw_converted->width = mCtx->width;
            sw_converted->height = mCtx->height;
            if (av_frame_get_buffer(sw_converted, 32) < 0) { av_frame_free(&sw_converted); return AVERROR(ENOMEM); }
            struct SwsContext* sws = sws_getContext(frame->width, frame->height, (AVPixelFormat)frame->format,
                                                    sw_converted->width, sw_converted->height, (AVPixelFormat)sw_converted->format,
                                                    SWS_BILINEAR, nullptr, nullptr, nullptr);
            if (!sws) { av_frame_free(&sw_converted); return AVERROR_EXTERNAL; }
            int ret_sws = sws_scale(sws,
                                    frame->data, frame->linesize,
                                    0, frame->height,
                                    sw_converted->data, sw_converted->linesize);
            sws_freeContext(sws);
            if (ret_sws <= 0) { av_frame_free(&sw_converted); return AVERROR_EXTERNAL; }
            // BUGFIX: sws_scale() copies pixels only; carry over pts and other
            // frame properties so the encoder receives valid timestamps.
            av_frame_copy_props(sw_converted, frame);
        }

        // prepare frame for HW encoders: if hw_frames_ctx is set and input is SW, upload to HW
        auto send_frame = sw_converted ? sw_converted : frame;
        AVFrame* hw_owned = nullptr; // if we allocate a hw frame, keep pointer to free after send
        if (send_frame && mCtx->hw_frames_ctx && mParams.autoUploadHw) {
            if (send_frame->format != mCtx->pix_fmt) {
                AVFrame* tmp = av_frame_alloc();
                if (!tmp) { if (sw_converted) av_frame_free(&sw_converted); return AVERROR(ENOMEM); }
                int ret_get = av_hwframe_get_buffer(mCtx->hw_frames_ctx, tmp, 0);
                if (ret_get < 0) {
                    av_frame_free(&tmp);
                    if (sw_converted) av_frame_free(&sw_converted);
                    logAvError("av_hwframe_get_buffer", ret_get);
                    return ret_get;
                }
                tmp->width = mCtx->width;
                tmp->height = mCtx->height;
                int ret_tx = av_hwframe_transfer_data(tmp, send_frame, 0);
                if (ret_tx < 0) {
                    logAvError("av_hwframe_transfer_data", ret_tx);
                    av_frame_free(&tmp);
                    if (sw_converted) av_frame_free(&sw_converted);
                    return ret_tx;
                }
                // BUGFIX: av_hwframe_transfer_data() copies pixel data only;
                // propagate pts/metadata to the uploaded frame.
                av_frame_copy_props(tmp, send_frame);
                hw_owned = tmp;
                send_frame = hw_owned;
            }
        }

        int ret = avcodec_send_frame(mCtx, send_frame);
        if (hw_owned) {
            av_frame_free(&hw_owned);
        }
        if (sw_converted) {
            av_frame_free(&sw_converted);
        }
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            logAvError("avcodec_send_frame", ret);
            return ret;
        }
        // NOTE(review): when send returns EAGAIN the input frame is silently
        // dropped; callers that must not lose frames should drain and resend.

        // Try receive one packet (API allows many; we return the first for simplicity)
        AVPacket* pkt = av_packet_alloc();
        if (!pkt) return AVERROR(ENOMEM);
        ret = avcodec_receive_packet(mCtx, pkt);
        if (ret == 0) {
            *p_pkt = pkt;
            *p_num = 1;
            mFrameCount.fetch_add(1, std::memory_order_relaxed);
            return 0;
        } else {
            av_packet_free(&pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return 0; // no output yet
            logAvError("avcodec_receive_packet", ret);
            return ret;
        }
    }

    /**
     * @brief Vector-based encode: sends a frame and drains ALL available packets.
     *
     * @param frame   input frame, or nullptr to start flushing the encoder.
     * @param outPkts receives every packet produced (caller frees each via
     *                freePacket); existing contents are preserved.
     * @return 0 on success (including "no packet yet"), negative AVERROR otherwise.
     */
    int encode(AVFrame* frame, std::vector<AVPacket*>& outPkts) override {
        if (!mCtx) return AVERROR(EINVAL);

        // apply on-demand keyframe request (one-shot flag set by requestKeyFrame)
        if (frame && mForceIdr.exchange(false)) {
            frame->pict_type = AV_PICTURE_TYPE_I;
            frame->key_frame = 1;
        }

        // Optional SW scale/convert for SW input when requested
        AVFrame* sw_converted = nullptr;
        auto need_sw_convert = [&](int w, int h)->bool{
            return frame && ( (frame->width != w) || (frame->height != h) || (mParams.allowSwScale && frame->format != mParams.preferredSwInput) );
        };
        if (frame && mParams.allowSwScale && need_sw_convert(mCtx->width, mCtx->height)) {
            sw_converted = av_frame_alloc();
            if (!sw_converted) return AVERROR(ENOMEM);
            sw_converted->format = (int)mParams.preferredSwInput;
            sw_converted->width = mCtx->width;
            sw_converted->height = mCtx->height;
            if (av_frame_get_buffer(sw_converted, 32) < 0) { av_frame_free(&sw_converted); return AVERROR(ENOMEM); }
            struct SwsContext* sws = sws_getContext(frame->width, frame->height, (AVPixelFormat)frame->format,
                                                    sw_converted->width, sw_converted->height, (AVPixelFormat)sw_converted->format,
                                                    SWS_BILINEAR, nullptr, nullptr, nullptr);
            if (!sws) { av_frame_free(&sw_converted); return AVERROR_EXTERNAL; }
            int ret_sws = sws_scale(sws,
                                    frame->data, frame->linesize,
                                    0, frame->height,
                                    sw_converted->data, sw_converted->linesize);
            sws_freeContext(sws);
            if (ret_sws <= 0) { av_frame_free(&sw_converted); return AVERROR_EXTERNAL; }
            // BUGFIX: sws_scale() copies pixels only; carry over pts and other
            // frame properties so the encoder receives valid timestamps.
            av_frame_copy_props(sw_converted, frame);
        }

        // prepare frame for HW encoders: if hw_frames_ctx is set and input is SW, upload to HW
        auto send_frame = sw_converted ? sw_converted : frame;
        AVFrame* hw_owned = nullptr; // if we allocate a hw frame, keep pointer to free after send
        if (send_frame && mCtx->hw_frames_ctx && mParams.autoUploadHw) {
            if (send_frame->format != mCtx->pix_fmt) {
                AVFrame* tmp = av_frame_alloc();
                if (!tmp) { if (sw_converted) av_frame_free(&sw_converted); return AVERROR(ENOMEM); }
                int ret_get = av_hwframe_get_buffer(mCtx->hw_frames_ctx, tmp, 0);
                if (ret_get < 0) {
                    av_frame_free(&tmp);
                    if (sw_converted) av_frame_free(&sw_converted);
                    logAvError("av_hwframe_get_buffer", ret_get);
                    return ret_get;
                }
                tmp->width = mCtx->width;
                tmp->height = mCtx->height;
                int ret_tx = av_hwframe_transfer_data(tmp, send_frame, 0);
                if (ret_tx < 0) {
                    logAvError("av_hwframe_transfer_data", ret_tx);
                    av_frame_free(&tmp);
                    if (sw_converted) av_frame_free(&sw_converted);
                    return ret_tx;
                }
                // BUGFIX: av_hwframe_transfer_data() copies pixel data only;
                // propagate pts/metadata to the uploaded frame.
                av_frame_copy_props(tmp, send_frame);
                hw_owned = tmp;
                send_frame = hw_owned;
            }
        }

        int ret = avcodec_send_frame(mCtx, send_frame);
        if (hw_owned) {
            av_frame_free(&hw_owned);
        }
        // BUGFIX: the single-packet overload frees sw_converted after send,
        // but this overload leaked it on every call (and every drain-loop
        // return path). Free it here, once the encoder has consumed the data.
        if (sw_converted) {
            av_frame_free(&sw_converted);
        }
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
            logAvError("avcodec_send_frame", ret);
            return ret;
        }

        // Drain every packet currently available from the encoder.
        while (true) {
            AVPacket* pkt = av_packet_alloc();
            if (!pkt) return AVERROR(ENOMEM);
            ret = avcodec_receive_packet(mCtx, pkt);
            if (ret == 0) {
                outPkts.push_back(pkt);
                mFrameCount.fetch_add(1, std::memory_order_relaxed);
                continue; // try drain more
            }
            av_packet_free(&pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return 0; // done
            logAvError("avcodec_receive_packet", ret);
            return ret;
        }
    }

    /// Free a packet previously returned by encode(). Accepts nullptr.
    void freePacket(AVPacket* pkt) override {
        if (pkt) {
            // av_packet_free() already unrefs the packet before freeing it;
            // the previous explicit av_packet_unref() call was redundant.
            av_packet_free(&pkt);
        }
    }

    /**
     * @brief Codec parameters (extradata, dimensions, ...) of the open encoder.
     *
     * Lazily allocates a cached AVCodecParameters (mutable member) and
     * refreshes it from the live context on every call, so extradata that
     * appears after open is reflected. The returned pointer is owned by this
     * encoder and stays valid until close(); callers must not free it.
     *
     * @return parameters, or nullptr before init() / on allocation or copy failure.
     */
    const AVCodecParameters* getCodecParameters() const override {
        if (!mCtx) return nullptr;
        if (!mCodecPar) {
            mCodecPar = avcodec_parameters_alloc();
            if (!mCodecPar) return nullptr;
        }
        // Refresh from current context each call to reflect runtime changes (e.g., extradata)
        if (avcodec_parameters_from_context(mCodecPar, mCtx) < 0) {
            return nullptr;
        }
        return mCodecPar;
    }

    /// Time base used for packet PTS/DTS; 90 kHz default before init().
    AVRational getTimeBase() const override {
        return mCtx ? mCtx->time_base : AVRational{1, 90000};
    }

    /// Request that the next submitted frame be forced to an I/IDR frame.
    /// Thread-safe: sets a one-shot atomic flag consumed by encode().
    int requestKeyFrame() override {
        mForceIdr.store(true, std::memory_order_relaxed);
        return 0;
    }

    /// Average encoded-packets-per-second since construction / last init().
    /// @param frames     out: total packets produced so far.
    /// @param elapsedSec out: seconds elapsed since the reference point.
    /// @return frames / elapsedSec, or 0.0 when no time has elapsed.
    double getFps(uint64_t &frames, double &elapsedSec) const override {
        frames = mFrameCount.load(std::memory_order_relaxed);
        const auto elapsed = std::chrono::steady_clock::now() - mStart;
        elapsedSec = std::chrono::duration<double>(elapsed).count();
        return (elapsedSec > 0.0) ? static_cast<double>(frames) / elapsedSec : 0.0;
    }

private:
    // Describe the input contract of the (possibly not-yet-opened) encoder so
    // callers can negotiate frame geometry and pixel formats up front.
    int getInputSpec(InputSpec &spec) const override {
        if (mCtx) {
            spec.width = mCtx->width;
            spec.height = mCtx->height;
        } else {
            spec.width = mParams.width;
            spec.height = mParams.height;
        }
        const bool hwConfigured = (mParams.hwPixelFormat != AV_PIX_FMT_NONE);
        spec.acceptsHw = (mCtx && mCtx->hw_frames_ctx) || hwConfigured;
        spec.acceptsSw = true;
        spec.preferredSwPixFmt = mParams.preferredSwInput;
        if (hwConfigured) {
            spec.preferredHwPixFmt = mParams.hwPixelFormat;
        } else {
            spec.preferredHwPixFmt = mCtx ? mCtx->pix_fmt : AV_PIX_FMT_NONE;
        }
        return 0;
    }
    /**
     * @brief Map a user-facing codec name to a concrete FFmpeg encoder.
     *
     * Builds an ordered candidate list — hardware encoders first when
     * mParams.preferHardware / hwAccel ask for them, then the literal name,
     * then software fallbacks — and returns the first encoder this FFmpeg
     * build actually provides.
     *
     * @param name codec family or encoder name ("h264", "hevc", "libx264", ...).
     * @return matching AVCodec, or nullptr when nothing is available.
     */
    const AVCodec* resolveCodec(const std::string& name) {
        std::string lower = name;
        // BUGFIX: cast through unsigned char — passing a negative plain char
        // to tolower() is undefined behavior.
        for (auto &c : lower) c = (char)tolower((unsigned char)c);
        // Normalize some aliases
        if (lower == "h265") lower = "hevc";
        if (lower == "x264") lower = "h264";   // treat as family, choose hw if available
        if (lower == "x265") lower = "hevc";

        // Build candidate list, prefer hardware if requested
        std::vector<std::string> candidates;

        auto push_if = [&](const std::string& v) {
            if (!v.empty()) candidates.push_back(v);
        };

        auto push_hw_family = [&](const std::string& family){
            // Respect explicit hwAccel preference first
            if (!mParams.hwAccel.empty()) {
                std::string acc = mParams.hwAccel;
                for (auto &ch : acc) ch = (char)tolower((unsigned char)ch);
                if (family == "h264") {
                    if (acc == "vsv") push_if("h264_vsv_encoder");
                    if (acc == "videotoolbox") push_if("h264_videotoolbox");
                    if (acc == "nvenc" || acc == "cuda") push_if("h264_nvenc");
                    if (acc == "qsv") push_if("h264_qsv");
                    if (acc == "amf") push_if("h264_amf");
                    if (acc == "vaapi") push_if("h264_vaapi");
                } else if (family == "hevc") {
                    if (acc == "vsv") push_if("hevc_vsv_encoder");
                    if (acc == "videotoolbox") push_if("hevc_videotoolbox");
                    if (acc == "nvenc" || acc == "cuda") push_if("hevc_nvenc");
                    if (acc == "qsv") push_if("hevc_qsv");
                    if (acc == "amf") push_if("hevc_amf");
                    if (acc == "vaapi") push_if("hevc_vaapi");
                }
            } else {
                // Platform default preference order
#if defined(__APPLE__)
                if (family == "h264") push_if("h264_videotoolbox");
                if (family == "hevc") push_if("hevc_videotoolbox");
#endif
                // Prefer VSV first if cardId/vpuId provided
                if (mParams.cardId >= 0 || mParams.vpuId >= 0 || mParams.hwPixelFormat != AV_PIX_FMT_NONE) {
                    if (family == "h264") push_if("h264_vsv_encoder");
                    if (family == "hevc") push_if("hevc_vsv_encoder");
                }
                if (family == "h264") {
                    push_if("h264_nvenc");
                    push_if("h264_qsv");
                    push_if("h264_amf");
                    push_if("h264_vaapi");
                } else if (family == "hevc") {
                    push_if("hevc_nvenc");
                    push_if("hevc_qsv");
                    push_if("hevc_amf");
                    push_if("hevc_vaapi");
                }
            }
        };

        if (mParams.preferHardware) {
            if (lower == "h264") push_hw_family("h264");
            if (lower == "hevc") push_hw_family("hevc");
        }

        // Then try the exact name as given (SW or HW)
        push_if(lower);

        // Software fallbacks by common names
        // For software encoding, try multiple options in order of preference
        if (!mParams.preferHardware) {
            if (lower == "h264" || lower == "libx264") {
                push_if("libx264");      // Preferred software encoder
                push_if("libopenh264");  // Alternative software encoder
                push_if("h264");         // Generic name
            }
            if (lower == "hevc" || lower == "libx265") {
                push_if("libx265");      // Preferred software encoder
                push_if("hevc");         // Generic name
            }
        } else {
            // For hardware encoding, still add software fallbacks as last resort
            if (lower == "h264") push_if("libx264");
            if (lower == "hevc") push_if("libx265");
        }
        if (lower == "mjpeg") push_if("mjpeg");
        if (lower == "mpeg4") push_if("mpeg4");

        // Probe in order
        for (const auto& n : candidates) {
            if (const AVCodec* c = avcodec_find_encoder_by_name(n.c_str())) {
                OTL_LOGI(TAG, "Selected encoder: %s for codec family: %s", c->name, lower.c_str());
                return c;
            }
        }

        // No fallback - if encoder not found, return nullptr
        // This system only supports VSV hardware encoding
        OTL_LOGE(TAG, "Encoder not found for %s (preferHardware=%d)", lower.c_str(), mParams.preferHardware);
        return nullptr;
    }

    // Release the codec context and cached codec parameters. Safe to call
    // multiple times: pointers are reset to nullptr after freeing.
    void close() {
        if (mCtx) {
            avcodec_free_context(&mCtx);
            mCtx = nullptr;
        }
        if (mCodecPar) {
            avcodec_parameters_free(&mCodecPar);
            mCodecPar = nullptr;
        }
    }

    static void logAvError(const char* what, int err) {
        char buf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(err, buf, sizeof(buf));
        OTL_LOGE(TAG, "%s failed: %s", what, buf);
    }

private:
    std::string mCodecName;   // codec family name given to the factory
    EncodeParam mParams{};    // snapshot of the params passed to init()
    AVCodecContext* mCtx{nullptr};  // open encoder context (owned; freed in close())
    mutable AVCodecParameters* mCodecPar{nullptr};  // lazily allocated cache for getCodecParameters()

    std::atomic<bool> mForceIdr{false};      // one-shot "force keyframe" request flag
    std::atomic<uint64_t> mFrameCount{0};    // packets produced since init()
    std::chrono::steady_clock::time_point mStart;  // reference point for getFps()
};

/// Factory: create the FFmpeg-backed StreamEncoder implementation.
/// @param codecName codec family (e.g. "h264", "hevc"); resolution to a
///        concrete encoder happens later in init().
std::unique_ptr<StreamEncoder> CreateStreamEncoder(const std::string &codecName) {
    return std::unique_ptr<StreamEncoder>(new FfmpegStreamEncoder(codecName));
}

// Out-of-line defaulted definitions for the abstract base's ctor/dtor.
StreamEncoder::StreamEncoder() = default;
StreamEncoder::~StreamEncoder() = default;

} // namespace enrigin
