/*
 * GStreamer Enrigin Encoder Plugin (Direct FFmpeg Implementation)
 * Implemented directly on top of the FFmpeg VSV encoder, following the
 * logic of encode_func.cpp.
 */

#include "gst-enriginenc.h"
#include <string.h>

#ifndef PACKAGE
#define PACKAGE "enriginenc"
#endif

GST_DEBUG_CATEGORY_STATIC(gst_enriginenc_debug);
#define GST_CAT_DEFAULT gst_enriginenc_debug

/* Properties */
enum
{
  PROP_0,
  PROP_CODEC_NAME,
  PROP_BITRATE,
  PROP_GOP_SIZE,
  PROP_PRESET,
  PROP_PROFILE,
  PROP_RC_MODE,
  PROP_LEVEL,
  PROP_ENABLE_CABAC,
  PROP_CARD_ID,
  PROP_VPU_ID,
};

/* Default values for the properties installed in class_init */
#define DEFAULT_CODEC_NAME "h264"
#define DEFAULT_BITRATE 2000          /* kbps (converted to bps in set_format) */
#define DEFAULT_GOP_SIZE 1            /* VSV operates with GOP size 1 */
#define DEFAULT_PRESET "superfast"
#define DEFAULT_PROFILE "high"
#define DEFAULT_RC_MODE "vbr"
#define DEFAULT_LEVEL "5.1"
#define DEFAULT_ENABLE_CABAC FALSE
#define DEFAULT_CARD_ID 0
#define DEFAULT_VPU_ID 0

/* Pad templates */
/* Raw video accepted on the sink pad: 8-bit 4:2:0 (I420 or NV12),
 * 16x16 up to 4096x4096, any framerate. */
static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE(
    "sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS(
        "video/x-raw, "
        "format = (string) { I420, NV12 }, "
        "width = (int) [ 16, 4096 ], "
        "height = (int) [ 16, 4096 ], "
        "framerate = (fraction) [ 0/1, MAX ]"
    )
);

/* Encoded output on the src pad: Annex-B byte-stream H.264 or H.265,
 * one access unit per buffer. */
static GstStaticPadTemplate src_template = GST_STATIC_PAD_TEMPLATE(
    "src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS(
        "video/x-h264, "
        "stream-format = (string) byte-stream, "
        "alignment = (string) au; "
        "video/x-h265, "
        "stream-format = (string) byte-stream, "
        "alignment = (string) au"
    )
);

#define gst_enriginenc_parent_class parent_class
/* Register GstEnriginEnc as a GObject type derived from GstVideoEncoder. */
G_DEFINE_TYPE(GstEnriginEnc, gst_enriginenc, GST_TYPE_VIDEO_ENCODER);

/* Forward declarations of the vmethod implementations wired up in class_init */
static void gst_enriginenc_set_property(GObject *object, guint prop_id,
    const GValue *value, GParamSpec *pspec);
static void gst_enriginenc_get_property(GObject *object, guint prop_id,
    GValue *value, GParamSpec *pspec);
static void gst_enriginenc_finalize(GObject *object);
static gboolean gst_enriginenc_start(GstVideoEncoder *encoder);
static gboolean gst_enriginenc_stop(GstVideoEncoder *encoder);
static gboolean gst_enriginenc_set_format(GstVideoEncoder *encoder,
    GstVideoCodecState *state);
static GstFlowReturn gst_enriginenc_handle_frame(GstVideoEncoder *encoder,
    GstVideoCodecFrame *frame);
static GstFlowReturn gst_enriginenc_finish(GstVideoEncoder *encoder);

/* Helper functions */
/*
 * Build the FFmpeg VSV device-option string for the given VPU/card IDs,
 * e.g. "enc=/dev/ecu0vid0,mem=/dev/ecu0,mapped_io=1".
 * Returns a newly allocated string the caller must release with g_free().
 */
static char* generate_vpu_parameters(int vid, int cid)
{
    /* g_strdup_printf sizes the buffer exactly, so the result can never be
     * truncated regardless of how large the ids get (the previous fixed
     * 100-byte buffer could silently truncate). */
    return g_strdup_printf("enc=/dev/ecu%dvid%d,mem=/dev/ecu%d,mapped_io=1",
                           cid, vid, cid);
}

/*
 * Create the FFmpeg VSV hardware device context from the element's
 * card-id/vpu-id properties and attach a reference to enc->avctx.
 * Returns TRUE on success.  On failure, any device context already stored in
 * enc->hw_device_ctx is released by the caller's cleanup path (set_format).
 */
static gboolean init_hw_encoder(GstEnriginEnc *enc)
{
    int ret;
    char *vpu_device;
    AVDictionary *opts = NULL;
    
    /* The VSV encoder library is loaded via this env var; provide a sane
     * default so pipelines work without external setup. */
    if (!g_getenv("VCE_SHARED_LIB")) {
        g_setenv("VCE_SHARED_LIB", "/opt/rivs/lib/libh2enc.so", TRUE);
        GST_INFO_OBJECT(enc, "Set VCE_SHARED_LIB=/opt/rivs/lib/libh2enc.so");
    }
    
    /* Parse "key=value,key=value" pairs into an AVDictionary. */
    vpu_device = generate_vpu_parameters(enc->vpu_id, enc->card_id);
    ret = av_dict_parse_string(&opts, vpu_device, "=", ",", 0);
    g_free(vpu_device);
    
    if (ret < 0) {
        GST_ERROR_OBJECT(enc, "Failed to parse VSV options");
        return FALSE;
    }
    
    ret = av_hwdevice_ctx_create(&enc->hw_device_ctx, AV_HWDEVICE_TYPE_VSV, 
                                  NULL, opts, 0);
    av_dict_free(&opts);
    
    if (ret < 0) {
        GST_ERROR_OBJECT(enc, "Failed to create HW device");
        return FALSE;
    }
    
    /* av_buffer_ref() can fail on OOM.  The original code ignored that,
     * leaving avctx->hw_device_ctx NULL and failing much later inside
     * avcodec_open2 with a confusing error. */
    enc->avctx->hw_device_ctx = av_buffer_ref(enc->hw_device_ctx);
    if (!enc->avctx->hw_device_ctx) {
        GST_ERROR_OBJECT(enc, "Failed to reference HW device context");
        return FALSE;
    }
    return TRUE;
}

/*
 * Allocate and initialise the VSV hardware frame pool and attach a reference
 * to enc->avctx->hw_frames_ctx.  Returns TRUE on success.
 */
static gboolean set_hwframe_ctx(GstEnriginEnc *enc)
{
    gboolean ok = FALSE;
    AVBufferRef *frames_ref = av_hwframe_ctx_alloc(enc->hw_device_ctx);

    if (frames_ref == NULL)
        return FALSE;

    AVHWFramesContext *fctx = (AVHWFramesContext *)frames_ref->data;
    fctx->format = AV_PIX_FMT_VSV;
    /* Upload format must match the negotiated software pixel format. */
    fctx->sw_format = enc->sw_pix_fmt;
    fctx->width = enc->width;
    fctx->height = enc->height;
    /* Generous pool so continuous encoding cannot starve for surfaces. */
    fctx->initial_pool_size = 40;

    if (av_hwframe_ctx_init(frames_ref) >= 0) {
        enc->avctx->hw_frames_ctx = av_buffer_ref(frames_ref);
        ok = (enc->avctx->hw_frames_ctx != NULL);
    }

    /* Drop the local reference in every case; the codec context (if the init
     * succeeded) keeps its own reference. */
    av_buffer_unref(&frames_ref);
    return ok;
}

/* Class initialization */
/* Class setup: wire GObject/GstVideoEncoder vmethods, install the element's
 * properties, and register metadata + pad templates. */
static void
gst_enriginenc_class_init(GstEnriginEncClass *klass)
{
  GObjectClass *oclass = G_OBJECT_CLASS(klass);
  GstElementClass *eclass = GST_ELEMENT_CLASS(klass);
  GstVideoEncoderClass *vclass = GST_VIDEO_ENCODER_CLASS(klass);
  /* All properties share the same flags. */
  const GParamFlags rw = (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);

  oclass->set_property = gst_enriginenc_set_property;
  oclass->get_property = gst_enriginenc_get_property;
  oclass->finalize = gst_enriginenc_finalize;

  vclass->start = GST_DEBUG_FUNCPTR(gst_enriginenc_start);
  vclass->stop = GST_DEBUG_FUNCPTR(gst_enriginenc_stop);
  vclass->set_format = GST_DEBUG_FUNCPTR(gst_enriginenc_set_format);
  vclass->handle_frame = GST_DEBUG_FUNCPTR(gst_enriginenc_handle_frame);
  vclass->finish = GST_DEBUG_FUNCPTR(gst_enriginenc_finish);

  /* Element metadata and static pad templates. */
  gst_element_class_set_static_metadata(eclass,
      "Enrigin Video Encoder (FFmpeg Direct)",
      "Codec/Encoder/Video",
      "H264/H265 hardware encoder using FFmpeg VSV",
      "Cascade AI");
  gst_element_class_add_static_pad_template(eclass, &sink_template);
  gst_element_class_add_static_pad_template(eclass, &src_template);

  /* Configurable properties. */
  g_object_class_install_property(oclass, PROP_CODEC_NAME,
      g_param_spec_string("codec", "Codec", "Codec (h264/hevc)",
          DEFAULT_CODEC_NAME, rw));

  g_object_class_install_property(oclass, PROP_BITRATE,
      g_param_spec_int("bitrate", "Bitrate", "Bitrate in kbps",
          0, G_MAXINT, DEFAULT_BITRATE, rw));

  g_object_class_install_property(oclass, PROP_GOP_SIZE,
      g_param_spec_int("gop-size", "GOP Size", "GOP size (VSV=1)",
          1, 1, DEFAULT_GOP_SIZE, rw));

  g_object_class_install_property(oclass, PROP_PRESET,
      g_param_spec_string("preset", "Preset", "Encoding preset",
          DEFAULT_PRESET, rw));

  g_object_class_install_property(oclass, PROP_PROFILE,
      g_param_spec_string("profile", "Profile", "Encoding profile",
          DEFAULT_PROFILE, rw));

  g_object_class_install_property(oclass, PROP_RC_MODE,
      g_param_spec_string("rc-mode", "RC Mode", "Rate control (cbr/vbr/abr)",
          DEFAULT_RC_MODE, rw));

  g_object_class_install_property(oclass, PROP_LEVEL,
      g_param_spec_string("level", "Level", "Encoding level",
          DEFAULT_LEVEL, rw));

  g_object_class_install_property(oclass, PROP_ENABLE_CABAC,
      g_param_spec_boolean("enable-cabac", "CABAC", "Enable CABAC",
          DEFAULT_ENABLE_CABAC, rw));

  g_object_class_install_property(oclass, PROP_CARD_ID,
      g_param_spec_int("card-id", "Card ID", "VSV Card ID",
          0, G_MAXINT, DEFAULT_CARD_ID, rw));

  g_object_class_install_property(oclass, PROP_VPU_ID,
      g_param_spec_int("vpu-id", "VPU ID", "VSV VPU ID",
          0, G_MAXINT, DEFAULT_VPU_ID, rw));

  GST_DEBUG_CATEGORY_INIT(gst_enriginenc_debug, "enriginenc", 0, "Enrigin Encoder");
}

/* Instance init: set every field to its documented default. */
static void
gst_enriginenc_init(GstEnriginEnc *enc)
{
  /* User-visible properties */
  enc->codec_name = g_strdup(DEFAULT_CODEC_NAME);
  enc->preset = g_strdup(DEFAULT_PRESET);
  enc->profile = g_strdup(DEFAULT_PROFILE);
  enc->rc_mode = g_strdup(DEFAULT_RC_MODE);
  enc->level = g_strdup(DEFAULT_LEVEL);
  enc->bitrate = DEFAULT_BITRATE;
  enc->gop_size = DEFAULT_GOP_SIZE;
  enc->enable_cabac = DEFAULT_ENABLE_CABAC;
  enc->card_id = DEFAULT_CARD_ID;
  enc->vpu_id = DEFAULT_VPU_ID;

  /* Internal encoder state (created lazily in set_format) */
  enc->avctx = NULL;
  enc->codec = NULL;
  enc->hw_device_ctx = NULL;
  enc->sw_frame = NULL;
  enc->hw_frame = NULL;
  enc->encoder_initialized = FALSE;
  enc->frame_count = 0;
  enc->fps_num = 25;  /* fallback until caps negotiation provides a rate */
  enc->fps_den = 1;
  enc->pending_frames = g_queue_new();
}

/* Standard GObject property setter; string properties free the previous
 * value before duplicating the new one. */
static void
gst_enriginenc_set_property(GObject *object, guint prop_id,
    const GValue *value, GParamSpec *pspec)
{
  GstEnriginEnc *self = GST_ENRIGINENC(object);

  switch (prop_id) {
    case PROP_CODEC_NAME:
      g_free(self->codec_name);
      self->codec_name = g_value_dup_string(value);
      break;
    case PROP_BITRATE:
      self->bitrate = g_value_get_int(value);
      break;
    case PROP_GOP_SIZE:
      self->gop_size = g_value_get_int(value);
      break;
    case PROP_PRESET:
      g_free(self->preset);
      self->preset = g_value_dup_string(value);
      break;
    case PROP_PROFILE:
      g_free(self->profile);
      self->profile = g_value_dup_string(value);
      break;
    case PROP_RC_MODE:
      g_free(self->rc_mode);
      self->rc_mode = g_value_dup_string(value);
      break;
    case PROP_LEVEL:
      g_free(self->level);
      self->level = g_value_dup_string(value);
      break;
    case PROP_ENABLE_CABAC:
      self->enable_cabac = g_value_get_boolean(value);
      break;
    case PROP_CARD_ID:
      self->card_id = g_value_get_int(value);
      break;
    case PROP_VPU_ID:
      self->vpu_id = g_value_get_int(value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
      break;
  }
}

/* Standard GObject property getter: mirror of set_property. */
static void
gst_enriginenc_get_property(GObject *object, guint prop_id,
    GValue *value, GParamSpec *pspec)
{
  GstEnriginEnc *self = GST_ENRIGINENC(object);

  switch (prop_id) {
    case PROP_CODEC_NAME:
      g_value_set_string(value, self->codec_name);
      break;
    case PROP_BITRATE:
      g_value_set_int(value, self->bitrate);
      break;
    case PROP_GOP_SIZE:
      g_value_set_int(value, self->gop_size);
      break;
    case PROP_PRESET:
      g_value_set_string(value, self->preset);
      break;
    case PROP_PROFILE:
      g_value_set_string(value, self->profile);
      break;
    case PROP_RC_MODE:
      g_value_set_string(value, self->rc_mode);
      break;
    case PROP_LEVEL:
      g_value_set_string(value, self->level);
      break;
    case PROP_ENABLE_CABAC:
      g_value_set_boolean(value, self->enable_cabac);
      break;
    case PROP_CARD_ID:
      g_value_set_int(value, self->card_id);
      break;
    case PROP_VPU_ID:
      g_value_set_int(value, self->vpu_id);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec);
      break;
  }
}

/*
 * GObject finalize: release property strings, the pending-frames queue, and
 * — as a safety net in case stop() was never called — all FFmpeg-side state.
 */
static void
gst_enriginenc_finalize(GObject *object)
{
  GstEnriginEnc *enc = GST_ENRIGINENC(object);

  g_free(enc->codec_name);
  g_free(enc->preset);
  g_free(enc->profile);
  g_free(enc->rc_mode);
  g_free(enc->level);

  /* Drain and free the pending-frames queue; each queued frame holds a ref
   * taken in handle_frame. */
  if (enc->pending_frames) {
    while (!g_queue_is_empty(enc->pending_frames)) {
      GstVideoCodecFrame *frame = (GstVideoCodecFrame*)g_queue_pop_head(enc->pending_frames);
      gst_video_codec_frame_unref(frame);
    }
    g_queue_free(enc->pending_frames);
    enc->pending_frames = NULL;
  }

  /* Safety net: the original version freed avctx and hw_device_ctx here but
   * missed sw_frame/hw_frame, leaking them when stop() was not called.
   * The av_* free functions already NULL the pointer they are given, so no
   * manual NULL assignment is needed. */
  if (enc->sw_frame)
    av_frame_free(&enc->sw_frame);
  if (enc->hw_frame)
    av_frame_free(&enc->hw_frame);
  if (enc->avctx)
    avcodec_free_context(&enc->avctx);
  if (enc->hw_device_ctx)
    av_buffer_unref(&enc->hw_device_ctx);

  G_OBJECT_CLASS(parent_class)->finalize(object);
}

/* start vmethod: reset per-stream state before the first buffer arrives. */
static gboolean
gst_enriginenc_start(GstVideoEncoder *encoder)
{
  GST_ENRIGINENC(encoder)->frame_count = 0;
  return TRUE;
}

/*
 * stop vmethod: drop any input frames still awaiting encoder output and
 * release all per-stream FFmpeg resources.  The pending_frames queue object
 * itself is kept for reuse; it is freed in finalize.
 */
static gboolean
gst_enriginenc_stop(GstVideoEncoder *encoder)
{
  GstEnriginEnc *enc = GST_ENRIGINENC(encoder);
  
  GST_INFO_OBJECT(enc, "Stopping encoder, encoded %ld frames", enc->frame_count);
  
  /* Drop frames that never produced an encoded packet. */
  if (enc->pending_frames) {
    int remaining = g_queue_get_length(enc->pending_frames);
    if (remaining > 0) {
      GST_WARNING_OBJECT(enc, "Clearing %d pending frames on stop", remaining);
    }
    while (!g_queue_is_empty(enc->pending_frames)) {
      GstVideoCodecFrame *frame = (GstVideoCodecFrame*)g_queue_pop_head(enc->pending_frames);
      gst_video_codec_frame_unref(frame);
    }
    /* (The redundant g_queue_clear() that followed this loop was removed:
     * the queue is already empty at this point.) */
  }
  
  if (enc->sw_frame) { av_frame_free(&enc->sw_frame); enc->sw_frame = NULL; }
  if (enc->hw_frame) { av_frame_free(&enc->hw_frame); enc->hw_frame = NULL; }
  if (enc->avctx) { avcodec_free_context(&enc->avctx); enc->avctx = NULL; }
  if (enc->hw_device_ctx) { av_buffer_unref(&enc->hw_device_ctx); enc->hw_device_ctx = NULL; }
  
  enc->encoder_initialized = FALSE;
  enc->frame_count = 0;  /* reset frame counter for the next stream */
  
  return TRUE;
}

/*
 * set_format vmethod: called on caps (re)negotiation.
 * Tears down any previous FFmpeg state, creates the VSV codec context plus
 * hardware device and frame-pool contexts, opens the encoder, publishes
 * byte-stream/au output caps, and reports a ~6-frame encoder latency.
 * Returns TRUE on success; on failure everything partially created is freed.
 */
static gboolean
gst_enriginenc_set_format(GstVideoEncoder *encoder, GstVideoCodecState *state)
{
  GstEnriginEnc *enc = GST_ENRIGINENC(encoder);
  GstVideoInfo *info = &state->info;
  const char *encoder_name;
  int ret;
  GstCaps *outcaps = NULL;                // declared early to avoid goto crossing init
  GstVideoCodecState *output_state = NULL; // declared early to avoid goto crossing init
  
  // Tear down any previous contexts to avoid leaks on renegotiation
  if (enc->avctx) {
    avcodec_free_context(&enc->avctx);
    enc->avctx = NULL;
  }
  if (enc->hw_device_ctx) {
    av_buffer_unref(&enc->hw_device_ctx);
    enc->hw_device_ctx = NULL;
  }
  enc->encoder_initialized = FALSE;

  enc->width = GST_VIDEO_INFO_WIDTH(info);
  enc->height = GST_VIDEO_INFO_HEIGHT(info);
  // Fall back to 25/1 fps when the caps carry no framerate.
  // NOTE: `?:` (the Elvis operator) is a GNU C extension — fine for gcc/clang builds.
  enc->fps_num = GST_VIDEO_INFO_FPS_N(info) ?: 25;
  enc->fps_den = GST_VIDEO_INFO_FPS_D(info) ?: 1;
  
  // Map the negotiated GStreamer format to the FFmpeg software pixel format
  // used for uploads.  Only I420/NV12 are reachable via the sink template.
  // NOTE(review): the YV12 case maps to YUV420P, which would swap the U/V
  // planes (YV12 stores V before U) — unreachable today, but verify before
  // ever adding YV12 to the sink caps.
  switch (GST_VIDEO_INFO_FORMAT(info)) {
    case GST_VIDEO_FORMAT_I420: enc->sw_pix_fmt = AV_PIX_FMT_YUV420P; break;
    case GST_VIDEO_FORMAT_NV12: enc->sw_pix_fmt = AV_PIX_FMT_NV12; break;
    case GST_VIDEO_FORMAT_YV12: enc->sw_pix_fmt = AV_PIX_FMT_YUV420P; break;
    case GST_VIDEO_FORMAT_BGR: enc->sw_pix_fmt = AV_PIX_FMT_BGR24; break;
    default: goto fail;
  }
  
  // Pick the VSV encoder by name; anything other than "hevc" selects H.264.
  encoder_name = (g_strcmp0(enc->codec_name, "hevc") == 0) ? "hevc_vsv_encoder" : "h264_vsv_encoder";
  enc->codec = avcodec_find_encoder_by_name(encoder_name);
  if (!enc->codec) goto fail;
  
  enc->avctx = avcodec_alloc_context3(enc->codec);
  if (!enc->avctx) goto fail;
  
  enc->avctx->width = enc->width;
  enc->avctx->height = enc->height;
  enc->avctx->time_base = (AVRational){enc->fps_den, enc->fps_num};
  enc->avctx->framerate = (AVRational){enc->fps_num, enc->fps_den};
  enc->avctx->pix_fmt = AV_PIX_FMT_VSV;
  // VBR lets the encoder pick the rate (bit_rate 0); otherwise kbps -> bps.
  enc->avctx->bit_rate = (g_strcmp0(enc->rc_mode, "vbr") == 0) ? 0 : enc->bitrate * 1000;
  enc->avctx->gop_size = enc->gop_size > 0 ? enc->gop_size : 1;
  enc->avctx->max_b_frames = 0; // avoid frame reordering issues
  
  // Encoder-private options (names are specific to the VSV encoder wrapper).
  av_opt_set(enc->avctx->priv_data, "preset", enc->preset, 0);
  av_opt_set(enc->avctx->priv_data, "gop_size", "1", 0);
  av_opt_set(enc->avctx->priv_data, "intra_pic_rate", "5", 0);
  av_opt_set(enc->avctx->priv_data, "rc_mode", enc->rc_mode, 0);
  av_opt_set(enc->avctx->priv_data, "profile", enc->profile, 0);
  av_opt_set(enc->avctx->priv_data, "enable_cabac", enc->enable_cabac ? "1" : "0", 0);
  av_opt_set(enc->avctx->priv_data, "level", enc->level, 0);
  av_opt_set(enc->avctx->priv_data, "qp_min", "1", 0);
  av_opt_set(enc->avctx->priv_data, "qp_max", "20", 0);
  // Ensure SPS/PPS (and VPS for HEVC) are present in-band on keyframes
  av_opt_set(enc->avctx->priv_data, "repeat_headers", "1", 0);
  // Force zero delay to ensure every send_frame produces immediate output
  // This prevents the encoder from buffering frames internally
  av_opt_set(enc->avctx->priv_data, "delay", "0", 0);
  
  // Do not set GLOBAL_HEADER for byte-stream output; SPS/PPS should be in-band
  
  if (!init_hw_encoder(enc) || !set_hwframe_ctx(enc)) goto fail;
  
  ret = avcodec_open2(enc->avctx, enc->codec, NULL);
  if (ret < 0) goto fail;
  
  enc->encoder_initialized = TRUE;
  
  // Publish output caps matching the selected codec.
  // gst_video_encoder_set_output_state() takes ownership of outcaps; only
  // the returned state needs to be unreffed here.
  outcaps = gst_caps_new_simple(
      (g_strcmp0(enc->codec_name, "hevc") == 0) ? "video/x-h265" : "video/x-h264",
      "stream-format", G_TYPE_STRING, "byte-stream",
      "alignment", G_TYPE_STRING, "au", NULL);
  output_state = gst_video_encoder_set_output_state(encoder, outcaps, state);
  gst_video_codec_state_unref(output_state);
  
  // Set encoder latency: VSV encoder buffers ~5-6 frames before first output
  // This tells downstream elements to expect delayed output
  {
    gint fps_n = state->info.fps_n;
    gint fps_d = state->info.fps_d;
    if (fps_n > 0 && fps_d > 0) {
      GstClockTime latency = gst_util_uint64_scale(6 * fps_d, GST_SECOND, fps_n);
      gst_video_encoder_set_latency(encoder, latency, latency);
      GST_INFO_OBJECT(enc, "Set encoder latency to %" GST_TIME_FORMAT, GST_TIME_ARGS(latency));
    }
  }
  
  return TRUE;

fail:
  // Unified cleanup on failure to avoid leaks
  if (enc->avctx) {
    if (enc->avctx->hw_frames_ctx) {
      av_buffer_unref(&enc->avctx->hw_frames_ctx);
    }
    if (enc->avctx->hw_device_ctx) {
      av_buffer_unref(&enc->avctx->hw_device_ctx);
    }
    avcodec_free_context(&enc->avctx);
    enc->avctx = NULL;
  }
  if (enc->hw_device_ctx) {
    av_buffer_unref(&enc->hw_device_ctx);
    enc->hw_device_ctx = NULL;
  }
  enc->encoder_initialized = FALSE;
  return FALSE;
}

/*
 * handle_frame vmethod: encode one raw input frame.
 *
 * Flow: map input buffer -> stride-aware copy into a freshly allocated
 * software AVFrame -> upload to a VSV hardware frame -> send to the encoder
 * -> drain every packet currently available, pairing each with the oldest
 * queued input frame.  Because the VSV encoder buffers ~5-6 frames before
 * the first output, input frames are parked in enc->pending_frames (with an
 * extra ref) until a packet arrives for them.
 *
 * Returns GST_FLOW_OK, or GST_FLOW_ERROR on any mapping/allocation/encoder
 * failure (the frame ref owned by this call is released on every path).
 */
static GstFlowReturn
gst_enriginenc_handle_frame(GstVideoEncoder *encoder, GstVideoCodecFrame *frame)
{
  GstEnriginEnc *enc = GST_ENRIGINENC(encoder);
  GstMapInfo map;
  GstVideoFrame vframe;
  GstVideoInfo in_info;
  int ret;
  AVPacket *pkt = NULL;
  AVFrame *sw_frame = NULL;
  AVFrame *hw_frame = NULL;
  
  // Log every 100 frames to track progress
  if (enc->frame_count % 100 == 0) {
    GST_INFO_OBJECT(enc, ">>> Entering handle_frame for frame %ld", enc->frame_count);
  }
  
  // NOTE(review): the buffer is mapped twice — flat (gst_buffer_map) and as a
  // video frame (gst_video_frame_map below).  The flat `map` is only used by
  // the packed fallback copy path; consider dropping it if that path goes.
  if (!gst_buffer_map(frame->input_buffer, &map, GST_MAP_READ)) {
    gst_video_codec_frame_unref(frame);
    return GST_FLOW_ERROR;
  }
  // Prepare a local GstVideoInfo based on negotiated format
  GstVideoFormat gst_fmt = (enc->sw_pix_fmt == AV_PIX_FMT_NV12) ? GST_VIDEO_FORMAT_NV12 : GST_VIDEO_FORMAT_I420;
  gst_video_info_set_format(&in_info, gst_fmt, enc->width, enc->height);
  // Map as GstVideoFrame to respect strides and plane layout
  if (!gst_video_frame_map(&vframe, &in_info, frame->input_buffer, GST_MAP_READ)) {
    gst_buffer_unmap(frame->input_buffer, &map);
    gst_video_codec_frame_unref(frame);
    return GST_FLOW_ERROR;
  }
  
  // Allocate new frames for each encoding (avoid reuse issues)
  sw_frame = av_frame_alloc();
  if (!sw_frame) {
    gst_video_frame_unmap(&vframe);
    gst_buffer_unmap(frame->input_buffer, &map);
    gst_video_codec_frame_unref(frame);
    return GST_FLOW_ERROR;
  }
  
  sw_frame->width = enc->width;
  sw_frame->height = enc->height;
  sw_frame->format = enc->sw_pix_fmt;
  
  // Let FFmpeg allocate the plane buffers with its preferred alignment.
  ret = av_frame_get_buffer(sw_frame, 0);
  if (ret < 0) {
    av_frame_free(&sw_frame);
    gst_video_frame_unmap(&vframe);
    gst_buffer_unmap(frame->input_buffer, &map);
    gst_video_codec_frame_unref(frame);
    return GST_FLOW_ERROR;
  }
  
  // Copy data plane-by-plane respecting strides (source and destination
  // strides may differ, so a row-by-row memcpy is required).
  const int width = enc->width;
  const int height = enc->height;
  if (enc->sw_pix_fmt == AV_PIX_FMT_NV12) {
    // Y plane
    const guint8 *srcY = (const guint8 *)GST_VIDEO_FRAME_PLANE_DATA(&vframe, 0);
    int srcYStride = GST_VIDEO_FRAME_PLANE_STRIDE(&vframe, 0);
    for (int h = 0; h < height; ++h) {
      memcpy(sw_frame->data[0] + h * sw_frame->linesize[0], srcY + h * srcYStride, width);
    }
    // UV plane (interleaved, half height, full width in bytes)
    const guint8 *srcUV = (const guint8 *)GST_VIDEO_FRAME_PLANE_DATA(&vframe, 1);
    int srcUVStride = GST_VIDEO_FRAME_PLANE_STRIDE(&vframe, 1);
    for (int h = 0; h < height / 2; ++h) {
      memcpy(sw_frame->data[1] + h * sw_frame->linesize[1], srcUV + h * srcUVStride, width);
    }
  } else if (enc->sw_pix_fmt == AV_PIX_FMT_YUV420P) {
    // Y
    const guint8 *srcY = (const guint8 *)GST_VIDEO_FRAME_PLANE_DATA(&vframe, 0);
    int srcYStride = GST_VIDEO_FRAME_PLANE_STRIDE(&vframe, 0);
    for (int h = 0; h < height; ++h) {
      memcpy(sw_frame->data[0] + h * sw_frame->linesize[0], srcY + h * srcYStride, width);
    }
    // U (quarter-size plane: half width, half height)
    const guint8 *srcU = (const guint8 *)GST_VIDEO_FRAME_PLANE_DATA(&vframe, 1);
    int srcUStride = GST_VIDEO_FRAME_PLANE_STRIDE(&vframe, 1);
    for (int h = 0; h < height / 2; ++h) {
      memcpy(sw_frame->data[1] + h * sw_frame->linesize[1], srcU + h * srcUStride, width / 2);
    }
    // V (quarter-size plane: half width, half height)
    const guint8 *srcV = (const guint8 *)GST_VIDEO_FRAME_PLANE_DATA(&vframe, 2);
    int srcVStride = GST_VIDEO_FRAME_PLANE_STRIDE(&vframe, 2);
    for (int h = 0; h < height / 2; ++h) {
      memcpy(sw_frame->data[2] + h * sw_frame->linesize[2], srcV + h * srcVStride, width / 2);
    }
  } else {
    // Fallback: tightly packed copy
    // NOTE(review): assumes sw_frame->data[0] is contiguous and at least
    // map.size bytes — only safe for packed formats with matching strides.
    memcpy(sw_frame->data[0], map.data, map.size);
  }
  gst_video_frame_unmap(&vframe);
  gst_buffer_unmap(frame->input_buffer, &map);
  
  // Use monotonic PTS based on frame count to ensure continuity
  // This avoids timestamp discontinuities that can cause frame repeats
  // (time_base was set to 1/fps in set_format, so a frame index works here).
  sw_frame->pts = enc->frame_count;
  
  // Force keyframe for first frame on the HW frame to ensure decoder starts cleanly
  
  // Allocate hardware frame
  hw_frame = av_frame_alloc();
  if (!hw_frame) {
    av_frame_free(&sw_frame);
    gst_video_codec_frame_unref(frame);
    return GST_FLOW_ERROR;
  }
  
  // Borrow a surface from the pool created in set_hwframe_ctx.
  ret = av_hwframe_get_buffer(enc->avctx->hw_frames_ctx, hw_frame, 0);
  if (ret < 0) {
    GST_ERROR_OBJECT(enc, "av_hwframe_get_buffer failed: %d at frame %ld (pool exhausted?)", ret, enc->frame_count);
    av_frame_free(&sw_frame);
    av_frame_free(&hw_frame);
    gst_video_codec_frame_unref(frame);
    enc->frame_count++;
    return GST_FLOW_ERROR;
  }
  
  // Verify hw_frames_ctx is set (from encode_func.cpp pattern)
  if (!hw_frame->hw_frames_ctx) {
    GST_ERROR_OBJECT(enc, "hw_frame->hw_frames_ctx is NULL at frame %ld", enc->frame_count);
    av_frame_free(&sw_frame);
    av_frame_free(&hw_frame);
    gst_video_codec_frame_unref(frame);
    enc->frame_count++;
    return GST_FLOW_ERROR;
  }
  
  // Transfer data to hardware frame
  ret = av_hwframe_transfer_data(hw_frame, sw_frame, 0);
  if (ret < 0) {
    GST_ERROR_OBJECT(enc, "av_hwframe_transfer_data failed: %d at frame %ld", ret, enc->frame_count);
    av_frame_free(&sw_frame);
    av_frame_free(&hw_frame);
    gst_video_codec_frame_unref(frame);
    enc->frame_count++;
    return GST_FLOW_ERROR;
  }
  
  // Copy PTS to hardware frame (must do this after transfer)
  hw_frame->pts = sw_frame->pts;
  // Note: VSV encoder with gop_size=1 forces all frames to be IDR automatically
  
  // Free software frame immediately after transfer (matching encode_func.cpp pattern)
  av_frame_free(&sw_frame);
  
  // Send frame to encoder
  // Every 25th frame: log progress and request an IDR.
  // NOTE(review): AVFrame.key_frame is deprecated in newer FFmpeg releases
  // (replaced by AV_FRAME_FLAG_KEY) — confirm against the FFmpeg version in use.
  if (enc->frame_count % 25 == 0) {
    GST_INFO_OBJECT(enc, ">>> Sending frame %ld to encoder (PTS=%ld)", enc->frame_count, hw_frame->pts);
    hw_frame->key_frame = 1;
    hw_frame->pict_type = AV_PICTURE_TYPE_I;
  }

  ret = avcodec_send_frame(enc->avctx, hw_frame);
  if (ret < 0) {
    if (ret == AVERROR(EAGAIN)) {
      GST_WARNING_OBJECT(enc, "!!! avcodec_send_frame returned EAGAIN at frame %ld (encoder full?)", enc->frame_count);
      // Encoder is full, need to drain first
      // This should not happen with gop_size=1, but handle it anyway
    } else {
      GST_ERROR_OBJECT(enc, "!!! avcodec_send_frame failed: %d at frame %ld", ret, enc->frame_count);
    }
    av_frame_free(&hw_frame);
    gst_video_codec_frame_unref(frame);
    enc->frame_count++;
    return GST_FLOW_ERROR;
  }
  
  // Add current frame to pending queue BEFORE receive_packet
  // This way we can match outputs to inputs correctly
  gst_video_codec_frame_ref(frame);  // Increase ref count for queue
  g_queue_push_tail(enc->pending_frames, frame);
  
  // Receive packets - encoder may output 0, 1, or multiple packets
  if (enc->frame_count % 100 == 0) {
    GST_INFO_OBJECT(enc, ">>> Receiving packets, queue size: %d", g_queue_get_length(enc->pending_frames));
  }
  
  pkt = av_packet_alloc();
  int packet_count = 0;
  gboolean encoder_error = FALSE;
  
  // Drain loop: each packet completes the oldest pending frame (the encoder
  // emits packets in submission order since max_b_frames is 0).
  while (1) {
    ret = avcodec_receive_packet(enc->avctx, pkt);
    if (ret == 0) {
      // Got a packet - pop the oldest frame from queue and finish it
      if (g_queue_is_empty(enc->pending_frames)) {
        GST_ERROR_OBJECT(enc, "Got packet but pending queue is empty! This should not happen.");
        av_packet_unref(pkt);
        encoder_error = TRUE;
        break;
      }
      
      GstVideoCodecFrame *pending_frame = (GstVideoCodecFrame*)g_queue_pop_head(enc->pending_frames);
      GstBuffer *out_buf = gst_buffer_new_allocate(NULL, pkt->size, NULL);
      gst_buffer_fill(out_buf, 0, pkt->data, pkt->size);
      
      // finish_frame consumes the frame reference held by the queue.
      pending_frame->output_buffer = out_buf;
      gst_video_encoder_finish_frame(encoder, pending_frame);
      
      packet_count++;
      GST_LOG_OBJECT(enc, "Output packet %d: %d bytes for pending frame", packet_count, pkt->size);
      av_packet_unref(pkt);
      continue;
    } else if (ret == AVERROR(EAGAIN)) {
      // No more packets available right now
      GST_LOG_OBJECT(enc, "EAGAIN after %d packets, %d frames still pending", 
                     packet_count, g_queue_get_length(enc->pending_frames));
      break;
    } else if (ret == AVERROR_EOF) {
      GST_DEBUG_OBJECT(enc, "EOF received");
      break;
    } else {
      GST_ERROR_OBJECT(enc, "avcodec_receive_packet error: %d", ret);
      encoder_error = TRUE;
      break;
    }
  }
  
  // Always free hw_frame
  av_frame_free(&hw_frame);
  av_packet_free(&pkt);
  
  if (encoder_error) {
    // Clear pending queue on error
    while (!g_queue_is_empty(enc->pending_frames)) {
      GstVideoCodecFrame *f = (GstVideoCodecFrame*)g_queue_pop_head(enc->pending_frames);
      gst_video_codec_frame_unref(f);
    }
    return GST_FLOW_ERROR;
  }
  
  // Log status
  if (packet_count == 0 && enc->frame_count < 5) {
    GST_DEBUG_OBJECT(enc, "Frame %ld: no output (initial buffering)", enc->frame_count);
  } else if (packet_count > 1) {
    GST_DEBUG_OBJECT(enc, "Frame %ld triggered %d outputs (catching up)", enc->frame_count, packet_count);
  }
  
  // Health check
  if (enc->frame_count > 0 && enc->frame_count % 300 == 0) {
    GST_INFO_OBJECT(enc, "Health check: frame %ld, pending queue: %d", 
                    enc->frame_count, g_queue_get_length(enc->pending_frames));
  }
  
  // Always increment frame_count
  enc->frame_count++;
  
  return GST_FLOW_OK;
}

/*
 * finish vmethod (EOS): put the encoder into drain mode, push out every
 * remaining packet paired with its pending input frame, then drop any frames
 * the encoder never produced output for.
 */
static GstFlowReturn
gst_enriginenc_finish(GstVideoEncoder *encoder)
{
  GstEnriginEnc *enc = GST_ENRIGINENC(encoder);

  if (!enc->avctx)
    return GST_FLOW_OK;

  GST_INFO_OBJECT(enc, "Flushing encoder, %d frames pending", g_queue_get_length(enc->pending_frames));

  /* Sending NULL enters drain mode. */
  avcodec_send_frame(enc->avctx, NULL);

  AVPacket *pkt = av_packet_alloc();
  for (;;) {
    int ret = avcodec_receive_packet(enc->avctx, pkt);
    if (ret != 0)
      break;  /* EAGAIN / EOF / error all end the drain */
    if (!g_queue_is_empty(enc->pending_frames)) {
      GstVideoCodecFrame *pending_frame =
          (GstVideoCodecFrame *)g_queue_pop_head(enc->pending_frames);
      GstBuffer *out_buf = gst_buffer_new_allocate(NULL, pkt->size, NULL);
      gst_buffer_fill(out_buf, 0, pkt->data, pkt->size);

      pending_frame->output_buffer = out_buf;
      gst_video_encoder_finish_frame(encoder, pending_frame);
      GST_DEBUG_OBJECT(enc, "Flushed packet: %d bytes", pkt->size);
    }
    av_packet_unref(pkt);
  }
  av_packet_free(&pkt);

  /* Frames that never got an output packet are dropped (ref released). */
  if (!g_queue_is_empty(enc->pending_frames)) {
    GST_WARNING_OBJECT(enc, "%d frames remain in queue after flush (encoder delay)", 
                       g_queue_get_length(enc->pending_frames));
    do {
      GstVideoCodecFrame *f =
          (GstVideoCodecFrame *)g_queue_pop_head(enc->pending_frames);
      gst_video_codec_frame_unref(f);
    } while (!g_queue_is_empty(enc->pending_frames));
  }

  return GST_FLOW_OK;
}

/* Plugin entry: register the "enriginenc" element with primary rank. */
static gboolean
plugin_init(GstPlugin *plugin)
{
  gboolean registered = gst_element_register(plugin, "enriginenc",
                                             GST_RANK_PRIMARY,
                                             GST_TYPE_ENRIGINENC);
  return registered;
}

/* Plugin descriptor: name, version, license, and the plugin_init hook above. */
GST_PLUGIN_DEFINE(
    GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    enriginenc,
    "Enrigin Video Encoder (FFmpeg Direct)",
    plugin_init,
    "1.0",
    "LGPL",
    "GStreamer",
    "http://gstreamer.net/"
)
