﻿// copyright (c) 2025 author:fio66<fio66@foxmail.com>. All rights reserved.
// date: 2025-08-21 14:33:32

#include "src/demuxer_impl.h"

#include <atomic>
#include <string>
#include <string_view>
#include <thread>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/timestamp.h>
#include <libswscale/swscale.h>
};

#include "logger/logger.hpp"
#include "src/utils.h"

namespace fio66 {
namespace {
// Frame counters used only for log output. Both are written exclusively from
// the demuxing worker thread (reset in WorkThread, incremented in
// GetVideoFrame/CallAudio), so no synchronization is applied.
uint64_t video_frame_count = 0;
uint64_t audio_frame_count = 0;

// Finds the best stream of the given media type in fmt_ctx and opens a
// decoder for it.
// On success: writes the stream index to *stream_idx, stores the opened
// decoder context in *dec_ctx (caller owns it; release with
// avcodec_free_context) and returns true.
// On failure: returns false with *dec_ctx left NULL.
bool OpenCodecCtx(int* stream_idx, AVCodecContext** dec_ctx,
                  AVFormatContext* fmt_ctx, AVMediaType type) {
  bool ret = false;
  do {
    int stream_index = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
    if (stream_index < 0) {
      LOGE << "Could not find " << av_get_media_type_string(type) << " stream in input file";
      break;
    }
    AVStream* st = fmt_ctx->streams[stream_index];

    /* find decoder for the stream */
    const AVCodec* dec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!dec) {
      LOGE << "Failed to find " << av_get_media_type_string(type) << " codec";
      break;
    }

    /* Allocate a codec context for the decoder */
    *dec_ctx = avcodec_alloc_context3(dec);
    if (!*dec_ctx) {
      // *dec_ctx is NULL here, so there is nothing to release.
      LOGE << "Failed to allocate the " << av_get_media_type_string(type) << " codec context";
      break;
    }

    /* Copy codec parameters from input stream to output codec context */
    if (avcodec_parameters_to_context(*dec_ctx, st->codecpar) < 0) {
      LOGE << "Failed to copy " << av_get_media_type_string(type) << " codec parameters to decoder context";
      avcodec_free_context(dec_ctx);
      break;
    }

    if (type == AVMEDIA_TYPE_AUDIO) {
      // Ask the decoder to emit interleaved 32-bit float samples
      // (AV_SAMPLE_FMT_FLT). NOTE: request_sample_fmt is only a hint —
      // the decoder may still produce another (e.g. planar) format.
      (*dec_ctx)->request_sample_fmt = AV_SAMPLE_FMT_FLT;
    }

    /* Init the decoders */
    if (avcodec_open2(*dec_ctx, dec, NULL) < 0) {
      LOGE << "Failed to open " << av_get_media_type_string(type) << " codec";
      avcodec_free_context(dec_ctx);
      break;
    }
    *stream_idx = stream_index;
    ret = true;
  } while (false);

  return ret;
}

// Opens `url` and probes its stream info.
// Returns an AVFormatContext owned by the caller (release with
// avformat_close_input), or nullptr on failure.
AVFormatContext* CreateAVFormatContext(std::string_view url,
                                       AVDictionary** option) {
  AVFormatContext* ctx = avformat_alloc_context();
  if (!ctx) {
    return nullptr;
  }

  // std::string_view is not guaranteed to be NUL-terminated, but
  // avformat_open_input() needs a C string — make an owned copy.
  const std::string url_str{url};
  auto err = avformat_open_input(&ctx, url_str.c_str(), NULL, option);
  if (!AV_SUCCESS(err)) {
    // On failure avformat_open_input() has already freed ctx and set it to
    // NULL, so no explicit cleanup is needed here.
    LOGE << "Failed to open input file:" << utils::AVErr2Str(err);
    return nullptr;
  }
  err = avformat_find_stream_info(ctx, NULL);
  if (!AV_SUCCESS(err)) {
    LOGE << "Failed to find stream info: " << utils::AVErr2Str(err);
    // The input is open at this point: avformat_close_input() both closes
    // the underlying I/O and frees the context. avformat_free_context()
    // alone would leak the demuxer/IO state.
    avformat_close_input(&ctx);
    return nullptr;
  }
  return ctx;
}

// Copies one decoded video frame into the pre-allocated destination image.
// Returns false when the frame's geometry/pixel format no longer matches
// the buffer that was allocated up front.
bool GetVideoFrame(AVFrame* frame, DemuxerImpl::ImageData* img) {
  const bool geometry_unchanged = frame->width == img->width &&
                                  frame->height == img->height &&
                                  frame->format == img->pix_fmt;
  if (!geometry_unchanged) {
    /* To handle this change, one could call av_image_alloc again and
     * decode the following frames into another rawvideo file. */
    LOGW << "Error: Width, height and pixel format have to be "
         << "constant in a rawvideo file, but the width, height or "
         << "pixel format of the input video changed:\n"
         << "old: width = " << img->width << ", height = " << img->height << ", format = " << av_get_pix_fmt_name(img->pix_fmt) << "\n"
         << "new: width = " << frame->width << ", height = " << frame->height << ", format = " << av_get_pix_fmt_name(static_cast<AVPixelFormat>(frame->format));
    return false;
  }

  LOGI << "video_frame n:" << video_frame_count++;

  /* copy decoded frame to destination buffer:
   * this is required since rawvideo expects non aligned data */
  av_image_copy(img->img_data, img->img_linesize,
                (const uint8_t**)(frame->data), frame->linesize, img->pix_fmt,
                img->width, img->height);
  return true;
}
}  // namespace

// Opens `url`, sets up the video/audio decoders and starts the demuxing
// worker thread.
// `callback` receives decoded media data and must outlive the session.
// Returns false (after releasing any partially-initialized FFmpeg state)
// when the callback/url is invalid, the input cannot be opened, or neither
// stream could be decoded.
bool DemuxerImpl::Demuxing(std::string_view url, MediaDataCallback* callback) {
  bool ret = false;
  do {
    if (!callback) {
      break;
    }
    callback_ = callback;
    if (url.empty()) {
      break;
    }

    fmt_ctx_ = CreateAVFormatContext(url, NULL);
    if (!fmt_ctx_) {
      break;
    }
    if (OpenCodecCtx(&video_stream_idx_, &video_codec_context_, fmt_ctx_,
                     AVMEDIA_TYPE_VIDEO)) {
      video_stream_ = fmt_ctx_->streams[video_stream_idx_];
      /* allocate image where the decoded image will be put */
      img_.width = video_codec_context_->width;
      img_.height = video_codec_context_->height;
      img_.pix_fmt = video_codec_context_->pix_fmt;
      img_.img_buffersize =
          av_image_alloc(img_.img_data, img_.img_linesize, img_.width,
                         img_.height, img_.pix_fmt, 1);
      if (img_.img_buffersize < 0) {
        // Use the project logger (was a stray printf) for consistent output.
        LOGE << "Could not allocate raw video buffer";
        break;
      }
    }

    if (OpenCodecCtx(&audio_stream_idx_, &audio_codec_context_, fmt_ctx_,
                     AVMEDIA_TYPE_AUDIO)) {
      audio_stream_ = fmt_ctx_->streams[audio_stream_idx_];
    }
    // The input is usable as long as at least one stream opened.
    if (!video_stream_ && !audio_stream_) {
      LOGE << "No valid stream find!";
      break;
    }
    work_thread_future_ =
        std::async(std::launch::async, &DemuxerImpl::WorkThread, this);
    ret = true;
  } while (false);
  if (!ret) {
    UninitFFMpeg();
  }
  return ret;
}

void DemuxerImpl::UninitFFMpeg() {
  avformat_free_context(fmt_ctx_);
  avcodec_free_context(&video_codec_context_);
  avcodec_free_context(&audio_codec_context_);
  // av_free(img_.img_data[0]);
  av_freep(&img_.img_data[0]);
}

// Worker-thread entry point: reads packets from the opened input and feeds
// them to the matching decoder until EOF, an error, or QuitDemuxing().
// Honors pause/resume via pause_cv_ and applies deferred seeks (need_seek_).
// Returns true if demuxing finished without a decode error.
bool DemuxerImpl::WorkThread() {
  bool ret = false;
  bool started = false;  // true once kMediaDataStart has been delivered
  AVFrame* frame = NULL;
  AVPacket* pkt = NULL;

  do {
    frame = av_frame_alloc();
    if (!frame) {
      LOGE << "Could not allocate frame";
      break;
    }
    pkt = av_packet_alloc();
    if (!pkt) {
      LOGE << "Could not allocate packet";
      break;
    }
    ret = true;
    run_demuxing_ = true;
    video_frame_count = 0;
    audio_frame_count = 0;
    started = true;
    callback_->OnRecieveMediaData(MediaDataType::kMediaDataStart, nullptr, 0);
    /* read frames from the context */
    while (run_demuxing_) {
      // Block while paused; woken by ResumeDemuxing() or QuitDemuxing().
      {
        std::unique_lock<std::mutex> lock(pause_mutex_);
        pause_cv_.wait(lock, [this] { return !pause_state_ || !run_demuxing_; });
      }
      // Quit may have been requested while we were paused.
      if (!run_demuxing_) {
        break;
      }
      // Apply a pending seek request before reading the next packet.
      if (need_seek_) {
        DoSeek();
      }
      // EOF or a read error ends the demuxing loop.
      if (av_read_frame(fmt_ctx_, pkt) < 0) {
        break;
      }

      if (pkt->stream_index == video_stream_idx_) {
        ret &= DecodePacket(video_codec_context_, pkt, frame);
      } else if (pkt->stream_index == audio_stream_idx_) {
        ret &= DecodePacket(audio_codec_context_, pkt, frame);
      }

      av_packet_unref(pkt);
      if (!ret) {
        break;
      }
    }
  } while (false);
  // Only report End if Start was delivered; previously an early allocation
  // failure emitted kMediaDataEnd without a matching kMediaDataStart.
  if (started) {
    callback_->OnRecieveMediaData(MediaDataType::kMediaDataEnd, nullptr, 0);
  }
  av_frame_free(&frame);
  av_packet_free(&pkt);
  return ret;
}

// Requests that the worker thread stop reading packets; it will block on
// pause_cv_ until ResumeDemuxing() or QuitDemuxing() wakes it.
void DemuxerImpl::PauseDemuxing() {
  {
    std::scoped_lock guard(pause_mutex_);
    pause_state_ = true;
  }
  LOGI << "Demuxing paused";
}

// Clears the pause flag and wakes the worker thread so it resumes reading.
void DemuxerImpl::ResumeDemuxing() {
  {
    std::scoped_lock guard(pause_mutex_);
    pause_state_ = false;
  }
  // Wake the worker blocked in WorkThread's pause wait.
  pause_cv_.notify_one();
  LOGI << "Demuxing resumed";
}

void DemuxerImpl::QuitDemuxing() {
  LOGI << "Demuxing quit";
  run_demuxing_ = false;
  pause_cv_.notify_one();  // 通知等待的线程检查退出条件
}

// 进度控制实现
double DemuxerImpl::GetCurrentPosition() {
  if (!fmt_ctx_ || !video_stream_ && !audio_stream_) {
    return 0.0;
  }

  // 尝试使用视频帧的时间戳
  if (video_stream_ && last_video_pts_ != AV_NOPTS_VALUE) {
    return av_q2d(video_stream_->time_base) * last_video_pts_;
  }

  // 如果没有视频帧时间戳或没有视频流，则使用音频帧时间戳
  if (audio_stream_ && last_audio_pts_ != AV_NOPTS_VALUE) {
    return av_q2d(audio_stream_->time_base) * last_audio_pts_;
  }

  // 如果都没有有效时间戳，返回0
  return 0.0;
}

// Total duration of the opened input in seconds, or 0.0 when unknown.
double DemuxerImpl::GetDuration() {
  if (!fmt_ctx_ || fmt_ctx_->duration == AV_NOPTS_VALUE) {
    return 0.0;
  }
  // fmt_ctx_->duration is expressed in AV_TIME_BASE units (microseconds).
  const auto duration_units = static_cast<double>(fmt_ctx_->duration);
  return duration_units / AV_TIME_BASE;
}

// Returns basic stream information for the requested media type, or
// std::nullopt when the corresponding decoder was never opened (or the
// type is unknown).
std::optional<std::variant<AudioInfo, VideoInfo>> DemuxerImpl::GetMediaInfo(MediaType type) {
  switch (type) {
    case MediaType::kAudio: {
      if (!audio_codec_context_) {
        return std::nullopt;  // No audio decoder context available.
      }

      AudioInfo audio_info{};
      audio_info.sample_rate = audio_codec_context_->sample_rate;
      audio_info.channels = audio_codec_context_->ch_layout.nb_channels;

      // Bits per sample derived directly from the sample format. This
      // replaces a hand-written switch with the same mapping (U8→8,
      // S16→16, S32/FLT→32, DBL→64), additionally covers S64/S64P (which
      // the switch mis-reported as 0) and yields 0 for AV_SAMPLE_FMT_NONE.
      audio_info.bit_depth =
          av_get_bytes_per_sample(audio_codec_context_->sample_fmt) * 8;

      // Codec name, when the decoder exposes one.
      if (audio_codec_context_->codec && audio_codec_context_->codec->name) {
        audio_info.codec_name = audio_codec_context_->codec->name;
      }
      return audio_info;
    }
    case MediaType::kVideo: {
      if (!video_codec_context_) {
        return std::nullopt;  // No video decoder context available.
      }

      VideoInfo video_info{};
      video_info.width = video_codec_context_->width;
      video_info.height = video_codec_context_->height;

      // Frame rate in frames per second; left at its zero-initialized
      // value when the codec context does not advertise one.
      if (video_codec_context_->framerate.num > 0 &&
          video_codec_context_->framerate.den > 0) {
        video_info.frame_rate = av_q2d(video_codec_context_->framerate);
      }

      // Codec name, when the decoder exposes one.
      if (video_codec_context_->codec && video_codec_context_->codec->name) {
        video_info.codec_name = video_codec_context_->codec->name;
      }
      return video_info;
    }
    default:
      return std::nullopt;  // Unknown media type.
  }
}

// Queues a seek to `position` (seconds). The actual av_seek_frame() call is
// deferred to the worker thread (see DoSeek), so this can be called from any
// thread. Returns false for a negative position or an unopened input.
bool DemuxerImpl::SeekTo(double position) {
  const bool request_invalid = !fmt_ctx_ || position < 0.0;
  if (request_invalid) {
    LOGE << "Invalid seek position";
    return false;
  }

  // Publish the target first, then raise the flag the worker polls.
  target_seek_position_ = position;
  need_seek_ = true;

  LOGI << "Seek requested to " << position << " seconds, will be processed in work thread";
  return true;
}

// Performs the seek queued by SeekTo(); runs on the worker thread.
void DemuxerImpl::DoSeek() {
  if (!need_seek_ || !fmt_ctx_) {
    return;
  }

  const double position = target_seek_position_;
  need_seek_ = false;  // Consume the pending request.

  // Target timestamp in AV_TIME_BASE units (microseconds).
  const int64_t timestamp = static_cast<int64_t>(position * AV_TIME_BASE);

  const int ret = av_seek_frame(fmt_ctx_, -1, timestamp, AVSEEK_FLAG_BACKWARD);
  if (ret < 0) {
    LOGE << "Error seeking to position " << position << " seconds: " << utils::AVErr2Str(ret);
    return;
  }

  // Drop frames buffered inside the decoders from before the seek.
  if (video_codec_context_) {
    avcodec_flush_buffers(video_codec_context_);
  }
  if (audio_codec_context_) {
    avcodec_flush_buffers(audio_codec_context_);
  }

  // Invalidate position tracking until the next decoded frame arrives.
  last_video_pts_ = AV_NOPTS_VALUE;
  last_audio_pts_ = AV_NOPTS_VALUE;

  LOGI << "Seek to " << position << " seconds successful";
}

// Delivers one decoded video frame to the callback and records its PTS for
// GetCurrentPosition(). Returns false when the frame no longer matches the
// pre-allocated image buffer.
bool DemuxerImpl::CallVideo(AVFrame* frame) {
  if (!GetVideoFrame(frame, &img_)) {
    return false;
  }

  callback_->OnRecieveMediaData(MediaDataType::kVideoData, img_.img_data[0],
                                img_.img_buffersize);

  // Remember the timestamp for position reporting.
  if (frame->pts != AV_NOPTS_VALUE) {
    last_video_pts_ = frame->pts;
  }
  return true;
}

// Delivers one decoded audio frame to the callback and records its PTS.
// request_sample_fmt (set in OpenCodecCtx) is only a hint, so the decoder
// may still emit a planar format and the buffer size must respect that.
void DemuxerImpl::CallAudio(AVFrame* frame) {
  const auto fmt = static_cast<AVSampleFormat>(frame->format);
  const int bytes_per_sample = av_get_bytes_per_sample(fmt);
  // For planar formats, extended_data[0] holds ONLY the first channel's
  // plane (nb_samples * bytes_per_sample bytes). The previous computation
  // multiplied by the channel count unconditionally, over-reading that
  // plane whenever the decoder produced planar output.
  int unpadded_linesize = frame->nb_samples * bytes_per_sample;
  if (!av_sample_fmt_is_planar(fmt)) {
    // Packed/interleaved: one plane contains all channels.
    unpadded_linesize *= frame->ch_layout.nb_channels;
  } else if (frame->ch_layout.nb_channels > 1) {
    // Only the first plane is forwarded; the other channels are dropped.
    // TODO(review): interleave planes (e.g. via libswresample) if full
    // multi-channel delivery is required.
    LOGW << "Planar audio: forwarding only the first of "
         << frame->ch_layout.nb_channels << " channel planes";
  }
  if (callback_) {
    callback_->OnRecieveMediaData(MediaDataType::kAudioData,
      frame->extended_data[0], unpadded_linesize);
  }

  // Remember the timestamp for position reporting.
  if (frame->pts != AV_NOPTS_VALUE) {
    last_audio_pts_ = frame->pts;
  }

  LOGI << "audio_frame n:" << audio_frame_count++
       << " nb_samples:" << frame->nb_samples
       << " pts:" << utils::AVTs2TimeStr(frame->pts, &audio_codec_context_->time_base);
}

// Submits one packet to `codec_ctx` and forwards every frame the decoder
// produces from it. Returns false on a decode or dispatch error; EAGAIN and
// EOF from the decoder are normal end-of-output conditions, not errors.
bool DemuxerImpl::DecodePacket(AVCodecContext* codec_ctx, const AVPacket* pkt,
                               AVFrame* frame) {
  // Feed the compressed packet to the decoder.
  const int send_err = avcodec_send_packet(codec_ctx, pkt);
  if (send_err < 0) {
    LOGE << "Error submitting a packet for decoding (" << utils::AVErr2Str(send_err) << ")";
    return false;
  }

  // Drain all frames the decoder can produce right now.
  for (;;) {
    const int recv_err = avcodec_receive_frame(codec_ctx, frame);
    if (recv_err == AVERROR_EOF || recv_err == AVERROR(EAGAIN)) {
      // No output available, but decoding itself succeeded.
      return true;
    }
    if (recv_err < 0) {
      LOGE << "Error during decoding (" << utils::AVErr2Str(recv_err) << ")";
      return false;
    }

    // Route the decoded frame to the matching consumer.
    bool frame_ok = true;
    if (codec_ctx->codec->type == AVMEDIA_TYPE_VIDEO) {
      frame_ok = CallVideo(frame);
    } else {
      CallAudio(frame);
    }
    av_frame_unref(frame);
    if (!frame_ok) {
      return false;
    }
  }
}
}  // namespace fio66
