
#include "include/audio_codec.h"

#include <algorithm>
namespace codec {
// Initializes the decoder for the given stream parameters.
// Returns true when a decoder was found, configured and opened.
bool AudioCodec::init(AVCodecParameters *codec_params) {
  if (!codec_params) return false;

  // Locate a decoder for the stream's codec id (reuse a cached one if set).
  if (!codec_) {
    codec_ = avcodec_find_decoder(codec_params->codec_id);
  }
  if (!codec_) return false;

  codec_ctx_ = avcodec_alloc_context3(codec_);
  if (!codec_ctx_) return false;  // allocation can fail

  // Copy the stream parameters (sample rate, channel layout, extradata, ...)
  // into the fresh context.  Free the context on failure so repeated init
  // attempts do not leak it.
  if (avcodec_parameters_to_context(codec_ctx_, codec_params) < 0) {
    avcodec_free_context(&codec_ctx_);
    return false;
  }

  if (avcodec_open2(codec_ctx_, codec_, nullptr) < 0) {
    avcodec_free_context(&codec_ctx_);
    return false;
  }
  return true;
}
// AAudio real-time data callback: pulls interleaved float PCM out of the
// shared ring buffer (passed via userData) into the device buffer.
// Must remain lock-free and allocation-free.
aaudio_data_callback_result_t AudioCodec::audioCallback(AAudioStream *stream,
                                                        void *userData,
                                                        void *audioData,
                                                        int32_t numFrames) {
  auto *buffer = static_cast<AudioBuffer *>(userData);
  float *output = static_cast<float *>(audioData);
  const int channels = AAudioStream_getChannelCount(stream);
  // Use size_t throughout: buffer->read() returns size_t, and the original
  // signed/unsigned comparison below was a latent warning/overflow hazard.
  const size_t totalSamples =
      static_cast<size_t>(numFrames) * static_cast<size_t>(channels);

  const size_t read = buffer->read(output, totalSamples);

  // Underrun: zero-fill the tail so stale device memory is not played.
  if (read < totalSamples) {
    memset(output + read, 0, (totalSamples - read) * sizeof(float));

    // NOTE(review): a single empty read stops the stream permanently, which
    // also triggers on a transient underrun while the decoder is merely
    // behind; this presumably should also check a "decoding finished" flag
    // (as the original comment suggests) — confirm with the decode loop.
    if (read == 0) {
      return AAUDIO_CALLBACK_RESULT_STOP;
    }
  }
  return AAUDIO_CALLBACK_RESULT_CONTINUE;
}

// Releases all playback/decoding resources in dependency order:
// the audio stream is shut down first so no callback can touch the
// resampler or decoder while they are being freed.
void AudioCodec::close() {
  if (stream != nullptr) {
    AAudioStream_close(stream);  // blocks until every callback has exited
    stream = nullptr;            // guard against a double close
  }

  // Release the resampler context, if one was created.
  if (swrCtx_ != nullptr) {
    swr_free(&swrCtx_);
    swrCtx_ = nullptr;
  }

  // Release the decoder context, if one was created.
  if (codec_ctx_ != nullptr) {
    avcodec_free_context(&codec_ctx_);
    codec_ctx_ = nullptr;
  }

  // The AVCodec itself is owned by FFmpeg; just drop our reference.
  codec_ = nullptr;
}
bool AudioCodec::initAAudio() {
  AVChannelLayout out_ch_layout = AV_CHANNEL_LAYOUT_STEREO;  // 输出声道布局
  swr_alloc_set_opts2(&swrCtx_,
                      &out_ch_layout,  // 输出布局
                      AV_SAMPLE_FMT_FLT,
                      codec_ctx_->sample_rate,  // 输出采样率
                      &codec_ctx_->ch_layout,   // 输入布局
                      codec_ctx_->sample_fmt,   // 输入格式
                      codec_ctx_->sample_rate,  // 输入采样率
                      0, nullptr);
  swr_init(swrCtx_);
  out_channels_ = out_ch_layout.nb_channels;
  AAudioStreamBuilder *builder;
  AAudio_createStreamBuilder(&builder);
  AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_OUTPUT);
  AAudioStreamBuilder_setSampleRate(
      builder, codec_ctx_->sample_rate);  // 使用解码器采样率
  AAudioStreamBuilder_setChannelCount(builder, out_channels_);  // 动态声道数
  AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);  //
  AAudioStreamBuilder_setPerformanceMode(builder,
                                         AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
  AAudioStreamBuilder_setDataCallback(builder, audioCallback, &pcm_buffer);

  // 启动音频流
  aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
  if (result != AAUDIO_OK) {
    // 错误处理
    return false;
  }
  AAudioStream_requestStart(stream);
  return true;
}

// Returns the CLOCK_MONOTONIC time in nanoseconds — a high-resolution
// clock unaffected by wall-clock (system time) adjustments.
int64_t getMonotonicTime() {
  constexpr int64_t kNanosPerSecond = 1000000000LL;
  timespec now{};
  clock_gettime(CLOCK_MONOTONIC, &now);
  return static_cast<int64_t>(now.tv_sec) * kNanosPerSecond + now.tv_nsec;
}

// int64_t AudioCodec::getAudioPosition() {
//   int64_t framePos, timeNs;
//   aaudio_result_t result =
//       AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC, &framePos, &timeNs);

//   if (result != AAUDIO_OK) return -1;

//   const int64_t nowNs = getMonotonicTime();
//   int64_t elapsedNs = nowNs - timeNs;

//   const int32_t sample_rate = AAudioStream_getSampleRate(stream);

//   // 抗溢出计算：分步执行除法
//   const int64_t elapsedMs = elapsedNs / 1000000;  // 纳秒→毫秒
//   const int64_t elapsedFrames = (elapsedMs * sample_rate) / 1000;
//   double framePostime = static_cast<double>(framePos) / getSampleRate();
//   double elapsedFramestime =
//       static_cast<double>(elapsedFrames) / getSampleRate();

//   __android_log_print(ANDROID_LOG_ERROR, "fhq1",
//                       "framePostime:%f：elapsedFrames：%f", framePostime,
//                       elapsedFramestime);
//   return framePos + elapsedFrames;
//}

void AudioCodec::setTarget(double video_s) {
  const int32_t sample_rate = AAudioStream_getSampleRate(stream);
  mBaseFrameOffset = static_cast<int64_t>(video_s * sample_rate);
  __android_log_print(ANDROID_LOG_ERROR, "fhq1", "seek_target_s：%lf：",
                      video_s);
  // 2. 记录当前累计帧位置作为跳转基准点
  int64_t curFramePos, timeNs;
  AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC, &curFramePos, &timeNs);
  mSeekStartFramePos = curFramePos;  // 保存跳转起始点
}

// Returns the current playback position in frames, relative to the last
// seek anchor set by setTarget(); -1 when no timestamp is available yet.
int64_t AudioCodec::getAudioPosition() {
  int64_t presentedFrames = 0;
  int64_t presentedTimeNs = 0;
  const aaudio_result_t status = AAudioStream_getTimestamp(
      stream, CLOCK_MONOTONIC, &presentedFrames, &presentedTimeNs);
  if (status != AAUDIO_OK) {
    return -1;  // timestamp not available (e.g. stream not started)
  }
  // position = seek target offset + frames played since the seek anchor
  return mBaseFrameOffset + (presentedFrames - mSeekStartFramePos);
}

// Hands one compressed packet to the decoder.
// Returns true only when the packet was accepted; every failure mode is
// logged and reported as false.
bool AudioCodec::sendPacket(AVPacket *packet) {
  if (!codec_ctx_) {
    return false;  // decoder not initialized
  }

  const int ret = avcodec_send_packet(codec_ctx_, packet);
  if (ret == 0) {
    return true;  // packet accepted
  }

  // Log the specific failure before reporting it to the caller.
  if (ret == AVERROR(EAGAIN)) {
    // The decoder's input queue is full; frames must be drained first.
    __android_log_print(ANDROID_LOG_DEBUG, "Decoder",
                        "Input buffer full, need to receive frames first");
  } else if (ret == AVERROR_EOF) {
    __android_log_print(ANDROID_LOG_WARN, "Decoder",
                        "Decoder has already been flushed");
  } else if (ret == AVERROR(EINVAL)) {
    __android_log_print(ANDROID_LOG_ERROR, "Decoder",
                        "Invalid argument or decoder not opened");
  } else if (ret == AVERROR(ENOMEM)) {
    __android_log_print(
        ANDROID_LOG_ERROR, "Decoder",
        "Failed to add packet to internal queue (out of memory)");
  } else {
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(ret, errbuf, sizeof(errbuf));
    __android_log_print(ANDROID_LOG_ERROR, "Decoder",
                        "Failed to send packet: %s (code %d)", errbuf, ret);
  }
  return false;
}

// Resamples one decoded frame to interleaved float PCM and pushes the
// result into the ring buffer consumed by the AAudio callback.
void AudioCodec::swrController(AVFrame *frame) {
  const int outSamples = swr_get_out_samples(swrCtx_, frame->nb_samples);
  if (outSamples <= 0) {
    return;  // nothing to produce, or the query failed
  }

  // Allocate a single interleaved output buffer (FLT => one plane).
  uint8_t *audioBuf = nullptr;
  int linesize = 0;
  const int alloc_ret = av_samples_alloc(&audioBuf,      // output buffer
                                         &linesize,      // output line size
                                         out_channels_,  // output channels
                                         outSamples,     // samples/channel
                                         AV_SAMPLE_FMT_FLT,
                                         0);  // alignment (0 = default)
  if (alloc_ret < 0 || !audioBuf) {
    return;
  }

  // Interleaved output only needs the first plane pointer.
  uint8_t *audioData[1] = {audioBuf};

  // Use extended_data rather than data: for planar input with many
  // channels, data[] only holds the first AV_NUM_DATA_POINTERS planes.
  const int realSamples =
      swr_convert(swrCtx_, audioData, outSamples,
                  (const uint8_t **)frame->extended_data, frame->nb_samples);

  if (realSamples > 0) {
    const float *floatData = reinterpret_cast<const float *>(audioBuf);
    pcm_buffer.write(floatData, realSamples * out_channels_);
  }
  // BUG FIX: the buffer previously leaked on success and, on a convert
  // failure, was freed and then immediately used (write from a null buffer
  // with a negative sample count). Release it exactly once on every path.
  av_freep(&audioBuf);
}

// Pulls one decoded frame from the decoder.
// Returns a frame the caller must free with av_frame_free(), or nullptr
// when no frame is available (EAGAIN/EOF) or on any failure.
AVFrame *AudioCodec::receiveFrame() {
  if (!codec_ctx_) {
    return nullptr;  // decoder not initialized (matches sendPacket's guard)
  }
  AVFrame *frame = av_frame_alloc();
  if (!frame) {
    return nullptr;  // BUG FIX: allocation failure was passed to FFmpeg
  }
  if (avcodec_receive_frame(codec_ctx_, frame) == 0) {
    return frame;
  }
  av_frame_free(&frame);
  return nullptr;
}
}  // namespace codec