/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/ohos/ohaudio_player_wrapper.h"

#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"

// Evaluates |op| (an expression returning OH_AudioStream_Result) and logs an
// error — including the numeric result code — on failure. Execution continues.
#define LOG_ON_ERROR(op)                                      \
  do {                                                        \
    OH_AudioStream_Result result = (op);                      \
    if (result != AUDIOSTREAM_SUCCESS) {                      \
      RTC_LOG(LS_ERROR) << #op << " failed, error code: "     \
                        << static_cast<int>(result);          \
    }                                                         \
  } while (0)

// Evaluates |op| and, on failure, logs the numeric result code and returns
// from the enclosing function with the (optional) provided value.
#define RETURN_ON_ERROR(op, ...)                              \
  do {                                                        \
    OH_AudioStream_Result result = (op);                      \
    if (result != AUDIOSTREAM_SUCCESS) {                      \
      RTC_LOG(LS_ERROR) << #op << " failed, error code: "     \
                        << static_cast<int>(result);          \
      return __VA_ARGS__;                                     \
    }                                                         \
  } while (0)

namespace webrtc {

namespace {

// Maps an OHAudio stream type to a short human-readable tag for log output.
const char* DirectionToString(OH_AudioStream_Type direction) {
  if (direction == AUDIOSTREAM_TYPE_RENDERER) {
    return "OUTPUT";
  }
  if (direction == AUDIOSTREAM_TYPE_CAPTURER) {
    return "INPUT";
  }
  return "UNKNOWN";
}

// Converts an OHAudio stream state to its textual name for log output.
const char* StateToString(OH_AudioStream_State mode) {
  if (mode == AUDIOSTREAM_STATE_INVALID) {
    return "INVALID";
  }
  if (mode == AUDIOSTREAM_STATE_PREPARED) {
    return "PREPARED";
  }
  if (mode == AUDIOSTREAM_STATE_RUNNING) {
    return "RUNNING";
  }
  if (mode == AUDIOSTREAM_STATE_STOPPED) {
    return "STOPPED";
  }
  if (mode == AUDIOSTREAM_STATE_RELEASED) {
    return "RELEASED";
  }
  if (mode == AUDIOSTREAM_STATE_PAUSED) {
    return "PAUSED";
  }
  return "UNKNOWN";
}

// Converts an OHAudio latency mode to a descriptive tag for log output.
const char* PerformanceModeToString(OH_AudioStream_LatencyMode mode) {
  if (mode == AUDIOSTREAM_LATENCY_MODE_NORMAL) {
    return "NORMAL";
  }
  return mode == AUDIOSTREAM_LATENCY_MODE_FAST ? "LOW_LATENCY" : "UNKNOWN";
}

// Converts an OHAudio sample format to its textual name for log output.
// Fix: "U8" is now capitalized consistently with "S16LE"/"S24LE"/"S32LE".
const char* FormatToString(OH_AudioStream_SampleFormat id) {
  switch (id) {
    case AUDIOSTREAM_SAMPLE_U8:
      return "U8";
    case AUDIOSTREAM_SAMPLE_S16LE:
      return "S16LE";
    case AUDIOSTREAM_SAMPLE_S24LE:
      return "S24LE";
    case AUDIOSTREAM_SAMPLE_S32LE:
      return "S32LE";
    default:
      return "UNKNOWN";
  }
}

static int32_t ErrorCallback(OH_AudioRenderer* stream,
                   void* user_data,
                   OH_AudioStream_Result error) {
  RTC_DCHECK(user_data);
  OHAudioPlayerWrapper* ohaudio_wrapper = reinterpret_cast<OHAudioPlayerWrapper*>(user_data);
  RTC_LOG(LS_WARNING) << "ErrorCallback: "
                   << DirectionToString(ohaudio_wrapper->direction());
  RTC_DCHECK(ohaudio_wrapper->observer());
  return ohaudio_wrapper->observer()->OnErrorCallback(error);
}

// Invoked by OHAudio on its own thread whenever the renderer needs more PCM
// data. Forwards the buffer (|bufferLen| bytes) to the wrapper's observer,
// whose return value is handed back to OHAudio.
static int32_t DataCallback(OH_AudioRenderer* stream,
                            void* user_data,
                            void* audio_data,
                            int32_t bufferLen) {
  RTC_DCHECK(user_data);
  RTC_DCHECK(audio_data);
  auto* wrapper = reinterpret_cast<OHAudioPlayerWrapper*>(user_data);
  RTC_DCHECK(wrapper->observer());
  return wrapper->observer()->OnDataCallback(audio_data, bufferLen);
}

// RAII wrapper for OH_AudioStreamBuilder: creates the builder on construction
// and destroys it when the wrapper goes out of scope.
class ScopedStreamBuilder {
 public:
  explicit ScopedStreamBuilder(OH_AudioStream_Type streamType) {
    LOG_ON_ERROR(OH_AudioStreamBuilder_Create(&builder_, streamType));
    RTC_DCHECK(builder_);
  }
  ~ScopedStreamBuilder() {
    if (builder_) {
      LOG_ON_ERROR(OH_AudioStreamBuilder_Destroy(builder_));
    }
  }

  // Non-copyable: a copy would lead to OH_AudioStreamBuilder_Destroy being
  // called twice on the same builder handle.
  ScopedStreamBuilder(const ScopedStreamBuilder&) = delete;
  ScopedStreamBuilder& operator=(const ScopedStreamBuilder&) = delete;

  // Borrowed pointer; ownership stays with this wrapper.
  OH_AudioStreamBuilder* get() const { return builder_; }

 private:
  OH_AudioStreamBuilder* builder_ = nullptr;
};

}  // namespace

// Constructs the wrapper around a not-yet-opened OHAudio stream.
// |audio_parameters| holds the requested sample rate / channel configuration,
// |direction| selects renderer vs capturer semantics (used for logging), and
// |observer| (must be non-null, not owned) receives data and error callbacks.
OHAudioPlayerWrapper::OHAudioPlayerWrapper(const AudioParameters& audio_parameters,
                             OH_AudioStream_Type direction,
                             OHAudioPlayerObserverInterface* observer)
    : audio_parameters_(audio_parameters), direction_(direction), observer_(observer) {
  RTC_LOG(LS_INFO) << "ctor";
  RTC_DCHECK(observer_);
  // ohaudio_thread_checker_.DetachFromThread();
  RTC_LOG(LS_INFO) << audio_parameters_.ToString();
}

// The stream must already have been released (via Stop() -> CloseStream())
// before destruction; a live |stream_| here would leak the renderer handle.
OHAudioPlayerWrapper::~OHAudioPlayerWrapper() {
  RTC_LOG(LS_INFO) << "dtor";
  // RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(!stream_);
}

// Builds, opens, verifies and tunes the renderer stream. Returns false as
// soon as any step fails.
bool OHAudioPlayerWrapper::Init() {
  RTC_LOG(LS_INFO) << "Init";
  // RTC_DCHECK(thread_checker_.CalledOnValidThread());
  // Create a stream builder and configure it with the audio parameters given
  // at construction.
  ScopedStreamBuilder builder(AUDIOSTREAM_TYPE_RENDERER);
  SetStreamConfiguration(builder.get());
  // Open the stream, verify that the requested settings were activated, and
  // tune the buffering scheme. Short-circuit evaluation preserves the step
  // order and aborts on the first failure.
  const bool ok = OpenStream(builder.get()) && VerifyStreamConfiguration() &&
                  OptimizeBuffers();
  if (!ok) {
    return false;
  }
  LogStreamState();
  return true;
}

bool OHAudioPlayerWrapper::Start() {
  RTC_LOG(LS_INFO) << "Start";
  // RTC_DCHECK(thread_checker_.CalledOnValidThread());
  // TODO(henrika): this state check might not be needed.
  OH_AudioStream_State current_state;
  OH_AudioRenderer_GetCurrentState(stream_, &current_state);
  if (current_state != AUDIOSTREAM_STATE_PREPARED) {
    RTC_LOG(LS_ERROR) << "Invalid state: "
                      << StateToString(current_state);
    return false;
  }
  // Asynchronous request for the stream to start.
  RETURN_ON_ERROR(OH_AudioRenderer_Start(stream_), false);
  LogStreamState();
  return true;
}

bool OHAudioPlayerWrapper::Stop() {
  RTC_LOG(LS_INFO) << "Stop: " << DirectionToString(direction());
  // RTC_DCHECK(thread_checker_.CalledOnValidThread());
  // Asynchronous request for the stream to stop.
  RETURN_ON_ERROR(OH_AudioRenderer_Stop(stream_), false);
  CloseStream();
  // ohaudio_thread_checker_.DetachFromThread();
  return true;
}

// Estimates the stream latency in milliseconds. For capture streams the burst
// duration is used as a rough guess; for render streams the estimate is
// derived from hardware presentation timestamps (the classic AAudio recipe,
// ported to OH_AudioRenderer_GetTimestamp). Returns 0.0 if no estimate is
// available.
double OHAudioPlayerWrapper::EstimateLatencyMillis() const {
  RTC_DCHECK(stream_);
  double latency_millis = 0.0;
  if (direction() == AUDIOSTREAM_TYPE_CAPTURER) {
    // For input streams. Best guess we can do is to use the current burst size
    // as delay estimate.
    latency_millis = static_cast<double>(frames_per_burst()) / sample_rate() *
                     rtc::kNumMillisecsPerSec;
  } else {
    int64_t existing_frame_index;
    int64_t existing_frame_presentation_time;
    // Get the time at which a particular frame was presented to audio hardware.
    OH_AudioStream_Result result = OH_AudioRenderer_GetTimestamp(
        stream_, CLOCK_MONOTONIC, &existing_frame_index,
        &existing_frame_presentation_time);
    // Presumably the timestamp is only valid while the stream is running; the
    // original AAudio comment referenced AAUDIO_STREAM_STATE_STARTED.
    // TODO(review): confirm the equivalent OHAudio state requirement.
    if (result == AUDIOSTREAM_SUCCESS) {
      // Get write index for next audio frame.
      int64_t next_frame_index = frames_written();
      // Number of frames between next frame and the existing frame.
      int64_t frame_index_delta = next_frame_index - existing_frame_index;
      // Assume the next frame will be written now.
      int64_t next_frame_write_time = rtc::TimeNanos();
      // Calculate time when next frame will be presented to the hardware taking
      // sample rate into account.
      int64_t frame_time_delta =
          (frame_index_delta * rtc::kNumNanosecsPerSec) / sample_rate();
      int64_t next_frame_presentation_time =
          existing_frame_presentation_time + frame_time_delta;
      // Latency = how far in the future the next written frame will actually
      // be presented, converted from nanoseconds to milliseconds.
      latency_millis = static_cast<double>(next_frame_presentation_time -
                                           next_frame_write_time) /
                       rtc::kNumNanosecsPerMillisec;
    }
  }
  return latency_millis;
}

// Intended to grow the output buffer by one burst to reduce underrun risk.
// NOTE(review): buffer resizing is not implemented in this OHAudio port — the
// original AAudio logic is kept below, commented out, and the function
// currently always returns true (the old "returns new buffer size" comment no
// longer applied).
bool OHAudioPlayerWrapper::IncreaseOutputBufferSize() {
  RTC_LOG(LS_INFO) << "IncreaseBufferSize";
  RTC_DCHECK(stream_);
  // RTC_DCHECK(ohaudio_thread_checker_.CalledOnValidThread());
  RTC_DCHECK_EQ(direction(), AUDIOSTREAM_TYPE_RENDERER);
  // OH_AudioStream_Result buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
  // // Try to increase size of buffer with one burst to reduce risk of underrun.
  // buffer_size += frames_per_burst();
  // // Verify that the new buffer size is not larger than max capacity.
  // // TODO(henrika): keep track of case when we reach the capacity limit.
  // const int32_t max_buffer_size = buffer_capacity_in_frames();
  // if (buffer_size > max_buffer_size) {
  //   RTC_LOG(LS_ERROR) << "Required buffer size (" << buffer_size
  //                     << ") is higher than max: " << max_buffer_size;
  //   return false;
  // }
  // RTC_LOG(LS_INFO) << "Updating buffer size to: " << buffer_size
  //               << " (max=" << max_buffer_size << ")";
  // buffer_size = AAudioStream_setBufferSizeInFrames(stream_, buffer_size);
  // if (buffer_size < 0) {
  //   RTC_LOG(LS_ERROR) << "Failed to change buffer size: "
  //                     << AAudio_convertResultToText(buffer_size);
  //   return false;
  // }
  // RTC_LOG(LS_INFO) << "Buffer size changed to: " << buffer_size;
  return true;
}

// Intended to drain stale data from an input (capture) stream.
// NOTE(review): not implemented in this OHAudio port — the AAudio read loop
// is kept commented out, so |audio_data| and |num_frames| are currently
// unused and the function only validates its preconditions.
void OHAudioPlayerWrapper::ClearInputStream(void* audio_data, int32_t num_frames) {
  RTC_LOG(LS_INFO) << "ClearInputStream";
  RTC_DCHECK(stream_);
  // RTC_DCHECK(ohaudio_thread_checker_.CalledOnValidThread());
  RTC_DCHECK_EQ(direction(), AUDIOSTREAM_TYPE_CAPTURER);
  // OH_AudioStream_Result cleared_frames = 0;
  // do {
  //   cleared_frames = AAudioStream_read(stream_, audio_data, num_frames, 0);
  // } while (cleared_frames > 0);
}

// Returns the (non-owned) observer that receives data and error callbacks.
OHAudioPlayerObserverInterface* OHAudioPlayerWrapper::observer() const {
  return observer_;
}

// Returns a copy of the audio parameters supplied at construction.
AudioParameters OHAudioPlayerWrapper::audio_parameters() const {
  return audio_parameters_;
}

// Number of samples per audio frame, i.e. the channel count for interleaved
// PCM. Returns 0 if the query fails.
int32_t OHAudioPlayerWrapper::samples_per_frame() const {
  RTC_DCHECK(stream_);
  // Zero-initialized so a failing getter cannot return an indeterminate value.
  int32_t channelCount = 0;
  OH_AudioRenderer_GetChannelCount(stream_, &channelCount);
  return channelCount;
}

// int32_t OHAudioPlayerWrapper::buffer_size_in_frames() const {
//   RTC_DCHECK(stream_);
//   return AAudioStream_getBufferSizeInFrames(stream_);
// }

// int32_t OHAudioPlayerWrapper::buffer_capacity_in_frames() const {
//   RTC_DCHECK(stream_);
//   return AAudioStream_getBufferCapacityInFrames(stream_);
// }

// Device-id lookup is not supported by this OHAudio wrapper; always returns 0.
int32_t OHAudioPlayerWrapper::device_id() const {
  RTC_DCHECK(stream_);
  return 0;
}

// Underrun/overrun (xrun) counting is not supported by this OHAudio wrapper;
// always returns 0.
int32_t OHAudioPlayerWrapper::xrun_count() const {
  RTC_DCHECK(stream_);
  return 0;
}

// Returns the stream's sample format. Pre-initialized to the S16LE format the
// stream was configured with, so a failing query cannot return garbage.
OH_AudioStream_SampleFormat OHAudioPlayerWrapper::format() const {
  RTC_DCHECK(stream_);
  OH_AudioStream_SampleFormat sampleFormat = AUDIOSTREAM_SAMPLE_S16LE;
  OH_AudioRenderer_GetSampleFormat(stream_, &sampleFormat);
  return sampleFormat;
}

// Returns the stream's sample rate in Hz, or 0 if the query fails.
int32_t OHAudioPlayerWrapper::sample_rate() const {
  RTC_DCHECK(stream_);
  // Zero-initialized so a failing getter cannot return an indeterminate value.
  int32_t rate = 0;
  OH_AudioRenderer_GetSamplingRate(stream_, &rate);
  return rate;
}

// Returns the stream's channel count, or 0 if the query fails.
int32_t OHAudioPlayerWrapper::channel_count() const {
  RTC_DCHECK(stream_);
  // Zero-initialized so a failing getter cannot return an indeterminate value.
  int32_t channelCount = 0;
  OH_AudioRenderer_GetChannelCount(stream_, &channelCount);
  return channelCount;
}

// int32_t OHAudioPlayerWrapper::frames_per_callback() const {
//   RTC_DCHECK(stream_);
//   return AAudioStream_getFramesPerDataCallback(stream_);
// }

// OH_AudioStream_State OHAudioPlayerWrapper::sharing_mode() const {
//   RTC_DCHECK(stream_);
//   return AAudioStream_getSharingMode(stream_);
// }

// Returns the stream's latency mode. Pre-initialized to NORMAL (the mode the
// stream is configured with), so a failing query cannot return garbage.
OH_AudioStream_LatencyMode OHAudioPlayerWrapper::performance_mode() const {
  RTC_DCHECK(stream_);
  OH_AudioStream_LatencyMode latencyMode = AUDIOSTREAM_LATENCY_MODE_NORMAL;
  OH_AudioRenderer_GetLatencyMode(stream_, &latencyMode);
  return latencyMode;
}

// Returns the stream's current state, or AUDIOSTREAM_STATE_INVALID if the
// query fails (previously the local could be read uninitialized).
OH_AudioStream_State OHAudioPlayerWrapper::stream_state() const {
  RTC_DCHECK(stream_);
  OH_AudioStream_State current_state = AUDIOSTREAM_STATE_INVALID;
  OH_AudioRenderer_GetCurrentState(stream_, &current_state);
  return current_state;
}

// Returns the number of frames written to the stream so far, or 0 if the
// query fails.
int64_t OHAudioPlayerWrapper::frames_written() const {
  RTC_DCHECK(stream_);
  // Zero-initialized so a failing getter cannot return an indeterminate value.
  int64_t frames = 0;
  OH_AudioRenderer_GetFramesWritten(stream_, &frames);
  return frames;
}

// Frames-read accounting is not available for a renderer stream; always
// returns 0.
int64_t OHAudioPlayerWrapper::frames_read() const {
  RTC_DCHECK(stream_);
  return 0;
}

// Applies the audio parameters from construction plus fixed format/latency/
// usage settings to the stream builder, and installs the data/error
// callbacks. Builder-setter failures are now logged instead of ignored.
void OHAudioPlayerWrapper::SetStreamConfiguration(OH_AudioStreamBuilder* builder) {
  RTC_LOG(LS_INFO) << "SetStreamConfiguration";
  RTC_DCHECK(builder);
  // RTC_DCHECK(thread_checker_.CalledOnValidThread());
  // Use preferred sample rate given by the audio parameters.
  LOG_ON_ERROR(OH_AudioStreamBuilder_SetSamplingRate(builder, audio_parameters().sample_rate()));
  // Use preferred channel configuration given by the audio parameters.
  LOG_ON_ERROR(OH_AudioStreamBuilder_SetChannelCount(builder, audio_parameters().channels()));
  // Always use 16-bit PCM audio sample format.
  LOG_ON_ERROR(OH_AudioStreamBuilder_SetSampleFormat(builder, AUDIOSTREAM_SAMPLE_S16LE));
  // OH_AudioStreamBuilder_SetLatencyMode(builder, AUDIOSTREAM_LATENCY_MODE_FAST);
  LOG_ON_ERROR(OH_AudioStreamBuilder_SetLatencyMode(builder, AUDIOSTREAM_LATENCY_MODE_NORMAL));
  // Set usage (MUSIC, COMMUNICATION, ALARM, etc.).
  LOG_ON_ERROR(OH_AudioStreamBuilder_SetRendererInfo(builder, AUDIOSTREAM_USAGE_VOICE_COMMUNICATION));
  // Zero-initialize so no callback slot can be left as garbage; every member
  // must be set (nullptr is fine), otherwise it crashes in Bluetooth mode.
  OH_AudioRenderer_Callbacks callbacks = {};
  callbacks.OH_AudioRenderer_OnWriteData = DataCallback;
  callbacks.OH_AudioRenderer_OnStreamEvent = nullptr;
  callbacks.OH_AudioRenderer_OnInterruptEvent = nullptr;
  callbacks.OH_AudioRenderer_OnError = ErrorCallback;
  LOG_ON_ERROR(OH_AudioStreamBuilder_SetRendererCallback(builder, callbacks, this));
}

// Generates the renderer stream from the configured builder and stores the
// handle in |stream_|. Returns false (leaving |stream_| untouched) if stream
// generation fails.
bool OHAudioPlayerWrapper::OpenStream(OH_AudioStreamBuilder* builder) {
  RTC_LOG(LS_INFO) << "OpenStream";
  RTC_DCHECK(builder);
  OH_AudioRenderer* stream = nullptr;
  RETURN_ON_ERROR(OH_AudioStreamBuilder_GenerateRenderer(builder, &stream), false);
  stream_ = stream;
  LogStreamConfiguration();
  return true;
}

// Releases the OHAudio renderer handle and clears |stream_| (so the
// destructor's !stream_ DCHECK holds). A failing release is logged but not
// propagated.
void OHAudioPlayerWrapper::CloseStream() {
  RTC_LOG(LS_INFO) << "CloseStream";
  RTC_DCHECK(stream_);
  LOG_ON_ERROR(OH_AudioRenderer_Release(stream_));
  stream_ = nullptr;
}

// Logs a one-line summary of the opened stream's active configuration
// (sample rate, channels, format, performance mode, direction). Uses a
// fixed 1024-byte stack buffer via SimpleStringBuilder to avoid allocation.
void OHAudioPlayerWrapper::LogStreamConfiguration() {
  RTC_DCHECK(stream_);
  char ss_buf[1024];
  rtc::SimpleStringBuilder ss(ss_buf);
  ss << "Stream Configuration: ";
  ss << "sample rate=" << sample_rate() << ", channels=" << channel_count();
  // ss << ", samples per frame=" << samples_per_frame();
  ss << ", format=" << FormatToString(format());
  // ss << ", sharing mode=" << SharingModeToString(sharing_mode());
  ss << ", performance mode=" << PerformanceModeToString(performance_mode());
  ss << ", direction=" << DirectionToString(direction());
  // ss << ", device id=" << AAudioStream_getDeviceId(stream_);
  // ss << ", frames per callback=" << frames_per_callback();
  RTC_LOG(LS_INFO) << ss.str();
}

// Logs the stream's current state by name (e.g. PREPARED, RUNNING).
void OHAudioPlayerWrapper::LogStreamState() {
  RTC_LOG(LS_INFO) << "OHAudio stream state: "
                << StateToString(stream_state());
}

// Verifies that the opened stream activated the requested sample rate,
// channel count and 16-bit PCM format. Returns false (with an error log) on
// the first mismatch. Queried locals are now pre-initialized so a failing
// getter cannot leave them indeterminate before the comparisons.
bool OHAudioPlayerWrapper::VerifyStreamConfiguration() {
  RTC_LOG(LS_INFO) << "VerifyStreamConfiguration";
  RTC_DCHECK(stream_);
  // TODO(henrika): should we verify device ID as well?
  int32_t rate = 0;
  OH_AudioRenderer_GetSamplingRate(stream_, &rate);
  if (rate != audio_parameters().sample_rate()) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested sample rate";
    return false;
  }

  int32_t channelCount = 0;
  OH_AudioRenderer_GetChannelCount(stream_, &channelCount);
  if (channelCount !=
      static_cast<int32_t>(audio_parameters().channels())) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested channel count";
    return false;
  }

  // Value-initialized; if the query fails the (zero) value will not compare
  // equal to AUDIOSTREAM_SAMPLE_S16LE by accident of stack contents.
  OH_AudioStream_SampleFormat sampleFormat{};
  OH_AudioRenderer_GetSampleFormat(stream_, &sampleFormat);
  if (sampleFormat != AUDIOSTREAM_SAMPLE_S16LE) {
    RTC_LOG(LS_ERROR) << "Stream unable to use requested format";
    return false;
  }
  // if (AAudioStream_getSharingMode(stream_) != AAUDIO_SHARING_MODE_SHARED) {
  //   RTC_LOG(LS_ERROR) << "Stream unable to use requested sharing mode";
  //   return false;
  // }
  // if (AAudioStream_getPerformanceMode(stream_) !=
  //     AAUDIO_PERFORMANCE_MODE_LOW_LATENCY) {
  //   RTC_LOG(LS_ERROR) << "Stream unable to use requested performance mode";
  //   return false;
  // }
  // if (AAudioStream_getDirection(stream_) != direction()) {
  //   RTC_LOG(LS_ERROR) << "Stream direction could not be set";
  //   return false;
  // }
  // if (AAudioStream_getSamplesPerFrame(stream_) !=
  //     static_cast<int32_t>(audio_parameters().channels())) {
  //   RTC_LOG(LS_ERROR) << "Invalid number of samples per frame";
  //   return false;
  // }
  return true;
}

// Intended to tune the buffer scheme for lowest possible latency.
// NOTE(review): not implemented in this OHAudio port — the original AAudio
// burst-size tuning is kept below, commented out, and the function currently
// always returns true.
bool OHAudioPlayerWrapper::OptimizeBuffers() {
  RTC_LOG(LS_INFO) << "OptimizeBuffers";
  RTC_DCHECK(stream_);
  // Maximum number of frames that can be filled without blocking.
  // RTC_LOG(LS_INFO) << "max buffer capacity in frames: "
  //               << buffer_capacity_in_frames();
  // // Query the number of frames that the application should read or write at
  // // one time for optimal performance.
  // int32_t frames_per_burst = AAudioStream_getFramesPerBurst(stream_);
  // RTC_LOG(LS_INFO) << "frames per burst for optimal performance: "
  //               << frames_per_burst;
  // frames_per_burst_ = frames_per_burst;
  // if (direction() == AUDIOSTREAM_TYPE_CAPTURER) {
  //   // There is no point in calling setBufferSizeInFrames() for input streams
  //   // since it has no effect on the performance (latency in this case).
  //   return true;
  // }
  // // Set buffer size to same as burst size to guarantee lowest possible latency.
  // // This size might change for output streams if underruns are detected and
  // // automatic buffer adjustment is enabled.
  // AAudioStream_setBufferSizeInFrames(stream_, frames_per_burst);
  // int32_t buffer_size = AAudioStream_getBufferSizeInFrames(stream_);
  // if (buffer_size != frames_per_burst) {
  //   RTC_LOG(LS_ERROR) << "Failed to use optimal buffer burst size";
  //   return false;
  // }
  // // Maximum number of frames that can be filled without blocking.
  // RTC_LOG(LS_INFO) << "buffer burst size in frames: " << buffer_size;
  return true;
}

}  // namespace webrtc
