/* Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
 * Description: JNI proxy around android.media.AudioTrack for PCM playback, with FFmpeg resampling.
 * Author: huchang
 * Created: 2021-08-20
 * Note:
 *  1. Because android.media.AudioTrack is driven over JNI, every JNI entry point must
 *     first obtain a JNIEnv via AttachJEnvIfNeed.
 *  2. Possible optimization: spawn a dedicated thread attached to the JVM and route
 *     all JNI calls through it.
 */

#ifdef __ANDROID__

#define LOG_TAG "AudioTrackProxy"

#include <fstream>
#include <map>
#include "audio_track_proxy.h"
#include "foundation/log.h"
extern JavaVM *jvm;
namespace {
using namespace OHOS::Media::Plugin;
constexpr int MAX_AUDIO_FRAME_SIZE = 192000;
/**
 * Returns a JNIEnv valid for the calling thread, attaching the thread to the
 * JVM if it is not yet attached.
 *
 * @return the thread's JNIEnv, or nullptr when no JVM is registered or the
 *         attach fails. Callers must check for nullptr before any JNI call.
 */
JNIEnv * AttachJEnvIfNeed()
{
    JNIEnv * jniEnv = nullptr;
    if (jvm == nullptr) {
        return nullptr;
    }
    // GetEnv fills jniEnv only when the thread is already attached; on
    // JNI_EDETACHED it leaves jniEnv nullptr, in which case we attach here.
    // Fix: the original ignored the AttachCurrentThread return code.
    if (jvm->GetEnv(reinterpret_cast<void **>(&jniEnv), JNI_VERSION_1_4) != JNI_OK || jniEnv == nullptr) {
        if (jvm->AttachCurrentThread(&jniEnv, nullptr) != JNI_OK) {
            return nullptr;
        }
    }
    return jniEnv;
}
// Maps the plugin's sample-format enum onto the matching FFmpeg sample
// format, covering both interleaved and planar (…P) variants.
// Kept non-const because callers look entries up with operator[].
std::map<AudioSampleFormat, AVSampleFormat> reverseFormatMap = {
    {AudioSampleFormat::U8,   AV_SAMPLE_FMT_U8},
    {AudioSampleFormat::U8P,  AV_SAMPLE_FMT_U8P},
    {AudioSampleFormat::S16,  AV_SAMPLE_FMT_S16},
    {AudioSampleFormat::S16P, AV_SAMPLE_FMT_S16P},
    {AudioSampleFormat::S32,  AV_SAMPLE_FMT_S32},
    {AudioSampleFormat::S32P, AV_SAMPLE_FMT_S32P},
    {AudioSampleFormat::F32,  AV_SAMPLE_FMT_FLT},
    {AudioSampleFormat::F32P, AV_SAMPLE_FMT_FLTP},
    {AudioSampleFormat::F64,  AV_SAMPLE_FMT_DBL},
    {AudioSampleFormat::F64P, AV_SAMPLE_FMT_DBLP},
};

}

namespace OHOS {
namespace Media{
namespace Plugin{
// Pre-allocates the zero-initialized PCM staging buffer used by Write(),
// sized to MAX_AUDIO_FRAME_SIZE plus 50% headroom for resampler output.
// NOTE(review): the size expression is intentionally duplicated below — the
// init list cannot read transformSize_ because member initialization order
// depends on the declaration order in the (unseen) header. Keep them in sync.
AudioTrackProxy::AudioTrackProxy() : transformCache_(new uint8_t [(MAX_AUDIO_FRAME_SIZE * 3) / 2]())
{
    transformSize_ = (MAX_AUDIO_FRAME_SIZE * 3) / 2; // bytes available in transformCache_
}

// Best-effort cleanup: forwards to Release() so the Java AudioTrack is
// released even when the caller never called Release() explicitly.
// NOTE(review): Release() may attach the destroying thread to the JVM.
AudioTrackProxy::~AudioTrackProxy()
{
    Release();
}

bool AudioTrackProxy::Open(int32_t sampleRate, AudioSampleFormat audioFormat, int32_t channels,
        uint64_t channelMask, int32_t samples)
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    channels_= channels;
    samplesPerFrame_ = samples;

    AVSampleFormat outSampleFmt = AV_SAMPLE_FMT_S16;
    uint64_t outChannelLayout = AV_CH_LAYOUT_STEREO;
    int outChannels = av_get_channel_layout_nb_channels(outChannelLayout);
    avFrameSize_ = av_samples_get_buffer_size(nullptr, outChannels, samples, outSampleFmt, 1);
    SwrContext* swrContext = swr_alloc();
    if (swrContext == nullptr) {
        MEDIA_LOG_E("cannot allocate swr context");
        return false;
    }
    format_ = audioFormat;
    AVSampleFormat sampleFormat = reverseFormatMap[format_];
    swrContext = swr_alloc_set_opts(swrContext, outChannelLayout, outSampleFmt, sampleRate, channelMask, sampleFormat,sampleRate, 0, nullptr);
    if (swr_init(swrContext) != 0) {
        MEDIA_LOG_E("swr init error");
        return false;
    }
    swrCtx_ = std::shared_ptr<SwrContext>(swrContext, [](SwrContext* ptr) {
        if (ptr) {
            swr_free(&ptr);
        }
    });
    auto clazz = jniEnv->FindClass("android/media/AudioTrack");
    auto init = jniEnv->GetMethodID(clazz, "<init>", "(IIIIII)V");
    play_ = jniEnv->GetMethodID(clazz, "play", "()V");
    pause_ = jniEnv->GetMethodID(clazz, "pause", "()V");
    stop_ = jniEnv->GetMethodID(clazz, "stop", "()V");
    release_ = jniEnv->GetMethodID(clazz, "release", "()V");
    flush_ = jniEnv->GetMethodID(clazz, "flush", "()V");
    write_ = jniEnv->GetMethodID(clazz, "write", "(Ljava/nio/ByteBuffer;II)I");
    getPlayState_ = jniEnv->GetMethodID(clazz, "getPlayState", "()I");
    getState_ = jniEnv->GetMethodID(clazz, "getState", "()I");

    auto tmp = jniEnv->NewObject(clazz, init, 3, sampleRate, 12, 2, avFrameSize_ * 10, 1);
    if (jniEnv->ExceptionCheck() == JNI_TRUE) {
        MEDIA_LOG_D("EXCEPTION OCCUR");
        jniEnv->ExceptionDescribe();
    }
    audioTrack_ = jniEnv->NewGlobalRef(tmp);
    MEDIA_LOG_D("state %d", jniEnv->CallIntMethod(audioTrack_, getState_));
    MEDIA_LOG_D("play state %d", jniEnv->CallIntMethod(audioTrack_, getPlayState_));

    clazz = jniEnv->FindClass("java/nio/ByteBuffer");
    positionBuffer_ = jniEnv->GetMethodID(clazz, "position", "(I)Ljava/nio/Buffer;");
    tmp = jniEnv->NewDirectByteBuffer(transformCache_.get(), transformSize_);
    bufferCache_ = jniEnv->NewGlobalRef(tmp);
    MEDIA_LOG_D("prepare finished");
    return true;
}

bool AudioTrackProxy::Play()
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    MEDIA_LOG_D("Play");
    jniEnv->CallVoidMethod(audioTrack_, play_);
    return true;
}

bool AudioTrackProxy::Pause()
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    jniEnv->CallVoidMethod(audioTrack_, pause_);
    return true;
}

bool AudioTrackProxy::Stop()
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    MEDIA_LOG_D("stop called");
    jniEnv->CallVoidMethod(audioTrack_, stop_);
    return true;
}

bool AudioTrackProxy::Flush()
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    jniEnv->CallVoidMethod(audioTrack_, flush_);
    return true;
}

bool AudioTrackProxy::Write(uint8_t* data, size_t len)
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    MEDIA_LOG_D("Write called");
    auto dataPtr = transformCache_.get();
    std::vector<const uint8_t *> input(channels_);
    input[0] = data;
    if (av_sample_fmt_is_planar(reverseFormatMap[format_])) {
        size_t planeSize = len / channels_;
        for (auto i = 1; i < channels_; ++i) {
            input[i] = input[i - 1] + planeSize;
        }
    }
    swr_convert(swrCtx_.get(), &dataPtr, MAX_AUDIO_FRAME_SIZE, (const uint8_t**)input.data(), samplesPerFrame_);
    jniEnv->CallObjectMethod(bufferCache_, positionBuffer_, 0);
    jniEnv->CallVoidMethod(audioTrack_, play_);
    MEDIA_LOG_D("before write to audio track");
    jniEnv->CallIntMethod(audioTrack_, write_,bufferCache_, avFrameSize_, 0);
    return true;
}

bool AudioTrackProxy::Release()
{
    auto jniEnv = AttachJEnvIfNeed();
    if (jniEnv == nullptr) {
        MEDIA_LOG_E("thread not attached to jvm");
        return false;
    }
    jniEnv->CallVoidMethod(audioTrack_, release_);
    return true;
}

}
}
}
#endif