#include "fstream"
#include "iostream"
#include "memory"
#include "mutex"
#include "napi/native_api.h"
#include "queue"
#include "string"

using namespace std;
#include <multimedia/native_audio_channel_layout.h>
#include <multimedia/player_framework/native_avbuffer.h>
#include <multimedia/player_framework/native_avcapability.h>
#include <multimedia/player_framework/native_avcodec_audiocodec.h>
#include <multimedia/player_framework/native_avcodec_base.h>
#include <multimedia/player_framework/native_avformat.h>

// Helper: wrap a UTF-8 message string into a JS Error object.
static napi_value CreateError(napi_env env, const string &message) {
    napi_value msgValue = nullptr;
    napi_create_string_utf8(env, message.c_str(), NAPI_AUTO_LENGTH, &msgValue);

    napi_value errorValue = nullptr;
    napi_create_error(env, nullptr, msgValue, &errorValue);
    return errorValue;
}

// Hand-off state between the codec's callback thread (producer) and the
// encoding worker thread (consumer). Index and buffer-pointer queues are
// pushed in matching order, so front() of each pair refers to the same buffer.
class AEncBufferSignal {
public:
    std::mutex inMutex_;                 // guards inQueue_ / inBufferQueue_
    std::mutex outMutex_;                // guards outQueue_ / outBufferQueue_
    std::mutex startMutex_;              // NOTE(review): unused in this file
    std::condition_variable inCond_;     // signaled when a free input buffer arrives
    std::condition_variable outCond_;    // NOTE(review): declared but never signaled or waited on here
    std::condition_variable startCond_;  // NOTE(review): unused in this file
    std::queue<uint32_t> inQueue_;       // indices of available input buffers
    std::queue<uint32_t> outQueue_;      // indices of filled output buffers
    std::queue<OH_AVBuffer *> inBufferQueue_;   // input buffer pointers, parallel to inQueue_
    std::queue<OH_AVBuffer *> outBufferQueue_;  // output buffer pointers, parallel to outQueue_
};

AEncBufferSignal *signal_;

// OH_AVCodecOnError回调函数的实现
static void OnError(OH_AVCodec *codec, int32_t errorCode, void *userData) {
    (void)codec;
    (void)errorCode;
    (void)userData;
}
// OH_AVCodecOnStreamChanged回调函数的实现
static void OnOutputFormatChanged(OH_AVCodec *codec, OH_AVFormat *format, void *userData) {
    (void)codec;
    (void)format;
    (void)userData;
}
// OH_AVCodecOnNeedInputBuffer callback: queue the free input buffer (and its
// index) for the worker thread, then wake any waiter.
static void OnInputBufferAvailable(OH_AVCodec *codec, uint32_t index, OH_AVBuffer *data, void *userData) {
    (void)codec;
    auto *sig = static_cast<AEncBufferSignal *>(userData);
    {
        lock_guard<mutex> guard(sig->inMutex_);
        sig->inQueue_.push(index);
        sig->inBufferQueue_.push(data);
    }
    sig->inCond_.notify_all();
}
// OH_AVCodecOnNewOutputBuffer callback: push the index of the finished output
// buffer and the encoded-data buffer pointer onto the output queues for the
// worker thread.
static void OnOutputBufferAvailable(OH_AVCodec *codec, uint32_t index, OH_AVBuffer *data, void *userData) {
    (void)codec;
    AEncBufferSignal *signal = static_cast<AEncBufferSignal *>(userData);
    unique_lock<mutex> lock(signal->outMutex_);
    signal->outQueue_.push(index);
    signal->outBufferQueue_.push(data);
    // Fix: wake waiters, mirroring OnInputBufferAvailable — outCond_ existed
    // but was never notified, making any wait on it hang forever.
    signal->outCond_.notify_all();
}

// Validate the requested sample rate, channel count, and bitrate against the
// capability reported by the system MP3 audio encoder.
// Returns false only when the capability queries themselves fail; out-of-range
// parameters merely reach the (empty) adjustment hooks, so the function still
// returns true for them — matching the original sample's behavior.
// NOTE(review): the call site currently ignores the result.
bool checkParam(int32_t sampleRate_ = 8000, int32_t channelCount_ = 1, int32_t bitrate_ = 48000) {
    OH_AVCapability *capability = OH_AVCodec_GetCapability(OH_AVCODEC_MIMETYPE_AUDIO_MPEG, true);
    if (capability == nullptr) {
        return false;
    }
    // 1. Is the requested sample rate among the supported values?
    const int32_t *sampleRates = nullptr;
    uint32_t sampleRateNum = 0;
    int32_t ret = OH_AVCapability_GetAudioSupportedSampleRates(capability, &sampleRates, &sampleRateNum);
    if (ret != AV_ERR_OK || sampleRates == nullptr || sampleRateNum == 0) {
        return false;
    }
    bool isMatched = false;
    // Fix: index is uint32_t to match sampleRateNum (the original compared a
    // signed int against an unsigned count); break as soon as a match is found.
    for (uint32_t i = 0; i < sampleRateNum; i++) {
        if (sampleRates[i] == sampleRate_) {
            isMatched = true;
            break;
        }
    }
    if (!isMatched) {
        // 2. (Optional) adjust the sample rate to a supported value here.
    }
    // 3. Is the channel count within the supported range?
    OH_AVRange channelRange = {-1, -1};
    ret = OH_AVCapability_GetAudioChannelCountRange(capability, &channelRange);
    if (ret != AV_ERR_OK || channelRange.maxVal <= 0) {
        return false;
    }
    if (channelCount_ > channelRange.maxVal || channelCount_ < channelRange.minVal) {
        // 4. (Optional) adjust the channel count here.
    }
    // 5. Is the bitrate within the encoder's supported range?
    OH_AVRange bitrateRange = {-1, -1};
    ret = OH_AVCapability_GetEncoderBitrateRange(capability, &bitrateRange);
    if (ret != AV_ERR_OK || bitrateRange.maxVal <= 0) {
        return false;
    }
    if (bitrate_ > bitrateRange.maxVal || bitrate_ < bitrateRange.minVal) {
        // 6. (Optional) adjust the bitrate here.
    }
    return true;
}


// Per-call state carried through the napi async work: allocated in
// NAPI_Global_PcmConvertMp3 and freed in its complete callback (or on early
// rejection).
struct ConvertAsyncData {
    napi_env env;            // N-API environment (valid on the JS thread only)
    napi_ref callback_ref;   // NOTE(review): never assigned or read in this file
    napi_deferred deferred;  // deferred backing the Promise returned to JS
    string inputFilePath;    // PCM input file path
    string outputFilePath;   // MP3 output file path
    uint32_t sampleRate;     // requested sample rate in Hz (default 8000)
    uint64_t channelCount;   // requested channel count; uint64_t because it is
                             // read via napi_get_value_bigint_uint64 at the call site
    uint32_t bitrate;        // requested bitrate in bit/s (default 48000)
    bool success;            // set true by the worker only on full success
    string errorMessage;     // failure description consumed when success == false
};

// N-API entry: PcmConvertMp3(inputPath, outputPath[, sampleRate, channelCount, bitrate]).
// Encodes a raw PCM file (interleaved SAMPLE_S16LE) to an MP3 stream with the
// system audio encoder on a worker thread. Returns a Promise that resolves to
// { success: true, outputPath } or rejects with an Error.
static napi_value NAPI_Global_PcmConvertMp3(napi_env env, napi_callback_info info) {
    // Create the Promise handed back to JS.
    napi_deferred deferred;
    napi_value promise;
    napi_create_promise(env, &deferred, &promise);

    // Collect up to five JS arguments.
    size_t argc = 5;
    napi_value args[5] = {nullptr};
    napi_get_cb_info(env, info, &argc, args, nullptr, nullptr);

    // Shared call state; freed in completeWork, or right here on early reject.
    ConvertAsyncData *asyncData = new ConvertAsyncData();
    asyncData->env = env;
    asyncData->deferred = deferred;
    asyncData->sampleRate = 8000; // default sample rate (Hz)
    asyncData->channelCount = 1;  // default channel count
    asyncData->bitrate = 48000;   // default bitrate (bit/s)
    asyncData->success = false;

    // Need at least the two path arguments.
    if (argc < 2) {
        asyncData->errorMessage = "至少需要两个参数: 输入文件路径和输出文件路径";
        napi_reject_deferred(env, deferred, CreateError(env, asyncData->errorMessage));
        delete asyncData;
        return promise;
    }

    // Both paths must be strings.
    napi_valuetype inputFile, outputFile;
    napi_typeof(env, args[0], &inputFile);
    napi_typeof(env, args[1], &outputFile);

    if (inputFile != napi_string || outputFile != napi_string) {
        asyncData->errorMessage = "输入和输出文件路径必须是字符串";
        napi_reject_deferred(env, deferred, CreateError(env, asyncData->errorMessage));
        delete asyncData;
        return promise;
    }

    // Copy the UTF-8 paths out of the JS strings (truncated past 255 bytes).
    char inputPath[256] = {0};
    char outputPath[256] = {0};
    size_t inputLength, outputLength;

    napi_get_value_string_utf8(env, args[0], inputPath, sizeof(inputPath), &inputLength);
    napi_get_value_string_utf8(env, args[1], outputPath, sizeof(outputPath), &outputLength);

    asyncData->inputFilePath = string(inputPath);
    asyncData->outputFilePath = string(outputPath);

    // Optional numeric parameters: sampleRate, channelCount, bitrate.
    if (argc == 5) {
        napi_valuetype sampleRate, channelCount, bitRate;
        napi_typeof(env, args[2], &sampleRate);
        napi_typeof(env, args[3], &channelCount);
        napi_typeof(env, args[4], &bitRate);

        if (sampleRate != napi_number || channelCount != napi_number || bitRate != napi_number) {
            asyncData->errorMessage = "采样率、通道数和比特率必须是数字";
            napi_reject_deferred(env, deferred, CreateError(env, asyncData->errorMessage));
            delete asyncData;
            return promise;
        }

        napi_get_value_uint32(env, args[2], &asyncData->sampleRate);
        // Fix: args[3] was type-checked as napi_number above, so it must be read
        // with napi_get_value_uint32. The original napi_get_value_bigint_uint64
        // call always fails on a plain JS number and silently left the default
        // channel count (1) in place.
        uint32_t channels = 0;
        napi_get_value_uint32(env, args[3], &channels);
        asyncData->channelCount = channels;
        napi_get_value_uint32(env, args[4], &asyncData->bitrate);
    }

    // Worker-thread body: must not touch JS values.
    auto executeWork = [](napi_env env, void *data) {
        ConvertAsyncData *asyncData = static_cast<ConvertAsyncData *>(data);

        // Create the encoder from the MP3 capability's codec name.
        // Fix: null-check each step — the original dereferenced the capability
        // and used the codec unconditionally.
        OH_AVCapability *capability = OH_AVCodec_GetCapability(OH_AVCODEC_MIMETYPE_AUDIO_MPEG, true);
        const char *name = (capability != nullptr) ? OH_AVCapability_GetName(capability) : nullptr;
        OH_AVCodec *audioEnc_ = (name != nullptr) ? OH_AudioCodec_CreateByName(name) : nullptr;
        if (audioEnc_ == nullptr) {
            asyncData->errorMessage = "创建编码器失败";
            return;
        }

        // Capability sanity check. NOTE(review): the result was never acted on
        // in the original code; preserved as informational only.
        (void)checkParam(asyncData->sampleRate, static_cast<int32_t>(asyncData->channelCount), asyncData->bitrate);

        AEncBufferSignal *signal = new AEncBufferSignal();
        OH_AVCodecCallback cb = {&OnError, &OnOutputFormatChanged, &OnInputBufferAvailable, &OnOutputBufferAvailable};
        // Register the asynchronous buffer callbacks.
        int32_t ret = OH_AudioCodec_RegisterCallback(audioEnc_, cb, signal);
        if (ret != AV_ERR_OK) {
            asyncData->errorMessage = "注册编码器回调失败";
            OH_AudioCodec_Destroy(audioEnc_); // fix: codec was leaked on every error path
            delete signal;
            return;
        }

        // Mandatory configuration: channel layout and bit depth.
        constexpr OH_AudioChannelLayout CHANNEL_LAYOUT = OH_AudioChannelLayout::CH_LAYOUT_MONO;
        constexpr OH_BitsPerSample SAMPLE_FORMAT = OH_BitsPerSample::SAMPLE_S16LE;
        // One frame of audio every 20 ms.
        constexpr float TIME_PER_FRAME = 0.02;
        // Optional: max input size = bytes in one 20 ms S16LE frame.
        uint32_t DEFAULT_MAX_INPUT_SIZE =
            asyncData->sampleRate * TIME_PER_FRAME * asyncData->channelCount * sizeof(short);

        OH_AVFormat *format = OH_AVFormat_Create();
        OH_AVFormat_SetIntValue(format, OH_MD_KEY_AUD_CHANNEL_COUNT, static_cast<int32_t>(asyncData->channelCount));
        OH_AVFormat_SetIntValue(format, OH_MD_KEY_AUD_SAMPLE_RATE, asyncData->sampleRate);
        OH_AVFormat_SetLongValue(format, OH_MD_KEY_BITRATE, asyncData->bitrate);
        OH_AVFormat_SetIntValue(format, OH_MD_KEY_AUDIO_SAMPLE_FORMAT, SAMPLE_FORMAT);
        OH_AVFormat_SetLongValue(format, OH_MD_KEY_CHANNEL_LAYOUT, CHANNEL_LAYOUT);
        OH_AVFormat_SetIntValue(format, OH_MD_KEY_MAX_INPUT_SIZE, DEFAULT_MAX_INPUT_SIZE);

        // Configure the encoder, then release the format object.
        ret = OH_AudioCodec_Configure(audioEnc_, format);
        OH_AVFormat_Destroy(format); // fix: the OH_AVFormat was never destroyed (leak)
        if (ret != AV_ERR_OK) {
            asyncData->errorMessage = "配置编码器失败";
            OH_AudioCodec_Destroy(audioEnc_);
            delete signal;
            return;
        }

        ret = OH_AudioCodec_Prepare(audioEnc_);
        if (ret != AV_ERR_OK) {
            asyncData->errorMessage = "准备编码器失败";
            OH_AudioCodec_Destroy(audioEnc_);
            delete signal;
            return;
        }

        // Plain streams (RAII) close themselves on every return path; the
        // original's unique_ptr-wrapped streams added nothing.
        ifstream inputFile_;
        ofstream outFile_;
        // Input: raw PCM file.
        inputFile_.open(asyncData->inputFilePath, ios::in | ios::binary);
        if (!inputFile_.is_open()) {
            asyncData->errorMessage = "无法打开输入文件: " + asyncData->inputFilePath;
            OH_AudioCodec_Destroy(audioEnc_);
            delete signal;
            return;
        }

        // Output: MP3 elementary-stream file.
        outFile_.open(asyncData->outputFilePath, ios::out | ios::binary);
        if (!outFile_.is_open()) {
            asyncData->errorMessage = "无法打开输出文件: " + asyncData->outputFilePath;
            OH_AudioCodec_Destroy(audioEnc_);
            delete signal;
            return;
        }

        // Start encoding.
        ret = OH_AudioCodec_Start(audioEnc_);
        if (ret != AV_ERR_OK) {
            asyncData->errorMessage = "启动编码器失败";
            OH_AudioCodec_Destroy(audioEnc_);
            delete signal;
            return;
        }

        // Samples per 20 ms frame, and bytes per input frame
        // (channels * samples * 2 bytes for S16LE).
        int32_t SAMPLES_PER_FRAME = asyncData->sampleRate * TIME_PER_FRAME;
        int32_t INPUT_FRAME_BYTES = static_cast<int32_t>(asyncData->channelCount) * SAMPLES_PER_FRAME * sizeof(short);
        bool isEOS = false;

        // Feed input frames and opportunistically drain finished output.
        while (!inputFile_.eof() || !isEOS) {
            {
                unique_lock<mutex> lock(signal->inMutex_);
                if (signal->inQueue_.empty()) {
                    // Wait for the codec to hand us a free input buffer.
                    signal->inCond_.wait(lock, [&]() { return !signal->inQueue_.empty(); });
                }

                uint32_t index = signal->inQueue_.front();
                signal->inQueue_.pop();
                auto buffer = signal->inBufferQueue_.front();
                signal->inBufferQueue_.pop();

                // Fill the buffer; an empty read means end-of-stream.
                OH_AVCodecBufferAttr attr = {0};
                if (!inputFile_.eof()) {
                    inputFile_.read((char *)OH_AVBuffer_GetAddr(buffer), INPUT_FRAME_BYTES);
                    size_t bytesRead = inputFile_.gcount();
                    if (bytesRead > 0) {
                        attr.size = bytesRead;
                        attr.flags = AVCODEC_BUFFER_FLAGS_NONE;
                    } else {
                        attr.size = 0;
                        attr.flags = AVCODEC_BUFFER_FLAGS_EOS;
                        isEOS = true;
                    }
                } else {
                    attr.size = 0;
                    attr.flags = AVCODEC_BUFFER_FLAGS_EOS;
                    isEOS = true;
                }

                OH_AVBuffer_SetBufferAttr(buffer, &attr);

                // Queue the buffer for encoding.
                ret = OH_AudioCodec_PushInputBuffer(audioEnc_, index);
                if (ret != AV_ERR_OK) {
                    asyncData->errorMessage = "推送输入缓冲区失败";
                    OH_AudioCodec_Destroy(audioEnc_);
                    delete signal;
                    return;
                }
            }

            // Write out one finished packet if any is ready (non-blocking).
            {
                unique_lock<mutex> lock(signal->outMutex_);
                if (!signal->outQueue_.empty()) {
                    uint32_t oindex = signal->outQueue_.front();
                    signal->outQueue_.pop();
                    OH_AVBuffer *avBuffer = signal->outBufferQueue_.front();
                    signal->outBufferQueue_.pop();

                    OH_AVCodecBufferAttr oattr = {0};
                    ret = OH_AVBuffer_GetBufferAttr(avBuffer, &oattr);
                    if (ret != AV_ERR_OK) {
                        asyncData->errorMessage = "获取输出缓冲区属性失败";
                        OH_AudioCodec_Destroy(audioEnc_);
                        delete signal;
                        return;
                    }

                    if (oattr.size > 0) {
                        outFile_.write(reinterpret_cast<char *>(OH_AVBuffer_GetAddr(avBuffer)), oattr.size);
                    }

                    if (oattr.flags == AVCODEC_BUFFER_FLAGS_EOS) {
                        isEOS = true;
                    }

                    ret = OH_AudioCodec_FreeOutputBuffer(audioEnc_, oindex);
                    if (ret != AV_ERR_OK) {
                        asyncData->errorMessage = "释放输出缓冲区失败";
                        OH_AudioCodec_Destroy(audioEnc_);
                        delete signal;
                        return;
                    }
                }
            }
        }

        // Drain whatever output is still queued.
        // NOTE(review): this exits as soon as the queue is momentarily empty,
        // so packets still in flight after EOS may be dropped; a robust drain
        // would wait on outCond_ until a buffer flagged EOS is seen. Preserved
        // as-is to avoid changing the threading behavior here.
        bool outputDone = false;
        while (!outputDone) {
            unique_lock<mutex> lock(signal->outMutex_);
            if (signal->outQueue_.empty()) {
                outputDone = true;
            } else {
                uint32_t oindex = signal->outQueue_.front();
                signal->outQueue_.pop();
                OH_AVBuffer *avBuffer = signal->outBufferQueue_.front();
                signal->outBufferQueue_.pop();

                OH_AVCodecBufferAttr oattr = {0};
                ret = OH_AVBuffer_GetBufferAttr(avBuffer, &oattr);
                if (ret != AV_ERR_OK) {
                    asyncData->errorMessage = "获取输出缓冲区属性失败";
                    OH_AudioCodec_Destroy(audioEnc_);
                    delete signal;
                    return;
                }

                if (oattr.size > 0) {
                    outFile_.write(reinterpret_cast<char *>(OH_AVBuffer_GetAddr(avBuffer)), oattr.size);
                }

                ret = OH_AudioCodec_FreeOutputBuffer(audioEnc_, oindex);
                if (ret != AV_ERR_OK) {
                    asyncData->errorMessage = "释放输出缓冲区失败";
                    OH_AudioCodec_Destroy(audioEnc_);
                    delete signal;
                    return;
                }

                if (oattr.flags == AVCODEC_BUFFER_FLAGS_EOS) {
                    outputDone = true;
                }
            }
        }

        // Stop, then destroy the encoder (destroy exactly once).
        ret = OH_AudioCodec_Stop(audioEnc_);
        if (ret != AV_ERR_OK) {
            asyncData->errorMessage = "停止编码器失败";
            OH_AudioCodec_Destroy(audioEnc_);
            delete signal;
            return;
        }

        ret = OH_AudioCodec_Destroy(audioEnc_);
        audioEnc_ = nullptr; // never destroy twice
        if (ret != AV_ERR_OK) {
            asyncData->errorMessage = "销毁编码器失败";
            delete signal;
            return;
        }

        delete signal;
        asyncData->success = true;
    };

    // Main-thread completion: settle the Promise and free the call state.
    auto completeWork = [](napi_env env, napi_status status, void *data) {
        ConvertAsyncData *asyncData = static_cast<ConvertAsyncData *>(data);

        if (asyncData->success) {
            // Resolve with { success: true, outputPath }.
            napi_value result;
            napi_create_object(env, &result);

            napi_value success;
            napi_get_boolean(env, true, &success);
            napi_set_named_property(env, result, "success", success);

            napi_value outputPath;
            napi_create_string_utf8(env, asyncData->outputFilePath.c_str(), NAPI_AUTO_LENGTH, &outputPath);
            napi_set_named_property(env, result, "outputPath", outputPath);

            napi_resolve_deferred(env, asyncData->deferred, result);
        } else {
            // Reject with the worker's error message.
            napi_reject_deferred(env, asyncData->deferred, CreateError(env, asyncData->errorMessage));
        }

        // TODO(review): the napi_async_work handle is never released; store it
        // (e.g. in ConvertAsyncData) and call napi_delete_async_work here.
        delete asyncData;
    };

    // Create and queue the async work. (Fix: the original terminated the
    // napi_create_string_utf8 line with a comma operator instead of ';'.)
    napi_async_work work;
    napi_value async_resource_name;
    napi_create_string_utf8(env, "PCM2MP3Conversion", NAPI_AUTO_LENGTH, &async_resource_name);
    napi_create_async_work(env, nullptr, async_resource_name, executeWork, completeWork, asyncData, &work);
    napi_queue_async_work(env, work);

    // Return the Promise to JS immediately.
    return promise;
}

EXTERN_C_START
// Module initializer: exposes PcmConvertMp3 on the module's exports object.
static napi_value Init(napi_env env, napi_value exports) {
    napi_property_descriptor properties[] = {
        {"PcmConvertMp3", nullptr, NAPI_Global_PcmConvertMp3, nullptr, nullptr, nullptr, napi_default, nullptr},
    };
    size_t propertyCount = sizeof(properties) / sizeof(properties[0]);
    napi_define_properties(env, exports, propertyCount, properties);
    return exports;
}
EXTERN_C_END

// Module registration descriptor handed to napi_module_register below.
static napi_module demoModule = {
    .nm_version = 1,
    .nm_flags = 0,
    .nm_filename = nullptr,
    .nm_register_func = Init,   // called by the runtime to populate exports
    .nm_modname = "entry",      // module name as imported from the JS side
    .nm_priv = ((void *)0),
    .reserved = {0},
};

extern "C" __attribute__((constructor)) void RegisterEntryModule(void) { napi_module_register(&demoModule); }
