﻿#include <QDebug>
#include <QEventLoop>

#include "common/LoopBuffer.h"
#include "common/AVAudioConvert.h"

#include "MacMicRecord.h"

/* Input render callback installed by initCallback(); `data` is the MacAudio
 * instance. Pulls the freshly captured samples via AudioUnitRender(), queues
 * them in the loop buffer and, once 1024 samples have accumulated, resamples
 * them to 44100 Hz mono S16 and emits the result to the dispatch layer.
 * NOTE(review): runs on the Core Audio capture thread — the emitted signal is
 * presumably delivered over a queued connection; verify the receivers. */
static OSStatus audio_callback(void *data,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *time,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData)
{
    Q_UNUSED(ioData); /* input units deliver data through AudioUnitRender(), not ioData */

    MacAudio *audioInput = static_cast<MacAudio *>(data);
    OSStatus status;

    status = AudioUnitRender(audioInput->unit, ioActionFlags, time, inBusNumber, inNumberFrames, audioInput->bufferList);

    /* Best-effort: a failed render is silently dropped so capture keeps running. */
    if (noErr != status)
        return noErr;

    /* Accumulate 1024 samples before sending one frame to the dispatch layer. */
    int defaultSampleRate = 44100;
    int defaultNbSample = 1024;
    int resampleSize = 2048; /* 1024 samples * 1 channel * 2 bytes (S16 mono output) */
    int frameSize = av_samples_get_buffer_size(0,av_get_channel_layout_nb_channels(audioInput->channelLayout),defaultNbSample,
                                               audioInput->format,1);

    audioInput->m_buffer->writeData((const char *)audioInput->bufferList->mBuffers[0].mData,audioInput->bufferList->mBuffers[0].mDataByteSize);

    if (audioInput->m_buffer->usedSize() >= frameSize) {
        QByteArray tempBuf(frameSize,0);
        if (audioInput->m_buffer->read(tempBuf.data(),frameSize) == frameSize) {
            /* Wrap the raw bytes in an AVFrame describing the device format.
             * data[0] borrows tempBuf's storage (no AVBufferRef), so this
             * frame must not outlive the current scope.
             * NOTE(review): assumes the device format is packed/interleaved —
             * a planar format would need one data[] pointer per channel. */
            AVFrame * srcFrame = av_frame_alloc();
            srcFrame->sample_rate = audioInput->sampleRate;
            srcFrame->channels = audioInput->channel;
            srcFrame->nb_samples = defaultNbSample;
            srcFrame->format = audioInput->format;
            srcFrame->data[0] = (uint8_t*)tempBuf.data();

            QByteArray resampleBuf(resampleSize,0);
            int retBytes = 0;
            int retNbSample = 0;
            audioInput->m_convert->audio_rescale(srcFrame,(uint8_t*)resampleBuf.data(),&retBytes,&retNbSample);

            AVFrame *dstFrame = av_frame_alloc();
            dstFrame->sample_rate = defaultSampleRate;
            dstFrame->channels = 1;
            dstFrame->nb_samples = retNbSample;
            dstFrame->format = AV_SAMPLE_FMT_S16;
            dstFrame->data[0] = (uint8_t*)resampleBuf.data();

            /* av_frame_clone() deep-copies here (dstFrame carries no
             * AVBufferRef, so av_frame_ref allocates and copies), detaching
             * the emitted frame from resampleBuf. The receiving thread owns
             * and frees emitFrame. */
            AVFrame *emitFrame = av_frame_clone(dstFrame);
            emitFrame->pts = av_gettime();
            emit audioInput->GotAVFrame(emitFrame);

            /* av_frame_free() unrefs internally; the explicit av_frame_unref()
             * calls the original code carried before each free were redundant. */
            av_frame_free(&srcFrame);
            av_frame_free(&dstFrame);

        } else {
            qDebug() << "read error";
        }
    }

    return noErr;
}

/* Creates the capture object and the 8 KiB loop buffer used to gather
 * samples between Core Audio render callbacks. */
MacAudio::MacAudio(QObject *parent)
    : MacMediaDevice(parent),m_buffer(new LoopBuffer(8192))
{
    /* Defensive fix: ~MacAudio() deletes m_convert, but it is only allocated
     * in capture(). Ensure the destructor never sees an indeterminate pointer
     * when capture() was never reached (harmless if the header already
     * initializes it). */
    m_convert = nullptr;
    m_buffer->open(QIODevice::ReadWrite);
}

/* Tears the audio unit down first so the render callback can no longer touch
 * the buffers, then releases the helper objects. */
MacAudio::~MacAudio()
{
    release();

    if (auto *buf = m_buffer) {
        m_buffer = nullptr;
        buf->deleteLater();  /* QObject: defer destruction to the event loop */
    }

    if (auto *conv = m_convert) {
        m_convert = nullptr;
        delete conv;
    }
}

void MacAudio::release()
{
    OSStatus status;
    if (unit != nullptr) {
        status = AudioOutputUnitStop(unit);
        status == noErr ? qDebug() << "audio unit stop success" : qDebug() << "audio unit stop error";

        AURenderCallbackStruct callback = {
            .inputProc = nullptr,
            .inputProcRefCon = nullptr,
        };

        AudioUnitSetProperty(unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));

        status = AudioUnitUninitialize(unit);
        status == noErr ? qDebug() << "audio unit uninit success" : qDebug() << "audio unit uninit error";

        status = AudioComponentInstanceDispose(unit);

        status == noErr ? qDebug() << "audio unit dispose success" : qDebug() << "audio unit dispose error";
        unit = nullptr;
    }

    if (bufferList != nullptr) {
        for (size_t i = 0; i < bufferList->mNumberBuffers; i++)
            av_free(bufferList->mBuffers[i].mData);

        delete bufferList;
        bufferList = nullptr;
    }
}

/* Stores the Core Audio device id, supplied as its decimal string form. */
void MacAudio::setDeviceId(const QString &deviceId)
{
    const int numericId = deviceId.toInt();
    this->deviceId = numericId;
}

/* Returns the bound device id as a decimal string. */
QString MacAudio::getDeviceId()
{
    QString id;
    id.setNum(this->deviceId);
    return id;
}
#pragma mark - convert channel layout

/* Maps a Core Audio channel count to the matching FFmpeg channel-layout
 * mask. Returns (UInt32)-1 for counts with no mapping; callers compare
 * against -1 (see initFormat()). */
UInt32 MacAudio::convertChannelLayout(UInt32 channelsPerFrame)
{
    switch (channelsPerFrame) {
    case 1:
        return AV_CH_LAYOUT_MONO;
    case 2:
        return AV_CH_LAYOUT_STEREO;
    case 3:
        return AV_CH_LAYOUT_2POINT1;
    case 4:
        return AV_CH_LAYOUT_QUAD;
    case 5:
        return AV_CH_LAYOUT_4POINT1;
    case 6:
        return AV_CH_LAYOUT_5POINT1;
    case 7:
        /* Previously unmapped; 6.1 is FFmpeg's default layout for 7 channels. */
        return AV_CH_LAYOUT_6POINT1;
    case 8:
        return AV_CH_LAYOUT_7POINT1;
    default:
        return -1;
    }
}

#pragma mark - convert format

/* Translates a Core Audio LPCM format description (format flags + bit depth)
 * into the equivalent FFmpeg sample format, or AV_SAMPLE_FMT_NONE when the
 * combination has no FFmpeg counterpart. */
AVSampleFormat MacAudio::convertFormat(AudioFormatFlags flags, UInt32 bits)
{
    const bool planar   = (flags & kAudioFormatFlagIsNonInterleaved) != 0;
    const bool isFloat  = (flags & kAudioFormatFlagIsFloat) != 0;
    const bool isSigned = (flags & kAudioFormatFlagIsSignedInteger) != 0;

    if (isFloat)
        return planar ? AV_SAMPLE_FMT_FLTP : AV_SAMPLE_FMT_FLT;

    /* Unsigned integer samples: FFmpeg only represents the 8-bit case. */
    if (!isSigned)
        return (bits == 8) ? (planar ? AV_SAMPLE_FMT_U8P : AV_SAMPLE_FMT_U8)
                           : AV_SAMPLE_FMT_NONE;

    switch (bits) {
    case 16:
        return planar ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
    case 32:
        return planar ? AV_SAMPLE_FMT_S32P : AV_SAMPLE_FMT_S32;
    default:
        return AV_SAMPLE_FMT_NONE;
    }
}

#pragma mark - enable audio unit io

/* Enables or disables the input or output side of the HAL audio unit. */
OSStatus MacAudio::enableAUIO(MacAudio::CoreAudioIO type, bool enable)
{
    const bool input = (type == INPUT_IO);
    UInt32 flag = enable ? 1 : 0;

    return AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
                                input ? kAudioUnitScope_Input : kAudioUnitScope_Output,
                                input ? InputBus : OutputBus,
                                &flag, sizeof(flag));
}

#pragma mark - init audio unit

bool MacAudio::initAudioUnit()
{
    AudioComponentDescription desc = {
        .componentType    = kAudioUnitType_Output,
        .componentSubType = kAudioUnitSubType_HALOutput
    };
    
    AudioComponent component = AudioComponentFindNext(NULL, &desc);
    if (!component) {
        qDebug() << "initAudioUnit" << "find component failed";
        return false;
    }
    OSStatus status;
    status = AudioComponentInstanceNew(component, &unit);
    if (status != noErr)
    {
        qDebug() << "initAudioUnit" << "new unit erro";
        return false;
    }
    
    
    return true;
}

#pragma mark - init format

/* Queries the device's native input stream format, clamps exotic channel
 * counts, mirrors the format onto the unit's output scope of the input bus,
 * and derives the FFmpeg-side members (sampleRate/format/channelLayout/
 * channel) from it. Returns false on any failure. */
bool MacAudio::initFormat()
{
    AudioStreamBasicDescription desc;
    OSStatus status;
    UInt32 size = sizeof(desc);

    status = AudioUnitGetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, InputBus, &desc, &size);

    if(status != noErr)
    {
        qDebug() << "initFormat" << "get input format error";
        return false;
    }

    /* More than 8 channels cannot be mapped to an FFmpeg layout below;
     * fall back to stereo and recompute the dependent packing fields. */
    if (desc.mChannelsPerFrame > 8) {
        desc.mChannelsPerFrame = 2;
        desc.mBytesPerFrame = 2 * desc.mBitsPerChannel / 8;
        desc.mBytesPerPacket = desc.mFramesPerPacket * desc.mBytesPerFrame;
    }

    status = AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, InputBus, &desc, size);

    if(status != noErr) {
        qDebug() << "initFormat" << "set input format error";
        return false;
    }

    if (desc.mFormatID != kAudioFormatLinearPCM) {
        /* BUG FIX: was `qDebug() << "initFormat", "format is not PCM";` —
         * the comma operator discarded the actual message text. */
        qDebug() << "initFormat" << "format is not PCM";
        return false;
    }

    format = convertFormat(desc.mFormatFlags, desc.mBitsPerChannel);

    if (format == AV_SAMPLE_FMT_NONE) {
        qDebug() << "initFormat" << "convert format error";
        return false;
    }

    sampleRate = desc.mSampleRate;
    channelLayout = convertChannelLayout(desc.mChannelsPerFrame);

    /* Validate the layout before deriving the channel count from it (the
     * original computed the count from a possibly-invalid layout first). */
    if (channelLayout == -1) {
        qDebug() << "initFormat" << "convert channel layout error";
        return false;
    }

    channel = av_get_channel_layout_nb_channels(channelLayout);

    return true;
}

#pragma mark - init callback

bool MacAudio::initCallback()
{
    AURenderCallbackStruct callback_info = {
        .inputProc       = audio_callback,
        .inputProcRefCon = this
    };
    
    OSStatus status;
    
    status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global, 0, &callback_info, sizeof(callback_info));

    if (status != noErr) {
        qDebug() << "init callback error";
        return false;
    }
    
    return true;
}

#pragma mark - init buffer

bool MacAudio::initAudioBufferList()
{
    AudioObjectPropertyAddress addr = {
        .mSelector = kAudioDevicePropertyStreamConfiguration,
        .mScope = DEVICE_INPUT_SCOPE,
        .mElement = kAudioObjectPropertyElementMaster,
    };
    UInt32 bufferSize = 0;
    OSStatus status = AudioObjectGetPropertyDataSize(deviceId, &addr, 0, nullptr, &bufferSize);
    
    if (status != noErr)
    {
        qDebug() << "[init buffer list] get buffer size error";
        return false;
    }
    
    bufferList = new AudioBufferList;
    status = AudioObjectGetPropertyData(deviceId, &addr, 0, nullptr, &bufferSize, bufferList);
    
    if (status != noErr)
    {
        qDebug() << "[init buffer list] alloc buffer list error";
        return false;
    }
    
    UInt32 frames = 0;
    UInt32 size = sizeof(frames);

    status = AudioUnitGetProperty(unit, kAudioDevicePropertyBufferFrameSize, kAudioUnitScope_Global, OutputBus, &frames, &size);
    
    if (status != noErr) {
        qDebug() << "[init buffer list] get buffer frame size error";
        return false;
    }
    
    for(UInt32 i = 0; i < bufferList->mNumberBuffers; i++) {
        size = bufferList->mBuffers[i].mDataByteSize;
        bufferList->mBuffers[i].mData = av_malloc(size);
    }
    
    return true;
}

/* Initialization hook invoked from run() before capture().
 * BUG FIX: the function is declared to return bool but had no return
 * statement — flowing off the end of a value-returning function is
 * undefined behavior. Nothing needs setting up yet, so report success. */
bool MacAudio::init()
{
    return true;
}

bool MacAudio::capture()
{
    if(!initAudioUnit())
    {
        qDebug() << "init audio unit error";
        goto fail;
    }

    if(enableAUIO(CoreAudioIO::INPUT_IO, true) != noErr)
    {
        qDebug() << "initAudioUnit error";
        goto fail;
    }

    if(enableAUIO(OUTPUT_IO, false) != noErr)
    {
        qDebug() << "initAudioUnit error";
        goto fail;
    }

    OSStatus status;
    status = AudioUnitSetProperty(unit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceId, sizeof(deviceId));
    if (status != noErr)
    {
        qDebug() << "initAudioUnit" << "bind device error";
        goto fail;
    }

    if(!initFormat())
    {
        qDebug() << "initFormat" << "init format error";
        goto fail;
    }

    m_convert = new AVAudioConvert(sampleRate,channel,format,
                              44100,1,AV_SAMPLE_FMT_S16);

    if(!initCallback())
    {
        goto fail;
    }

    if(!initAudioBufferList())
    {
        goto fail;
    }

    status = AudioUnitInitialize(unit);
    if (status != noErr) {
        qDebug() << "init core unit error";
        goto fail;
    }

    status = AudioOutputUnitStart(unit);
    if (status != noErr) {
        qDebug() << "init core unit error";
        goto fail;
    }

    qDebug() << "open audio input device: " << deviceName << sampleRate << channel << format;


    return true;

fail:
    release();
    return false;
}

/* Thread entry point: initializes and starts capture, then keeps a local
 * event loop spinning forever so queued deliveries on this thread proceed. */
void MacAudio::run()
{
    this->init();

    if (!this->capture())
        return;

    QEventLoop loop;
    for (;;) {
        loop.processEvents(QEventLoop::AllEvents);
        QThread::msleep(20);
    }
}



