#include "jarviswake.h"

// Construct the wake-word plugin. Heavy setup (Porcupine, audio capture)
// is deferred to Init(); here we only tag the plugin type and make sure
// the resource handles start out null so the destructor can tell
// "Init() never ran" apart from live resources.
JarvisWake::JarvisWake()
{
    m_type = Type::OTHER;
    porcupineObject = nullptr;
    audioInput = nullptr;
    ioDevice = nullptr;
}

// Initialize the Porcupine wake-word engine and start streaming microphone
// audio into it.
//
// @param resPath  Root resource directory; model/keyword files are expected
//                 under resPath + "data/" (paths come from the modelPath /
//                 keywordPath members).
//
// Exits the process on unrecoverable failure: code 2 if Porcupine cannot be
// initialized, code 3 if the input device rejects the required audio format.
void JarvisWake::Init(QString resPath)
{
    // Keep the UTF-8 buffers in named locals: pv_porcupine_init() needs the
    // C strings valid for the duration of the call, and named QByteArrays
    // make that lifetime explicit instead of leaning on temporaries.
    const QByteArray modelFile = (resPath + "data/" + modelPath).toUtf8();
    const QByteArray keywordFile = (resPath + "data/" + keywordPath).toUtf8();

    // 0.5 = detection sensitivity in [0,1]; higher values detect more easily
    // but raise the false-alarm rate.
    pv_status_t status = pv_porcupine_init(modelFile.data(), keywordFile.data(), 0.5, &porcupineObject);
    if (status != PV_STATUS_SUCCESS) {
        qDebug() << "Failed to initialize Porcupine, status:" << status;
        exit(2);
    }

    // Get default audio input device
    QAudioDeviceInfo qtInfo = QAudioDeviceInfo::defaultInputDevice();

    // 16 kHz mono signed 16-bit little-endian PCM — presumably the format
    // Porcupine expects (matches the int16_t frame handling in
    // processSamples()); TODO confirm against pv_sample_rate().
    QAudioFormat qtFormat;
    qtFormat.setCodec("audio/pcm");
    qtFormat.setByteOrder(QAudioFormat::Endian::LittleEndian);
    qtFormat.setChannelCount(1);
    qtFormat.setSampleRate(16000);
    qtFormat.setSampleSize(16);
    qtFormat.setSampleType(QAudioFormat::SignedInt);

    // Check whether the format is supported
    if (!qtInfo.isFormatSupported(qtFormat)) {
        qWarning() << "Default format is not supported";
        exit(3);
    }

    // Capture from the device we actually validated above (the original
    // default-device constructor ignored qtInfo).
    audioInput = new QAudioInput(qtInfo, qtFormat);

    // Start receiving data from audio input
    ioDevice = audioInput->start();

    // Forward each chunk of captured PCM to the wake-word detector.
    // Capture `this` explicitly ([=] captures it implicitly, which is
    // deprecated in C++20).
    QObject::connect(ioDevice, &QIODevice::readyRead, [this] {
        listen(ioDevice->readAll());
    });
}

// This plugin is headless: it contributes no widget to the UI.
QWidget *JarvisWake::GetWidget(QWidget *parent)
{
    (void)parent; // intentionally unused
    return nullptr;
}

// The wake-word plugin only emits messages (see processSamples()); it does
// not react to messages from other modules, so this handler is a no-op.
void JarvisWake::PollMessage(JarvisInterface::Type from, QJsonObject data)
{
    (void)from;
    (void)data;
}

// Tear down the audio capture pipeline and the Porcupine engine; the
// original destructor leaked both.
// NOTE(review): the guards assume the members are null when Init() was never
// called — verify the class declaration null-initializes them.
JarvisWake::~JarvisWake()
{
    if (audioInput) {
        audioInput->stop();      // also invalidates the QIODevice from start()
        delete audioInput;
        audioInput = nullptr;
    }
    if (porcupineObject) {
        pv_porcupine_delete(porcupineObject);
        porcupineObject = nullptr;
    }
}

void JarvisWake::listen(const QByteArray &audioData)
{
    const int porcupineFrameLength = pv_porcupine_frame_length();
    audioDataBuffer.append(audioData);
    processSamples(porcupineFrameLength);
}

// Check if we have enough bytes of audio data for Porcupine to work with
bool JarvisWake::hasEnoughSamples(int porcupineFrameLength)
{
    // We use porcupineFrameLength * 2 because frame length type is int16_t while audioDataBuffer's type is byte
    return audioDataBuffer.size() >= porcupineFrameLength * 2;
}

// Go through the audio data and detect the wake word if there's any
void JarvisWake::processSamples(int porcupineFrameLength)
{
    while (hasEnoughSamples(porcupineFrameLength)) {
        const int16_t *audioData = reinterpret_cast<int16_t*>(audioDataBuffer.data());
        bool detected;

        pv_porcupine_process(porcupineObject, &audioData[0], &detected);
        if (detected) {
            // Detected keyword. Do something!
            qDebug() << "Detected keyword!";
            QJsonObject obj;
            obj["MODE"] = "Click";
            qRegisterMetaType<Type>("Type");
            emit SendData(Type::SPEECH,obj);
        }

        // Remove the audio data that we have processed
        audioDataBuffer.remove(0, porcupineFrameLength * 2);
    }
}
