const logger = require('../util/log');
const fs = require('fs')
const path = require('path')
const opusCreater = require('./opus-creator');

const TAG = 'AUDIO'
const saveFrameCount = 50 // flush after every 50 frames (~3s of audio)

// Per-session frame queues, keyed by `${deviceId}_${clientId}`.
// `const` added: this was previously an implicit global assignment.
const recorderDataMap = new Map();

// Janitor: every 5 minutes, flush and drop sessions that have gone more
// than 120s without a save (i.e. the device stopped sending frames
// before a full batch of `saveFrameCount` accumulated).
const cleanupInterval = setInterval(() => {
    const now = Date.now()
    console.log('check recorderDataMap size:', recorderDataMap.size)

    for (const [key, deviceQueue] of recorderDataMap) {
        const queueAge = now - deviceQueue.lastSaveTime;

        // 120s without a save and frames still pending -> flush the
        // remainder to disk, then forget the session.
        if (queueAge > 120 * 1000 && deviceQueue.queue.length > 0) {
            console.log('clear queue', deviceQueue.queue.length)
            saveAudioFrames(deviceQueue);
            recorderDataMap.delete(key);
        }
    }

}, 5 * 60 * 1000);
// Background housekeeping must not keep the process alive by itself.
cleanupInterval.unref();

/**
 * Validate a raw opus buffer and wrap it in an audio-info record.
 *
 * @param {Buffer} buffer - raw opus frame payload
 * @returns {object|null} protocol-v1 info record, or null when the
 *          buffer is missing or empty (callers must handle null).
 */
function parseAudioInfo(buffer) {
    const isEmpty = !buffer || buffer.length === 0;
    if (isEmpty) {
        logger.log(TAG, `opus buffer empty`)
        return null;
    }

    // Only protocol version 1 is supported at the moment.
    return parseProtocolVersion1(buffer);
}

/**
 * Build the protocol-version-1 audio-info record for a raw opus frame.
 * The timestamp is assigned at parse time (server receive time).
 *
 * @param {Buffer} buffer - raw opus frame payload (assumed non-empty)
 * @returns {{protocolVersion: number, type: number, reserved: number,
 *           timestamp: number, payloadSize: number, audioData: Buffer}}
 */
function parseProtocolVersion1(buffer) {
    const info = {
        protocolVersion: 1,
        type: 0,
        reserved: 0,
        timestamp: Date.now(),
        payloadSize: buffer.length,
        audioData: buffer
    };
    return info;
}

/**
 * Queue one opus frame for the given device/client session and flush the
 * queue to disk once `saveFrameCount` frames have accumulated.
 *
 * @param {Buffer} audioData - raw opus frame payload
 * @param {{deviceId: string, clientId: string, timestamp: number}} metadata
 *        frame metadata; `timestamp` is stored with the frame, and the first
 *        queued frame's timestamp later names the output file.
 */
async function saveAudioToFile(audioData, metadata) {
    const { deviceId, clientId, timestamp } = metadata;
    const sessionKey = `${deviceId}_${clientId}`;

    // Get-or-create the per-session queue in one step. (The previous
    // version re-fetched the entry and had an unreachable "not found"
    // branch right after setting it.)
    let deviceQueue = recorderDataMap.get(sessionKey);
    if (!deviceQueue) {
        deviceQueue = {
            metadata,
            queue: [],
            lastSaveTime: Date.now()
        };
        recorderDataMap.set(sessionKey, deviceQueue);
    }

    deviceQueue.queue.push({
        data: audioData,
        timestamp,
    });
    logger.log(TAG, `push data to queue,${deviceId}, client ${clientId}`);

    // Flush a full batch to disk.
    if (deviceQueue.queue.length >= saveFrameCount) {
        saveAudioFrames(deviceQueue);
    }
}

/**
 * Flush up to `saveFrameCount` queued frames into one .opus file.
 *
 * Output path: /log/websocket-iot-server/opus/<deviceId>/
 *              <deviceId>_<clientId>_<YYYY-MM-DD>_<HH-MM-SS>.opus
 * named from the first queued frame's timestamp. Flushed frames are
 * removed from the queue; any overflow beyond one batch stays queued.
 *
 * Fixes:
 *  - `lastSaveTime` was written onto `deviceQueue.queue` instead of
 *    `deviceQueue`, so the 120s janitor check never saw a flush and
 *    wrongly expired active sessions.
 *  - the log line reported the post-splice remainder as "frame count";
 *    it now reports the number of frames actually flushed.
 *
 * @param {{metadata: {deviceId: string, clientId: string},
 *          queue: Array<{data: Buffer, timestamp: number}>,
 *          lastSaveTime: number}} deviceQueue
 */
function saveAudioFrames(deviceQueue){
    // Defensive guard: callers check length first, but an empty queue
    // would otherwise crash on `queue[0].timestamp`.
    if (deviceQueue.queue.length === 0) {
        return;
    }

    const firstFrame = deviceQueue.queue[0];
    const { deviceId, clientId } = deviceQueue.metadata;

    // Build "<YYYY-MM-DD>_<HH-MM-SS>" from the first frame's timestamp.
    const date = new Date(firstFrame.timestamp)
    const pad = n => n.toString().padStart(2, '0');
    const dateStr = `${date.getFullYear()}-${pad(date.getMonth() + 1)}-${pad(date.getDate())}`;
    const timeStr = `${pad(date.getHours())}-${pad(date.getMinutes())}-${pad(date.getSeconds())}`;
    const fileName = `${deviceId}_${clientId}_${dateStr}_${timeStr}.opus`;
    const dirPath = '/log/websocket-iot-server/opus/' + deviceId;
    const filePath = path.join(dirPath, fileName)

    // Take at most one batch; extra frames remain queued for the next flush.
    const queueArray = deviceQueue.queue.splice(0, saveFrameCount);
    const totalSize = queueArray.reduce((sum, frame) => sum + frame.data.length, 0)
    console.log(`save audio to file, ${deviceId}, ${clientId}, frame count: ${queueArray.length}, totalSize: ${totalSize}`)

    if (!fs.existsSync(dirPath)) {
        fs.mkdirSync(dirPath, { recursive: true });
    }

    opusCreater.createOpusFileWithFrames(queueArray.map(frame => frame.data), filePath, deviceId);

    // Record the flush time on the session record itself so the janitor's
    // `now - deviceQueue.lastSaveTime` check works as intended.
    deviceQueue.lastSaveTime = Date.now()
}


/**
 * Audio codec facade: advertises the expected stream parameters and
 * routes incoming opus frames into the save pipeline.
 */
class AudioCodec {
    // Expected stream parameters advertised to clients.
    format = 'opus'
    sample_rate = 16000;
    channels = 1;
    frame_duration = 60; // ms per opus frame

    /**
     * @returns {{format: string, sample_rate: number, channels: number,
     *           frame_duration: number}} the advertised audio parameters
     */
    getAudioParams() {
        return {
            format: this.format,
            sample_rate: this.sample_rate,
            channels: this.channels,
            frame_duration: this.frame_duration
        }
    }

    /**
     * Parse one raw opus frame from a device and hand it to the
     * processing pipeline. Errors are logged, never thrown.
     *
     * @param {string} DeviceId
     * @param {string} ClientId
     * @param {Buffer} buffer - raw opus frame payload
     */
    async parseAudioData(DeviceId, ClientId, buffer) {
        try {
            const audioInfo = parseAudioInfo(buffer)

            // parseAudioInfo returns null for an empty/missing buffer;
            // the old code dereferenced it unconditionally and relied on
            // the catch block to swallow the TypeError.
            if (!audioInfo) {
                return;
            }

            await this.processOpusAudio(buffer, {
                deviceId: DeviceId,
                clientId: ClientId,
                timestamp: audioInfo.timestamp,
                protocolVersion: audioInfo.protocolVersion
            })
        } catch(error) {
            console.error('parse audio error: ', error);
        }
    }

    /**
     * Process a single opus frame: currently persists it to disk; the
     * decode / speech-recognition stages are stubbed out below.
     *
     * @param {Buffer} audioData
     * @param {{deviceId: string, clientId: string, timestamp: number,
     *          protocolVersion: number}} metadata
     */
    async processOpusAudio(audioData, metadata) {
        try {
            // Await so failures surface in this try/catch instead of
            // becoming an unhandled promise rejection.
            await saveAudioToFile(audioData, metadata)

            // await decodeOpusAudio(audioData, metadata)

            // await sendToSpeechRecognization(audioData, metadata)

        } catch(error) {
            // Use metadata?.deviceId: the old code referenced `deviceId`
            // bindings scoped inside the try block, which made the catch
            // handler itself throw a ReferenceError.
            console.error(`process audio error: ${metadata?.deviceId}, ${metadata?.clientId}`, error)
        }
    }
}

// Shared singleton: the whole server imports this one AudioCodec instance.
const audioCodec = new AudioCodec();

module.exports = audioCodec;