
// Private per-instance state, keyed by Recorder instance. A WeakMap lets an
// instance's state be garbage-collected together with the instance itself.
const target = new WeakMap();

/**
 * Request microphone access from the user.
 *
 * @returns {Promise<MediaStream>} resolves with the captured audio stream;
 *   rejects with an Error wrapping the underlying getUserMedia failure.
 */
function getPromise() {
    return navigator.mediaDevices.getUserMedia({audio: true}).catch(err => {
        // Preserve the original error (name, stack, type) via `cause` instead
        // of discarding everything but the message — callers may need to tell
        // a permission denial apart from a missing device. Engines that
        // predate the `cause` option simply ignore the extra argument.
        throw new Error(err.message, { cause: err });
    });
}

/**
 * Default recorder configuration; merged with caller-supplied overrides in
 * the Recorder constructor.
 */
const options = {
    recordTime: 60,   // max length in seconds (only consulted when unlimited is false)
    unlimited: true,  // when true, record until stop() is called explicitly
    volume: 1,        // gain applied to the analysed signal, expected in [0, 1]
    onEnd() {},       // no-op callback fired when recorded data is delivered
};

// Mirror of the MediaRecorder.state string values. Frozen so the shared enum
// cannot be mutated at runtime by accident.
const states = Object.freeze({
    inactive: 'inactive',
    recording: 'recording',
    paused: 'paused'
});

class Recorder {
    /**
     * Microphone recorder built on MediaRecorder + Web Audio, exposing live
     * frequency-analysis data while recording.
     *
     * @param {Object} [ops] - Overrides for the module-level `options` defaults.
     * @param {number} [ops.recordTime] - Max length in seconds (used when not unlimited).
     * @param {boolean} [ops.unlimited] - When false, auto-stop after recordTime.
     * @param {number} [ops.volume] - Analyser gain, clamped to [0, 1].
     * @param {Function} [ops.onEnd] - Invoked once when recording stops.
     */
    constructor(ops) {
        ops = Object.assign({}, options, ops);
        // Guard every browser global with typeof: referencing an undeclared
        // identifier (e.g. MediaRecorder on an old browser) throws a
        // ReferenceError, so a bare truthiness test would itself crash.
        const isSupported = !!(
            typeof navigator !== 'undefined' &&
            navigator.mediaDevices &&
            typeof MediaRecorder !== 'undefined' &&
            (typeof AudioContext !== 'undefined' || typeof webkitAudioContext !== 'undefined')
        );
        const recordTime = Number(ops.recordTime);
        const volume = parseFloat(ops.volume);
        target.set(this, {
            isSupported,
            unlimited: !!ops.unlimited,
            // Fall back to the defaults instead of propagating NaN, which
            // would poison the gain value and the auto-stop timeout.
            recordTime: Number.isFinite(recordTime) ? recordTime : options.recordTime,
            volume: Number.isFinite(volume) ? Math.max(0, Math.min(volume, 1)) : options.volume,
            chunks: [],
            onEnd: typeof ops.onEnd === 'function' ? ops.onEnd : options.onEnd,
        })
    }
    /** @returns {string} 'inactive' | 'recording' | 'paused'. */
    get state() {
        const o = target.get(this);
        // No entry (instance destroyed) or no recorder yet: report inactive
        // instead of throwing a TypeError.
        if(!o || !o.recorder) return states.inactive;
        return o.recorder.state;
    }
    /** @returns {boolean} whether this environment can record audio. */
    get isSupported() {
        const o = target.get(this);
        return o ? o.isSupported : false;
    }
    /** @returns {Uint8Array} current byte frequency data (empty before start()). */
    get getByteFrequencyData() {
        const o = target.get(this);
        if(!o || !o.audioContext) return new Uint8Array(0);
        const buffer = new Uint8Array(o.analyser.frequencyBinCount);
        o.analyser.getByteFrequencyData(buffer);
        return buffer;
    }
    /** @returns {Float32Array} current float frequency data (empty before start()). */
    get getFloatFrequencyData() {
        const o = target.get(this);
        if(!o || !o.audioContext) return new Float32Array(0);
        const buffer = new Float32Array(o.analyser.frequencyBinCount);
        o.analyser.getFloatFrequencyData(buffer);
        return buffer;
    }
    /** @returns {number} seconds on the audio-context clock (0 before start()). */
    get currentTime() {
        const o = target.get(this);
        if(!o || !o.audioContext) return 0;
        return o.audioContext.currentTime;
    }
    /** @returns {Blob} the recorded chunks packaged as an Ogg/Opus blob. */
    getBlob() {
        const o = target.get(this);
        return new Blob(o ? o.chunks : [], {type: 'audio/ogg; codecs=opus' });
    }
    /**
     * Ask for microphone access and begin recording. Ignored while already
     * recording/paused, after destroy(), or while a previous start() is still
     * awaiting the permission prompt (closing the re-entrancy window the
     * original had across the await).
     */
    async start() {
        if(this.state !== states.inactive) return;
        const o = target.get(this);
        if(!o || o.starting) return;
        o.starting = true;
        try {
            const stream = await getPromise();
            const Context = typeof AudioContext !== 'undefined' ? AudioContext : webkitAudioContext;
            o.audioContext = new Context();
            o.analyser = o.audioContext.createAnalyser();
            o.analyser.fftSize = 256;
            // mic -> gain (volume) -> analyser; the recorder taps the raw stream.
            const audioSource = o.audioContext.createMediaStreamSource(stream);
            const gain = o.audioContext.createGain();
            gain.gain.value = o.volume;
            audioSource.connect(gain);
            gain.connect(o.analyser);
            o.recorder = new MediaRecorder(stream);
            o.chunks = [];
            o.recorder.ondataavailable = e => {
                o.chunks.push(e.data);
            };
            // Fire onEnd exactly once, when recording actually stops —
            // ondataavailable can fire more than once per session, so the
            // original per-chunk callback could run repeatedly.
            o.recorder.onstop = () => {
                o.onEnd();
            };
            o.recorder.start();
            if(!o.unlimited) {
                o.tid = setTimeout(() => {
                    this.stop();
                }, o.recordTime * 1000);
            }
        } finally {
            o.starting = false;
        }
    }
    /** Pause recording and freeze the audio-context clock. */
    pause() {
        if(this.state !== states.recording) return;
        const o = target.get(this);
        o.recorder.pause();
        o.audioContext.suspend();
        if(!o.unlimited) clearTimeout(o.tid);
    }
    /** Resume a paused recording and re-arm the auto-stop timer. */
    resume() {
        if(this.state !== states.paused) return;
        const o = target.get(this);
        o.recorder.resume();
        o.audioContext.resume();
        if(!o.unlimited) {
            // currentTime does not advance while the context is suspended, so
            // it measures recorded time; the remainder re-arms the timer.
            o.tid = setTimeout(() => {
                this.stop();
            }, Math.max(0, o.recordTime - o.audioContext.currentTime) * 1000);
        }
    }
    /** Stop recording, release the microphone, and close the audio context. */
    stop() {
        if(this.state === states.inactive) return;
        const o = target.get(this);
        if(!o.unlimited) clearTimeout(o.tid);
        o.recorder.stop();
        // Stop the tracks so the browser's recording indicator turns off.
        o.recorder.stream.getAudioTracks().forEach(track => track.stop());
        o.audioContext.close();
    }
    /** Stop (if needed) and drop all per-instance state. */
    destroy() {
        this.stop();
        target.delete(this);
    }
}

export default Recorder