from audio.system_capture import fallback_system_audio_capture, init_system_audio
import pyaudio, numpy as np, time


class AudioStream:
    """Produce fixed-size float32 audio chunks from one of three sources.

    Supported sources:
      - "mic":    live microphone capture via PyAudio (mono, 16-bit, normalized).
      - "system": system loopback capture via the project's ``init_system_audio``
                  helper, with ``fallback_system_audio_capture`` as a backup path.
      - "file":   decode an audio file with librosa and replay it chunk by chunk.

    Every yielded chunk is a numpy float32 array of ``self.chunk`` samples.
    """

    def __init__(self, source="mic", file_path=None, rate=16000, chunk_duration=2):
        """Configure the stream.

        Args:
            source: one of "mic", "system", "file".
            file_path: path to the audio file (only used when source == "file").
            rate: sample rate in Hz.
            chunk_duration: length of each yielded chunk, in seconds.
        """
        self.source = source
        self.file_path = file_path
        self.rate = rate
        # Number of samples per yielded chunk.
        self.chunk = int(rate * chunk_duration)

        if source == "mic":
            self.audio_interface = pyaudio.PyAudio()
        elif source == "system":
            # Loopback capture handle from the project helper.
            self.default_speaker = init_system_audio()

    def generator(self):
        """Return a generator yielding float32 chunks for the configured source.

        Raises:
            ValueError: if ``self.source`` is not "mic", "system", or "file".
        """
        if self.source == "mic":
            return self._mic_generator()
        elif self.source == "system":
            return self._system_generator()
        elif self.source == "file":
            return self._file_generator()
        else:
            raise ValueError("不支持的音频源")

    def _mic_generator(self):
        """Yield mono microphone chunks normalized to [-1.0, 1.0)."""
        stream = self.audio_interface.open(format=pyaudio.paInt16, channels=1, rate=self.rate,
                                           input=True, frames_per_buffer=self.chunk)
        try:
            while True:
                data = stream.read(self.chunk, exception_on_overflow=False)
                # int16 PCM -> float32 in [-1, 1).
                yield np.frombuffer(data, dtype=np.int16).astype(np.float32) / 32768.0
        finally:
            # BUGFIX: the stream was previously never released; stop and close
            # it when the generator is closed or garbage-collected.
            stream.stop_stream()
            stream.close()

    def _system_generator(self):
        """Yield mono float32 chunks captured from the system loopback device."""
        with self.default_speaker.recorder(samplerate=self.rate) as mic:
            while True:
                try:
                    audio = mic.record(numframes=self.chunk)
                    if audio.ndim > 1:
                        # Down-mix multi-channel capture to mono.
                        audio = np.mean(audio, axis=1)
                    if np.max(np.abs(audio)) < 0.001:
                        # Treat near-silence as exact silence.
                        audio = np.zeros_like(audio)
                    yield audio.astype(np.float32)
                except Exception:
                    # BUGFIX: was a bare `except:` which also swallowed
                    # GeneratorExit/KeyboardInterrupt — closing the generator
                    # while suspended at the yield then raised
                    # "generator ignored GeneratorExit". On real capture
                    # errors, fall back to the project-level capture helper.
                    yield from fallback_system_audio_capture(self.chunk, self.rate)

    def _file_generator(self):
        """Decode ``self.file_path`` and replay it as fixed-size chunks."""
        import librosa  # local import: librosa is only needed for file playback
        audio, sr = librosa.load(self.file_path, sr=self.rate, mono=True)
        audio = audio.astype(np.float32)
        for i in range(0, len(audio), self.chunk):
            chunk = audio[i:i + self.chunk]
            if len(chunk) < self.chunk:
                # Zero-pad the trailing partial chunk to the full size.
                chunk = np.pad(chunk, (0, self.chunk - len(chunk)))
            yield chunk
            # Throttle slightly to simulate real-time streaming.
            time.sleep(0.1)
