import numpy as np
# import pyttsx3
import pyaudio
import wave
from queue import Queue
from time import sleep

'''
from speech_recognition import (
    AudioData,
    Microphone,
    Recognizer,
)
'''

from transcriber_model import TranscriberWhisper


class MicrophoneListener():
    """Record speech from the default microphone and convert it to text.

    Workflow: call ``listen()`` to capture raw PCM chunks onto an internal
    queue, then ``speech2text()`` to drain the queue and transcribe it with
    the Whisper model wrapped by ``TranscriberWhisper``.
    """

    def __init__(self):
        # Whisper-based transcriber used by _recognize_text_in_audio().
        self.transcriber = TranscriberWhisper()

    def listen(self, time_second=10, save_as_file=False):
        """
        Record from the default input device for up to ``time_second`` seconds.

        Raw PCM chunks are pushed onto ``self.data_queue`` for later
        transcription via ``speech2text()``.  Recording stops early when
        ``self.stopCmd`` is set to True (e.g. by another thread) or when
        the stream has been stopped externally.

        :param time_second: maximum recording duration in seconds.
        :param save_as_file: when True, also write the audio to ``output.wav``.
        :return: None.  Errors are printed and swallowed (best-effort API).
        """
        self.data_queue = Queue()
        self.stopCmd = False                     # external code may set this True to stop early

        self.chunk = 1024                        # record in chunks of 1024 samples
        self.sample_format = pyaudio.paInt16     # 16 bits per sample
        self.channels = 1                        # mono recording
        self.fs = 16000                          # sample rate (Hz); matches Whisper's expected rate
        self.seconds = time_second
        self.filename = "output.wav"

        self.p = pyaudio.PyAudio()  # Create an interface to PortAudio
        print('Recording...')

        self.stream = self.p.open(
            format=self.sample_format,
            channels=self.channels,
            rate=self.fs,
            frames_per_buffer=self.chunk,
            input=True,
        )

        # Keep a local copy of the frames so that saving to disk does not
        # drain self.data_queue, which speech2text() still needs to consume.
        frames = []
        try:
            for _ in range(int(self.fs / self.chunk * self.seconds)):
                if self.stream.is_stopped():
                    break
                data = self.stream.read(self.chunk)
                frames.append(data)
                self.data_queue.put(data)
                if self.stopCmd:
                    break

            if save_as_file:
                # BUG FIX: the original joined the Queue object itself
                # (b''.join(self.data_queue)), which raises TypeError because
                # Queue is not iterable; join the accumulated frame list.
                wf = wave.open(self.filename, 'wb')
                try:
                    wf.setnchannels(self.channels)
                    wf.setsampwidth(self.p.get_sample_size(self.sample_format))
                    wf.setframerate(self.fs)
                    wf.writeframes(b''.join(frames))
                    print('Wave has been saved!')
                finally:
                    wf.close()

        except Exception as e:
            # BUG FIX: the original printed only the literal 'Exception',
            # discarding the actual error.
            print(f'Exception while recording: {e}')
            return None
        finally:
            # Always release the audio resources, even when recording failed
            # (the original leaked the stream/PyAudio handle on error).
            self.stream.stop_stream()
            self.stream.close()
            self.p.terminate()
            print('Finished recording')

    def speech2text(self):
        """Block until the queued audio yields a transcription, then return it."""
        while True:
            text = self._recognize_text_in_audio(self.data_queue)
            if text is not None:
                print("text is not None")
                return text
            else:
                print("text is None")
                sleep(0.1)

    def _recognize_text_in_audio(self, data_queue):
        """
        Drain *data_queue* and transcribe the accumulated audio.

        :param data_queue: Queue of raw 16-bit little-endian PCM byte chunks.
        :return: the stripped transcription text, or None when the queue
                 contained no audio.
        """
        # Pull all raw recorded audio currently in the queue.
        audio_data = b''
        while not data_queue.empty():
            audio_data += data_queue.get()

        if audio_data:
            # Convert the in-RAM buffer directly (no temp file): 16-bit ints
            # become float32 samples normalised to [-1, 1), which is the
            # input format the Whisper model expects.
            audio_np = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32) / 32768.0

            result = self.transcriber.transcribe(audio_np)
            return result['text'].strip()
        else:
            return None
