import asyncio
import datetime
import os
import threading
import time
import traceback
from typing import Optional

import speech_recognition as sr
from speech_recognition.audio import AudioData

from examples.rest import TraineRestClient, TraineRestConnection, RecognitionTransType

class MicrophoneRecognition:
    """Capture microphone audio on a background thread and stream it to a
    speech-recognition service through the project's REST/WebSocket client.

    Lifecycle: connect() -> generate_token() -> start_mic_listen_thread()
    -> await rest_recognition() -> disconnect().
    """

    def __init__(self):
        # Project REST client; endpoints and credentials are hard-coded for
        # this example script.
        self.__client: TraineRestClient = TraineRestClient('http://localhost:9090/api/v1', 'http://localhost:9090/auth',
                                                           'xiaofanku@live.cn', 'admin888')
        # Token produced by generate_token(); required before streaming.
        self.__recognition_token = None
        # Hand-off queue: mic thread produces AudioData, the asyncio
        # consumer in rest_recognition() awaits it.
        self.__audioQueue: asyncio.Queue[AudioData] = asyncio.Queue()
        # Background worker bookkeeping.
        self.__mic_thread: Optional[threading.Thread] = None
        self.__rest_thread: Optional[threading.Thread] = None
        self.__thread_quit: Optional[threading.Event] = None
        # Loop used to marshal queue puts from the mic thread back onto the
        # event-loop thread (asyncio.Queue is not thread-safe).
        self.event_loop = asyncio.get_event_loop()

    def connect(self, timeout: int = 60):
        """Open the REST connection (returned object is used as a context
        manager by the caller).

        Args:
            timeout (int): connection timeout in seconds.
        """
        print('trc create passport')
        return self.__client.connection(timeout)

    def generate_token(self, transfer_data_type: str = 'wav', connect: TraineRestConnection = None, ttl: int = 3600):
        """Request a recognition token from the service.

        Args:
            transfer_data_type (str): wire format of the audio chunks.
            connect (TraineRestConnection): open connection from connect().
            ttl (int): token lifetime in seconds.
        """
        print('generate recognition token')
        self.__recognition_token = self.__client.create_recognition(RecognitionTransType(transfer_data_type), connect,
                                                                    ttl)

    def disconnect(self):
        """Signal workers to stop, join them, and close the REST client.

        NOTE(review): the mic thread may be blocked inside listen() with no
        timeout, so join() can take until the current phrase completes.
        """
        if self.__thread_quit:
            self.__thread_quit.set()
            self.__thread_quit = None

        if self.__mic_thread:
            self.__mic_thread.join()
            self.__mic_thread = None

        if self.__rest_thread:
            self.__rest_thread.join()
            self.__rest_thread = None

        self.__client.close()

    def start_mic_listen_thread(self):
        """Start the background thread that records phrases from the default
        microphone and feeds them into the audio queue."""
        print('start micphone listen thread')

        def __mic_listen(sample_rate: int, audioQueue: asyncio.Queue, quitEvent: threading.Event):
            recognition = sr.Recognizer()
            while not quitEvent.is_set():
                with sr.Microphone(sample_rate=sample_rate) as source:
                    print("please say something")
                    recognition.adjust_for_ambient_noise(source)
                    audio: AudioData = recognition.listen(source, timeout=None, phrase_time_limit=None)
                    # BUG FIX: asyncio.Queue is not thread-safe and a plain
                    # put_nowait() from this thread does not wake a coroutine
                    # already awaiting get() on the loop thread. Marshal the
                    # put onto the event loop instead.
                    self.event_loop.call_soon_threadsafe(audioQueue.put_nowait, audio)
                    time.sleep(1.0)

        # Start microphone recording at 16 kHz.
        self.__thread_quit = threading.Event()
        self.__mic_thread = threading.Thread(target=__mic_listen, args=(16000, self.__audioQueue, self.__thread_quit))
        self.__mic_thread.start()

    async def rest_recognition(self, connect: TraineRestConnection = None):
        """Consume queued audio and stream it over the recognition WebSocket,
        printing each JSON response, until disconnect() sets the quit event.

        Args:
            connect (TraineRestConnection): open connection from connect().
        """
        print('start recognition stream')
        assert self.__recognition_token, 'recognition token is None'

        async with self.__client.recognition_stream_pipe(connect, self.__recognition_token) as wsp:
            while not self.__thread_quit.is_set():
                audio: AudioData = await self.__audioQueue.get()
                if audio is None:
                    print('print queue audio is None')
                    await asyncio.sleep(1.0)
                    # BUG FIX: previously fell through and called
                    # get_wav_data() on None, raising AttributeError.
                    continue

                # Send one complete WAV chunk, then wait for its result.
                await wsp.send_bytes(audio.get_wav_data())
                jso = await wsp.receive_json()
                print('ws response:%s' % jso)
            await asyncio.sleep(0.1)
        print('recognition finish')

    # Persist captured audio for debugging / file-based hand-off.
    def save_voice(self, rawDataBytes: bytes) -> str:
        """Save raw WAV bytes to a timestamped file under ./voice.

        Args:
            rawDataBytes (bytes): raw WAV audio bytes.

        Returns:
            str: path of the written audio file.
        """
        # BUG FIX: open() fails with FileNotFoundError when ./voice does not
        # exist; create it on demand.
        os.makedirs('./voice', exist_ok=True)
        filename = datetime.datetime.now().strftime('chunk_%Y-%m-%d_%H-%M-%S_%f.wav')
        cachePath = "{}/{}".format('./voice', filename)
        with open(cachePath, "wb") as f:
            f.write(rawDataBytes)
        return cachePath

if __name__ == "__main__":
    # Synchronous entry point that drives the async recognition pipeline.
    event_loop = asyncio.get_event_loop()
    recognizer = MicrophoneRecognition()
    try:
        with recognizer.connect() as connection:
            print('wait connection token generated')
            recognizer.generate_token(connect=connection)
            # Launch the background microphone capture thread.
            recognizer.start_mic_listen_thread()
            # Block here until the recognition coroutine finishes.
            event_loop.run_until_complete(recognizer.rest_recognition(connection))
    except Exception as err:
        print('exception:%s, message:%s' % (repr(err), str(err)))
        print(traceback.format_exc())
    finally:
        recognizer.disconnect()
        event_loop.close()