#!/usr/bin/env python3
import sherpa_ncnn
import os
import queue
import sounddevice as sd
import pyaudio
import vosk
import sys
import numpy as np
import re

# Capture parameters shared by AudioRecognizer.recognize2().
# NOTE(review): `format` shadows the `format` builtin; kept for backward
# compatibility since this module-level name is part of the file's interface.
format = pyaudio.paFloat32  # audio sample format: 32-bit float
npformat = np.float32  # matching numpy dtype used to decode the raw byte buffer
channels = 1  # number of channels (mono capture)

class AudioRecognizer:
    """Streaming Mandarin speech recognizer backed by sherpa-ncnn.

    Loads a streaming zipformer model from the user's Lepi data directory
    and offers two capture front-ends:

    * ``recognize``  -- reads audio via ``sounddevice``.
    * ``recognize2`` -- reads audio via ``pyaudio``, preferring the wm8960
      sound card when one is present.

    Both methods stream 100 ms chunks into the recognizer and stop once the
    decoded text has been stable for ``max_silent_count`` consecutive chunks
    (i.e. the speaker has paused).
    """

    def __init__(self):
        # Flag checked by the capture loops; set it to False (e.g. from
        # another thread) to stop an in-progress recognition.
        self.detecting = False
        self.model_dir = os.path.join(
            os.path.expanduser("~"),
            "Lepi_Data/ros/audio_recognizer/sherpa-ncnn-streaming-zipformer-zh-14M-2023-02-23",
        )
        # Please replace the model files if needed.
        # See https://k2-fsa.github.io/sherpa/ncnn/pretrained_models/index.html
        # for download links.
        self.recognizer = sherpa_ncnn.Recognizer(
            tokens=os.path.join(self.model_dir, "tokens.txt"),
            encoder_param=os.path.join(
                self.model_dir, "encoder_jit_trace-pnnx.ncnn.param"),
            encoder_bin=os.path.join(
                self.model_dir, "encoder_jit_trace-pnnx.ncnn.bin"),
            decoder_param=os.path.join(
                self.model_dir, "decoder_jit_trace-pnnx.ncnn.param"),
            decoder_bin=os.path.join(
                self.model_dir, "decoder_jit_trace-pnnx.ncnn.bin"),
            joiner_param=os.path.join(
                self.model_dir, "joiner_jit_trace-pnnx.ncnn.param"),
            joiner_bin=os.path.join(
                self.model_dir, "joiner_jit_trace-pnnx.ncnn.bin"),
            num_threads=2,
        )
        # Number of consecutive 100 ms chunks with unchanged text before an
        # utterance is considered finished.
        self.max_silent_count = 10
        # BUGFIX: give recognize2 a defined device index even when no wm8960
        # card exists (None makes pyaudio fall back to the default device);
        # previously the attribute was only created inside list_audio_devices.
        self.input_device_index = None
        self.list_audio_devices()

    def list_audio_devices(self):
        """Scan pyaudio devices and remember the index of the wm8960 card.

        Sets ``self.input_device_index`` when a device whose name contains
        ``'wm8960'`` is found; otherwise leaves it at its current value.
        (An unreachable sounddevice-based variant that followed the original
        ``return`` has been removed.)
        """
        p = pyaudio.PyAudio()
        try:
            for i in range(p.get_device_count()):
                info = p.get_device_info_by_index(i)
                if 'wm8960' in info["name"]:
                    self.input_device_index = info["index"]
                    print("Device {} = {}".format(info["index"], info["name"]))
        finally:
            # BUGFIX: release the PortAudio handle; the original never
            # terminated this temporary PyAudio instance.
            p.terminate()

    def recognize(self):
        """Capture audio via sounddevice until the speaker pauses.

        Returns:
            The recognized text, or ``""`` if nothing was decoded before an
            error or KeyboardInterrupt occurred.
        """
        silent_count = self.max_silent_count
        recognizer = self.recognizer
        result = ""
        try:
            print("Started! Please speak")
            sample_rate = recognizer.sample_rate  # model rate (16 kHz)
            samples_per_read = int(0.1 * sample_rate)  # 0.1 second = 100 ms
            last_result = ""
            with sd.InputStream(
                channels=1, dtype="float32", samplerate=sample_rate
            ) as s:
                self.detecting = True
                while self.detecting:
                    samples, _ = s.read(samples_per_read)  # a blocking read
                    recognizer.accept_waveform(sample_rate, samples.reshape(-1))
                    if last_result == recognizer.text:
                        # No new text in this chunk: count toward silence.
                        silent_count -= 1
                    else:
                        last_result = recognizer.text
                        silent_count = self.max_silent_count
                        print(last_result)
                    if silent_count <= 0 and len(recognizer.text) > 0:
                        result = recognizer.text
                        break
                recognizer.reset()
                return result
        except KeyboardInterrupt:
            print("\nDone")
            return result
        except Exception as e:
            print(type(e).__name__ + ": " + str(e))
            return result

    def recognize2(self):
        """Capture audio via pyaudio until the speaker pauses.

        Uses the wm8960 card found by ``list_audio_devices`` when available,
        otherwise the default input device.

        Returns:
            The recognized text, or ``""`` on error or KeyboardInterrupt.
        """
        silent_count = self.max_silent_count
        last_result = ""
        recognizer = self.recognizer
        sample_rate = recognizer.sample_rate
        chunk = int(0.1 * sample_rate)  # 0.1 second = 100 ms
        try:
            self.p = pyaudio.PyAudio()
            try:
                self.stream = self.p.open(
                    format=format,
                    channels=channels,
                    rate=sample_rate,
                    input=True,
                    input_device_index=self.input_device_index,
                    frames_per_buffer=chunk,
                )
                try:
                    self.detecting = True
                    while self.detecting:
                        in_data = self.stream.read(chunk)
                        audio_data = np.frombuffer(in_data, dtype=npformat)
                        recognizer.accept_waveform(sample_rate, audio_data)
                        if last_result == recognizer.text:
                            silent_count -= 1
                        else:
                            last_result = recognizer.text
                            silent_count = self.max_silent_count
                        if silent_count <= 0 and len(recognizer.text) > 0:
                            break
                    recognizer.reset()
                finally:
                    # BUGFIX: stop/close the stream even when the capture
                    # loop raises; the original leaked it on exceptions.
                    self.stream.stop_stream()
                    self.stream.close()
            finally:
                self.p.terminate()
            return last_result
        except KeyboardInterrupt:
            print("\nDone")
            return last_result
        except Exception as e:
            print(type(e).__name__ + ": " + str(e))
            return last_result
        
if __name__ == '__main__':
    # Run one interactive recognition pass and echo the decoded text.
    recognizer = AudioRecognizer()
    text = recognizer.recognize()
    print(text)
