import pyaudio
import wave
import tensorflow as tf
from keras.models import load_model
import numpy as np
from pydub import AudioSegment
from Speech.asr_test import wav2mfcc


"""
语音预测类,用于语音识别
根据确定一个固定时间窗口，每隔n秒进行对文件的重新写入，
判断文件是否为空，若为空则不进行操作，
若不为空则进行语音识别(根据音频音量大小来判断音频是否为空)
"""
class SpeechPredictionMoel():
    """Speech-keyword prediction class.

    Records a fixed-length audio window every `save_time` seconds,
    overwriting the same wav file each pass.  The clip's RMS loudness
    decides whether it is "empty": quiet clips are skipped, loud clips
    are classified by the loaded Keras model.
    """

    def __init__(self,wav_path,weight_path,predict_threshold,
                 loudness_threshold=1000,save_time=2,chunk=1024,
                 channels=1, rate=16000,graph=None):
        """
        :param wav_path: path of the temporary wav file each recording overwrites
        :param weight_path: path of the saved Keras model to load
        :param predict_threshold: minimum class probability to report a word
        :param loudness_threshold: minimum RMS loudness to attempt recognition
        :param save_time: recording duration per window, in seconds
        :param chunk: frames per buffer read from the audio stream
        :param channels: number of input channels (1 = mono)
        :param rate: sampling rate in Hz
        :param graph: optional TensorFlow graph to host the model; a fresh
                      one is created when None
        """
        self._wav_path = wav_path
        # Index order must match the model's output-layer class ordering.
        self._predict_word = ["down", "go", "left", "right", "stop","up"]

        self._save_time = save_time                      # recording length (seconds)
        self._predict_threshold = predict_threshold      # min probability to accept
        self._loudness_threshold = loudness_threshold    # min RMS to recognize
        self._chunk = chunk                              # frames per buffer
        self._format = pyaudio.paInt16                   # 16-bit PCM samples
        self._channels = channels                        # 1 = mono
        self._rate = rate                                # sampling rate (Hz)

        # Load the model inside a dedicated graph/session so predictions can
        # later run against the same session (TF1-compat style).
        if graph is None:
            graph = tf.Graph()
        self._graph = graph
        self._sess = tf.compat.v1.Session(graph=self._graph)

        with self._sess.as_default():
            with self._graph.as_default():
                self._model = load_model(weight_path)

    def saveSpeech(self):
        """Record `self._save_time` seconds of audio and overwrite the wav file.

        BUG FIX: the wave writer was previously never closed, so the WAV
        header could be left unflushed when the file was read back; the
        `with` block now guarantees `close()`.  The PyAudio stream and the
        PyAudio instance are likewise released in `finally` clauses.
        """
        print("开始录音,请说话......")
        audio = pyaudio.PyAudio()
        try:
            wavstream = audio.open(format=self._format,
                                   channels=self._channels,
                                   rate=self._rate,
                                   input=True,
                                   frames_per_buffer=self._chunk)
            try:
                frames = [wavstream.read(self._chunk)
                          for _ in range(int(self._rate / self._chunk * self._save_time))]
            finally:
                wavstream.stop_stream()
                wavstream.close()

            with wave.open(self._wav_path, 'wb') as wavfile:
                wavfile.setnchannels(self._channels)
                wavfile.setsampwidth(audio.get_sample_size(self._format))
                wavfile.setframerate(self._rate)
                wavfile.writeframes(b''.join(frames))
        finally:
            audio.terminate()
        print("录音结束")

    def recognizeSpeech(self):
        """Classify the recorded wav file.

        :return: tuple of (predicted word, its probability)
        """
        with self._sess.as_default():
            with self._sess.graph.as_default():
                # Single-sample batch of MFCC features, flattened to the
                # model's expected (N, 220) input shape.
                wavs_data = np.array([wav2mfcc(self._wav_path)]).reshape((-1, 220))
                result = self._model.predict(wavs_data, batch_size=128)[0]
                predict_index = np.argmax(result)
                return self._predict_word[predict_index], result[predict_index]

    def run(self,gui):
        """Main loop: record, loudness-gate, recognize, and report to the GUI.

        Loops until `gui.is_run` is cleared externally; on exit the GUI
        output is blanked.
        """
        gui.is_run = True
        while gui.is_run:
            self.saveSpeech()   # capture the next audio window

            # Loudness gate: treat quiet clips as silence and skip them.
            sound = AudioSegment.from_file(self._wav_path)
            if sound.rms > self._loudness_threshold:
                predict_word, predict_probability = self.recognizeSpeech()
                if predict_probability >= self._predict_threshold:
                    # NOTE(review): flag == 4 appears to select a "show all
                    # predictions" display mode — confirm against the GUI code.
                    if gui.flag == 4:
                        gui.outputAllPredictWord(predict_word, predict_probability, 1)
                    else:
                        gui.outputPredictWord(predict_word)
        gui.outputPredictWord("")
