import warnings
warnings.filterwarnings("ignore")
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]='2'
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(40)
from tensorflow.keras import models
import numpy as np
from scipy.io import wavfile
import pyaudio
import wave
import pyttsx3 as pyttsx
import scipy.io.wavfile
from scipy import signal

CHUNK = 1024  # frames read from the stream per buffer
FORMAT = pyaudio.paInt16  # 16-bit signed integer samples
CHANNELS = 2  # stereo capture
RATE = 16000  # capture sampling rate in Hz
RECORD_SECONDS = 2  # duration of each recording
WAVE_OUTPUT_FILENAME = "train_0001.wav"  # file the recording is written to
PLAY_FILER_PATH = 'English-voice'  # clip directory; only referenced in commented-out code below
def Voice_recording():
    """Record RECORD_SECONDS of audio from the default input device and
    save it to WAVE_OUTPUT_FILENAME as a PCM WAV file.

    Uses the module-level CHUNK / FORMAT / CHANNELS / RATE settings.
    Blocks until the recording is finished. Returns None.
    """
    p = pyaudio.PyAudio()  # PortAudio handle: recording, playback, WAV helpers
    frames = []
    try:
        # Open the input stream.  Audio is handled chunk-by-chunk: each
        # frame is one quantized sample per channel, and CHUNK frames are
        # buffered per read (buffer size is limited by the backend).
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)

        print("start recording......")
        try:
            for _ in range(int(RATE / CHUNK * RECORD_SECONDS)):
                frames.append(stream.read(CHUNK))
        finally:
            print("end!")
            # Stop and close the stream even if a read failed mid-loop.
            stream.stop_stream()
            stream.close()

        # Query the sample width while the PyAudio instance is still live.
        # (The original asked after terminate(), which happens to work but
        # is outside the documented API lifecycle.)
        sample_width = p.get_sample_size(FORMAT)
    finally:
        p.terminate()

    # wave.open writes the raw frame bytes collected above.
    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    try:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(sample_width)
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
    finally:
        wf.close()


def get_log_spec(audio, sr, window_length=20, step_length=10, eps=1e-8):
    """Compute a log-magnitude spectrogram of a 1-D audio signal.

    Parameters
    ----------
    audio : 1-D array-like, the waveform samples.
    sr : int, sampling rate in Hz.
    window_length : float, analysis window length in milliseconds.
    step_length : float, hop between successive windows in milliseconds.
    eps : float, floor added before the log to avoid log(0) on silence.

    Returns
    -------
    (freqs, times, log_spec) where log_spec has shape (time, freq) and
    dtype float32.
    """
    nperseg = int(round(window_length * sr / 1e3))
    hop = int(round(step_length * sr / 1e3))
    # scipy's `noverlap` is the number of SHARED samples between adjacent
    # windows, i.e. nperseg - hop.  The original code passed the hop
    # itself, which only coincides with the correct value when
    # window_length == 2 * step_length (as with the 20/10 ms defaults).
    freq, time, spec = signal.spectrogram(x=audio,
                                          fs=sr,
                                          nperseg=nperseg,
                                          noverlap=nperseg - hop,
                                          detrend=False)
    # Transpose so time is the leading axis; float32 matches model input.
    return freq, time, np.log(spec.T.astype(np.float32) + eps)

def read_data(path):
    """Load a WAV file and convert it to a batched log-spectrogram.

    Parameters
    ----------
    path : str, path to the WAV file to load.

    Returns
    -------
    np.ndarray of shape (1, time, freq): the log-spectrogram with a
    leading batch axis, ready to feed to the model.
    """
    new_sr = 8000  # target rate: the model expects 8 kHz audio
    sr, sample = scipy.io.wavfile.read(path)
    # Keep only the first channel.  The original indexed sample[:, 0]
    # unconditionally, which crashes on mono (1-D) recordings.
    if sample.ndim > 1:
        sample = sample[:, 0]
    # Resample to the target rate, preserving the clip's duration.
    resampled = signal.resample(sample, int(new_sr / sr * sample.shape[0]))
    _, _, spec = get_log_spec(resampled, new_sr)
    # Leading batch axis of size 1.
    return np.array([spec])

# def play_voice(path):
#     wf = wave.open(path, 'rb')
#
#     p = pyaudio.PyAudio()
#
#     stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
#                     channels=wf.getnchannels(),
#                     rate=wf.getframerate(),
#                     output=True)
#
#     data = wf.readframes(CHUNK)
#
#     while data != b'':
#         stream.write(data)
#         data = wf.readframes(CHUNK)
#
#     stream.stop_stream()
#     stream.close()
#
#     p.terminate()

if __name__ == '__main__':
    # 1. Record a fresh voice command from the microphone.
    Voice_recording()
    # 2. Load the recording and convert it to a batched log-spectrogram.
    data = read_data(WAVE_OUTPUT_FILENAME)
    # The network expects 3 input channels; replicate the single
    # spectrogram channel three times -> shape (1, time, freq, 3).
    data = np.stack((data, data, data), axis=3)
    print(data.shape)
    # 3. Load the trained classifier.
    model = models.load_model('MobileNet_3.h5')
    # summary() prints itself; the original wrapped it in print(), which
    # emitted a stray "None" line after the table.
    model.summary()
    # 4. Predict the command and print the class (0=left, 1=right, 2=stop).
    pred = model.predict(data)
    num = int(np.argmax(pred))
    english_labels = ['left', 'right', 'stop']
    print(num, "--" + english_labels[num])
    # 5. Speak the result back in Chinese via text-to-speech.
    label_name = ['左转', '右转', '停下']  # "turn left", "turn right", "stop"
    engine = pyttsx.init()
    engine.say('语音指令')  # "voice command"
    engine.say(label_name[num])
    engine.runAndWait()