import threading
import pyaudio
import wave
from time import sleep
import tkinter as tk
import os
from Speech.asr_test import wav2mfcc
from keras.models import load_model
import numpy as np
from datetime import datetime
import time
import librosa
from pydub import AudioSegment


""" 根据确定一个固定时间窗口，每隔n秒进行对文件的重新写入，判断文件是否为空，若为空则不进行操作，若不为空则进行语音识别(根据音频音量大小来判断音频是否为空)"""

class RecordThread(threading.Thread):
    """Background audio recorder with playback and keyword recognition.

    Captures 16 kHz, mono, 16-bit audio.  ``run()`` records continuously
    until ``stoprecord()`` is called; ``save()`` records a fixed 2-second
    clip; ``recognize()`` classifies every wav file in the record directory
    with a pre-trained Keras model; ``rt_recognize()`` loops record+recognize,
    using RMS loudness to skip silent clips.
    """

    def __init__(self, audiofile='record.wav', filepath=r'/home/lisen/tool/PyProjects/multisensor_information/record/'):
        """
        :param audiofile: wav file name for the recording
        :param filepath: directory where recordings are saved
        """
        threading.Thread.__init__(self)
        self.bRecord = True          # run() keeps capturing while this is True
        self.filepath = filepath
        self.audiofile = filepath + audiofile
        self.chunk = 1024            # frames read per buffer
        self.format = pyaudio.paInt16
        self.channels = 1            # mono
        self.rate = 16000            # sample rate in Hz

    def run(self):
        """Capture audio into self.audiofile until stoprecord() is called."""
        audio = pyaudio.PyAudio()
        wavfile = wave.open(self.audiofile, 'wb')
        wavfile.setnchannels(self.channels)
        wavfile.setsampwidth(audio.get_sample_size(self.format))
        wavfile.setframerate(self.rate)
        wavstream = audio.open(format=self.format,
                               channels=self.channels,
                               rate=self.rate,
                               input=True,
                               frames_per_buffer=self.chunk)
        try:
            while self.bRecord:
                wavfile.writeframes(wavstream.read(self.chunk))
        finally:
            # BUG FIX: the wave file was never closed (handle leaked and the
            # WAV header was left unfinalized); close everything even if
            # read()/writeframes() raises.
            wavstream.stop_stream()
            wavstream.close()
            audio.terminate()
            wavfile.close()

    def play(self):
        """Play back the recorded wav file through the default output device."""
        wf = wave.open(self.audiofile, 'rb')
        p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), channels=self.channels,
                        rate=wf.getframerate(), output=True)

        # BUG FIX: readframes() returns bytes, so the old `data == ""` test
        # never matched in Python 3 and the loop never terminated; a pre-loop
        # read also silently discarded the first chunk of audio.
        while True:
            data = wf.readframes(self.chunk)
            if not data:
                break
            stream.write(data)

        stream.stop_stream()  # stop the output stream
        stream.close()
        p.terminate()  # release PyAudio
        wf.close()

    def recognize(self):
        """Classify every wav file in the record directory and print the result."""
        model = load_model('/home/lisen/tool/PyProjects/multisensor_information/weight_files/Speech_weight/asr_model_weights11.h5')  # load the trained model

        # Label set in the same order as the training labels (index = class id).
        name = ["down", "go", "left", "right", "stop", "up"]

        path = r'/home/lisen/tool/PyProjects/multisensor_information/record/'
        files = os.listdir(path)

        for fname in files:
            # BUG FIX: the original predicted on the *accumulated* feature
            # array each iteration and always read result[0], so every file
            # printed the prediction of the first file.  Predict per file.
            X = np.array([wav2mfcc(path + fname)])
            print(X.shape)
            x = X.reshape((-1, 220))
            result = model.predict(x, batch_size=128)[0]
            print(np.shape(result))
            print("识别的概率", result)
            predict = np.argmax(result)
            print("识别的语音结果是：", name[predict])

    def save(self):
        """Record a fixed 2-second clip into self.audiofile."""
        print("开始录音,请说话......")
        audio = pyaudio.PyAudio()

        wavfile = wave.open(self.audiofile, 'wb')
        wavfile.setnchannels(self.channels)
        wavfile.setsampwidth(audio.get_sample_size(self.format))
        wavfile.setframerate(self.rate)
        wavstream = audio.open(format=self.format,
                               channels=self.channels,
                               rate=self.rate,
                               input=True,
                               frames_per_buffer=self.chunk)

        frames = []
        for _ in range(int(self.rate / self.chunk * 2)):  # 2 seconds of audio
            frames.append(wavstream.read(self.chunk))

        print("录音结束")

        wavfile.writeframes(b''.join(frames))
        wavstream.stop_stream()
        wavstream.close()
        audio.terminate()
        wavfile.close()  # BUG FIX: finalize the WAV header and release the handle

    def stoprecord(self):
        """Signal run() to stop capturing."""
        self.bRecord = False

    def rt_recognize(self):
        """Loop forever: record 2 s, then recognize if the clip is loud enough."""
        while True:
            print(' ')
            self.save()
            # NOTE(review): reads a hard-coded record.wav rather than
            # self.audiofile — confirm the two always match.
            soud = AudioSegment.from_file(r'/home/lisen/tool/PyProjects/multisensor_information/record/record.wav')
            loudness = soud.rms
            # RMS gate: clips quieter than 1000 are treated as silence.
            if loudness > 1000:
                self.recognize()
            else:
                print(' ')
                print('重新录音')
                time.sleep(1)
            time.sleep(1)


if __name__ == '__main__':
    # Build a minimal control panel: one button per recorder action.
    recorder = RecordThread('record.wav')  # record50_back.wav
    window = tk.Tk()
    for label, action in (
        ('录音', recorder.start),
        ('停止', recorder.stoprecord),
        ('识别', recorder.recognize),
        ('播放', recorder.play),
        ('实时识别', recorder.rt_recognize),
    ):
        tk.Button(window, text=label, command=action).pack()
    window.mainloop()