import logging
import os
import threading
import time
from threading import Thread

import pyaudio
import speech_recognition as sr
from aip import AipSpeech
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *

from service import music_service as ms
from utils import const

""" 你的 APPID AK SK """
const.APP_ID = '25513551'
const.API_KEY = 'yRgQuIjsVY4NtXDqjcoxG11a'
const.SECRET_KEY = '5dgkHGkSSGjGA6dMGBTOym23IjbuBIEO'
client = AipSpeech(const.APP_ID, const.API_KEY, const.SECRET_KEY)

# 常量定义
const.EXIT = '退出'
# const.AWAKE = '小艺'
const.AWAKE = '小明'  # 唤醒词
const.PLAY_A_SONG = 1
# 日志
logging.basicConfig(level=logging.INFO,  # 控制台打印的日志级别
                    filename='audio_control_service.log',
                    filemode='a',  ##模式，有w和a，w就是写模式，每次都会重新写日志，覆盖之前的日志
                    # a是追加模式，默认如果不写的话，就是追加模式
                    format=
                    '%(asctime)s - [line:%(lineno)d] - %(levelname)s: %(message)s'
                    # 日志格式
                    )
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - [line:%(lineno)d] - %(levelname)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)

class AudioControlThread(QThread):
    """Background QThread implementing a wake-word voice-command loop.

    State machine: wait until the wake word (``const.AWAKE``) appears in a
    recognized phrase, then treat the *next* phrase as a command
    (pause / play / exit).  After a pause or play command the wake word is
    required again; an unrecognized command keeps the awake state.
    Recognized "play" text is forwarded to the main thread via
    ``strSignal`` so the GUI thread can start playback itself.
    """

    intSignal = pyqtSignal(int)  # reserved for numeric commands; currently unused
    strSignal = pyqtSignal(str)  # carries recognized command text to the main thread

    def __init__(self):
        super(AudioControlThread, self).__init__()

    def emit_(self, val):
        """Emit *val* (recognized text) to the main thread via strSignal."""
        self.strSignal.emit(val)

    def communicate(self):
        """Run the record → recognize → dispatch loop until an exit command."""
        awake = False  # False until the wake word has been heard
        while True:
            rec()           # record one utterance to the cache WAV file
            res = listen()  # upload it to Baidu ASR, get the transcript
            logging.info(u'[识别结果]' + res)

            if not awake:
                # Still waiting for the wake word.
                if const.AWAKE in res:
                    logging.info(u'[成功唤醒]')
                    awake = True  # next phrase will be treated as a command
                continue

            # Awake: interpret the recognized phrase as a command.
            if '暂停' in res:
                # TODO: actually signal the main thread to pause playback.
                logging.info(u'发送信号给主线程，暂停歌曲')
            elif '播放' in res:
                # Hand the recognized text to the main thread so it can
                # spawn a playback thread for the requested song.
                logging.info(u'发送信号给主线程，播放歌曲')
                self.emit_(res)
            elif '退出' in res:
                logging.info(u'[退出语音识别]')
                break  # leave the loop; the thread's run() then returns
            else:
                continue  # unknown command: stay awake and listen again
            awake = False  # command handled; require the wake word again

    def run(self):
        # QThread entry point.
        self.communicate()


def rec(rate=16000, path="cache/recording.wav"):
    """Record one utterance from the default microphone and save it as WAV.

    Parameters:
        rate: sampling rate in Hz; Baidu ASR (see listen()) expects 16000.
        path: destination WAV file; its parent directory is created if
              missing, so a fresh checkout without a ``cache/`` folder no
              longer crashes with FileNotFoundError.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone(sample_rate=rate) as source:
        print('请说出指令')
        audio = recognizer.listen(source)

    # Make sure the target directory exists before writing.
    directory = os.path.dirname(path)
    if directory:
        os.makedirs(directory, exist_ok=True)
    with open(path, "wb") as f:
        f.write(audio.get_wav_data())


def listen(path="cache/recording.wav"):
    """Send the recorded WAV file to Baidu ASR and return the transcript.

    Parameters:
        path: WAV file produced by rec(); must be 16 kHz mono.

    Returns:
        The best recognition candidate, or an empty string when the ASR
        service reports an error (the original code raised KeyError on
        Baidu error payloads, which contain err_no/err_msg but no
        'result' key).
    """
    with open(path, "rb") as f:
        audio_data = f.read()

    # dev_pid 1537: Mandarin with simple English punctuation model.
    result = client.asr(audio_data, 'wav', 16000, {'dev_pid': 1537})

    candidates = result.get('result')
    if not candidates:
        # Recognition failed; log the raw payload for diagnosis and return
        # a harmless empty transcript (callers only do `in` checks on it).
        logging.error(u'[识别失败] %s', result)
        return ''
    return candidates[0]


if __name__ == '__main__':
    # Standalone demo of the wake-word loop (no GUI / QThread involved):
    # wake word -> pretend to do a task -> exit on the exit command.
    awake = False  # not yet awakened
    while True:
        rec()
        res = listen()
        logging.info('[识别结果]' + res)
        if not awake and const.AWAKE in res:
            # Wake word detected: accept a command on the next utterance.
            awake = True
            print('[电脑] 我在，有什么事？')
        elif awake and const.EXIT not in res:
            # Any non-exit phrase while awake triggers the demo "task".
            awake = False
            print('[程序] 去做别的事情')
            time.sleep(10)  # placeholder for real work
            print('[程序] 完成！')
        elif const.EXIT in res:
            print('[退出程序]')
            break
