from time import time, sleep

import requests
import json
import base64
import os
import logging
import speech_recognition as sr
# import speech

import numpy as np
import matplotlib.pyplot as plt
from PyQt5 import QtCore
from PyQt5.QtCore import QThread, QMutex, QSemaphore

import socket


def get_token():
    """Request a Baidu OAuth2 access token for the speech API.

    Returns:
        str: the ``access_token`` field of the token endpoint's JSON reply.

    Raises:
        requests.RequestException: on network failure or non-2xx status.
        KeyError: if the response JSON has no ``access_token`` field.
    """
    logging.info('开始获取token...')
    token_url = "https://openapi.baidu.com/oauth/2.0/token"
    # NOTE(review): credentials are hard-coded; consider moving them to
    # environment variables or a config file.
    params = {
        "grant_type": "client_credentials",
        "client_id": 'VgmwOxYo6QdgkyT90ACwj6MD',
        "client_secret": '9GC4YY2VFvwc6vSZoXFfMP09vaRIs8gr',
    }
    # Let requests build and escape the query string instead of manual
    # f-string concatenation; bound the call so a hung server cannot block
    # module import forever (callers already catch Exception).
    res = requests.post(token_url, params=params, timeout=10)
    res.raise_for_status()
    return res.json()["access_token"]


# Target host that receives recognized text over UDP (see MicRecognize.to_remote).
remote_ip = '115.156.75.45'

# Shared synchronization primitives: `mux` serializes access to the single
# microphone across threads; `sem` parks worker threads while recognition
# is disabled.
mux = QMutex()
sem = QSemaphore()

# One-time audio/API setup at import time.  Any failure (no microphone,
# no network, bad credentials, privileged port) flips `avaliable` to False
# so the thread classes below become no-ops instead of crashing.
try:
    token = get_token()
    r = sr.Recognizer()
    # for index, name in enumerate(sr.Microphone.list_microphone_names()):
    #     print('%d:%s'%(index,name))
    # assert(False)
    mic = sr.Microphone(sample_rate=16000)
    headers = {'Content-Type': 'application/json'}
    url = "https://vop.baidu.com/server_api"

    # Request template for Baidu's speech endpoint; the `speech` payload and
    # `len` fields are filled in per-utterance before each POST.
    data = {
        "format": "wav",
        "rate": "16000",
                "dev_pid": "1536",
                "cuid": "TEDxPY",
                "len": 0,
                "channel": 1,
                "token": token,
    }

    # Calibrate the recognizer's energy threshold against ambient noise.
    with mic as source:
        r.adjust_for_ambient_noise(source)
    # NOTE: misspelling of "available" kept -- this name is read elsewhere.
    avaliable = True
    # UDP socket used to forward recognized words to `remote_ip`.
    send_soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Binding port 80 typically requires elevated privileges.
    send_soc.bind(('', 80))

except Exception as e:
    avaliable = False
    print(e)
    print('audio not available')


class MicRecognize(QThread):
    """Worker thread that records short utterances from the microphone,
    sends them to Baidu's speech API, and emits recognized text.

    Relies on the module-level objects created at import time
    (``mic``, ``r``, ``data``, ``url``, ``headers``, ``mux``, ``sem``,
    ``send_soc``); when that setup failed (``avaliable`` is False) the
    thread is a no-op.
    """

    # Qt signals: recognized text, error messages, and coarse state
    # ('recording' / 'done') for UI feedback.
    detected_words = QtCore.pyqtSignal(str)
    er_msg_out = QtCore.pyqtSignal(str)
    doing = QtCore.pyqtSignal(str)

    def __init__(self, parent=None):
        super(MicRecognize, self).__init__(parent)
        self.available = avaliable   # False when audio setup failed at import
        self.need_run = False        # toggled via start_listen()
        self.is_recording = False    # True while the mic context is open

    def run(self):
        """Main loop: wait (on ``sem``) until enabled, record up to 2 s,
        POST the wav data to Baidu, and emit the top transcription."""
        if not self.available:
            return
        while True:
            try:
                if not self.need_run:
                    # Park until another thread releases the semaphore.
                    sem.acquire()

                mux.lock()
                try:
                    self.doing.emit('recording')
                    with mic as source:
                        self.is_recording = True
                        audio = r.listen(source, phrase_time_limit=2)
                        self.is_recording = False
                finally:
                    # Bug fix: always release the mutex even if listen()
                    # raises -- a non-recursive QMutex would otherwise
                    # deadlock on the next iteration's lock().
                    mux.unlock()
                self.doing.emit('done')

                t = time()
                raw_data = audio.get_wav_data()  # convert_rate=16000
                # NOTE(review): 16 kHz 16-bit mono is 32000 bytes/s, so this
                # underestimates duration by ~2x; kept as-is to preserve the
                # existing 0.8 threshold -- confirm the intended units.
                duration = len(raw_data) / 16000  # s
                if duration < 0.8:
                    continue
                data['speech'] = base64.b64encode(raw_data).decode('utf-8')
                data['len'] = len(raw_data)

                print('preprocess:%dms' % ((time() - t) * 1000))
                t = time()

                try:
                    # Bug fix: the third positional parameter of
                    # requests.post is ``json``, not ``headers`` -- the
                    # header dict was silently ignored.  Pass it by keyword
                    # so Content-Type is actually sent.
                    req = requests.post(url, data=json.dumps(data),
                                        headers=headers)
                    if req.status_code != requests.codes.ok:
                        self.need_run = False
                        self.er_msg_out.emit(
                            'http error, voice control is closed')
                        continue
                except requests.exceptions.RequestException as e:
                    self.er_msg_out.emit(str(e))
                    print(e)
                    continue
                print('recognize:%dms' % ((time() - t) * 1000))
                result = json.loads(req.text)

                if result["err_msg"] == "success.":
                    print(result['result'])
                    # Emit only the top-ranked candidate.
                    self.detected_words.emit(result['result'][0])
                else:
                    print(result["err_msg"])
            except Exception as e:
                # Last-resort guard: keep the worker alive on unexpected
                # errors; they are logged to stdout.
                print('[running error]:', e)

    def start_listen(self, yes: bool):
        """Enable/disable the recognition loop (no-op if audio unavailable)."""
        if not self.available:
            return
        self.need_run = yes

    def adjust_noise(self):
        """Recalibrate the ambient-noise threshold.

        Returns:
            bool: False when a recording is in progress, True otherwise.
        """
        if self.is_recording:
            return False
        # Bug fix: the original referenced self.mic / self.r, which are
        # never assigned (the per-instance setup is commented out) and
        # would raise AttributeError; use the module-level objects.
        with mic as source:
            r.adjust_for_ambient_noise(source)
        return True

    def to_remote(self):
        """Redirect recognized words to the remote host via UDP instead of
        the locally connected slots."""
        self.detected_words.disconnect()
        self.detected_words.connect(
            lambda s: send_soc.sendto(s.encode('utf-8'), (remote_ip, 80)))

class RemoteListen(QThread):
    """Worker thread that receives recognized words as UDP datagrams from
    a remote peer and re-emits them as Qt signals."""

    detected_words = QtCore.pyqtSignal(str)  # decoded text from the peer
    doing = QtCore.pyqtSignal(str)           # 'recording' / 'done' state

    def __init__(self, parent=None):
        super(RemoteListen, self).__init__(parent)
        self.port = 80
        self.soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Consistency fix: bind to the configured port instead of repeating
        # the hard-coded literal 80.  Binding a port below 1024 typically
        # requires elevated privileges.
        self.soc.bind(('0.0.0.0', self.port))
        self.need_run = False

    def run(self):
        """Block on recvfrom and emit each received UTF-8 payload."""
        while True:
            if not self.need_run:
                self.doing.emit('done')
                sem.acquire()  # park until listening is re-enabled
                self.doing.emit('recording')
            print('recving')
            packet, _ = self.soc.recvfrom(1024)
            self.detected_words.emit(packet.decode('utf-8'))
            print(packet)


def plot_audio(audio):
    """Plot the waveform of a recorded AudioData object.

    Blocks until the matplotlib window is closed.
    """
    samples = np.frombuffer(audio.get_raw_data(), 'int16')
    # Convert sample indices to seconds.
    # assumes 16 kHz sample rate -- TODO confirm against the mic setup
    seconds = np.arange(len(samples)) / 16000
    plt.plot(seconds, samples)
    plt.show()


def audio_baidu(filename):
    """Send a 16 kHz mono wav file to Baidu's speech API.

    Args:
        filename: path of the wav file to recognize.

    Returns:
        The list of candidate transcriptions on success, or -1 on failure
        (the -1 sentinel is kept for backward compatibility with callers).
    """
    logging.info('开始识别语音文件...')
    with open(filename, "rb") as f:
        speech = base64.b64encode(f.read()).decode('utf-8')
    size = os.path.getsize(filename)

    headers = {'Content-Type': 'application/json'}
    url = "https://vop.baidu.com/server_api"
    data = {
        "format": "wav",
        "rate": "16000",
        "dev_pid": "1536",
        "speech": speech,
        "cuid": "TEDxPY",
        "len": size,
        "channel": 1,
        "token": token,
    }

    # Bug fix: the third positional parameter of requests.post is ``json``,
    # not ``headers`` -- the header dict was silently ignored.  Pass it by
    # keyword so Content-Type is actually sent.
    req = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(req.text)

    if result["err_msg"] == "success.":
        print(result['result'])
        return result['result']
    else:
        print("内容获取失败，退出语音识别")
        return -1


def continue_cpeech_test():
    """Manual test loop: record up-to-30 s utterances, print Baidu's
    recognition result and plot the waveform, until interrupted.

    NOTE(review): name is a typo for continue_speech_test; kept to avoid
    changing the public interface.
    """
    while True:
        with mic as source:
            print('recording...')
            audio = r.listen(source, phrase_time_limit=30)
        print('record done')
        t = time()
        raw_data = audio.get_wav_data()  # convert_rate=16000
        data['speech'] = base64.b64encode(raw_data).decode('utf-8')
        data['len'] = len(raw_data)

        print('preprocess:%dms' % ((time() - t) * 1000))

        t = time()
        # Bug fix: pass headers by keyword; positionally it lands in the
        # (ignored) ``json`` parameter of requests.post.
        req = requests.post(url, data=json.dumps(data), headers=headers)
        print('recognize:%dms' % ((time() - t) * 1000))

        result = json.loads(req.text)

        if result["err_msg"] == "success.":
            print(result['result'])
        else:
            print(result["err_msg"])
            print("内容获取失败，退出语音识别")

        plot_audio(audio)


def finished_call(r, audio):
    """Callback for listen_in_background: recognize ``audio`` via Baidu
    and forward the result to the registered ``word_out`` callback.

    Args:
        r: the Recognizer supplied by listen_in_background (shadows the
           module-level ``r``; not used directly here).
        audio: the captured AudioData instance.
    """
    raw_data = audio.get_wav_data()  # convert_rate=16000
    data['speech'] = base64.b64encode(raw_data).decode('utf-8')
    data['len'] = len(raw_data)

    # Bug fix: headers must be a keyword argument; the third positional
    # slot of requests.post is ``json`` and was silently discarded.
    req = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(req.text)

    if result["err_msg"] == "success.":
        # Forward the candidate list; failures are silently ignored
        # (best-effort, matching the original behavior).
        if word_out is not None:
            if word_out_arg is not None:
                word_out(word_out_arg, result['result'])
            else:
                word_out(result['result'])


def listen_audio(word_callback, the_arg=None):
    """Start background recognition; results go to ``word_callback``.

    Args:
        word_callback: invoked with the recognition result list (preceded
            by ``the_arg`` when one was given).
        the_arg: optional extra argument forwarded to the callback.
    """
    global stop_listening, in_record, word_out, word_out_arg
    in_record = True
    word_out = word_callback
    word_out_arg = the_arg
    # Bug fix: the original passed ``source``, a variable that only exists
    # because an earlier ``with mic as source`` block leaked it at module
    # scope; pass the microphone explicitly instead.
    stop_listening = r.listen_in_background(
        mic, finished_call, phrase_time_limit=10)


def listen_stop():
    """Stop the background listener started by listen_audio(), if any.

    No-op when nothing is recording or no stopper was registered.
    """
    global word_out, in_record
    if not in_record or stop_listening is None:
        return
    # Ask the background thread to stop without blocking on its exit.
    stop_listening(wait_for_stop=False)
    word_out = None
    in_record = False


def adjust_noise():
    """Recalibrate the recognizer's ambient-noise threshold.

    Returns:
        bool: False when a recording is in progress (nothing done),
        True after a successful calibration.
    """
    can_adjust = not in_record
    if can_adjust:
        with mic as source:
            r.adjust_for_ambient_noise(source)
    return can_adjust


if __name__ == "__main__":

    in_record = False
    stop_listening = None
    word_out = None
    word_out_arg = None

    # continue_speech_test()

    logging.basicConfig(level=logging.INFO)

    wav_num = 0
    target = audio_baidu(f"00{wav_num}.wav")
    r = sr.Recognizer()

    # r.non_speaking_duration
    # 启用麦克风
    mic = sr.Microphone()

    while True:
        logging.info('录音中...')
        with mic as source:
            # 降噪
            #
            audio = r.listen(source)
        with open(f"00{wav_num}.wav", "wb") as f:
            # 将麦克风录到的声音保存为wav文件
            f.write(audio.get_wav_data(convert_rate=16000))
        logging.info('录音结束，识别中...')
        target = audio_baidu(f"00{wav_num}.wav")
        if target == -1:
            break
        wav_num += 1
