# coding=utf-8
'''
Continuous microphone speech recognition via the Baidu speech API.

Records audio with PyAudio, watches for a silence-to-sound energy jump,
sends the buffered PCM data to Baidu ASR, and exposes the most recent
recognized text through get_voice_text().
'''


import json
import queue
import sys
import threading
import time
import _thread as thread

import numpy as np
import pyaudio
from aip import AipSpeech


# 提取语音文字，可将此函数暴露，供调用
# Queue of recognized text results; consumers poll it via get_voice_text().
detout = queue.Queue()


def get_voice_text():
    """Drain the result queue and return the newest recognized text.

    Returns:
        The most recently queued recognition result, or '' when no new
        result has arrived since the last call.
    """
    latest = ''
    while True:
        try:
            latest = detout.get_nowait()
        except queue.Empty:
            break
    return latest


# Tuning constants for the pending-upload audio buffer.
QMAX = 12 # roughly 1.5 seconds of audio at the chunk size / rate below
CHUNK = 2048  # slightly more than 0.1 seconds of samples per chunk
RECORD_RATE = 16000

# Recording thread. The capture parameters must match the upload
# parameters exactly, especially the sample rate.
def record_thread(duration=88400):
    """Capture microphone audio and trigger recognition on sound onsets.

    Maintains a sliding window of up to QMAX chunks. When the energy
    (std of the int16 samples) of a recent chunk is more than twice
    that of an older chunk -- i.e. silence followed by a sudden loud
    sound -- the buffered frames are handed to query_once() and the
    window is cleared.

    Args:
        duration: total recording time in seconds (default ~24.5 hours).
    """
    FORMAT = pyaudio.paInt16
    CHANNELS = 1  # mono
    RATE = RECORD_RATE

    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("* recording")

    frames = []
    try:
        for i in range(int(RATE / CHUNK * duration)):
            if i % 10 == 0:
                # Lightweight heartbeat so long recordings show progress.
                sys.stdout.write('*')
                sys.stdout.flush()
            # The driver drops audio that is not read promptly, so never
            # raise on buffer overflow -- just keep reading.
            data = stream.read(CHUNK, exception_on_overflow=False)

            frames.append(data)
            if len(frames) > QMAX:  # cap the buffer at ~1.5 s
                frames.pop(0)
                r0 = np.frombuffer(frames[5], np.dtype('<i2'))
                r2 = np.frombuffer(frames[7], np.dtype('<i2'))
                # Older chunk quiet, newer chunk loud: a silence-to-sound
                # transition -- send the whole window for recognition.
                if r2.std() / r0.std() > 2:
                    print(' ', int(r2.std()), int(r0.std()))
                    query_once(frames)
                    frames = []
    finally:
        # Release the audio device even if recognition raises; the
        # original code leaked the stream and the PyAudio instance.
        stream.stop_stream()
        stream.close()
        p.terminate()

    print("* done recording")

# Credentials for the Baidu speech API (student account).
# NOTE(security): credentials are hard-coded in source; move them to an
# environment variable or config file before publishing this code.
APP_ID = '19485500'
API_KEY = 'l3Rlenu3RFBdNE0Yu2SN8EX6'
SECRET_KEY = 'zlTtqq3GwzQrY2SRzbkojQhuU3G0Vf4F'

# Alternate (author's own) credentials, kept for reference:
#APP_ID = '22758334'
#API_KEY = 'GvQv0rALh498mZ88LGl8SUDd'
#SECRET_KEY = 'X2WviYMkGZplAUReV9tKOeKKX6jr9viP'


# Sample rate required by the cloud API (fixed value).
YUN_RATE = 16000

# Shared Baidu speech-recognition client.
client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)


# Issue a single speech-recognition request.
def query_once(frames):
    """Concatenate buffered PCM chunks and send them to Baidu ASR.

    The recognized text (all result candidates joined together) is
    pushed onto the module-level `detout` queue for get_voice_text().

    Args:
        frames: list of raw little-endian int16 PCM chunks (CHUNK
            samples each) recorded at RECORD_RATE.
    """
    # RECORD_RATE == YUN_RATE today, so the scale factor is 1; the
    # arithmetic is kept so a future resampling step stays consistent.
    bytes_per_chunk = int(CHUNK * YUN_RATE / RECORD_RATE * 2)
    speech_data = bytearray(QMAX * bytes_per_chunk)
    for i, frame in enumerate(frames):
        speech_data[i * bytes_per_chunk:(i + 1) * bytes_per_chunk] = frame
    speech_data = bytes(speech_data)

    try:
        print('start query')
        # DEV_PID 1537: Mandarin with basic punctuation.
        result = client.asr(speech_data, 'pcm', 16000, {'DEV_PID': 1537})
        item = ''.join(result["result"])
        print('output: ' + item)
        detout.put(item)
    except Exception as e:
        # Narrowed from a bare `except:` so KeyboardInterrupt still
        # propagates; report the cause instead of failing silently.
        print('query failed:', e)


# External entry point: start all voice-processing threads.
def start_voice_threads():
    """Launch the microphone recording loop on a background thread.

    Uses the high-level `threading` module (recommended by the Python
    docs over the low-level `_thread`); daemon=True reproduces
    _thread.start_new_thread's behavior of not blocking interpreter
    exit.
    """
    threading.Thread(target=record_thread, daemon=True).start()

if __name__ == '__main__':
    # Demo entry point: start the recording thread, then keep the main
    # thread alive -- the worker thread dies when the process exits.
    start_voice_threads()
    while True:
        time.sleep(1.0)
