#!/usr/bin/env python3
# 利用热词唤醒后使用百度语音识别api识别语音指令,然后匹配操作指令
import os
import sys

import_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(import_path)
import time
import pyaudio
import wave
import pygame
import snowboydecoder
import signal
import RPi.GPIO as GPIO
from aip import AipSpeech
import cv2
from cv2 import dnn
import numpy as np

from intelligentCar.motorContler import motorContler
from steeringGearTest.servo import servoContler
from opencv.face import faceControl


GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)

# NOTE(review): API credentials are hard-coded; load them from environment
# variables/config and rotate these keys before sharing this file.
APP_ID = '25502908'
API_KEY = 'oPWSUgbtst7zGQT2C6FIVPB9'
SECRET_KEY = '8jVLT5GfnT7lzIg3nCGQDPMrnjZ8RRIR'

APIClient = AipSpeech(APP_ID, API_KEY, SECRET_KEY)

interrupted = False

# 定义采集声音文件参数
CHUNK = 1024
FORMAT = pyaudio.paInt16  # 16位采集
CHANNELS = 1  # 单声道
RATE = 16000  # 采样率
RECORD_SECONDS = 5  # recording duration in seconds (comment previously said 9 s; the value is 5)
# 采集声音文件存储路径
WAVE_OUTPUT_FILENAME = "./myvoice.pcm"


class VoiceControl():
    """Handlers for the recognized voice commands.

    find_someone() runs a face-tracking loop on the pan/tilt camera and
    drives the car toward the detected face; whereis()/status() are stubs.
    """

    def __init__(self):
        self.pin = 18  # GPIO pin (BCM numbering); not used by the current handlers
        self.mode = 1

        # Servo-sweep counters used by find_someone() when no face is visible.
        self.btm_count = 0
        self.top_count = 0

        # Input normalization constants for the RFB-320 face detector.
        self.image_std = 128.0
        self.center_variance = 0.1
        self.size_variance = 0.2

        # NOTE(review): these run the other classes' initializers against this
        # instance even though VoiceControl does not inherit from them; kept in
        # case they perform required GPIO/hardware setup — confirm and refactor
        # to plain composition if they do not.
        servoContler.__init__(self)
        faceControl.__init__(self)
        motorContler.__init__(self)

    def find_someone(self):
        """Track a face and keep the car at a comfortable distance.

        Sweeps the pan/tilt servos until a face appears, then steers the
        camera to follow it and moves the car forward/backward based on the
        estimated face distance. Press 'q' in the preview window to quit.
        """
        print("找到我")
        servo = servoContler()
        face = faceControl()
        car = motorContler()
        # Center the pan/tilt platform.
        servo.set_cloud_platform_degree(100, 100)
        # FIX: the previous angles were read before ever being assigned, which
        # raised NameError on the first detected face. Seed them with the
        # centering angles set just above.
        last_btm_degree = 100
        last_top_degree = 100
        topdegreeArray = [80, 90, 110, 115, 120]
        btmdegreeArray = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180]

        caffe_prototxt_path = "/home/pi/kratos/test/shumeipai/opencv/caffe/RFB-320.prototxt"
        caffe_model_path = "/home/pi/kratos/test/shumeipai/opencv/caffe/RFB-320.caffemodel"
        threshold = 0.7  # minimum detection confidence passed to face.predict
        input_size = [640, 480]
        net = dnn.readNetFromCaffe(caffe_prototxt_path, caffe_model_path)
        width = input_size[0]
        height = input_size[1]
        priors = face.define_img_size(input_size)
        cap = cv2.VideoCapture(0)
        cap.set(3, width)   # CAP_PROP_FRAME_WIDTH
        cap.set(4, height)  # CAP_PROP_FRAME_HEIGHT
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            img_ori = cv2.flip(frame, 1)  # mirror the image horizontally
            rect = cv2.resize(img_ori, (width, height))
            rect = cv2.cvtColor(rect, cv2.COLOR_BGR2RGB)
            net.setInput(dnn.blobFromImage(rect, 1 / self.image_std, (width, height), 127))
            boxes, scores = net.forward(["boxes", "scores"])
            boxes = np.expand_dims(np.reshape(boxes, (-1, 4)), axis=0)
            scores = np.expand_dims(np.reshape(scores, (-1, 2)), axis=0)
            boxes = face.convert_locations_to_boxes(boxes, priors, self.center_variance, self.size_variance)
            boxes = face.center_form_to_corner_form(boxes)
            boxes, labels, probs = face.predict(img_ori.shape[1], img_ori.shape[0], scores, boxes, threshold)
            box = face.face_filter(boxes)

            if box is not None:
                # Draw the detection rectangle and its corner markers.
                cv2.rectangle(img_ori, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
                cv2.circle(img_ori, (box[0], box[1]), 1, (0, 0, 255), 4)
                cv2.circle(img_ori, (box[2], box[3]), 1, (0, 0, 255), 4)
                img_height, img_width, _ = img_ori.shape
                (offset_x, offset_y) = servo.calculate_offset(img_width, img_height, box)
                next_btm_degree = servo.btm_servo_control(offset_x, last_btm_degree)
                next_top_degree = servo.top_servo_control(offset_y, last_top_degree)
                servo.set_cloud_platform_degree(next_btm_degree, next_top_degree)
                # Remember the angles for the next frame.
                tmp_next_btm_degree = last_btm_degree
                last_btm_degree = next_btm_degree
                last_top_degree = next_top_degree
                # lrn = change of the bottom (pan) servo angle this frame.
                lrn = int(next_btm_degree) - int(tmp_next_btm_degree)
                print("lrn:{}".format(lrn))
                ya_max = box[1]
                yb_max = box[3]
                pix_person_height = yb_max - ya_max
                # 15 and 580 look like a reference face height (cm) and a focal
                # constant (px) — TODO confirm the calibration in faceControl.
                distance = face.distance_to_camera(15, 580, pix_person_height)
                if distance / 100 > 1.0:
                    cv2.putText(img_ori, "%.2fm" % (distance / 100),
                                (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (255, 0, 255), 2)
                else:
                    cv2.putText(img_ori, "%.2fcm" % (distance),
                                (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (255, 0, 255), 2)

                if lrn > 0:
                    car.turnLeft(30)
                elif lrn < 0:
                    # NOTE(review): a negative speed presumably turns right —
                    # confirm against motorContler.turnLeft.
                    car.turnLeft(-30)

                # Approach when far, stop in the middle band, back off when past it.
                # NOTE(review): distances of exactly 50/100/130/300 fall into no branch.
                if int(distance) > 50 and int(distance) < 100:
                    car.goForward(20)
                elif int(distance) > 100 and int(distance) < 130:
                    car.motorStop()
                elif int(distance) > 130 and int(distance) < 300:
                    car.goForward(-20)
            else:
                # No face: sweep the bottom (pan) servo through btmdegreeArray,
                # then step the top (tilt) servo and restart the bottom sweep.
                if self.btm_count == 17 and self.top_count == 4:
                    print("但是没有检测到人脸")
                    break
                elif self.btm_count < 17 and self.top_count < 4:
                    self.btm_count += 1
                    servo.set_btm_cloud_platform_degree(btmdegreeArray[self.btm_count])
                elif self.btm_count == 17 and self.top_count < 4:
                    self.btm_count = 0
                    self.top_count += 1
                    servo.set_btm_cloud_platform_degree(0)
                    servo.set_top_cloud_platform_degree(topdegreeArray[self.top_count])
                # NOTE(review): when top_count == 4 and btm_count < 17 none of
                # the branches above fires, so the sweep stalls until 'q'.

                car.motorStop()

            cv2.imshow("Face_Distance_Mosaic", img_ori)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
        # FIX: was `motorContler.motorStop(None)` (unbound call on the class);
        # stop through the instance created above.
        car.motorStop()
        GPIO.cleanup()
        cap.release()
        cv2.destroyAllWindows()


    def whereis(self):
        """Stub for the 'where are you' command; GPIO signaling is disabled."""
        print("你在哪")
        #self.mgpio.setV(self.pin, self.mode & 0)

    def status(self):
        """Stub for a status query; the GPIO read is disabled."""
        print("状态是啥啊")
        #return self.mgpio.getV(self.pin)


def get_file_content(filePath):
    """Return the raw bytes of the file at *filePath*."""
    fp = open(filePath, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()


def word_to_voice(text):
    """Synthesize *text* with Baidu TTS and play it, blocking until playback ends.

    Baidu's synthesis() returns raw audio bytes on success and an error dict
    on failure; previously an error fell through and replayed whatever stale
    ./audio.mp3 was already on disk, so we now bail out early instead.
    """
    result = APIClient.synthesis(text, 'zh', 1, {
        'vol': 5, 'spd': 3, 'per': 3})
    if isinstance(result, dict):
        # Error payload (err_no / err_msg) — nothing to play.
        print("TTS synthesis failed: {}".format(result))
        return
    with open('./audio.mp3', 'wb') as f:
        f.write(result)
    time.sleep(.2)
    pygame.mixer.music.load('./audio.mp3')  # speech synthesized from `text`
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy():
        time.sleep(0.1)  # was a busy print-loop; sleep instead of pegging the CPU


def get_mic_voice_file(p):
    """Record RECORD_SECONDS of mono 16-bit/16 kHz microphone audio to
    WAVE_OUTPUT_FILENAME.

    Parameters:
        p: an initialized pyaudio.PyAudio instance (shared by the caller;
           not terminated here).
    """
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    print("* recording")
    try:
        frames = [stream.read(CHUNK)
                  for _ in range(int(RATE / CHUNK * RECORD_SECONDS))]
        print("* done recording")
    finally:
        # FIX: release the input stream even if a read fails mid-recording.
        stream.stop_stream()
        stream.close()
    # wave.open supports the context-manager protocol, so the header is
    # finalized and the file closed on every exit path.
    with wave.open(WAVE_OUTPUT_FILENAME, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
    print('recording finished')


def baidu_get_words(client):
    """Send the recorded PCM file to Baidu ASR and return the top transcript.

    Raises KeyError if the API response carries no 'result' field (e.g. on a
    recognition error).
    """
    audio_bytes = get_file_content(WAVE_OUTPUT_FILENAME)
    results = client.asr(audio_bytes, 'pcm', 16000, {'dev_pid': 1536, })
    print("结果:{}".format(results))
    best_match = results['result'][0]
    return best_match


# 实现离线语音唤醒和语音识别，实现一些语音交互控制


def signal_handler(signum, frame):
    """SIGINT handler: raise the stop flag polled by interrupt_callback().

    FIX: the first parameter was named ``signal``, shadowing the imported
    ``signal`` module inside the handler; the signal machinery calls the
    handler positionally, so the rename is backward compatible.
    """
    global interrupted
    interrupted = True


def interrupt_callback():
    """Poll hook for snowboy: report whether SIGINT has been received."""
    # `global` is only needed for writes; reading the module flag works as-is.
    return interrupted


#  Callback fired by snowboy when the hotword is detected: plays an
#  acknowledgement sound, records one command, and dispatches it.
def callbacks():
    # detector, p, APIClient and VoiceControl are module-level globals set up
    # in wake_up() and the __main__ block.
    global detector

    pygame.mixer.music.load('./laile.mp3')
    pygame.mixer.music.play()
    while pygame.mixer.music.get_busy() == True:
        print('waiting')

    #  Stop snowboy while recording so it releases the microphone.
    detector.terminate()
    #  Record a command and send it to Baidu speech recognition.
    get_mic_voice_file(p)
    rText = baidu_get_words(client=APIClient)

    # Dispatch on substrings: "找到我" = "find me", "你在哪" = "where are you".
    if rText.find("找到我") != -1:
        VoiceControl.find_someone()
    elif rText.find("你在哪") != -1:
        VoiceControl.whereis()

    # Re-arm snowboy: wake_up -> callbacks -> wake_up recursion.
    # NOTE(review): each wake cycle deepens the call stack.
    wake_up()


# Hotword wake-up: blocks until the wake word is heard, then callbacks() runs.
def wake_up():
    """Arm the snowboy hotword detector (wake word: '小贱贱') and block on it."""
    global detector
    # Let Ctrl-C set the flag that interrupt_callback() reports to snowboy.
    signal.signal(signal.SIGINT, signal_handler)
    hotword_model = './xiaojianjian.pmdl'
    # sensitivity tunes the detection threshold (higher triggers more easily).
    detector = snowboydecoder.HotwordDetector(hotword_model, sensitivity=0.5)
    # callbacks() implements what happens after each detection.
    detector.start(detected_callback=callbacks,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
    # Release the detector's resources once the loop exits.
    detector.terminate()


if __name__ == '__main__':
    pygame.mixer.init()  # audio playback for the wake sound / TTS output
    p = pyaudio.PyAudio()  # shared PyAudio handle used by get_mic_voice_file()
    # NOTE(review): this rebinds the class name VoiceControl to an instance;
    # callbacks() relies on that rebinding, so renaming requires editing both.
    VoiceControl = VoiceControl()
    wake_up()
