# -*- coding: utf-8 -*-

# voice_control.py
import json
import os
import queue
import subprocess
import sys
import threading
from datetime import datetime

import sounddevice as sd
import soundfile as sf
from vosk import Model, KaldiRecognizer

# Configuration: Vosk Chinese model location and audio capture format.
MODEL_PATH: str = "/home/pi/vosk-model/vosk-model-small-cn-0.22"
SAMPLE_RATE: int = 16000  # Hz; must match the rate KaldiRecognizer is built with
CHANNELS: int = 1  # mono capture

# Command-to-response map: a command fires when its key occurs as a
# substring of the recognized text; the value is spoken back via speak().
COMMAND_MAP = {
    "你好": "你好！我是你的树莓派语音助手。",
    "今天天气怎么样": "今天天气晴朗，适合外出。",
    "打开灯": "已为你打开灯。",
    "关闭灯": "已为你关闭灯。",
    "退出": "再见！系统将关闭。"
}

# Bounded queue carrying raw audio buffers from the capture callback to
# the recognition thread (maxsize caps memory if recognition lags behind).
audio_queue = queue.Queue(maxsize=10)

def record_audio():
    """Capture microphone audio into the shared queue.

    Opens a sounddevice input stream whose driver thread delivers raw
    buffers to ``audio_callback``; this thread only needs to keep the
    stream's context manager alive.

    Fix: the original kept the stream open with ``while True: pass``,
    a busy-wait that pins a CPU core. ``sd.sleep`` blocks cheaply while
    audio keeps flowing through the callback.
    """
    try:
        with sd.InputStream(samplerate=SAMPLE_RATE, channels=CHANNELS, callback=audio_callback):
            print("正在监听...(说'退出'结束程序)")
            while True:
                # Audio arrives via the callback, not here; just idle.
                sd.sleep(1000)
    except KeyboardInterrupt:
        pass
    except Exception as e:
        print(f"录音错误: {e}")

def audio_callback(indata, frames, time, status):
    """sounddevice input callback: forward one raw audio buffer to the queue.

    Runs on the audio driver's thread and therefore must never block.

    Fixes:
    - The original wrote to ``sys.stderr`` without ``sys`` being imported
      anywhere in the file (NameError on the first non-empty status);
      ``sys`` is now imported at the top of the file.
    - ``audio_queue.put`` on a full queue (maxsize=10) would block the
      realtime callback and stall capture; drop the frame instead.
    """
    if status:
        print(status, file=sys.stderr)
    try:
        audio_queue.put_nowait(bytes(indata))
    except queue.Full:
        pass  # Recognizer is lagging; dropping a frame beats stalling capture.

def recognize_speech():
    """Consume audio buffers from the queue and run speech recognition.

    Loads the Vosk model once, then loops forever feeding queued audio
    into the recognizer; each completed utterance is handed to
    ``handle_result``. Returns early (recognition disabled) when the
    model directory is missing.
    """
    if not os.path.exists(MODEL_PATH):
        print(f"模型路径不存在: {MODEL_PATH}")
        return

    model = Model(MODEL_PATH)
    rec = KaldiRecognizer(model, SAMPLE_RATE)

    while True:
        # Blocks until the capture callback supplies a buffer; no
        # task_done() needed because nothing calls queue.join().
        data = audio_queue.get()
        # AcceptWaveform returns True only on a completed utterance;
        # partial results are intentionally ignored.
        if rec.AcceptWaveform(data):
            handle_result(rec.Result())

def handle_result(result):
    """Decode a recognizer result and dispatch any recognized text.

    ``result`` is the JSON string produced by ``KaldiRecognizer.Result()``.
    Any failure while decoding or dispatching is reported but never
    propagated, so the recognition loop keeps running.
    """
    try:
        text = json.loads(result).get("text", "")
        if not text:
            return
        print(f"识别结果: {text}")
        process_command(text)
    except Exception as e:
        print(f"处理结果错误: {e}")

def process_command(text):
    """Match recognized text against COMMAND_MAP and respond.

    The first command phrase found as a substring of ``text`` (in map
    order) wins; the "退出" command additionally terminates the whole
    process. Unmatched text gets a spoken fallback apology.
    """
    match = next(((c, r) for c, r in COMMAND_MAP.items() if c in text), None)

    if match is None:
        # No known command phrase in the utterance.
        unknown_response = "抱歉，我没听懂你的指令。"
        print(unknown_response)
        speak(unknown_response)
        return

    cmd, response = match
    print(f"识别到指令: {cmd}")
    speak(response)
    if cmd == "退出":
        print("系统即将关闭...")
        os._exit(0)

def speak(text):
    """Synthesize ``text`` with eSpeak (Chinese voice) and play it via aplay.

    Fix: the original built a shell command with an f-string
    (``os.system(f'espeak ... "{text}" ...')``), which breaks on any
    double quote in ``text`` and allows shell injection. An explicit
    subprocess pipeline passes the text as a single argv element, so no
    shell interprets it.
    """
    espeak = subprocess.Popen(
        ["espeak", "-v", "zh", text, "--stdout"],
        stdout=subprocess.PIPE,
    )
    try:
        # aplay consumes the WAV stream espeak writes to its stdout.
        subprocess.run(["aplay"], stdin=espeak.stdout, check=False)
    finally:
        # Close our end of the pipe and reap the espeak process.
        if espeak.stdout is not None:
            espeak.stdout.close()
        espeak.wait()

if __name__ == "__main__":
    # Capture thread: feeds raw audio buffers into audio_queue.
    record_thread = threading.Thread(target=record_audio, daemon=True)
    record_thread.start()

    # Recognition thread: drains audio_queue and dispatches commands.
    recognize_thread = threading.Thread(target=recognize_speech, daemon=True)
    recognize_thread.start()

    # Fix: the original kept the main thread alive with `while True: pass`,
    # a busy-wait that pins a CPU core. join() on the never-ending capture
    # thread blocks cheaply and is still interruptible by Ctrl+C.
    try:
        record_thread.join()
    except KeyboardInterrupt:
        print("程序已停止")