import asyncio
import os
import socket
import time

import numpy as np
import pygame
import speech_recognition as sr
import torch

from modules.asr import SpeechRecognizer
from modules.tts import VoiceSynthesizer

# Initialize the pygame audio subsystem (used by play_audio for TTS playback).
pygame.mixer.init()

# Socket setup: one module-level TCP socket shared by all send/reconnect helpers.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '192.168.35.117'  # control-server address -- confirm per deployment
port = 8080

# Establish the TCP connection to the control server.
def connect_to_server():
    """Connect the module-level socket to (host, port); return True on success."""
    try:
        client_socket.connect((host, port))
    except Exception as err:
        print(f"[{time.strftime('%H:%M:%S')}] 连接失败: {err}")
        return False
    else:
        print(f"[{time.strftime('%H:%M:%S')}] 已连接到服务器 {host}:{port}")
        return True

# Abort at import time if the control server is unreachable.
if not connect_to_server():
    # Was exit(1): the `exit` builtin is injected by the site module and is
    # not guaranteed to exist in scripts; SystemExit is the portable form.
    raise SystemExit(1)

# Voice keywords mapped to the single-character commands control.py understands.
VOICE_KEYWORDS = {
    "前进": "w",  # forward
    "后退": "s",  # backward
    "停止": "a",  # stop
    "站立": "z",  # stand up
    "蹲下": "x",  # crouch
    "持续前进": "m",  # sustained forward (3-second burst)
    "退出": "q"  # quit
}
WAKE_WORD = "你好"  # "hello" -- spoken phrase that switches into command mode
COMMAND_TIMEOUT = 90  # seconds of command-mode inactivity before re-arming the wake word
MIN_TEXT_LENGTH = 2  # minimum transcript length; anything shorter is treated as noise

# Send a command string to the server, reconnecting once if the link dropped.
def send_message(message):
    """Send *message* over the shared socket; retry once after reconnecting.

    Catches any OSError (BrokenPipeError, ConnectionResetError, ...), not
    just BrokenPipeError as before, so a reset connection is also retried
    instead of crashing the caller.  Returns True if the bytes were sent.
    """
    global client_socket
    try:
        client_socket.sendall(message.encode('utf-8'))
        print(f"[{time.strftime('%H:%M:%S')}] 已发送消息: {message.strip()}")
        return True
    except OSError:  # covers BrokenPipeError, ConnectionResetError, etc.
        print(f"[{time.strftime('%H:%M:%S')}] 连接断开，尝试重新连接...")
        client_socket.close()
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if connect_to_server():
            print(f"[{time.strftime('%H:%M:%S')}] 已重新连接到服务器")
            try:
                # The retried send was previously unguarded and could raise.
                client_socket.sendall(message.encode('utf-8'))
            except OSError:
                print(f"[{time.strftime('%H:%M:%S')}] 重新连接失败，稍后重试...")
                time.sleep(1)
                return False
            print(f"[{time.strftime('%H:%M:%S')}] 已发送消息: {message.strip()}")
            return True
        else:
            print(f"[{time.strftime('%H:%M:%S')}] 重新连接失败，稍后重试...")
            time.sleep(1)
            return False

# Handle a voice command character (mirrors control.py's if-elif dispatch).
async def process_command(char, tts):
    """Translate a command character into a control message and send it.

    Simple moves ('w'/'s'/'a'/'z'/'x') send one message; 'm' streams forward
    commands for 3 seconds; 'q' clears the global `running` flag.  Speaks a
    TTS confirmation or error in every case.  Returns True on success.
    """
    global running
    message = ""
    if char == "w":
        message = "linear x: 0.5, angular z: 0.0\n"
    elif char == "s":
        message = "linear x: -0.5, angular z: 0.0\n"
    elif char == "a":
        message = "linear x: 0.0, angular z: 0.0\n"
    elif char == "z":
        message = "linear x: 0.0, angular z: 0.0, action: 1\n"
    elif char == "x":
        message = "linear x: 0.0, angular z: 0.0, action: 0\n"
    elif char == "m":
        print(f"[{time.strftime('%H:%M:%S')}] 开始持续发送前进指令，持续 3 秒")
        start_time = time.time()
        while time.time() - start_time < 3 and running:
            message1 = "linear x: 0.5, angular z: 0.0\n"
            if not send_message(message1):
                output_file = await tts.synthesize("抱歉，连接出错了，请稍后再试")
                play_audio(output_file)
                return False
            # Was time.sleep(0.1): blocking inside a coroutine stalls the
            # event loop; yield control with asyncio.sleep instead.
            await asyncio.sleep(0.1)
        print(f"[{time.strftime('%H:%M:%S')}] 已持续发送前进指令 3 秒")
        output_file = await tts.synthesize("好了，已经向前移动了三秒")
        play_audio(output_file)
        return True
    elif char == "q":
        print(f"[{time.strftime('%H:%M:%S')}] 退出程序")
        running = False
        return True
    else:
        print(f"[{time.strftime('%H:%M:%S')}] 无效命令: {char}")
        output_file = await tts.synthesize("抱歉，我没听懂，请再说一次")
        play_audio(output_file)
        return False

    # Send a one-shot command and speak the outcome.
    if message:
        success = send_message(message)
        if success:
            output_file = await tts.synthesize("好的，已完成操作")
            play_audio(output_file)
        else:
            output_file = await tts.synthesize("抱歉，连接出错了，请稍后再试")
            play_audio(output_file)
        return success
    return False

# Play a synthesized TTS audio file, then delete it (cf. main.py's _play_audio).
def play_audio(file_path):
    """Block until *file_path* finishes playing, then remove the file.

    A missing file is logged and skipped; playback errors are logged but
    never propagated; cleanup failures are ignored.
    """
    if not os.path.exists(file_path):
        print(f"[{time.strftime('%H:%M:%S')}] 音频文件不存在: {file_path}")
        return
    try:
        pygame.mixer.music.load(file_path)
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            time.sleep(0.1)
    except Exception as e:
        print(f"[{time.strftime('%H:%M:%S')}] 播放失败: {str(e)}")
    finally:
        # Best-effort temp-file cleanup.  Was a bare `except:`, which would
        # also swallow KeyboardInterrupt/SystemExit; OSError is what
        # os.remove actually raises.
        try:
            os.remove(file_path)
        except OSError:
            pass

# Main voice-control loop: wake-word gating plus keyword command dispatch.
async def speech_control():
    """Listen on the microphone, transcribe speech, and drive the robot.

    Waits for WAKE_WORD, then matches VOICE_KEYWORDS against each transcript
    and forwards the mapped command via process_command.  Drops back to
    wake-word mode after COMMAND_TIMEOUT seconds without a valid command.
    Always closes the socket and audio system on exit.
    """
    global running
    running = True
    recognizer = sr.Recognizer()
    recognizer.energy_threshold = 12000  # raised threshold to filter background noise
    recognizer.dynamic_energy_threshold = True
    asr = SpeechRecognizer(device="cuda" if torch.cuda.is_available() else "cpu")
    tts = VoiceSynthesizer()

    try:
        with sr.Microphone(sample_rate=16000) as source:
            print(f"[{time.strftime('%H:%M:%S')}] 可用麦克风设备: {sr.Microphone.list_microphone_names()}")
            print(f"[{time.strftime('%H:%M:%S')}] 正在调整环境噪音...")
            recognizer.adjust_for_ambient_noise(source, duration=2)
            print(f"[{time.strftime('%H:%M:%S')}] 语音控制已启动，等待唤醒词: '{WAKE_WORD}'")
            print(f"[{time.strftime('%H:%M:%S')}] 支持的命令: 前进, 后退, 停止, 站立, 蹲下, 持续前进, 退出")

            is_awake = False
            last_command_time = time.time()

            while running:
                try:
                    print(f"[{time.strftime('%H:%M:%S')}] 监听中...")
                    audio = recognizer.listen(source, timeout=5, phrase_time_limit=5)

                    # Convert raw 16-bit PCM into normalized float32 for the ASR model.
                    audio_data = np.frombuffer(audio.get_wav_data(), dtype=np.int16).astype(np.float32) / 32768.0
                    text = asr.transcribe(audio_data)

                    if text and len(text) >= MIN_TEXT_LENGTH:
                        print(f"[{time.strftime('%H:%M:%S')}] 识别到语音: {text}")
                    else:
                        # Empty or too-short transcript -- likely noise; wait silently.
                        print(f"[{time.strftime('%H:%M:%S')}] 未识别到有效文本或文本过短")
                        if is_awake and time.time() - last_command_time > COMMAND_TIMEOUT:
                            print(f"[{time.strftime('%H:%M:%S')}] 命令模式超时，回到等待唤醒词状态")
                            output_file = await tts.synthesize("您好，我还在等您的指令哦，请说‘你好’重新唤醒")
                            play_audio(output_file)
                            is_awake = False
                        continue

                    # Gate on the wake word before accepting any commands.
                    if not is_awake:
                        if WAKE_WORD in text:
                            print(f"[{time.strftime('%H:%M:%S')}] 唤醒成功！进入命令模式...")
                            output_file = await tts.synthesize(f"好的，我听到了‘{text}’，请告诉我您想做什么")
                            play_audio(output_file)
                            is_awake = True
                            last_command_time = time.time()
                        continue

                    # Command mode: dispatch on the first keyword found in the transcript.
                    command_matched = False
                    for keyword, char in VOICE_KEYWORDS.items():
                        if keyword in text:
                            print(f"[{time.strftime('%H:%M:%S')}] 识别到命令关键字: {keyword}")
                            command_matched = True
                            await process_command(char, tts)
                            last_command_time = time.time()
                            break

                    if not command_matched:
                        # Non-command speech -- wait silently, no TTS feedback.
                        print(f"[{time.strftime('%H:%M:%S')}] 未匹配到命令: {text}")

                    # Drop back to wake-word mode after prolonged inactivity.
                    if is_awake and time.time() - last_command_time > COMMAND_TIMEOUT:
                        print(f"[{time.strftime('%H:%M:%S')}] 命令模式超时，回到等待唤醒词状态")
                        output_file = await tts.synthesize("您好，我还在等您的指令哦，请说‘你好’重新唤醒")
                        play_audio(output_file)
                        is_awake = False

                    # Was time.sleep(0.1): never block the event loop inside a coroutine.
                    await asyncio.sleep(0.1)
                except sr.WaitTimeoutError:
                    # No speech captured; still enforce the command-mode timeout.
                    if is_awake and time.time() - last_command_time > COMMAND_TIMEOUT:
                        print(f"[{time.strftime('%H:%M:%S')}] 命令模式超时，回到等待唤醒词状态")
                        output_file = await tts.synthesize("您好，我还在等您的指令哦，请说‘你好’重新唤醒")
                        play_audio(output_file)
                        is_awake = False
                except Exception as e:
                    print(f"[{time.strftime('%H:%M:%S')}] 语音控制错误: {e}")
                    output_file = await tts.synthesize("语音处理出错，请稍后再试")
                    play_audio(output_file)

    finally:
        client_socket.close()
        pygame.mixer.quit()
        print(f"[{time.strftime('%H:%M:%S')}] 连接已关闭")

# Program entry point: run the voice-control loop until Ctrl-C or the 'q' command.
if __name__ == "__main__":
    try:
        asyncio.run(speech_control())
    except KeyboardInterrupt:
        print(f"[{time.strftime('%H:%M:%S')}] 客户端正在关闭...")
    finally:
        # speech_control's own finally already closes these; socket.close()
        # and pygame.mixer.quit() are both safe to call a second time.
        client_socket.close()
        pygame.mixer.quit()
        print(f"[{time.strftime('%H:%M:%S')}] 连接已关闭")