from dataclasses import dataclass
from typing import Literal
import time

# ==== 1. Input data structure ====
@dataclass
class InputData:
    """A single multimodal input event; `content` is interpreted per `type`."""
    type: Literal["voice", "gesture", "text"]
    content: str  # voice: audio file path; gesture: recognition result; text: the content itself

# ==== 2. Mock multimodal recognition models ====
class ASRModel:
    """Simulated automatic-speech-recognition model.

    Calling the instance "transcribes" the audio referenced by the input
    and returns the recognized text after a short artificial delay.
    """

    # Fixed transcription returned for every audio input (mock behavior).
    _CANNED_TEXT = "打开空调，温度设为24度"

    def __call__(self, input_data: "InputData") -> str:
        print("🎤 正在识别语音...")
        time.sleep(0.5)  # simulated inference latency
        return self._CANNED_TEXT

class GestureRecognizer:
    """Simulated gesture-recognition model.

    Calling the instance maps any gesture input to a canned command string
    after a short artificial delay.
    """

    # Fixed result returned for every gesture (mock behavior).
    _CANNED_TEXT = "播放我最喜欢的歌单"

    def __call__(self, input_data: "InputData") -> str:
        print("👋 正在识别手势...")
        time.sleep(0.5)  # simulated inference latency
        return self._CANNED_TEXT

# ==== 3. Mock LLM ====
class MockLLM:
    """Simulated large language model."""

    # Keyword → canned reply; checked in order, first match wins.
    _REPLIES = (
        ("空调", "已为您将空调设置为24度，当前车内温度23度"),
        ("播放", "正在为您播放收藏的歌单"),
    )

    def generate(self, text: str) -> str:
        """Return a canned reply chosen by substring match on *text*."""
        print(f"🧠 LLM 正在处理: {text}")
        time.sleep(0.8)  # simulated generation latency
        for keyword, reply in self._REPLIES:
            if keyword in text:
                return reply
        return "我明白了，正在处理您的请求"  # generic fallback

# ==== 4. Mock TTS and dashboard outputs ====
class TTSEngine:
    """Simulated text-to-speech engine: "speaks" by printing a tagged line."""

    def speak(self, text: str) -> None:
        print(f"🔊 [语音输出] {text}")

class Dashboard:
    """Simulated in-car dashboard: "displays" by printing a tagged line."""

    def display(self, info: str) -> None:
        print(f"🖥️ [仪表盘显示] {info}")

# ==== 5. Main processing pipeline ====
def process_input(input_data: "InputData", asr_model, gesture_model, llm, tts_engine, dashboard):
    """Route one multimodal input through recognition, the LLM, and the outputs.

    The input is first normalized to plain text (via ASR for voice, the
    gesture model for gestures, or taken verbatim for text), then fed to
    the LLM; the reply is spoken, shown on the dashboard, and returned.
    """
    # Dispatch table: modality → thunk producing the normalized text.
    recognizers = {
        "voice": lambda: asr_model(input_data),
        "gesture": lambda: gesture_model(input_data),
        "text": lambda: input_data.content,
    }
    resolve = recognizers.get(input_data.type)
    text = resolve() if resolve is not None else "无法识别的输入类型"

    print(f"📝 识别结果: {text}")

    # Let the LLM craft the reply.
    response = llm.generate(text)

    # Emit the reply on both output channels.
    tts_engine.speak(response)
    dashboard.display(response)

    return response

# ==== 6. Demo run ====
if __name__ == "__main__":
    # Wire up all mock components once.
    speech = ASRModel()
    hands = GestureRecognizer()
    brain = MockLLM()
    voice = TTSEngine()
    screen = Dashboard()

    print("🚗 智能座舱多模态交互系统已启动！")

    # Voice input demo
    print("\n--- 测试语音输入 ---")
    process_input(InputData(type="voice", content="audio.wav"),
                  speech, hands, brain, voice, screen)

    # Gesture input demo
    print("\n--- 测试手势输入 ---")
    process_input(InputData(type="gesture", content="wave_right"),
                  speech, hands, brain, voice, screen)

    # Text input demo
    print("\n--- 测试文本输入 ---")
    process_input(InputData(type="text", content="导航到最近的加油站"),
                  speech, hands, brain, voice, screen)