import json
import logging
import threading
import queue
import time
import os
from datetime import datetime
import sounddevice as sd
import vosk
import vlc
import dashscope
from dashscope.audio.tts_v2 import SpeechSynthesizer
from openai import OpenAI
from agent_lock import open_mihome_app
from agent_tel import check_adb_connection, make_phone_call
from agent_apps_dd import open_didi_app
from agent_always_on_online import CameraDetector
import argparse

# --- Configuration constants ---
WAKE_WORD = "小黑"  # spoken wake word that must prefix every command
# API key: prefer the DASHSCOPE_API_KEY environment variable; the literal
# placeholder remains only as a fallback so existing setups keep working.
# NOTE(review): never commit a real key here.
API_KEY = os.environ.get("DASHSCOPE_API_KEY", "sk-你的API密钥")
API_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
SAMPLE_RATE = 16000  # Hz; must match the Vosk model's expected sample rate

class AudioManager:
    """Owns microphone capture (sounddevice + Vosk) and TTS playback (DashScope + VLC).

    The recognition thread reads raw PCM chunks from ``audio_queue``; the
    ``is_speaking`` flag tells it to ignore audio while TTS output is playing.
    """

    def __init__(self):
        # Speech recognition with an unrestricted vocabulary.
        recognition_model = vosk.Model("vosk-model-small-cn")
        self.recognizer = vosk.KaldiRecognizer(recognition_model, SAMPLE_RATE)
        self.audio_queue = queue.Queue()
        self.is_speaking = False

        # Text-to-speech credentials.
        dashscope.api_key = API_KEY

        # VLC player is created lazily, on the first call to speak().
        self.player = None

    def speak(self, text):
        """Synthesize *text* to response.mp3 and play it, blocking until done."""
        if not text:  # nothing to say
            return

        self.is_speaking = True
        try:
            # A fresh synthesizer instance is created for every utterance.
            tts = SpeechSynthesizer(
                model="cosyvoice-v1",
                voice="longxiaochun"
            )

            audio_bytes = tts.call(text)
            if not audio_bytes:  # synthesis returned no usable audio
                print("[错误]: 语音合成失败")
                return

            with open('response.mp3', 'wb') as out:
                out.write(audio_bytes)

            # Play the synthesized audio through VLC.
            if self.player:
                self.player.stop()
            self.player = vlc.MediaPlayer('response.mp3')
            self.player.play()
            time.sleep(0.5)  # give VLC a moment to load the file

            # Block until playback finishes.
            while self.player.is_playing():
                time.sleep(0.1)
            self.player.stop()
        except Exception as e:
            print(f"[错误]: 语音合成出错 - {str(e)}")
        finally:
            self.is_speaking = False

    def audio_callback(self, indata, frames, time_info, status):
        # sounddevice callback: hand raw PCM bytes to the recognition thread.
        self.audio_queue.put(bytes(indata))

    def start_recording(self):
        """Open the microphone stream and start queueing raw audio chunks."""
        self.stream = sd.RawInputStream(
            samplerate=SAMPLE_RATE,
            blocksize=1024,  # small blocks keep recognition latency low
            dtype='int16',
            channels=1,
            callback=self.audio_callback,
        )
        self.stream.start()

    def stop_recording(self):
        """Stop and close the microphone stream, if one was ever opened."""
        stream = getattr(self, 'stream', None)
        if stream is not None:
            stream.stop()
            stream.close()
            
class VoiceRecognitionThread(threading.Thread):
    """Background thread that converts microphone audio into wake-word commands.

    Raw audio chunks are pulled from the AudioManager's queue and fed to the
    Vosk recognizer. When a final result starts with the wake word, the text
    after it is pushed onto ``command_queue``. A chime (msg.mp3) is played as
    soon as a partial result hints that the wake word is being spoken.
    """

    def __init__(self, audio_manager, command_queue, wake_word):
        super().__init__()
        self.audio_manager = audio_manager
        self.command_queue = command_queue
        self.wake_word = wake_word
        self.running = True
        self.last_text = ""  # last final result, used to avoid duplicate output
        self.partial_result = ""  # last partial result shown on screen
        # Player for the wake-word confirmation chime.
        self.wake_player = vlc.MediaPlayer('msg.mp3')
        self.wake_sound_played = False  # True once the chime played for this utterance

    def run(self):
        while self.running:
            if self.audio_manager.is_speaking:
                # While the assistant is talking, drop captured audio so the
                # recognizer never processes the assistant's own voice later,
                # and sleep briefly instead of busy-spinning at 100% CPU.
                try:
                    while True:
                        self.audio_manager.audio_queue.get_nowait()
                except queue.Empty:
                    pass
                time.sleep(0.05)
                continue

            try:
                audio_data = self.audio_manager.audio_queue.get(timeout=0.1)

                if self.audio_manager.recognizer.AcceptWaveform(audio_data):
                    # Final recognition result for the current utterance.
                    result = json.loads(self.audio_manager.recognizer.Result())
                    if result.get("text"):
                        text = result["text"].strip()
                        if text and text != self.last_text:  # avoid duplicate output
                            print(f"\r[识别]: {text}", end='', flush=True)
                            self.last_text = text
                            if text.startswith(self.wake_word):
                                command = text[len(self.wake_word):].strip()
                                if command:
                                    print("\n✅ 已识别到唤醒词")
                                    self.command_queue.put(command)
                            self.wake_sound_played = False  # allow the chime again next utterance
                else:
                    # Show intermediate (partial) recognition results live.
                    partial = json.loads(self.audio_manager.recognizer.PartialResult())
                    if partial.get("partial"):
                        partial_text = partial["partial"].strip()
                        if partial_text and partial_text != self.partial_result:
                            print(f"\r[正在识别]: {partial_text}", end='', flush=True)
                            self.partial_result = partial_text
                            # Play the chime once per utterance as soon as the
                            # wake word's second character ("黑") appears.
                            if "黑" in partial_text and not self.wake_sound_played:
                                if self.wake_player:
                                    self.wake_player.stop()
                                    self.wake_player.play()
                                    self.wake_sound_played = True  # prevent repeat chimes

            except queue.Empty:
                continue
            except Exception as e:
                print(f"\r[错误]: 语音识别出错 - {str(e)}", end='', flush=True)
                continue

    def stop(self):
        """Ask the thread's main loop to exit."""
        self.running = False

class VoiceAssistant:
    """Top-level orchestrator: wake-word listening, LLM dialogue and tool calls.

    Wires together the AudioManager (capture + TTS), the recognition thread
    (wake-word commands), an OpenAI-compatible client pointed at DashScope,
    and an optional camera-based emergency monitor.
    """

    def __init__(self, enable_camera=False):
        # Silence all library logging so the console shows only assistant output.
        logging.basicConfig(level=logging.CRITICAL)
        for logger_name in ['httpx', 'httpcore', 'dashscope', 'vosk', 'urllib3', 'openai']:
            logging.getLogger(logger_name).setLevel(logging.CRITICAL)
            logging.getLogger(logger_name).propagate = False

        # Also suppress dashscope's own debug output.
        os.environ['DASHSCOPE_LOG_LEVEL'] = 'CRITICAL'

        self.command_queue = queue.Queue()
        self.audio_manager = AudioManager()
        self.running = True

        # OpenAI-compatible client targeting the DashScope endpoint.
        self.client = OpenAI(
            api_key=API_KEY,
            base_url=API_BASE_URL
        )

        # Sound effect played whenever a tool is about to run.
        self.tool_player = vlc.MediaPlayer('tool_use.mp3')

        # Declare the function-calling tools exposed to the model.
        self._init_tools()

        # Background thread that listens for the wake word.
        self.recognition_thread = VoiceRecognitionThread(
            self.audio_manager,
            self.command_queue,
            WAKE_WORD
        )

        # Optional camera-based emergency monitoring; both attributes are
        # always set so other methods can test them without hasattr.
        if enable_camera:
            self.camera_detector = CameraDetector(emergency_callback=self.handle_emergency)
            self.camera_thread = threading.Thread(target=self._run_camera_monitor)
            self.camera_thread.daemon = True
        else:
            self.camera_detector = None
            self.camera_thread = None

    def _init_tools(self):
        """Build the OpenAI function-calling tool schema used by the model."""
        self.tools = [
            {
                "type": "function",
                "function": {
                    "name": "unlock_door",
                    "description": "解锁门的功能",
                    "parameters": {
                        "type": "object",
                        "properties": {},
                        "required": []
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "make_call",
                    "description": "拨打电话的功能",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "phone_number": {
                                "type": "string",
                                "description": "要拨打的电话号码"
                            }
                        },
                        "required": ["phone_number"]
                    }
                }
            },
            {
                "type": "function",
                "function": {
                    "name": "call_taxi",
                    "description": "打开滴滴出行APP并呼叫出租车",
                    "parameters": {
                        "type": "object",
                        "properties": {},
                        "required": []
                    }
                }
            }
        ]

    def _run_camera_monitor(self):
        """Thread target for the camera-based emergency monitor."""
        self.camera_detector.capture_and_analyze()

    def _play_tool_sound(self):
        """Restart the tool-use sound effect from the beginning."""
        if self.tool_player:
            self.tool_player.stop()
            self.tool_player.play()

    def handle_emergency(self, action_type, data):
        """Callback invoked by the camera monitor when an emergency is detected.

        ``data`` is a phone-number dict for "make_call", otherwise a plain
        message to speak aloud.
        """
        if action_type == "make_call":
            self._play_tool_sound()
            self._execute_make_call(data["phone_number"])
        else:
            # Plain spoken notification.
            self.audio_manager.speak(data)

    def process_command(self, command):
        """Send a recognized command to the LLM and act on its response."""
        try:
            # A fresh voice command resets any pending emergency-call counters.
            if self.camera_detector:
                self.camera_detector.emergency_count = 0
                self.camera_detector.last_emergency_time = 0

            # Acknowledge the command with a short confirmation sound.
            confirm_player = vlc.MediaPlayer('confirm.mp3')
            confirm_player.play()

            # System prompt: tool routing rules plus known contacts.
            messages = [
                {'role': 'system', 'content': """
                 你是一个友好的AI助手。如果用户要求开门，请调用unlock_door功能。
                 如果用户要求拨打电话，请调用make_call功能。
                 如果用户要求叫出租车，请调用call_taxi功能。
                 已知电话号码：
                 - 儿子: 13243784433
                 回答不超过15个字。
                 """},
                {'role': 'user', 'content': command}
            ]

            response = self.client.chat.completions.create(
                model="qwen-plus",
                messages=messages,
                tools=self.tools
            )

            message = response.choices[0].message
            print(f"\n[AI]: {message.content}")

            if getattr(message, 'tool_calls', None):
                tool_call = message.tool_calls[0]

                # Speak the textual reply before running the tool.
                self.audio_manager.speak(message.content)

                tool_name = tool_call.function.name
                if tool_name == "unlock_door":
                    print("[执行]: 开门")
                    self._execute_unlock_door()
                elif tool_name == "make_call":
                    params = json.loads(tool_call.function.arguments)
                    print(f"[执行]: 拨打电话 {params['phone_number']}")
                    self._execute_make_call(params["phone_number"])
                elif tool_name == "call_taxi":
                    print("[执行]: 叫出租车")
                    self._execute_call_taxi()
            else:
                self.audio_manager.speak(message.content)

        except Exception as e:
            print(f"\n[错误]: {str(e)}")
            self.audio_manager.speak("抱歉，处理您的请求时出现错误")

    def _execute_unlock_door(self):
        """Announce and perform the door-unlock action via the MiHome app."""
        self._play_tool_sound()
        self.audio_manager.speak("正在为您开门")
        open_mihome_app()

    def _execute_make_call(self, phone_number):
        """Dial *phone_number* through the ADB-connected phone, if available."""
        self._play_tool_sound()
        if check_adb_connection():
            self.audio_manager.speak("正在拨打电话")
            make_phone_call(phone_number)
        else:
            self.audio_manager.speak("手机未连接，请检查连接状态")

    def _execute_call_taxi(self):
        """Announce and open the DiDi app to hail a taxi."""
        self._play_tool_sound()
        self.audio_manager.speak("正在为您叫车")
        open_didi_app()

    def run(self):
        """Start audio capture and process commands until interrupted."""
        try:
            print(f"🎤 语音助手已启动，用 {WAKE_WORD} 唤醒我")
            self.audio_manager.start_recording()
            self.recognition_thread.start()

            # The camera thread exists only when monitoring was enabled.
            if self.camera_thread:
                self.camera_thread.start()
                print("📸 摄像头监控已启动")

            while self.running:
                try:
                    command = self.command_queue.get(timeout=0.1)
                    if command:
                        print(f"[指令]: {command}")
                        self.process_command(command)
                except queue.Empty:
                    continue
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        """Stop all worker threads and release the microphone stream."""
        self.running = False
        # camera_detector is None when monitoring was never enabled; a bare
        # hasattr check would pass and then crash assigning on None.
        if getattr(self, 'camera_detector', None):
            self.camera_detector.running = False
        self.recognition_thread.running = False
        self.audio_manager.stop_recording()

if __name__ == "__main__":
    # Parse command-line options controlling optional features.
    arg_parser = argparse.ArgumentParser(description='启动语音助手')
    arg_parser.add_argument('-c', '--camera', action='store_true',
                            help='启用摄像头监控功能')
    cli_args = arg_parser.parse_args()

    # Build and run the assistant; camera monitoring is opt-in via -c/--camera.
    VoiceAssistant(enable_camera=cli_args.camera).run()