#!/usr/bin/env python3

import gc
import json
import os
import re
import shutil
import subprocess
import tempfile
import threading
import time
from concurrent.futures import ThreadPoolExecutor, TimeoutError

import requests
from vosk import Model, KaldiRecognizer

class NoiseReducedRoboticArmControl:
    def __init__(self):
        """Load the speech model, configure audio devices and arm state, and start the system."""
        # Locate the Vosk Chinese model: prefer the parent directory, fall back to CWD.
        model_path = "../vosk-model-small-cn-0.22"
        if not os.path.exists(model_path):
            model_path = "vosk-model-small-cn-0.22"

        try:
            self.model = Model(model_path)
            print(f"✅ 语音模型加载成功: {model_path}")
        except Exception as e:
            print(f"❌ 语音模型加载失败: {e}")
            self.model = None

        # SECURITY: prefer a key from the environment; the literal below is a
        # development fallback and should be rotated/removed before release.
        self.api_key = os.environ.get(
            "MODELSCOPE_API_KEY", "ms-fecb8eca-0503-44e9-9cf2-4870bb0ed37c"
        )
        self.base_url = "https://api-inference.modelscope.cn/v1"
        self.model_name = "deepseek-ai/DeepSeek-V3.2-Exp"
        self.speaking = False  # True while a playback/TTS thread is active

        # USB audio device identifiers (ALSA names); speaker and mic share one card.
        self.usb_speaker_device = "hw:CARD=Y02,DEV=0"
        self.usb_mic_device = "hw:CARD=Y02,DEV=0"

        # Last commanded servo positions (0-1000 per servo; gripper 0 = closed).
        self.arm_state = {
            "base": 500, "shoulder": 500, "elbow": 500, 
            "wrist_vert": 500, "wrist_rot": 500, "gripper": 0
        }

        # Logical servo name -> hardware servo id.
        self.servo_ids = {
            "base": 1, "shoulder": 2, "elbow": 3, 
            "wrist_vert": 4, "wrist_rot": 5, "gripper": 6
        }

        # Small insertion-ordered command cache, plus a single-worker executor
        # so API calls can be given a hard timeout from the caller's thread.
        self.command_cache = {}
        self.cache_size = 8
        self.api_executor = ThreadPoolExecutor(max_workers=1)
        self.last_api_success = True

        # Pre-create the recognizer (16 kHz) so the first transcription is fast.
        if self.model:
            self.recognizer = KaldiRecognizer(self.model, 16000)
        else:
            self.recognizer = None

        # Recording parameters: stop after this much trailing silence, cap the
        # total length, and treat normalized RMS above the threshold as speech.
        self.silence_timeout = 2.0
        self.max_record_time = 8.0
        self.voice_threshold = 0.02

        # True while actively capturing microphone input.
        self.listening = False

        print("🤖 机械臂语音控制系统已启动")
        self.play_voice("system_start")
        self.test_audio_devices()
    
    def set_listening_status(self, status):
        """Record whether the microphone is being monitored and show a hint."""
        self.listening = status
        if not status:
            # Terminate the inline "listening" indicator with a newline.
            print("")
            return
        print("👂 监听中...", end="", flush=True)
    
    def play_voice(self, voice_name):
        """Play a pre-generated prompt WAV; fall back to TTS if it is missing."""
        voice_file = f"../voice_prompts/{voice_name}.wav"

        if not os.path.exists(voice_file):
            print(f"❌ 语音文件不存在: {voice_file}")
            # No pre-rendered file: synthesize the prompt text instead.
            self.speak_fallback(voice_name)
            return

        # Never start a second playback while one is running.
        if self.speaking:
            return

        def _worker():
            self.speaking = True
            try:
                print(f"🔊 播放: {voice_name}")
                # Quietly play the prompt through the USB speaker with aplay.
                cmd = ["aplay", "-D", self.usb_speaker_device, "-q", voice_file]
                subprocess.run(cmd, capture_output=True, timeout=10)
            except Exception as e:
                print(f"❌ 播放语音错误 {voice_name}: {e}")
                # Playback failed: degrade to the TTS fallback.
                self.speak_fallback(voice_name)
            finally:
                self.speaking = False

        threading.Thread(target=_worker, daemon=True).start()
    
    def speak_fallback(self, voice_name):
        """Map a prompt name to its Chinese text and speak it via the TTS fallback."""
        # Prompt-name -> spoken-text table used when no WAV file is available.
        prompt_table = {
            "system_start": "机械臂语音控制系统已启动",
            "system_ready": "系统就绪",
            "system_exit": "退出系统，再见",
            "menu_options": "请选择选项",
            "enter_voice_mode": "进入语音控制模式",
            "enter_reset_mode": "执行机械臂复位",
            "enter_status_mode": "读取机械臂状态",
            "record_start": "请说话",
            "record_end": "录音结束",
            "record_timeout": "录音超时",
            "record_failed": "录音失败",
            "no_voice_detected": "未检测到语音",
            "no_audio_data": "没有录到音频数据",
            "processing": "正在处理",
            "analyzing_command": "正在分析指令",
            "transcribing": "正在转换语音",
            "api_calling": "正在调用接口",
            "execution_start": "开始执行命令",
            "sending_command": "发送控制指令",
            "execution_progress": "指令执行中",
            "execution_halfway": "执行过半",
            "execution_complete": "动作执行完成",
            "status_updated": "状态已更新",
            "command_parse_failed": "指令解析失败",
            "command_not_understood": "无法理解命令",
            "option_not_recognized": "选项无法识别",
            "voice_not_recognized": "没有识别到语音",
            "reading_status": "读取机械臂状态",
            "listening_start": "开始监听语音输入",
            "listening_stop": "停止监听",
        }
        # Unknown names fall back to a generic prompt.
        self.speak_direct(prompt_table.get(voice_name, "语音提示"))
    
    def speak_direct(self, text):
        """Speak *text* with espeak in a background thread (fallback TTS)."""
        # Skip if another playback thread is already running.
        if self.speaking:
            return

        def _tts():
            self.speaking = True
            try:
                print(f"🔊 {text}")
                subprocess.run(
                    ["espeak", "-v", "zh", "-s", "150", text],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    timeout=5,
                )
            except Exception as e:
                print(f"❌ 语音播放错误: {e}")
            finally:
                self.speaking = False

        worker = threading.Thread(target=_tts, daemon=True)
        worker.start()
    
    def wait_for_speech_completion(self):
        """Block (at most 10 s) until any playback thread clears the speaking flag."""
        deadline = time.time() + 10
        while self.speaking:
            if time.time() >= deadline:
                break  # give up rather than hang on a stuck playback thread
            time.sleep(0.1)

    def cleanup_memory(self):
        """Reset the recognizer state and force a garbage-collection pass.

        Called between interactions to keep the long-running process lean.
        """
        try:
            # Guard against a missing *or* None recognizer (the model load may
            # have failed in __init__); previously a None recognizer raised
            # AttributeError here and only produced a spurious warning.
            recognizer = getattr(self, 'recognizer', None)
            if recognizer is not None:
                recognizer.Reset()
            gc.collect()
        except Exception as e:
            print(f"内存清理警告: {e}")

    def detect_voice_activity(self, audio_data, sample_rate=44100, channels=2):
        """Return (is_speech, normalized_rms) for a raw S16_LE audio chunk.

        Args:
            audio_data: raw little-endian 16-bit PCM bytes.
            sample_rate: nominal sample rate (currently unused; kept for API stability).
            channels: 1 for mono, 2 for interleaved stereo (averaged to mono).
        """
        try:
            import numpy as np
            audio_array = np.frombuffer(audio_data, dtype=np.int16)

            # An empty chunk previously propagated NaN out of np.mean (with
            # RuntimeWarnings); treat it explicitly as silence.
            if audio_array.size == 0:
                return False, 0.0

            if channels == 2:
                # Interleaved stereo -> mono by averaging each channel pair.
                audio_array = audio_array.reshape(-1, 2)
                audio_array = audio_array.mean(axis=1).astype(np.int16)

            # RMS loudness, normalized to [0, 1] against the int16 full scale.
            rms = np.sqrt(np.mean(audio_array.astype(np.float32) ** 2))
            normalized_rms = rms / 32768.0

            # Plain threshold test for voice activity.
            is_speech = bool(normalized_rms > self.voice_threshold)

            return is_speech, normalized_rms

        except Exception as e:
            print(f"语音检测错误: {e}")
            return False, 0.0

    def remove_silence_from_audio(self, input_file, output_file):
        """去除音频中的静音部分"""
        try:
            # 使用sox去除静音
            result = subprocess.run([
                "sox", input_file, output_file,
                "silence", "-l", "1", "0.1", "1%",   # 去除开头静音
                "-1", "0.1", "1%"                    # 去除结尾静音
            ], capture_output=True, timeout=10)
            
            if result.returncode == 0 and os.path.exists(output_file):
                # 检查处理后的文件大小
                original_size = os.path.getsize(input_file)
                processed_size = os.path.getsize(output_file)
                
                print(f"✅ 静音去除完成: {original_size//1024}KB → {processed_size//1024}KB")
                
                # 如果文件太小，可能是过度去除了，使用原始文件
                if processed_size < 10000:  # 小于10KB
                    print("⚠️ 文件过小，使用原始录音")
                    subprocess.run(["cp", input_file, output_file])
                    return True
                
                return True
            else:
                print("❌ 静音去除失败，使用原始文件")
                subprocess.run(["cp", input_file, output_file])
                return False
                
        except Exception as e:
            print(f"静音去除错误: {e}")
            # 出错时复制原始文件
            try:
                subprocess.run(["cp", input_file, output_file])
                return True
            except:
                return False

    def record_voice_activated(self, max_duration=8):
        """Record from the USB mic until speech stops, then trim silence.

        Streams raw audio from `arecord`, uses RMS-based voice activity
        detection per chunk to decide when speech starts and stops, and ends
        after `self.silence_timeout` seconds of trailing silence or
        `self.max_record_time` seconds total.

        NOTE(review): the `max_duration` parameter is never read — the cap
        comes from `self.max_record_time`; confirm whether that is intended.

        Returns the path of a temporary WAV file, or None on failure.
        """
        self.wait_for_speech_completion()
        self.play_voice("record_start")
        time.sleep(1)  # give the user time to react before capture begins
        
        try:
            # Stream raw S16_LE 44.1 kHz stereo samples to stdout.
            cmd = [
                "arecord", "-D", self.usb_mic_device,
                "-f", "S16_LE", "-r", "44100", "-c", "2",
                "-t", "raw"
            ]
            
            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
            
            audio_chunks = []
            voice_detected = False
            silence_start_time = None
            recording_start_time = time.time()
            last_voice_time = recording_start_time
            
            # Enter the listening state (adds its own status line).
            self.set_listening_status(True)
            print("🔊 监听中...", end="", flush=True)
            
            while True:
                chunk = process.stdout.read(4096)
                if not chunk:
                    break
                
                audio_chunks.append(chunk)
                current_time = time.time()
                
                # RMS-threshold voice activity detection on this chunk.
                has_voice, volume = self.detect_voice_activity(chunk)
                
                if has_voice:
                    if not voice_detected:
                        voice_detected = True
                        print(f"\n🎯 检测到语音!", end="", flush=True)
                    
                    # Any speech resets the trailing-silence countdown.
                    last_voice_time = current_time
                    silence_start_time = None
                    print("💬", end="", flush=True)
                else:
                    if voice_detected:
                        # Speech already started: count down through silence.
                        if silence_start_time is None:
                            silence_start_time = current_time
                            print("\n⏳ 倒计时: 2", end="", flush=True)
                        else:
                            silence_duration = current_time - silence_start_time
                            countdown = max(0, 2 - int(silence_duration))
                            if countdown < 2:
                                print(f"\r⏳ 倒计时: {countdown}", end="", flush=True)
                            
                            # Enough trailing silence: the utterance is over.
                            if silence_duration >= self.silence_timeout:
                                print(f"\n✅ 录音结束")
                                break
                    else:
                        # Still waiting for speech: show an ambient-noise marker.
                        wait_time = current_time - recording_start_time
                        if wait_time % 1 < 0.1:
                            noise_level = "⚡" if volume > 0.01 else "🔇"
                            print(noise_level, end="", flush=True)
                
                # Hard cap on total recording time.
                if current_time - recording_start_time >= self.max_record_time:
                    print(f"\n⚠️ 达到最大录音时间")
                    break
            
            process.terminate()
            process.wait()
            
            # Leave the listening state and let any prompt finish.
            self.set_listening_status(False)
            self.wait_for_speech_completion()
            
            if not voice_detected:
                print("\n❌ 未检测到语音")
                self.play_voice("no_voice_detected")
                return None
            
            if not audio_chunks:
                print("\n❌ 没有录到音频数据")
                self.play_voice("no_audio_data")
                return None
            
            # Persist the captured chunks as a WAV file.
            try:
                with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
                    raw_filename = temp_file.name
                
                # Pipe the raw samples through sox to wrap them in a WAV header.
                sox_process = subprocess.Popen([
                    "sox", "-t", "raw", "-r", "44100", "-e", "signed", "-b", "16", "-c", "2", "-",
                    "-t", "wav", raw_filename
                ], stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
                
                for chunk in audio_chunks:
                    sox_process.stdin.write(chunk)
                
                sox_process.stdin.close()
                sox_process.wait()
                
                if sox_process.returncode == 0 and os.path.exists(raw_filename):
                    # Trim leading/trailing silence from the capture.
                    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
                        processed_filename = temp_file.name
                    
                    if self.remove_silence_from_audio(raw_filename, processed_filename):
                        os.unlink(raw_filename)
                        file_size = os.path.getsize(processed_filename)
                        print(f"✅ 处理后文件大小: {file_size//1024}KB")
                        self.play_voice("record_end")
                        return processed_filename
                    else:
                        # Trimming failed: hand back the untrimmed recording.
                        file_size = os.path.getsize(raw_filename)
                        print(f"✅ 原始录音文件大小: {file_size//1024}KB")
                        self.play_voice("record_end")
                        return raw_filename
                else:
                    self.play_voice("record_failed")
                    return None
                    
            except Exception as e:
                print(f"文件处理错误: {e}")
                self.play_voice("record_failed")
                return None
                
        except Exception as e:
            print(f"录音错误: {e}")
            self.set_listening_status(False)
            self.play_voice("record_failed")
            return None

    def record_audio_fallback(self, duration=5):
        """Fixed-length fallback recording, silence-trimmed like the main path.

        Returns the path of the recorded WAV file, or None on failure.
        """
        self.wait_for_speech_completion()
        self.play_voice("record_start")
        time.sleep(1)  # give the user a moment before capture starts

        print(f"🎤 备用录音 {duration}秒...")

        try:
            with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as raw_file:
                raw_name = raw_file.name

            self.set_listening_status(True)

            # Blocking fixed-duration capture from the USB microphone.
            capture = subprocess.run([
                "arecord", "-D", self.usb_mic_device,
                "-f", "S16_LE", "-r", "44100", "-c", "2",
                "-d", str(duration), raw_name
            ], capture_output=True, timeout=duration + 2)

            self.set_listening_status(False)
            self.wait_for_speech_completion()

            if capture.returncode != 0 or not os.path.exists(raw_name):
                self.play_voice("record_failed")
                return None

            # Run the same silence trimming as the primary recording path.
            with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as trimmed:
                trimmed_name = trimmed.name

            if self.remove_silence_from_audio(raw_name, trimmed_name):
                os.unlink(raw_name)
                file_size = os.path.getsize(trimmed_name)
                print(f"✅ 备用录音完成, 大小: {file_size//1024}KB")
                self.play_voice("record_end")
                return trimmed_name

            # Trimming failed: return the untrimmed recording.
            file_size = os.path.getsize(raw_name)
            print(f"✅ 备用录音完成, 大小: {file_size//1024}KB (未处理)")
            self.play_voice("record_end")
            return raw_name

        except Exception as e:
            print(f"备用录音错误: {e}")
            self.set_listening_status(False)
            self.play_voice("record_failed")
            return None

    def test_audio_devices(self):
        """测试音频设备"""
        print("🔊 测试音频设备...")
        
        # 测试麦克风
        try:
            test_file = "/tmp/mic_test.wav"
            result = subprocess.run([
                "arecord", "-D", self.usb_mic_device,
                "-f", "S16_LE", "-r", "44100", "-c", "2", "-d", "2",
                test_file
            ], capture_output=True, timeout=5)
            
            if result.returncode == 0 and os.path.exists(test_file):
                file_size = os.path.getsize(test_file)
                if file_size > 10000:
                    print("✅ 麦克风测试通过")
                else:
                    print("⚠️ 麦克风录音文件较小")
                os.unlink(test_file)
            else:
                print("⚠️ 麦克风测试基本通过")
                
        except Exception as e:
            print(f"⚠️ 麦克风测试异常: {e}")

    def convert_to_mono(self, stereo_file):
        """Resample a recording to 16 kHz mono for Vosk; return the usable path.

        Returns the new mono file (original deleted) on success, the original
        path if conversion fails, or None when the input is missing.
        """
        if not stereo_file or not os.path.exists(stereo_file):
            return None

        try:
            mono_file = stereo_file + "_mono.wav"

            # sox handles both the rate conversion and the channel downmix.
            result = subprocess.run(
                ["sox", stereo_file, "-r", "16000", "-c", "1", mono_file],
                capture_output=True, timeout=5,
            )

            if result.returncode == 0 and os.path.exists(mono_file):
                # Conversion succeeded; the stereo original is no longer needed.
                os.unlink(stereo_file)
                return mono_file
            return stereo_file

        except Exception as e:
            print(f"音频转换错误: {e}")
            return stereo_file

    def transcribe_audio_optimized(self, audio_file):
        """Transcribe a WAV recording with Vosk and return the recognized text.

        Converts the file to 16 kHz mono first, feeds it to the recognizer in
        chunks, deletes the temporary files, and returns "" on any failure.
        """
        if not audio_file or not os.path.exists(audio_file):
            return ""
            
        start_time = time.time()
        
        self.wait_for_speech_completion()
        self.play_voice("transcribing")
            
        try:
            # Recordings under ~50 KB are too short to contain a usable command.
            file_size = os.path.getsize(audio_file)
            if file_size < 50000:
                print("❌ 录音文件过小")
                os.unlink(audio_file)
                return ""
            
            # Vosk expects 16 kHz mono input.
            mono_file = self.convert_to_mono(audio_file)
            if not mono_file:
                return ""
            
            # Lazily rebuild the recognizer if it was not pre-created.
            # NOTE(review): when the model failed to load, self.model is None
            # and this raises — swallowed by the broad except below.
            if not self.recognizer:
                self.recognizer = KaldiRecognizer(self.model, 16000)
            
            text_result = ""
            
            with open(mono_file, 'rb') as f:
                audio_data = f.read()
            
            # Feed the audio in 8 KB chunks, collecting finalized segments.
            chunk_size = 8000
            for i in range(0, len(audio_data), chunk_size):
                chunk = audio_data[i:i+chunk_size]
                if self.recognizer.AcceptWaveform(chunk):
                    result = json.loads(self.recognizer.Result())
                    if result['text']:
                        text_result += result['text'] + " "
            
            # Flush whatever remains buffered in the recognizer.
            final_result = json.loads(self.recognizer.FinalResult())
            if final_result['text']:
                text_result += final_result['text']
            
            if os.path.exists(mono_file):
                os.unlink(mono_file)
            
            transcribe_time = time.time() - start_time
            print(f"✅ 转录完成: {transcribe_time:.1f}s, 文本: {text_result.strip()}")
            
            return text_result.strip()
            
        except Exception as e:
            print(f"转录错误: {e}")
            # Best-effort cleanup of both the original and the mono temp file.
            for file_path in [audio_file, audio_file + "_mono.wav"]:
                if os.path.exists(file_path):
                    os.unlink(file_path)
            return ""

    def analyze_voice_command_with_timeout(self, text, timeout=20):
        """Parse a spoken command via the LLM API, bounded by *timeout* seconds.

        Results are memoized in a small FIFO cache keyed by the stripped text.
        Returns the parsed command dict, or {"error": ...} on failure.
        """
        if not text.strip():
            return {"error": "空指令"}

        cache_key = text.strip()
        cached = self.command_cache.get(cache_key)
        if cached is not None:
            print("💾 使用缓存命令")
            return cached

        self.wait_for_speech_completion()
        self.play_voice("api_calling")

        try:
            print(f"🔄 API调用 (超时: {timeout}s)...")
            started = time.time()

            # Run the request on the single-worker executor so a hard timeout
            # can be enforced from this thread.
            future = self.api_executor.submit(self._call_voice_api, text)
            result = future.result(timeout=timeout)

            print(f"✅ API响应: {time.time() - started:.1f}s")

            if "error" in result:
                self.last_api_success = False
            else:
                # Evict the oldest entry once full (dicts keep insertion
                # order), then remember this parse.
                if len(self.command_cache) >= self.cache_size:
                    oldest = next(iter(self.command_cache))
                    del self.command_cache[oldest]
                self.command_cache[cache_key] = result
                self.last_api_success = True

            return result

        except TimeoutError:
            print("❌ API调用超时")
            self.last_api_success = False
            return {"error": "API调用超时"}
        except Exception as e:
            print(f"❌ API调用异常: {e}")
            self.last_api_success = False
            return {"error": f"API调用异常: {e}"}
    
    def _call_voice_api(self, text):
        """Send *text* to the chat-completions API and parse the JSON command.

        Returns the parsed command dict, or {"error": ...} on HTTP or parse
        failure. Runs on the API executor thread (see the caller).
        """
        system_prompt = """你是一个机械臂控制专家。请将用户的语音命令转换为具体的舵机控制指令。

机械臂有6个舵机：
1. base (底座): 0-1000位置，控制整体左右旋转
2. shoulder (肩部): 0-1000位置，控制大臂上下俯仰
3. elbow (肘部): 0-1000位置，控制小臂前后伸展
4. wrist_vert (腕部垂直): 0-1000位置，控制手腕上下摆动
5. wrist_rot (腕部旋转): 0-1000位置，控制手腕水平旋转
6. gripper (夹爪): 0-1000位置，0为闭合，1000为完全打开

请根据用户指令，生成JSON格式的控制指令，包含以下字段：
- action: 动作描述
- commands: 舵机控制列表，每个命令包含 servo(舵机名称) 和 position(目标位置，0-1000)
- duration: 执行时间(秒)

请只返回JSON格式，不要其他文字。"""

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        
        data = {
            "model": self.model_name,
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": f"当前状态: {self.arm_state}。指令: {text}"}
            ],
            "temperature": 0.3,  # low temperature keeps the command JSON deterministic
            "max_tokens": 500
        }
        
        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=data,
                timeout=25
            )
            
            if response.status_code != 200:
                return {"error": f"API调用失败: {response.status_code}"}

            result = response.json()
            response_text = result["choices"][0]["message"]["content"].strip()

            try:
                return json.loads(response_text)
            except json.JSONDecodeError:
                # The model sometimes wraps the JSON in prose: extract the
                # outermost brace pair and retry. (Was a bare `except:` that
                # could also hide unrelated errors.)
                json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
                if json_match:
                    return json.loads(json_match.group())
                return {"error": "无法解析AI响应"}

        except Exception as e:
            return {"error": f"API调用异常: {e}"}
    
    def execute_commands_optimized(self, command_data):
        """Execute a parsed command dict: announce it, update state, dispatch it.

        Args:
            command_data: {"action": str, "commands": [{"servo", "position"}, ...],
                           "duration": seconds} or {"error": ...}.
        Returns:
            True when the command was dispatched, False on a parse error.
        """
        if "error" in command_data:
            error_msg = f"错误: {command_data['error']}"
            self.play_voice("command_parse_failed")
            print(f"❌ {error_msg}")
            return False
        
        action = command_data.get('action', '未知动作')
        duration = command_data.get('duration', 2)
        
        # Execute immediately, without asking for confirmation.
        self.play_voice("execution_start")
        print(f"🎯 执行动作: {action}")
        
        commands = command_data.get("commands", [])
        
        # Speak out the individual servo targets.
        if commands:
            command_details = []
            for cmd in commands:
                servo = cmd.get("servo", "")
                position = cmd.get("position", 0)
                if servo:
                    command_details.append(f"{servo}到{position}位置")
            
            if command_details:
                detail_text = "，".join(command_details)
                self.speak_direct(f"具体动作：{detail_text}")
        
        # Build the movemulti argument in fixed servo order; "-" keeps a servo
        # at its current position. Applied targets update the cached state.
        positions = []
        for servo_name in self.servo_ids.keys():
            target_pos = None
            for cmd in commands:
                if cmd.get("servo") == servo_name:
                    target_pos = cmd.get("position")
                    break
            
            if target_pos is not None:
                positions.append(str(target_pos))
                self.arm_state[servo_name] = target_pos
            else:
                positions.append("-")
        
        movemulti_cmd = f"movemulti {','.join(positions)}"
        self.play_voice("sending_command")
        print(f"[硬件控制] {movemulti_cmd}")
        self.send_command_to_arm(movemulti_cmd)
        
        # Announce completion asynchronously once the motion time has elapsed.
        def _wait_and_notify():
            time.sleep(duration)
            self.play_voice("execution_complete")
        
        threading.Thread(target=_wait_and_notify, daemon=True).start()
        
        self.play_voice("execution_progress")
        print(f"⏳ 执行中...", end="", flush=True)
        # BUG FIX: the API may return `duration` as a float, and range()
        # requires an int — int() preserves the old behavior for integer
        # durations while no longer raising TypeError on floats. The blocking
        # progress wait stays capped at 3 seconds.
        for i in range(min(3, int(duration))):
            time.sleep(1)
            print(".", end="", flush=True)
        print(" 完成!")
        
        self.play_voice("status_updated")
        print(f"📍 更新后状态: {self.arm_state}")
        return True
    
    def send_command_to_arm(self, command):
        """Forward a control string to the arm controller (currently a log-only stub)."""
        # Hardware link not wired up here; echo the command for traceability.
        message = f"[发送命令] {command}"
        print(message)
    
    def interactive_control_optimized(self):
        """Main voice-driven menu loop: record a spoken choice, then dispatch it.

        Loops until the user selects the exit option; each iteration records a
        menu choice and routes to voice-control / reset / status handling.
        """
        self.play_voice("system_ready")
        
        while True:
            # Reset recognizer state and collect garbage between interactions.
            self.cleanup_memory()
            
            menu_options = {
                "1": "语音控制",
                "2": "复位机械臂", 
                "3": "读取状态",
                "4": "退出系统"
            }
            
            print("\n" + "="*50)
            print("🤖 机械臂语音控制系统")
            print("="*50)
            
            print("\n📋 可用选项:")
            for key, description in menu_options.items():
                print(f"  {key}. {description}")
            
            self.play_voice("menu_options")
            
            # Voice-activated recording; falls back to fixed-length capture.
            audio_file = self.record_voice_activated()
            if not audio_file:
                self.play_voice("record_failed")
                audio_file = self.record_audio_fallback(duration=4)
            
            if audio_file:
                command = self.transcribe_audio_optimized(audio_file)
                print(f"📝 识别结果: {command}")
                
                if command:
                    # Match the digit, the Chinese numeral, or a keyword.
                    if "1" in command or "一" in command or "语音" in command:
                        self.play_voice("enter_voice_mode")
                        self.voice_control_mode_optimized()
                    elif "2" in command or "二" in command or "复位" in command:
                        self.play_voice("enter_reset_mode")
                        self.reset_arm_optimized()
                    elif "3" in command or "三" in command or "状态" in command:
                        self.play_voice("enter_status_mode")
                        self.read_servo_status_optimized()
                    elif "4" in command or "四" in command or "退出" in command:
                        self.play_voice("system_exit")
                        break
                    else:
                        self.play_voice("option_not_recognized")
                        print("❌ 无法识别选项")
                else:
                    self.play_voice("voice_not_recognized")
                    print("❌ 没有识别到语音")
            else:
                self.play_voice("record_failed")
                print("❌ 录音失败")
    
    def voice_control_mode_optimized(self):
        """Record one free-form command, parse it via the API, and execute it."""
        self.play_voice("enter_voice_mode")
        print("\n🎤 进入语音控制模式")
        self.play_voice("listening_start")  # announce that listening has begun
        
        total_start_time = time.time()
        
        # Voice-activated recording first; fixed-length capture as fallback.
        audio_file = self.record_voice_activated()
        if not audio_file:
            self.play_voice("record_failed")
            print("🔄 尝试备用录音...")
            audio_file = self.record_audio_fallback(duration=5)
        
        if not audio_file:
            self.play_voice("record_failed")
            return
        
        command = self.transcribe_audio_optimized(audio_file)
        if not command:
            self.play_voice("voice_not_recognized")
            return
        
        self.speak_direct(f"收到指令：{command}")
        print(f"🗣️ 识别指令: {command}")
        
        # Turn the transcript into structured servo commands via the LLM API.
        self.play_voice("analyzing_command")
        command_data = self.analyze_voice_command_with_timeout(command)
        
        if "error" in command_data:
            error_msg = command_data["error"]
            self.play_voice("command_parse_failed")
            print(f"❌ 指令解析失败: {error_msg}")
            return
        
        action = command_data.get('action', '未知动作')
        duration = command_data.get('duration', 2)
        
        self.speak_direct(f"解析到动作：{action}")
        print(f"🔍 解析动作: {action}, 时长: {duration}秒")
        
        # Execute immediately — no confirmation step, to keep latency low.
        total_time = time.time() - total_start_time
        self.speak_direct(f"开始执行，处理时间{total_time:.1f}秒")
        print(f"⏱️ 总处理时间: {total_time:.1f}秒")
        self.execute_commands_optimized(command_data)
    
    def reset_arm_optimized(self):
        """Drive every joint back to its neutral pose (500; gripper closed)."""
        self.play_voice("enter_reset_mode")
        print("\n🔄 复位机械臂")
        
        # Neutral pose: mid-travel for every joint, gripper fully closed.
        neutral_pose = {"base": 500, "shoulder": 500, "elbow": 500,
                        "wrist_vert": 500, "wrist_rot": 500, "gripper": 0}
        reset_commands = {
            "action": "复位机械臂",
            "commands": [
                {"servo": name, "position": pos}
                for name, pos in neutral_pose.items()
            ],
            "duration": 2
        }
        self.execute_commands_optimized(reset_commands)
    
    def read_servo_status_optimized(self):
        """Announce and print the last commanded position of every servo."""
        self.play_voice("enter_status_mode")
        print("\n📊 读取舵机状态")
        # Same trailing ", " per servo as the original concatenation produced.
        parts = [f"{name}位置{pos}, " for name, pos in self.arm_state.items()]
        status_text = "舵机状态: " + "".join(parts)
        
        self.speak_direct(status_text)
        print(f"📊 {status_text}")
    
    def __del__(self):
        """Best-effort teardown: stop the API executor and free memory."""
        # __del__ may run on a partially-initialized instance, hence the guard.
        if hasattr(self, 'api_executor'):
            self.api_executor.shutdown(wait=False)
        self.cleanup_memory()

def main():
    """Entry point: build the controller and run the interactive menu loop."""
    # Pre-bind `controller` so the finally-block can test it directly instead
    # of the fragile `'controller' in locals()` probe the original used.
    controller = None
    try:
        controller = NoiseReducedRoboticArmControl()
        controller.interactive_control_optimized()
    except KeyboardInterrupt:
        print("\n\n👋 用户中断，退出系统")
    except Exception as e:
        print(f"\n❌ 系统错误: {e}")
    finally:
        # Only clean up when construction actually succeeded.
        if controller is not None:
            controller.cleanup_memory()

if __name__ == "__main__":
    main()