import asyncio  # 异步编程核心库，用于处理WebSocket连接、音频采集等异步操作
import uuid  # 生成唯一标识符，用于会话ID、请求ID等
import os  # 操作系统接口，用于文件路径操作、环境变量等
import sys  # Python系统相关参数和函数，用于模块路径管理
import logging  # 日志记录模块，用于调试和运行状态跟踪
import threading  # 多线程支持，用于音频采集和播放的后台线程
import numpy as np  # 科学计算库，用于音频信号处理（增益、压缩、噪声门限等）
from pathlib import Path  # 面向对象的文件系统路径操作
from dataclasses import dataclass  # 数据类装饰器，用于创建配置和音频数据结构
from typing import Optional, List, Dict  # 类型注解，提供静态类型检查
import websockets  # WebSocket客户端和服务器实现，用于实时通信
from websockets import Headers  # WebSocket请求头处理
import json  # JSON数据解析和序列化，用于消息格式转换
import pyaudio  # 跨平台音频I/O库，用于麦克风采集和扬声器播放
import time  # 时间相关函数，用于日志时间戳、性能监控等
import queue  # 线程安全的队列实现，用于音频数据缓冲
from google.protobuf.json_format import MessageToDict  # Protocol Buffers与JSON格式互转
import wave  # WAV音频文件读写，用于音频数据格式处理
import io  # 内存中的文件操作，用于音频数据流处理
import struct  # Python值和C结构体之间的转换，用于音频数据打包/解包

# Make the package root importable so the generated protobuf modules
# (python_protogen.*) below resolve regardless of the working directory.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)

from python_protogen.products.understanding.ast.ast_service_pb2 import TranslateRequest, TranslateResponse
from python_protogen.common.events_pb2 import Type

# Client configuration for the translation service connection.
@dataclass
class Config:
    """Connection and authentication settings for the translation service."""

    ws_url: str  # WebSocket endpoint URL
    app_key: str  # application key (auth)
    access_key: str  # access key (auth)
    resource_id: str  # service resource identifier

@dataclass
class Audio:
    """A chunk of audio plus its format metadata.

    All fields default to ``None``; ``binary_data`` holds the raw encoded
    bytes and the remaining fields describe the encoding.
    """

    format: Optional[str] = None  # container/codec name, e.g. "pcm" / "wav" / "ogg"
    rate: Optional[int] = None  # sample rate in Hz
    bits: Optional[int] = None  # bits per sample
    channel: Optional[int] = None  # channel count
    binary_data: Optional[bytes] = None  # raw audio payload

@dataclass
class TranslateRequestData:
    """Payload describing one translation event sent over the WebSocket."""

    session_id: str  # id tying all events of one session together
    event: str  # event name/type for this message
    source_audio: Optional[Audio] = None  # captured microphone audio
    target_audio: Optional[Audio] = None  # presumably audio returned by the service -- confirm against protocol
    mode: Optional[str] = None  # translation mode; semantics defined by the service -- TODO confirm
    source_language: Optional[str] = None  # source language code
    target_language: Optional[str] = None  # target language code

# 增强的实时音频采集类
class EnhancedRealtimeAudioStreamer:
    def __init__(self, chunk_duration=0.1, sample_rate=16000, channels=1, queue_maxsize=50, audio_mode="play"):
        """Set up capture, playback, buffering and enhancement state.

        Args:
            chunk_duration: seconds of audio per capture callback chunk.
            sample_rate: capture sample rate in Hz.
            channels: capture channel count.
            queue_maxsize: max accumulated blocks buffered for the async sender.
            audio_mode: "play" = play translations live, "save" = write WAV files.
        """
        self.chunk_size = int(sample_rate * chunk_duration)  # frames per capture buffer
        self.sample_rate = sample_rate
        self.channels = channels
        # NOTE(review): this asyncio.Queue is created outside a running event
        # loop and fed from the PyAudio callback thread -- confirm the consumer
        # always runs in the loop that first awaits it.
        self.audio_queue = asyncio.Queue(maxsize=queue_maxsize)
        self.is_recording = False
        self.pyaudio_instance = pyaudio.PyAudio()
        self.recording_thread = None
        
        # Playback mode: "play" - real-time playback, "save" - save to file only
        self.audio_mode = audio_mode
        
        # Playback state
        self.playback_instance = pyaudio.PyAudio()
        self.playback_stream = None
        self.playback_queue = queue.Queue(maxsize=20)
        self.is_playing = False
        self.playback_thread = None
        
        # File-saving state
        self.save_directory = Path("output")
        self.save_directory.mkdir(exist_ok=True)
        self.saved_files_count = 0
        
        # Audio buffer (not used by the methods visible in this file)
        self.audio_buffer = bytearray()
        self.buffer_lock = threading.Lock()
        
        # Log throttling - timestamps used to rate-limit repetitive log lines
        self.last_mic_log_time = 0
        self.last_send_log_time = 0
        self.last_prepare_log_time = 0
        self.log_interval = 10  # seconds between repeated log lines
        
        # Input-enhancement parameters - compensate for very quiet microphones
        self.input_gain_factor = 10.0  # fixed input gain multiplier
        self.auto_input_gain_enabled = True  # enable automatic input gain
        self.input_noise_gate_threshold = -70.0  # input noise gate (dB)
        self.input_levels = []  # history of measured input levels (dB)
        
        # Accumulation buffer - collect at least ~1 second of mic audio per send
        self.audio_accumulation_buffer = bytearray()
        self.min_audio_duration = 1.0  # minimum audio duration (seconds)
        self.accumulation_lock = threading.Lock()
        
        # Translation-audio accumulation buffer - merges short translated clips
        self.translation_audio_buffer = bytearray()
        self.translation_audio_lock = threading.Lock()
        self.min_translation_audio_duration = 1.0  # minimum translated-audio duration (seconds)
    def _enhance_input_audio(self, audio_data: bytes) -> bytes:
        """增强输入音频数据"""
        try:
            # 转换为numpy数组
            audio_array = np.frombuffer(audio_data, dtype=np.int16)
            audio_float = audio_array.astype(np.float32)
            
            # 计算当前音频级别
            rms = np.sqrt(np.mean(audio_float ** 2))
            db_level = 20 * np.log10(rms / 32768.0) if rms > 0 else -96
            
            # 记录输入级别
            self.input_levels.append(db_level)
            if len(self.input_levels) > 100:
                self.input_levels.pop(0)
            
            # 应用噪声门限（更敏感的检测）
            if db_level < self.input_noise_gate_threshold:
                attenuation = 0.05  # 降低衰减到5%，从10%改为5%
                audio_float = audio_float * attenuation
            
            # 应用自动增益控制（更激进的增益策略）
            if self.auto_input_gain_enabled and db_level > -85:  # 降低阈值，从-80改为-85
                target_db = -20.0  # 提高目标dB级别，从-25.0改为-20.0
                current_gain = 10 ** ((target_db - db_level) / 20.0)
                
                # 提高增益范围限制，从50.0提高到80.0
                current_gain = np.clip(current_gain, 1.0, 80.0)
                audio_float = audio_float * current_gain
            
            # 应用固定增益（使用当前优化值）
            audio_float = audio_float * self.input_gain_factor
            
            # 动态范围压缩（新增功能）
            # 对高音量部分进行压缩，防止削波
            compression_threshold = 25000  # 压缩门限（从20000提高到25000）
            compression_ratio = 0.5  # 压缩比例（从0.7降低到0.5，更强压缩）
            
            # 应用软压缩
            mask = np.abs(audio_float) > compression_threshold
            audio_float[mask] = (np.sign(audio_float[mask]) * 
                              (compression_threshold + 
                               (np.abs(audio_float[mask]) - compression_threshold) * compression_ratio))
            
            # 限制幅度防止削波
            audio_float = np.clip(audio_float, -32768, 32767)
            
            # 转换回int16和字节
            enhanced_array = audio_float.astype(np.int16)
            
            # 记录增强后的音频级别
            enhanced_rms = np.sqrt(np.mean(enhanced_array.astype(np.float32) ** 2))
            enhanced_db = 20 * np.log10(enhanced_rms / 32768.0) if enhanced_rms > 0 else -96
            
            return enhanced_array.tobytes()
            
        except Exception as e:
            logging.error(f"输入音频增强失败: {e}")
            return audio_data

    def _audio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio capture callback (runs on PyAudio's own thread).

        Enhances each incoming chunk, accumulates at least
        ``min_audio_duration`` seconds of PCM, then hands the accumulated
        block to ``audio_queue`` for the async sender.
        """
        if self.is_recording:
            # Apply gain/gating before anything else sees the samples.
            enhanced_data = self._enhance_input_audio(in_data)
            
            # Rate-limited level logging.
            current_time = time.time()
            if current_time - self.last_mic_log_time >= self.log_interval:
                # Cast to float32 before squaring: squaring int16 samples
                # in-place wraps around and produces bogus RMS values.
                audio_array = np.frombuffer(enhanced_data, dtype=np.int16).astype(np.float32)
                rms = np.sqrt(np.mean(audio_array ** 2))
                db_level = 20 * np.log10(rms / 32768.0) if rms > 0 else -96
                logging.info(f"🎤 接收到麦克风音频信号 (大小: {len(enhanced_data)} 字节, RMS: {rms:.0f}, dB: {db_level:.1f})")
                self.last_mic_log_time = current_time
            
            # Accumulate audio until we have at least min_audio_duration.
            with self.accumulation_lock:
                self.audio_accumulation_buffer.extend(enhanced_data)
                
                # Duration of buffered 16-bit PCM in seconds.
                current_duration = len(self.audio_accumulation_buffer) / (self.sample_rate * self.channels * 2)  # 16-bit PCM
                
                if current_duration >= self.min_audio_duration:
                    # Enough data accumulated: move it to the async queue.
                    accumulated_data = bytes(self.audio_accumulation_buffer)
                    self.audio_accumulation_buffer.clear()
                    
                    # NOTE(review): get_running_loop() raises RuntimeError on
                    # this non-asyncio thread, so the threadsafe branch likely
                    # never runs and put_nowait is called on the asyncio.Queue
                    # directly from this thread -- confirm, and consider
                    # capturing the loop when recording starts.
                    try:
                        loop = asyncio.get_running_loop()
                        loop.call_soon_threadsafe(self.audio_queue.put_nowait, accumulated_data)
                    except RuntimeError:
                        try:
                            self.audio_queue.put_nowait(accumulated_data)
                        except asyncio.QueueFull:
                            # Queue full: drop the oldest block to bound latency.
                            try:
                                self.audio_queue.get_nowait()
                                self.audio_queue.put_nowait(accumulated_data)
                            except (asyncio.QueueEmpty, asyncio.QueueFull):
                                pass
            
            return (enhanced_data, pyaudio.paContinue)
        return (None, pyaudio.paContinue)
    
    def _recording_worker(self):
        """Background thread: open the PyAudio input stream and keep it alive.

        The stream delivers data via ``_audio_callback``; this thread only
        polls ``is_recording`` and tears the stream down on exit.
        """
        try:
            # NOTE(review): output=True on a capture stream also opens the
            # device for output -- confirm this loopback is intentional.
            self.stream = self.pyaudio_instance.open(
                format=pyaudio.paInt16,
                channels=self.channels,
                rate=self.sample_rate,
                input=True,
                output=True,
                frames_per_buffer=self.chunk_size,
                stream_callback=self._audio_callback
            )
            
            self.stream.start_stream()
            logging.info("音频采集线程已启动")
            
            while self.is_recording and self.stream.is_active():
                # Plain sleep; the original allocated a throwaway
                # threading.Event per iteration just to wait.
                time.sleep(0.1)
                
        except Exception as e:
            logging.error(f"音频采集错误: {e}")
        finally:
            # self.stream only exists if open() succeeded.
            if hasattr(self, 'stream'):
                self.stream.stop_stream()
                self.stream.close()
                logging.info("音频采集线程已停止")
    def _playback_worker(self):
        """Background thread: drain ``playback_queue`` and write PCM to the
        output device.

        Lazily opens a 24 kHz, 16-bit, mono output stream, writes queued
        buffers in <=1024-byte chunks, and re-opens the stream once after any
        write error.
        """
        try:
            if not self.playback_stream:
                self.playback_stream = self.playback_instance.open(
                    format=pyaudio.paInt16,
                    channels=1,  # mono output
                    rate=24000,   # target sample rate
                    output=True,
                    frames_per_buffer=1024,
                    output_device_index=None  # use the default output device
                )
            
            self.is_playing = True
            logging.info("音频播放线程已启动")
            
            while self.is_playing:
                try:
                    # Block briefly so the loop notices is_playing going False.
                    audio_data = self.playback_queue.get(timeout=0.1)
                    
                    if audio_data and len(audio_data) > 0:
                        # 16-bit PCM must have an even byte count.
                        if len(audio_data) % 2 != 0:
                            logging.warning("音频数据长度为奇数，移除最后一个字节")
                            audio_data = audio_data[:-1]
                        
                        # Write in small chunks to avoid buffering glitches.
                        chunk_size = min(1024, len(audio_data))
                        for i in range(0, len(audio_data), chunk_size):
                            chunk = audio_data[i:i + chunk_size]
                            if len(chunk) > 0:
                                self.playback_stream.write(chunk)
                    
                except queue.Empty:
                    continue
                except Exception as e:
                    logging.error(f"播放音频数据错误: {e}")
                    # Attempt a one-shot reinitialisation of the output stream.
                    try:
                        if self.playback_stream:
                            self.playback_stream.stop_stream()
                            self.playback_stream.close()
                        self.playback_stream = self.playback_instance.open(
                            format=pyaudio.paInt16,
                            channels=1,
                            rate=24000,
                            output=True,
                            frames_per_buffer=1024
                        )
                        logging.info("播放流已重新初始化")
                    except Exception as reinit_error:
                        logging.error(f"播放流重新初始化失败: {reinit_error}")
                    
        except Exception as e:
            logging.error(f"音频播放线程错误: {e}")
        finally:
            if self.playback_stream:
                try:
                    self.playback_stream.stop_stream()
                    self.playback_stream.close()
                except Exception as close_error:
                    logging.error(f"关闭播放流失败: {close_error}")
                logging.info("音频播放线程已停止")
    
    async def start_recording(self):
        """异步启动音频采集 - 优化版"""
        if not self.is_recording:
            self.is_recording = True
            self.recording_thread = threading.Thread(target=self._recording_worker)
            self.recording_thread.daemon = True
            self.recording_thread.start()
            
            # 启动播放线程
            if not self.is_playing:
                self.playback_thread = threading.Thread(target=self._playback_worker)
                self.playback_thread.daemon = True
                self.playback_thread.start()
            
            logging.info("实时音频采集已启动...")
    
    async def stop_recording(self):
        """停止音频采集"""
        if self.is_recording:
            self.is_recording = False
            if self.recording_thread:
                self.recording_thread.join()
            
            self.is_playing = False
            if self.playback_thread:
                self.playback_thread.join()
            
            logging.info("音频采集已停止")
    
    async def get_audio_chunk(self):
        """获取音频chunk"""
        return await self.audio_queue.get()
    
    def calculate_audio_level(self, audio_data):
        """计算音频音量级别"""
        # 将字节数据转换为numpy数组
        audio_array = np.frombuffer(audio_data, dtype=np.int16)
        # 计算RMS（均方根）值
        rms = np.sqrt(np.mean(audio_array**2))
        # 转换为dB
        if rms > 0:
            db = 20 * np.log10(rms / 32768.0)  # 32768是int16的最大值
        else:
            db = -96  # 最小值
        return db, rms
    
    def amplify_audio(self, audio_data, gain_factor=2.0):
        """放大音频数据"""
        # 将字节数据转换为numpy数组
        audio_array = np.frombuffer(audio_data, dtype=np.int16)
        
        # 放大音频
        amplified = audio_array * gain_factor
        
        # 防止溢出，限制在int16范围内
        amplified = np.clip(amplified, -32768, 32767)
        
        # 转换回字节
        return amplified.astype(np.int16).tobytes()
    
    def check_audio_data_integrity(self, audio_data: bytes) -> tuple[bool, str]:
        """检查音频数据完整性
        返回: (是否有效, 状态描述)
        """
        try:
            if not audio_data:
                return False, "音频数据为空"
            
            data_len = len(audio_data)
            logging.info(f"音频数据完整性检查: 长度={data_len}字节")
            
            # 检查数据长度
            if data_len < 100:
                return False, f"音频数据太短({data_len}字节)"
            
            # 检查数据是否全为相同值（可能是静音或损坏）
            first_byte = audio_data[0]
            if all(b == first_byte for b in audio_data[:100]):
                return False, "音频数据全为相同值，可能是静音或损坏"
            
            # 检查数据是否全为0（静音）
            if all(b == 0 for b in audio_data[:100]):
                return False, "音频数据全为0，是静音"
            
            # 检查数据是否全为FF（可能是未初始化内存）
            if all(b == 0xFF for b in audio_data[:100]):
                return False, "音频数据全为0xFF，可能是未初始化数据"
            
            # 如果是PCM格式，检查数值合理性
            if data_len % 2 == 0 and data_len >= 200:
                audio_array = np.frombuffer(audio_data[:200], dtype=np.int16)
                
                # 检查数值范围
                if np.all(audio_array == 0):
                    return False, "PCM数据全为0"
                
                # 检查数值分布
                unique_values = len(np.unique(audio_array))
                if unique_values < 5:
                    return False, f"PCM数据数值分布异常，只有{unique_values}个不同值"
                
                # 检查是否可能是噪音
                rms = np.sqrt(np.mean(audio_array**2))
                if rms == 0:
                    return False, "PCM数据RMS为0"
                
                # 检查数据模式（避免明显的模式化数据）
                diff = np.diff(audio_array)
                if len(diff) > 10 and np.std(diff) == 0:
                    return False, "PCM数据变化模式异常，可能是人工生成的噪音"
            
            # 检查数据熵（避免过于规律的数据）
            entropy = self.calculate_data_entropy(audio_data[:200])
            if entropy < 0.1:  # 熵值过低表示数据过于规律
                return False, f"音频数据熵值过低({entropy:.3f})，可能不是有效音频"
            
            return True, "音频数据完整性检查通过"
            
        except Exception as e:
            logging.error(f"音频数据完整性检查失败: {e}")
            return False, f"检查失败: {e}"
    
    def calculate_data_entropy(self, data: bytes) -> float:
        """Shannon entropy (bits) of the byte distribution in ``data``."""
        try:
            if not data:
                return 0.0

            counts = np.bincount(np.frombuffer(data, dtype=np.uint8), minlength=256)
            probs = counts / len(data)
            # The small epsilon keeps log2 finite for empty bins.
            return -np.sum(probs * np.log2(probs + 1e-10))
        except Exception:
            return 0.0

    def detect_audio_format(self, audio_data: bytes) -> str:
        """检测音频数据格式"""
        try:
            # 首先检查数据完整性
            is_valid, reason = self.check_audio_data_integrity(audio_data)
            if not is_valid:
                logging.warning(f"音频数据完整性检查失败: {reason}")
                return "invalid"
            
            # 检查WAV格式
            if audio_data[:4] == b'RIFF' and len(audio_data) > 12:
                if audio_data[8:12] == b'WAVE':
                    return "wav"
            
            # 检查OGG/Opus格式
            if audio_data[:4] == b'OggS':
                return "ogg_opus"
            
            # 检查是否是原始PCM数据
            # 如果数据长度合理且数值分布符合16位PCM特征
            if len(audio_data) % 2 == 0 and len(audio_data) > 100:
                # 抽样检查几个值是否在合理的PCM范围内
                sample_count = min(10, len(audio_data) // 2)
                valid_samples = 0
                for i in range(sample_count):
                    sample = struct.unpack('<h', audio_data[i*2:(i+1)*2])[0]
                    if -32768 <= sample <= 32767:
                        valid_samples += 1
                if valid_samples >= sample_count * 0.8:  # 80%的样本在有效范围内
                    return "pcm"
            
            return "unknown"
        except Exception as e:
            logging.error(f"音频格式检测失败: {e}")
            return "unknown"
    
    def analyze_audio_data(self, audio_data: bytes, title: str = "音频数据"):
        """分析音频数据特征"""
        try:
            if not audio_data or len(audio_data) < 10:
                logging.warning(f"{title}: 数据太短无法分析")
                return
            
            # 基本统计
            data_len = len(audio_data)
            logging.info(f"{title}分析:")
            logging.info(f"  数据长度: {data_len} 字节")
            logging.info(f"  前16字节: {audio_data[:16].hex()}")
            logging.info(f"  后16字节: {audio_data[-16:].hex()}")
            
            # 如果是PCM格式，计算音频特征
            if data_len % 2 == 0 and data_len >= 100:
                audio_array = np.frombuffer(audio_data[:200], dtype=np.int16)  # 取前200字节分析
                if len(audio_array) > 0:
                    rms = np.sqrt(np.mean(audio_array**2))
                    db = 20 * np.log10(rms / 32768.0) if rms > 0 else -96
                    logging.info(f"  音频特征: RMS={rms:.0f}, dB={db:.1f}, 范围=[{audio_array.min()}, {audio_array.max()}]")
                    
                    # 检查是否可能是静音或噪音
                    if db < -40:
                        logging.warning(f"  ⚠️  音频音量过低，可能是静音")
                    elif db > -10:
                        logging.warning(f"  ⚠️  音频音量过高，可能失真")
                    
                    # 检查数值分布
                    unique_values = len(np.unique(audio_array))
                    if unique_values < 10:
                        logging.warning(f"  ⚠️  音频数值分布异常，只有{unique_values}个不同值")
        
        except Exception as e:
            logging.error(f"音频数据分析失败: {e}")
    
    def decode_wav_to_pcm(self, wav_data: bytes) -> Optional[bytes]:
        """将WAV数据解码为PCM"""
        try:
            self.analyze_audio_data(wav_data, "WAV音频数据")
            
            wav_io = io.BytesIO(wav_data)
            with wave.open(wav_io, 'rb') as wav_file:
                # 检查格式
                channels = wav_file.getnchannels()
                sampwidth = wav_file.getsampwidth()
                framerate = wav_file.getframerate()
                nframes = wav_file.getnframes()
                
                logging.info(f"WAV格式信息: {channels}通道, {sampwidth*8}位, {framerate}Hz, {nframes}帧")
                
                if sampwidth != 2 or channels != 1:
                    logging.warning(f"不支持的WAV格式: {channels}通道, {sampwidth*8}位")
                    return None
                
                # 读取PCM数据
                pcm_data = wav_file.readframes(nframes)
                logging.info(f"WAV解码成功: {framerate}Hz, {len(pcm_data)}字节")
                
                # 分析解码后的数据
                self.analyze_audio_data(pcm_data, "解码后的PCM音频")
                
                return pcm_data
        except Exception as e:
            logging.error(f"WAV解码失败: {e}")
            return None
    
    def resample_audio(self, pcm_data: bytes, original_rate: int, target_rate: int) -> bytes:
        """重采样音频数据"""
        if original_rate == target_rate:
            return pcm_data
        
        try:
            # 将字节数据转换为numpy数组
            audio_array = np.frombuffer(pcm_data, dtype=np.int16)
            
            # 计算重采样比例
            resample_ratio = target_rate / original_rate
            new_length = int(len(audio_array) * resample_ratio)
            
            # 使用线性插值进行重采样
            old_indices = np.arange(len(audio_array))
            new_indices = np.arange(new_length) / resample_ratio
            
            # 线性插值
            resampled_array = np.interp(new_indices, old_indices, audio_array).astype(np.int16)
            
            logging.info(f"音频重采样: {original_rate}Hz -> {target_rate}Hz, {len(audio_array)} -> {len(resampled_array)} 样本")
            return resampled_array.tobytes()
        except Exception as e:
            logging.error(f"音频重采样失败: {e}")
            return pcm_data
    
    def play_translation_audio(self, audio_data: bytes) -> bool:
        """Validate, decode, resample, accumulate and then play (or save) one
        translated audio payload.

        Returns True when the data was queued for playback, saved, or is
        still being accumulated; False when the payload was rejected or an
        error occurred.
        """
        try:
            if not audio_data:
                logging.warning("音频数据为空")
                return False
            
            logging.info(f"收到翻译音频数据: {len(audio_data)} 字节")
            
            # Reject obviously broken payloads up front.
            is_valid, reason = self.check_audio_data_integrity(audio_data)
            if not is_valid:
                logging.error(f"音频数据无效: {reason}")
                return False
            
            # Log diagnostics for the raw payload.
            self.analyze_audio_data(audio_data, "原始音频数据")
            
            # Decode according to the detected container format.
            audio_format = self.detect_audio_format(audio_data)
            pcm_data = None
            original_sample_rate = 24000  # default sample rate
            
            if audio_format == "wav":
                logging.info("检测到WAV格式，开始解码...")
                wav_io = io.BytesIO(audio_data)
                with wave.open(wav_io, 'rb') as wav_file:
                    # Read the real parameters from the WAV header.
                    channels = wav_file.getnchannels()
                    sampwidth = wav_file.getsampwidth()
                    original_sample_rate = wav_file.getframerate()
                    nframes = wav_file.getnframes()
                    
                    logging.info(f"WAV格式: {channels}通道, {sampwidth*8}位, {original_sample_rate}Hz, {nframes}帧")
                    
                    # Only 16-bit samples are supported.
                    if sampwidth != 2:
                        logging.error(f"不支持的WAV位深度: {sampwidth*8}位")
                        return False
                    
                    pcm_data = wav_file.readframes(nframes)
                    
                    # Downmix stereo to mono by averaging the two channels.
                    if channels == 2:
                        logging.info("将立体声转换为单声道")
                        stereo_array = np.frombuffer(pcm_data, dtype=np.int16)
                        mono_array = (stereo_array[0::2] + stereo_array[1::2]) // 2
                        pcm_data = mono_array.tobytes()
                    
                    logging.info(f"WAV解码成功: {len(pcm_data)} 字节")
                    
            elif audio_format == "ogg" or audio_format == "ogg_opus":
                logging.info("检测到OGG/Opus格式，开始解码...")
                pcm_data = self.decode_opus_robust(audio_data)
                if pcm_data:
                    original_sample_rate = 24000  # assumed standard rate -- TODO confirm
                    logging.info(f"OGG/Opus解码成功: {len(pcm_data)} 字节")
                else:
                    logging.error("OGG/Opus解码失败")
                    return False
                    
            elif audio_format == "pcm":
                logging.info("检测到原始PCM数据，假设为24000Hz 16-bit单声道")
                pcm_data = audio_data
                original_sample_rate = 24000
                
            elif audio_format == "invalid":
                logging.error("音频数据完整性检查失败，拒绝播放")
                return False
                
            else:
                logging.warning(f"未知音频格式: {audio_format}，尝试按24000Hz PCM处理")
                pcm_data = audio_data
                original_sample_rate = 24000
            
            if pcm_data is None or len(pcm_data) == 0:
                logging.error("音频解码失败或无数据")
                return False
            
            # Log diagnostics for the decoded data.
            self.analyze_audio_data(pcm_data, "解码后音频数据")
            
            # Resample to the playback device rate (24000 Hz).
            target_rate = 24000
            if original_sample_rate != target_rate:
                logging.info(f"重采样: {original_sample_rate}Hz -> {target_rate}Hz")
                pcm_data = self.resample_audio(pcm_data, original_sample_rate, target_rate)
                if pcm_data is None:
                    logging.error("重采样失败")
                    return False
            
            # Log diagnostics for the resampled data.
            self.analyze_audio_data(pcm_data, "重采样后音频数据")
            
            # Re-validate after decoding/resampling.
            is_valid_final, reason_final = self.check_audio_data_integrity(pcm_data)
            if not is_valid_final:
                logging.error(f"处理后的音频数据无效: {reason_final}")
                return False
            
            # 16-bit PCM requires an even byte count.
            if len(pcm_data) % 2 != 0:
                logging.warning("音频数据长度为奇数，移除最后一个字节")
                pcm_data = pcm_data[:-1]
            
            # Pad with silence up to min_audio_duration.
            # NOTE(review): duration math uses self.sample_rate (capture rate,
            # 16 kHz default) although this PCM is at 24 kHz -- confirm.
            current_duration = len(pcm_data) / (self.sample_rate * self.channels * 2)  # 16-bit PCM
            if current_duration < self.min_audio_duration:
                # Number of silence bytes needed to reach the minimum length.
                silence_samples = int((self.min_audio_duration - current_duration) * self.sample_rate * self.channels)
                silence_bytes = silence_samples * 2  # 16-bit = 2 bytes per sample
                silence_data = b'\x00' * silence_bytes
                pcm_data = pcm_data + silence_data
                logging.info(f"🎵 音频长度不足{self.min_audio_duration}秒，已填充静音 (原始长度: {current_duration:.2f}秒, 填充后: {len(pcm_data)/(self.sample_rate*self.channels*2):.2f}秒)")
            
            logging.info(f"准备播放音频: {len(pcm_data)} 字节")
            
            # Accumulate translated clips until they reach the minimum length.
            with self.translation_audio_lock:
                self.translation_audio_buffer.extend(pcm_data)
                
                # Duration of accumulated 16-bit PCM in seconds.
                accumulated_duration = len(self.translation_audio_buffer) / (self.sample_rate * self.channels * 2)  # 16-bit PCM
                
                # Flush when long enough, or when this chunk looks like a
                # short trailing fragment.
                if accumulated_duration >= self.min_translation_audio_duration or len(pcm_data) < 1000:
                    # Use everything accumulated so far.
                    final_pcm_data = bytes(self.translation_audio_buffer)
                    self.translation_audio_buffer.clear()
                    
                    logging.info(f"🎵 累积翻译音频: {accumulated_duration:.2f}秒, 最终播放: {len(final_pcm_data)} 字节")
                    
                    # Dispatch according to the configured audio mode.
                    if self.audio_mode == "save":
                        # Save-only mode.
                        return self._save_translation_audio(final_pcm_data)
                    else:
                        # Real-time playback mode (default).
                        # Pad the merged clip again if it is still too short.
                        current_duration = len(final_pcm_data) / (self.sample_rate * self.channels * 2)
                        if current_duration < self.min_audio_duration:
                            # Number of silence bytes needed to reach the minimum length.
                            silence_samples = int((self.min_audio_duration - current_duration) * self.sample_rate * self.channels)
                            silence_bytes = silence_samples * 2  # 16-bit = 2 bytes per sample
                            silence_data = b'\x00' * silence_bytes
                            final_pcm_data = final_pcm_data + silence_data
                            logging.info(f"🎵 翻译音频长度不足{self.min_audio_duration}秒，已填充静音 (原始长度: {current_duration:.2f}秒, 填充后: {len(final_pcm_data)/(self.sample_rate*self.channels*2):.2f}秒)")
                        
                        # Hand the merged clip off to the playback thread.
                        self.playback_queue.put(final_pcm_data, block=False)
                        logging.info("累积的翻译音频数据已加入播放队列")
                        return True
                else:
                    logging.info(f"⏳ 翻译音频累积中: {accumulated_duration:.2f}秒/{self.min_translation_audio_duration}秒, 等待更多数据...")
                    return True  # keep accumulating; nothing played yet
            
        except queue.Full:
            # Playback queue full: drop the oldest entry and retry once.
            logging.warning("播放队列已满，丢弃最旧数据")
            try:
                self.playback_queue.get_nowait()
                if 'pcm_data' in locals() and pcm_data:
                    self.playback_queue.put(pcm_data, block=False)
            except queue.Empty:
                pass
            return False
        except Exception as e:
            logging.error(f"播放翻译音频失败: {e}")
            return False
    
    def get_input_audio_stats(self) -> dict:
        """Summarise the recorded input-level history as a stats dict."""
        levels = self.input_levels
        if not levels:
            # No samples yet: report the silence floor.
            return {"avg_db": -96, "max_db": -96, "min_db": -96, "gain_factor": self.input_gain_factor}

        return {
            "avg_db": np.mean(levels),
            "max_db": np.max(levels),
            "min_db": np.min(levels),
            "gain_factor": self.input_gain_factor,
            "auto_gain_enabled": self.auto_input_gain_enabled,
        }
    
    def adjust_input_gain(self, gain_factor: float):
        """Update the fixed input gain multiplier used by the enhancer."""
        new_gain = gain_factor
        self.input_gain_factor = new_gain
        logging.info(f"输入增益因子调整为: {new_gain}")
    
    def enable_auto_input_gain(self, enabled: bool):
        """Turn automatic input gain on or off."""
        self.auto_input_gain_enabled = enabled
        state = '启用' if enabled else '禁用'
        logging.info(f"自动输入增益: {state}")
    
    def optimize_for_low_volume(self):
        """Switch to aggressive enhancement settings for very quiet input."""
        settings = {
            'gain_factor': 30.0,
            'noise_gate': -85.0,
            'auto_gain': True,
        }
        self.input_gain_factor = settings['gain_factor']
        self.input_noise_gate_threshold = settings['noise_gate']
        self.auto_input_gain_enabled = settings['auto_gain']

        logging.info("🔧 已优化音频参数以应对极端低音量环境:")
        logging.info(f"   📈 增益因子: {self.input_gain_factor}x")
        logging.info(f"   🔇 噪声门限: {self.input_noise_gate_threshold}dB")
        logging.info(f"   🤖 自动增益: 开启")

        # Report the settings that were just applied.
        return settings
    
    def reset_audio_enhancement(self):
        """Restore default enhancement parameters and clear the level history."""
        self.input_gain_factor = 10.0
        self.input_noise_gate_threshold = -70.0
        self.auto_input_gain_enabled = True
        del self.input_levels[:]  # clear in place, keeping the same list object
        logging.info("🔧 音频增强参数已重置为默认值")
        return self.get_input_audio_stats()
    
    def _save_translation_audio(self, pcm_data: bytes) -> bool:
        """保存翻译音频到文件"""
        try:
            # 生成文件名
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            filename = f"translation_audio_{timestamp}_{self.saved_files_count:04d}.wav"
            filepath = self.save_directory / filename
            
            # 创建WAV文件
            with wave.open(str(filepath), 'wb') as wav_file:
                wav_file.setnchannels(1)  # 单声道
                wav_file.setsampwidth(2)  # 16位
                wav_file.setframerate(24000)  # 24kHz
                wav_file.writeframes(pcm_data)
            
            self.saved_files_count += 1
            logging.info(f"💾 翻译音频已保存到文件: {filepath} (大小: {len(pcm_data)} 字节)")
            return True
            
        except Exception as e:
            logging.error(f"保存翻译音频失败: {e}")
            return False
    
    def decode_opus_robust(self, audio_data):
        """
        健壮的OGG/Opus解码器
        尝试多种方法处理OGG/Opus数据
        """
        
        # 方法1: 尝试使用soundfile
        try:
            import soundfile as sf
            import io
            
            # 创建缓冲区
            audio_buffer = io.BytesIO(audio_data)
            
            # 尝试读取音频
            audio_array, sample_rate = sf.read(audio_buffer)
            
            # 处理多通道
            if len(audio_array.shape) > 1:
                audio_array = audio_array.mean(axis=1)
            
            # 转换为16位PCM
            pcm_samples = (audio_array * 32767).astype(np.int16)
            
            logging.info(f"soundfile解码成功: {len(pcm_samples)} 采样点, {sample_rate}Hz")
            return pcm_samples.tobytes()
            
        except Exception as e:
            logging.info(f"soundfile解码失败: {e}")
        
        # 方法2: 尝试提取PCM数据
        try:
            return self.extract_pcm_from_opus(audio_data)
        except Exception as e:
            logging.info(f"PCM提取失败: {e}")
        
        # 方法3: 生成静音
        return self.generate_silence()
    
    def extract_pcm_from_opus(self, audio_data):
        """尝试从OGG/Opus数据中提取PCM数据"""
        
        # 检查是否是有效的OGG文件
        if not audio_data.startswith(b'OggS'):
            raise ValueError("不是有效的OGG文件")
        
        # 查找PCM数据模式
        pcm_candidates = []
        
        # 搜索PCM数据特征
        for i in range(0, len(audio_data) - 1024, 2):
            # 检查是否有合理的PCM数据模式
            segment = audio_data[i:i+1024]
            
            # 简单的PCM数据检测
            if len(segment) >= 1024 and len(segment) % 2 == 0:
                pcm_samples = np.frombuffer(segment, dtype=np.int16)
                
                # 检查PCM数据特征
                if self.is_valid_pcm_data(pcm_samples):
                    pcm_candidates.append({
                        'offset': i,
                        'data': segment,
                        'score': self.calculate_pcm_quality(pcm_samples)
                    })
        
        # 选择最佳的PCM数据
        if pcm_candidates:
            best_candidate = max(pcm_candidates, key=lambda x: x['score'])
            logging.info(f"找到PCM数据段: 偏移={best_candidate['offset']}, 质量={best_candidate['score']}")
            return best_candidate['data']
        
        raise ValueError("未找到有效的PCM数据")
    
    def is_valid_pcm_data(self, samples):
        """Heuristic check that an int16 buffer looks like real PCM audio.

        Rejects empty buffers, out-of-range amplitudes, low value diversity
        (< 10% distinct samples) and mostly-zero buffers (> 90% zeros).
        """
        n = len(samples)
        if n == 0:
            return False

        # Amplitude must stay inside the signed 16-bit range.
        if np.max(np.abs(samples)) > 32767:
            return False

        # Require some variety: at least 10% distinct values.
        if len(np.unique(samples)) < n * 0.1:
            return False

        # Buffers that are almost entirely zero are padding, not audio.
        return bool(np.sum(samples == 0) / n <= 0.9)
    
    def calculate_pcm_quality(self, samples):
        """Score how much a sample buffer looks like real PCM audio (0-20).

        Components: dynamic range (up to 10 points), non-zero ratio
        (up to 5 points) and value diversity (up to 5 points).

        Args:
            samples: 1-D numpy array of (int16) PCM samples.
        Returns:
            Numeric score; 0 for an empty buffer.
        """
        if len(samples) == 0:
            return 0
        
        score = 0
        
        # Dynamic-range component. Cast to float first: subtracting int16
        # extremes (e.g. 32767 - (-32768)) previously overflowed to -1 and
        # gave a *negative* score to exactly the widest-range audio.
        dynamic_range = float(np.max(samples)) - float(np.min(samples))
        score += min(dynamic_range / 1000, 10)  # capped at 10 points
        
        # Non-zero ratio component: mostly-zero buffers score low.
        zero_ratio = np.sum(samples == 0) / len(samples)
        score += (1 - zero_ratio) * 5  # capped at 5 points
        
        # Diversity component: proportion of distinct sample values.
        unique_ratio = len(np.unique(samples)) / len(samples)
        score += unique_ratio * 5  # capped at 5 points
        
        return score
    
    def generate_silence(self, duration=0.5):
        """Return `duration` seconds of silent 16-bit PCM at 24 kHz.

        Args:
            duration: length of the silence in seconds (default 0.5).
        Returns:
            Zero-filled bytes, two bytes per sample.
        """
        rate = 24000
        count = int(rate * duration)
        logging.info(f"生成静音数据: {duration}秒, {count} 采样点")
        # A zero-initialized byte buffer is identical to int16 zeros.
        return bytes(count * 2)

    def close(self):
        """清理资源"""
        self.is_recording = False
        self.is_playing = False
        
        if self.recording_thread:
            self.recording_thread.join()
        if self.playback_thread:
            self.playback_thread.join()
        
        if self.playback_stream:
            self.playback_stream.stop_stream()
            self.playback_stream.close()
            
        if self.pyaudio_instance:
            self.pyaudio_instance.terminate()
        if self.playback_instance:
            self.playback_instance.terminate()

# Translation-service WebSocket client (protocol logic kept unchanged)
class TranslationClient:
    """Client for the real-time speech-translation WebSocket API.

    Owns a single WebSocket connection and one translation session:
    builds the authenticated handshake, serializes protobuf requests
    and parses protobuf responses into plain dicts.
    """

    def __init__(self, config: Config):
        # config: service endpoint and credentials (see Config dataclass).
        self.config = config
        self.websocket = None   # live connection, set by connect()
        self.session_id = None  # id of the active translation session
        self.log_id = None      # server log id taken from the handshake response
        self.audio_streamer = None  # reference to the audio stream handler (assigned by the owner)
        
    async def build_http_headers(self, conn_id: str) -> Headers:
        """Build the authentication headers required by the WebSocket handshake.

        Args:
            conn_id: unique id for this connection attempt, echoed to the server.
        """
        return Headers({
            "X-Api-App-Key": self.config.app_key,
            "X-Api-Access-Key": self.config.access_key,
            "X-Api-Resource-Id": self.config.resource_id,
            "X-Api-Connect-Id": conn_id
        })
    
    async def send_request(self, request: TranslateRequestData):
        """Serialize `request` into a TranslateRequest protobuf and send it.

        Unset audio fields fall back to the service defaults:
        source = 16 kHz / 16-bit / mono WAV, target = 24 kHz ogg_opus.
        """
        request_data = TranslateRequest()
        request_data.request_meta.SessionID = request.session_id
        
        # Map the string event name onto the protobuf event enum
        # (an unknown name leaves the enum at its default value).
        if request.event == "Type_StartSession":
            request_data.event = Type.StartSession
        elif request.event == "Type_TaskRequest":
            request_data.event = Type.TaskRequest
        elif request.event == "Type_FinishSession":
            request_data.event = Type.FinishSession
            
        request_data.user.uid = "realtime_translation_client"
        request_data.user.did = "realtime_translation_client"
        
        if request.source_audio:
            request_data.source_audio.format = request.source_audio.format or "wav"
            request_data.source_audio.rate = request.source_audio.rate or 16000
            request_data.source_audio.bits = request.source_audio.bits or 16
            request_data.source_audio.channel = request.source_audio.channel or 1
            if request.source_audio.binary_data:
                request_data.source_audio.binary_data = request.source_audio.binary_data
        
        if request.target_audio:
            request_data.target_audio.format = request.target_audio.format or "ogg_opus"
            request_data.target_audio.rate = request.target_audio.rate or 24000
            
        if request.mode:
            request_data.request.mode = request.mode
        if request.source_language:
            request_data.request.source_language = request.source_language
        if request.target_language:
            request_data.request.target_language = request.target_language
            
        await self.websocket.send(request_data.SerializeToString())
    
    async def receive_message(self):
        """Receive one protobuf response and return it as a plain dict.

        Returns:
            dict with keys event/session_id/sequence/text/data/spk_chg/message,
            or None on any receive/parse error (logged, not raised).
        """
        try:
            response = await self.websocket.recv()
            response_data = TranslateResponse()
            response_data.ParseFromString(response)
            
            # Detailed debug logging for every received frame.
            logging.info(f"📨 接收到原始响应 - 事件类型: {response_data.event}, 序列号: {response_data.response_meta.Sequence}")
            if response_data.text:
                logging.info(f"📝 响应文本: {response_data.text[:100]}...")
            if response_data.data:
                logging.info(f"🔊 响应音频数据大小: {len(response_data.data)} 字节")
            if response_data.response_meta.Message:
                logging.info(f"💬 响应消息: {response_data.response_meta.Message}")
            
            return {
                'event': response_data.event,
                'session_id': response_data.response_meta.SessionID,
                'sequence': response_data.response_meta.Sequence,
                'text': response_data.text,
                'data': response_data.data,
                'spk_chg': response_data.spk_chg,
                'message': response_data.response_meta.Message
            }
        except Exception as e:
            logging.error(f"接收消息错误: {e}")
            return None
    
    async def connect(self):
        """Open the WebSocket connection with client-side heartbeats enabled.

        Returns:
            True on success, False on failure (error is logged).
        """
        try:
            conn_id = str(uuid.uuid4())
            headers = await self.build_http_headers(conn_id)
            
            # Client heartbeats keep the connection alive during quiet
            # periods instead of relying on a constant stream of audio.
            self.websocket = await websockets.connect(
                self.config.ws_url,
                additional_headers=headers,
                max_size=1000000000,
                ping_interval=30,    # ping every 30 seconds
                ping_timeout=10,     # pong deadline
                close_timeout=10     # close-handshake deadline
            )
            
            self.log_id = self.websocket.response.headers.get('X-Tt-Logid')
            logging.info(f"已连接到翻译服务器 (log_id={self.log_id}, 心跳间隔: 30秒)")
            return True
            
        except Exception as e:
            logging.error(f"连接翻译服务器失败: {e}")
            return False
    
    async def start_session(self, session_id: str, source_lang: str = "zh", target_lang: str = "en"):
        """Start a speech-to-speech ("s2s") translation session.

        Returns:
            True when the server acknowledges with SessionStarted, else False.
        """
        self.session_id = session_id
        
        start_request = TranslateRequestData(
            session_id=session_id,
            event="Type_StartSession",
            source_audio=Audio(format="wav", rate=16000, bits=16, channel=1),
            target_audio=Audio(format="ogg_opus", rate=24000),
            mode="s2s",
            source_language=source_lang,
            target_language=target_lang
        )
        
        await self.send_request(start_request)
        resp = await self.receive_message()
        
        if not resp or resp['event'] != Type.SessionStarted:
            error_msg = resp['message'] if resp else "未知错误"
            logging.error(f"会话启动失败: {error_msg}")
            return False
            
        logging.info(f"翻译会话已启动 (ID={session_id})")
        return True
    
    async def send_audio_chunk(self, audio_chunk: bytes):
        """Send one chunk of 16 kHz / 16-bit mono PCM to the active session."""
        # Skip empty chunks rather than sending useless frames.
        if not audio_chunk or len(audio_chunk) == 0:
            logging.warning("⚠️  尝试发送空音频数据，跳过")
            return
            
        # Log chunk details, throttled via the streamer's shared timestamps.
        current_time = time.time()
        if current_time - self.audio_streamer.last_prepare_log_time >= self.audio_streamer.log_interval:
            logging.info(f"🎵 准备发送音频数据块 - 大小: {len(audio_chunk)} 字节, 采样率: 16000Hz, 格式: 16-bit PCM")
            self.audio_streamer.last_prepare_log_time = current_time
        
        chunk_request = TranslateRequestData(
            session_id=self.session_id,
            event="Type_TaskRequest",
            source_audio=Audio(
                binary_data=audio_chunk,
                format="wav",
                rate=16000,
                bits=16,
                channel=1
            )
        )
        await self.send_request(chunk_request)
    
    async def end_session(self):
        """Ask the server to finish the current session (no-op when none active)."""
        if self.session_id:
            finish_request = TranslateRequestData(
                session_id=self.session_id,
                event="Type_FinishSession",
                source_audio=Audio()
            )
            await self.send_request(finish_request)
            logging.info("翻译会话结束请求已发送")
    
    async def close(self):
        """Close the WebSocket connection if it is open."""
        if self.websocket:
            await self.websocket.close()

# Real-time simultaneous-interpretation orchestrator (enhanced)
class EnhancedRealtimeTranslator:
    """Drives the full real-time interpretation pipeline.

    Wires the microphone streamer to the translation WebSocket client and
    runs two concurrent asyncio tasks: one pushing captured audio upstream
    and one consuming translation results (text + synthesized audio) for
    playback or file saving.
    """

    def __init__(self, config: Config, audio_mode: str = "play"):
        # config: translation-service endpoint and credentials.
        # audio_mode: "play" for live playback, "save" to write WAV files.
        self.config = config
        self.audio_streamer = EnhancedRealtimeAudioStreamer(audio_mode=audio_mode)
        self.translation_client = TranslationClient(config)
        self.is_translating = False
        self.receive_task = None  # asyncio task consuming server responses
        self.send_task = None     # asyncio task pushing microphone audio upstream
        self.stats = {
            'audio_sent': 0,
            'translations_received': 0,
            'start_time': None
        }
        
        # Back-reference so the client can share the streamer's
        # log-throttling timestamps when logging outgoing audio.
        self.translation_client.audio_streamer = self.audio_streamer
    
    async def start_translation(self, source_lang: str = "zh", target_lang: str = "en"):
        """Start the pipeline and run until the send task finishes.

        Optimizes capture parameters, connects, opens a session, starts the
        recorder and both worker tasks. Always stops the pipeline on exit.

        Returns:
            False when connection/session setup fails, True otherwise.
        """
        logging.info("正在启动增强版实时同声传译...")
        
        # First tune the capture parameters for low-volume environments.
        logging.info("🔧 正在优化音频输入参数以应对低音量环境...")
        optimization_result = self.audio_streamer.optimize_for_low_volume()
        logging.info(f"✅ 音频参数优化完成: 增益因子={optimization_result['gain_factor']}x, 噪声门限={optimization_result['noise_gate']}dB")
        
        # Show current input statistics.
        stats = self.audio_streamer.get_input_audio_stats()
        logging.info(f"📊 当前音频输入统计: 平均音量={stats['avg_db']:.1f}dB, 最大音量={stats['max_db']:.1f}dB")
        
        # Connect to the translation server.
        if not await self.translation_client.connect():
            logging.error("无法连接到翻译服务器")
            return False
        
        # Open a translation session.
        session_id = str(uuid.uuid4())
        if not await self.translation_client.start_session(session_id, source_lang, target_lang):
            logging.error("无法启动翻译会话")
            await self.translation_client.close()
            return False
        
        # Start microphone capture.
        await self.audio_streamer.start_recording()
        
        self.is_translating = True
        self.stats['start_time'] = time.time()
        
        # Task consuming translation results.
        self.receive_task = asyncio.create_task(self._receive_translation_results())
        
        # Task sending captured audio.
        self.send_task = asyncio.create_task(self._send_audio_chunks())
        
        logging.info("增强版实时同声传译已启动！")
        logging.info("请开始说话，系统会自动翻译并播放结果...")
        logging.info("按 Ctrl+C 停止翻译")
        
        try:
            # Run until the send task returns (error or外部 stop).
            await self.send_task
        except KeyboardInterrupt:
            logging.info("\n正在停止实时翻译...")
        except Exception as e:
            logging.error(f"翻译过程错误: {e}")
        finally:
            await self.stop_translation()
        
        return True
    
    def _monitor_input_audio_quality(self, audio_chunk: bytes, db_level: float, rms_level: float):
        """Log periodic diagnostics about the captured input audio.

        All warnings are rate-limited by self.stats['audio_sent'] counters
        so the log is not flooded during sustained conditions.
        """
        # Enhanced input statistics from the streamer.
        stats = self.audio_streamer.get_input_audio_stats()
        
        # Detailed quality report every 20 sent chunks.
        if self.stats['audio_sent'] % 20 == 0:
            logging.info(f"🎤 输入音频质量监控:")
            logging.info(f"   📊 当前音量: {db_level:.1f}dB (RMS: {rms_level:.0f})")
            logging.info(f"   📈 平均音量: {stats['avg_db']:.1f}dB, 最大: {stats['max_db']:.1f}dB, 最小: {stats['min_db']:.1f}dB")
            logging.info(f"   🔧 增益因子: {stats['gain_factor']:.1f}x, 自动增益: {'开启' if stats['auto_gain_enabled'] else '关闭'}")
            logging.info(f"   🔇 静音检测: {'是' if db_level < self.audio_streamer.input_noise_gate_threshold else '否'}")
        
        # Low-volume warning with suggestions, every 30 chunks.
        if db_level < -50:
            if self.stats['audio_sent'] % 30 == 0:
                logging.warning(f"⚠️  音频输入音量过低 ({db_level:.1f}dB)")
                logging.warning("   💡 建议: 1) 增大麦克风音量 2) 靠近麦克风说话 3) 检查麦克风设置")
                
                # Auto gain is on but ineffective — suggest manual adjustment.
                if stats['auto_gain_enabled'] and stats['gain_factor'] >= 8.0:
                    logging.warning("   🔧 自动增益已启用但效果有限，建议手动调整麦克风设置")
        
        # Undersized chunk warning, every 40 chunks.
        if len(audio_chunk) < 200:
            if self.stats['audio_sent'] % 40 == 0:
                logging.warning(f"⚠️  音频数据块过小 ({len(audio_chunk)} 字节)，可能影响识别效果")
        
        # Persistently flat, quiet input hints at a muted/broken microphone.
        if len(self.audio_streamer.input_levels) > 10:
            recent_levels = self.audio_streamer.input_levels[-10:]
            level_variance = max(recent_levels) - min(recent_levels)
            if level_variance < 5 and max(recent_levels) < -60:
                if self.stats['audio_sent'] % 60 == 0:
                    logging.warning("⚠️  检测到持续的低音量输入，可能是麦克风问题或环境噪音过低")
                    logging.warning("   💡 建议: 检查麦克风是否被静音或损坏")

    async def _send_audio_chunks(self):
        """Continuously forward captured audio chunks to the translation server.

        Warns about undersized/quiet chunks, amplifies very quiet audio,
        throttles log output, and paces sends with a longer delay during
        silence than during speech.

        Fixes over the previous revision: `current_time` and `is_silence`
        were referenced without ever being assigned (NameError on the first
        loop iteration), and an unreachable duplicate `except` block has
        been removed along with two unused local variables.
        """
        try:
            consecutive_empty_chunks = 0
            
            while self.is_translating:
                # Next captured chunk from the microphone queue.
                audio_chunk = await self.audio_streamer.get_audio_chunk()
                
                # Measure its loudness and report quality diagnostics.
                db_level, rms_level = self.audio_streamer.calculate_audio_level(audio_chunk)
                self._monitor_input_audio_quality(audio_chunk, db_level, rms_level)
                
                # Track runs of suspiciously small chunks.
                if len(audio_chunk) < 100:
                    consecutive_empty_chunks += 1
                    if consecutive_empty_chunks % 50 == 0:
                        logging.warning(f"⚠️  连续接收到 {consecutive_empty_chunks} 个小音频块 (大小: {len(audio_chunk)} 字节)")
                else:
                    consecutive_empty_chunks = 0
                
                # Below -40 dB the chunk is treated as silence (the threshold
                # this method already used for its low-volume warning).
                is_silence = db_level < -40
                if is_silence:
                    if self.stats['audio_sent'] % 50 == 0:
                        logging.warning(f"⚠️  音频音量过低 ({db_level:.1f}dB)，请检查麦克风设置或增大说话音量")
                    
                    # Extremely quiet input gets a fixed boost before sending.
                    if db_level < -50:
                        audio_chunk = self.audio_streamer.amplify_audio(audio_chunk, gain_factor=3.0)
                        logging.debug(f"🔊 音频被放大 (原始音量: {db_level:.1f}dB)")
                
                # Throttled logging of outgoing chunks.
                current_time = time.time()
                if current_time - self.audio_streamer.last_send_log_time >= self.audio_streamer.log_interval:
                    if is_silence:
                        logging.info(f"📤 发送静音音频数据到翻译服务器 (大小: {len(audio_chunk)} 字节, 音量: {db_level:.1f}dB)")
                    else:
                        logging.info(f"📤 发送语音音频数据到翻译服务器 (大小: {len(audio_chunk)} 字节, 音量: {db_level:.1f}dB)")
                    self.audio_streamer.last_send_log_time = current_time
                
                # Ship the chunk and update counters.
                await self.translation_client.send_audio_chunk(audio_chunk)
                self.stats['audio_sent'] += 1
                
                # Dynamic pacing: longer sleep during silence, shorter during speech.
                if is_silence:
                    await asyncio.sleep(0.1)
                else:
                    await asyncio.sleep(0.05)
                    
        except Exception as e:
            logging.error(f"发送音频数据错误: {e}")
            self.is_translating = False
    
    async def _receive_translation_results(self):
        """Receive translation results and dispatch them (play/save/log).

        Runs until the session finishes/fails or is_translating is cleared;
        each receive uses a 1 s timeout so the loop can observe shutdown.
        """
        try:
            while self.is_translating:
                try:
                    # Wait for the next response with a shutdown-friendly timeout.
                    logging.debug("🔄 等待接收翻译结果...")
                    resp = await asyncio.wait_for(self.translation_client.receive_message(), timeout=1.0)
                    
                    if not resp:
                        logging.debug("⚠️  接收到空响应，继续等待...")
                        continue
                    
                    # Log every response type for debugging.
                    logging.info(f"📋 接收到响应 - 事件类型: {resp['event']}, 序列号: {resp['sequence']}")
                    
                    # Dispatch on the event type.
                    if resp['event'] == Type.SessionFinished:
                        logging.info("翻译会话正常结束")
                        break
                    elif resp['event'] == Type.SessionFailed or resp['event'] == Type.SessionCanceled:
                        logging.error(f"翻译会话失败: {resp['message']}")
                        self.is_translating = False
                        break
                    elif resp['event'] == Type.TaskStarted or resp['event'] == Type.ConnectionStarted:
                        logging.info("✅ 翻译任务成功启动")
                    elif resp['event'] == Type.TaskFailed or resp['event'] == Type.ConnectionFailed:
                        logging.error(f"❌ 翻译任务启动失败: {resp['message']}")
                        self.is_translating = False
                        break
                    elif resp['event'] == Type.TranslationSubtitleResponse:
                        # Subtitle response — may carry translated text.
                        if resp['text']:
                            logging.info(f"🎯 翻译字幕: {resp['text']}")
                            self.stats['translations_received'] += 1
                        else:
                            logging.debug(f"接收到翻译字幕响应但无文本内容")
                    elif resp['event'] == Type.ASRResponse:
                        # Speech-recognition response — text and possibly audio.
                        if resp['text']:
                            logging.info(f"🎤 语音识别: {resp['text']}")
                            # Play attached audio when present.
                            if resp.get('data'):
                                self.audio_streamer.play_translation_audio(resp['data'])
                                logging.info(f"🔊 播放识别音频 (大小: {len(resp['data'])} 字节)")
                            self.stats['translations_received'] += 1
                        else:
                            logging.debug(f"接收到ASR响应但无文本内容")
                    elif resp['event'] == Type.AudioMuted:
                        # Server-side silence detection; keep streaming.
                        logging.info("🔇 服务器检测到音频静音，继续发送音频...")
                    elif resp['event'] == 351:
                        # Observed-but-unnamed event: translation text response.
                        # TODO confirm against the protobuf event enum.
                        logging.info(f"📝 接收到翻译文本响应 (事件类型: 351)")
                        if resp.get('text'):
                            logging.info(f"🗣️  翻译文本: {resp['text']}")
                            self.stats['translation_text'] = self.stats.get('translation_text', 0) + 1
                    elif resp['event'] == 352:
                        # Observed-but-unnamed event: translation audio response.
                        # TODO confirm against the protobuf event enum.
                        logging.info(f"🔊 接收到翻译音频响应 (事件类型: 352)")
                        if resp.get('data'):
                            logging.info(f"🎵 翻译音频数据大小: {len(resp['data'])} 字节")
                            self.audio_streamer.play_translation_audio(resp['data'])
                            self.stats['translation_audio'] = self.stats.get('translation_audio', 0) + 1
                    elif resp['event'] == Type.UsageResponse:
                        if resp['data']:
                            # Fixed: the conditional used to sit *inside* the
                            # f-string literal and was printed verbatim.
                            logging.info(f"📥 接收到翻译响应 (音频大小: {len(resp['data'])} 字节, 文本: {resp['text'][:50] + '...' if resp['text'] else '无文本'})")
                            
                            self.stats['translations_received'] += 1
                            
                            # Play the translated audio.
                            self.audio_streamer.play_translation_audio(resp['data'])
                            logging.info(f"🔊 播放翻译音频 (大小: {len(resp['data'])} 字节)")
                            
                            # Show the translated text.
                            if resp['text']:
                                logging.info(f"🎯 翻译: {resp['text']}")
                                
                            # Periodic throughput statistics.
                            if self.stats['translations_received'] % 10 == 0:
                                elapsed_time = time.time() - self.stats['start_time']
                                logging.info(f"📊 统计 - 已发送音频: {self.stats['audio_sent']}, 已接收翻译: {self.stats['translations_received']}, 运行时间: {elapsed_time:.1f}秒")
                        else:
                            logging.warning(f"⚠️  接收到UsageResponse但无音频数据 - 文本: {resp['text']}")
                    else:
                        # Unhandled event types are logged with full context.
                        event_name = Type.Name(resp['event']) if hasattr(resp['event'], 'Name') else str(resp['event'])
                        logging.warning(f"⚠️  接收到未处理的响应类型: {event_name} (值: {resp['event']}), 消息: {resp.get('message', '无消息')}, 文本: {resp.get('text', '无文本')}, 有序列号: {'sequence' in resp}")
                    
                except asyncio.TimeoutError:
                    # Timeouts are expected; loop again and recheck shutdown.
                    logging.debug("⏰ 接收超时，继续等待...")
                    continue
                except Exception as e:
                    # Log and keep receiving — a single bad frame should not
                    # terminate the whole session.
                    logging.error(f"接收翻译结果错误: {e}")
                    
        except Exception as e:
            logging.error(f"接收任务错误: {e}")
            self.is_translating = False
    
    async def stop_translation(self):
        """Stop the pipeline: finish the session, stop audio, await tasks,
        close the connection, release resources and print final statistics.
        """
        self.is_translating = False
        
        # Ask the server to finish the session.
        await self.translation_client.end_session()
        
        # Stop microphone capture.
        await self.audio_streamer.stop_recording()
        
        # Wait for both worker tasks to wind down.
        if self.send_task:
            await self.send_task
        if self.receive_task:
            await self.receive_task
        
        # Close the WebSocket connection.
        await self.translation_client.close()
        
        # Release audio resources.
        self.audio_streamer.close()
        
        # Final statistics.
        if self.stats['start_time']:
            total_time = time.time() - self.stats['start_time']
            logging.info(f"🎯 翻译会话结束")
            logging.info(f"📊 最终统计:")
            logging.info(f"   总运行时间: {total_time:.1f}秒")
            logging.info(f"   发送音频块数: {self.stats['audio_sent']}")
            logging.info(f"   接收翻译数: {self.stats['translations_received']}")
            if total_time > 0:
                logging.info(f"   平均翻译速度: {self.stats['translations_received']/total_time:.1f} 次/秒")

# Supported translation directions.
# Keys are the CLI values accepted by --lang; 'source'/'target' are the
# language codes sent to the translation service.
LANGUAGE_CONFIGS = {
    "zh-en": {"name": "中文→英文", "source": "zh", "target": "en"},
    "en-zh": {"name": "英文→中文", "source": "en", "target": "zh"},
    "zh-ja": {"name": "中文→日文", "source": "zh", "target": "ja"},
    "ja-zh": {"name": "日文→中文", "source": "ja", "target": "zh"},
}

def list_languages():
    """Print every supported language pair to stdout."""
    print("支持的语言对:")
    for pair, info in LANGUAGE_CONFIGS.items():
        print(f"  {pair}: {info['name']}")

# Program entry point
async def main():
    """Parse CLI arguments, build the translator and run it until done.

    Flags:
        --lang        language pair key from LANGUAGE_CONFIGS (default zh-en)
        --list-langs  print supported pairs and exit
        --audio-mode  "play" for live playback, "save" to write WAV files
    """
    import argparse
    parser = argparse.ArgumentParser(description='实时同声传译系统')
    parser.add_argument('--lang', default='zh-en', choices=list(LANGUAGE_CONFIGS.keys()),
                       help='语言对 (默认: zh-en)')
    parser.add_argument('--list-langs', action='store_true', help='列出支持的语言对')
    parser.add_argument('--audio-mode', default='play', choices=['play', 'save'],
                       help='音频输出模式: play=实时播放, save=仅保存文件 (默认: play)')
    
    args = parser.parse_args()
    
    if args.list_langs:
        list_languages()
        return
    
    # Resolve the selected language pair.
    lang_config = LANGUAGE_CONFIGS[args.lang]
    
    # SECURITY: these credentials were previously hard-coded only. They can
    # now be supplied via environment variables; the committed values remain
    # as fallbacks for backward compatibility but should be rotated and
    # removed from source control.
    config = Config(
        ws_url=os.environ.get("AST_WS_URL", "wss://openspeech.bytedance.com/api/v4/ast/v2/translate"),
        app_key=os.environ.get("AST_APP_KEY", "3448079381"),
        access_key=os.environ.get("AST_ACCESS_KEY", "HShSuL8ulFuLrMU0d4W4gIbJUpwr34UO"),
        resource_id=os.environ.get("AST_RESOURCE_ID", "volc.service_type.10053")
    )
    
    # Build the realtime translator.
    translator = EnhancedRealtimeTranslator(config, audio_mode=args.audio_mode)
    
    try:
        # Welcome banner.
        print("=" * 60)
        print("🎤 实时同声传译系统 v2.0")
        print("=" * 60)
        print(f"语言对: {lang_config['name']}")
        print(f"源语言: {lang_config['source']} -> 目标语言: {lang_config['target']}")
        print(f"音频模式: {'实时播放' if args.audio_mode == 'play' else '仅保存文件'}")
        print("-" * 60)
        
        # Run the realtime translation loop.
        await translator.start_translation(
            source_lang=lang_config['source'], 
            target_lang=lang_config['target']
        )
        
    except Exception as e:
        logging.error(f"实时翻译错误: {e}")
    finally:
        # start_translation already stops the pipeline on its way out, so
        # this second stop is best-effort; suppress double-cleanup errors
        # (e.g. sending FinishSession over an already-closed socket).
        try:
            await translator.stop_translation()
        except Exception as cleanup_error:
            logging.debug(f"清理资源时出错: {cleanup_error}")

if __name__ == "__main__":
    # 配置日志
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%H:%M:%S'
    )
    
    try:
        # 运行实时同声传译
        asyncio.run(main())
    except KeyboardInterrupt:
        logging.info("程序被用户中断")
    except Exception as e:
        logging.error(f"程序运行错误: {e}")