import asyncio
import websockets
import subprocess
import logging
import numpy as np
from collections import deque
from speech_recognition import Recognizer

# graph LR
#     A[Frontend] -->|WebSocket MP3 stream| B(FFmpeg decoder)
#     B -->|PCM data| C[Speech recognition engine]
#     C -->|Text| D[Result handler]

# Configure module-wide logging; all decoder/server messages go through this logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("FFmpegStreamDecoder")

class FFmpegStreamDecoder:
    """Streaming audio decoder built on an FFmpeg subprocess.

    Compressed audio (e.g. MP3 fragments) is written to FFmpeg's stdin and
    decoded PCM is read back from its stdout. The most recent PCM chunks are
    kept in a rolling buffer and can be transcribed with Whisper through the
    `speech_recognition` library.
    """

    def __init__(self, sample_rate=16000, channels=1, format='s16le'):
        """
        Initialize the streaming FFmpeg decoder.

        :param sample_rate: output sample rate in Hz
        :param channels: number of output channels
        :param format: output sample format (s16le = signed 16-bit little-endian PCM)
        """
        self.sample_rate = sample_rate
        self.channels = channels
        self.format = format
        self.process = None             # FFmpeg subprocess; created by start()
        self.buffer = deque(maxlen=10)  # rolling window of the 10 newest PCM chunks
        self.recognizer = Recognizer()

    def start(self):
        """Spawn the FFmpeg decoder process (stdin: compressed audio, stdout: raw PCM)."""
        ffmpeg_cmd = [
            'ffmpeg',
            '-i', '-',                     # read compressed audio from stdin
            '-acodec', 'pcm_s16le',        # decode to 16-bit little-endian PCM
            '-ar', str(self.sample_rate),  # output sample rate
            '-ac', str(self.channels),     # output channel count
            '-f', self.format,             # raw (headerless) output format
            '-fflags', 'nobuffer',         # minimize input-side buffering
            '-flags', 'low_delay',         # low-latency decoding mode
            '-'                            # write PCM to stdout
        ]

        logger.info(f"启动FFmpeg进程: {' '.join(ffmpeg_cmd)}")
        self.process = subprocess.Popen(
            ffmpeg_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=0  # unbuffered pipes for minimal latency
        )

        # Drain stderr in the background so FFmpeg can never block on a full pipe.
        self._start_error_logger()

    def _start_error_logger(self):
        """Log FFmpeg's stderr output from a background daemon thread."""
        import threading

        def log_errors():
            # Read until EOF instead of polling the process, so diagnostics
            # emitted right before/after process exit are not dropped.
            for raw in iter(self.process.stderr.readline, b''):
                # errors='replace': FFmpeg output is not guaranteed to be UTF-8.
                logger.debug(f"FFmpeg: {raw.decode(errors='replace').strip()}")

        threading.Thread(target=log_errors, daemon=True).start()

    def feed_audio(self, data: bytes):
        """Write a chunk of compressed audio into the decoder.

        :param data: raw compressed audio bytes (e.g. an MP3 fragment)
        """
        if self.process and self.process.poll() is None:
            try:
                self.process.stdin.write(data)
                self.process.stdin.flush()
            # OSError covers BrokenPipeError; ValueError is raised when
            # writing to a stdin that has already been closed.
            except (OSError, ValueError):
                logger.error("FFmpeg进程管道已断开")

    def read_decoded(self, chunk_size=3200) -> bytes:
        """
        Read one chunk of decoded PCM and append it to the rolling buffer.

        NOTE: this blocks until `chunk_size` bytes are available or FFmpeg's
        stdout reaches EOF — in async code, call it off the event loop.

        :param chunk_size: bytes to read (3200 bytes = 100 ms of 16 kHz mono s16)
        :return: PCM bytes, or b'' when no process is running or on error
        """
        if self.process and self.process.poll() is None:
            try:
                data = self.process.stdout.read(chunk_size)
                if data:
                    self.buffer.append(data)
                    return data
            except Exception as e:
                logger.error(f"读取解码数据错误: {str(e)}")
        return b''

    def stop(self):
        """Shut the decoder down: close stdin, terminate FFmpeg, kill after 2 s."""
        if not self.process:
            return
        try:
            self.process.stdin.close()
        except OSError:
            # Closing a broken pipe can itself raise; swallow it so the
            # process is still terminated instead of being orphaned.
            pass
        try:
            self.process.terminate()
            self.process.wait(timeout=2)
        except subprocess.TimeoutExpired:
            self.process.kill()
        logger.info("FFmpeg进程已停止")

    def transcribe_stream(self):
        """Transcribe the currently buffered PCM and clear the buffer.

        :return: recognized text, or "" when the buffer is empty or on error
        """
        if not self.buffer:
            return ""

        # Join the most recent decoded chunks into one utterance.
        audio_data = b''.join(self.buffer)
        self.buffer.clear()

        # Wrap the raw PCM in the container the recognizer expects.
        audio_frame = self._bytes_to_audio_data(audio_data)

        try:
            # Whisper expects bare language codes such as "zh";
            # locale-style "zh-CN" is rejected as an unsupported language.
            text = self.recognizer.recognize_whisper(
                audio_frame,
                language="zh",
                model="small"
            )
            return text
        except Exception as e:
            logger.error(f"语音识别错误: {str(e)}")
            return ""

    def _bytes_to_audio_data(self, data: bytes):
        """Wrap raw PCM bytes in a `speech_recognition.AudioData` object."""
        from speech_recognition import AudioData
        return AudioData(
            data,
            sample_rate=self.sample_rate,
            sample_width=2  # 16-bit samples = 2 bytes each
        )

# WebSocket server: one handler invocation per client connection.
async def audio_stream_server(websocket, path):
    """Receive compressed audio chunks over a WebSocket, decode them through
    FFmpeg, and send recognized text back to the client.

    :param websocket: the client connection
    :param path: request path (unused, required by the websockets handler API)
    """
    logger.info(f"客户端连接: {websocket.remote_address}")
    decoder = FFmpegStreamDecoder()
    decoder.start()
    loop = asyncio.get_running_loop()

    try:
        async for message in websocket:
            # The frontend sends binary MP3 chunks; anything else is ignored.
            if isinstance(message, bytes):
                # Pipe writes/reads block on the FFmpeg process and Whisper
                # inference is CPU-heavy — run all of them in the default
                # executor so the event loop stays responsive for other clients.
                await loop.run_in_executor(None, decoder.feed_audio, message)
                pcm_data = await loop.run_in_executor(None, decoder.read_decoded)

                if pcm_data:
                    # Simplified VAD stand-in: recognize once every 5 buffered
                    # chunks (real voice-activity detection could go here).
                    if len(decoder.buffer) >= 5:
                        text = await loop.run_in_executor(
                            None, decoder.transcribe_stream
                        )
                        if text:
                            # Push the recognition result back to the client.
                            await websocket.send(text)
                            logger.info(f"识别结果: {text}")
            else:
                logger.warning(f"收到非二进制消息: {message[:50]}...")
    except websockets.exceptions.ConnectionClosed:
        logger.info("客户端断开连接")
    finally:
        decoder.stop()

# Service entry point
async def main():
    """Bind the WebSocket endpoint on 0.0.0.0:8765 and wait until it closes."""
    max_message_bytes = 10 * 1024 * 1024  # accept audio frames up to 10 MB
    server = await websockets.serve(
        audio_stream_server, "0.0.0.0", 8765, max_size=max_message_bytes
    )
    logger.info("WebSocket服务器已启动，监听端口 8765")
    await server.wait_closed()

# Start the service only when executed as a script (not on import).
if __name__ == "__main__":
    asyncio.run(main())