from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from typing import Dict, Optional
import asyncio
import json
import threading
import time
import os
from record_wav import record_and_recognize
from pydantic import BaseModel
from punc_restore import chinese_marks_punctuation_replace

# Ensure the temporary directory for per-client WAV recordings exists.
# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() test and is a no-op when the directory is already there.
TEMP_DIR = "temp"
os.makedirs(TEMP_DIR, exist_ok=True)

# Response model describing the recognition payload sent to clients
# (declared for documentation purposes).
class ASRResult(BaseModel):
    """Shape of a recognition message delivered over the ASR WebSocket."""
    status: str  # e.g. "success", "error", "recording", ...
    text: str  # recognized text ("" when nothing usable was captured)
    # Explicit Optional[...] instead of the implicit-optional `float = None`,
    # which is rejected by pydantic v2 / PEP 484 strict checkers.
    inference_time: Optional[float] = None  # seconds spent in ASR inference
    message: Optional[str] = None  # human-readable status or error details

# Router for the speech-recognition WebSocket API; mounted by the main app.
router = APIRouter(
    tags=["websocket"],
    responses={404: {"description": "Not found"}},
)

# All currently active WebSocket connections, keyed by client id
# (the client id is str(id(websocket)), assigned on connect).
active_connections: Dict[str, WebSocket] = {}
# Per-client flag: True while that client's audio is being recorded.
# The recording thread polls this to know when to stop.
recording_status: Dict[str, bool] = {}
# Per-client background recording thread.
recording_threads: Dict[str, threading.Thread] = {}
# Per-client setting: whether recording auto-stops after sustained silence.
auto_silence_detection: Dict[str, bool] = {}

# Plain HTTP GET endpoint whose docstring surfaces the WebSocket protocol
# in the auto-generated /docs page (WebSocket routes are not documented there).
@router.get("/audio-api", summary="语音识别WebSocket API文档")
async def get_websocket_docs():
    """
    ## Speech-recognition WebSocket API

    Real-time speech recognition over a WebSocket connection.
    Connection URL: `ws://{host}:{port}/ws/audio`

    ### Messages sent by the client:

    1. Start recording:
    ```json
    {
        "action": "start",
        "lang_sym": "zh",
        "auto_silence_detection": false
    }
    ```

    2. Stop recording:
    ```json
    {
        "action": "stop"
    }
    ```

    ### Messages returned by the server:

    1. Connection established:
    ```json
    {
        "status": "ready",
        "message": "连接成功，等待指令"
    }
    ```

    2. Recording status:
    ```json
    {
        "status": "recording",
        "message": "开始录音..."
    }
    ```

    3. Volume information:
    ```json
    {
        "status": "volume",
        "volume": 35.5,
        "is_speaking": true
    }
    ```

    4. Recognition result:
    ```json
    {
        "status": "success",
        "text": "识别的文本内容",
        "inference_time": 0.123
    }
    ```

    5. Error message:
    ```json
    {
        "status": "error",
        "message": "错误描述"
    }
    ```
    """
    # Static payload that points callers at the actual WebSocket endpoint.
    docs_payload = {
        "websocket_url": "/ws/audio",
        "description": "语音识别WebSocket API",
        "usage": "通过WebSocket连接进行实时语音识别",
        "example_client": "可以使用JavaScript WebSocket客户端连接此API"
    }
    return docs_payload

# WebSocket connection handling for one client.
@router.websocket("/audio")
async def websocket_endpoint(websocket: WebSocket):
    """
    Speech-recognition WebSocket endpoint.

    Protocol:
      * the client sends ``{"action": "start", "lang_sym": ..., "device_keyword": ...,
        "auto_silence_detection": bool}`` to begin recording;
      * the client sends ``{"action": "stop"}`` to end recording;
      * the server streams JSON status messages and finally a result of the form
        ``{"status": "success", "text": ..., "inference_time": ...}``.

    When ``auto_silence_detection`` is enabled the recording thread stops on its
    own after sustained silence. Recognition is performed by
    ``dolphin_asr.stream_asr`` inside the recording thread.
    """
    await websocket.accept()
    # id() of the socket object is unique for the lifetime of the connection.
    client_id = str(id(websocket))
    active_connections[client_id] = websocket
    recording_status[client_id] = False
    auto_silence_detection[client_id] = False  # silence auto-stop is opt-in per session

    try:
        await websocket.send_json({
            "status": "ready",
            "message": "连接成功，等待指令"
        })

        while True:
            data = await websocket.receive_json()

            if data.get("action") == "start" and not recording_status[client_id]:
                # Begin a new recording session.
                recording_status[client_id] = True

                # Recording parameters (path is per-client so sessions don't collide).
                output_file = os.path.join(TEMP_DIR, f"temp_recording_{client_id}.wav")
                lang_sym = data.get("lang_sym", "zh")
                device_keyword = data.get("device_keyword", "audio")

                # The silence auto-stop setting can be toggled per session.
                auto_silence_detection[client_id] = data.get("auto_silence_detection", False)

                await websocket.send_json({
                    "status": "recording",
                    "message": f"开始录音...自动静音检测: {'开启' if auto_silence_detection[client_id] else '关闭'}"
                })

                # Capture runs in a daemon thread so this coroutine keeps
                # servicing control messages (notably "stop") while recording.
                recording_thread = threading.Thread(
                    target=record_audio_for_client,
                    args=(client_id, output_file, websocket, lang_sym, device_keyword)
                )
                recording_threads[client_id] = recording_thread
                recording_thread.daemon = True  # don't block process shutdown
                recording_thread.start()

            elif data.get("action") == "stop" and recording_status[client_id]:
                # Flipping the flag makes the recording thread's capture loop exit.
                recording_status[client_id] = False
                await websocket.send_json({
                    "status": "processing",
                    "message": "停止录音..."
                })

    except WebSocketDisconnect:
        # Cleanup is handled once in the finally block below
        # (previously cleanup_client ran twice on disconnect).
        print(f"客户端 {client_id} 断开连接")
    except Exception as e:
        print(f"WebSocket错误: {str(e)}")
        try:
            await websocket.send_json({
                "status": "error",
                "message": f"发生错误: {str(e)}"
            })
        except Exception:
            # Socket is likely already gone; nothing more to do.
            pass
    finally:
        cleanup_client(client_id)

import re
from typing import Union, List, Any

def extract_clean_text(input_data: Union[str, List, Any]) -> str:
    """
    Strip XML-style tags/timestamps from ASR output and normalize punctuation.

    Args:
        input_data: a string such as
            '<zh><SICHUAN><asr><0.00> 哎呀,桂花超级漂亮的。<2.98>',
            a list of such values (flattened recursively and joined with
            spaces), or any other object (converted with ``str()`` first).

    Returns:
        str: the input with every ``<...>`` tag removed and Chinese
        punctuation normalized via ``chinese_marks_punctuation_replace``.
    """
    # Lists are handled element-by-element and joined with single spaces.
    if isinstance(input_data, list):
        return ' '.join(extract_clean_text(part) for part in input_data)

    # Any other non-string value is stringified before processing.
    if not isinstance(input_data, str):
        input_data = str(input_data)

    # Remove every <...> tag (language markers, timestamps, etc.), then
    # normalize the Chinese punctuation in what remains.
    without_tags = re.sub(r'<[^>]+>', '', input_data)
    return chinese_marks_punctuation_replace(without_tags)

def record_audio_for_client(client_id, output_file, websocket, lang_sym="zh", device_keyword="audio"):
    """
    Thread worker: capture microphone audio for one client, then recognize it.

    Started as a daemon thread by the WebSocket endpoint. Capture continues
    while ``recording_status[client_id]`` is True; when the client enabled
    auto-silence detection, capture also ends after sustained silence (or when
    no voice is detected at all near the start). On success the WAV file is
    written under TEMP_DIR, passed to ``dolphin_asr.stream_asr``, and the
    recognition result is pushed to the client over the WebSocket as JSON.

    Args:
        client_id: key into the module-level per-client state dictionaries.
        output_file: requested WAV path. NOTE(review): this argument is
            immediately overwritten below, so it is effectively unused.
        websocket: the client's WebSocket, used for all status/result messages.
        lang_sym: language symbol forwarded to the ASR model (default "zh").
        device_keyword: substring used to select the audio input device.

    Returns:
        The result JSON dict on successful recognition, otherwise None.
    """
    try:
        # Force the output file into the temp directory (ignores the caller's path).
        output_file = os.path.join(TEMP_DIR, f"temp_recording_{client_id}.wav")
        
        # Imported lazily so the module can load on hosts without PyAudio.
        import pyaudio
        
        # Enumerate input devices and pick the one matching device_keyword.
        from record_wav import list_audio_devices, find_audio_device
        input_devices = list_audio_devices()
        if not input_devices:
            # No microphone available: report the error and abort this session.
            # NOTE(review): asyncio.run() creates a fresh event loop per call in
            # this worker thread, while the websocket belongs to the server's
            # loop — appears to work with this stack, but confirm thread-safety.
            asyncio.run(send_to_client(websocket, {
                "status": "error",
                "message": "无法继续录音，请检查您的麦克风连接或权限设置。"
            }))
            recording_status[client_id] = False
            return None
            
        device_index = find_audio_device(input_devices, device_keyword)
        
        # Audio capture parameters.
        FORMAT = pyaudio.paInt16  # 16-bit samples
        CHANNELS = 1              # mono
        RATE = 44100              # 44.1 kHz sampling rate
        CHUNK = 1024              # frames per read
        
        # Silence-detection parameters.
        SILENCE_THRESHOLD = 15  # mean-amplitude floor; quieter chunks count as silence
        SILENCE_DURATION = 2    # seconds of silence before auto-stop
        CHUNKS_PER_SECOND = RATE / CHUNK  # chunks captured per second
        MAX_SILENT_CHUNKS = int(SILENCE_DURATION * CHUNKS_PER_SECOND)  # max consecutive silent observations
        
        audio = pyaudio.PyAudio()
        
        # Open the input stream on the selected device.
        stream = audio.open(format=FORMAT,
                          channels=CHANNELS,
                          rate=RATE,
                          input=True,
                          input_device_index=device_index,
                          frames_per_buffer=CHUNK)
        
        # Report the chosen device and whether silence auto-stop is active.
        silence_detection_msg = f"，{SILENCE_DURATION}秒静音将自动停止" if auto_silence_detection.get(client_id, False) else "，自动静音检测已关闭"
        asyncio.run(send_to_client(websocket, {
            "status": "recording",
            "message": f"使用设备ID {device_index} 录制音频{silence_detection_msg}"
        }))
        
        frames = []             # raw audio chunks captured so far
        silent_chunks = 0       # consecutive "silent" observations (see NOTE in the loop)
        voice_detected = False  # True once volume ever exceeds the threshold
        
        # Sample background noise to calibrate the silence threshold.
        asyncio.run(send_to_client(websocket, {
            "status": "calibrating",
            "message": "校准中，请保持安静..."
        }))
        
        background_samples = []
        for i in range(0, int(RATE / CHUNK * 1)):  # collect 1 second of background noise
            data = stream.read(CHUNK, exception_on_overflow=False)
            background_samples.append(data)
            
        # Average the mean absolute amplitude across the calibration window.
        import numpy as np
        background_noise = 0
        for sample in background_samples:
            audio_data = np.frombuffer(sample, dtype=np.int16)
            background_noise += np.abs(audio_data).mean()
            
        background_noise = background_noise / len(background_samples) if background_samples else 0
        # Use 2x the background noise as the working threshold, never below the preset floor.
        adjusted_threshold = max(SILENCE_THRESHOLD, background_noise * 2)
        
        asyncio.run(send_to_client(websocket, {
            "status": "info",
            "message": f"背景噪音水平: {background_noise:.2f}，调整后阈值: {adjusted_threshold:.2f}"
        }))
        
        asyncio.run(send_to_client(websocket, {
            "status": "listening",
            "message": "开始录音，请说话..."
        }))
        
        # Record until the client requests a stop, or (with auto-silence
        # detection enabled) until sustained silence is observed.
        while recording_status.get(client_id, False):
            if stream.is_stopped():
                stream.start_stream()
                
            # Read the next chunk of audio.
            data = stream.read(CHUNK, exception_on_overflow=False)
            frames.append(data)
            
            # Mean absolute amplitude serves as a cheap volume estimate.
            audio_data = np.frombuffer(data, dtype=np.int16)
            volume = np.abs(audio_data).mean()
            
            # Every 5th chunk (~0.1s) evaluate and report the volume.
            # NOTE(review): silent_chunks is only incremented inside this
            # branch, i.e. once per 5 chunks, so reaching MAX_SILENT_CHUNKS
            # actually spans ~5x SILENCE_DURATION seconds — confirm intended.
            if len(frames) % 5 == 0:
                # Send volume level plus a speech/no-speech flag.
                if volume > adjusted_threshold:
                    is_speaking = True
                    voice_detected = True
                    silent_chunks = 0  # reset the silence counter
                    asyncio.run(send_to_client(websocket, {
                        "status": "volume",
                        "volume": float(volume),
                        "is_speaking": True
                    }))
                else:
                    silent_chunks += 1
                    volume_info = {
                        "status": "volume",
                        "volume": float(volume),
                        "is_speaking": False
                    }
                    
                    # Only report the silence countdown when auto-stop is enabled.
                    if auto_silence_detection.get(client_id, False):
                        volume_info.update({
                            "silent_count": silent_chunks,
                            "max_silent": MAX_SILENT_CHUNKS
                        })
                        
                    asyncio.run(send_to_client(websocket, volume_info))
            
            # Auto-stop logic (only when the client enabled it).
            if auto_silence_detection.get(client_id, False):
                # No voice at all within the opening window: give up.
                if len(frames) > MAX_SILENT_CHUNKS and not voice_detected:
                    asyncio.run(send_to_client(websocket, {
                        "status": "no_voice",
                        "message": "未检测到声音，停止录音"
                    }))
                    break
                    
                # Voice was heard earlier but has now gone quiet: stop recording.
                if silent_chunks >= MAX_SILENT_CHUNKS and voice_detected:
                    asyncio.run(send_to_client(websocket, {
                        "status": "silence_detected",
                        "message": f"检测到{SILENCE_DURATION}秒静音，自动停止录音"
                    }))
                    break
        
        # Recording finished — release the audio resources.
        stream.stop_stream()
        stream.close()
        audio.terminate()
        
        # Only save and recognize if something was captured AND voice was heard.
        if frames and voice_detected:
            import wave
            with wave.open(output_file, 'wb') as wf:
                wf.setnchannels(CHANNELS)
                # NOTE(review): audio.terminate() already ran above;
                # get_sample_size appears to be a pure format lookup so this
                # works, but calling it before terminate() would be safer.
                wf.setsampwidth(audio.get_sample_size(FORMAT))
                wf.setframerate(RATE)
                wf.writeframes(b''.join(frames))
            
            # Run speech recognition on the captured WAV file.
            try:
                # Lazy import: the ASR model is heavy and optional.
                import dolphin_asr
                
                # Tell the client recognition is in progress.
                asyncio.run(send_to_client(websocket, {
                    "status": "recognizing",
                    "message": "正在进行语音识别..."
                }))
                
                # Load the ASR model and recognize the recording.
                result, inference_time = dolphin_asr.stream_asr(output_file, lang_sym=lang_sym)
                recognized_text = result.text
                
                # Strip <...> tags (language markers / timestamps) from the output.
                recognized_text = extract_clean_text(recognized_text)
                
                # Minimal result payload: status, text and timing only.
                result_json = {
                    "status": "success",
                    "text": recognized_text,
                    "inference_time": float(inference_time)
                }
                
                # Deliver the recognition result to the client.
                asyncio.run(send_to_client(websocket, result_json))
                
                # Best-effort removal of the temporary WAV file.
                try:
                    if os.path.exists(output_file):
                        os.remove(output_file)
                        asyncio.run(send_to_client(websocket, {
                            "status": "info",
                            "message": f"临时文件已删除: {output_file}"
                        }))
                except Exception as e:
                    asyncio.run(send_to_client(websocket, {
                        "status": "warning",
                        "message": f"无法删除临时文件: {str(e)}"
                    }))
                    
                return result_json
                
            except ImportError:
                asyncio.run(send_to_client(websocket, {
                    "status": "error",
                    "message": "无法导入dolphin_asr模块，无法进行语音识别"
                }))
            except Exception as e:
                asyncio.run(send_to_client(websocket, {
                    "status": "error",
                    "message": f"语音识别出错: {str(e)}"
                }))
        else:
            # Nothing usable was recorded — tell the client with an empty text.
            asyncio.run(send_to_client(websocket, {
                "status": "no_valid_audio",
                "message": "未检测到有效语音，无法识别",
                "text": ""
            }))
            
    except Exception as e:
        # Catch-all: surface any capture/IO failure to the client rather
        # than letting the worker thread die silently.
        asyncio.run(send_to_client(websocket, {
            "status": "error",
            "message": f"录音过程出错: {str(e)}"
        }))

async def send_to_client(websocket, message):
    """
    Best-effort delivery of a JSON payload to a client WebSocket.

    Any failure (closed socket, serialization error) is logged and swallowed
    so that a dead client can never crash the recording thread.
    """
    try:
        await websocket.send_json(message)
    except Exception as e:
        # Log only; callers treat delivery as fire-and-forget.
        print(f"向客户端发送消息失败: {str(e)}")

def cleanup_client(client_id):
    """
    Release all per-client state and the client's temporary recording file.

    Safe to call multiple times and for unknown ids. Removing the
    recording_status entry makes the recording thread's
    ``recording_status.get(client_id, False)`` check return False, which
    stops the capture loop — equivalent to the old ``= False`` assignment,
    but without leaking one dict entry per disconnected client (the old code
    re-created the key after deleting the connection, and never removed the
    auto_silence_detection entry at all).
    """
    active_connections.pop(client_id, None)
    # Popping (rather than setting False) both stops the recording thread
    # and frees the entry.
    recording_status.pop(client_id, None)
    auto_silence_detection.pop(client_id, None)
    # The thread exits on its own once recording_status no longer holds True.
    recording_threads.pop(client_id, None)

    # Best-effort removal of the client's temporary recording file.
    temp_file = os.path.join(TEMP_DIR, f"temp_recording_{client_id}.wav")
    if os.path.exists(temp_file):
        try:
            os.remove(temp_file)
            print(f"已删除临时文件: {temp_file}")
        except Exception as e:
            print(f"无法删除临时文件 {temp_file}: {str(e)}") 