import os
import tempfile
import numpy as np
import sherpa_onnx
from fastapi import APIRouter, WebSocket, WebSocketDisconnect
from fastapi.websockets import WebSocketState
from logger import logging
import json

# Wake words to spot in the incoming audio stream (Chinese text).
KEYWORDS = ["再见","你好小坎"]

# All routes in this module are mounted under the /api prefix.
router = APIRouter(prefix="/api")


class Detector:
    """Keyword spotter built around sherpa-onnx.

    At construction time this converts each entry of ``KEYWORDS`` to its
    pinyin token sequence, writes a temporary keywords file in the format
    sherpa-onnx expects, and loads the ONNX transducer models from
    ``./models``.
    """

    def __init__(self):
        # Convert the wake words to token sequences (partial pinyin) so they
        # can be matched against the model's token table.
        keywords_pinyin = sherpa_onnx.utils.text2token(KEYWORDS, "./models/tokens.txt", "ppinyin")
        logging.debug(keywords_pinyin)

        # Space-separated token strings, one per keyword — the form required
        # by the keywords file.
        self.keywords_content = [' '.join(pinyin) for pinyin in keywords_pinyin]
        # Concatenated pinyin per keyword (kept as a public attribute for
        # external consumers).
        self.py_keywords = [''.join(pinyin) for pinyin in keywords_pinyin]

        logging.debug(f"生成的关键词拼音列表：{self.keywords_content}")

        # Write the temporary keywords file consumed by the spotter.
        self.keywords_file = self._create_keywords_file(self.keywords_content)

        # Initialize the recognizer.
        self.recognizer = sherpa_onnx.KeywordSpotter(
            # NOTE(original): module-level path constants reportedly did not
            # work here, hence the inline literals.
            tokens="./models/tokens.txt",
            encoder="./models/encoder.onnx",
            decoder="./models/decoder.onnx",
            joiner="./models/joiner.onnx",
            keywords_file=self.keywords_file,
            # Keep the boost low; large values made the spotter unusable.
            keywords_score=1,
            # Trigger threshold for a keyword.
            keywords_threshold=0.05,
            # Trailing blanks expected after a keyword; raise (e.g. to 8)
            # when keywords share overlapping tokens.
            num_trailing_blanks=1,
            # Number of compute threads.
            num_threads=8,
            # Sample rate the model was trained with.
            sample_rate=16000,
        )

    def _create_keywords_file(self, keywords, labels=None):
        """Write a temporary sherpa-onnx keywords file and return its path.

        Each line has the form ``<tokens> @<display text>``; sherpa-onnx
        reports the text after ``@`` when that keyword fires.

        Args:
            keywords: Space-separated token strings, one per keyword.
            labels: Display texts paired positionally with ``keywords``.
                Defaults to the module-level ``KEYWORDS`` list.

        Returns:
            Path of the temporary file (caller is responsible for cleanup).
        """
        if labels is None:
            labels = KEYWORDS
        fd, path = tempfile.mkstemp(suffix='.txt')
        # Pair tokens with their labels explicitly instead of indexing the
        # global list by position, which silently misaligns if lengths differ.
        with os.fdopen(fd, 'w', encoding='utf-8') as f:
            for word, label in zip(keywords, labels):
                f.write(f"{word} @{label}\n")
        logging.debug(f"生成关键词文件：{path}")
        return path

# Module-level singleton; loads the ONNX models once at import time.
detector = Detector()

@router.websocket("/ws/keyword")
async def handle_websocket(websocket: WebSocket):
    """Stream audio over a WebSocket and push keyword detections back.

    The client sends raw little-endian float32 PCM samples at 16 kHz as
    binary frames. Every detected keyword is answered with a JSON text
    message ``{"current": <keyword>, "keywords": KEYWORDS}``.
    """
    await websocket.accept()
    stream = detector.recognizer.create_stream()
    logging.debug("连接建立")

    try:
        while True:
            data = await websocket.receive_bytes()

            # Frames are raw float32 samples matching the model's 16 kHz rate.
            samples = np.frombuffer(data, dtype=np.float32)

            # Feed audio into the spotter.
            stream.accept_waveform(16000, samples)

            # Drain everything the recognizer can decode from this chunk.
            while detector.recognizer.is_ready(stream):
                detector.recognizer.decode_stream(stream)
                result = detector.recognizer.get_result(stream)
                if result != "":
                    logging.info(f"触发关键词: [{result}]")
                    response = {
                        "current": result,
                        "keywords": KEYWORDS
                    }
                    await websocket.send_text(json.dumps(response))
                    # Reset after each hit so the spotter can fire the same
                    # keyword again (required by the sherpa-onnx KWS API).
                    detector.recognizer.reset_stream(stream)

    except WebSocketDisconnect:
        logging.info("客户端已正常断开连接")
    except Exception as e:
        logging.error(f"连接异常：{str(e)}", exc_info=True)
    finally:
        # Clear any per-connection decoding state.
        if detector.recognizer:
            detector.recognizer.reset_stream(stream)
        # Close only if the socket has not already been torn down.
        if websocket.client_state != WebSocketState.DISCONNECTED:
            try:
                await websocket.close()
            except RuntimeError:
                # Ignore errors from closing twice.
                pass


# Remove the temporary keywords file on interpreter exit.
import atexit
@atexit.register
def cleanup():
    """Delete the Detector's temporary keywords file if it still exists."""
    if os.path.exists(detector.keywords_file):
        os.remove(detector.keywords_file)
        # Use the module logger instead of print, consistent with the rest
        # of this file.
        logging.debug(f"已清理关键词文件：{detector.keywords_file}")
