from faster_whisper import WhisperModel
import torch, numpy as np


class SpeechRecognizer:
    """Speech-to-text wrapper around faster-whisper.

    Loads a Whisper model once at construction and exposes a single
    ``transcribe`` method for float waveform buffers.
    """

    # Peak-amplitude threshold below which the buffer is treated as silence.
    _SILENCE_THRESHOLD = 0.001

    def __init__(self, model_size: str = "medium"):
        """Load the Whisper model.

        Args:
            model_size: faster-whisper model identifier (e.g. "medium").
        """
        # Prefer GPU when available; int8 quantization keeps memory low.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = WhisperModel(model_size, device=device, compute_type="int8")

    def transcribe(self, audio: np.ndarray, language: str = "en") -> str:
        """Transcribe an audio buffer to text.

        Args:
            audio: 1-D float waveform (assumed mono samples at the model's
                expected sample rate — TODO confirm upstream).
            language: ISO language code for the decoder (default "en";
                new keyword parameter, backward compatible).

        Returns:
            The concatenated transcript, or "" for empty or silent input.
        """
        # Check emptiness first: np.max on a zero-size array raises
        # ValueError, so the silence check alone is not safe.
        if audio.size == 0 or np.max(np.abs(audio)) < self._SILENCE_THRESHOLD:
            return ""
        segments, _ = self.model.transcribe(audio, language=language, beam_size=5)
        return " ".join(seg.text.strip() for seg in segments)
