# stt_listener.py
# -------------
# Author: Zako888y
#
# This file is dedicated to the public domain under the Creative Commons CC0 1.0 Universal (CC0 1.0) Public Domain Dedication.
# You may use, copy, modify, distribute, and/or sell this code for any purpose, without restriction. No warranty is provided. The author waives all rights and claims to this work worldwide.
# To view a copy of this dedication, visit https://creativecommons.org/publicdomain/zero/1.0/

from collections.abc import Callable

import whisper
import speech_recognition as sr

from segmenter import SegmentListener, SpeechSegmenter

# Lazily-initialized module-level Whisper model singleton;
# populated on first call to load_whisper_model().
__whisper_model: whisper.Whisper | None = None

model_level = "base"  # Whisper model size; options: "tiny", "base", "small", "medium", "large"


def load_whisper_model():
    """Load the Whisper model (size per ``model_level``) once and cache it.

    Returns:
        The cached ``whisper.Whisper`` instance.
    """
    global __whisper_model
    if __whisper_model is None:
        device = "cpu"
        # Fix: pass the device explicitly. Previously load_model() chose its
        # own default (CUDA when available), so the message below could claim
        # "cpu" while the model was actually on the GPU. This also matches the
        # fp16=False CPU setting used by stt_whisper's transcribe call.
        __whisper_model = whisper.load_model(model_level, device=device)
        print(f"Whisper 模型加载完成，使用设备: {device}")
    return __whisper_model


def stt_whisper(audio_data, samplerate=16000) -> str:
    """Transcribe a raw audio buffer with Whisper.

    Args:
        audio_data: Array-like audio samples; 1-D mono, or 2-D which is
            assumed to be (samples, channels).
        samplerate: Sample rate of ``audio_data`` in Hz; resampled to
            16 kHz automatically as Whisper requires.

    Returns:
        The recognized text (Chinese, per ``language="zh"``).
    """
    # Local imports keep these heavy dependencies off the module import path.
    import librosa
    import numpy as np

    # load_whisper_model() performs the lazy initialization and always returns
    # the model, so the previous manual global check + assert was redundant.
    model = load_whisper_model()

    # Whisper expects float32, mono, 16000 Hz audio.
    if not isinstance(audio_data, np.ndarray):
        audio_data = np.array(audio_data, dtype=np.float32)
    if audio_data.ndim > 1:
        # Transpose assumes (samples, channels) input layout —
        # TODO(review): confirm against the segmenter's actual output shape.
        audio_data = librosa.to_mono(audio_data.T)
    if samplerate != 16000:
        audio_data = librosa.resample(audio_data, orig_sr=samplerate, target_sr=16000)
    audio_data = audio_data.astype(np.float32)
    result = model.transcribe(
        audio_data,
        language="zh",
        fp16=False,  # CPU-safe; fp16 only benefits GPU inference
    )
    text = result["text"]
    assert isinstance(text, str)
    return text


# The speech_recognition path below is deprecated in favor of Whisper.

# Lazily-created recognizer singleton used by stt_speech_recognition().
__speech_recognition_recognizer: sr.Recognizer | None = None


def stt_speech_recognition(fp: str) -> str:
    """Transcribe an audio file via SpeechRecognition's PocketSphinx backend.

    Deprecated in this module; kept for reference. Lazily creates and caches
    a single ``sr.Recognizer`` instance at module level.
    """
    global __speech_recognition_recognizer
    rec = __speech_recognition_recognizer
    if rec is None:
        rec = sr.Recognizer()
        __speech_recognition_recognizer = rec
        print("SpeechRecognition 识别器加载完成")
    with sr.AudioFile(fp) as src:
        captured = rec.record(src)
    return rec.recognize_sphinx(captured, language="zh-CN")  # type: ignore


type OnTextListener = Callable[[str, int], None]


class STTListener(SegmentListener):
    """Feeds speech segments through Whisper and forwards text to a callback."""

    def __init__(self, on_text: OnTextListener):
        self.counter = 0                  # index of the next delivered segment
        self.recognizer = sr.Recognizer()
        self.on_text = on_text            # callback receiving (text, index)

    def on_segment(self, audio_data, samplerate):
        result = ""
        try:
            # Recognize directly from the numpy buffer.
            result = stt_whisper(audio_data, samplerate)
            if result is not None:
                result = result.strip()
            if not result:
                # Empty transcription: drop silently, don't advance the counter.
                return
        except Exception as e:
            result = f"[识别失败]: {e}"
        if self.on_text:
            self.on_text(result, self.counter)
        self.counter += 1


# Usage example:
if __name__ == "__main__":

    load_whisper_model()

    def show_result(text, idx):
        print(f"识别结果 {idx}: {text}")

    segmenter = SpeechSegmenter(listener=STTListener(on_text=show_result))
    segmenter.start()

    print("语音识别服务已启动，按 Enter 键停止...")
    input()
