import logging
import os

import sherpa_onnx
from get_config import config

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Configuration sections (loaded once at import time via get_config).
recognition_config = config["recognition"]
audio_config = config["audio"]

MODEL = recognition_config["model"]
DECODING_METHOD = recognition_config["decoding_method"]

# Model/resource directories, all located under <BASE_DIR>/sherpa_onnx/.
MODEL_DIR = os.path.join(BASE_DIR, "sherpa_onnx", MODEL)
KWS_DIR = os.path.join(BASE_DIR, "sherpa_onnx", "kws")
VITS_DIR = os.path.join(BASE_DIR, "sherpa_onnx", "vits")
HOT_DIR = os.path.join(BASE_DIR, "sherpa_onnx", "hot")

TARGET_RATE = audio_config["target_rate"]

# Recognition / keyword-spotting tuning values, pulled from the same
# "recognition" section (names kept identical — they are read as module
# globals by the factory functions below).
(
    num_threads,
    feature_dim,
    max_active_paths,
    rule1_min_trailing_silence,
    rule2_min_trailing_silence,
    rule3_min_utterance_length,
    encoder,
    decoder,
    joiner,
    provider,
    hotwords_score,
    blank_penalty,
    keywords_score,
    keywords_threshold,
    num_trailing_blanks,
) = (
    recognition_config[key]
    for key in (
        "num_threads",
        "feature_dim",
        "max_active_paths",
        "rule1_min_trailing_silence",
        "rule2_min_trailing_silence",
        "rule3_min_utterance_length",
        "encoder",
        "decoder",
        "joiner",
        "provider",
        "hotwords_score",
        "blank_penalty",
        "keywords_score",
        "keywords_threshold",
        "num_trailing_blanks",
    )
)

def create_recognizer():
    """Create a streaming sherpa-onnx transducer recognizer.

    Model files (tokens/encoder/decoder/joiner) are resolved under
    MODEL_DIR; tuning values come from the module-level recognition
    config. Endpoint detection is enabled using the three configured
    rules, and hotword biasing is read from HOT_DIR.

    Returns:
        sherpa_onnx.OnlineRecognizer: a recognizer ready for streaming.
    """
    print("create_recognizer")
    recognizer = sherpa_onnx.OnlineRecognizer.from_transducer(
        tokens=os.path.join(MODEL_DIR, "data/lang_char/tokens.txt"),
        encoder=os.path.join(MODEL_DIR, "exp", encoder),
        decoder=os.path.join(MODEL_DIR, "exp", decoder),
        joiner=os.path.join(MODEL_DIR, "exp", joiner),
        num_threads=num_threads,
        sample_rate=TARGET_RATE,
        # Fix: use the configured value — it was loaded from config at
        # module level but the call hardcoded 80, silently ignoring it.
        feature_dim=feature_dim,
        max_active_paths=max_active_paths,
        enable_endpoint_detection=True,
        rule1_min_trailing_silence=rule1_min_trailing_silence,
        rule2_min_trailing_silence=rule2_min_trailing_silence,
        # A large rule3 value essentially disables this rule.
        rule3_min_utterance_length=rule3_min_utterance_length,
        decoding_method=DECODING_METHOD,
        provider=provider,
        hotwords_file=os.path.join(HOT_DIR, "hotwords_file.text"),
        hotwords_score=hotwords_score,
        blank_penalty=blank_penalty,
    )
    return recognizer

def create_keyword_spotter():
    """Create a sherpa-onnx keyword spotter (wake-word detector).

    All model files live in KWS_DIR; tuning values come from the
    module-level recognition config.
    """
    print("keyword_recognizer")
    # The encoder/decoder/joiner files share the same checkpoint suffix.
    checkpoint = "epoch-12-avg-2-chunk-16-left-64.onnx"
    return sherpa_onnx.KeywordSpotter(
        sample_rate=TARGET_RATE,
        tokens=os.path.join(KWS_DIR, "tokens.txt"),
        encoder=os.path.join(KWS_DIR, f"encoder-{checkpoint}"),
        decoder=os.path.join(KWS_DIR, f"decoder-{checkpoint}"),
        joiner=os.path.join(KWS_DIR, f"joiner-{checkpoint}"),
        num_threads=num_threads,
        max_active_paths=max_active_paths,
        keywords_file=os.path.join(KWS_DIR, "keywords.txt"),
        keywords_score=keywords_score,
        keywords_threshold=keywords_threshold,
        num_trailing_blanks=num_trailing_blanks,
        provider=provider,
    )

def create_tts():
    """Create an offline sherpa-onnx VITS text-to-speech engine.

    Loads the VITS model, lexicon, and tokens from VITS_DIR, plus the
    text-normalization rule FSTs (phone/date/number).

    Returns:
        sherpa_onnx.OfflineTts: a ready-to-use TTS engine.

    Raises:
        ValueError: if the assembled config fails validation.
    """
    print("create tts")
    # Idiom fix: build the comma-separated FST list with join instead of
    # chained string concatenation.
    rule_fsts = ",".join(
        os.path.join(VITS_DIR, fst) for fst in ("phone.fst", "date.fst", "number.fst")
    )
    tts_config = sherpa_onnx.OfflineTtsConfig(
        model=sherpa_onnx.OfflineTtsModelConfig(
            vits=sherpa_onnx.OfflineTtsVitsModelConfig(
                model=os.path.join(VITS_DIR, "model.onnx"),
                lexicon=os.path.join(VITS_DIR, "lexicon.txt"),
                tokens=os.path.join(VITS_DIR, "tokens.txt"),
            ),
            # NOTE(review): provider and num_threads are hardcoded here,
            # unlike the recognizer/KWS which use the config values —
            # confirm this divergence is intentional.
            provider='cuda',
            num_threads=4,
        ),
        rule_fsts=rule_fsts,
        max_num_sentences=1,
    )
    if not tts_config.validate():
        raise ValueError("Please check your config")
    logging.info("Loading model ...")
    tts = sherpa_onnx.OfflineTts(tts_config)
    logging.info("Loading model done.")
    return tts

import queue
import threading
import sounddevice as sd
import numpy as np

# FIFO of audio chunks (np.ndarray) produced by the TTS generation
# callback and consumed by the sounddevice playback callback.
buffer = queue.Queue()
# Signalled when playback should end; play_audio() blocks on it.
event = threading.Event()
#started is set to True once generated_audio_callback is called.
started = False

#stopped is set to True once all the text has been processed
stopped = False

#killed is set to True once ctrl + C is pressed
killed = False
def generated_audio_callback(samples: np.ndarray, progress: float):
    """Receive one chunk of generated TTS audio.

    Queues *samples* for the playback thread. Returns 1 to keep
    generation running, or 0 to ask the engine to stop once `killed`
    has been set (Ctrl+C).
    """
    global started
    buffer.put(samples)
    if not started:
        # First chunk: announce that playback is about to begin.
        logging.info("Start playing ...")
        started = True
    return 0 if killed else 1
def play_audio_callback(
    outdata: np.ndarray, frames: int, time, status: sd.CallbackFlags
):
    """sounddevice OutputStream callback: fill `outdata` from `buffer`.

    Writes exactly `frames` samples into channel 0, splicing together
    queued TTS chunks and zero-padding any shortfall. Sets `event` to
    end playback once generation has started, finished (`stopped`) and
    the queue has drained — or immediately after Ctrl+C (`killed`).
    """
    if killed or (started and buffer.empty() and stopped):
        event.set()

    # outdata is of shape (frames, num_channels)
    if buffer.empty():
        outdata.fill(0)
        return

    # n = samples written into outdata so far.
    n = 0
    while n < frames and not buffer.empty():
        remaining = frames - n
        # Peek at the head chunk's length. NOTE(review): accessing
        # buffer.queue[0] bypasses the Queue lock — assumes this callback
        # is the only consumer; verify no other thread ever gets().
        k = buffer.queue[0].shape[0]

        if remaining <= k:
            # Head chunk covers the rest of this block: copy a slice and
            # keep the unconsumed remainder queued for the next callback.
            outdata[n:, 0] = buffer.queue[0][:remaining]
            buffer.queue[0] = buffer.queue[0][remaining:]
            n = frames
            if buffer.queue[0].shape[0] == 0:
                # Chunk consumed exactly; drop the now-empty head.
                buffer.get()

            break

        # Whole head chunk fits in the remaining space: copy and advance.
        outdata[n : n + k, 0] = buffer.get()
        n += k

    if n < frames:
        # Queue ran dry mid-block; pad the tail with silence.
        outdata[n:, 0] = 0
def play_audio():
    """Stream queued TTS audio to the default output device until done.

    Blocks on `event`, which play_audio_callback sets once playback is
    finished or interrupted; the with-statement then closes the stream.
    """
    all_devices = sd.query_devices()
    default_idx = sd.default.device[1]
    device_name = all_devices[default_idx]["name"]
    print(f'Use default output device: {device_name}')
    # NOTE(review): samplerate is hardcoded to 8000 rather than using
    # TARGET_RATE — confirm it matches the TTS model's output rate.
    with sd.OutputStream(
            channels=1,
            callback=play_audio_callback,
            dtype="float32",
            samplerate=8000,
            blocksize=1024,
    ):
        event.wait()

# if __name__ == '__main__':
#     play_back_thread = threading.Thread(target=play_audio)
#     play_back_thread.start()
#     tts=create_tts()
#     tts.generate("测试生成语音",sid=10,speed=1.0,callback=generated_audio_callback)
#     stopped = True