#!/usr/bin/env python3

import sys
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import sherpa_onnx
import soundfile as sf

try:
    import sounddevice as sd
except ImportError:
    print("Please install sounddevice first. You can use")
    print()
    print("  pip install sounddevice")
    print()
    print("to install it")
    sys.exit(-1)

# 采样率
g_sample_rate = 16000
# 阈值，用于判断当前说话人与已有的说话人之间的相似度
threshold = 0.6

# Assert that a file exists, pointing users at the model download page.
def assert_file_exists(filename: str) -> None:
    """Raise ``AssertionError`` if *filename* is not an existing file.

    Bug fix: the original message contained the literal text "(unknown)"
    because the f-string placeholder was lost; include the real filename.

    NOTE: ``assert`` is stripped under ``python -O``; kept here to preserve
    the exception type existing callers may rely on.
    """
    assert Path(filename).is_file(), (
        f"{filename} does not exist!\n"
        "Please refer to "
        "https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html to download it"
    )



# Build the offline ASR recognizer from the local int8 FireRedASR onnx model.
def create_recognizer() -> sherpa_onnx.OfflineRecognizer:
    """Create an offline recognizer from the FireRedASR zh/en int8 model.

    Returns:
      A ``sherpa_onnx.OfflineRecognizer`` using 6 threads with debug on.

    Raises:
      ValueError: if any required model file is missing.
    """
    model_dir = "./models/sherpa-onnx-fire-red-asr-large-zh_en-2025-02-16"
    encoder = f"{model_dir}/encoder.int8.onnx"
    decoder = f"{model_dir}/decoder.int8.onnx"
    tokens = f"{model_dir}/tokens.txt"
    test_wav = f"{model_dir}/test_wavs/0.wav"

    # Bug fix: the original existence check skipped tokens.txt, so a missing
    # token file only surfaced later as an opaque model-loading error.
    required = (encoder, decoder, tokens, test_wav)
    if not all(Path(f).is_file() for f in required):
        raise ValueError(
            """Please download model files from
            https://github.com/k2-fsa/sherpa-onnx/releases/tag/asr-models
            """
        )
    return sherpa_onnx.OfflineRecognizer.from_fire_red_asr(
        encoder=encoder,
        decoder=decoder,
        num_threads=6,
        tokens=tokens,
        debug=True,
    )


def load_speaker_embedding_model():
    """Create a speaker-embedding extractor from the local WeSpeaker model.

    Returns:
      A ``sherpa_onnx.SpeakerEmbeddingExtractor`` running on CPU with
      2 threads and debug output disabled.

    Raises:
      ValueError: if the extractor configuration fails validation.
    """
    config = sherpa_onnx.SpeakerEmbeddingExtractorConfig(
        model="./models/wespeaker_zh_cnceleb_resnet34.onnx",
        num_threads=2,
        debug=False,
        provider="cpu",
    )
    if not config.validate():
        raise ValueError(f"Invalid config. {config}")
    return sherpa_onnx.SpeakerEmbeddingExtractor(config)


def load_speaker_file(
    speaker_file: str = "./speakers/speakers.txt",
) -> Dict[str, List[str]]:
    """Parse a speaker registration file.

    Each non-empty line has the form ``<speaker_name> <wave_filename>``.
    The same speaker may appear on multiple lines; all of that speaker's
    files are collected.

    Args:
      speaker_file: Path of the registration file. Defaults to the original
        hard-coded location, so existing callers are unaffected.

    Returns:
      Mapping from speaker name to the list of that speaker's wave files.

    Raises:
      ValueError: if the file is missing or a line does not have exactly
        two whitespace-separated fields.
    """
    if not Path(speaker_file).is_file():
        raise ValueError(f"--speaker-file {speaker_file} does not exist")

    ans = defaultdict(list)
    with open(speaker_file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue

            fields = line.split()
            if len(fields) != 2:
                raise ValueError(f"Invalid line: {line}. Fields: {fields}")

            speaker_name, filename = fields
            ans[speaker_name].append(filename)
    return ans


def load_audio(filename: str) -> Tuple[np.ndarray, int]:
    """Load an audio file as float32 mono samples.

    Args:
      filename: Path of the audio file to read.

    Returns:
      ``(samples, sample_rate)`` where samples is a contiguous 1-D float32
      array taken from the first channel only.
    """
    audio, sample_rate = sf.read(filename, always_2d=True, dtype="float32")
    # Keep only channel 0 and make it contiguous for downstream C code.
    mono = np.ascontiguousarray(audio[:, 0])
    return mono, sample_rate


def compute_speaker_embedding(
    filenames: List[str],
    extractor: sherpa_onnx.SpeakerEmbeddingExtractor,
) -> np.ndarray:
    """Compute the average speaker embedding over several audio files.

    Args:
      filenames: Paths of audio files belonging to one speaker; must be
        non-empty.
      extractor: The speaker-embedding extractor applied to each file.

    Returns:
      The element-wise mean of the per-file embeddings as a numpy array.
    """
    assert len(filenames) > 0, "filenames is empty"

    ans = None
    for filename in filenames:
        # Bug fix: the original printed the literal text "(unknown)" because
        # the f-string had no placeholder; report the actual file instead.
        print(f"processing {filename}")
        samples, sample_rate = load_audio(filename)
        stream = extractor.create_stream()
        stream.accept_waveform(sample_rate=sample_rate, waveform=samples)
        stream.input_finished()

        assert extractor.is_ready(stream)
        embedding = np.array(extractor.compute(stream))
        ans = embedding if ans is None else ans + embedding

    return ans / len(filenames)


def create_puncutation():
    """Create an offline punctuation restorer (zh/en CT-Transformer).

    Returns:
      A ``sherpa_onnx.OfflinePunctuation`` instance.

    Raises:
      ValueError: if the punctuation model file is missing.
    """
    model = "./models/sherpa-onnx-punct-ct-transformer-zh-en-vocab272727-2024-04-12/model.onnx"
    if not Path(model).is_file():
        raise ValueError(f"{model} does not exist")

    punct_config = sherpa_onnx.OfflinePunctuationConfig(
        model=sherpa_onnx.OfflinePunctuationModelConfig(ct_transformer=model),
    )
    return sherpa_onnx.OfflinePunctuation(punct_config)

# Live recognition from the microphone.
def mic_asr() -> None:
    """Run VAD + speaker identification + offline ASR on live microphone input.

    Registers every speaker listed in the speaker file with the mean
    embedding of their audio files, then reads 100 ms chunks from the
    default input device, segments speech with a silero VAD, and prints
    ``<index>-<speaker>: <text>`` for each detected segment (speaker is
    "unknown" when similarity falls below ``threshold``). Loops forever;
    exit via KeyboardInterrupt (handled by the caller).
    """
    vad_model_path = "./models/silero_vad.onnx"


    recognizer = create_recognizer()
    extractor = load_speaker_embedding_model()
    speaker_file = load_speaker_file()
    punct = create_puncutation()

    # Register each known speaker using the mean embedding of their files.
    manager = sherpa_onnx.SpeakerEmbeddingManager(extractor.dim)
    for name, filename_list in speaker_file.items():
        embedding = compute_speaker_embedding(
            filenames=filename_list,
            extractor=extractor,
        )
        status = manager.add(name, embedding)
        if not status:
            raise RuntimeError(f"Failed to register speaker {name}")

    vad_config = sherpa_onnx.VadModelConfig()
    vad_config.silero_vad.model = vad_model_path
    vad_config.silero_vad.min_silence_duration = 0.25
    vad_config.silero_vad.min_speech_duration = 0.25
    vad_config.sample_rate = g_sample_rate
    if not vad_config.validate():
        raise ValueError("Errors in vad config")

    window_size = vad_config.silero_vad.window_size

    vad = sherpa_onnx.VoiceActivityDetector(vad_config, buffer_size_in_seconds=100)

    samples_per_read = int(0.1 * g_sample_rate)  # 0.1 second = 100 ms

    devices = sd.query_devices()
    if len(devices) == 0:
        print("No microphone devices found")
        sys.exit(0)

    print(devices)
    default_input_device_idx = sd.default.device[0]
    print(f'Use default device: {devices[default_input_device_idx]["name"]}')

    print("Started! Please speak")

    idx = 0
    buffer = []
    with sd.InputStream(channels=1, dtype="float32", samplerate=g_sample_rate) as s:
        while True:
            samples, _ = s.read(samples_per_read)  # a blocking read
            samples = samples.reshape(-1)
            buffer = np.concatenate([buffer, samples])
            # Feed the VAD in fixed-size windows; keep the remainder buffered.
            while len(buffer) > window_size:
                vad.accept_waveform(buffer[:window_size])
                buffer = buffer[window_size:]

            while not vad.empty():
                if len(vad.front.samples) < 0.5 * g_sample_rate:
                    # this segment is too short, skip it
                    vad.pop()
                    continue
                # Identify the speaker of this segment via its embedding.
                stream = extractor.create_stream()
                stream.accept_waveform(
                    sample_rate=g_sample_rate, waveform=vad.front.samples
                )
                stream.input_finished()

                embedding = extractor.compute(stream)
                embedding = np.array(embedding)
                name = manager.search(embedding, threshold=threshold)
                if not name:
                    name = "unknown"

                # Now for non-streaming ASR
                asr_stream = recognizer.create_stream()
                asr_stream.accept_waveform(
                    sample_rate=g_sample_rate, waveform=vad.front.samples
                )
                recognizer.decode_stream(asr_stream)
                text = asr_stream.result.text
                text = punct.add_punctuation(text)

                vad.pop()

                print(f"\r{idx}-{name}: {text}")
                idx += 1


# Recognition from an audio file.
def file_asr(wav_file_name: str) -> None:
    """Run VAD + speaker identification + offline ASR over a wave file.

    Registers the speakers from the speaker file, then feeds the file's
    samples to a silero VAD in window-sized chunks (simulating real-time
    input) and prints ``<index>-<speaker>: <text>`` for each detected
    speech segment (speaker is "unknown" below the similarity threshold).

    Args:
      wav_file_name: Path of the input audio file; must be sampled at
        ``g_sample_rate`` (16 kHz).

    Raises:
      ValueError: if the file's sample rate differs from ``g_sample_rate``.

    NOTE(review): after the loop ends, speech still buffered inside the
    VAD is never flushed, so a final segment at EOF may go unrecognized —
    confirm whether the VAD API offers a flush.
    """
    vad_model_path = "./models/silero_vad.onnx"

    recognizer = create_recognizer()
    extractor = load_speaker_embedding_model()
    speaker_file = load_speaker_file()
    punct = create_puncutation()

    # Register each known speaker using the mean embedding of their files.
    manager = sherpa_onnx.SpeakerEmbeddingManager(extractor.dim)
    for name, filename_list in speaker_file.items():
        embedding = compute_speaker_embedding(
            filenames=filename_list,
            extractor=extractor,
        )
        status = manager.add(name, embedding)
        if not status:
            raise RuntimeError(f"Failed to register speaker {name}")

    vad_config = sherpa_onnx.VadModelConfig()
    vad_config.silero_vad.model = vad_model_path
    vad_config.silero_vad.min_silence_duration = 0.25
    vad_config.silero_vad.min_speech_duration = 0.25
    vad_config.sample_rate = g_sample_rate
    if not vad_config.validate():
        raise ValueError("Errors in vad config")

    window_size = vad_config.silero_vad.window_size

    vad = sherpa_onnx.VoiceActivityDetector(vad_config, buffer_size_in_seconds=100)

    # Read all samples from the audio file.
    samples, sample_rate = load_audio(wav_file_name)
    if sample_rate != g_sample_rate:
        raise ValueError(f"Sample rate {sample_rate} != {g_sample_rate}")

    buffer = []
    idx = 0

    # Process the audio in chunks to simulate real-time input.
    samples_per_window = window_size
    total_samples = len(samples)
    pos = 0

    print("Start ASR!")

    while pos < total_samples:
        # Take the next window of samples.
        end_pos = min(pos + samples_per_window, total_samples)
        current_samples = samples[pos:end_pos]
        pos = end_pos

        # Feed the window to the VAD.
        vad.accept_waveform(current_samples)

        # Handle every speech segment the VAD has detected so far.
        while not vad.empty():
            if len(vad.front.samples) < 0.5 * g_sample_rate:
                # Segment too short to identify/recognize reliably; skip it.
                vad.pop()
                continue

            # Identify the speaker of this segment via its embedding.
            stream = extractor.create_stream()
            stream.accept_waveform(
                sample_rate=g_sample_rate, waveform=vad.front.samples
            )
            stream.input_finished()

            embedding = extractor.compute(stream)
            embedding = np.array(embedding)
            name = manager.search(embedding, threshold=threshold)
            if not name:
                name = "unknown"

            # Non-streaming ASR on the same segment.
            asr_stream = recognizer.create_stream()
            asr_stream.accept_waveform(
                sample_rate=g_sample_rate, waveform=vad.front.samples
            )
            recognizer.decode_stream(asr_stream)
            text = asr_stream.result.text
            text = punct.add_punctuation(text)

            vad.pop()

            print(f"\r{idx}-{name}: {text}")
            idx += 1



if __name__ == "__main__":
    # Demo mode switch: set mode_test to mode_a for live microphone input
    # or mode_b to transcribe a local wave file.
    mode_a = "mic_asr"
    mode_b = "file_asr"
    mode_test = "file_asr"
    try:
        if mode_test == mode_a:
            mic_asr()
        elif mode_test == mode_b:
            wav_file_name = "./wavs/input.wav"
            file_asr(wav_file_name)
    except KeyboardInterrupt:
        print("\nCaught Ctrl + C. Exiting")
