# segmenter.py
# -------------
# Author: Zako888y
#
# This file is dedicated to the public domain under the Creative Commons CC0 1.0 Universal (CC0 1.0) Public Domain Dedication.
# You may use, copy, modify, distribute, and/or sell this code for any purpose, without restriction. No warranty is provided. The author waives all rights and claims to this work worldwide.
# To view a copy of this dedication, visit https://creativecommons.org/publicdomain/zero/1.0/
#
# Portions of this code reference or adapt logic from gui_v1.py and related files from the RVC project:
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/
# The original RVC WebUI project is licensed under the MIT License by liujing04, 源文雨, Ftps and others. See LICENSE in the original repository for details.

import numpy as np
import librosa
import sounddevice as sd
from abc import ABC, abstractmethod


class SegmentListener(ABC):
    """Callback interface: receives each utterance a SpeechSegmenter detects."""

    @abstractmethod
    def on_segment(self, audio_data: np.ndarray, samplerate: int):
        """Process the audio of a single segmented utterance.

        :param audio_data: samples of the utterance as a numpy array,
            already split on silence by the segmenter
        :param samplerate: sampling rate of ``audio_data`` in Hz
        """
        raise NotImplementedError()


class SpeechSegmenter:
    """Split a live microphone stream into utterances separated by silence.

    Opens a ``sounddevice`` input stream and, inside the audio callback,
    gates each incoming block on its RMS loudness (``librosa.feature.rms``).
    Consecutive non-silent blocks are accumulated; once silence returns, the
    accumulated audio is fade-in/fade-out windowed and delivered to
    ``listener.on_segment``.  Frame-alignment details are adapted from
    gui_v1.py of the RVC WebUI project (see file header).
    """

    def __init__(
        self,
        listener: SegmentListener,
        samplerate: int = 16000,
        channels: int = 1,
        block_time: float = 0.25,
        crossfade_time: float = 0.05,
        extra_time: float = 2.5,
        threhold: int = -60,
        rms_mix_rate: float = 0.0,
    ):
        """
        :param listener: receives each detected speech segment
        :param samplerate: capture sample rate in Hz
        :param channels: number of input channels (mixed down to mono in the callback)
        :param block_time: seconds of audio delivered per stream callback
        :param crossfade_time: seconds of fade-in/out applied to each emitted segment
        :param extra_time: seconds of rolling history kept in ``input_wav``
        :param threhold: silence threshold in dB (spelling kept from RVC for compatibility)
        :param rms_mix_rate: kept for parity with the RVC source; not used in this file
        """
        self.listener = listener
        self.samplerate = samplerate
        self.channels = channels
        self.block_time = block_time
        self.crossfade_time = crossfade_time
        self.extra_time = extra_time
        self.threhold = threhold
        self.rms_mix_rate = rms_mix_rate
        # zc = samples per 10 ms; every frame size below is rounded to a
        # multiple of it so RMS hops (hop_length=zc) tile the buffers exactly.
        self.zc = samplerate // 100
        self.block_frame = int(np.round(block_time * samplerate / self.zc)) * self.zc
        self.crossfade_frame = int(np.round(crossfade_time * samplerate / self.zc)) * self.zc
        self.extra_frame = int(np.round(extra_time * samplerate / self.zc)) * self.zc
        # 4*zc of trailing input kept between callbacks so RMS frames
        # (frame_length=4*zc) have history to look back on.
        self.rms_buffer = np.zeros(4 * self.zc, dtype="float32")
        # Rolling window of recent input (shifted left each callback).
        self.input_wav = np.zeros(
            self.extra_frame + self.crossfade_frame + self.block_frame, dtype="float32"
        )
        # NOTE(review): output_buffer is allocated but never read or written
        # elsewhere in this file — presumably a leftover from the RVC source.
        self.output_buffer = np.zeros_like(self.input_wav)
        # Non-silent chunks of the utterance currently being captured.
        self.segment_buffer = []
        self.speaking = False
        self.stream = None
        # sin^2 ramp 0->1 for fade-in; its complement (cos^2) for fade-out.
        self.fade_in_window = np.sin(0.5 * np.pi * np.linspace(0.0, 1.0, self.crossfade_frame)) ** 2
        self.fade_out_window = 1 - self.fade_in_window

    def start(self):
        """Open the microphone stream and begin delivering segments."""
        self.stream = sd.InputStream(
            channels=self.channels,
            samplerate=self.samplerate,
            blocksize=self.block_frame,
            dtype="float32",
            callback=self.audio_callback,
        )
        self.stream.start()

    def stop(self):
        """Stop and close the stream; safe to call when not started."""
        if self.stream is not None:
            self.stream.stop()
            self.stream.close()
            self.stream = None

    def audio_callback(self, indata, frames, time_info, status):
        """sounddevice callback: gate the block on loudness and collect segments."""
        # 1. Mix down to mono (indata is frames x channels).
        indata = librosa.to_mono(indata.T)
        # 2. Cache raw input in the rolling window (kept for downstream
        #    inference/denoising in the original RVC pipeline).
        self.input_wav[:-indata.shape[0]] = self.input_wav[indata.shape[0]:]
        self.input_wav[-indata.shape[0]:] = indata
        # 3. Loudness measurement / silence gating (endpoint detection).
        #    Prepend 4*zc of history so RMS frames at the block start are valid.
        indata_rms = np.append(self.rms_buffer, indata)
        rms = librosa.feature.rms(
            y=indata_rms, frame_length=4 * self.zc, hop_length=self.zc
        )[:, 2:]  # skip leading frames covering only buffered history (as in RVC)
        self.rms_buffer[:] = indata_rms[-4 * self.zc :]
        # Half-frame offset aligns samples with RMS frame centers — kept
        # verbatim from RVC gui_v1; presumably centers each zc-sized span
        # under its RMS frame. TODO(review): confirm against upstream.
        indata_proc = indata_rms[2 * self.zc - self.zc // 2 :]
        # True where a frame is below the silence threshold (in dB).
        db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.threhold
        for i in range(db_threhold.shape[0]):
            if db_threhold[i]:
                # Zero out sub-threshold 10 ms spans (silence suppression).
                indata_proc[i * self.zc : (i + 1) * self.zc] = 0
        indata_proc = indata_proc[self.zc // 2 :]
        # 4. Utterance segmentation (with fade-in/out at the edges).
        if np.max(np.abs(indata_proc)) > 1e-4:
            # Voice present: keep accumulating the current utterance.
            self.segment_buffer.append(indata_proc)
            self.speaking = True
        else:
            # Silence: if we were speaking, the utterance just ended — emit it.
            if self.speaking and len(self.segment_buffer) > 0:
                segment = np.concatenate(self.segment_buffer)
                # Fade edges to reduce clicks from the hard cut; skipped for
                # segments too short to hold both ramps.
                if segment.shape[0] > 2 * self.crossfade_frame:
                    segment[:self.crossfade_frame] *= self.fade_in_window
                    segment[-self.crossfade_frame:] *= self.fade_out_window
                self.listener.on_segment(segment, self.samplerate)
                self.segment_buffer = []
            self.speaking = False

if __name__ == "__main__":
    # Usage example: print the length of every detected speech segment.
    class MyListener(SegmentListener):
        def on_segment(self, audio_data, samplerate):
            print(f"收到一个语音片段，长度: {len(audio_data)}，采样率: {samplerate}")

    # Create and start the speech segmentation stream.
    segmenter = SpeechSegmenter(listener=MyListener())
    segmenter.start()
    print("语音分割服务已启动，按 Enter 键停止...")
    try:
        input()
    finally:
        # Fix: the original never stopped the stream — close the audio
        # device cleanly even if input() is interrupted (Ctrl-C / EOF).
        segmenter.stop()
