import { useEffect, useRef, useState } from "react";

/**
 * React hook that listens to the microphone and emits one audio Blob per
 * detected speech segment ("utterance"). A segment starts when the RMS volume
 * exceeds `threshold` and ends after `silenceTime` ms of continuous silence.
 *
 * @param threshold   RMS volume threshold (byte-domain, 0–128 scale); lower = more sensitive.
 * @param silenceTime Milliseconds of continuous silence that ends a segment.
 * @returns `{ segments }` — accumulating array of `audio/webm` Blobs, one per utterance.
 *
 * NOTE(review): some browsers start the AudioContext suspended until a user
 * gesture — callers may need to trigger this hook from a click handler; verify.
 */
export function useSpeechSegments({
  threshold = 20, // RMS volume threshold; smaller values are more sensitive
  silenceTime = 800, // silence duration (ms) after which an utterance is considered finished
} = {}) {
  const [segments, setSegments] = useState<Blob[]>([]);
  const isSpeakingRef = useRef(false);
  const silenceStartRef = useRef(0);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const chunksRef = useRef<BlobPart[]>([]);

  useEffect(() => {
    let analyser: AnalyserNode;
    let dataArray: Uint8Array;
    let animationId = 0;
    // Set by cleanup; guards against the async init() finishing (and the
    // detect loop continuing) after this effect instance was torn down.
    let cancelled = false;
    // Kept in effect scope so cleanup can release the mic and audio graph.
    let stream: MediaStream | null = null;
    let audioCtx: AudioContext | null = null;

    async function init() {
      stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      // The effect may have been cleaned up while the permission prompt
      // was open — release the mic immediately and bail out.
      if (cancelled) {
        stream.getTracks().forEach((track) => track.stop());
        return;
      }

      audioCtx = new AudioContext();
      const source = audioCtx.createMediaStreamSource(stream);

      analyser = audioCtx.createAnalyser();
      analyser.fftSize = 512;
      dataArray = new Uint8Array(analyser.fftSize);
      source.connect(analyser);

      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;

      mediaRecorder.ondataavailable = (e) => {
        chunksRef.current.push(e.data);
      };

      // Fires once per segment (recorder is started/stopped per utterance);
      // assembles the buffered chunks into one Blob and publishes it.
      mediaRecorder.onstop = () => {
        const blob = new Blob(chunksRef.current, { type: "audio/webm" });
        setSegments((prev) => [...prev, blob]);
        chunksRef.current = [];
      };

      function detect() {
        if (cancelled) return; // stop rescheduling after cleanup

        analyser.getByteTimeDomainData(dataArray);
        // RMS of the time-domain samples; bytes are centered on 128 (silence).
        let sum = 0;
        for (let i = 0; i < dataArray.length; i++) {
          const v = dataArray[i] - 128;
          sum += v * v;
        }
        const rms = Math.sqrt(sum / dataArray.length);

        if (rms > threshold) {
          // Voice detected: open a new segment on the silence→speech edge.
          if (!isSpeakingRef.current) {
            isSpeakingRef.current = true;
            chunksRef.current = [];
            mediaRecorder.start();
          }
          // Any loud frame resets the silence timer.
          silenceStartRef.current = Date.now();
        } else if (isSpeakingRef.current) {
          // Quiet while speaking: close the segment once silence has lasted
          // long enough to count as the end of the utterance.
          if (Date.now() - silenceStartRef.current > silenceTime) {
            isSpeakingRef.current = false;
            mediaRecorder.stop();
          }
        }

        animationId = requestAnimationFrame(detect);
      }

      detect();
    }

    // getUserMedia rejects on permission denial / missing device; without a
    // handler this surfaces as an unhandled promise rejection.
    void init().catch((err: unknown) => {
      console.error("useSpeechSegments: audio capture init failed", err);
    });

    return () => {
      cancelled = true;
      if (animationId) cancelAnimationFrame(animationId);

      const recorder = mediaRecorderRef.current;
      // stop() throws InvalidStateError when the recorder is already
      // inactive (i.e. no speech was in progress at teardown).
      if (recorder && recorder.state !== "inactive") recorder.stop();
      mediaRecorderRef.current = null;
      isSpeakingRef.current = false;

      // Release the microphone (turns off the browser's recording indicator)
      // and tear down the audio graph.
      stream?.getTracks().forEach((track) => track.stop());
      void audioCtx?.close();
    };
  }, [threshold, silenceTime]);

  return { segments };
}
