import { useRef, useState } from 'react';

/**
 * Default capture constraints: audio-only (no video), mono channel,
 * with the browser's built-in noise suppression enabled.
 * `satisfies` validates the shape against MediaStreamConstraints while
 * preserving the literal property types for callers.
 */
const constraintsDefault = {
  video: false,
  audio: {
    channelCount: 1,
    noiseSuppression: true,
  },
} satisfies MediaStreamConstraints;


export const useMedia = (constraints: MediaStreamConstraints = constraintsDefault) => {
  const [stream, setStream] = useState<MediaStream | null>(null);
  const [mediaError, setMediaError] = useState<any>(null);
  const [microphoneVolume, setMicrophoneVolume] = useState(0);
  const [audioContext] = useState<AudioContext>(
    () => new AudioContext()
  );

  const onCloseStream = async () => {
    if (stream && stream.getTracks()) {
      stream.getTracks().forEach((track: MediaStreamTrack) => {
        track.stop();
      });
    }
    setStream(null);
    await audioContext?.close();
  }

  const getMicrophoneVolume =  async (mediaStream: MediaStream) => {
    // addModule()  相对于根目录的路径
    audioContext?.audioWorklet.addModule('/src/useMedia/vumeterProcessor.js').then(() => {
      const microphone: MediaStreamAudioSourceNode = audioContext?.createMediaStreamSource(mediaStream);
      const node = new AudioWorkletNode(audioContext, 'vumeter');
      // mediaStream 连接到 audioContext 之后，播放状态会被暂停，需要手动resume
      if(node.context.state ==='suspended') {
        audioContext.resume();
      }
      node.port.onmessage = event => {
        let volume = 0;
        if (event.data.volume) {
          volume = Math.round(event.data.volume);
        }
        setMicrophoneVolume(volume);
      }
      microphone?.connect(node).connect(audioContext.destination);
      
    }).catch((error) => {
      console.log(error); 
    })
  }
  const onStartStream = () => {
    navigator.mediaDevices.getUserMedia(constraints).then(async mediaStream => {
        setStream(mediaStream);
        getMicrophoneVolume(mediaStream);
      }).catch(error => {
        setMediaError(error);
      });
  }
  return {
    onStartStream,
    stream,
    mediaError,
    microphoneVolume,
    onCloseStream,
  };
}
