import { useEffect, useState } from 'react'
import VideoScreen from '@renderer/components/VideoScreen'
import { DragView } from '@renderer/components/Utils'
import { getInitStream } from '@renderer/index'
function ViewRecordingVideo(): React.JSX.Element {
  // Captured A/V stream fed to the recorder and to the preview <video> node.
  const [stream, setStream] = useState<MediaStream | undefined>()
  // Active recorder; created lazily on the first "start" click.
  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | undefined>()
  // Recorded webm chunks. NOTE: the recorder callbacks below push into this
  // array in place — ondataavailable and onstop share the same array
  // reference, which is why the mutation works despite being React state.
  const [recordedChunks, setRecordedChunks] = useState<Blob[]>([])
  // Recorder lifecycle shown in the UI; 'done' is added after a save completes.
  const [status, setStatus] = useState<MediaRecorder['state'] | 'done'>('inactive')
  // Elapsed recording time, formatted HH:MM:SS.
  const [videoTime, setVideoTime] = useState<string>('00:00:00')
  // Result reported by the main process after saving the recording.
  const [saveMessage, setSaveMessage] = useState<{
    success: boolean
    message: string
    path?: string
  }>({
    success: false,
    message: ''
  })

  /** Stop all tracks, drop recorder state, and ask the main process to close the window. */
  const handleClose = (e: React.MouseEvent): void => {
    console.log('close')
    e.preventDefault()
    resetData()
    window.electron.ipcRenderer.send('close:win')
  }

  /**
   * Start / pause / resume recording.
   * The first click only creates the MediaRecorder — actual recording starts
   * in the effect below once the recorder lands in state. Later clicks toggle
   * pause/resume.
   */
  const handleStart = async (e: React.MouseEvent<HTMLDivElement, MouseEvent>): Promise<void> => {
    e.preventDefault()
    if (stream && !mediaRecorder) {
      setMediaRecorder(
        new MediaRecorder(stream, {
          mimeType: 'video/webm; codecs=vp9',
          videoBitsPerSecond: 2500000 // 2.5 Mbps
        })
      )
      return
    }
    // Fix: the original did `mediaRecorder!.state` here and crashed with a
    // TypeError when init() had not resolved yet (no stream, no recorder).
    if (!mediaRecorder) return
    console.log('mediaRecorder.state', mediaRecorder.state)
    if (mediaRecorder.state === 'recording') {
      mediaRecorder.pause()
      setStatus(mediaRecorder.state)
      return
    }
    if (mediaRecorder.state === 'paused') {
      mediaRecorder.resume()
      setStatus(mediaRecorder.state)
    }
  }

  /** Stop recording; the recorder's onstop callback performs the actual save. */
  const handleSave = (e: React.MouseEvent<HTMLDivElement, MouseEvent>): void => {
    console.log('handleSave')
    e.preventDefault()
    if (mediaRecorder && mediaRecorder.state !== 'inactive') {
      mediaRecorder.stop()
      setStatus(mediaRecorder.state)
    }
  }

  /** Release every captured track and reset recorder state. */
  const resetData = (): void => {
    setMediaRecorder(undefined)
    setRecordedChunks([])
    if (stream) {
      stream.getTracks().forEach((track) => track.stop())
    }
    setStream(undefined)
  }

  // Acquire the screen/camera stream once on mount.
  useEffect(() => {
    void init()
  }, [])

  // Wire up the recorder callbacks and start it as soon as it is created.
  useEffect(() => {
    if (!mediaRecorder) return
    const startTime = Date.now()
    mediaRecorder.ondataavailable = (e) => {
      if (e.data.size > 0) {
        // Wall-clock time since recording started.
        // NOTE(review): paused time is not excluded, so the displayed timer
        // jumps forward after a resume — confirm whether that is intended.
        const elapsedSeconds = Math.round((Date.now() - startTime) / 1000)
        const hrs = Math.floor(elapsedSeconds / 3600)
        const mins = Math.floor((elapsedSeconds % 3600) / 60)
        const secs = elapsedSeconds % 60
        setVideoTime(
          [hrs, mins, secs].map((v) => v.toString().padStart(2, '0')).join(':')
        )
        // Deliberate in-place push — onstop reads this same array reference.
        recordedChunks.push(e.data)
      }
    }
    // Recording finished: assemble the blob and hand it to the main process.
    mediaRecorder.onstop = async () => {
      const blob = new Blob(recordedChunks, {
        type: 'video/webm'
      })
      const arrayBuffer = await blob.arrayBuffer()
      // Release tracks before the (potentially slow) save round-trip.
      resetData()
      const res = await window.electron.ipcRenderer.invoke('save:recording:video', arrayBuffer)
      console.log('sadasd', res)
      setSaveMessage(res)
      setStatus('done')
    }
    mediaRecorder.start(1000) // emit one chunk per second
    setStatus(mediaRecorder.state)
  }, [mediaRecorder])

  /**
   * Resolve the source chosen in the main process, capture it, mix system
   * audio (if any) with the microphone, and attach the result to the preview.
   */
  const init = async (): Promise<void> => {
    try {
      const sourcejson = await window.electron.ipcRenderer.invoke('get:recording:screen:source')
      const screenSource = JSON.parse(sourcejson)
      console.log('screenSource', screenSource)
      let captured: MediaStream | undefined
      if (screenSource.type === 'screen') {
        captured = await getInitStream(screenSource.devices, {
          mandatory: {
            chromeMediaSource: 'desktop',
            sampleRate: 44100, // constrain the sample rate
            channelCount: 2, // stereo
            bitrate: 128000, // 128 Kbps
            latency: 0.01 // low latency
          }
        })
      } else {
        console.log('录像')
        const { deviceId } = screenSource.devices
        // Camera capture for the selected device.
        captured = await navigator.mediaDevices.getUserMedia({
          video: {
            deviceId,
            width: { ideal: 1280 },
            height: { ideal: 720 }
          }
        })
      }
      if (!captured) return
      // Microphone stream, mixed below with any system audio.
      const audioStream = await navigator.mediaDevices.getUserMedia({
        audio: {
          sampleRate: 44100, // match the system audio rate
          channelCount: 1, // mono
          noiseSuppression: true,
          echoCancellation: true
        },
        video: false
      })
      // Mix audio with the Web Audio API.
      // Fix: the original first added the mic tracks into the capture stream
      // and then mixed the mic again, duplicating it; it also crashed on
      // `new MediaStream([undefined])` when the capture had no audio track.
      const audioContext = new AudioContext()
      const destination = audioContext.createMediaStreamDestination()
      const systemTrack = captured.getAudioTracks()[0]
      if (systemTrack) {
        const systemSource = audioContext.createMediaStreamSource(new MediaStream([systemTrack]))
        systemSource.connect(destination)
      }
      const micSource = audioContext.createMediaStreamSource(audioStream)
      micSource.connect(destination)
      // Final stream: captured video + mixed audio.
      const finalStream = new MediaStream([
        ...captured.getVideoTracks(),
        ...destination.stream.getTracks()
      ])
      setStream(finalStream)
      // Attach the stream to the preview element if it is mounted.
      const videoNode = document.getElementById('video') as HTMLVideoElement | null
      if (videoNode) {
        videoNode.srcObject = finalStream
        // play() returns a promise; autoplay can be blocked — log instead of
        // leaving an unhandled rejection.
        videoNode.play().catch((err) => console.error('preview play failed', err))
      }
    } catch (err: unknown) {
      // Fix: init() is fired-and-forgotten from the mount effect; without this
      // catch any IPC/getUserMedia failure became an unhandled rejection.
      console.error('failed to initialize recording stream', err)
    }
  }

  return (
    <DragView>
      <div className="w-full h-full bg-[var(--color-background)]">
        <VideoScreen
          onClose={handleClose}
          onStart={handleStart}
          onSave={handleSave}
          status={status}
          videoTime={videoTime}
          saveMessage={saveMessage}
        />
      </div>
    </DragView>
  )
}

export default ViewRecordingVideo
