<template>
  <screen-recording ref="screen-recording" @streamStop="streamStop" :fileName="fileName"></screen-recording>
</template>

<script>
  import screenRecording from '@/components/screen-recording/screen-recording'
const { desktopCapturer } = require('electron')
  const { ipcRenderer } = require('electron')
  const fs = require('fs')

  // Module-level handle to the active MediaRecorder (null when idle) and a
  // running count of media chunks saved to disk by saveMedia().
  let recorder = null
  let i = 0
  export default {
    data () {
      return {
        video: null,
        videoStart: false,
        playOrPause: this.videoStart ? 'pause-circle' : 'play-circle',
        text: '录制',
        mediaDeviceInfo: null,
        one: '00', // 时
        two: '00', // 分
        three: '00', // 秒
        abc: 0, // 秒的计数
        cde: 0, // 分的计数
        efg: 0, // 时的计数
        interval: null
      }
    },
    mounted () {
      this.getDevices()
    },
    methods: {
      startRecording () {
        desktopCapturer.getSources({types: ['window', 'screen']}).then(async sources => {
          for (const source of sources) {
            console.log(source.name)
            if (source.name === 'yicourse') {
              await navigator.mediaDevices.getUserMedia({
                audio: false,
                // audio: {
                //   mandatory: {
                //     chromeMediaSource: 'desktop' // screen desktop
                //   }
                // },
                video: {
                  mandatory: {
                    chromeMediaSource: 'desktop', // screen desktop
                    // chromeMediaSourceId: source.id,
                    minWidth: 1280,
                    minHeight: 720
                  }
                }
              }).then(mediastream => {
                this.getMicroAudioStream(mediastream).then(audioStream => {
                  const [audioTrack] = audioStream.getAudioTracks()
                  mediastream.addTrack(audioTrack)
                  this.handleStream(mediastream)

                  this.videoStart = true
                  this.startRecordTime()
                }).catch(e => {
                  alert('未检测到麦克风设备' + e)
                })
                // this.getmixAudioStream(mediastream).then(audioStream => {
                //     // mediastream.removeTrack(mediastream.getAudioTracks()[0])
                //     const [audioTrack] = audioStream.getAudioTracks()
                //     mediastream.addTrack(audioTrack)
                //     console.log(audioTrack)
                //     this.handleStream(mediastream)
                // })
              })
              return
            }
          }
        })
      },
      stopRecording () {
        this.videoStart = false
        this.stopRecord()
        this.stopRecordTime()
      },
      handleStream (stream) {
        // const video = document.querySelector('video')
        // video.srcObject = stream
        // video.onloadedmetadata = (e) => video.play()
        this.createRecorder(stream)
      },
      handleError (e) {
        console.log(e)
      },
      // createRecorder() 函数初始化录制
      createRecorder (stream) {
        console.log('开始录制' + stream)
        recorder = new MediaRecorder(stream)
        recorder.start(6000000)
        // 如果 start 没设置 timeslice，ondataavailable 在 stop 时会触发
        recorder.ondataavailable = event => {
          console.log(event)
          let blob = new Blob([event.data], {
            type: 'video/mp4'
          })
          this.saveMedia(blob)
        }
        recorder.onerror = err => {
          console.error(err)
        }
      },
      // 函数结束录制并保存至本地 mp4 文件；
      stopRecord () {
        recorder.stop()
      },
      saveMedia (blob) {
        let reader = new FileReader()
        reader.onload = () => {
          let buffer = new Buffer(reader.result)
          fs.writeFile('yicourse_' + Date.now() + '_.mp4', buffer, {}, (err, res) => {
            if (err) return console.error(err)
          })
          i++
        }
        reader.onerror = err => console.error(err)
        reader.readAsArrayBuffer(blob)
      },
      getDevices () {
        // return navigator.mediaDevices.enumerateDevices()
        navigator.mediaDevices.enumerateDevices().then(deviceInfos => {
          this.mediaDeviceInfo = deviceInfos
        })
      },
      getMicroAudioStream () {
        return navigator.mediaDevices.getUserMedia({ audio: true, video: false })
      },
      startRecordTime () {
        this.clearRecordTime()
        this.interval = setInterval(() => {
          if (this.three === 60 || this.three === '60') {
            this.three = '00'
            this.abc = 0
            if (this.two === 60 || this.two === '60') {
              this.two = '00'
              this.cde = 0
              if (this.efg + 1 <= 9) {
                this.efg++
                this.one = '0' + this.efg
              } else {
                this.efg++
                this.one = this.efg
              }
            } else {
              if (this.cde + 1 <= 9) {
                this.cde++
                this.two = '0' + this.cde
              } else {
                this.cde++
                this.two = this.cde
              }
            }
          } else {
            if (this.abc + 1 <= 9) {
              this.abc++
              this.three = '0' + this.abc
            } else {
              this.abc++
              this.three = this.abc
            }
          }
        }, 1000)
      },
      // Halts the elapsed-time ticker started by startRecordTime().
      stopRecordTime () {
        clearInterval(this.interval)
      },
      // Resets the HH:MM:SS display fields and their numeric counters.
      clearRecordTime () {
        this.one = '00'// hours display
        this.two = '00' // minutes display
        this.three = '00' // seconds display
        this.abc = 0 // seconds counter
        this.cde = 0 // minutes counter
        this.efg = 0 // hours counter
      },
      getmixAudioStream (mediastream) { // 混音模式,还有点问题,后续研究下
        // let systemAudioTrack = Mediastream.getAudioTracks()[0]
        return new Promise((resolve, reject) => {
          this.getMicroAudioStream().then(audioStream => {
            let audioContext = new AudioContext()
            let mixedOutput = audioContext.createMediaStreamDestination()
            // 系统声音
            if (mediastream && mediastream.getAudioTracks().length > 0) {
              // let sysAudioStreamNode = audioContext.createMediaStreamSource(mediastream)
              // const sysTopGain = audioContext.createGain()
              // sysTopGain.gain.value = 0.7
              // sysAudioStreamNode.connect(sysTopGain).connect(mixedOutput)
              console.log('混音:加入系统声音')
            }
            // 麦克风声音
            if (audioStream && audioStream.getAudioTracks().length > 0) {
              let microphoneStreamNode = audioContext.createMediaStreamSource(audioStream)
              // const micTopGain = audioContext.createGain()
              // micTopGain.gain.value = 0.7
              // microphoneStreamNode.connect(micTopGain).connect(mixedOutput)
              microphoneStreamNode.connect(mixedOutput)
              // audioContext.createMediaStreamSource(mediastream)
              console.log('混音:加入麦克风')
            }
            resolve(mixedOutput.stream)
          }).catch(err => {
            reject(err)
          })
        })
      }
    }
  }
</script>

<style>

</style>
