<template>
  <div>
    <!-- Toggle button: starts or stops recording; label reflects current state -->
    <button @click="isRecording ? stopRecording() : startRecording()">
      {{ isRecording ? '停止录音' : '开始录音' }}
    </button>
    <!-- Live transcription result; hidden until text arrives -->
    <div v-if="recognizedText"> 识别结果: {{ recognizedText }} </div>
    <!-- Container element the recorder-core WaveSurferView renders into -->
    <div ref="waveviewRef" class="wave-view"></div>
  </div>
</template>

<script lang="ts">
import { defineComponent, ref, onMounted, onBeforeUnmount } from 'vue'
import Recorder from 'recorder-core'
import 'recorder-core/src/engine/mp3'
import 'recorder-core/src/engine/mp3-engine'
import 'recorder-core/src/extensions/waveview'
import 'recorder-core/src/extensions/wavesurfer.view'
import type { SpeechTranscriberMessage, TranscriptionParams } from '/#/ali-speech'
import { randomHexString } from '/@/utils/id.ts'

// Alibaba Cloud ASR configuration.
// NOTE(review): credentials are hard-coded placeholders; in production the token
// must be fetched from a backend service, never embedded in client code.
const config = {
  appkey: 'xxx',
  token: 'xxx' // Temporary token obtained from the console; real logic should call a backend API to fetch it
}

/** Builds a fresh 32-character hex identifier (used for ASR message/task ids). */
function genId() {
  const hexId = randomHexString(32)
  return hexId
}

export default defineComponent({
  name: 'AudioRecorder',
  setup() {
    // recorder-core instance, created in onMounted.
    // NOTE(review): the type argument `typeof Recorder` is the library factory's
    // type, and `null` is not assignable to it under strictNullChecks — verify tsconfig.
    const recorder = ref<typeof Recorder>(null)
    // WebSocket to the Alibaba Cloud NLS gateway; one connection per recording session.
    const ws = ref<WebSocket | null>(null)
    // Whether audio is currently being captured and streamed.
    const isRecording = ref(false)
    // Latest transcription text rendered in the template.
    const recognizedText = ref('')
    // Session task id. NOTE(review): generated once per component instance and reused
    // across start/stop cycles — confirm the ASR service accepts a reused task_id.
    const taskId = ref(genId())
    // Mount point for the waveform visualisation.
    const waveviewRef = ref<HTMLDivElement>()
    // WaveSurferView instance (untyped; recorder-core ships no typings for it).
    const waveview = ref<any>(null)

    /**
     * Realtime ASR over WebSocket against Alibaba Cloud's SpeechTranscriber.
     * Opens the gateway connection and wires its lifecycle handlers; a
     * StartTranscription command is sent as soon as the socket opens.
     * https://help.aliyun.com/zh/isi/developer-reference/websocket?spm=a2c4g.11186623.0.0.7e9474c1F17pAY#topic-2121083
     */
    const initWebSocket = (): void => {
      ws.value = new WebSocket(
        `wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1?token=${config.token}`
      )

      ws.value.onopen = () => {
        console.log('WebSocket连接已建立')
        sendStartTranscription()
      }

      ws.value.onmessage = (event: MessageEvent) => {
        // NOTE(review): server frames are parsed without validation; a malformed
        // payload would throw inside this handler.
        const message: SpeechTranscriberMessage = JSON.parse(event.data)
        handleServerMessage(message)
      }

      ws.value.onerror = (event: Event) => {
        console.error('WebSocket错误:', event)
      }

      ws.value.onclose = (e) => {
        console.log('WebSocket连接已关闭', e)
      }
    }

    /**
     * Sends the StartTranscription command that begins an ASR session.
     * The payload advertises the audio format produced by the recorder below
     * (mp3 at 16 kHz) and enables intermediate results plus punctuation prediction.
     */
    const sendStartTranscription = (): void => {
      if (!ws.value) return

      const startParams: TranscriptionParams = {
        header: {
          message_id: genId(),
          task_id: taskId.value,
          namespace: 'SpeechTranscriber',
          name: 'StartTranscription',
          appkey: config.appkey
        },
        payload: {
          format: 'mp3',
          sample_rate: 16000,
          enable_intermediate_result: true,
          enable_punctuation_prediction: true
        }
      }

      ws.value.send(JSON.stringify(startParams))
    }

    /**
     * Dispatches server events by header.name. Both intermediate results
     * (TranscriptionResultChanged) and finalized sentences (SentenceEnd)
     * overwrite recognizedText with payload.result.
     */
    const handleServerMessage = (message: SpeechTranscriberMessage): void => {
      const { header, payload } = message
      console.log(message)
      switch (header.name) {
        case 'TranscriptionStarted':
          console.log('识别已开始')
          break
        case 'TranscriptionResultChanged':
          // Intermediate (still-changing) recognition result.
          if (payload?.result) {
            recognizedText.value = payload.result
          }
          break
        case 'SentenceEnd':
          // Final result for the sentence that just ended.
          if (payload?.result) {
            recognizedText.value = payload.result
          }
          break
        case 'TranscriptionCompleted':
          console.log('识别已完成')
          break
      }
    }

    onMounted(() => {
      // Waveform renderer bound to the template container.
      // NOTE(review): width 1000 here vs. the 300px CSS width of .wave-view —
      // confirm which dimension is intended.
      waveview.value = Recorder.WaveSurferView({
        elem: waveviewRef.value,
        width: 1000,
        height: 100,
        fps: 50,
        lineWidth: 2,
        scale: 2,
        speed: 30,
        lineColor: '#4CAF50',
        backgroundColor: '#ffffff'
      })

      // mp3 at 16 kHz matches the format advertised in StartTranscription above.
      recorder.value = Recorder({
        type: 'mp3',
        sampleRate: 16000,
        bitRate: 16,
        onProcess: (buffers: Uint8Array, powerLevel: number, duration: number) => {
          // Feed the newest audio chunk to the waveform while recording.
          // NOTE(review): `buffers` is indexed like a list of chunks, so the
          // Uint8Array annotation looks wrong — presumably it is a buffer array;
          // confirm against recorder-core's onProcess documentation.
          if (isRecording.value) {
            waveview.value.input(buffers[buffers.length - 1], powerLevel, duration)
          }
        },
        // Intercept encoded mp3 chunks and stream them straight to the ASR socket.
        takeoffEncodeChunk: (chunkBytes: Uint8Array) => {
          if (isRecording.value && ws.value?.readyState === WebSocket.OPEN) {
            ws.value.send(chunkBytes)
          }
        }
      })
    })

    /**
     * Requests microphone access, connects the ASR WebSocket and starts capturing.
     * NOTE(review): the fixed 1s delay presumably leaves time for the WS handshake
     * and StartTranscription to complete; audio sent before TranscriptionStarted
     * may be dropped — consider starting capture from that event instead.
     */
    const startRecording = async (): Promise<void> => {
      try {
        if (!recorder.value) return
        await recorder.value.open()
        initWebSocket()
        setTimeout(() => {
          recorder.value.start()
          isRecording.value = true
          recognizedText.value = ''
        }, 1000)
      } catch (error) {
        console.error('启动录音失败:', error)
      }
    }

    /**
     * Stops capturing, tells the server the session is over and closes the socket.
     * NOTE(review): the socket is closed immediately after StopTranscription is
     * sent, so the final SentenceEnd/TranscriptionCompleted events may be lost —
     * consider closing from the TranscriptionCompleted handler instead.
     */
    const stopRecording = (): void => {
      if (!recorder.value || !ws.value) return

      recorder.value.stop((blob: Blob, duration: number) => {
        console.log('录音结束', blob, duration)
        // To play back the recorded result:
        // playRecord(blob)
      })
      isRecording.value = false

      const stopParams: TranscriptionParams = {
        header: {
          message_id: genId(),
          task_id: taskId.value,
          namespace: 'SpeechTranscriber',
          name: 'StopTranscription',
          appkey: config.appkey
        }
      }
      ws.value.send(JSON.stringify(stopParams))
      ws.value.close()
      ws.value = null
    }

    /**
     * Plays a recorded blob through a transient <audio> element.
     * Currently unused (see the commented-out call in stopRecording).
     */
    const playRecord = (blob: Blob) => {
      const audio = document.createElement('audio')
      // Temporary object URL; it must be revoked afterwards or it leaks memory.
      audio.src = (window.URL || webkitURL).createObjectURL(blob)
      audio.play()

      // NOTE(review): revoking after a fixed 5s may break playback of recordings
      // longer than that — revoking on the audio 'ended' event would be safer.
      setTimeout(function () {
        ;(window.URL || webkitURL).revokeObjectURL(audio.src)
      }, 5000)
    }

    // Release the microphone, socket and waveform when the component is torn down.
    onBeforeUnmount(() => {
      if (recorder.value) {
        recorder.value.stop()
      }
      if (ws.value) {
        ws.value.close()
      }
      // NOTE(review): confirm WaveSurferView actually exposes destroy().
      waveview.value?.destroy()
    })

    return {
      isRecording,
      recognizedText,
      startRecording,
      stopRecording,
      waveviewRef
    }
  }
})
</script>

<style scoped>
/* Waveform container.
   NOTE(review): width 300px differs from the WaveSurferView width of 1000
   configured in setup(); confirm which dimension is intended. */
.wave-view {
  margin-top: 20px;
  border: 1px solid #ddd;
  border-radius: 4px;
  background-color: #fff;
  height: 100px;
  width: 300px;
}
</style>
