<script setup>
import RippleButton from "@/components/ui/RippleButton.vue";
import {ref, onMounted, onUnmounted} from 'vue';
import {RealtimeClient} from '@openai/realtime-api-beta';
import {WavRecorder, WavStreamPlayer} from '@/lib/wavtools/index.js';
import {instructions} from '@/utils/conversation_config.js';
import {WavRenderer} from '@/utils/wav_renderer';
import {showMessageError, showMessageOk} from "@/utils/dialog";
import {getUserToken} from "@/store/session";

// Component props: `height` sets the root container height (any CSS size string).
const props = defineProps({
  height: {
    type: String,
    default: '100vh'
  }
})

// Emitted events: `close` — fired after hangUp() finishes tearing down the call.
const emits = defineEmits(['close'])

/********************** connection animation code *************************/
// Typewriter animation for the "connecting" status text: reveal one
// character every 200ms; once the whole message is shown, wait 1s,
// clear it, and replay from the start.
const fullText = '正在连接中...'
const connectingText = ref('')
let index = 0
const typeText = () => {
  if (index >= fullText.length) {
    // Full message displayed — pause, reset, and restart the animation.
    setTimeout(() => {
      connectingText.value = ''
      index = 0
      typeText()
    }, 1000)
    return
  }
  connectingText.value += fullText[index]
  index += 1
  setTimeout(typeText, 200) // reveal the next character in 200ms
}

/*************************** end of code ****************************************/

/********************** conversation process code ***************************/
const leftVoiceActive = ref(false)  // left-channel indicator state
const rightVoiceActive = ref(false) // right-channel indicator state
// Fake a "talking" animation while the call is live: on each tick, flip
// each channel indicator on/off independently with 50% probability.
const animateVoice = () => {
  const coinFlip = () => Math.random() > 0.5
  leftVoiceActive.value = coinFlip()
  rightVoiceActive.value = coinFlip()
}

// WAV recorder: captures microphone input as 24kHz PCM for streaming to the API.
const wavRecorder = ref(new WavRecorder({sampleRate: 24000}))
// WAV stream player: decodes and plays the 24kHz audio streamed back by the server.
const wavStreamPlayer = ref(new WavStreamPlayer({sampleRate: 24000}))
let host = import.meta.env.VITE_VUE_APP_WS_HOST
if (host === '') {
  // No host configured: derive the websocket origin from the current page,
  // matching its protocol (wss for https, ws for http).
  if (location.protocol === 'https:') {
    host = 'wss://' + location.host
  } else {
    host = 'ws://' + location.host
  }
}

// RealtimeClient: manages the realtime duplex connection (lifecycle,
// reconnects, event pub/sub) against the backend relay endpoint.
const client = ref(new RealtimeClient({
  url: `${host}/api/realtime`, // backend relay endpoint
  apiKey: getUserToken(), // user auth credential passed as the API key
  dangerouslyAllowAPIKeyInBrowser: true, // acknowledge the risk of holding the key in the browser
}))


// Configure the session: system instructions, manual turn-taking
// (turn_detection: null disables server VAD, so push-to-talk drives turns),
// Whisper transcription of user audio, and the "alloy" voice.
client.value.updateSession({
  instructions: instructions,
  turn_detection: null,
  input_audio_transcription: {model: 'whisper-1'},
  voice: 'alloy'
})

// Canvases for the input (client) and output (server) waveform visualizations.
const clientCanvasRef = ref(null)
const serverCanvasRef = ref(null)
// Whether the realtime call is established.
const isConnected = ref(false)
// Whether the microphone is currently recording (push-to-talk held down).
const isRecording = ref(false)
// <audio> elements: dialing loop and hang-up tone.
const backgroundAudio = ref(null)
const hangUpAudio = ref(null)

// Await-able delay helper: resolves after `ms` milliseconds.
const sleep = (ms) => new Promise((resolve) => {
  setTimeout(resolve, ms)
})

// Dial: play the calling tone, connect the realtime client, start audio
// capture/playback, send a greeting, and (in server-VAD mode) begin streaming
// microphone audio. No-op if already connected.
const connect = async () => {
  if (isConnected.value) {
    return
  }
  // Play the dialing background music.
  // FIX: was `backgroundAudio.valueOf()`, which returns the ref object and is
  // always truthy — the null check never worked.
  if (backgroundAudio.value) {
    backgroundAudio.value.play().catch(e => {
      showMessageError('背景音乐播放失败，可能是浏览器的自动播放策略导致的：', e)
    })
  }

  // Simulate a dialing delay.
  await sleep(3000)
  try {
    await client.value.connect()
    // FIX: typo `bebin()` -> `begin()` (WavRecorder has no `bebin` method,
    // so connecting always threw here).
    await wavRecorder.value.begin()
    await wavStreamPlayer.value.connect()
    showMessageOk('对话连接成功')
    if (!client.value.isConnected()) {
      return
    }
    isConnected.value = true
    // Stop the dialing music; guard the whole reset so a missing element
    // can't throw on the unguarded `currentTime` write.
    if (backgroundAudio.value) {
      backgroundAudio.value.pause()
      backgroundAudio.value.currentTime = 0
    }
    client.value.sendUserMessageContent([
      {
        type: 'input_text',
        text: '你好，我是极客学长!'
      }
    ])
    // In server-VAD mode the server detects turns, so stream audio continuously.
    if (client.value.getTurnDetectionType() === 'server_vad') {
      await wavRecorder.value.record((data) => client.value.appendInputAudio(data.mono))
    }
  } catch (e) {
    showMessageError('拨号失败：', e)
  }
}

// Start push-to-talk voice input (mousedown handler). Interrupts any audio
// the assistant is currently playing, cancels the in-flight response at the
// interruption offset, then starts streaming microphone audio to the client.
const startRecording = async () => {
  if (isRecording.value) {
    return
  }

  isRecording.value = true
  try {
    const trackSampleOffset = await wavStreamPlayer.value.interrupt()
    if (trackSampleOffset?.trackId) {
      const {trackId, offset} = trackSampleOffset
      client.value.cancelResponse(trackId, offset)
    }
    await wavRecorder.value.record(data => client.value.appendInputAudio(data.mono))
  } catch (e) {
    // FIX: roll the flag back on failure — otherwise isRecording stays true
    // forever and the early-return above blocks every retry.
    isRecording.value = false
    showMessageError('开启语音输入失败：', e)
  }
}

// Stop push-to-talk voice input (mouseup handler): pause the recorder and
// ask the model to respond to the audio collected so far.
const stopRecording = async () => {
  // FIX: guard against a mouseup with no active recording (e.g. the
  // mousedown's startRecording failed) — symmetric with startRecording's
  // guard; avoids pausing an idle recorder and requesting an empty response.
  if (!isRecording.value) {
    return
  }
  try {
    isRecording.value = false
    await wavRecorder.value.pause()
    client.value.createResponse()
  } catch (e) {
    showMessageError('结束语音输入失败：', e)
  }
}

// Initialize the waveform render loop and RealtimeClient event handlers.
const initialize = async () => {
  // Draw one frequency snapshot on a canvas. Sizes the canvas lazily on the
  // first draw; when `active` is false, draws a flat single-value bar.
  const drawFrequencies = (canvas, active, source) => {
    if (!canvas.width || !canvas.height) {
      canvas.width = canvas.offsetWidth
      canvas.height = canvas.offsetHeight
    }
    const ctx = canvas.getContext('2d')
    if (!ctx) {
      return
    }
    ctx.clearRect(0, 0, canvas.width, canvas.height)
    const result = active ? source.getFrequencies('voice') : {values: new Float32Array([0])}
    // FIX: was `result.value` (undefined) — both the fallback object and
    // getFrequencies() carry the data under `.values`, so nothing was drawn.
    WavRenderer.drawBars(canvas, ctx, result.values, '#0099ff', 10, 0, 8)
  }

  // Render loop for both visualization canvases.
  let isLoaded = true
  const render = () => {
    if (!isLoaded) {
      return
    }
    if (clientCanvasRef.value) {
      drawFrequencies(clientCanvasRef.value, wavRecorder.value.recording, wavRecorder.value)
    }
    if (serverCanvasRef.value) {
      drawFrequencies(serverCanvasRef.value, wavStreamPlayer.value.analyser, wavStreamPlayer.value)
    }
    requestAnimationFrame(render)
  }
  render()

  // Surface client errors to the user.
  client.value.on('error', (event) => {
    showMessageError(event.error)
  })

  // Surface server-side error events to the user.
  client.value.on('realtime.event', (re) => {
    if (re.event.type === 'error') {
      showMessageError(re.event.error)
    }
  })

  // When the user interrupts, stop playback and cancel the in-flight
  // response at the sample offset where playback was cut off.
  client.value.on('conversation.interrupted', async () => {
    const trackSampleOffset = await wavStreamPlayer.value.interrupt()
    if (trackSampleOffset?.trackId) {
      const {trackId, offset} = trackSampleOffset
      client.value.cancelResponse(trackId, offset)
    }
  })

  // Stream incoming assistant audio deltas straight into the player.
  client.value.on('conversation.updated', async ({item, delta}) => {
    if (delta?.audio) {
      wavStreamPlayer.value.add16BitPCM(delta.audio, item.id)
    }
  })
}

// Handle for the talking-animation interval so it can be cleared on unmount.
const voiceInterval = ref(null)
onMounted(() => {
  initialize()
  // Start the in-conversation voice indicator animation (ticks every 200ms).
  voiceInterval.value = setInterval(animateVoice, 200)
  // Start the "connecting..." typewriter animation.
  // NOTE(review): typeText reschedules itself via setTimeout forever and is
  // never cancelled on unmount — consider adding a stop flag.
  typeText()
})

onUnmounted(() => {
  clearInterval(voiceInterval.value)
  // Drop the realtime connection and its event listeners.
  client.value.reset()
})

// Hang up the call: stop the dialing music, disconnect the realtime client,
// and stop capture/playback. Always plays the hang-up tone and emits `close`,
// even if teardown partially fails.
const hangUp = async () => {
  try {
    isConnected.value = false
    // Stop the dialing background music.
    // FIX: guard on the element itself — the old `currentTime` truthiness
    // check skipped the pause whenever playback position was exactly 0.
    if (backgroundAudio.value) {
      backgroundAudio.value.pause()
      backgroundAudio.value.currentTime = 0
    }
    // Disconnect the realtime client.
    client.value.reset()
    // Stop microphone capture and interrupt playback.
    await wavRecorder.value.end()
    await wavStreamPlayer.value.interrupt()
  } catch (e) {
    showMessageError('挂断电话出现错误：', e)
  } finally {
    // Play the hang-up tone.
    // FIX: play() returns a promise that can reject under the browser's
    // autoplay policy — handle it so it can't surface as an unhandled rejection.
    hangUpAudio.value?.play().catch(() => {})
    emits('close')
  }
}
/*************************** end of code ****************************************/

defineExpose({connect, hangUp})

</script>

<template>
  <!-- Root container; height comes from the `height` prop. -->
  <el-container class="realtime-conversation" :style="{height: height}">
    <!-- Dialing screen: shown until the realtime connection is established. -->
    <el-container class="connection-container" v-if="!isConnected">
      <div class="phone-container">
        <div class="signal"></div>
        <div class="signal"></div>
        <div class="signal"></div>
        <div class="phone"></div>
      </div>
      <!-- Typewriter "connecting..." text driven by typeText(). -->
      <div class="status-text">{{connectingText}}</div>
      <!-- Looping dialing tone, started/stopped from connect()/hangUp(). -->
      <audio ref="backgroundAudio" loop>
        <source src="/medias/calling.mp3" type="audio/mp3" />
        您的浏览器不支持音频元素。
      </audio>
      <!-- One-shot hang-up tone, played from hangUp(). -->
      <audio ref="hangUpAudio">
        <source src="/medias/hang-up.mp3" type="audio/mp3" />
        您的浏览器不支持音频元素。
      </audio>
    </el-container>

    <!-- In-call screen: waveform visualizations and call controls. -->
    <div class="conversation-container" v-else>
      <div class="wave-container">
        <div class="wave-animation">
          <div v-for="i in 5" :key="i" class="wave-ellipse"></div>
        </div>
      </div>
      <div class="voice-indicators">
        <!-- Left canvas: microphone (client) frequencies. -->
        <div class="voice-indicators left">
          <canvas ref="clientCanvasRef"></canvas>
        </div>
        <!-- Right canvas: assistant playback (server) frequencies. -->
        <div class="voice-indicators right">
          <canvas ref="serverCanvasRef"></canvas>
        </div>
      </div>
      <!-- Push-to-talk (hold) and hang-up buttons.
           NOTE(review): mouse-only events — no touch support on mobile. -->
      <div class="call-controls">
        <el-tooltip content="长按发送语音" placement="top">
          <ripple-button>
            <button class="call-button answer" @mousedown="startRecording" @mouseup="stopRecording">
              <i class="iconfont icon-mic-bold"></i>
            </button>
          </ripple-button>
        </el-tooltip>
        <el-tooltip content="结束通话" placement="top">
          <button class="call-button hangup" @click="hangUp">
            <i class="iconfont icon-hung-up"></i>
          </button>
        </el-tooltip>
      </div>
    </div>
  </el-container>
</template>

<style scoped lang="stylus">
@import "../assets/css/realtime.styl"
</style>