<template>
    <!-- Draggable AI customer-service popup; position/opacity are bound from script state. -->
    <div
      class="ai-chat-popup"
      v-if="visible"
      :style="{opacity: popupOpacity, left: popupLeft + 'px', top: popupTop + 'px', position: 'fixed'}"
      @mousedown.stop
    >
      <div
        class="ai-chat-header"
        @mousedown="startDrag"
      >
        <span>蓝心小V客服</span>
        <button class="close-btn" type="button" aria-label="关闭" @click="emit('close')">×</button>
      </div>
      <div class="ai-chat-messages" ref="msgBox">
        <div v-for="(msg, idx) in messages" :key="idx" :class="['ai-message-row', msg.from]">
          <span v-if="msg.from === 'bot'" class="ai-avatar">🤖</span>
          <div :class="['ai-message', msg.from]">
            {{ msg.text }}
          </div>
          <span v-if="msg.from === 'user'" class="ai-avatar">👤</span>
        </div>
      </div>
      <div class="ai-chat-input-area">
        <input
          v-if="!isVoiceMode"
          v-model="input"
          @focus="inputFocus = true"
          @blur="inputFocus = false"
          class="ai-chat-input"
          :placeholder="inputFocus ? '' : '请输入内容...'"
          aria-label="请输入内容"
          @keyup.enter="send"
        />
        <button
          v-if="!isVoiceMode"
          class="ai-chat-send"
          type="button"
          aria-label="发送"
          @click="send"
        >
          <svg width="20" height="20" viewBox="0 0 20 20" fill="none" aria-hidden="true">
            <path d="M3 17L17 10L3 3V8L13 10L3 12V17Z" fill="#fff"/>
          </svg>
        </button>
        <button
          v-if="!isVoiceMode"
          class="ai-chat-voice-btn"
          type="button"
          aria-label="切换语音输入"
          @click="toggleVoiceMode"
        >
          <svg width="24" height="24" viewBox="0 0 24 24" fill="none" aria-hidden="true">
            <path d="M12 14C13.66 14 15 12.66 15 11V5C15 3.34 13.66 2 12 2C10.34 2 9 3.34 9 5V11C9 12.66 10.34 14 12 14Z" fill="#3F7BFC"/>
            <path d="M19 11C19 14.53 16.39 17.44 13 17.93V21H11V17.93C7.61 17.44 5 14.53 5 11H7C7 13.76 9.24 16 12 16C14.76 16 17 13.76 17 11H19Z" fill="#3F7BFC"/>
          </svg>
        </button>
        <div v-if="isVoiceMode" class="voice-input-area">
          <button
            class="voice-record-btn"
            type="button"
            @mousedown="startVoiceRecord"
            @mouseup="stopVoiceRecord"
            @mouseleave="stopVoiceRecord"
          >
            <span v-if="!isRecording">按住说话</span>
            <span v-else>松开结束</span>
          </button>
          <button class="voice-cancel-btn" type="button" aria-label="返回文字输入" @click="toggleVoiceMode">
            <svg width="24" height="24" viewBox="0 0 24 24" fill="none" aria-hidden="true">
              <path d="M19 6.41L17.59 5L12 10.59L6.41 5L5 6.41L10.59 12L5 17.59L6.41 19L12 13.41L17.59 19L19 17.59L13.41 12L19 6.41Z" fill="#666"/>
            </svg>
          </button>
        </div>
      </div>
    </div>
  </template>
  
  <script setup>
  import { ref, nextTick, onMounted, onBeforeUnmount } from 'vue'
  import { defineProps, defineEmits } from 'vue'
  const props = defineProps({
    visible: Boolean,
    // Popup transparency, applied through the inline :style binding.
    popupOpacity: {
      type: Number,
      default: 0.95
    }
  })
  // Fix: 'play-bot-audio' is emitted from callAI() (so the parent can speak the
  // reply) but was not declared here, which triggers a Vue undeclared-emit warning.
  const emit = defineEmits(['close', 'play-bot-audio'])
  
  const input = ref('')
  const inputFocus = ref(false)
  // Chat transcript; each entry is { from: 'bot' | 'user', text: string }.
  const messages = ref([
    { from: 'bot', text: '你好，我是蓝心小V，有什么可以帮您？' }
  ])
  const msgBox = ref(null)
  // Conversation id returned by the BlueLM cloud function; reused across turns.
  const sessionId = ref(null)
  
  // Drag state for the popup window.
  const popupLeft = ref(window.innerWidth - 400 - 40) // default: near bottom-right corner
  const popupTop = ref(window.innerHeight - 450 - 100)
  let dragging = false
  let dragOffsetX = 0
  let dragOffsetY = 0
  
  // Begin a drag: remember where inside the header the pointer grabbed the
  // popup, then track the pointer across the whole document.
  function startDrag(e) {
    const { clientX, clientY } = e
    dragging = true
    dragOffsetX = clientX - popupLeft.value
    dragOffsetY = clientY - popupTop.value
    document.addEventListener('mousemove', onDrag)
    document.addEventListener('mouseup', stopDrag)
  }
  // Move the popup with the pointer, clamped so it stays on screen
  // (350 matches the popup's CSS width).
  function onDrag(e) {
    if (!dragging) return
    const rawLeft = e.clientX - dragOffsetX
    const rawTop = e.clientY - dragOffsetY
    popupLeft.value = Math.min(Math.max(0, rawLeft), window.innerWidth - 350)
    popupTop.value = Math.min(Math.max(0, rawTop), window.innerHeight - 100)
  }
  // End the drag and detach the document-level listeners installed by startDrag.
  function stopDrag() {
    dragging = false
    const handlers = [['mousemove', onDrag], ['mouseup', stopDrag]]
    for (const [eventName, handler] of handlers) {
      document.removeEventListener(eventName, handler)
    }
  }
  // Safety net: if the component is torn down mid-drag, drop the
  // document-level listeners so they cannot fire on a dead component.
  onBeforeUnmount(() => {
    document.removeEventListener('mousemove', onDrag)
    document.removeEventListener('mouseup', stopDrag)
  })
  
  // Submit the current text field as a user message, clear the field,
  // scroll the transcript down, and kick off the AI request.
  function send() {
    const content = input.value.trim()
    if (!content) return
    messages.value.push({ from: 'user', text: content })
    input.value = ''
    nextTick(scrollToBottom)
    callAI()
  }
  
  // Send the accumulated user messages to the BlueLM cloud function and append
  // the reply (or a readable error) to the transcript. A transient "thinking"
  // bubble is shown while the request is in flight.
  async function callAI() {
    const THINKING = '思考中……'
    // Remove the transient "thinking" bubble from the transcript.
    const dropThinking = () => {
      messages.value = messages.value.filter(m => m.text !== THINKING)
    }
    // Extract a human-readable message from a raw error string that may be
    // JSON-encoded; falls back to the raw string, or to `fallback` when empty.
    const toErrorText = (raw, fallback) => {
      if (!raw) return fallback
      try {
        const parsed = JSON.parse(raw)
        return parsed.message || parsed.error || raw
      } catch {
        return raw
      }
    }

    messages.value.push({ from: 'bot', text: THINKING })
    nextTick(scrollToBottom)
    try {
      const { result } = await uniCloud.callFunction({
        name: 'callBlueLM-app',
        data: {
          model: 'vivo-BlueLM-TB-Pro',
          sessionId: sessionId.value,
          // Only user turns are sent; bot turns are not part of the request.
          messages: messages.value
            .filter(m => m.from === 'user')
            .map(m => ({ role: 'user', content: m.text })),
          extra: {
            temperature: 0.9,
            max_new_tokens: 2048,
            top_p: 0.9,
            top_k: 50
          }
        }
      })
      dropThinking()

      if (result.code === 0) {
        messages.value.push({ from: 'bot', text: result.data })
        sessionId.value = result.sessionId || sessionId.value
        // Let the parent component play TTS audio for the bot reply.
        emit('play-bot-audio', result.data)
      } else {
        messages.value.push({ from: 'bot', text: toErrorText(result.message, 'AI接口调用失败') })
      }
    } catch (err) {
      dropThinking()
      messages.value.push({ from: 'bot', text: toErrorText(err.message, '请求失败') })
    }
    nextTick(scrollToBottom)
  }
  
  // Pin the message list to its newest entry (no-op before the ref is mounted).
  function scrollToBottom() {
    const box = msgBox.value
    if (!box) return
    box.scrollTop = box.scrollHeight
  }
  
  // Voice-input state.
  const isVoiceMode = ref(false)      // push-to-talk UI shown instead of the text field
  const isRecording = ref(false)      // true while the user holds the record button
  let ws = null                       // uni-app SocketTask for the ASR WebSocket
  let audioContext = null             // WebAudio context capturing the microphone
  let processor = null                // ScriptProcessorNode producing PCM frames
  let source = null                   // MediaStreamSource wrapping the mic stream
  let stream = null                   // raw getUserMedia stream (for track cleanup)
  let wsReady = false                 // true after the ASR handshake frame is sent
  let audioBufferQueue = []           // PCM frames buffered until the socket opens
  let recordStartTime = 0             // ms timestamp used to reject too-short presses
  
  // Build a 32-character lowercase-hex id in UUIDv4 layout without hyphens:
  // the 13th digit is fixed to '4' and the 17th carries the RFC 4122 variant.
  function generateUUID() {
    const template = 'xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx'
    return template.replace(/[xy]/g, (ch) => {
      const rand = (Math.random() * 16) | 0
      const digit = ch === 'x' ? rand : ((rand & 0x3) | 0x8)
      return digit.toString(16)
    })
  }
  
  // Downsample a Float32 PCM buffer to 16 kHz and quantize to signed 16-bit.
  // Uses nearest-lower-sample decimation (no low-pass filtering); samples are
  // clamped to [-1, 1] and scaled asymmetrically (0x8000 / 0x7FFF) so the full
  // Int16 range is usable. inputSampleRate defaults to the 44.1 kHz capture rate.
  function floatTo16kPCM(input, inputSampleRate = 44100) {
    const targetRate = 16000
    const step = inputSampleRate / targetRate
    const outLength = Math.floor(input.length / step)
    const out = new Int16Array(outLength)
    for (let i = 0; i < outLength; i++) {
      const raw = input[Math.floor(i * step)]
      const clamped = Math.max(-1, Math.min(1, raw))
      out[i] = clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF
    }
    return out
  }
  
  // Flip between the text field and the push-to-talk UI.
  function toggleVoiceMode() {
    const next = !isVoiceMode.value
    isVoiceMode.value = next
  }
  
  // Push-to-talk: capture microphone audio, stream it as 16 kHz PCM frames to
  // the vivo ASR WebSocket service, and hand the final transcript to callAI().
  async function startVoiceRecord() {
    try {
      // --- microphone + WebAudio capture pipeline ---
      stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      audioContext = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 44100 })
      source = audioContext.createMediaStreamSource(stream)
      processor = audioContext.createScriptProcessor(4096, 1, 1)
      source.connect(processor)
      processor.connect(audioContext.destination)
      isRecording.value = true
      recordStartTime = Date.now()
      console.log('开始录音')
      // Avoid stacking duplicate "recording" hints.
      messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
      messages.value.push({ from: 'bot', text: '正在录音，请说话...' })
      nextTick(scrollToBottom)

      // Start capturing immediately: frames produced before the WebSocket is
      // ready are buffered and flushed in onOpen.
      // Fix: the handler used to be installed only after the signing round-trip
      // and dropped every frame until the socket opened, so audioBufferQueue was
      // never filled and the start of the utterance was lost.
      // NOTE(review): the old code checked ws.readyState, which uni-app's
      // SocketTask does not expose on all platforms; the wsReady flag (set in
      // onOpen) is used instead — verify on target platforms.
      processor.onaudioprocess = (e) => {
        if (!isRecording.value) return
        // Downsample the mic's actual rate to the 16 kHz PCM the ASR expects.
        const pcm16k = floatTo16kPCM(e.inputBuffer.getChannelData(0), audioContext.sampleRate)
        const frame = pcm16k.buffer
        if (!ws || !wsReady) {
          // Cap the backlog (~5-6 s at 4096 samples / 44.1 kHz per frame).
          if (audioBufferQueue.length < 60) audioBufferQueue.push(frame)
          return
        }
        ws.send({ data: frame }) // SocketTask.send takes a { data } object
      }

      // Ask the signing cloud function for the ASR WebSocket URL and headers.
      const { result } = await uniCloud.callFunction({
        name: 'yuyin', // cloud function responsible for ASR request signing
        data: {
          action: 'getAsrSign',
          method: 'GET',  // HTTP method of the ASR endpoint
          uri: '/asr/v2', // ASR endpoint URI
          query: {
            client_version: 'unknown',
            package: 'unknown',
            sdk_version: 'unknown',
            android_version: 'unknown',
            net_type: '1',            // network type; the docs use string "1"
            engineid: 'shortasrinput' // short-utterance input ASR engine
            // user_id and system_time are generated inside the cloud function
          }
        }
      })

      if (result.code !== 0 || !result.data || !result.data.wsUrl) {
        console.error('获取ASR签名失败', result)
        messages.value.push({ from: 'bot', text: '获取语音识别服务信息失败' })
        nextTick(scrollToBottom)
        stopVoiceRecord() // leave recording state cleanly
        return
      }

      const wsUrl = result.data.wsUrl
      const headers = result.data.headers // signed headers returned by the cloud function
      console.log('获取ASR签名成功，连接到:', wsUrl)

      // uni.connectSocket is used because it allows custom (signature) headers.
      // The task is kept in a local so callbacks survive ws being reset.
      const socket = uni.connectSocket({
        url: wsUrl,
        header: headers,
        protocols: []
      })
      ws = socket

      if (socket) {
        socket.onOpen((res) => {
          // Guard: a stale connection may open after stopVoiceRecord() already
          // reset the state — ignore it.
          if (ws !== socket) return
          console.log('ASR WebSocket已连接', res)
          // Handshake text frame per the vivo ASR protocol.
          const startPayload = {
            type: 'started',
            request_id: generateUUID(),
            asr_info: {
              end_vad_time: 3000,
              audio_type: 'pcm',
              chinese2digital: 1,
              punctuation: 1
            }
          }
          socket.send({ data: JSON.stringify(startPayload) })
          wsReady = true
          // Flush the audio frames buffered while the socket was connecting.
          audioBufferQueue.forEach(buf => socket.send({ data: buf }))
          audioBufferQueue = []
        })

        socket.onMessage((event) => {
          console.log('收到ASR WebSocket消息', event.data)
          try {
            const data = JSON.parse(event.data)
            if (data.action === 'result' && data.data && data.data.text) {
              // is_last marks the final transcript of the utterance; interim
              // results are ignored apart from the log above.
              if (data.data.is_last) {
                console.log('最终识别结果:', data.data.text)
                messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
                if (data.data.text.trim()) {
                  messages.value.push({ from: 'user', text: data.data.text.trim() })
                  nextTick(scrollToBottom)
                  callAI() // query the AI with the recognized text
                } else {
                  messages.value.push({ from: 'bot', text: '未识别到有效语音' })
                  nextTick(scrollToBottom)
                }
                // The server closes the connection itself after the end marker.
              }
            } else if (data.action === 'error') {
              messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
              messages.value.push({ from: 'bot', text: '语音识别出错: ' + (data.desc || JSON.stringify(data)) })
              console.error('ASR接口返回错误:', data)
              nextTick(scrollToBottom)
              stopVoiceRecord()
            } else {
              console.log('收到其他ASR消息:', data) // other message types, just logged
            }
          } catch (e) {
            console.error('处理ASR WebSocket消息失败:', e)
            messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
            messages.value.push({ from: 'bot', text: '处理语音识别结果出错' })
            nextTick(scrollToBottom)
            stopVoiceRecord()
          }
        })

        socket.onError((e) => {
          console.error('ASR WebSocket连接出错', e)
          messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
          messages.value.push({ from: 'bot', text: '语音识别服务连接失败' })
          nextTick(scrollToBottom)
          isRecording.value = false // make sure the recording state is cleared
        })

        socket.onClose((event) => {
          console.log('ASR WebSocket已关闭', event.code, event.reason)
          messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
          if (isRecording.value) {
            // Closing while still recording means the link dropped unexpectedly.
            messages.value.push({ from: 'bot', text: '语音识别服务已断开' })
            nextTick(scrollToBottom)
            isRecording.value = false
          }
          wsReady = false
          ws = null
        })
      } else {
        console.error('uni.connectSocket 返回无效的 WebSocket 对象')
        messages.value.push({ from: 'bot', text: '语音识别服务连接初始化失败' })
        nextTick(scrollToBottom)
        stopVoiceRecord()
      }

    } catch (err) {
      isRecording.value = false
      console.error('启动录音失败', err)
      // Fix: filter() returns a new array — the previous code discarded the
      // result, leaving the "recording" hint stuck in the chat on mic failure.
      messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...')
      messages.value.push({ from: 'bot', text: '无法访问麦克风，请检查权限设置或浏览器不支持' })
      nextTick(scrollToBottom)
    }
  }

  // Release-to-stop: leave recording mode, tell the ASR service the utterance
  // ended, and tear down the audio pipeline and WebSocket-related state.
  function stopVoiceRecord() {
    if (!isRecording.value) return
    isRecording.value = false
    const duration = Date.now() - recordStartTime;
     messages.value = messages.value.filter(m => m.text !== '正在录音，请说话...'); // remove the recording hint

    // Presses shorter than 800 ms are rejected as accidental taps.
    if (duration < 800) {
      messages.value.push({ from: 'bot', text: '说话时间太短，请重试' });
      nextTick(scrollToBottom);
    } else {
        // Send the end-of-audio marker as a binary frame.
        // NOTE(review): the marker string contains an en-dash ('–'), i.e.
        // ' --end –- ' rather than '--end--' — confirm against the vivo ASR
        // protocol docs before changing; kept byte-identical here.
        // NOTE(review): SocketTask from uni.connectSocket may not expose
        // readyState on all uni-app platforms — verify on targets.
        if (ws && ws.readyState === 1) {
             // uni's send() requires the ArrayBuffer wrapped in a { data } object.
             ws.send({ data: new TextEncoder().encode(' --end –- ').buffer });
             // The server normally closes the connection after receiving the
             // end marker; the client does not call uni.closeSocket() manually.
             // setTimeout(() => { ws && uni.closeSocket() }, 2000); // delayed close (disabled)
        } else if (audioBufferQueue.length > 0) {
            // Buffered audio but no open socket: the connection never came up.
             messages.value.push({ from: 'bot', text: '语音识别连接未建立，请重试' });
             nextTick(scrollToBottom);
        }
    }

    // Tear down the WebAudio capture pipeline and release the microphone.
    if (processor) processor.disconnect()
    if (source) source.disconnect()
    if (audioContext) audioContext.close()
    if (stream) stream.getTracks().forEach(track => track.stop())

    // Reset WebSocket-related state for the next press.
    // NOTE(review): ws is nulled without closing; this relies on the server
    // closing the socket after the end marker — confirm no leak on failure paths.
    ws = null; // reset so the next press reconnects
    audioContext = null;
    processor = null;
    source = null;
    stream = null;
    wsReady = false;
    audioBufferQueue = [];
    recordStartTime = 0;

    console.log('录音结束，时长(ms):', duration);
  }
  </script>
  
  <style scoped>
  /* Popup shell: fixed-size rounded card; left/top/opacity come from the
     inline :style binding in the template. */
  .ai-chat-popup {
    width: 350px;
    height: 450px;
    background: #fff;
    border-radius: 20px;
    box-shadow: 0 8px 32px rgba(63,123,252,0.15);
    display: flex;
    flex-direction: column;
    z-index: 10001;
    transition: opacity 0.2s;
    user-select: none;
  }
  /* Title bar; also the drag handle (cursor: move, @mousedown="startDrag"). */
  .ai-chat-header {
    height: 48px;
    background: #3F7BFC;
    color: #fff;
    display: flex;
    align-items: center;
    justify-content: space-between;
    border-top-left-radius: 20px;
    border-top-right-radius: 20px;
    padding: 0 20px;
    font-size: 18px;
    font-weight: bold;
    cursor: move;
  }
  .close-btn {
    background: none;
    border: none;
    color: #fff;
    font-size: 24px;
    cursor: pointer;
    line-height: 1;
    margin-right: 0px;
  }
  /* Scrollable transcript area between header and input bar. */
  .ai-chat-messages {
    flex: 1;
    padding: 20px;
    overflow-y: auto;
    background: #F5F6FA;
    display: flex;
    flex-direction: column;
    gap: 12px;
  }
  /* One transcript row; .bot aligns left, .user aligns right. */
  .ai-message-row {
    display: flex;
    align-items: flex-end;
  }
  .ai-message-row.bot {
    flex-direction: row;
    justify-content: flex-start;
  }
  .ai-message-row.user {
    flex-direction: row;
    justify-content: flex-end;
  }
  .ai-message-row.user .ai-message {
    margin-left: 0;
    margin-right: 8px;
    align-self: flex-end;
  }
  .ai-message-row.user .ai-avatar {
    margin-left: 0;
    margin-right: 0;
    align-self: flex-end;
  }
  .ai-avatar {
    font-size: 22px;
    margin: 0 8px;
    align-self: flex-end;
  }
  /* Chat bubble; .bot/.user variants pick the color scheme and corner. */
  .ai-message {
    max-width: 75%;
    padding: 12px 16px;
    border-radius: 16px;
    font-size: 15px;
    word-break: break-all;
    display: inline-block;
  }
  .ai-message.bot {
    background: #3F7BFC;
    color: #fff;
    align-self: flex-start;
    border-bottom-left-radius: 4px;
  }
  .ai-message.user {
    background: #fff;
    color: #3F7BFC;
    align-self: flex-end;
    border-bottom-right-radius: 4px;
    border: 1px solid #E0E6ED;
  }
  /* Bottom bar holding either the text input + buttons or the voice UI. */
  .ai-chat-input-area {
    display: flex;
    align-items: center;
    padding: 12px 16px;
    background: #fff;
    border-bottom-left-radius: 20px;
    border-bottom-right-radius: 20px;
    border-top: 1px solid #E0E6ED;
  }
  .ai-chat-input {
    flex: 1;
    height: 40px;
    border: 1px solid #E0E6ED;
    border-radius: 20px;
    padding: 0 16px;
    font-size: 15px;
    outline: none;
    background: #F5F6FA;
    margin-right: 12px;
    color: #222;
    transition: border 0.2s;
  }
  .ai-chat-input:focus {
    border: 1.5px solid #3F7BFC;
    background: #fff;
  }
  /* Round send button with hover/active shades. */
  .ai-chat-send {
    width: 40px;
    height: 40px;
    background: #3F7BFC;
    border: none;
    border-radius: 50%;
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    transition: background 0.2s;
  }
  .ai-chat-send:hover {
    background: #2556b8;
  }
  .ai-chat-send:active {
    background: #17408b;
  }
  /* NOTE(review): .ai-customer-btn (the floating launcher) is not rendered by
     this component's template — presumably it lives in the parent; confirm
     before removing these rules. */
  .ai-customer-btn {
    position: fixed;
    right: 40px;
    bottom: 40px;
    width: 64px;
    height: 64px;
    background: rgba(255,255,255,0.85);
    border-radius: 50%;
    box-shadow: 0 4px 16px rgba(63,123,252,0.18);
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    z-index: 10000;
    transition: box-shadow 0.2s;
  }
  .ai-customer-btn:hover {
    box-shadow: 0 8px 32px rgba(63,123,252,0.28);
  }
  .ai-customer-btn img {
    width: 40px;
    height: 40px;
    border-radius: 50%;
    object-fit: cover;
  }
  /* Microphone toggle next to the send button. */
  .ai-chat-voice-btn {
    width: 40px;
    height: 40px;
    background: none;
    border: none;
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
    margin-right: 8px;
  }
  
  /* Push-to-talk UI shown instead of the text input while in voice mode. */
  .voice-input-area {
    flex: 1;
    display: flex;
    align-items: center;
    gap: 12px;
  }
  
  .voice-record-btn {
    flex: 1;
    height: 40px;
    background: #F5F6FA;
    border: 1px solid #E0E6ED;
    border-radius: 20px;
    color: #666;
    font-size: 15px;
    cursor: pointer;
    transition: all 0.2s;
  }
  
  .voice-record-btn:active {
    background: #E0E6ED;
  }
  
  .voice-cancel-btn {
    width: 40px;
    height: 40px;
    background: none;
    border: none;
    display: flex;
    align-items: center;
    justify-content: center;
    cursor: pointer;
  }
  </style>