<template>
  <!-- Voice-call surface: a reactive waveform ring plus a status line. -->
  <div class="main">
    <!-- Circular waveform; only rendered while a call is active.
         Clicking it barges in on the assistant's current answer. -->
    <canvas
      v-if="calling"
      ref="waveCanvas"
      :width="canvasSize"
      :height="canvasSize"
      class="wave-canvas"
      @click="handleCanvasClick"></canvas>
    <!-- Current call status / user guidance text -->
    <div class="tip">
      {{ statusText }}
    </div>
  </div>
</template>

<script setup>
import { ref, watch, onBeforeUnmount, onMounted } from 'vue';
import { useAiChatStore } from '@/store';
import { chat } from '@/api';
import config from '@/config';
import { playButtonSound } from '@/utils/sound';
const canvasSize = 300; // declared so the template size bindings do not trigger a Vue warning

const props = defineProps({ calling: Boolean });
const store = useAiChatStore();

const statusText = ref(''); // user-facing status line
const recognizing = ref(false); // a speech-recognition pass is currently running
const processing = ref(false); // waiting on the chat backend for a reply
const answering = ref(false); // the assistant's reply is being synthesized/spoken
const listening = ref(false); // drives the waveform animation while the mic is live
const ttsPaused = ref(false); // whether TTS playback is paused
const ttsAudio = ref(null); // <audio> element currently playing TTS output, or null
const waveCanvas = ref(null); // template ref for the waveform canvas
let canRecognize = true; // mutual-exclusion flag: recognition may (re)start
let audioContext = null; // WebAudio context backing the waveform analyser
let analyser = null; // AnalyserNode fed by the microphone stream
let dataArray = null; // reusable buffer for time-domain samples
let animationId = null; // requestAnimationFrame handle for the wave loop
let mediaStream = null; // live getUserMedia stream (released on stopAudio)
let recognition = null; // SpeechRecognition instance (built by initRecognition)

// Capability checks
/**
 * Detects the Web Speech API recognition constructor
 * (standard or WebKit-prefixed) on `window`.
 * @returns {boolean} true when speech recognition is available.
 */
function isSupportSpeechRecognition() {
  const Ctor = window.SpeechRecognition || window.webkitSpeechRecognition;
  return Boolean(Ctor);
}
/**
 * Detects `navigator.mediaDevices.getUserMedia` (microphone capture).
 * @returns {boolean} true when audio capture is available.
 */
function isSupportGetUserMedia() {
  const devices = navigator.mediaDevices;
  return Boolean(devices && devices.getUserMedia);
}
/**
 * Detects the Web Audio `AudioContext` constructor
 * (standard or WebKit-prefixed) on `window`.
 * @returns {boolean} true when audio analysis is available.
 */
function isSupportAudioContext() {
  const Ctor = window.AudioContext || window.webkitAudioContext;
  return Boolean(Ctor);
}

// One-shot init guard: browsers (iOS Safari especially) require the audio
// pipeline to be created after the first user gesture.
let inited = false;

/**
 * Lazily initializes speech recognition after the first user interaction.
 * Runs at most once per call session; when a required browser capability is
 * missing it surfaces a status message instead of initializing.
 */
function userInitAll() {
  if (inited) return;
  inited = true;
  // Probe each capability in turn; the first missing one wins.
  const unsupported = !isSupportSpeechRecognition()
    ? '当前浏览器不支持语音识别'
    : !isSupportGetUserMedia()
      ? '当前浏览器不支持录音'
      : !isSupportAudioContext()
        ? '当前浏览器不支持音频分析'
        : null;
  if (unsupported) {
    statusText.value = unsupported;
    return;
  }
  initRecognition();
}

const interrupted = ref(false); // drives the red "interrupted" flash on the wave ring
let interruptFlashCount = 0;

/**
 * Canvas tap handler: while the assistant is processing or speaking, a tap
 * barges in — TTS is cut off, state is reset, recognition restarts (if the
 * call is still live) and the ring flashes red twice as feedback.
 */
function handleCanvasClick() {
  userInitAll();
  const canInterrupt = (answering.value && ttsAudio.value) || processing.value;
  if (!canInterrupt) return;

  stopTTS();
  processing.value = false;
  answering.value = false;
  statusText.value = '已中断，点击下方按钮开始通话';
  canRecognize = true;
  interrupted.value = true; // enter interrupt animation
  interruptFlashCount = 0;
  if (props.calling) startRecognition();
  // Four 200 ms color toggles = two complete red/white flashes.
  const flashTimer = setInterval(() => {
    interruptFlashCount += 1;
    if (interruptFlashCount >= 4) {
      interrupted.value = false;
      clearInterval(flashTimer);
    }
  }, 200);
}
// Initialize the speech recognizer.
// Builds a zh-CN, single-utterance SpeechRecognition instance and wires its
// lifecycle events to the UI state and waveform animation. The instance is
// stored in the module-level `recognition` so the start/stop helpers can
// drive it.
function initRecognition() {
  if (!isSupportSpeechRecognition()) {
    statusText.value = '当前浏览器不支持语音识别';
    return;
  }
  statusText.value = '点击下方按钮开始通话';
  const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
  recognition = new SpeechRecognition();
  recognition.lang = 'zh-CN';
  recognition.continuous = false; // one utterance per pass; onend re-arms it
  recognition.interimResults = false; // final results only
  recognition.onstart = () => {
    recognizing.value = true;
    statusText.value = '正在听...';
    startAudio(); // start the waveform visualization while listening
  };
  recognition.onresult = (event) => {
    // Concatenate every transcript segment from this result batch.
    let text = '';
    for (let i = event.resultIndex; i < event.results.length; ++i) {
      text += event.results[i][0].transcript;
    }
    recognizing.value = false;
    stopAudio();
    statusText.value = '处理中...';
    processText(text);
  };
  recognition.onerror = (e) => {
    recognizing.value = false;
    stopAudio();
    statusText.value = '识别出错：' + (e.error || '未知错误');
  };
  recognition.onend = () => {
    recognizing.value = false;
    stopAudio();
    // Make sure the recognition state is fully reset.
    canRecognize = true;
    // Auto-restart recognition as long as the call is still active.
    if (props.calling && !processing.value && !answering.value) {
      setTimeout(() => {
        if (props.calling && !processing.value && !answering.value) startRecognition();
      }, 300);
    } else if (!processing.value && !answering.value) {
      statusText.value = '点击下方按钮开始通话';
    }
  };
}

// Handle a finalized speech-recognition transcript.
/**
 * Sends the recognized text to the chat backend, stores both sides of the
 * exchange, reads the full reply aloud, then re-arms recognition.
 *
 * Fixes vs. the original: drops the unused second callback parameter and the
 * needless `ttsPromise` intermediate, and no longer overwrites the
 * '通话已结束' status set by the `calling` watcher when the user hangs up
 * while a reply is still in flight.
 *
 * @param {string} text - transcript produced by the recognizer.
 */
async function processText(text) {
  if (!text.trim()) {
    statusText.value = '未识别到内容';
    canRecognize = true;
    if (props.calling) startRecognition();
    return;
  }
  canRecognize = false;
  processing.value = true;
  statusText.value = '处理中...';
  store.addMessage({ role: 'user', content: text });
  // Always lead with the configured system prompt; strip any stored system turns.
  const messages = [config.messages[0], ...store.messages.filter((m) => m.role !== 'system')];
  let aiText = '';
  answering.value = true;
  // Collect the streamed reply in full, then synthesize speech once —
  // chunk-by-chunk TTS produced choppy audio.
  await chat({ messages, stream: true }, (content) => {
    if (content) aiText += content;
  });
  if (aiText) {
    store.addMessage({ role: 'assistant', content: aiText });
    // One-shot TTS over the complete answer; resolves when playback ends.
    await speakStream(aiText);
  }
  answering.value = false;
  processing.value = false;
  canRecognize = true;
  if (props.calling) {
    statusText.value = '点击下方按钮开始通话';
    startRecognition();
  }
  // If the call ended while we were processing, leave the hang-up status
  // (set by the `calling` watcher) untouched.
}

// Speak the full reply text via the TTS endpoint in one request.
/**
 * Requests an MP3 synthesis of `text` and plays it through
 * `playBase64Audio`.
 *
 * Fixes vs. the original: the interruption check ran *after* setting
 * `answering.value = true`, so the `!answering.value` half was dead code —
 * the bail-out now happens before any state is mutated. The in-flight fetch
 * is also registered on `window.currentTTSRequest` (with an AbortController
 * signal) so `stopTTS()` and the hang-up path can actually cancel it; the
 * original aborted that slot but never populated it.
 *
 * @param {string} text - complete assistant reply to read aloud.
 */
async function speakStream(text) {
  if (!text) return;
  // Bail out before mutating state if the call has already ended.
  if (!props.calling) return;
  answering.value = true;
  canRecognize = false;

  const controller = new AbortController();
  window.currentTTSRequest = controller; // lets stopTTS()/hang-up abort us
  try {
    const res = await fetch('https://openai.qiniu.com/v1/voice/tts', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: config.headers.Authorization
      },
      signal: controller.signal,
      body: JSON.stringify({
        audio: {
          voice_type: 'qiniu_zh_female_wwxkjx',
          encoding: 'mp3',
          speed_ratio: 0.9
        },
        request: {
          text: text
        }
      })
    });
    const data = await res.json();
    if (data && data.data) {
      let audioSrc = '';
      if (data.data.speak_url) {
        audioSrc = data.data.speak_url;
      } else if (typeof data.data === 'string') {
        audioSrc = 'data:audio/mp3;base64,' + data.data;
      }
      if (audioSrc) {
        await playBase64Audio(audioSrc);
      }
    }
  } catch (e) {
    // An abort means the user interrupted or hung up — not a failure.
    if (!e || e.name !== 'AbortError') statusText.value = '语音合成失败';
  } finally {
    if (window.currentTTSRequest === controller) window.currentTTSRequest = null;
  }
  answering.value = false;
  // Don't clobber the hang-up status if the call ended mid-synthesis.
  if (props.calling) statusText.value = '点击下方按钮开始通话';
  canRecognize = true;
}

// Play an mp3 (URL or data: URI); resolves when playback finishes or fails.
/**
 * Plays the synthesized reply and keeps `ttsAudio` pointing at the live
 * element so `stopTTS()` can interrupt it. On natural end it resets state
 * and re-arms recognition when the call is still live.
 *
 * Fixes vs. the original: the error paths (`onerror` and a rejected
 * `play()`) left the dead element in `ttsAudio` and `ttsPaused` unreset,
 * which made the barge-in check in `handleCanvasClick` misfire.
 *
 * @param {string} audioSrc - URL or base64 data URI of the mp3.
 */
async function playBase64Audio(audioSrc) {
  stopTTS();
  // Don't start playback after a hang-up or a barge-in interrupt.
  if (!props.calling || (processing.value === false && answering.value === false)) {
    return;
  }
  const audio = new Audio(audioSrc);
  ttsAudio.value = audio;
  ttsPaused.value = false;
  statusText.value = '回答中...';

  // Drop the reference once this element is no longer the active player.
  const releaseAudio = () => {
    if (ttsAudio.value === audio) {
      ttsAudio.value = null;
      ttsPaused.value = false;
    }
  };

  try {
    await new Promise((resolve, reject) => {
      audio.onended = () => {
        answering.value = false;
        statusText.value = '点击下方按钮开始通话';
        canRecognize = true;
        releaseAudio();
        if (props.calling) startRecognition();
        resolve();
      };
      audio.onerror = () => {
        statusText.value = '语音播放失败';
        releaseAudio(); // was missing: a failed element stayed in ttsAudio
        resolve();
      };
      audio.play().catch((e) => {
        statusText.value = '语音播放被拦截，请点击页面允许播放';
        reject(e);
      });
    });
  } catch (e) {
    statusText.value = '语音播放失败：' + (e.message || e);
    releaseAudio();
  }
}

/**
 * Hard-stops any in-progress TTS output: pauses and releases the current
 * audio element, clears the answering flag, and aborts a pending TTS
 * network request if one was registered on `window.currentTTSRequest`.
 */
function stopTTS() {
  const audio = ttsAudio.value;
  if (audio) {
    audio.pause();
    audio.currentTime = 0;
    audio.src = ''; // release the media resource
    ttsAudio.value = null;
    ttsPaused.value = false;
  }
  answering.value = false;

  // Cancel a TTS request that is still in flight, if any.
  const pending = window.currentTTSRequest;
  if (pending) {
    pending.abort();
    window.currentTTSRequest = null;
  }
}

// Recognition flow control
/**
 * Starts a recognition pass when nothing else is running: skips out if a
 * pass is active, recognition is locked, or the assistant is processing or
 * speaking. Initializes lazily so the first post-gesture call also works.
 */
function startRecognition() {
  userInitAll(); // make sure the recognizer exists
  if (!recognition) return;
  // Strict state check: never double-start a pass.
  if (recognizing.value || !canRecognize) {
    console.log('语音识别已经在运行中或不可用，无需重复启动');
    return;
  }
  if (processing.value || answering.value) return;
  try {
    // Abort any half-open session before starting a fresh one.
    if (recognition && recognition.abort) recognition.abort();
    recognition.start();
    canRecognize = false; // mark as actively recognizing
  } catch (e) {
    statusText.value = '语音识别启动失败：' + (e.message || e);
    console.error('语音识别启动失败:', e);
    // Reset so the next attempt can retry.
    recognizing.value = false;
    canRecognize = true;
  }
}
/**
 * Stops and fully resets the recognizer, then marks recognition as
 * available again so a later call can start a fresh pass.
 */
function stopRecognition() {
  if (recognition) {
    if (recognizing.value) {
      try {
        recognition.stop();
      } catch (e) {
        console.log('停止语音识别时出错:', e);
      }
    }
    // abort() discards buffered audio and resets the session entirely.
    if (typeof recognition.abort === 'function') {
      recognition.abort();
    }
  }
  canRecognize = true; // back to the "may start recognition" state
}

// Waveform visualization
/**
 * Opens the microphone and attaches an AnalyserNode so the canvas ring can
 * react to input volume. No-op when already running or when the required
 * APIs are unavailable.
 */
function startAudio() {
  if (audioContext) return;
  if (!isSupportGetUserMedia() || !isSupportAudioContext()) return;
  navigator.mediaDevices
    .getUserMedia({ audio: true })
    .then((stream) => {
      mediaStream = stream;
      // iOS requires AudioContext creation to follow a user gesture.
      const Ctx = window.AudioContext || window.webkitAudioContext;
      audioContext = new Ctx();
      analyser = audioContext.createAnalyser();
      audioContext.createMediaStreamSource(stream).connect(analyser);
      dataArray = new Uint8Array(analyser.frequencyBinCount);
      drawCircleWave();
      listening.value = true;
    })
    .catch(() => {
      statusText.value = '录音权限被拒绝或不可用';
    });
}
/**
 * Tears down the waveform pipeline: stops the animation loop, closes the
 * AudioContext, and releases the microphone tracks.
 *
 * Fixes vs. the original: `animationId` is now cleared after cancelling
 * (a stale frame id was kept forever), and the promise returned by
 * `AudioContext.close()` is handled so closing an already-closed context
 * can't surface an unhandled rejection.
 */
function stopAudio() {
  if (animationId) {
    cancelAnimationFrame(animationId);
    animationId = null;
  }
  if (audioContext) {
    // close() is async and rejects if the context is already closed.
    audioContext.close().catch(() => {});
    audioContext = null;
  }
  if (mediaStream) {
    mediaStream.getTracks().forEach((track) => track.stop());
    mediaStream = null;
  }
  listening.value = false;
}
// Draws one frame of the circular waveform and schedules the next frame.
// The ring radius tracks the average microphone amplitude; the color
// switches to a flashing red while `interrupted` is set (barge-in feedback).
function drawCircleWave() {
  if (!waveCanvas.value || !analyser) return;
  const ctx = waveCanvas.value.getContext('2d');
  const w = waveCanvas.value.width;
  const h = waveCanvas.value.height;
  ctx.clearRect(0, 0, w, h);
  analyser.getByteTimeDomainData(dataArray);
  // Average deviation from the 128 midpoint approximates input volume.
  let sum = 0;
  for (let i = 0; i < dataArray.length; i++) sum += Math.abs(dataArray[i] - 128);
  const avg = sum / dataArray.length;
  const baseRadius = w / 6;
  const radius = baseRadius + avg * 0.8;
  ctx.save();
  ctx.beginPath();
  ctx.arc(w / 2, h / 2, radius, 0, 2 * Math.PI);
  // Interrupted: flash red twice; otherwise draw the steady blue ring.
  if (interrupted.value) {
    ctx.strokeStyle = interruptFlashCount % 2 === 0 ? '#ff4d4f' : '#fff';
    ctx.lineWidth = 8;
    ctx.shadowColor = '#ff4d4f';
    ctx.shadowBlur = 16;
  } else {
    ctx.strokeStyle = '#409eff';
    ctx.lineWidth = 6;
    ctx.shadowColor = '#409eff';
    ctx.shadowBlur = 10;
  }
  ctx.stroke();
  ctx.restore();
  // Self-schedule: the loop runs until stopAudio() cancels this handle.
  animationId = requestAnimationFrame(drawCircleWave);
}

onMounted(() => {
  initRecognition();
});

// React to call start/stop from the parent via the `calling` prop.
watch(
  () => props.calling,
  (isCalling) => {
    if (isCalling) {
      // Call started: unlock and arm recognition (initializing lazily).
      canRecognize = true;
      userInitAll();
      startRecognition();
      return;
    }

    // Hang-up: halt every pipeline outright.
    stopTTS();
    stopRecognition();
    stopAudio();

    // Reset all reactive state back to idle.
    recognizing.value = false;
    processing.value = false;
    answering.value = false;
    listening.value = false;
    ttsPaused.value = false;
    ttsAudio.value = null;
    canRecognize = true;

    // Belt-and-braces: abort any TTS request stopTTS() may have missed.
    if (window.currentTTSRequest) {
      window.currentTTSRequest.abort();
      window.currentTTSRequest = null;
    }

    statusText.value = '通话已结束';
    inited = false; // force re-initialization on the next call
  }
);

onBeforeUnmount(() => {
  stopRecognition();
  stopAudio();
  stopTTS();
});
</script>

<style scoped>
/* Fill the available area and center the ring + tip as a column. */
.main {
  flex: 1;
  height: 100%;
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
}
/* Circular dark backdrop the waveform ring is drawn onto. */
.wave-canvas {
  width: 200px;
  height: 200px;
  background: #181c24;
  border-radius: 50%;
  box-shadow: 0 2px 24px #000a;
  margin-bottom: 16px;
  display: block;
}
/* Status / guidance text under the ring. */
.tip {
  color: #aaa;
  font-size: 14px;
}
</style>