<template>
<!-- This component is likewise a UI-less background service -->
<div style="display: none;"></div>
</template>
<script setup lang="ts">
import { onMounted, onUnmounted, ref, nextTick } from 'vue'
import { webSocketAudioService } from '../services/webSocketAudioService'
import { syncService } from '../services/syncService';
import { AppConfig } from '../services/config';

// --- Configuration ---
const SAMPLE_RATE = AppConfig.ai.sampleRate; // Must match the sample rate of the audio sent by the backend!
const CHANNELS = AppConfig.ai.channels; // Mono per config comment — NOTE(review): playback below only fills channel 0; confirm this is always 1

// --- Reactive state ---
const audioContext = ref<AudioContext | null>(null);
// FIFO of float32 PCM chunks waiting to be scheduled for playback
const audioQueue = ref<ArrayBuffer[]>([]);
// True while the schedulePlayback loop is draining the queue
const isPlaying = ref(false);
// Timestamp (AudioContext time) at which the next chunk should start, for gapless playback
let nextStartTime = 0;
// Counter used only for logging received chunks
let chunkCounter = 0;

// --- 初始化与清理 ---
onMounted(async () => {
  console.log('AudioStreamPlayer component mounted.');

  // An AudioContext may only be created after the first user gesture,
  // so defer all setup until a click or keypress occurs.
  const initialize = () => {
    initAudioContext();
    const clientId = (Math.floor(Math.random() * 100) + 1).toString();
    const wsUrl = `${AppConfig.webSocket.baseUrl}/ai/chat?clientId=${clientId}`;
    webSocketAudioService.connect(wsUrl);
    webSocketAudioService.onAudioData(handleAudioData);

    // Whichever event fired first, also drop the other one-shot listener.
    document.removeEventListener('click', initialize);
    document.removeEventListener('keydown', initialize);
  };

  document.addEventListener('click', initialize, { once: true });
  document.addEventListener('keydown', initialize, { once: true });
});

onUnmounted(() => {
  console.log('AudioStreamPlayer component unmounted.');
  // Release the audio device and reset playback state.
  const ctx = audioContext.value;
  if (ctx) {
    ctx.close();
    audioContext.value = null;
  }
  audioQueue.value = [];
  isPlaying.value = false;
});

// --- 音频处理核心逻辑 ---

// Lazily create the AudioContext; safe to call multiple times (idempotent).
const initAudioContext = () => {
  if (audioContext.value) return;
  try {
    // Fall back to the prefixed constructor for older WebKit browsers.
    const Ctor = window.AudioContext || (window as any).webkitAudioContext;
    const ctx: AudioContext = new Ctor({ sampleRate: SAMPLE_RATE });
    audioContext.value = ctx;
    console.log(`AudioContext initialized with sample rate ${ctx.sampleRate}.`);
    // Browsers may start the context suspended until a user gesture.
    if (ctx.state === 'suspended') {
      ctx.resume();
    }
  } catch (e) {
    console.error('Failed to initialize AudioContext:', e);
  }
};

// 从 service 收到音频数据
const handleAudioData = (data: ArrayBuffer) => {
if (!audioContext.value || data.byteLength === 0) return;

// 检查是否是当前会话的音频数据
if (syncService.state.currentSyncId && !syncService.state.isAudioReady) {
syncService.signalAudioReady(syncService.state.currentSyncId);
}

// ===================== 新增日志点 1: 接收数据 =====================
chunkCounter++;
console.log(
`[接收] Chunk #${chunkCounter}: 收到 ${data.byteLength} 字节的原始音频数据。`
);
// ================================================================

// 如果是 int16 数据，需要转换为 float32
const pcmData = new Int16Array(data);
const float32Data = new Float32Array(pcmData.length);
for (let i = 0; i < pcmData.length; i++) {
float32Data[i] = pcmData[i] / 32768.0; // 归一化到 -1.0 到 1.0
}

audioQueue.value.push(float32Data.buffer);

// ===================== 新增日志点 2: 更新队列 =====================
console.log(`[队列] 音频队列长度现在是: ${audioQueue.value.length}`);
// ================================================================

if (!isPlaying.value) {
// nextTick(() => {
// schedulePlayback();
// });
schedulePlayback();
}
};

// 调度并播放音频队列
const schedulePlayback = () => {
  if (!audioContext.value || audioQueue.value.length === 0) {
    isPlaying.value = false;
    return;
  }

  isPlaying.value = true;

  if (audioContext.value.state === 'suspended') {
    audioContext.value.resume().then(() => schedulePlayback());
    return;
  }

  const dataBuffer = audioQueue.value.shift()!;
  const float32Data = new Float32Array(dataBuffer);
  const frameCount = float32Data.length / CHANNELS;

  const audioBuffer = audioContext.value.createBuffer(CHANNELS, frameCount, SAMPLE_RATE);
  audioBuffer.copyToChannel(float32Data, 0);

  const source = audioContext.value.createBufferSource();
  source.buffer = audioBuffer;

  const gainNode = audioContext.value.createGain();
  gainNode.connect(audioContext.value.destination);
  source.connect(gainNode);

  const currentTime = audioContext.value.currentTime;
  const startTime = Math.max(currentTime, nextStartTime);
  const duration = audioBuffer.duration;
  
  // ===================== 优化：使用指数曲线 (setTargetAtTime) =====================
  const FADE_DURATION = 0.003; // 3毫秒，可以微调

  // timeConstant 控制曲线的陡峭程度，值越小，曲线越陡，变化越快
  // 通常设置为 FADE_DURATION 的 1/5 到 1/10 左右
  const timeConstant = FADE_DURATION / 5;

  // 1. 设置初始音量为接近 0 的一个极小值，以避免 setTargetAtTime 从 0 开始的突变
  gainNode.gain.setValueAtTime(0.0001, startTime);
  
  // 2. 淡入：从 startTime 开始，指数逼近 1.0
  gainNode.gain.setTargetAtTime(1.0, startTime, timeConstant);
  
  // 3. 淡出：在音频块结束前 FADE_DURATION 开始，指数逼近 0
  const fadeOutStartTime = startTime + duration - FADE_DURATION;
  if (fadeOutStartTime > startTime) { // 确保音频块足够长以执行淡出
      // (可选) 在淡出开始前，取消之前的 setTargetAtTime 计划，确保音量为 1
      gainNode.gain.cancelScheduledValues(fadeOutStartTime);
      gainNode.gain.setValueAtTime(1.0, fadeOutStartTime);
      // 执行淡出
      gainNode.gain.setTargetAtTime(0.0001, fadeOutStartTime, timeConstant);
  }
  // ========================================================================
  
  source.start(startTime);
  nextStartTime = startTime + duration;

  source.onended = () => {
    source.disconnect();
    gainNode.disconnect();
    schedulePlayback();
  };
};
</script>