
<template>
  <div class="container">
    <el-header height="60px" style="text-align: center; line-height: 60px; font-size: 24px; font-weight: bold;">
      AI VTuber Chat Room
    </el-header>
    
    
    <canvas ref="canvasRef" style="width: 100%; height: 600px;"></canvas>
    <textarea ref="textInputRef" placeholder="Type your message here..." style="width: 100%; height: 100px; margin-top: 10px;"></textarea>
    <el-button type="primary" @click="process_send_text()" style="margin-top: 10px;">Send Text</el-button>
    <el-button type="primary" @click="stop_vad()" style="margin-top: 10px;">stop_vad</el-button>
    <el-button type="primary" @click="start_vad()" style="margin-top: 10px;">start_vad</el-button>
    显示vad状态: {{ vad_running ? '运行中' : '已停止' }}
    是否启用打断功能(会在下一次说话和发送文本时生效):
    <el-switch v-model="isInterruptEnabled" active-color="#13ce66" inactive-color="#ff4949"></el-switch>
    是否暂停播放，并清空列表
    <el-button type="danger" @click="interruptCurrentSpeech()" style="margin-top: 10px;">Interrupt Current Speech</el-button>

  </div>
</template>

<script setup>
import { ref, onMounted, onBeforeUnmount } from 'vue';
import { sendAudioBlob,sendText,getAudioFile } from '@/api/server';

import { MicVAD } from "@ricky0123/vad-web"

import * as PIXI from 'pixi.js'
import { Live2DModel } from 'pixi-live2d-display/cubism4'

window.PIXI = PIXI // pixi-live2d-display expects PIXI on the global object

// Template refs for the Live2D canvas and the message textarea.
const canvasRef = ref(null) 
const textInputRef = ref(null)
let model = null;  // Live2D model instance; set by live2d(), destroyed on unmount
let myvad = null;  // MicVAD instance; set asynchronously by vad()
let vad_running = ref(false); // whether voice-activity detection is listening

// Monotonically increasing id for outgoing requests. The first request gets
// id 1 (pre-increment), so the "nothing played yet" value below must be 0:
// tryPlayNext() always looks for lastPlayedRequestId + 1.
let currentRequestId = 0;
const responseQueue = new Map(); // requestId -> audio Blob awaiting playback
// Fix: was -1, which made tryPlayNext() wait forever for request id 0 — an id
// that can never exist since ids start at 1 — so no audio could ever play.
let lastPlayedRequestId = 0;   // id of the last request whose audio finished


// The Audio element currently playing, or null when idle.
let currentAudio = null;
// Whether a new utterance / text submission interrupts the current speech.
// NOTE(review): this is bound with v-model on <el-switch> in the template;
// a plain `let` is not reactive, so the switch UI will not track toggles —
// consider ref(true) (the guard in interruptCurrentSpeech() would then need
// `.value` too). Left as-is here to keep this change self-contained.
let isInterruptEnabled = true;

// On mount: build the PIXI/Live2D scene, then initialize mic VAD (async).
onMounted(() => {
    live2d();
    vad();
});

// On teardown: stop mic capture and playback, and free the Live2D model.
// Fix: the original only destroyed the model, leaving the microphone open
// (MicVAD still listening) and any in-flight audio playing after unmount.
onBeforeUnmount(() => {
    if (myvad) {
        myvad.pause(); // stop listening / release the mic indicator
        myvad = null;
    }
    if (currentAudio) {
        currentAudio.pause();
        currentAudio = null;
    }
    if (model) {
        model.destroy();
        model = null;
    }
});

// Initialize microphone voice-activity detection. When an utterance ends,
// the captured samples are framed as:
//   [4-byte little-endian header length | JSON header | raw float32 PCM]
// and posted to the server; the reply is queued for ordered playback.
const vad = async() => {
  myvad = await MicVAD.new({
    baseAssetPath: "/",
    onnxWASMBasePath: "/",
    positiveSpeechThreshold: 0.6,   // probability above which a frame counts as speech
    negativeSpeechThreshold: 0.55,  // probability below which a frame counts as silence
    minSpeechMs: 800,               // minimum utterance length (ms) to fire onSpeechEnd
    onSpeechEnd: async (audio) => {
        interruptCurrentSpeech(); // cut off whatever is currently playing
        const requestId = ++currentRequestId; // globally increasing request id

        // JSON header describing the raw samples that follow.
        const encoder = new TextEncoder();
        const header = encoder.encode(JSON.stringify({
          sampleRate: 16000,
          channels: 1,
          length: audio.length
        }));

        // 4-byte little-endian length prefix for the header.
        const headerLenBuf = new ArrayBuffer(4);
        const headerLenView = new DataView(headerLenBuf);
        headerLenView.setUint32(0, header.length, true);

        // Fix: pass the Float32Array view itself rather than audio.buffer —
        // the underlying ArrayBuffer can be larger than the view (byteOffset
        // or shorter length), which would append garbage bytes after the PCM.
        const parts = [
          headerLenBuf,
          header,
          audio
        ];

        const fullBuffer = new Blob(parts, { type: 'application/octet-stream' });
        console.log('已接收到音频数据，正在发送到服务器...');
        const result = await sendAudioBlob(fullBuffer);
        await handleResponse(requestId,result);
    },
  })
}

// Stop the currently playing audio and drop every queued response so that
// only requests issued after this call will ever be played.
function interruptCurrentSpeech() {
    if (!isInterruptEnabled) return; // interruption disabled via the UI switch

    if (currentAudio) {
        currentAudio.pause();
        currentAudio = null;
        console.log('[打断] 当前音频已停止');
    }

    // Discard every response that has not been played yet.
    for (const [id] of responseQueue) {
        if (id > lastPlayedRequestId) {
            responseQueue.delete(id);
            console.log(`[清理] 请求 #${id} 已从队列移除`);
        }
    }

    // Fix: mark all requests issued so far as handled. Without this,
    // tryPlayNext() keeps waiting for lastPlayedRequestId + 1 — an id that
    // was just discarded above — so a late server reply to a cancelled
    // request could still play, while genuinely new responses never would.
    lastPlayedRequestId = currentRequestId;
}

// 打断当前播放，清空队列
// function interruptCurrentSpeech() {
//     currentAudio?.pause();
//     currentAudio = null;
//     console.log('[打断] 当前音频已停止');
//     responseQueue.clear();
//     lastPlayedRequestId = -1; // 重置最后播放的 ID
//     currentRequestId = 0;
//     console.log('[清理] 音频队列已清空');  

// }

// Build the PIXI application on the bound canvas, then load and place the
// Haru Live2D model on its stage.
const live2d = async() => {
    const pixiApp = new PIXI.Application({
        view: canvasRef.value,
        autoStart: true,
        backgroundAlpha: 0,
        resizeTo: window,
        antialias: true,
        resolution: window.devicePixelRatio || 1
      })

    model = await Live2DModel.from('/live2d/Haru/Haru.model3.json')
    pixiApp.stage.addChild(model)

    // Shrink the model and nudge it toward the left fifth of the renderer.
    const scaleFactor = 0.5
    model.scale.set(scaleFactor)
    model.x = pixiApp.renderer.width / 5
}

// Trigger a facial expression on the Live2D model.
// Fix: no-op instead of crashing when called before the model has loaded
// (model stays null until live2d() resolves).
function expressions(type) {
    if (model) {
        model.expression(type);
    }
}

// Pause voice-activity detection.
// Fix: guard myvad — clicking stop before vad() has resolved used to throw.
function stop_vad(){
    if (myvad) {
        myvad.pause()
    }
    vad_running.value = false
}
// Resume voice-activity detection.
// Fix: guard myvad — clicking start before vad() has resolved used to throw,
// and vad_running is only flagged when listening actually started.
function start_vad(){
    if (myvad) {
        myvad.start()
        vad_running.value = true
    }
}

// Send the textarea contents to the server and queue the spoken reply.
async function process_send_text() {
    const text = textInputRef.value.value;
    // Fix: validate before interrupting — previously submitting an empty
    // message still stopped whatever audio was playing.
    if (text.trim() === '') {
        alert('Please enter some text.');
        return;
    }
    interruptCurrentSpeech(); // cut off whatever is currently playing
    const requestId = ++currentRequestId;
    const result = await sendText(text);
    await handleResponse(requestId,result);
}

// Unpack a server reply: on success, fetch the audio blob, queue it under
// its request id, and attempt to play whatever is next in order.
async function handleResponse(requestId,result) {
    if (!result || !result.data) {
        alert('未知错误');
        return;
    }

    const response = result.data;
    console.log(response);

    if (response.error) {
        alert(`错误: ${response.error}`);
        return;
    }

    if (!response.data || !response.data.audio_url) {
        alert('未返回音频地址');
        return;
    }

    const audioBlob = await getAudioFileBlob(response.data.audio_url);
    if (!audioBlob) return;

    // Cache the blob, then try to play starting at lastPlayedRequestId + 1.
    responseQueue.set(requestId, audioBlob);
    console.log('音频已添加到队列，当前队列长度:', responseQueue.size);
    tryPlayNext();
}


// Play the next queued audio in strict request order. Every completed (or
// failed) playback advances lastPlayedRequestId and chains to the next id.
async function tryPlayNext() {
    // Fix: never start a second Audio while one is already playing.
    // handleResponse() calls this whenever any response arrives, and the
    // currently playing blob is only removed from the queue on completion,
    // so without this guard the same audio could be started twice in
    // parallel and overlap itself.
    if (currentAudio) return;

    const nextId = lastPlayedRequestId + 1;
    const audioBlob = responseQueue.get(nextId);
    if (!audioBlob) return; // the next audio in order has not arrived yet

    const audioUrl = URL.createObjectURL(audioBlob);
    const audio = new Audio(audioUrl);
    currentAudio = audio; // remember what is playing so interrupts can stop it

    // Shared completion path: release the object URL, drop the queue entry,
    // clear currentAudio (unless an interrupt already replaced it), and mark
    // this request as played.
    const finish = () => {
        URL.revokeObjectURL(audioUrl);
        responseQueue.delete(nextId);
        if (currentAudio === audio) currentAudio = null;
        lastPlayedRequestId = nextId;
    };

    audio.onended = () => {
        finish();
        console.log('音频播放完成，当前队列长度:', responseQueue.size);
        tryPlayNext();
    };

    audio.onerror = (err) => {
        finish();
        console.error(`[播放错误] 请求 #${nextId}`, err);
        tryPlayNext(); // a failure still counts as handled — keep the chain moving
    };

    try {
        await audio.play();
        console.log(`[开始播放] 请求 #${nextId}`);
    } catch (err) {
        console.error(`[播放失败] 请求 #${nextId}`, err);
        finish();
        tryPlayNext(); // advance on failure so the queue never stalls
    }
}



// Fetch an audio file from the server and return it as a Blob, or undefined
// when the request fails or the payload is not audio.
async function getAudioFileBlob(audioApi) {
  try {
    const response = await getAudioFile(audioApi);
    const blob = response && response.data; // axios delivers the Blob in .data

    // Fix: guard against a missing body / untyped payload — previously a
    // null response.data threw and was misreported as a network failure.
    if (!blob || typeof blob.type !== 'string' || !blob.type.startsWith('audio/')) {
      console.error('返回的不是音频文件');
      return;
    }

    console.log('成功获取音频文件');
    return blob;
  } catch (error) {
    console.error('获取音频失败:', error);
  }
}


</script>



<style scoped>
.container {
  display: flex;
  flex-direction: column;
  height: 100vh; /* 占满全屏高度 */
  margin: 0;
  padding: 0;
  overflow: hidden;
}
</style>
