
<script setup>
import { ref, onMounted } from 'vue';
import { sendAudioStream, sendText,getAudioFile } from '@/api/server';

// Template refs (bound in <template> via ref="...").
const micBtn = ref(null);    // mic toggle <button>
const textInput = ref(null); // message <textarea>
const status = ref(null);    // status <span> for user feedback

// Recording/analysis state shared by the functions below.
let mediaStream = null;   // MediaStream returned by getUserMedia
let audioContext = null;  // Web Audio context used for volume analysis
let analyser = null;      // AnalyserNode sampling the mic signal
let microphone = null;    // MediaStreamSource feeding the analyser
let mediaRecorder = null; // current MediaRecorder (recreated per segment)
let audioChunks = [];     // reset each time a new recording starts
let isRecording = false;  // true while a voice segment is being captured
let silenceTimer = null;  // pending auto-stop setTimeout id (or null)

// Voice-activity-detection tuning.
const SILENCE_THRESHOLD = -20; // dB: quieter than this counts as silence
const SILENCE_DURATION = 1500; // ms of continuous silence before auto-stop
const CHECK_INTERVAL = 500;    // ms between volume polls in detectVoice()

// Ask for microphone access as soon as the component mounts.
onMounted(() => {
  requestMicPermission();
});

/**
 * Request microphone access from the browser, then build the audio
 * analysis pipeline. The status line mirrors each stage for the user.
 * On denial, the error message is surfaced in the status text.
 */
async function requestMicPermission() {
  if (status.value) status.value.textContent = '正在请求麦克风权限...';
  try {
    mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
    setupAudioProcessing();
    if (status.value) status.value.textContent = '点击【开启监听】开始';
  } catch (err) {
    if (status.value) status.value.textContent = `权限被拒绝: ${err.message}`;
  }
}

// ========== Set up the audio analysis environment ==========
/**
 * Builds the Web Audio analyser graph on the already-acquired
 * `mediaStream`, defines the voice-activity-detection (VAD) loop and
 * the MediaRecorder lifecycle, and binds the mic toggle button.
 *
 * Fixes:
 *  - `silenceTimer` is reset to null everywhere it is cleared or fires,
 *    so a stale (already-fired) timer id can no longer block re-arming
 *    the auto-stop timeout on the next silent stretch.
 *  - `mediaRecorder.stop()` is only invoked while state === 'recording',
 *    avoiding an InvalidStateError when the recorder already stopped.
 */
function setupAudioProcessing() {
  audioContext = new (window.AudioContext || window.webkitAudioContext)();
  analyser = audioContext.createAnalyser();
  microphone = audioContext.createMediaStreamSource(mediaStream);
  microphone.connect(analyser);
  analyser.fftSize = 512;
  const bufferLength = analyser.frequencyBinCount;
  const dataArray = new Uint8Array(bufferLength);

  // Lazily create a fresh MediaRecorder for each voice segment.
  function createRecorder() {
    mediaRecorder = new MediaRecorder(mediaStream, { mimeType: 'audio/webm' });
    mediaRecorder.ondataavailable = (event) => {
      // A single chunk is delivered when the recorder stops.
      if (event.data.size > 0 && isRecording) {
        console.log('记录音频');
        const blob = new Blob([event.data], { type: 'audio/webm' });
        sendToBackend('audio', blob);
        isRecording = false;
        if (status.value) status.value.textContent = '已发送音频片段';
      }
    };
    mediaRecorder.onstop = () => {
      stopSilenceDetection();
    };
  }

  // Clear and disarm the pending auto-stop timer, if any.
  function clearSilenceTimer() {
    if (silenceTimer) {
      clearTimeout(silenceTimer);
      silenceTimer = null;
    }
  }

  // ===== Voice-activity-detection loop =====
  function detectVoice() {
    analyser.getByteFrequencyData(dataArray);
    // Average bin magnitude (0-255) converted to a rough dB figure.
    // NOTE(review): volume 0 yields -Infinity, which safely compares
    // as "below SILENCE_THRESHOLD".
    const volume = dataArray.reduce((a, b) => a + b) / dataArray.length;
    const dB = 20 * (Math.log(volume / 255) / Math.LN10);
    console.log('当前分贝' + dB);
    console.log('是否录音中' + isRecording);

    if (dB > SILENCE_THRESHOLD && !isRecording) {
      // Voice detected: start a new recording segment.
      audioChunks = [];
      createRecorder();
      mediaRecorder.start();
      isRecording = true;
      if (status.value) status.value.textContent = '🎙 正在录音...';
      clearSilenceTimer();
    } else if (dB <= SILENCE_THRESHOLD && isRecording) {
      console.log('当前状态: 静音中');
      // Arm the auto-stop timer once; it fires after SILENCE_DURATION
      // of uninterrupted silence.
      if (!silenceTimer) {
        silenceTimer = setTimeout(() => {
          console.log('mediaRecorder: 自动停止');
          silenceTimer = null; // timer has fired; allow re-arming later
          if (mediaRecorder && mediaRecorder.state === 'recording') {
            mediaRecorder.stop();
          }
        }, SILENCE_DURATION);
      }
    } else if (isRecording) {
      // Voice resumed before the timer fired: disarm the auto-stop.
      clearSilenceTimer();
    }

    // Keep polling while the button is in the "listening" state.
    if (micBtn.value && micBtn.value.classList.contains('listening')) {
      setTimeout(() => {
        if (micBtn.value && micBtn.value.classList.contains('listening')) {
          detectVoice();
        }
      }, CHECK_INTERVAL);
    }
  }

  // Stop any in-flight recording and reset all detection state.
  function stopSilenceDetection() {
    clearSilenceTimer();
    if (mediaRecorder && mediaRecorder.state === 'recording') {
      mediaRecorder.stop();
    }
    isRecording = false;
  }

  // Toggle listening on the mic button.
  if (micBtn.value) {
    micBtn.value.onclick = () => {
      if (micBtn.value.classList.contains('listening')) {
        micBtn.value.textContent = '🎤 开启监听';
        micBtn.value.classList.remove('listening');
        stopSilenceDetection();
        if (status.value) status.value.textContent = '已停止监听';
      } else {
        micBtn.value.textContent = '🛑 关闭监听';
        micBtn.value.classList.add('listening');
        if (status.value) status.value.textContent = '监听中...';
        detectVoice();
      }
    };
  }
}

/**
 * Send the trimmed textarea content to the backend as a text message,
 * then clear the input. Alerts the user when the field is empty.
 */
const handleSendText = () => {
  const input = textInput.value;
  const text = input ? input.value.trim() : '';
  if (!text) {
    alert('请输入文字');
    return;
  }
  sendToBackend('text', text);
  input.value = '';
};

/**
 * Keyboard handler for the textarea: plain Enter submits the message
 * instead of inserting a newline; Shift+Enter keeps the default
 * newline behavior.
 */
const handleKeyPress = (e) => {
  if (e.key !== 'Enter' || e.shiftKey) return;
  e.preventDefault(); // suppress the newline the textarea would insert
  handleSendText();
};

/**
 * Fetch an audio file from the backend and play it through a detached
 * <audio> element.
 *
 * @param {string} audioApi - audio resource identifier passed to getAudioFile.
 *
 * Fix: the Blob object URL is now revoked on every terminal outcome —
 * playback ended, media error, or play() rejection — instead of only
 * `onended`, so the Blob memory no longer leaks when playback fails.
 */
async function playAudio(audioApi) {
  try {
    const response = await getAudioFile(audioApi);
    const blob = response.data; // axios puts the response Blob on .data

    // Guard: the endpoint must actually return an audio MIME type.
    if (!blob.type.startsWith('audio/')) {
      console.error('返回的不是音频文件');
      return;
    }

    const audioUrl = URL.createObjectURL(blob);
    const audio = new Audio(audioUrl);

    // Release the object URL exactly once on any terminal state.
    const release = () => URL.revokeObjectURL(audioUrl);
    audio.onended = release;
    audio.onerror = release;

    audio.play().catch(err => {
      console.error('播放失败:', err);
      release();
    });
  } catch (error) {
    console.error('获取音频失败:', error);
  }
}

/**
 * Dispatch user input to the backend and handle the JSON reply.
 *
 * @param {'audio'|'text'} type - which API endpoint to use.
 * @param {Blob|string} data - webm audio Blob or plain text message.
 *
 * Reply handling: `response.error` → alert; `response.data.audio_url`
 * → play the returned audio; otherwise alert that no audio came back.
 *
 * Fix: the API calls are now wrapped in try/catch — previously a
 * network/API rejection escaped as an unhandled promise rejection,
 * since every caller invokes this fire-and-forget.
 */
async function sendToBackend(type, data) {
  console.log(`发送${type}数据到后端`);
  let result = null;
  try {
    if (type === 'audio') {
      result = await sendAudioStream(data);
    } else if (type === 'text') {
      result = await sendText(data);
    }
  } catch (err) {
    console.error('请求失败:', err);
    alert('请求失败，请稍后重试');
    return;
  }
  if (result && result.data) {
    const response = result.data;
    console.log(response);
    if (response.error) {
      alert(`错误: ${response.error}`);
    } else if (response.data && response.data.audio_url) {
      await playAudio(response.data.audio_url); // play the returned audio
    } else {
      alert('未返回音频地址');
    }
  } else {
    alert('未知错误');
  }
}


</script>

<template>
  <div class="container">
    <h2>语音或文字上传（自动分段）</h2>
    <!-- 麦克风按钮 -->
    <button ref="micBtn" id="micBtn">🎤 开启监听</button>
    <span class="status" ref="status" id="status">等待操作...</span>
    <!-- 文字输入框 -->
    <textarea
      ref="textInput"
      id="textInput"
      placeholder="输入文字后按回车或点击发送"
      @keypress="handleKeyPress"
    ></textarea>
    <button id="sendTextBtn" @click="handleSendText">📤 发送文字</button>
  </div>
</template>

<style scoped>
/* NOTE(review): inside a scoped style block Vue rewrites selectors with
   a per-component data attribute, so this `body` rule will not match
   the real <body> element — confirm whether it should move to a global
   stylesheet. */
body {
  font-family: Arial, sans-serif;
  padding: 20px;
  background-color: #f5f5f5;
}
.container {
  margin: 0;
  background: white;
  padding: 20px;
  border-radius: 8px;
  box-shadow: 0 2px 10px rgba(0,0,0,0.1);
  width: 100%;
}
h2 {
  text-align: center;
  color: #333;
}
textarea {
  width: 100%;
  height: 100px;
  padding: 10px;
  margin: 10px 0;
  border: 1px solid #ccc;
  border-radius: 4px;
  font-size: 16px;
  resize: vertical;
}
button {
  background-color: #007bff;
  color: white;
  padding: 10px 15px;
  border: none;
  border-radius: 4px;
  cursor: pointer;
  font-size: 16px;
  margin-right: 10px;
}
button:disabled {
  background-color: #cccccc;
}
.status {
  margin-top: 10px;
  font-size: 14px;
  color: #666;
}
/* Mic button state while listening is enabled (toggled in JS). */
.listening {
  background-color: #28a745;
}
/* Mic button state while a segment is being recorded; shows a
   blinking red dot via the ::after pseudo-element. */
.recording {
  background-color: #dc3545;
}
.recording::after {
  content: ' ●';
  color: red;
  animation: blink 1.2s infinite;
}
@keyframes blink {
  0%, 100% { opacity: 1; }
  50% { opacity: 0.2; }
}
</style>
