<template>
  <div class="audio-recorder">
    <h3>语音录音转文本</h3>

    <div class="controls">
      <!-- Record / stop toggle -->
      <button
        type="button"
        @click="toggleRecording"
        :disabled="isProcessing"
        :class="{ recording: isRecording, disabled: isProcessing }"
      >
        <i class="fas" :class="isRecording ? 'fa-stop' : 'fa-microphone'" aria-hidden="true"></i>
        {{ isRecording ? '停止录音' : '开始录音' }}
      </button>

      <!-- Transcription trigger: enabled only once a recording exists -->
      <button
        type="button"
        @click="transcribeAudio"
        :disabled="!hasRecording || isProcessing"
        class="transcribe-btn"
      >
        <i class="fas fa-magic" aria-hidden="true"></i> 转换为文本
      </button>
    </div>

    <!-- Playback preview of the last finished recording -->
    <div v-if="audioUrl" class="audio-preview">
      <h4>录音预览:</h4>
      <audio :src="audioUrl" controls></audio>
    </div>

    <!-- Recognized text, once available -->
    <div v-if="transcription" class="transcription-result">
      <h4>转换结果:</h4>
      <p>{{ transcription }}</p>
    </div>

    <!-- User-visible error message -->
    <div v-if="error" class="error-message">
      <i class="fas fa-exclamation-circle" aria-hidden="true"></i> {{ error }}
    </div>
  </div>
</template>

<script setup>
import { ref, onBeforeUnmount } from 'vue';

// Reactive UI state.
const isRecording = ref(false);   // true while the MediaRecorder is capturing
const isProcessing = ref(false);  // true while a transcription is in flight
const hasRecording = ref(false);  // a finished recording exists and can be transcribed
const audioUrl = ref('');         // object URL of the recorded blob (for <audio> playback)
const transcription = ref('');    // recognized text shown to the user
const error = ref('');            // user-visible error message

// Recording internals.
let mediaRecorder = null;         // active MediaRecorder instance, null when idle
const audioChunks = ref([]);      // Blob chunks collected during recording (never reassigned, so const)
let stream = null;                // MediaStream from getUserMedia, held so tracks can be stopped

// Flip between recording and idle depending on the current state.
const toggleRecording = async () => {
  if (!isRecording.value) {
    await startRecording();
    return;
  }
  stopRecording();
};

// Start capturing microphone audio into a fresh MediaRecorder session.
// Resets previous results and revokes the old object URL so repeated
// recordings do not leak blob URLs.
const startRecording = async () => {
  try {
    // Reset state from any previous run.
    error.value = '';
    transcription.value = '';
    audioChunks.value = [];
    if (audioUrl.value) {
      // Release the previous recording's object URL (it would leak otherwise).
      URL.revokeObjectURL(audioUrl.value);
      audioUrl.value = '';
      hasRecording.value = false;
    }

    // Request microphone access: mono, 44.1 kHz, with echo cancellation.
    stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        sampleRate: 44100,
        channelCount: 1,
        echoCancellation: true
      }
    });

    mediaRecorder = new MediaRecorder(stream);

    // Attach the data handler BEFORE start() so no chunk can be missed.
    mediaRecorder.ondataavailable = (event) => {
      if (event.data && event.data.size > 0) {
        audioChunks.value.push(event.data);
      }
    };

    mediaRecorder.start();
    isRecording.value = true;
    isProcessing.value = false;
  } catch (err) {
    error.value = `无法访问麦克风: ${err.message}`;
    console.error('录音错误:', err);
  }
};

// Stop the active recording and expose the result as a playable object URL.
const stopRecording = () => {
  // Guard against a missing or already-finalized recorder (stop() on an
  // inactive recorder throws InvalidStateError).
  if (!mediaRecorder || mediaRecorder.state === 'inactive') return;

  // Attach handlers BEFORE calling stop(): dataavailable/stop fire
  // asynchronously after stop(), and the original attached them afterwards,
  // which only worked by relying on event-dispatch timing.
  mediaRecorder.ondataavailable = (event) => {
    if (event.data && event.data.size > 0) {
      audioChunks.value.push(event.data);
    }
  };

  mediaRecorder.onstop = () => {
    // Use the recorder's actual container type (webm/ogg depending on the
    // browser); the previous hard-coded 'audio/wav' mislabeled the data,
    // since MediaRecorder does not produce WAV.
    const mimeType = mediaRecorder.mimeType || 'audio/webm';
    const audioBlob = new Blob(audioChunks.value, { type: mimeType });
    audioUrl.value = URL.createObjectURL(audioBlob);
    hasRecording.value = true;
    isProcessing.value = false;

    // Release the microphone only after the final chunk has been delivered.
    if (stream) {
      stream.getTracks().forEach((track) => track.stop());
      stream = null;
    }
  };

  mediaRecorder.stop();
  isRecording.value = false;
};

// Attempt to transcribe the recording to text via the Web Speech API.
//
// NOTE(review): SpeechRecognition only transcribes LIVE microphone input — it
// cannot consume a recorded blob, so recognition.start() below listens to the
// mic again rather than decoding audioUrl. A production implementation should
// upload the recorded blob to a server-side speech-to-text service instead.
const transcribeAudio = async () => {
  if (!audioUrl.value) return;

  try {
    isProcessing.value = true;
    error.value = '';
    transcription.value = '正在转换...';

    // Feature-detect the (possibly prefixed) Web Speech API.
    const Recognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    if (!Recognition) {
      throw new Error('您的浏览器不支持语音识别功能');
    }

    const recognition = new Recognition();
    recognition.lang = 'zh-CN'; // recognize Mandarin Chinese
    recognition.interimResults = false;
    recognition.maxAlternatives = 1;

    recognition.onresult = (event) => {
      transcription.value = event.results[0][0].transcript;
      isProcessing.value = false;
    };

    // Do NOT throw here: this callback runs long after the surrounding
    // try/catch has returned, so a throw would be an unhandled error instead
    // of reaching the catch below. Report the error through state directly.
    recognition.onerror = (event) => {
      error.value = `识别错误: ${event.error}`;
      transcription.value = '';
      isProcessing.value = false;
      console.error('转文本错误:', event.error);
    };

    recognition.onend = () => {
      isProcessing.value = false;
      // Clear the placeholder if recognition ended without producing a result.
      if (transcription.value === '正在转换...') {
        transcription.value = '';
      }
    };

    recognition.start();
  } catch (err) {
    error.value = err.message;
    transcription.value = '';
    isProcessing.value = false;
    console.error('转文本错误:', err);
  }
};

// Release hardware and object URLs when the component is torn down.
onBeforeUnmount(() => {
  if (isRecording.value) stopRecording();
  if (audioUrl.value) URL.revokeObjectURL(audioUrl.value);
});
</script>

<style scoped>
/* Card container */
.audio-recorder {
  max-width: 600px;
  margin: 20px auto;
  padding: 20px;
  border-radius: 8px;
  box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
  background-color: #fff;
}

.controls {
  display: flex;
  gap: 10px;
  margin: 20px 0;
}

button {
  padding: 10px 20px;
  border: none;
  border-radius: 4px;
  cursor: pointer;
  font-size: 16px;
  display: flex;
  align-items: center;
  gap: 8px;
  transition: all 0.2s;
}

/* Lift on hover, but never for disabled buttons. Keyed on the :disabled
   attribute (with the legacy .disabled class kept), because the transcribe
   button only ever receives the attribute, not the class. */
button:not(:disabled):not(.disabled):hover {
  transform: translateY(-2px);
}

button.recording {
  background-color: #ff4d4f;
  color: white;
}

/* Default (idle, enabled) button look. */
button:not(.recording):not(:disabled):not(.disabled) {
  background-color: #1890ff;
  color: white;
}

/* Disabled look. The previous version matched only the .disabled class, so
   the transcribe button — which gets only the :disabled attribute — stayed
   green while disabled. */
button:disabled,
button.disabled {
  background-color: #ccc;
  color: #666;
  cursor: not-allowed;
}

/* Green accent for the transcribe button while actionable. The previous
   !important also overrode the disabled grey; higher specificity plus source
   order makes it win over the blue default without !important. */
button.transcribe-btn:not(:disabled):not(.disabled) {
  background-color: #52c41a;
}

.audio-preview {
  margin: 20px 0;
  padding: 15px;
  background-color: #f5f5f5;
  border-radius: 4px;
}

.transcription-result {
  margin: 20px 0;
  padding: 15px;
  background-color: #f0f7ff;
  border-radius: 4px;
  border-left: 4px solid #1890ff;
}

.error-message {
  color: #ff4d4f;
  margin: 10px 0;
  padding: 10px;
  background-color: #fff2f0;
  border-radius: 4px;
  display: flex;
  align-items: center;
  gap: 8px;
}
</style>
