<template>
  <!-- Voice recorder UI: a round toggle button that starts/stops
       recording, and a status line showing progress / errors beneath it. -->
  <div class="audio-recorder">
    <div class="record-button-container">
      <button
        :class="['record-button', { 'recording': message?.type === 'recording' }]"
        @click="toggleRecording"
        :disabled="!isSupported"
      >
        <span class="record-icon"></span>
      </button>
    </div>
    <!-- Status text; an inline spinner is shown for transient 'info' states. -->
    <p class="status-message" :class="getStatusClass">
      <span v-if="message?.type === 'info'" class="spinner"></span>
      {{ getStatusMessage }}
    </p>
  </div>
</template>

<script setup>
import { ref, onMounted, computed } from 'vue';

// Whether audio is currently being captured
const isRecording = ref(false);
// Status info shown under the button: { type: 'info' | 'success' | 'error' | 'recording', message: string }
const message = ref(null);
// Whether the browser supports (and permitted) microphone recording
const isSupported = ref(false);
// MediaRecorder instance for the active recording session
const mediaRecorder = ref(null);
// Raw audio Blob chunks collected while recording
const audioChunks = ref([]);
// speech-to-text recognition result
const recognizedText = ref('');
// AI chat reply text
const aiResponse = ref('');

// Probe for microphone support once on mount. Requesting a stream here
// surfaces the permission prompt up front instead of on the first click.
onMounted(async () => {
  try {
    if (!navigator.mediaDevices?.getUserMedia) {
      throw new Error('mediaDevices API unavailable');
    }
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    // Release the probe stream immediately — the original leaked it, so the
    // browser's mic-in-use indicator stayed on forever.
    stream.getTracks().forEach((track) => track.stop());
    isSupported.value = true;
  } catch (err) {
    console.warn('Microphone unavailable:', err);
    isSupported.value = false;
  }
});

// Record-button click handler: flip between start and stop.
const toggleRecording = () => {
  if (!isRecording.value) {
    startRecording();
    return;
  }
  stopRecording();
};

// CSS class for the status line: 'error' when recording is unsupported,
// otherwise it mirrors the current message type ('' when no message).
const getStatusClass = computed(() =>
  isSupported.value ? (message.value?.type || '') : 'error'
);

// Text for the status line; falls back to an "unsupported browser" notice.
const getStatusMessage = computed(() =>
  isSupported.value ? (message.value?.message || '') : '您的浏览器不支持录音功能'
);

// Begin capturing microphone audio with MediaRecorder. When recording stops,
// the collected chunks are decoded and re-encoded as 16-bit PCM WAV (the
// format the speech-to-text backend expects), then uploaded.
const startRecording = async () => {
  try {
    message.value = { type: 'recording', message: '正在录音...' };
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: {
        sampleRate: 16000,
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true
      }
    });

    mediaRecorder.value = new MediaRecorder(stream);
    audioChunks.value = [];

    mediaRecorder.value.ondataavailable = (event) => {
      audioChunks.value.push(event.data);
    };

    mediaRecorder.value.onstop = async () => {
      // decodeAudioData needs a live AudioContext; close it when done so we
      // don't leak one context per recording (browsers cap concurrent contexts).
      const audioContext = new AudioContext();
      try {
        const audioBlob = new Blob(audioChunks.value);
        const arrayBuffer = await audioBlob.arrayBuffer();
        const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

        // Re-encode as WAV (PCM s16le) and upload.
        const wavBuffer = audioBufferToWav(audioBuffer);
        const wavBlob = new Blob([wavBuffer], { type: 'audio/wav' });
        await uploadAudio(wavBlob);
      } catch (err) {
        // Without this handler a decode/upload failure inside the async
        // onstop callback was an unhandled rejection and the UI hung on
        // "正在处理录音...".
        console.error('处理录音失败:', err);
        message.value = { type: 'error', message: '处理录音失败' };
        isRecording.value = false;
        audioChunks.value = [];
      } finally {
        await audioContext.close();
      }
    };

    mediaRecorder.value.start();
    isRecording.value = true;
  } catch (err) {
    console.error('录音失败:', err);
    message.value = { type: 'error', message: '录音失败' };
  }
};

// Encode a decoded AudioBuffer as an uncompressed 16-bit PCM WAV file.
// Layout: 44-byte RIFF/WAVE header followed by interleaved little-endian
// s16 samples. Returns the backing ArrayBuffer (suitable for `new Blob`).
function audioBufferToWav(buffer) {
  const numChannels = buffer.numberOfChannels;
  const sampleRate = buffer.sampleRate;
  const format = 1; // PCM
  const bitDepth = 16;
  
  const bytesPerSample = bitDepth / 8;
  const blockAlign = numChannels * bytesPerSample; // bytes per sample frame
  
  const data = interleave(buffer);
  const dataBytes = data.length * bytesPerSample;
  const headerBytes = 44; // fixed RIFF/fmt/data header size
  const totalBytes = headerBytes + dataBytes;
  
  const wav = new ArrayBuffer(totalBytes);
  const view = new DataView(wav);
  
  // RIFF chunk descriptor
  writeString(view, 0, 'RIFF');
  view.setUint32(4, 36 + dataBytes, true); // file size minus 8-byte RIFF preamble
  writeString(view, 8, 'WAVE');
  
  // fmt sub-chunk
  writeString(view, 12, 'fmt ');
  view.setUint32(16, 16, true); // fmt chunk byte length (16 for PCM)
  view.setUint16(20, format, true); // audio format: 1 = uncompressed PCM
  view.setUint16(22, numChannels, true);
  view.setUint32(24, sampleRate, true);
  view.setUint32(28, sampleRate * blockAlign, true); // byte rate
  view.setUint16(32, blockAlign, true);
  view.setUint16(34, bitDepth, true);
  
  // data sub-chunk
  writeString(view, 36, 'data');
  view.setUint32(40, dataBytes, true);
  
  floatTo16BitPCM(view, 44, data);
  
  return wav;
}

// Merge the buffer's per-channel sample arrays into one interleaved
// Float32Array, frame-major: [ch0[0], ch1[0], ch0[1], ch1[1], ...].
function interleave(buffer) {
  const channels = [];
  for (let c = 0; c < buffer.numberOfChannels; c++) {
    channels.push(buffer.getChannelData(c));
  }

  const out = new Float32Array(buffer.length * channels.length);
  let write = 0;
  for (let frame = 0; frame < buffer.length; frame++) {
    for (const channelData of channels) {
      out[write++] = channelData[frame];
    }
  }
  return out;
}

// Write float samples (clamped to [-1, 1]) into `view` as little-endian
// signed 16-bit PCM, starting at byte `offset`.
function floatTo16BitPCM(view, offset, input) {
  let pos = offset;
  for (const sample of input) {
    const clamped = Math.min(1, Math.max(-1, sample));
    // Asymmetric scaling: -1 maps to -32768, +1 maps to +32767.
    view.setInt16(pos, clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF, true);
    pos += 2;
  }
}

// Write an ASCII string into `view`, one byte per character, starting
// at byte `offset` (used for the WAV header magic tags).
function writeString(view, offset, string) {
  string.split('').forEach((ch, i) => {
    view.setUint8(offset + i, ch.charCodeAt(0));
  });
}

// Stop the active MediaRecorder session and release the microphone tracks.
// No-op when there is no recorder or it is not currently recording.
const stopRecording = () => {
  const recorder = mediaRecorder.value;
  if (recorder?.state !== 'recording') return;

  recorder.stop();
  for (const track of recorder.stream.getTracks()) {
    track.stop();
  }
  message.value = { type: 'info', message: '正在处理录音...' };
};


// Fetch synthesized speech for `text` from the backend and play it.
// NOTE(review): backend URL is hard-coded to localhost — consider moving it
// to configuration shared with the other endpoints.
const playAIResponse = async (text) => {
  try {
    message.value = { type: 'info', message: '播放AI回复...' };
    const response = await fetch('http://localhost:5000/api/text-to-speech', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({ text })
    });

    if (!response.ok) {
      throw new Error('语音合成失败');
    }

    // Stream the audio via an object URL.
    const blob = await response.blob();
    const audioUrl = URL.createObjectURL(blob);
    const audio = new Audio(audioUrl);

    // Release the object URL whether playback finishes or errors — the
    // original only revoked on 'ended', leaking the URL on failure.
    audio.onended = () => URL.revokeObjectURL(audioUrl);
    audio.onerror = () => URL.revokeObjectURL(audioUrl);

    await audio.play();

    // Playback started: clear the transient status message.
    message.value = null;
  } catch (error) {
    console.error('播放语音失败:', error);
    // Keep the error visible. The original nulled `message` in `finally`,
    // which wiped this error message before the user could see it.
    message.value = { type: 'error', message: '播放语音失败' };
  } finally {
    // Reset state so the next recording round can begin.
    isRecording.value = false;
    audioChunks.value = [];
  }
};

// Events re-emitted to the parent so it can render the chat transcript
// (recognized user speech and the AI reply, each tagged with a role).
const emit = defineEmits(['update:recognizedText', 'update:aiResponse'])

// Upload a WAV blob to the speech-to-text backend, then send the recognized
// text to the chat endpoint and speak the AI reply.
// Emits 'update:recognizedText' / 'update:aiResponse' for the parent UI.
const uploadAudio = async (audioBlob) => {
  const formData = new FormData();
  formData.append('audio', audioBlob, 'recording.wav');
  message.value = { type: 'info', message: '正在识别语音...' };
  try {
    const response = await fetch('http://localhost:5000/api/speech-to-text', {
      method: 'POST',
      body: formData
    });
    const result = await response.json();
    if (!response.ok) {
      throw new Error(result.error || '上传失败');
    }

    recognizedText.value = result.text || '';
    emit('update:recognizedText', {
      role: 'human',
      content: recognizedText.value
    });

    // Nothing was recognized — skip the chat round-trip entirely.
    if (!recognizedText.value) return;

    message.value = { type: 'info', message: 'AI思考中...' };
    try {
      const chatResponse = await fetch('http://localhost:5000/api/chat', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json'
        },
        body: JSON.stringify({
          message: recognizedText.value
        })
      });
      const chatResult = await chatResponse.json();
      if (!chatResponse.ok) {
        throw new Error(chatResult.error || 'AI 回复失败');
      }

      aiResponse.value = chatResult.reply;
      emit('update:aiResponse', {
        role: 'ai',
        content: chatResult.reply
      });
      await playAIResponse(chatResult.reply);
    } catch (error) {
      // The original only logged here, leaving the spinner stuck on
      // "AI思考中..." — surface the failure to the user instead.
      console.error('AI 对话错误:', error);
      message.value = { type: 'error', message: 'AI 回复失败' };
    }
  } catch (error) {
    console.error('上传错误:', error);
    recognizedText.value = '';
    aiResponse.value = '';
    message.value = { type: 'error', message: '上传失败' };
  } finally {
    // Reset recording state regardless of outcome.
    isRecording.value = false;
    audioChunks.value = [];
  }
};



</script>

<style scoped>
/* Vertical layout: record button on top, status line below. */
.audio-recorder {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 1rem;
}

/* Round, red-outlined record button. */
.record-button {
  position: relative;
  width: 54px; /* reduced from 80px */
  height: 54px; /* reduced from 80px */
  border-radius: 50%;
  background-color: #fff;
  border: 2px solid #ff4444;
  cursor: pointer;
  transition: all 0.3s ease;
}

/* Solid red dot centered inside the button. */
.record-icon {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  width: 27px; /* reduced from 40px */
  height: 27px; /* reduced from 40px */
  background-color: #ff4444;
  border-radius: 50%;
  transition: all 0.3s ease;
}

/* While recording, the dot shrinks and pulses. */
.record-button.recording .record-icon {
  width: 13px; /* reduced from 20px */
  height: 13px; /* reduced from 20px */
  animation: pulse 1.5s infinite;
}

.record-button:hover {
  transform: scale(1.05);
}

.record-button:disabled {
  opacity: 0.5;
  cursor: not-allowed;
}

/* NOTE(review): duplicate `.record-icon` and
   `.record-button.recording .record-icon` rules with the old 40px/20px
   sizes used to live here. Because they appeared later in the sheet at
   equal specificity, they overrode the resized 27px/13px rules defined
   earlier — defeating the intended smaller icon. Removed so the resized
   rules actually take effect. */

/* Pulse animation for the recording indicator dot. */
@keyframes pulse {
  0% {
    transform: translate(-50%, -50%) scale(1);
    opacity: 1;
  }
  50% {
    transform: translate(-50%, -50%) scale(1.2);
    opacity: 0.5;
  }
  100% {
    transform: translate(-50%, -50%) scale(1);
    opacity: 1;
  }
}

/* NOTE(review): `.error-message`, `.upload-status*`, `.recognized-text`,
   `.user-text`, `.ai-text`, `.ai-response` and `.loading-spinner` do not
   appear in this component's template — they look like leftovers from an
   earlier layout; confirm before deleting. */
.error-message {
  color: #ff4444;
  margin: 0;
  font-size: 0.9rem;
}

.upload-status {
  margin: 0;
  font-size: 0.9rem;
}

.upload-status.success {
  color: #4caf50;
}

.upload-status.error {
  color: #ff4444;
}

.upload-status.info {
  color: #2196f3;
}

.recognized-text {
  margin: 1rem 0;
  padding: 1rem;
  background-color: #f5f5f5;
  border-radius: 8px;
  width: 100%;
  max-width: 400px;
  text-align: center;
  color: #333;
  font-size: 1rem;
  line-height: 1.5;
}

.user-text, .ai-text {
  display: block;
  margin-bottom: 0.5rem;
}

.ai-response {
  margin: 1rem 0;
  padding: 1rem;
  background-color: #e3f2fd;
  border-radius: 8px;
  width: 100%;
  max-width: 400px;
  text-align: left;
  color: #333;
  font-size: 1rem;
  line-height: 1.5;
}

.loading-spinner {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 1rem;
  margin: 1rem 0;
}

/* Base 40px spinner; the status-line variant further down overrides the size. */
.spinner {
  width: 40px;
  height: 40px;
  border: 4px solid #f3f3f3;
  border-top: 4px solid #3498db;
  border-radius: 50%;
  animation: spin 1s linear infinite;
}

@keyframes spin {
  0% { transform: rotate(0deg); }
  100% { transform: rotate(360deg); }
}

/* Status line under the record button. */
.status-message {
  margin: 0 0;
  font-size: 0.9rem;
  text-align: center;
  min-height: 1.2em;
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 0.5rem;
}

.status-message.error {
  color: #ff4444;
}

.status-message.success {
  color: #4caf50;
}

.status-message.recording {
  color: #4caf50;
}

.status-message.info {
  color: #2196f3;
}

.status-message.loading {
  color: #3498db;
}

/* Small 16px spinner shown inline within the status line. */
.status-message .spinner {
  width: 16px;
  height: 16px;
  border: 2px solid #f3f3f3;
  border-top: 2px solid #3498db;
  border-radius: 50%;
  animation: spin 1s linear infinite;
  display: inline-block;
  vertical-align: middle;
  margin-right: 0.5rem;
}

/* The old `.loading-spinner` styles can likely be removed (see note above). */
</style>