<template>
  <div class="voice-input-wrapper">
    <div class="voice-controls">
      <!-- type="button" prevents accidental form submission when this
           component is rendered inside a <form> (button default is
           "submit"); :aria-label gives the icon-only button an
           accessible name that tracks the current state. -->
      <button
        class="voice-btn"
        type="button"
        :class="{
          'recording': isRecording,
          'listening': isListening,
          'disabled': !isSupported
        }"
        :aria-label="statusText"
        @click="toggleVoice"
        :disabled="!isSupported"
      >
        <!-- Emoji glyphs are purely decorative; hide them from AT so the
             button's aria-label is the only announced name. -->
        <i v-if="!isRecording && !isListening" class="icon-mic" aria-hidden="true">🎤</i>
        <i v-else-if="isListening" class="icon-listening" aria-hidden="true">👂</i>
        <i v-else class="icon-recording" aria-hidden="true">🔴</i>
      </button>

      <div class="voice-status-panel">
        <!-- Polite live region: state changes are announced without
             interrupting the screen reader. -->
        <div class="status-text" :class="statusClass" aria-live="polite">
          {{ statusText }}
        </div>
        <div v-if="volumeLevel > 0" class="volume-indicator">
          <div class="volume-bar" :style="{ width: volumeLevel + '%' }"></div>
        </div>
      </div>
    </div>

    <div v-if="transcript" class="transcript-panel">
      <div class="transcript-header">
        <span class="transcript-title">识别结果</span>
        <button class="clear-btn" type="button" @click="clearTranscript">清除</button>
      </div>
      <div class="transcript-content">{{ transcript }}</div>
    </div>

    <!-- role="alert" makes new error text announced immediately. -->
    <div v-if="error" class="error-panel" role="alert">
      <i class="error-icon" aria-hidden="true">⚠️</i>
      <span class="error-text">{{ error }}</span>
    </div>
  </div>
</template>

<script>
export default {
  name: 'BrowserVoiceInput',
  props: {
    // BCP 47 language tag handed to the Web Speech API recognizer.
    language: {
      type: String,
      default: 'zh-CN'
    },
    // Keep recognizing after each result instead of stopping at the first.
    continuous: {
      type: Boolean,
      default: true
    },
    // Emit interim (non-final) hypotheses while the user is still speaking.
    interimResults: {
      type: Boolean,
      default: true
    }
  },
  data() {
    return {
      isSupported: false,   // Web Speech API available in this browser
      isListening: false,   // a recognition session is active
      isRecording: false,   // reserved for MediaRecorder-based capture (unused here)
      transcript: '',       // latest final or interim recognition text
      error: '',            // user-facing error message (Chinese, shown in template)
      recognition: null,    // SpeechRecognition instance
      mediaRecorder: null,  // reserved for raw-audio recording (unused here)
      audioChunks: [],      // reserved for raw-audio recording (unused here)
      volumeLevel: 0,       // 0–100, drives the volume bar width
      volumeInterval: null, // polling timer for volume sampling
      // FIX: keep references so microphone capture and the audio graph
      // can actually be released. The original code requested two
      // getUserMedia streams and never stopped either, leaving the
      // browser's mic indicator on, and leaked one AudioContext per
      // session.
      mediaStream: null,    // single shared microphone MediaStream
      audioContext: null    // AudioContext used for volume analysis
    }
  },
  computed: {
    // Human-readable status shown next to the button (also used as the
    // button's accessible name).
    statusText() {
      if (!this.isSupported) return '浏览器不支持语音识别'
      if (this.isListening) return '正在监听...'
      if (this.isRecording) return '正在录音...'
      return '点击开始语音输入'
    },
    // CSS modifier class mirroring statusText's state priority.
    statusClass() {
      if (!this.isSupported) return 'disabled'
      if (this.isListening) return 'listening'
      if (this.isRecording) return 'recording'
      return 'ready'
    }
  },
  mounted() {
    this.initSpeechRecognition()
  },
  beforeUnmount() {
    this.cleanup()
  },
  methods: {
    /**
     * Detect Web Speech API support and configure a SpeechRecognition
     * instance with the component's props. Sets `isSupported` and wires
     * the recognition lifecycle callbacks.
     */
    initSpeechRecognition() {
      // Chrome/Edge ship the prefixed constructor only.
      const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition
      if (!SpeechRecognition) {
        this.error = '当前浏览器不支持语音识别功能'
        return
      }

      this.isSupported = true
      this.recognition = new SpeechRecognition()

      // Configure recognition from props.
      this.recognition.continuous = this.continuous
      this.recognition.interimResults = this.interimResults
      this.recognition.lang = this.language

      this.recognition.onstart = () => {
        this.isListening = true
        this.error = ''
        this.$emit('start')
      }

      this.recognition.onresult = (event) => {
        let finalTranscript = ''
        let interimTranscript = ''

        // resultIndex marks the first result that changed in this event;
        // accumulate final vs. interim segments separately.
        for (let i = event.resultIndex; i < event.results.length; i++) {
          const transcript = event.results[i][0].transcript
          if (event.results[i].isFinal) {
            finalTranscript += transcript
          } else {
            interimTranscript += transcript
          }
        }

        // Prefer final text; fall back to the live interim hypothesis.
        this.transcript = finalTranscript || interimTranscript
        this.$emit('transcript', this.transcript)

        if (finalTranscript) {
          this.$emit('final-result', finalTranscript)
        }
      }

      this.recognition.onerror = (event) => {
        this.error = this.getErrorMessage(event.error)
        this.isListening = false
        // FIX: an error terminates the session, so stop the volume
        // meter and release the microphone (the original kept both
        // running).
        this.stopVolumeDetection()
        this.releaseMediaStream()
        this.$emit('error', event.error)
      }

      this.recognition.onend = () => {
        this.isListening = false
        // FIX: recognition can end on its own (e.g. silence timeout),
        // not only via stopListening(); tear down volume metering and
        // the mic stream here as well.
        this.stopVolumeDetection()
        this.releaseMediaStream()
        this.$emit('end')
      }
    },

    /** Toggle between starting and stopping the recognition session. */
    async toggleVoice() {
      if (!this.isSupported) return

      if (this.isListening) {
        this.stopListening()
      } else {
        await this.startListening()
      }
    },

    /**
     * Request microphone access, start recognition, and begin volume
     * metering. On permission failure a user-facing error is set and no
     * exception escapes.
     */
    async startListening() {
      try {
        // FIX: keep the stream so its tracks can be stopped later. The
        // original discarded it, so the microphone stayed captured
        // (browser mic indicator on) until page unload.
        this.mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true })

        this.error = ''
        this.recognition.start()

        // FIX: reuse the same stream for volume metering instead of
        // opening a second, never-released capture.
        this.startVolumeDetection(this.mediaStream)
      } catch (err) {
        this.error = '无法访问麦克风，请检查权限设置'
        console.error('Microphone access denied:', err)
      }
    },

    /** Stop recognition and release all audio resources. */
    stopListening() {
      if (this.recognition && this.isListening) {
        this.recognition.stop()
      }
      this.stopVolumeDetection()
      this.releaseMediaStream()
    },

    /** Stop every track so the browser releases the microphone. */
    releaseMediaStream() {
      if (this.mediaStream) {
        this.mediaStream.getTracks().forEach(track => track.stop())
        this.mediaStream = null
      }
    },

    /**
     * Sample microphone volume every 100 ms and map it to 0–100 for the
     * volume bar.
     *
     * @param {MediaStream} [stream] - existing mic stream to analyse.
     *   Optional so pre-existing callers without an argument still work;
     *   when omitted, a stream is requested (and tracked for release).
     */
    async startVolumeDetection(stream) {
      try {
        const mediaStream = stream || await navigator.mediaDevices.getUserMedia({ audio: true })
        if (!stream) {
          // Track a self-acquired stream so releaseMediaStream() can stop it.
          this.mediaStream = mediaStream
        }

        // FIX: keep the context on the component so it can be closed in
        // stopVolumeDetection() instead of leaking one per session.
        this.audioContext = new (window.AudioContext || window.webkitAudioContext)()
        const analyser = this.audioContext.createAnalyser()
        const microphone = this.audioContext.createMediaStreamSource(mediaStream)

        analyser.fftSize = 256
        microphone.connect(analyser)

        const dataArray = new Uint8Array(analyser.frequencyBinCount)

        this.volumeInterval = setInterval(() => {
          analyser.getByteFrequencyData(dataArray)
          // Initial value 0 guards reduce() against an empty array.
          const average = dataArray.reduce((a, b) => a + b, 0) / (dataArray.length || 1)
          this.volumeLevel = Math.min(100, (average / 128) * 100)
        }, 100)
      } catch (err) {
        // Best-effort feature: recognition still works without the meter.
        console.error('Volume detection failed:', err)
      }
    },

    /** Stop volume polling, reset the bar, and tear down the audio graph. */
    stopVolumeDetection() {
      if (this.volumeInterval) {
        clearInterval(this.volumeInterval)
        this.volumeInterval = null
        this.volumeLevel = 0
      }
      if (this.audioContext) {
        // close() returns a promise; swallow rejection from an
        // already-closed context.
        this.audioContext.close().catch(() => {})
        this.audioContext = null
      }
    },

    /** Clear the displayed transcript and notify the parent. */
    clearTranscript() {
      this.transcript = ''
      this.$emit('clear')
    },

    /**
     * Map a SpeechRecognitionErrorEvent error code to a user-facing
     * (Chinese) message; unknown codes fall back to a generic message
     * containing the raw code.
     */
    getErrorMessage(error) {
      const errorMessages = {
        'network': '网络错误，请检查网络连接',
        'not-allowed': '麦克风权限被拒绝',
        'no-speech': '未检测到语音输入',
        'audio-capture': '音频捕获失败',
        'service-not-allowed': '语音识别服务不可用'
      }
      return errorMessages[error] || `语音识别错误: ${error}`
    },

    /** Release everything on unmount (timer, AudioContext, mic stream). */
    cleanup() {
      this.stopListening()
      this.stopVolumeDetection()
      this.releaseMediaStream()
    }
  }
}
</script>

<style scoped>
/* Root container: centered, width-capped so the control doesn't stretch
   on wide layouts. */
.voice-input-wrapper {
  width: 100%;
  max-width: 500px;
  margin: 0 auto;
}

/* Main control row: mic button + status panel on a frosted card. */
.voice-controls {
  display: flex;
  align-items: center;
  gap: 16px;
  padding: 20px;
  background: linear-gradient(135deg, #f8fafc 0%, #e2e8f0 100%);
  border-radius: 16px;
  box-shadow: 0 4px 20px rgba(0, 0, 0, 0.08);
  border: 1px solid rgba(255, 255, 255, 0.3);
  /* FIX: Safari still requires the -webkit- prefix for backdrop-filter;
     unprefixed form kept for other browsers. */
  -webkit-backdrop-filter: blur(10px);
  backdrop-filter: blur(10px);
  transition: all 0.3s ease;
}

.voice-controls:hover {
  transform: translateY(-2px);
  box-shadow: 0 8px 30px rgba(0, 0, 0, 0.12);
}

/* Circular icon button; state classes below override the gradient. */
.voice-btn {
  width: 60px;
  height: 60px;
  border-radius: 50%;
  border: none;
  background: linear-gradient(135deg, #409eff 0%, #67c23a 100%);
  color: white;
  display: flex;
  align-items: center;
  justify-content: center;
  cursor: pointer;
  transition: all 0.3s ease;
  box-shadow: 0 4px 12px rgba(64, 158, 255, 0.3);
  font-size: 24px;
}

.voice-btn:hover:not(.disabled) {
  transform: scale(1.05);
  box-shadow: 0 6px 20px rgba(64, 158, 255, 0.4);
}

.voice-btn:active:not(.disabled) {
  transform: scale(0.95);
}

/* Green pulsing ring while the recognizer is listening. */
.voice-btn.listening {
  background: linear-gradient(135deg, #67c23a 0%, #85ce61 100%);
  animation: listening-pulse 2s infinite;
}

/* Red pulsing ring while recording. */
.voice-btn.recording {
  background: linear-gradient(135deg, #f56c6c 0%, #e6a23c 100%);
  animation: recording-pulse 1.5s infinite;
}

/* Unsupported browser: flat grey, no hover/press transforms. */
.voice-btn.disabled {
  background: #c0c4cc;
  cursor: not-allowed;
  transform: none;
}

/* Expanding translucent ring that fades out at the half cycle. */
@keyframes listening-pulse {
  0%, 100% {
    box-shadow: 0 0 0 0 rgba(103, 194, 58, 0.7);
  }
  50% {
    box-shadow: 0 0 0 15px rgba(103, 194, 58, 0);
  }
}

@keyframes recording-pulse {
  0%, 100% {
    box-shadow: 0 0 0 0 rgba(245, 108, 108, 0.7);
  }
  50% {
    box-shadow: 0 0 0 10px rgba(245, 108, 108, 0);
  }
}

/* Status text + volume bar take the remaining row width. */
.voice-status-panel {
  flex: 1;
}

.status-text {
  font-size: 16px;
  font-weight: 500;
  margin-bottom: 8px;
  transition: color 0.3s ease;
}

/* Status colors mirror the button's state classes. */
.status-text.ready {
  color: #606266;
}

.status-text.listening {
  color: #67c23a;
}

.status-text.recording {
  color: #f56c6c;
}

.status-text.disabled {
  color: #c0c4cc;
}

/* Track for the live volume meter. */
.volume-indicator {
  width: 100%;
  height: 4px;
  background: rgba(0, 0, 0, 0.1);
  border-radius: 2px;
  overflow: hidden;
}

/* Fill width is bound inline to volumeLevel (0–100%). */
.volume-bar {
  height: 100%;
  background: linear-gradient(90deg, #67c23a 0%, #409eff 50%, #f56c6c 100%);
  border-radius: 2px;
  transition: width 0.1s ease;
}

/* Card shown once a transcript exists. */
.transcript-panel {
  margin-top: 16px;
  padding: 16px;
  background: white;
  border-radius: 12px;
  box-shadow: 0 2px 12px rgba(0, 0, 0, 0.1);
  border: 1px solid #e4e7ed;
}

.transcript-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 12px;
}

.transcript-title {
  font-size: 14px;
  font-weight: 600;
  color: #303133;
}

.clear-btn {
  padding: 4px 12px;
  background: #f56c6c;
  color: white;
  border: none;
  border-radius: 6px;
  font-size: 12px;
  cursor: pointer;
  transition: all 0.3s ease;
}

.clear-btn:hover {
  background: #f78989;
}

/* min-height keeps the panel from collapsing on empty interim text. */
.transcript-content {
  font-size: 16px;
  line-height: 1.6;
  color: #303133;
  min-height: 24px;
  padding: 8px;
  background: #f8f9fa;
  border-radius: 8px;
  border-left: 3px solid #409eff;
}

/* Inline error banner (rendered only while `error` is non-empty). */
.error-panel {
  margin-top: 16px;
  padding: 12px 16px;
  background: #fef0f0;
  border: 1px solid #fbc4c4;
  border-radius: 8px;
  display: flex;
  align-items: center;
  gap: 8px;
}

.error-icon {
  font-size: 18px;
}

.error-text {
  color: #f56c6c;
  font-size: 14px;
}

/* Responsive: compact sizing on narrow screens. */
@media (max-width: 768px) {
  .voice-controls {
    padding: 16px;
    gap: 12px;
  }
  
  .voice-btn {
    width: 50px;
    height: 50px;
    font-size: 20px;
  }
  
  .status-text {
    font-size: 14px;
  }
}
</style>