<template>
  <div class="voice-recorder">
    <a-card title="语音录制" class="recorder-card">
      <!-- Recording controls: start/stop button + status line -->
      <div class="recorder-controls">
        <a-button
          :type="isRecording ? 'danger' : 'primary'"
          :loading="isConnecting"
          size="large"
          shape="circle"
          class="record-button"
          @click="toggleRecording"
        >
          <template #icon>
            <Mic v-if="!isRecording" :size="24" />
            <Square v-else :size="24" />
          </template>
        </a-button>
        
        <div class="status-text">
          <span v-if="isConnecting" class="status connecting">处理中...</span>
          <span v-else-if="isRecording" class="status recording">正在录音</span>
          <span v-else class="status idle">点击开始录音</span>
        </div>
      </div>

      <!-- Audio visualizer: purely decorative animated bars while recording -->
      <div class="audio-visualizer" v-if="isRecording">
        <div class="wave-container">
          <div 
            v-for="i in 20" 
            :key="i" 
            class="wave-bar"
            :style="{ animationDelay: `${i * 0.1}s` }"
          ></div>
        </div>
      </div>

      <!-- Elapsed recording time -->
      <div class="recording-info" v-if="isRecording">
        <a-statistic 
          title="录音时长" 
          :value="recordingDuration" 
          suffix="秒"
          :precision="1"
        />
      </div>

      <!-- Transcription result (copyable) -->
      <div class="transcription-result" v-if="transcriptionText">
        <a-divider>转录结果</a-divider>
        <a-typography-paragraph 
          :copyable="{ text: transcriptionText }"
          class="result-text"
        >
          {{ transcriptionText }}
        </a-typography-paragraph>
      </div>

      <!-- Error banner; closing it clears the message -->
      <a-alert
        v-if="errorMessage"
        :message="errorMessage"
        type="error"
        show-icon
        closable
        @close="errorMessage = ''"
        class="error-alert"
      />
    </a-card>
  </div>
</template>

<script setup lang="ts">
import { ref, onMounted, onUnmounted, computed } from 'vue'
import { message } from 'ant-design-vue'
import { Mic, Square } from 'lucide-vue-next'
import { Codewin } from '../../api/generated/Codewin'
import { useAuthStore } from '@/stores'

// Component props.
interface Props {
  maxDuration?: number // maximum recording length in seconds
  language?: string // speech-recognition language code; defaults to 'zh'
}

// Events emitted to the parent (Vue 3.3 labeled-tuple syntax).
interface Emits {
  transcriptionUpdate: [text: string, audioUrl: string] // final text + uploaded audio URL
  recordingStart: []
  recordingStop: []
  error: [error: string]
}

const props = withDefaults(defineProps<Props>(), {
  maxDuration: 300, // default: 5 minutes
  language: 'zh' // default: Chinese
})

const emit = defineEmits<Emits>()

// --- Recording state (reactive) ---
const isRecording = ref(false)
const isConnecting = ref(false) // "busy" flag: covers upload + transcription, not just connecting
const recordingDuration = ref(0) // elapsed seconds; updated every 100ms while recording
const transcriptionText = ref('')
const errorMessage = ref('')

// --- Media plumbing (non-reactive module state) ---
let mediaRecorder: MediaRecorder | null = null
let audioStream: MediaStream | null = null
let recordingTimer: number | null = null // window.setInterval id for the duration ticker
let audioChunks: Blob[] = [] // chunks collected from MediaRecorder 'dataavailable' events

// Feature detection: recording needs getUserMedia + MediaRecorder plus at
// least one audio container we can actually record (MP3 preferred, WebM fallback).
const isSupported = computed(() => {
  // Bail out early when the core capture APIs are missing entirely.
  if (!(
    navigator.mediaDevices && 
    typeof navigator.mediaDevices.getUserMedia === 'function' &&
    typeof MediaRecorder === 'function'
  )) {
    return false
  }
  
  // Prefer MP3 ('audio/mp3' / 'audio/mpeg'), then fall back to WebM/Opus.
  // Fixed typo: 'audio/m p' -> 'audio/mp3' so MP3-capable browsers are
  // detected, matching the probe list in initializeRecording.
  return MediaRecorder.isTypeSupported('audio/mp3') ||
         MediaRecorder.isTypeSupported('audio/mpeg') ||
         MediaRecorder.isTypeSupported('audio/webm;codecs=opus')
})

// Prepare microphone capture: request an audio stream and wire up a
// MediaRecorder whose chunks and lifecycle events feed the component state.
// Resolves to true on success; reports a localized error and resolves to
// false on any failure (permission denied, no device, unsupported browser).
const initializeRecording = async () => {
  try {
    if (!isSupported.value) {
      throw new Error('您的浏览器不支持录音功能')
    }

    // Request a voice-oriented stream with basic cleanup filters enabled.
    audioStream = await navigator.mediaDevices.getUserMedia({
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        sampleRate: 16000
      }
    })

    // Log which containers this browser can record (debug aid).
    console.log('浏览器音频格式支持情况:')
    for (const candidate of ['audio/mp3', 'audio/mpeg', 'audio/webm;codecs=opus']) {
      console.log(`${candidate}:`, MediaRecorder.isTypeSupported(candidate))
    }

    // Pick the container: MP3 wins over MPEG, WebM/Opus is the fallback.
    let chosenType = 'audio/webm;codecs=opus'
    if (MediaRecorder.isTypeSupported('audio/mp3')) {
      chosenType = 'audio/mp3'
      console.log('使用 mp3 格式录音')
    } else if (MediaRecorder.isTypeSupported('audio/mpeg')) {
      chosenType = 'audio/mpeg'
      console.log('使用 MPEG 格式录音')
    } else {
      console.log('使用 WebM 格式录音（默认）')
    }

    mediaRecorder = new MediaRecorder(audioStream, {
      mimeType: chosenType
    })

    // Accumulate non-empty chunks as they arrive (start(1000) => ~1s slices).
    mediaRecorder.ondataavailable = ({ data }) => {
      if (data.size > 0) {
        audioChunks.push(data)
      }
    }

    // Mirror the recorder lifecycle into component events + duration timer.
    mediaRecorder.onstart = () => {
      emit('recordingStart')
      startRecordingTimer()
    }
    mediaRecorder.onstop = () => {
      emit('recordingStop')
      stopRecordingTimer()
    }

    return true
  } catch (error) {
    // Map well-known getUserMedia failures to user-friendly messages.
    let errorMsg = '录音初始化失败'
    if (error instanceof Error) {
      if (error.name === 'NotAllowedError' || error.message.includes('Permission denied')) {
        errorMsg = '麦克风权限被拒绝，请在浏览器设置中允许访问麦克风'
      } else if (error.name === 'NotFoundError') {
        errorMsg = '未找到可用的麦克风设备'
      } else if (error.name === 'NotSupportedError') {
        errorMsg = '您的浏览器不支持录音功能'
      } else {
        errorMsg = error.message
      }
    }
    handleError(errorMsg)
    return false
  }
}

// Auth store + generated API client.
// securityWorker runs per request and reads the *current* token, so a
// refreshed login is picked up without recreating the client.
const authStore = useAuthStore()
const api = new Codewin({
  baseURL: import.meta.env.VITE_API_BASE_URL || '/api',
  securityWorker: () => ({
    headers: {
      Authorization: `Bearer ${authStore.token}`
    }
  })
})

// Upload the captured audio and run speech-to-text on it.
// Side effects: toggles isConnecting while busy, fills transcriptionText,
// emits 'transcriptionUpdate' on success, routes failures through handleError.
const processRecording = async () => {
  try {
    isConnecting.value = true
    
    // Derive the file extension from the recorder's actual MIME type so the
    // uploaded filename matches the container we recorded.
    let mimeType = 'audio/webm;codecs=opus'
    let fileExtension = 'webm'
    if (mediaRecorder && mediaRecorder.mimeType) {
      mimeType = mediaRecorder.mimeType
      // Both 'audio/mp3' and 'audio/mpeg' are stored as .mp3 files.
      if (mimeType.includes('mp3') || mimeType.includes('mpeg')) {
        fileExtension = 'mp3'
      }
    }
    
    const audioBlob = new Blob(audioChunks, { type: mimeType })
    const audioFile = new File([audioBlob], `recording_${Date.now()}.${fileExtension}`, {
      type: mimeType
    })
    
    console.log('准备上传文件:', {
      name: audioFile.name,
      size: audioFile.size,
      type: audioFile.type,
      extension: fileExtension,
      actualMimeType: mimeType
    })
    
    // Upload; the backend envelope is { code, message, data }. Use strict
    // comparison — the generated client types `code` as a number, and
    // `undefined !== 0` still catches a missing code.
    const uploadResult = await api.uploadFile({ file: audioFile })
    console.log('上传结果:', uploadResult)
    console.log('上传结果数据:', uploadResult.data)
    
    if (!uploadResult.data || uploadResult.data.code !== 0) {
      throw new Error(`文件上传失败: ${uploadResult.data?.message || uploadResult.data?.data || '未知错误'}`)
    }
    
    const fileUrl = uploadResult.data.data
    if (!fileUrl) {
      throw new Error('未获取到文件URL')
    }
    
    // Transcribe the uploaded file using the configured language.
    console.log('调用语音转文本接口，参数:', {
      url: fileUrl,
      language: props.language
    })
    
    const transcribeResult = await api.transcribe({
      url: fileUrl,
      language: props.language
    })
    
    console.log('语音转文本结果:', transcribeResult)
    console.log('语音转文本结果数据:', transcribeResult.data)
    
    if (!transcribeResult.data || transcribeResult.data.code !== 0) {
      throw new Error(`语音转文本失败: ${transcribeResult.data?.message || transcribeResult.data?.data || '未知错误'}`)
    }
    
    const transcribedText = transcribeResult.data.data
    if (transcribedText) {
      transcriptionText.value = transcribedText
      emit('transcriptionUpdate', transcribedText, fileUrl)
      message.success('语音转文本完成')
    }
    // NOTE(review): an empty transcription is silently ignored — confirm
    // whether the user should be notified in that case.
    
  } catch (error) {
    const errorMsg = error instanceof Error ? error.message : '处理录音失败'
    handleError(errorMsg)
  } finally {
    // Always clear the busy flag, success or failure.
    isConnecting.value = false
  }
}

// Begin a new capture session. Previous transcription/error state is
// cleared once recording actually starts.
const startRecording = async () => {
  try {
    // Acquire the microphone + configure the recorder first.
    const ready = await initializeRecording()
    if (!ready) {
      return
    }

    // Only start from a clean (inactive) recorder.
    if (!mediaRecorder || mediaRecorder.state !== 'inactive') {
      return
    }

    audioChunks = [] // drop data from any earlier session
    mediaRecorder.start(1000) // emit a chunk roughly every second
    isRecording.value = true
    transcriptionText.value = ''
    errorMessage.value = ''
  } catch (error) {
    handleError('开始录音失败')
  }
}

// Stop capturing and, if any audio was collected, upload + transcribe it.
// Fix: MediaRecorder.stop() delivers the final 'dataavailable' chunk
// asynchronously, so we wait for the recorder's 'stop' event before
// processing — the previous version processed immediately and could drop
// the trailing (<1s) piece of audio.
const stopRecording = async () => {
  if (mediaRecorder && mediaRecorder.state === 'recording') {
    const recorder = mediaRecorder
    await new Promise<void>((resolve) => {
      // 'stop' fires after the last 'dataavailable', so all chunks are in.
      recorder.addEventListener('stop', () => resolve(), { once: true })
      recorder.stop()
    })
  }
  
  // Release the microphone.
  if (audioStream) {
    audioStream.getTracks().forEach(track => track.stop())
    audioStream = null
  }
  
  isRecording.value = false
  isConnecting.value = false
  
  // Hand the collected audio off to upload + transcription.
  if (audioChunks.length > 0) {
    await processRecording()
  }
}

// Flip between idle and recording based on the current state.
const toggleRecording = () => {
  if (!isRecording.value) {
    startRecording()
    return
  }
  stopRecording()
}

// 录音计时器
const startRecordingTimer = () => {
  recordingDuration.value = 0
  recordingTimer = window.setInterval(() => {
    recordingDuration.value += 0.1
    
    // 检查是否超过最大录音时长
    if (recordingDuration.value >= props.maxDuration) {
      stopRecording()
      message.warning(`录音已达到最大时长 ${props.maxDuration} 秒`)
    }
  }, 100)
}

// Cancel the duration ticker if one is running.
const stopRecordingTimer = () => {
  if (recordingTimer == null) {
    return
  }
  clearInterval(recordingTimer)
  recordingTimer = null
}

// Central error sink: surface the message in the UI banner, notify the
// parent via the 'error' event, toast it, and abort any active recording.
const handleError = (error: string) => {
  errorMessage.value = error
  emit('error', error)
  message.error(error)
  
  if (!isRecording.value) {
    return
  }
  // A failure mid-recording aborts the session.
  stopRecording()
}

// Lifecycle: nothing to set up on mount yet.
onMounted(() => {
  // no-op — initialization happens lazily in startRecording()
})

// Release the microphone and any pending timer on unmount.
onUnmounted(() => {
  stopRecording()
  if (recordingTimer) {
    clearInterval(recordingTimer)
  }
})

// Public API exposed to parents via template refs.
defineExpose({
  startRecording,
  stopRecording,
  toggleRecording,
  // read-only views over internal state
  isRecording: computed(() => isRecording.value),
  transcriptionText: computed(() => transcriptionText.value)
})
</script>

<style scoped>
.voice-recorder {
  width: 100%;
  max-width: 500px;
  margin: 0 auto;
}

.recorder-card {
  text-align: center;
}

.recorder-controls {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 1rem;
  margin-bottom: 2rem;
}

/* Round gradient record button; red variant while recording */
.record-button {
  display: flex;
  align-items: center;
  justify-content: center;
  transition: all 0.3s ease;
  border: none !important;
  box-shadow: 0 8px 24px rgba(96, 165, 250, 0.3);
}

.record-button:not(.ant-btn-dangerous) {
  background: linear-gradient(135deg, #60a5fa 0%, #3b82f6 100%) !important;
  border-color: transparent !important;
}

.record-button.ant-btn-dangerous {
  background: linear-gradient(135deg, #f87171 0%, #ef4444 100%) !important;
  border-color: transparent !important;
  box-shadow: 0 8px 24px rgba(239, 68, 68, 0.3);
}

.record-button:hover {
  transform: scale(1.08);
  box-shadow: 0 12px 32px rgba(96, 165, 250, 0.4);
}

.record-button.ant-btn-dangerous:hover {
  box-shadow: 0 12px 32px rgba(239, 68, 68, 0.4);
}

.status-text {
  font-size: 1.1rem;
  font-weight: 500;
}

.status.connecting {
  color: #faad14;
}

.status.recording {
  color: #f5222d;
  animation: pulse 1.5s infinite;
}

.status.idle {
  color: #60a5fa;
}

@keyframes pulse {
  0%, 100% { opacity: 1; }
  50% { opacity: 0.5; }
}

/* Animated equalizer-style bars shown while recording */
.audio-visualizer {
  margin: 2rem 0;
  padding: 1rem;
  background: linear-gradient(135deg, #f0f4ff 0%, #e0e7ff 100%);
  border-radius: 12px;
}

.wave-container {
  display: flex;
  justify-content: center;
  align-items: center;
  gap: 3px;
  height: 60px;
}

.wave-bar {
  width: 4px;
  background: linear-gradient(to top, #60a5fa, #3b82f6);
  border-radius: 2px;
  animation: wave 1.5s ease-in-out infinite;
}

@keyframes wave {
  0%, 100% { height: 10px; }
  50% { height: 40px; }
}

.recording-info {
  margin: 1.5rem 0;
  padding: 1rem;
  background: #f8fafc;
  border-radius: 8px;
}

.transcription-result {
  margin-top: 2rem;
  text-align: left;
}

.result-text {
  background: #f8fafc;
  padding: 1rem;
  border-radius: 8px;
  border-left: 4px solid #60a5fa;
  margin: 0;
}

.error-alert {
  margin-top: 1rem;
}

/* Responsive design */
@media (max-width: 768px) {
  .voice-recorder {
    max-width: 100%;
    padding: 0 1rem;
  }
  
  .record-button {
    width: 85px;
    height: 85px;
  }
  
  .wave-container {
    height: 50px;
  }
}
</style>