<template>
  <div class="audio-editor">
    <el-card class="editor-card" shadow="hover">
      <!-- File processing area: only rendered once an audio file is loaded -->
      <div v-if="audioFile" class="audio-section">
        <div class="file-info">
          <el-tag type="success" size="large">
            <el-icon><document /></el-icon>
            {{ audioFile.name }}
          </el-tag>
        </div>

        <!-- Control panel: silence-detection parameters and analysis status -->
        <el-row :gutter="20" class="control-panel">
          <el-col :span="12">
            <el-card shadow="never">
              <h3>静音检测设置</h3>
              <el-form label-width="120px">
                <el-form-item label="静音阈值:">
                  <el-slider 
                    v-model="silenceThreshold" 
                    :min="-60" 
                    :max="-10" 
                    :step="1"
                    show-input
                  />
                  <small>{{ silenceThreshold }} dB</small>
                </el-form-item>
                <el-form-item label="最小静音时长:">
                  <el-input-number 
                    v-model="minSilenceDuration" 
                    :min="0.1" 
                    :max="5" 
                    :step="0.1"
                    :precision="1"
                  />
                  <small>秒</small>
                </el-form-item>
                <el-form-item label="分割缓冲时间:">
                  <el-input-number 
                    v-model="bufferTime" 
                    :min="0" 
                    :max="1" 
                    :step="0.05"
                    :precision="2"
                  />
                  <small>秒</small>
                </el-form-item>
              </el-form>
            </el-card>
          </el-col>
          <el-col :span="12">
            <el-card shadow="never">
              <h3>处理状态</h3>
              <div class="status-info">
                <p>音频时长: {{ formatTime(audioDuration) }}</p>
                <p>检测到的静音段: {{ silenceSegments.length }} 个</p>
                <p>可提取的音频段: {{ audioSegments.length }} 个</p>
              </div>
            </el-card>
          </el-col>
        </el-row>

        <!-- Action buttons -->
        <div class="action-buttons">
          <el-button 
            type="primary" 
            @click="analyzeAudio" 
            :loading="isAnalyzing"
            size="large"
          >
            <el-icon><search /></el-icon>
            {{ isAnalyzing ? '分析中...' : '分析音频' }}
          </el-button>
        </div>

        <!-- Waveform display area: appears after analysis populates waveformData -->
        <div v-if="waveformData" class="waveform-section">
          <h3>音频波形</h3>
          <canvas 
            ref="waveformCanvas" 
            class="waveform-canvas"
          ></canvas>
          
          <!-- Audio player shared by all segment "play" buttons -->
          <div class="audio-player">
            <audio 
              ref="audioPlayer" 
              :src="audioUrl" 
              @loadedmetadata="onAudioLoaded"
              @timeupdate="onTimeUpdate"
              controls
            ></audio>
          </div>
        </div>

        <!-- Segment list produced by the silence analysis -->
        <div v-if="audioSegments.length > 0" class="segments-section">
          <h3>音频分段 ({{ audioSegments.length }} 个)</h3>
          <el-table :data="audioSegments" stripe>
            <el-table-column prop="index" label="序号" width="80" />
            <el-table-column prop="startTime" label="开始时间" width="120">
              <template #default="scope">
                {{ formatTime(scope.row.startTime) }}
              </template>
            </el-table-column>
            <el-table-column prop="endTime" label="结束时间" width="120">
              <template #default="scope">
                {{ formatTime(scope.row.endTime) }}
              </template>
            </el-table-column>
            <el-table-column prop="duration" label="时长" width="100">
              <template #default="scope">
                {{ formatTime(scope.row.duration) }}
              </template>
            </el-table-column>
            <el-table-column label="操作">
              <template #default="scope">
                <el-button 
                  size="small" 
                  @click="playSegment(scope.row)"
                  type="primary"
                  plain
                >
                  <el-icon><video-play /></el-icon>
                  播放
                </el-button>
              </template>
            </el-table-column>
          </el-table>
        </div>
      </div>
    </el-card>
  </div>
</template>

<script setup>
import { ref, watch, nextTick, onMounted } from 'vue'
import { ElMessage } from 'element-plus'
import { Document, Search, VideoPlay } from '@element-plus/icons-vue'

// Props: the parent may hand in an audio File to process; analysis starts
// automatically when it changes (see the watcher below).
const props = defineProps({
  initialAudioFile: {
    type: File,
    default: null
  }
})

// Emits: 'segments-updated' delivers the segment list (each with a WAV blob)
// back to the parent whenever audioSegments changes.
const emit = defineEmits(['segments-updated'])

// Reactive state
const audioFile = ref(null)       // currently loaded File
const audioUrl = ref('')          // object URL backing the <audio> element
const audioBuffer = ref(null)     // decoded AudioBuffer (set by analyzeAudio)
const audioDuration = ref(0)      // seconds, read from the <audio> element metadata
const audioContext = ref(null)    // shared AudioContext, created lazily
const waveformData = ref(null)    // { data, duration, step } built by generateWaveformData
const waveformCanvas = ref(null)  // template ref: waveform <canvas>
const audioPlayer = ref(null)     // template ref: shared <audio> element

// Analysis parameters (bound to the control panel)
const silenceThreshold = ref(-30) // dB — windows with RMS below this count as silent
const minSilenceDuration = ref(1.0) // seconds — shorter silences are ignored
const bufferTime = ref(0.2) // seconds of padding kept around each segment boundary

// Analysis results
const silenceSegments = ref([]) // [{ start, end, duration }] in seconds
const audioSegments = ref([])   // [{ index, startTime, endTime, duration }]
const isAnalyzing = ref(false)  // drives the button's loading state

// Watch the file handed in by the parent: load it, reset prior analysis
// state, and kick off a fresh analysis once the DOM has settled.
watch(() => props.initialAudioFile, (newFile) => {
  if (newFile) {
    // Revoke the previous object URL before creating a new one; otherwise
    // each file swap leaks the old blob URL for the lifetime of the page.
    if (audioUrl.value) {
      URL.revokeObjectURL(audioUrl.value)
    }
    audioFile.value = newFile
    audioUrl.value = URL.createObjectURL(newFile)

    // Reset previous analysis results
    silenceSegments.value = []
    audioSegments.value = []
    waveformData.value = null

    // Analyze automatically after the template has re-rendered
    nextTick(() => {
      analyzeAudio()
    })
  }
}, { immediate: true })

// Watch segment-list changes: render each segment into a WAV blob and
// forward the enriched list to the parent.
watch(audioSegments, (newSegments) => {
  // extractAudioSegment reads audioBuffer; skip until decoding has happened
  // (e.g. when the watcher fires for the reset to [] on file change).
  if (!audioBuffer.value) return

  Promise.all(newSegments.map(async (segment) => {
    const segmentBuffer = await extractAudioSegment(segment)
    const blob = await audioBufferToBlob(segmentBuffer)
    return {
      ...segment,
      blob: blob
    }
  })).then(segmentsWithBlobs => {
    emit('segments-updated', segmentsWithBlobs)
  }).catch(error => {
    // Report instead of leaving an unhandled promise rejection.
    console.error('音频分段导出失败:', error)
  })
}, { deep: true })

// Lazily create the shared AudioContext (with the legacy WebKit fallback).
// Subsequent calls are no-ops.
const initAudioContext = () => {
  if (audioContext.value) return
  const Ctor = window.AudioContext || window.webkitAudioContext
  audioContext.value = new Ctor()
}

// loadedmetadata handler for the <audio> element: capture the track duration.
const onAudioLoaded = () => {
  const player = audioPlayer.value
  audioDuration.value = player.duration
}

// timeupdate handler: redraw the waveform as playback progresses.
// The canvas only exists once waveformData is set (v-if in the template).
const onTimeUpdate = () => {
  if (!waveformCanvas.value) return
  drawWaveform()
}

// Analyze the loaded audio file end-to-end: decode it, build the waveform
// envelope, detect silences, derive playable segments, then render the
// waveform. Errors are reported to the user; nothing is thrown to callers.
// The step order matters: waveform/silence/segments all read audioBuffer,
// and drawWaveform needs the canvas mounted by the waveformData v-if.
const analyzeAudio = async () => {
  if (!audioFile.value) return
  
  isAnalyzing.value = true
  
  try {
    initAudioContext()
    
    // Read the file and decode it into an AudioBuffer
    const arrayBuffer = await audioFile.value.arrayBuffer()
    audioBuffer.value = await audioContext.value.decodeAudioData(arrayBuffer)
    
    // Build the down-sampled RMS envelope used for rendering
    generateWaveformData()
    
    // Find stretches below the configured silence threshold
    detectSilenceSegments()
    
    // Derive segments from the gaps between silences
    generateAudioSegments()
    
    // Wait for the canvas (gated by v-if="waveformData") to mount, then draw
    await nextTick()
    drawWaveform()
    
    ElMessage.success(`分析完成：检测到 ${silenceSegments.value.length} 个静音段，可提取 ${audioSegments.value.length} 个音频段`)
    
  } catch (error) {
    console.error('音频分析失败:', error)
    ElMessage.error('音频分析失败，请检查文件格式')
  } finally {
    // Always clear the loading flag, even on failure
    isAnalyzing.value = false
  }
}

// Build a down-sampled RMS envelope of channel 0 for waveform rendering.
// Produces roughly `targetSamples` buckets; stores { data, duration, step }
// in waveformData.
const generateWaveformData = () => {
  const channelData = audioBuffer.value.getChannelData(0)
  const duration = audioBuffer.value.duration

  // Guard step against 0: the original Math.floor(len / targetSamples)
  // yielded 0 for clips shorter than targetSamples samples, making the
  // loop below (i += step) spin forever.
  const targetSamples = 2000
  const step = Math.max(1, Math.floor(channelData.length / targetSamples))

  const waveform = []
  for (let i = 0; i < channelData.length; i += step) {
    // RMS over the window [i, end); the last window may be shorter.
    const end = Math.min(i + step, channelData.length)
    let sum = 0
    for (let j = i; j < end; j++) {
      sum += channelData[j] * channelData[j]
    }
    waveform.push(Math.sqrt(sum / (end - i)))
  }

  waveformData.value = {
    data: waveform,                   // RMS amplitude per bucket
    duration,                         // total clip length in seconds
    step: duration / waveform.length  // seconds represented by each bucket
  }
}

// Scan channel 0 in 10 ms windows and record every stretch whose RMS stays
// below the configured threshold for at least minSilenceDuration seconds.
// Results ({ start, end, duration } in seconds) go into silenceSegments.
const detectSilenceSegments = () => {
  if (!audioBuffer.value) return
  
  const channelData = audioBuffer.value.getChannelData(0)
  const sampleRate = audioBuffer.value.sampleRate
  // Convert the dB threshold to linear amplitude: 10^(dB/20).
  const threshold = Math.pow(10, silenceThreshold.value / 20)
  const minSilenceSamples = Math.floor(minSilenceDuration.value * sampleRate)
  
  const silences = []
  let silenceStart = -1            // start time (s) of the current silent run; -1 = none
  let consecutiveSilentSamples = 0 // samples accumulated in the current silent run
  
  const windowSize = Math.floor(sampleRate * 0.01) // 10 ms analysis window
  
  for (let i = 0; i < channelData.length; i += windowSize) {
    const end = Math.min(i + windowSize, channelData.length)
    
    // RMS of the current window
    let sum = 0
    for (let j = i; j < end; j++) {
      sum += channelData[j] * channelData[j]
    }
    const rms = Math.sqrt(sum / (end - i))
    
    if (rms < threshold) {
      if (silenceStart === -1) {
        silenceStart = i / sampleRate
      }
      // Count only the samples actually inspected; the final window can be
      // shorter than windowSize, which the original over-counted.
      consecutiveSilentSamples += end - i
    } else {
      // Silent run just ended; keep it only if it met the minimum length.
      if (silenceStart !== -1 && consecutiveSilentSamples >= minSilenceSamples) {
        silences.push({
          start: silenceStart,
          end: i / sampleRate,
          duration: (i / sampleRate) - silenceStart
        })
      }
      silenceStart = -1
      consecutiveSilentSamples = 0
    }
  }
  
  // A silent run may extend all the way to the end of the clip.
  if (silenceStart !== -1 && consecutiveSilentSamples >= minSilenceSamples) {
    const totalDuration = channelData.length / sampleRate
    silences.push({
      start: silenceStart,
      end: totalDuration,
      duration: totalDuration - silenceStart
    })
  }
  
  silenceSegments.value = silences
}

// Derive playable segments from the gaps between detected silences.
// Each boundary is padded by bufferTime seconds INTO the adjacent silence
// (segment end = silence start + buffer; segment start = silence end - buffer),
// and segments shorter than ~0.1 s are dropped. Results go to audioSegments,
// whose watcher then exports the WAV blobs.
const generateAudioSegments = () => {
  if (!audioBuffer.value || silenceSegments.value.length === 0) {
    // No silences detected: the whole clip is a single segment.
    if (audioBuffer.value) {
      audioSegments.value = [{
        index: 1,
        startTime: 0,
        endTime: audioBuffer.value.duration,
        duration: audioBuffer.value.duration
      }]
    }
    return
  }
  
  const segments = []
  let segmentIndex = 1
  const buffer = bufferTime.value
  
  // First segment: clip start up to (first silence start + padding).
  // NOTE(review): the Math.max(0, ...) clamp is a no-op here since both
  // operands are non-negative; kept as-is.
  const firstSilenceStart = silenceSegments.value[0].start
  const firstSegmentEnd = Math.max(0, firstSilenceStart + buffer)
  
  if (firstSegmentEnd > 0.1) {
    segments.push({
      index: segmentIndex++,
      startTime: 0,
      endTime: firstSegmentEnd,
      duration: firstSegmentEnd
    })
  }
  
  // Middle segments: the audio between consecutive silences, padded on both
  // sides into the neighboring silence.
  for (let i = 0; i < silenceSegments.value.length - 1; i++) {
    const currentSilenceEnd = silenceSegments.value[i].end
    const nextSilenceStart = silenceSegments.value[i + 1].start
    
    const segmentStart = Math.max(0, currentSilenceEnd - buffer)
    // Force at least 0.1 s of span before the length filter below.
    const segmentEnd = Math.max(segmentStart + 0.1, nextSilenceStart + buffer)
    
    if (segmentEnd - segmentStart > 0.1) {
      segments.push({
        index: segmentIndex++,
        startTime: segmentStart,
        endTime: segmentEnd,
        duration: segmentEnd - segmentStart
      })
    }
  }
  
  // Final segment: from (last silence end - padding) to the clip end.
  const lastSilenceEnd = silenceSegments.value[silenceSegments.value.length - 1].end
  const lastSegmentStart = Math.max(0, lastSilenceEnd - buffer)
  
  if (audioBuffer.value.duration - lastSegmentStart > 0.1) {
    segments.push({
      index: segmentIndex++,
      startTime: lastSegmentStart,
      endTime: audioBuffer.value.duration,
      duration: audioBuffer.value.duration - lastSegmentStart
    })
  }
  
  audioSegments.value = segments
}

// Render the RMS waveform onto the canvas, with detected silences overlaid
// as translucent red bands. Called after analysis and on every timeupdate.
const drawWaveform = () => {
  if (!waveformCanvas.value || !waveformData.value) return
  
  const canvas = waveformCanvas.value
  const ctx = canvas.getContext('2d')
  // Size the backing store for the device pixel ratio (crisp on HiDPI).
  // Assigning canvas.width/height also resets the context transform, so the
  // scale() below does not accumulate across repeated calls.
  const width = canvas.width = canvas.offsetWidth * window.devicePixelRatio
  const height = canvas.height = canvas.offsetHeight * window.devicePixelRatio
  
  ctx.scale(window.devicePixelRatio, window.devicePixelRatio)
  
  // All drawing below uses CSS-pixel coordinates.
  const displayWidth = canvas.offsetWidth
  const displayHeight = canvas.offsetHeight
  
  // Clear to the background color
  ctx.fillStyle = '#f5f5f5'
  ctx.fillRect(0, 0, displayWidth, displayHeight)
  
  // Draw one vertical bar per waveform bucket, centered vertically
  const data = waveformData.value.data
  const barWidth = displayWidth / data.length
  
  ctx.fillStyle = '#409EFF'
  
  for (let i = 0; i < data.length; i++) {
    // 0.8 leaves a small vertical margin at full amplitude
    const barHeight = data[i] * displayHeight * 0.8
    const x = i * barWidth
    const y = (displayHeight - barHeight) / 2
    
    ctx.fillRect(x, y, Math.max(barWidth - 1, 1), barHeight)
  }
  
  // Overlay silence segments as translucent red bands
  ctx.fillStyle = 'rgba(245, 108, 108, 0.3)'
  silenceSegments.value.forEach(silence => {
    const startX = (silence.start / waveformData.value.duration) * displayWidth
    const endX = (silence.end / waveformData.value.duration) * displayWidth
    ctx.fillRect(startX, 0, endX - startX, displayHeight)
  })
}

// Play one segment in the shared <audio> element, auto-pausing when the
// segment's end time is reached.
const playSegment = (segment) => {
  const player = audioPlayer.value
  if (!player) return
  
  // Invalidate any polling loop left over from a previous playSegment call.
  // Without this, the old loop keeps running and pauses playback as soon as
  // currentTime passes the OLD segment's end — which can be immediately,
  // if the new segment starts later in the clip.
  const generation = (playSegment._generation = (playSegment._generation || 0) + 1)
  
  player.currentTime = segment.startTime
  // play() returns a promise that can reject (e.g. autoplay policy);
  // swallow it so it doesn't surface as an unhandled rejection.
  const playPromise = player.play()
  if (playPromise && typeof playPromise.catch === 'function') {
    playPromise.catch(() => {})
  }
  
  // Poll the playback position once per frame until the segment ends.
  const checkTime = () => {
    if (generation !== playSegment._generation) return // superseded by a newer call
    if (player.currentTime >= segment.endTime) {
      player.pause()
    } else if (!player.paused) {
      requestAnimationFrame(checkTime)
    }
  }
  
  requestAnimationFrame(checkTime)
}

// Copy the [startTime, endTime) slice of the decoded audio into a fresh
// AudioBuffer with the same channel count and sample rate.
const extractAudioSegment = async (segment) => {
  const source = audioBuffer.value
  const rate = source.sampleRate
  const firstSample = Math.floor(segment.startTime * rate)
  const lastSample = Math.floor(segment.endTime * rate)
  const sampleCount = lastSample - firstSample
  
  const sliceBuffer = audioContext.value.createBuffer(
    source.numberOfChannels,
    sampleCount,
    rate
  )
  
  for (let ch = 0; ch < source.numberOfChannels; ch++) {
    const input = source.getChannelData(ch)
    const output = sliceBuffer.getChannelData(ch)
    
    for (let n = 0; n < sampleCount; n++) {
      // Reads past the end of the source yield undefined; substitute silence.
      output[n] = input[firstSample + n] || 0
    }
  }
  
  return sliceBuffer
}

// Serialize an AudioBuffer into a 16-bit PCM WAV Blob (canonical 44-byte
// RIFF/WAVE header, format tag 1, interleaved channels, little-endian).
const audioBufferToBlob = (buffer) => {
  const bytesPerSample = 2
  const channelCount = buffer.numberOfChannels
  const dataByteLength = buffer.length * channelCount * bytesPerSample
  const wavBytes = new ArrayBuffer(44 + dataByteLength)
  const view = new DataView(wavBytes)
  
  // Write an ASCII tag at the given byte offset.
  const putTag = (offset, text) => {
    for (let i = 0; i < text.length; i++) {
      view.setUint8(offset + i, text.charCodeAt(i))
    }
  }
  
  const byteRate = buffer.sampleRate * channelCount * bytesPerSample
  const blockAlign = channelCount * bytesPerSample
  
  // --- WAV header ---
  putTag(0, 'RIFF')
  view.setUint32(4, 36 + dataByteLength, true)  // RIFF chunk size
  putTag(8, 'WAVE')
  putTag(12, 'fmt ')
  view.setUint32(16, 16, true)                  // fmt chunk size
  view.setUint16(20, 1, true)                   // audio format: PCM
  view.setUint16(22, channelCount, true)
  view.setUint32(24, buffer.sampleRate, true)
  view.setUint32(28, byteRate, true)
  view.setUint16(32, blockAlign, true)
  view.setUint16(34, 16, true)                  // bits per sample
  putTag(36, 'data')
  view.setUint32(40, dataByteLength, true)
  
  // --- Sample data: interleave channels, clamp to [-1, 1], scale to int16 ---
  let cursor = 44
  for (let frame = 0; frame < buffer.length; frame++) {
    for (let ch = 0; ch < channelCount; ch++) {
      const clamped = Math.max(-1, Math.min(1, buffer.getChannelData(ch)[frame]))
      view.setInt16(cursor, clamped * 0x7FFF, true)
      cursor += 2
    }
  }
  
  return new Blob([wavBytes], { type: 'audio/wav' })
}

// Format a duration in seconds as zero-padded "MM:SS".
// Falsy or negative input (null, undefined, NaN, 0, < 0) renders as "00:00".
const formatTime = (totalSeconds) => {
  if (!totalSeconds || totalSeconds < 0) return '00:00'
  
  const whole = Math.floor(totalSeconds)
  const pad = (n) => String(n).padStart(2, '0')
  return `${pad(Math.floor(whole / 60))}:${pad(whole % 60)}`
}
</script>

<style scoped>
/* Root wrapper fills the available width */
.audio-editor {
  width: 100%;
}

.editor-card {
  background: white;
  border-radius: 12px;
}

/* Loaded-file banner above the control panel */
.file-info {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 20px;
  padding: 15px;
  background-color: #f8f9fa;
  border-radius: 8px;
}

.control-panel {
  margin-bottom: 20px;
}

.control-panel h3 {
  margin-top: 0;
  color: #333;
}

.status-info p {
  margin: 8px 0;
  color: #666;
}

.action-buttons {
  text-align: center;
  margin-bottom: 30px;
}

.waveform-section {
  margin-bottom: 30px;
}

.waveform-section h3 {
  margin-bottom: 15px;
  color: #333;
}

/* CSS size of the canvas; the backing store is scaled by devicePixelRatio
   in drawWaveform() */
.waveform-canvas {
  width: 100%;
  height: 200px;
  border: 1px solid #e4e7ed;
  border-radius: 6px;
  cursor: pointer;
  background: #f5f5f5;
}

.audio-player {
  margin-top: 15px;
  text-align: center;
}

.audio-player audio {
  width: 100%;
  max-width: 600px;
}

.segments-section h3 {
  margin-bottom: 15px;
  color: #333;
}
</style>