<template>
  <transition name="dialog-fade">
    <div v-if="visible" class="audio-dialog-overlay" @click.self="closeDialog">
      <div class="audio-dialog">
        <div class="audio-dialog-header">
          <h3>音频播放器</h3>
          <button @click="closeDialog" class="close-btn" title="关闭">✕</button>
        </div>
        <div class="audio-dialog-content">
          <div class="audio-info">
            <p>{{ isPlaying ? '正在播放...' : isRecording ? '正在录音...' : '准备播放' }}</p>
            <div v-if="isRecording" class="audio-level">
              <div class="level-bar">
                <div class="level-fill" :style="{ width: audioLevel + '%' }"></div>
              </div>
              <span class="level-text">音量: {{ Math.round(audioLevel) }}%</span>
            </div>
          </div>
          <div class="button-group">
            <button @click="playAudio" class="play-button" :disabled="isPlaying">
              {{ isPlaying ? '播放中...' : '点击播放音频' }}
            </button>
            <button v-if="isPlaying" @click="stopAudio" class="stop-button">
              停止播放
            </button>
          </div>
          <div class="button-group">
            <button @click="toggleRecording" class="record-button" :class="{ 'recording': isRecording }">
              {{ isRecording ? '停止录音' : '开始录音' }}
            </button>
          </div>
          <!-- <audio ref="audioElement" :src="audioUrl" @ended="onAudioEnded" @error="onAudioError"></audio> -->
        </div>
      </div>
    </div>
  </transition>
</template>

<script setup lang="ts">
/**
 * AudioPlayer 音频播放器组件
 * 
 * 功能描述：
 * 1. 播放远程音频文件
 * 2. 录制用户声音并转换为 PCM 格式
 * 3. 显示音频波形/音量条
 * 4. 音频数据的重采样 (Resampling) 和格式转换
 * 
 * 核心技术：
 * - MediaRecorder API 进行录音
 * - AudioContext API 进行音频分析和处理
 * - OfflineAudioContext 进行离线重采样
 */

import { ref, watch } from 'vue'

// ==================== Component interface ====================

/** Component props definition */
interface Props {
  /** Controls dialog visibility (driven by the parent via v-model:visible) */
  visible: boolean
}

const props = defineProps<Props>()

/** Component events definition */
const emit = defineEmits<{
  /** Emitted to sync the `visible` state back to the parent */
  (e: 'update:visible', value: boolean): void
}>()

// ==================== Constants ====================

// Demo audio URL
const audioUrl = 'http://231017d0.nat123.top:40037/audio/audio.wav'
// const audioUrl = 'http://downsc.chinaz.net/Files/DownLoad/sound1/201906/11582.mp3'

// ==================== Reactive state ====================

/** HTML Audio element reference (the template's <audio> tag is currently commented out) */
const audioElement = ref<HTMLAudioElement | null>(null)
/** Whether audio is currently playing */
const isPlaying = ref<boolean>(false)
/** Whether a recording session is in progress */
const isRecording = ref<boolean>(false)
/** MediaRecorder instance for the active recording session */
const mediaRecorder = ref<MediaRecorder | null>(null)
/** Recorded audio data chunks collected from MediaRecorder */
const audioChunks = ref<Blob[]>([])
/** Current input level (0-100), used to drive the volume bar in the template */
const audioLevel = ref<number>(0)

// ==================== 监听器 ====================

/**
 * 监听 visible 属性变化
 * 当弹窗关闭时，强制停止当前的播放和录音
 */
watch(() => props.visible, (newValue) => {
  if (!newValue) {
    stopAudio()
  }
})

// ==================== 音频播放控制 ====================

/**
 * 播放音频
 * TODO: 需绑定到实际的 audio 元素
 */
const playAudio = () => {
  // if (audioElement.value) {
  //   audioElement.value.play()
  //   isPlaying.value = true
  // }
}

/** Callback for the audio element's `ended` event. */
const onAudioEnded = (): void => {
  // Playback ran to completion — clear the flag so the play button re-enables.
  isPlaying.value = false
}

/** Callback for the audio element's `error` event. */
const onAudioError = (): void => {
  // Reset state first so the UI is already consistent while the alert blocks.
  isPlaying.value = false
  alert('音频加载失败，请检查网络连接')
}

/**
 * Stop audio playback and rewind to the start.
 * Safe to call when no audio element exists or nothing is playing.
 */
const stopAudio = (): void => {
  const el = audioElement.value
  if (el) {
    el.pause()
    el.currentTime = 0 // rewind so the next play starts from the beginning
  }
  isPlaying.value = false
}

// ==================== 弹窗控制 ====================

/**
 * Close the dialog: stop playback, stop any active recording (releasing the
 * microphone), then ask the parent to hide us.
 */
const closeDialog = () => {
  stopAudio()                             // halt playback, if any
  if (isRecording.value) stopRecording()  // release the microphone
  emit('update:visible', false)           // parent owns `visible` (v-model)
}

// ==================== 录音功能 ====================

/**
 * Start recording from the microphone.
 *
 * Flow:
 * 1. Request microphone access
 * 2. Build an AudioContext + AnalyserNode for the live volume meter
 * 3. Start a MediaRecorder collecting 100 ms chunks
 * 4. On stop: convert the recording to PCM and release all resources
 *
 * BUG FIX: the level-meter loop is now started AFTER `isRecording` is set to
 * true. Previously `checkAudioLevel()` ran while the flag was still false, so
 * the rAF loop exited immediately and the volume bar never updated.
 * Also fixed: the analyser's AudioContext is now closed when recording stops
 * (it used to leak).
 */
const startRecording = async (): Promise<void> => {
  try {
    // Keep the constraints minimal and let the browser pick sane defaults.
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
    audioChunks.value = []

    // Debug: log which input device the browser selected.
    stream.getAudioTracks().forEach(track => {
      console.log('使用的音频设备:', track.label)
      console.log('设备设置:', track.getSettings())
    })

    // Analyser chain, used only for the on-screen volume meter.
    const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)()
    const source = audioContext.createMediaStreamSource(stream)
    const analyser = audioContext.createAnalyser()
    analyser.fftSize = 256
    source.connect(analyser)
    const dataArray = new Uint8Array(analyser.frequencyBinCount)

    // rAF loop that keeps `audioLevel` updated while recording is active.
    const checkAudioLevel = () => {
      if (!isRecording.value) return
      analyser.getByteFrequencyData(dataArray)
      const average = dataArray.reduce((a, b) => a + b, 0) / dataArray.length
      audioLevel.value = (average / 255) * 100
      requestAnimationFrame(checkAudioLevel)
    }

    mediaRecorder.value = new MediaRecorder(stream)

    // Collect data chunks as they arrive.
    mediaRecorder.value.ondataavailable = (event) => {
      if (event.data.size > 0) {
        console.log('收到音频数据块:', event.data.size, 'bytes')
        audioChunks.value.push(event.data)
      }
    }

    // On stop: convert to PCM, then release the mic and the audio context.
    mediaRecorder.value.onstop = async () => {
      const audioBlob = new Blob(audioChunks.value, { type: 'audio/webm' })
      console.log('录音 Blob 大小:', audioBlob.size, 'bytes')

      await processAudioToPCM(audioBlob)

      stream.getTracks().forEach(track => track.stop())
      await audioContext.close() // release the analyser context (was leaked before)
    }

    // 100 ms timeslice keeps individual chunks small.
    mediaRecorder.value.start(100)
    isRecording.value = true
    checkAudioLevel() // start the meter loop only after the flag is set
    console.log('录音已开始')
  } catch (error) {
    console.error('无法访问麦克风:', error)
    alert('无法访问麦克风，请检查权限设置')
  }
}

/**
 * Stop the active recording session.
 * No-op when nothing is being recorded.
 */
const stopRecording = (): void => {
  const recorder = mediaRecorder.value
  if (!recorder || !isRecording.value) return

  recorder.stop()          // fires onstop → PCM conversion & cleanup
  isRecording.value = false
  audioLevel.value = 0     // reset the volume meter
  console.log('录音已停止')
}

/** Toggle between recording and idle states. */
const toggleRecording = (): void => {
  if (isRecording.value) {
    stopRecording()
    return
  }
  void startRecording() // fire-and-forget; errors are handled inside
}

// ==================== 音频数据处理 ====================

/**
 * Convert a recorded audio Blob to 16 kHz / 16-bit mono PCM, Base64-encoded.
 *
 * Flow:
 * 1. Read the Blob into an ArrayBuffer
 * 2. Decode it with an AudioContext
 * 3. Resample to 16 kHz via an OfflineAudioContext
 * 4. Convert Float32 samples to little-endian 16-bit PCM
 * 5. Base64-encode and wrap in a descriptive JSON object
 *
 * Fixes over the previous version:
 * - removed an unused OfflineAudioContext that was allocated up front with a
 *   length guessed from the *encoded* byte size (the real one is created from
 *   the decoded duration below)
 * - the AudioContext is now closed on the failure path too (it used to leak
 *   when decodeAudioData rejected)
 * - FileReader + Promise-constructor wrapper replaced by Blob.arrayBuffer()
 *
 * @param audioBlob - raw recording produced by MediaRecorder
 * @returns the JSON payload describing the PCM data
 */
const processAudioToPCM = async (audioBlob: Blob) => {
  const arrayBuffer = await audioBlob.arrayBuffer()
  console.log('ArrayBuffer 大小:', arrayBuffer.byteLength, 'bytes')

  // Decode at the context's native rate; resampling happens offline below.
  const audioContext = new (window.AudioContext || (window as any).webkitAudioContext)()
  console.log('AudioContext 采样率:', audioContext.sampleRate, 'Hz')

  try {
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer)
    console.log('解码后音频信息:', {
      duration: audioBuffer.duration,
      sampleRate: audioBuffer.sampleRate,
      numberOfChannels: audioBuffer.numberOfChannels,
      length: audioBuffer.length
    })

    // Offline render to mono 16 kHz, sized from the decoded duration.
    const targetSampleRate = 16000
    const targetLength = Math.ceil(audioBuffer.duration * targetSampleRate)
    const offlineCtx = new OfflineAudioContext(1, targetLength, targetSampleRate)
    const source = offlineCtx.createBufferSource()
    source.buffer = audioBuffer
    source.connect(offlineCtx.destination)
    source.start(0)

    console.log('开始重采样到 16kHz...')
    const resampledBuffer = await offlineCtx.startRendering()
    console.log('重采样完成,新长度:', resampledBuffer.length)

    // Mono channel data after resampling.
    const channelData = resampledBuffer.getChannelData(0)

    // Sanity check: warn when the capture is pure silence.
    let hasNonZero = false
    let maxValue = 0
    for (let i = 0; i < channelData.length; i++) {
      const absValue = Math.abs(channelData[i])
      if (absValue > 0.0001) hasNonZero = true
      if (absValue > maxValue) maxValue = absValue
    }
    console.log('重采样后音频数据检测:', { hasNonZero, maxValue })
    if (!hasNonZero) {
      console.warn('[警告] 录音数据全为0(静音)! 请检查麦克风设置')
    }

    // Float32 → little-endian 16-bit PCM.
    const pcmData = convertToPCM16(channelData)
    console.log('PCM 数据大小:', pcmData.byteLength, 'bytes')

    // Base64-encode for transport.
    const base64Data = arrayBufferToBase64(pcmData)

    const jsonData = {
      format: 'pcm',
      sampleRate: 16000,
      bitDepth: 16,
      channels: 1,
      duration: audioBuffer.duration,
      dataLength: pcmData.byteLength,
      data: base64Data
    }

    console.log('录音数据 (JSON):', jsonData)
    console.log('Base64 数据长度:', base64Data.length)
    console.log('Base64 前100个字符:', base64Data.substring(0, 100))

    return jsonData
  } catch (error) {
    console.error('音频解码失败:', error)
    throw error
  } finally {
    // Always release the decode context, success or failure.
    audioContext.close()
  }
}

/**
 * Resample a Float32 buffer with simple linear interpolation.
 * Kept as a reference / fallback; the OfflineAudioContext path is the one
 * normally used for resampling.
 *
 * @param audioData - source samples
 * @param fromSampleRate - source rate in Hz
 * @param toSampleRate - target rate in Hz
 * @returns the resampled buffer
 */
const resampleAudio = (audioData: Float32Array, fromSampleRate: number, toSampleRate: number): Float32Array => {
  const step = fromSampleRate / toSampleRate
  const outLength = Math.round(audioData.length / step)
  const out = new Float32Array(outLength)

  for (let i = 0; i < outLength; i++) {
    const pos = i * step
    const left = Math.floor(pos)
    const right = left + 1
    const frac = pos - left

    // Interpolate between neighbours; clamp at the final sample.
    out[i] = right < audioData.length
      ? audioData[left] * (1 - frac) + audioData[right] * frac
      : audioData[left]
  }

  return out
}

/**
 * Convert Float32 samples (-1.0 … 1.0) to a 16-bit little-endian PCM buffer.
 * Out-of-range input is clamped; the int16 range is asymmetric, so negative
 * values scale by 0x8000 and positive values by 0x7FFF.
 *
 * @param float32Array - floating-point audio samples
 * @returns 16-bit PCM bytes (little endian)
 */
const convertToPCM16 = (float32Array: Float32Array): ArrayBuffer => {
  const out = new DataView(new ArrayBuffer(float32Array.length * 2))

  float32Array.forEach((sample, i) => {
    const clamped = Math.min(1, Math.max(-1, sample))
    const scaled = clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF
    out.setInt16(i * 2, scaled, true) // little endian
  })

  return out.buffer
}

/**
 * Encode an ArrayBuffer as a Base64 string.
 *
 * @param buffer - binary data
 * @returns Base64-encoded string
 */
const arrayBufferToBase64 = (buffer: ArrayBuffer): string => {
  const bytes = new Uint8Array(buffer)
  const pieces: string[] = []

  // 32 KiB slices keep each fromCharCode call under the argument-count limit
  // (a single call on a large buffer would overflow the stack).
  const STRIDE = 0x8000
  for (let offset = 0; offset < bytes.byteLength; offset += STRIDE) {
    const slice = bytes.subarray(offset, offset + STRIDE) // subarray clamps the end
    pieces.push(String.fromCharCode(...Array.from(slice)))
  }

  return btoa(pieces.join(''))
}
</script>

<style scoped>
.audio-dialog-overlay {
  position: fixed;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;
  background-color: rgba(0, 0, 0, 0.6);
  display: flex;
  justify-content: center;
  align-items: center;
  z-index: 1000;
}

.audio-dialog {
  background: linear-gradient(135deg, #1a1f2e 0%, #2d3748 100%);
  border-radius: 16px;
  width: 400px;
  max-width: 90%;
  box-shadow: 0 10px 40px rgba(0, 0, 0, 0.5);
  overflow: hidden;
  border: 1px solid rgba(255, 255, 255, 0.1);
}

.audio-dialog-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 20px 24px;
  background: rgba(0, 0, 0, 0.2);
  border-bottom: 1px solid rgba(255, 255, 255, 0.1);
}

.audio-dialog-header h3 {
  margin: 0;
  color: #ffffff;
  font-size: 18px;
  font-weight: 600;
}

.close-btn {
  background: none;
  border: none;
  color: #ffffff;
  font-size: 24px;
  cursor: pointer;
  padding: 0;
  width: 30px;
  height: 30px;
  display: flex;
  align-items: center;
  justify-content: center;
  border-radius: 50%;
  transition: all 0.3s ease;
}

.close-btn:hover {
  background: rgba(255, 255, 255, 0.1);
  transform: rotate(90deg);
}

.audio-dialog-content {
  padding: 30px 24px;
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 20px;
}

.audio-info {
  text-align: center;
  color: #a0aec0;
  font-size: 14px;
}

.button-group {
  display: flex;
  gap: 12px;
  align-items: center;
}

.play-button,
.stop-button {
  color: #ffffff;
  border: none;
  padding: 14px 32px;
  font-size: 16px;
  font-weight: 600;
  border-radius: 8px;
  cursor: pointer;
  transition: all 0.3s ease;
}

.play-button {
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4);
}

.play-button:hover:not(:disabled) {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(102, 126, 234, 0.6);
}

.play-button:active:not(:disabled) {
  transform: translateY(0);
}

.play-button:disabled {
  opacity: 0.6;
  cursor: not-allowed;
}

.stop-button {
  background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
  box-shadow: 0 4px 15px rgba(245, 87, 108, 0.4);
}

.stop-button:hover {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(245, 87, 108, 0.6);
}

.stop-button:active {
  transform: translateY(0);
}

.record-button {
  background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%);
  box-shadow: 0 4px 15px rgba(79, 172, 254, 0.4);
  color: #ffffff;
  border: none;
  padding: 14px 32px;
  font-size: 16px;
  font-weight: 600;
  border-radius: 8px;
  cursor: pointer;
  transition: all 0.3s ease;
}

.record-button:hover {
  transform: translateY(-2px);
  box-shadow: 0 6px 20px rgba(79, 172, 254, 0.6);
}

.record-button:active {
  transform: translateY(0);
}

.record-button.recording {
  background: linear-gradient(135deg, #fa709a 0%, #fee140 100%);
  box-shadow: 0 4px 15px rgba(250, 112, 154, 0.4);
  animation: pulse 1.5s ease-in-out infinite;
}

@keyframes pulse {
  0%, 100% {
    box-shadow: 0 4px 15px rgba(250, 112, 154, 0.4);
  }
  50% {
    box-shadow: 0 6px 25px rgba(250, 112, 154, 0.8);
  }
}

/* 过渡动画 */
.dialog-fade-enter-active,
.dialog-fade-leave-active {
  transition: opacity 0.3s ease;
}

.dialog-fade-enter-from,
.dialog-fade-leave-to {
  opacity: 0;
}

.dialog-fade-enter-active .audio-dialog,
.dialog-fade-leave-active .audio-dialog {
  transition: transform 0.3s ease;
}

.dialog-fade-enter-from .audio-dialog,
.dialog-fade-leave-to .audio-dialog {
  transform: scale(0.9);
}

audio {
  display: none;
}

.audio-level {
  margin-top: 12px;
  width: 100%;
}

.level-bar {
  width: 100%;
  height: 8px;
  background: rgba(255, 255, 255, 0.1);
  border-radius: 4px;
  overflow: hidden;
  margin-bottom: 8px;
}

.level-fill {
  height: 100%;
  background: linear-gradient(90deg, #4facfe 0%, #00f2fe 50%, #fa709a 100%);
  transition: width 0.1s ease;
}

.level-text {
  color: #a0aec0;
  font-size: 12px;
}
</style>
