<template>
  <div class="text-to-speech">
    <a-card title="文本转语音" class="tts-card">
      <!-- 文本输入区域 -->
      <div class="input-section">
        <a-textarea
          v-model:value="inputText"
          :rows="4"
          :maxlength="maxLength"
          :placeholder="placeholder"
          :disabled="isGenerating || isPlaying"
          class="text-input"
          show-count
        />

        <!-- 语音设置 -->
        <!-- <div class="voice-settings">
          <a-row :gutter="16">
            <a-col :span="8">
              <a-select
                v-model:value="selectedVoice"
                placeholder="选择语音"
                :disabled="isGenerating || isPlaying"
                class="voice-select"
              >
                <a-select-option
                  v-for="voice in availableVoices"
                  :key="voice.id"
                  :value="voice.id"
                >
                  {{ voice.name }} ({{ voice.language }})
                </a-select-option>
              </a-select>
            </a-col>
            <a-col :span="8">
              <a-slider
                v-model:value="speechRate"
                :min="0.5"
                :max="2"
                :step="0.1"
                :disabled="isGenerating || isPlaying"
                :tooltip-formatter="(value: any) => `${value}x`"
              />
              <div class="slider-label">语速: {{ speechRate }}x</div>
            </a-col>
            <a-col :span="8">
              <a-slider
                v-model:value="speechPitch"
                :min="0.5"
                :max="2"
                :step="0.1"
                :disabled="isGenerating || isPlaying"
                :tooltip-formatter="(value: any) => `${value}x`"
              />
              <div class="slider-label">音调: {{ speechPitch }}x</div>
            </a-col>
          </a-row>
        </div> -->
      </div>

      <!-- 控制按钮区域 -->
      <div class="control-section">
        <a-space size="large">
          <a-button
            type="primary"
            :loading="isGenerating"
            :disabled="!inputText.trim() || isPlaying"
            @click="generateSpeech"
            class="generate-button"
          >
            <template #icon>
              <Volume2 :size="16" />
            </template>
            {{ isGenerating ? '生成中...' : '生成语音' }}
          </a-button>

          <a-button
            v-if="audioUrl"
            :type="isPlaying ? 'default' : 'primary'"
            :loading="isPlaying && !isPaused"
            @click="togglePlayback"
            class="play-button"
          >
            <template #icon>
              <Play v-if="!isPlaying || isPaused" :size="16" />
              <Pause v-else :size="16" />
            </template>
            {{ isPlaying && !isPaused ? '暂停' : '播放' }}
          </a-button>

          <a-button
            v-if="audioUrl"
            :disabled="!isPlaying"
            @click="stopPlayback"
            class="stop-button"
          >
            <template #icon>
              <Square :size="16" />
            </template>
            停止
          </a-button>

          <a-button
            v-if="audioUrl"
            @click="downloadAudio"
            class="download-button"
          >
            <template #icon>
              <Download :size="16" />
            </template>
            下载
          </a-button>

          <a-button
            v-if="inputText || audioUrl"
            @click="clearAll"
            class="clear-button"
          >
            <template #icon>
              <Trash2 :size="16" />
            </template>
            清空
          </a-button>
        </a-space>
      </div>

      <!-- 播放进度区域 -->
      <div class="progress-section" v-if="audioUrl">
        <div class="progress-info">
          <span class="time-display">{{ formatTime(currentTime) }} / {{ formatTime(duration) }}</span>
          <span class="status-text" :class="statusClass">{{ statusText }}</span>
        </div>
        <a-slider
          v-model:value="progressValue"
          :max="100"
          :disabled="!audioUrl"
          @change="seekTo"
          class="progress-slider"
        />
      </div>

      <!-- 音频可视化区域 -->
      <div class="visualizer-section" v-if="isPlaying && !isPaused">
        <div class="audio-visualizer">
          <div class="wave-container">
            <div class="wave" v-for="i in 20" :key="i" :style="{ animationDelay: `${i * 0.1}s` }"></div>
          </div>
        </div>
      </div>

      <!-- 统计信息 -->
      <div class="stats-section" v-if="audioUrl">
        <a-row :gutter="16">
          <a-col :span="6">
            <a-statistic title="文本字数" :value="inputText.length" />
          </a-col>
          <a-col :span="6">
            <a-statistic title="生成时间" :value="generationTime" suffix="秒" :precision="1" />
          </a-col>
          <a-col :span="6">
            <a-statistic title="音频时长" :value="duration" suffix="秒" :precision="1" />
          </a-col>
          <a-col :span="6">
            <a-statistic title="播放次数" :value="playCount" />
          </a-col>
        </a-row>


      </div>
      <TTSPlayButton
        message="这是另一个只显示图标的按钮"
        :show-text="false"
        type="default"
      />
    </a-card>

    <!-- 隐藏的音频元素 -->
    <audio
      ref="audioElement"
      @loadedmetadata="onAudioLoaded"
      @timeupdate="onTimeUpdate"
      @ended="onAudioEnded"
      @play="onAudioPlay"
      @pause="onAudioPause"
      @error="onAudioError"
      style="display: none;"
    />


  </div>
</template>

<script setup lang="ts">
import { ref, computed, onMounted, onUnmounted, nextTick } from 'vue'
import { message } from 'ant-design-vue'
import { Volume2, Play, Pause, Square, Download, Trash2 } from 'lucide-vue-next'
import { useAuthStore } from '@/stores'
import TTSPlayButton from './TTSPlayButton.vue'

// Component props (all optional — defaults supplied via withDefaults below).
interface Props {
  wsUrl?: string // streaming TTS WebSocket endpoint
  maxLength?: number // character limit applied to the textarea
  placeholder?: string // textarea placeholder text
  enableMockData?: boolean // true = synthesize locally via the browser's Web Speech API
  voiceModel?: string // voice model id, sent as the `voice_id` query parameter on connect
}

// Events emitted to the parent component.
interface Emits {
  generate: [text: string, voice: string] // generation started
  play: []
  pause: []
  stop: []
  complete: [audioUrl: string] // generation finished ('web-audio-stream' placeholder in streaming mode)
  error: [error: string]
}


const props = withDefaults(defineProps<Props>(), {
  wsUrl: 'wss://www.codewin.top/codewin/ai/speech/tts-stream',
  maxLength: 1000,
  placeholder: '请输入要转换为语音的文本内容...',
  enableMockData: false,
  voiceModel: '5'
})

const emit = defineEmits<Emits>()

// ---- Reactive UI state ----
const inputText = ref('')
const selectedVoice = ref('zh-CN-XiaoxiaoNeural') // voice id (the settings panel is commented out in the template)
const speechRate = ref(1.0) // Web Speech API rate (mock mode only)
const speechPitch = ref(1.0) // Web Speech API pitch (mock mode only)
const isGenerating = ref(false) // a TTS request is in flight
const isPlaying = ref(false)
const isPaused = ref(false)
// NOTE(review): audioUrl is never assigned a non-empty value anywhere in this
// file, so the play/stop/download controls gated on it never appear — confirm intended.
const audioUrl = ref('')
const currentTime = ref(0) // seconds, mirrors <audio>.currentTime
const duration = ref(0) // seconds, set from loadedmetadata
const progressValue = ref(0) // 0–100 value backing the progress slider
const generationTime = ref(0) // seconds taken by the last generation
const playCount = ref(0)

// Hidden <audio> element (traditional playback path).
const audioElement = ref<HTMLAudioElement>()



// ---- Derived status for the progress area ----
// CSS modifier class for the status label.
const statusClass = computed(() => {
  if (isGenerating.value) return 'generating'
  if (isPlaying.value && !isPaused.value) return 'playing'
  if (isPaused.value) return 'paused'
  return 'ready'
})

// Human-readable status label shown next to the time display.
const statusText = computed(() => {
  if (isGenerating.value) return '正在生成语音...'
  if (isPlaying.value && !isPaused.value) return '正在播放'
  if (isPaused.value) return '已暂停'
  if (audioUrl.value) return '准备就绪'
  return '等待生成'
})

// ---- WebSocket / streaming state (non-reactive module locals) ----
let websocket: WebSocket | null = null
// NOTE(review): audioChunks is cleared in several places but never written or
// read — it appears to be dead state; confirm before removing.
let audioChunks: Uint8Array[] = []

// ---- Web Audio playback state ----
let audioContext: AudioContext | null = null
let audioBuffers: AudioBuffer[] = [] // decoded PCM chunks awaiting playback
let isAudioPlaying = false // lock: a merged segment is currently playing
const SAMPLE_RATE = 22050 // must match the server's PCM sample rate
const MIN_BUFFER_SIZE = 4096 // minimum cached bytes before playback starts

// Lazily create and unlock the shared AudioContext used for streamed PCM
// playback. Returns true once the context exists and is in 'running' state,
// false when creation or resume fails.
const initAudioContext = async (): Promise<boolean> => {
  try {
    if (audioContext === null) {
      console.log('初始化音频上下文...')
      const Ctor = window.AudioContext || (window as any).webkitAudioContext
      const created: AudioContext = new Ctor({ sampleRate: SAMPLE_RATE })
      audioContext = created
      console.log(`音频上下文创建成功（采样率：${created.sampleRate}）`)
    }

    // Autoplay policies leave fresh contexts 'suspended'; resume to unlock output.
    if (audioContext.state !== 'running') {
      console.log('等待音频上下文解锁...')
      await audioContext.resume()
      console.log('音频上下文已解锁（状态：running）')
    }
    return true
  } catch (e) {
    console.error(`音频上下文初始化错误: ${(e as Error).message}`)
    return false
  }
}

// Open the authenticated TTS WebSocket.
// Resolves with the open socket; rejects when the user is not logged in, the
// token is expired, or the connection fails. On close with code 1008/4001
// (auth failure) the user is logged out; the module-level `websocket` handle
// is cleared on every close.
const createWebSocketConnection = (): Promise<WebSocket> => {
  return new Promise((resolve, reject) => {
    const authStore = useAuthStore()

    // Fail fast when there is no valid session token.
    if (!authStore.isLoggedIn || authStore.isTokenExpired) {
      reject(new Error('用户未登录或token已过期'))
      return
    }

    // Authenticate via query parameters: `token` for auth, `voice_id` for the voice model.
    const wsUrlWithAuth = `${props.wsUrl}?token=${encodeURIComponent(authStore.token)}&voice_id=${encodeURIComponent(props.voiceModel)}`
    // console.log('TTS WebSocket URL:', wsUrlWithAuth)
    const ws = new WebSocket(wsUrlWithAuth)
    ws.binaryType = 'blob' // receive binary frames as Blob (consumed by processAudioBlob)

    ws.onopen = () => {
      console.log('TTS WebSocket连接已建立')
      resolve(ws)
    }

    ws.onerror = (error) => {
      console.error('TTS WebSocket连接错误:', error)
      reject(new Error('WebSocket连接失败，请检查网络或重新登录'))
    }

    ws.onclose = (event) => {
      console.log('TTS WebSocket连接已关闭', event.code, event.reason)
      if (event.code === 1008 || event.code === 4001) {
        // Server-side authentication failure codes: force re-login.
        message.error('身份验证失败，请重新登录')
        authStore.logout()
      }
      websocket = null
    }
  })
}

// Run one TTS request for `text`.
// Mock mode (props.enableMockData): simulates 2–5 s of latency, then speaks
// via the browser's SpeechSynthesis API when available.
// Real mode: streams 16-bit PCM chunks over the authenticated WebSocket and
// plays them through Web Audio; resolves only after the server signals
// completion AND all buffered audio has finished playing.
// NOTE(review): the `voice` parameter is not used in the real-mode path, and
// mock mode picks a voice heuristically instead — confirm intended.
const callTTSAPI = async (text: string, voice: string): Promise<void> => {
  if (props.enableMockData) {
    // Simulate backend latency (2–5 seconds).
    await new Promise(resolve => setTimeout(resolve, 2000 + Math.random() * 3000))

    // Use the Web Speech API for real audio when the browser supports it.
    if ('speechSynthesis' in window) {
      return new Promise<void>((resolve, reject) => {
        try {
          const utterance = new SpeechSynthesisUtterance(text)
          utterance.rate = speechRate.value
          utterance.pitch = speechPitch.value

          // Best-effort voice selection (Chinese / Microsoft voices preferred).
          const voices = speechSynthesis.getVoices()
          const selectedVoiceObj = voices.find(v =>
            v.name.includes('Chinese') || v.lang.includes('zh') || v.name.includes('Microsoft')
          )
          if (selectedVoiceObj) {
            utterance.voice = selectedVoiceObj
          }

          utterance.onend = () => {
            resolve()
          }

          utterance.onerror = (event) => {
            reject(new Error(`语音合成错误: ${event.error}`))
          }

          speechSynthesis.speak(utterance)
        } catch (error) {
          reject(error)
        }
      })
    } else {
      // No speech support: treat the simulated delay as completion.
      return Promise.resolve()
    }
  }

  try {
    // Reuse the open socket or establish a fresh authenticated connection.
    if (!websocket || websocket.readyState !== WebSocket.OPEN) {
      websocket = await createWebSocketConnection()
    }

    // Make sure the AudioContext exists and is unlocked before audio arrives.
    const audioReady = await initAudioContext()
    if (!audioReady) {
      throw new Error('音频初始化失败')
    }

    // Discard audio left over from a previous request.
    audioChunks = []
    audioBuffers = []

    return new Promise<void>((resolve, reject) => {
      // Per-request message handler: binary frames carry PCM audio; text
      // frames are JSON control messages ({status:'complete'} or {error}).
      websocket!.onmessage = async (event) => {
        try {
          if (event.data instanceof Blob) {
            // Binary frame: one chunk of PCM audio.
            console.log('收到音频包（大小：', event.data.size, '字节）')
            await processAudioBlob(event.data, resolve)
          } else if (typeof event.data === 'string') {
            // Text frame: JSON control message.
            console.log(event.data)
            const data = JSON.parse(event.data)
            console.log('收到服务端消息：', JSON.stringify(data))

            if (data.status === 'complete') {
              console.log('服务端合成完成，触发剩余缓存播放')
              // Server is done; flush any cached audio that hasn't started playing.
              if (!isAudioPlaying && audioBuffers.length > 0) {
                await playCachedAudio()
              }
              // Poll every 100 ms until all buffered audio finishes, then resolve.
              const checkComplete = () => {
                if (!isAudioPlaying && audioBuffers.length === 0) {
                  resolve()
                } else {
                  setTimeout(checkComplete, 100)
                }
              }
              checkComplete()
            } else if (data.error) {
              console.error('服务端错误:', data.error)
              reject(new Error(data.error))
            }
          }
        } catch (error) {
          console.error('解析TTS WebSocket消息失败:', error)
          reject(new Error('解析响应数据失败'))
        }
      }

      // Send only the text; authentication happened at connection time.
      // NOTE(review): JSON.stringify on a plain string produces a quoted JSON
      // string (e.g. "hello") — confirm this matches the server's expectation.
      websocket!.send(JSON.stringify(text))
    })

  } catch (error) {
    console.error('TTS WebSocket通信失败:', error)
    throw error
  }
}

// Decode one binary WebSocket frame: read the Blob as an ArrayBuffer,
// interpret it as 16-bit PCM, normalize to Float32 in [-1, 1], wrap it in an
// AudioBuffer, append to the shared cache, and start playback once the cache
// reaches MIN_BUFFER_SIZE bytes.
// NOTE(review): the `resolve` parameter is accepted but never invoked here —
// completion is driven by the 'complete' handler in callTTSAPI; confirm.
// NOTE(review): `new Int16Array(arrayBuffer)` throws when the frame has an
// odd byte length — confirm the server always sends whole 16-bit samples.
const processAudioBlob = async (blob: Blob, resolve?: () => void) => {
  const reader = new FileReader()
  reader.onload = async (e) => {
    try {
      const result = e.target?.result
      if (!result || !(result instanceof ArrayBuffer)) {
        console.log('警告：读取到空的音频数据，跳过处理')
        return
      }
      const arrayBuffer = result as ArrayBuffer
      if (arrayBuffer.byteLength === 0) {
        console.log('警告：读取到空的音频数据，跳过处理')
        return
      }

      // Bail out when the shared AudioContext is missing or already closed.
       if (!audioContext || audioContext.state === 'closed') {
         console.warn('AudioContext未初始化或已关闭，跳过音频处理')
         return
       }
       const ctx = audioContext as AudioContext

       // 16-bit PCM → Float32: divide by 32768 to normalize into [-1, 1].
       const pcm16 = new Int16Array(arrayBuffer)
       const pcm32 = new Float32Array(pcm16.length)
       for (let i = 0; i < pcm16.length; i++) {
         pcm32[i] = (pcm16[i] || 0) / 32768.0 // 32767 is the 16-bit max; normalizing avoids distortion
       }

       // Wrap the samples in a mono AudioBuffer and append to the shared cache.
       const audioBuf = ctx.createBuffer(1, pcm32.length, SAMPLE_RATE)
       audioBuf.getChannelData(0).set(pcm32)
       audioBuffers.push(audioBuf)
       console.log(`音频包处理完成（缓存总数：${audioBuffers.length}，当前缓存总样本数：${getTotalBufferSamples()}）`)

       // Start playback as soon as the cache crosses the minimum threshold
       // (avoids waiting for the full stream before audio is heard).
       if (!isAudioPlaying && getTotalBufferSize() >= MIN_BUFFER_SIZE) {
         console.log(`缓存达到最小阈值（${MIN_BUFFER_SIZE}字节），触发播放`)
         await playCachedAudio()
       }
    } catch (e) {
      console.error(`处理音频数据错误: ${(e as Error).message}`)
    }
  }

  reader.onerror = () => {
    console.error(`读取音频Blob错误: ${reader.error?.message}`)
  }

  // Read the Blob as an ArrayBuffer so the binary payload stays intact.
  reader.readAsArrayBuffer(blob)
}

// Play everything currently cached in `audioBuffers` as one merged segment.
// The cache is snapshotted into a local array first so chunks arriving during
// playback are not mixed into the segment already playing; when the segment
// ends, this recurses if new chunks accumulated in the meantime.
const playCachedAudio = async (): Promise<void> => {
  // Skip when already playing, nothing cached, or the context is unusable —
  // prevents concurrent sources fighting over the same data.
  if (isAudioPlaying || audioBuffers.length === 0 || !audioContext || audioContext.state === 'closed') {
    console.log(`跳过播放请求（isAudioPlaying: ${isAudioPlaying}，缓存数：${audioBuffers.length}）`)
    return
  }
  const ctx = audioContext as AudioContext

  try {
    isAudioPlaying = true // lock out concurrent playback
    isPlaying.value = true
    isPaused.value = false

    // Snapshot and clear the shared cache so newly arriving chunks queue
    // separately instead of interfering with this segment.
    const tempBuffers = [...audioBuffers]
    audioBuffers = []
    console.log(`开始播放：从临时缓存读取${tempBuffers.length}个包，全局缓存已清空（准备接收新包）`)

    // Merge the snapshot into one buffer (fewer source nodes = smoother playback).
    const totalSamples = tempBuffers.reduce((sum, buf) => sum + buf.length, 0)
    const mergedBuf = ctx.createBuffer(1, totalSamples, SAMPLE_RATE)
    const mergedData = mergedBuf.getChannelData(0)

    let writeOffset = 0
    for (const buf of tempBuffers) {
      mergedData.set(buf.getChannelData(0), writeOffset)
      writeOffset += buf.length
    }
    console.log(`合并完成：总样本数${totalSamples}（约${(totalSamples/SAMPLE_RATE).toFixed(2)}秒）`)

    // Create a one-shot source node for the merged segment.
    const source = ctx.createBufferSource()
    source.buffer = mergedBuf
    source.connect(ctx.destination)

    // When this segment ends: release the lock, then either continue with
    // chunks that arrived meanwhile or mark playback idle.
    source.onended = () => {
      console.log(`当前段音频播放完成（时长：${mergedBuf.duration.toFixed(2)}秒）`)
      isAudioPlaying = false

      if (audioBuffers.length > 0) {
        console.log(`检测到新缓存（${audioBuffers.length}个包），继续播放`)
        playCachedAudio()
      } else {
        isPlaying.value = false
        isPaused.value = false
        console.log('当前无新缓存，等待服务端发送后续音频包')
      }
    }

    // AudioBufferSourceNode has no onerror event; failures from start() are
    // caught here so the playback lock cannot get stuck.
    try {
      source.start(0)
      console.log(`音频播放已启动`)
      emit('play')
    } catch (e) {
      console.error(`音频播放启动错误: ${(e as Error).message}`)
      isAudioPlaying = false
      isPlaying.value = false
      isPaused.value = false
      return
    }

  } catch (e) {
    console.error(`播放音频异常: ${(e as Error).message}`)
    isAudioPlaying = false
    isPlaying.value = false
    isPaused.value = false
  }
}

// Total number of Float32 samples sitting in the playback cache.
const getTotalBufferSamples = (): number => {
  let total = 0
  for (const buf of audioBuffers) {
    total += buf.length
  }
  return total
}

// Total cached bytes (each Float32 sample occupies 4 bytes).
const getTotalBufferSize = (): number => getTotalBufferSamples() * 4

// Entry point for the generate button (also exposed via defineExpose).
// Validates input, emits `generate`, streams and plays audio via callTTSAPI,
// records elapsed generation time, and reports the outcome via emits + toasts.
// NOTE(review): `complete` is emitted with the fixed placeholder
// 'web-audio-stream' and audioUrl is never assigned here, so the playback
// controls gated on audioUrl stay hidden — confirm this is intended.
const generateSpeech = async () => {
  if (!inputText.value.trim()) {
    message.warning('请输入要转换的文本')
    return
  }

  const text = inputText.value.trim()
  emit('generate', text, selectedVoice.value)

  try {
    isGenerating.value = true
    const startTime = Date.now()

    // Stream + play via Web Audio (or the Web Speech mock inside callTTSAPI).
    await callTTSAPI(text, selectedVoice.value)

    generationTime.value = (Date.now() - startTime) / 1000

    // Mock mode additionally replays the text through the Web Speech API.
    if (props.enableMockData && 'speechSynthesis' in window) {
      playWithWebSpeechAPI(text)
    }

    emit('complete', 'web-audio-stream')
    message.success('语音生成成功！')

  } catch (error) {
    const errorMsg = error instanceof Error ? error.message : '语音生成失败'
    emit('error', errorMsg)
    message.error(errorMsg)
  } finally {
    isGenerating.value = false
  }
}

// Speak `text` locally through the browser's SpeechSynthesis API (mock mode).
// Mirrors playback state into the component refs and re-emits play/stop/error.
const playWithWebSpeechAPI = (text: string) => {
  if (!('speechSynthesis' in window)) return

  const utterance = new SpeechSynthesisUtterance(text)
  utterance.rate = speechRate.value
  utterance.pitch = speechPitch.value

  // Best-effort voice pick: prefer a Chinese / Microsoft voice when available.
  const match = speechSynthesis
    .getVoices()
    .find(v => v.name.includes('Chinese') || v.lang.includes('zh') || v.name.includes('Microsoft'))
  if (match) {
    utterance.voice = match
  }

  utterance.onstart = () => {
    isPlaying.value = true
    isPaused.value = false
    playCount.value++
    emit('play')
  }

  utterance.onend = () => {
    isPlaying.value = false
    isPaused.value = false
    emit('stop')
  }

  utterance.onerror = () => {
    isPlaying.value = false
    isPaused.value = false
    emit('error', '语音播放失败')
  }

  speechSynthesis.speak(utterance)
}

// Toggle between play and pause for the current audio (mock or <audio> path).
const togglePlayback = () => {
  if (!audioUrl.value) return

  const useMock = props.enableMockData && 'speechSynthesis' in window

  if (useMock) {
    if (isPlaying.value) {
      speechSynthesis.pause()
      isPaused.value = true
      emit('pause')
      return
    }
    if (isPaused.value) {
      speechSynthesis.resume()
      isPaused.value = false
    } else {
      playWithWebSpeechAPI(inputText.value)
    }
    emit('play')
    return
  }

  const el = audioElement.value
  if (!el) return
  if (isPlaying.value) {
    el.pause()
  } else {
    el.play()
    // Count a playback only when starting fresh (not resuming mid-track).
    if (!playCount.value || currentTime.value === 0) {
      playCount.value++
    }
  }
}

// Stop playback entirely and rewind to the start; always emits `stop`.
const stopPlayback = () => {
  const useMock = props.enableMockData && 'speechSynthesis' in window
  if (useMock) {
    speechSynthesis.cancel()
    isPlaying.value = false
    isPaused.value = false
  } else {
    const el = audioElement.value
    if (el) {
      el.pause()
      el.currentTime = 0
    }
  }
  emit('stop')
}

// Seek the <audio> element to `value` percent (0–100) of the known duration.
const seekTo = (value: number) => {
  const el = audioElement.value
  if (!el || !(duration.value > 0)) return
  el.currentTime = (value / 100) * duration.value
}

// Trigger a browser download of the generated audio via a temporary anchor.
const downloadAudio = () => {
  if (!audioUrl.value) return

  const anchor = document.createElement('a')
  anchor.href = audioUrl.value
  anchor.download = `tts-audio-${Date.now()}.wav`
  document.body.appendChild(anchor)
  anchor.click()
  anchor.remove()

  message.success('音频下载开始')
}

// 清空所有内容
const clearAll = () => {
  stopPlayback()
  inputText.value = ''
  audioUrl.value = ''
  currentTime.value = 0
  duration.value = 0
  progressValue.value = 0
  generationTime.value = 0
  playCount.value = 0

  // 清空Web Audio API相关缓存
  audioBuffers = []
  isAudioPlaying = false

  // 不设置空的音频源，避免触发错误事件
  if (audioElement.value) {
    audioElement.value.removeAttribute('src')
    audioElement.value.load()
  }
}

// Format a duration in seconds as zero-padded "mm:ss".
// Robustness fix: non-finite input (an <audio> duration is NaN before
// metadata loads) or a negative value previously rendered as "NaN:NaN" /
// negative digits; both now render as "00:00".
const formatTime = (seconds: number): string => {
  if (!Number.isFinite(seconds) || seconds < 0) return '00:00'
  const mins = Math.floor(seconds / 60)
  const secs = Math.floor(seconds % 60)
  return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`
}

// ---- <audio> element event handlers ----

// loadedmetadata: record the track duration (fall back to 0 when unknown).
const onAudioLoaded = () => {
  const el = audioElement.value
  if (el) {
    duration.value = el.duration || 0
  }
}

// timeupdate: mirror the current position and derived progress percentage.
const onTimeUpdate = () => {
  const el = audioElement.value
  if (!el) return
  currentTime.value = el.currentTime
  if (duration.value > 0) {
    progressValue.value = (currentTime.value / duration.value) * 100
  }
}

// ended: rewind the progress UI, clear the playback flags, notify the parent.
const onAudioEnded = () => {
  currentTime.value = 0
  progressValue.value = 0
  isPlaying.value = false
  isPaused.value = false
  emit('stop')
}

// play: mark playback active and notify the parent.
const onAudioPlay = () => {
  isPaused.value = false
  isPlaying.value = true
  emit('play')
}

// pause: flag the paused state and notify the parent.
const onAudioPause = () => {
  isPaused.value = true
  emit('pause')
}

// error: translate the MediaError code into a human-readable message, reset
// playback state, and surface the failure via `error` emit plus a toast.
const onAudioError = (event: Event) => {
  isPlaying.value = false
  isPaused.value = false

  const el = event.target as HTMLAudioElement
  const mediaError = el && el.error ? el.error : null

  let errorDetails = '未知错误'
  if (mediaError) {
    // Map the standard MediaError codes to the same messages as before.
    const byCode: Record<number, string> = {
      [MediaError.MEDIA_ERR_ABORTED]: '音频播放被中止',
      [MediaError.MEDIA_ERR_NETWORK]: '网络错误导致音频下载失败',
      [MediaError.MEDIA_ERR_DECODE]: '音频解码失败',
      [MediaError.MEDIA_ERR_SRC_NOT_SUPPORTED]: '音频格式不支持'
    }
    errorDetails = byCode[mediaError.code] ?? `音频错误代码: ${mediaError.code}`
  }

  console.error('音频播放错误详情:', errorDetails, event)
  const errorMsg = `音频播放失败: ${errorDetails}`
  emit('error', errorMsg)
  message.error(errorMsg)
}

// On mount: wire up voice-list loading for the Web Speech API (mock mode).
onMounted(() => {
  // Only applies when the browser exposes speechSynthesis.
  if ('speechSynthesis' in window) {
    const loadVoices = () => {
      const voices = speechSynthesis.getVoices()
      if (voices.length > 0) {
        // Placeholder: the loaded voice list is not surfaced anywhere yet.
      }
    }

    // getVoices() can be empty until voiceschanged fires; handle both paths.
    speechSynthesis.onvoiceschanged = loadVoices
    loadVoices()
  }
})

// On unmount: stop playback and release connection/audio resources.
// NOTE(review): the Web Audio `audioContext` is not closed here — confirm
// whether it should be released as well.
onUnmounted(() => {
  stopPlayback()

  // Close the streaming WebSocket if still open.
  if (websocket) {
    websocket.close()
    websocket = null
  }

  // Revoke the object URL if one was created.
  if (audioUrl.value && audioUrl.value.startsWith('blob:')) {
    URL.revokeObjectURL(audioUrl.value)
  }

  // Drop raw received chunks.
  audioChunks = []
})

// Methods callable by the parent component through a template ref.
defineExpose({
  generateSpeech,
  togglePlayback,
  stopPlayback,
  clearAll
})
</script>

<style scoped>
.text-to-speech {
  width: 100%;
  max-width: 800px;
  margin: 0 auto;
}

.tts-card {
  border-radius: 16px;
  box-shadow: 0 4px 20px rgba(0, 0, 0, 0.1);
  border: 1px solid #e0e7ff;
}

.tts-card :deep(.ant-card-head) {
  background: linear-gradient(135deg, #f0f4ff 0%, #e0e7ff 100%);
  border-bottom: 1px solid #c7d2fe;
}

.tts-card :deep(.ant-card-head-title) {
  color: #4338ca;
  font-weight: 600;
}

.input-section {
  margin-bottom: 1.5rem;
}

.text-input {
  margin-bottom: 1rem;
  border-radius: 8px;
  border: 2px solid #e5e7eb;
  transition: border-color 0.3s ease;
}

.text-input:focus {
  border-color: #60a5fa;
  box-shadow: 0 0 0 3px rgba(96, 165, 250, 0.1);
}

.voice-settings {
  background: #f8fafc;
  padding: 1rem;
  border-radius: 8px;
  border: 1px solid #e2e8f0;
}

.voice-select {
  width: 100%;
}

.slider-label {
  text-align: center;
  font-size: 0.875rem;
  color: #6b7280;
  margin-top: 0.5rem;
}

.control-section {
  margin: 1.5rem 0;
  text-align: center;
}

.generate-button {
  background: linear-gradient(135deg, #60a5fa 0%, #3b82f6 100%);
  border: none;
  border-radius: 8px;
  height: 40px;
  padding: 0 1.5rem;
  font-weight: 600;
  box-shadow: 0 2px 8px rgba(96, 165, 250, 0.3);
  transition: all 0.3s ease;
}

.generate-button:hover {
  background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%);
  box-shadow: 0 4px 12px rgba(96, 165, 250, 0.4);
  transform: translateY(-1px);
}

.play-button, .stop-button, .download-button, .clear-button {
  border-radius: 8px;
  height: 40px;
  transition: all 0.3s ease;
}

.play-button:hover, .stop-button:hover, .download-button:hover, .clear-button:hover {
  transform: translateY(-1px);
}

.progress-section {
  margin: 1.5rem 0;
  padding: 1rem;
  background: #f8fafc;
  border-radius: 8px;
  border: 1px solid #e2e8f0;
}

.progress-info {
  display: flex;
  justify-content: space-between;
  align-items: center;
  margin-bottom: 0.5rem;
}

.time-display {
  font-family: 'Courier New', monospace;
  font-weight: 600;
  color: #374151;
}

.status-text {
  font-size: 0.875rem;
  font-weight: 500;
}

.status-text.generating {
  color: #f59e0b;
}

.status-text.playing {
  color: #10b981;
}

.status-text.paused {
  color: #6b7280;
}

.status-text.ready {
  color: #3b82f6;
}

.progress-slider {
  margin-top: 0.5rem;
}

.visualizer-section {
  margin: 1.5rem 0;
  display: flex;
  justify-content: center;
}

.audio-visualizer {
  background: linear-gradient(135deg, #1e293b 0%, #334155 100%);
  border-radius: 12px;
  padding: 1rem;
  width: 100%;
  max-width: 400px;
}

.wave-container {
  display: flex;
  justify-content: center;
  align-items: center;
  gap: 3px;
  height: 60px;
}

.wave {
  width: 4px;
  background: linear-gradient(to top, #60a5fa, #a78bfa);
  border-radius: 2px;
  animation: wave 1.5s ease-in-out infinite alternate;
}

@keyframes wave {
  0% {
    height: 10px;
    opacity: 0.5;
  }
  100% {
    height: 50px;
    opacity: 1;
  }
}

.stats-section {
  margin-top: 1.5rem;
  padding: 1rem;
  background: linear-gradient(135deg, #f0f9ff 0%, #e0f2fe 100%);
  border-radius: 8px;
  border: 1px solid #7dd3fc;
}

/* 响应式设计 */
@media (max-width: 768px) {
  .text-to-speech {
    max-width: 100%;
    padding: 0 1rem;
  }

  .control-section .ant-space {
    flex-wrap: wrap;
    justify-content: center;
  }

  .voice-settings .ant-row {
    flex-direction: column;
  }

  .voice-settings .ant-col {
    width: 100% !important;
    margin-bottom: 1rem;
  }

  .wave-container {
    height: 40px;
  }

  .wave {
    width: 3px;
  }
}
</style>
