<template>
  <!-- TTS play button: speaker icon when idle, play/pause icons while active.
       FIX: was `:disabled="!message || isPlaying"`, which disabled the button
       during playback and made the stop branch in handlePlay unreachable. -->
  <a-button
    :type="buttonType"
    :loading="isGenerating"
    :disabled="!message"
    @click="handlePlay"
    :size="size"
    :class="customClass"
  >
    <template #icon>
      <Volume2 v-if="!isGenerating && !isPlaying" :size="iconSize" />
      <Play v-if="isPlaying && isPaused" :size="iconSize" />
      <Pause v-if="isPlaying && !isPaused" :size="iconSize" />
    </template>
    {{ buttonText }}
  </a-button>
</template>

<script setup lang="ts">
import { ref, computed, onUnmounted } from 'vue'
import { message as antMessage } from 'ant-design-vue'
import { Volume2, Play, Pause } from 'lucide-vue-next'
import { useAuthStore } from '@/stores'

// Props accepted by the TTS playback button.
interface Props {
  message: string // text to convert to speech
  wsUrl?: string // WebSocket endpoint for streaming TTS
  voice?: string // voice name — NOTE(review): not referenced anywhere in this file; confirm whether it should be sent to the server
  voiceId?: string // voice ID appended as a `voice_id` query parameter to the TTS WebSocket URL
  size?: 'small' | 'middle' | 'large'
  type?: 'primary' | 'default' | 'dashed' | 'link' | 'text'
  customClass?: string
  showText?: boolean // whether to render the text label next to the icon
  enableMockData?: boolean // when true, use the browser Web Speech API instead of the WebSocket backend
}

// Events emitted over the TTS playback lifecycle.
interface Emits {
  start: [] // playback requested; generation starting
  playing: [] // audio actually started playing
  paused: [] // NOTE(review): declared but never emitted in this file — confirm whether pause support is planned
  stopped: [] // playback stopped via stopPlayback()
  completed: [] // all buffered audio finished playing
  error: [error: string] // generation or playback failed; payload is a human-readable message
}

// Prop defaults — see the Props interface for field meanings.
const props = withDefaults(defineProps<Props>(), {
  wsUrl: 'wss://www.codewin.top/codewin/ai/speech/tts-stream',
  voice: 'zh-CN-XiaoxiaoNeural',
  voiceId: '5',
  size: 'middle',
  type: 'default',
  showText: true,
  enableMockData: false
})

const emit = defineEmits<Emits>()

// Auth store — supplies the token attached to the WebSocket URL.
const authStore = useAuthStore()

// Reactive UI state
const isGenerating = ref(false) // true while waiting for TTS generation
const isPlaying = ref(false) // true while audio is playing
const isPaused = ref(false) // NOTE(review): never set to true anywhere in this file

// WebSocket / Web Audio plumbing (deliberately non-reactive module state)
let websocket: WebSocket | null = null
let audioContext: AudioContext | null = null
let audioBuffers: AudioBuffer[] = [] // decoded PCM chunks queued for playback
let isAudioPlaying = false // guards against overlapping playback passes
const SAMPLE_RATE = 22050 // PCM sample rate assumed for incoming audio — TODO confirm against the server
const MIN_BUFFER_SIZE = 4096 // bytes to accumulate before starting playback

// ---- Derived view state ----

// While audio is actively playing, fall back to the default button type.
const buttonType = computed(() =>
  isPlaying.value && !isPaused.value ? 'default' : props.type
)

// Icon size tracks the button size prop.
const iconSize = computed(() => {
  if (props.size === 'small') return 14
  if (props.size === 'large') return 18
  return 16
})

// Text label shown beside the icon (empty when showText is off).
const buttonText = computed(() => {
  if (!props.showText) return ''
  if (isGenerating.value) return '生成中...'
  if (isPlaying.value && !isPaused.value) return '播放中'
  if (isPaused.value) return '已暂停'
  return '播放'
})

// Lazily create the shared AudioContext (with a webkit fallback) and
// resume it if suspended. Returns true when the context is ready.
const initAudioContext = async (): Promise<boolean> => {
  try {
    if (audioContext === null) {
      const Ctor = window.AudioContext || (window as any).webkitAudioContext
      audioContext = new Ctor({ sampleRate: SAMPLE_RATE })
    }

    if (audioContext.state !== 'running') {
      await audioContext.resume()
    }
    return true
  } catch (err) {
    console.error('音频上下文初始化错误:', err)
    return false
  }
}

// Open an authenticated WebSocket to the TTS endpoint.
// The auth token and voice_id travel as URL query parameters.
// Rejects when no token is available or the connection fails.
const createWebSocketConnection = (): Promise<WebSocket> => {
  return new Promise((resolve, reject) => {
    // Voice features require an authenticated session.
    if (!authStore.token) {
      reject(new Error('未登录，请先登录后再使用语音功能'))
      return
    }

    // withDefaults guarantees wsUrl is a string, so no non-null assertion needed.
    const url = new URL(props.wsUrl)
    url.searchParams.append('token', authStore.token)
    url.searchParams.append('voice_id', props.voiceId)

    const ws = new WebSocket(url.toString())
    ws.binaryType = 'blob' // server streams raw PCM chunks as binary Blobs

    ws.onopen = () => {
      resolve(ws)
    }

    // The error event carries no useful detail; reject with a fixed message.
    ws.onerror = () => {
      reject(new Error('WebSocket连接失败'))
    }

    ws.onclose = (event) => {
      websocket = null
      // 1008 (policy violation) / 4001 (app-specific): server rejected auth.
      if (event.code === 1008 || event.code === 4001) {
        antMessage.error('认证失败，请重新登录')
      }
    }
  })
}

// Decode one binary WebSocket frame (assumed 16-bit mono PCM — TODO confirm
// against the server protocol) into an AudioBuffer and queue it; starts
// playback once MIN_BUFFER_SIZE bytes are buffered and nothing is playing.
// FIX: the original used FileReader with an onload callback, so the returned
// promise resolved before the chunk was actually processed and the caller's
// `await processAudioBlob(...)` awaited nothing. Awaiting blob.arrayBuffer()
// makes the await meaningful and routes all errors through one catch.
const processAudioBlob = async (blob: Blob) => {
  try {
    const arrayBuffer = await blob.arrayBuffer()
    if (arrayBuffer.byteLength === 0) return

    if (!audioContext || audioContext.state === 'closed') return
    const ctx = audioContext as AudioContext

    // Convert 16-bit signed PCM to normalized Float32 samples in [-1, 1).
    const pcm16 = new Int16Array(arrayBuffer)
    const pcm32 = new Float32Array(pcm16.length)
    for (let i = 0; i < pcm16.length; i++) {
      pcm32[i] = (pcm16[i] || 0) / 32768.0
    }

    // Wrap the samples in a single-channel AudioBuffer and queue it.
    const audioBuf = ctx.createBuffer(1, pcm32.length, SAMPLE_RATE)
    audioBuf.getChannelData(0).set(pcm32)
    audioBuffers.push(audioBuf)

    // Kick off playback once enough audio is cached and nothing is playing.
    if (!isAudioPlaying && getTotalBufferSize() >= MIN_BUFFER_SIZE) {
      await playCachedAudio()
    }
  } catch (e) {
    console.error('处理音频数据错误:', e)
  }
}

// Drain the queued AudioBuffers: merge them into one contiguous buffer and
// play it. Chunks that arrive during playback are picked up by the onended
// callback, which recurses until the queue is empty, then emits 'completed'.
const playCachedAudio = async (): Promise<void> => {
  // Bail out if already playing, nothing is queued, or the context is unusable.
  if (isAudioPlaying || audioBuffers.length === 0 || !audioContext || audioContext.state === 'closed') {
    return
  }

  const ctx = audioContext as AudioContext

  try {
    isAudioPlaying = true
    isPlaying.value = true
    isPaused.value = false
    emit('playing')

    // Take ownership of the current queue; chunks arriving from now on
    // accumulate in the (reset) audioBuffers array for the next pass.
    const tempBuffers = [...audioBuffers]
    audioBuffers = []

    // Concatenate all taken chunks into one mono buffer.
    const totalSamples = tempBuffers.reduce((sum, buf) => sum + buf.length, 0)
    const mergedBuf = ctx.createBuffer(1, totalSamples, SAMPLE_RATE)
    const mergedData = mergedBuf.getChannelData(0)

    let writeOffset = 0
    for (const buf of tempBuffers) {
      mergedData.set(buf.getChannelData(0), writeOffset)
      writeOffset += buf.length
    }

    // Play the merged buffer through the default output.
    // NOTE(review): the source node is not stored anywhere, so stopPlayback()
    // cannot interrupt it once started — confirm whether that is intended.
    const source = ctx.createBufferSource()
    source.buffer = mergedBuf
    source.connect(ctx.destination)

    source.onended = () => {
      isAudioPlaying = false

      if (audioBuffers.length > 0) {
        // More chunks arrived while playing — continue (fire-and-forget).
        playCachedAudio()
      } else {
        isPlaying.value = false
        isPaused.value = false
        emit('completed')
      }
    }

    source.start(0)
  } catch (e) {
    console.error('播放音频异常:', e)
    isAudioPlaying = false
    isPlaying.value = false
    isPaused.value = false
    emit('error', '播放失败')
  }
}

// Total queued audio size in bytes (Float32 samples: 4 bytes each).
const getTotalBufferSize = (): number => {
  let totalSamples = 0
  for (const buf of audioBuffers) {
    totalSamples += buf.length
  }
  return totalSamples * 4
}

// Run TTS for `text`: either via the browser Web Speech API (mock mode)
// or by streaming PCM chunks over the authenticated WebSocket.
// Resolves once all audio has finished playing; rejects on failure.
const callTTSAPI = async (text: string): Promise<void> => {
  if (props.enableMockData) {
    // Simulate generation latency (1–3 s).
    await new Promise(resolve => setTimeout(resolve, 1000 + Math.random() * 2000))

    // Fall back to the built-in Web Speech API when available.
    if ('speechSynthesis' in window) {
      return new Promise<void>((resolve, reject) => {
        try {
          const utterance = new SpeechSynthesisUtterance(text)

          // Prefer an installed Chinese voice when one exists.
          const voices = speechSynthesis.getVoices()
          const selectedVoiceObj = voices.find(v =>
            v.name.includes('Chinese') || v.lang.includes('zh')
          )
          if (selectedVoiceObj) {
            utterance.voice = selectedVoiceObj
          }

          utterance.onstart = () => {
            isPlaying.value = true
            emit('playing')
          }

          utterance.onend = () => {
            isPlaying.value = false
            emit('completed')
            resolve()
          }

          utterance.onerror = (event) => {
            // FIX: reset playback state so the button is not stuck on
            // "playing" when synthesis fails mid-utterance.
            isPlaying.value = false
            reject(new Error(`语音合成错误: ${event.error}`))
          }

          speechSynthesis.speak(utterance)
        } catch (error) {
          reject(error)
        }
      })
    }
    return Promise.resolve()
  }

  // --- WebSocket streaming path ---
  // (The original wrapped this in `try { ... } catch (e) { throw e }`,
  // which is a no-op; removed.)

  // Reuse an open connection, otherwise establish a new one.
  if (!websocket || websocket.readyState !== WebSocket.OPEN) {
    websocket = await createWebSocketConnection()
  }

  const audioReady = await initAudioContext()
  if (!audioReady) {
    throw new Error('音频初始化失败')
  }

  // Discard any audio left over from a previous request.
  audioBuffers = []

  return new Promise<void>((resolve, reject) => {
    websocket!.onmessage = async (event) => {
      try {
        if (event.data instanceof Blob) {
          // Binary frame: one chunk of PCM audio.
          await processAudioBlob(event.data)
        } else if (typeof event.data === 'string') {
          // Text frame: JSON control message.
          const data = JSON.parse(event.data)

          if (data.status === 'complete') {
            // Flush whatever is still buffered.
            if (!isAudioPlaying && audioBuffers.length > 0) {
              await playCachedAudio()
            }

            // Poll until playback has fully drained, then resolve.
            const checkComplete = () => {
              if (!isAudioPlaying && audioBuffers.length === 0) {
                resolve()
              } else {
                setTimeout(checkComplete, 100)
              }
            }
            checkComplete()
          } else if (data.error) {
            reject(new Error(data.error))
          }
        }
      } catch (error) {
        reject(new Error('解析响应数据失败'))
      }
    }

    // Server expects the text as a JSON-encoded string frame.
    websocket!.send(JSON.stringify(text))
  })
}

// Click handler: toggles between starting TTS and stopping active playback.
const handlePlay = async () => {
  const text = props.message?.trim()
  if (!text) {
    antMessage.warning('没有可播放的文本内容')
    return
  }

  // Active (un-paused) playback: treat the click as a stop request.
  if (isPlaying.value && !isPaused.value) {
    stopPlayback()
    return
  }

  try {
    isGenerating.value = true
    emit('start')
    await callTTSAPI(text)
  } catch (error) {
    const errorMsg = error instanceof Error ? error.message : '语音播放失败'
    emit('error', errorMsg)
    antMessage.error(errorMsg)
  } finally {
    isGenerating.value = false
  }
}

// Stop playback and reset all playback state, then emit 'stopped'.
// NOTE(review): in the WebSocket path the currently playing
// AudioBufferSourceNode is never referenced here, so audio that has already
// started keeps playing to the end of its merged buffer; only queued
// (not-yet-played) chunks are dropped — confirm whether the active source
// should be stopped as well.
const stopPlayback = () => {
  // Cancel Web Speech synthesis when running in mock mode.
  if ('speechSynthesis' in window && props.enableMockData) {
    speechSynthesis.cancel()
  }

  isPlaying.value = false
  isPaused.value = false
  isAudioPlaying = false
  audioBuffers = []
  emit('stopped')
}

// Release resources when the component is torn down:
// stop playback, close the WebSocket, and dispose the AudioContext.
onUnmounted(() => {
  stopPlayback()

  if (websocket) {
    websocket.close()
    websocket = null
  }

  if (audioContext) {
    // close() returns a promise; fire-and-forget is fine during teardown.
    audioContext.close()
    audioContext = null
  }
})
</script>

<style scoped>
/* Add custom styles here as needed */
</style>
