import { useEffect, useRef, useState } from 'react'
import { SixteenKMonoRecorder, bytesToBase64 } from '../utils/audioRecorder'
import { recognizeShortSpeech, BAIDU_DIALECTS } from '../utils/baiduAsr'
import { callDeepseek } from '../utils/deepseek'
import { synthesizeYoudao } from '../utils/youdaoTts'


type Props = {
  accessToken: string // Baidu ASR access token, forwarded to recognizeShortSpeech
  cuid?: string // client id sent to Baidu ASR (defaults to 'web-client')
  onTranscript?: (text: string) => void // fired with the raw recognized text
  onActionCommand?: (actionName: string) => void // fired with the matched action key (e.g. 'dance') when speech matches an action command
  showModelSelector?: boolean // whether to render the model/dialect dropdowns
  showVoiceButton?: boolean // whether to render the record toggle button
  dialect?: keyof typeof BAIDU_DIALECTS // ASR dialect key (defaults to 'MANDARIN')
  onDialectChange?: (dialect: keyof typeof BAIDU_DIALECTS) => void // callback when the user picks another dialect
}

// Available DeepSeek model options for the selector dropdown.
// `value` is passed verbatim as the model id to callDeepseek;
// `label` is the human-readable text shown in the <select>.
const DEEPSEEK_MODELS = [
  { value: 'deepseek-chat', label: 'DeepSeek Chat (实时)' },
  { value: 'deepseek-reasoner', label: 'DeepSeek Reasoner (实时)' },
  { value: 'deepseek-r1-250120', label: 'DeepSeek R1 (推理模型)' }
]

// Voice assistant widget: records mic audio, sends it to Baidu ASR, then either
// dispatches a matched robot-action command or forwards the text to DeepSeek
// and plays the reply via Youdao TTS.
function SpeechRecognizer({ accessToken, cuid = 'web-client', onTranscript, onActionCommand, showModelSelector = true, showVoiceButton = true, dialect = 'MANDARIN', onDialectChange }: Props) {
  const [recording, setRecording] = useState(false)
  const [subtitle, setSubtitle] = useState('')
  const [busy, setBusy] = useState(false)
  const [llmReply, setLlmReply] = useState('')
  const [selectedModel, setSelectedModel] = useState('deepseek-chat') // default: the Chat model
  
  // Debug: log the currently selected model.
  // NOTE(review): this runs on every render — consider removing or gating in production.
  console.log('🎯 当前选择的模型:', selectedModel)
  
  // Debug: log the available model options (also runs every render)
  console.log('📋 可用模型选项:', DEEPSEEK_MODELS)
  const audioRef = useRef<HTMLAudioElement | null>(null) // shared <audio> element, lazily created in the TTS path
  const recorderRef = useRef<SixteenKMonoRecorder | null>(null) // active recorder instance
  const allPcmRef = useRef<Uint8Array[]>([]) // buffered 16 kHz mono PCM chunks for one recording session

  // Pattern tables for voice action commands: action key -> list of Chinese
  // phrases/characters that trigger it (matched by substring containment in
  // detectActionCommand, and fuzzily in fuzzyMatchAction).
  // NOTE(review): single-character patterns such as '站', '下', '等' match
  // very broadly and can misfire on ordinary speech; a few entries are
  // duplicated ('表演' appears twice in dance, '跪' twice in guixia).
  const actionPatterns = {
    'zhanli': [
      '站立', '站好', '立正', '站直', '保持站立', '不要动', '保持姿势',
      '站', '立', '直', '正'
    ],
    'daiji': [
      '待机', '休息', '等待', '放松', '歇会', '待着', '不动',
      '等', '歇', '静'
    ],
    'zhanli2': [
      '站立2', '站姿2', '另一种站立', '换个姿势',
      '站2', '姿2'
    ],
    'dance': [
      '跳舞', '舞蹈', '表演', '来段舞蹈', '跳个舞', '动起来', '跳起来',
      '舞', '跳', '表演', '动'
    ],
    'flair': [
      '街舞', '街舞表演', '来段街舞', '街舞动作', '炫酷舞蹈',
      '街', '炫', '酷'
    ],
    'guixia': [
      '跪下', '跪', '下跪', '跪地', '跪拜',
      '跪', '下'
    ],
    'smoking': [
      '吸烟', '抽烟', '吸烟动作', '抽烟动作',
      '烟', '抽'
    ]
  }

  // Action key -> display name, used to build the spoken confirmation feedback
  const actionNames = {
    'zhanli': '站立',
    'daiji': '待机',
    'zhanli2': '站立姿势2',
    'dance': '跳舞',
    'flair': '街舞',
    'guixia': '跪下',
    'smoking': '吸烟'
  }

  // 检测语音指令是否为动作命令
  const detectActionCommand = (text: string): string | null => {
    const cleanText = text.toLowerCase().trim()
    
    // 遍历所有动作模式
    for (const [actionName, patterns] of Object.entries(actionPatterns)) {
      for (const pattern of patterns) {
        // 精确匹配
        if (cleanText === pattern.toLowerCase()) {
          return actionName
        }
        // 包含匹配
        if (cleanText.includes(pattern.toLowerCase())) {
          return actionName
        }
      }
    }
    
    return null
  }

  // Normalized text similarity in [0, 1] based on Levenshtein edit distance:
  // 1 means identical, 0 means entirely different.
  // Fix over the original: two empty strings now return 1 instead of NaN
  // (the original computed `1 - 0 / Math.max(0, 0)`).
  const calculateSimilarity = (text1: string, text2: string): number => {
    const len1 = text1.length
    const len2 = text2.length
    if (len1 === 0 && len2 === 0) return 1 // identical empty strings; avoids 0/0 -> NaN

    // matrix[j][i] = edit distance between text1[0..i) and text2[0..j)
    const matrix: number[][] = Array.from({ length: len2 + 1 }, () => Array(len1 + 1).fill(0))

    for (let i = 0; i <= len1; i++) matrix[0][i] = i
    for (let j = 0; j <= len2; j++) matrix[j][0] = j

    for (let j = 1; j <= len2; j++) {
      for (let i = 1; i <= len1; i++) {
        const cost = text1[i - 1] === text2[j - 1] ? 0 : 1
        matrix[j][i] = Math.min(
          matrix[j][i - 1] + 1,       // deletion
          matrix[j - 1][i] + 1,       // insertion
          matrix[j - 1][i - 1] + cost // substitution
        )
      }
    }

    const distance = matrix[len2][len1]
    return 1 - distance / Math.max(len1, len2)
  }

  // 模糊匹配动作指令
  const fuzzyMatchAction = (text: string): string | null => {
    const cleanText = text.toLowerCase().trim()
    let bestMatch = ''
    let bestScore = 0.6 // 最低相似度阈值
    
    for (const [actionName, patterns] of Object.entries(actionPatterns)) {
      for (const pattern of patterns) {
        const similarity = calculateSimilarity(cleanText, pattern.toLowerCase())
        if (similarity > bestScore) {
          bestScore = similarity
          bestMatch = actionName
        }
      }
    }
    
    return bestMatch || null
  }

  // On unmount, stop any in-flight recording so the microphone is released.
  useEffect(() => {
    return () => {
      recorderRef.current?.stop()
    }
  }, [])

  // Begin capturing 16 kHz mono PCM in 3-second chunks. Chunks are only
  // buffered into allPcmRef here; nothing is sent to the recognizer until
  // handleStop().
  // Fix over the original: if the recorder fails to start (e.g. microphone
  // permission denied), the recording state is reset instead of leaving the
  // UI stuck in the "recording" state with no active recorder.
  async function handleStart() {
    if (recording) return
    const recorder = new SixteenKMonoRecorder({ chunkDurationMs: 3000, targetSampleRate: 16000 })
    recorderRef.current = recorder
    setRecording(true)
    allPcmRef.current = []

    try {
      await recorder.start(async ({ rawPcmBytes }) => {
        // Only buffer; do not send during recording
        allPcmRef.current.push(rawPcmBytes)
      })
    } catch (err) {
      console.error('Recorder start failed:', err)
      setRecording(false)
      recorderRef.current = null
    }
  }

  // Stop recording and run the full pipeline on the buffered audio:
  // concatenate PCM -> Baidu short-speech ASR -> either dispatch a matched
  // action command (with spoken confirmation) or send the text to DeepSeek
  // and play the reply via Youdao TTS. Clears the PCM buffer when done.
  async function handleStop() {
    if (!recording) return
    await recorderRef.current?.stop()
    setRecording(false)
    setBusy(true)
    try {
      // Concatenate all PCM chunks into one contiguous buffer
      const totalLen = allPcmRef.current.reduce((acc, b) => acc + b.length, 0)
      const merged = new Uint8Array(totalLen)
      let offset = 0
      for (const b of allPcmRef.current) { merged.set(b, offset); offset += b.length }

      const base64 = bytesToBase64(merged)
      const res = await recognizeShortSpeech({
        accessToken,
        cuid,
        format: 'pcm',
        sampleRate: 16000,
        speechBase64: base64,
        rawBytesLength: merged.length,
        devPid: BAIDU_DIALECTS[dialect], // use the selected dialect
      })
      setSubtitle(res.text)
      if (onTranscript) onTranscript(res.text)

      // Before calling DeepSeek, check whether the utterance is an action command
      if (res.text) {
        console.log('🎤 识别到语音:', res.text)
        
        // Exact/substring action-command detection
        let actionCommand = detectActionCommand(res.text)
        
        // Fall back to fuzzy (edit-distance) matching when no direct hit
        if (!actionCommand) {
          actionCommand = fuzzyMatchAction(res.text)
        }
        
        if (actionCommand) {
          console.log('🎭 检测到动作指令:', actionCommand)
          
          // Dispatch the action to the parent component
          if (onActionCommand) {
            onActionCommand(actionCommand)
          }
          
          // Build the spoken confirmation text
          const actionName = actionNames[actionCommand as keyof typeof actionNames]
          const feedbackText = `好的，我来${actionName}`
          console.log('🔊 语音反馈:', feedbackText)
          
          try {
            const appId = (import.meta as unknown as { env: Record<string,string> }).env.VITE_TTS_APP_ID
            const appSecret = (import.meta as unknown as { env: Record<string,string> }).env.VITE_TTS_APP_SECRET
            if (appId && appSecret) {
              const audioData = await synthesizeYoudao({
                text: feedbackText,
                appKey: appId,
                appSecret,
              })
              if (audioData) {
                const audioBlob = new Blob([audioData], { type: 'audio/mp3' })
                const audioUrl = URL.createObjectURL(audioBlob)
                // NOTE(review): audioRef.current is only ever created in the
                // DeepSeek TTS path below (`new Audio()`), so on the first
                // action command it is still null and this feedback silently
                // does not play. Also the object URL created above is never
                // revoked in this branch — verify and fix both.
                if (audioRef.current) {
                  audioRef.current.src = audioUrl
                  audioRef.current.play()
                }
              }
            }
          } catch (error) {
            console.error('语音反馈失败:', error)
          }
          
          // Show the confirmation text as the subtitle
          setSubtitle(feedbackText)
          
          // Action commands skip the AI chat entirely
          return
        }
      }

      const dsToken = (import.meta as unknown as { env: Record<string,string> }).env.VITE_DEEPSEEK_APITOKEN
      if (dsToken && res.text) {
        try {
          const reply = await callDeepseek({
            apiKey: dsToken,
            model: selectedModel,
            messages: [
              { role: 'system', content: '你是一个有帮助的助理，用中文回答。' },
              { role: 'user', content: res.text },
            ],
            appId: (import.meta as unknown as { env: Record<string,string> }).env.VITE_DEEPSEEK_APP_ID,
            appSecret: (import.meta as unknown as { env: Record<string,string> }).env.VITE_DEEPSEEK_API_KEY,
          })
          console.log('🤖 DeepSeek回复:', reply)
          setLlmReply(reply)

          // TTS: synthesize DeepSeek reply and play
          try {
            const appId = (import.meta as unknown as { env: Record<string,string> }).env.VITE_TTS_APP_ID
            const appSecret = (import.meta as unknown as { env: Record<string,string> }).env.VITE_TTS_APP_SECRET
            if (appId && appSecret) {
              const blob = await synthesizeYoudao({
                text: reply,
                appKey: appId,
                appSecret,
                voiceName: 'youxiaozhi',
                format: 'mp3',
              })
              
              // Simple playback: reuse a single shared <audio> element
              const url = URL.createObjectURL(blob)
              // Revoke the previous source first (only when it is a blob: URL)
              if (audioRef.current && audioRef.current.src && audioRef.current.src.startsWith('blob:')) {
                try { URL.revokeObjectURL(audioRef.current.src) } catch (e) {
                  console.warn('Failed to revoke URL:', e)
                }
              }
              
              if (!audioRef.current) {
                audioRef.current = new Audio()
              }
              const el = audioRef.current
              el.pause()
              el.src = url
              el.crossOrigin = 'anonymous'
              el.muted = false
              el.volume = 1.0

              // Playback: play immediately if already playable, otherwise wait for canplay
              const tryPlay = () => {
                el.currentTime = 0.03 // skip the first ~30 ms — presumably to avoid a start click; TODO confirm
                el.play().then(() => {
                }).catch(err => {
                  console.error('音频播放失败:', err)
                })
              }
              if (el.readyState >= 2) {
                tryPlay()
              } else {
                el.addEventListener('canplay', tryPlay, { once: true })
              }
              // Release the current blob URL once playback has finished
              el.addEventListener('ended', () => {
                if (el.src && el.src.startsWith('blob:')) {
                  try { URL.revokeObjectURL(el.src) } catch (e) {
                    console.warn('Failed to revoke URL:', e)
                  }
                }
              }, { once: true })

              // Drive the avatar mouth directly from the Blob (avoids conflicts
              // from binding the same <audio> element twice)
              try {
                const drive = (window as unknown as { driveMouthFromBlob?: (blob: Blob) => void }).driveMouthFromBlob
                if (typeof drive === 'function') {
                  drive(blob)
                }
              } catch (e) {
                console.warn('drive mouth failed', e)
              }
            }
          } catch (e) {
            console.error('TTS error', e)
          }
        } catch (e) {
          console.error('DeepSeek error', e)
          setLlmReply('（DeepSeek调用失败）')
        }
      }
    } catch (err) {
      console.error('ASR error', err)
    } finally {
      setBusy(false)
      allPcmRef.current = []
    }
  }

  // Single entry point for the round record button:
  // stop when currently recording, start otherwise.
  async function handleToggle() {
    return recording ? handleStop() : handleStart()
  }

  // Button label/state: 开始 (start) when idle, 结束 (stop) while recording,
  // 处理中 (processing) while the ASR/LLM pipeline is busy.
  const circleLabel = recording ? (busy ? '处理中' : '结束') : '开始'
  const circleStateClass = recording ? (busy ? 'busy' : 'recording') : 'idle'

  return (
    <div className="sr-container" style={{ pointerEvents: 'auto' }}>
      {/* Model selection dropdown */}
      {showModelSelector && (
        <div className="model-selector" style={{ 
          marginBottom: '0px',
          padding: '8px',
          border: '1px solid rgba(255, 255, 255, 0.3)',
          borderRadius: '6px',
          backgroundColor: 'rgba(255, 255, 255, 0.1)',
          position: 'relative',
          zIndex: 1000,
          pointerEvents: 'auto'
        }}>
          <label htmlFor="model-select" style={{ 
            display: 'block', 
            marginBottom: '5px', 
            fontSize: '12px', 
            color: '#ffffff',
            fontWeight: 'bold'
          }}>
            选择AI模型:
          </label>
          <select
            id="model-select"
            value={selectedModel}
            onChange={(e) => {
              console.log('🔄 模型切换:', e.target.value)
              setSelectedModel(e.target.value)
            }}
            onMouseDown={(e) => {
              console.log('🖱️ 下拉框被按下')
              e.stopPropagation()
            }}
            onFocus={(e) => {
              console.log('🎯 下拉框获得焦点')
              e.stopPropagation()
            }}
            style={{
              width: '100%',
              padding: '6px',
              border: '1px solid rgba(255, 255, 255, 0.3)',
              borderRadius: '4px',
              fontSize: '12px',
              backgroundColor: 'rgba(255, 255, 255, 0.9)',
              color: '#333',
              zIndex: 1001,
              position: 'relative',
              cursor: 'pointer',
              outline: 'none',
              pointerEvents: 'auto',
              userSelect: 'none',
              WebkitAppearance: 'menulist',
              MozAppearance: 'menulist',
              appearance: 'menulist'
            }}
          >
            {DEEPSEEK_MODELS.map((model) => (
              <option key={model.value} value={model.value}>
                {model.label}
              </option>
            ))}
          </select>
          
          {/* Dialect selector — controlled by the parent via dialect/onDialectChange */}
          <label htmlFor="dialect-select" style={{ 
            display: 'block', 
            marginTop: '8px',
            marginBottom: '5px', 
            fontSize: '12px', 
            color: '#ffffff',
            fontWeight: 'bold'
          }}>
            选择方言:
          </label>
          <select
            id="dialect-select"
            value={dialect}
            onChange={(e) => {
              console.log('🔄 方言切换:', e.target.value)
              if (onDialectChange) {
                onDialectChange(e.target.value as keyof typeof BAIDU_DIALECTS)
              }
            }}
            style={{
              width: '100%',
              padding: '6px',
              border: '1px solid rgba(255, 255, 255, 0.3)',
              borderRadius: '4px',
              fontSize: '12px',
              backgroundColor: 'rgba(255, 255, 255, 0.9)',
              color: '#333',
              zIndex: 1001,
              position: 'relative',
              cursor: 'pointer',
              outline: 'none',
              pointerEvents: 'auto',
              userSelect: 'none',
              WebkitAppearance: 'menulist',
              MozAppearance: 'menulist',
              appearance: 'menulist'
            }}
          >
            <option value="MANDARIN">普通话</option>
            <option value="CANTONESE">粤语</option>
            <option value="SICHUAN">四川话</option>
            <option value="ENGLISH">英语</option>
          </select>
        </div>
      )}
      
      {showVoiceButton && (
        <button
          className={`sr-circle ${circleStateClass}`}
          onClick={handleToggle}
          aria-pressed={recording}
          aria-label={circleLabel}
        >
          {circleLabel}
        </button>
      )}
      
      {/* Stacked panels: recognized subtitle on top, DeepSeek reply below.
          The stack container keeps them from overlapping. */}
      <div className="sr-stack">
        {!!subtitle && <div className="sr-panel">{subtitle}</div>}
        {!!llmReply && <div className="sr-panel">{llmReply}</div>}
      </div>
    </div>
  )
}

export default SpeechRecognizer
