import { useFrame } from '@react-three/fiber'
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader'
import { FBXLoader } from 'three/examples/jsm/loaders/FBXLoader'
import { useEffect, useMemo, useRef, useState } from 'react'
import { Canvas, useThree, extend } from '@react-three/fiber'
import SRControl from './SpeechRecognition'
import useTextToSpeech from './TextToSpeech'
// 已移除高级口型依赖，专注纯前端能量驱动
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls'
import { Box3, Vector3, Group } from 'three'
import * as THREE from 'three'
import * as SkeletonUtils from 'three/examples/jsm/utils/SkeletonUtils'
import { useTranslation } from 'react-i18next'
import { hasPermission, Permission } from '../utils/auth'
extend({ OrbitControls })

// Thin declarative wrapper around three.js OrbitControls for use inside a
// <Canvas>. Prop values are pushed imperatively onto the controls instance
// whenever any of them change; undefined props leave the current limits as-is.
const Orbit = ({
  target = new Vector3(0, 0, 0),
  minDistance,
  maxDistance,
  minPolarAngle,
  maxPolarAngle,
  enablePan
}: {
  target?: THREE.Vector3
  minDistance?: number
  maxDistance?: number
  minPolarAngle?: number
  maxPolarAngle?: number
  enablePan?: boolean
}) => {
  const { gl, camera } = useThree()
  // Minimal structural type covering only the OrbitControls members we touch.
  type OrbitRef = {
    target: THREE.Vector3
    minDistance: number
    maxDistance: number
    minPolarAngle: number
    maxPolarAngle: number
    enablePan: boolean
    update: () => void
  } | null
  const ref = useRef<OrbitRef>(null)
  useEffect(() => {
    const controls = ref.current
    if (controls === null) return
    controls.target.copy(target)
    if (typeof minDistance === 'number') controls.minDistance = minDistance
    if (typeof maxDistance === 'number') controls.maxDistance = maxDistance
    if (typeof minPolarAngle === 'number') controls.minPolarAngle = minPolarAngle
    if (typeof maxPolarAngle === 'number') controls.maxPolarAngle = maxPolarAngle
    if (typeof enablePan === 'boolean') controls.enablePan = enablePan
    controls.update()
  }, [target, minDistance, maxDistance, minPolarAngle, maxPolarAngle, enablePan])
  return <orbitControls ref={ref} args={[camera, gl.domElement]} />
}

// Advances the shared AnimationMixer once per rendered frame.
// Renders nothing; it must be mounted inside <Canvas> for useFrame to work.
const MixerUpdater = ({ mixerRef }: { mixerRef: React.MutableRefObject<THREE.AnimationMixer | null> }) => {
  useFrame((_, delta) => {
    const mixer = mixerRef.current
    if (mixer !== null) mixer.update(delta)
  })
  return null
}

// Props for the MySelf digital-human component.
interface MySelfProps {
  theme?: 'dark' | 'red';  // visual theme for panels/bubbles; defaults to 'dark'
}

function MySelf({ theme = 'dark' }: MySelfProps) {
  const { t } = useTranslation()
  const mySelfRef = useRef<Group | null>(null)
  const mixerRef = useRef<THREE.AnimationMixer | null>(null)
  const actionsRef = useRef<Record<string, THREE.AnimationAction>>({})
  const currentActionRef = useRef<THREE.AnimationAction | null>(null)
  const [isPlaying, setIsPlaying] = useState(false)
  const [availableActions, setAvailableActions] = useState<string[]>([])
  const [lastRecognized, setLastRecognized] = useState<string>("")
  const [aiReply, setAiReply] = useState<string>("")
  const [thinking, setThinking] = useState<boolean>(false)
  const [selectedAction, setSelectedAction] = useState<string>("")
  const [isLoading, setIsLoading] = useState(true)
  const [modelCenter, setModelCenter] = useState<THREE.Vector3>(new Vector3(0, 0, 0))
  const [modelRadius, setModelRadius] = useState<number>(1)
  const [morphNames, setMorphNames] = useState<string[]>([])
  const faceMeshesRef = useRef<THREE.Mesh[]>([])
  const jawBoneRef = useRef<THREE.Bone | null>(null)

  const lipsyncRafRef = useRef<number | null>(null)
  const audioCtxRef = useRef<AudioContext | null>(null)
  const analyserRef = useRef<AnalyserNode | null>(null)
  const sourceRef = useRef<MediaElementAudioSourceNode | null>(null)
  const freqDataRef = useRef<Uint8Array | null>(null)
  const lastEnergyRef = useRef<number>(0)
  const mouthOpenRef = useRef<boolean>(false)
  const lastAboveGateAtRef = useRef<number>(0)
  const closedHoldUntilRef = useRef<number>(0)
  const pulsingRef = useRef<boolean>(false)
  const pulsingValueRef = useRef<number>(0)
  const delayBufferRef = useRef<Array<{ t: number; v: number }>>([])
  const lipDelayMsRef = useRef<number>(60) // 降低延迟，显著提升“开-合-开”速度
  // 平滑/缓冲需要的引用（缺失会导致未定义）
  const lastAppliedRef = useRef<number>(0)
  const smoothBufRef = useRef<number[]>([])
  const onsetSinceRef = useRef<number>(0) // 高于开口阈值的开始时间
  const onsetArmedRef = useRef<boolean>(false) // 满足起音判定
  const openUnlockStartRef = useRef<number>(0) // 开口上限解锁起点
  // 长句防“干张嘴”
  const highOpenSinceRef = useRef<number>(0) // 持续高开口起点
  const microCloseUntilRef = useRef<number>(0) // 微闭合结束时间
  const forceCloseUntilRef = useRef<number>(0)
  const microCloseStartRef = useRef<number>(0) // 微闭合开始时间
  const tempCapUntilRef = useRef<number>(0) // 临时上限生效截止
  const patternActiveRef = useRef<boolean>(false)
  const shortCloseUntilRef = useRef<number>(0) // 短促闭合（模拟“一字一闭合”）
  const lastCloseAtRef = useRef<number>(0) // 最近一次真正闭口的时间
  const [ttsStatus, setTtsStatus] = useState<'idle' | 'synthesizing' | 'playing' | 'error'>('idle')

  const tts = useTextToSpeech({ onStatusChange: (s) => setTtsStatus(s) })
  
  // 将 morphTargets 输入固定，避免每次渲染新建数组导致子 Hook 触发
  const morphTargetsForLipsync = useMemo(() => {
    return morphNames.map(name => ({ name, influence: 0 }))
  }, [morphNames])

  // 不使用高级口型系统
  
  // 检查用户权限
  const canControl = hasPermission(Permission.ANIMATION_CONTROL)
  const canVoiceChat = hasPermission(Permission.VOICE_CHAT)

  // 气泡基础样式（更清晰的布局与可读性）
  const bubbleBase: React.CSSProperties = {
    display: 'flex',
    alignItems: 'center',
    gap: 8,
    padding: '10px 14px',
    borderRadius: 16,
    boxShadow: '0 6px 18px rgba(0,0,0,0.28)',
    maxWidth: 420,
    backdropFilter: 'blur(4px)',
    border: '1px solid rgba(255,255,255,0.08)'
  }
  const bubbleText: React.CSSProperties = {
    fontSize: 15,
    lineHeight: '22px',
    color: '#fff',
    display: '-webkit-box',
    WebkitBoxOrient: 'vertical' as const,
    WebkitLineClamp: 2 as const,
    overflow: 'hidden'
  }
  // Model loading state. A failed external load is recorded and replaced with
  // an empty default model so one bad asset never breaks the whole page.
  const [model, setModel] = useState<{ scene: THREE.Group; animations: THREE.AnimationClip[] } | null>(null)
  const [modelError, setModelError] = useState<string | null>(null)
  
  // Loads the Ready Player Me avatar once on mount.
  useEffect(() => {
    const loadModel = async () => {
      try {
        const loader = new GLTFLoader()
        const loadedModel = await loader.loadAsync("https://models.readyplayer.me/68c0c1314b2306b86e9c3f8d.glb")
        setModel(loadedModel)
        setIsLoading(false)
      } catch (error) {
        console.warn('外部模型加载失败，使用默认模型:', error)
        setModelError('外部模型加载失败')
        // Fall back to a minimal placeholder: empty group, no animations.
        const defaultModel = {
          scene: new THREE.Group(),
          animations: []
        }
        setModel(defaultModel)
        setIsLoading(false)
      }
    }
    
    loadModel()
  }, [])

  // Collects blendshape/morph-target names from the loaded model, caches every
  // mesh that carries morph data, and remembers the first jaw/chin/mouth bone
  // so lip sync can drive both blendshapes and jaw rotation.
  useEffect(() => {
    if (!model) return
    const names = new Set<string>()
    faceMeshesRef.current = []
    model.scene.traverse((obj) => {
      const mesh = obj as THREE.Mesh & { morphTargetDictionary?: Record<string, number>, morphTargetInfluences?: number[] }
      if (mesh.morphTargetDictionary && mesh.morphTargetInfluences) {
        faceMeshesRef.current.push(mesh)
        Object.keys(mesh.morphTargetDictionary).forEach(n => names.add(n))
      }
      // Pick the first bone whose name suggests a jaw / chin / mouth bone.
      if (!jawBoneRef.current && (obj as THREE.Bone).isBone) {
        const bone = obj as THREE.Bone & { name?: string }
        const n = (bone.name || '').toLowerCase()
        if (/jaw|chin|mouth/.test(n)) {
          jawBoneRef.current = bone
        }
      }
    })
    const list = Array.from(names)
    setMorphNames(list)
    // console.log('[MorphTargets] count=', list.length, list)
    // Expose the morph list for ad-hoc debugging from the browser console.
    try { (window as unknown as { __morphs?: string[] }).__morphs = list } catch {}
    
    // Energy-driven lipsync only; no advanced lipsync system is initialised.
  }, [model])

  // 不再根据 morphNames 初始化高级口型系统

  // Sets the influence of one named morph target on every collected face mesh.
  // Meshes lacking the target (or morph data entirely) are skipped silently.
  const applyMorph = (name: string, value: number) => {
    for (const mesh of faceMeshesRef.current) {
      const typed = mesh as unknown as {
        morphTargetDictionary?: Record<string, number>
        morphTargetInfluences?: number[]
      }
      if (!typed.morphTargetDictionary || !typed.morphTargetInfluences) continue
      const idx = typed.morphTargetDictionary[name]
      if (typeof idx === 'number') typed.morphTargetInfluences[idx] = value
    }
  }

  // Applies the same influence (value * scale, clamped to [0, 1]) to every
  // listed morph-target name that exists on the model.
  const applyMorphMany = (names: string[], value: number, scale = 1) => {
    const clamped = Math.min(1, Math.max(0, value * scale))
    for (const name of names) applyMorph(name, clamped)
  }

  // Picks the morph target most likely to represent an open mouth
  // (jaw / mouth-open / viseme-A variants). Falls back to the first available
  // morph name, or undefined when the model exposes none at all.
  const getPreferredMouthName = (): string | undefined => {
    const mouthLike = /jaw|mouth.*open|mouthopen|viseme_?a|^a$|^aa$|open/i
    const match = morphNames.find(n => mouthLike.test(n))
    return match || morphNames[0]
  }

  // 执行一次“单字”式的开合脉冲（独立于能量口型）
  // 已禁用开场/文本脉冲

  // 已移除批量/匹配工具，专注嘴部动作

  // Energy-driven lip sync: while TTS audio plays, sample its frequency
  // spectrum through WebAudio on every animation frame and map vocal energy
  // onto the model's mouth morph targets and jaw bone. Hysteresis gates,
  // onset detection and periodic forced closures prevent the static
  // "half-open mouth" look on long sentences.
  useEffect(() => {
    const preferred = getPreferredMouthName()
    if (ttsStatus !== 'playing' || !tts.audioRef?.current || !preferred) {
      // Not playing (or nothing to drive): stop the RAF loop, close the mouth.
      if (lipsyncRafRef.current) cancelAnimationFrame(lipsyncRafRef.current)
      lipsyncRafRef.current = null
      if (preferred) applyMorph(preferred, 0)
      return
    }

    // Lazily initialise the WebAudio analysis chain.
    type AudioCtor = { new(): AudioContext }
    type WindowWithAudio = Window & { AudioContext?: AudioCtor; webkitAudioContext?: AudioCtor }
    if (!audioCtxRef.current) {
      const AC: AudioCtor | undefined = (window as WindowWithAudio).AudioContext ?? (window as WindowWithAudio).webkitAudioContext
      if (!AC) {
        // No WebAudio support in this environment: reset the mouth and bail.
        applyMorph(preferred, 0)
        return
      }
      audioCtxRef.current = new AC()
    }
    const ctx = audioCtxRef.current
    if (!ctx) {
      applyMorph(preferred, 0)
      return
    }
    // Rebuild the source node from the current audio element on each playback.
    const mediaEl = tts.audioRef.current
    if (!mediaEl) {
      applyMorph(preferred, 0)
      return
    }
    try { sourceRef.current?.disconnect() } catch { /* noop */ }
    try {
      sourceRef.current = ctx.createMediaElementSource(mediaEl)
    } catch {
      // Ignore: the element may already be bound to a source node.
    }
    if (!analyserRef.current) analyserRef.current = ctx.createAnalyser()
    const analyser = analyserRef.current
    analyser.fftSize = 512
    const bufLen = analyser.frequencyBinCount
    if (!freqDataRef.current || freqDataRef.current.length !== bufLen) freqDataRef.current = new Uint8Array(bufLen)

    if (sourceRef.current && analyser && ctx) {
      // Disconnect first so a repeated connect doesn't throw.
      try { sourceRef.current.disconnect() } catch { /* noop */ }
      sourceRef.current.connect(analyser)
      analyser.connect(ctx.destination)
    }
    // Make sure the audio context is running (may start suspended).
    try { if (ctx.state !== 'running') { /* resume context if suspended */ ctx.resume() } } catch { /* noop */ }

    const smooth = { last: 0 }
    // Smoothing parameter: fast attack, even faster release.
    // NOTE(review): 'attack' is not referenced anywhere in the loop below.
    const attack = 0.45
    // Simple peak emphasis (adds articulation "bite").
    let lastPeak = 0
    let lastPeakAt = 0

    // Broad-match several morph families so we drive whatever the model has:
    // open-mouth shapes, closed/pressed lips, upper-lip raisers, smile corners.
    const openNames = morphNames.filter(n => /(jaw.?open|mouth.*open|open$|viseme.*(aa|ah|ao)|^a$|^aa$|mouth_a)/i.test(n))
    const closeNames = morphNames.filter(n => /(mouth.*close|lips.*together|closed|press|seal)/i.test(n))
    const upperLipNames = morphNames.filter(n => /(mouth.*upper.*up|upperlip|lip.*upper.*up|upper.?lip)/i.test(n))
    const smileNames = morphNames.filter(n => /(smile|mouthSmile|lipCorner.*up)/i.test(n))

    // Prefer the model's real morph names when present.
    const hardOpenName = morphNames.includes('mouthOpen') ? 'mouthOpen' : preferred
    const hardSmileName = morphNames.includes('mouthSmile') ? 'mouthSmile' : undefined

    const tick = () => {
      const data = freqDataRef.current!
      analyser.getByteFrequencyData(data)
      // Energy band 150–2500 Hz, roughly the human-voice range.
      const nyquist = ctx.sampleRate / 2
      const lowHz = 150, highHz = 2500
      const lowIndex = Math.max(0, Math.floor(lowHz / nyquist * data.length))
      const highIndex = Math.min(data.length - 1, Math.ceil(highHz / nyquist * data.length))
      let sum = 0
      for (let i = lowIndex; i <= highIndex; i++) sum += data[i]
      const avg = sum / (highIndex - lowIndex + 1)
      // Normalise + compress: quiet audio more visible, loud audio unsaturated.
      const base = Math.min(1, Math.max(0, (avg / 170)))
      let energy = Math.pow(base, 0.45)
      // Peak emphasis: brief boost right after a sudden energy jump.
      let nowTs = performance.now()
      if (base > lastPeak + 0.15) {
        lastPeak = base
        lastPeakAt = nowTs
      }
      if (nowTs - lastPeakAt < 90) {
        energy = Math.min(1, energy + 0.08)
      } else {
        lastPeak *= 0.98
      }
      // Noise gate with hysteresis + silence closing + onset detection.
      nowTs = performance.now()
      const gateOpen = 0.16
      const gateClose = 0.08
      if (energy > gateOpen) {
        mouthOpenRef.current = true
        lastAboveGateAtRef.current = nowTs
        if (!onsetArmedRef.current) {
          if (onsetSinceRef.current === 0) onsetSinceRef.current = nowTs
          if (nowTs - onsetSinceRef.current >= 35) {
            onsetArmedRef.current = true // only open after >=35ms sustained energy
            if (openUnlockStartRef.current === 0) openUnlockStartRef.current = nowTs
          }
        }
      } else if (energy < gateClose) {
        // Only declare silence after energy has stayed low for a while.
        if (nowTs - lastAboveGateAtRef.current > 130) {
          mouthOpenRef.current = false
          closedHoldUntilRef.current = nowTs + 140 // hold closed longer for a crisper shut
        }
        onsetSinceRef.current = 0
        onsetArmedRef.current = false
      }
      // Target opening: binary with hysteresis, forcing distinct open/close.
      let target = 0
      if (energy > gateOpen) target = 1
      else if (energy < gateClose) target = 0
      // No opening until the onset check has armed.
      if (!onsetArmedRef.current) target = 0
      if (!mouthOpenRef.current || nowTs < closedHoldUntilRef.current) {
        target = 0
      }
      // Gradual unlock: cap rises linearly 0.4 → 1.0 over the first 400ms.
      if (openUnlockStartRef.current > 0) {
        const since = nowTs - openUnlockStartRef.current
        const unlock = Math.min(1, since / 400)
        const cap = 0.4 + 0.6 * unlock
        if (target > cap) target = cap
      }
      // Long phrases: insert periodic micro-closures while wide open, and
      // temporarily lower the cap on very long stretches.
      const highOpen = Math.max(target, smooth.last) > 0.6
      if (highOpen) {
        if (highOpenSinceRef.current === 0) highOpenSinceRef.current = nowTs
        const highDur = nowTs - highOpenSinceRef.current
        // After 450ms wide open, trigger one short, quick micro-closure.
        if (highDur > 450 && nowTs > microCloseUntilRef.current) {
          microCloseStartRef.current = nowTs
          microCloseUntilRef.current = nowTs + 110
          highOpenSinceRef.current = nowTs // restart the timer for the next one
        }
        // After 900ms apply a temporary 0.75 cap lasting about 400ms.
        if (highDur > 900 && nowTs < tempCapUntilRef.current) {
          // cap already in effect
        } else if (highDur > 900) {
          tempCapUntilRef.current = nowTs + 400
        }
        // After 1200ms force a complete 90ms closure.
        if (highDur > 1200 && nowTs > forceCloseUntilRef.current) {
          forceCloseUntilRef.current = nowTs + 90
          highOpenSinceRef.current = nowTs
        }
      } else {
        highOpenSinceRef.current = 0
      }
      // Apply forced closure, micro-closure and temporary cap.
      if (nowTs < forceCloseUntilRef.current) {
        target = 0
      }
      if (nowTs < microCloseUntilRef.current && microCloseStartRef.current > 0) {
        const span = Math.max(1, microCloseUntilRef.current - microCloseStartRef.current)
        const p = Math.min(1, Math.max(0, (nowTs - microCloseStartRef.current) / span))
        // Half close / half reopen: a smooth sinusoidal dip around 0.3–0.55.
        const mid = 0.45
        const amp = 0.15
        const cap = mid + amp * Math.sin(p * Math.PI)
        target = Math.min(target, cap)
      }
      if (nowTs < tempCapUntilRef.current) {
        if (target > 0.75) target = 0.75
      }
      // Rhythmic closure: if nothing closed within 220ms, force one for 120ms.
      if (nowTs - lastCloseAtRef.current > 220 && nowTs > shortCloseUntilRef.current + 150) {
        shortCloseUntilRef.current = nowTs + 120
      }
      // Dual-speed smoothing; faster downwards so closures actually land.
      const isClosing = target <= smooth.last
      const k = isClosing ? 0.6 : 0.5
      smooth.last = smooth.last + (target - smooth.last) * k
      // Combine pulse value and energy value via max so motion stays continuous;
      // blendshape + jaw rotation are stacked for a larger visible opening.
      const mouthValue = smooth.last
      const pulseVal = pulsingValueRef.current
      let openValInstant = Math.min(1, Math.max(pulseVal, mouthValue * 1.2))
      // Per-frame slew limit to soften sharp transitions.
      const lastApplied = lastAppliedRef.current || 0
      const maxDelta = 0.35
      if (openValInstant > lastApplied + maxDelta) openValInstant = lastApplied + maxDelta
      if (openValInstant < lastApplied - maxDelta) openValInstant = lastApplied - maxDelta
      // Append to the delay buffer (audio-to-mouth latency compensation).
      delayBufferRef.current.push({ t: nowTs, v: openValInstant })
      // Bound the buffer (~2 seconds of frames).
      if (delayBufferRef.current.length > 180) delayBufferRef.current.shift()
      // Read back the delayed value.
      const wantTs = nowTs - lipDelayMsRef.current
      let delayed = openValInstant
      for (let i = delayBufferRef.current.length - 1; i >= 0; i--) {
        const it = delayBufferRef.current[i]
        if (it.t <= wantTs) { delayed = it.v; break }
      }
      // Short moving average for extra softening.
      const buf = smoothBufRef.current
      buf.push(delayed)
      if (buf.length > 2) buf.shift()
      const openValRaw = buf.reduce((a, b) => a + b, 0) / buf.length
      let openVal = openValRaw
      // Quick shut: sudden energy drop with no recent closure → fast close.
      if (!isClosing && mouthOpenRef.current && (lastApplied > 0.18) && (openValRaw < 0.20) && nowTs > shortCloseUntilRef.current + 180) {
        shortCloseUntilRef.current = nowTs + 140
      }
      if (nowTs < shortCloseUntilRef.current) openVal = 0
      if (openVal === 0) lastCloseAtRef.current = nowTs
      lastAppliedRef.current = openVal
      // Zero all non-essential morphs first to remove residual "dry mouthing".
      faceMeshesRef.current.forEach(mesh => {
        const dict = (mesh as unknown as { morphTargetDictionary?: Record<string, number> }).morphTargetDictionary
        const infl = (mesh as unknown as { morphTargetInfluences?: number[] }).morphTargetInfluences
        if (!dict || !infl) return
        Object.keys(dict).forEach(n => {
          if (n !== hardOpenName && n !== hardSmileName) infl[dict[n]] = 0
        })
      })

      // Hard-bind the model's actual mouthOpen morph.
      applyMorph(hardOpenName!, Math.min(1, openVal * 1.6))
      if (openNames.length > 0) applyMorphMany(openNames, openVal, 1.6)
      if (upperLipNames.length > 0) applyMorphMany(upperLipNames, openVal, 0.7)
      // Smile corners: only a touch at wide opening — a constant smile reads
      // as a permanently half-open mouth.
      if (hardSmileName) applyMorph(hardSmileName, openVal > 0.4 ? 0.12 : 0)
      if (smileNames.length > 0) applyMorphMany(smileNames, Math.max(0, openVal - 0.35), 0.25)
      if (closeNames.length > 0) {
        const closeVal = Math.max(0, Math.min(1, 0.9 - openVal * 1.2))
        applyMorphMany(closeNames, closeVal, 1)
      }
      if (jawBoneRef.current) {
        // Map opening onto jaw-bone rotation.
        const maxRad = 18 * Math.PI / 180
        const jawPrev = jawBoneRef.current.rotation.x
        const jawTarget = -openVal * maxRad * 0.5
        // The jaw responds faster to emphasise closure.
        jawBoneRef.current.rotation.x = jawPrev + (jawTarget - jawPrev) * 0.35
      }
      // If the model has no usable open-mouth morph, drive the jaw alone, harder.
      if (openNames.length === 0 && jawBoneRef.current) {
        const maxRad = 20 * Math.PI / 180
        const jawPrev = jawBoneRef.current.rotation.x
        const jawTarget = -openVal * maxRad
        jawBoneRef.current.rotation.x = jawPrev + (jawTarget - jawPrev) * 0.45
      }
      lastEnergyRef.current = smooth.last
      lipsyncRafRef.current = requestAnimationFrame(tick)
    }
    lipsyncRafRef.current = requestAnimationFrame(tick)

    return () => {
      if (lipsyncRafRef.current) cancelAnimationFrame(lipsyncRafRef.current)
      lipsyncRafRef.current = null
      applyMorph(preferred, 0)
    }
  }, [ttsStatus, tts.audioRef, morphNames])

  // Opening/text-rhythm pulses were removed (they caused silent "dry mouthing"
  // before audio started); this now only resets delay and onset state at the
  // start of each playback.
  useEffect(() => {
    if (ttsStatus === 'playing') {
      delayBufferRef.current = []
      pulsingValueRef.current = 0
      pulsingRef.current = false
      patternActiveRef.current = false
      onsetSinceRef.current = 0
      onsetArmedRef.current = false
      openUnlockStartRef.current = 0
    }
  }, [ttsStatus])

  // Facial expressions removed; only mouth motion remains.
  // Animation source models (FBX files carrying animation tracks), loaded with
  // per-file error handling so one missing file doesn't break the others.
  const [animationSources, setAnimationSources] = useState<Record<string, THREE.Group | null>>({})
  
  useEffect(() => {
    const loadAnimations = async () => {
      const animationFiles = [
        'Waving.fbx',
        'Wave Hip Hop Dance.fbx', 
        'Praying.fbx',
        'Salute.fbx',
        'Breakdance.fbx',
        'Angry.fbx',
        'Talking.fbx'
      ]
      
      const sources: Record<string, THREE.Group | null> = {}
      
      for (const file of animationFiles) {
        try {
          const loader = new FBXLoader()
          const source = await loader.loadAsync(`/animations/${file}`)
          sources[file.replace('.fbx', '')] = source
        } catch (error) {
          console.warn(`动画文件 ${file} 加载失败:`, error)
          // Record null so downstream code can skip this animation cleanly.
          sources[file.replace('.fbx', '')] = null
        }
      }
      
      setAnimationSources(sources)
    }
    
    loadAnimations()
  }, [])

  // Plan A: anchor at the soles — put the foot bottom at world y=0 and center
  // the model on x/z, then record center/radius for camera constraints.
  useEffect(() => {
    if (!model) return
    const root = model.scene
    const box = new Box3().setFromObject(root)
    const center = box.getCenter(new Vector3())
    // Shift so the feet rest on y=0 and x/z are centered at the origin.
    const offset = new Vector3(center.x, box.min.y, center.z)
    root.position.sub(offset)
    // Record center and radius (used to constrain the orbit camera).
    setModelCenter(new Vector3(0, 0, 0))
    const size = box.getSize(new Vector3())
    const radius = Math.max(size.x, size.y, size.z) * 0.55
    setModelRadius(Math.max(0.5, radius))
  }, [model])

  // Builds AnimationActions from the loaded FBX sources. Clips whose track
  // targets exist on the model are bound directly; otherwise skeleton
  // retargeting is attempted when both sides have skeletons.
  useEffect(() => {
    if (!model || !animationSources.Waving) return
    // Create a mixer bound to the current model.
    mixerRef.current = new THREE.AnimationMixer(model.scene)

    const sources = [
      { name: '挥手', key: 'actions.wave', src: animationSources.Waving },
      animationSources['Wave Hip Hop Dance'] ? { name: '跳舞', key: 'actions.dance', src: animationSources['Wave Hip Hop Dance'] } : null,
      animationSources.Praying ? { name: '跪下求饶', key: 'actions.pray', src: animationSources.Praying } : null,
      animationSources.Salute ? { name: '敬礼', key: 'actions.salute', src: animationSources.Salute } : null,
      animationSources.Breakdance ? { name: '超级无敌螺旋升天', key: 'actions.breakdance', src: animationSources.Breakdance } : null,
      animationSources.Angry ? { name: '思考/皱眉', key: 'actions.thinking', src: animationSources.Angry } : null,
      animationSources.Talking ? { name: '说话/陈述', key: 'actions.talking', src: animationSources.Talking } : null
    ].filter(Boolean) as Array<{ name: string; key: string; src: THREE.Group | null }>

    const collected: string[] = []

    sources.forEach(({ name: srcName, key: translationKey, src }) => {
      if (!src) return
      const clips = (src as THREE.Group).animations || []
      // console.log('[Anim] loaded clips from', srcName, ':', clips.map((c: THREE.AnimationClip) => ({ name: c.name, tracks: c.tracks.length, duration: c.duration })))
      clips.forEach((clip: THREE.AnimationClip, index: number) => {
        let finalClip: THREE.AnimationClip = clip

        // Probe the first track's target node; if it exists on our model the
        // clip can be bound directly without retargeting.
        const sampleTrack = clip.tracks.find(t => typeof (t as THREE.KeyframeTrack).name === 'string') as THREE.KeyframeTrack | undefined
        const targetName = sampleTrack ? String(sampleTrack.name).split('.')[0] : null
        const directTarget = targetName ? model.scene.getObjectByName(targetName) : null

        if (!directTarget) {
          let sourceHasSkeleton = false
          let targetHasSkeleton = false
          ;(src as THREE.Group).traverse?.((o: THREE.Object3D) => { const sk = (o as THREE.SkinnedMesh).skeleton as unknown | undefined; if (sk) sourceHasSkeleton = true })
          model.scene.traverse?.((o: THREE.Object3D) => { const sk = (o as THREE.SkinnedMesh).skeleton as unknown | undefined; if (sk) targetHasSkeleton = true })
          if (sourceHasSkeleton && targetHasSkeleton) {
            try {
              finalClip = SkeletonUtils.retargetClip((src as THREE.Group), model.scene, clip)
            } catch (e) {
              console.warn('[Anim] retargetClip 失败(来源', srcName, '):', e)
            }
          }
        }

        // Multi-clip sources get an index suffix so action keys stay unique.
        const displayName = `${translationKey}${clips.length>1?`_${index+1}`:''}`
        const action = mixerRef.current!.clipAction(finalClip)
        action.setLoop(THREE.LoopRepeat, Infinity)
        actionsRef.current[displayName] = action
        collected.push(displayName)
      })
    })

    setAvailableActions(collected)
    if (collected.length > 0) setSelectedAction(collected[0])
    return () => {
      mixerRef.current?.stopAllAction()
      mixerRef.current = null
      actionsRef.current = {}
    }
  }, [model, animationSources])

  // Per-frame mixer updates live in the MixerUpdater component inside <Canvas>.

  // Cross-fades from the currently playing action to the named one.
  // NOTE(review): the crossfade relies on setTimeout; rapid successive calls
  // can overlap their timers — confirm this is acceptable for the UI flow.
  const playAction = (name: string) => {
    if (!mixerRef.current) return
    const next = actionsRef.current[name]
    if (!next) return
    next.enabled = true
    next.clampWhenFinished = false
    next.setLoop(THREE.LoopRepeat, Infinity)
    const prev = currentActionRef.current
    const fadeOutDur = 0.25
    const fadeInDur = 0.25
    if (prev && prev !== next) {
      // Fade the previous action out fully before starting the next one, so
      // two overlapping clips don't fight over the same bones.
      prev.fadeOut(fadeOutDur)
      setTimeout(() => {
        prev.stop()
        next.reset().fadeIn(fadeInDur).play()
      }, Math.ceil(fadeOutDur * 1000) + 30)
    } else if (!prev) {
      next.reset().fadeIn(fadeInDur).play()
    } else if (prev === next && !isPlaying) {
      next.reset().fadeIn(fadeInDur).play()
    }
    currentActionRef.current = next
    setIsPlaying(true)
  }

  // Stops all animation playback: fades the active action out, then clears
  // every action on the mixer and resets the playing state.
  const handleStop = () => {
    const mixer = mixerRef.current
    if (!mixer) return
    const active = currentActionRef.current
    if (active) {
      active.fadeOut(0.2)
      setTimeout(() => active.stop(), 220)
    }
    mixer.stopAllAction()
    setIsPlaying(false)
    currentActionRef.current = null
  }

  // Resolves a registered action key from its prefix (an FBX with several
  // clips registers keys with automatic _1, _2, ... suffixes).
  const findActionKeyByPrefix = (prefix: string): string | null => {
    for (const key of Object.keys(actionsRef.current)) {
      if (key.startsWith(prefix)) return key
    }
    return null
  }

  // Switches the body animation from conversation state.
  // Priority: speaking > thinking > idle (dance when available, else stop).
  useEffect(() => {
    if (ttsStatus === 'playing') {
      const k = findActionKeyByPrefix('actions.talking')
      if (k) playAction(k)
      return
    }
    if (thinking) {
      const k = findActionKeyByPrefix('actions.thinking')
      if (k) playAction(k)
      return
    }
    // Speech ended and not thinking: dance if the clip exists, else go idle.
    if (ttsStatus === 'idle' && !thinking) {
      const danceKey = findActionKeyByPrefix('actions.dance')
      if (danceKey) {
        playAction(danceKey)
      } else {
        handleStop()
      }
    }
  }, [ttsStatus, thinking, availableActions])

  // 如果正在加载，显示加载状态
  if (isLoading) {
    return (
      <div style={{ 
        height: "100%", 
        width: "100%", 
        position: 'relative',
        display: 'flex',
        alignItems: 'center',
        justifyContent: 'center',
        background: 'rgba(0,0,0,0.1)',
        borderRadius: '12px'
      }}>
        <div style={{
          textAlign: 'center',
          color: theme === 'red' ? '#333' : '#fff',
          fontSize: '14px'
        }}>
          <div style={{ fontSize: '24px', marginBottom: '8px' }}>🤖</div>
          <div>数字人加载中...</div>
        </div>
      </div>
    )
  }

  // 如果模型加载失败，显示错误状态
  if (modelError) {
    return (
      <div style={{ 
        height: "100%", 
        width: "100%", 
        position: 'relative',
        display: 'flex',
        alignItems: 'center',
        justifyContent: 'center',
        background: 'rgba(255,77,77,0.1)',
        borderRadius: '12px',
        border: '1px solid rgba(255,77,77,0.3)'
      }}>
        <div style={{
          textAlign: 'center',
          color: theme === 'red' ? '#333' : '#fff',
          fontSize: '12px'
        }}>
          <div style={{ fontSize: '24px', marginBottom: '8px' }}>⚠️</div>
          <div>数字人模型加载失败</div>
          <div style={{ fontSize: '10px', opacity: 0.7, marginTop: '4px' }}>
            使用默认模型
          </div>
        </div>
      </div>
    )
  }

  return (
    <div style={{ height: "100%", width: "100%", position: 'relative' }}>
      <Canvas
        style={{ background: 'transparent', height: '100%', width: '100%' }}
        camera={{ position: [-0.5, 1, 3] }}
        // 让动画和控制器正常刷新；保留低功耗和合理 dpr
        frameloop="always"
        dpr={[1, 1.5]}
        gl={{ alpha: true, powerPreference: 'low-power' }}
        onCreated={({ gl }) => {
          gl.setClearAlpha(0)
        }}
      >
        <Orbit
          target={modelCenter}
          minDistance={(ttsStatus === 'idle' && !thinking) ? modelRadius * 1.0 : undefined}
          maxDistance={(ttsStatus === 'idle' && !thinking) ? modelRadius * 4.0 : undefined}
          minPolarAngle={(ttsStatus === 'idle' && !thinking) ? 0.2 : undefined}
          maxPolarAngle={(ttsStatus === 'idle' && !thinking) ? 1.35 : undefined}
          enablePan={(ttsStatus === 'idle' && !thinking) ? false : true}
        />
        <MixerUpdater mixerRef={mixerRef} />
        {/* 进一步微调脚底与容器底边的距离，向下为负值 */}
        <group ref={mySelfRef} position={[0, -1.5, 0]}>
          <hemisphereLight intensity={0.15} groundColor="black" />
          <ambientLight />
          {model && <primitive object={model.scene} scale={1.3} />}
        </group>
      </Canvas>
      {/* 控制面板 - 重新设计 - 仅管理员可见 */}
      {canControl && (
        <div style={{ 
          position: 'absolute', 
          bottom: 20, 
          left: '50%', 
          transform: 'translateX(-50%)',
          display: 'flex',
          flexDirection: 'column',
          gap: 12,
          alignItems: 'center',
          zIndex: 10
        }}>
        {/* 主要控制区 */}
        <div style={{
          display: 'flex',
          gap: 12,
          alignItems: 'center',
          padding: '12px 20px',
          background: theme === 'red' ? 'rgba(255,255,255,0.9)' : 'rgba(0,0,0,0.75)',
          borderRadius: 24,
          backdropFilter: 'blur(12px)',
          border: theme === 'red' ? '1px solid rgba(0,0,0,0.2)' : '1px solid rgba(255,255,255,0.1)',
          boxShadow: theme === 'red' ? '0 8px 32px rgba(0,0,0,0.1)' : '0 8px 32px rgba(0,0,0,0.3)'
        }}>
          {/* 动作选择器 */}
          <div style={{ position: 'relative' }}>
            <select
              value={selectedAction}
              onChange={(e) => {
                const value = e.target.value
                setSelectedAction(value)
                playAction(value)
              }}
              style={{ 
                padding: '8px 16px 8px 12px', 
                borderRadius: 12, 
                border: theme === 'red' ? '1px solid rgba(0,0,0,0.3)' : '1px solid rgba(255,255,255,0.2)', 
                background: theme === 'red' ? 'rgba(255,255,255,0.95)' : 'rgba(255,255,255,0.95)',
                color: theme === 'red' ? '#333' : '#333',
                fontSize: 14,
                fontWeight: 500,
                cursor: 'pointer',
                outline: 'none',
                minWidth: 120
              }}
            >
              {availableActions.map(name => (
                <option key={name} value={name}>{t(name)}</option>
              ))}
            </select>
          </div>

          {/* 停止按钮 */}
          <button
            onClick={handleStop}
            disabled={!isPlaying}
            style={{ 
              padding: '8px 16px', 
              borderRadius: 12, 
              border: 'none',
              background: isPlaying ? 'rgba(255,77,77,0.9)' : (theme === 'red' ? 'rgba(0,0,0,0.2)' : 'rgba(255,255,255,0.3)'),
              color: isPlaying ? '#fff' : (theme === 'red' ? '#333' : 'rgba(255,255,255,0.6)'),
              fontSize: 14,
              fontWeight: 500,
              cursor: isPlaying ? 'pointer' : 'not-allowed',
              transition: 'all 0.2s ease',
              minWidth: 60
            }}
          >
            {t('voice.stop')}
          </button>

          {/* 已移除“测试口型”按钮 */}

          {/* Voice control — admin only. Recognised speech is sent to the LLM
              and the reply is spoken via TTS; mouth movement is driven by the
              separate energy-based lipsync effect, not by timestamps. */}
          {canVoiceChat && (
            <SRControl
              onResult={async (text) => {
                setLastRecognized(text)
                try {
                  setThinking(true)
                  const reply = await askDeepSeek(text)
                  setAiReply(reply)
                  try {
                    const audioData = await tts.synthesizeText(reply)
                    console.log('🎭 TTS时间戳数据:', audioData.timestamps)
                    // BUGFIX: removed the call to the undefined `advancedLipsync`
                    // (the advanced lipsync system was deleted from this file);
                    // it threw a ReferenceError whenever timestamps were present.
                    await tts.playAudio(audioData)
                  } catch (error) {
                    console.warn('语音合成失败:', error)
                  }
                } finally {
                  setThinking(false)
                }
              }}
              onStatusChange={() => {}}
              inline
              theme={theme}
            />
          )}
        </div>

        {/* 已移除手动测试面板 */}

        {/* 对话气泡区 */}
        {(lastRecognized || aiReply || thinking) && (
          <div style={{
            display: 'flex',
            flexDirection: 'column',
            gap: 8,
            width: '100%',
            maxWidth: 400,
            alignItems: 'center'
          }}>
            {lastRecognized && (
              <div style={{ 
                ...bubbleBase, 
                background: 'rgba(0,0,0,0.8)',
                alignSelf: 'flex-start',
                marginLeft: 0
              }}>
                <span style={{ fontSize: 18 }}>🗣️</span>
                <span style={bubbleText}>{lastRecognized}</span>
              </div>
            )}
            {thinking ? (
              <div style={{ 
                ...bubbleBase, 
                background: 'rgba(255,193,7,0.8)',
                alignSelf: 'center'
              }}>
                <span style={{ fontSize: 18 }}>💭</span>
                <span style={bubbleText}>{t('voice.thinking')}</span>
              </div>
            ) : aiReply ? (
              <div style={{ 
                ...bubbleBase, 
                background: 'linear-gradient(135deg, rgba(46,160,67,0.95), rgba(52,168,83,0.9))',
                alignSelf: 'flex-end',
                marginLeft: 0
              }}>
                <span style={{ fontSize: 18 }}>🤖</span>
                <span style={bubbleText}>{aiReply}</span>
              </div>
            ) : null}
          </div>
        )}
        </div>
      )}
      
      {/* 普通用户提示信息 */}
      {!canControl && (
        <div style={{
          position: 'absolute',
          bottom: 20,
          left: '50%',
          transform: 'translateX(-50%)',
          padding: '8px 16px',
          background: theme === 'red' ? 'rgba(255,255,255,0.9)' : 'rgba(0,0,0,0.75)',
          borderRadius: '20px',
          backdropFilter: 'blur(12px)',
          border: theme === 'red' ? '1px solid rgba(0,0,0,0.2)' : '1px solid rgba(255,255,255,0.1)',
          color: theme === 'red' ? '#333' : '#fff',
          fontSize: '12px',
          textAlign: 'center',
          zIndex: 10
        }}>
          👀 您正在查看数字人（仅查看模式）
        </div>
      )}
    </div>

  )
}
export default MySelf

// --- DeepSeek request wrapper (non-streaming, simple & fast) ---
// SECURITY(review): this API key is committed to source and shipped in the
// client bundle — it should live behind a server-side proxy or env config.
const YOUDAO_BASE = 'https://openapi.youdao.com/llmgateway/api/v1/chat/completions'
const YOUDAO_API_KEY = '671aa542ee4495e7'
const YOUDAO_MODEL = 'deepseek-r1-250120'

/**
 * Sends one user utterance to the DeepSeek model behind the Youdao LLM
 * gateway and returns the assistant's reply as trimmed plain text.
 *
 * Non-streaming, low-temperature request; the system prompt constrains the
 * reply to one or two concise Chinese sentences.
 *
 * @param userText recognised speech / user question
 * @returns the reply text ('' when the response carries no content)
 * @throws Error when the HTTP request fails; the status code is included so
 *         failures are distinguishable in logs
 */
async function askDeepSeek(userText: string): Promise<string> {
  const body = {
    model: YOUDAO_MODEL,
    messages: [
      { role: 'system', content: '请用简洁中文回答，用一两句话。' },
      { role: 'user', content: userText }
    ],
    stream: false,
    max_tokens: 256,
    temperature: 0.2,
    top_p: 0.8
  }
  const res = await fetch(YOUDAO_BASE, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${YOUDAO_API_KEY}`
    },
    body: JSON.stringify(body)
  })
  // Surface the HTTP status so 401/429/5xx are distinguishable when caught.
  if (!res.ok) throw new Error(`DeepSeek 请求失败 (HTTP ${res.status})`)
  const data = await res.json()
  // Defensive access: gateway errors may omit choices/message entirely.
  const text = data?.choices?.[0]?.message?.content || ''
  return String(text).trim()
}