<template>
  <div class="digital-human">
    <!-- 导航栏 -->
    <van-nav-bar
      title="🤖 伴伴AI数字人通话"
      left-arrow
      @click-left="goBack"
      class="nav-bar"
    />

    <!-- 连接状态 -->
    <div v-if="callState === 'connecting'" class="connecting-state">
      <van-loading size="24px" vertical>连接中...</van-loading>
      <p>正在连接伴伴AI数字人...</p>
    </div>

    <!-- 通话界面 -->
    <div v-else-if="callState === 'connected'" :class="['call-interface', subtitleVisible ? 'with-subtitle' : 'no-subtitle']">
      <!-- 数字人显示区域 -->
      <div class="digital-human-hero">
        <!-- 数字人Avatar -->
        <div class="character avatar" :class="subtitleVisible ? 'align-left' : 'align-center'">
          <div :class="['_video-box', avatarLoading ? 'is-loading' : 'is-loaded']" :style="{ aspectRatio: videoAspectRatio }">
            <!-- 加载动画 -->
            <ul v-if="avatarLoading" class="_video-loading">
              <li></li>
              <li></li>
              <li></li>
              <li></li>
              <li></li>
            </ul>
            <!-- 数字人视频 -->
            <video
              ref="avatarVideoRef"
              class="avatar-video"
              autoplay
              muted
              playsinline
              @canplay="onAvatarLoaded"
              @timeupdate="onVideoTimeUpdate"
              @loadeddata="onVideoLoadedData"
              @error="onVideoError"
            />
            <!-- 视频背景 -->
            <div class="_video-background"></div>
          </div>
        </div>

        <!-- 字幕历史列表（右侧面板，放入5等分网格）-->
        <div v-if="subtitleVisible" class="chat-container sidebar">
          <div class="chat-header">
            <h3>💬 对话历史</h3>
            <div class="header-right">
              <span class="status-indicator">
                <span v-if="isSpeaking" class="status-dot speaking">🎤</span>
                <span v-else-if="agentSpeaking" class="status-dot agent-speaking">🤖</span>
                <span v-else class="status-dot listening">👂</span>
                {{ getStatusText() }}
              </span>
              <span class="chat-count">{{ subtitleHistory.length }} 条消息</span>
              <span class="scroll-hint">📜 可滚动</span>
            </div>
          </div>

          <div class="chat-messages" ref="chatMessagesRef">
            <div v-if="subtitleHistory.length === 0" class="empty-chat">
              <div class="empty-icon">🤖</div>
              <p>请开始说话，与伴伴AI数字人对话...</p>
              <p class="empty-hint">支持实时语音识别和伴伴AI回复</p>
            </div>

            <div
              v-for="message in subtitleHistory"
              :key="`${message.source}-${message.sentenceId}`"
              :class="['message-item', message.source]"
            >
              <div class="message-content">
                <div class="message-text">{{ message.text }}</div>
                <div class="message-time">{{ formatTime(message.timestamp) }}</div>
              </div>
              <div class="message-avatar">
                {{ message.source === 'user' ? '👤' : '🤖' }}
              </div>
            </div>
          </div>

          <div class="chat-footer">
            <van-button size="small" @click="clearChat" type="default">
              🗑️ 清空对话
            </van-button>
            <van-button size="small" @click="scrollToBottom" type="primary">
              ⬇️ 滚动到底部
            </van-button>
          </div>
        </div>

      </div>



      <!-- 当前字幕显示 -->
      <div v-if="subtitleVisible && currentSubtitle && currentSubtitle.text" class="subtitle-display">
        <div class="subtitle-inner">
          <div class="subtitle-source">
            <div v-if="currentSubtitle.source === 'agent'" class="agent-icon">🤖</div>
            <div v-else class="user-icon">👤</div>
          </div>
          <div class="subtitle-text">{{ currentSubtitle.text }}</div>
        </div>
      </div>


      <!-- 控制按钮 -->
      <div class="control-buttons">
        <div class="control-item">
          <van-button
            :type="microphoneMuted ? 'default' : 'primary'"
            size="large"
            round
            @click="toggleMicrophone"
            class="control-btn"
          >
            {{ microphoneMuted ? '🔇' : '🎤' }}
          </van-button>
          <span class="control-label">{{ microphoneMuted ? '麦克风已关闭' : '麦克风已开启' }}</span>
        </div>

        <div class="control-item">
          <van-button
            type="danger"
            size="large"
            round
            @click="endCall"
            class="control-btn end-call"
          >
            📞
          </van-button>
          <span class="control-label">挂断通话</span>
        </div>

        <div class="control-item">
          <van-button
            :type="subtitleVisible ? 'primary' : 'default'"
            size="large"
            round
            @click="toggleSubtitle"
            class="control-btn"
          >
            📝
          </van-button>
          <span class="control-label">{{ subtitleVisible ? '字幕已开启' : '字幕已关闭' }}</span>
        </div>

        <div class="control-item">
          <van-button
            type="default"
            size="large"
            round
            @click="toggleSettings"
            class="control-btn"
          >
            ⚙️
          </van-button>
          <span class="control-label">通话设置</span>
        </div>
      </div>
    </div>

    <!-- 错误状态 -->
    <div v-else-if="callState === 'error'" class="error-state">
      <div class="error-icon">❌</div>
      <h3>连接失败</h3>
      <p>{{ callErrorMessage || '无法连接到伴伴AI数字人，请稍后重试' }}</p>
      <div class="error-details">
        <p>可能的原因：</p>
        <ul>
          <li>网络连接不稳定</li>
          <li>数字人服务暂时不可用</li>
          <li>浏览器不支持相关功能</li>
        </ul>
      </div>
      <div class="error-actions">
        <van-button type="primary" @click="retryConnection">🔄 重新连接</van-button>
        <van-button type="default" @click="goBack">🏠 返回首页</van-button>
      </div>
    </div>

    <!-- 初始状态 - 中转页面 -->
    <div v-else class="initial-state">
      <div class="welcome-container">
        <div class="welcome-icon">🤖</div>
        <h2>伴伴AI数字人通话</h2>
        <p class="welcome-desc">与伴伴AI数字人进行面对面的智能对话</p>



        <!-- 功能介绍 -->
        <div class="features">
          <h3>✨ 功能特色</h3>
          <div class="feature-list">
            <div class="feature-item">
              <span class="feature-icon">🎙️</span>
              <span>实时语音对话</span>
            </div>
            <div class="feature-item">
              <span class="feature-icon">📝</span>
              <span>智能字幕显示</span>
            </div>
            <div class="feature-item">
              <span class="feature-icon">🎭</span>
              <span>表情动作同步</span>
            </div>
            <div class="feature-item">
              <span class="feature-icon">🧠</span>
              <span>上下文理解</span>
            </div>
          </div>
        </div>

        <!-- 开始按钮 -->
        <div class="start-section">
          <van-button
            type="primary"
            size="large"
            round
            @click="startCall"
            :loading="callState === 'connecting'"
            class="start-btn"
          >
            🚀 开始数字人通话
          </van-button>
          <p class="start-hint">点击开始与伴伴AI数字人对话</p>

          <!-- 开发者工具 -->
          <!-- NOTE(review): `import.meta` inside a template expression is not supported by all Vue template compilers; consider exposing e.g. `const isDev = import.meta.env.DEV` from <script setup> and using v-if="isDev" — verify this compiles -->
          <div class="dev-tools" v-if="import.meta.env.DEV">
            <van-button
              type="default"
              size="small"
              @click="$router.push('/api-test')"
              class="dev-btn"
            >
              🔧 API连接测试
            </van-button>
          </div>
        </div>
      </div>
    </div>

    <!-- 设置弹窗 -->
    <van-popup v-model:show="showSettings" position="bottom" :style="{ height: '60%' }">
      <div class="settings-panel">
        <div class="settings-header">
          <h3>⚙️ 通话设置</h3>
          <van-button size="small" @click="showSettings = false">关闭</van-button>
        </div>

        <div class="settings-content">
          <div class="setting-group">
            <h4>数字人设置</h4>
            <van-cell title="语音音量" :value="`${voiceVolume}%`">
              <template #right-icon>
                <van-slider v-model="voiceVolume" :min="0" :max="100" />
              </template>
            </van-cell>
          </div>

          <div class="setting-group">
            <h4>显示设置</h4>
            <van-cell title="字幕显示" :value="subtitleVisible ? '开启' : '关闭'">
              <template #right-icon>
                <van-switch v-model="subtitleVisible" />
              </template>
            </van-cell>
          </div>
        </div>
      </div>
    </van-popup>


  </div>
</template>

<script setup lang="ts">
import { ref, computed, onMounted, onUnmounted, onBeforeUnmount, nextTick } from 'vue'
import { useRouter } from 'vue-router'
import { showToast } from 'vant'
import ARTCAICallEngine, { AICallAgentType, AICallAgentConfig } from 'aliyun-auikit-aicall'

// 路由
const router = useRouter()

// 通话状态
const callState = ref<'none' | 'connecting' | 'connected' | 'error'>('none')
const callErrorMessage = ref<string>('')

// 数字人类型 - 固定为AvatarAgent
const agentType = ref<string>('AvatarAgent')
// 默认使用的智能体ID（与后端请求保持一致）
const DEFAULT_AGENT_ID = 'f97e8f48f6e64b679a977fb01bc9e28f'

// 音视频状态
const isSpeaking = ref<boolean>(false)
const agentSpeaking = ref<boolean>(false)
const microphoneMuted = ref<boolean>(false)

// 字幕相关
const subtitleVisible = ref<boolean>(true)
const currentSubtitle = ref<any>(null)
const subtitleHistory = ref<Array<any>>([])

// 视频纵横比（默认16:9），根据实际视频尺寸更新，避免变形/裁切
const videoAspectRatio = ref<string>('16 / 9')
const updateAspectFromVideo = () => {
  const v = avatarVideoRef.value as HTMLVideoElement | null
  if (v && v.videoWidth && v.videoHeight) {
    videoAspectRatio.value = `${v.videoWidth} / ${v.videoHeight}`
  }
}

// 设置相关
const showSettings = ref<boolean>(false)
const voiceVolume = ref<number>(80)

// 视频相关
const avatarVideoRef = ref<HTMLVideoElement | null>(null)
const chatMessagesRef = ref<HTMLElement | null>(null)
const avatarLoading = ref<boolean>(true)

// 数字人控制器
let digitalHumanController: any = null
let digitalHumanEngine: any = null
let agentInfo: any = null
const isMockEngine = ref<boolean>(false)


// 方法

// 提取字幕文本的通用方法（兼容对象/数组/嵌套data）
const extractSubtitleText = (payload: any): string | null => {
  const item = Array.isArray(payload) ? (payload[0] ?? null) : payload
  if (!item) return null
  const text = item.text ?? item.data?.text ?? null
  return typeof text === 'string' && text.trim() ? text : null
}

const goBack = () => {
  router.push('/')
}



const getStatusText = () => {
  if (agentSpeaking.value) return '数字人正在说话'
  if (isSpeaking.value) return '正在聆听您的声音'
  return '等待对话'
}

const formatTime = (timestamp: number) => {
  const date = new Date(timestamp)
  return date.toLocaleTimeString('zh-CN', {
    hour12: false,
    hour: '2-digit',
    minute: '2-digit',
    second: '2-digit'
  })
}

const startCall = async () => {
  try {
    callState.value = 'connecting'

    // 调用真实的数字人API
    const agentInfo = await callDigitalHumanAPI()

    // 切换到连接状态，等待DOM更新
    callState.value = 'connected'

    // 等待DOM更新后初始化视频
    await nextTick()
    await initializeAvatarVideo(agentInfo)

    // 初始化麦克风状态（确保开始时麦克风是开启的）
    microphoneMuted.value = false
    console.log('🎤 初始化麦克风状态为开启')

    // 首次问候（仅在模拟模式下）
    if (isMockEngine.value) {
      setTimeout(() => {
        addSubtitle('agent', '您好！我是伴伴AI数字人，很高兴为您服务。')
      }, 1000)
    }

  } catch (error) {
    console.error('启动数字人通话失败:', error)
    callErrorMessage.value = error instanceof Error ? error.message : '启动失败'
    callState.value = 'error'
  }
}

const callDigitalHumanAPI = async () => {
  try {
    // 调用后端API生成数字人通话实例
    const response = await fetch('http://localhost:8082/api/v2/digital-human/generateCall', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        userId: 'user_' + Date.now(),
        aiAgentId: 'f97e8f48f6e64b679a977fb01bc9e28f', // 数字人智能体ID
        agentType: 'AvatarAgent',
        region: 'cn-beijing', // 使用北京地区
        expire: 24 * 60 * 60 // 24小时
      })
    })

    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`)
    }

    const data = await response.json()

    if (data.code !== 200) {
      throw new Error(data.message || '生成数字人实例失败')
    }

    console.log('数字人实例创建成功:', data.data)
    return data.data

  } catch (error) {
    console.error('调用数字人API失败:', error)
    throw error
  }
}

const initializeAvatarVideo = async (agentData: any) => {
  try {
    avatarLoading.value = true
    agentInfo = agentData

    console.log('初始化数字人视频，数据:', agentData)

    // 等待视频元素渲染
    let retryCount = 0
    while (!avatarVideoRef.value && retryCount < 10) {
      await new Promise(resolve => setTimeout(resolve, 100))
      retryCount++
    }

    if (!avatarVideoRef.value) {
      throw new Error('视频元素未找到')
    }

    // 初始化数字人引擎
    await initializeDigitalHumanEngine(agentData)

    console.log('数字人视频初始化完成')

  } catch (error) {
    console.error('初始化数字人视频失败:', error)
    avatarLoading.value = false
    throw error
  }
}

// 初始化数字人引擎
const initializeDigitalHumanEngine = async (agentData: any) => {
  try {
    console.log('🔧 初始化数字人引擎，使用数据:', agentData)

    // 优先尝试使用真实的数字人
    try {
      await initializeRealDigitalHuman(agentData)
      isMockEngine.value = false
      console.log('✅ 真实数字人初始化成功')
    } catch (error) {
      console.warn('⚠️ 真实数字人初始化失败，使用模拟模式:', error)
      await initializeMockDigitalHuman(agentData)
      isMockEngine.value = true
    }

    // 设置引擎事件监听
    setupEngineEvents()

  } catch (error) {
    console.error('❌ 数字人引擎初始化完全失败:', error)
    throw error
  }
}

// 初始化真实的数字人（阿里云SDK）
const initializeRealDigitalHuman = async (agentData: any) => {
  try {
    const normalized = {
      channelId: agentData.channelId || agentData.channel_id,
      aiAgentUserId: agentData.aiAgentUserId || agentData.ai_agent_user_id,
      token: agentData.token || agentData.rtcAuthToken || agentData.rtc_auth_token,
      instanceId: agentData.instanceId || agentData.aiAgentInstanceId || agentData.ai_agent_instance_id
    }

    const { channelId, aiAgentUserId, token } = normalized

    console.log('🚀 初始化真实数字人')
    console.log('📺 频道ID:', channelId)
    console.log('🤖 数字人用户ID:', aiAgentUserId)
    console.log('🔑 Token:', token ? 'Available' : 'Missing')

    // 如果使用 aliyun-auikit-aicall，可直接走引擎，不需要 AliRTCSdk
    await connectToRealDigitalHuman(normalized)

  } catch (error) {
    console.error('❌ 真实数字人初始化失败:', error)
    console.log('🔄 回退到模拟模式')
    await initializeMockDigitalHuman(agentData)
    isMockEngine.value = true
  }
}

// 加载阿里云RTC SDK（备用方案，优先使用 aliyun-auikit-aicall 引擎）
const loadAliRTCSDK = async () => {
  const urls = [
    '/libs/AliRTCSdk.js' // 本地兜底（请将 SDK 文件放到 public/libs/AliRTCSdk.js）
  ]
  for (const url of urls) {
    try {
      await new Promise((resolve, reject) => {
        if ((window as any).AliRTCSdk) return resolve(true)
        const script = document.createElement('script')
        script.src = url
        script.crossOrigin = 'anonymous'
        script.onload = () => (window as any).AliRTCSdk ? resolve(true) : reject(new Error('SDK对象不存在'))
        script.onerror = () => reject(new Error('脚本加载失败'))
        document.head.appendChild(script)
      })
      console.log('✅ 阿里云RTC SDK加载成功:', url)
      return true
    } catch (e) {
      console.warn('⚠️ 阿里云RTC SDK加载失败，尝试备用源:', url, e)
    }
  }
  throw new Error('SDK加载失败')
}

// 连接到真实数字人（通过 aliyun-auikit-aicall 引擎）
const connectToRealDigitalHuman = async (agentData: any) => {
  try {
    const channelId = agentData.channelId
    const aiAgentUserId = agentData.aiAgentUserId
    const userToken = agentData.token
    const instanceId = agentData.instanceId

    if (!userToken) throw new Error('缺少用户RTC认证Token')
    if (!channelId || !aiAgentUserId) throw new Error('缺少必要的连接信息')

    // 从Token解析用户ID（更稳妥，保持与Token一致）
    let userIdFromToken = 'user_' + Date.now()
    try {
      const tokenData = JSON.parse(atob(userToken))
      if (tokenData && tokenData.userid) {
        userIdFromToken = tokenData.userid
      }
    } catch (e) {
      console.warn('解析用户Token失败，将使用随机用户ID:', e)
    }

    // 引擎初始化
    const engine = new ARTCAICallEngine()
    const engineConfig = {
      agentElement: avatarVideoRef.value,
      rtcEngineConfig: { environment: 'PROD' as const }
    }

    await engine.init(AICallAgentType.AvatarAgent, engineConfig)

    // 构造与 React 标准控制器一致的实例信息
    const instanceInfo = {
      agentType: AICallAgentType.AvatarAgent,
      instanceId: instanceId,
      channelId: channelId,
      userId: aiAgentUserId,
      rtcToken: userToken,
      reqId: ''
    }

    // 使用现有实例发起通话（与 React 标准实现一致）
    // 监听自动播放失败，提示用户点击播放
    try {
      // @ts-ignore 事件名来自引擎
      engine.on && engine.on('autoPlayFailed', async () => {
        console.warn('autoPlayFailed: 需要用户手势触发播放')
        try { await avatarVideoRef.value?.play() } catch {}
      })
    } catch {}

    await engine.call(userIdFromToken, instanceInfo)

    // 显式绑定视频元素，确保渲染
    if (avatarVideoRef.value) {
      try { engine.setAgentView(avatarVideoRef.value) } catch {}
      try { await avatarVideoRef.value.play() } catch {}
    }

    // 绑定真实引擎的实时字幕事件（用户/智能体）- 参考语音通话的流式字幕实现
    try {
      // AI字幕监听 - 参考语音通话的智能合并逻辑
      engine.on && engine.on('agentSubtitleNotify', (subtitleData: any) => {
        let subtitle = null

        // 处理不同的数据格式
        if (Array.isArray(subtitleData) && subtitleData.length > 0) {
          subtitle = subtitleData[0]
        } else if (subtitleData && typeof subtitleData === 'object') {
          subtitle = subtitleData
        } else {
          return
        }

        if (subtitle && subtitle.sentenceId !== undefined) {
          updateSubtitle({
            data: subtitle,
            source: 'agent'
          })
        }
      })

      // 用户字幕监听 - 参考语音通话的处理逻辑
      engine.on && engine.on('userSubtitleNotify', (subtitleData: any) => {
        let subtitle = null

        // 处理不同的数据格式
        if (Array.isArray(subtitleData) && subtitleData.length > 0) {
          subtitle = subtitleData[0]
        } else if (subtitleData && typeof subtitleData === 'object') {
          subtitle = subtitleData
        } else {
          return
        }

        if (subtitle && subtitle.sentenceId !== undefined) {
          updateSubtitle({
            data: subtitle,
            source: 'user'
          })
        }
      })
      // 监听本地说话音量（不同SDK版本事件名不同）
      try {
        // @ts-ignore
        engine.on && engine.on('activeSpeakerVolumeChanged', (userId: string, volume: number) => {
          if (userId === '' || userId == null) isSpeaking.value = volume > 30
        })
      } catch {}
      try {
        // @ts-ignore
        engine.on && engine.on('speakingVolumeChanged', (userId: string, volume: number) => {
          if (userId === '' || userId == null) isSpeaking.value = volume > 30
        })
      } catch {}
    } catch (e) {
      console.warn('绑定字幕事件失败', e)
    }


    digitalHumanEngine = {
      client: engine,
      setAgentView: (element: HTMLVideoElement) => engine.setAgentView(element),
      muteLocalMicrophone: (muted: boolean) => {
        // 尝试多种可能的麦克风控制方法
        if (typeof engine.mute === 'function') {
          return engine.mute(muted)
        } else if (typeof engine.muteLocalMic === 'function') {
          return engine.muteLocalMic(muted)
        } else if (typeof engine.muteLocalMicrophone === 'function') {
          return engine.muteLocalMicrophone(muted)
        } else {
          console.warn('真实引擎未找到麦克风控制方法')
          return false
        }
      },
      sendTextToAgent: (text: string) => engine.sendTextToAgent(text),
      handup: async () => { await engine.handup() },
      destroy: () => engine.destroy()
    }

    avatarLoading.value = false
    console.log('✅ 真实数字人连接成功（AUI引擎）')

  } catch (error) {
    console.error('❌ 连接真实数字人失败:', error)
    throw error
  }
}

// 初始化模拟数字人
const initializeMockDigitalHuman = async (agentData: any) => {
  try {
    // 创建模拟的数字人视频流
    const canvas = document.createElement('canvas')
    canvas.width = 640

    canvas.height = 480
    const ctx = canvas.getContext('2d')

    if (ctx && avatarVideoRef.value) {
      // 创建动态数字人画面
      const drawDigitalHuman = () => {
        // 清除画布
        ctx.clearRect(0, 0, canvas.width, canvas.height)

        // 渐变背景
        const gradient = ctx.createLinearGradient(0, 0, canvas.width, canvas.height)
        gradient.addColorStop(0, '#667eea')
        gradient.addColorStop(1, '#764ba2')
        ctx.fillStyle = gradient
        ctx.fillRect(0, 0, canvas.width, canvas.height)

        // 数字人头像
        const centerX = canvas.width / 2
        const centerY = canvas.height / 2 - 50

        // 头部
        ctx.fillStyle = 'rgba(255, 255, 255, 0.9)'
        ctx.beginPath()
        ctx.arc(centerX, centerY, 80, 0, Math.PI * 2)
        ctx.fill()

        // 眼睛
        ctx.fillStyle = '#333'
        ctx.beginPath()
        ctx.arc(centerX - 25, centerY - 20, 8, 0, Math.PI * 2)
        ctx.arc(centerX + 25, centerY - 20, 8, 0, Math.PI * 2)
        ctx.fill()

        // 嘴巴（根据说话状态变化）
        ctx.strokeStyle = '#333'
        ctx.lineWidth = 3
        ctx.beginPath()
        if (agentSpeaking.value) {
          // 说话时的嘴型
          ctx.arc(centerX, centerY + 20, 15, 0, Math.PI)
        } else {
          // 静默时的嘴型
          ctx.arc(centerX, centerY + 20, 8, 0, Math.PI)
        }
        ctx.stroke()

        // 状态文字
        ctx.fillStyle = 'white'
        ctx.font = 'bold 24px Arial'
        ctx.textAlign = 'center'
        ctx.fillText('伴伴AI数字人', centerX, centerY + 120)

        ctx.font = '16px Arial'
        if (agentSpeaking.value) {
          ctx.fillText('正在说话...', centerX, centerY + 150)
        } else {
          ctx.fillText('等待对话', centerX, centerY + 150)
        }

        // 连接信息
        ctx.font = '12px Arial'
        ctx.fillStyle = 'rgba(255, 255, 255, 0.7)'
        ctx.fillText(`实例ID: ${agentData.ai_agent_instance_id || 'Mock'}`, centerX, centerY + 180)

        requestAnimationFrame(drawDigitalHuman)
      }

      drawDigitalHuman()

      // 将canvas转换为视频流
      const stream = canvas.captureStream(30)
      avatarVideoRef.value.srcObject = stream

      // 模拟数字人引擎 - 改进麦克风控制
      digitalHumanEngine = {
        client: {
          // 模拟真实引擎的麦克风控制方法
          mute: async (muted: boolean) => {
            console.log('模拟引擎 - 麦克风静音:', muted)
            microphoneMuted.value = muted
            return true
          },
          muteLocalMic: async (muted: boolean) => {
            console.log('模拟引擎 - 本地麦克风静音:', muted)
            microphoneMuted.value = muted
            return true
          },
          enableMicrophone: async (enabled: boolean) => {
            console.log('模拟引擎 - 启用麦克风:', enabled)
            microphoneMuted.value = !enabled
            return true
          }
        },
        setAgentView: (element: HTMLVideoElement) => {
          console.log('设置数字人视频元素:', element)
        },
        muteLocalMicrophone: (muted: boolean) => {
          console.log('麦克风静音:', muted)
          microphoneMuted.value = muted
          return true
        },
        sendTextToAgent: (text: string) => {
          console.log('发送文本给数字人:', text)
          return true
        },
        handup: async () => {
          console.log('挂断数字人通话')
        },
        destroy: () => {
          console.log('销毁数字人引擎')
          if (avatarVideoRef.value?.srcObject) {
            const stream = avatarVideoRef.value.srcObject as MediaStream
            stream.getTracks().forEach(track => track.stop())
            avatarVideoRef.value.srcObject = null
          }
        }
      }

      console.log('模拟数字人引擎初始化完成')
    }

  } catch (error) {
    console.error('模拟数字人初始化失败:', error)
    throw error
  }
}

// 设置引擎事件监听
const setupEngineEvents = () => {
  // 模拟数字人状态变化
  setTimeout(() => {
    console.log('数字人状态: Ready')
    avatarLoading.value = false
  }, 1000)

  // 模拟字幕事件（仅在模拟模式下启用）
  setInterval(() => {
    if (!isMockEngine.value) return
    if (callState.value === 'connected' && Math.random() > 0.8) {
      const responses = [
        '我正在聆听您的问题',
        '请继续说话，我在认真听',
        '有什么可以帮助您的吗？',
        '我理解您的意思'
      ]
      const randomResponse = responses[Math.floor(Math.random() * responses.length)]
      addSubtitle('agent', randomResponse)
    }
  }, 10000)
}

// 添加视频背景效果（参考React版本）
const addVideoBackground = (element: HTMLVideoElement) => {
  // 移动端不支持
  const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)
  if (isMobile) return

  // 已经添加过
  if (element.getAttribute('data-has-background')) return

  const handleTimeUpdate = () => {
    if (!element.videoWidth || !element.videoHeight) return
    const currentTime = element.currentTime
    if (currentTime > 0) {
      element.removeEventListener('timeupdate', handleTimeUpdate)
      const canvas = document.createElement('canvas')
      canvas.width = element.videoWidth
      canvas.height = element.videoHeight
      const ctx = canvas.getContext('2d')
      ctx?.drawImage(element, 0, 0, canvas.width, canvas.height)

      element.setAttribute('data-has-background', 'true')

      // 转换为 Blob
      canvas.toBlob(function (blob) {
        if (!blob) return

        // 获取 Blob 地址
        const blobUrl = URL.createObjectURL(blob)
        const parent = element.parentElement as HTMLDivElement
        if (parent) {
          const div = document.createElement('div')
          div.className = '_video-background'
          div.style.backgroundImage = `url(${blobUrl})`
          parent.appendChild(div)
        }
      }, 'image/png')
    }
  }

  element.addEventListener('timeupdate', handleTimeUpdate)
}

const endCall = async () => {
  console.log('🔴 开始挂断数字人通话...')

  try {
    // 1. 销毁数字人引擎
    if (digitalHumanEngine) {
      console.log('📞 正在挂断数字人引擎连接...')

      try {
        if (digitalHumanEngine.handup) {
          await digitalHumanEngine.handup()
          console.log('✅ 数字人引擎 handup 成功')
        }
      } catch (hangupError) {
        console.error('❌ 数字人引擎 handup 失败:', hangupError)
      }

      try {
        if (digitalHumanEngine.destroy) {
          digitalHumanEngine.destroy()
          console.log('✅ 数字人引擎 destroy 成功')
        }
      } catch (destroyError) {
        console.error('❌ 数字人引擎 destroy 失败:', destroyError)
      }

      digitalHumanEngine = null
      console.log('🗑️ 数字人引擎对象已清空')
    } else {
      console.log('⚠️ 数字人引擎对象不存在，跳过销毁')
    }

    // 2. 停止所有媒体流
    console.log('🎥 正在停止所有媒体流...')
    stopAllMediaStreams()

    // 3. 重置状态
    console.log('🔄 正在重置通话状态...')
    callState.value = 'none'
    subtitleHistory.value = []
    currentSubtitle.value = null
    isSpeaking.value = false
    agentSpeaking.value = false
    avatarLoading.value = true
    agentInfo = null

    console.log('✅ 数字人通话挂断完成')
    showToast('通话已结束')

  } catch (error) {
    console.error('❌ 结束通话失败:', error)
    showToast('挂断通话时出现错误')
  }
}

const stopAllMediaStreams = () => {
  console.log('🎬 开始停止所有媒体流...')

  // 停止数字人视频流
  if (avatarVideoRef.value?.srcObject) {
    console.log('📹 正在停止数字人视频流...')
    const stream = avatarVideoRef.value.srcObject as MediaStream
    const tracks = stream.getTracks()
    console.log(`📹 找到 ${tracks.length} 个视频轨道`)

    tracks.forEach((track, index) => {
      console.log(`📹 停止视频轨道 ${index + 1}: ${track.kind} - ${track.label}`)
      track.stop()
    })

    avatarVideoRef.value.srcObject = null
    console.log('✅ 数字人视频流已停止')
  } else {
    console.log('⚠️ 没有找到数字人视频流')
  }



  console.log('✅ 所有媒体流停止完成')
}

const retryConnection = () => {
  callState.value = 'none'
  callErrorMessage.value = ''
  startCall()
}





const toggleSubtitle = () => {
  subtitleVisible.value = !subtitleVisible.value
}

const toggleSettings = () => {
  showSettings.value = !showSettings.value
}

// 参考语音通话的字幕更新逻辑
const updateSubtitle = (subtitle: { data: any, source: 'user' | 'agent' }) => {
  const existSubtitleIndex = subtitleHistory.value.findIndex(
    (item) => item.sentenceId === subtitle.data.sentenceId && item.source === subtitle.source
  )

  // 字幕已经存在，更新内容
  if (existSubtitleIndex > -1) {
    const existSubtitle = subtitleHistory.value[existSubtitleIndex]
    if (subtitle.source === 'agent') {
      // AI字幕累积文本
      subtitle.data.text = existSubtitle.text + subtitle.data.text
    }

    // 更新现有字幕
    subtitleHistory.value[existSubtitleIndex] = {
      source: subtitle.source,
      text: subtitle.data.text,
      timestamp: Date.now(),
      sentenceId: subtitle.data.sentenceId,
      end: subtitle.data.end
    }

    // 触发响应式更新
    subtitleHistory.value = [...subtitleHistory.value]
  } else if (subtitle.data.text) {
    // 新字幕，添加到列表
    const newSubtitle = {
      source: subtitle.source,
      text: subtitle.data.text,
      timestamp: Date.now(),
      sentenceId: subtitle.data.sentenceId,
      end: subtitle.data.end
    }

    subtitleHistory.value = [...subtitleHistory.value, newSubtitle]
  }

  // 更新当前字幕显示
  if (subtitle.data.text) {
    currentSubtitle.value = {
      source: subtitle.source,
      text: subtitle.data.text,
      timestamp: Date.now(),
      sentenceId: subtitle.data.sentenceId,
      end: subtitle.data.end
    }
  }

  // 自动滚动到底部
  nextTick(() => {
    scrollToBottom()
  })
}

const addSubtitle = (source: 'user' | 'agent', text: string) => {
  const subtitle = {
    source,
    text,
    timestamp: Date.now(),
    sentenceId: Date.now().toString()
  }

  currentSubtitle.value = subtitle
  subtitleHistory.value.push(subtitle)

  // 自动滚动到底部
  nextTick(() => {
    scrollToBottom()
  })

  // 清除当前字幕显示
  setTimeout(() => {
    if (currentSubtitle.value === subtitle) {
      currentSubtitle.value = null
    }
  }, 3000)
}

const clearChat = () => {
  subtitleHistory.value = []
  currentSubtitle.value = null
}

const scrollToBottom = () => {
  if (chatMessagesRef.value) {
    chatMessagesRef.value.scrollTop = chatMessagesRef.value.scrollHeight
  }
}

const onAvatarLoaded = () => {
  console.log('数字人Avatar加载完成')
  avatarLoading.value = false
}

const onVideoLoadedData = () => {
  console.log('数字人视频数据加载完成')
  updateAspectFromVideo()
  avatarLoading.value = false
}

const onVideoError = (event: Event) => {
  console.error('数字人视频加载错误:', event)
  avatarLoading.value = false
}

const onVideoTimeUpdate = () => {
  // 视频时间更新事件
  if (avatarVideoRef.value && avatarVideoRef.value.currentTime > 0) {
    updateAspectFromVideo()
    avatarLoading.value = false
  }
}

// 数字人通话交互（不需要语音识别）
const startDigitalHumanInteraction = () => {
  console.log('数字人通话已建立')

  if (isMockEngine.value) {
    // 模拟数字人的一些交互行为
    setTimeout(() => {
      agentSpeaking.value = true
      setTimeout(() => {
        agentSpeaking.value = false
        addSubtitle('agent', '我可以看到您了，有什么可以帮助您的吗？')
      }, 2000)
    }, 3000)
  }
}

// 数字人文本输入处理
const sendTextToDigitalHuman = (text: string) => {
  console.log('发送文本给数字人:', text)

  if (digitalHumanEngine) {
    digitalHumanEngine.sendTextToAgent(text)
  }

  if (isMockEngine.value) {
    // 模拟数字人回复
    setTimeout(() => {



      agentSpeaking.value = true
      setTimeout(() => {
        agentSpeaking.value = false
        const responses = [
          '我理解您的意思。',
          '这是一个很好的问题。',
          '让我为您详细解答。',
          '感谢您的提问。'
        ]
        const response = responses[Math.floor(Math.random() * responses.length)]
        addSubtitle('agent', response)
      }, 2000)
    }, 1000)
  }
}

// 数字人通话控制 - mirrors the voice-call microphone control logic.
// Toggle the local microphone for the digital-human call:
//   1. probe several possible engine mute APIs (SDK versions differ),
//   2. apply the new mute state through the first API found,
//   3. when unmuting, re-request mic permission so the engine has a live track,
//   4. report the outcome to the user via toast.
// FIX: the previous version fell through when NO mute API existed and
// still showed a success toast; now it fails fast with a fail toast.
const toggleMicrophone = async () => {
  console.log('🎤 切换麦克风状态，当前状态:', microphoneMuted.value ? '关闭' : '开启')

  if (!digitalHumanEngine) {
    showToast({
      type: 'fail',
      message: '数字人引擎未初始化'
    })
    return
  }

  const newMutedState = !microphoneMuted.value
  console.log('🎤 新的麦克风状态将设置为:', newMutedState ? '关闭' : '开启')

  try {
    let methodUsed = ''
    let result = false

    // Try the known API variants in order of preference; the first one
    // that exists wins. Note enableMicrophone has inverted semantics.
    if (typeof digitalHumanEngine.client?.mute === 'function') {
      methodUsed = 'client.mute'
      result = await digitalHumanEngine.client.mute(newMutedState)
    } else if (typeof digitalHumanEngine.client?.muteLocalMic === 'function') {
      methodUsed = 'client.muteLocalMic'
      result = await digitalHumanEngine.client.muteLocalMic(newMutedState)
    } else if (typeof digitalHumanEngine.client?.enableMicrophone === 'function') {
      methodUsed = 'client.enableMicrophone'
      result = await digitalHumanEngine.client.enableMicrophone(!newMutedState)
    } else if (typeof digitalHumanEngine.muteLocalMicrophone === 'function') {
      methodUsed = 'muteLocalMicrophone'
      result = await digitalHumanEngine.muteLocalMicrophone(newMutedState)
    } else {
      // No usable control method — dump diagnostics and bail out
      // instead of pretending the toggle succeeded.
      console.warn('🎤 未找到麦克风控制方法')
      console.log('🎤 可用的方法:', Object.keys(digitalHumanEngine))
      if (digitalHumanEngine.client) {
        console.log('🎤 client可用的方法:', Object.keys(digitalHumanEngine.client))
      }
      showToast({
        type: 'fail',
        message: '操作失败'
      })
      return
    }

    console.log(`🎤 使用方法: ${methodUsed}, 结果: ${result}`)

    // The engine call went through — commit the new state.
    microphoneMuted.value = newMutedState

    // When unmuting, re-request permission so the mic is actually live.
    if (!microphoneMuted.value) {
      try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
        console.log('麦克风重新激活成功')
        // Deliberately NOT stopping the stream here — the engine uses it.
      } catch (error) {
        console.error('麦克风激活失败:', error)
        microphoneMuted.value = true // roll back the state change
        showToast({
          type: 'fail',
          message: '麦克风激活失败'
        })
        return
      }
    }

    showToast({
      type: 'success',
      message: microphoneMuted.value ? '麦克风已关闭' : '麦克风已开启'
    })
  } catch (error) {
    console.error('切换麦克风失败:', error)
    showToast({
      type: 'fail',
      message: '操作失败'
    })
  }
}



// Lifecycle hooks
// On mount: log initial diagnostics (page loaded + starting mic state).
onMounted(() => {
  console.log('数字人页面已加载')
  console.log('🎤 初始麦克风状态:', microphoneMuted.value ? '关闭' : '开启')
})

// Final teardown when the component is destroyed: if a call is still
// live, hang up via endCall() (which also releases media); otherwise
// just stop any lingering media streams directly.
onUnmounted(async () => {
  console.log('🔄 页面即将卸载，开始清理数字人通话资源...')

  const callInProgress =
    callState.value === 'connected' || callState.value === 'connecting'

  if (callInProgress) {
    console.log('📞 检测到通话进行中，执行挂断操作...')
    await endCall()
  } else {
    console.log('🎬 直接清理媒体流资源...')
    stopAllMediaStreams()
  }

  console.log('✅ 数字人通话资源清理完成')
})
// Earlier teardown hook (fires before onUnmounted): hang up the engine
// as soon as possible so the remote side is released promptly.
onBeforeUnmount(async () => {
  console.log('⚠️ 页面即将卸载，提前清理数字人通话...')

  if (!digitalHumanEngine) return

  console.log('🔴 提前挂断数字人引擎...')
  try {
    // NOTE(review): the engine method really is spelled `handup` here —
    // possibly a typo for `hangup`; confirm against the SDK before renaming.
    if (digitalHumanEngine.handup) {
      await digitalHumanEngine.handup()
      console.log('✅ 提前挂断成功')
    }
  } catch (error) {
    console.error('❌ 提前挂断失败:', error)
  }
})
</script>

<style scoped>
/* ===== Base layout ===== */
.digital-human {
  /* 100dvh tracks the *dynamic* mobile viewport (URL bar show/hide);
     100vh remains as a fallback for browsers without dvh support. */
  height: 100vh;
  height: 100dvh;
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
  display: flex;
  flex-direction: column;
}

.nav-bar {
  background: rgba(255, 255, 255, 0.1);
  /* -webkit- prefix required for backdrop-filter on Safari / older iOS */
  -webkit-backdrop-filter: blur(10px);
  backdrop-filter: blur(10px);
}

/* Connecting state: centered spinner + hint text */
.connecting-state {
  flex: 1;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  color: white;
  text-align: center;
}

.connecting-state p {
  margin-top: 16px;
  font-size: 16px;
  opacity: 0.8;
}

/* Call screen: column flexbox filling the space under the nav bar */
.call-interface {
  flex: 1;
  display: flex;
  flex-direction: column;
  overflow: hidden;
}

/* Hero area: a 5-column grid shared by the avatar and the subtitle panel */
.digital-human-hero {
  flex: 1;
  display: grid;
  grid-template-columns: repeat(5, 1fr);
  align-items: center;
  justify-content: center;
  position: relative;
  min-height: 300px;
  gap: 16px;
}

/* Subtitles ON: video spans columns 1-3, the chat panel columns 4-5 */
.call-interface.with-subtitle .character.avatar {
  grid-column: 1 / 4;
}

.call-interface.with-subtitle .chat-container.sidebar {
  grid-column: 4 / 6;
}

/* Subtitles OFF: video centered across columns 2-4 */
.call-interface.no-subtitle .character.avatar {
  grid-column: 2 / 5;
}

/* Right-hand subtitle panel stretches the full grid-row height */
.chat-container.sidebar {
  display: flex;
  flex-direction: column;
  align-self: stretch;
  height: 100%;
  max-height: none;
}

/* Avatar wrapper: width-driven; the aspect-ratio box is set inline from JS.
   A fixed height would squeeze the video on some devices, hence the floor. */
.character.avatar {
  position: relative;
  width: 100%;
  max-width: 960px; /* keep the video from growing absurdly wide */
  min-height: 240px;
  margin: 0 auto;
}

/* Cap video height so it never fills the whole screen */
.call-interface ._video-box {
  max-height: 60vh;
}

/* Video frame: rounded corners, clipped content, black letterboxing */
._video-box {
  position: relative;
  width: 100%;
  height: auto;
  border-radius: 16px;
  overflow: hidden;
  background: #000;
  transition: all 0.3s ease;
}

/* Gradient backdrop while the stream is still loading… */
._video-box.is-loading {
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}

/* …plain black once the stream is up */
._video-box.is-loaded {
  background: #000;
}

.avatar-video {
  width: 100%;
  height: 100%;
  object-fit: contain; /* never crop the avatar; letterbox instead */
  background: #000;
  border-radius: 16px;
}

/* Blurred ambient backdrop behind the video.
   NOTE(review): no background-image is ever assigned to this layer in
   this chunk, so it appears to render empty — confirm before relying on it. */
._video-background {
  position: absolute;
  inset: 0;
  background-size: cover;
  background-position: center;
  filter: blur(20px);
  opacity: 0.3;
  z-index: -1;
}

/* Five-dot bouncing loader, centered over the video box */
._video-loading {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  display: flex;
  gap: 8px;
  z-index: 10;
}

._video-loading li {
  list-style: none;
  width: 12px;
  height: 12px;
  border-radius: 50%;
  background: rgba(255, 255, 255, 0.8);
  animation: loading-bounce 1.4s ease-in-out infinite both;
}

/* Stagger each dot so the bounce travels left to right */
._video-loading li:nth-child(1) { animation-delay: -0.32s; }
._video-loading li:nth-child(2) { animation-delay: -0.16s; }
._video-loading li:nth-child(3) { animation-delay: 0s; }
._video-loading li:nth-child(4) { animation-delay: 0.16s; }
._video-loading li:nth-child(5) { animation-delay: 0.32s; }

@keyframes loading-bounce {
  0%, 80%, 100% { transform: scale(0); }
  40% { transform: scale(1); }
}



/* Floating current-subtitle bar, anchored above the control buttons */
.subtitle-display {
  position: absolute;
  bottom: 200px;
  left: 50%;
  transform: translateX(-50%);
  width: 90%;
  max-width: 600px;
  z-index: 100;
}

.subtitle-inner {
  display: flex;
  align-items: center;
  gap: 12px;
  background: rgba(0, 0, 0, 0.8);
  /* -webkit- prefix required for backdrop-filter on Safari / older iOS */
  -webkit-backdrop-filter: blur(10px);
  backdrop-filter: blur(10px);
  padding: 12px 16px;
  border-radius: 24px;
  color: white;
}

.subtitle-source {
  flex-shrink: 0;
}

/* Round speaker badges: agent = green, user = blue */
.agent-icon,
.user-icon {
  width: 32px;
  height: 32px;
  border-radius: 50%;
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 16px;
}

.agent-icon {
  background: #4CAF50;
}

.user-icon {
  background: #2196F3;
}

.subtitle-text {
  flex: 1;
  font-size: 16px;
  line-height: 1.4;
}

/* Chat history card */
.chat-container {
  background: rgba(255, 255, 255, 0.95);
  /* -webkit- prefix required for backdrop-filter on Safari / older iOS */
  -webkit-backdrop-filter: blur(10px);
  backdrop-filter: blur(10px);
  margin: 16px;
  border-radius: 16px;
  max-height: 300px;
  display: flex;
  flex-direction: column;
}

.chat-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 12px 16px;
  border-bottom: 1px solid rgba(0, 0, 0, 0.1);
  color: #333;
}

.chat-header h3 {
  margin: 0;
  font-size: 16px;
  font-weight: 600;
}

.header-right {
  display: flex;
  align-items: center;
  gap: 12px;
  font-size: 12px;
  color: #666;
}

.status-indicator {
  display: flex;
  align-items: center;
  gap: 4px;
}

.status-dot {
  display: inline-block;
}

/* Pulse the status emoji while either side is speaking */
.status-dot.speaking,
.status-dot.agent-speaking {
  animation: pulse 1s infinite;
}

.chat-messages {
  flex: 1;
  overflow-y: auto;
  padding: 16px;
  max-height: 200px;
}

/* Placeholder shown while the history list is empty */
.empty-chat {
  padding: 32px 16px;
  text-align: center;
  color: #666;
}

.empty-icon {
  font-size: 48px;
  margin-bottom: 16px;
}

.empty-hint {
  margin-top: 8px;
  font-size: 14px;
  opacity: 0.7;
}

/* One chat bubble row; user rows are mirrored right-to-left */
.message-item {
  display: flex;
  gap: 12px;
  margin-bottom: 16px;
}

.message-item.user {
  flex-direction: row-reverse;
}

.message-content {
  flex: 1;
  max-width: 70%;
}

.message-item.user .message-content {
  text-align: right;
}

.message-text {
  padding: 12px 16px;
  border-radius: 16px;
  background: #f5f5f5;
  font-size: 14px;
  line-height: 1.4;
  word-break: break-word;
}

/* User bubbles get the blue accent */
.message-item.user .message-text {
  background: #2196F3;
  color: white;
}

.message-time {
  margin-top: 4px;
  font-size: 12px;
  color: #999;
}

/* Circular avatar next to each bubble */
.message-avatar {
  flex-shrink: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  width: 32px;
  height: 32px;
  border-radius: 50%;
  font-size: 16px;
  background: #f0f0f0;
}

.chat-footer {
  display: flex;
  justify-content: space-between;
  padding: 12px 16px;
  border-top: 1px solid rgba(0, 0, 0, 0.1);
}

/* Bottom call-control bar */
.control-buttons {
  display: flex;
  justify-content: space-around;
  padding: 20px 16px;
  background: rgba(255, 255, 255, 0.1);
  /* -webkit- prefix required for backdrop-filter on Safari / older iOS */
  -webkit-backdrop-filter: blur(10px);
  backdrop-filter: blur(10px);
}

.control-item {
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 8px;
}

/* !important overrides Vant's built-in button sizing */
.control-btn {
  width: 56px !important;
  height: 56px !important;
  font-size: 24px;
}

.control-btn.end-call {
  background: #f44336 !important;
  border-color: #f44336 !important;
}

.control-label {
  font-size: 12px;
  color: rgba(255, 255, 255, 0.8);
  text-align: center;
  white-space: nowrap;
}

/* Fatal-error screen */
.error-state {
  flex: 1;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  padding: 32px;
  color: white;
  text-align: center;
}

.error-icon {
  font-size: 64px;
  margin-bottom: 16px;
}

.error-state h3 {
  margin: 0 0 16px 0;
  font-size: 24px;
}

.error-state p {
  margin: 0 0 24px 0;
  font-size: 16px;
  opacity: 0.8;
}

.error-details {
  margin-bottom: 24px;
  padding: 16px;
  border-radius: 12px;
  background: rgba(255, 255, 255, 0.1);
  text-align: left;
}

.error-details ul {
  margin: 8px 0 0 0;
  padding-left: 20px;
}

.error-actions {
  display: flex;
  gap: 16px;
}

/* Pre-call welcome screen */
.initial-state {
  flex: 1;
  overflow-y: auto;
}

.welcome-container {
  padding: 32px 16px;
  color: white;
  text-align: center;
}

.welcome-icon {
  font-size: 80px;
  margin-bottom: 16px;
}

.welcome-container h2 {
  margin: 0 0 8px 0;
  font-size: 28px;
  font-weight: 700;
}

.welcome-desc {
  margin: 0 0 32px 0;
  font-size: 16px;
  opacity: 0.8;
}



/* Feature highlights on the welcome screen */
.features {
  margin-bottom: 32px;
}

.features h3 {
  margin: 0 0 16px 0;
  font-size: 18px;
  font-weight: 600;
}

/* Two-column card grid (collapses to one column on small screens) */
.feature-list {
  display: grid;
  grid-template-columns: repeat(2, 1fr);
  gap: 12px;
  max-width: 400px;
  margin: 0 auto;
}

.feature-item {
  display: flex;
  align-items: center;
  gap: 8px;
  padding: 12px;
  border-radius: 8px;
  background: rgba(255, 255, 255, 0.1);
  font-size: 14px;
}

.feature-icon {
  font-size: 20px;
}

/* Call-to-action section */
.start-section {
  margin-bottom: 32px;
}

/* !important overrides Vant's built-in button sizing */
.start-btn {
  width: 200px !important;
  height: 48px !important;
  font-size: 16px !important;
  font-weight: 600 !important;
  margin-bottom: 16px;
}

.start-hint {
  margin: 0;
  font-size: 14px;
  opacity: 0.7;
}

/* Settings popup */
.settings-panel {
  display: flex;
  flex-direction: column;
  height: 100%;
}

.settings-header {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 16px;
  border-bottom: 1px solid #eee;
}

.settings-header h3 {
  margin: 0;
  font-size: 18px;
  font-weight: 600;
}

.settings-content {
  flex: 1;
  overflow-y: auto;
  padding: 16px;
}

.setting-group {
  margin-bottom: 24px;
}

.setting-group h4 {
  margin: 0 0 12px 0;
  font-size: 16px;
  font-weight: 600;
  color: #333;
}



/* Animations */
/* Expanding ripple effect.
   NOTE(review): no selector in this style block references `wave-expand`;
   it may be used by markup outside this chunk, or be dead — confirm
   before removing. */
@keyframes wave-expand {
  0% {
    opacity: 1;
    transform: scale(1);
  }
  100% {
    opacity: 0;
    transform: scale(1.5);
  }
}

/* Gentle breathing pulse used by the speaking status dots */
@keyframes pulse {
  0%, 100% {
    opacity: 1;
    transform: scale(1);
  }
  50% {
    opacity: 0.7;
    transform: scale(1.1);
  }
}

/* Responsive: compact controls on narrow screens */
@media (max-width: 768px) {
  .feature-list {
    grid-template-columns: 1fr;
  }

  .control-buttons {
    padding: 16px 8px;
  }

  .control-item {
    gap: 4px;
  }

  .control-btn {
    width: 48px !important;
    height: 48px !important;
    font-size: 20px;
  }

  .control-label {
    font-size: 10px;
  }
}

/* Developer-only helpers */
.dev-tools {
  margin-top: 20px;
  padding-top: 20px;
  border-top: 1px dashed #e0e0e0;
}

.dev-btn {
  opacity: 0.7;
  font-size: 12px;
}

.dev-btn:hover {
  opacity: 1;
}
