<template>
  <div class="digital-human-vision">
    <div class="vision-container">
      <!-- User webcam feed (muted: it is the local user's own camera) -->
      <div class="user-video-container">
        <video 
          ref="userVideoRef" 
          class="user-video"
          autoplay
          muted
          playsinline
        />
        <div class="user-video-label">您</div>
      </div>
      
      <!-- Digital-human (agent) video; the stream is attached by the controller -->
      <!-- NOTE(review): autoplay without muted may be blocked by browser autoplay policies — confirm the controller handles playback failure -->
      <div class="agent-video-container">
        <video 
          ref="agentVideoRef" 
          class="agent-video"
          autoplay
          playsinline
        />
        <div class="agent-video-label">数字人</div>
      </div>
      
      <!-- Vision-analysis overlay: status indicator plus latest detection results -->
      <div class="vision-info">
        <div class="vision-status">
          <div class="status-indicator" :class="visionStatusClass"></div>
          <span>{{ visionStatusText }}</span>
        </div>
        <div v-if="visionData" class="vision-data">
          <p>检测到: {{ visionData.objects?.join(', ') || '无' }}</p>
          <p>情绪: {{ visionData.emotion || '中性' }}</p>
        </div>
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, computed, onMounted, onUnmounted, watch } from 'vue'
import type { DigitalHumanController } from '../controller/DigitalHumanController'

// Props: the digital-human controller is created by the parent and may be
// null until initialization completes (see the watch below).
interface Props {
  controller: DigitalHumanController | null
}

const props = defineProps<Props>()

// Reactive state
const userVideoRef = ref<HTMLVideoElement | null>(null)    // local webcam <video>
const agentVideoRef = ref<HTMLVideoElement | null>(null)   // digital-human <video>
const visionStatus = ref<string>('initializing')           // 'initializing' | 'active' | 'error'
const visionData = ref<any>(null)                          // latest { objects, emotion } result

// Computed properties

// CSS modifier class for the status dot, derived from the current vision status.
const visionStatusClass = computed(() => {
  const classByStatus: Record<string, string> = {
    active: 'status-active',
    error: 'status-error'
  }
  return classByStatus[visionStatus.value] ?? 'status-initializing'
})

// Human-readable label shown next to the status dot.
const visionStatusText = computed(() => {
  const textByStatus: Record<string, string> = {
    active: '视觉分析中',
    error: '视觉错误'
  }
  return textByStatus[visionStatus.value] ?? '初始化中'
})

// Starts the user's webcam and attaches the stream to the user <video> element.
// Updates visionStatus to 'active' on success or 'error' on failure.
const startUserCamera = async () => {
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ 
      video: true, 
      audio: false 
    })
    if (userVideoRef.value) {
      userVideoRef.value.srcObject = stream
      visionStatus.value = 'active'
    } else {
      // The ref is gone (e.g. the component unmounted while the permission
      // prompt was open). Release the camera immediately — otherwise the
      // stream leaks and the camera indicator light stays on.
      stream.getTracks().forEach(track => track.stop())
    }
  } catch (error) {
    console.error('Failed to start user camera:', error)
    visionStatus.value = 'error'
  }
}

// Timer handle for the simulated analysis loop, kept at module scope so the
// unmount hook can cancel it. (The original discarded the setInterval id, so
// the timer kept firing after the component was destroyed.)
let visionTimer: ReturnType<typeof setInterval> | null = null

// Publishes fake vision-analysis results every 3s while the camera is active.
const simulateVisionAnalysis = () => {
  const objects = ['人脸', '手势', '书本']
  const emotions = ['高兴', '专注', '思考', '疑惑']
  
  // Guard against double-start: replace any previous timer.
  if (visionTimer !== null) {
    clearInterval(visionTimer)
  }
  visionTimer = setInterval(() => {
    if (visionStatus.value === 'active') {
      visionData.value = {
        objects: objects.slice(0, Math.floor(Math.random() * 3) + 1),
        emotion: emotions[Math.floor(Math.random() * emotions.length)]
      }
    }
  }, 3000)
}

// Attach the agent <video> element whenever a controller becomes available
// (the controller prop may start as null and be set later by the parent).
watch(() => props.controller, (newController) => {
  if (newController && agentVideoRef.value) {
    newController.setAgentView(agentVideoRef.value)
  }
}, { immediate: true })

// Lifecycle
onMounted(() => {
  startUserCamera()
  simulateVisionAnalysis()
  
  // The immediate watch above may have run before the template refs were
  // mounted; re-attach now that agentVideoRef is guaranteed to exist.
  if (agentVideoRef.value && props.controller) {
    props.controller.setAgentView(agentVideoRef.value)
  }
})

onUnmounted(() => {
  // Cancel the simulated analysis timer.
  if (visionTimer !== null) {
    clearInterval(visionTimer)
    visionTimer = null
  }
  // Stop all webcam tracks so the camera is released.
  if (userVideoRef.value?.srcObject) {
    const stream = userVideoRef.value.srcObject as MediaStream
    stream.getTracks().forEach(track => track.stop())
  }
})
</script>

<style scoped>
/* Full-viewport black stage centering the two-video grid. */
.digital-human-vision {
  width: 100%;
  height: 100%;
  display: flex;
  align-items: center;
  justify-content: center;
  background: #000;
}

/* Side-by-side grid: user video | agent video. */
.vision-container {
  position: relative;
  width: 100%;
  height: 100%;
  display: grid;
  grid-template-columns: 1fr 1fr;
  gap: 1rem;
  padding: 1rem;
}

.user-video-container,
.agent-video-container {
  position: relative;
  border-radius: 1rem;
  overflow: hidden;
  background: #333;
}

.user-video,
.agent-video {
  width: 100%;
  height: 100%;
  object-fit: cover;
}

/* Small pill label in the top-left corner of each video tile. */
.user-video-label,
.agent-video-label {
  position: absolute;
  top: 0.5rem;
  left: 0.5rem;
  background: rgba(0, 0, 0, 0.7);
  backdrop-filter: blur(10px);
  color: white;
  padding: 0.25rem 0.75rem;
  border-radius: 1rem;
  font-size: 0.75rem;
  font-weight: 500;
}

/* Centered overlay at the bottom showing status + latest analysis results. */
.vision-info {
  position: absolute;
  bottom: 1rem;
  left: 50%;
  transform: translateX(-50%);
  background: rgba(0, 0, 0, 0.8);
  backdrop-filter: blur(10px);
  color: white;
  padding: 1rem;
  border-radius: 1rem;
  min-width: 200px;
  text-align: center;
}

.vision-status {
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 0.5rem;
  margin-bottom: 0.5rem;
  font-size: 0.875rem;
  font-weight: 500;
}

/* Pulsing status dot; color comes from the status-* modifier classes. */
.status-indicator {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  animation: pulse 2s infinite;
}

.status-initializing {
  background: #ffc107;
}

.status-active {
  background: #28a745;
}

.status-error {
  background: #dc3545;
  animation: none; /* a steady dot signals a terminal error state */
}

.vision-data {
  font-size: 0.75rem;
  opacity: 0.8;
}

.vision-data p {
  margin: 0.25rem 0;
}

@keyframes pulse {
  0% {
    opacity: 1;
    transform: scale(1);
  }
  50% {
    opacity: 0.5;
    transform: scale(1.2);
  }
  100% {
    opacity: 1;
    transform: scale(1);
  }
}

/* Responsive design: stack the videos vertically on narrow screens. */
@media (max-width: 768px) {
  .vision-container {
    grid-template-columns: 1fr;
    grid-template-rows: 1fr 1fr;
    gap: 0.5rem;
    padding: 0.5rem;
  }
  
  .vision-info {
    bottom: 0.5rem;
    padding: 0.75rem;
    min-width: 150px;
  }
}
</style>
