<template>
  <el-container style="height: 100vh; padding: 20px;">
    <el-header>
      <h2>模拟面试系统</h2>
    </el-header>

    <el-main>
      <el-row justify="center">
        <!-- Live camera preview; hidden until the interview starts -->
        <video 
          ref="videoRef" 
          class="video-stream" 
          autoplay 
          playsinline
          v-show="isInterviewStarted"
        ></video>

        <!-- "Start interview" entry button -->
        <div v-if="!isInterviewStarted" class="start-button">
          <el-button type="primary" @click="startInterview">开始面试</el-button>
        </div>

        <!-- In-interview controls: toggle answering, end, download audio -->
        <div v-else class="operation-buttons">
          <el-button 
            @click="toggleAnswering"
            :type="isAnswering ? 'danger' : 'success'"
            :loading="isProcessing"
          >
            {{ isAnswering ? '结束回答' : '开始回答' }}
          </el-button>
          <el-button @click="endInterview">结束面试</el-button>
          <el-button v-if="showDownloadButton" @click="downloadPCMFile">下载音频</el-button>
        </div>
      </el-row>

      <!-- Post-interview analysis report (transcriptions + expression results) -->
      <div v-if="showReport" class="report-section">
        <h2>面试分析报告</h2>
        <div v-for="(record, index) in interviewRecords" :key="index" class="report-item">
          <p class="bigger">语音识别结果: {{ record.transcription }}</p>
          <div v-if="record.expression">
            <p class="bigger">表情分析: {{ record.expression.emotion }}</p>
            <p class="bigger">置信度: {{ record.expression.confidence }}%</p>
            <img :src="record.snapshot" class="snapshot-image" />
          </div>
        </div>
      </div>
    </el-main>
  </el-container>
</template>

<script setup lang="ts">
import { ref, onBeforeUnmount } from 'vue'
import { ElButton } from 'element-plus'

// Interview state flags
const isInterviewStarted = ref(false)   // camera/mic acquired, interview running
const isAnswering = ref(false)          // candidate is currently speaking (PCM capture active)
const isProcessing = ref(false)         // recording upload/recognition in flight
const showReport = ref(false)           // show the analysis report after the interview ends
const showDownloadButton = ref(false)   // becomes true once a PCM blob exists to download

// Camera / record-keeping
const videoRef = ref<HTMLVideoElement | null>(null)
const mediaStream = ref<MediaStream | null>(null)
// One entry per answered question: transcription + snapshot (JPEG data URL)
// + optional facial-expression analysis filled in later.
const interviewRecords = ref<Array<{
  transcription: string;
  snapshot: string;
  expression: { emotion: string; confidence: number } | null;
}>>([])

// Raw audio capture plumbing (module-level, not reactive)
let audioContext: AudioContext | null = null
let processor: ScriptProcessorNode | null = null
let sourceNode: MediaStreamAudioSourceNode | null = null
let audioChunks: Int16Array[] = []
let sampleRate = 16000 // 16 kHz sample rate expected by the recognizer
let snapshotInterval: number | null = null // id of the 20 s snapshot timer

// Last recorded PCM blob, kept for the download button
const pcmFile = ref<Blob | null>(null)

// 开始面试
const startInterview = async () => {
  try {
    // 初始化摄像头
    mediaStream.value = await navigator.mediaDevices.getUserMedia({ 
      video: true, 
      audio: true 
    })
    
    if (videoRef.value && mediaStream.value) {
      videoRef.value.srcObject = mediaStream.value
    }
    
    isInterviewStarted.value = true
    
    // 初始化音频上下文
    audioContext = new (window.AudioContext || (window as any).webkitAudioContext)({
      sampleRate: sampleRate
    })
    
    // 每20秒自动截图
    snapshotInterval = window.setInterval(takeSnapshotAndAnalyze, 20000)
  } catch (error) {
    console.error('摄像头初始化失败:', error)
  }
}

// 切换回答状态
const toggleAnswering = () => {
  if (isAnswering.value) {
    stopRecording()
  } else {
    startRecording()
  }
  isAnswering.value = !isAnswering.value
}

// 开始录音
// Begin capturing microphone audio as 16-bit signed PCM.
// NOTE(review): ScriptProcessorNode is deprecated in favour of
// AudioWorklet; kept here for broad browser compatibility.
const startRecording = () => {
  if (!mediaStream.value || !audioContext) return
  
  // Bug fix: tear down nodes left over from a previous recording so
  // repeated start/stop cycles do not stack processors (which would
  // capture each buffer multiple times).
  sourceNode?.disconnect()
  processor?.disconnect()
  
  audioChunks = []
  
  // Mic source -> script processor (mono, 4096-sample buffers)
  sourceNode = audioContext.createMediaStreamSource(mediaStream.value)
  processor = audioContext.createScriptProcessor(4096, 1, 1)
  
  processor.onaudioprocess = (e) => {
    const inputData = e.inputBuffer.getChannelData(0)
    const output = new Int16Array(inputData.length)
    
    // Float32 [-1, 1] -> Int16. Bug fix: scale negatives by 32768 and
    // positives by 32767 so the full signed-16-bit range is covered
    // symmetrically (the old uniform *32767 never reached -32768).
    for (let i = 0; i < inputData.length; i++) {
      const sample = Math.max(-1, Math.min(1, inputData[i]))
      output[i] = sample < 0 ? sample * 0x8000 : sample * 0x7fff
    }
    
    audioChunks.push(output)
  }
  
  // Connect to destination — some browsers only fire onaudioprocess
  // when the processor is attached to the graph's output.
  sourceNode.connect(processor)
  processor.connect(audioContext.destination)
}

// 停止录音并上传识别
// Stop capturing, merge the PCM chunks into one buffer, upload it for
// speech recognition, and append the transcription (plus a fresh
// snapshot) to the interview records.
const stopRecording = async () => {
  if (!processor || !sourceNode) return
  
  isProcessing.value = true
  
  // Disconnect and drop the audio nodes so the next recording starts
  // from a clean graph (bug fix: previously the stale references lingered).
  sourceNode.disconnect()
  processor.disconnect()
  sourceNode = null
  processor = null
  
  try {
    // Concatenate all captured Int16 chunks into one contiguous array.
    const totalLength = audioChunks.reduce((acc, chunk) => acc + chunk.length, 0)
    const mergedData = new Int16Array(totalLength)
    let offset = 0
    
    for (const chunk of audioChunks) {
      mergedData.set(chunk, offset)
      offset += chunk.length
    }
    
    // Raw PCM blob (16-bit LE, 16 kHz, mono — no WAV header).
    const pcmBlob = new Blob([mergedData.buffer], { type: 'audio/pcm' })
    const file = new File([pcmBlob], 'interview_response.pcm', { type: 'audio/pcm' })

    // Keep the blob around for the "download audio" button.
    pcmFile.value = pcmBlob
    showDownloadButton.value = true
    
    // Upload for recognition
    const formData = new FormData()
    formData.append('file', file)
    
    const response = await fetch('http://localhost:8080/listener', {
      method: 'POST',
      body: formData
    })
    
    // Bug fix: surface HTTP errors instead of JSON-parsing an error page.
    if (!response.ok) {
      throw new Error(`识别服务返回 ${response.status}`)
    }
    
    const data = await response.json()
    
    // Snapshot taken at answer end, shown next to the transcription.
    const snapshot = takeSnapshot()
    
    interviewRecords.value.push({
      transcription: data.msg || '识别失败',
      snapshot,
      expression: null
    })
    
  } catch (error) {
    console.error('录音处理失败:', error)
  } finally {
    audioChunks = [] // release the captured PCM memory
    isProcessing.value = false
  }
}

// 下载PCM文件
// Trigger a browser download of the most recently recorded PCM blob.
const downloadPCMFile = () => {
  const blob = pcmFile.value
  if (!blob) return

  const objectUrl = URL.createObjectURL(blob)
  const anchor = document.createElement('a')
  anchor.href = objectUrl
  anchor.download = 'interview_response.pcm'
  anchor.click()
  URL.revokeObjectURL(objectUrl)
}

// 截图并分析表情
// Periodic task (every 20 s): capture a video frame, send it to the
// face-analysis service, and attach the detected emotion to the most
// recent interview record. Skipped while the candidate is answering.
const takeSnapshotAndAnalyze = async () => {
  if (!isInterviewStarted.value || isAnswering.value) return
  
  const snapshot = takeSnapshot()
  if (!snapshot) return
  
  try {
    // data-URL -> Blob -> File for the multipart upload.
    const blob = await fetch(snapshot).then(r => r.blob())
    const file = new File([blob], 'snapshot.jpg', { type: 'image/jpeg' })
    
    const formData = new FormData()
    formData.append('file', file)
    
    const response = await fetch('http://localhost:8080/api/face/analyze', {
      method: 'POST',
      body: formData
    })
    
    // Bug fix: fail fast on HTTP errors instead of parsing an error page.
    if (!response.ok) {
      throw new Error(`表情分析服务返回 ${response.status}`)
    }
    
    const data = await response.json()
    
    // Bug fix: guard the nested payload path so a malformed response
    // produces a clear error instead of a TypeError on undefined.
    const encoded = data?.payload?.face_detect_result?.text
    if (typeof encoded !== 'string') {
      throw new Error('表情分析响应缺少 face_detect_result')
    }
    
    const expression = parseFaceResult(encoded)
    
    // Attach to the latest record (no-op when nothing recorded yet).
    const lastRecord = interviewRecords.value[interviewRecords.value.length - 1]
    if (lastRecord) {
      lastRecord.expression = {
        emotion: expression.emotion,
        confidence: expression.confidence
      }
    }
  } catch (error) {
    console.error('表情分析失败:', error)
  }
}

// 解析人脸结果（与您原始代码一致）
// Decode the base64-encoded JSON face-detection payload and map the
// numeric expression code (0-6) to a human-readable label.
// Throws when no face was detected or the payload is malformed.
const parseFaceResult = (base64Str: string) => {
  // Index i corresponds to expression code i from the analysis service.
  const EMOTION_MAP = ['惊讶', '害怕', '厌恶', '高兴', '悲伤', '生气', '正常']
  
  const decodedStr = atob(base64Str)
  const jsonData = JSON.parse(decodedStr)
  
  if (jsonData.face_num < 1) {
    throw new Error('未检测到人脸')
  }
  
  const firstFace = jsonData['face_1']
  if (!firstFace?.property) {
    throw new Error('人脸数据格式异常')
  }
  
  const expression = firstFace.property.expression
  // Bug fix: also reject non-integer codes (e.g. 3.5 or NaN), which
  // previously slipped past the range check and produced an undefined
  // map lookup. Number.isInteger also covers the non-number case.
  if (!Number.isInteger(expression) || expression < 0 || expression > 6) {
    throw new Error(`无效的表情编号：${expression}`)
  }
  
  return {
    // score is presumably in [0, 1] — scaled to a percentage here.
    emotion: EMOTION_MAP[expression] || '未知表情',
    confidence: Math.round(firstFace.score * 100)
  }
}

// 截图功能
// Capture the current video frame as a JPEG data URL.
// Returns '' when the video element or canvas context is unavailable.
const takeSnapshot = (): string => {
  const video = videoRef.value
  if (!video) return ''
  
  const canvas = document.createElement('canvas')
  canvas.width = video.videoWidth
  canvas.height = video.videoHeight
  
  const context = canvas.getContext('2d')
  if (!context) return ''
  
  context.drawImage(video, 0, 0)
  return canvas.toDataURL('image/jpeg')
}

// 结束面试
const endInterview = () => {
  // 清理资源
  if (mediaStream.value) {
    mediaStream.value.getTracks().forEach(track => track.stop())
  }
  
  if (snapshotInterval) {
    clearInterval(snapshotInterval)
  }
  
  if (audioContext) {
    audioContext.close()
  }
  
  isInterviewStarted.value = false
  showReport.value = true
}

// 组件卸载时清理
// Safety net: release hardware and timers if the component is destroyed
// while an interview is still running.
onBeforeUnmount(() => {
  mediaStream.value?.getTracks().forEach(track => track.stop())
  if (snapshotInterval) {
    clearInterval(snapshotInterval)
  }
  audioContext?.close()
})
</script>

<style scoped>
/* Camera preview: centered, capped width, soft card look */
.video-stream {
  width: 100%;
  max-width: 800px;
  margin: 20px auto;
  border-radius: 8px;
  box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}

/* Centered "start interview" button */
.start-button {
  display: flex;
  justify-content: center;
  margin-top: 30px;
}

/* Row of in-interview action buttons */
.operation-buttons {
  display: flex;
  justify-content: space-around;
  margin-top: 20px;
}

/* Container for the post-interview report */
.report-section {
  margin-top: 30px;
  padding: 20px;
  background-color: #f9f9f9;
  border-radius: 8px;
}

/* One card per answered question */
.report-item {
  margin-bottom: 20px;
  padding: 15px;
  background-color: #fff;
  border-radius: 6px;
  box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
}

/* Snapshot shown alongside the expression analysis */
.snapshot-image {
  max-width: 100%;
  border-radius: 6px;
  margin-top: 10px;
}

/* Enlarged report text */
.bigger {
  font-size: 20px;
}
</style>
