<template>
  <div class="page-container">
    <div class="content-wrapper">
      <!-- Top section: answer display area -->
      <div class="top-section">
        <!-- Connection / audio status bar -->
        <div class="status-bar">
          <el-tag :type="wsStatusInfo.type" size="small">
            {{ wsStatusInfo.text }}
          </el-tag>
          <span class="status-divider">|</span>
          <el-tag :type="audioStatusInfo.type" size="small">
            {{ audioStatusInfo.text }}
          </el-tag>
          <span v-if="isAudioActive" class="recording-time">{{ recordingDuration }}s</span>
          <!-- Streaming-in-progress indicator (streaming API mode only) -->
          <span v-if="apiMode === 2 && isProcessingText" class="status-divider">|</span>
          <el-tag v-if="apiMode === 2 && isProcessingText" type="info" size="small" class="streaming-tag">
            <el-icon class="is-loading">
              <Loading/>
            </el-icon>
            流式处理中...
          </el-tag>
        </div>

        <!-- Answer display -->
        <div class="answer-display">
          <!-- No data yet (nothing searched so far) -->
          <div v-if="answers.length === 0 && !isProcessingText && !hasSearched" class="empty-state">
            <el-icon :size="50" color="#c0c4cc">
              <ChatDotRound/>
            </el-icon>
            <p>等待识别结果...</p>
          </div>

          <!-- Searched but nothing came back -->
          <div v-else-if="answers.length === 0 && hasSearched" class="no-data-state">
            <el-icon :size="50" color="#f56c6c">
              <WarnTriangleFilled/>
            </el-icon>
            <p>没有找到相关数据</p>
          </div>

          <!-- Answer body + pager.
               NOTE(review): this inner div reuses class "answer-display" and is
               nested inside the outer .answer-display above, so both CSS rules
               for that class apply here — confirm this nesting is intended. -->
          <div v-else class="answer-display">
            <div class="answer-item">
              <div class="answer-content">
                {{ answers[currentAnswerIndex] }}
                <!-- Blinking cursor while streaming -->
                <span v-if="apiMode === 2 && isProcessingText" class="streaming-cursor">|</span>
              </div>
            </div>
            <div v-if="answers.length > 1" class="answer-controls">
              <el-button
                  size="small"
                  :disabled="currentAnswerIndex === 0"
                  @click="prevAnswer"
              >
                上一个
              </el-button>
              <span class="answer-counter">
                {{ currentAnswerIndex + 1 }} / {{ answers.length }}
              </span>
              <el-button
                  size="small"
                  :disabled="currentAnswerIndex === answers.length - 1"
                  @click="nextAnswer"
              >
                下一个
              </el-button>
            </div>
          </div>
        </div>

        <!-- Live (intermediate) transcript and finalized text -->
        <div class="text-display">
          <div v-if="currentTranscript" class="realtime-text">
            <span class="label">实时识别：</span>
            <span class="text">{{ currentTranscript }}</span>
          </div>
          <div v-if="recognizedText" class="final-text">
            <span class="label">识别结果：</span>
            <span class="text">{{ recognizedText }}</span>
          </div>
        </div>
      </div>

      <!-- Bottom section: microphone controls (fixed to viewport bottom) -->
      <div class="bottom-section">
        <!-- Send-mode switch (manual / auto) -->
        <div class="mode-selector">
          <el-radio-group v-model="sendMode" @change="onModeChange" size="small">
            <el-radio-button :label="1">手动模式</el-radio-button>
            <el-radio-button :label="2">自动模式</el-radio-button>
          </el-radio-group>
        </div>

        <!-- Backend API mode switch (plain / streaming) -->
        <div class="api-mode-selector">
          <el-radio-group v-model="apiMode" @change="onApiModeChange" size="small">
            <el-radio-button :label="1">原生text接口</el-radio-button>
            <el-radio-button :label="2">流式streamText接口</el-radio-button>
          </el-radio-group>
        </div>

        <!-- Microphone button -->
        <div class="mic-container">
          <!-- Manual mode: click toggles forwarding to the backend -->
          <template v-if="sendMode === 1">
            <div
                class="mic-button"
                :class="{ 'mic-active': isSending, 'mic-disabled': !isWsConnected }"
                @click="toggleSending"
            >
              <div class="mic-circle">
                <el-icon :size="40">
                  <Microphone/>
                </el-icon>
              </div>
              <div class="mic-text">
                <template v-if="!isWsConnected">未连接</template>
                <template v-else-if="isSending">发送到后端中</template>
                <template v-else>点击开始发送到后端</template>
              </div>
            </div>
          </template>

          <!-- Auto mode: always forwarding, button is display-only -->
          <template v-else>
            <div class="mic-button mic-auto">
              <div class="mic-circle">
                <el-icon :size="40">
                  <Microphone/>
                </el-icon>
              </div>
              <div class="mic-text">自动模式，持续发送到后端</div>
            </div>
          </template>
        </div>

        <!-- Commented-out debug-log panel, kept for future debugging;
             it binds to debugPanelOpen / debugMessages in the script. -->
        <!--        &lt;!&ndash; 调试日志（可折叠） &ndash;&gt;-->
        <!--        <el-collapse v-model="debugPanelOpen" class="debug-collapse">-->
        <!--          <el-collapse-item name="1">-->
        <!--            <template #title>-->
        <!--              <span>调试日志</span>-->
        <!--            </template>-->
        <!--            <div class="debug-log">-->
        <!--              <el-button @click="debugMessages = []" size="small" type="primary" text>清空</el-button>-->
        <!--              <div class="debug-list">-->
        <!--                <div v-for="(msg, index) in debugMessages.slice(0, 20)" :key="index" class="debug-item">-->
        <!--                  {{ msg }}-->
        <!--                </div>-->
        <!--              </div>-->
        <!--            </div>-->
        <!--          </el-collapse-item>-->
        <!--        </el-collapse>-->
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">
import {ref, computed, onMounted, onUnmounted} from 'vue'
import {ElMessage} from 'element-plus'
import {
  Microphone,
  ChatDotRound,
  Loading,
  VideoPause,
  SuccessFilled,
  WarnTriangleFilled
} from '@element-plus/icons-vue'
import request, {baseURL} from '../utils/request'

// Aliyun NLS (real-time speech transcription) gateway configuration.
// NOTE(review): the appkey is hard-coded in client-side code and therefore
// publicly visible — confirm that is acceptable, or move it behind the backend.
const ALIYUN_CONFIG = {
  url: 'wss://nls-gateway-cn-shanghai.aliyuncs.com/ws/v1',
  appkey: 'V87ouA6Kw8vmmxD2',
  token: '' // filled at runtime by initSystem() via getAliyunToken()
}
// Build a 32-char lowercase-hex identifier in the UUIDv4 layout the Aliyun
// protocol expects (no dashes): position 13 is '4', position 17 is 8/9/a/b.
const generateUUID = (): string => {
  const template = 'xxxxxxxxxxxx4xxxyxxxxxxxxxxxxxxx'
  return template.replace(/[xy]/g, (ch) => {
    const rand = (Math.random() * 16) | 0
    const digit = ch === 'x' ? rand : (rand & 0x3) | 0x8
    return digit.toString(16)
  })
}

// Fixed task_id, generated once per page load; every Start/Stop message
// reuses it so Aliyun treats them as one transcription task.
const TASK_ID = generateUUID();

// Reactive UI state
const sendMode = ref(1) // 1 = manual send, 2 = auto send
const apiMode = ref(1) // 1 = plain text endpoint, 2 = streaming streamText endpoint
const isSending = ref(false) // gates forwarding recognized text to the backend (audio streams to Aliyun regardless)
const isProcessingText = ref(false) // a backend request is in flight
const isStreaming = ref(false) // a streaming response body is being read
const currentTranscript = ref('') // live (intermediate) transcript
const recognizedText = ref('') // last finalized sentence
const answers = ref<string[]>([]) // answers returned by the backend
const currentAnswerIndex = ref(0) // index of the answer currently shown
const recordingDuration = ref(0) // seconds since audio capture started
const debugMessages = ref<string[]>([]) // debug log, newest first, capped at 100
const hasSearched = ref(false) // at least one backend query has been made
const debugPanelOpen = ref<string[]>([]) // collapse state of the (commented-out) debug panel

// WebSocket and audio plumbing
const websocket = ref<WebSocket | null>(null)
const audioContext = ref<AudioContext | null>(null)
const mediaStream = ref<MediaStream | null>(null)
const scriptProcessor = ref<ScriptProcessorNode | null>(null)
const recordingTimer = ref<number | null>(null)
const isWsConnected = ref(false)
const isAudioActive = ref(false)

// Badge (tag type + label) describing the Aliyun WebSocket connection state.
const wsStatusInfo = computed(() => {
  const state = websocket.value?.readyState
  if (state === WebSocket.OPEN) return {type: 'success', text: '已连接'}
  if (state === WebSocket.CONNECTING) return {type: 'warning', text: '连接中'}
  return {type: 'danger', text: '未连接'}
})

// Badge describing the microphone pipeline: inactive, forwarding to the
// backend, or recognizing only.
const audioStatusInfo = computed(() => {
  if (!isAudioActive.value) return {type: 'info', text: '未激活'}
  return isSending.value
      ? {type: 'success', text: '发送到后端'}
      : {type: 'warning', text: '仅识别'}
})

// Prepend a timestamped entry to the debug log (capped at 100 entries)
// and mirror it to the console.
const addDebugMessage = (message: string) => {
  const entry = `[${new Date().toLocaleTimeString()}] ${message}`
  debugMessages.value.unshift(entry)
  while (debugMessages.value.length > 100) {
    debugMessages.value.pop()
  }
  console.log(`[语音系统] ${message}`)
}


// React to the send-mode radio group: manual (1) stops any ongoing
// forwarding; auto (2) starts forwarding immediately when possible.
const onModeChange = (mode: number) => {
  addDebugMessage(`切换到${mode === 1 ? '手动' : '自动'}发送模式`)

  if (mode === 1) {
    // Manual mode: cut off forwarding until the user clicks the mic.
    if (isSending.value) stopSending()
    return
  }

  // Auto mode: begin forwarding only when both socket and mic are ready.
  if (isWsConnected.value && isAudioActive.value) {
    startSending()
  }
}

// Step to the previous answer, clamped at the first one.
const prevAnswer = () => {
  if (currentAnswerIndex.value <= 0) return
  currentAnswerIndex.value -= 1
}

// Step to the next answer, clamped at the last one.
const nextAnswer = () => {
  const lastIndex = answers.value.length - 1
  if (currentAnswerIndex.value < lastIndex) {
    currentAnswerIndex.value += 1
  }
}

// React to the API-mode radio group (1 = plain text endpoint,
// 2 = streaming streamText endpoint) and record the switch.
const onApiModeChange = (mode: number) => {
  addDebugMessage(`切换到${mode === 1 ? '原生text接口' : '流式streamText接口'}`)
  // v-model has already updated apiMode; re-assigning is a harmless no-op.
  apiMode.value = mode
}

// Normalize an accumulated SSE payload for display: strip SSE framing
// ("data:" prefixes, whole "event:"/"id:" lines), drop blank lines, force a
// newline before Markdown headings (#..####), and concatenate everything
// else with no separator (chunks are fragments of continuous text).
const processStreamChunk = (rawText: string): string => {
  // Strip SSE framing. "data:" keeps the remainder of its line, while
  // "event:" / "id:" lines are removed entirely.
  const unframed = rawText
      .replace(/^data:\s*/gm, '')
      .replace(/^event:.*$/gm, '')
      .replace(/^id:.*$/gm, '')

  const pieces: string[] = []
  for (const raw of unframed.split('\n')) {
    const line = raw.trim()
    // Skip blanks and any framing lines the regexes missed.
    if (!line) continue
    if (line.startsWith('data:') || line.startsWith('event:') || line.startsWith('id:')) continue
    // Headings start on their own line in the rendered answer.
    pieces.push(line.match(/^#{1,4}\s/) ? '\n' + line : line)
  }

  return pieces.join('')
}

// Query the streaming `streamText` endpoint and render the answer
// incrementally as chunks arrive.
//
// Module state: sets isStreaming while the body is consumed (flipping it to
// false externally aborts the read), replaces `answers` with one
// progressively-growing entry, and resets currentAnswerIndex. Failures are
// surfaced via ElMessage and the debug log; nothing is thrown to the caller.
const processStreamText = async (text: string) => {
  try {
    addDebugMessage('使用流式接口处理文本...')

    // Mark the stream live; startSending/toggleSending set this to false
    // to cancel an in-flight read.
    isStreaming.value = true

    // Clear previous answers so the streamed result is the only entry.
    answers.value = []
    currentAnswerIndex.value = 0

    const response = await fetch(`${baseURL}/api/1.0/ai/streamText?text=${encodeURIComponent(text)}`, {
      method: 'GET',
      headers: {
        'Accept': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Accept-Charset': 'utf-8'
      }
    })

    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`)
    }

    addDebugMessage('开始接收流式数据...')

    const reader = response.body?.getReader()
    if (!reader) {
      throw new Error('无法获取响应流')
    }

    const decoder = new TextDecoder('utf-8')
    let accumulatedText = ''

    while (true) {
      // Cancelled from outside (user started a new recording, etc.).
      if (!isStreaming.value) {
        addDebugMessage('流式读取被用户终止')
        reader.cancel()
        break
      }

      const {done, value} = await reader.read()

      if (done) {
        // Flush the decoder's internal buffer: without this final decode()
        // a multi-byte UTF-8 character split across chunk boundaries at the
        // very end of the stream would be silently dropped.
        accumulatedText += decoder.decode()
        answers.value = [processStreamChunk(accumulatedText)]
        addDebugMessage('流式数据接收完成')
        break
      }

      // stream:true keeps incomplete multi-byte sequences buffered until
      // the next chunk arrives.
      const chunk = decoder.decode(value, {stream: true})
      accumulatedText += chunk

      // Re-normalize the whole accumulated text and show it immediately.
      const processedText = processStreamChunk(accumulatedText)
      answers.value = [processedText]

      if (chunk.length > 0) {
        addDebugMessage(`实时接收: ${chunk.length} 字符，页面显示: ${processedText.length} 字符`)
      }

      // Yield a frame so the DOM can repaint between chunks.
      await new Promise(resolve => requestAnimationFrame(resolve))
    }

    isStreaming.value = false

    if (answers.value.length > 0 && answers.value[0]?.trim()) {
      addDebugMessage(`流式处理完成，最终长度: ${answers.value[0]?.length} 字符`)
      ElMessage.success('流式处理完成')
    } else {
      answers.value = []
      addDebugMessage('流式接口返回空数据')
      ElMessage.warning('没有找到相关数据')
    }

  } catch (error) {
    console.error('❌ 流式请求失败:', error)
    addDebugMessage(`流式处理失败: ${error}`)
    ElMessage.error('流式处理失败，请重试')
    isStreaming.value = false
  }
}

// Fetch a fresh Aliyun NLS access token from the backend.
// Resolves with the token string; logs and rethrows on failure.
const getAliyunToken = async (): Promise<string> => {
  try {
    addDebugMessage('获取 Token...')
    const result = await request.get('/api/1.0/ai/getToken')
    if (!result.data) {
      throw new Error('获取 Token 失败')
    }
    addDebugMessage(`Token 获取成功`)
    return result.data
  } catch (error) {
    addDebugMessage(`Token 获取失败: ${error}`)
    throw error
  }
}

// Open the Aliyun NLS WebSocket, authenticating via `token` in the URL.
// Resolves once the socket is open; rejects on connection error. The
// handlers keep isWsConnected in sync and route every parsed message to
// handleWebSocketMessage.
const connectWebSocket = (token: string): Promise<void> => {
  return new Promise((resolve, reject) => {
    try {
      addDebugMessage(`连接 WebSocket...`)

      const ws = new WebSocket(`${ALIYUN_CONFIG.url}?token=${token}`)
      ws.binaryType = 'arraybuffer' // audio frames are sent as binary
      websocket.value = ws

      ws.onopen = () => {
        isWsConnected.value = true
        addDebugMessage('WebSocket 已连接')
        resolve()
      }

      ws.onmessage = (event) => {
        try {
          handleWebSocketMessage(JSON.parse(event.data))
        } catch (error) {
          addDebugMessage(`消息解析失败: ${error}`)
        }
      }

      ws.onerror = (error) => {
        isWsConnected.value = false
        addDebugMessage(`WebSocket 错误`)
        reject(error)
      }

      ws.onclose = () => {
        isWsConnected.value = false
        addDebugMessage('WebSocket 已断开')
      }

    } catch (error) {
      addDebugMessage(`连接失败: ${error}`)
      reject(error)
    }
  })
}

// Dispatch a parsed Aliyun NLS event by header.name.
// Fix: `const` declarations inside `case` clauses now sit in their own
// braced blocks — without braces they are scoped to the whole switch
// (the classic no-case-declarations hazard) instead of the single case.
const handleWebSocketMessage = (message: any) => {
  const name = message.header?.name

  switch (name) {
    case 'TranscriptionStarted':
      addDebugMessage(`识别任务已开始`)
      break

    case 'SentenceBegin':
      // A new sentence starts: clear the live transcript.
      currentTranscript.value = ''
      break

    case 'TranscriptionResultChanged': {
      // Intermediate (still-changing) result — shown live.
      const intermediateText = message.payload?.result
      if (intermediateText) {
        currentTranscript.value = intermediateText
      }
      break
    }

    case 'SentenceEnd': {
      // Final result for the sentence.
      const finalText = message.payload?.result
      if (finalText) {
        recognizedText.value = finalText
        currentTranscript.value = ''
        addDebugMessage(`识别结果: ${finalText}`)

        // Only forward to the backend while "sending" is enabled.
        if (isSending.value) {
          processRecognizedText(finalText)
        } else {
          addDebugMessage('识别完成，但未开启发送模式，不调用后端接口')
        }

        // Manual mode: automatically stop forwarding after one sentence.
        if (sendMode.value === 1 && isSending.value) {
          stopSending()
        }
      }
      break
    }

    case 'TranscriptionCompleted':
      addDebugMessage('识别任务完成')
      break

    case 'TaskFailed': {
      console.log('TaskFailed', message)
      const errorMsg = message.payload?.message || '识别失败'
      addDebugMessage(`任务失败: ${errorMsg}`)
      ElMessage.error(errorMsg)
      break
    }
  }
}

// Send the StartTranscription command to Aliyun (PCM @ 16 kHz, with
// intermediate results, punctuation prediction and ITN enabled).
// Fixes: the previously silent no-op when the socket is not open is now
// logged, the message is only built when it will actually be sent, and the
// leftover debug console.log statements are removed.
const sendStartMessage = () => {
  const ws = websocket.value
  if (!ws || ws.readyState !== WebSocket.OPEN) {
    addDebugMessage('WebSocket 未就绪，启动消息未发送')
    return
  }

  const startMessage = {
    header: {
      message_id: generateUUID(), // fresh per message
      task_id: TASK_ID,           // fixed for the page's lifetime
      namespace: 'SpeechTranscriber',
      name: 'StartTranscription',
      appkey: ALIYUN_CONFIG.appkey
    },
    payload: {
      format: 'pcm',
      sample_rate: 16000,
      enable_intermediate_result: true,
      enable_punctuation_prediction: true,
      enable_inverse_text_normalization: true
    }
  }

  ws.send(JSON.stringify(startMessage))
  addDebugMessage('发送启动消息')
}

// Send the StopTranscription command to Aliyun; no-op unless the socket is
// open. NOTE(review): nothing in this file appears to call this function —
// confirm whether transcription is ever meant to be stopped explicitly.
const sendStopMessage = () => {
  const stopMessage = {
    header: {
      message_id: generateUUID(), // fresh per message
      task_id: TASK_ID,           // same fixed task id as the start message
      namespace: 'SpeechTranscriber',
      name: 'StopTranscription',
      appkey: ALIYUN_CONFIG.appkey
    }
  }

  const ws = websocket.value
  if (ws && ws.readyState === WebSocket.OPEN) {
    ws.send(JSON.stringify(stopMessage))
    addDebugMessage('发送停止消息')
  }
}

// Forward one PCM chunk to Aliyun. Audio always flows while the socket is
// open — isSending is not consulted here; it only gates whether recognized
// text is later handed to the backend.
const sendAudioData = (audioData: ArrayBuffer) => {
  const ws = websocket.value
  if (ws?.readyState === WebSocket.OPEN) {
    ws.send(audioData)
  }
}

// Convert normalized [-1, 1] float samples to signed 16-bit PCM.
// NaN samples map to 0; out-of-range values are clamped. Negative
// full-scale is -32768 (0x8000), positive full-scale is 32767 (0x7fff).
const convertFloat32ToPCM16 = (float32Array: Float32Array): Int16Array => {
  const pcm = new Int16Array(float32Array.length)
  float32Array.forEach((sample, i) => {
    const clamped = Math.max(-1, Math.min(1, sample || 0))
    pcm[i] = clamped < 0 ? clamped * 0x8000 : clamped * 0x7fff
  })
  return pcm
}

// Acquire the microphone and build the capture pipeline:
// getUserMedia → MediaStreamSource → ScriptProcessorNode → PCM16 → WebSocket.
// Also starts the 1-second recording-duration timer.
// NOTE(review): ScriptProcessorNode is deprecated in favor of
// AudioWorkletNode; it still works in current browsers, but plan a migration.
const initAudioCapture = async () => {
  try {
    addDebugMessage('请求麦克风权限...')

    // Request a mono 16 kHz stream with browser-side cleanup enabled.
    mediaStream.value = await navigator.mediaDevices.getUserMedia({
      audio: {
        channelCount: 1,
        sampleRate: 16000,
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true
      }
    })

    addDebugMessage('麦克风权限已获取')
    isAudioActive.value = true

    // Audio context pinned to 16 kHz to match the rate sent to Aliyun.
    audioContext.value = new (window.AudioContext || (window as any).webkitAudioContext)({
      sampleRate: 16000
    })

    const source = audioContext.value.createMediaStreamSource(mediaStream.value)

    // ScriptProcessorNode delivers raw Float32 buffers for conversion.
    const bufferSize = 4096
    scriptProcessor.value = audioContext.value.createScriptProcessor(bufferSize, 1, 1)

    scriptProcessor.value.onaudioprocess = (e) => {
      const inputData = e.inputBuffer.getChannelData(0)
      const pcmData = convertFloat32ToPCM16(inputData)
      // Pushed to Aliyun whenever the socket is open; isSending is not
      // consulted here (it only gates the backend text hand-off).
      sendAudioData(pcmData.buffer as ArrayBuffer)
    }

    source.connect(scriptProcessor.value)
    scriptProcessor.value.connect(audioContext.value.destination)

    addDebugMessage('音频处理管道已建立')

    // Tick the on-screen recording duration once per second.
    recordingTimer.value = window.setInterval(() => {
      recordingDuration.value++
    }, 1000)

  } catch (error) {
    addDebugMessage(`音频初始化失败: ${error}`)
    throw error
  }
}

// Begin forwarding recognized sentences to the backend (manual-mode button
// press / auto-mode init). Aborts any in-flight stream read first and
// requires both the WebSocket and the microphone to be ready.
const startSending = () => {
  // A still-running stream read would race with the next answer — stop it.
  if (isStreaming.value) {
    addDebugMessage('检测到流式读取正在进行，先终止流式读取')
    isStreaming.value = false
    ElMessage.info('已终止正在进行的流式读取')
  }

  // Guard order matters: the socket warning takes precedence.
  const notReady = !isWsConnected.value
      ? 'WebSocket 未连接'
      : !isAudioActive.value
          ? '音频未激活'
          : ''
  if (notReady) {
    ElMessage.warning(notReady)
    return
  }

  isSending.value = true
  sendStartMessage()
  addDebugMessage('开始发送音频到阿里云')
}

// Stop forwarding recognized text to the backend. Audio keeps streaming to
// Aliyun; only the backend hand-off is gated by isSending.
const stopSending = () => {
  if (!isSending.value) return
  isSending.value = false
}

// Microphone-button handler (manual mode): flip the sending flag.
// Aborts an in-flight stream read first; requires the socket to be up.
// NOTE(review): unlike startSending, this neither checks isAudioActive nor
// re-sends StartTranscription — confirm that asymmetry is intended.
const toggleSending = () => {
  if (isStreaming.value) {
    addDebugMessage('检测到流式读取正在进行，先终止流式读取')
    isStreaming.value = false
    ElMessage.info('已终止正在进行的流式读取')
  }

  if (!isWsConnected.value) {
    ElMessage.warning('WebSocket 未连接')
    return
  }

  // Toggle: sending → stopped, stopped → sending.
  isSending.value = !isSending.value
}

// Hand a finalized recognized sentence to the backend and populate `answers`.
//
// apiMode 1 → GET /api/1.0/ai/text (whole answer list in one response);
// apiMode 2 → processStreamText (single answer streamed progressively).
// Sets hasSearched so the empty/no-result placeholders render correctly,
// and isProcessingText while the request is in flight.
// Fix: the leftover verbose console.log debug dumps are removed; the debug
// log (addDebugMessage) and console.error for failures are kept.
const processRecognizedText = async (text: string) => {
  // Ignore blank transcripts (e.g. punctuation-only sentences).
  if (!text || text.trim().length === 0) {
    addDebugMessage('识别文本为空')
    return
  }

  try {
    isProcessingText.value = true
    hasSearched.value = true
    addDebugMessage(`发送文本到后端: ${text}`)

    if (apiMode.value === 1) {
      // Plain (non-streaming) endpoint: expects an array of answers.
      const result = await request.get('/api/1.0/ai/text', {
        params: {
          text: text
        }
      })

      if (result.data && result.data.length > 0) {
        answers.value = result.data
        currentAnswerIndex.value = 0 // show the first answer
        addDebugMessage(`收到 ${result.data.length} 个答案`)
        ElMessage.success('处理成功')
      } else {
        // Empty payload: clear any stale answers and tell the user.
        answers.value = []
        currentAnswerIndex.value = 0
        addDebugMessage('后端返回数据为空')
        ElMessage.warning('没有找到相关数据')
      }
    } else {
      // Streaming endpoint updates `answers` incrementally itself.
      await processStreamText(text)
    }

  } catch (error) {
    console.error('❌ 请求失败:', error)
    addDebugMessage(`处理失败: ${error}`)
    ElMessage.error('处理失败，请重试')
  } finally {
    isProcessingText.value = false
  }
}

// One-shot bootstrap, run on mount:
// token → WebSocket → StartTranscription → microphone → (auto mode) sending.
const initSystem = async () => {
  try {
    addDebugMessage('=== 系统初始化开始 ===')

    // 1. Fetch the Aliyun access token from the backend.
    const token = await getAliyunToken()
    ALIYUN_CONFIG.token = token

    // 2. Connect the WebSocket; the 300 ms pause gives the gateway a
    //    moment before the start command is sent.
    await connectWebSocket(token)
    await new Promise(resolve => setTimeout(resolve, 300))

    // 3. Tell Aliyun to begin transcribing.
    sendStartMessage()

    // 4. Start audio capture (audio is pushed whenever the socket is open).
    await initAudioCapture()

    // 5. Auto mode forwards recognized text to the backend immediately;
    //    manual mode waits for the mic button.
    if (sendMode.value === 2) {
      startSending()
    } else {
      addDebugMessage('手动模式：点击麦克风按钮开始发送到后端')
    }

    addDebugMessage('=== 系统初始化完成 ===')
    ElMessage.success('系统初始化成功')

  } catch (error) {
    addDebugMessage(`系统初始化失败: ${error}`)
    ElMessage.error('系统初始化失败')
  }
}

// Tear down all audio/network resources and reset UI state for a clean
// remount. Order matters: timer → processor node → audio context →
// microphone tracks → WebSocket.
const cleanup = () => {
  // Stop the recording-duration timer.
  if (recordingTimer.value) {
    clearInterval(recordingTimer.value)
    recordingTimer.value = null
  }

  // Detach the audio-processing node from the graph.
  if (scriptProcessor.value) {
    scriptProcessor.value.disconnect()
    scriptProcessor.value = null
  }

  // Close the audio context (releases audio hardware resources).
  if (audioContext.value) {
    audioContext.value.close()
    audioContext.value = null
  }

  // Stop all microphone tracks (turns off the browser's mic indicator).
  if (mediaStream.value) {
    mediaStream.value.getTracks().forEach(track => track.stop())
    mediaStream.value = null
  }

  // Close the Aliyun WebSocket.
  if (websocket.value) {
    websocket.value.close()
    websocket.value = null
  }

  // Reset reactive state.
  recordingDuration.value = 0
  currentTranscript.value = ''
  isWsConnected.value = false
  isAudioActive.value = false
  isSending.value = false
}

// Bootstrap the whole pipeline when the component mounts.
onMounted(() => {
  initSystem()
})

// Release mic/socket resources when the component unmounts.
onUnmounted(() => {
  cleanup()
  addDebugMessage('组件已卸载')
})
</script>

<style scoped>
/* Page container */
.page-container {
  min-height: 100vh;
  background: #f5f7fa;
  padding: 20px;
  display: flex;
  justify-content: center;
  align-items: center;
}

/* Content wrapper — 60% width on desktop, full width on mobile */
.content-wrapper {
  width: 100%;
  max-width: 1200px;
  background: white;
  border-radius: 12px;
  box-shadow: 0 2px 12px rgba(0, 0, 0, 0.1);
  overflow: hidden;
  display: flex;
  flex-direction: column;
  min-height: 80vh;
  position: relative;
}

/* Desktop: 60% width, centered */
@media (min-width: 769px) {
  .content-wrapper {
    width: 60%;
  }
}

/* Mobile: full width */
@media (max-width: 768px) {
  .page-container {
    padding: 0;
  }

  .content-wrapper {
    width: 100%;
    min-height: 100vh;
    border-radius: 0;
  }
}

/* ========== Top section: answer display ========== */
.top-section {
  flex: 1;
  display: flex;
  flex-direction: column;
  padding: 20px;
  padding-bottom: 200px; /* leave room for the fixed bottom bar */
  overflow: hidden;
  border-bottom: 1px solid #e4e7ed;
}

/* Status bar */
.status-bar {
  display: flex;
  align-items: center;
  gap: 8px;
  margin-bottom: 16px;
  padding: 12px;
  background: #f5f7fa;
  border-radius: 8px;
}

.status-divider {
  color: #dcdfe6;
  margin: 0 4px;
}

.recording-time {
  margin-left: 8px;
  font-size: 14px;
  font-weight: 600;
  color: #409eff;
}

.streaming-tag {
  margin-left: 8px;
}

/* Answer display (outer container) */
.answer-display {
  flex: 1;
  overflow: hidden;
  margin-bottom: 16px;
  min-height: 300px;
}

.empty-state,
.loading-state,
.no-data-state {
  height: 100%;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
  color: #909399;
}

.empty-state p,
.loading-state p,
.no-data-state p {
  margin-top: 12px;
  font-size: 14px;
}

/* NOTE(review): .rotating / .loading-state appear unused by the current
   template — presumably left over from an earlier loading UI; verify before
   removing. */
.rotating {
  animation: rotate 1.5s linear infinite;
}

@keyframes rotate {
  from {
    transform: rotate(0deg);
  }
  to {
    transform: rotate(360deg);
  }
}

/* NOTE(review): duplicate selector — .answer-display is already declared
   above; both rules apply, and the template nests one .answer-display inside
   another, so this later rule wins on conflicting properties. Consider
   renaming the inner element's class. */
.answer-display {
  height: 100%;
  display: flex;
  flex-direction: column;
}

.answer-item {
  flex: 1;
  padding: 20px;
  display: flex;
  flex-direction: column;
}

.answer-content {
  flex: 1;
  font-size: 16px;
  line-height: 1.8;
  color: #303133;
  overflow-y: auto;
  white-space: pre-wrap;
  word-wrap: break-word;
}

/* Streaming cursor indicator */
.streaming-cursor {
  display: inline-block;
  color: #409eff;
  font-weight: bold;
  animation: blink 1s infinite;
  margin-left: 2px;
}

@keyframes blink {
  0%, 50% {
    opacity: 1;
  }
  51%, 100% {
    opacity: 0;
  }
}

.answer-controls {
  display: flex;
  align-items: center;
  justify-content: center;
  gap: 12px;
  padding: 12px 20px;
  border-top: 1px solid #ebeef5;
  background: #fafafa;
}

.answer-counter {
  font-size: 14px;
  color: #606266;
  min-width: 60px;
  text-align: center;
}

/* Live/final recognized-text panel */
.text-display {
  background: #f5f7fa;
  border-radius: 8px;
  padding: 12px;
}

.realtime-text,
.final-text {
  margin-bottom: 8px;
  font-size: 14px;
}

.realtime-text:last-child,
.final-text:last-child {
  margin-bottom: 0;
}

.text-display .label {
  font-weight: 600;
  color: #606266;
  margin-right: 8px;
}

.text-display .text {
  color: #303133;
}

/* ========== Bottom section: microphone controls ========== */
.bottom-section {
  padding: 30px 20px;
  background: #fafafa;
  position: fixed;
  bottom: 0;
  left: 0;
  right: 0;
  z-index: 1000;
  border-top: 1px solid #ebeef5;
}

/* Send-mode selector */
.mode-selector {
  display: flex;
  justify-content: center;
  margin-bottom: 20px;
}

/* API-mode selector */
.api-mode-selector {
  display: flex;
  justify-content: center;
  margin-bottom: 30px;
}

/* Microphone container */
.mic-container {
  display: flex;
  justify-content: center;
  margin-bottom: 20px;
}

/* Microphone button */
.mic-button {
  display: flex;
  flex-direction: column;
  align-items: center;
  cursor: pointer;
  user-select: none;
  transition: all 0.3s ease;
}

.mic-button:hover:not(.mic-disabled) {
  transform: scale(1.05);
}

.mic-button:active:not(.mic-disabled) {
  transform: scale(0.95);
}

/* Microphone circle — default blue */
.mic-circle {
  width: 80px;
  height: 80px;
  border-radius: 50%;
  background: #409eff;
  display: flex;
  align-items: center;
  justify-content: center;
  color: white;
  box-shadow: 0 4px 12px rgba(64, 158, 255, 0.3);
  transition: all 0.3s ease;
  margin-bottom: 12px;
}

/* Recording — red */
.mic-active .mic-circle {
  background: #f56c6c;
  box-shadow: 0 4px 12px rgba(245, 108, 108, 0.3);
  animation: pulse 1.5s ease-in-out infinite;
}

/* Disconnected — gray */
.mic-disabled .mic-circle {
  background: #c0c4cc;
  box-shadow: none;
  cursor: not-allowed;
}

/* Auto mode — green */
.mic-auto .mic-circle {
  background: #67c23a;
  box-shadow: 0 4px 12px rgba(103, 194, 58, 0.3);
  animation: pulse 1.5s ease-in-out infinite;
}

@keyframes pulse {
  0%, 100% {
    transform: scale(1);
  }
  50% {
    transform: scale(1.05);
  }
}

/* Microphone caption text */
.mic-text {
  font-size: 14px;
  color: #606266;
  text-align: center;
  max-width: 200px;
}

.mic-active .mic-text {
  color: #f56c6c;
  font-weight: 600;
}

.mic-disabled .mic-text {
  color: #c0c4cc;
}

.mic-auto .mic-text {
  color: #67c23a;
  font-weight: 600;
}

/* Debug log — used by the commented-out debug panel in the template;
   kept so the panel can be re-enabled without touching styles. */
.debug-collapse {
  border: none;
  margin-top: 20px;
}

.debug-log {
  padding: 12px 0;
}

.debug-list {
  margin-top: 12px;
  max-height: 200px;
  overflow-y: auto;
  background: #f5f7fa;
  border-radius: 6px;
  padding: 12px;
}

.debug-item {
  font-size: 12px;
  font-family: 'Courier New', monospace;
  color: #606266;
  margin-bottom: 6px;
  padding: 4px 8px;
  background: white;
  border-radius: 4px;
  word-break: break-all;
}

.debug-item:last-child {
  margin-bottom: 0;
}

/* Scrollbar styling */
.answer-content::-webkit-scrollbar,
.debug-list::-webkit-scrollbar {
  width: 6px;
}

.answer-content::-webkit-scrollbar-track,
.debug-list::-webkit-scrollbar-track {
  background: #f1f1f1;
  border-radius: 3px;
}

.answer-content::-webkit-scrollbar-thumb,
.debug-list::-webkit-scrollbar-thumb {
  background: #c0c4cc;
  border-radius: 3px;
}

/* ========== Mobile adaptations ========== */
@media (max-width: 768px) {
  .top-section {
    padding: 16px;
    padding-bottom: 200px; /* leave room for the fixed bottom bar */
  }

  .bottom-section {
    padding: 20px 16px;
  }

  .mic-circle {
    width: 70px;
    height: 70px;
  }

  .answer-content {
    font-size: 14px;
  }

  .status-bar {
    flex-wrap: wrap;
  }
}
</style>
