<template>
  <div class="p-6 space-y-6">
    <!-- Toast notification overlay (transient, auto-dismissing "weak reminder") -->
    <div
      v-if="showToast"
      class="fixed top-4 right-4 z-50 bg-green-500 text-white px-4 py-2 rounded-md shadow-lg transition-all duration-300"
      :class="toastClass"
    >
      <div class="flex items-center space-x-2">
        <span class="text-lg">✅</span>
        <span>{{ toastMessage }}</span>
      </div>
    </div>

    <div class="text-center">
      <h2 class="text-xl font-bold text-gray-900 mb-2">语音输入</h2>
      <p class="text-gray-600">使用语音识别输入文字</p>
      
      <!-- Input mode selector: normal (send once) vs. streaming (live sync) -->
      <div class="mt-4 mb-6">
        <div class="flex justify-center space-x-4">
          <button
            @click="setInputMode('normal')"
            :class="[
              'px-4 py-2 rounded-lg text-sm font-medium transition-colors',
              inputMode === 'normal'
                ? 'bg-blue-500 text-white'
                : 'bg-gray-200 text-gray-700 hover:bg-gray-300'
            ]"
          >
            📝 普通录入
          </button>
          <button
            @click="setInputMode('streaming')"
            :class="[
              'px-4 py-2 rounded-lg text-sm font-medium transition-colors',
              inputMode === 'streaming'
                ? 'bg-green-500 text-white'
                : 'bg-gray-200 text-gray-700 hover:bg-gray-300'
            ]"
          >
            ⚡ 实时录入
          </button>
        </div>
        <p class="text-xs text-gray-500 mt-2">
          {{ inputMode === 'streaming' ? '边录音边识别，每秒实时同步到设备' : '录音完成后统一发送' }}
        </p>
      </div>
      
              <p class="text-xs text-gray-500">
          当前模式: {{ recordingMode === 'browser' ? '浏览器识别' : '后端识别' }}
          <span v-if="inputMode === 'streaming'" class="text-blue-600 font-medium">
            (实时录入模式)
          </span>
        </p>
    </div>

    <!-- Target-device picker, shown only in streaming mode while idle -->
    <div v-if="inputMode === 'streaming' && !isRecording" class="bg-yellow-50 border border-yellow-200 p-4 rounded-lg">
      <h3 class="text-sm font-medium text-yellow-700 mb-3">🎯 选择流式录入目标设备</h3>
      <div v-if="onlineDevices.length === 0" class="text-center py-4">
        <p class="text-yellow-600 text-sm">暂无在线设备，请先连接浏览器插件</p>
      </div>
      <div v-else class="space-y-2">
        <div 
          v-for="device in onlineDevices" 
          :key="device.deviceId"
          @click="selectStreamingDevice(device.deviceId)"
          :class="[
            'flex items-center justify-between p-3 rounded-lg cursor-pointer transition-colors',
            selectedStreamingDevice === device.deviceId
              ? 'bg-yellow-200 border-2 border-yellow-400'
              : 'bg-white border border-yellow-300 hover:bg-yellow-100'
          ]"
        >
          <div class="flex items-center space-x-3">
            <div class="w-2 h-2 bg-green-500 rounded-full"></div>
            <div>
              <div class="font-medium text-gray-900">{{ device.deviceName || '我的浏览器插件' }}</div>
              <div class="text-xs text-gray-500">{{ device.deviceId }}</div>
            </div>
          </div>
          <div class="flex items-center space-x-2">
            <div class="text-xs text-green-600">在线</div>
            <div v-if="selectedStreamingDevice === device.deviceId" class="text-yellow-600">
              ✅ 已选择
            </div>
          </div>
        </div>
      </div>
    </div>

    <!-- Record toggle button; disabled until a streaming target is chosen -->
    <div class="text-center">
      <button
        @click="toggleRecording"
        :disabled="!isSupported || (inputMode === 'streaming' && !selectedStreamingDevice)"
        :class="[
          'w-24 h-24 rounded-full text-2xl transition-all duration-300',
          isRecording
            ? 'bg-red-500 text-white animate-pulse-slow'
            : inputMode === 'streaming' && !selectedStreamingDevice
              ? 'bg-gray-400 text-gray-600 cursor-not-allowed'
              : 'bg-blue-500 text-white hover:bg-blue-600'
        ]"
      >
        {{ isRecording ? '⏹️' : '🎤' }}
      </button>
      <p class="mt-2 text-sm text-gray-600">
        {{ isRecording ? '录音中...' : inputMode === 'streaming' && !selectedStreamingDevice ? '请先选择目标设备' : '点击开始录音' }}
      </p>
      <div v-if="isRecording" class="mt-2 text-xs text-red-500 animate-pulse">
        🎤 正在录音，请说话...
      </div>
      <div v-if="inputMode === 'streaming' && selectedStreamingDevice && !isRecording" class="mt-2 text-xs text-green-600">
        🎯 已选择设备: {{ getSelectedDeviceName() }}
      </div>
    </div>

    <!-- Recognition result panel -->
    <div v-if="result" class="bg-blue-50 border border-blue-200 p-4 rounded-lg">
      <h3 class="text-sm font-medium text-blue-700 mb-2">
        {{ inputMode === 'streaming' ? '⚡ 实时识别结果' : '🎯 识别结果' }}
      </h3>
      <div class="text-blue-900 text-lg font-medium">{{ result }}</div>
      <div v-if="confidence > 0" class="mt-2 text-xs text-blue-600">
        置信度: {{ (confidence * 100).toFixed(1) }}%
      </div>
      <div v-if="inputMode === 'streaming' && isRecording" class="mt-2 text-xs text-green-600 animate-pulse">
        ⚡ 实时同步中... (每秒识别一次)
      </div>
      <div v-if="inputMode === 'streaming' && realTimeText && realTimeText !== result" class="mt-2 text-xs text-orange-600">
        ⚡ 最新识别: {{ realTimeText }}
      </div>
    </div>




    <!-- Online device list (current user's browser-extension devices) -->
    <div v-if="onlineDevices.length > 0" class="bg-white border border-gray-200 rounded-lg p-4">
      <h3 class="text-sm font-medium text-gray-700 mb-3">📱 我的在线设备</h3>
      
      <!-- "Send to all devices" button (normal mode, with a result pending) -->
      <div v-if="result && inputMode === 'normal'" class="mb-4">
        <button
          @click="syncToAllDevices"
          :disabled="isProcessing"
          class="w-full btn-secondary disabled:opacity-50 mb-3"
        >
          {{ isProcessing ? '发送中...' : `📤 发送到全部设备 (${onlineDevices.length}个)` }}
        </button>
      </div>
      
      <div class="space-y-3">
        <div 
          v-for="device in onlineDevices" 
          :key="device.deviceId"
          class="flex items-center justify-between p-3 bg-gray-50 rounded-lg"
        >
          <div class="flex items-center space-x-3">
            <div class="w-2 h-2 bg-green-500 rounded-full"></div>
            <div>
              <div class="font-medium text-gray-900">{{ device.deviceName || '我的浏览器插件' }}</div>
              <div class="text-xs text-gray-500">{{ device.deviceId }}</div>
            </div>
          </div>
          
          <div class="flex items-center space-x-2">
            <div class="text-xs text-green-600">在线</div>
            <!-- Per-device send button -->
            <button
              v-if="result && inputMode === 'normal'"
              @click="syncToDevice(device.deviceId)"
              :disabled="isProcessing"
              class="px-3 py-1 text-xs bg-blue-500 text-white rounded-md hover:bg-blue-600 disabled:opacity-50 transition-colors"
            >
              {{ isProcessing ? '发送中...' : '📤 发送' }}
            </button>
          </div>
        </div>
      </div>
    </div>

    <!-- Empty state: no online devices -->
    <div v-else class="bg-orange-50 border border-orange-200 rounded-lg p-4">
      <div class="flex items-center space-x-2">
        <div class="text-orange-600">⚠️</div>
        <div class="text-sm text-orange-800">
          <div class="font-medium">暂无在线设备</div>
          <div class="text-xs mt-1">请确保您的浏览器插件已登录并在线</div>
        </div>
      </div>
    </div>

    <!-- Status panel: mode, connection, device count, mic permission -->
    <div class="bg-gray-50 rounded-lg p-4">
      <div class="space-y-2 text-xs text-gray-600">
        <div class="flex items-center justify-between">
          <span>录音模式:</span>
          <span class="font-medium">{{ recordingMode === 'browser' ? '浏览器识别' : '后端识别' }}</span>
        </div>
        <div class="flex items-center justify-between">
          <span>连接状态:</span>
          <span :class="store.isConnected ? 'text-green-600' : 'text-red-600'" class="font-medium">
            {{ store.isConnected ? '已连接' : '未连接' }}
          </span>
        </div>
        <div class="flex items-center justify-between">
          <span>在线设备:</span>
          <span class="font-medium">{{ onlineDevices.length }} 个</span>
        </div>
        <div v-if="recordingMode === 'browser'" class="flex items-center justify-between">
          <span>麦克风权限:</span>
          <div class="flex items-center space-x-2">
            <span 
              :class="[
                permissionStatus === 'granted' ? 'text-green-600' : 
                permissionStatus === 'denied' ? 'text-red-600' : 
                'text-orange-600'
              ]"
              class="font-medium"
            >
              {{ 
                permissionStatus === 'granted' ? '已授权' : 
                permissionStatus === 'denied' ? '被拒绝' : 
                '未知' 
              }}
            </span>
            <button
              v-if="permissionStatus !== 'granted'"
              @click="manuallyRequestPermission"
              class="px-2 py-1 text-xs bg-blue-500 text-white rounded hover:bg-blue-600 transition-colors"
            >
              请求权限
            </button>
          </div>
        </div>
      </div>
    </div>

    <!-- Browser-unsupported warning (no Web Speech API) -->
    <div v-if="!isSupported" class="bg-red-50 border border-red-200 rounded-lg p-4">
      <div class="flex items-center space-x-2">
        <div class="text-red-600">❌</div>
        <div class="text-sm text-red-800">
          <div class="font-medium">浏览器不支持语音识别</div>
          <div class="text-xs mt-1">建议切换到后端识别模式或使用Chrome浏览器</div>
        </div>
      </div>
    </div>
  </div>
</template>

<script setup lang="ts">
import { ref, computed, onMounted, onUnmounted } from 'vue';
import { useAppStore } from '@/store';
import { deviceAPI, voiceAPI } from '@/services/api';
import { communicationService } from '@/services/communication';
import { voiceWebSocket, type VoiceRecognitionMessage } from '@/services/websocket';
import type { Device } from '@/types';

// Extend the Window interface so the (vendor-prefixed) Web Speech API
// constructors can be referenced without TypeScript errors.
declare global {
  interface Window {
    SpeechRecognition?: any;
    webkitSpeechRecognition?: any;
  }
}

// Pinia application store (user identity + connection status).
const store = useAppStore();

// Which recognizer to use: 'browser' (Web Speech API) or 'backend' (server ASR).
const recordingMode = ref<'browser' | 'backend'>('backend');

// Convert a Float32 audio buffer (samples in -1.0 .. 1.0) into signed
// 16-bit PCM. Out-of-range samples are clamped first; the scale is
// asymmetric so -1.0 maps to -32768 and +1.0 maps to +32767.
const convertFloat32ToPCM16 = (float32Array: Float32Array): Int16Array => {
  const out = new Int16Array(float32Array.length);
  float32Array.forEach((value, index) => {
    const clamped = Math.min(1, Math.max(-1, value));
    out[index] = clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF;
  });
  return out;
};
// --- Core recording / recognition state ---
const isRecording = ref(false);
const result = ref('');
const confidence = ref(0);
const devices = ref<Device[]>([]);
const isProcessing = ref(false);
const isSupported = ref(true);
const permissionStatus = ref<'granted' | 'denied' | 'unknown' | ''>('');

// Toast ("weak reminder") state
const showToast = ref(false);
const toastMessage = ref('');
const toastClass = ref('opacity-0 translate-y-2');

// Streaming-input state
const inputMode = ref<'normal' | 'streaming'>('normal');
const selectedStreamingDevice = ref<string>('');

// Streaming-recording state
// NOTE(review): the three consts below are declared `const` and initialised
// to null/empty, so they can never be reassigned; the active implementation
// keeps its recorder/interval handles on `window` instead (see
// startPCMStreaming / startMediaRecorderStreaming). They look dead — confirm
// nothing outside this view reads them before removing.
const streamingRecorder: MediaRecorder | null = null;
const streamingChunks: Blob[] = [];
const streamingInterval: number | null = null;
const streamingText = ref('');
const accumulatedText = ref(''); // accumulated final recognition text
const realTimeText = ref(''); // latest interim recognition text
const isRealTimeProcessing = ref(false); // real-time processing flag

// Browser speech-recognition handles
let recognition: any = null;
let mediaRecorder: MediaRecorder | null = null;
let audioChunks: Blob[] = [];

// Initialise the Web Speech API recognizer (vendor-prefixed on Chromium).
// No-op when the browser exposes neither constructor. Configured for a
// single Mandarin utterance with final results only.
const initSpeechRecognition = () => {
  if (!('webkitSpeechRecognition' in window) && !('SpeechRecognition' in window)) {
    console.warn('浏览器不支持Web Speech API');
    return;
  }
  
  try {
    recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
    // One utterance per session, no interim results, Mandarin Chinese.
    recognition.continuous = false;
    recognition.interimResults = false;
    recognition.lang = 'zh-CN';
    
    recognition.onresult = (event: any) => {
      const transcript = event.results[0][0].transcript;
      result.value = transcript;
      confidence.value = event.results[0][0].confidence;
      console.log('语音识别结果:', transcript);
      
      // Streaming mode: push each result straight to the selected device.
      if (inputMode.value === 'streaming' && selectedStreamingDevice.value) {
        streamToDevice(transcript);
      }
    };
    
    recognition.onerror = (event: any) => {
      console.error('语音识别错误:', event.error);
      isRecording.value = false;
      
      // Map Web Speech error codes to user-facing guidance.
      let errorMessage = '语音识别失败';
      if (event.error === 'not-allowed') {
        errorMessage = '麦克风权限被拒绝\n\n请按以下步骤操作：\n1. 点击浏览器地址栏左侧的锁定图标\n2. 找到"麦克风"选项\n3. 选择"允许"\n4. 刷新页面后重试';
      } else if (event.error === 'no-speech') {
        errorMessage = '未检测到语音\n\n请确保：\n1. 麦克风工作正常\n2. 说话声音足够大\n3. 环境噪音较小';
      } else if (event.error === 'audio-capture') {
        errorMessage = '音频捕获失败\n\n请检查：\n1. 麦克风是否被其他应用占用\n2. 设备麦克风是否正常工作';
      } else if (event.error === 'network') {
        errorMessage = '网络错误\n\n请检查网络连接后重试';
      } else if (event.error === 'service-not-allowed') {
        errorMessage = '语音识别服务不可用\n\n请尝试：\n1. 使用不同的浏览器\n2. 检查网络连接\n3. 切换到后端识别模式';
      }
      
      // NOTE(review): errorMessage is only logged, never surfaced to the user.
      console.error('语音识别错误:', errorMessage);
    };
    
    recognition.onend = () => {
      console.log('语音识别结束');
      isRecording.value = false;
    };
    
    console.log('语音识别初始化成功');
  } catch (error) {
    console.error('初始化语音识别失败:', error);
  }
};

// Online browser-extension devices belonging to the signed-in user only.
const onlineDevices = computed(() => {
  const uid = store.user?.uid;
  return devices.value.filter(
    (device) =>
      device.online &&
      device.deviceType === 'browser_extension' &&
      device.userId === uid
  );
});

// Show a transient toast for `duration` milliseconds, then fade it out.
const showToastMessage = (message: string, duration: number = 3000) => {
  const FADE_MS = 300; // must match the CSS transition duration

  toastMessage.value = message;
  showToast.value = true;

  // Tiny delay so the element mounts before the enter transition starts.
  setTimeout(() => (toastClass.value = 'opacity-100 translate-y-0'), 10);

  // After `duration`, start the exit transition, then unmount the toast.
  setTimeout(() => {
    toastClass.value = 'opacity-0 translate-y-2';
    setTimeout(() => (showToast.value = false), FADE_MS);
  }, duration);
};

// Switch between 'normal' (send once after recording) and 'streaming'
// (live sync) input modes, clearing state owned by the mode being left.
const setInputMode = (mode: 'normal' | 'streaming') => {
  inputMode.value = mode;

  if (mode !== 'normal') {
    // Entering streaming mode: discard the previous one-shot result.
    result.value = '';
    confidence.value = 0;
    return;
  }

  // Entering normal mode: drop the streaming target and live-text buffers.
  selectedStreamingDevice.value = '';
  accumulatedText.value = '';
  streamingText.value = '';
  realTimeText.value = '';
};

// Remember the streaming target device and confirm the choice with a toast.
const selectStreamingDevice = (deviceId: string) => {
  selectedStreamingDevice.value = deviceId;
  const label = getSelectedDeviceName();
  showToastMessage(`已选择设备: ${label}`, 2000);
};

// Human-readable name of the currently selected streaming device,
// falling back to a generic label when the device has no name.
const getSelectedDeviceName = () => {
  const selected = onlineDevices.value.find(
    (d) => d.deviceId === selectedStreamingDevice.value
  );
  return selected?.deviceName || '我的浏览器插件';
};

// Push recognized text to the selected device. Failures are logged only —
// a dropped sync must not interrupt the recording session.
const streamToDevice = async (text: string) => {
  if (!selectedStreamingDevice.value) {
    return;
  }
  try {
    communicationService.sendTextToDevice(selectedStreamingDevice.value, text);
    console.log('实时同步成功:', text);
  } catch (error) {
    console.error('实时同步失败:', error);
  }
};

// 开始实时录音（WebSocket版本）
const startRealTimeRecording = async (stream: MediaStream) => {
  try {
    // 开始新录音时清空之前的结果
    console.log('开始新的实时录音，清空之前的结果...');
    result.value = '';
    confidence.value = 0;
    streamingText.value = '';
    realTimeText.value = '';
    accumulatedText.value = '';
    
    // 连接WebSocket
    await voiceWebSocket.connect();
    
    // 设置WebSocket消息处理
    voiceWebSocket.onMessage((message: VoiceRecognitionMessage) => {
      console.log('收到WebSocket消息:', message);
      
      switch (message.type) {
        case 'device_selected':
          if (message.success) {
            console.log('设备选择成功:', message.deviceId);
            showToast('设备选择成功', 'success');
          }
          break;
          
        case 'recognition_started':
          if (message.success) {
            console.log('语音识别已开始');
            showToast('语音识别已开始', 'success');
          }
          break;
          
        case 'recognition_interim':
          // 临时识别结果 - 替换当前显示，不累计
          if (message.success && message.text) {
            console.log('WebSocket临时识别结果:', message.text);
            
            // 更新实时识别文本（临时结果）
            realTimeText.value = message.text;
            streamingText.value = message.text;
            
            // 显示：累计文本 + 当前临时结果
            result.value = accumulatedText.value + (accumulatedText.value ? ' ' : '') + message.text;
            confidence.value = message.confidence || 0;
            
            // 临时结果不同步到设备
          }
          break;
          
        case 'recognition_result':
          // 最终识别结果 - 累计到历史文本
          if (message.success && message.text) {
            console.log('WebSocket最终识别结果:', message.text);
            console.log('累计前状态:', {
              accumulatedText: accumulatedText.value,
              result: result.value
            });
            
            // 累计最终识别结果
            if (accumulatedText.value && message.text) {
              accumulatedText.value += ' ' + message.text;
            } else if (message.text) {
              accumulatedText.value = message.text;
            }
            
            // 更新显示结果
            realTimeText.value = '';  // 清空临时结果
            streamingText.value = message.text;
            result.value = accumulatedText.value;
            confidence.value = message.confidence || 0;
            
            console.log('累计后状态:', {
              accumulatedText: accumulatedText.value,
              result: result.value,
              confidence: confidence.value
            });
            
            // 最终结果同步到设备
            // 注意：在实时录音模式下，文本同步由后端处理，手机端不需要重复发送
            // if (selectedStreamingDevice.value) {
            //   streamToDevice(accumulatedText.value);
            //   console.log('已同步到设备:', selectedStreamingDevice.value);
            // }
            console.log('最终结果将由后端自动同步到插件端，手机端不重复发送');
          }
          break;
          
        case 'recognition_stopped':
          if (message.success) {
            console.log('语音识别已停止');
            showToast('语音识别已停止', 'success');
          }
          break;
          
        case 'error':
          console.error('WebSocket识别错误:', message.error);
          showToast(`识别错误: ${message.error}`, 'error');
          break;
      }
    });
    
    // 设置错误处理
    voiceWebSocket.onError((error: string) => {
      console.error('WebSocket错误:', error);
      showToast('WebSocket连接错误', 'error');
    });
    
    // 选择设备
    if (selectedStreamingDevice.value) {
      voiceWebSocket.selectDevice(selectedStreamingDevice.value);
    }
    
    // 开始语音识别
    voiceWebSocket.startRecognition();
    
    // 🎤 尝试使用Web Audio API获取PCM数据
    try {
      console.log('尝试使用Web Audio API (PCM格式)...');
      await startPCMStreaming(stream);
      console.log('✅ 使用Web Audio API (PCM格式) 录音');
    } catch (pcmError) {
      console.warn('⚠️ Web Audio API失败，降级到MediaRecorder:', pcmError);
      await startMediaRecorderStreaming(stream);
      console.log('📱 使用MediaRecorder (WebM/MP4格式) 录音');
    }
    
  } catch (error) {
    console.error('启动WebSocket实时录音失败:', error);
    handleMicrophoneError(error);
  }
};

// Capture raw PCM audio with the Web Audio API and stream it over the voice
// WebSocket as 16-bit samples.
// NOTE(review): ScriptProcessorNode is deprecated in favour of AudioWorklet;
// it still works but processes audio on the main thread. Also, browsers may
// ignore the requested AudioContext sampleRate — confirm the backend
// tolerates streams that are not actually 16 kHz.
const startPCMStreaming = async (stream: MediaStream) => {
  const audioContext = new AudioContext({
    sampleRate: 16000 // 16 kHz, as required by the Aliyun ASR service
  });
  
  const source = audioContext.createMediaStreamSource(stream);
  
  // ScriptProcessorNode delivers fixed-size buffers of raw float samples.
  const bufferSize = 4096; // processing buffer size
  const processor = audioContext.createScriptProcessor(bufferSize, 1, 1); // mono in/out
  
  processor.onaudioprocess = (event) => {
    const inputBuffer = event.inputBuffer;
    const channelData = inputBuffer.getChannelData(0); // mono channel data
    
    // Float32 (-1..1) -> signed 16-bit PCM.
    const pcmData = convertFloat32ToPCM16(channelData);
    
    // Ship each PCM chunk to the WebSocket as soon as it is produced.
    voiceWebSocket.sendAudioData(pcmData.buffer);
  };
  
  // Wire the graph: mic source -> processor -> destination (keeps it running).
  source.connect(processor);
  processor.connect(audioContext.destination);
  
  // Mark recording as active.
  isRecording.value = true;
  console.log('Web Audio API PCM录音已开始');
  
  // Stash the nodes on `window` so stop/cleanup code can tear them down later.
  (window as any).pcmStreamingContext = {
    audioContext,
    processor,
    source,
    stream
  };
};

// Fallback streaming recorder: MediaRecorder emits compressed containers
// (WebM, or MP4 where WebM is unsupported) which are forwarded to the
// WebSocket every 200 ms.
const startMediaRecorderStreaming = async (stream: MediaStream) => {
  const recorder = new MediaRecorder(stream, {
    mimeType: MediaRecorder.isTypeSupported('audio/webm') ? 'audio/webm' : 'audio/mp4'
  });
  
  // Forward each non-empty chunk to the WebSocket as soon as it arrives.
  recorder.ondataavailable = (event) => {
    if (event.data.size > 0) {
      voiceWebSocket.sendAudioData(event.data);
    }
  };
  
  // Recording started.
  recorder.onstart = () => {
    isRecording.value = true;
    console.log('MediaRecorder录音已开始');
  };
  
  // Recording stopped: end recognition and release the microphone tracks.
  recorder.onstop = () => {
    voiceWebSocket.stopRecognition();
    stream.getTracks().forEach(track => track.stop());
    isRecording.value = false;
    console.log('MediaRecorder录音已停止');
  };
  
  // 200 ms timeslice keeps latency low for near-real-time streaming.
  recorder.start(200);
  
  // Keep a global handle so stop/cleanup code can reach the recorder.
  (window as any).streamingRecorder = recorder;
};

// Probe microphone availability/permission without keeping the stream open.
// Returns true when a 16 kHz mono stream can be acquired; on any failure it
// logs a diagnostic and returns false instead of throwing.
const checkMicrophonePermission = async (): Promise<boolean> => {
  try {
    console.log('检查麦克风权限...');
    
    // Modern getUserMedia is required.
    if (!navigator.mediaDevices?.getUserMedia) {
      console.warn('浏览器不支持现代麦克风API');
      return false;
    }
    
    // Ask the Permissions API first (where supported) to fail fast on "denied".
    if (navigator.permissions && navigator.permissions.query) {
      try {
        const permission = await navigator.permissions.query({ name: 'microphone' as PermissionName });
        console.log('当前麦克风权限状态:', permission.state);
        
        if (permission.state === 'denied') {
          console.warn('麦克风权限已被拒绝');
          return false;
        }
      } catch (permError) {
        // Some browsers cannot query 'microphone'; ignore and probe directly.
        console.warn('无法查询权限状态:', permError);
      }
    }
    
    // Actually open (then immediately release) a 16 kHz mono stream.
    const stream = await navigator.mediaDevices.getUserMedia({ 
      audio: {
        sampleRate: 16000,
        channelCount: 1,
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true
      } 
    });
    stream.getTracks().forEach(track => track.stop()); // release the mic right away
    console.log('麦克风权限已获取');
    return true;
  } catch (error) {
    console.warn('麦克风权限检查失败:', error);
    
    // Log the specific getUserMedia failure mode for easier debugging.
    if (error instanceof Error) {
      if (error.name === 'NotAllowedError') {
        console.error('用户拒绝了麦克风权限');
      } else if (error.name === 'NotFoundError') {
        console.error('未找到麦克风设备');
      } else if (error.name === 'NotReadableError') {
        console.error('麦克风被其他应用占用');
      } else if (error.name === 'OverconstrainedError') {
        console.error('麦克风配置不满足要求');
      } else if (error.name === 'SecurityError') {
        console.error('安全错误：可能不是HTTPS环境');
      } else {
        console.error('未知错误:', error.name, error.message);
      }
    }
    
    return false;
  }
};

// Request microphone permission by opening a stream, preferring the simplest
// constraints first (more likely to succeed), then probing the advanced
// 16 kHz mono configuration. Streams are released immediately.
// Returns true once the simple request succeeds, even if the advanced
// configuration fails.
const requestMicrophonePermission = async (): Promise<boolean> => {
  console.log('请求麦克风权限...');
  
  try {
    if (!navigator.mediaDevices?.getUserMedia) {
      console.warn('浏览器不支持麦克风API');
      return false;
    }
    
    // Try the simplest possible audio constraints first.
    const stream = await navigator.mediaDevices.getUserMedia({ 
      audio: true  // minimal configuration
    });
    stream.getTracks().forEach(track => track.stop());
    console.log('麦克风权限已获取（简单配置）');
    
    // With permission granted, probe the advanced configuration too.
    try {
      const advancedStream = await navigator.mediaDevices.getUserMedia({ 
        audio: {
          sampleRate: 16000,
          channelCount: 1,
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true
        } 
      });
      advancedStream.getTracks().forEach(track => track.stop());
      console.log('麦克风权限已获取（高级配置）');
    } catch (advancedError) {
      // Not fatal: recording will just use the simple configuration.
      console.warn('高级音频配置失败，使用简单配置:', advancedError);
    }
    
    return true;
  } catch (error) {
    console.warn('麦克风权限获取失败:', error);
    
    // Log the specific getUserMedia failure mode for easier debugging.
    if (error instanceof Error) {
      if (error.name === 'NotAllowedError') {
        console.error('用户拒绝了麦克风权限');
      } else if (error.name === 'NotFoundError') {
        console.error('未找到麦克风设备');
      } else if (error.name === 'NotReadableError') {
        console.error('麦克风被其他应用占用');
      } else if (error.name === 'OverconstrainedError') {
        console.error('麦克风配置不满足要求');
      } else if (error.name === 'SecurityError') {
        console.error('安全错误：可能不是HTTPS环境');
      } else {
        console.error('未知错误:', error.name, error.message);
      }
    }
    
    return false;
  }
};

onMounted(async () => {
  // 从localStorage读取录音模式
  const savedMode = localStorage.getItem('smartinput-voice-mode');
  if (savedMode === 'browser' || savedMode === 'backend') {
    recordingMode.value = savedMode;
  }
  
  // 检查Web Speech API支持
  if (!('webkitSpeechRecognition' in window) && !('SpeechRecognition' in window)) {
    isSupported.value = false;
    recordingMode.value = 'backend';
  } else {
    initSpeechRecognition();
  }

  // 加载设备列表
  await loadDevices();
  
  // 定期刷新设备列表
  setInterval(loadDevices, 10000); // 每10秒刷新一次
  
  // 主动检查并提醒用户授权录音权限
  await checkAndRequestPermission();
});

// Fetch the signed-in user's device list and log useful subsets for
// debugging. Clears the list on failure; does nothing when logged out.
const loadDevices = async () => {
  if (!store.user) {
    return;
  }
  try {
    console.log('加载用户设备列表...');
    devices.value = await deviceAPI.getUserDevices();
    console.log('设备列表:', devices.value);

    // Log every online device.
    const onlineDevices = devices.value.filter(device => device.online);
    console.log('在线设备:', onlineDevices);

    // Log online browser-extension devices (candidate sync targets).
    const browserDevices = devices.value.filter(
      device => device.online && device.deviceType === 'browser_extension'
    );
    console.log('在线浏览器插件设备:', browserDevices);
  } catch (error) {
    console.error('加载设备列表失败:', error);
    devices.value = [];
  }
};

// Toggle between recording and idle. Before starting a new session this
// force-stops and clears every possible recorder mechanism (MediaRecorder,
// Web Audio PCM context, legacy window-level streaming recorder) so stale
// handles cannot interfere. Guarded by isProcessing to debounce taps.
const toggleRecording = async () => {
  // Debounce: ignore taps while a previous toggle is still settling.
  if (isProcessing.value) {
    console.log('正在处理中，跳过重复点击');
    return;
  }
  
  console.log('切换录音状态 - 当前状态:', isRecording.value);
  console.log('录音模式:', recordingMode.value);
  console.log('录入模式:', inputMode.value);
  
  isProcessing.value = true;
  
  try {
    if (isRecording.value) {
      console.log('停止录音...');
      stopRecording();
    } else {
      console.log('开始录音...');
      
      // Force-reset every recording-related handle before starting fresh.
      console.log('重置录音状态...');
      
      // Stop any still-active MediaRecorder and release its tracks.
      if (mediaRecorder && mediaRecorder.state !== 'inactive') {
        console.log('停止旧的mediaRecorder');
        try {
          mediaRecorder.stop();
          mediaRecorder.stream.getTracks().forEach(track => track.stop());
        } catch (error) {
          console.error('停止旧录音器失败:', error);
        }
      }
      
      // Tear down any leftover Web Audio PCM pipeline.
      if ((window as any).pcmStreamingContext) {
        console.log('停止旧的PCM录音');
        try {
          const context = (window as any).pcmStreamingContext;
          context.processor.disconnect();
          context.source.disconnect();
          context.audioContext.close();
          context.stream.getTracks().forEach((track: MediaStreamTrack) => track.stop());
          (window as any).pcmStreamingContext = null;
        } catch (error) {
          console.error('停止PCM录音失败:', error);
        }
      }
      
      // Stop the legacy window-level streaming recorder (backward compat).
      if ((window as any).streamingRecorder && (window as any).streamingRecorder.state !== 'inactive') {
        console.log('停止旧的streamingRecorder');
        try {
          (window as any).streamingRecorder.stop();
          if ((window as any).streamingInterval) {
            clearInterval((window as any).streamingInterval);
          }
        } catch (error) {
          console.error('停止旧流式录音器失败:', error);
        }
      }
      
      // Clear all recorder handles and buffers.
      mediaRecorder = null;
      audioChunks = [];
      (window as any).streamingRecorder = null;
      (window as any).streamingInterval = null;
      (window as any).pcmStreamingContext = null;
      
      // Reset the recording flag before starting anew.
      isRecording.value = false;
      
      console.log('录音状态已重置，开始新录音...');
      await startRecording();
    }
  } catch (error) {
    console.error('切换录音状态失败:', error);
    // Ensure flags are reset on failure.
    // NOTE(review): isProcessing is also reset in the finally block below,
    // so this assignment is redundant (though harmless).
    isRecording.value = false;
    isProcessing.value = false;
  } finally {
    // Delay the reset slightly to absorb rapid repeated taps.
    setTimeout(() => {
      isProcessing.value = false;
      console.log('处理状态已重置');
    }, 200);
  }
};

// Begin a recording session according to recordingMode (browser/backend)
// and inputMode (normal/streaming).
//
// Fix: the previous code gated recognition.start() and its retry on
// `recognition.state`, but the Web Speech API's SpeechRecognition object has
// no `state` property, so the guard was always false and browser-mode
// recognition could never start. We now call start() directly and treat an
// InvalidStateError ("already started") as the signal to re-initialise.
const startRecording = async () => {
  console.log('开始录音 - 录音模式:', recordingMode.value);
  console.log('录入模式:', inputMode.value);
  console.log('当前录音状态:', isRecording.value);
  
  if (recordingMode.value === 'browser') {
    console.log('使用浏览器语音识别...');
    // Clear the previous result before a new run.
    result.value = '';
    confidence.value = 0;
    
    // Browser speech recognition via the Web Speech API.
    if (recognition) {
      try {
        recognition.start();
        isRecording.value = true;
        console.log('浏览器语音识别已开始');
      } catch (error: any) {
        console.error('启动语音识别失败:', error);
        if (error.name === 'InvalidStateError') {
          console.log('语音识别状态错误，尝试重新初始化');
          // Re-create the recognizer, then retry shortly after.
          initSpeechRecognition();
          setTimeout(() => {
            if (recognition) {
              try {
                recognition.start();
                isRecording.value = true;
              } catch (retryError) {
                console.error('启动语音识别失败:', retryError);
              }
            }
          }, 100);
        }
      }
    }
  } else {
    console.log('使用后端语音识别...');
    // Backend recognition: capture audio locally, recognize server-side.
    try {
      // getUserMedia is required for audio capture.
      if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        throw new Error('浏览器不支持音频录制功能，请使用现代浏览器');
      }
      
      console.log('请求麦克风权限...');
      // Request a 16 kHz mono stream to match the ASR service's input format.
      const stream = await navigator.mediaDevices.getUserMedia({ 
        audio: {
          sampleRate: 16000,
          channelCount: 1,
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true
        } 
      });
      
      console.log('麦克风权限已获取，音频流:', stream);
      
      if (inputMode.value === 'streaming') {
        console.log('启动实时录音...');
        // Streaming mode: recognize while recording.
        startRealTimeRecording(stream);
      } else {
        console.log('启动普通录音...');
        // Clear the previous result before a new run.
        result.value = '';
        confidence.value = 0;
        
        // One-shot recording; pick a container the browser supports.
        mediaRecorder = new MediaRecorder(stream, {
          mimeType: MediaRecorder.isTypeSupported('audio/webm') ? 'audio/webm' : 'audio/mp4'
        });
        audioChunks = [];
        
        console.log('设置录音器事件监听器...');
        mediaRecorder.ondataavailable = (event) => {
          console.log('音频数据可用:', event.data.size, 'bytes');
          audioChunks.push(event.data);
        };
        
        // When recording stops, hand the collected chunks to the backend.
        mediaRecorder.onstop = async () => {
          console.log('录音停止，开始处理音频数据...');
          await processAudioData();
        };
        
        console.log('开始录音...');
        mediaRecorder.start();
        isRecording.value = true;
        console.log('普通录音已开始');
      }
    } catch (error) {
      console.error('启动录音失败:', error);
      handleMicrophoneError(error);
    }
  }
};

// Translate a getUserMedia failure into actionable user guidance (alert).
// Unrecognized error names fall back to a generic message.
const handleMicrophoneError = (error: any) => {
  const guidanceByName: Record<string, string> = {
    NotAllowedError: '麦克风权限被拒绝\n\n请按以下步骤操作：\n1. 点击浏览器地址栏左侧的锁定图标\n2. 找到"麦克风"选项\n3. 选择"允许"\n4. 刷新页面后重试',
    NotFoundError: '未找到麦克风设备\n\n请检查：\n1. 设备是否有麦克风硬件\n2. 麦克风是否被其他应用占用',
    NotSupportedError: '浏览器不支持音频录制功能\n\n请尝试使用不同的浏览器',
    SecurityError: '安全错误\n\n请在HTTPS环境下使用此功能，或使用localhost访问',
    NotReadableError: '麦克风被占用\n\n请关闭其他使用麦克风的应用后重试'
  };

  const message = guidanceByName[error.name] ?? '无法访问麦克风';
  alert(message);
};

// Stop whichever capture pipeline is currently active — browser speech
// recognition, PCM streaming, the chunked streaming recorder, or the plain
// MediaRecorder — then reset all recording state so a new session can start.
// In streaming input mode the accumulated transcription is preserved on
// screen; only transient per-tick text is cleared.
const stopRecording = () => {
  console.log('停止录音 - 录音模式:', recordingMode.value);
  console.log('当前录音状态:', isRecording.value);
  
  if (recordingMode.value === 'browser') {
    console.log('停止浏览器语音识别...');
    if (recognition) {
      try {
        // FIX: the Web Speech API SpeechRecognition interface exposes no
        // `state` property, so the previous `recognition.state === 'recording'`
        // guard was always false and stop() was never actually called —
        // recognition kept running after the UI reset. Call stop()
        // unconditionally; stopping an inactive recognizer is harmless, and
        // any implementation-specific error is swallowed by the catch below.
        recognition.stop();
        console.log('浏览器语音识别已停止');
      } catch (error) {
        console.error('停止语音识别失败:', error);
      }
    }
  } else {
    console.log('停止后端录音...');
    // PCM streaming path: tear down the WebAudio graph, release the media
    // stream, and close the WebSocket recognition session.
    if ((window as any).pcmStreamingContext) {
      try {
        console.log('停止PCM录音...');
        const context = (window as any).pcmStreamingContext;
        context.processor.disconnect();
        context.source.disconnect();
        context.audioContext.close();
        context.stream.getTracks().forEach((track: MediaStreamTrack) => track.stop());
        (window as any).pcmStreamingContext = null;
        voiceWebSocket.stopRecognition();
        console.log('PCM录音已停止');
      } catch (error) {
        console.error('停止PCM录音失败:', error);
      }
    } else if ((window as any).streamingRecorder && (window as any).streamingRecorder.state === 'recording') {
      // Chunked streaming recorder path: stop the recorder and cancel the
      // periodic upload timer.
      try {
        console.log('停止流式录音器...');
        (window as any).streamingRecorder.stop();
        if ((window as any).streamingInterval) {
          clearInterval((window as any).streamingInterval);
        }
        console.log('流式录音已停止');
      } catch (error) {
        console.error('停止流式录音失败:', error);
      }
    } else if (mediaRecorder && mediaRecorder.state === 'recording') {
      // Plain MediaRecorder path: stopping triggers onstop → processAudioData.
      try {
        console.log('停止普通录音器...');
        mediaRecorder.stop();
        mediaRecorder.stream.getTracks().forEach(track => track.stop());
        console.log('普通录音已停止');
      } catch (error) {
        console.error('停止录音失败:', error);
      }
    } else {
      console.log('没有检测到正在运行的录音器');
    }
  }
  
  // Reset the recording flag so the UI returns to its idle state.
  console.log('重置录音状态...');
  isRecording.value = false;
  
  // Drop every recorder reference so the next session starts from scratch.
  console.log('清理录音相关变量...');
  mediaRecorder = null;
  audioChunks = [];
  (window as any).streamingRecorder = null;
  (window as any).streamingInterval = null;
  (window as any).pcmStreamingContext = null;
  
  // Streaming input mode: keep the final transcription visible after the
  // session ends instead of clearing it with the rest of the state.
  if (inputMode.value === 'streaming') {
    console.log('实时录入已结束，保持结果显示...');
    console.log('当前结果状态:', {
      result: result.value,
      accumulatedText: accumulatedText.value,
      streamingText: streamingText.value,
      realTimeText: realTimeText.value
    });
    
    // Clear only the transient per-tick text; the final result is preserved.
    realTimeText.value = '';
    
    // Promote the accumulated text to the displayed final result.
    if (accumulatedText.value) {
      result.value = accumulatedText.value;
      console.log('确保结果显示:', result.value);
    }
    
    showToastMessage('实时录入已结束，结果已保留', 2000);
  }
  
  console.log('录音状态已完全重置，可以继续录音');
};

// Assemble the buffered recording chunks into a Blob, submit it to the
// speech-recognition backend, and publish the transcription into the UI
// state (`result` / `confidence`). In "normal" input mode the recognized
// text is also forwarded to the selected target device. The chunk buffer
// is always cleared afterwards so the next recording starts clean.
const processAudioData = async () => {
  try {
    console.log('开始处理音频数据...');
    console.log('音频块数量:', audioChunks.length);

    if (!audioChunks.length) {
      console.warn('没有音频数据可处理');
      return;
    }

    const mimeType = mediaRecorder?.mimeType || 'audio/webm';
    const recordingBlob = new Blob(audioChunks, { type: mimeType });
    console.log('音频Blob大小:', recordingBlob.size, 'bytes');

    // Fall back to a throwaway id when no device is registered yet.
    const deviceId = store.currentDeviceId || `mobile_${Date.now()}`;
    console.log('使用设备ID:', deviceId);

    const response = await voiceAPI.recognize(recordingBlob, deviceId);
    console.log('语音识别响应:', response);

    if (!response.success) {
      console.error('语音识别失败:', response);
      showToastMessage('语音识别失败，请重试', 3000);
      return;
    }

    result.value = response.data.text;
    confidence.value = response.data.confidence || 0;
    console.log('语音识别成功:', result.value);

    // Normal (non-streaming) mode pushes the final text to the device.
    if (inputMode.value === 'normal' && selectedStreamingDevice.value) {
      streamToDevice(response.data.text);
    }
  } catch (error) {
    console.error('处理音频数据失败:', error);
    showToastMessage('语音识别失败，请重试', 3000);
  } finally {
    // Always release the buffered audio so recording can continue.
    audioChunks = [];
    console.log('音频数据处理完成，已清理音频块');
  }
};



// Send the recognized text to one specific device over the WebSocket
// channel, show a toast with the device's display name, and clear the
// local result on success.
const syncToDevice = async (deviceId: string) => {
  if (!result.value || !store.currentDeviceId) {
    showToastMessage('缺少必要信息', 2000);
    return;
  }

  isProcessing.value = true;
  try {
    communicationService.sendTextToDevice(deviceId, result.value);

    // Resolve a human-readable name for the success toast (weak notification).
    const target = onlineDevices.value.find(d => d.deviceId === deviceId);
    const deviceName = target?.deviceName || '设备';
    showToastMessage(`文字已发送到: ${deviceName}`);
    result.value = '';
    confidence.value = 0;
  } catch (error) {
    console.error('发送失败:', error);
    showToastMessage('发送失败，请重试', 3000);
  } finally {
    isProcessing.value = false;
  }
};

// Broadcast the recognized text to every online device by saving the
// record through the voice API, then clear the local result on success.
const syncToAllDevices = async () => {
  if (!(result.value && store.currentDeviceId)) {
    showToastMessage('缺少必要信息', 2000);
    return;
  }

  isProcessing.value = true;
  try {
    // Persisting the record server-side pushes it out to all devices.
    await voiceAPI.saveRecord(store.currentDeviceId, result.value, confidence.value);

    showToastMessage(`文字已发送到全部 ${onlineDevices.value.length} 个设备`);
    result.value = '';
    confidence.value = 0;
  } catch (error) {
    console.error('同步失败:', error);
    showToastMessage('同步失败，请重试', 3000);
  } finally {
    isProcessing.value = false;
  }
};

// Passively probe microphone permission shortly after the page loads:
// warn about insecure (non-HTTPS, non-localhost) origins, read the
// Permissions API state when available, and record the outcome in
// `permissionStatus` — without ever triggering a permission prompt.
const checkAndRequestPermission = async () => {
  console.log('主动检查录音权限...');

  // Delay 2s so the page is fully loaded before probing.
  setTimeout(async () => {
    try {
      const { protocol, hostname } = window.location;
      const secureOrigin =
        protocol === 'https:' ||
        hostname === 'localhost' ||
        hostname === '127.0.0.1';

      if (!secureOrigin) {
        permissionStatus.value = 'denied';
        alert('⚠️ 安全警告\n\n现代浏览器要求HTTPS环境才能访问麦克风。\n\n请使用HTTPS地址访问：\nhttps://m.mensanyun.com');
        return;
      }

      // Query-only check via the Permissions API; this never prompts.
      if (navigator.permissions?.query) {
        try {
          const permission = await navigator.permissions.query({ name: 'microphone' as PermissionName });
          console.log('当前麦克风权限状态:', permission.state);

          switch (permission.state) {
            case 'granted':
              permissionStatus.value = 'granted';
              console.log('麦克风权限已获取，可以正常使用语音录音功能');
              return;
            case 'denied':
              permissionStatus.value = 'denied';
              showPermissionGuide();
              return;
          }
        } catch (permError) {
          console.warn('无法查询权限状态:', permError);
        }
      }

      // No verdict available: mark unknown and request on user interaction.
      permissionStatus.value = 'unknown';
      console.log('麦克风权限状态未知，等待用户交互时请求');
    } catch (error) {
      console.error('权限检查失败:', error);
      permissionStatus.value = 'unknown';
    }
  }, 2000);
};

// Show the step-by-step microphone-permission setup guide: first a confirm()
// asking whether the user wants the details, then an alert() with the full
// per-browser / per-OS instructions. The guide text below is user-facing
// runtime content (Chinese UI copy) and is shown verbatim.
const showPermissionGuide = () => {
  const permissionGuide = `
🎤 语音录音权限提醒

为了正常使用语音录音功能，需要您授权麦克风权限。

📱 多种授权方式：

【方式一：地址栏设置】
Chrome/Edge：
1. 查看地址栏左侧是否有锁定图标 🔒 或信息图标 ℹ️
2. 点击该图标，找到"麦克风"选项
3. 选择"允许"或"始终允许"
4. 刷新页面

Safari：
1. 查看地址栏左侧是否有锁定图标 🔒
2. 点击该图标，选择"允许"麦克风访问
3. 刷新页面

Firefox：
1. 查看地址栏左侧是否有锁定图标 🔒
2. 点击该图标，找到"麦克风"选项
3. 选择"允许"
4. 刷新页面

【方式二：浏览器设置】
Chrome/Edge：
1. 点击右上角三个点 ⋯
2. 选择"设置" → "隐私设置和安全性"
3. 找到"网站设置" → "麦克风"
4. 允许当前网站使用麦克风

Safari：
1. 点击右上角齿轮图标 ⚙️
2. 选择"偏好设置" → "网站"
3. 找到"麦克风"，允许当前网站

Firefox：
1. 点击右上角三条线 ☰
2. 选择"设置" → "隐私与安全"
3. 找到"权限" → "麦克风"
4. 选择"允许"

【方式三：系统设置】
Android：
1. 进入"设置" → "应用管理"
2. 找到浏览器应用（Chrome/Safari等）
3. 点击"权限" → "麦克风"
4. 选择"允许"

iOS：
1. 进入"设置" → "Safari"
2. 找到"麦克风"
3. 选择"允许"

【方式四：强制授权】
如果以上方法都不行，请尝试：
1. 关闭浏览器，重新打开
2. 清除浏览器缓存和Cookie
3. 使用无痕/隐私模式访问
4. 尝试其他浏览器（推荐Chrome）

【方式五：直接测试】
1. 点击页面上的"重新检查麦克风权限"按钮
2. 如果弹出权限请求，选择"允许"
3. 如果仍然失败，尝试切换到"后端识别"模式

💡 故障排除：
- 确保在HTTPS环境下访问（或localhost）
- 检查设备是否有麦克风硬件
- 关闭其他使用麦克风的应用
- 重启浏览器或设备

✅ 授权后即可正常使用语音录音功能！
  `;
  
  if (confirm('🎤 需要麦克风权限才能使用语音录音功能\n\n是否查看详细设置指南？')) {
    alert(permissionGuide);
  }
};

// Explicitly trigger the microphone permission request (user-initiated),
// record the outcome in `permissionStatus`, and surface the result via a
// toast — plus the setup guide dialog when permission is refused.
const manuallyRequestPermission = async () => {
  console.log('手动请求麦克风权限...');

  try {
    const granted = await requestMicrophonePermission();
    if (!granted) {
      permissionStatus.value = 'denied';
      showToastMessage('❌ 麦克风权限获取失败，请查看设置指南', 5000);
      showPermissionGuide();
      return;
    }
    permissionStatus.value = 'granted';
    showToastMessage('✅ 麦克风权限已获取！', 3000);
  } catch (error) {
    console.error('手动请求权限失败:', error);
    permissionStatus.value = 'denied';
    showToastMessage('❌ 权限请求失败', 3000);
  }
};






</script> 