// Realtime voice-chat WebSocket logic (adapted to the backend protocol):
// records the user's speech in the browser, converts it to text via the
// Web Speech API, and exchanges messages with the server.
import { ref, reactive, computed } from 'vue';

/**
 * Vue composable for a realtime voice conversation with an AI character.
 *
 * Captures the user's speech in the browser via the Web Speech API,
 * streams the recognized text to the backend over a WebSocket, and
 * exposes reactive connection/conversation state for the UI.
 */
export function useVoiceChat() {
  // --- Reactive UI state ---
  const state = ref('disconnected'); // disconnected, connected, listening, processing, speaking
  const isConnected = ref(false); // true while the WebSocket is open
  const canStart = ref(true); // whether a new session may be started
  const currentRecognition = ref(''); // live (interim) speech-to-text preview
  const messages = reactive([]); // chat transcript: { role, content, timestamp, ... }
  const selectedCharacter = ref(null);
  
  // --- Non-reactive session handles (no UI reactivity needed) ---
  let ws = null; // active WebSocket, or null
  let recognition = null; // active SpeechRecognition instance, or null
  let currentAudio = null; // Audio element currently playing TTS, or null
  let userId = 'user_' + Date.now(); // ad-hoc per-session user identifier
  
  // Human-readable status label for the current connection state.
  const statusText = computed(() => {
    const labels = {
      connected: '已连接，准备对话',
      listening: '正在听取您的语音...',
      processing: 'AI正在思考...',
      speaking: 'AI正在回复...',
    };
    // Any unknown state (including 'disconnected') maps to the default label.
    return labels[state.value] ?? '未连接';
  });
  
  /**
   * Start a realtime voice conversation.
   *
   * Connects to the backend WebSocket, announces the selected character,
   * and kicks off continuous browser speech recognition once the socket
   * is open.
   *
   * @param {string} [characterId='harry_potter'] - Initial character to chat with.
   * @param {string} [serverUrl='ws://localhost:8001/ws'] - Backend WebSocket
   *   endpoint (the backend serves the realtime channel on the /ws path).
   */
  const startVoiceChat = async (characterId = 'harry_potter', serverUrl = 'ws://localhost:8001/ws') => {
    // Guard against double-starts: a second live socket would leak and its
    // handlers would race the first over the shared reactive state.
    if (ws && ws.readyState !== WebSocket.CLOSED) {
      console.warn('已存在活动的WebSocket连接，忽略重复启动');
      return;
    }

    try {
      ws = new WebSocket(`${serverUrl}?user_id=${userId}`);

      ws.onopen = () => {
        console.log('WebSocket连接成功');
        state.value = 'connected';
        isConnected.value = true;
        canStart.value = false;

        // Tell the backend which character persona to use.
        changeCharacter(characterId);

        // Begin streaming the user's speech.
        startSpeechRecognition();
      };

      ws.onmessage = (event) => {
        // A malformed frame must not kill the message loop.
        let data;
        try {
          data = JSON.parse(event.data);
        } catch (parseError) {
          console.error('解析服务器消息失败:', parseError);
          return;
        }
        handleWebSocketMessage(data);
      };

      ws.onclose = (event) => {
        console.log('WebSocket连接关闭:', event.code, event.reason);
        state.value = 'disconnected';
        isConnected.value = false;
        canStart.value = true;
        stopSpeechRecognition();
      };

      ws.onerror = (error) => {
        // onclose fires afterwards and completes the reset (canStart etc.).
        console.error('WebSocket错误:', error);
        state.value = 'disconnected';
        isConnected.value = false;
      };
    } catch (error) {
      console.error('连接WebSocket失败:', error);
    }
  };
  
  // Ask the backend to switch the active conversation character.
  const changeCharacter = (characterId) => {
    if (!ws || ws.readyState !== WebSocket.OPEN) return;
    const payload = { type: 'character_change', character_id: characterId };
    ws.send(JSON.stringify(payload));
  };
  
  // Route an incoming, already-parsed server message by its `type` field.
  const handleWebSocketMessage = (data) => {
    if (data.type === 'character_changed') {
      console.log('角色切换成功:', data.character_id);
      // Hook point: reflect the newly active character in the UI here.
    } else if (data.type === 'message_received') {
      console.log('消息已接收:', data.content);
    } else {
      // Unknown message types are logged verbatim for debugging.
      console.log('收到消息:', data);
    }
  };
  
  // Start continuous browser speech recognition (Web Speech API) and wire
  // its lifecycle callbacks to the conversation state. Interim results feed
  // the live preview; final results are sent to the backend as messages.
  const startSpeechRecognition = () => {
    // Feature-detect: Chrome exposes the prefixed webkitSpeechRecognition,
    // other engines may expose the unprefixed name (or neither).
    if (!('webkitSpeechRecognition' in window) && !('SpeechRecognition' in window)) {
      alert('您的浏览器不支持语音识别');
      return;
    }
    
    const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    recognition = new SpeechRecognition();
    recognition.continuous = true; // keep listening across utterances
    recognition.interimResults = true; // deliver partial transcripts as they form
    recognition.lang = 'zh-CN';
    
    recognition.onstart = () => {
      state.value = 'listening';
      console.log('语音识别开始');
    };
    
    recognition.onresult = (event) => {
      let finalTranscript = '';
      let interimTranscript = '';
      
      // Results before resultIndex were already delivered in earlier events;
      // only process the new ones, splitting final vs interim.
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript;
        if (event.results[i].isFinal) {
          finalTranscript += transcript;
        } else {
          interimTranscript += transcript;
        }
      }
      
      // Show the in-progress (interim) recognition in the UI.
      currentRecognition.value = interimTranscript;
      
      // A finalized utterance is sent to the backend and the preview cleared.
      if (finalTranscript.trim()) {
        sendUserMessage(finalTranscript.trim());
        currentRecognition.value = '';
      }
    };
    
    recognition.onerror = (event) => {
      console.error('语音识别错误:', event.error);
      // 'not-allowed' means the user denied (or the page lacks) mic permission.
      if (event.error === 'not-allowed') {
        alert('请允许麦克风权限以使用语音功能');
      }
    };
    
    recognition.onend = () => {
      // The engine stops itself periodically; auto-restart while the session
      // is still live and we are not mid-processing/speaking. The short delay
      // avoids an immediate-restart error in some browsers.
      if (isConnected.value && (state.value === 'listening' || state.value === 'connected')) {
        setTimeout(() => {
          try {
            recognition.start();
          } catch (e) {
            console.log('重启语音识别失败:', e);
          }
        }, 100);
      }
    };
    
    try {
      recognition.start();
    } catch (e) {
      console.error('启动语音识别失败:', e);
    }
  };
  
  /**
   * Record a user utterance in the transcript and forward it to the backend
   * using the backend's expected `{ type: 'message', content }` frame.
   *
   * @param {string} text - Final recognized speech; blank/non-string input is ignored.
   */
  const sendUserMessage = (text) => {
    // Guard: never push empty entries into the transcript or send blank frames.
    const content = typeof text === 'string' ? text.trim() : '';
    if (!content) return;

    // The transcript reflects what the user said, even if delivery fails.
    messages.push({
      role: 'user',
      content,
      timestamp: new Date()
    });

    if (ws && ws.readyState === WebSocket.OPEN) {
      ws.send(JSON.stringify({
        type: 'message',
        content
      }));
      state.value = 'processing';
    } else {
      // Previously this dropped the message silently; surface it for debugging.
      console.warn('WebSocket未连接，消息未发送到服务器');
    }
  };
  
  // Fallback stub: echo an assistant message after a short delay.
  // (The backend currently has no visible WebSocket path for AI replies,
  // so this simulates one until the server side is completed.)
  const simulateAIResponse = (userText) => {
    const delayMs = 1000;
    setTimeout(() => {
      messages.push({
        role: 'assistant',
        content: `我收到了您的消息："${userText}"，正在思考回复...`,
        timestamp: new Date(),
        isPlaying: false,
      });
      state.value = 'listening';
    }, delayMs);
  };
  
  // Inform the backend whether TTS audio is currently playing client-side.
  // `status` is 'playing' or 'stopped'; `queueSize` is the pending-clip count.
  const updatePlaybackStatus = (status, queueSize = 0) => {
    if (!ws || ws.readyState !== WebSocket.OPEN) return;
    ws.send(JSON.stringify({
      type: 'playback_status',
      status,
      queue_size: queueSize,
    }));
  };
  
  // Barge-in: cut off the AI's audio output and go back to listening.
  const interruptAI = () => {
    // Halt and rewind whatever clip is playing, then drop the handle.
    if (currentAudio) {
      currentAudio.pause();
      currentAudio.currentTime = 0;
      currentAudio = null;
    }

    // Clear the playing flag on the first message marked as playing.
    for (const msg of messages) {
      if (msg.isPlaying) {
        msg.isPlaying = false;
        break;
      }
    }

    // Let the backend know playback has stopped and the queue is empty.
    updatePlaybackStatus('stopped', 0);

    state.value = 'listening';
  };
  
  // Tear the session down: socket, recognition, audio, and reactive state.
  const stopVoiceChat = () => {
    // Close the signalling channel; its onclose handler fires asynchronously.
    ws?.close();
    ws = null;

    stopSpeechRecognition();

    // Silence any TTS audio that is mid-playback.
    currentAudio?.pause();
    currentAudio = null;

    // Return the UI to its initial, startable state.
    state.value = 'disconnected';
    isConnected.value = false;
    canStart.value = true;
    currentRecognition.value = '';
  };
  
  // Stop and discard the active SpeechRecognition instance, if any.
  const stopSpeechRecognition = () => {
    if (!recognition) return;
    try {
      recognition.stop();
    } catch (e) {
      console.log('停止语音识别出错:', e);
    } finally {
      // Always drop the handle so onend's auto-restart cannot revive it.
      recognition = null;
    }
  };
  
  // Public surface of the composable.
  return {
    // Reactive state
    state, // lifecycle phase; see statusText for display labels
    isConnected,
    canStart,
    statusText,
    currentRecognition,
    messages,
    selectedCharacter,
    
    // Methods
    startVoiceChat,
    stopVoiceChat,
    interruptAI,
    changeCharacter,
    sendUserMessage,
    updatePlaybackStatus
  };
}