import React, { useState, useCallback } from 'react';
import SpeechRecognition from './SpeechRecognition';
import useTextToSpeech from './TextToSpeech';

/**
 * Props accepted by the VoiceChatManager component.
 */
interface VoiceChatManagerProps {
  /**
   * Callback invoked whenever the overall voice-chat pipeline status changes
   * (speech capture, LLM request lifecycle, TTS synthesis/playback, or error).
   */
  onStatusChange: (status: 'idle' | 'listening' | 'thinking' | 'responding' | 'synthesizing' | 'playing' | 'error') => void;
}

// DeepSeek / Youdao LLM-gateway configuration. Hoisted to module scope so it is
// not re-created on every render.
// SECURITY NOTE(review): these credentials are hard-coded in client-side source
// and shipped to every browser — move them behind a backend proxy or inject them
// at build time from environment variables.
const DEEPSEEK_CONFIG = {
  apiKey: '671aa542ee4495e7',
  appId: '3482573dee44622c',
  appKey: 'JYGcbcrbm8vwX50bNK1D7mESFLlmJc5A',
  model: 'deepseek-r1-250120',
  baseUrl: 'https://openapi.youdao.com/llmgateway/api/v1/chat/completions'
};

// fetch()/reader.read() reject with an Error named "AbortError" when their
// AbortController is aborted; a deliberate interruption must not be treated as
// a failure of the pipeline.
const isAbortError = (err: unknown): boolean =>
  err instanceof Error && err.name === 'AbortError';

/**
 * Orchestrates the voice-chat pipeline: speech recognition → streaming LLM chat
 * completion → sentence-by-sentence text-to-speech playback. Renders the inline
 * microphone button, a conversation-history overlay and a "stop answer" button.
 */
const VoiceChatManager: React.FC<VoiceChatManagerProps> = ({ onStatusChange }) => {
  // Completed conversation turns, oldest first.
  const [conversationHistory, setConversationHistory] = useState<Array<{
    type: 'user' | 'assistant';
    content: string;
    timestamp: Date;
  }>>([]);

  // Partial assistant reply while a stream is in flight (shown live in the UI).
  const [currentResponse, setCurrentResponse] = useState('');
  const [isProcessing, setIsProcessing] = useState(false);
  // Abort handle for the in-flight chat request; aborted when interrupted.
  const abortControllerRef = React.useRef<AbortController | null>(null);
  // Monotonically increasing request id; only the latest request may update
  // shared UI/TTS state — older (aborted/superseded) requests must stand down.
  const requestSeqRef = React.useRef<number>(0);

  // Text-to-speech hook; its lifecycle events are mapped onto the coarser
  // pipeline status reported to the parent.
  const tts = useTextToSpeech({
    onStatusChange: (status) => {
      if (status === 'synthesizing') {
        onStatusChange('synthesizing');
      } else if (status === 'playing') {
        onStatusChange('playing');
      } else if (status === 'error') {
        onStatusChange('error');
      }
    }
  });

  // "Speak while streaming" pipeline: complete sentences are queued here and
  // played strictly one at a time by a single drain loop.
  const ttsQueueRef = React.useRef<string[]>([]);
  const isTtsPlayingRef = React.useRef<boolean>(false);
  // Tail of the stream that has not yet formed a complete sentence.
  const remainderRef = React.useRef<string>('');

  // Push one sentence onto the TTS queue and, if no drain loop is running,
  // start one that plays queued sentences sequentially. Callers fire-and-forget;
  // ordering is preserved by the queue itself.
  const enqueueTts = React.useCallback(async (text: string) => {
    const content = (text || '').trim();
    if (!content) return;
    ttsQueueRef.current.push(content);
    if (isTtsPlayingRef.current) return; // an active drain loop will pick it up
    isTtsPlayingRef.current = true;
    while (ttsQueueRef.current.length > 0) {
      const next = ttsQueueRef.current.shift() as string;
      try { await tts.speakText(next); } catch { /* ignore playback errors */ }
    }
    isTtsPlayingRef.current = false;
  }, [tts]);

  // Accumulate streamed delta text and cut it into sentences at Chinese/English
  // sentence-final punctuation or newlines. Complete sentences go to the TTS
  // queue; the unfinished tail is kept for the next delta.
  const processStreamDelta = React.useCallback((deltaText: string) => {
    if (!deltaText) return;
    remainderRef.current += deltaText;
    const splitter = /([。！？!?\n])/g;
    const sentences: string[] = [];
    let pending = '';
    for (const piece of remainderRef.current.split(splitter)) {
      if (!piece) continue;
      pending += piece;
      if (/[。！？!?\n]$/.test(pending)) {
        sentences.push(pending);
        pending = '';
      }
    }
    remainderRef.current = pending; // keep the incomplete tail
    for (const sentence of sentences) void enqueueTts(sentence);
  }, [enqueueTts]);

  // Stop everything belonging to the current answer: playback, queued sentences,
  // the unfinished sentence tail and the in-flight HTTP stream. Clearing the
  // queue and the remainder prevents leftovers of the old answer from bleeding
  // into the next one (the original only stopped playback and aborted the fetch).
  const interruptCurrent = useCallback(() => {
    try { tts.stopPlayback(); } catch { /* ignore */ }
    ttsQueueRef.current = [];
    remainderRef.current = '';
    if (abortControllerRef.current) {
      try { abortControllerRef.current.abort(); } catch { /* ignore */ }
      abortControllerRef.current = null;
    }
  }, [tts]);

  // Send the user's utterance to the chat endpoint and consume the SSE stream,
  // updating the live partial response and feeding the TTS pipeline as deltas
  // arrive. Returns the full assistant reply; throws on HTTP/stream failures
  // (including AbortError when deliberately interrupted).
  const sendMessageToDeepSeek = useCallback(async (userInput: string): Promise<string> => {
    // Cancellation handle and sequence id are created before the try block so
    // catch/finally can tell whether this request is still the latest one.
    const controller = new AbortController();
    abortControllerRef.current = controller;
    const mySeq = ++requestSeqRef.current;

    try {
      setIsProcessing(true);
      onStatusChange('thinking');

      const messages = [
        {
          role: 'system',
          content: '你是一个友好的AI助手，请用非常简短、直接的中文回答（尽量不超过两句话）。'
        },
        {
          role: 'user',
          content: userInput
        }
      ];

      const requestBody = {
        model: DEEPSEEK_CONFIG.model,
        messages: messages,
        stream: true,
        max_tokens: 256,
        temperature: 0.2,
        top_p: 0.8
      };

      const response = await fetch(DEEPSEEK_CONFIG.baseUrl, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${DEEPSEEK_CONFIG.apiKey}`
        },
        body: JSON.stringify(requestBody),
        signal: controller.signal
      });

      if (!response.ok || !response.body) {
        let errorMsg = response.statusText;
        try { errorMsg = (await response.json()).msg || errorMsg } catch { /* ignore */ }
        throw new Error(`API请求失败: ${response.status} - ${errorMsg}`);
      }

      onStatusChange('responding');
      const reader = response.body.getReader();
      const decoder = new TextDecoder('utf-8');
      let fullText = '';
      // SSE events can be split across network reads; buffer the trailing
      // partial line instead of dropping it (the original parsed each chunk
      // independently and silently lost any "data:" line cut at a read boundary).
      let sseBuffer = '';

      const handleSseLine = (line: string) => {
        const trimmed = line.trim();
        if (!trimmed || !trimmed.startsWith('data:')) return;
        const dataStr = trimmed.slice(5).trim();
        // Matches both "data:[DONE]" and "data: [DONE]".
        if (dataStr === '[DONE]') return;
        try {
          const json = JSON.parse(dataStr);
          const part = json.choices?.[0]?.delta?.content || '';
          if (part) {
            fullText += part;
            if (mySeq === requestSeqRef.current) setCurrentResponse(fullText);
            // Sentence-split the delta immediately so speech starts early.
            processStreamDelta(part);
          }
          // reasoning_content deltas may also arrive; intentionally not shown.
        } catch { /* ignore malformed line */ }
      };

      while (true) {
        const { value, done } = await reader.read();
        if (done) break;
        // Superseded by a newer question: stop consuming the stale stream.
        if (mySeq !== requestSeqRef.current) {
          try { await reader.cancel(); } catch { /* ignore */ }
          break;
        }
        sseBuffer += decoder.decode(value, { stream: true });
        const lines = sseBuffer.split('\n');
        sseBuffer = lines.pop() ?? ''; // keep the partial last line for the next chunk
        for (const line of lines) handleSseLine(line);
      }
      // Flush any bytes the decoder still holds, then process the final line.
      sseBuffer += decoder.decode();
      if (sseBuffer && mySeq === requestSeqRef.current) handleSseLine(sseBuffer);

      fullText = fullText.trim();
      // A superseded request must not write history, speak leftovers, or throw
      // "empty stream" (the original appended its partial answer to history).
      if (mySeq !== requestSeqRef.current) return fullText;
      if (fullText.length === 0) throw new Error('流式返回为空');

      // Record the finished assistant turn.
      setConversationHistory(prev => [...prev, {
        type: 'assistant',
        content: fullText,
        timestamp: new Date()
      }]);

      // Speak whatever unfinished sentence tail remained after the stream ended.
      if (remainderRef.current.trim()) {
        await enqueueTts(remainderRef.current);
        remainderRef.current = '';
      }

      return fullText;

    } catch (err) {
      // A deliberate interruption (new question / stop button) is not an error,
      // and a superseded request must not clobber the latest request's status.
      if (!isAbortError(err) && mySeq === requestSeqRef.current) {
        console.error('DeepSeek API调用失败:', err);
        onStatusChange('error');
      }
      throw err;
    } finally {
      // Only the latest request may clear the processing flag; otherwise an old
      // aborted request would reset it while the new request is still running.
      if (mySeq === requestSeqRef.current) setIsProcessing(false);
    }
  }, [onStatusChange, processStreamDelta, enqueueTts]);

  // Handle a finalized speech-recognition transcript: interrupt the previous
  // answer, record the user turn, and start a new streaming request.
  const handleSpeechResult = useCallback(async (text: string) => {
    console.log('收到语音识别结果:', text);

    // Interrupt the previous round: stop TTS, drop its queue, cancel the stream.
    interruptCurrent();

    // Append the user's turn to the history.
    setConversationHistory(prev => [...prev, {
      type: 'user',
      content: text,
      timestamp: new Date()
    }]);

    // Reset the live-response display.
    setCurrentResponse('');

    try {
      await sendMessageToDeepSeek(text);
    } catch (error) {
      // Non-abort errors were already reported inside sendMessageToDeepSeek;
      // aborts are intentional and need no handling.
      if (!isAbortError(error)) {
        console.error('DeepSeek API调用失败:', error);
      }
    }
  }, [interruptCurrent, sendMessageToDeepSeek]);

  // Immediately end the current AI answer (stop the stream + stop TTS).
  const stopCurrentAnswer = useCallback(() => {
    interruptCurrent();
    setIsProcessing(false);
    onStatusChange('idle');
  }, [interruptCurrent, onStatusChange]);

  // Map the recognizer's own status onto the coarser pipeline status; 'idle'
  // is only forwarded while no LLM request is being processed.
  const handleSpeechStatusChange = useCallback((status: 'idle' | 'listening' | 'processing' | 'error') => {
    if (status === 'listening') {
      onStatusChange('listening');
    } else if (status === 'idle' && !isProcessing) {
      onStatusChange('idle');
    } else if (status === 'error') {
      onStatusChange('error');
    }
  }, [onStatusChange, isProcessing]);

  return (
    <div style={{ position: 'relative' }}>
      {/* Speech-recognition mic button, rendered inline next to existing page controls */}
      <div style={{ position: 'absolute', left: 8, bottom: 8, display: 'flex', gap: 8, alignItems: 'center', zIndex: 10000 }}>
        <SpeechRecognition 
          onResult={handleSpeechResult}
          onStatusChange={handleSpeechStatusChange}
          inline
        />
      </div>
      
      {/* Conversation history overlay */}
      {conversationHistory.length > 0 && (
        <div style={{
          position: 'absolute',
          bottom: '100px',
          left: '20px',
          right: '20px',
          maxHeight: '200px',
          overflowY: 'auto',
          background: 'rgba(0,0,0,0.8)',
          borderRadius: '8px',
          padding: '10px',
          zIndex: 999
        }}>
          {conversationHistory.map((message, index) => (
            <div key={index} style={{
              marginBottom: '8px',
              padding: '5px 10px',
              borderRadius: '4px',
              background: message.type === 'user' ? 'rgba(0,123,255,0.2)' : 'rgba(40,167,69,0.2)',
              color: 'white',
              fontSize: '12px',
              wordWrap: 'break-word'
            }}>
              <div style={{ 
                fontWeight: 'bold', 
                marginBottom: '2px',
                fontSize: '10px',
                opacity: 0.7
              }}>
                {message.type === 'user' ? '👤 您' : '🤖 AI'} - {message.timestamp.toLocaleTimeString()}
              </div>
              <div>{message.content}</div>
            </div>
          ))}
          
          {/* The reply currently being generated */}
          {isProcessing && currentResponse && (
            <div style={{
              marginBottom: '8px',
              padding: '5px 10px',
              borderRadius: '4px',
              background: 'rgba(255,193,7,0.2)',
              color: 'white',
              fontSize: '12px',
              wordWrap: 'break-word'
            }}>
              <div style={{ 
                fontWeight: 'bold', 
                marginBottom: '2px',
                fontSize: '10px',
                opacity: 0.7
              }}>
                🤖 AI - 正在回复...
              </div>
              <div>{currentResponse}</div>
            </div>
          )}
        </div>
      )}

      {/* Button to cut off the current AI answer */}
      <button
        onClick={stopCurrentAnswer}
        style={{
          position: 'absolute',
          right: 20,
          bottom: 20,
          padding: '8px 12px',
          borderRadius: 6,
          border: '1px solid #888',
          background: '#222',
          color: '#fff',
          cursor: 'pointer',
          zIndex: 1000
        }}
      >结束回复</button>
    </div>
  );
};

export default VoiceChatManager;
