import React, { useState, useRef, useEffect } from 'react';
import { Button, Input, Card, Avatar, Space, Typography, Modal, Switch, Tooltip } from 'antd';
import { SendOutlined, AudioOutlined, SettingOutlined, UserOutlined, RobotOutlined, StopOutlined } from '@ant-design/icons';
import './AIChatWithAnimation.css';

// Destructured antd sub-components used throughout the render tree.
const { TextArea } = Input;
const { Text } = Typography;

// Minimal structural typings for the (still vendor-prefixed) Web Speech API.
// The DOM lib does not ship SpeechRecognition types, and the previous
// conditional-type trick resolved to `unknown` (Window declares the property
// as `unknown`), which made property assignments like `recognition.lang = …`
// fail under strict mode. Declare just the surface this component uses.
interface _SpeechRecognitionEvent {
  // Nested array-likes: results[resultIndex][alternativeIndex].transcript
  results?: ArrayLike<ArrayLike<{ transcript: string }>>;
}
interface _SpeechRecognition {
  lang: string;
  interimResults: boolean;
  maxAlternatives: number;
  start(): void;
  stop(): void;
  onresult: ((event: _SpeechRecognitionEvent) => void) | null;
  onerror: ((event: { error?: string }) => void) | null;
  onend: (() => void) | null;
  onaudiostart: (() => void) | null;
  onaudioend: (() => void) | null;
  onsoundstart: (() => void) | null;
  onsoundend: (() => void) | null;
  onspeechstart: (() => void) | null;
  onspeechend: (() => void) | null;
}
declare global {
  interface Window {
    webkitSpeechRecognition: unknown;
    SpeechRecognition: unknown;
    webkitSpeechRecognitionEvent: unknown;
  }
}

// A single chat bubble in the conversation history.
interface Message {
  id: string;            // unique list key (Date.now()-based; see sendMessage)
  from: 'user' | 'ai';   // which side of the conversation produced this message
  text: string;
  timestamp: Date;
  isSpeaking?: boolean;  // true while TTS is reading this message aloud (drives CSS)
}

// Props for AIChatWithAnimation.
interface AIChatWithAnimationProps {
  // Callback invoked with animation names ('dance', 'idle', 'speaking', …)
  // whenever the chat wants the companion avatar to change animation.
  setAnimation: (animation: string) => void;
  lang?: 'zh' | 'en';   // UI language; defaults to Chinese
  className?: string;   // extra class for the outer container
}

const AIChatWithAnimation: React.FC<AIChatWithAnimationProps> = ({
  setAnimation,
  lang = 'zh',
  className = ''
}) => {
  // Conversation + request state.
  const [messages, setMessages] = useState<Message[]>([]);
  const [inputText, setInputText] = useState('');
  const [isLoading, setIsLoading] = useState(false);   // waiting for backend reply
  // Text-to-speech state and user-tunable settings (see the settings modal).
  const [isSpeaking, setIsSpeaking] = useState(false);
  const [showSettings, setShowSettings] = useState(false);
  const [autoSpeak, setAutoSpeak] = useState(true);    // auto-read AI replies aloud
  const [speechRate, setSpeechRate] = useState(1);
  const [speechPitch, setSpeechPitch] = useState(1);
  const [speechVolume, setSpeechVolume] = useState(1);
  
  // Speech-recognition (voice input) state.
  const [isRecording, setIsRecording] = useState(false);
  const [voiceText, setVoiceText] = useState('');
  const [recognitionError, setRecognitionError] = useState('');
  
  const messagesEndRef = useRef<HTMLDivElement>(null);  // auto-scroll anchor
  const speechRef = useRef<SpeechSynthesisUtterance | null>(null);
  const currentSpeakingMessageRef = useRef<string | null>(null);
  const recognitionRef = useRef<_SpeechRecognition | null>(null);

  // Backend endpoint for AI chat replies.
  // NOTE(review): hard-coded localhost — presumably dev-only; consider env config.
  const AI_CHAT_URL = 'http://localhost:8000/api/ai-chat';

  // Keyword → animation lookup table, flattened from per-animation synonym
  // groups. Order matters: detection returns the first matching entry.
  const actionKeywords = Object.entries({
    dance: ['跳舞', '舞蹈'],
    idle: ['待机', '休息'],
    crying: ['哭', '哭泣', '伤心'],
    breakDance: ['倒立', '翻跟头', '特技'],
    speaking: ['说话', '聊天', '对话', '交流', '谈谈'],
    laughing: ['笑', '开心', '高兴'],
    nodding: ['点头'],
    shaking: ['摇头'],
  }).flatMap(([action, keys]) => keys.map((key) => ({ key, action })));

  // Return the animation mapped to the first known keyword contained in
  // `text`, or null when none of the keywords appear.
  const detectActionFromText = (text: string): string | null => {
    const hit = actionKeywords.find(({ key }) => text.includes(key));
    return hit ? hit.action : null;
  };

  // Map free-form text to a local action command. Rules are evaluated in
  // order and the first rule with any matching keyword wins; returns the
  // matched { action, response } pair, or null when the text is not a
  // recognised command.
  const smartActionRecognition = (text: string) => {
    const normalized = text.toLowerCase();

    const rules: { keywords: string[]; action: string; response: string }[] = [
      {
        keywords: ['跳舞', '舞蹈', '表演', '展示', '动起来', '跳个舞', '来跳舞', '跳支舞', '舞动'],
        action: 'dance',
        response: '好的，我来为您跳舞！',
      },
      {
        keywords: ['停止', '别动', '安静', '休息', '待机', '停下', '不要动', '静止'],
        action: 'idle',
        response: '好的，我安静待机。',
      },
      {
        keywords: ['哭泣', '哭', '伤心', '难过', '悲伤'],
        action: 'crying',
        response: '呜呜，我有点伤心...',
      },
      {
        keywords: ['倒立', '翻跟头', '特技', '杂技', '翻转'],
        action: 'breakDance',
        response: '看我的倒立表演！',
      },
      {
        keywords: ['说话', '聊天', '对话', '交流', '谈谈'],
        action: 'speaking',
        response: '我在听您说话，请继续...',
      },
    ];

    for (const { keywords, action, response } of rules) {
      if (keywords.some((keyword) => normalized.includes(keyword))) {
        return { action, response };
      }
    }
    return null;
  };

  // True when the text maps to a recognised local action command.
  const isActionCommand = (text: string): boolean =>
    smartActionRecognition(text) !== null;

  // Run the avatar animation for a recognised action command.
  // Returns the matched { action, response } pair, or null if no rule matched.
  const executeActionCommand = (text: string) => {
    const matched = smartActionRecognition(text);
    if (!matched) return null;
    console.log(`执行动作指令: ${text} -> ${matched.action}`);
    setAnimation(matched.action);
    return matched;
  };

  // Start a one-shot Chinese speech-recognition session via the Web Speech
  // API (unprefixed or webkit-prefixed constructor). The final transcript is
  // copied into the text input; failures are surfaced via `recognitionError`.
  const startVoiceRecognition = () => {
    const speechWindow = window as {
      SpeechRecognition?: unknown;
      webkitSpeechRecognition?: unknown;
    };
    const SpeechRecognitionCtor =
      speechWindow.SpeechRecognition || speechWindow.webkitSpeechRecognition;

    if (!SpeechRecognitionCtor) {
      setRecognitionError('当前浏览器不支持语音识别');
      return;
    }

    try {
      const recognition = new (SpeechRecognitionCtor as new () => _SpeechRecognition)();
      recognition.lang = 'zh-CN';
      recognition.interimResults = false; // deliver final results only
      recognition.maxAlternatives = 1;

      recognition.onresult = (event: _SpeechRecognitionEvent) => {
        console.log('语音识别结果:', event);
        if (event.results && event.results[0] && event.results[0][0]) {
          const text = event.results[0][0].transcript;
          console.log('识别文本:', text);
          setVoiceText(text);
          setInputText(text); // copy the transcript into the input box
          setRecognitionError('');
        } else {
          setRecognitionError('未识别到语音内容');
        }
      };

      recognition.onerror = (event: { error?: string }) => {
        console.error('语音识别错误:', event);
        setRecognitionError(`语音识别错误: ${event.error || '未知错误'}`);
        setIsRecording(false);
      };

      recognition.onend = () => {
        console.log('语音识别结束');
        setIsRecording(false);
      };

      // Lifecycle logging — useful when debugging microphone issues.
      recognition.onaudiostart = () => console.log('音频捕获开始');
      recognition.onaudioend = () => console.log('音频捕获结束');
      recognition.onsoundstart = () => console.log('检测到声音');
      recognition.onsoundend = () => console.log('声音结束');
      recognition.onspeechstart = () => console.log('检测到语音');
      recognition.onspeechend = () => console.log('语音结束');

      recognitionRef.current = recognition;
      recognition.start();
      setIsRecording(true);
      setVoiceText('');
      setRecognitionError('');
      console.log('语音识别已启动');
    } catch (err) {
      // start() throws InvalidStateError if a session is already running, and
      // the constructor can throw in unsupported embedders — previously this
      // exception escaped the click handler uncaught.
      console.error('启动语音识别失败:', err);
      setRecognitionError('语音识别启动失败，请重试');
      setIsRecording(false);
    }
  };

  const stopVoiceRecognition = () => {
    console.log('停止语音识别');
    if (recognitionRef.current && typeof recognitionRef.current.stop === 'function') {
      recognitionRef.current.stop();
    }
    setIsRecording(false);
  };

  // Send a user message. Local action commands are handled entirely on the
  // client (animation + canned reply); everything else is POSTed to the AI
  // chat backend. The AI reply is scanned for action keywords and optionally
  // read aloud.
  const sendMessage = async (text: string) => {
    const trimmed = text.trim();
    if (!trimmed) return;

    const userMessage: Message = {
      id: Date.now().toString(),
      from: 'user',
      text: trimmed,
      timestamp: new Date()
    };

    setMessages(prev => [...prev, userMessage]);
    setInputText('');
    setVoiceText('');
    setIsLoading(true);

    // Action-command path: recognise once. (Previously isActionCommand and
    // executeActionCommand each ran smartActionRecognition — twice per send.)
    const action = executeActionCommand(text);
    if (action) {
      const aiMessage: Message = {
        id: (Date.now() + 1).toString(),
        from: 'ai',
        text: action.response,
        timestamp: new Date()
      };
      setMessages(prev => [...prev, aiMessage]);
      setIsLoading(false);

      // Voice feedback only for the 'speaking' action, as before.
      if (action.action === 'speaking') {
        speakText(action.response);
      }
      return;
    }

    try {
      const response = await fetch(AI_CHAT_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ message: trimmed }),
      });

      // HTTP-level failure: try to show a structured { error } body if the
      // server sent one; otherwise fall back to the generic apology instead
      // of misreporting an HTML error page as a network problem.
      if (!response.ok) {
        let serverMessage = '抱歉，我现在无法回答您的问题。';
        try {
          const errBody = await response.json();
          if (errBody && errBody.error) serverMessage = errBody.error;
        } catch {
          // non-JSON error body; keep the generic message
        }
        const errorMessage: Message = {
          id: (Date.now() + 1).toString(),
          from: 'ai',
          text: serverMessage,
          timestamp: new Date()
        };
        setMessages(prev => [...prev, errorMessage]);
        return; // finally below still clears the loading flag
      }

      const data = await response.json();

      if (data.reply) {
        const aiMessage: Message = {
          id: (Date.now() + 1).toString(),
          from: 'ai',
          text: data.reply,
          timestamp: new Date()
        };

        setMessages(prev => [...prev, aiMessage]);

        // Trigger an avatar animation if the reply text mentions one.
        const detectedAction = detectActionFromText(data.reply);
        if (detectedAction) {
          setAnimation(detectedAction);
        }

        // Optionally read the reply aloud.
        if (autoSpeak) {
          speakText(data.reply);
        }
      } else {
        const errorMessage: Message = {
          id: (Date.now() + 1).toString(),
          from: 'ai',
          text: data.error || '抱歉，我现在无法回答您的问题。',
          timestamp: new Date()
        };
        setMessages(prev => [...prev, errorMessage]);
      }
    } catch (error) {
      // Network failure or invalid JSON from the server.
      console.error('AI聊天错误:', error);
      const errorMessage: Message = {
        id: (Date.now() + 1).toString(),
        from: 'ai',
        text: '抱歉，网络连接出现问题，请稍后再试。',
        timestamp: new Date()
      };
      setMessages(prev => [...prev, errorMessage]);
    } finally {
      setIsLoading(false);
    }
  };

  // Speak `text` through the Web Speech synthesis API, switching the avatar
  // to the 'speaking' animation while audio plays and back to 'idle' after.
  const speakText = (text: string) => {
    if (!('speechSynthesis' in window)) {
      console.warn('当前浏览器不支持语音合成');
      return;
    }

    // Cancel whatever is currently being spoken before queueing new audio.
    if (speechRef.current) {
      window.speechSynthesis.cancel();
    }

    const utterance = new SpeechSynthesisUtterance(text);
    utterance.lang = 'zh-CN';
    utterance.rate = speechRate;
    utterance.pitch = speechPitch;
    utterance.volume = speechVolume;

    // Teardown shared by the "finished" and "errored" paths: clear speaking
    // state, return the avatar to idle, and un-flag every message bubble.
    const resetSpeakingState = () => {
      setIsSpeaking(false);
      currentSpeakingMessageRef.current = null;
      setAnimation('idle');
      setMessages(prev => prev.map(msg => ({ ...msg, isSpeaking: false })));
    };

    utterance.onstart = () => {
      console.log('开始播放语音:', text);
      setIsSpeaking(true);
      currentSpeakingMessageRef.current = text;
      setAnimation('speaking');
      // Flag the message(s) with this exact text so the UI can animate them.
      setMessages(prev => prev.map(msg =>
        msg.text === text ? { ...msg, isSpeaking: true } : msg
      ));
    };

    utterance.onend = () => {
      console.log('语音播放结束');
      resetSpeakingState();
    };

    utterance.onerror = (event) => {
      console.error('语音播放错误:', event);
      resetSpeakingState();
    };

    speechRef.current = utterance;
    window.speechSynthesis.speak(utterance);
  };

  // Immediately cancel ongoing speech playback and reset all speaking state.
  const stopSpeaking = () => {
    if (!('speechSynthesis' in window)) return;
    window.speechSynthesis.cancel();
    setIsSpeaking(false);
    currentSpeakingMessageRef.current = null;
    setAnimation('idle');
    setMessages(prev => prev.map(msg => ({ ...msg, isSpeaking: false })));
  };

  // Submit the current contents of the input box.
  const handleSend = () => {
    void sendMessage(inputText);
  };

  // Enter submits; Shift+Enter falls through and inserts a newline.
  const handleKeyPress = (e: React.KeyboardEvent) => {
    if (e.key !== 'Enter' || e.shiftKey) return;
    e.preventDefault();
    handleSend();
  };

  // Microphone button toggles voice recognition on/off.
  const handleVoiceButtonClick = () => {
    (isRecording ? stopVoiceRecognition : startVoiceRecognition)();
  };

  // Keep the newest message visible whenever the list changes.
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [messages]);

  // On unmount: stop any speech playback and abort voice recognition so no
  // browser audio session outlives the component.
  useEffect(() => {
    return () => {
      if ('speechSynthesis' in window) {
        window.speechSynthesis.cancel();
      }
      if (recognitionRef.current && typeof recognitionRef.current.stop === 'function') {
        recognitionRef.current.stop();
      }
    };
  }, []);

  return (
    <div className={`ai-chat-container ${className}`}>
      <Card 
        title={
          <Space>
            <RobotOutlined style={{ color: '#1890ff' }} />
            <span>{lang === 'zh' ? 'AI智能助手' : 'AI Assistant'}</span>
            {isSpeaking && (
              <div className="speaking-indicator">
                <div className="speaking-dots">
                  <span></span>
                  <span></span>
                  <span></span>
                </div>
              </div>
            )}
          </Space>
        }
        extra={
          <Space>
            <Button
              icon={<SettingOutlined />}
              size="small"
              onClick={() => setShowSettings(true)}
              title={lang === 'zh' ? '设置' : 'Settings'}
            />
            {isSpeaking && (
              <Button
                size="small"
                danger
                onClick={stopSpeaking}
                title={lang === 'zh' ? '停止语音' : 'Stop Speaking'}
              >
                {lang === 'zh' ? '停止' : 'Stop'}
              </Button>
            )}
          </Space>
        }
        className="chat-card"
      >
        {/* Message list: empty state, chat bubbles, loading dots */}
        <div className="messages-container">
          {messages.length === 0 ? (
            <div className="empty-state">
              <RobotOutlined style={{ fontSize: 48, color: '#d9d9d9' }} />
              <Text type="secondary">
                {lang === 'zh' ? '开始与AI助手对话吧！' : 'Start chatting with AI assistant!'}
              </Text>
            </div>
          ) : (
            messages.map((message) => (
              <div
                key={message.id}
                className={`message ${message.from} ${message.isSpeaking ? 'speaking' : ''}`}
              >
                <Avatar
                  icon={message.from === 'user' ? <UserOutlined /> : <RobotOutlined />}
                  className={`message-avatar ${message.from}`}
                />
                <div className="message-content">
                  <div className="message-text">{message.text}</div>
                  <div className="message-time">
                    {message.timestamp.toLocaleTimeString()}
                  </div>
                </div>
              </div>
            ))
          )}
          {isLoading && (
            <div className="message ai">
              <Avatar icon={<RobotOutlined />} className="message-avatar ai" />
              <div className="message-content">
                <div className="loading-indicator">
                  <div className="loading-dots">
                    <span></span>
                    <span></span>
                    <span></span>
                  </div>
                </div>
              </div>
            </div>
          )}
          {/* auto-scroll anchor (see the messages useEffect) */}
          <div ref={messagesEndRef} />
        </div>

        {/* Voice recognition status indicator */}
        {isRecording && (
          <div className="voice-recognition-status">
            <div className="recording-indicator">
              <div className="recording-dots">
                <span></span>
                <span></span>
                <span></span>
              </div>
            </div>
            <Text type="secondary">
              {lang === 'zh' ? '正在听您说话...' : 'Listening...'}
            </Text>
          </div>
        )}

        {voiceText && (
          <div className="voice-recognition-result">
            <Text type="secondary">
              {lang === 'zh' ? '识别结果：' : 'Recognition result: '}
              {voiceText}
            </Text>
          </div>
        )}

        {recognitionError && (
          <div className="voice-recognition-error">
            <Text type="danger">{recognitionError}</Text>
          </div>
        )}

        {/* Input area: text box + microphone + send button */}
        <div className="input-container">
          {/* NOTE(review): onKeyPress is deprecated in React — consider onKeyDown */}
          <TextArea
            value={inputText}
            onChange={(e) => setInputText(e.target.value)}
            onKeyPress={handleKeyPress}
            placeholder={lang === 'zh' ? '输入您的问题或点击麦克风进行语音输入...' : 'Type your question or click microphone for voice input...'}
            autoSize={{ minRows: 1, maxRows: 4 }}
            disabled={isLoading || isRecording}
            className="message-input"
          />
          <div className="input-buttons">
            <Tooltip title={isRecording ? (lang === 'zh' ? '停止录音' : 'Stop Recording') : (lang === 'zh' ? '语音输入' : 'Voice Input')}>
              <Button
                type={isRecording ? 'primary' : 'default'}
                danger={isRecording}
                icon={isRecording ? <StopOutlined /> : <AudioOutlined />}
                onClick={handleVoiceButtonClick}
                loading={isRecording}
                disabled={isLoading}
                className="voice-button"
              />
            </Tooltip>
            <Button
              type="primary"
              icon={<SendOutlined />}
              onClick={handleSend}
              loading={isLoading}
              disabled={!inputText.trim() || isRecording}
              className="send-button"
            >
              {lang === 'zh' ? '发送' : 'Send'}
            </Button>
          </div>
        </div>
      </Card>

      {/* Settings modal: TTS toggles and sliders */}
      <Modal
        title={lang === 'zh' ? '语音设置' : 'Voice Settings'}
        open={showSettings}
        onCancel={() => setShowSettings(false)}
        footer={null}
        width={400}
      >
        <div className="settings-content">
          <div className="setting-item">
            <Text>{lang === 'zh' ? '自动播放语音' : 'Auto Play Voice'}</Text>
            <Switch checked={autoSpeak} onChange={setAutoSpeak} />
          </div>
          
          <div className="setting-item">
            <Text>{lang === 'zh' ? '语速' : 'Speech Rate'}</Text>
            <input
              type="range"
              min="0.5"
              max="2"
              step="0.1"
              value={speechRate}
              onChange={(e) => setSpeechRate(parseFloat(e.target.value))}
              className="range-slider"
            />
            <Text type="secondary">{speechRate.toFixed(1)}x</Text>
          </div>
          
          <div className="setting-item">
            <Text>{lang === 'zh' ? '音调' : 'Pitch'}</Text>
            <input
              type="range"
              min="0.5"
              max="2"
              step="0.1"
              value={speechPitch}
              onChange={(e) => setSpeechPitch(parseFloat(e.target.value))}
              className="range-slider"
            />
            <Text type="secondary">{speechPitch.toFixed(1)}x</Text>
          </div>
          
          <div className="setting-item">
            <Text>{lang === 'zh' ? '音量' : 'Volume'}</Text>
            <input
              type="range"
              min="0"
              max="1"
              step="0.1"
              value={speechVolume}
              onChange={(e) => setSpeechVolume(parseFloat(e.target.value))}
              className="range-slider"
            />
            <Text type="secondary">{Math.round(speechVolume * 100)}%</Text>
          </div>
        </div>
      </Modal>
    </div>
  );
};

export default AIChatWithAnimation; 