import React, { useState, useEffect, useRef, useCallback } from 'react';
import {
  FaMicrophone,
  FaMicrophoneSlash,
  FaDownload,
  FaTrash,
} from 'react-icons/fa';
import './MeetingTranscript.css';

/**
 * One finalized speech-recognition result shown in the transcript list.
 */
interface TranscriptEntry {
  id: string; // unique key built from timestamp + random suffix
  speaker: string; // display name of the speaker (userName, falling back to userId)
  text: string; // finalized recognized text
  timestamp: Date; // when the entry was created
  confidence: number; // recognizer confidence, rendered as a percentage (assumed in [0, 1])
}

/**
 * Props for the MeetingTranscript component.
 */
interface MeetingTranscriptProps {
  roomId: string; // embedded in the exported transcript file name
  userId: string; // fallback speaker label when userName is absent
  userName?: string; // preferred speaker label for transcript entries
  onToggleRecording: (recording: boolean) => void; // notified with the new recording state on start/stop
}

/**
 * MeetingTranscript — live meeting-minutes panel.
 *
 * Uses the browser SpeechRecognition API (Chrome/Edge/Safari) for continuous
 * Chinese (zh-CN) speech-to-text, plus a parallel MediaRecorder capture of the
 * raw microphone audio. Finalized recognition results are appended to a
 * scrollable transcript list that can be exported as a plain-text file or
 * cleared. Renders a fallback message when the browser lacks support.
 */
const MeetingTranscript: React.FC<MeetingTranscriptProps> = ({
  roomId,
  userId,
  userName,
  onToggleRecording,
}) => {
  const [transcripts, setTranscripts] = useState<TranscriptEntry[]>([]);
  const [isListening, setIsListening] = useState<boolean>(false);
  // Interim (not yet final) recognition text, shown as live feedback.
  const [currentText, setCurrentText] = useState<string>('');
  const [isSupported, setIsSupported] = useState<boolean>(false);
  const [showTranscript, setShowTranscript] = useState<boolean>(false);

  const transcriptContainerRef = useRef<HTMLDivElement>(null);
  const recognitionRef = useRef<SpeechRecognition | null>(null);
  const audioChunksRef = useRef<Blob[]>([]);
  const mediaRecorderRef = useRef<MediaRecorder | null>(null);
  const streamRef = useRef<MediaStream | null>(null);

  // Detect browser support and create the recognizer once on mount.
  useEffect(() => {
    const SpeechRecognition =
      window.SpeechRecognition || window.webkitSpeechRecognition;
    if (SpeechRecognition) {
      setIsSupported(true);
      const recognitionInstance = new SpeechRecognition();
      recognitionInstance.continuous = true; // keep listening across utterances
      recognitionInstance.interimResults = true; // stream partial results for live feedback
      recognitionInstance.lang = 'zh-CN'; // recognize Mandarin Chinese
      recognitionInstance.maxAlternatives = 1;

      recognitionRef.current = recognitionInstance;
    } else {
      setIsSupported(false);
      console.warn('浏览器不支持语音识别功能');
    }
  }, []);

  /**
   * Acquire the microphone and set up a MediaRecorder for raw-audio capture.
   * Rethrows on failure so toggleRecording can surface the permission error.
   */
  const initAudioRecording = useCallback(async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      streamRef.current = stream;

      // FIX: Safari does not support 'audio/webm;codecs=opus'. Fall back to
      // the browser's default container when the preferred type is missing.
      const preferredType = 'audio/webm;codecs=opus';
      const mediaRecorder =
        typeof MediaRecorder.isTypeSupported === 'function' &&
        MediaRecorder.isTypeSupported(preferredType)
          ? new MediaRecorder(stream, { mimeType: preferredType })
          : new MediaRecorder(stream);

      mediaRecorderRef.current = mediaRecorder;

      mediaRecorder.ondataavailable = event => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data);
        }
      };

      mediaRecorder.onstop = () => {
        const audioBlob = new Blob(audioChunksRef.current, {
          type: 'audio/webm',
        });
        audioChunksRef.current = [];

        // Recording finished; audio post-processing hooks can be added here.
        console.log('音频录制完成，大小:', audioBlob.size);
      };
    } catch (error) {
      console.error('初始化音频录制失败:', error);
      // FIX: previously swallowed — the start path then continued and the
      // detailed permission alert in toggleRecording could never fire.
      throw error;
    }
    // FIX: dependency list previously contained an unused `audioContext`.
  }, []);

  // Start/stop both the recognizer and the raw-audio recorder.
  const toggleRecording = useCallback(async () => {
    if (!isSupported || !recognitionRef.current) {
      alert('浏览器不支持语音识别功能，请使用Chrome、Edge或Safari浏览器');
      return;
    }

    if (isListening) {
      // Stop recording.
      try {
        recognitionRef.current.stop();
        if (
          mediaRecorderRef.current &&
          mediaRecorderRef.current.state === 'recording'
        ) {
          mediaRecorderRef.current.stop();
        }
        // FIX: release the microphone immediately. Previously the tracks were
        // only stopped on unmount, so the browser's mic indicator stayed on
        // after the user pressed "stop".
        if (streamRef.current) {
          streamRef.current.getTracks().forEach(track => track.stop());
          streamRef.current = null;
        }
        setIsListening(false);
        onToggleRecording(false);
        console.log('会议纪要录音已停止');
      } catch (error) {
        console.error('停止录音失败:', error);
      }
    } else {
      // Start recording.
      try {
        await initAudioRecording();

        if (mediaRecorderRef.current) {
          mediaRecorderRef.current.start(1000); // flush a data chunk every second
        }

        recognitionRef.current.start();
        setIsListening(true);
        onToggleRecording(true);
        console.log('会议纪要录音已开始');
      } catch (error) {
        console.error('开始录音失败:', error);
        alert(
          '无法访问麦克风，请检查权限设置。\n\n提示：\n1. 确保浏览器允许访问麦克风\n2. 检查系统麦克风权限\n3. 尝试刷新页面重新授权'
        );
      }
    }
  }, [isListening, isSupported, initAudioRecording, onToggleRecording]);

  // Wire the speech-recognition event handlers. Re-attached whenever the
  // listening state or speaker identity changes so the closures stay fresh.
  useEffect(() => {
    if (!recognitionRef.current) return;

    const recognition = recognitionRef.current;
    let restartTimer: ReturnType<typeof setTimeout> | null = null;

    recognition.onstart = () => {
      console.log('语音识别开始');
    };

    recognition.onresult = event => {
      let interimTranscript = '';

      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript;
        const confidence = event.results[i][0].confidence;

        if (event.results[i].isFinal) {
          // FIX: append one entry per final result. The original accumulated
          // `finalTranscript` across the loop but pushed the *cumulative*
          // string each time, so a second final result in the same event
          // duplicated the text of the first.
          if (transcript.trim()) {
            const newEntry: TranscriptEntry = {
              id: `transcript_${Date.now()}_${Math.random()}`,
              speaker: userName || userId,
              text: transcript.trim(),
              timestamp: new Date(),
              confidence,
            };

            setTranscripts(prev => [...prev, newEntry]);
          }
        } else {
          interimTranscript += transcript;
        }
      }

      setCurrentText(interimTranscript);
    };

    recognition.onerror = event => {
      console.error('语音识别错误:', event.error);
      if (event.error === 'not-allowed') {
        alert('麦克风权限被拒绝，请允许访问麦克风');
      }
    };

    recognition.onend = () => {
      console.log('语音识别结束');
      // Continuous recognition stops itself periodically; restart as long as
      // we are still supposed to be listening.
      if (isListening) {
        restartTimer = setTimeout(() => {
          try {
            recognitionRef.current?.start();
          } catch (error) {
            // start() throws InvalidStateError if recognition is already
            // running — log and carry on.
            console.error('重新启动语音识别失败:', error);
          }
        }, 100);
      }
    };

    return () => {
      // FIX: cancel any pending restart so a stale closure (captured
      // isListening === true) cannot restart the recognizer after the user
      // has already stopped recording.
      if (restartTimer !== null) {
        clearTimeout(restartTimer);
      }
      recognition.onstart = null;
      recognition.onresult = null;
      recognition.onerror = null;
      recognition.onend = null;
    };
  }, [isListening, userId, userName]);

  // Release microphone and recognizer on unmount.
  useEffect(() => {
    return () => {
      if (streamRef.current) {
        streamRef.current.getTracks().forEach(track => track.stop());
      }
      if (recognitionRef.current) {
        recognitionRef.current.stop();
      }
    };
  }, []);

  // Keep the transcript list scrolled to the newest entry.
  useEffect(() => {
    if (transcriptContainerRef.current) {
      transcriptContainerRef.current.scrollTop =
        transcriptContainerRef.current.scrollHeight;
    }
  }, [transcripts]);

  // Export the transcript as a UTF-8 plain-text download.
  const exportTranscript = useCallback(() => {
    const content = transcripts
      .map(entry => {
        const time = entry.timestamp.toLocaleTimeString('zh-CN');
        return `[${time}] ${entry.speaker}: ${entry.text}`;
      })
      .join('\n');

    const blob = new Blob([content], { type: 'text/plain;charset=utf-8' });
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    // FIX: toLocaleDateString('zh-CN') yields e.g. "2024/1/5" — the slashes
    // are illegal in file names. Use the ISO date (YYYY-MM-DD) instead.
    const date = new Date().toISOString().slice(0, 10);
    a.download = `会议纪要_${roomId}_${date}.txt`;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
  }, [transcripts, roomId]);

  // Clear all transcript entries after user confirmation.
  const clearTranscript = useCallback(() => {
    if (window.confirm('确定要清空所有会议纪要吗？')) {
      setTranscripts([]);
      setCurrentText('');
    }
  }, []);

  if (!isSupported) {
    return (
      <div className="transcript-unsupported">
        <p>您的浏览器不支持语音识别功能</p>
        <p>请使用Chrome、Edge或Safari浏览器</p>
      </div>
    );
  }

  return (
    <div className="meeting-transcript">
      <div className="transcript-header">
        <h3>会议纪要</h3>
        <div className="transcript-controls">
          <button
            onClick={toggleRecording}
            className={`record-button ${isListening ? 'recording' : ''}`}
            title={isListening ? '停止录音' : '开始录音'}
          >
            {isListening ? <FaMicrophoneSlash /> : <FaMicrophone />}
          </button>

          <button
            onClick={() => setShowTranscript(!showTranscript)}
            className="toggle-button"
            title={showTranscript ? '隐藏纪要' : '显示纪要'}
          >
            纪要
          </button>

          {transcripts.length > 0 && (
            <>
              <button onClick={exportTranscript} title="导出纪要">
                <FaDownload />
              </button>
              <button onClick={clearTranscript} title="清空纪要">
                <FaTrash />
              </button>
            </>
          )}
        </div>
      </div>

      {showTranscript && (
        <div className="transcript-content">
          <div className="transcript-status">
            <span className={`status-indicator ${isListening ? 'active' : ''}`}>
              {isListening ? '正在录音...' : '录音已停止'}
            </span>
            {currentText && (
              <div className="interim-text">正在识别: {currentText}</div>
            )}
          </div>

          <div className="transcript-list" ref={transcriptContainerRef}>
            {transcripts.length === 0 ? (
              <div className="empty-transcript">
                <p>暂无会议纪要</p>
                <p>点击麦克风按钮开始录音</p>
              </div>
            ) : (
              transcripts.map(entry => (
                <div key={entry.id} className="transcript-entry">
                  <div className="transcript-meta">
                    <span className="speaker">{entry.speaker}</span>
                    <span className="timestamp">
                      {entry.timestamp.toLocaleTimeString('zh-CN')}
                    </span>
                    <span className="confidence">
                      置信度: {Math.round(entry.confidence * 100)}%
                    </span>
                  </div>
                  <div className="transcript-text">{entry.text}</div>
                </div>
              ))
            )}
          </div>
        </div>
      )}
    </div>
  );
};

export default MeetingTranscript;
