import React, { useState, useEffect, useRef } from 'react';
import SpeechRecognition, { useSpeechRecognition } from 'react-speech-recognition';
import { sendVoiceForRecognition, mockSpeechRecognition } from '../api/llmService';
import styles from '../styles/Home.module.css';
import { HistoryItem } from './HistoryRecord';

/** Props for the VoiceInput component. */
interface VoiceInputProps {
  /** Called with the recognized text whenever speech recognition produces a result. */
  onRecognitionResult: (text: string) => void;
  /** When true (the default), use the mock recognition API instead of the real backend. */
  useMockApi?: boolean;
  /** A previously saved voice history item; its stored text is re-emitted via onRecognitionResult. */
  selectedHistory?: HistoryItem | null;
}

/**
 * Voice input panel: records audio (MediaRecorder) or uses the browser's
 * Web Speech API, sends the result to a recognition backend (real or mock),
 * stores it in localStorage history, and reports the text to the parent.
 */
const VoiceInput: React.FC<VoiceInputProps> = ({ 
  onRecognitionResult,
  useMockApi = true, // default to the mock API; set to false for the real backend
  selectedHistory = null
}) => {
  const [isRecording, setIsRecording] = useState(false);
  const [isProcessing, setIsProcessing] = useState(false);
  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);
  const [audioUrl, setAudioUrl] = useState<string | null>(null);
  const [recordingTime, setRecordingTime] = useState(0);
  const audioRef = useRef<HTMLAudioElement>(null);
  const timerRef = useRef<number | null>(null);
  // Recorded chunks live in a ref, NOT state: the onstop handler must see the
  // chunks delivered by the final dataavailable event. With state, the handler
  // closes over a stale snapshot and the tail of the recording is lost.
  const audioChunksRef = useRef<Blob[]>([]);
  
  // In-browser speech recognition via the Web Speech API.
  const {
    transcript,
    listening,
    resetTranscript,
    browserSupportsSpeechRecognition
  } = useSpeechRecognition();

  // Forward in-browser recognition results to the parent once listening stops.
  useEffect(() => {
    if (transcript && !listening && !useMockApi) {
      onRecognitionResult(transcript);
    }
  }, [transcript, listening, onRecognitionResult, useMockApi]);

  // Clear the elapsed-time timer on unmount.
  useEffect(() => {
    return () => {
      if (timerRef.current) {
        window.clearInterval(timerRef.current);
      }
    };
  }, []);

  // Replay a selected voice history item by re-emitting its stored text.
  useEffect(() => {
    if (selectedHistory && selectedHistory.type === 'voice' && selectedHistory.userInput) {
      onRecognitionResult(selectedHistory.userInput);
    }
  }, [selectedHistory, onRecognitionResult]);

  /** Append a recognition result (and optional audio URL) to localStorage history. */
  const saveToHistory = (text: string, audioUrl: string | null = null) => {
    try {
      // Load existing history, if any.
      const savedHistory = localStorage.getItem('llm-voice-history');
      let historyItems: HistoryItem[] = [];
      
      if (savedHistory) {
        historyItems = JSON.parse(savedHistory);
      }
      
      // Build the new history entry. (slice replaces the deprecated substr.)
      const newHistoryItem: HistoryItem = {
        id: `voice-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
        timestamp: Date.now(),
        type: 'voice',
        userInput: text,
        audioUrl: audioUrl || undefined
      };
      
      historyItems.push(newHistoryItem);
      
      // Cap history at the 50 most recent entries.
      if (historyItems.length > 50) {
        historyItems = historyItems.slice(-50);
      }
      
      localStorage.setItem('llm-voice-history', JSON.stringify(historyItems));
    } catch (error) {
      console.error('保存历史记录失败:', error);
    }
  };

  /** Start a new recording session (Web Speech API or MediaRecorder path). */
  const startRecording = async () => {
    resetTranscript();
    audioChunksRef.current = [];
    // Release the previous recording's blob URL before dropping the reference,
    // otherwise each recording leaks its blob in memory.
    if (audioUrl) {
      URL.revokeObjectURL(audioUrl);
    }
    setAudioUrl(null);
    setRecordingTime(0);
    
    if (!useMockApi && browserSupportsSpeechRecognition) {
      // Web Speech API path.
      SpeechRecognition.startListening({ continuous: true, language: 'zh-CN' });
      setIsRecording(true);
      // Drive the elapsed-time display for this path too (it previously stayed at 0).
      timerRef.current = window.setInterval(() => {
        setRecordingTime(prev => prev + 1);
      }, 1000);
      return;
    }
    
    try {
      // MediaRecorder path: capture microphone audio.
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      
      // Prefer opus-in-webm, but fall back to the browser default instead of
      // throwing on browsers that do not support it (e.g. Safari).
      const preferredType = 'audio/webm;codecs=opus';
      const options: MediaRecorderOptions = {
        audioBitsPerSecond: 128000,
        ...(MediaRecorder.isTypeSupported(preferredType) ? { mimeType: preferredType } : {})
      };
      
      const recorder = new MediaRecorder(stream, options);
      
      recorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          audioChunksRef.current.push(event.data);
        }
      };
      
      // Emit a chunk every second.
      recorder.start(1000);
      setMediaRecorder(recorder);
      setIsRecording(true);
      
      // Start the elapsed-time timer.
      timerRef.current = window.setInterval(() => {
        setRecordingTime(prev => prev + 1);
      }, 1000);
      
    } catch (error) {
      console.error('无法访问麦克风:', error);
      alert('无法访问麦克风，请检查浏览器权限设置。');
    }
  };

  /** Stop recording, run recognition on the captured audio, and report the result. */
  const stopRecording = () => {
    // Stop the elapsed-time timer.
    if (timerRef.current) {
      window.clearInterval(timerRef.current);
      timerRef.current = null;
    }
    
    if (!useMockApi && browserSupportsSpeechRecognition) {
      // Web Speech API path.
      SpeechRecognition.stopListening();
      setIsRecording(false);
      return;
    }
    
    if (mediaRecorder && isRecording) {
      setIsRecording(false);
      setIsProcessing(true);
      
      // Install the handler BEFORE calling stop() so the stop event cannot be
      // missed regardless of timing.
      mediaRecorder.onstop = async () => {
        // Release the microphone only now: stopping the tracks earlier kills
        // the stream before the final dataavailable/stop events fire and
        // truncates (or empties) the recording.
        mediaRecorder.stream.getTracks().forEach(track => track.stop());
        try {
          const chunks = audioChunksRef.current;
          console.log('录音数据块数量:', chunks.length);
          console.log('录音时长:', recordingTime, '秒');
          
          if (chunks.length === 0) {
            throw new Error('没有录制到音频数据');
          }
          
          // Assemble the chunks into a single audio blob.
          const audioBlob = new Blob(chunks, { type: 'audio/webm' });
          console.log('音频Blob大小:', audioBlob.size, '字节');
          
          // Temporary URL for in-page playback/download.
          const url = URL.createObjectURL(audioBlob);
          setAudioUrl(url);
          
          // Run recognition via the mock or the real backend.
          const recognizedText = useMockApi 
            ? await mockSpeechRecognition(audioBlob)
            : await sendVoiceForRecognition(audioBlob);
          
          saveToHistory(recognizedText, url);
          
          onRecognitionResult(recognizedText);
        } catch (error) {
          console.error('语音识别处理失败:', error);
          alert('语音识别处理失败，请重试。');
        } finally {
          setIsProcessing(false);
        }
      };
      
      // Guarantee at least ~1 second of audio so the backend receives usable data.
      if (recordingTime < 1) {
        setTimeout(() => {
          if (mediaRecorder.state === 'recording') {
            mediaRecorder.stop();
          }
        }, 1000);
      } else if (mediaRecorder.state === 'recording') {
        mediaRecorder.stop();
      }
    }
  };

  /** Trigger a download of the current recording via a transient anchor element. */
  const downloadAudio = () => {
    if (audioUrl) {
      const a = document.createElement('a');
      a.href = audioUrl;
      a.download = `recording-${new Date().getTime()}.webm`;
      a.click();
    }
  };

  // Bail out when real (non-mock) recognition is requested but unsupported.
  if (!browserSupportsSpeechRecognition && !useMockApi) {
    return <div>您的浏览器不支持语音识别功能。请使用Chrome浏览器。</div>;
  }

  return (
    <div className={styles.voiceInputSection}>
      <h2 className={styles.sectionTitle}>语音输入</h2>
      
      <div className={styles.statusIndicator}>
        <div className={`${styles.statusDot} ${(isRecording || listening) ? styles.active : ''}`}></div>
        <span>
          {isRecording || listening ? `正在录音... ${recordingTime}秒` : 
           isProcessing ? '正在处理...' : '准备就绪'}
        </span>
      </div>
      
      <div className={styles.controlsContainer}>
        <button 
          className={`btn ${isRecording ? 'btn-secondary' : 'btn-primary'}`}
          onClick={isRecording ? stopRecording : startRecording}
          disabled={isProcessing}
        >
          {isRecording ? '停止录音' : '开始录音'}
        </button>
        
        {(transcript || isRecording || listening) && (
          <button 
            className="btn"
            onClick={resetTranscript}
            disabled={isProcessing || isRecording || listening}
          >
            清除
          </button>
        )}
      </div>
      
      {/* Recorded audio: playback and download */}
      {audioUrl && (
        <div className={styles.audioPlayer}>
          <h3 className="mb-4">录音文件:</h3>
          <audio ref={audioRef} controls>
            <source src={audioUrl} type="audio/webm" />
            您的浏览器不支持音频播放。
          </audio>
          <button 
            className="btn btn-primary mt-4"
            onClick={downloadAudio}
          >
            下载录音文件
          </button>
          <p className="mt-4">
            <small>
              录音文件存储在浏览器内存中，作为Blob URL。您可以下载保存到本地。
              <br />
              录音时长: {recordingTime} 秒
            </small>
          </p>
        </div>
      )}
      
      {transcript && !useMockApi && (
        <div className={styles.resultContainer}>
          <p>{transcript}</p>
        </div>
      )}
    </div>
  );
};

export default VoiceInput; 