import React, { useState, useRef, useEffect, useContext } from 'react';
import { locationContext } from '@/contexts/locationContext'
import VoiceBar from '@/assets/voice-bar.svg'
import CloseSvg from '@/assets/close.svg'

// Ambient declaration — assumes the actual recognizer implementation (e.g. the Tencent Cloud ASR web SDK) is provided at runtime.
/**
 * Ambient type for the browser-microphone ASR recognizer.
 * Construction params and callback payloads are untyped (`any`) here;
 * presumably they follow the Tencent Cloud real-time ASR SDK shape — TODO confirm.
 */
declare class WebAudioSpeechRecognizer {
  constructor(params: any);
  start(): void;
  stop(): void;

  // Lifecycle callbacks, assigned by the consumer before calling start().
  OnRecognitionStart?: (res: any) => void;
  OnSentenceBegin?: (res: any) => void;
  OnRecognitionResultChange?: (res: any) => void;
  OnSentenceEnd?: (res: any) => void;
  OnRecognitionComplete?: (res: any) => void;
  OnError?: (res: any) => void;
}

/** Props for the speech-recognition overlay. */
interface VoiceRecognitionProps {
  /** Called with the final recognized sentence when a sentence ends. */
  onRecognizedText: (text: string) => void;
  /** Whether the overlay is shown; becoming true triggers recognition. */
  visible: boolean;
  /** Called whenever recognition stops (cancel, close, completion, or sentence end). */
  onClose: () => void;
}
/**
 * Speech-recognition overlay. When `visible` becomes true it fetches a gateway
 * token (cached in a ref), exchanges it for temporary ASR credentials, starts
 * the microphone recognizer, and reports the final sentence via
 * `onRecognizedText`. Any stop path (cancel click, sentence end, completion)
 * calls `onClose`.
 */
const SpeechRecognizer: React.FC<VoiceRecognitionProps> = ({ onRecognizedText, visible, onClose }) => {
  const { appConfig } = useContext(locationContext);
  // 'connecting' while tokens are fetched / recognizer starts; 'recognizing' once ASR is live.
  const [status, setStatus] = useState<'idle' | 'connecting' | 'recognizing'>('idle');
  const [recognizedText, setRecognizedText] = useState<string>('');

  const recognizerRef = useRef<WebAudioSpeechRecognizer | null>(null);
  // True once a sentence has begun — only then is it safe/meaningful to call stop().
  const isCanStopRef = useRef<boolean>(false);
  // Gateway auth token, cached across open/close cycles of the panel.
  const tokenRef = useRef('');

  /** Stops the recognizer (if stoppable), resets UI state and notifies the parent. */
  const stopRecognition = (): void => {
    setStatus('idle');
    onClose();
    if (isCanStopRef.current && recognizerRef.current) {
      recognizerRef.current.stop();
      isCanStopRef.current = false;
    }
  };

  /**
   * Exchanges the cached gateway token for temporary ASR credentials,
   * wires up the recognizer callbacks and starts recognition.
   */
  const startRecognition = async (): Promise<void> => {
    setRecognizedText('');
    setStatus('connecting');

    try {
      const response = await fetch(`${appConfig.baiyingFilesUrl}/internal/chatbot/asr/getTempToken`, {
        headers: {
          'content-type': 'application/json',
          'Accesstoken': tokenRef.current
        },
      });
      const res = await response.json();

      if (res.code !== 200) {
        // BUG FIX: a rejected temp-token request previously left the UI stuck
        // on 'connecting' forever; reset to idle and log instead.
        console.error('Failed to get temp ASR token:', res);
        setStatus('idle');
        return;
      }

      recognizerRef.current = new WebAudioSpeechRecognizer({
        secretid: res.data.tmpSecretId,
        secretkey: res.data.tmpSecretKey,
        token: res.data.token,
        appid: '1326716587',
        engine_model_type: '16k_zh',
        needvad: 1,
        filter_dirty: 1,
        vad_silence_time: 1000,
        filter_punc: 1,
        max_speak_time: 5000,
        noise_threshold: 0.3,
      });

      // Recognition session opened.
      recognizerRef.current.OnRecognitionStart = () => {
        setStatus('recognizing');
      };

      // A sentence started: from here on stop() is safe to call.
      recognizerRef.current.OnSentenceBegin = () => {
        isCanStopRef.current = true;
      };

      // Interim results while the user is speaking.
      recognizerRef.current.OnRecognitionResultChange = (res: any) => {
        if (isCanStopRef.current) {
          setRecognizedText(res.result.voice_text_str);
        }
      };

      // Sentence finished: deliver the final text to the parent and close.
      recognizerRef.current.OnSentenceEnd = (res: any) => {
        if (isCanStopRef.current) {
          onRecognizedText(res.result.voice_text_str);
          setRecognizedText('');
          stopRecognition();
        }
      };

      // Whole recognition session finished.
      recognizerRef.current.OnRecognitionComplete = () => {
        stopRecognition();
      };

      // BUG FIX: errors were previously swallowed silently and the stop-guard
      // was left stale; log and fully reset.
      recognizerRef.current.OnError = (res: any) => {
        console.error('Speech recognition error:', res);
        isCanStopRef.current = false;
        setStatus('idle');
      };

      recognizerRef.current.start();
    } catch (error) {
      console.error('Failed to start recognition:', error);
      setStatus('idle');
    }
  };

  /**
   * Obtains the gateway auth token and caches it in `tokenRef`.
   * Returns true on success. Note: unlike the original, this no longer calls
   * startRecognition() itself — the caller decides — which fixes a double-start
   * on the first open (both fetchToken and the visibility effect started it).
   *
   * SECURITY: appId/secret are hard-coded into the client bundle and visible to
   * every user; this credential exchange should be moved server-side.
   */
  const fetchToken = async (): Promise<boolean> => {
    try {
      const response = await fetch(`${appConfig.baiyingFilesUrl}/gateway/token/auth`, {
        method: 'POST',
        headers: {
          'content-type': 'application/json',
        },
        body: JSON.stringify({ "appId": "f546786038604a83baab2e133626e32b", "secret": "4792386812184b1c9253f6bc51797b66" })
      });
      const res = await response.json();
      if (res.code === 200) {
        tokenRef.current = res.data.token;
        return true;
      }
      console.error('Token auth rejected:', res);
      return false;
    } catch (error) {
      console.error('Failed to fetch token:', error);
      return false;
    }
  };

  // Start recognition whenever the panel becomes visible.
  useEffect(() => {
    async function handleStart() {
      if (!tokenRef.current) {
        const ok = await fetchToken();
        if (!ok) {
          // Token fetch failed — don't attempt recognition with an empty token.
          setStatus('idle');
          return;
        }
      }
      startRecognition();
    }

    if (visible) {
      handleStart();
    }
  }, [visible]);

  // Stop the recognizer on unmount so the microphone is released.
  useEffect(() => {
    return () => {
      if (recognizerRef.current && isCanStopRef.current) {
        recognizerRef.current.stop();
        isCanStopRef.current = false;
      }
    };
  }, []);

  if (!visible) return null;

  return (
    <div className="speechRecognizer-box">

      <img src={VoiceBar} alt="" className="bar" />

      <div className="text">
        {status === 'connecting' && (
          <div>建立连接中...</div>
        )}
        {status === 'recognizing' && (
          <div>{recognizedText ? recognizedText : '识别中...'}</div>
        )}
      </div>

      <div className="cancel" onClick={stopRecognition}>
        点击取消
      </div>
      <img src={CloseSvg} alt="" className='close' onClick={stopRecognition} />
    </div>
  );
};

export default SpeechRecognizer;
