import React,{useEffect, useState} from "react";
import {recordAudio} from '../../utils/recordAudio';
import {recognizeSpeech} from '../../utils/speechRecognition';
import {textToSpeech} from '../../utils/textToSpeech';
/**
 * Voice-interaction demo component: records audio, runs speech
 * recognition on it, shows the transcript, and echoes it back via TTS.
 */
const VoiceInteraction: React.FC = () => {
    // True while a record/recognize cycle is in flight; disables the button.
    const [isRecording, setIsRecording] = useState(false);
    // Most recent recognition result ('' until the first successful run).
    const [recognizedText, setRecognizedText] = useState('');

    // One full capture → transcribe → display → speak-back pass.
    // Skips the display/TTS steps when recognition returns nothing.
    const runVoiceCycle = async (): Promise<void> => {
        const audioBlob = await recordAudio();
        const text = await recognizeSpeech(audioBlob);
        if (!text) {
            return;
        }
        setRecognizedText(text);
        await textToSpeech(`你说的是：${text}`);
    };

    // Click handler: flag the busy state for the duration of the cycle,
    // log any failure, and always clear the flag afterwards.
    const startRecording = async () => {
        setIsRecording(true);
        try {
            await runVoiceCycle();
        } catch (error) {
            console.error('录音或识别出错:', error);
        } finally {
            setIsRecording(false);
        }
    };

    return (
        <div>
            <h1>百度 AI 语音交互</h1>
            <button onClick={startRecording} disabled={isRecording}>
                {isRecording ? '录音中...' : '开始录音'}
            </button>
            {recognizedText && <p>识别结果: {recognizedText}</p>}
        </div>
    );
};


export default VoiceInteraction;