"""Gradio app: conversational French-practice chatbot.

Flow: generate a question in French (OpenAI Completion) -> record the
student's spoken answer from the microphone -> transcribe it (Google STT
via SpeechRecognition) -> generate improvement suggestions (OpenAI) ->
optionally read the feedback aloud (gTTS).
"""

import os
import tempfile

import gradio as gr
import openai
import soundfile as sf  # kept from original file; may be used elsewhere
import speech_recognition as sr
from gtts import gTTS

# Load the OpenAI API key from an environment variable (never hard-code it).
OPENAI_SECRET_KEY = os.environ.get("OPENAI_SECRET_KEY")
# BUG FIX: the original assigned the literal string 'OPENAI_SECRET_KEY'
# as the API key instead of the value read from the environment above.
openai.api_key = OPENAI_SECRET_KEY

# Shared recognizer instance reused across recordings.
recognizer = sr.Recognizer()


def generate_question():
    """Return a creative conversational question in French for the student.

    Uses the legacy OpenAI Completion API (``engine="davinci"``), matching
    the rest of this file.
    """
    prompt = (
        "Tu es un professeur de français. Pose une question en français qui imite un scénario réel, créatif et diversifié, "
        "pour un élève qui apprend le français. Évite les questions banales sur le tourisme et les voyages."
    )
    response = openai.Completion.create(
        engine="davinci",
        prompt=prompt,
        max_tokens=50,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text.strip()


def record_response():
    """Record one utterance from the default microphone and return the audio.

    Blocks until the recognizer detects end of speech. Requires microphone
    hardware; calibrates for ambient noise before listening.
    """
    with sr.Microphone() as source:
        print("Recording your response...")
        recognizer.adjust_for_ambient_noise(source)
        audio_data = recognizer.listen(source)
    return audio_data


def transcribe_audio(audio_data):
    """Transcribe recorded audio to French text via Google Speech Recognition.

    Returns a French apology/error string (rather than raising) when the
    audio is unintelligible or the remote service fails, so the UI always
    has something to display.
    """
    try:
        return recognizer.recognize_google(audio_data, language="fr-FR")
    except sr.UnknownValueError:
        return "Désolé, je n'ai pas compris. Pouvez-vous répéter?"
    except sr.RequestError:
        return "Erreur de reconnaissance vocale. Veuillez réessayer."


def generate_feedback(transcription):
    """Return three grammar-focused improvement suggestions for *transcription*."""
    prompt = (
        f"Tu es professeur de français. Voici une réponse d'un élève : {transcription}. "
        "Donne trois suggestions pour améliorer son français en te concentrant sur la grammaire et en évitant les anglicismes."
    )
    response = openai.Completion.create(
        engine="davinci",
        prompt=prompt,
        max_tokens=100,
        n=1,
        stop=None,
        temperature=0.7,
    )
    return response.choices[0].text.strip()


def speak_feedback(feedback):
    """Synthesize *feedback* to French speech; return the MP3 file path.

    NOTE(review): the temp file is created with ``delete=False`` and never
    removed — files accumulate for the lifetime of the host. Acceptable for
    a demo; a cleanup strategy would be needed in production.
    """
    tts = gTTS(text=feedback, lang='fr')
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_audio:
        tts.save(temp_audio.name)
        audio_file = temp_audio.name
    return audio_file


# --- Thin wrappers binding the pipeline steps to Gradio callbacks ---

def ask_question():
    """Gradio callback: produce a new question."""
    return generate_question()


def record_and_transcribe():
    """Gradio callback: record from the mic and return the transcription."""
    return transcribe_audio(record_response())


def provide_feedback(transcription):
    """Gradio callback: turn a transcription into improvement suggestions."""
    return generate_feedback(transcription)


def speak_out_feedback(feedback):
    """Gradio callback: synthesize feedback audio; returns an MP3 path."""
    return speak_feedback(feedback)


# --- Gradio interface ---
with gr.Blocks() as french_chatbot:
    question_output = gr.Textbox(label="Question pour vous")
    transcription_output = gr.Textbox(label="Votre réponse transcrite")
    feedback_output = gr.Textbox(label="Suggestions pour améliorer votre français")
    # BUG FIX: the original created ``gr.Audio`` and called ``.click`` on it,
    # but gr.Audio is a media component with no click event. A Button now
    # triggers the TTS and this Audio component plays the result.
    feedback_audio = gr.Audio(label="Écouter Suggestions")

    with gr.Row():
        generate_question_button = gr.Button("Générer une Question")
        record_response_button = gr.Button("Enregistrer et Transcrire")
        generate_feedback_button = gr.Button("Générer des Suggestions")
        speak_feedback_button = gr.Button("Écouter Suggestions")

    generate_question_button.click(fn=ask_question, outputs=question_output)
    record_response_button.click(fn=record_and_transcribe, outputs=transcription_output)
    generate_feedback_button.click(
        fn=provide_feedback, inputs=transcription_output, outputs=feedback_output
    )
    speak_feedback_button.click(
        fn=speak_out_feedback, inputs=feedback_output, outputs=feedback_audio
    )

french_chatbot.launch()