| |
| import gradio as gr |
| import requests |
| import os |
|
|
| |
| |
| from api.main import app as fastapi_app |
|
|
| |
| |
| |
# Backend location: SPACE_URL is presumably injected when deployed as a
# Hugging Face Space (TODO confirm); locally we fall back to the dev server.
BASE_URL = os.getenv("SPACE_URL", "http://127.0.0.1:7860")

# REST endpoints exposed by the mounted FastAPI app (see api.main).
API_PREDICT_URL = f"{BASE_URL}/api/predict"
API_COACH_URL = f"{BASE_URL}/api/coach"
|
|
|
|
| |
| |
| |
|
|
def chatbot_response(chat_message, chat_history, edad, sexo, asistencia, notas):
    """Generate the tutor's reply for Gradio's ChatInterface.

    Calls the FastAPI backend twice: first ``/api/predict`` to estimate the
    student's risk score from the profile widgets, then ``/api/coach`` to
    turn the chat message plus that score into a study plan with citations.

    Args:
        chat_message: Text the user typed in the chat box.
        chat_history: Prior messages (required by ChatInterface; unused here).
        edad: Age value from the profile slider.
        sexo: Sex value from the profile radio.
        asistencia: Attendance percentage from the profile slider.
        notas: Grade average from the profile slider.

    Yields:
        One markdown string: the formatted risk + plan answer, or a Spanish
        error message when a backend call fails.
    """
    # --- Step 1: risk score from the student profile ---
    predict_payload = {
        "edad": edad,
        "sexo": sexo,
        "asistencia": asistencia,
        "notas": notas,
    }
    try:
        # Explicit timeout so a hung backend cannot block the UI worker
        # forever (requests has no default timeout).
        response_predict = requests.post(
            API_PREDICT_URL, json=predict_payload, timeout=30
        )
        response_predict.raise_for_status()
        predict_data = response_predict.json()
        score = predict_data.get("score", 0.0)
    except requests.exceptions.RequestException as e:
        yield f"Error al conectar con el motor de riesgo (/predict): {e}"
        return

    # --- Step 2: coaching plan conditioned on the risk score ---
    coach_payload = {
        "consulta": chat_message,
        "riesgo": score,
    }
    try:
        # Longer timeout: the coach endpoint may involve generation (RAG).
        response_coach = requests.post(
            API_COACH_URL, json=coach_payload, timeout=60
        )
        response_coach.raise_for_status()
        coach_data = response_coach.json()

        plan_texto = coach_data.get("plan", "No se pudo generar un plan.")
        citas = coach_data.get("citas", [])

        # Compose the markdown answer: risk header, plan body, citations.
        respuesta_final = f"**Riesgo Estimado: {score*100:.0f}%**\n\n{plan_texto}"
        if citas:
            respuesta_final += "\n\n**Fuentes (Mock):**\n"
            for cita in citas:
                respuesta_final += f"- `{cita}`\n"

        yield respuesta_final
    except requests.exceptions.RequestException as e:
        yield f"Error al conectar con el Coach RAG (/coach): {e}"
|
|
|
|
| |
# ---------------------------------------------------------------------------
# Gradio UI layout: student profile controls on the left, tutor chat on the
# right. The profile widgets are passed to the chat callback as
# additional_inputs, so every message carries the current profile values.
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft(), title="Tutor Virtual") as demo:
    gr.Markdown("# 🤖 Tutor Virtual Adaptativo (Demo)")
    gr.Markdown("Esta demo usa un backend **simulado (mock)** para pruebas.")

    with gr.Row():
        # Left column: the simulated student profile.
        with gr.Column(scale=1, min_width=350):
            with gr.Accordion("Perfil del Alumno", open=True):
                gr.Markdown("Ingrese los datos del alumno para la simulación.")
                input_edad = gr.Slider(10, 25, value=18, label="Edad")
                input_sexo = gr.Radio(
                    ["Masculino", "Femenino", "Otro"],
                    value="Masculino",
                    label="Sexo",
                )
                input_asistencia = gr.Slider(
                    0, 100, value=80, label="Asistencia (%)"
                )
                input_notas = gr.Slider(
                    1.0, 7.0, step=0.1, value=4.5, label="Promedio Notas"
                )
                gr.Markdown(
                    "*(Nota: El puntaje de riesgo cambiará si las notas son < 4.0)*"
                )

        # Right column: the chat itself, wired to chatbot_response.
        with gr.Column(scale=4):
            gr.ChatInterface(
                fn=chatbot_response,
                type="messages",
                chatbot=gr.Chatbot(
                    height=500,
                    label="Chat con Tutor",
                    avatar_images=("user.png", "bot.png"),
                    type="messages",
                ),
                textbox=gr.Textbox(
                    placeholder="Hola, ¿en qué puedo ayudarte hoy?"
                ),
                submit_btn="Enviar Consulta",
                additional_inputs=[
                    input_edad,
                    input_sexo,
                    input_asistencia,
                    input_notas,
                ],
            )
|
|
| |
|
|
| |
|
|
| |
| |
# Mount the Gradio UI onto the FastAPI app so a single ASGI server exposes
# both the backend routes and the chat interface. Fixes three problems with
# the previous call:
#   1. The public Gradio API is gr.mount_gradio_app, not gr.mount_app.
#   2. The signature is (app, blocks, path) — FastAPI app first.
#   3. Mounting Gradio at "/api" would shadow the /api/predict and
#      /api/coach routes that the UI itself calls, so serve it at "/".
app = gr.mount_gradio_app(fastapi_app, demo, path="/")
|
|
| |
| |