|
import gradio as gr |
|
from transformers import pipeline |
|
|
|
|
|
# Cached QA pipeline: loading the model/tokenizer is expensive (hundreds of
# MB), so it is built once on first use and reused for every question instead
# of being re-created on each call.
_QA_PIPELINE = None


def _get_qa_pipeline():
    """Return the shared question-answering pipeline, creating it lazily."""
    global _QA_PIPELINE
    if _QA_PIPELINE is None:
        model_name = "deepset/roberta-base-squad2"
        _QA_PIPELINE = pipeline(
            'question-answering', model=model_name, tokenizer=model_name
        )
    return _QA_PIPELINE


def obtener_respuesta(contexto, pregunta):
    """Answer *pregunta* by extracting a span from *contexto*.

    Uses the extractive QA model ``deepset/roberta-base-squad2``.

    Args:
        contexto: Passage of text in which to look for the answer.
        pregunta: Question to answer from the passage.

    Returns:
        The answer span extracted from the context, as a string.
    """
    nlp = _get_qa_pipeline()
    res = nlp({'question': pregunta, 'context': contexto})
    return res['answer']
|
|
|
|
|
# Gradio UI: two free-text inputs (context passage and question) feeding the
# QA function, with the extracted answer shown in a single text output.
_entradas = [
    gr.Textbox(label="Contexto"),
    gr.Textbox(label="Pregunta"),
]
_salida = gr.Textbox(label="Respuesta")

interfaz = gr.Interface(
    fn=obtener_respuesta,
    inputs=_entradas,
    outputs=_salida,
    title="QA con Gradio",
    description="Introduce el contexto y la pregunta para obtener la respuesta.",
    theme="glass",
)
|




# Start the web server. share=True asks Gradio to create a temporary public
# URL in addition to the local one.
interfaz.launch(share=True)
