"""Spanish extractive question-answering demo.

Serves PlanTL-GOB-ES/roberta-large-bne-sqac (a RoBERTa model fine-tuned on
the SQAC dataset) through a small Gradio web interface.
"""

import os

# Must be set before Gradio is imported so its server binds to this port.
os.environ["GRADIO_SERVER_PORT"] = "7861"

import gradio as gr
import torch  # fixed: `from torch import torch` is not a valid import
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

MODEL_NAME = "PlanTL-GOB-ES/roberta-large-bne-sqac"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_NAME)
model.eval()  # inference-only: disable dropout etc.


def get_answer(question: str, context: str) -> str:
    """Extract the answer span for *question* from *context*.

    Runs the QA model, takes the argmax start/end logits, and decodes the
    corresponding token span back into readable text.

    Returns:
        The answer as a plain string (may be empty if the model predicts
        an empty span).
    """
    inputs = tokenizer(question, context, return_tensors="pt", truncation=True)
    # no_grad: pure inference — skip building the autograd graph.
    with torch.no_grad():
        outputs = model(**inputs)
    answer_start = torch.argmax(outputs.start_logits)
    answer_end = torch.argmax(outputs.end_logits) + 1  # end index is exclusive
    # decode() merges subword pieces into readable text; the original used
    # convert_ids_to_tokens(), which returns a list of raw BPE tokens.
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end],
        skip_special_tokens=True,
    )
    return answer.strip()


def gradio_interface(question: str, context: str) -> str:
    """Adapter between the Gradio UI callback and get_answer()."""
    return get_answer(question, context)


# Build the Gradio interface (labels intentionally in Spanish).
iface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(lines=2, label="Pregunta"),
        gr.Textbox(lines=5, label="Contexto"),
    ],
    outputs="text",
    title="Respuesta a la pregunta",
)

if __name__ == "__main__":
    iface.launch(server_port=7861)