import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Plain-text file used as context for every question; adjust the path as needed.
file_path = "/texto_plano.txt"

try:
    with open(file_path, "r", encoding="utf-8") as file:
        context_data = file.read()
    print("Text loaded successfully.")
except FileNotFoundError:
    print(f"The file {file_path} was not found. Check the path and try again.")
    context_data = ""

if context_data:
    print(f"Context data loaded: {context_data[:100]}...")

print("Cargando el modelo bigscience/bloomz-560m...")
|
|
model_name = "bigscience/bloomz-560m"
|
|
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
|
|
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
print("Modelo bigscience/bloomz-560m cargado correctamente.")

def answer_question(question):
    # bloomz models were instruction-tuned mostly on English prompts; an explicit
    # "Answer:" cue keeps the model from restating the prompt instead of answering.
    input_text = f"Question: {question}\nContext: {context_data}\nAnswer:"
    # Truncate so the prompt plus 200 generated tokens fits BLOOM's 2048-token window.
    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=1848)
    # max_new_tokens bounds the answer itself; max_length=200 would also count the
    # prompt tokens and can leave no room to generate at all.
    outputs = model.generate(inputs, max_new_tokens=200)
    # Decode only the newly generated tokens, not the echoed prompt.
    response = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
    return f"Question: {question}\nAnswer: {response.strip()}"

interface = gr.Interface(
    fn=answer_question,
    inputs="text",
    outputs="text",
    title="QA - bigscience/bloomz-560m",
    description="Ask open-ended questions about the narrative content.",
)
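
# launch() serves the UI locally and blocks; pass share=True for a temporary
# public link (useful when running in a notebook).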
interface.launch()