import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

# Load the fine-tuned checkpoint once at startup. from_pretrained already
# restores the saved weights, so no extra torch.load / load_state_dict call is
# needed. An extractive question-answering head is assumed here, since the
# original code expected span-style answers; if the checkpoint is actually a
# causal LM, a generation-based approach would be required instead.
tokenizer = AutoTokenizer.from_pretrained("juanpasanper/tigo_question_answer")
model = AutoModelForQuestionAnswering.from_pretrained("juanpasanper/tigo_question_answer")

# Wrap model and tokenizer in the transformers question-answering pipeline.
qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
def question_answer(context, question):
    # The QA pipeline returns a dict with "score", "start", "end" and "answer";
    # only the extracted answer text is returned to the interface.
    prediccion = qa(question=question, context=context)["answer"]
    return prediccion
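
# Minimal local sanity check (the context and question below are illustrative
# strings, not taken from the training data); uncomment to try the function
# outside the Gradio UI:
# print(question_answer(
#     "Tigo es un operador de telecomunicaciones que ofrece planes prepago y pospago.",
#     "¿Qué tipo de planes ofrece Tigo?",
# ))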
iface = gr.Interface(
    fn=question_answer,
    inputs=["text", "text"],
    outputs=["text"],
    allow_flagging="manual",
    flagging_options=["correcto", "incorrecto"],  # Spanish labels shown to reviewers
    flagging_dir="flagged",
)
iface.queue()  # replaces the deprecated enable_queue constructor argument
iface.launch()
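
# Once the Space is running, the endpoint can also be queried programmatically
# with the gradio_client package. The Space id below is hypothetical; replace it
# with the actual "user/space-name" of this deployment:
# from gradio_client import Client
# client = Client("juanpasanper/tigo_question_answer_space")
# answer = client.predict("some context paragraph", "a question about it",
#                         api_name="/predict")
# print(answer)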