# qa_sparse_bert/app.py

import gradio as gr
from transformers import pipeline
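
# Note on the checkpoint: its name encodes the compression recipe -- a
# bert-base-uncased model fine-tuned on SQuADv1.1 with ~80% of weights pruned
# in a 1x4 block pattern via Prune Once for All (Zafrir et al., 2021). The
# first call downloads the weights from the Hugging Face Hub.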
qa_pipeline = pipeline(
    task="question-answering",
    model="Intel/bert-base-uncased-squadv1.1-sparse-80-1x4-block-pruneofa",
)

def predict(
    context="There are seven continents in the world.",
    question="How many continents are there in the world?",
):
    """
    A sample prediction returns a dictionary of the form:
    {'score': 0.9376363158226013, 'start': 10, 'end': 15, 'answer': 'seven'}
    """
    predictions = qa_pipeline(context=context, question=question)
    print(f"predictions={predictions}")
    score = predictions["score"]
    answer = predictions["answer"]
    start = predictions["start"]
    return score, start, answer

md = """
If you came looking for ChatGPT, sorry to disappoint, but this is different. This model answers a question about a text you provide: it performs reading comprehension. It does not answer questions in general -- it only works from the context you give it. Still, accurate reading comprehension is a valuable task, especially if you want quick answers from a large (and maybe boring!) document.

The model is based on the Zafrir et al. (2021) paper: [Prune Once for All: Sparse Pre-Trained Language Models](https://arxiv.org/abs/2111.05754).

Training dataset: SQuADv1.1, from the Rajpurkar et al. (2016) paper: [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://aclanthology.org/D16-1264/).
"""

# Smoke test (optional): with the default arguments, predict() should return
# roughly (0.94, 10, 'seven') and print the full predictions dictionary.
# predict()

context = gr.Text(lines=10, label="Context")
question = gr.Text(label="Question")
score = gr.Text(label="Score")
start = gr.Text(label="Answer found at character")
answer = gr.Text(label="Answer")
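
# Gradio maps predict()'s return values to `outputs` positionally, so the
# order below must match the function's return order: (score, start, answer).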
iface = gr.Interface(
    fn=predict,
    inputs=[context, question],
    outputs=[score, start, answer],
    title="Question & Answer with Sparse BERT using the SQuAD dataset",
    description=md,
)
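
# launch() serves the app locally by default; passing share=True (a standard
# Gradio option) would also create a temporary public link.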
iface.launch()