import streamlit as st
from transformers import pipeline
from transformers import AutoModelForQuestionAnswering, AutoTokenizer
st.set_page_config(page_title="Automated Question Answering System")  # set page title
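# Note: st.set_page_config() must be the first Streamlit command executed in the script.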
# heading
st.markdown("<h2 style='text-align: center;'>Question Answering on Academic Essays</h2>", unsafe_allow_html=True)
# description
st.markdown("<h3 style='text-align: left; color:#F63366; font-size:18px;'><b>What is extractive question answering about?</b></h3>", unsafe_allow_html=True)
st.write("Extractive question answering is a Natural Language Processing task in which a model is given a question together with a passage of text and predicts the span of that passage that contains the answer.")
# cache the loaded model as a resource so it is only built once (ref: https://docs.streamlit.io/library/advanced-features/caching)
@st.cache_resource
def question_model():
    # load the question-answering model and tokenizer
    model_name = "deepset/deberta-v3-base-squad2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    question_answerer = pipeline("question-answering", model=model, tokenizer=tokenizer)
    return question_answerer
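# For reference, an equivalent shortcut would be pipeline("question-answering", model=model_name),
# which loads both the tokenizer and the model from the same checkpoint in a single call.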
# get the answer by passing the context & question to the model
def question_answering(context, question):
    with st.spinner(text="Loading question model..."):
        question_answerer = question_model()
    with st.spinner(text="Getting answer..."):
        answer = question_answerer(context=context, question=question)
        answer_score = str(answer["score"])
        answer = answer["answer"]
    # display the result in a bordered container
    container = st.container(border=True)
    container.write("<h5><b>Answer:</b></h5>" + answer + "<p><small>(confidence score: " + answer_score + ")</small></p><br>", unsafe_allow_html=True)
# choose the source with different tabs
tab1, tab2 = st.tabs(["Input text", "Upload File"])
# ---------- input text ----------
# if the text is typed as input
with tab1:
    # set up the example
    sample_question = "What is NLP?"
    with open("sample.txt", "r") as text_file:
        sample_text = text_file.read()
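    # sample.txt is assumed to sit alongside app.py in the Space repository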
    # get the initial values of context and question
    context = st.session_state.get("contextInput", "")
    question = st.session_state.get("questionInput", "")
    # button to try the example
    example = st.button("Try example")
    # update the values if the "Try example" button is clicked
    if example:
        context = sample_text
        question = sample_question
    # display the text area and text input with the updated or default values
    context = st.text_area("Enter the essay below:", value=context, key="contextInput", height=330)
    question = st.text_input(label="Enter the question:", value=question, key="questionInput")
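    # st.button returns True only during the rerun triggered by the click,
    # so the checks below run exactly once per click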
    # perform question answering when the "Get answer" button is clicked
    button = st.button("Get answer", key="textInput")
    if button:
        if context == "" or question == "":
            st.error("Please enter BOTH the context and the question", icon="🚨")
        else:
            question_answering(context, question)
# ---------- upload file ----------
# if a file is uploaded as input
with tab2:
    # provide the upload widget
    uploaded_file = st.file_uploader("Choose a .txt file to upload", type=["txt"])
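    # st.file_uploader returns an UploadedFile (a BytesIO-like object); its read() method yields raw bytes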
    # transfer the file content to the context, allow asking a question, then perform question answering
    if uploaded_file is not None:
        raw_text = uploaded_file.read().decode("utf-8")
        context = st.text_area("Your essay context:", value=raw_text, height=330)
        question = st.text_input(label="Enter your question", placeholder="Enter question here")
        # perform question answering when the "Get answer" button is clicked
        button2 = st.button("Get answer", key="fileInput")
        if button2:
            if context == "" or question == "":
                st.error("Please enter BOTH the context and the question", icon="🚨")
            else:
                question_answering(context, question)
st.markdown("<br><br><br><br><br>", unsafe_allow_html=True)
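# To run this app locally (assuming streamlit, transformers, and a backend such as torch are installed):
#   streamlit run app.py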