Update app.py
app.py CHANGED

@@ -1,22 +1,27 @@
 import streamlit as st
 from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
+
 @st.cache(allow_output_mutation=True)
 def load_qa_model():
     model_name = "mrm8488/mobilebert-uncased-finetuned-squadv2"
     model = AutoModelForQuestionAnswering.from_pretrained(model_name)
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
-
+    text_generator = pipeline("text-generation")
+    return qa, text_generator

-qa = load_qa_model()
+qa, text_generator = load_qa_model()
 st.title("Ask Questions about your Text")
 sentence = st.text_area('Please paste your article :', height=30)
-
-
-
-
-
-with st.spinner("Discovering Answers.."):
+num_questions = st.number_input("Number of questions to generate:", min_value=1, max_value=10, value=3, step=1)
+num_answers = st.number_input("Number of answers per question:", min_value=1, max_value=5, value=1, step=1)
+button = st.button("Generate Questions and Answers")
+
+with st.spinner("Generating Questions and Answers.."):
     if button and sentence:
-
-
+        generated_questions = text_generator(sentence, max_length=50, num_return_sequences=num_questions)
+        for question_index, question_output in enumerate(generated_questions):
+            st.subheader(f"Question {question_index + 1}: {question_output['generated_text']}")
+            answers = qa(question=question_output['generated_text'], context=sentence, topk=num_answers)
+            for answer_index, answer in enumerate(answers):
+                st.write(f"Answer {answer_index + 1}: {answer['answer']}")
|
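On the question side, pipeline("text-generation") pulls the default GPT-2 checkpoint, which simply continues the pasted article rather than asking questions about it; GPT-2's generate step also typically rejects num_return_sequences > 1 unless sampling or beam search is enabled, and max_length=50 counts the prompt tokens, so a long article can exceed it on its own. A minimal sketch of the call with sampling and max_new_tokens, under those assumptions (a model fine-tuned for question generation would still be needed to get actual questions):

from transformers import pipeline

# Assumption: "gpt2" is the default checkpoint for the text-generation task.
text_generator = pipeline("text-generation", model="gpt2")

article = "Some pasted article text."

# do_sample=True allows several distinct continuations with a single beam;
# max_new_tokens bounds only the generated part, unlike max_length.
outputs = text_generator(
    article,
    max_new_tokens=50,
    do_sample=True,
    num_return_sequences=3,
)
for i, output in enumerate(outputs):
    print(f"Continuation {i + 1}: {output['generated_text']}")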