add qa api
Browse files
app.py
CHANGED
@@ -1,4 +1,68 @@
|
|
"""Streamlit help-desk app: extractive question answering over a Wikipedia page.

Fetches the plain-text extract of the BERT Wikipedia article as the default
context paragraph, lets the user edit the paragraph and the question, and sends
both to the Hugging Face hosted inference API for an answer prediction.
"""
import json

# NOTE(review): the original file used `req` and `json` without importing them
# (NameError at runtime). `requests` is the module the call signatures
# (`req.request`, `.content`) clearly intend.
import requests as req
import streamlit as st

# TODO: improve layout (columns, sidebar, forms)
# st.set_page_config(layout='wide')


st.title('Question answering help desk application')


##########################################################
st.subheader('1. A simple question')
##########################################################

# MediaWiki API request for the plain-text extract of the BERT article,
# used as the default QA context paragraph.
WIKI_URL = 'https://en.wikipedia.org/w/api.php'
WIKI_QUERY = "?format=json&action=query&prop=extracts&explaintext=1"
WIKI_BERT = "&titles=BERT_(language_model)"
WIKI_METHOD = 'GET'

response = req.request(WIKI_METHOD, f'{WIKI_URL}{WIKI_QUERY}{WIKI_BERT}')
resp_json = json.loads(response.content.decode("utf-8"))
# The query names exactly one title, so take the single page returned instead
# of hard-coding the numeric page id ('62026514'), which is fragile.
pages = resp_json['query']['pages']
paragraph = next(iter(pages.values()))['extract']

# Let the user edit or replace the default context paragraph.
written_passage = st.text_area(
    'Paragraph used for QA (you can also edit, or copy/paste new content)',
    paragraph,
    height=250
)
if written_passage:
    paragraph = written_passage

question = 'How many languages does bert understand?'
written_question = st.text_input(
    'Question used for QA (you can also edit, and experiment with the answers)',
    question
)
if written_question:
    question = written_question

# Hugging Face hosted inference endpoint for an extractive QA model.
QA_URL = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"
QA_METHOD = 'POST'


if st.button('Run QA inference (get answer prediction)'):
    if paragraph and question:
        inputs = {'question': question, 'context': paragraph}
        payload = json.dumps(inputs)
        prediction = req.request(QA_METHOD, QA_URL, data=payload)
        answer = json.loads(prediction.content.decode("utf-8"))
        answer_span = answer["answer"]
        answer_score = answer["score"]
        st.write(f'Answer: **{answer_span}**')
        # Show the answer within ~86/90 characters of surrounding context,
        # bolding the answer span via Markdown.
        start_par = max(0, answer["start"]-86)
        stop_para = min(answer["end"]+90, len(paragraph))
        answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
        st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {format(answer_score, ".3f")})')
        st.write(f'Answer JSON: ')
        st.write(answer)
    else:
        st.write('Write some passage of text and a question')
        st.stop()


# Dead code below: the previous local `transformers` pipeline implementation,
# kept as an inert string literal for reference (no runtime effect).
"""
from transformers import pipeline

x = st.slider('Select a value')
st.write(x, 'squared is', x * x)

question_answerer = pipeline("question-answering")

context = r" Extractive Question Answering is the task of extracting an answer from a text given a question.
An example of a question answering dataset is the SQuAD dataset, which is entirely based on that task.
If you would like to fine-tune a model on a SQuAD task, you may leverage the
examples/pytorch/question-answering/run_squad.py script."
question = "What is extractive question answering?" #"What is a good example of a question answering dataset?"
result = question_answerer(question=question, context=context)
answer = result['answer']
score = result['score']
span = f"start: {result['start']}, end: {result['end']}"

st.write(answer)
st.write(f"score: {score}")
st.write(f"span: {span}")
"""