stmnk committed on
Commit
afa25e5
1 Parent(s): b429621

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -59
app.py CHANGED
@@ -1,75 +1,53 @@
1
- import json
2
- import streamlit as st
3
- import requests as req
4
-
5
- # TODO: improve layout (columns, sidebar, forms)
6
- # st.set_page_config(layout='wide')
7
-
8
-
9
  st.title('Question Answering example')
10
-
11
-
12
- ##########################################################
13
  st.subheader('1. A simple question (extractive, closed domain)')
14
- ##########################################################
15
-
16
-
17
- WIKI_URL = 'https://en.wikipedia.org/w/api.php'
18
- WIKI_QUERY = "?format=json&action=query&prop=extracts&explaintext=1"
19
- WIKI_BERT = "&titles=BERT_(language_model)"
20
- WIKI_METHOD = 'GET'
21
-
22
  response = req.request(WIKI_METHOD, f'{WIKI_URL}{WIKI_QUERY}{WIKI_BERT}')
23
  resp_json = json.loads(response.content.decode("utf-8"))
24
  wiki_bert = resp_json['query']['pages']['62026514']['extract']
25
- paragraph = wiki_bert
26
-
27
- written_passage = st.text_area(
28
- 'Paragraph used for QA (you can also edit, or copy/paste new content)',
29
- paragraph,
30
- height=250
31
- )
32
  if written_passage:
33
  paragraph = written_passage
34
-
35
- # question = 'How many languages does bert understand?'
36
- question = 'How many attention heads does Bert have?'
37
-
38
- written_question = st.text_input(
39
- 'Question used for QA (you can also edit, and experiment with the answers)',
40
- question
41
- )
42
  if written_question:
43
  question = written_question
44
-
45
- QA_URL = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"
46
- QA_METHOD = 'POST'
47
-
48
-
49
  if st.button('Run QA inference (get answer prediction)'):
50
  if paragraph and question:
51
  inputs = {'question': question, 'context': paragraph}
52
  payload = json.dumps(inputs)
53
  prediction = req.request(QA_METHOD, QA_URL, data=payload)
54
  answer = json.loads(prediction.content.decode("utf-8"))
55
- # >>> answer structure:
56
- # {
57
- # "score": 0.24088488519191742,
58
- # "start": 3595,
59
- # "end": 3602,
60
- # "answer": "over 70"
61
- # }
62
- answer_dict = dict(answer)
63
- # st.write(answer_dict)
64
- answer_span = answer_dict["answer"]
65
- answer_score = answer_dict["score"]
66
- st.write(f'Answer: **{answer_span}**')
67
- start_par = max(0, answer_dict["start"]-86)
68
- stop_para = min(answer_dict["end"]+90, len(paragraph))
69
- answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
70
- st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {format(answer_score, ".3f")})')
71
- st.write(f'Answer JSON: ')
72
- st.write(answer)
 
 
 
 
 
 
73
  else:
74
- st.write('Write some passage of text and a question')
75
- st.stop()
 
1
+ import json; import streamlit as st; import requests as req; from transformers import pipeline
2
+ WIKI_URL = 'https://en.wikipedia.org/w/api.php'; WIKI_BERT = "&titles=BERT_(language_model)"
3
+ WIKI_QUERY = "?format=json&action=query&prop=extracts&explaintext=1"; WIKI_METHOD = 'GET'
4
+ pipe_exqa = pipeline("question-answering") #, model="distilbert-base-cased-distilled-squad"
 
 
 
 
5
  st.title('Question Answering example')
 
 
 
6
  st.subheader('1. A simple question (extractive, closed domain)')
 
 
 
 
 
 
 
 
7
  response = req.request(WIKI_METHOD, f'{WIKI_URL}{WIKI_QUERY}{WIKI_BERT}')
8
  resp_json = json.loads(response.content.decode("utf-8"))
9
  wiki_bert = resp_json['query']['pages']['62026514']['extract']
10
+ paragraph = wiki_bert
11
+ par_text = 'Paragraph used for QA (you can also edit, or copy/paste new content)'
12
+ written_passage = st.text_area(par_text, paragraph, height=250)
 
 
 
 
13
  if written_passage:
14
  paragraph = written_passage
15
+ question = 'How many attention heads does Bert have?' # question = 'How many languages does bert understand?'
16
+ query_text = 'Question used for QA (you can also edit, and experiment with the answers)'
17
+ written_question = st.text_input(query_text, question)
 
 
 
 
 
18
  if written_question:
19
  question = written_question
20
+ QA_URL = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"; QA_METHOD = 'POST'
 
 
 
 
21
  if st.button('Run QA inference (get answer prediction)'):
22
  if paragraph and question:
23
  inputs = {'question': question, 'context': paragraph}
24
  payload = json.dumps(inputs)
25
  prediction = req.request(QA_METHOD, QA_URL, data=payload)
26
  answer = json.loads(prediction.content.decode("utf-8"))
27
+ # >>> answer structure: # { "answer": "over 70", "score": 0.240, "start": 35, "end": 62 }
28
+ answer_dict = dict(answer) # st.write(answer_dict)
29
+ print(answer_dict)
30
+ if "answer" in answer_dict.keys():
31
+ answer_span, answer_score = answer_dict["answer"], answer_dict["score"]
32
+ st.write(f'Answer: **{answer_span}**')
33
+ start_par, stop_para = max(0, answer_dict["start"]-86), min(answer_dict["end"]+90, len(paragraph))
34
+ answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
35
+ st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {format(answer_score, ".3f")})')
36
+ st.write(f'Answer JSON: '); st.write(answer)
37
+ else:
38
+ try:
39
+ qa_result = pipe_exqa(question=question, context=paragraph)
40
+ except Exception as e:
41
+ qa_result = str(e)
42
+
43
+ if "answer" in qa_result.keys():
44
+ answer_span, answer_score = qa_result["answer"], qa_result["score"]
45
+ st.write(f'Answer: **{answer_span}**')
46
+ start_par, stop_para = max(0, qa_result["start"]-86), min(qa_result["end"]+90, len(paragraph))
47
+ answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**')
48
+ st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {format(answer_score, ".3f")})')
49
+
50
+ st.write(f'Answer JSON: '); st.write(qa_result)
51
  else:
52
+ st.write('Write some passage of text and a question'); st.stop()
53
+ # x = st.slider('Select a value'); st.write(x, 'squared is', x * x)