sunwaee committed
Commit 8718841
Parent: c652473

added spinners to make it less boring

Files changed (1)
  1. app.py +27 -22
app.py CHANGED
@@ -90,31 +90,34 @@ if task == 'Questions/Answers Generation':
         sent_tokenized = nltk.sent_tokenize(inputs)
         res = {}

-        # Iterate over sentences
-        for sentence in sent_tokenized:
-            predictions = model.multitask([sentence], max_length=512)
-            questions, answers, answers_bis = predictions['questions'], predictions['answers'], predictions[
-                'answers_bis']
-
-            # Build answer dict
-            content = {}
-            for question, answer, answer_bis in zip(questions[0], answers[0], answers_bis[0]):
-                content[question] = {'answer (extracted)': answer, 'answer (generated)': answer_bis}
-            res[sentence] = content
+        with st.spinner('Please wait while the inputs are being processed...'):
+            # Iterate over sentences
+            for sentence in sent_tokenized:
+                predictions = model.multitask([sentence], max_length=512)
+                questions, answers, answers_bis = predictions['questions'], predictions['answers'], predictions[
+                    'answers_bis']
+
+                # Build answer dict
+                content = {}
+                for question, answer, answer_bis in zip(questions[0], answers[0], answers_bis[0]):
+                    content[question] = {'answer (extracted)': answer, 'answer (generated)': answer_bis}
+                res[sentence] = content

         # Answer area
         st.write(res)

     else:
-        # Prediction
-        predictions = model.multitask([inputs], max_length=512)
-        questions, answers, answers_bis = predictions['questions'], predictions['answers'], predictions['answers_bis']
+        with st.spinner('Please wait while the inputs are being processed...'):
+            # Prediction
+            predictions = model.multitask([inputs], max_length=512)
+            questions, answers, answers_bis = predictions['questions'], predictions['answers'], predictions['answers_bis']
+
+            # Answer area
+            zip = zip(questions[0], answers[0], answers_bis[0])
+            content = {}
+            for question, answer, answer_bis in zip:
+                content[question] = {'answer (extracted)': answer, 'answer (generated)': answer_bis}

-        # Answer area
-        zip = zip(questions[0], answers[0], answers_bis[0])
-        content = {}
-        for question, answer, answer_bis in zip:
-            content[question] = {'answer (extracted)': answer, 'answer (generated)': answer_bis}
         st.write(content)

 elif task == 'Question Answering':
@@ -136,8 +139,9 @@ elif task == 'Question Answering':
     question = st.text_input('Question:', value="What forced Bohemond to retreat from his campaign? ")

     # Prediction
-    predictions = model.qa([{'question': question, 'context': inputs}], max_length=512)
-    answer = {question: predictions[0]}
+    with st.spinner('Please wait while the inputs are being processed...'):
+        predictions = model.qa([{'question': question, 'context': inputs}], max_length=512)
+        answer = {question: predictions[0]}

     # Answer area
     st.write(answer)
@@ -173,7 +177,8 @@ elif task == 'Question Generation':
         answers.append(inputs[hl_index[i]: hl_index[i + 1] + 4].replace('<hl>', '').strip())

     # Prediction
-    predictions = model.qg(contexts, max_length=512)
+    with st.spinner('Please wait while the inputs are being processed...'):
+        predictions = model.qg(contexts, max_length=512)

     # Answer area
     content = {}
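
For reference, each model call above is wrapped in Streamlit's st.spinner context manager, which shows a temporary "busy" message for as long as the wrapped block is running. The lines below are a minimal, self-contained sketch of that pattern, not code from this repository: slow_inference is a hypothetical stand-in for the model.multitask / model.qa / model.qg calls.

import time

import streamlit as st


def slow_inference(text: str) -> str:
    # Hypothetical placeholder for a slow model call.
    time.sleep(2)
    return text.upper()


inputs = st.text_area('Context:', value='Some input text.')

if st.button('Run'):
    # The spinner is visible only while the body of the `with` block executes,
    # so the page shows feedback instead of appearing frozen during inference.
    with st.spinner('Please wait while the inputs are being processed...'):
        result = slow_inference(inputs)

    # Answer area
    st.write(result)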