Chananchida committed on
Commit
fd37bcf
1 Parent(s): 3b14c3f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -27
app.py CHANGED
@@ -125,25 +125,8 @@ def predict_faiss(model, tokenizer, embedding_model, df, question, index):
125
  }
126
  return output
127
 
128
- def predict(model, tokenizer, embedding_model, df, question, index):
129
- t = time.time()
130
- question = question.strip()
131
- question_vector = get_embeddings(embedding_model, question)
132
- question_vector = prepare_sentences_vector([question_vector])
133
- distances,indices = faiss_search(index, question_vector)
134
 
135
- # Answer = model_pipeline(model, tokenizer, df['Question'][indices[0][0]], df['Context'][indices[0][0]])
136
- Answer = model_pipeline(model, tokenizer, question, df['Context'][indices[0][0]])
137
- _time = time.time() - t
138
- output = {
139
- "user_question": question,
140
- "answer": Answer,
141
- "totaltime": round(_time, 3),
142
- "distance": round(distances[0][0], 4)
143
- }
144
- return Answer
145
-
146
- def predict_test(model, tokenizer, embedding_model, df, question, index): # sent_tokenize pythainlp
147
  t = time.time()
148
  question = question.strip()
149
  question_vector = get_embeddings(embedding_model, question)
@@ -206,12 +189,9 @@ def highlight_text(text, start_index, end_index):
206
  highlighted_text += "</mark>"
207
  return highlighted_text
208
 
209
- def chat_interface_before(question, history):
210
- response = predict(model, tokenizer, embedding_model, df, question, index)
211
- return response
212
 
213
- def chat_interface_after(question, history):
214
- response = predict_test(model, tokenizer, embedding_model, df, question, index)
215
  highlighted_answer = highlight_text(response["answer"], response["highlight_start"], response["highlight_end"])
216
  return highlighted_answer
217
 
@@ -223,13 +203,10 @@ examples=[
223
  'อยากทราบความถี่ในการดึงข้อมูลของ DXT360 บน Twitter',
224
  # 'ช่องทางติดตามข่าวสารของเรา',
225
  ]
226
- demo_before = gr.ChatInterface(fn=chat_interface_before,
227
- examples=examples)
228
 
229
- demo_after = gr.ChatInterface(fn=chat_interface_after,
230
  examples=examples)
231
 
232
- interface = gr.TabbedInterface([demo_before, demo_after], ["Before", "After"])
233
 
234
  if __name__ == "__main__":
235
  # Load your model, tokenizer, data, and index here...
 
125
  }
126
  return output
127
 
 
 
 
 
 
 
128
 
129
+ def predict(model, tokenizer, embedding_model, df, question, index): # sent_tokenize pythainlp
 
 
 
 
 
 
 
 
 
 
 
130
  t = time.time()
131
  question = question.strip()
132
  question_vector = get_embeddings(embedding_model, question)
 
189
  highlighted_text += "</mark>"
190
  return highlighted_text
191
 
 
 
 
192
 
193
+ def chat_interface(question, history):
194
+ response = predict(model, tokenizer, embedding_model, df, question, index)
195
  highlighted_answer = highlight_text(response["answer"], response["highlight_start"], response["highlight_end"])
196
  return highlighted_answer
197
 
 
203
  'อยากทราบความถี่ในการดึงข้อมูลของ DXT360 บน Twitter',
204
  # 'ช่องทางติดตามข่าวสารของเรา',
205
  ]
 
 
206
 
207
+ interface = gr.ChatInterface(fn=chat_interface,
208
  examples=examples)
209
 
 
210
 
211
  if __name__ == "__main__":
212
  # Load your model, tokenizer, data, and index here...