import json
import os

import requests
import streamlit as st

# Base URL of the Elasticsearch instance, e.g. "http://localhost:9200".
ES_URL = os.environ.get("ES_URL")
question = 'What is the capital of Netherlands?'
query_label = 'Query for the keyword search (edit it to experiment with different responses)'
written_question = st.text_input(query_label, question)
if written_question:
    question = written_question
if st.button('Run keyword search'):
    if question:
        try:
            url = f"{ES_URL}/document/_search?pretty"
            # Example of a plain match query instead of more_like_this:
            # payload = json.dumps({"query": {"match": {"content": "moldova"}}})
            payload = json.dumps({
                "query": {
                    "more_like_this": {
                        "like": question,  # e.g. "What is the capital city of Netherlands?"
                        "fields": ["content"],
                        "min_term_freq": 1,    # minimum term frequency (an integer)
                        "min_doc_freq": 4,     # ignore terms appearing in fewer documents
                        "max_query_terms": 50,
                    }
                }
            })
            headers = {'Content-Type': 'application/json'}
            # Elasticsearch accepts a request body on GET; POST works as well.
            response = requests.request("GET", url, headers=headers, data=payload)
            kws_result = response.json()
            # qa_result = pipe_exqa(question=question, context=paragraph)
        except Exception as e:
            st.error(f'Keyword search failed: {e}')
            st.stop()

        top_5_hits = kws_result['hits']['hits'][:5]
        top_5_text = [{'text': hit['_source']['content'][:500],
                       'confidence': hit['_score']} for hit in top_5_hits]

        for i, doc_hit in enumerate(top_5_text):
            st.subheader(f'Search result #{i + 1} (and score):')
            st.write(f'<em>{doc_hit["text"]}...</em>', unsafe_allow_html=True)
            st.markdown(f'> (*confidence score*: **{doc_hit["confidence"]:.3f}**)')

        st.write('Answer JSON:')
        st.write(top_5_text)
    else:
        st.write('Write a query to submit your keyword search')
        st.stop()
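
# --- Seeding the index (sketch) ---
# The query above assumes an Elasticsearch index named "document" with a text
# field "content". A minimal sketch of how one document could be indexed with
# default mappings; the helper name and document id are illustrative only,
# and the function is defined but never called by the app:
def index_sample_document(doc_id: int, content: str) -> dict:
    """Index a single document into the `document` index (illustrative sketch)."""
    response = requests.put(
        f"{ES_URL}/document/_doc/{doc_id}",
        headers={'Content-Type': 'application/json'},
        data=json.dumps({"content": content}),
    )
    return response.json()

# Example usage (commented out so the app's behavior is unchanged):
# index_sample_document(1, "Amsterdam is the capital of the Netherlands.")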
# if "answer" in qa_result.keys(): | |
# answer_span, answer_score = qa_result["answer"], qa_result["score"] | |
# st.write(f'Answer: **{answer_span}**') | |
# start_par, stop_para = max(0, qa_result["start"]-86), min(qa_result["end"]+90, len(paragraph)) | |
# answer_context = paragraph[start_par:stop_para].replace(answer_span, f'**{answer_span}**') | |
# st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {format(answer_score, ".3f")})') | |
# question_similarity = [ (hit['_score'], hit['_source']['content'][:200]) | |
# for hit in result_first_two_hits ] # print(question_similarity) | |
# top_hit = result['hits']['hits'][0] | |
# context = top_hit['_source']['content'] | |
# # context = r" Extractive Question Answering is the task of extracting | |
# # an answer from a text given a question. An example of a question | |
# # answering dataset is the SQuAD dataset, which is entirely based | |
# # on that task. If you would like to fine-tune a model on a SQuAD task, | |
# # you may leverage the `examples/pytorch/question-answering/run_squad.py` script." | |
# question = input # "What is extractive question answering?" | |
# # "What is a good example of a question answering dataset?" | |
# print(question) | |
# context = context[:5000] | |
# print(context) | |
# try: | |
# qa_result = pipe_exqa(question=question, context=context) | |
# except Exception as e: | |
# return {"output": str(e)} | |
# return {"output": str(qa_result)} | |
# answer = qa_result['answer'] | |
# score = round(qa_result['score'], 4) | |
# span = f"start: {qa_result['start']}, end: {qa_result['end']}" | |
# # st.write(answer); st.write(f"score: {score}"); st.write(f"span: {span}") | |
# output = f"{str(answer)} \n {str(score)} \n {str(span)}" | |
# return {"output": output} or {"output": str(question_similarity)} or result or {"Hello": "World!"} | |