import os
import json

import requests
import streamlit as st
from transformers import pipeline
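# Elasticsearch endpoint, read from the environment (for example "http://localhost:9200");
# the requests below target a "document" index with a "content" field.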
ES_URL = os.environ.get("ES_URL")

question = 'What is the capital of Netherlands?'
query_text = 'Query used for search or question answering (you can also edit it and experiment with the answers)'
written_question = st.text_input(query_text, question)
if written_question:
    question = written_question

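# Semantic question answering: retrieve candidate passages from Elasticsearch with a
# more_like_this query, then extract answers from the top hits with a QA model.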
if st.button('Run semantic question answering'):
    if question:
        try:
            url = f"{ES_URL}/document/_search?pretty"
            # payload = json.dumps({"query":{"match":{"content":"moldova"}}})
            payload = json.dumps({"query": {
                "more_like_this": { "like": question, # "What is the capital city of Netherlands?"
                "fields": ["content"], "min_term_freq": 1.9, "min_doc_freq": 4, "max_query_terms": 50
            }}})
            headers = {'Content-Type': 'application/json'}
            response = requests.request("GET", url, headers=headers, data=payload) 
            kws_result = response.json() # print(response.text)
            
        except Exception as e:
            st.error(f'Search request failed: {e}')
            st.stop()

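        # Keep the top 5 hits: short snippets for display, and up to 3 longer paragraphs for answer extraction.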
        top_5_hits = kws_result['hits']['hits'][:5] # print("First 5 results:")
        top_5_text = [{'text': hit['_source']['content'][:500],
                       'confidence': hit['_score']} for hit in top_5_hits ]
        top_3_para = [hit['_source']['content'][:5000] for hit in top_5_hits[:3]]
        # top_5_para = [hit['_source']['content'][:5000] for hit in top_5_hits]

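        # Run an extractive question-answering pipeline over each of the top 3 retrieved paragraphs.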
        DPR_MODEL = "deepset/roberta-base-squad2" #, model="distilbert-base-cased-distilled-squad"
        pipe_exqa = pipeline("question-answering", model=DPR_MODEL) 
        # qa_results = [pipe_exqa(question=question, context=paragraph) for paragraph in top_5_para]
        qa_results = [pipe_exqa(question=question, context=paragraph) for paragraph in top_3_para]
        
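        # Show each extracted answer with its surrounding context and a color-coded confidence score.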
        for i, qa_result in enumerate(qa_results):
            if "answer" in qa_result.keys(): # and qa_result["answer"] is not ""
                answer_span, answer_score = qa_result["answer"], qa_result["score"]
                st.write(f'Answer: **{answer_span}**')
                paragraph = top_3_para[i]
                start_para, stop_para = max(0, qa_result["start"] - 86), min(qa_result["end"] + 90, len(paragraph))
                answer_context = paragraph[start_para:stop_para].replace(answer_span, f'**{answer_span}**')
                qa_result.update({'context': answer_context, 'paragraph': paragraph})
                st.write(f'Answer context (and score): ... _{answer_context}_ ...') 
                color_string = 'green' if answer_score > 0.65 else 'orange' if answer_score > 0.45 else 'red'
                # st.markdown("""This text is :red[colored red]""")
                st.markdown(f'(answer confidence: :{color_string}[{format(answer_score, ".3f")}])')            

        st.write(f'Answers JSON: '); st.write(qa_results) 

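        # List the raw search results (snippets) with their Elasticsearch relevance scores.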
        for i, doc_hit in enumerate(top_5_text):
            st.subheader(f'Search result #{i+1} (and score):')
            st.write(f'<em>{doc_hit["text"]}...</em>', unsafe_allow_html = True)
            st.markdown(f'> (*confidence score*: **{format(doc_hit["confidence"], ".3f")}**)')
            
        st.write(f'Search results JSON: '); st.write(top_5_text) 
    else:
        st.write('Write a question to run semantic question answering'); st.stop()

# question_similarity = [ (hit['_score'], hit['_source']['content'][:200])
#     for hit in result_first_two_hits ] # print(question_similarity)

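# Keyword search only: show the more_like_this hits and their scores without running the QA model.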
if st.button('Run syntactic keyword search'):
    if question:
        try:
            url = f"{ES_URL}/document/_search?pretty"
            # payload = json.dumps({"query":{"match":{"content":"moldova"}}})
            payload = json.dumps({"query": {
                "more_like_this": { "like": question, # "What is the capital city of Netherlands?"
                "fields": ["content"], "min_term_freq": 1.9, "min_doc_freq": 4, "max_query_terms": 50
            }}})
            headers = {'Content-Type': 'application/json'}
            response = requests.request("GET", url, headers=headers, data=payload) 
            kws_result = response.json() # print(response.text)
            # qa_result = pipe_exqa(question=question, context=paragraph)
            
        except Exception as e:
            st.error(f'Search request failed: {e}')
            st.stop()

        top_5_hits = kws_result['hits']['hits'][:5] # print("First 5 results:")
        top_5_text = [{'text': hit['_source']['content'][:500],
                       'confidence': hit['_score']} for hit in top_5_hits ]

        for i, doc_hit in enumerate(top_5_text):
            st.subheader(f'Search result #{i+1} (and score):')
            st.write(f'<em>{doc_hit["text"]}...</em>', unsafe_allow_html = True)
            st.markdown(f'> (*confidence score*: **{format(doc_hit["confidence"], ".3f")}**)')
            
        st.write('Search results JSON: '); st.write(top_5_text)
    else:
        st.write('Write a query to submit your keyword search'); st.stop()