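"""Streamlit demo: extractive question answering over program-module documentation.

Uses a Haystack v1.x pipeline (BM25Retriever -> FARMReader) on top of an
InMemoryDocumentStore populated from a pickled list of documents.
"""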
import json
import logging
import os

import dill
import streamlit as st
from annotated_text import annotation
from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import FARMReader, BM25Retriever
from haystack.pipelines import ExtractiveQAPipeline
from markdown import markdown
from streamlit_lottie import st_lottie

st.set_page_config(page_title="QA-project", page_icon="πŸ“‡")
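# Disable tokenizers parallelism to avoid fork-related warnings under Streamlit.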
os.environ['TOKENIZERS_PARALLELISM'] = "false"
DATA_DIR = './dataset'
DOCS_PATH = os.path.join(DATA_DIR, 'all_docs_36838.pkl')
LOTTIE_PATH = './img/108423-search-for-documents.json'
PROG_TITLE = "QA project Demo"
# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = os.getenv("DEFAULT_QUESTION_AT_STARTUP", "What's the capital of France?")
DEFAULT_ANSWER_AT_STARTUP = os.getenv("DEFAULT_ANSWER_AT_STARTUP", "Paris")

def place_header_center(text, lottie_data):
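    # Three-column layout: Lottie animation on the left, page title in the center.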
    cgap1, ctitle, cgap2 = st.columns([3, 3, 1])
    with cgap1:
        st_lottie(lottie_data, height=150)
    with ctitle:
        st.title(text)
    with cgap2:
        st.write("")


@st.experimental_memo
def get_lottie(path):
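    # Memoized so the Lottie animation JSON is read and parsed only once.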
    with open(path, 'r', errors='ignore') as f:
        lottie_data = json.load(f)
        return lottie_data


def load_and_write_data(document_store):
    # The dataset is a pickled (dill) list of Haystack Document objects.
    with open(DOCS_PATH, "rb") as f:
        docs = dill.load(f)

    document_store.write_documents(docs)


# Haystack components, cached so the document store, retriever, and reader
# are built only once instead of on every Streamlit rerun
@st.cache(allow_output_mutation=True)
def start_haystack():
    document_store = InMemoryDocumentStore(use_bm25=True)  # BM25 index required by BM25Retriever
    load_and_write_data(document_store)
    retriever = BM25Retriever(document_store=document_store)
    reader = FARMReader(model_name_or_path="mrm8488/RuPERTa-base-finetuned-squadv1",
                        use_gpu=False,
                        num_processes=1)
    return ExtractiveQAPipeline(reader, retriever)

pipeline = start_haystack()


def set_state_if_absent(key, value):
    if key not in st.session_state:
        st.session_state[key] = value

set_state_if_absent("question", DEFAULT_QUESTION_AT_STARTUP)
set_state_if_absent("answer", DEFAULT_ANSWER_AT_STARTUP)
set_state_if_absent("results", None)


def reset_results(*args):
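    # Invoked via on_change of the question input: drop stale results.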
    st.session_state.results = None

# Streamlit App
lottie_data = get_lottie(LOTTIE_PATH)
place_header_center(PROG_TITLE, lottie_data)

st.markdown("""
This QA demo uses a [Haystack Extractive QA Pipeline](https://haystack.deepset.ai/components/ready-made-pipelines#extractiveqapipeline) with
an [InMemoryDocumentStore](https://haystack.deepset.ai/components/document-store) that contains documents about the different program modules.
Go ahead and ask questions about the program modules' functionality!
""", unsafe_allow_html=True)

question = st.text_input("", value=st.session_state.question, max_chars=100, on_change=reset_results)


def ask_question(question):
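    # The retriever narrows the search to the top 10 documents; the reader extracts up to 5 answer spans.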
    prediction = pipeline.run(query=question, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}})
    results = []
    for answer in prediction["answers"]:
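        # Answer.to_dict() exposes "answer", "context", "score", and "offsets_in_document".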
        answer = answer.to_dict()
        if answer["answer"]:
            results.append(
                {
                    "context": "..." + answer["context"] + "...",
                    "answer": answer["answer"],
                    "relevance": round(answer["score"] * 100, 2),
                    "offset_start_in_doc": answer["offsets_in_document"][0]["start"],
                }
            )
        else:
            results.append(
                {
                    "context": None,
                    "answer": None,
                    "relevance": round(answer["score"] * 100, 2),
                }
            )
    return results


if question:
    with st.spinner("πŸ•°οΈ    Performing semantic search on program modules..."):
        try:
            logging.info("Asked %s", question)
            st.session_state.results = ask_question(question)
        except Exception as e:
            logging.exception(e)
            st.error("πŸ™‡    An error occurred while searching. Please check the logs.")


if st.session_state.results:
    st.write('## Top Results')
    for result in st.session_state.results:
        if result["answer"]:
            # Highlight the extracted answer span inside its surrounding context.
            answer, context = result["answer"], result["context"]
            start_idx = context.find(answer)
            end_idx = start_idx + len(answer)
            st.write(
                markdown(context[:start_idx] + str(annotation(body=answer, label="ANSWER", background="#ff700f", color='#ffffff')) + context[end_idx:]),
                unsafe_allow_html=True,
            )
            st.markdown(f"**Relevance:** {result['relevance']}")
        else:
            st.info(
                "πŸ€”    Haystack is unsure whether any of the documents contain an answer to your question. Try to reformulate it!"
            )