Timjo88 committed on
Commit
a4b8171
•
1 Parent(s): 91531e7

Create new file

Files changed (1)
  1. app.py +98 -0
app.py ADDED
@@ -0,0 +1,98 @@
import streamlit as st
import pandas as pd

from haystack.schema import Answer
from haystack.document_stores import InMemoryDocumentStore
from haystack.pipelines import FAQPipeline  # haystack v1.x module path (was haystack.pipeline)
from haystack.nodes import EmbeddingRetriever  # haystack v1.x module path (was haystack.retriever.dense)
import logging

from annotated_text import annotation  # st-annotated-text, used to highlight the answer in the results
from markdown import markdown  # used to render the highlighted context as HTML

# Haystack function calls - streamlit structure from Tuana GoT QA Haystack demo
@st.cache(hash_funcs={"builtins.SwigPyObject": lambda _: None}, allow_output_mutation=True)  # use streamlit cache
def start_haystack():
    document_store = InMemoryDocumentStore(index="document", embedding_field='embedding', embedding_dim=384, similarity='cosine')
    retriever = EmbeddingRetriever(document_store=document_store, embedding_model='sentence-transformers/all-MiniLM-L6-v2', use_gpu=False, top_k=1)
    load_data_to_store(document_store, retriever)
    pipeline = FAQPipeline(retriever=retriever)
    return pipeline

def load_data_to_store(document_store, retriever):
    df = pd.read_csv('monopoly_qa-v1.csv')
    questions = list(df.Question)
    df['embedding'] = retriever.embed_queries(texts=questions)
    df = df.rename(columns={"Question": "content", "Answer": "answer"})
    df.drop('link to source (to prevent duplicate sources)', axis=1, inplace=True)

    dicts = df.to_dict(orient="records")
    document_store.write_documents(dicts)

pipeline = start_haystack()

# Streamlit App section

def set_state_if_absent(key, value):
    if key not in st.session_state:
        st.session_state[key] = value

def reset_results(*args):
    # clear stale results whenever the query text changes
    st.session_state.results = None

set_state_if_absent("question", "how much money should each player have at the beginning?")
set_state_if_absent("results", None)


st.markdown("""
Haystack FAQ Semantic Search Pipeline
""", unsafe_allow_html=True)

question = st.text_input("", value=st.session_state.question, max_chars=100, on_change=reset_results)

def ask_question(question):
    # FAQPipeline only contains a Retriever node, so only Retriever params are passed
    prediction = pipeline.run(query=question, params={"Retriever": {"top_k": 10}})
    results = []
    for answer in prediction["answers"]:
        answer = answer.to_dict()
        if answer["answer"]:
            results.append(
                {
                    "context": "..." + answer["context"] + "...",
                    "answer": answer["answer"],
                    "relevance": round(answer["score"] * 100, 2),
                    # FAQ answers may not carry document offsets, so guard against None
                    "offset_start_in_doc": answer["offsets_in_document"][0]["start"] if answer["offsets_in_document"] else 0,
                }
            )
        else:
            results.append(
                {
                    "context": None,
                    "answer": None,
                    "relevance": round(answer["score"] * 100, 2),
                }
            )
    return results

if question:
    with st.spinner("👑    Performing semantic search on royal scripts..."):
        try:
            msg = 'Asked ' + question
            logging.info(msg)
            st.session_state.results = ask_question(question)
        except Exception as e:
            logging.exception(e)


if st.session_state.results:
    st.write('## Top Results')
    for count, result in enumerate(st.session_state.results):
        if result["answer"]:
            answer, context = result["answer"], result["context"]
            start_idx = context.find(answer)
            end_idx = start_idx + len(answer)
            st.write(
                markdown(context[:start_idx] + str(annotation(body=answer, label="ANSWER", background="#964448", color='#ffffff')) + context[end_idx:]),
                unsafe_allow_html=True,
            )
            st.markdown(f"**Relevance:** {result['relevance']}")
        else:
            st.info(
                "🤔    Haystack is unsure whether any of the documents contain an answer to your question. Try to reformulate it!"
            )
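
Note on the data file: load_data_to_store reads monopoly_qa-v1.csv and expects at least the columns Question, Answer, and "link to source (to prevent duplicate sources)"; the Question column is embedded by the retriever and the last column is dropped. The commit does not include the CSV itself, so the sketch below only shows a stand-in file with matching column names for local testing - the row values are invented for illustration.

import pandas as pd

# Illustrative stand-in for monopoly_qa-v1.csv: column names match what app.py reads,
# the row contents are made up and not taken from the real dataset.
faq = pd.DataFrame(
    {
        "Question": ["How much money does each player start with?"],
        "Answer": ["Each player starts with a fixed amount of Monopoly money dealt out by the banker."],
        "link to source (to prevent duplicate sources)": ["https://example.com/monopoly-rules"],
    }
)
faq.to_csv("monopoly_qa-v1.csv", index=False)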