peter2000 committed on
Commit f75d001
1 Parent(s): 943de3e

Update appStore/keyword_search.py

Files changed (1):
  1. appStore/keyword_search.py +67 -136
appStore/keyword_search.py CHANGED
@@ -24,6 +24,43 @@ import numpy as np
 import tempfile
 import sqlite3
 
+#Haystack Components
+@st.cache(hash_funcs={"builtins.SwigPyObject": lambda _: None}, allow_output_mutation=True)
+def start_haystack(temp_name, file):
+    document_store = InMemoryDocumentStore()
+    documents = pre.load_document(temp_name, file)
+    documents_processed = pre.preprocessing(documents)
+    document_store.write_documents(documents_processed)
+    retriever = TfidfRetriever(document_store=document_store)
+    reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2-distilled", use_gpu=True)
+    pipeline = ExtractiveQAPipeline(reader, retriever)
+    return pipeline
+
+
+def ask_question(question, pipeline):
+    prediction = pipeline.run(query=question, params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}})
+    results = []
+    for answer in prediction["answers"]:
+        answer = answer.to_dict()
+        if answer["answer"]:
+            results.append(
+                {
+                    "context": "..." + answer["context"] + "...",
+                    "answer": answer["answer"],
+                    "relevance": round(answer["score"] * 100, 2),
+                    "offset_start_in_doc": answer["offsets_in_document"][0]["start"],
+                }
+            )
+        else:
+            results.append(
+                {
+                    "context": None,
+                    "answer": None,
+                    "relevance": round(answer["score"] * 100, 2),
+                }
+            )
+    return results
+
 def app():
 
     with st.container():
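For reference, the added start_haystack/ask_question pair is the standard Haystack extractive QA flow. A minimal standalone sketch, assuming Haystack 1.x import paths; the model name and top_k budgets come from the diff, while the sample documents and query are invented:

    from haystack.document_stores import InMemoryDocumentStore
    from haystack.nodes import TfidfRetriever, FARMReader
    from haystack.pipelines import ExtractiveQAPipeline

    # Toy corpus standing in for the preprocessed upload (invented content).
    document_store = InMemoryDocumentStore()
    document_store.write_documents([
        {"content": "Floods in 2021 displaced thousands of people along the river."},
        {"content": "The report also covers drought mitigation and water storage."},
    ])

    retriever = TfidfRetriever(document_store=document_store)
    reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2-distilled", use_gpu=False)
    pipeline = ExtractiveQAPipeline(reader, retriever)

    # Same budgets as the app: 10 retrieved passages, 5 extracted answers.
    prediction = pipeline.run(
        query="What did the floods do?",
        params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 5}},
    )
    for answer in prediction["answers"]:
        print(answer.answer, answer.score)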
@@ -58,151 +95,45 @@ def app():
         st.write("Filename: ", file.name)
 
         # load document
-        docs = pre.load_document(temp.name, file)
+        pipeline = start_haystack(temp.name, file)
+        #docs = pre.load_document(temp.name, file)
 
         # preprocess document
-        haystackDoc, dataframeDoc, textData, paraList = clean.preprocessing(docs)
+        #haystackDoc, dataframeDoc, textData, paraList = clean.preprocessing(docs)
 
-        # testing
-        # st.write(len(all_text))
-        # for i in par_list:
-        #     st.write(i)
+
 
-        keyword = st.text_input("Please enter here what you want to search, we will look for similar context in the document.",
+        question = st.text_input("Please enter your question here, we will look for the answer in the document.",
                                 value="floods",)
 
-        @st.cache(allow_output_mutation=True)
-        def load_sentenceTransformer(name):
-            return SentenceTransformer(name)
-
-        bi_encoder = load_sentenceTransformer('msmarco-distilbert-cos-v5')  # multi-qa-MiniLM-L6-cos-v1
-        bi_encoder.max_seq_length = 64  # truncate long passages to 64 tokens
-        top_k = 32
-
-        #@st.cache(allow_output_mutation=True)
-        #def load_crossEncoder(name):
-        #    return CrossEncoder(name)
-
-        # cross_encoder = load_crossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
-        document_embeddings = bi_encoder.encode(paraList, convert_to_tensor=True, show_progress_bar=False)
-
-        def bm25_tokenizer(text):
-            tokenized_doc = []
-            for token in text.lower().split():
-                token = token.strip(string.punctuation)
-                if len(token) > 0 and token not in _stop_words.ENGLISH_STOP_WORDS:
-                    tokenized_doc.append(token)
-            return tokenized_doc
-
-        def bm25TokenizeDoc(paraList):
-            tokenized_corpus = []
-            for passage in tqdm(paraList):
-                if len(passage.split()) > 256:
-                    temp = " ".join(passage.split()[:256])
-                    tokenized_corpus.append(bm25_tokenizer(temp))
-                    temp = " ".join(passage.split()[256:])
-                    tokenized_corpus.append(bm25_tokenizer(temp))
-                else:
-                    tokenized_corpus.append(bm25_tokenizer(passage))
-            return tokenized_corpus
-
-        tokenized_corpus = bm25TokenizeDoc(paraList)
-
-        document_bm25 = BM25Okapi(tokenized_corpus)
-
-        def search(keyword):
-            ##### BM25 search (lexical search) #####
-            bm25_scores = document_bm25.get_scores(bm25_tokenizer(keyword))
-            top_n = np.argpartition(bm25_scores, -10)[-10:]
-            bm25_hits = [{'corpus_id': idx, 'score': bm25_scores[idx]} for idx in top_n]
-            bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)
-
-            ##### Semantic search #####
-            # Encode the query using the bi-encoder and find potentially relevant passages
-            #query = "Does document contain {} issues ?".format(keyword)
-            question_embedding = bi_encoder.encode(keyword, convert_to_tensor=True)
-
-            hits = util.semantic_search(question_embedding, document_embeddings, top_k=top_k)
-            hits = hits[0]  # get the hits for the first query
-
-            ##### Re-ranking #####
-            # Now, score all retrieved passages with the cross-encoder
-            #cross_inp = [[query, paraList[hit['corpus_id']]] for hit in hits]
-            #cross_scores = cross_encoder.predict(cross_inp)
-
-            # Sort results by the cross-encoder scores
-            #for idx in range(len(cross_scores)):
-            #    hits[idx]['cross-score'] = cross_scores[idx]
-
-            return bm25_hits, hits
-
-        if st.button("Find them."):
-            bm25_hits, hits = search(keyword)
-
-            st.markdown("""
-            We provide 2 kinds of results: the 'lexical search' and the semantic search.
-            """)
-            # In the semantic search part we provide two kinds of results, one with only the Retriever (Bi-Encoder) and the other with the ReRanker (Cross-Encoder)
-            st.markdown("Top few lexical search (BM25) hits")
-            for hit in bm25_hits[0:5]:
-                if hit['score'] > 0.00:
-                    st.write("\t Score: {:.3f}: \t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
-
-            # st.table(bm25_hits[0:3])
-
-            st.markdown("\n-------------------------\n")
-            st.markdown("Top few Bi-Encoder Retrieval hits")
-
-            hits = sorted(hits, key=lambda x: x['score'], reverse=True)
-            for hit in hits[0:5]:
-                # if hit['score'] > 0.45:
-                st.write("\t Score: {:.3f}: \t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
-            #st.table(hits[0:3])
-
-            #st.markdown("-------------------------")
-
-            #hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
-            #st.markdown("Top few Cross-Encoder Re-ranker hits")
-            #for hit in hits[0:3]:
-            #    st.write("\t Score: {:.3f}: \t{}".format(hit['cross-score'], paraList[hit['corpus_id']].replace("\n", " ")))
-            #st.table(hits[0:3])
-
-            #for hit in bm25_hits[0:3]:
-            #    print("\t{:.3f}\t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
-
-            # Output of top-5 hits from bi-encoder
-            #print("\n-------------------------\n")
-            #print("Top-3 Bi-Encoder Retrieval hits")
-            #hits = sorted(hits, key=lambda x: x['score'], reverse=True)
-            #for hit in hits[0:3]:
-            #    print("\t{:.3f}\t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
-
-            # Output of top-5 hits from re-ranker
-            #print("\n-------------------------\n")
-            #print("Top-3 Cross-Encoder Re-ranker hits")
-            #hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
-            #for hit in hits[0:3]:
-            #    print("\t{:.3f}\t{}".format(hit['cross-score'], paraList[hit['corpus_id']].replace("\n", " ")))
+        if st.button("Find them."):
+            with st.spinner("👑 Performing semantic search on"):  #+file.name+"..."):
+                try:
+                    msg = 'Asked ' + question
+                    logging.info(msg)
+                    st.session_state.results = ask_question(question, pipeline)
+                except Exception as e:
+                    logging.exception(e)
+
+        if "results" in st.session_state and st.session_state.results:
+            st.write('## Top Results')
+            for count, result in enumerate(st.session_state.results):
+                if result["answer"]:
+                    answer, context = result["answer"], result["context"]
+                    start_idx = context.find(answer)
+                    end_idx = start_idx + len(answer)
+                    st.write(
+                        markdown(context[:start_idx] + str(annotation(body=answer, label="ANSWER", background="#964448", color='#ffffff')) + context[end_idx:]),
+                        unsafe_allow_html=True,
+                    )
+                    st.markdown(f"**Relevance:** {result['relevance']}")
+                else:
+                    st.info(
+                        "🤔    Haystack is unsure whether any of the documents contain an answer to your question. Try to reformulate it!"
+                    )
+
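For contrast, the retrieval logic removed above condenses to BM25 lexical scoring plus bi-encoder semantic search. A sketch using rank_bm25 and sentence-transformers as the old code did, with a plain whitespace tokenizer in place of the removed bm25_tokenizer and invented sample passages:

    from rank_bm25 import BM25Okapi
    from sentence_transformers import SentenceTransformer, util

    paraList = [
        "Floods in 2021 displaced thousands of people along the river.",
        "The report also covers drought mitigation and water storage.",
    ]

    # Lexical search: score every passage against the query terms.
    tokenized_corpus = [p.lower().split() for p in paraList]
    document_bm25 = BM25Okapi(tokenized_corpus)
    bm25_scores = document_bm25.get_scores("floods".split())

    # Semantic search: embed passages and query, rank by cosine similarity.
    bi_encoder = SentenceTransformer("msmarco-distilbert-cos-v5")
    document_embeddings = bi_encoder.encode(paraList, convert_to_tensor=True)
    question_embedding = bi_encoder.encode("floods", convert_to_tensor=True)
    hits = util.semantic_search(question_embedding, document_embeddings, top_k=2)[0]

    print(bm25_scores)  # per-passage BM25 scores
    print(hits)         # [{'corpus_id': ..., 'score': ...}, ...]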
 
 
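The result rendering above depends on two names this hunk does not define, markdown and annotation. A minimal sketch of that highlighting step, assuming they come from the markdown and st-annotated-text packages (presumably imported elsewhere in the file); the context and answer strings are invented:

    import streamlit as st
    from markdown import markdown            # pip install markdown
    from annotated_text import annotation    # pip install st-annotated-text

    context = "... heavy floods displaced thousands of people along the river ..."
    answer = "displaced thousands of people"

    start_idx = context.find(answer)
    end_idx = start_idx + len(answer)

    # Wrap the answer span in a colored ANSWER label, render the rest as HTML.
    st.write(
        markdown(
            context[:start_idx]
            + str(annotation(body=answer, label="ANSWER", background="#964448", color="#ffffff"))
            + context[end_idx:]
        ),
        unsafe_allow_html=True,
    )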