peter2000 committed
Commit dd124ec
1 Parent(s): ed1a990

Create new file

Files changed (1)
  1. appStore/keyword_search.py +208 -0
appStore/keyword_search.py ADDED
@@ -0,0 +1,208 @@
+ # set path
+ import glob, os, sys; sys.path.append('../udfPreprocess')
+
+ # import helpers
+ import udfPreprocess.docPreprocessing as pre
+ import udfPreprocess.cleaning as clean
+
+ # import needed libraries
+ import seaborn as sns
+ from pandas import DataFrame
+ from sentence_transformers import SentenceTransformer, CrossEncoder, util
+ # from keybert import KeyBERT
+ from transformers import pipeline
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import streamlit as st
+ import pandas as pd
+ from rank_bm25 import BM25Okapi
+ from sklearn.feature_extraction import _stop_words
+ import string
+ from tqdm.autonotebook import tqdm
+ import numpy as np
+
+ import tempfile
+ import sqlite3
+
+ def app():
+
+     with st.container():
+         st.markdown("<h1 style='text-align: center; color: black;'> Keyword Search</h1>", unsafe_allow_html=True)
+         st.write(' ')
+         st.write(' ')
+
+     with st.expander("ℹ️ - About this app", expanded=True):
+
+         st.write(
+             """
+             The *Keyword Search* app is an easy-to-use interface built in Streamlit for doing keyword search in policy documents - developed by GIZ Data and the Sustainable Development Solutions Network.
+             """
+         )
+
+         st.markdown("")
+
+     st.markdown("")
+     st.markdown("## 📌 Step One: Upload document ")
+
+     with st.container():
+
+         file = st.file_uploader('Upload PDF File', type=['pdf', 'docx', 'txt'])
+
+         if file is not None:
+
+
+             with tempfile.NamedTemporaryFile(mode="wb") as temp:
+                 bytes_data = file.getvalue()
+                 temp.write(bytes_data)
+
+                 st.write("Filename: ", file.name)
+
+                 # load document
+                 docs = pre.load_document(temp.name, file)
+
+                 # preprocess document
+                 haystackDoc, dataframeDoc, textData, paraList = clean.preprocessing(docs)
+
+                 # testing
+                 # st.write(len(all_text))
+                 # for i in par_list:
+                 #     st.write(i)
+
+                 keyword = st.text_input("Please enter what you want to search for; we will look for similar context in the document.",
+                                         value="floods",)
+
+                 @st.cache(allow_output_mutation=True)
+                 def load_sentenceTransformer(name):
+                     return SentenceTransformer(name)
+
+                 bi_encoder = load_sentenceTransformer('msmarco-distilbert-cos-v5')  # multi-qa-MiniLM-L6-cos-v1
+                 bi_encoder.max_seq_length = 64  # truncate long passages to 64 tokens
+                 top_k = 32
+
+                 # @st.cache(allow_output_mutation=True)
+                 # def load_crossEncoder(name):
+                 #     return CrossEncoder(name)
+
+                 # cross_encoder = load_crossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
+                 document_embeddings = bi_encoder.encode(paraList, convert_to_tensor=True, show_progress_bar=False)
+
+                 def bm25_tokenizer(text):
+                     tokenized_doc = []
+                     for token in text.lower().split():
+                         token = token.strip(string.punctuation)
+
+                         if len(token) > 0 and token not in _stop_words.ENGLISH_STOP_WORDS:
+                             tokenized_doc.append(token)
+                     return tokenized_doc
+
+                 def bm25TokenizeDoc(paraList):
+                     tokenized_corpus = []
+                     for passage in tqdm(paraList):
+                         if len(passage.split()) > 256:
+                             temp = " ".join(passage.split()[:256])
+                             tokenized_corpus.append(bm25_tokenizer(temp))
+                             temp = " ".join(passage.split()[256:])
+                             tokenized_corpus.append(bm25_tokenizer(temp))
+                         else:
+                             tokenized_corpus.append(bm25_tokenizer(passage))
+
+                     return tokenized_corpus
+
+                 tokenized_corpus = bm25TokenizeDoc(paraList)
+
+
+                 document_bm25 = BM25Okapi(tokenized_corpus)
+
+
+                 def search(keyword):
+                     ##### BM25 search (lexical search) #####
+                     bm25_scores = document_bm25.get_scores(bm25_tokenizer(keyword))
+                     top_n = np.argpartition(bm25_scores, -10)[-10:]
+                     bm25_hits = [{'corpus_id': idx, 'score': bm25_scores[idx]} for idx in top_n]
+                     bm25_hits = sorted(bm25_hits, key=lambda x: x['score'], reverse=True)
+
+                     ##### Semantic Search #####
+                     # Encode the query using the bi-encoder and find potentially relevant passages
+                     # query = "Does document contain {} issues ?".format(keyword)
+                     question_embedding = bi_encoder.encode(keyword, convert_to_tensor=True)
+
+                     hits = util.semantic_search(question_embedding, document_embeddings, top_k=top_k)
+                     hits = hits[0]  # Get the hits for the first query
+
+
+                     ##### Re-Ranking #####
+                     # Now, score all retrieved passages with the cross_encoder
+                     # cross_inp = [[query, paraList[hit['corpus_id']]] for hit in hits]
+                     # cross_scores = cross_encoder.predict(cross_inp)
+
+                     # Sort results by the cross-encoder scores
+                     # for idx in range(len(cross_scores)):
+                     #     hits[idx]['cross-score'] = cross_scores[idx]
+
+
+                     return bm25_hits, hits
+
+
+                 if st.button("Find them."):
+                     bm25_hits, hits = search(keyword)
+
+                     st.markdown("""
+                                 We provide two kinds of results: the lexical search (BM25) and the semantic search.
+                                 """)
+                     # In the semantic search part we provide two kinds of results: one with only the Retriever (Bi-Encoder) and the other with the Re-Ranker (Cross-Encoder)
+                     st.markdown("Top few lexical search (BM25) hits")
+                     for hit in bm25_hits[0:5]:
+                         if hit['score'] > 0.00:
+                             st.write("\t Score: {:.3f}: \t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
+
+
+
+
+
+                     # st.table(bm25_hits[0:3])
+
+                     st.markdown("\n-------------------------\n")
+                     st.markdown("Top few Bi-Encoder Retrieval hits")
+
+                     hits = sorted(hits, key=lambda x: x['score'], reverse=True)
+                     for hit in hits[0:5]:
+                         # if hit['score'] > 0.45:
+                         st.write("\t Score: {:.3f}: \t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
+                     # st.table(hits[0:3])
+
+                     # st.markdown("-------------------------")
+
+                     # hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
+                     # st.markdown("Top few Cross-Encoder Re-ranker hits")
+                     # for hit in hits[0:3]:
+                     #     st.write("\t Score: {:.3f}: \t{}".format(hit['cross-score'], paraList[hit['corpus_id']].replace("\n", " ")))
+                     # st.table(hits[0:3])
+
+
+
+
+
+                     # for hit in bm25_hits[0:3]:
+                     #     print("\t{:.3f}\t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
+
+
+
+
+
+
+
+                     # Output of top-3 hits from bi-encoder
+                     # print("\n-------------------------\n")
+                     # print("Top-3 Bi-Encoder Retrieval hits")
+                     # hits = sorted(hits, key=lambda x: x['score'], reverse=True)
+                     # for hit in hits[0:3]:
+                     #     print("\t{:.3f}\t{}".format(hit['score'], paraList[hit['corpus_id']].replace("\n", " ")))
+
+                     # Output of top-3 hits from re-ranker
+                     # print("\n-------------------------\n")
+                     # print("Top-3 Cross-Encoder Re-ranker hits")
+                     # hits = sorted(hits, key=lambda x: x['cross-score'], reverse=True)
+                     # for hit in hits[0:3]:
+                     #     print("\t{:.3f}\t{}".format(hit['cross-score'], paraList[hit['corpus_id']].replace("\n", " ")))
+
+
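
For orientation: the new module exposes a single app() entry point, so a multipage launcher could render this page roughly as follows. This is a minimal sketch for illustration only; the launcher file name main.py is a hypothetical placeholder and is not part of this commit.

    # main.py - hypothetical launcher, for illustration only (not part of this commit)
    import appStore.keyword_search as keyword_search

    keyword_search.app()  # renders the Keyword Search page defined in appStore/keyword_search.py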