Spaces:
GIZ
/
Running on CPU Upgrade

prashant committed on
Commit
949b596
1 Parent(s): dd2ab07
appStore/keyword_search.py CHANGED
@@ -107,13 +107,12 @@ def app():
107
  split_by=lexical_split_by,
108
  split_length=lexical_split_length,
109
  split_overlap=lexical_split_overlap,
110
- removePunc=lexical_remove_punc)
111
  logging.info("performing lexical search")
112
  with st.spinner("Performing Exact matching search \
113
  (Lexical search) for you"):
114
- lexical_search(
115
- query=queryList,
116
- documents = all_documents['documents'],
117
  top_k = lexical_top_k )
118
  else:
119
  all_documents = runSemanticPreprocessingPipeline(
@@ -122,7 +121,7 @@ def app():
122
  split_by=split_by,
123
  split_length= split_length,
124
  split_overlap=split_overlap,
125
- removePunc= remove_punc,
126
  split_respect_sentence_boundary=split_respect_sentence_boundary)
127
  if len(all_documents['documents']) > 100:
128
  warning_msg = ": This might take sometime, please sit back and relax."
 
107
  split_by=lexical_split_by,
108
  split_length=lexical_split_length,
109
  split_overlap=lexical_split_overlap,
110
+ remove_punc=lexical_remove_punc)
111
  logging.info("performing lexical search")
112
  with st.spinner("Performing Exact matching search \
113
  (Lexical search) for you"):
114
+ lexical_search(query=queryList,
115
+ documents = all_documents['documents'],
 
116
  top_k = lexical_top_k )
117
  else:
118
  all_documents = runSemanticPreprocessingPipeline(
 
121
  split_by=split_by,
122
  split_length= split_length,
123
  split_overlap=split_overlap,
124
+ remove_punc= remove_punc,
125
  split_respect_sentence_boundary=split_respect_sentence_boundary)
126
  if len(all_documents['documents']) > 100:
127
  warning_msg = ": This might take sometime, please sit back and relax."
paramconfig.cfg CHANGED
@@ -16,7 +16,7 @@ READER = deepset/tinyroberta-squad2
16
  READER_TOP_K = 10
17
  THRESHOLD = 0.1
18
  SPLIT_BY = sentence
19
- SPLIT_LENGTH = 3
20
  SPLIT_OVERLAP = 1
21
  RESPECT_SENTENCE_BOUNDARY = 1
22
  REMOVE_PUNC = 0
 
16
  READER_TOP_K = 10
17
  THRESHOLD = 0.1
18
  SPLIT_BY = sentence
19
+ SPLIT_LENGTH = 4
20
  SPLIT_OVERLAP = 1
21
  RESPECT_SENTENCE_BOUNDARY = 1
22
  REMOVE_PUNC = 0
utils/lexical_search.py CHANGED
@@ -230,7 +230,7 @@ def lexical_search(query:Text,top_k:int, documents:List[Document]):
230
  if len(matches) != 0:
231
  if flag:
232
  flag = False
233
- if check_streamlit:
234
  st.markdown("##### Top few lexical search (TFIDF) hits #####")
235
  else:
236
  print("Top few lexical search (TFIDF) hits")
@@ -242,4 +242,7 @@ def lexical_search(query:Text,top_k:int, documents:List[Document]):
242
  spacyAnnotator(matches, doc)
243
 
244
  if flag:
245
- st.info("🤔 No relevant result found. Please try another keyword.")
 
 
 
 
230
  if len(matches) != 0:
231
  if flag:
232
  flag = False
233
+ if check_streamlit():
234
  st.markdown("##### Top few lexical search (TFIDF) hits #####")
235
  else:
236
  print("Top few lexical search (TFIDF) hits")
 
242
  spacyAnnotator(matches, doc)
243
 
244
  if flag:
245
+ if check_streamlit():
246
+ st.info("🤔 No relevant result found. Please try another keyword.")
247
+ else:
248
+ print("No relevant result found. Please try another keyword.")
utils/sdg_classifier.py CHANGED
@@ -94,7 +94,7 @@ def sdg_classification(haystack_doc:List[Document],
94
  """
95
  logging.info("Working on SDG Classification")
96
  if not classifier_model:
97
- if check_streamlit:
98
  classifier_model = st.session_state['sdg_classifier']
99
  else:
100
  logging.warning("No streamlit envinornment found, Pass the classifier")
 
94
  """
95
  logging.info("Working on SDG Classification")
96
  if not classifier_model:
97
+ if check_streamlit():
98
  classifier_model = st.session_state['sdg_classifier']
99
  else:
100
  logging.warning("No streamlit envinornment found, Pass the classifier")