Sean-Case committed on
Commit 4ce2224 • 1 Parent(s): 8c115b3

Upgraded to Gradio 4.16.0. Added spaCy fuzzy search functionality.

README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🔍
 colorFrom: purple
 colorTo: green
 sdk: gradio
-sdk_version: 3.50.2
+sdk_version: 4.16.0
 app_file: app.py
 pinned: false
 license: apache-2.0
app.py CHANGED
@@ -3,6 +3,8 @@ from search_funcs.bm25_functions import prepare_bm25_input_data, prepare_bm25, b
 #from search_funcs.semantic_ingest_functions import parse_csv_or_excel, csv_excel_text_to_docs
 #from search_funcs.semantic_functions import docs_to_jina_embed_np_array, jina_simple_retrieval
 from search_funcs.helper_functions import dummy_function, display_info, initial_data_load, put_columns_in_join_df, get_temp_folder_path, empty_folder
+from search_funcs.spacy_search_funcs import spacy_fuzzy_search
+
 
 import gradio as gr
 import pandas as pd
@@ -33,6 +35,7 @@ with block:
 
 corpus_state = gr.State()
 keyword_data_state = gr.State(pd.DataFrame())
+keyword_data_list_state = gr.State([])
 join_data_state = gr.State(pd.DataFrame())
 semantic_data_state = gr.State(pd.DataFrame())
 
@@ -74,15 +77,15 @@ depends on factors such as the type of documents or queries. Information taken f
 load_finished_message = gr.Textbox(label="Load progress", scale = 2)
 
 with gr.Accordion(label = "Search data", open=True):
+keyword_query = gr.Textbox(label="Enter your search term")
 with gr.Row():
-keyword_query = gr.Textbox(label="Enter your search term")
-
-keyword_search_button = gr.Button(value="Search text")
-
+keyword_search_button = gr.Button(value="Keyword search", variant="primary")
+fuzzy_search_button = gr.Button(value="Fuzzy search (much slower)", variant="secondary")
 with gr.Row():
 output_single_text = gr.Textbox(label="Top result")
 output_file = gr.File(label="File output")
 
+
 # with gr.Tab("Semantic search"):
 # gr.Markdown(
 # """
@@ -131,8 +134,10 @@ depends on factors such as the type of documents or queries. Information taken f
 in_no_search_results_button = gr.Button(value = "Search results number info", scale = 1)
 with gr.Row():
 in_search_param_button = gr.Button(value="Load search parameters (Need to click this if you changed anything above)")
-with gr.Accordion(label="Semantic search options", open = False):
-semantic_min_distance = gr.Slider(label = "Minimum distance score for search result to be included", value = 0.75, minimum=0, maximum=0.95, step=0.01)
+with gr.Accordion(label="Fuzzy search options", open = False):
+no_spelling_mistakes = gr.Slider(label = "Number of spelling mistakes allowed in fuzzy search", value = 1, minimum=1, maximum=4, step=1)
+# with gr.Accordion(label="Semantic search options", open = False):
+# semantic_min_distance = gr.Slider(label = "Minimum distance score for search result to be included", value = 0.75, minimum=0, maximum=0.95, step=0.01)
 with gr.Accordion(label = "Join on additional dataframes to results", open = False):
 in_join_file = gr.File(label="Upload your data to join here")
 in_join_message = gr.Textbox(label="Join file load progress")
@@ -153,12 +158,16 @@ depends on factors such as the type of documents or queries. Information taken f
 in_join_file.upload(put_columns_in_join_df, inputs=[in_join_file], outputs=[in_join_column, join_data_state, in_join_message])
 
 # Load in BM25 data
-load_bm25_data_button.click(fn=prepare_bm25_input_data, inputs=[in_bm25_file, in_bm25_column, keyword_data_state, tokenised_state, in_clean_data, return_intermediate_files], outputs=[corpus_state, load_finished_message, keyword_data_state, output_file, output_file]).\
+load_bm25_data_button.click(fn=prepare_bm25_input_data, inputs=[in_bm25_file, in_bm25_column, keyword_data_state, tokenised_state, in_clean_data, return_intermediate_files], outputs=[corpus_state, load_finished_message, keyword_data_state, output_file, output_file, keyword_data_list_state]).\
 then(fn=prepare_bm25, inputs=[corpus_state, in_bm25_file, in_bm25_column, search_index_state, in_clean_data, return_intermediate_files, in_k1, in_b, in_alpha], outputs=[load_finished_message, output_file])#.\
 
 # BM25 search functions on click or enter
 keyword_search_button.click(fn=bm25_search, inputs=[keyword_query, in_no_search_results, keyword_data_state, in_bm25_column, join_data_state, in_clean_data, in_join_column, search_df_join_column], outputs=[output_single_text, output_file], api_name="keyword")
 keyword_query.submit(fn=bm25_search, inputs=[keyword_query, in_no_search_results, keyword_data_state, in_bm25_column, join_data_state, in_clean_data, in_join_column, search_df_join_column], outputs=[output_single_text, output_file])
+
+# Fuzzy search functions on click
+
+fuzzy_search_button.click(fn=spacy_fuzzy_search, inputs=[keyword_query, keyword_data_list_state, keyword_data_state, in_bm25_column, join_data_state, search_df_join_column, in_join_column, no_spelling_mistakes], outputs=[output_single_text, output_file], api_name="fuzzy")
 
 ### SEMANTIC SEARCH ###
 # Load in a csv/excel file for semantic search
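
Note: a minimal sketch of the event-wiring pattern used in app.py above, assuming Gradio 4.x. echo_search and the component names here are illustrative stand-ins, not the app's real functions:

import gradio as gr

def echo_search(query, data_list):
    # Stand-in for spacy_fuzzy_search: returns (top result text, file path)
    return "Top result for: " + query, None

with gr.Blocks() as demo:
    data_list_state = gr.State([])   # analogous to keyword_data_list_state
    query = gr.Textbox(label="Enter your search term")
    fuzzy_btn = gr.Button(value="Fuzzy search (much slower)", variant="secondary")
    top_result = gr.Textbox(label="Top result")
    out_file = gr.File(label="File output")

    # Positional inputs/outputs map one-to-one onto the function's
    # parameters and return values; api_name exposes a /fuzzy API endpoint.
    fuzzy_btn.click(fn=echo_search, inputs=[query, data_list_state],
                    outputs=[top_result, out_file], api_name="fuzzy")

demo.launch()
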
requirements.txt CHANGED
@@ -7,4 +7,4 @@ openpyxl==3.1.2
 # torch==2.1.2
 spacy==3.7.2
 en_core_web_sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1.tar.gz
-gradio==3.50.2
+gradio==4.16.0
search_funcs/bm25_functions.py CHANGED
@@ -236,7 +236,7 @@ def prepare_bm25_input_data(in_file, text_column, data_state, tokenised_state, c
 
 if not in_file:
 print("No input file found. Please load in at least one file.")
-return None, "No input file found. Please load in at least one file.", data_state, None, None, None
+return None, "No input file found. Please load in at least one file.", data_state, None, None, None, []
 
 progress(0, desc = "Loading in data")
 file_list = [string.name for string in in_file]
@@ -246,10 +246,10 @@ def prepare_bm25_input_data(in_file, text_column, data_state, tokenised_state, c
 data_file_names = [string for string in file_list if "tokenised" not in string.lower() and "npz" not in string.lower() and "gz" not in string.lower()]
 
 if not data_file_names:
-return None, "Please load in at least one csv/Excel/parquet data file.", data_state, None, None, None
+return None, "Please load in at least one csv/Excel/parquet data file.", data_state, None, None, None, []
 
 if not text_column:
-return None, "Please enter a column name to search.", data_state, None, None, None
+return None, "Please enter a column name to search.", data_state, None, None, None, []
 
 data_file_name = data_file_names[0]
 
@@ -329,9 +329,9 @@ def prepare_bm25_input_data(in_file, text_column, data_state, tokenised_state, c
 
 pd.DataFrame(data={"Corpus":corpus}).to_parquet(tokenised_data_file_name)
 
-return corpus, message, df, out_file_name, tokenised_data_file_name
+return corpus, message, df, out_file_name, tokenised_data_file_name, df_list
 
-return corpus, message, df, out_file_name, None # tokenised_data_file_name
+return corpus, message, df, out_file_name, None, df_list
 
 def save_prepared_bm25_data(in_file_name, prepared_text_list, in_df, in_bm25_column, progress=gr.Progress(track_tqdm=True)):
 
@@ -506,7 +506,7 @@ def bm25_search(free_text_query, in_no_search_results, original_data, text_colum
 # Duplicates dropped so as not to expand out dataframe
 join_df = join_df.drop_duplicates(in_join_column)
 
-results_df_out = results_df_out.merge(join_df,left_on=search_df_join_column, right_on=in_join_column, how="left")#.drop(in_join_column, axis=1)
+results_df_out = results_df_out.merge(join_df,left_on=search_df_join_column, right_on=in_join_column, how="left", suffixes=('','_y'))#.drop(in_join_column, axis=1)
 
 # Reorder results by score
 results_df_out = results_df_out.sort_values('search_score_abs', ascending=False)
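
Note on the extra [] / df_list return values above: Gradio maps a callback's positional return values one-to-one onto its outputs list, so once keyword_data_list_state was appended to the .click(...) outputs in app.py, every return path in prepare_bm25_input_data needed a seventh value. A minimal sketch of that contract, with hypothetical names rather than the app's code:

import gradio as gr

def load_data(path):
    # Two return values, matched by position to the two outputs below.
    # Error paths must return the same arity, e.g.: return "failed", []
    return "loaded " + path, ["doc one", "doc two"]

with gr.Blocks() as demo:
    path_box = gr.Textbox(label="Path")
    status = gr.Textbox(label="Load progress")
    data_list_state = gr.State([])   # hidden state, filled by the callback
    load_btn = gr.Button("Load")
    load_btn.click(fn=load_data, inputs=[path_box], outputs=[status, data_list_state])

demo.launch()
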
search_funcs/helper_functions.py CHANGED
@@ -72,11 +72,11 @@ def read_file(filename):
 print("Loading in file")
 
 if file_type == 'csv':
-file = pd.read_csv(filename, low_memory=False).reset_index().drop(["index", "Unnamed: 0"], axis=1, errors="ignore")
+file = pd.read_csv(filename, low_memory=False).reset_index()#.drop(["index", "Unnamed: 0"], axis=1, errors="ignore")
 elif file_type == 'xlsx':
-file = pd.read_excel(filename).reset_index().drop(["index", "Unnamed: 0"], axis=1, errors="ignore")
+file = pd.read_excel(filename).reset_index()#.drop(["index", "Unnamed: 0"], axis=1, errors="ignore")
 elif file_type == 'parquet':
-file = pd.read_parquet(filename).reset_index().drop(["index", "Unnamed: 0"], axis=1, errors="ignore")
+file = pd.read_parquet(filename).reset_index()#.drop(["index", "Unnamed: 0"], axis=1, errors="ignore")
 elif file_type == 'pkl.gz':
 with gzip.open(filename, 'rb') as file:
 file = pickle.load(file)
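
The change above keeps the 'index' column that reset_index() materialises (the .drop(...) is now commented out), presumably so downstream merges can line rows back up with the original data. A small illustration of the behaviour, on made-up data:

import pandas as pd

df = pd.DataFrame({"text": ["a", "b", "c"]})
df = df.reset_index()           # adds an 'index' column holding 0, 1, 2
print(df.columns.tolist())      # ['index', 'text']
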
search_funcs/semantic_functions.py CHANGED
@@ -214,7 +214,7 @@ def process_data_from_scores_df(df_docs, in_join_file, out_passages, vec_score_c
 
 results_df_out[search_df_join_column] = results_df_out[search_df_join_column].astype(str).str.replace("\.0$","", regex=True)
 
-results_df_out = results_df_out.merge(join_df,left_on=search_df_join_column, right_on=in_join_column, how="left")#.drop(in_join_column, axis=1)
+results_df_out = results_df_out.merge(join_df,left_on=search_df_join_column, right_on=in_join_column, how="left", suffixes=('','_y'))#.drop(in_join_column, axis=1)
 
 return results_df_out
 
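
The suffixes=('','_y') added to the merge calls in this commit controls how pandas renames overlapping non-key columns: the left frame's names stay unchanged and duplicates from the join file get a _y suffix. A small illustration with made-up frames:

import pandas as pd

left = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
right = pd.DataFrame({"id": [1, 2], "name": ["x", "y"]})

# Without suffixes=('','_y'), pandas would rename both columns to name_x / name_y.
merged = left.merge(right, left_on="id", right_on="id", how="left", suffixes=("", "_y"))
print(merged.columns.tolist())  # ['id', 'name', 'name_y']
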
search_funcs/spacy_search_funcs.py CHANGED
@@ -4,16 +4,19 @@ import numpy as np
 import gradio as gr
 import pandas as pd
 from typing import List, Type
+from datetime import datetime
 
 PandasDataFrame = Type[pd.DataFrame]
 
+today_rev = datetime.now().strftime("%Y%m%d")
+
 nlp = spacy.load("en_core_web_sm")
 
 string_query = "knife attack run fast"
 df_list = ["Last week someone was grievously injured in a knife attack on Exmoor road. Running away. They ran as fast as possible. I run.","This is the 3rd knifing in the area in as many weeks; knives everywhere.", "attacks of this kind have been increasing for years. Knife attack or knife attack.", "Nothing happened here"]
 
 
-def spacy_fuzzy_search(string_query:str, df_list: List[str], original_data: PandasDataFrame, search_df_join_column:str, in_join_column:str, no_spelling_mistakes:int = 1, progress=gr.Progress(track_tqdm=True)):
+def spacy_fuzzy_search(string_query:str, df_list: List[str], original_data: PandasDataFrame, text_column:str, in_join_file: PandasDataFrame, search_df_join_column:str, in_join_column:str, no_spelling_mistakes:int = 1, progress=gr.Progress(track_tqdm=True)):
 ''' Conduct fuzzy match on a list of data.'''
 
 query = nlp(string_query)
@@ -26,52 +29,17 @@ def spacy_fuzzy_search(string_query:str, df_list: List[str], original_data: Pand
 if len(tokenised_query) > 1:
 pattern_lemma = [{"LEMMA": {"IN": tokenised_query}}]
 pattern_fuzz = [{"TEXT": {spelling_mistakes_fuzzy_pattern: {"IN": tokenised_query}}}]
-elif len(tokenised_query) == 1:
+else:
 pattern_lemma = [{"LEMMA": tokenised_query[0]}]
 pattern_fuzz = [{"TEXT": {spelling_mistakes_fuzzy_pattern: tokenised_query[0]}}]
-else:
-tokenised_query = [""]
-
-# %%
-search_pattern = pattern_fuzz.copy()
-search_pattern.extend(pattern_lemma)
 
-
+
 # %%
 matcher = Matcher(nlp.vocab)
-
-# %% [markdown]
-# from spacy.tokens import Span
-# from spacy import displacy
-#
-# def add_event_ent(matcher, doc, i, matches):
-# # Get the current match and create tuple of entity label, start and end.
-# # Append entity to the doc's entity. (Don't overwrite doc.ents!)
-# match_id, start, end = matches[i]
-# entity = Span(doc, start, end, label="EVENT")
-# doc.ents += (entity,)
-# print(entity.text)
-
-# %% [markdown]
-# matched_sents = [] # Collect data of matched sentences to be visualized
-#
-# def collect_sents(matcher, doc, i, matches):
-# match_id, start, end = matches[i]
-# span = doc[start:end] # Matched span
-# sent = span.sent # Sentence containing matched span
-# # Append mock entity for match in displaCy style to matched_sents
-# # get the match span by ofsetting the start and end of the span with the
-# # start and end of the sentence in the doc
-# match_ents = [{
-# "start": span.start_char - sent.start_char,
-# "end": span.end_char - sent.start_char,
-# "label": "MATCH",
-# }]
-# matched_sents.append({"text": sent.text, "ents": match_ents})
-
+
 # %%
-matcher.add(string_query, [pattern_fuzz])#, on_match=add_event_ent)
-matcher.add(string_query, [pattern_lemma])#, on_match=add_event_ent)
+matcher.add(string_query, [pattern_fuzz])
+matcher.add(string_query, [pattern_lemma])
 
 # %%
 batch_size = 256
@@ -100,8 +68,11 @@ def spacy_fuzzy_search(string_query:str, df_list: List[str], original_data: Pand
 results_df = pd.DataFrame(data={"index": list(range(len(df_list))),
 "search_text": df_list,
 "search_score_abs": match_scores})
-results_df['search_score_abs'] = abs(round(results_df['search_score_abs'], 2))
-results_df_out = results_df[['index', 'search_text', 'search_score_abs']].merge(original_data,left_on="index", right_index=True, how="left")#.drop("index", axis=1)
+results_df['search_score_abs'] = abs(round(results_df['search_score_abs']*100, 2))
+results_df_out = results_df[['index', 'search_text', 'search_score_abs']].merge(original_data,left_on="index", right_index=True, how="left")
+
+# Keep only results with at least one match
+results_df_out = results_df_out.loc[results_df["search_score_abs"] > 0, :]
 
 # Join on additional files
 if not in_join_file.empty:
@@ -113,13 +84,13 @@ def spacy_fuzzy_search(string_query:str, df_list: List[str], original_data: Pand
 # Duplicates dropped so as not to expand out dataframe
 join_df = join_df.drop_duplicates(in_join_column)
 
-results_df_out = results_df_out.merge(join_df,left_on=search_df_join_column, right_on=in_join_column, how="left")#.drop(in_join_column, axis=1)
+results_df_out = results_df_out.merge(join_df,left_on=search_df_join_column, right_on=in_join_column, how="left", suffixes=('','_y'))#.drop(in_join_column, axis=1)
 
 # Reorder results by score
 results_df_out = results_df_out.sort_values('search_score_abs', ascending=False)
 
 # Out file
-query_str_file = ("_").join(token_query)
+query_str_file = ("_").join(tokenised_query)
 results_df_name = "keyword_search_result_" + today_rev + "_" + query_str_file + ".xlsx"
 
 print("Saving search file output")
@@ -130,8 +101,4 @@ def spacy_fuzzy_search(string_query:str, df_list: List[str], original_data: Pand
 
 print("Returning results")
 
-return results_first_text, results_df_name
-
-
-match_list = spacy_fuzzy_search(string_query, df_list)
-print(match_list)
+return results_first_text, results_df_name
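
For reference, a minimal self-contained sketch of the fuzzy-matching technique spacy_fuzzy_search relies on, assuming spaCy >= 3.5 (requirements.txt pins spacy==3.7.2, which supports the FUZZYn Matcher attributes). The app appears to build the FUZZYn key (spelling_mistakes_fuzzy_pattern) from the no_spelling_mistakes slider; that link is an inference from the diff:

import spacy
from spacy.matcher import Matcher

nlp = spacy.load("en_core_web_sm")
matcher = Matcher(nlp.vocab)

# FUZZY1 tolerates one edit per token, so "knfe" still matches "knife".
tokens = ["knife", "attack"]
matcher.add("query", [[{"TEXT": {"FUZZY1": {"IN": tokens}}}]])

texts = ["A knfe attack was reported.", "Nothing happened here."]
for doc in nlp.pipe(texts, batch_size=256):
    print(doc.text, "->", len(matcher(doc)), "match(es)")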