from typing import Type
from search_funcs.bm25_functions import prepare_bm25_input_data, prepare_bm25, bm25_search
from search_funcs.semantic_ingest_functions import parse_csv_or_excel, csv_excel_text_to_docs
from search_funcs.semantic_functions import docs_to_jina_embed_np_array, jina_simple_retrieval
from search_funcs.helper_functions import dummy_function, display_info, put_columns_in_df, put_columns_in_join_df, get_temp_folder_path, empty_folder

import gradio as gr
import pandas as pd
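
# Convenience type alias for pandas DataFrames, kept for type hints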
PandasDataFrame = Type[pd.DataFrame]

# Attempt to delete temporary files generated by previous use of the app (as the files can be very big!)
temp_folder_path = get_temp_folder_path()
empty_folder(temp_folder_path)

## Gradio app - BM25 search
block = gr.Blocks(theme = gr.themes.Base())

with block:
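
    # Session state objects that hold loaded data, documents, embeddings and search settings between events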
    ingest_text = gr.State()
    ingest_metadata = gr.State()
    ingest_docs = gr.State()

    vectorstore_state = gr.State() # globals()["vectorstore"]
    embeddings_state = gr.State() # globals()["embeddings"]

    k_val = gr.State(9999)
    out_passages = gr.State(9999)
    vec_weight = gr.State(1)

    #docs_keep_as_doc_state = gr.State()
    #doc_df_state = gr.State()
    #docs_keep_out_state = gr.State()

    corpus_state = gr.State()
    keyword_data_state = gr.State(pd.DataFrame())
    semantic_data_state = gr.State(pd.DataFrame())
    in_k1_info = gr.State("""k1: Constant used for influencing the term frequency saturation. After saturation is reached, additional
        occurrences of the term add significantly less to the score. Experiments suggest that 1.2 < k1 < 2 yields reasonably good results,
        although the optimal value depends on factors such as the type of documents or queries. Information taken from https://github.com/Inspirateur/Fast-BM25""")
    in_b_info = gr.State("""b: Constant used for influencing the effects of different document lengths relative to average document length.
        When b is bigger, lengthier documents (compared to average) have more impact on the score. Experiments suggest that 0.5 < b < 0.8
        yields reasonably good results, although the optimal value depends on factors such as the type of documents or queries. Information taken from https://github.com/Inspirateur/Fast-BM25""")
    in_alpha_info = gr.State("""alpha: IDF cutoff; terms with a lower IDF score than alpha will be dropped. A higher alpha will lower the accuracy of BM25 but increase performance. Information taken from https://github.com/Inspirateur/Fast-BM25""")
    in_no_search_info = gr.State("""Search results number: Maximum number of search results that will be returned. Bear in mind that if the alpha value is greater than the minimum, common words will be removed from the dataset, so the number of search results returned may be lower than this value.""")
    in_clean_info = gr.State("""Clean text: Clean the input text and search query. The function will try to remove email components and tags, and then will 'stem' the words, i.e. it will remove the endings of words (e.g. 'smashed' becomes 'smash') so that the search engine looks for the common 'core' of words shared between the query and the dataset.""")
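
    # For reference, the classic Okapi BM25 score for a query term t in document d is
    # (the exact variant implemented by Fast-BM25 may differ slightly):
    #
    #   score(t, d) = idf(t) * tf(t, d) * (k1 + 1) / (tf(t, d) + k1 * (1 - b + b * len(d) / avg_doc_len))
    #
    # where tf is the term frequency in d, len(d) is the document length, and avg_doc_len is the
    # average document length across the corpus. alpha filters out terms whose idf falls below it.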

    gr.Markdown(
    """
    # Data text search

    Search through long-form text fields in your tabular data, either for exact, specific terms (Keyword search) or for broader, 'fuzzy' themes (Semantic search). More instructions are provided in the relevant tabs below.
    """)

    with gr.Tab(label="Keyword search"):
        gr.Markdown(
        """
        **Exact term keyword search**

        1. Load in a data file (ideally a file with '_cleaned' at the end of the name), with (optionally) the '...search_index.pkl.gz' file in the same folder to save loading time.
        2. Select the field in your data to search. A field with the suffix '_cleaned' means that html tags have been removed.
        3. Wait for the data file to be prepared for search.
        4. Enter the search term in the relevant box below and press Enter/click on 'Search text'.
        5. Your search results will be saved in a csv file and will be presented in the 'File output' area below.
        """)
        with gr.Row():
            current_source = gr.Textbox(label="Current data source(s)", value="None")

        with gr.Accordion(label = "Load in data", open=True):
            in_bm25_file = gr.File(label="Upload data for keyword search", file_count= 'multiple', file_types =['.parquet', '.csv', '.pkl', '.pkl.gz'])
            with gr.Row():
                in_bm25_column = gr.Dropdown(label="Enter the name of the text column in the data file to search")
                load_bm25_data_button = gr.Button(value="Load data")

            with gr.Row():
                load_finished_message = gr.Textbox(label="Load progress", scale = 2)

        with gr.Accordion(label = "Search data", open=True):
            with gr.Row():
                keyword_query = gr.Textbox(label="Enter your search term")
                #mod_query = gr.Textbox(label="Cleaned search term (the terms that are passed to the search engine)")
                keyword_search_button = gr.Button(value="Search text")

            with gr.Row():
                output_single_text = gr.Textbox(label="Top result")
                output_file = gr.File(label="File output")
with gr.Tab("Semantic search"): | |
gr.Markdown( | |
""" | |
**Thematic/semantic search** | |
This search type enables you to search for broader themes (e.g. happiness, nature) and the search will pick out text passages that relate to these themes even if they don't contain the exact words. 1. Load in data file (ideally a file with '_cleaned' at the end of the name, a pkl.gz file), with (optionally) the 'embeddings... .npz' file in the same folder to save loading time. 2. Select the field in your data to search. If you loaded in a documents pkl.gz file, this will be 'page_contents'. 3. Wait for the data file to be prepared for search. 4. Enter the search term in the 'Enter semantic search query here' box below and press Enter/click on 'Start semantic search'. 4. Your search results will be saved in a csv file and will be presented in the 'File output' area below. | |
""") | |
        with gr.Row():
            current_source_semantic = gr.Textbox(label="Current data source(s)", value="None")

        with gr.Accordion("Load in data", open = True):
            in_semantic_file = gr.File(label="Upload data file for semantic search", file_count= 'multiple', file_types = ['.parquet', '.csv', '.npy', '.npz', '.pkl', '.pkl.gz'])
            with gr.Row():
                in_semantic_column = gr.Dropdown(label="Enter the name of the text column in the data file to search")
                load_semantic_data_button = gr.Button(value="Load data", variant="secondary")

            semantic_load_progress = gr.Textbox(label="Load progress")

        semantic_query = gr.Textbox(label="Enter semantic search query here")
        semantic_submit = gr.Button(value="Start semantic search", variant="secondary", scale = 1)

        with gr.Row():
            semantic_output_single_text = gr.Textbox(label="Top result")
            semantic_output_file = gr.File(label="File output")
with gr.Tab(label="Advanced options"): | |
with gr.Accordion(label="Data load / save options", open = True): | |
with gr.Row(): | |
in_clean_data = gr.Dropdown(label = "Clean text during load (remove html tags). For large files this may take some time!", value="No", choices=["Yes", "No"]) | |
return_intermediate_files = gr.Dropdown(label = "Return intermediate processing files from file preparation. Files can be loaded in to save processing time in future.", value="No", choices=["Yes", "No"]) | |
embedding_super_compress = gr.Dropdown(label = "Round embeddings to three dp for smaller files with less accuracy.", value="No", choices=["Yes", "No"]) | |
#save_clean_data_button = gr.Button(value = "Save loaded data to file", scale = 1) | |
with gr.Accordion(label="Keyword search options", open = False): | |
with gr.Row(): | |
in_k1 = gr.Slider(label = "k1 value", value = 1.5, minimum = 0.1, maximum = 5, step = 0.1, scale = 3) | |
in_k1_button = gr.Button(value = "k1 value info", scale = 1) | |
with gr.Row(): | |
in_b = gr.Slider(label = "b value", value = 0.75, minimum = 0.1, maximum = 5, step = 0.05, scale = 3) | |
in_b_button = gr.Button(value = "b value info", scale = 1) | |
with gr.Row(): | |
in_alpha = gr.Slider(label = "alpha value / IDF cutoff", value = -5, minimum = -5, maximum = 10, step = 1, scale = 3) | |
in_alpha_button = gr.Button(value = "alpha value info", scale = 1) | |
with gr.Row(): | |
in_no_search_results = gr.Slider(label="Maximum number of search results to return", value = 100000, minimum=10, maximum=100000, step=10, scale = 3) | |
in_no_search_results_button = gr.Button(value = "Search results number info", scale = 1) | |
with gr.Row(): | |
in_search_param_button = gr.Button(value="Load search parameters (Need to click this if you changed anything above)") | |
with gr.Accordion(label="Semantic search options", open = False): | |
semantic_min_distance = gr.Slider(label = "Minimum distance score for search result to be included", value = 0.7, minimum=0, maximum=0.95, step=0.01) | |
with gr.Accordion(label = "Join on additional dataframes to results", open = False): | |
in_join_file = gr.File(label="Upload your data to join here") | |
in_join_column = gr.Dropdown(label="Column to join in new data frame") | |
search_df_join_column = gr.Dropdown(label="Column to join in search data frame") | |
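
    # Rebuild the BM25 index with the chosen k1, b and alpha values when the user reloads the search parameters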
    in_search_param_button.click(fn=prepare_bm25, inputs=[corpus_state, in_bm25_file, return_intermediate_files, in_k1, in_b, in_alpha], outputs=[load_finished_message, output_file])
    # ---
    in_k1_button.click(display_info, inputs=in_k1_info)
    in_b_button.click(display_info, inputs=in_b_info)
    in_alpha_button.click(display_info, inputs=in_alpha_info)
    in_no_search_results_button.click(display_info, inputs=in_no_search_info)

    ### BM25 SEARCH ###

    # Update dropdowns upon initial file load
    in_bm25_file.upload(put_columns_in_df, inputs=[in_bm25_file, in_bm25_column], outputs=[in_bm25_column, in_clean_data, search_df_join_column, keyword_data_state])
    in_join_file.upload(put_columns_in_join_df, inputs=[in_join_file, in_join_column], outputs=[in_join_column])

    # Load in BM25 data
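    # (first prepare the text corpus from the uploaded file, then build the BM25 index from it)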
    load_bm25_data_button.click(fn=prepare_bm25_input_data, inputs=[in_bm25_file, in_bm25_column, keyword_data_state, in_clean_data, return_intermediate_files], outputs=[corpus_state, load_finished_message, keyword_data_state, output_file, output_file, current_source]).\
        then(fn=prepare_bm25, inputs=[corpus_state, in_bm25_file, return_intermediate_files, in_k1, in_b, in_alpha], outputs=[load_finished_message, output_file])#.\
        #then(fn=put_columns_in_df, inputs=[in_bm25_file, in_bm25_column], outputs=[in_bm25_column, in_clean_data, search_df_join_column])

    # BM25 search functions on click or enter
    keyword_search_button.click(fn=bm25_search, inputs=[keyword_query, in_no_search_results, keyword_data_state, in_bm25_column, in_clean_data, in_join_file, in_join_column, search_df_join_column], outputs=[output_single_text, output_file], api_name="keyword")
    keyword_query.submit(fn=bm25_search, inputs=[keyword_query, in_no_search_results, keyword_data_state, in_bm25_column, in_clean_data, in_join_file, in_join_column, search_df_join_column], outputs=[output_single_text, output_file])

    ### SEMANTIC SEARCH ###

    # Load in a csv/excel file for semantic search
    in_semantic_file.upload(put_columns_in_df, inputs=[in_semantic_file, in_semantic_column], outputs=[in_semantic_column, in_clean_data, search_df_join_column, semantic_data_state])
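
    # Prepare the semantic search data: parse the uploaded file, convert the text column to documents,
    # then embed the documents with the Jina model into a numpy array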
    load_semantic_data_button.click(parse_csv_or_excel, inputs=[in_semantic_file, semantic_data_state, in_semantic_column], outputs=[ingest_text, current_source_semantic, semantic_load_progress]).\
        then(csv_excel_text_to_docs, inputs=[ingest_text, in_semantic_file, in_semantic_column, in_clean_data, return_intermediate_files], outputs=[ingest_docs, semantic_load_progress]).\
        then(docs_to_jina_embed_np_array, inputs=[ingest_docs, in_semantic_file, return_intermediate_files, embedding_super_compress], outputs=[semantic_load_progress, vectorstore_state, semantic_output_file])

    # Semantic search query
    semantic_submit.click(jina_simple_retrieval, inputs=[semantic_query, vectorstore_state, ingest_docs, in_semantic_column, k_val, out_passages, semantic_min_distance, vec_weight, in_join_file, in_join_column, search_df_join_column], outputs=[semantic_output_single_text, semantic_output_file], api_name="semantic")
    semantic_query.submit(jina_simple_retrieval, inputs=[semantic_query, vectorstore_state, ingest_docs, in_semantic_column, k_val, out_passages, semantic_min_distance, vec_weight, in_join_file, in_join_column, search_df_join_column], outputs=[semantic_output_single_text, semantic_output_file])
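
    # The api_name arguments above expose the keyword and semantic searches as named API endpoints,
    # so they can also be called outside the UI. A minimal sketch (assuming the app is running
    # locally on the default port, with one value supplied per input wired to each event above):
    #
    #   from gradio_client import Client
    #   client = Client("http://127.0.0.1:7860/")
    #   print(client.view_api())  # lists the /keyword and /semantic endpoints and their expected inputs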

    # Dummy functions just to get dropdowns to work correctly with Gradio 3.50
    in_bm25_column.change(dummy_function, in_bm25_column, None)
    search_df_join_column.change(dummy_function, search_df_join_column, None)
    in_join_column.change(dummy_function, in_join_column, None)
    in_semantic_column.change(dummy_function, in_semantic_column, None)

block.queue().launch(debug=True)