import json
import os
import traceback

import gradio as gr
import requests
from huggingface_hub import HfApi

hf_api = HfApi()
# Map dataset name -> Hub metadata for every dataset under the bigscience-data org.
roots_datasets = {
    dset.id.split("/")[-1]: dset
    for dset in hf_api.list_datasets(
        author="bigscience-data",
        use_auth_token=os.environ.get("bigscience_data_token"),
    )
}


def get_docid_html(docid):
    data_org, dataset, docid = docid.split("/")
    metadata = roots_datasets[dataset]
    if metadata.private:
        docid_html = (
            f'<a class="underline-on-hover" '
            f'href="https://huggingface.co/datasets/bigscience-data/{dataset}" '
            f'target="_blank">🔒{dataset}</a>/{docid}'
        )
    else:
        docid_html = (
            f'<a class="underline-on-hover" '
            f'href="https://huggingface.co/datasets/bigscience-data/{dataset}" '
            f'target="_blank">{dataset}</a>/{docid}'
        )
    return docid_html


PII_TAGS = {"KEY", "EMAIL", "USER", "IP_ADDRESS", "ID", "IPv4", "IPv6"}
PII_PREFIX = "PI:"


def process_pii(text):
    # Replace PII placeholders such as "PI:EMAIL" with a visible REDACTED marker.
    for tag in PII_TAGS:
        text = text.replace(
            PII_PREFIX + tag,
            """<b>REDACTED {}</b>""".format(tag),
        )
    return text
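
# Illustrative behavior, given the tags and markup above:
#   process_pii("contact PI:EMAIL for access")
#   -> 'contact <b>REDACTED EMAIL</b> for access'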


def flag(query, language, num_results, issue_description):
    # Report a problematic search to the backend; errors are logged but never
    # surfaced to the user.
    try:
        post_data = {
            "query": query,
            "k": num_results,
            "flag": True,
            "description": issue_description,
        }
        if language != "detect_language":
            post_data["lang"] = language
        output = requests.post(
            os.environ.get("address"),
            headers={"Content-type": "application/json"},
            data=json.dumps(post_data),
            timeout=120,
        )
    except Exception:
        print("Error flagging")
        traceback.print_exc()
    return ""
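
# Each result is a (text, url, docid) tuple as produced by
# extract_results_from_payload below. For exact search, highlight_terms is the
# raw query string; otherwise it is the collection of terms the backend asks
# us to highlight.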


def format_result(result, highlight_terms, exact_search):
    text, url, docid = result
    if exact_search:
        # Highlight the single span that matches the exact query.
        query_start = text.find(highlight_terms)
        query_end = query_start + len(highlight_terms)
        tokens_html = text[0:query_start]
        tokens_html += "<b>{}</b>".format(text[query_start:query_end])
        tokens_html += text[query_end:]
    else:
        # Highlight every matching token individually.
        tokens = text.split()
        tokens_html = []
        for token in tokens:
            if token in highlight_terms:
                tokens_html.append("<b>{}</b>".format(token))
            else:
                tokens_html.append(token)
        tokens_html = " ".join(tokens_html)
    tokens_html = process_pii(tokens_html)
    meta_html = (
        """
        <p class="underline-on-hover">
            <a href="{}" target="_blank">{}</a>
        </p>""".format(url, url)
        if url is not None
        else ""
    )
    docid_html = get_docid_html(docid)
    language = "FIXME"  # the backend does not return a per-result language yet
    return """{}
    <p>Document ID: {}</p>
    <p>Language: {}</p>
    <p>{}</p>
    <br>
    """.format(meta_html, docid_html, language, tokens_html)
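
# format_result_page assembles the whole results page: an optional
# detected-language header followed by the results for each language.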


def format_result_page(
    language, results, highlight_terms, num_results, exact_search
) -> gr.HTML:
    header_html = ""
    # FIXME: fix language detection by normalizing the result format on the backend
    if language == "detect_language" and not exact_search:
        header_html = f"""
        <p>Detected language: FIX MEEEE !!!</p>
        """
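    # "results" maps a language code to a list of (text, url, docid) tuples,
    # as normalized by extract_results_from_payload.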
    results_html = ""
    for lang, results_for_lang in results.items():
        if len(results_for_lang) == 0:
            if exact_search:
                results_html += f"""
                <p>No results found.</p>
                """
            else:
                results_html += f"""
                <p>No results for language: {lang}</p>
                """
            continue
        results_for_lang_html = ""
        for result in results_for_lang:
            results_for_lang_html += format_result(
                result, highlight_terms, exact_search
            )
        if language == "all" and not exact_search:
            results_for_lang_html = f"""
            <p>Results for language: {lang}</p>
            {results_for_lang_html}
            """
        results_html += results_for_lang_html
    return header_html + results_html


def extract_results_from_payload(query, language, payload, exact_search):
    results = payload["results"]
    processed_results = dict()
    highlight_terms = None
    num_results = None
    if exact_search:
        highlight_terms = query
        num_results = payload["num_results"]
        results = {language: results}
    else:
        highlight_terms = payload["highlight_terms"]
        # unify the format - might be best fixed on the server side
        if language != "all":
            results = {language: results}
    for lang, results_for_lang in results.items():
        processed_results[lang] = list()
        for result in results_for_lang:
            text = result["text"]
            url = (
                result["meta"]["url"]
                if "meta" in result
                and result["meta"] is not None
                and "url" in result["meta"]
                else None
            )
            docid = result["docid"]
            processed_results[lang].append((text, url, docid))
    return processed_results, highlight_terms, num_results
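
# process_error renders a backend error as a user-facing HTML message; the
# error details are read from the payload.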
def process_error(error_type, payload):
    if error_type == "unsupported_lang":
        detected_lang = payload["err"]["meta"]["detected_lang"]
        return f"""
        <p>
            Detected language <b>{detected_lang}</b> is not supported.<br>
            Please choose a language from the dropdown or type another query.
        </p>
        """
    return ""
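
# Payload contract, as inferred from the handlers above: a successful response
# carries "results" (plus "highlight_terms", and "num_results" for exact
# search); a failed one carries {"err": {"type": ..., "meta": {...}}}.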
def extract_error_from_payload(payload):
    if "err" in payload:
        return payload["err"]["type"]
    return None


def request_payload(query, language, exact_search, num_results=10) -> dict:
    post_data = {"query": query, "k": num_results}
    if language != "detect_language":
        post_data["lang"] = language
    # Exact search is served by a dedicated endpoint; BM25 search goes to the
    # address configured in the environment.
    address = "http://34.105.160.81:8080" if exact_search else os.environ.get("address")
    output = requests.post(
        address,
        headers={"Content-type": "application/json"},
        data=json.dumps(post_data),
        timeout=60,
    )
    payload = json.loads(output.text)
    return payload


description = """# 🌸 🔎 ROOTS search tool 🔍 🌸

The ROOTS corpus was developed during the [BigScience workshop](https://bigscience.huggingface.co/) for the purpose of training the Multilingual Large Language Model [BLOOM](https://huggingface.co/bigscience/bloom). This tool allows you to search through the ROOTS corpus. We serve a BM25 index for each language or group of languages included in ROOTS. You can read more about the details of the tool design [here](https://huggingface.co/spaces/bigscience-data/scisearch/blob/main/roots_search_tool_specs.pdf). For more information and instructions on how to access the full corpus, check [this form](https://forms.gle/qyYswbEL5kA23Wu99)."""
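
# The Gradio UI below: a query box, a language dropdown, a max-results slider,
# a submit button, a (hidden) dataset filter, the results pane, and a flagging
# form that is revealed after the first search.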

if __name__ == "__main__":
    demo = gr.Blocks(
        css=".underline-on-hover:hover { text-decoration: underline; } .flagging { font-size: 12px; color: Silver; }"
    )

    with demo:
        with gr.Row():
            gr.Markdown(value=description)
        with gr.Row():
            query = gr.Textbox(
                lines=1,
                max_lines=1,
                placeholder="Put your query in double quotes for exact search.",
                label="Query",
            )
        with gr.Row():
            lang = gr.Dropdown(
                choices=[
                    "ar",
                    "ca",
                    "code",
                    "en",
                    "es",
                    "eu",
                    "fr",
                    "id",
                    "indic",
                    "nigercongo",
                    "pt",
                    "vi",
                    "zh",
                    "detect_language",
                    "all",
                ],
                value="en",
                label="Language",
            )
        with gr.Row():
            k = gr.Slider(1, 100, value=10, step=1, label="Max Results")
        with gr.Row():
            # Exact-search checkbox, disabled for now; exact search is triggered
            # by wrapping the query in double quotes instead.
            """
            with gr.Column(scale=1):
                exact_search = gr.Checkbox(
                    value=False, label="Exact Search", variant="compact"
                )
            """
            with gr.Column(scale=4):
                submit_btn = gr.Button("Submit")
        with gr.Row(visible=False) as datasets_filter:
            available_datasets = gr.Dropdown(
                type="value",
                choices=[],
                value=[],
                label="Datasets",
                multiselect=True,
            )
        with gr.Row():
            results = gr.HTML(label="Results")
        with gr.Column(visible=False) as flagging_form:
            flag_txt = gr.Textbox(
                lines=1,
                placeholder="Type here...",
                label="""If you choose to flag your search, we will save the query, language and the number of results you requested. Please consider adding relevant additional context below:""",
            )
            flag_btn = gr.Button("Flag Results")
            flag_btn.click(flag, inputs=[query, lang, k, flag_txt], outputs=[flag_txt])

        def submit(query, lang, k, dropdown_input):
            print("submitting", query, lang, k)
            query = query.strip()
            # A query wrapped in double quotes triggers exact search.
            exact_search = False
            if query.startswith('"') and query.endswith('"') and len(query) >= 2:
                exact_search = True
                query = query[1:-1]
            else:
                query = " ".join(query.split())
            if query == "" or query is None:
                return {
                    results: "",
                    flagging_form: gr.update(visible=False),
                    datasets_filter: gr.update(visible=False),
                    available_datasets: gr.Dropdown.update(choices=[], value=[]),
                }
            payload = request_payload(query, lang, exact_search, k)
            err = extract_error_from_payload(payload)
            if err is not None:
                results_html = process_error(err, payload)
            else:
                (
                    processed_results,
                    highlight_terms,
                    num_results,
                ) = extract_results_from_payload(query, lang, payload, exact_search)
                results_html = format_result_page(
                    lang, processed_results, highlight_terms, num_results, exact_search
                )
            datasets = []
            print(datasets)
            return {
                results: results_html,
                flagging_form: gr.update(visible=True),
                datasets_filter: gr.update(visible=True),
                available_datasets: gr.Dropdown.update(
                    choices=datasets, value=datasets
                ),
            }

        def filter_datasets():
            pass

        query.submit(
            fn=submit,
            inputs=[query, lang, k, available_datasets],
            outputs=[results, flagging_form, datasets_filter, available_datasets],
        )
        submit_btn.click(
            submit,
            inputs=[query, lang, k, available_datasets],
            outputs=[results, flagging_form, datasets_filter, available_datasets],
        )
        available_datasets.change(filter_datasets, inputs=[], outputs=[])

    demo.launch(enable_queue=True, debug=True)
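
# Note: the "address" environment variable must point at the search backend for
# both search and flagging requests, and "bigscience_data_token" is used to
# list the bigscience-data datasets on the Hub at startup.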