import os
import json
import timeit
import gradio as gr
import pandas as pd
from datetime import date

from scripts.genbit import *
from scripts.gender_profession_bias import *
from scripts.gender_distribution import *

from datasets import load_dataset as hf_load_dataset
from huggingface_hub import DatasetFilter, list_datasets

from avidtools.datamodels.report import Report
from avidtools.datamodels.components import *
from avidtools.datamodels.enums import *

MAX_THRESHOLD = 1000
METHODOLOGIES = json.load(open("config/methodologies.json", "r", encoding="utf8"))

DATASET = {
    "name": None,
    "source": None,
    "df": None,
    "sampling_method": None,
    "sampling_size": None,
    "column": None,
    "methodology": None,
    "result_df": None,
}
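# NOTE: config/methodologies.json is not shown here. Based on how METHODOLOGIES
# is used below, each top-level key is a methodology name offered in the
# "Methodology" radio (e.g. "GenBiT (Microsoft Gender Bias Tool)"), and each
# value is expected to provide at least:
#   "fx"                : name of the analysis function, made available by the
#                         wildcard imports from scripts.* and dispatched via
#                         globals() in evaluate(); it receives a single-column
#                         DataFrame and returns (result_df, result_plot)
#   "short_description" : one-line summary reused in the AVID report
#   "description"       : longer Markdown text shown in the UI
# An illustrative entry (the function name "eval_genbit" is hypothetical):
#   {
#     "GenBiT (Microsoft Gender Bias Tool)": {
#       "fx": "eval_genbit",
#       "short_description": "...",
#       "description": "..."
#     }
#   }
# The file shipped in config/ is authoritative; this sketch only documents the
# keys the code below relies on.
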

def generate_avid_report():
    dataset_id = DATASET["name"]
    methodology = DATASET["methodology"]
    result_json = DATASET["result_df"].to_dict(orient="list")

    report = Report()

    report.affects = Affects(
        developer=[],
        deployer=["Hugging Face"] if DATASET["source"] == "HuggingFace Hub" else [],
        artifacts=[Artifact(type=ArtifactTypeEnum.dataset, name=dataset_id)],
    )
    report.problemtype = Problemtype(
        classof=ClassEnum.na,
        type=TypeEnum.detection,
        description=LangValue(
            lang="eng", value="Dataset Bias Detection using BiasAware"
        ),
    )
    report.metrics = [
        Metric(
            name=methodology,
            detection_method=Detection(type=MethodEnum.test, name=methodology),
            results=result_json,
        )
    ]
    report.references = (
        [
            Reference(
                label=f"{dataset_id} on Hugging Face",
                url=f"https://huggingface.co/datasets/{dataset_id}",
            )
        ]
        if DATASET["source"] == "HuggingFace Hub"
        else []
    )
    report.description = LangValue(
        lang="eng", value=METHODOLOGIES[methodology]["short_description"]
    )
    report.impact = Impact(
        avid=AvidTaxonomy(
            vuln_id="",
            risk_domain=["Ethics"],
            sep_view=[SepEnum.E0101],
            lifecycle_view=[LifecycleEnum.L03],
            taxonomy_version="0.2",
        )
    )
    report.reported_date = date.today()

    return gr.JSON(value=report.model_dump(), visible=True)


def evaluate():
    # Cap the sample at 100 entries for GenBiT.
    if DATASET["methodology"] == "GenBiT (Microsoft Gender Bias Tool)":
        DATASET["sampling_size"] = min(DATASET["sampling_size"], 100)

    print(
        f"Dataset : {DATASET['name']}\n"
        f"Source : {DATASET['source']}\n"
        f"Sampling Method : {DATASET['sampling_method']}\n"
        f"Sampling Size : {DATASET['sampling_size']}\n"
        f"Column : {DATASET['column']}\n"
        f"Methodology : {DATASET['methodology']}\n"
        f"Time Taken : ",
        end="",
    )

    try:
        start = timeit.default_timer()

        data = DATASET["df"].copy()
        data = data[[DATASET["column"]]]

        if DATASET["sampling_method"] == "First":
            data = data.head(DATASET["sampling_size"])
        elif DATASET["sampling_method"] == "Last":
            data = data.tail(DATASET["sampling_size"])
        elif DATASET["sampling_method"] == "Random":
            data = data.sample(n=DATASET["sampling_size"], random_state=42)

        # Dispatch to the methodology function named by "fx" in the config.
        result_df, result_plot = globals()[
            METHODOLOGIES.get(DATASET["methodology"]).get("fx")
        ](data)
        DATASET["result_df"] = result_df

        stop = timeit.default_timer()
        print(f"{stop - start:.2f} seconds")

        return (
            gr.Plot(result_plot, visible=True),
            gr.Dataframe(result_df, visible=True),
            gr.Button(visible=True, interactive=True),
            gr.JSON(visible=True),
        )
    except Exception as e:
        print(e)
        return (
            gr.Plot(visible=False),
            gr.Dataframe(visible=False),
            gr.Button(visible=False),
            gr.JSON(visible=False),
        )


def load_dataset(local_dataset, hf_dataset):
    DATASET["name"] = (
        os.path.splitext(os.path.basename(local_dataset.name))[0]
        if local_dataset
        else hf_dataset
    )
    DATASET["source"] = "Local Dataset" if local_dataset else "HuggingFace Hub"
    DATASET["df"] = (
        pd.read_csv(local_dataset.name)
        if local_dataset
        else hf_load_dataset(hf_dataset, split="train[0:100]").to_pandas()
    )

    columns = DATASET["df"].select_dtypes(include=["object"]).columns.tolist()
    column_corpus = DATASET["df"][columns[0]].tolist()[:5]

    dataset_sampling_method = gr.Radio(
        label="Scope",
        info="Determines the scope of the dataset to be analyzed",
        choices=["First", "Last", "Random"],
        value="First",
        visible=True,
        interactive=True,
    )

    dataset_sampling_size = gr.Slider(
        label="Number of Entries",
        info=f"Determines the number of entries to be analyzed. Due to computational constraints, the maximum number of entries that can be analyzed is {MAX_THRESHOLD}.",
        minimum=1,
        maximum=min(DATASET["df"].shape[0], MAX_THRESHOLD),
        value=min(DATASET["df"].shape[0], MAX_THRESHOLD),
        visible=True,
        interactive=True,
    )

    dataset_column = gr.Radio(
        label="Column",
        info="Determines the column to be analyzed. These are the columns with text data.",
        choices=columns,
        value=columns[0],
        visible=True,
        interactive=True,
    )

    dataset_column_corpus = gr.Dataframe(
        value=pd.DataFrame({columns[0]: column_corpus}), visible=True
    )

    dataset_import_btn = gr.Button(
        value="Import Dataset",
        interactive=True,
        variant="primary",
        visible=True,
    )

    return (
        dataset_sampling_method,
        dataset_sampling_size,
        dataset_column,
        dataset_column_corpus,
        dataset_import_btn,
    )


def show_hf_dataset_search_results(hf_dataset):
    choices = [
        dataset.id
        for dataset in list_datasets(
            filter=DatasetFilter(dataset_name=hf_dataset, language="en"), limit=10
        )
    ]

    return (
        gr.Button(
            value="Load",
            interactive=True,
            variant="secondary",
            visible=True,
        ),
        gr.Radio(
            label="HuggingFace Hub Search Results",
            info="Select the dataset to be imported",
            choices=choices,
            value=choices[0],
            interactive=True,
            visible=True,
        ),
    )


def import_dataset(dataset_sampling_method, dataset_sampling_size, dataset_column):
    DATASET["sampling_method"] = dataset_sampling_method
    DATASET["sampling_size"] = dataset_sampling_size
    DATASET["column"] = dataset_column

    return (
        gr.Markdown(
            f"## Results (Dataset: {'✅' if DATASET['name'] else '❎'}) (Methodology: {'✅' if DATASET['methodology'] else '❎'})"
        ),
        gr.Button(
            value="Evaluate",
            interactive=bool(DATASET["name"] and DATASET["methodology"]),
            variant="primary",
            visible=True,
        ),
    )


def import_methodology(methodology):
    DATASET["methodology"] = methodology

    return (
        gr.Markdown(
            f"## Results (Dataset: {'✅' if DATASET['name'] else '❎'}) (Methodology: {'✅' if DATASET['methodology'] else '❎'})"
        ),
        gr.Markdown(
            METHODOLOGIES[methodology]["description"],
            visible=True,
        ),
        gr.Button(
            value="Evaluate",
            interactive=bool(DATASET["name"] and DATASET["methodology"]),
            variant="primary",
            visible=True,
        ),
    )


BiasAware = gr.Blocks(title="BiasAware: Dataset Bias Detection")

with BiasAware:
    gr.Markdown(
        """
# BiasAware: Dataset Bias Detection

BiasAware is a specialized tool for detecting and quantifying biases within datasets used for Natural Language Processing (NLP) tasks. NLP training datasets frequently mirror the inherent biases of their source materials, resulting in AI models that unintentionally perpetuate stereotypes, exhibit underrepresentation, and showcase skewed perspectives.
"""
    )

    with gr.Row():
        with gr.Column(scale=1):
            dataset_title = gr.Markdown("## Dataset")

            dataset_import_type = gr.Radio(
                label="Import Type",
                info="Determines the mode of importing the dataset",
                choices=["Local Dataset", "HuggingFace Hub"],
                value="Local Dataset",
            )

            local_dataset = gr.File(
                label="Dataset", file_types=["csv"], value=None, visible=True
            )
            local_dataset_examples = gr.Examples(
                examples=[
                    os.path.join(os.path.dirname(__file__), "data", filename)
                    for filename in os.listdir(
                        os.path.join(os.path.dirname(__file__), "data")
                    )
                    if filename.endswith(".csv")
                ],
                inputs=local_dataset,
                label="Local Examples",
            )
            hf_dataset = gr.Textbox(visible=False)

            with gr.Row():
                with gr.Column(scale=1):
                    dataset_load_btn = gr.Button(visible=False)
                with gr.Column(scale=1):
                    dataset_import_btn = gr.Button(visible=False)

            dataset_sampling_method = gr.Radio(visible=False)
            dataset_sampling_size = gr.Slider(visible=False)
            dataset_column = gr.Radio(visible=False)
            dataset_column_corpus = gr.Dataframe(visible=False)

        with gr.Column(scale=2):
            methodology_title = gr.Markdown("## Methodology")

            methodology = gr.Radio(
                label="Methodology",
                info="Determines the methodology to be used for bias detection",
                choices=list(METHODOLOGIES.keys()),
            )
            methodology_description = gr.Markdown(visible=False)

        with gr.Column(scale=2):
            result_title = gr.Markdown("## Results (Dataset: ❎) (Methodology: ❎)")

            evaluation_btn = gr.Button(
                value="Evaluate",
                interactive=False,
                variant="primary",
                visible=True,
            )
            result_plot = gr.Plot(show_label=False, container=False)
            result_df = gr.DataFrame(visible=False)

            generate_avid_report_btn = gr.Button(
                value="Generate AVID Report",
                interactive=False,
                variant="primary",
            )
            avid_report = gr.JSON(label="AVID Report", visible=False)

    #
    # Event Handlers
    #

    dataset_import_type.input(
        fn=lambda import_type: (
            gr.File(label="Dataset", file_types=["csv"], value=None, visible=True)
            if import_type == "Local Dataset"
            else gr.File(value=None, visible=False),
            gr.Textbox(
                label="HuggingFace Hub",
                placeholder="Search for a dataset",
                value="amazon_multi",
                interactive=True,
                visible=True,
            )
            if import_type == "HuggingFace Hub"
            else gr.Textbox(visible=False),
            gr.Button(visible=False),
            gr.Radio(visible=False),
            gr.Slider(visible=False),
            gr.Radio(visible=False),
            gr.Dataframe(visible=False),
            gr.Button(visible=False),
        ),
        inputs=[dataset_import_type],
        outputs=[
            local_dataset,
            hf_dataset,
            dataset_load_btn,
            dataset_sampling_method,
            dataset_sampling_size,
            dataset_column,
            dataset_column_corpus,
            dataset_import_btn,
        ],
    )

    local_dataset.change(
        fn=lambda _: gr.Button(
            value="Load",
            interactive=True,
            variant="secondary",
            visible=True,
        ),
        inputs=[local_dataset],
        outputs=[dataset_load_btn],
    )

    hf_dataset.submit(
        fn=show_hf_dataset_search_results,
        inputs=[hf_dataset],
        outputs=[dataset_load_btn],
    )

    dataset_load_btn.click(
        fn=load_dataset,
        inputs=[local_dataset, hf_dataset],
        outputs=[
            dataset_sampling_method,
            dataset_sampling_size,
            dataset_column,
            dataset_column_corpus,
            dataset_import_btn,
        ],
    )

    dataset_column.input(
        fn=lambda column: gr.Dataframe(
            value=pd.DataFrame({column: DATASET["df"][column].tolist()[:5]}),
            visible=True,
        ),
        inputs=[dataset_column],
        outputs=[dataset_column_corpus],
    )

    dataset_import_btn.click(
        fn=import_dataset,
        inputs=[
            dataset_sampling_method,
            dataset_sampling_size,
            dataset_column,
        ],
        outputs=[result_title, evaluation_btn],
    )

    methodology.input(
        fn=import_methodology,
        inputs=[methodology],
        outputs=[result_title, methodology_description, evaluation_btn],
    )

    evaluation_btn.click(
        fn=evaluate,
        inputs=None,
        outputs=[result_plot, result_df, generate_avid_report_btn, avid_report],
    )
    generate_avid_report_btn.click(
        fn=generate_avid_report, inputs=None, outputs=[avid_report]
    )


if __name__ == "__main__":
    BiasAware.launch()
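# BiasAware.launch() serves the app on a local Gradio server (by default at
# http://127.0.0.1:7860). If a temporary public link is needed, Gradio also
# supports launch(share=True); that is an optional tweak, not something this
# app configures.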