import json import os import timeit from datetime import date import gradio as gr import pandas as pd from avidtools.datamodels.components import * from avidtools.datamodels.enums import * from avidtools.datamodels.report import Report from datasets import load_dataset as hf_load_dataset from scripts.genbit import * from scripts.gender_distribution import * from scripts.gender_profession_bias import * SAMPLING_SIZE_THRESHOLD = 2000 METHODOLOGIES = json.load(open("config/methodologies.json", "r")) EVALUATION = { "dataset_id": None, "source": None, "df": None, "sampling_method": None, "sampling_size": None, "column": None, "methodology": None, "result_df": None, } def generate_avid_report(): dataset_id = EVALUATION["dataset_id"] methodology = EVALUATION["methodology"] result_json = EVALUATION["result_df"].to_dict(orient="list") report = Report() report.affects = Affects( developer=[], deployer=["Hugging Face"] if EVALUATION["source"] == "HuggingFace Hub" else [], artifacts=[Artifact(type=ArtifactTypeEnum.dataset, name=dataset_id)], ) report.problemtype = Problemtype( classof=ClassEnum.na, type=TypeEnum.detection, description=LangValue( lang="eng", value="Dataset Bias Detection using BiasAware" ), ) report.metrics = [ Metric( name=methodology, detection_method=Detection(type=MethodEnum.test, name=methodology), results=result_json, ) ] report.references = ( [ Reference( type="", label="""{dataset_id} on Hugging Face""".format(dataset_id=dataset_id), url="""https://huggingface.co/datasets/{dataset_id}""".format( dataset_id=dataset_id ), ) ] if EVALUATION["source"] == "HuggingFace Hub" else [] ) report.description = LangValue( lang="eng", value=METHODOLOGIES[methodology]["short_description"] ) report.impact = Impact( avid=AvidTaxonomy( vuln_id="", risk_domain=["Ethics"], sep_view=[SepEnum.E0101], lifecycle_view=[LifecycleEnum.L03], taxonomy_version="0.2", ) ) report.reported_date = date.today() return gr.JSON(value=report.model_dump(), visible=True) def evaluate(): if EVALUATION["methodology"] == "GenBiT (Microsoft Gender Bias Tool)": EVALUATION["sampling_size"] = min(EVALUATION["sampling_size"], 100) print( f"Dataset : {EVALUATION['dataset_id']}\n" f"Source : {EVALUATION['source']}\n" f"Sampling Method : {EVALUATION['sampling_method']}\n" f"Sampling Size : {EVALUATION['sampling_size']}\n" f"Column : {EVALUATION['column']}\n" f"Methodology : {EVALUATION['methodology']}\n" f"Time Taken : ", end="", ) try: start = timeit.default_timer() data = EVALUATION["df"].copy() data = data[[EVALUATION["column"]]] if EVALUATION["sampling_method"] == "First": data = data.head(EVALUATION["sampling_size"]) elif EVALUATION["sampling_method"] == "Last": data = data.tail(EVALUATION["sampling_size"]) elif EVALUATION["sampling_method"] == "Random": data = data.sample(n=EVALUATION["sampling_size"], random_state=42) result_df, result_plot = globals()[ METHODOLOGIES.get(EVALUATION["methodology"]).get("fx") ](data) EVALUATION["result_df"] = result_df stop = timeit.default_timer() print(f"{stop - start:.2f} seconds") return ( gr.Plot(result_plot, visible=True), gr.Dataframe(result_df, visible=True), gr.Button(visible=True, interactive=True), gr.JSON(visible=True), ) except Exception as e: print(e) return ( gr.Plot(visible=False), gr.Dataframe(visible=False), gr.Button(visible=False), gr.JSON(visible=False), ) def load_dataset(local_dataset, hf_dataset): try: if local_dataset: EVALUATION["dataset_id"] = os.path.splitext( os.path.basename(local_dataset.name) )[0] EVALUATION["source"] = "Local Dataset" EVALUATION["df"] = pd.read_csv(local_dataset.name) else: EVALUATION["dataset_id"] = hf_dataset EVALUATION["source"] = "HuggingFace Hub" EVALUATION["df"] = hf_load_dataset( hf_dataset, split="train[0:100]" ).to_pandas() columns = EVALUATION["df"].select_dtypes(include=["object"]).columns.tolist() column_corpus = EVALUATION["df"][columns[0]].tolist()[:5] dataset_sampling_method = gr.Radio( label="Scope", info="Determines the scope of the dataset to be analyzed", choices=["First", "Last", "Random"], value="First", visible=True, interactive=True, ) dataset_sampling_size = gr.Slider( label=f"Number of Entries", info=f"Determines the number of entries to be analyzed. Due to computational constraints, the maximum number of entries that can be analyzed is {SAMPLING_SIZE_THRESHOLD}.", minimum=1, maximum=min(EVALUATION["df"].shape[0], SAMPLING_SIZE_THRESHOLD), value=min(EVALUATION["df"].shape[0], SAMPLING_SIZE_THRESHOLD), visible=True, interactive=True, ) dataset_column = gr.Radio( label="Column", info="Determines the column to be analyzed. These are the columns with text data.", choices=columns, value=columns[0], visible=True, interactive=True, ) dataset_column_corpus = gr.Dataframe( value=pd.DataFrame({f"{columns[0]}": column_corpus}), visible=True ) dataset_import_btn = gr.Button( value="Import Dataset", interactive=True, variant="primary", visible=True, ) return ( dataset_sampling_method, dataset_sampling_size, dataset_column, dataset_column_corpus, dataset_import_btn, ) except FileNotFoundError as e: print(f"FileNotFoundError: {e}") return ( gr.Radio(visible=False), gr.Slider(visible=False), gr.Radio(visible=False), gr.Dataframe(visible=False), gr.Button(visible=False), ) def import_dataset(dataset_sampling_method, dataset_sampling_size, dataset_column): EVALUATION["sampling_method"] = dataset_sampling_method EVALUATION["sampling_size"] = dataset_sampling_size EVALUATION["column"] = dataset_column return ( gr.Markdown( "## Results (Dataset: {}{}) (Methodology: {}{})".format( "\u2705" if EVALUATION["dataset_id"] else "\u274E", "", "\u2705" if EVALUATION["methodology"] else "\u274E", "", ) ), gr.Button( value="Evaluate", interactive=( True if EVALUATION["dataset_id"] and EVALUATION["methodology"] else False ), variant="primary", visible=True, ), ) def import_methodology(methodology): EVALUATION["methodology"] = methodology return ( gr.Markdown( "## Results (Dataset: {}{}) (Methodology: {}{})".format( "\u2705" if EVALUATION["dataset_id"] else "\u274E", "", "\u2705" if EVALUATION["methodology"] else "\u274E", "", ) ), gr.Markdown( METHODOLOGIES[methodology]["description"], visible=True, ), gr.Button( value="Evaluate", interactive=( True if EVALUATION["dataset_id"] and EVALUATION["methodology"] else False ), variant="primary", visible=True, ), ) BiasAware = gr.Blocks(title="BiasAware: Dataset Bias Detection") with BiasAware: gr.Markdown( """ # BiasAware: Dataset Bias Detection BiasAware is a specialized tool for detecting and quantifying biases within datasets used for Natural Language Processing (NLP) tasks. NLP training datasets frequently mirror the inherent biases of their source materials, resulting in AI models that unintentionally perpetuate stereotypes, exhibit underrepresentation, and showcase skewed perspectives. """ ) with gr.Row(): with gr.Column(scale=1): dataset_title = gr.Markdown("## Dataset") dataset_import_type = gr.Radio( label="Import Type", info="Determines the mode of importing the dataset", choices=["Local Dataset", "HuggingFace Hub"], value="Local Dataset", ) local_dataset = gr.File( label="Dataset", file_types=["csv"], value=None, visible=True ) local_dataset_examples = gr.Examples( examples=[ os.path.join(os.path.dirname(__file__), "data", filename) for filename in os.listdir( os.path.join(os.path.dirname(__file__), "data") ) if filename.endswith(".csv") ], inputs=local_dataset, label="Local Examples", ) hf_dataset = gr.Textbox(visible=False) with gr.Row(): with gr.Column(scale=1): dataset_load_btn = gr.Button(visible=False) with gr.Column(scale=1): dataset_import_btn = gr.Button(visible=False) dataset_sampling_method = gr.Radio(visible=False) dataset_sampling_size = gr.Slider(visible=False) dataset_column = gr.Radio(visible=False) dataset_column_corpus = gr.Dataframe(visible=False) with gr.Column(scale=2): methodology_title = gr.Markdown("## Methodology") methodology = gr.Radio( label="Methodology", info="Determines the methodology to be used for bias detection", choices=METHODOLOGIES.keys(), ) methodology_description = gr.Markdown(visible=False) with gr.Column(scale=2): result_title = gr.Markdown( "## Results (Dataset: \u274E) (Methodology: \u274E)" ) evaluation_btn = gr.Button( value="Evaluate", interactive=False, variant="primary", visible=True, ) result_plot = gr.Plot(show_label=False, container=False) result_df = gr.DataFrame(visible=False) generate_avid_report_btn = gr.Button( value="Generate AVID Report", interactive=False, variant="primary", ) avid_report = gr.JSON(label="AVID Report", visible=False) # # Event Handlers # dataset_import_type.input( fn=lambda import_type: ( gr.File(label="Dataset", file_types=["csv"], value=None, visible=True) if import_type == "Local Dataset" else gr.Textbox(visible=False), gr.Textbox( label="HuggingFace Hub", placeholder="Search for a dataset", value="imdb", interactive=True, visible=True, ) if import_type == "HuggingFace Hub" else gr.File(value=None, visible=False), gr.Button(visible=False), gr.Radio(visible=False), gr.Slider(visible=False), gr.Radio(visible=False), gr.Dataframe(visible=False), gr.Button(visible=False), ), inputs=[dataset_import_type], outputs=[ local_dataset, hf_dataset, dataset_load_btn, dataset_sampling_method, dataset_sampling_size, dataset_column, dataset_column_corpus, dataset_import_btn, ], ) local_dataset.change( fn=lambda _: gr.Button( value=f"Load", interactive=True, variant="secondary", visible=True, ), inputs=[local_dataset], outputs=[dataset_load_btn], ) hf_dataset.submit( fn=lambda _: gr.Button( value=f"Load", interactive=True, variant="secondary", visible=True, ), inputs=[hf_dataset], outputs=[dataset_load_btn], ) dataset_load_btn.click( fn=load_dataset, inputs=[local_dataset, hf_dataset], outputs=[ dataset_sampling_method, dataset_sampling_size, dataset_column, dataset_column_corpus, dataset_import_btn, ], ) dataset_column.input( fn=lambda column: gr.Dataframe( value=pd.DataFrame( {f"{column}": EVALUATION["df"][column].tolist()[:5]}, ), visible=True, ), inputs=[dataset_column], outputs=[dataset_column_corpus], ) dataset_import_btn.click( fn=import_dataset, inputs=[ dataset_sampling_method, dataset_sampling_size, dataset_column, ], outputs=[result_title, evaluation_btn], ) methodology.input( fn=import_methodology, inputs=[methodology], outputs=[result_title, methodology_description, evaluation_btn], ) evaluation_btn.click( fn=evaluate, inputs=None, outputs=[result_plot, result_df, generate_avid_report_btn, avid_report], ) generate_avid_report_btn.click( fn=generate_avid_report, inputs=None, outputs=[avid_report] ) if __name__ == "__main__": BiasAware.launch()