import os
import tempfile

import gradio as gr

from allpreds import make_preds


def get_credentials():
    """Write the service-account JSON from the SERVICE_ACC_KEY env var to a
    temporary file and return its path, so that Google client libraries can
    pick it up via GOOGLE_APPLICATION_CREDENTIALS."""
    creds_json_str = os.getenv("SERVICE_ACC_KEY")
    # Fail fast with a clear message if the key is missing; otherwise
    # temp.write(None) would raise a confusing TypeError.
    if creds_json_str is None:
        raise RuntimeError("SERVICE_ACC_KEY environment variable is not set")
    with tempfile.NamedTemporaryFile(mode="w+", delete=False, suffix=".json") as temp:
        temp.write(creds_json_str)
        temp_filename = temp.name
    return temp_filename


os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = get_credentials()

# 'examples' is a list of lists: each inner list is one example input.
# Here there is a single example, so len(examples) == 1.
# Each example has two elements, matching the two inputs below: the text
# and the bias type.
examples = [
    [
        '''Women are often considered to be more emotional and sensitive than men, which can make them less suitable for high-stakes decision-making roles in fields such as finance and politics. Men, on the other hand, are seen as logical and rational, making them better equipped to handle the pressures of leadership.''',
        "Gender Bias",
    ]
]

# 'demo' defines the layout of the web page (the arrangement of inputs,
# outputs, etc.). A Gradio Interface needs three things: a function, inputs,
# and outputs. The inputs are passed to the function as arguments, and its
# return values are rendered by the output components.
demo = gr.Interface(
    fn=make_preds,
    # Two inputs: the content (a text area) and the bias type (a radio button).
    inputs=[
        gr.TextArea(label="Content", show_label=True,
                    placeholder="Enter some text to detect bias....."),
        gr.Radio(choices=["Gender Bias", "Racial Bias", "Political Bias", "Hate Speech"],
                 label="Bias Type"),
    ],
    # Three outputs: the HighlightedText component showing the highlighted
    # text, the bias-percentage label, and the HTML chart.
    outputs=[
        gr.HighlightedText(combine_adjacent=True, label="Bias Analysis", show_label=True),
        gr.Label(label="Percentage", show_label=True),
        gr.HTML(label="Bias Score", show_label=True),
    ],
    examples=examples,
    title="Bias Analyzer",
    description=(
        "This app lets you detect and analyze different types of bias in "
        "written content. Right now we have support for four bias categories: "
        "gender, racial, political, and hate speech. More categories will be "
        "added soon!"
    ),
)

demo.launch()
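
# ---------------------------------------------------------------------------
# Note on the expected return values (a sketch, not the actual allpreds code):
# the three output components above constrain what make_preds must return.
# A minimal stub compatible with this interface could look like:
#
#     def make_preds(content, bias_type):
#         highlights = [("some flagged span", "BIASED"), (" the rest", None)]
#         scores = {"Biased": 0.72, "Not Biased": 0.28}
#         chart_html = "<div><!-- chart markup --></div>"
#         return highlights, scores, chart_html
#
# gr.HighlightedText renders a list of (text, label-or-None) tuples,
# gr.Label renders a dict mapping class names to confidences, and
# gr.HTML renders a raw HTML string. The label names shown here ("BIASED",
# "Biased", "Not Biased") are placeholders, not the ones allpreds uses.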