import gradio as gr
import pandas as pd
from datasets import load_dataset

# Running totals, accumulated across the per-subdomain submissions.
# Note: these are module-level variables, so every submission (and, on a
# hosted app, every visitor) updates the same counters.
total_yes_count = 0
total_count = 0


def calculate_percentage(*answers):
    """
    Calculate the percentage of 'yes' answers for one subdomain.

    :param answers: Boolean checkbox values, one per indicator ('yes' = True).
    :returns: Percentage of 'yes' answers, formatted as a string.
    """
    global total_yes_count, total_count
    yes_count = sum(answers)
    total_yes_count += yes_count
    total_count += len(answers)
    percentage = yes_count / len(answers) * 100
    return f"{percentage:.1f}%"


def calculate_overall_percentage():
    """
    Calculate the overall percentage of 'yes' answers across all subdomains.

    :returns: Overall percentage of 'yes' answers, formatted as a string.
    """
    global total_yes_count, total_count
    if total_count != 100:
        return "Make sure you have submitted your answers in all the tabs."
    # The index has exactly 100 indicators, so the yes-count is already a percentage.
    overall_percentage = total_yes_count
    return f"{overall_percentage}%"


# Load the indicators and group them by domain and subdomain
dataset = load_dataset("mariagrandury/fmti-indicators")
df = pd.DataFrame(dataset["train"])
grouped = df.groupby(["Domain", "Subdomain"])

# Create one interface per group of indicators, with one checkbox per indicator
interfaces = []
tab_names = []
for (domain, subdomain), group in grouped:
    questions = group["Definition"].tolist()
    inputs = [gr.Checkbox(label=question) for question in questions]
    output = gr.Textbox(label="Subdomain Percentage")
    iface = gr.Interface(
        fn=calculate_percentage,
        inputs=inputs,
        outputs=output,
        title=f"{domain} - {subdomain}",
        allow_flagging="never",
    )
    interfaces.append(iface)
    tab_names.append(subdomain)

# Add a final tab that computes the overall transparency score
overall_button = gr.Interface(
    fn=calculate_overall_percentage,
    inputs=[],
    outputs=gr.Textbox(label="Overall Percentage"),
    title="Transparency Score",
    allow_flagging="never",
)
interfaces.append(overall_button)
tab_names.append("Total Transparency Score")

# Combine blocks to create the demo
with gr.Blocks(title="FMTI") as demo:
    gr.Markdown(
        """
        # Transparency Self-Assessment (FMTI)

        This tool allows you to self-assess the transparency of your model development,
        based on the Foundation Model Transparency Index published by the Stanford
        Center for Research on Foundation Models.
        """
    )
    with gr.Accordion(label="Instructions", open=True):
        gr.Markdown(
            """
            The FMTI defines 100 indicators that characterize transparency for foundation
            model developers. They are divided into three broad domains: "Upstream" (model
            building), "Model" (model properties and function) and "Downstream" (model
            distribution). In addition to these top-level domains, the indicators are also
            grouped into subdomains.

            Each tab below contains the yes-or-no questions for one subdomain. Read all the
            questions and check the boxes corresponding to the 'yes' responses. "Submit"
            your answers before proceeding to the next tab. Upon reaching the final tab,
            "Total Transparency Score", click on "Generate" to compute your model's overall
            transparency score.

            More information about the FMTI is available at https://crfm.stanford.edu/fmti/.

            Please note: this tool is research work and NOT a commercial or legal product.
            """
        )

    # Create the tabbed interface: one tab per subdomain plus the final score tab
    gr.TabbedInterface(
        interface_list=interfaces,
        tab_names=tab_names,
        title="",
    )

    gr.Markdown(
        """
        ## Compare your results

        The original study evaluated the developers of 10 top foundation models.
        How transparent are you compared to the developers in the 2023 study?
        Check the graphics below!
        Images source: https://crfm.stanford.edu/fmti
        """
    )
    with gr.Row():
        gr.Image(
            "https://crfm.stanford.edu/fmti/fmti-flagship.jpg",
            show_label=False,
            show_download_button=False,
        )
        gr.Image(
            "https://crfm.stanford.edu/fmti/subdomain-scores.png",
            show_label=False,
            show_download_button=False,
        )

demo.launch()
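
# --- Illustrative alternative (not used by the app above) ---------------------
# The module-level counters used by calculate_percentage() are updated by every
# submission from every visitor, so concurrent users of a hosted app would mix
# their scores. Below is a minimal sketch of a per-session variant that keeps the
# running totals in gr.State instead of globals. It reuses the `grouped` data
# from above; `build_scoring_demo` and all names inside it are illustrative and
# not part of the original app.


def build_scoring_demo(indicator_groups):
    """Build a Blocks demo whose score counters live in per-session gr.State."""
    with gr.Blocks() as scoring_demo:
        # One [yes_count, answered_count] pair per browser session.
        totals = gr.State([0, 0])
        with gr.Tabs():
            for (domain, subdomain), group in indicator_groups:
                with gr.Tab(subdomain):
                    boxes = [gr.Checkbox(label=q) for q in group["Definition"]]
                    score = gr.Textbox(label="Subdomain Percentage")

                    def score_tab(state, *answers):
                        yes = sum(answers)
                        state = [state[0] + yes, state[1] + len(answers)]
                        return f"{yes / len(answers) * 100:.1f}%", state

                    gr.Button("Submit").click(score_tab, [totals, *boxes], [score, totals])
            with gr.Tab("Total Transparency Score"):
                overall = gr.Textbox(label="Overall Percentage")
                gr.Button("Generate").click(
                    lambda state: f"{state[0]}%"
                    if state[1] == 100
                    else "Make sure you have submitted your answers in all the tabs.",
                    totals,
                    overall,
                )
    return scoring_demo


# Usage (instead of the demo above): build_scoring_demo(grouped).launch()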