# Diffusion Faces Cluster Explorer — Hugging Face Space UI
# Author: meg (HF staff); PR: sasha_ui (#1); commit 313342d
import json
import gradio as gr
import os
from PIL import Image
import plotly.graph_objects as go
import plotly.express as px
TITLE = "Diffusion Faces Cluster Explorer"


def _load_clusters(num_clusters):
    """Load the pre-computed BLIP cluster file for *num_clusters* clusters.

    Uses a context manager so the file handle is closed promptly
    (the original ``json.load(open(...))`` leaked the handle).
    """
    with open(f"clusters/id_all_blip_clusters_{num_clusters}.json") as f:
        return json.load(f)


clusters_12 = _load_clusters(12)
clusters_24 = _load_clusters(24)
clusters_48 = _load_clusters(48)

# Lookup table: cluster count -> cluster data, keyed by the radio options below.
clusters_by_size = {
    12: clusters_12,
    24: clusters_24,
    48: clusters_48,
}
def show_cluster(cl_id, num_clusters):
    """Build the outputs for one cluster: image count, three bar charts, gallery.

    Args:
        cl_id: index of the cluster to display (Gradio may pass None/0 on load).
        num_clusters: total cluster count (12, 24 or 48), selects the dataset.

    Returns:
        Tuple of (number of images in cluster, gender figure, model figure,
        ethnicity figure, list of (PIL.Image, caption) pairs), matching the
        order of the Gradio ``outputs=[a, b, c, d, gallery]`` wiring.
    """
    # Gradio can fire events before the widgets have values; fall back to
    # the defaults. `not cl_id` also matches 0, but 0 is the default anyway.
    if not cl_id:
        cl_id = 0
    if not num_clusters:
        num_clusters = 12
    cl_dct = clusters_by_size[num_clusters][cl_id]

    # Caption cleanup: strip prompt boilerplate, tag the generating model.
    caption_subs = (
        ("Photo_portrait_of_an_", ""),
        ("Photo_portrait_of_a_", ""),
        ("SD_v2_random_seeds_identity_", "(SD v.2) "),
        ("dataset-identities-dalle2_", "(Dall-E 2) "),
        ("SD_v1.4_random_seeds_identity_", "(SD v.1.4) "),
    )

    images = []
    # Bound by the actual list length: the original hard-coded range(6)
    # raised IndexError for clusters with fewer than 6 representative images.
    for i in range(min(6, len(cl_dct["img_path_list"]))):
        # Stored paths are URLs; drop the scheme/host parts (first 3 segments
        # after splitting on "//") and rebuild a local relative path.
        img_path = "/".join(
            [st.replace("/", "") for st in cl_dct["img_path_list"][i].split("//")][3:]
        )
        caption = "_".join([img_path.split("/")[0], img_path.split("/")[-1]])
        for old, new in caption_subs:
            caption = caption.replace(old, new)
        caption = caption.replace("_", " ")
        images.append(
            (Image.open(os.path.join("identities-images", img_path)), caption)
        )

    def _bar_fig(label_counts):
        # One bar per label, colored with Plotly's qualitative G10 palette.
        counts = dict(label_counts)
        fig = go.Figure()
        fig.add_trace(go.Bar(x=list(counts.keys()),
                             y=list(counts.values()),
                             marker_color=px.colors.qualitative.G10))
        return fig

    model_fig = _bar_fig(cl_dct["labels_model"])
    gender_fig = _bar_fig(cl_dct["labels_gender"])
    ethnicity_fig = _bar_fig(cl_dct["labels_ethnicity"])

    return (len(cl_dct["img_path_list"]),
            gender_fig,
            model_fig,
            ethnicity_fig,
            images)
# Page layout and event wiring (legacy gradio 3.x API: `.style(grid=...)`).
with gr.Blocks(title=TITLE) as demo:
    gr.Markdown(f"# {TITLE}")
    gr.Markdown("## This Space lets you explore the data generated from [DiffusionBiasExplorer](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer).")
    gr.HTML("""<span style="color:red" font-size:smaller>⚠️ DISCLAIMER: the images displayed by this tool were generated by text-to-image models and may depict offensive stereotypes or contain explicit content.</span>""")
    # Choices must match the keys of `clusters_by_size`.
    num_clusters = gr.Radio([12,24,48], value=12, label="How many clusters do you want to make from the data?")
    with gr.Row():
        with gr.Column(scale=4):
            # 3x3 grid of the cluster's representative images.
            gallery = gr.Gallery(label="Most representative images in cluster").style(grid=(3,3))
        with gr.Column():
            # NOTE(review): `maximum` is evaluated once at build time from the
            # radio's initial value (12 -> max 11) and is never updated when the
            # user selects 24 or 48 clusters, so higher cluster ids are
            # unreachable via the slider — confirm and fix upstream.
            cluster_id = gr.Slider(minimum=0, maximum=num_clusters.value-1, step=1, value=0, label="Click to move between clusters")
            a = gr.Text(label="Number of images")
    with gr.Row():
        c = gr.Plot(label="Model makeup of cluster")
        b = gr.Plot(label="Gender label makeup of cluster")
        d = gr.Plot(label="Ethnicity label makeup of cluster")
    # All three triggers re-render the same outputs: count, gender/model/
    # ethnicity plots, and the gallery (order matches show_cluster's return).
    demo.load(fn=show_cluster, inputs=[cluster_id, num_clusters], outputs=[a,b,c,d, gallery])
    num_clusters.change(fn=show_cluster, inputs=[cluster_id, num_clusters], outputs=[a,b,c,d, gallery])
    cluster_id.change(fn=show_cluster, inputs=[cluster_id, num_clusters], outputs=[a,b,c,d, gallery])
if __name__ == "__main__":
    # queue() serializes event handling; debug=True surfaces tracebacks in logs.
    demo.queue().launch(debug=True)