import os

import gradio as gr
from PIL import Image


def open_ims(model_dir, profession):
    """Load up to nine example generations for the given profession."""
    if not profession:
        return []
    dirname = os.path.join('images', model_dir, profession)
    # Sort the listing so the gallery order is deterministic across runs.
    images = [
        Image.open(os.path.join(dirname, im)).convert("RGB")
        for im in sorted(os.listdir(dirname))
    ]
    return images[:9]


def open_stable_ims(profession):
    return open_ims('stable_diffusion', profession)


def open_fair_ims(profession):
    return open_ims('fair_diffusion', profession)


# Professions are inferred from the directory layout shared by both models.
professions = sorted(os.listdir('images/fair_diffusion'))

with gr.Blocks() as demo:
    gr.Markdown("# Fair Diffusion Explorer")
    gr.Markdown(
        "#### Choose from the occupations below to compare how Stable Diffusion "
        "(left) and Fair Diffusion (right) represent different professions."
    )
    with gr.Row():
        with gr.Column():
            gr.Markdown('## Stable Diffusion Generations')
            choice1 = gr.Dropdown(professions, label="Choose a profession", multiselect=False, interactive=True)
            images1 = gr.Gallery(label="Images").style(grid=[3], height="auto")
        with gr.Column():
            gr.Markdown('## Fair Diffusion Generations')
            choice2 = gr.Dropdown(professions, label="Choose a profession", multiselect=False, interactive=True)
            images2 = gr.Gallery(label="Images").style(grid=[3], height="auto")
    gr.Markdown(
        "We present a novel strategy, called **Fair Diffusion**, to attenuate biases "
        "after the deployment of generative text-to-image models. Specifically, we "
        "demonstrate shifting a bias, based on human instructions, in any direction, "
        "yielding arbitrary new proportions for, e.g., identity groups. As our "
        "empirical evaluation demonstrates, this introduced control enables "
        "instructing generative image models on fairness, with no data filtering "
        "and no additional training required. For the full paper by Friedrich et al., "
        "see [here](https://arxiv.org/pdf/2302.10893.pdf)."
    )

    # Refresh each gallery whenever its dropdown selection changes.
    choice1.change(open_stable_ims, choice1, [images1])
    choice2.change(open_fair_ims, choice2, [images2])

demo.launch()
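
# Note: gr.Gallery can also display image file paths directly, which avoids
# decoding every image with PIL on each selection. A minimal sketch under the
# same directory layout as above (open_paths is a hypothetical helper, not
# part of the original app):
#
#   def open_paths(model_dir, profession):
#       dirname = os.path.join('images', model_dir, profession)
#       return [os.path.join(dirname, im) for im in sorted(os.listdir(dirname))][:9]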