import gradio as gr
# import torch
# from torch import autocast
# from diffusers import StableDiffusionPipeline
from datasets import load_dataset
from PIL import Image
from io import BytesIO
# import base64
# import re
import os
import requests
import json
import base64
# from urllib import parse

from share_btn import community_icon_html, loading_icon_html, share_js

is_gpu_busy = False


def safe_sd(prompt, n_samples, steps, scale, seed, mode):
    """POST one generation request to the inference backend.

    Args:
        prompt: text prompt for the diffusion model.
        n_samples: number of images requested ("n" in the API payload).
        steps: number of diffusion steps.
        scale: classifier-free guidance scale ("guidance_scale").
        seed: RNG seed for reproducible generations.
        mode: backend mode string, "text2img" or "safe_text2img".

    Returns:
        The raw ``requests.Response``; callers decode the JSON body.
    """
    url = os.getenv('BACKEND_URL_SAFE_NEW')
    token = os.getenv('BACKEND_TOKEN')
    user = os.getenv('BACKEND_USER')
    res = requests.post(
        url,
        json={
            "model": "togethercomputer/UniversalSD",
            "prompt": prompt,
            "n": n_samples,
            "mode": mode,
            "steps": steps,
            "seed": seed,
            "guidance_scale": scale,
        },
        headers={
            "Authorization": token,
            "User-Agent": user,
        },
        # requests has no default timeout; without one a dead backend
        # hangs this worker forever. Generous bound for slow generations.
        timeout=300,
    )
    return res


def _decode_choices(response, check_error):
    """Decode the backend JSON response into a list of PIL images.

    Args:
        response: ``requests.Response`` from :func:`safe_sd`.
        check_error: when True, surface a backend-reported error
            (``output.result_type == "error"``) as a ``gr.Error``.

    Raises:
        gr.Error: if the payload has no ``output`` key, or (when
            ``check_error``) the backend reported an error.
    """
    data = json.loads(response.content)
    if 'output' not in data:
        raise gr.Error("An error occurred.")
    if check_error and data['output']['result_type'] == "error":
        raise gr.Error(data['output']['value'])
    return [
        Image.open(BytesIO(base64.b64decode(choice['image_base64'])))
        for choice in data['output']['choices']
    ]


def infer(prompt, n_samples, steps, scale, seed):
    """Generate images with plain and safe text2img for side-by-side display.

    Runs the same prompt through the backend twice — once in "text2img"
    mode and once in "safe_text2img" mode — and returns the decoded
    images concatenated (plain first, safe second), matching the
    two-column gallery layout.

    Raises:
        gr.Error: on an empty prompt or a backend failure.
    """
    global is_gpu_busy
    # generator = torch.Generator(device=device).manual_seed(seed)
    # print("Is GPU busy? ", is_gpu_busy)
    if prompt == "":
        raise gr.Error("Empty prompt. Please provide a prompt.")

    # NOTE(review): max(50, steps) enforces a floor, not a cap — looks
    # like it may have been meant as min(). Behavior is unchanged either
    # way because the steps slider is pinned to exactly 50.
    steps = max(50, int(steps))
    images = []

    response = safe_sd(prompt, int(n_samples), steps, scale, seed,
                       mode="text2img")
    images.extend(_decode_choices(response, check_error=True))

    # The safe pass intentionally skips the result_type check: it
    # returns replacement images rather than an error payload.
    response = safe_sd(prompt, int(n_samples), steps, scale, seed,
                       mode="safe_text2img")
    images.extend(_decode_choices(response, check_error=False))

    return images


css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: #3a669bff;
            background: #3a669bff;
        }
        input[type='range'] {
            accent-color: #3a669bff;
        }
        .dark input[type='range'] {
            accent-color: #3a669bff;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
        #gallery {
            min-height: 22rem;
            margin-bottom: 15px;
            margin-left: auto;
            margin-right: auto;
            border-bottom-right-radius: .5rem !important;
            border-bottom-left-radius: .5rem !important;
        }
        #gallery>div>.h-full {
            min-height: 20rem;
        }
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        #advanced-btn {
            font-size: .7rem !important;
            line-height: 19px;
            margin-top: 12px;
            margin-bottom: 12px;
            padding: 2px 8px;
            border-radius: 14px !important;
        }
        #advanced-options {
            display: none;
            margin-bottom: 20px;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .acknowledgments h4{
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
        #container-advanced-btns{
            display: flex;
            flex-wrap: wrap;
            justify-content: space-between;
            align-items: center;
        }
        .animate-spin {
            animation: spin 1s linear infinite;
        }
        @keyframes spin {
            from {
                transform: rotate(0deg);
            }
            to {
                transform: rotate(360deg);
            }
        }
        #share-btn-container {
            display: flex;
            padding-left: 0.5rem !important;
            padding-right: 0.5rem !important;
            background-color: #3a669bff;
            justify-content: center;
            align-items: center;
            border-radius: 9999px !important;
            width: 13rem;
        }
        #share-btn {
            all: initial;
            color: #ffffff;
            font-weight: 600;
            cursor: pointer;
            font-family: 'IBM Plex Sans', sans-serif;
            margin-left: 0.5rem !important;
            padding-top: 0.25rem !important;
            padding-bottom: 0.25rem !important;
        }
        #share-btn * {
            all: unset;
        }
        .gr-form{
            flex: 1 1 50%;
            border-top-right-radius: 0;
            border-bottom-right-radius: 0;
        }
        #prompt-container{
            gap: 0;
        }
"""

block = gr.Blocks(css=css)

# Curated example prompts: (prompt, n_samples, steps, guidance_scale, seed).
examples = [
    [
        'a photograph by vanessa beecroft',
        1,
        50,
        7.5,
        24803839,
    ],
    [
        'a gorgeous female photo',
        1,
        50,
        7.5,
        733664822,
    ],
    [
        'a gorgeous male photo',
        1,
        50,
        7.5,
        881355,
    ],
    [
        'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c. leyendecker',
        1,
        50,
        7.5,
        557645701
    ],
    [
        'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and children from bahnhof zoo, detailed ',
        1,
        50,
        9,
        1115417309,
    ],
    [
        'portrait of Sickly diseased dying Samurai warrior, sun shining, photo realistic illustration by greg rutkowski, thomas kindkade, alphonse mucha, loish, norman rockwell.',
        1,
        50,
        10,
        1714108957,
    ],
]

with block:
    # NOTE(review): the original HTML markup of this header was lost when
    # the file was pasted (only the text content survived). The tags below
    # are a minimal reconstruction around the verbatim text — confirm
    # against the deployed Space.
    gr.HTML(
        """
        <div style="text-align: center; max-width: 730px; margin: 0 auto;">
          <h1>Stable Diffusion vs. Safe Stable Diffusion</h1>
          <p>
            Safe Stable Diffusion extends Stable Diffusion with safety guidance.
            In the case of NSFW images it returns the closest non-NSFW images
            instead of a black square. Details can be found in the
            <a href="https://arxiv.org/abs/2211.05105" style="text-decoration: underline;">Safe
            Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models</a>
            paper.
          </p>
        </div>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    elem_id="prompt-text-input",
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Generate image").style(
                    margin=False,
                    rounded=(False, True, True, False),
                    full_width=False,
                )

        # Two-column gallery: left column shows the plain generation,
        # right column the safety-guided one (infer returns them in order).
        gallery = gr.Gallery(
            label="Left: Stable Diffusion, Right: Safe Stable Diffusion",
            show_label=True,
            elem_id="gallery",
        ).style(grid=[2], height="auto")

        with gr.Group(elem_id="container-advanced-btns"):
            advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
            with gr.Group(elem_id="share-btn-container"):
                community_icon = gr.HTML(community_icon_html)
                loading_icon = gr.HTML(loading_icon_html)
                share_button = gr.Button("Share to community", elem_id="share-btn")

        with gr.Row(elem_id="advanced-options"):
            # gr.Markdown("Advanced settings are temporarily unavailable")
            # Samples and steps are intentionally pinned (min == max == value).
            samples = gr.Slider(label="Images", minimum=1, maximum=1, value=1, step=1)
            steps = gr.Slider(label="Steps", minimum=50, maximum=50, value=50, step=1)
            scale = gr.Slider(
                label="Guidance Scale", minimum=7.5, maximum=20, value=7.5, step=0.5
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=2147483647,
                step=1,
                randomize=True,
            )

        # NOTE(review): the extra outputs (icons/share button) are never
        # returned by infer; kept as-is to match the original wiring.
        ex = gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[text, samples, steps, scale, seed],
            outputs=[gallery, community_icon, loading_icon, share_button],
            cache_examples=False,
        )
        ex.dataset.headers = [""]

        text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)
        btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)

        advanced_button.click(
            None,
            [],
            text,
            _js="""
            () => {
                const options = document.querySelector("body > gradio-app").querySelector("#advanced-options");
                options.style.display = ["none", ""].includes(options.style.display) ? "flex" : "none";
            }""",
        )
        share_button.click(
            None,
            [],
            [],
            _js=share_js,
        )

        # NOTE(review): markup reconstructed around the surviving verbatim
        # text, as with the header above — confirm against the deployed Space.
        gr.HTML(
            """
            <div class="acknowledgments">
              <h4>LICENSE</h4>
              <p>The model is licensed with a
              <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" style="text-decoration: underline;">CreativeML Open RAIL-M</a>
              license. The authors claim no rights on the outputs you generate, you are free to use
              them and are accountable for their use which must not go against the provisions set in
              this license. The license forbids you from sharing any content that violates any laws,
              produce any harm to a person, disseminate any personal information that would be meant
              for harm, spread misinformation and target vulnerable groups. For the full list of
              restrictions please
              <a href="https://huggingface.co/spaces/CompVis/stable-diffusion-license" style="text-decoration: underline;">read the license</a>.</p>
              <h4>Biases and content acknowledgment</h4>
              <p>Despite how impressive being able to turn text into image is, beware to the fact
              that this model may output content that reinforces or exacerbates societal biases, as
              well as realistic faces, pornography and violence. While the applied safety guidance
              suppresses the majority of inappropriate content, this still could apply to Safe Stable
              Diffusion models. The original model was trained on the
              <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;">LAION-5B dataset</a>,
              which scraped non-curated image-text-pairs from the internet (the exception being the
              removal of illegal content) and is meant for research purposes. Safety guidance
              suppresses potentially inappropriate content during inference. You can read more in the
              <a href="https://huggingface.co/AIML-TUDA/stable-diffusion-safe" style="text-decoration: underline;">model card</a>.</p>
            </div>
            """
        )

block.queue(concurrency_count=40, max_size=20).launch(max_threads=150)