import gradio as gr
import torch
from PIL.ImageDraw import Draw
from diffusers import StableDiffusionPipeline
from PIL import Image, ImageOps
import os

# Load pipeline once
device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = "Deci/DeciDiffusion-v2-0"
auth_token = os.environ.get("HF_ACCESS_TOKEN", False)

pipe = StableDiffusionPipeline.from_pretrained(checkpoint,
                                               custom_pipeline=checkpoint,
                                               torch_dtype=torch.float32,
                                               use_auth_token=auth_token)
pipe.unet = pipe.unet.from_pretrained(checkpoint,
                                      subfolder='flexible_unet',
                                      torch_dtype=torch.float32,
                                      use_auth_token=auth_token)
pipe = pipe.to(device)


def read_content(file_path: str) -> str:
    """Read the content of the target file."""
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()
    return content


def predict(_prompt: str, _seed: int = 42, _guidance_scale: float = 7.5,
            _guidance_rescale: float = 0.7, _negative_prompt: str = ""):
    _negative_prompt = [_negative_prompt] if _negative_prompt else None
    output = pipe(prompt=[_prompt],
                  negative_prompt=_negative_prompt,
                  num_inference_steps=16,
                  guidance_scale=_guidance_scale,
                  guidance_rescale=_guidance_rescale,
                  generator=torch.Generator(device).manual_seed(_seed),
                  )
    output_image = output.images[0]

    # Add border beneath the image with Deci logo + prompt
    if len(_prompt) > 52:
        _prompt = _prompt[:52] + "..."
    original_image_height = output_image.size[1]
    output_image = ImageOps.expand(output_image, border=(0, 0, 0, 64), fill='white')
    deci_logo = Image.open('./deci_logo_white.png')
    output_image.paste(deci_logo, (0, original_image_height))
    Draw(output_image).text((deci_logo.size[0], original_image_height + 26),
                            _prompt, (127, 127, 127))
    return output_image


css = '''
.gradio-container {
    max-width: 1100px !important;
    background-image: url(https://huggingface.co/spaces/Deci/Deci-DeciDiffusionClean/resolve/main/background-image.png);
    background-size: cover;
    background-position: center center;
    background-repeat: no-repeat;
}
.footer {margin-bottom: 45px; margin-top: 35px !important; text-align: center; border-bottom: 1px solid #e5e5e5}
.footer>p {font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white}
.dark .footer {border-color: #303030}
.dark .footer>p {background: #0b0f19}
.acknowledgments h4 {margin: 1.25em 0 .25em 0; font-weight: bold; font-size: 115%}
@keyframes spin {
    from { transform: rotate(0deg); }
    to { transform: rotate(360deg); }
}
'''

demo = gr.Blocks(css=css, elem_id="total-container")
with demo:
    gr.HTML(read_content("header.html"))
    with gr.Row():
        with gr.Column():
            with gr.Row(mobile_collapse=False, equal_height=True):
                prompt = gr.Textbox(placeholder="Your prompt",
                                    show_label=False,
                                    elem_id="prompt",
                                    autofocus=True,
                                    lines=3,
                                    )
            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row(mobile_collapse=False, equal_height=True):
                    seed = gr.Slider(value=42, minimum=1, maximum=100, step=1,
                                     label="seed", interactive=True)
                    guidance_scale = gr.Slider(value=7.5, minimum=2, maximum=20, step=0.5,
                                               label='guidance_scale', interactive=True)
                    guidance_rescale = gr.Slider(value=0.8, minimum=0.0, maximum=0.99, step=0.05,
                                                 label='guidance_rescale', interactive=True)
                with gr.Row(mobile_collapse=False, equal_height=True):
                    negative_prompt = gr.Textbox(label="negative_prompt",
                                                 placeholder="Your negative prompt",
                                                 info="what you don't want to see in the image",
                                                 lines=3)
            with gr.Row():
                btn = gr.Button(value="Generate!", elem_id="run_button")

        with gr.Column():
            image_out = gr.Image(label="Output", elem_id="output-img", height=400)

    btn.click(fn=predict,
              inputs=[prompt, seed, guidance_scale, guidance_rescale, negative_prompt],
              outputs=[image_out],
              api_name='run')

    gr.HTML(
        """

        <div class="acknowledgments">
            <p><h4>LICENSE</h4>
            The model is licensed under a CreativeML Open RAIL-M license. The authors claim no rights over the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information intended to cause harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions, please read the license.</p>

            <p><h4>Biases and content acknowledgment</h4>
            Despite how impressive turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography, and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (the exception being the removal of illegal content), and it is meant for research purposes. You can read more in the model card.</p>
        </div>

""" ) demo.queue(max_size=50).launch()