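# app.py — Gradio demo for Stability AI's CosXL models on a ZeroGPU Space:
# a text-to-image tab (CosXL) and an instruction-based image editing tab (CosXL Edit).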
import gradio as gr
import spaces
import torch
from diffusers import StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL
from custom_pipeline import CosStableDiffusionXLInstructPix2PixPipeline
from huggingface_hub import hf_hub_download
import numpy as np
import math
from PIL import Image
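
# Download the two CosXL checkpoints (edit and base) from the Hugging Face Hub.
# Note: access may require accepting the non-commercial license on the model page.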
edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
normal_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl.safetensors")
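
# Resize so the longer side equals `resolution`, preserving the aspect ratio.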
def resize_image(image, resolution):
    original_width, original_height = image.size
    if original_width > original_height:
        new_width = resolution
        new_height = int((resolution / original_width) * original_height)
    else:
        new_height = resolution
        new_width = int((resolution / original_height) * original_width)
    resized_img = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
    return resized_img
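
# The fp16-fix VAE avoids NaN/black outputs when decoding SDXL latents in float16.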
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe_edit = StableDiffusionXLInstructPix2PixPipeline.from_single_file(
    edit_file, num_in_channels=8, is_cosxl_edit=True, vae=vae, torch_dtype=torch.float16,
)
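
# CosXL checkpoints are v-prediction models on an EDM sigma schedule, so both
# pipelines swap in EDMEulerScheduler with the matching configuration.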
pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction", sigma_schedule="exponential")
pipe_edit.to("cuda")
pipe_normal = StableDiffusionXLPipeline.from_single_file(normal_file, torch_dtype=torch.float16, vae=vae)
pipe_normal.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction", sigma_schedule="exponential")
pipe_normal.to("cuda")
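
# `@spaces.GPU` tells ZeroGPU to attach a GPU only for the duration of each call.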
@spaces.GPU
def run_normal(prompt, negative_prompt="", guidance_scale=7, steps=20, progress=gr.Progress(track_tqdm=True)):
    return pipe_normal(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=steps).images[0]
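
# Edit an existing image following a text instruction (InstructPix2Pix-style conditioning).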
@spaces.GPU
def run_edit(image, prompt, negative_prompt="", guidance_scale=7, steps=20, progress=gr.Progress(track_tqdm=True)):
    image = resize_image(image, 1024)
    print("Image resized to", image.size)
    width, height = image.size
    return pipe_edit(prompt=prompt, image=image, height=height, width=width, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=steps).images[0]
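
# Narrow, centered layout for the demo page.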
css = '''
.gradio-container{
    max-width: 768px !important;
    margin: 0 auto;
}
'''
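
# Prompt examples for the text-to-image tab and (image, instruction) pairs for the edit tab.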
normal_examples = [
    "portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography",
    "backlit photography of a dog",
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "A photo of beautiful mountain with realistic sunset and blue lake, highly detailed, masterpiece",
]
edit_examples = [["mountain.png", "make it a cloudy day"], ["painting.png", "make the earring fancier"]]
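
# UI: two tabs sharing the same layout pattern (prompt row, result image, advanced settings).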
with gr.Blocks(css=css) as demo:
    gr.Markdown('''# CosXL demo
Unofficial demo for CosXL, an SDXL model tuned to produce full-color-range images. CosXL Edit lets you perform instruction-based edits on images. Both models are covered by a [non-commercial community license](https://huggingface.co/stabilityai/cosxl/blob/main/LICENSE).
''')
    with gr.Tab("CosXL Edit"):
        with gr.Group():
            image_edit = gr.Image(label="Image you would like to edit", type="pil")
            with gr.Row():
                prompt_edit = gr.Textbox(show_label=False, scale=4, placeholder="Edit instructions, e.g.: Make the day cloudy")
                button_edit = gr.Button("Generate", min_width=120)
        output_edit = gr.Image(label="Your result image", interactive=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt_edit = gr.Textbox(label="Negative Prompt")
            guidance_scale_edit = gr.Number(label="Guidance Scale", value=7)
            steps_edit = gr.Slider(label="Steps", minimum=10, maximum=50, value=20)
        gr.Examples(examples=edit_examples, fn=run_edit, inputs=[image_edit, prompt_edit], outputs=[output_edit], cache_examples=True)
    with gr.Tab("CosXL"):
        with gr.Group():
            with gr.Row():
                prompt_normal = gr.Textbox(show_label=False, scale=4, placeholder="Your prompt, e.g.: backlit photography of a dog")
                button_normal = gr.Button("Generate", min_width=120)
        output_normal = gr.Image(label="Your result image", interactive=False)
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt_normal = gr.Textbox(label="Negative Prompt")
            guidance_scale_normal = gr.Number(label="Guidance Scale", value=7)
            steps_normal = gr.Slider(label="Steps", minimum=10, maximum=50, value=20)
        gr.Examples(examples=normal_examples, fn=run_normal, inputs=[prompt_normal], outputs=[output_normal], cache_examples="lazy")
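
    # Wire both the "Generate" buttons and Enter-to-submit on the textboxes to the same handlers.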
    gr.on(
        triggers=[button_normal.click, prompt_normal.submit],
        fn=run_normal,
        inputs=[prompt_normal, negative_prompt_normal, guidance_scale_normal, steps_normal],
        outputs=[output_normal],
    )
    gr.on(
        triggers=[button_edit.click, prompt_edit.submit],
        fn=run_edit,
        inputs=[image_edit, prompt_edit, negative_prompt_edit, guidance_scale_edit, steps_edit],
        outputs=[output_edit],
    )
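
# `share=True` only matters for local runs; a Space already serves the app publicly.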
if __name__ == "__main__":
    demo.launch(share=True)