multimodalart HF staff committed on
Commit
8182a62
1 Parent(s): 88a2efd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -8
app.py CHANGED
@@ -24,6 +24,19 @@ def set_timesteps_patched(self, num_inference_steps: int, device = None):
24
  self._begin_index = None
25
  self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
26
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  EDMEulerScheduler.set_timesteps = set_timesteps_patched
28
 
29
  pipe_edit = CosStableDiffusionXLInstructPix2PixPipeline.from_single_file(
@@ -37,14 +50,16 @@ pipe_normal.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigm
37
  pipe_normal.to("cuda")
38
 
39
  @spaces.GPU
40
- def run_normal(prompt, negative_prompt="", guidance_scale=7, progress=gr.Progress(track_tqdm=True)):
41
- return pipe_normal(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=20).images[0]
42
 
43
  @spaces.GPU
44
- def run_edit(image, prompt, negative_prompt="", guidance_scale=7, progress=gr.Progress(track_tqdm=True)):
45
- resolution = 1024
46
- image.resize((resolution, resolution))
47
- return pipe_edit(prompt=prompt,image=image,height=resolution,width=resolution,negative_prompt=negative_prompt, guidance_scale=guidance_scale,num_inference_steps=20).images[0]
 
 
48
  css = '''
49
  .gradio-container{
50
  max-width: 768px !important;
@@ -67,6 +82,7 @@ with gr.Blocks(css=css) as demo:
67
  with gr.Accordion("Advanced Settings", open=False):
68
  negative_prompt_edit = gr.Textbox(label="Negative Prompt")
69
  guidance_scale_edit = gr.Number(label="Guidance Scale", value=7)
 
70
  gr.Examples(examples=edit_examples, fn=run_edit, inputs=[image_edit, prompt_edit], outputs=[output_edit], cache_examples=True)
71
  with gr.Tab("CosXL"):
72
  with gr.Group():
@@ -77,6 +93,7 @@ with gr.Blocks(css=css) as demo:
77
  with gr.Accordion("Advanced Settings", open=False):
78
  negative_prompt_normal = gr.Textbox(label="Negative Prompt")
79
  guidance_scale_normal = gr.Number(label="Guidance Scale", value=7)
 
80
  gr.Examples(examples=normal_examples, fn=run_normal, inputs=[prompt_normal], outputs=[output_normal], cache_examples=True)
81
 
82
  gr.on(
@@ -85,7 +102,7 @@ with gr.Blocks(css=css) as demo:
85
  prompt_normal.submit
86
  ],
87
  fn=run_normal,
88
- inputs=[prompt_normal, negative_prompt_normal, guidance_scale_normal],
89
  outputs=[output_normal],
90
  )
91
  gr.on(
@@ -94,7 +111,7 @@ with gr.Blocks(css=css) as demo:
94
  prompt_edit.submit
95
  ],
96
  fn=run_edit,
97
- inputs=[image_edit, prompt_edit, negative_prompt_edit, guidance_scale_edit],
98
  outputs=[output_edit]
99
  )
100
  if __name__ == "__main__":
 
24
  self._begin_index = None
25
  self.sigmas = self.sigmas.to("cpu") # to avoid too much CPU/GPU communication
26
 
27
def resize_image(image, resolution):
    """Resize a PIL image so its longest edge equals *resolution*, keeping aspect ratio.

    Args:
        image: PIL.Image.Image to resize.
        resolution: target length (in pixels) of the longest edge.

    Returns:
        A new resized PIL.Image.Image; the input image is not modified.
    """
    original_width, original_height = image.size

    # Scale the longest edge to `resolution`; the other edge follows proportionally.
    if original_width > original_height:
        new_width = resolution
        new_height = int((resolution / original_width) * original_height)
    else:
        new_height = resolution
        new_width = int((resolution / original_height) * original_width)

    # Image.ANTIALIAS was deprecated in Pillow 9.1 and removed in Pillow 10,
    # where it raises AttributeError; Image.LANCZOS is the same filter under
    # its current name.
    resized_img = image.resize((new_width, new_height), Image.LANCZOS)
    return resized_img
39
+
40
  EDMEulerScheduler.set_timesteps = set_timesteps_patched
41
 
42
  pipe_edit = CosStableDiffusionXLInstructPix2PixPipeline.from_single_file(
 
50
  pipe_normal.to("cuda")
51
 
52
@spaces.GPU
def run_normal(prompt, negative_prompt="", guidance_scale=7, steps=20, progress=gr.Progress(track_tqdm=True)):
    """Run the CosXL text-to-image pipeline and return the first generated image."""
    generation = pipe_normal(
        prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
    )
    return generation.images[0]
55
 
56
@spaces.GPU
def run_edit(image, prompt, negative_prompt="", guidance_scale=7, steps=20, progress=gr.Progress(track_tqdm=True)):
    """Edit *image* following the instruction *prompt* with the CosXL InstructPix2Pix pipeline.

    The input is resized so its longest edge is 1024 px (aspect ratio preserved),
    then both dimensions are snapped down to a multiple of 8, since SDXL-family
    pipelines reject heights/widths not divisible by 8. Returns the first edited
    image from the pipeline output.
    """
    image = resize_image(image, 1024)
    width, height = image.size
    # Snap down to the nearest multiple of 8 so arbitrary aspect ratios coming
    # out of resize_image don't make the diffusers pipeline raise ValueError.
    width -= width % 8
    height -= height % 8
    image = image.resize((width, height))
    return pipe_edit(
        prompt=prompt,
        image=image,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=steps,
    ).images[0]
63
  css = '''
64
  .gradio-container{
65
  max-width: 768px !important;
 
82
  with gr.Accordion("Advanced Settings", open=False):
83
  negative_prompt_edit = gr.Textbox(label="Negative Prompt")
84
  guidance_scale_edit = gr.Number(label="Guidance Scale", value=7)
85
+ steps_edit = gr.Slider(label="Steps", minimum=10, maximum=50, value=20)
86
  gr.Examples(examples=edit_examples, fn=run_edit, inputs=[image_edit, prompt_edit], outputs=[output_edit], cache_examples=True)
87
  with gr.Tab("CosXL"):
88
  with gr.Group():
 
93
  with gr.Accordion("Advanced Settings", open=False):
94
  negative_prompt_normal = gr.Textbox(label="Negative Prompt")
95
  guidance_scale_normal = gr.Number(label="Guidance Scale", value=7)
96
+ steps_normal = gr.Slider(label="Steps", minimum=10, maximum=50, value=20)
97
  gr.Examples(examples=normal_examples, fn=run_normal, inputs=[prompt_normal], outputs=[output_normal], cache_examples=True)
98
 
99
  gr.on(
 
102
  prompt_normal.submit
103
  ],
104
  fn=run_normal,
105
+ inputs=[prompt_normal, negative_prompt_normal, guidance_scale_normal, steps_normal],
106
  outputs=[output_normal],
107
  )
108
  gr.on(
 
111
  prompt_edit.submit
112
  ],
113
  fn=run_edit,
114
+ inputs=[image_edit, prompt_edit, negative_prompt_edit, guidance_scale_edit, steps_edit],
115
  outputs=[output_edit]
116
  )
117
  if __name__ == "__main__":