Riccardo Giorato committed on
Commit e44e44b
1 Parent(s): 3e7216e

update space

Files changed (3)
  1. README.md +1 -1
  2. app.py +46 -52
  3. requirements.txt +3 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🎮
 colorFrom: gray
 colorTo: green
 sdk: gradio
-sdk_version: 3.9
+sdk_version: 3.6
 app_file: app.py
 pinned: false
 license: mit
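
(`sdk_version` in the front matter pins the Gradio release the Space runs on; the step back from 3.9 to 3.6 presumably keeps the `.style(...)` and `queue(concurrency_count=1)` calls in app.py below working as-is.)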
app.py CHANGED
@@ -1,4 +1,4 @@
-from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
+from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
 import gradio as gr
 import torch
 from PIL import Image
@@ -17,23 +17,43 @@ class Model:
 models = [
     Model("Beeple", "riccardogiorato/beeple-diffusion", "beeple style "),
     Model("Avatar", "riccardogiorato/avatar-diffusion", "avatartwow style "),
-    Model("Beksinski", "s3nh/beksinski-style-stable-diffusion", "beksinski style"),
-]
+    Model("Beksinski", "s3nh/beksinski-style-stable-diffusion", "beksinski style "),
+    Model("Robo Diffusion", "nousr/robo-diffusion", ""),
+    Model("Guohua", "Langboat/Guohua-Diffusion", "guohua style ")
+]
+
+scheduler = DPMSolverMultistepScheduler(
+    beta_start=0.00085,
+    beta_end=0.012,
+    beta_schedule="scaled_linear",
+    num_train_timesteps=1000,
+    trained_betas=None,
+    predict_epsilon=True,
+    thresholding=False,
+    algorithm_type="dpmsolver++",
+    solver_type="midpoint",
+    lower_order_final=True,
+)
+
+custom_model = None
+if is_colab:
+    models.insert(0, Model("Custom model", "", ""))
+    custom_model = models[0]
 
 last_mode = "txt2img"
-current_model = models[0]
+current_model = models[1] if is_colab else models[0]
 current_model_path = current_model.path
 
 if is_colab:
-    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16)
+    pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler)
 
 else: # download all models
     vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
-    for model in models[0:]:
+    for model in models:
         try:
             unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
-            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16)
-            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16)
+            model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
+            model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
         except:
             models.remove(model)
     pipe = models[0].pipe_t2i
@@ -71,8 +91,8 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     if model_path != current_model_path or last_mode != "txt2img":
         current_model_path = model_path
 
-        if is_colab or current_model == models[0]:
-            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
+        if is_colab or current_model == custom_model:
+            pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
         else:
             pipe.to("cpu")
             pipe = current_model.pipe_t2i
@@ -81,7 +101,7 @@ def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, g
     pipe = pipe.to("cuda")
     last_mode = "txt2img"
 
-    prompt = current_model.prefix + prompt
+    prompt = current_model.prefix + prompt
     result = pipe(
         prompt,
         negative_prompt = neg_prompt,
@@ -102,8 +122,8 @@ def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, w
     if model_path != current_model_path or last_mode != "img2img":
         current_model_path = model_path
 
-        if is_colab or current_model == models[0]:
-            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16)
+        if is_colab or current_model == custom_model:
+            pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler)
         else:
             pipe.to("cpu")
             pipe = current_model.pipe_i2i
@@ -135,43 +155,12 @@ def replace_nsfw_images(results):
             results.images[i] = Image.open("nsfw.png")
     return results.images[0]
 
-css = """
-<style>
-.finetuned-diffusion-div {
-    text-align: center;
-    max-width: 700px;
-    margin: 0 auto;
-}
-.finetuned-diffusion-div div {
-    display: inline-flex;
-    align-items: center;
-    gap: 0.8rem;
-    font-size: 1.75rem;
-}
-.finetuned-diffusion-div div h1 {
-    font-weight: 900;
-    margin-bottom: 7px;
-}
-.finetuned-diffusion-div p {
-    margin-bottom: 10px;
-    font-size: 94%;
-}
-.finetuned-diffusion-div p a {
-    text-decoration: underline;
-}
-.tabs {
-    margin-top: 0px;
-    margin-bottom: 0px;
-}
-#gallery {
-    min-height: 20rem;
-}
-</style>
+css = """.playground-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.playground-diffusion-div div h1{font-weight:900;margin-bottom:7px}.playground-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
 """
 with gr.Blocks(css=css) as demo:
     gr.HTML(
         f"""
-        <div class="finetuned-diffusion-div">
+        <div class="playground-diffusion-div">
           <div>
             <h1>Playground Diffusion</h1>
           </div>
@@ -191,7 +180,8 @@ with gr.Blocks(css=css) as demo:
 
         with gr.Column(scale=55):
             with gr.Group():
-                model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
+                model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
+
                 with gr.Row():
                     prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
                     generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
@@ -211,7 +201,7 @@ with gr.Blocks(css=css) as demo:
 
         with gr.Row():
             guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
-            steps = gr.Slider(label="Steps", value=50, minimum=2, maximum=100, step=1)
+            steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
 
         with gr.Row():
             width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
@@ -224,18 +214,22 @@ with gr.Blocks(css=css) as demo:
                     image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                     strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
 
+    if is_colab:
+        model_name.change(lambda x: gr.update(visible = x == models[0].name), inputs=model_name, outputs=custom_model_group)
+        # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
+
     inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
    prompt.submit(inference, inputs=inputs, outputs=image_out)
     generate.click(inference, inputs=inputs, outputs=image_out)
 
     ex = gr.Examples([
         [models[0].name, "Neon techno-magic robot with spear pierces an ancient beast, hyperrealism, no blur, 4k resolution, ultra detailed", 7.5, 50],
+        [models[0].name, "halfturn portrait of a big crystal face of a beautiful abstract ancient Egyptian elderly shaman woman, made of iridescent golden crystals, half - turn, bottom view, ominous, intricate, studio, art by anthony macbain and greg rutkowski and alphonse mucha, concept art, 4k, sharp focus", 7.5, 25],
     ], [model_name, prompt, guidance, steps, seed], image_out, inference, cache_examples=False)
 
-    gr.Markdown('''
-    Models by [@riccardogiorato](https://huggingface.co/riccardogiorato) <br>
-    Space by: [![Twitter Follow](https://img.shields.io/twitter/follow/riccardogiorato?style=social)](https://twitter.com/riccardogiorato)
-    ''')
+    gr.HTML("""
+    <p>Models by <a href="https://huggingface.co/riccardogiorato">@riccardogiorato</a><br></p>
+    """)
 
 if not is_colab:
     demo.queue(concurrency_count=1)
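
The functional core of this commit is the switch to DPM-Solver++ (`DPMSolverMultistepScheduler`), which is what lets the Steps slider drop from a default of 50 (max 100) to 25 (max 75): the multistep solver typically reaches comparable quality in roughly half the sampling steps of the stock scheduler. A minimal sketch of the same wiring outside the Space, assuming a diffusers build that ships the scheduler (the model id and prompt are illustrative, not from the commit):

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "riccardogiorato/beeple-diffusion"  # any model from the list above

# Load the pipeline in fp16 for GPU inference.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)

# Swap in DPM-Solver++; deriving it from the existing scheduler config avoids
# hand-typing the beta schedule the way the hunk above does.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# ~25 steps is usually enough with DPM-Solver++, versus ~50 with the default.
image = pipe("beeple style neon cityscape", num_inference_steps=25).images[0]
image.save("out.png")

A few caveats visible in the hunks themselves: `models.remove(model)` inside `for model in models:` mutates the list being iterated and can skip the entry after a failed load (iterating over a copy, `for model in list(models):`, avoids this); the Colab branch wires `outputs=custom_model_group`, a name not defined in any hunk shown, so it presumably exists elsewhere in the file; and `predict_epsilon` was later replaced by `prediction_type` in diffusers, which ties the explicit scheduler constructor to the git revision pinned in requirements.txt.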
requirements.txt CHANGED
@@ -1,6 +1,7 @@
 --extra-index-url https://download.pytorch.org/whl/cu113
 torch
-diffusers
+git+https://github.com/huggingface/diffusers.git
 transformers
 scipy
-ftfy
+ftfy
+accelerate
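
Switching `diffusers` from the PyPI release to a git install suggests `DPMSolverMultistepScheduler` had not yet landed in a stable release at the time, and `accelerate` is what diffusers leans on for faster, lower-memory weight loading. A quick environment check along those lines (a sketch, not part of the Space):

# Sanity-check sketch: verify the git install of diffusers exposes the
# scheduler app.py now imports, and that accelerate is present.
import diffusers
import accelerate

print("diffusers", diffusers.__version__)
print("accelerate", accelerate.__version__)

# Raises ImportError on a diffusers build that predates DPM-Solver++.
from diffusers import DPMSolverMultistepScheduler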