Prgckwb commited on
Commit
58cedc4
1 Parent(s): 1dcfe8d

:tada: change some processing

Browse files
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  import spaces
3
  import torch
4
  from PIL import Image
 
5
  from diffusers import DiffusionPipeline
6
  from diffusers.utils import make_image_grid
7
 
@@ -51,6 +52,9 @@ def load_pipeline(model_id, use_model_offload, safety_checker):
51
  else:
52
  pipe = pipe.to(device)
53
 
 
 
 
54
  return pipe
55
 
56
 
@@ -73,12 +77,22 @@ def inference(
73
  progress(0, 'Loading pipeline...')
74
  pipe = load_pipeline(model_id, use_model_offload, safety_checker)
75
 
 
 
 
 
 
 
 
 
 
 
76
  generator = torch.Generator(device=device).manual_seed(seed)
77
 
78
  progress(0.3, 'Generating images...')
79
  images = pipe(
80
- prompt,
81
- negative_prompt=negative_prompt,
82
  width=width,
83
  height=height,
84
  guidance_scale=guidance_scale,
@@ -164,7 +178,7 @@ def build_interface():
164
  seed,
165
  ]
166
 
167
- btn = gr.Button("Generate")
168
  btn.click(
169
  fn=inference,
170
  inputs=inputs,
 
2
  import spaces
3
  import torch
4
  from PIL import Image
5
+ from compel import Compel, DiffusersTextualInversionManager
6
  from diffusers import DiffusionPipeline
7
  from diffusers.utils import make_image_grid
8
 
 
52
  else:
53
  pipe = pipe.to(device)
54
 
55
+ if not safety_checker:
56
+ pipe.safety_checker = None
57
+
58
  return pipe
59
 
60
 
 
77
  progress(0, 'Loading pipeline...')
78
  pipe = load_pipeline(model_id, use_model_offload, safety_checker)
79
 
80
+ # For Compel
81
+ textual_inversion_manager = DiffusersTextualInversionManager(pipe)
82
+ compel_procs = Compel(
83
+ tokenizer=pipe.tokenizer,
84
+ text_encoder=pipe.text_encoder,
85
+ textual_inversion_manager=textual_inversion_manager,
86
+ )
87
+ prompt_embed = compel_procs(prompt)
88
+ negative_prompt_embed = compel_procs(negative_prompt)
89
+
90
  generator = torch.Generator(device=device).manual_seed(seed)
91
 
92
  progress(0.3, 'Generating images...')
93
  images = pipe(
94
+ prompt_embeds=prompt_embed,
95
+ negative_prompt_embeds=negative_prompt_embed,
96
  width=width,
97
  height=height,
98
  guidance_scale=guidance_scale,
 
178
  seed,
179
  ]
180
 
181
+ btn = gr.Button("Generate", variant='primary')
182
  btn.click(
183
  fn=inference,
184
  inputs=inputs,
checkpoints/lora/Hand v3 SD1.5.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:096dbb36579dcdb9f14c8be76af0d80f2159fc24d55b89c57dc8beecd9f07900
3
+ size 302108616
checkpoints/lora/detailed style SD1.5.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d60b0ee0d66a2e661b6697bc82dff6717eeb2e3daca408b494ccc5b778ee7d4
3
+ size 302108920
checkpoints/lora/perfection style SD1.5.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f714b35fd69731cacedc0150bf5828c456fc18163b59adff6d72c6597c496ffd
3
+ size 302109088
src/example.py CHANGED
@@ -37,9 +37,9 @@ EXAMPLES = [
37
  Example(
38
  prompt='Cinematic Photo of a beautiful korean fashion model bokeh train',
39
  model_id='Beautiful Realistic Asians',
40
- negative_prompt='worst_quality, BadNegAnatomyV1-neg, bradhands cartoon, cgi, render, illustration, painting, drawing',
41
- width=512,
42
- height=512,
43
  guidance_scale=5.0,
44
  num_inference_step=50,
45
  ).to_list()
 
37
  Example(
38
  prompt='Cinematic Photo of a beautiful korean fashion model bokeh train',
39
  model_id='Beautiful Realistic Asians',
40
+ negative_prompt='(worst_quality)++, (BadNegAnatomyV1-neg), bradhands cartoon, cgi, render, illustration, painting, drawing',
41
+ width=768,
42
+ height=960,
43
  guidance_scale=5.0,
44
  num_inference_step=50,
45
  ).to_list()