phillipinseoul committed
Commit: bcb5a3c
1 Parent(s): b29e176

fix example error

app.py CHANGED
@@ -8,6 +8,12 @@ import gradio as gr
 from syncdiffusion.syncdiffusion_model import SyncDiffusion
 from syncdiffusion.utils import seed_everything
 
+# set device
+device = torch.device("cuda")
+
+# load SyncDiffusion model
+syncdiffusion = SyncDiffusion(device, sd_version="2.0")
+
 def run_inference(
     prompt: str,
     width: int = 2048,
@@ -15,15 +21,9 @@ def run_inference(
     sync_thres: int = 5,
     seed: int = 0
 ):
-    # set device
-    device = torch.device = torch.device("cuda")
-
     # set random seed
     seed_everything(seed)
 
-    # load SyncDiffusion model
-    syncdiffusion = SyncDiffusion(device, sd_version="2.0")
-
     img = syncdiffusion.sample_syncdiffusion(
         prompts = prompt,
         negative_prompts = "",
@@ -61,7 +61,7 @@ if __name__=="__main__":
         prompt = gr.Textbox(label="Text Prompt", value='a cinematic view of a castle in the sunset')
         width = gr.Slider(label="Width", minimum=512, maximum=4096, value=2048, step=128)
         sync_weight = gr.Slider(label="Sync Weight", minimum=0.0, maximum=30.0, value=20.0, step=5.0)
-        sync_thres = gr.Slider(label="Sync Threshold (If N, apply SyncDiffusion for the first N steps)", minimum=0, maximum=50, value=5, step=1)
+        sync_thres = gr.Slider(label="Sync Threshold (If N, apply SyncDiffusion for the first N steps)", minimum=0, maximum=15, value=5, step=1)
         seed = gr.Number(label="Seed", value=0)
 
         with gr.Column():
@@ -70,11 +70,11 @@ if __name__=="__main__":
         # display examples
         examples = gr.Examples(
             examples=[
-                'a cinematic view of a castle in the sunset',
-                'natural landscape in anime style illustration',
-                'a photo of a lake under the northern lights',
+                ['a cinematic view of a castle in the sunset', 2048, 20.0, 5, 1],
+                ['natural landscape in anime style illustration', 2048, 20.0, 5, 2],
+                ['a photo of a lake under the northern lights', 2048, 20.0, 5, 6]
             ],
-            inputs=[prompt],
+            inputs=[prompt, width, sync_weight, sync_thres, seed],
         )
 
         ips = [prompt, width, sync_weight, sync_thres, seed]
syncdiffusion/__pycache__/syncdiffusion_model.cpython-39.pyc CHANGED
Binary files a/syncdiffusion/__pycache__/syncdiffusion_model.cpython-39.pyc and b/syncdiffusion/__pycache__/syncdiffusion_model.cpython-39.pyc differ
 
syncdiffusion/__pycache__/utils.cpython-39.pyc CHANGED
Binary files a/syncdiffusion/__pycache__/utils.cpython-39.pyc and b/syncdiffusion/__pycache__/utils.cpython-39.pyc differ
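
Note on the "fix example error" change: gr.Examples validates its rows against the components passed in inputs. Once inputs lists several components, each entry in examples must be a list supplying one value per component, in the same order; a bare prompt string only works when there is a single input. A minimal self-contained sketch of the corrected pattern is below; the output textbox, button wiring, and placeholder run_inference are illustrative, not the Space's actual code.

import gradio as gr

def run_inference(prompt, width, sync_weight, sync_thres, seed):
    # placeholder body; the real app calls SyncDiffusion here
    return f"{prompt} ({width}px, weight={sync_weight}, thres={sync_thres}, seed={seed})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Text Prompt")
    width = gr.Slider(minimum=512, maximum=4096, value=2048, step=128, label="Width")
    sync_weight = gr.Slider(minimum=0.0, maximum=30.0, value=20.0, step=5.0, label="Sync Weight")
    sync_thres = gr.Slider(minimum=0, maximum=15, value=5, step=1, label="Sync Threshold")
    seed = gr.Number(value=0, label="Seed")
    result = gr.Textbox(label="Result")
    run = gr.Button("Generate")
    run.click(run_inference, inputs=[prompt, width, sync_weight, sync_thres, seed], outputs=result)

    # Each example row is a list whose values line up one-to-one with `inputs`
    # (prompt, width, sync_weight, sync_thres, seed), matching the fix in this commit.
    gr.Examples(
        examples=[
            ["a cinematic view of a castle in the sunset", 2048, 20.0, 5, 1],
        ],
        inputs=[prompt, width, sync_weight, sync_thres, seed],
    )

if __name__ == "__main__":
    demo.launch()

Clicking an example row then populates all five components at once, which is why the commit also widens inputs from [prompt] to the full component list.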