multimodalart HF staff committed on
Commit
69b5899
1 Parent(s): 8db0bd5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -7,7 +7,7 @@ from diffusers import DiffusionPipeline
7
  import torch
8
 
9
  device = "cuda" if torch.cuda.is_available() else "cpu"
10
- model_repo_id = "stabilityai/stable-diffusion-3.5-large"
11
 
12
  if torch.cuda.is_available():
13
  torch_dtype = torch.bfloat16
@@ -28,8 +28,8 @@ def infer(
28
  randomize_seed=False,
29
  width=1024,
30
  height=1024,
31
- guidance_scale=4.5,
32
- num_inference_steps=28,
33
  progress=gr.Progress(track_tqdm=True),
34
  ):
35
  if randomize_seed:
@@ -118,7 +118,7 @@ with gr.Blocks(css=css) as demo:
118
  minimum=0.0,
119
  maximum=7.5,
120
  step=0.1,
121
- value=4.5,
122
  )
123
 
124
  num_inference_steps = gr.Slider(
@@ -126,7 +126,7 @@ with gr.Blocks(css=css) as demo:
126
  minimum=1,
127
  maximum=50,
128
  step=1,
129
- value=28,
130
  )
131
 
132
  gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True, cache_mode="lazy")
 
7
  import torch
8
 
9
  device = "cuda" if torch.cuda.is_available() else "cpu"
10
+ model_repo_id = "stabilityai/stable-diffusion-3.5-large-turbo"
11
 
12
  if torch.cuda.is_available():
13
  torch_dtype = torch.bfloat16
 
28
  randomize_seed=False,
29
  width=1024,
30
  height=1024,
31
+ guidance_scale=0.0,
32
+ num_inference_steps=4,
33
  progress=gr.Progress(track_tqdm=True),
34
  ):
35
  if randomize_seed:
 
118
  minimum=0.0,
119
  maximum=7.5,
120
  step=0.1,
121
+ value=0.0,
122
  )
123
 
124
  num_inference_steps = gr.Slider(
 
126
  minimum=1,
127
  maximum=50,
128
  step=1,
129
+ value=4,
130
  )
131
 
132
  gr.Examples(examples=examples, inputs=[prompt], outputs=[result, seed], fn=infer, cache_examples=True, cache_mode="lazy")