enable torch compile + minor changes
app.py
CHANGED
@@ -21,7 +21,7 @@ if not torch.cuda.is_available():
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
-USE_TORCH_COMPILE =
+USE_TORCH_COMPILE = True
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 PREVIEW_IMAGES = True

@@ -187,16 +187,16 @@ with gr.Blocks(css="style.css") as demo:
             )
             prior_num_inference_steps = gr.Slider(
                 label="Prior Inference Steps",
-                minimum=
-                maximum=
+                minimum=30,
+                maximum=30,
                 step=1,
-                value=
+                value=30,
             )

             decoder_guidance_scale = gr.Slider(
                 label="Decoder Guidance Scale",
                 minimum=0,
-                maximum=
+                maximum=0,
                 step=0.1,
                 value=0.0,
             )
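The first hunk flips USE_TORCH_COMPILE to True; the code that reads the flag lives outside this diff. A minimal sketch of the usual pattern, with a placeholder module standing in for the demo's actual pipeline components (the module below is an assumption, not taken from app.py):

import torch
import torch.nn as nn

USE_TORCH_COMPILE = True  # the flag enabled in this commit

# Placeholder for the real diffusion modules, which this hunk does not show.
model = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 64))

if USE_TORCH_COMPILE:
    # torch.compile returns an optimized wrapper; compilation happens lazily on
    # the first forward pass, and later calls reuse the compiled graph.
    model = torch.compile(model)

with torch.no_grad():
    out = model(torch.randn(8, 64))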
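The second hunk pins both sliders: with minimum equal to maximum, each control can only ever submit that single value (30 prior inference steps, decoder guidance scale 0.0). A standalone sketch of the resulting controls, with the rest of the app.py layout omitted:

import gradio as gr

with gr.Blocks() as demo:
    # minimum == maximum effectively freezes each slider at one value.
    prior_num_inference_steps = gr.Slider(
        label="Prior Inference Steps",
        minimum=30,
        maximum=30,
        step=1,
        value=30,
    )
    decoder_guidance_scale = gr.Slider(
        label="Decoder Guidance Scale",
        minimum=0,
        maximum=0,
        step=0.1,
        value=0.0,
    )

if __name__ == "__main__":
    demo.launch()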