Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -18,7 +18,6 @@ from io import BytesIO
 from diffusers.utils import load_image
 from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, EulerDiscreteScheduler, DPMSolverMultistepScheduler
 
-DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU π! Run on a GPU by duplicating this space or test our website for free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"
 
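The pattern in this hunk is the usual Spaces CPU guard: build a Markdown description string, then append an HTML warning when no GPU is visible so the notice renders at the top of the UI. A minimal, self-contained sketch of that pattern follows; the `DESCRIPTION` default here is only illustrative, since after this commit the Space is assumed to define it elsewhere in app.py.

```python
import gradio as gr
import torch

# Illustrative default; in the Space itself DESCRIPTION is assumed to be
# defined elsewhere in app.py once the assignment removed above is gone.
DESCRIPTION = "# Run any LoRA or SD Model"

if not torch.cuda.is_available():
    # Append an HTML notice that gr.Markdown renders beneath the title.
    DESCRIPTION += "\n<p>⚠️ This demo is running on CPU and will be very slow.</p>"

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)

if __name__ == "__main__":
    demo.launch()
```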
@@ -221,12 +220,9 @@ theme = gr.themes.Monochrome(
 )
 
 with gr.Blocks(theme=theme, css="style.css") as demo:
-    gr.
-
-
-    gr.Markdown('''# LoRA Ease 🧞‍♂️
-### Train a high quality SDXL LoRA in a breeze ༄ with state-of-the-art techniques and for cheap
-<small>Dreambooth with Pivotal Tuning, Prodigy and more! Use the trained LoRAs with diffusers, AUTO1111, Comfy. [blog about the training script](https://huggingface.co/blog/sdxl_lora_advanced_script), [Colab Pro](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb), [run locally or in a cloud](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py)</small>.''', elem_id="main_title")
+    gr.Markdown('''# Squaad AI 🪄
+### Run Stable Diffusion for free, in seconds
+<small>Squaad AI was a free Artificial Intelligence website that brought together the best of the AI world in one place. Unfortunately, it had to cease operations after facing compatibility issues. This space is intended to make its legacy available as open source and allow anyone to run SD in a simple and free way.</small>.''', elem_id="main_title")
     with gr.Group():
         model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
         vaecall = gr.Text(label='VAE', placeholder='e.g. madebyollin/sdxl-vae-fp16-fix')
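The `Model` and `VAE` text boxes shown above carry Hub checkpoint identifiers into diffusers. As a rough sketch of how such inputs are commonly consumed, under the assumption that this Space loads them this way (the `load_pipeline` helper below is hypothetical and not taken from app.py):

```python
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

def load_pipeline(model: str, vaecall: str = ""):
    """Hypothetical helper turning the Model/VAE text inputs into a
    diffusers pipeline; the real app.py may differ."""
    dtype = torch.float16 if torch.cuda.is_available() else torch.float32
    kwargs = {"torch_dtype": dtype}
    if vaecall:
        # Optional custom VAE, e.g. madebyollin/sdxl-vae-fp16-fix for SDXL.
        kwargs["vae"] = AutoencoderKL.from_pretrained(vaecall, torch_dtype=dtype)
    pipe = DiffusionPipeline.from_pretrained(model, **kwargs)
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")
    return pipe

# Example with the placeholders shown in the UI:
# pipe = load_pipeline("stabilityai/stable-diffusion-xl-base-1.0",
#                      "madebyollin/sdxl-vae-fp16-fix")
# image = pipe("an astronaut riding a horse").images[0]
```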