multimodalart (HF staff) committed
Commit 8f40af2
1 Parent(s): 8205b3e

Update app.py

Files changed (1)
  1. app.py +7 -13
app.py CHANGED
@@ -64,16 +64,6 @@ pipe.load_lora_weights(
     use_auth_token=HF_TOKEN,
 )
 
-## Load papercut LoRA
-#pipe.load_lora_weights(
-#    "TheLastBen/Papercut_SDXL",
-#    weight_name="papercut.safetensors",
-#    adapter_name="papercut",
-#)
-
-# Mix the LoRAs
-#pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])
-
 compel_proc = Compel(
     tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
     text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
@@ -123,7 +113,7 @@ css = """
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="container"):
         gr.Markdown(
-            """# Ultra-Fast SDXL with Latent Consistency LoRA
+            """# SDXL in 4 steps with Latent Consistency LoRAs
             SDXL is loaded with an LCM-LoRA, giving it the superpower of doing inference in as little as 4 steps. [Learn more on our blog](#) or [technical report](#).
             """,
             elem_id="intro",
@@ -145,12 +135,16 @@ with gr.Blocks(css=css) as demo:
                 randomize=True, minimum=0, maximum=12013012031030, label="Seed", step=1
             )
         with gr.Group():
-            gr.Markdown('''## Using it with `diffusers`
+            gr.Markdown('''## Running LCM-LoRAs with `diffusers`
+            ```bash
+            pip install diffusers==0.23.0
+            ```
+
             ```py
             from diffusers import DiffusionPipeline, LCMScheduler
             pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0").to("cuda")
             pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-            pipe.load_lora_weights("lcm-sd/lcm-sdxl-lora")
+            pipe.load_lora_weights("lcm-sd/lcm-sdxl-lora")  # yes, it is a real LoRA that gives superpowers to SDXL!
 
             results = pipe(
                 prompt="The spirit of a tamagotchi wandering in the city of Vienna",