multimodalart (HF staff) committed
Commit e1232bd
1 Parent(s): a1c7876

Update app.py

Files changed (1)
  1. app.py +8 -9
app.py CHANGED
@@ -64,15 +64,15 @@ pipe.load_lora_weights(
     use_auth_token=HF_TOKEN,
 )
 
-# Load papercut LoRA
-pipe.load_lora_weights(
-    "TheLastBen/Papercut_SDXL",
-    weight_name="papercut.safetensors",
-    adapter_name="papercut",
-)
+## Load papercut LoRA
+#pipe.load_lora_weights(
+#    "TheLastBen/Papercut_SDXL",
+#    weight_name="papercut.safetensors",
+#    adapter_name="papercut",
+#)
 
 # Mix the LoRAs
-pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])
+#pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])
 
 compel_proc = Compel(
     tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
@@ -123,8 +123,7 @@ css = """
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="container"):
         gr.Markdown(
-            """# Ultra-Fast SDXL with LoRAs borrowed from Latent Consistency Models
-            Featuring [Papercut_SDXL Lora](https://huggingface.co/TheLastBen/Papercut_SDXL), use **papercut** token to activate the model.
+            """# Ultra-Fast SDXL a Latent Consistency Model LoRA
             """,
             elem_id="intro",
         )
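For context, here is a minimal sketch (not the Space's actual app.py) of the adapter setup this commit disables: before the change both the LCM LoRA and the Papercut LoRA were loaded and mixed with set_adapters, and afterwards only the LCM adapter stays active. The base checkpoint and LCM LoRA repo IDs below are assumptions; only the Papercut repo, weight file, adapter names, and mixing weights come from the diff.

# Sketch under assumed model IDs; requires diffusers with PEFT installed.
import torch
from diffusers import StableDiffusionXLPipeline, LCMScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed base checkpoint
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)

# The LCM LoRA remains loaded after this commit (adapter name "lcm" as in the diff);
# the repo ID here is an assumption.
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")

# Before this commit the Space also loaded the Papercut LoRA and mixed both adapters;
# the commit comments these calls out, leaving only the LCM adapter active.
pipe.load_lora_weights(
    "TheLastBen/Papercut_SDXL",
    weight_name="papercut.safetensors",
    adapter_name="papercut",
)
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])

With the Papercut adapter no longer active, the **papercut** trigger token has no effect, which is presumably why the intro Markdown drops its mention of the Papercut_SDXL LoRA.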