alfredplpl committed
Commit 98775f1
1 Parent(s): 58ba30b

Update app.py

Files changed (1)
  1. app.py +16 -7
app.py CHANGED
@@ -14,16 +14,25 @@ from safetensors.torch import load_file
 
 model_id = 'aipicasso/emi'
 auth_token=os.environ["ACCESS_TOKEN"]
+adapter_id = "latent-consistency/lcm-lora-sdxl"
 
 #scheduler=DPMSolverMultistepScheduler()
 
-pipe = StableDiffusionXLPipeline.from_pretrained(
-    model_id,
-    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-    use_auth_token=auth_token)
+#pipe = StableDiffusionXLPipeline.from_pretrained(
+#    model_id,
+#    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+#    use_auth_token=auth_token)
 
+pipe = AutoPipelineForText2Image.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16,
+    use_auth_token=auth_token
+)
 
 pipe=pipe.to("cuda")
+pipe.load_lora_weights(adapter_id)
+pipe.fuse_lora()
+
 #pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.3, b2=1.4)
 #pipe.enable_freeu(s1=0.6, s2=0.4, b1=1.1, b2=1.2)
 #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
@@ -118,7 +127,7 @@ with gr.Blocks(css=css) as demo:
 f"""
 <div class="main-div">
 <div>
-<h1>Emi Demo</h1>
+<h1>Emi+LCM-LoRA Demo</h1>
 <!--
 <h2>
 Other Demos:
@@ -165,8 +174,8 @@ with gr.Blocks(css=css) as demo:
 disable_auto_prompt_correction = gr.Checkbox(label="Disable auto prompt corretion.")
 
 with gr.Row():
-    guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=25)
-    steps = gr.Slider(label="Steps", value=20, minimum=2, maximum=30, step=1)
+    guidance = gr.Slider(label="Guidance scale", value=3, maximum=25)
+    steps = gr.Slider(label="Steps", value=8, minimum=2, maximum=30, step=1)
 
 seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
 
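
For readers unfamiliar with the pattern the diff switches to, here is a minimal standalone sketch of loading an SDXL-family model with the LCM-LoRA adapter via diffusers. It is an illustration, not this Space's exact code: the LCMScheduler swap and the example prompt are assumptions added for clarity, and the Space itself additionally passes use_auth_token from ACCESS_TOKEN as shown in the diff.

import torch
from diffusers import AutoPipelineForText2Image, LCMScheduler

# Load the base model in fp16, as the new code in the diff does.
pipe = AutoPipelineForText2Image.from_pretrained(
    "aipicasso/emi",
    torch_dtype=torch.float16,
)

# Assumption for illustration: LCM-LoRA is usually paired with LCMScheduler;
# the diff itself does not swap the scheduler.
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# Attach and fuse the LCM-LoRA adapter, mirroring the added lines above.
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
pipe.fuse_lora()

# Few-step, low-guidance generation matching the new slider defaults (8 steps, guidance 3).
# The prompt is an arbitrary placeholder.
image = pipe(
    "1girl, looking at viewer, upper body",
    num_inference_steps=8,
    guidance_scale=3.0,
).images[0]
image.save("emi_lcm.png")

With the LCM-LoRA fused in, usable images typically appear in roughly 4 to 8 steps at low guidance, which is why the Steps default drops from 20 to 8 and the Guidance scale default from 7.5 to 3.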