adding lora

Changed files:
- app.py +52 -6
- appConfig.json +28 -0
- config.py +14 -2
- requirements.txt +2 -1
app.py
CHANGED
@@ -176,13 +176,32 @@ def adapters_textual_inversion_change(adapter_textual_inversion, config):

    return adapter_textual_inversion_description, in_adapters_textual_inversion_token, config, config, assemble_code(config)

+def adapters_lora_change(adapter_lora, config):
+
+    if str(adapter_lora) != 'None' and str(adapter_lora) != 'null' and type(adapter_lora) != list:
+
+        adapter_lora_description = adapters['lora'][adapter_lora]['description']
+        adapter_lora_token = adapters['lora'][adapter_lora]['token']
+        adapter_lora_weight = adapters['lora'][adapter_lora]['weight']
+
+    else:
+        adapter_lora_description = ""
+        adapter_lora_token = ""
+        adapter_lora_weight = ""
+
+    config = set_config(config, 'adapter_lora', adapter_lora)
+    config = set_config(config, 'adapter_lora_token', adapter_lora_token)
+    config = set_config(config, 'adapter_lora_weight', adapter_lora_weight)
+
+    return adapter_lora_description, adapter_lora_token, adapter_lora_weight, config, config, assemble_code(config)
+
 def textual_inversion_token_change(adapter_textual_inversion_token, config):

    config = set_config(config, 'adapter_textual_inversion_token', adapter_textual_inversion_token)

    return config, config, assemble_code(config)

-def run_inference(config, config_history, progress=gr.Progress(track_tqdm=True)):
+def run_inference(config, config_history, pipeline, progress=gr.Progress(track_tqdm=True)):

    # str_config = str_config.replace("'", '"').replace('None', 'null').replace('False', 'false')
    # config = json.loads(str_config)
@@ -246,9 +265,16 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_tqdm=True)):
        if str(config["adapter_textual_inversion"]).lower() != 'none' and str(config["adapter_textual_inversion"]).lower() != 'null':
            pipeline.load_textual_inversion(config["adapter_textual_inversion"], token=config["adapter_textual_inversion_token"])

+        # LoRA
+        if str(config["adapter_lora"]).lower() != 'none' and str(config["adapter_lora"]).lower() != 'null':
+            if str(config["adapter_lora_weight"]).lower() != 'none':
+                pipeline.load_lora_weights(config["adapter_lora"], weight_name=config["adapter_lora_weight"], adapter_name=config["adapter_lora_token"])
+            else:
+                pipeline.load_lora_weights(config["adapter_lora"], adapter_name=config["adapter_lora_token"])
+
        progress((3,3), desc="Creating the result...")

-        prompt = config["prompt"] + config["trigger_token"] + config["adapter_textual_inversion_token"]
+        prompt = config["prompt"] + config["trigger_token"] + config["adapter_textual_inversion_token"] + config["adapter_lora_token"]

        image = pipeline(
            prompt = prompt,
@@ -266,11 +292,11 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_tqdm=True)):

        config_history.append(config.copy())

-        return image[0], dict_list_to_markdown_table(config_history), config_history
+        return image[0], dict_list_to_markdown_table(config_history), config_history, pipeline

    else:

-        return "Please select a model AND a scheduler.", None, config_history
+        return "Please select a model AND a scheduler.", None, config_history, pipeline

appConfig = load_app_config()
models = appConfig.get("models", {})
@@ -294,6 +320,11 @@ js = '''function js(){
        // some things I just don't understand, this is one of them
        return [adapter_textual_inversion, null, adapter_textual_inversion, config, null]
    }
+    window.set_adapter_lora_cookie = function(adapter_lora, config){
+        document.cookie = 'adapter_lora='+ adapter_lora+'; Path=/; SameSite=Strict';
+        // some things I just don't understand, this is one of them
+        return [adapter_lora, null, null, adapter_lora, config, null]
+    }
    window.set_cookie_2 = function(key, value, config){
        document.cookie = key+'='+value+'; Path=/; SameSite=Strict';
        // some things I just don't understand, this is one of them
@@ -307,6 +338,7 @@ with gr.Blocks(analytics_enabled=False) as demo:

    config = gr.State(value=get_initial_config())
    config_history = gr.State(value=[])
+    pipeline = gr.State()

    gr.Markdown('''## Text-2-Image Playground
    <small>by <a target="_blank" href="https://www.linkedin.com/in/nickyreinert/">Nicky Reinert</a> |
@@ -364,6 +396,15 @@ with gr.Blocks(analytics_enabled=False) as demo:
            in_adapters_textual_inversion = gr.Dropdown(value="None", choices=list(adapters['textual_inversion'].keys()), label="Textual Inversion Adapter", info="leave empty to not use an adapter")
            in_adapters_textual_inversion_token = gr.Textbox(value="None", label="Token", info="required to activate the token, will be added to your prompt")
            out_adapters_textual_inversion_description = gr.Textbox(value="", label="Description")
+        with gr.Row():
+            gr.Markdown('#### LoRA')
+        with gr.Row():
+            gr.Markdown('(Low-Rank Adaptation is a performant fine-tuning technique)')
+        with gr.Row():
+            in_adapters_lora = gr.Dropdown(value="None", choices=list(adapters['lora'].keys()), label="LoRA Adapter", info="leave empty to not use an adapter")
+            in_adapters_lora_token = gr.Textbox(value="None", label="Token", info="required to activate the token, will be added to your prompt")
+            in_adapters_lora_weight = gr.Textbox(value="", label="Weight")
+            out_adapters_lora_description = gr.Textbox(value="", label="Description")

    gr.Markdown("### Inference settings")
    with gr.Row():
@@ -392,6 +433,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
    in_schedulers.change(schedulers_change, inputs=[in_schedulers, config], outputs=[out_scheduler_description, config, out_config, out_code], js="(value, config) => set_cookie_2('scheduler', value, config)")
    in_auto_encoders.change(auto_encoders_change, inputs=[in_auto_encoders, config], outputs=[out_auto_encoder_description, config, out_config, out_code], js="(value, config) => set_cookie_2('auto_encoder', value, config)")
    in_adapters_textual_inversion.change(adapters_textual_inversion_change, inputs=[in_adapters_textual_inversion, config], outputs=[out_adapters_textual_inversion_description, in_adapters_textual_inversion_token, config, out_config, out_code], js="(adapter_textual_inversion, config) => set_adapter_textual_inversion_cookie(adapter_textual_inversion, config)")
+    in_adapters_lora.change(adapters_lora_change, inputs=[in_adapters_lora, config], outputs=[out_adapters_lora_description, in_adapters_lora_token, in_adapters_lora_weight, config, out_config, out_code], js="(adapter_lora, config) => set_adapter_lora_cookie(adapter_lora, config)")

    # `GENERIC` CHANGE LISTENERS, SAME INPUT, SAME OUTPUT STRUCTURE
    in_devices.change(fn=device_change, inputs=[in_devices, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('device', value, config)")
@@ -412,7 +454,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
    in_trigger_token.change(trigger_token_change, inputs=[in_trigger_token, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('trigger_token', value, config)")
    in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('negative_prompt', value, config)")

-    ev_run_inference = btn_start_pipeline.click(run_inference, inputs=[config, config_history], outputs=[out_image, out_config_history, config_history])
+    ev_run_inference = btn_start_pipeline.click(run_inference, inputs=[config, config_history, pipeline], outputs=[out_image, out_config_history, config_history, pipeline])
    btn_stop_pipeline.click(fn=None, inputs=None, outputs=None, cancels=[ev_run_inference])

    # send current respect initial config to init_config to populate parameters to all relevant input fields
@@ -439,7 +481,11 @@ with gr.Blocks(analytics_enabled=False) as demo:
        in_inference_steps,
        in_manual_seed,
        in_guidance_scale,
-        in_adapters_textual_inversion
+        in_adapters_textual_inversion,
+        in_adapters_textual_inversion_token,
+        in_adapters_lora,
+        in_adapters_lora_token,
+        in_adapters_lora_weight
    ])

demo.launch(show_error=True)
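For context, `load_lora_weights` is the diffusers entry point doing the heavy lifting in `run_inference`: it fetches a `.safetensors` file from the Hub and injects the low-rank update matrices into the pipeline's UNet and text encoder. A minimal standalone sketch, assuming a diffusers install with PEFT support and using the `nerijs/pixel-art-xl` entry from appConfig.json (the SDXL base model, device, and prompt are illustrative):

```python
# Minimal sketch: load a LoRA adapter into an SDXL pipeline.
import torch
from diffusers import StableDiffusionXLPipeline

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Same call shape as in run_inference: repo id, optional weight file, adapter name.
pipeline.load_lora_weights(
    "nerijs/pixel-art-xl",
    weight_name="pixel-art-xl.safetensors",
    adapter_name="pixel",
)

# The trigger token from appConfig.json is appended to the prompt, as the app does.
image = pipeline(prompt="A white rabbit, pixel", num_inference_steps=25).images[0]
image.save("rabbit.png")
```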
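The new `pipeline = gr.State()` is what lets `run_inference` take the pipeline as input and hand it back as output, so the loaded model can survive between button clicks instead of being rebuilt on every run. A stripped-down sketch of that round-trip pattern (component names here are illustrative, not from the app):

```python
import gradio as gr

def run(config, history, pipeline):
    # First click: the state is None, so do the expensive setup once.
    if pipeline is None:
        pipeline = object()  # stand-in for from_pretrained(...)
    history = history + [config]
    # Returning the pipeline to its gr.State keeps it alive for the next click.
    return "done", history, pipeline

with gr.Blocks() as demo:
    config = gr.State({})
    history = gr.State([])
    pipeline = gr.State()  # defaults to None
    out = gr.Textbox()
    gr.Button("Run").click(run, inputs=[config, history, pipeline],
                           outputs=[out, history, pipeline])
```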
appConfig.json
CHANGED
@@ -85,6 +85,34 @@
                "token": "<gta-artwork>",
                "description": "see https://huggingface.co/sd-concepts-library/gta5-artwork"
            }
+        },
+        "lora": {
+            "None": {"token": "", "description": "", "weight": ""},
+            "nerijs/pixel-art-xl": {
+                "token": "pixel",
+                "weight": "pixel-art-xl.safetensors",
+                "description": "see https://huggingface.co/nerijs/pixel-art-xl"
+            },
+            "ByteDance/SDXL-Lightning": {
+                "token": "SDXLL",
+                "weight": "sdxl_lightning_4step_lora.safetensors",
+                "description": "see https://huggingface.co/ByteDance/SDXL-Lightning"
+            },
+            "super-cereal-sdxl-lora": {
+                "token": "cereals",
+                "weight": "cereal_box_sdxl_v1.safetensors",
+                "description": "see https://huggingface.co/ostris/super-cereal-sdxl-lora"
+            },
+            "CiroN2022/toy-face": {
+                "token": "toy face",
+                "weight": "toy_face_sdxl.safetensors",
+                "description": "see https://huggingface.co/CiroN2022/toy-face"
+            },
+            "latent-consistency/lcm-lora-sdxl": {
+                "token": "lora",
+                "weight": "None",
+                "description": "required base model is stabilityai/stable-diffusion-xl-base-1.0; required scheduler is LCMScheduler; Latent Consistency Models (LCM) enable quality image generation in typically 2-4 steps, making it possible to use diffusion models in almost real-time settings; see https://huggingface.co/docs/diffusers/using-diffusers/inference_with_lcm_lora and https://huggingface.co/latent-consistency/lcm-lora-sdxl"
+            }
+        }
    },
    "schedulers": {
config.py
CHANGED
@@ -54,6 +54,9 @@ def get_initial_config():
        "guidance_scale": 5,
        "adapter_textual_inversion": None,
        "adapter_textual_inversion_token": None,
+        "adapter_lora": None,
+        "adapter_lora_token": None,
+        "adapter_lora_weight": None,
        "prompt": 'A white rabbit',
        "trigger_token": '',
        "negative_prompt": 'lowres, cropped, worst quality, low quality',
@@ -107,7 +110,10 @@ def get_config_from_url(initial_config, request: Request):
        return_config['manual_seed'],
        return_config['guidance_scale'],
        return_config['adapter_textual_inversion'],
-        return_config['adapter_textual_inversion_token']
+        return_config['adapter_textual_inversion_token'],
+        return_config['adapter_lora'],
+        return_config['adapter_lora_token'],
+        return_config['adapter_lora_weight']
    ]

def load_app_config():
@@ -198,10 +204,16 @@ def assemble_code(str_config):
    code.append(f'manual_seed = {config["manual_seed"]}')
    code.append(f'generator = torch.manual_seed(manual_seed)')

+    # ADAPTER
    if str(config["adapter_textual_inversion"]).lower() != 'none':
        code.append(f'pipeline.load_textual_inversion("{config["adapter_textual_inversion"]}", token="{config["adapter_textual_inversion_token"]}")')
+    if str(config["adapter_lora"]).lower() != 'none':
+        if str(config["adapter_lora_weight"]).lower() != 'none':
+            code.append(f'pipeline.load_lora_weights("{config["adapter_lora"]}", weight_name="{config["adapter_lora_weight"]}", adapter_name="{config["adapter_lora_token"]}")')
+        else:
+            code.append(f'pipeline.load_lora_weights("{config["adapter_lora"]}", adapter_name="{config["adapter_lora_token"]}")')

-    code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]} {config["adapter_textual_inversion_token"]}"')
+    code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]} {config["adapter_textual_inversion_token"]} {config["adapter_lora_token"]}"')
    code.append(f'negative_prompt = "{config["negative_prompt"]}"')
    code.append(f'inference_steps = {config["inference_steps"]}')
    code.append(f'guidance_scale = {config["guidance_scale"]}')
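With these branches in place, the copy-paste script produced by `assemble_code` gains the LoRA calls alongside the textual-inversion one. For the `nerijs/pixel-art-xl` preset, the emitted lines would look roughly like this (illustrative output, not part of the diff):

```python
# Illustrative lines appended by assemble_code when
# adapter_lora = "nerijs/pixel-art-xl" is selected:
pipeline.load_lora_weights("nerijs/pixel-art-xl",
                           weight_name="pixel-art-xl.safetensors",
                           adapter_name="pixel")
prompt = "A white rabbit  pixel"  # prompt, trigger token, and adapter tokens joined by spaces
```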
requirements.txt
CHANGED
@@ -7,4 +7,5 @@ urllib3==1.26.6
transformers
gradio
stripe>=9.0.0
-accelerate
+accelerate
+peft
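`peft` is the one genuinely new dependency: recent diffusers versions delegate LoRA weight injection to the PEFT library, so `load_lora_weights` fails without it. A quick guard you could add near startup (hypothetical helper, not part of the app):

```python
# Fail fast with a readable message if LoRA support is unavailable.
try:
    import peft  # noqa: F401  (used indirectly by diffusers' load_lora_weights)
except ImportError as err:
    raise SystemExit("LoRA adapters need the 'peft' package: pip install peft") from err
```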