n42 committed on
Commit
cbbd444
·
1 Parent(s): aaf1179

allowing multiple lora adapters now

Browse files
Files changed (3) hide show
  1. app.py +49 -25
  2. appConfig.json +0 -1
  3. config.py +22 -10
app.py CHANGED
@@ -173,25 +173,39 @@ def adapters_textual_inversion_change(adapter_textual_inversion, config):
173
 
174
  return adapter_textual_inversion_description, in_adapters_textual_inversion_token, config, config, assemble_code(config)
175
 
176
- def adapters_lora_change(adapter_lora, config):
177
 
178
- if str(adapter_lora) != 'None' and str(adapter_lora) != 'null' and type(adapter_lora) != list:
179
-
180
- adapter_lora_description = adapters['lora'][adapter_lora]['description']
181
- adapter_lora_token = adapters['lora'][adapter_lora]['token']
182
- adapter_lora_weight = adapters['lora'][adapter_lora]['weight']
183
 
 
 
 
 
 
 
 
 
 
 
184
  else:
185
- adapter_lora_description = ""
186
- adapter_lora_token = ""
187
- adapter_lora_weight = ""
 
188
 
189
- config = set_config(config, 'adapter_lora', adapter_lora)
190
  config = set_config(config, 'adapter_lora_token', adapter_lora_token)
191
  config = set_config(config, 'adapter_lora_weight', adapter_lora_weight)
 
192
 
193
- return adapter_lora_description, adapter_lora_token, adapter_lora_weight, config, config, assemble_code(config)
 
 
 
 
194
 
 
 
195
  def textual_inversion_token_change(adapter_textual_inversion_token, config):
196
 
197
  config = set_config(config, 'adapter_textual_inversion_token', adapter_textual_inversion_token)
@@ -263,15 +277,21 @@ def run_inference(config, config_history, pipeline, progress=gr.Progress(track_t
263
  pipeline.load_textual_inversion(config["adapter_textual_inversion"], token=config["adapter_textual_inversion_token"])
264
 
265
  # LoRA
266
- if str(config["adapter_lora"]).lower() != 'none' and str(config["adapter_lora"]).lower() != 'null':
267
- if str(config["adapter_lora_weight"]).lower() != 'none':
268
- pipeline.load_lora_weights(config["adapter_lora"], weight_name=config["adapter_lora_weight"], token=config["adapter_lora_token"])
269
- else:
270
- pipeline.load_lora_weights(config["adapter_lora"], token=config["adapter_lora_token"])
 
 
 
 
 
 
271
 
272
  progress((3,3), desc="Creating the result...")
273
 
274
- prompt = config["prompt"] + config["trigger_token"] + config["adapter_textual_inversion_token"] + config["adapter_lora_token"]
275
 
276
  image = pipeline(
277
  prompt = prompt,
@@ -319,9 +339,9 @@ js = '''function js(){
319
  return [adapter_textual_inversion, null, adapter_textual_inversion, config, null]
320
  }
321
  window.set_adapter_lora_cookie = function(adapter_lora, config){
322
- document.cookie = 'adapter_lora='+ adapter_lora+'; Path=/; SameSite=Strict';
323
  // some things I just don't understand, this is one of them
324
- return [adapter_lora, null, null, adapter_lora, config, null]
325
  }
326
  window.set_cookie_2 = function(key, value, config){
327
  document.cookie = key+'='+value+'; Path=/; SameSite=Strict';
@@ -400,10 +420,12 @@ with gr.Blocks(analytics_enabled=False) as demo:
400
  with gr.Row():
401
  gr.Markdown('(Low-Rank-Adaption is a performant fine tuning technique)')
402
  with gr.Row():
403
- in_adapters_lora = gr.Dropdown(value="None", choices=list(adapters['lora'].keys()), label="LoRA Adapter", info="leave empty to not use an adapter")
404
- in_adapters_lora_token = gr.Textbox(value="None", label="Token", info="required to activate the token, will be added to your prompt")
405
- in_adapters_lora_weight = gr.Textbox(value="", label="Description")
406
  out_adapters_lora_description = gr.Textbox(value="", label="Description")
 
 
 
 
407
 
408
  gr.Markdown("### Inference settings")
409
  with gr.Row():
@@ -432,7 +454,7 @@ with gr.Blocks(analytics_enabled=False) as demo:
432
  in_schedulers.change(schedulers_change, inputs=[in_schedulers, config], outputs=[out_scheduler_description, config, out_config, out_code], js="(value, config) => set_cookie_2('scheduler', value, config)")
433
  in_auto_encoders.change(auto_encoders_change, inputs=[in_auto_encoders, config], outputs=[out_auto_encoder_description, config, out_config, out_code], js="(value, config) => set_cookie_2('auto_encoder', value, config)")
434
  in_adapters_textual_inversion.change(adapters_textual_inversion_change, inputs=[in_adapters_textual_inversion, config], outputs=[out_adapters_textual_inversion_description, in_adapters_textual_inversion_token, config, out_config, out_code], js="(adapter_textual_inversion, config) => set_adapter_textual_inversion_cookie(adapter_textual_inversion, config)")
435
- in_adapters_lora.change(adapters_lora_change, inputs=[in_adapters_lora, config], outputs=[out_adapters_lora_description, in_adapters_lora_token, in_adapters_lora_weight, config, out_config, out_code], js="(adapter_lora, config) => set_adapter_lora_cookie(adapter_lora, config)")
436
 
437
  # `GENERIC` CHANGE LISTENERS, SAME INPUT, SAME OUTPUT STRUCTURE
438
  in_devices.change(fn=device_change, inputs=[in_devices, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('device', value, config)")
@@ -448,7 +470,8 @@ with gr.Blocks(analytics_enabled=False) as demo:
448
  in_guidance_scale.change(guidance_scale_change, inputs=[in_guidance_scale, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('guidance_scale', value, config)")
449
  in_enable_vae_slicing.change(enable_vae_slicing_change, inputs=[in_enable_vae_slicing, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('enable_vae_slicing', value, config)")
450
  in_enable_vae_tiling.change(enable_vae_tiling_change, inputs=[in_enable_vae_tiling, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('enable_vae_tiling', value, config)")
451
- in_adapters_textual_inversion_token.change(textual_inversion_token_change, inputs=[in_adapters_textual_inversion_token, config], outputs=[config, out_config, out_code])
 
452
  in_prompt.change(prompt_change, inputs=[in_prompt, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('prompt', value, config)")
453
  in_trigger_token.change(trigger_token_change, inputs=[in_trigger_token, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('trigger_token', value, config)")
454
  in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('negative_prompt', value, config)")
@@ -484,7 +507,8 @@ with gr.Blocks(analytics_enabled=False) as demo:
484
  in_adapters_textual_inversion_token,
485
  in_adapters_lora,
486
  in_adapters_lora_token,
487
- in_adapters_lora_weight
 
488
  ])
489
 
490
  demo.launch(show_error=True)
 
173
 
174
  return adapter_textual_inversion_description, in_adapters_textual_inversion_token, config, config, assemble_code(config)
175
 
176
def adapters_lora_change(adapter_loras, config):
    """Handle a change of the multi-select LoRA adapter dropdown.

    Parameters:
        adapter_loras: list of selected adapter keys from the dropdown
            (keys of ``adapters['lora']``).
        config: the current app config dict/state.

    Looks up each selected adapter's description, token and weight in the
    global ``adapters`` registry, carries over balancing weights the user
    already set in ``config['adapter_lora_balancing']`` (new adapters
    default to 1), and stores everything back into the config.

    Returns the widget values (description, tokens, weights, balancing),
    the updated config twice (state + display) and the regenerated code
    via ``assemble_code``.
    """
    if adapter_loras:
        adapter_lora_description = '; '.join(adapters['lora'][name]['description'] for name in adapter_loras)
        adapter_lora_token = [adapters['lora'][name]['token'] for name in adapter_loras]
        adapter_lora_weight = [adapters['lora'][name]['weight'] for name in adapter_loras]
        # Keep balancing weights the user already tuned; default new adapters to 1.
        adapter_lora_balancing = {name: config['adapter_lora_balancing'].get(name, 1) for name in adapter_loras}
    else:
        # BUG FIX: the description feeds a string Textbox (the non-empty branch
        # returns a str), so use "" here instead of an empty list.
        adapter_lora_description = ""
        adapter_lora_token = []
        adapter_lora_weight = []
        adapter_lora_balancing = {}

    config = set_config(config, 'adapter_lora', adapter_loras)
    config = set_config(config, 'adapter_lora_token', adapter_lora_token)
    config = set_config(config, 'adapter_lora_weight', adapter_lora_weight)
    config = set_config(config, 'adapter_lora_balancing', adapter_lora_balancing)

    return adapter_lora_description, adapter_lora_token, adapter_lora_weight, adapter_lora_balancing, config, config, assemble_code(config)
202
+
203
def adapters_lora_balancing_change(adapter_lora_balancing, config):
    """Parse the Balancing textbox and store the result in the config.

    The textbox holds a Python-dict-like repr (single quotes, ``None``,
    ``True``/``False``); normalize it to valid JSON before parsing.

    Returns the updated config twice (state + display) and the
    regenerated code via ``assemble_code``.
    """
    # BUG FIX: the original chain ended in .replace('False', 'False') — a
    # no-op — so any boolean in the textbox crashed json.loads; JSON needs
    # lowercase true/false/null.
    normalized = (adapter_lora_balancing
                  .replace("'", '"')
                  .replace('None', 'null')
                  .replace('True', 'true')
                  .replace('False', 'false'))
    config = set_config(config, 'adapter_lora_balancing', json.loads(normalized))

    return config, config, assemble_code(config)
208
+
209
  def textual_inversion_token_change(adapter_textual_inversion_token, config):
210
 
211
  config = set_config(config, 'adapter_textual_inversion_token', adapter_textual_inversion_token)
 
277
  pipeline.load_textual_inversion(config["adapter_textual_inversion"], token=config["adapter_textual_inversion_token"])
278
 
279
  # LoRA
280
+ if len(config["adapter_lora"]) > 0 and len(config["adapter_lora"]) == len(config["adapter_lora_weight"]):
281
+ adapter_lora_balancing = []
282
+ for adapter_lora_index, adapter_lora in enumerate(config["adapter_lora"]):
283
+ if str(config["adapter_lora_weight"][adapter_lora_index]).lower() != 'none':
284
+ pipeline.load_lora_weights(adapter_lora, weight_name=config["adapter_lora_weight"][adapter_lora_index], adapter_name=config["adapter_lora_token"][adapter_lora_index])
285
+ else:
286
+ pipeline.load_lora_weights(adapter_lora, adapter_name=config["adapter_lora_token"][adapter_lora_index])
287
+ adapter_lora_balancing.append(config["adapter_lora_balancing"][adapter_lora])
288
+
289
+ adapter_weights = {adapter_lora_balancing}
290
+ pipeline.set_adapters({config["adapter_lora_token"]}, adapter_weights=adapter_weights)
291
 
292
  progress((3,3), desc="Creating the result...")
293
 
294
+ prompt = config["prompt"] + config["trigger_token"] + config["adapter_textual_inversion_token"] + ' '.join(config["adapter_lora_token"])
295
 
296
  image = pipeline(
297
  prompt = prompt,
 
339
  return [adapter_textual_inversion, null, adapter_textual_inversion, config, null]
340
  }
341
  window.set_adapter_lora_cookie = function(adapter_lora, config){
342
+ document.cookie = 'adapter_lora='+ JSON.stringify(adapter_lora)+'; Path=/; SameSite=Strict';
343
  // some things I just don't understand, this is one of them
344
+ return [adapter_lora, null, null, null, adapter_lora, config, null]
345
  }
346
  window.set_cookie_2 = function(key, value, config){
347
  document.cookie = key+'='+value+'; Path=/; SameSite=Strict';
 
420
  with gr.Row():
421
  gr.Markdown('(Low-Rank-Adaption is a performant fine tuning technique)')
422
  with gr.Row():
423
+ in_adapters_lora = gr.Dropdown(value="None", choices=list(adapters['lora'].keys()), multiselect=True, label="LoRA Adapter", info="leave empty to not use an adapter")
 
 
424
  out_adapters_lora_description = gr.Textbox(value="", label="Description")
425
+ with gr.Row():
426
+ in_adapters_lora_token = gr.Textbox(value="None", label="Token(s)", info="required to activate the token, will be added to your prompt")
427
+ in_adapters_lora_weight = gr.Textbox(value="", label="Weight(s)/Checkpoint(s)")
428
+ in_adapters_lora_balancing = gr.Textbox(value={}, label="Balancing", info="provide a list of balancing weights in the order of your LoRA adapter (according to `token`s)")
429
 
430
  gr.Markdown("### Inference settings")
431
  with gr.Row():
 
454
  in_schedulers.change(schedulers_change, inputs=[in_schedulers, config], outputs=[out_scheduler_description, config, out_config, out_code], js="(value, config) => set_cookie_2('scheduler', value, config)")
455
  in_auto_encoders.change(auto_encoders_change, inputs=[in_auto_encoders, config], outputs=[out_auto_encoder_description, config, out_config, out_code], js="(value, config) => set_cookie_2('auto_encoder', value, config)")
456
  in_adapters_textual_inversion.change(adapters_textual_inversion_change, inputs=[in_adapters_textual_inversion, config], outputs=[out_adapters_textual_inversion_description, in_adapters_textual_inversion_token, config, out_config, out_code], js="(adapter_textual_inversion, config) => set_adapter_textual_inversion_cookie(adapter_textual_inversion, config)")
457
+ in_adapters_lora.change(adapters_lora_change, inputs=[in_adapters_lora, config], outputs=[out_adapters_lora_description, in_adapters_lora_token, in_adapters_lora_weight, in_adapters_lora_balancing, config, out_config, out_code], js="(adapter_lora, config) => set_adapter_lora_cookie(adapter_lora, config)")
458
 
459
  # `GENERIC` CHANGE LISTENERS, SAME INPUT, SAME OUTPUT STRUCTURE
460
  in_devices.change(fn=device_change, inputs=[in_devices, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('device', value, config)")
 
470
  in_guidance_scale.change(guidance_scale_change, inputs=[in_guidance_scale, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('guidance_scale', value, config)")
471
  in_enable_vae_slicing.change(enable_vae_slicing_change, inputs=[in_enable_vae_slicing, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('enable_vae_slicing', value, config)")
472
  in_enable_vae_tiling.change(enable_vae_tiling_change, inputs=[in_enable_vae_tiling, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('enable_vae_tiling', value, config)")
473
+ # in_adapters_textual_inversion_token.change(textual_inversion_token_change, inputs=[in_adapters_textual_inversion_token, config], outputs=[config, out_config, out_code])
474
+ in_adapters_lora_balancing.change(adapters_lora_balancing_change, inputs=[in_adapters_lora_balancing, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('adapter_lora_balancing', value, config)")
475
  in_prompt.change(prompt_change, inputs=[in_prompt, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('prompt', value, config)")
476
  in_trigger_token.change(trigger_token_change, inputs=[in_trigger_token, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('trigger_token', value, config)")
477
  in_negative_prompt.change(negative_prompt_change, inputs=[in_negative_prompt, config], outputs=[config, out_config, out_code], js="(value, config) => set_cookie('negative_prompt', value, config)")
 
507
  in_adapters_textual_inversion_token,
508
  in_adapters_lora,
509
  in_adapters_lora_token,
510
+ in_adapters_lora_weight,
511
+ in_adapters_lora_balancing,
512
  ])
513
 
514
  demo.launch(show_error=True)
appConfig.json CHANGED
@@ -79,7 +79,6 @@
79
  }
80
  },
81
  "lora": {
82
- "None": {"token": "", "description": "", "weight": ""},
83
  "nerijs/pixel-art-xl": {
84
  "token": "pixel",
85
  "weight": "pixel-art-xl.safetensors",
 
79
  }
80
  },
81
  "lora": {
 
82
  "nerijs/pixel-art-xl": {
83
  "token": "pixel",
84
  "weight": "pixel-art-xl.safetensors",
config.py CHANGED
@@ -54,9 +54,10 @@ def get_initial_config():
54
  "guidance_scale": 5,
55
  "adapter_textual_inversion": None,
56
  "adapter_textual_inversion_token": None,
57
- "adapter_lora": None,
58
- "adapter_lora_token": None,
59
- "adapter_lora_weight": None,
 
60
  "prompt": 'A white rabbit',
61
  "trigger_token": '',
62
  "negative_prompt": 'lowres, cropped, worst quality, low quality',
@@ -85,7 +86,10 @@ def get_config_from_url(initial_config, request: Request):
85
  for key in initial_config.keys():
86
  if key in request.cookies:
87
  value = request.cookies[key]
 
88
  if value == 'null' or value == '': value = None
 
 
89
  initial_config[key] = value
90
 
91
  return_config = initial_config
@@ -113,7 +117,8 @@ def get_config_from_url(initial_config, request: Request):
113
  return_config['adapter_textual_inversion_token'],
114
  return_config['adapter_lora'],
115
  return_config['adapter_lora_token'],
116
- return_config['adapter_lora_weight']
 
117
  ]
118
 
119
  def load_app_config():
@@ -207,13 +212,20 @@ def assemble_code(str_config):
207
  # ADAPTER
208
  if str(config["adapter_textual_inversion"]).lower() != 'none':
209
  code.append(f'pipeline.load_textual_inversion("{config["adapter_textual_inversion"]}", token="{config["adapter_textual_inversion_token"]}")')
210
- if str(config["adapter_lora"]).lower() != 'none':
211
- if str(config["adapter_lora_weight"]).lower() != 'none':
212
- code.append(f'pipeline.load_lora_weights("{config["adapter_lora"]}", weight_name="{config["adapter_lora_weight"]}", adapter_name="{config["adapter_lora_token"]}")')
213
- else:
214
- code.append(f'pipeline.load_lora_weights("{config["adapter_lora"]}", adapter_name="{config["adapter_lora_token"]}")')
215
 
216
- code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]} {config["adapter_textual_inversion_token"]} {config["adapter_lora_token"]}"')
 
 
 
 
 
 
 
 
 
 
 
 
217
  code.append(f'negative_prompt = "{config["negative_prompt"]}"')
218
  code.append(f'inference_steps = {config["inference_steps"]}')
219
  code.append(f'guidance_scale = {config["guidance_scale"]}')
 
54
  "guidance_scale": 5,
55
  "adapter_textual_inversion": None,
56
  "adapter_textual_inversion_token": None,
57
+ "adapter_lora": [],
58
+ "adapter_lora_token": [],
59
+ "adapter_lora_weight": [],
60
+ "adapter_lora_balancing": {},
61
  "prompt": 'A white rabbit',
62
  "trigger_token": '',
63
  "negative_prompt": 'lowres, cropped, worst quality, low quality',
 
86
  for key in initial_config.keys():
87
  if key in request.cookies:
88
  value = request.cookies[key]
89
+ # transform empty values to a "Python-like" None
90
  if value == 'null' or value == '': value = None
91
+ # if value expected to be a list, transform the string to list
92
+ if type(initial_config[key]) == list: value = json.loads(value)
93
  initial_config[key] = value
94
 
95
  return_config = initial_config
 
117
  return_config['adapter_textual_inversion_token'],
118
  return_config['adapter_lora'],
119
  return_config['adapter_lora_token'],
120
+ return_config['adapter_lora_weight'],
121
+ return_config['adapter_lora_balancing']
122
  ]
123
 
124
  def load_app_config():
 
212
  # ADAPTER
213
  if str(config["adapter_textual_inversion"]).lower() != 'none':
214
  code.append(f'pipeline.load_textual_inversion("{config["adapter_textual_inversion"]}", token="{config["adapter_textual_inversion_token"]}")')
 
 
 
 
 
215
 
216
+ if len(config["adapter_lora"]) > 0 and len(config["adapter_lora"]) == len(config["adapter_lora_weight"]):
217
+ adapter_lora_balancing = []
218
+ for adapter_lora_index, adapter_lora in enumerate(config["adapter_lora"]):
219
+ if str(config["adapter_lora_weight"][adapter_lora_index]).lower() != 'none':
220
+ code.append(f'pipeline.load_lora_weights("{adapter_lora}", weight_name="{config["adapter_lora_weight"][adapter_lora_index]}", adapter_name="{config["adapter_lora_token"][adapter_lora_index]}")')
221
+ else:
222
+ code.append(f'pipeline.load_lora_weights("{adapter_lora}", adapter_name="{config["adapter_lora_token"][adapter_lora_index]}")')
223
+ adapter_lora_balancing.append(config["adapter_lora_balancing"][adapter_lora])
224
+
225
+ code.append(f'adapter_weights = {adapter_lora_balancing}')
226
+ code.append(f'pipeline.set_adapters({config["adapter_lora_token"]}, adapter_weights=adapter_weights)')
227
+
228
+ code.append(f'prompt = "{config["prompt"]} {config["trigger_token"]} {config["adapter_textual_inversion_token"]} {" ".join(config["adapter_lora_token"])}"')
229
  code.append(f'negative_prompt = "{config["negative_prompt"]}"')
230
  code.append(f'inference_steps = {config["inference_steps"]}')
231
  code.append(f'guidance_scale = {config["guidance_scale"]}')