ClaireOzzz committed
Commit 847b4f9
1 Parent(s): 48462d5

Update app_inference.py

Files changed (1):
1. app_inference.py  +131 -132
app_inference.py CHANGED
@@ -226,138 +226,137 @@ button#load_model_btn{
   font-size: 0.9em;
 }
 """
-def create_inference_demo() -> gr.Blocks:
-
-    with gr.Blocks(css=css) as demo:
-        with gr.Column(elem_id="col-container"):
-            if is_shared_ui:
-                top_description = gr.HTML(f'''
-                <div class="gr-prose">
-                <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                Note: you might want to use a <strong>private</strong> custom LoRa model</h2>
-                <p class="main-message">
-                To do so, <strong>duplicate the Space</strong> and run it on your own profile using <strong>your own access token</strong> and eventually a GPU (T4-small or A10G-small) for faster inference without waiting in the queue.<br />
-                </p>
-                <p class="actions">
-                <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
-                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
-                </a>
-                to start using private models and skip the queue
-                </p>
-                </div>
-                ''', elem_id="warning-duplicate")
-            gr.HTML("""
-            <h2 style="text-align: center;">SD-XL Control LoRas</h2>
-            <p style="text-align: center;">Use StableDiffusion XL with <a href="https://huggingface.co/collections/diffusers/sdxl-controlnets-64f9c35846f3f06f5abe351f">Diffusers' SDXL ControlNets</a></p>
-
-            """)
-
-            use_custom_model = gr.Checkbox(label="Use a custom pre-trained LoRa model ? (optional)", value=False, info="To use a private model, you'll need to duplicate the space with your own access token.")
-
-            with gr.Box(visible=False) as custom_model_box:
-                with gr.Row():
-                    with gr.Column():
-                        if not is_shared_ui:
-                            your_username = api.whoami()["name"]
-                            my_models = api.list_models(author=your_username, filter=["diffusers", "stable-diffusion-xl", 'lora'])
-                            model_names = [item.modelId for item in my_models]
-
-                        if not is_shared_ui:
-                            custom_model = gr.Dropdown(
-                                label = "Your custom model ID",
-                                info="You can pick one of your private models",
-                                choices = model_names,
-                                allow_custom_value = True
-                                #placeholder = "username/model_id"
-                            )
-                        else:
-                            custom_model = gr.Textbox(
-                                label="Your custom model ID",
-                                placeholder="your_username/your_trained_model_name",
-                                info="Make sure your model is set to PUBLIC"
-                            )
-
-                        weight_name = gr.Dropdown(
-                            label="Safetensors file",
-                            #value="pytorch_lora_weights.safetensors",
-                            info="specify which one if model has several .safetensors files",
-                            allow_custom_value=True,
-                            visible = False
-                        )
-                    with gr.Column():
-                        with gr.Group():
-                            load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
-                            previous_model = gr.Textbox(
-                                visible = False
-                            )
-                            model_status = gr.Textbox(
-                                label = "model status",
-                                show_label = False,
-                                elem_id = "status_info"
-                            )
-                        trigger_word = gr.Textbox(label="Trigger word", interactive=False, visible=False)
-
-            image_in = gr.Image(source="upload", type="filepath")
-
-            with gr.Row():
-
-                with gr.Column():
-                    with gr.Group():
-                        prompt = gr.Textbox(label="Prompt")
-                        negative_prompt = gr.Textbox(label="Negative prompt", value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured")
-                    with gr.Group():
-                        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=7.5)
-                        inf_steps = gr.Slider(label="Inference Steps", minimum="25", maximum="50", step=1, value=25)
-                        custom_lora_weight = gr.Slider(label="Custom model weights", minimum=0.1, maximum=0.9, step=0.1, value=0.9)
-
-                with gr.Column():
-                    with gr.Group():
-                        preprocessor = gr.Dropdown(label="Preprocessor", choices=["canny"], value="canny", interactive=False, info="For the moment, only canny is available")
-                        controlnet_conditioning_scale = gr.Slider(label="Controlnet conditioning Scale", minimum=0.1, maximum=0.9, step=0.01, value=0.5)
-                    with gr.Group():
-                        seed = gr.Slider(
-                            label="Seed",
-                            info = "-1 denotes a random seed",
-                            minimum=-1,
-                            maximum=423538377342,
-                            step=1,
-                            value=-1
-                        )
-                        last_used_seed = gr.Number(
-                            label = "Last used seed",
-                            info = "the seed used in the last generation",
-                        )
-
-
-            submit_btn = gr.Button("Submit")
-
-            result = gr.Image(label="Result")
-
-            use_custom_model.change(
-                fn = check_use_custom_or_no,
-                inputs =[use_custom_model],
-                outputs = [custom_model_box],
-                queue = False
-            )
-            custom_model.blur(
-                fn=custom_model_changed,
-                inputs = [custom_model, previous_model],
-                outputs = [model_status],
-                queue = False
-            )
-            load_model_btn.click(
-                fn = load_model,
-                inputs=[custom_model],
-                outputs = [previous_model, model_status, weight_name, trigger_word],
-                queue = False
-            )
-            submit_btn.click(
-                fn = infer,
-                inputs = [use_custom_model, custom_model, weight_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, inf_steps, seed],
-                outputs = [result, last_used_seed]
-            )
-
-    return demo
-
-
-#demo.queue(max_size=12).launch(share=True)
+# def create_inference_demo() -> gr.Blocks:
+
+with gr.Blocks(css=css) as demo:
+    with gr.Column(elem_id="col-container"):
+        if is_shared_ui:
+            top_description = gr.HTML(f'''
+            <div class="gr-prose">
+            <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
+            Note: you might want to use a <strong>private</strong> custom LoRa model</h2>
+            <p class="main-message">
+            To do so, <strong>duplicate the Space</strong> and run it on your own profile using <strong>your own access token</strong> and eventually a GPU (T4-small or A10G-small) for faster inference without waiting in the queue.<br />
+            </p>
+            <p class="actions">
+            <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
+            <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
+            </a>
+            to start using private models and skip the queue
+            </p>
+            </div>
+            ''', elem_id="warning-duplicate")
+        gr.HTML("""
+        <h2 style="text-align: center;">SD-XL Control LoRas</h2>
+        <p style="text-align: center;">Use StableDiffusion XL with <a href="https://huggingface.co/collections/diffusers/sdxl-controlnets-64f9c35846f3f06f5abe351f">Diffusers' SDXL ControlNets</a></p>
+        """)
+
+        use_custom_model = gr.Checkbox(label="Use a custom pre-trained LoRa model ? (optional)", value=False, info="To use a private model, you'll need to duplicate the space with your own access token.")
+
+        with gr.Blocks(visible=False) as custom_model_box:
+            with gr.Row():
+                with gr.Column():
+                    if not is_shared_ui:
+                        your_username = api.whoami()["name"]
+                        my_models = api.list_models(author=your_username, filter=["diffusers", "stable-diffusion-xl", 'lora'])
+                        model_names = [item.modelId for item in my_models]
+
+                    if not is_shared_ui:
+                        custom_model = gr.Dropdown(
+                            label = "Your custom model ID",
+                            info="You can pick one of your private models",
+                            choices = model_names,
+                            allow_custom_value = True
+                            #placeholder = "username/model_id"
+                        )
+                    else:
+                        custom_model = gr.Textbox(
+                            label="Your custom model ID",
+                            placeholder="your_username/your_trained_model_name",
+                            info="Make sure your model is set to PUBLIC"
+                        )
+
+                    weight_name = gr.Dropdown(
+                        label="Safetensors file",
+                        #value="pytorch_lora_weights.safetensors",
+                        info="specify which one if model has several .safetensors files",
+                        allow_custom_value=True,
+                        visible = False
+                    )
+                with gr.Column():
+                    with gr.Group():
+                        load_model_btn = gr.Button("Load my model", elem_id="load_model_btn")
+                        previous_model = gr.Textbox(
+                            visible = False
+                        )
+                        model_status = gr.Textbox(
+                            label = "model status",
+                            show_label = False,
+                            elem_id = "status_info"
+                        )
+                    trigger_word = gr.Textbox(label="Trigger word", interactive=False, visible=False)
+
+        image_in = gr.Image(sources="upload", type="filepath")
+
+        with gr.Row():
+
+            with gr.Column():
+                with gr.Group():
+                    prompt = gr.Textbox(label="Prompt")
+                    negative_prompt = gr.Textbox(label="Negative prompt", value="extra digit, fewer digits, cropped, worst quality, low quality, glitch, deformed, mutated, ugly, disfigured")
+                with gr.Group():
+                    guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=7.5)
+                    inf_steps = gr.Slider(label="Inference Steps", minimum="25", maximum="50", step=1, value=25)
+                    custom_lora_weight = gr.Slider(label="Custom model weights", minimum=0.1, maximum=0.9, step=0.1, value=0.9)
+
+            with gr.Column():
+                with gr.Group():
+                    preprocessor = gr.Dropdown(label="Preprocessor", choices=["canny"], value="canny", interactive=False, info="For the moment, only canny is available")
+                    controlnet_conditioning_scale = gr.Slider(label="Controlnet conditioning Scale", minimum=0.1, maximum=0.9, step=0.01, value=0.5)
+                with gr.Group():
+                    seed = gr.Slider(
+                        label="Seed",
+                        info = "-1 denotes a random seed",
+                        minimum=-1,
+                        maximum=423538377342,
+                        step=1,
+                        value=-1
+                    )
+                    last_used_seed = gr.Number(
+                        label = "Last used seed",
+                        info = "the seed used in the last generation",
+                    )
+
+
+        submit_btn = gr.Button("Submit")
+
+        result = gr.Image(label="Result")
+
+        use_custom_model.change(
+            fn = check_use_custom_or_no,
+            inputs =[use_custom_model],
+            outputs = [custom_model_box],
+            queue = False
+        )
+        custom_model.blur(
+            fn=custom_model_changed,
+            inputs = [custom_model, previous_model],
+            outputs = [model_status],
+            queue = False
+        )
+        load_model_btn.click(
+            fn = load_model,
+            inputs=[custom_model],
+            outputs = [previous_model, model_status, weight_name, trigger_word],
+            queue = False
+        )
+        submit_btn.click(
+            fn = infer,
+            inputs = [use_custom_model, custom_model, weight_name, custom_lora_weight, image_in, prompt, negative_prompt, preprocessor, controlnet_conditioning_scale, guidance_scale, inf_steps, seed],
+            outputs = [result, last_used_seed]
+        )
+
+# return demo
+
+
+demo.queue(max_size=12).launch(share=True)