change layout + adjust logic a bit

#2
opened by linoyts (HF staff)

Files changed (2):
  1. app.py  (+94 -78)
  2. requirements.txt  (+2 -1)
app.py CHANGED
@@ -1,6 +1,6 @@
  import os
- os.system("pip uninstall -y gradio")
- os.system('pip install gradio==3.43.1')

  import torch
  import torchvision
@@ -21,7 +21,7 @@ from editing import get_direction, debias
  from sampling import sample_weights
  from lora_w2w import LoRAw2w
  from huggingface_hub import snapshot_download
-
  global device
  global generator
  global unet
@@ -32,7 +32,7 @@ global noise_scheduler
  global network
  device = "cuda:0"
  generator = torch.Generator(device=device)
-


@@ -61,7 +61,7 @@ def sample_model():


  @torch.no_grad()
- def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):
  global device
  global generator
  global unet
@@ -113,7 +113,7 @@ def inference( prompt, negative_prompt, guidance_scale, ddim_steps, seed):


  @torch.no_grad()
- def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):

  global device
  global generator
@@ -196,7 +196,7 @@ def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, st
  network.proj = torch.nn.Parameter(original_weights)
  network.reset()

- return image


  def sample_then_run():
@@ -344,10 +344,11 @@ def invert(image, mask, pcs=10000, epochs=400, weight_decay = 1e-10, lr=1e-1):



- def run_inversion(dict, pcs, epochs, weight_decay,lr):
  global network
- init_image = dict["image"].convert("RGB").resize((512, 512))
- mask = dict["mask"].convert("RGB").resize((512, 512))
  network = invert([init_image], mask, pcs, epochs, weight_decay,lr)

@@ -359,7 +360,7 @@ def run_inversion(dict, pcs, epochs, weight_decay,lr):
  steps = 50
  image = inference( prompt, negative_prompt, cfg, steps, seed)
  torch.save(network.proj, "model.pt" )
- return image, "model.pt"


@@ -430,71 +431,83 @@ intro = """

  with gr.Blocks(css="style.css") as demo:
  gr.HTML(intro)
- with gr.Row():
- with gr.Column():
- gr.Markdown("""<div style="text-align: justify;"> Click below to sample an identity-encoding model.""")
- sample = gr.Button("Sample New Model")
- gr.Markdown("""<div style="text-align: justify;"> Or upload an image below and click \"invert\". You can also optionally draw over the face to define a mask.""")
- input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload image and draw to define mask",
- height=512, width=512, brush_color='#00FFFF', mask_opacity=0.6)
-
- lr = gr.Number(value=1e-1, label="Learning Rate", interactive=True)
- pcs = gr.Slider(label="# Principal Components", value=10000, step=1, minimum=1, maximum=10000, interactive=True)
- with gr.Accordion("Advanced Options", open=False):
  with gr.Column():
- epochs = gr.Slider(label="Epochs", value=400, step=1, minimum=1, maximum=2000, interactive=True)
- weight_decay = gr.Number(value=1e-10, label="Weight Decay", interactive=True)
-
- invert_button = gr.Button("Invert")
-
- gr.Markdown("""<div style="text-align: justify;"> Or you can upload a model below downloaded from this demo.""")

- file_input = gr.File(label="Upload Model", container=True)


-
-

- with gr.Column():
- gallery1 = gr.Image(label="Identity from Original Model", interactive=False)
- prompt1 = gr.Textbox(label="Prompt",
- info="Make sure to include 'sks person'" ,
- placeholder="sks person",
- value="sks person")
- seed1 = gr.Number(value=5, label="Seed", precision=0, interactive=True)
-
  with gr.Accordion("Advanced Options", open=False):
- with gr.Column():
- cfg1= gr.Slider(label="CFG", value=3.0, step=0.1, minimum=0, maximum=10, interactive=True)
- steps1 = gr.Slider(label="Inference Steps", value=50, step=1, minimum=0, maximum=100, interactive=True)
- negative_prompt1 = gr.Textbox(label="Negative Prompt", placeholder="low quality, blurry, unfinished, nudity, weapon", value="low quality, blurry, unfinished, nudity, weapon")

  submit1 = gr.Button("Generate")
-
-
-
- with gr.Column():
- gallery2 = gr.Image(label="Identity from Edited Model", interactive=False )
- with gr.Row():
- a1 = gr.Slider(label="- Young +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
- a2 = gr.Slider(label="- Pointy Nose +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
- with gr.Row():
- a3 = gr.Slider(label="- Curly Hair +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
- a4 = gr.Slider(label="- Thick Eyebrows +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
- prompt2 = gr.Textbox(label="Prompt",
- info="Make sure to include 'sks person'" ,
- placeholder="sks person",
- value="sks person")
- seed2 = gr.Number(value=5, label="Seed", precision=0, interactive=True)
- with gr.Accordion("Advanced Options", open=False):
- with gr.Column():
- cfg2 = gr.Slider(label="CFG", value=3.0, step=0.1, minimum=0, maximum=10, interactive=True)
- steps2 = gr.Slider(label="Inference Steps", value=50, step=1, minimum=0, maximum=100, interactive=True)
- injection_step = gr.Slider(label="Injection Step", value=800, step=1, minimum=0, maximum=1000, interactive=True)
- negative_prompt2 = gr.Textbox(label="Negative Prompt", placeholder="low quality, blurry, unfinished, nudity, weapon", value="low quality, blurry, unfinished, nudity, weapon")
-
- submit2 = gr.Button("Generate")
-


  gr.Markdown("""<div style="text-align: justify;"> After sampling a new model or inverting, you can download the model below.""")
@@ -508,18 +521,21 @@ with gr.Blocks(css="style.css") as demo:

  invert_button.click(fn=run_inversion,
  inputs=[input_image, pcs, epochs, weight_decay,lr],
- outputs = [gallery1, file_output])


- sample.click(fn=sample_then_run, outputs=[gallery1, file_output])
-
- submit1.click(fn=inference,
- inputs=[prompt1, negative_prompt1, cfg1, steps1, seed1],
- outputs=gallery1)
- submit2.click(fn=edit_inference,
- inputs=[prompt2, negative_prompt2, cfg2, steps2, seed2, injection_step, a1, a2, a3, a4],
- outputs=gallery2)
- file_input.change(fn=file_upload, inputs=file_input, outputs = gallery1)


 
  import os
+ # os.system("pip uninstall -y gradio")
+ # #os.system('pip install gradio==3.43.1')

  import torch
  import torchvision
 
  from sampling import sample_weights
  from lora_w2w import LoRAw2w
  from huggingface_hub import snapshot_download
+ import numpy as np
  global device
  global generator
  global unet
 
  global network
  device = "cuda:0"
  generator = torch.Generator(device=device)
+ from gradio_imageslider import ImageSlider


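The `gradio_imageslider` dependency pinned in requirements.txt provides the new `ImageSlider` output: its value is a pair of images the user can wipe between, which is why the handlers further down now return tuples instead of a single image. A minimal sketch of the idea, assuming `gradio_imageslider==0.0.20` on Gradio 4.x; the `to_pair` handler and its grayscale "edit" are stand-ins, not code from this PR:

```python
import gradio as gr
from PIL import Image
from gradio_imageslider import ImageSlider

def to_pair(img: Image.Image):
    # The slider just needs a (before, after) pair; the grayscale copy is a stand-in "edit".
    edited = img.convert("L").convert("RGB")
    return (img, edited)

with gr.Blocks() as demo:
    inp = gr.Image(type="pil")
    out = ImageSlider(position=1.0, type="pil")  # value is an image pair, not a single image
    gr.Button("Compare").click(fn=to_pair, inputs=inp, outputs=out)

demo.launch()
```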
 
 


  @torch.no_grad()
+ def inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed):
  global device
  global generator
  global unet
 


  @torch.no_grad()
+ def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):

  global device
  global generator
 
  network.proj = torch.nn.Parameter(original_weights)
  network.reset()

+ return (image, input_image["background"])


  def sample_then_run():
 



+ def run_inversion(input_image, pcs, epochs, weight_decay,lr):
  global network
+ print(len(input_image["layers"]))
+ init_image = input_image["background"].convert("RGB").resize((512, 512))
+ mask = input_image["layers"][0].convert("RGB").resize((512, 512))
  network = invert([init_image], mask, pcs, epochs, weight_decay,lr)

 
  steps = 50
  image = inference( prompt, negative_prompt, cfg, steps, seed)
  torch.save(network.proj, "model.pt" )
+ return (image,init_image), "model.pt"


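`run_inversion` now receives the value of a `gr.ImageEditor` instead of the old sketch-tool dict. In Gradio 4, with `type='pil'`, that value is a dict holding `"background"`, `"layers"` (a list of RGBA layer images) and `"composite"`; the PR takes the photo from `"background"` and the drawn mask from the first layer. The sketch below only illustrates that structure with a hypothetical `editor_to_inputs` helper, which additionally falls back to a full-image mask when nothing is drawn (the PR itself does not):

```python
import numpy as np
from PIL import Image

def editor_to_inputs(editor_value: dict, size=(512, 512)):
    """Split a gr.ImageEditor value (type='pil') into (init_image, mask)."""
    init_image = editor_value["background"].convert("RGB").resize(size)
    layers = editor_value["layers"]  # one RGBA PIL image per drawing layer
    if layers:
        # Brush strokes are opaque in the layer's alpha channel; threshold it into a mask.
        alpha = np.array(layers[0].resize(size).split()[-1])
        mask = Image.fromarray(np.where(alpha > 0, 255, 0).astype(np.uint8)).convert("RGB")
    else:
        mask = Image.new("RGB", size, "white")  # nothing drawn: mask the whole image
    return init_image, mask
```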
 
 

  with gr.Blocks(css="style.css") as demo:
  gr.HTML(intro)
+
+ gr.Markdown("""
+ Click sample (to sample an identity) *or* upload an image & click `invert` to get started ✨
+ > 💡 When inverting, draw a mask over the face for improved results.
+ > To use a model previously downloaded from this demo, see `Uploading a model` in the `Advanced options`.
+ """)
+ with gr.Column():
+ with gr.Row():
  with gr.Column():
+ # input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload image and draw to define mask",
+ # height=512, width=512, brush_color='#00FFFF', mask_opacity=0.6)
+ input_image = gr.ImageEditor(elem_id="image_upload", type='pil', label="Upload image and draw to define mask",
+ height=512, width=512, brush=gr.Brush(), layers=False)
+ with gr.Row():
+ sample = gr.Button("Sample New Model")
+ invert_button = gr.Button("Invert")
+ with gr.Column():
+ image_slider = ImageSlider(position=1., type="pil", height=512, width=512)
+ # gallery1 = gr.Image(label="Identity from Original Model",height=512, width=512, interactive=False)
+
+ prompt1 = gr.Textbox(label="Prompt",
+ info="Make sure to include 'sks person'" ,
+ placeholder="sks person",
+ value="sks person")
+
+ # Editing
+ with gr.Column():
+ #gallery2 = gr.Image(label="Identity from Edited Model", interactive=False, visible=False )
+ with gr.Row():
+ a1 = gr.Slider(label="- Young +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
+ a2 = gr.Slider(label="- Pointy Nose +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
+ with gr.Row():
+ a3 = gr.Slider(label="- Curly Hair +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
+ a4 = gr.Slider(label="- Thick Eyebrows +", value=0, step=0.001, minimum=-1, maximum=1, interactive=True)
+ # prompt2 = gr.Textbox(label="Prompt",
+ # info="Make sure to include 'sks person'" ,
+ # placeholder="sks person",
+ # value="sks person", visible=False)
+ # seed2 = gr.Number(value=5, label="Seed", precision=0, interactive=True, visible=False)



+ # submit2 = gr.Button("Generate", visible=False)

+
+
  with gr.Accordion("Advanced Options", open=False):
+ with gr.Tab("Inversion"):
+ with gr.Row():
+ lr = gr.Number(value=1e-1, label="Learning Rate", interactive=True)
+ pcs = gr.Slider(label="# Principal Components", value=10000, step=1, minimum=1, maximum=10000, interactive=True)
+ with gr.Row():
+ epochs = gr.Slider(label="Epochs", value=400, step=1, minimum=1, maximum=2000, interactive=True)
+ weight_decay = gr.Number(value=1e-10, label="Weight Decay", interactive=True)
+ with gr.Tab("Sampling"):
+ with gr.Row():
+ cfg1= gr.Slider(label="CFG", value=3.0, step=0.1, minimum=0, maximum=10, interactive=True)
+ steps1 = gr.Slider(label="Inference Steps", value=50, step=1, minimum=0, maximum=100, interactive=True)
+ seed1 = gr.Number(value=5, label="Seed", precision=0, interactive=True)
+ with gr.Row():
+ negative_prompt1 = gr.Textbox(label="Negative Prompt", placeholder="low quality, blurry, unfinished, nudity, weapon", value="low quality, blurry, unfinished, nudity, weapon")
+ injection_step = gr.Slider(label="Injection Step", value=800, step=1, minimum=0, maximum=1000, interactive=True)
+
+
+ # with gr.Tab("Editing"):
+ # with gr.Column():
+ # cfg2 = gr.Slider(label="CFG", value=3.0, step=0.1, minimum=0, maximum=10, interactive=True)
+ # steps2 = gr.Slider(label="Inference Steps", value=50, step=1, minimum=0, maximum=100, interactive=True)
+ # injection_step = gr.Slider(label="Injection Step", value=800, step=1, minimum=0, maximum=1000, interactive=True)
+ # negative_prompt2 = gr.Textbox(label="Negative Prompt", placeholder="low quality, blurry, unfinished, nudity, weapon", value="low quality, blurry, unfinished, nudity, weapon")
+ with gr.Tab("Uploading a model"):
+ gr.Markdown("""<div style="text-align: justify;">Upload a model below downloaded from this demo.""")
+
+ file_input = gr.File(label="Upload Model", container=True)

  submit1 = gr.Button("Generate")
+


  gr.Markdown("""<div style="text-align: justify;"> After sampling a new model or inverting, you can download the model below.""")
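The diff view drops indentation, so the nesting of the rearranged UI is easier to read as a skeleton. The sketch below is one plausible reading of the new block structure (input editor and buttons on the left, the before/after slider and prompt next to it, the attribute sliders in a third column, and the tuning widgets moved into tabs inside the Advanced Options accordion); it is not the committed file and omits the event wiring and model code:

```python
import gradio as gr
from gradio_imageslider import ImageSlider  # assumes gradio_imageslider==0.0.20

with gr.Blocks() as demo:
    gr.Markdown("Click sample *or* upload an image & click `invert` to get started ✨")
    with gr.Column():
        with gr.Row():
            with gr.Column():  # input image + action buttons
                input_image = gr.ImageEditor(type="pil", label="Upload image and draw to define mask",
                                             brush=gr.Brush(), layers=False)
                with gr.Row():
                    sample = gr.Button("Sample New Model")
                    invert_button = gr.Button("Invert")
            with gr.Column():  # before/after slider + prompt
                image_slider = ImageSlider(position=1.0, type="pil")
                prompt1 = gr.Textbox(label="Prompt", value="sks person")
            with gr.Column():  # identity-editing sliders
                with gr.Row():
                    a1 = gr.Slider(label="- Young +", value=0, minimum=-1, maximum=1, step=0.001)
                    a2 = gr.Slider(label="- Pointy Nose +", value=0, minimum=-1, maximum=1, step=0.001)
                with gr.Row():
                    a3 = gr.Slider(label="- Curly Hair +", value=0, minimum=-1, maximum=1, step=0.001)
                    a4 = gr.Slider(label="- Thick Eyebrows +", value=0, minimum=-1, maximum=1, step=0.001)
        with gr.Accordion("Advanced Options", open=False):
            with gr.Tab("Inversion"):
                lr = gr.Number(value=1e-1, label="Learning Rate")
                pcs = gr.Slider(label="# Principal Components", value=10000, minimum=1, maximum=10000, step=1)
            with gr.Tab("Sampling"):
                cfg1 = gr.Slider(label="CFG", value=3.0, minimum=0, maximum=10, step=0.1)
                seed1 = gr.Number(value=5, label="Seed", precision=0)
            with gr.Tab("Uploading a model"):
                file_input = gr.File(label="Upload Model")
        submit1 = gr.Button("Generate")

demo.launch()
```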
 

  invert_button.click(fn=run_inversion,
  inputs=[input_image, pcs, epochs, weight_decay,lr],
+ outputs = [image_slider, file_output])


+ sample.click(fn=sample_then_run, outputs=[input_image, file_output])
+
+ # submit1.click(fn=inference,
+ # inputs=[prompt1, negative_prompt1, cfg1, steps1, seed1],
+ # outputs=gallery1)
+ # submit1.click(fn=edit_inference,
+ # inputs=[input_image, prompt1, negative_prompt1, cfg1, steps1, seed1, injection_step, a1, a2, a3, a4],
+ # outputs=image_slider)
+ submit1.click(
+ fn=edit_inference, inputs=[input_image, prompt1, negative_prompt1, cfg1, steps1, seed1, injection_step, a1, a2, a3, a4], outputs=[image_slider]
+ )
+ file_input.change(fn=file_upload, inputs=file_input, outputs = input_image)


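With the rewired events, each handler's return shape has to match its new outputs list: `edit_inference` feeds the `ImageSlider`, so it returns a `(before, after)` pair, and `run_inversion` returns that pair plus the saved model path for `file_output`. A hedged sketch of that contract with dummy handlers (none of this is the PR's model code; `dummy_edit`, `dummy_invert` and `_background` are made up for illustration):

```python
import gradio as gr
from pathlib import Path
from PIL import Image
from gradio_imageslider import ImageSlider

def _background(editor_value):
    # Fall back to a blank canvas if nothing has been uploaded yet.
    bg = editor_value["background"]
    return bg if bg is not None else Image.new("RGB", (512, 512), "gray")

def dummy_edit(editor_value, prompt):
    before = _background(editor_value)
    after = before.rotate(180)           # placeholder for the real edit_inference call
    return (before, after)               # single output: the ImageSlider pair

def dummy_invert(editor_value):
    before = _background(editor_value)
    Path("model.pt").write_bytes(b"")    # placeholder file so gr.File has something to serve
    return (before, before), "model.pt"  # two outputs: slider pair + downloadable model path

with gr.Blocks() as demo:
    input_image = gr.ImageEditor(type="pil", brush=gr.Brush(), layers=False)
    prompt1 = gr.Textbox(value="sks person", label="Prompt")
    image_slider = ImageSlider(type="pil")
    file_output = gr.File(label="Download Model")
    invert_button = gr.Button("Invert")
    submit1 = gr.Button("Generate")

    invert_button.click(fn=dummy_invert, inputs=[input_image], outputs=[image_slider, file_output])
    submit1.click(fn=dummy_edit, inputs=[input_image, prompt1], outputs=[image_slider])

demo.launch()
```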
 
requirements.txt CHANGED
@@ -70,4 +70,5 @@ urllib3==2.2.1
  wandb==0.17.0
  xxhash==3.4.1
  yarl==1.9.4
- zipp==3.19.0
+ zipp==3.19.0
+ gradio_imageslider==0.0.20