Commit 1b8bc30 by multimodalart (HF staff)
1 Parent(s): c15e7de

Update app.py

Files changed (1):
  1. app.py +320 -301
app.py CHANGED
@@ -204,12 +204,17 @@ def start_training(
     dataloader_num_workers,
     local_rank,
     dataset_folder,
-    token,
-    progress = gr.Progress(track_tqdm=True)
-    ):
-    print("Started training")
+    #token,
+    progress = gr.Progress(track_tqdm=True),
+    oauth_token = gr.OAuthToken
+    ):
+    if(oauth_token is None):
+        raise gr.Error("You aren't logged in!")
+    else:
+        token = oauth_token
     if not lora_name:
         raise gr.Error("You forgot to insert your LoRA name! This name has to be unique.")
+    print("Started training")
     slugged_lora_name = slugify(lora_name)
     spacerunner_folder = str(uuid.uuid4())
     commands = [
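The rewritten signature leans on Gradio's OAuth injection: a handler parameter annotated with `gr.OAuthToken` is populated from the visitor's session and is `None` when they are logged out. Note that the committed code writes `oauth_token = gr.OAuthToken`, which assigns the class itself as a default value rather than annotating the type, and then uses the injected object directly as a token. A minimal sketch of the annotation-based pattern, assuming a Space with `hf_oauth: true` in its README metadata:

```python
# Sketch only: how Gradio injects a Hugging Face OAuth token into a handler.
# Requires a Space with `hf_oauth: true`; outside such a Space the parameter
# is simply None.
import gradio as gr

def start_training_sketch(lora_name: str, oauth_token: gr.OAuthToken | None):
    if oauth_token is None:
        raise gr.Error("You aren't logged in!")
    token = oauth_token.token  # the raw access-token string lives on .token
    return f"Training '{lora_name}' with a token of length {len(token)}"
```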
@@ -466,7 +471,7 @@ def run_captioning(*inputs):
         final_captions[index] = final_caption
         yield final_captions
 
-def check_token(token):
+def check_token(token: gr.OAuthToken):
    try:
        api = HfApi(token=token)
        user_data = api.whoami()
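`check_token` now receives the injected `gr.OAuthToken` object itself and hands it straight to `HfApi(token=...)`, which expects the raw string. A hedged sketch of the validation step that unwraps the object first (an assumption about the intended behavior, not what this commit does):

```python
# Sketch, not the committed code: validate a login with huggingface_hub.
from huggingface_hub import HfApi
import gradio as gr

def check_token_sketch(token: gr.OAuthToken | None) -> str | None:
    if token is None:
        return None  # not logged in
    api = HfApi(token=token.token)  # HfApi wants the token string
    user_data = api.whoami()        # raises if the token is invalid
    return user_data["name"]
```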
@@ -494,335 +499,343 @@ css = '''.gr-group{background-color: transparent;box-shadow: var(--block-shadow)
 #training_cost h4{margin-top: 1.25em;margin-bottom: 0}
 #training_cost small{font-weight: normal}
 .accordion {color: var(--body-text-color)}
+.main_unlogged{opacity: 0.5, pointer-events: none}
 '''
 theme = gr.themes.Monochrome(
     text_size=gr.themes.Size(lg="18px", md="15px", sm="13px", xl="22px", xs="12px", xxl="24px", xxs="9px"),
     font=[gr.themes.GoogleFont('Source Sans Pro'), 'ui-sans-serif', 'system-ui', 'sans-serif'],
 )
+def swap_opacity(token: gr.OAuthToken):
+    if token is None:
+        return gr.update(elem_classes=["main_unlogged"])
+    else:
+        return gr.update(elem_classes=["main_logged"])
 with gr.Blocks(css=css, theme=theme) as demo:
     dataset_folder = gr.State()
     gr.Markdown('''# LoRA Ease 🧞‍♂️
 ### Train a high quality SDXL LoRA in a breeze ༄ with state-of-the-art techniques
 <small>Dreambooth with Pivotal Tuning, Prodigy and more! Use the trained LoRAs with diffusers, AUTO1111, Comfy. [blog about the training script](#), [Colab Pro](#), [run locally or in a cloud](#)</small>''', elem_id="main_title")
-    lora_name = gr.Textbox(label="The name of your LoRA", info="This has to be a unique name", placeholder="e.g.: Persian Miniature Painting style, Cat Toy")
-    training_option = gr.Radio(
-        label="What are you training?", choices=["object", "style", "face", "custom"]
-    )
-    concept_sentence = gr.Textbox(
-        label="Concept sentence",
-        info="Sentence to be used in all images for captioning. TOK is a special mandatory token, used to teach the model your concept.",
-        placeholder="e.g.: A photo of TOK, in the style of TOK",
-        visible=False,
-        interactive=True,
-    )
-    with gr.Group(visible=False) as image_upload:
-        with gr.Row():
-            images = gr.File(
-                file_types=["image"],
-                label="Upload your images",
-                file_count="multiple",
-                interactive=True,
-                visible=True,
-                scale=1,
-            )
-            with gr.Column(scale=3, visible=False) as captioning_area:
-                with gr.Column():
-                    gr.Markdown(
-                        """# Custom captioning
-To improve the quality of your outputs, you can add a custom caption for each image, describing exactly what is taking place in each of them. Including TOK is mandatory. You can leave things as is if you don't want to include captioning.
-"""
-                    )
-                    do_captioning = gr.Button("Add AI captions with BLIP-2")
-                    output_components = [captioning_area]
-                    caption_list = []
-                    for i in range(1, MAX_IMAGES + 1):
-                        locals()[f"captioning_row_{i}"] = gr.Row(visible=False)
-                        with locals()[f"captioning_row_{i}"]:
-                            locals()[f"image_{i}"] = gr.Image(
-                                width=111,
-                                height=111,
-                                min_width=111,
-                                interactive=False,
-                                scale=2,
-                                show_label=False,
-                                show_share_button=False,
-                                show_download_button=False
-                            )
-                            locals()[f"caption_{i}"] = gr.Textbox(
-                                label=f"Caption {i}", scale=15
-                            )
-
-                        output_components.append(locals()[f"captioning_row_{i}"])
-                        output_components.append(locals()[f"image_{i}"])
-                        output_components.append(locals()[f"caption_{i}"])
-                        caption_list.append(locals()[f"caption_{i}"])
-    with gr.Accordion(open=False, label="Advanced options", visible=False, elem_classes=['accordion']) as advanced:
-        with gr.Row():
-            with gr.Column():
-                optimizer = gr.Dropdown(
-                    label="Optimizer",
-                    info="Prodigy is an auto-optimizer and works good by default. If you prefer to set your own learning rates, change it to AdamW. If you don't have enough VRAM to train with AdamW, pick 8-bit Adam.",
-                    choices=[
-                        ("Prodigy", "prodigy"),
-                        ("AdamW", "adamW"),
-                        ("8-bit Adam", "8bitadam"),
-                    ],
-                    value="prodigy",
-                    interactive=True,
-                )
-                use_snr_gamma = gr.Checkbox(label="Use SNR Gamma")
-                snr_gamma = gr.Number(
-                    label="snr_gamma",
-                    info="SNR weighting gamma to re-balance the loss",
-                    value=5.000,
-                    step=0.1,
-                    visible=False,
-                )
-                mixed_precision = gr.Dropdown(
-                    label="Mixed Precision",
-                    choices=["no", "fp16", "bf16"],
-                    value="bf16",
-                )
-                learning_rate = gr.Number(
-                    label="UNet Learning rate",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.0000001,
-                    value=1.0, # For prodigy you start high and it will optimize down
-                )
-                max_train_steps = gr.Number(
-                    label="Max train steps", minimum=1, maximum=50000, value=1000
-                )
-                lora_rank = gr.Number(
-                    label="LoRA Rank",
-                    info="Rank for the Low Rank Adaptation (LoRA), a higher rank produces a larger LoRA",
-                    value=8,
-                    step=2,
-                    minimum=2,
-                    maximum=1024,
-                )
-                repeats = gr.Number(
-                    label="Repeats",
-                    info="How many times to repeat the training data.",
-                    value=1,
-                    minimum=1,
-                    maximum=200,
-                )
-            with gr.Column():
-                with_prior_preservation = gr.Checkbox(
-                    label="Prior preservation loss",
-                    info="Prior preservation helps to ground the model to things that are similar to your concept. Good for faces.",
-                    value=False,
-                )
-                with gr.Column(visible=False) as prior_preservation_params:
-                    with gr.Tab("prompt"):
-                        class_prompt = gr.Textbox(
-                            label="Class Prompt",
-                            info="The prompt that will be used to generate your class images",
-                        )
-
-                    with gr.Tab("images"):
-                        class_images = gr.File(
-                            file_types=["image"],
-                            label="Upload your images",
-                            file_count="multiple",
-                        )
-                    num_class_images = gr.Number(
-                        label="Number of class images, if there are less images uploaded then the number you put here, additional images will be sampled with Class Prompt",
-                        value=20,
-                    )
-                train_text_encoder_ti = gr.Checkbox(
-                    label="Do textual inversion",
-                    value=True,
-                    info="Will train a textual inversion embedding together with the LoRA. Increases quality significantly. If untoggled, you can remove the special TOK token from the prompts.",
-                )
-                with gr.Group(visible=True) as pivotal_tuning_params:
-                    train_text_encoder_ti_frac = gr.Number(
-                        label="Pivot Textual Inversion",
-                        info="% of epochs to train textual inversion for",
-                        value=0.5,
-                        step=0.1,
-                    )
-                    num_new_tokens_per_abstraction = gr.Number(
-                        label="Tokens to train",
-                        info="Number of tokens to train in the textual inversion",
-                        value=2,
-                        minimum=1,
-                        maximum=1024,
-                        interactive=True,
-                    )
-                with gr.Group(visible=False) as text_encoder_train_params:
-                    train_text_encoder = gr.Checkbox(
-                        label="Train Text Encoder", value=True
-                    )
-                    train_text_encoder_frac = gr.Number(
-                        label="Pivot Text Encoder",
-                        info="% of epochs to train the text encoder for",
-                        value=0.8,
-                        step=0.1,
-                    )
-                    text_encoder_learning_rate = gr.Number(
-                        label="Text encoder learning rate",
-                        minimum=0.0,
-                        maximum=10.0,
-                        step=0.0000001,
-                        value=1.0,
-                    )
-                seed = gr.Number(label="Seed", value=42)
-                resolution = gr.Number(
-                    label="Resolution",
-                    info="Only square sizes are supported for now, the value will be width and height",
-                    value=1024,
-                )
-
-    with gr.Accordion(open=False, label="Even more advanced options", elem_classes=['accordion']):
-        with gr.Row():
-            with gr.Column():
-                gradient_accumulation_steps = gr.Number(
-                    info="If you change this setting, the pricing calculation will be wrong",
-                    label="gradient_accumulation_steps",
-                    value=1
-                )
-                train_batch_size = gr.Number(
-                    info="If you change this setting, the pricing calculation will be wrong",
-                    label="Train batch size",
-                    value=2
-                )
-                num_train_epochs = gr.Number(
-                    info="If you change this setting, the pricing calculation will be wrong",
-                    label="num_train_epochs",
-                    value=1
-                )
-                checkpointing_steps = gr.Number(
-                    info="How many steps to save intermediate checkpoints",
-                    label="checkpointing_steps",
-                    value=5000
-                )
-                prior_loss_weight = gr.Number(
-                    label="prior_loss_weight",
-                    value=1
-                )
-                gradient_checkpointing = gr.Checkbox(
-                    label="gradient_checkpointing",
-                    info="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass",
-                    value=True,
-                )
-                adam_beta1 = gr.Number(
-                    label="adam_beta1",
-                    value=0.9,
-                    minimum=0,
-                    maximum=1,
-                    step=0.01
-                )
-                adam_beta2 = gr.Number(
-                    label="adam_beta2",
-                    minimum=0,
-                    maximum=1,
-                    step=0.01,
-                    value=0.999
-                )
-                prodigy_beta3 = gr.Number(
-                    label="Prodigy Beta 3",
-                    value=None,
-                    step=0.01,
-                    minimum=0,
-                    maximum=1,
-                )
-                prodigy_decouple = gr.Checkbox(label="Prodigy Decouple")
-                adam_weight_decay = gr.Number(
-                    label="Adam Weight Decay",
-                    value=1e-04,
-                    step=0.00001,
-                    minimum=0,
-                    maximum=1,
-                )
-                adam_weight_decay_text_encoder = gr.Number(
-                    label="Adam Weight Decay Text Encoder",
-                    value=None,
-                    step=0.00001,
-                    minimum=0,
-                    maximum=1,
-                )
-                adam_epsilon = gr.Number(
-                    label="Adam Epsilon",
-                    value=1e-08,
-                    step=0.00000001,
-                    minimum=0,
-                    maximum=1,
-                )
-                prodigy_use_bias_correction = gr.Checkbox(
-                    label="Prodigy Use Bias Correction",
-                    value=True
-                )
-                prodigy_safeguard_warmup = gr.Checkbox(
-                    label="Prodigy Safeguard Warmup",
-                    value=True
-                )
-                max_grad_norm = gr.Number(
-                    label="Max Grad Norm",
-                    value=1.0,
-                    minimum=0.1,
-                    maximum=10,
-                    step=0.1,
-                )
-                enable_xformers_memory_efficient_attention = gr.Checkbox(
-                    label="enable_xformers_memory_efficient_attention"
-                )
-            with gr.Column():
-                scale_lr = gr.Checkbox(
-                    label="Scale learning rate",
-                    info="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size",
-                )
-                lr_num_cycles = gr.Number(
-                    label="lr_num_cycles",
-                    value=1
-                )
-                lr_scheduler = gr.Dropdown(
-                    label="lr_scheduler",
-                    choices=[
-                        "linear",
-                        "cosine",
-                        "cosine_with_restarts",
-                        "polynomial",
-                        "constant",
-                        "constant_with_warmup",
-                    ],
-                    value="constant",
-                )
-                lr_power = gr.Number(
-                    label="lr_power",
-                    value=1.0,
-                    minimum=0.1,
-                    maximum=10
-                )
-                lr_warmup_steps = gr.Number(
-                    label="lr_warmup_steps",
-                    value=0
-                )
-                dataloader_num_workers = gr.Number(
-                    label="Dataloader num workers", value=0, minimum=0, maximum=64
-                )
-                local_rank = gr.Number(
-                    label="local_rank",
-                    value=-1
-                )
-    with gr.Column(visible=False) as cost_estimation:
-        with gr.Group(elem_id="cost_box"):
-            training_cost_estimate = gr.Markdown(elem_id="training_cost")
-            token = gr.Textbox(label="Your Hugging Face write token", info="A Hugging Face write token you can obtain on the settings page", type="password", placeholder="hf_OhHiThIsIsNoTaReALToKeNGOoDTry")
-    with gr.Group(visible=False) as no_payment_method:
-        with gr.Row():
-            gr.HTML("<h3 style='margin: 0'>Your Hugging Face account doesn't have a payment method set up. Set one up <a href='https://huggingface.co/settings/billing/payment' target='_blank'>here</a> and come back here to train your LoRA</h3>")
-            payment_setup = gr.Button("I have set up a payment method")
-
-    start = gr.Button("Start training", visible=False, interactive=True)
-    progress_area = gr.Markdown("")
+    gr.LoginButton()
+    with gr.Column(elem_classes=["main_unlogged"]) as main_ui:
+        lora_name = gr.Textbox(label="The name of your LoRA", info="This has to be a unique name", placeholder="e.g.: Persian Miniature Painting style, Cat Toy")
+        training_option = gr.Radio(
+            label="What are you training?", choices=["object", "style", "face", "custom"]
+        )
+        concept_sentence = gr.Textbox(
+            label="Concept sentence",
+            info="Sentence to be used in all images for captioning. TOK is a special mandatory token, used to teach the model your concept.",
+            placeholder="e.g.: A photo of TOK, in the style of TOK",
+            visible=False,
+            interactive=True,
+        )
+        with gr.Group(visible=False) as image_upload:
+            with gr.Row():
+                images = gr.File(
+                    file_types=["image"],
+                    label="Upload your images",
+                    file_count="multiple",
+                    interactive=True,
+                    visible=True,
+                    scale=1,
+                )
+                with gr.Column(scale=3, visible=False) as captioning_area:
+                    with gr.Column():
+                        gr.Markdown(
+                            """# Custom captioning
+To improve the quality of your outputs, you can add a custom caption for each image, describing exactly what is taking place in each of them. Including TOK is mandatory. You can leave things as is if you don't want to include captioning.
+"""
+                        )
+                        do_captioning = gr.Button("Add AI captions with BLIP-2")
+                        output_components = [captioning_area]
+                        caption_list = []
+                        for i in range(1, MAX_IMAGES + 1):
+                            locals()[f"captioning_row_{i}"] = gr.Row(visible=False)
+                            with locals()[f"captioning_row_{i}"]:
+                                locals()[f"image_{i}"] = gr.Image(
+                                    width=111,
+                                    height=111,
+                                    min_width=111,
+                                    interactive=False,
+                                    scale=2,
+                                    show_label=False,
+                                    show_share_button=False,
+                                    show_download_button=False
+                                )
+                                locals()[f"caption_{i}"] = gr.Textbox(
+                                    label=f"Caption {i}", scale=15
+                                )
+
+                            output_components.append(locals()[f"captioning_row_{i}"])
+                            output_components.append(locals()[f"image_{i}"])
+                            output_components.append(locals()[f"caption_{i}"])
+                            caption_list.append(locals()[f"caption_{i}"])
+        with gr.Accordion(open=False, label="Advanced options", visible=False, elem_classes=['accordion']) as advanced:
+            with gr.Row():
+                with gr.Column():
+                    optimizer = gr.Dropdown(
+                        label="Optimizer",
+                        info="Prodigy is an auto-optimizer and works good by default. If you prefer to set your own learning rates, change it to AdamW. If you don't have enough VRAM to train with AdamW, pick 8-bit Adam.",
+                        choices=[
+                            ("Prodigy", "prodigy"),
+                            ("AdamW", "adamW"),
+                            ("8-bit Adam", "8bitadam"),
+                        ],
+                        value="prodigy",
+                        interactive=True,
+                    )
+                    use_snr_gamma = gr.Checkbox(label="Use SNR Gamma")
+                    snr_gamma = gr.Number(
+                        label="snr_gamma",
+                        info="SNR weighting gamma to re-balance the loss",
+                        value=5.000,
+                        step=0.1,
+                        visible=False,
+                    )
+                    mixed_precision = gr.Dropdown(
+                        label="Mixed Precision",
+                        choices=["no", "fp16", "bf16"],
+                        value="bf16",
+                    )
+                    learning_rate = gr.Number(
+                        label="UNet Learning rate",
+                        minimum=0.0,
+                        maximum=10.0,
+                        step=0.0000001,
+                        value=1.0, # For prodigy you start high and it will optimize down
+                    )
+                    max_train_steps = gr.Number(
+                        label="Max train steps", minimum=1, maximum=50000, value=1000
+                    )
+                    lora_rank = gr.Number(
+                        label="LoRA Rank",
+                        info="Rank for the Low Rank Adaptation (LoRA), a higher rank produces a larger LoRA",
+                        value=8,
+                        step=2,
+                        minimum=2,
+                        maximum=1024,
+                    )
+                    repeats = gr.Number(
+                        label="Repeats",
+                        info="How many times to repeat the training data.",
+                        value=1,
+                        minimum=1,
+                        maximum=200,
+                    )
+                with gr.Column():
+                    with_prior_preservation = gr.Checkbox(
+                        label="Prior preservation loss",
+                        info="Prior preservation helps to ground the model to things that are similar to your concept. Good for faces.",
+                        value=False,
+                    )
+                    with gr.Column(visible=False) as prior_preservation_params:
+                        with gr.Tab("prompt"):
+                            class_prompt = gr.Textbox(
+                                label="Class Prompt",
+                                info="The prompt that will be used to generate your class images",
+                            )
+
+                        with gr.Tab("images"):
+                            class_images = gr.File(
+                                file_types=["image"],
+                                label="Upload your images",
+                                file_count="multiple",
+                            )
+                        num_class_images = gr.Number(
+                            label="Number of class images, if there are less images uploaded then the number you put here, additional images will be sampled with Class Prompt",
+                            value=20,
+                        )
+                    train_text_encoder_ti = gr.Checkbox(
+                        label="Do textual inversion",
+                        value=True,
+                        info="Will train a textual inversion embedding together with the LoRA. Increases quality significantly. If untoggled, you can remove the special TOK token from the prompts.",
+                    )
+                    with gr.Group(visible=True) as pivotal_tuning_params:
+                        train_text_encoder_ti_frac = gr.Number(
+                            label="Pivot Textual Inversion",
+                            info="% of epochs to train textual inversion for",
+                            value=0.5,
+                            step=0.1,
+                        )
+                        num_new_tokens_per_abstraction = gr.Number(
+                            label="Tokens to train",
+                            info="Number of tokens to train in the textual inversion",
+                            value=2,
+                            minimum=1,
+                            maximum=1024,
+                            interactive=True,
+                        )
+                    with gr.Group(visible=False) as text_encoder_train_params:
+                        train_text_encoder = gr.Checkbox(
+                            label="Train Text Encoder", value=True
+                        )
+                        train_text_encoder_frac = gr.Number(
+                            label="Pivot Text Encoder",
+                            info="% of epochs to train the text encoder for",
+                            value=0.8,
+                            step=0.1,
+                        )
+                        text_encoder_learning_rate = gr.Number(
+                            label="Text encoder learning rate",
+                            minimum=0.0,
+                            maximum=10.0,
+                            step=0.0000001,
+                            value=1.0,
+                        )
+                    seed = gr.Number(label="Seed", value=42)
+                    resolution = gr.Number(
+                        label="Resolution",
+                        info="Only square sizes are supported for now, the value will be width and height",
+                        value=1024,
+                    )
+
+        with gr.Accordion(open=False, label="Even more advanced options", elem_classes=['accordion']):
+            with gr.Row():
+                with gr.Column():
+                    gradient_accumulation_steps = gr.Number(
+                        info="If you change this setting, the pricing calculation will be wrong",
+                        label="gradient_accumulation_steps",
+                        value=1
+                    )
+                    train_batch_size = gr.Number(
+                        info="If you change this setting, the pricing calculation will be wrong",
+                        label="Train batch size",
+                        value=2
+                    )
+                    num_train_epochs = gr.Number(
+                        info="If you change this setting, the pricing calculation will be wrong",
+                        label="num_train_epochs",
+                        value=1
+                    )
+                    checkpointing_steps = gr.Number(
+                        info="How many steps to save intermediate checkpoints",
+                        label="checkpointing_steps",
+                        value=5000
+                    )
+                    prior_loss_weight = gr.Number(
+                        label="prior_loss_weight",
+                        value=1
+                    )
+                    gradient_checkpointing = gr.Checkbox(
+                        label="gradient_checkpointing",
+                        info="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass",
+                        value=True,
+                    )
+                    adam_beta1 = gr.Number(
+                        label="adam_beta1",
+                        value=0.9,
+                        minimum=0,
+                        maximum=1,
+                        step=0.01
+                    )
+                    adam_beta2 = gr.Number(
+                        label="adam_beta2",
+                        minimum=0,
+                        maximum=1,
+                        step=0.01,
+                        value=0.999
+                    )
+                    prodigy_beta3 = gr.Number(
+                        label="Prodigy Beta 3",
+                        value=None,
+                        step=0.01,
+                        minimum=0,
+                        maximum=1,
+                    )
+                    prodigy_decouple = gr.Checkbox(label="Prodigy Decouple")
+                    adam_weight_decay = gr.Number(
+                        label="Adam Weight Decay",
+                        value=1e-04,
+                        step=0.00001,
+                        minimum=0,
+                        maximum=1,
+                    )
+                    adam_weight_decay_text_encoder = gr.Number(
+                        label="Adam Weight Decay Text Encoder",
+                        value=None,
+                        step=0.00001,
+                        minimum=0,
+                        maximum=1,
+                    )
+                    adam_epsilon = gr.Number(
+                        label="Adam Epsilon",
+                        value=1e-08,
+                        step=0.00000001,
+                        minimum=0,
+                        maximum=1,
+                    )
+                    prodigy_use_bias_correction = gr.Checkbox(
+                        label="Prodigy Use Bias Correction",
+                        value=True
+                    )
+                    prodigy_safeguard_warmup = gr.Checkbox(
+                        label="Prodigy Safeguard Warmup",
+                        value=True
+                    )
+                    max_grad_norm = gr.Number(
+                        label="Max Grad Norm",
+                        value=1.0,
+                        minimum=0.1,
+                        maximum=10,
+                        step=0.1,
+                    )
+                    enable_xformers_memory_efficient_attention = gr.Checkbox(
+                        label="enable_xformers_memory_efficient_attention"
+                    )
+                with gr.Column():
+                    scale_lr = gr.Checkbox(
+                        label="Scale learning rate",
+                        info="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size",
+                    )
+                    lr_num_cycles = gr.Number(
+                        label="lr_num_cycles",
+                        value=1
+                    )
+                    lr_scheduler = gr.Dropdown(
+                        label="lr_scheduler",
+                        choices=[
+                            "linear",
+                            "cosine",
+                            "cosine_with_restarts",
+                            "polynomial",
+                            "constant",
+                            "constant_with_warmup",
+                        ],
+                        value="constant",
+                    )
+                    lr_power = gr.Number(
+                        label="lr_power",
+                        value=1.0,
+                        minimum=0.1,
+                        maximum=10
+                    )
+                    lr_warmup_steps = gr.Number(
+                        label="lr_warmup_steps",
+                        value=0
+                    )
+                    dataloader_num_workers = gr.Number(
+                        label="Dataloader num workers", value=0, minimum=0, maximum=64
+                    )
+                    local_rank = gr.Number(
+                        label="local_rank",
+                        value=-1
+                    )
+        with gr.Column(visible=False) as cost_estimation:
+            with gr.Group(elem_id="cost_box"):
+                training_cost_estimate = gr.Markdown(elem_id="training_cost")
+                #token = gr.Textbox(label="Your Hugging Face write token", info="A Hugging Face write token you can obtain on the settings page", type="password", placeholder="hf_OhHiThIsIsNoTaReALToKeNGOoDTry")
+        with gr.Group(visible=False) as no_payment_method:
+            with gr.Row():
+                gr.HTML("<h3 style='margin: 0'>Your Hugging Face account doesn't have a payment method set up. Set one up <a href='https://huggingface.co/settings/billing/payment' target='_blank'>here</a> and come back here to train your LoRA</h3>")
+                payment_setup = gr.Button("I have set up a payment method")
+
+        start = gr.Button("Start training", visible=False, interactive=True)
+        progress_area = gr.Markdown("")
 
     output_components.insert(1, advanced)
     output_components.insert(1, cost_estimation)
     gr.on(
         triggers=[
-            token.change,
+            #token.change,
             payment_setup.click
         ],
         fn=check_token,
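Taken together, the additions in this hunk form a login gate: `gr.LoginButton()` renders the OAuth sign-in, the whole form is wrapped in a column carrying the `.main_unlogged` class (dimmed and click-proof), and `swap_opacity` swaps the class once a token is present. One caveat: CSS separates declarations with semicolons, so the committed rule `opacity: 0.5, pointer-events: none` likely needs a `;` in place of the comma to take effect. A self-contained sketch of the gating pattern:

```python
# Sketch of the login-gating pattern used above (semicolon fixed in the CSS).
import gradio as gr

css = ".main_unlogged{opacity: 0.5; pointer-events: none}"

def swap_opacity(token: gr.OAuthToken | None):
    # Restyle the wrapped column depending on login state.
    if token is None:
        return gr.update(elem_classes=["main_unlogged"])
    return gr.update(elem_classes=["main_logged"])

with gr.Blocks(css=css) as demo:
    gr.LoginButton()
    with gr.Column(elem_classes=["main_unlogged"]) as main_ui:
        gr.Textbox(label="Only usable when logged in")
    # On page load, Gradio injects the OAuth token into swap_opacity.
    demo.load(fn=swap_opacity, outputs=[main_ui], queue=False)

if __name__ == "__main__":
    demo.launch()
```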
@@ -950,7 +963,7 @@ To improve the quality of your outputs, you can add a custom caption for each im
             dataloader_num_workers,
             local_rank,
             dataset_folder,
-            token
+            #token
         ],
         outputs = progress_area,
         queue=False
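The change above drops `token` from the `inputs` list: OAuth parameters are never wired up as components, since Gradio fills them in from the session based on the type annotation alone. A reduced sketch (component names are illustrative):

```python
# Sketch: the OAuth token is not listed in `inputs`; Gradio injects it.
import gradio as gr

def start_training(lora_name: str, oauth_token: gr.OAuthToken | None):
    if oauth_token is None:
        raise gr.Error("You aren't logged in!")
    return f"Started training {lora_name}"

with gr.Blocks() as demo:
    gr.LoginButton()
    name = gr.Textbox(label="LoRA name")
    progress_area = gr.Markdown("")
    start = gr.Button("Start training")
    start.click(fn=start_training, inputs=[name], outputs=progress_area, queue=False)
```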
@@ -959,6 +972,12 @@ To improve the quality of your outputs, you can add a custom caption for each im
     do_captioning.click(
         fn=run_captioning, inputs=[images] + caption_list + [training_option], outputs=caption_list
     )
+    demo.load(fn=swap_opacity, outputs=[main_ui], queue=False, concurrency_limit=50).then(
+        fn=check_token,
+        outputs=[no_payment_method, start],
+        concurrency_limit=50,
+        queue=False
+    )
 if __name__ == "__main__":
     demo.queue()
     demo.launch(share=True)
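The new `demo.load(...).then(...)` chain fires when the page opens: first `swap_opacity` un-dims the UI for logged-in visitors, then `check_token` re-checks the account to decide whether to reveal the payment warning or the start button. A reduced sketch of the chaining behavior:

```python
# Sketch of demo.load(...).then(...): the second step runs after the first.
import gradio as gr

def on_load():
    return "page loaded"

def follow_up():
    return "follow-up ran"

with gr.Blocks() as demo:
    first = gr.Markdown()
    second = gr.Markdown()
    demo.load(fn=on_load, outputs=first, queue=False).then(
        fn=follow_up, outputs=second, queue=False
    )

if __name__ == "__main__":
    demo.launch()
```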
 