TheLastBen committed on
Commit
c847850
1 Parent(s): 777e633

Update Scripts/mainpaperspacev2.py

Browse files
Files changed (1) hide show
  1. Scripts/mainpaperspacev2.py +12 -24
Scripts/mainpaperspacev2.py CHANGED
@@ -38,7 +38,7 @@ def Deps(force_reinstall):
38
  print('Modules and notebooks updated, dependencies already installed')
39
 
40
  else:
41
- print('Installing the dependencies...')
42
  call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
43
  if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
44
  os.chdir('/usr/local/lib/python3.9/dist-packages')
@@ -142,7 +142,7 @@ def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
142
 
143
  else:
144
  while not os.path.exists(str(CKPT_Path)):
145
- print('Wrong path, use the colab file explorer to copy the path')
146
  os.chdir('/notebooks')
147
  time.sleep(5)
148
 
@@ -219,7 +219,7 @@ def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
219
  WORKSPACE='/notebooks/Fast-Dreambooth'
220
 
221
  if Session_Link_optional !="":
222
- print('Downloading session...')
223
 
224
  if Session_Link_optional != "":
225
  if not os.path.exists(str(WORKSPACE+'/Sessions')):
@@ -363,10 +363,6 @@ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDE
363
  def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
364
 
365
 
366
- if os.path.exists(CAPTIONS_DIR+"off"):
367
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
368
- time.sleep(2)
369
-
370
  if Remove_existing_instance_images:
371
  if os.path.exists(str(INSTANCE_DIR)):
372
  call("rm -r " +INSTANCE_DIR, shell=True)
@@ -469,11 +465,7 @@ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_
469
 
470
 
471
  def caption(CAPTIONS_DIR, INSTANCE_DIR):
472
-
473
- if os.path.exists(CAPTIONS_DIR+"off"):
474
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
475
- time.sleep(2)
476
-
477
  paths=""
478
  out=""
479
  widgets_l=""
@@ -555,10 +547,6 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
555
  print('No model found, use the "Model Download" cell to download a model.')
556
  time.sleep(5)
557
 
558
- if os.path.exists(CAPTIONS_DIR+"off"):
559
- call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
560
- time.sleep(2)
561
-
562
  MODELT_NAME=MODEL_NAMEv2
563
 
564
  Seed=random.randint(1, 999999)
@@ -624,6 +612,7 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
624
  def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
625
  call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
626
  '+trnonltxt+' \
 
627
  --train_text_encoder \
628
  --image_captions_filename \
629
  --dump_only_text_encoder \
@@ -638,7 +627,7 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
638
  --gradient_accumulation_steps=1 --gradient_checkpointing \
639
  --use_8bit_adam \
640
  --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
641
- --lr_scheduler="polynomial" \
642
  --lr_warmup_steps=0 \
643
  --max_train_steps='+str(Training_Steps), shell=True)
644
 
@@ -667,7 +656,7 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
667
  --gradient_accumulation_steps=1 '+GCUNET+' \
668
  --use_8bit_adam \
669
  --learning_rate='+str(UNet_Learning_Rate)+' \
670
- --lr_scheduler="polynomial" \
671
  --lr_warmup_steps=0 \
672
  --max_train_steps='+str(Training_Steps), shell=True)
673
 
@@ -703,7 +692,7 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
703
  if UNet_Training_Steps!=0:
704
  train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
705
 
706
- if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions :
707
  print('Nothing to do')
708
  else:
709
  if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
@@ -817,7 +806,7 @@ def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_l
817
  os.chdir('/notebooks/sd/stable-diffusion-webui')
818
  clear_output()
819
 
820
- configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
821
 
822
  return configf
823
 
@@ -903,14 +892,14 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
903
  br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
904
  return br
905
 
906
- print("Loading...")
907
 
908
  os.chdir(OUTPUT_DIR)
909
  call('rm -r feature_extractor .git', shell=True)
910
  clear_output()
911
  call('git init', shell=True)
912
  call('git lfs install --system --skip-repo', shell=True)
913
- call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/stabilityai/stable-diffusion-2-1"', shell=True)
914
  call('git config core.sparsecheckout true', shell=True)
915
  call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
916
  call('git pull origin main', shell=True)
@@ -926,9 +915,8 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
926
  - text-to-image
927
  - stable-diffusion
928
  ---
929
- ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
930
 
931
- Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
932
  '''
933
  #Save the readme to a file
934
  readme_file = open("README.md", "w")
 
38
  print('Modules and notebooks updated, dependencies already installed')
39
 
40
  else:
41
+ print('Installing the dependencies...')
42
  call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
43
  if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
44
  os.chdir('/usr/local/lib/python3.9/dist-packages')
 
142
 
143
  else:
144
  while not os.path.exists(str(CKPT_Path)):
145
+ print('Wrong path, use the file explorer to copy the path')
146
  os.chdir('/notebooks')
147
  time.sleep(5)
148
 
 
219
  WORKSPACE='/notebooks/Fast-Dreambooth'
220
 
221
  if Session_Link_optional !="":
222
+ print('Downloading session...')
223
 
224
  if Session_Link_optional != "":
225
  if not os.path.exists(str(WORKSPACE+'/Sessions')):
 
363
  def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
364
 
365
 
 
 
 
 
366
  if Remove_existing_instance_images:
367
  if os.path.exists(str(INSTANCE_DIR)):
368
  call("rm -r " +INSTANCE_DIR, shell=True)
 
465
 
466
 
467
  def caption(CAPTIONS_DIR, INSTANCE_DIR):
468
+
 
 
 
 
469
  paths=""
470
  out=""
471
  widgets_l=""
 
547
  print('No model found, use the "Model Download" cell to download a model.')
548
  time.sleep(5)
549
 
 
 
 
 
550
  MODELT_NAME=MODEL_NAMEv2
551
 
552
  Seed=random.randint(1, 999999)
 
612
  def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
613
  call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
614
  '+trnonltxt+' \
615
+ '+extrnlcptn+' \
616
  --train_text_encoder \
617
  --image_captions_filename \
618
  --dump_only_text_encoder \
 
627
  --gradient_accumulation_steps=1 --gradient_checkpointing \
628
  --use_8bit_adam \
629
  --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
630
+ --lr_scheduler="linear" \
631
  --lr_warmup_steps=0 \
632
  --max_train_steps='+str(Training_Steps), shell=True)
633
 
 
656
  --gradient_accumulation_steps=1 '+GCUNET+' \
657
  --use_8bit_adam \
658
  --learning_rate='+str(UNet_Learning_Rate)+' \
659
+ --lr_scheduler="linear" \
660
  --lr_warmup_steps=0 \
661
  --max_train_steps='+str(Training_Steps), shell=True)
662
 
 
692
  if UNet_Training_Steps!=0:
693
  train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
694
 
695
+ if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0 :
696
  print('Nothing to do')
697
  else:
698
  if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
 
806
  os.chdir('/notebooks/sd/stable-diffusion-webui')
807
  clear_output()
808
 
809
+ configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --enable-insecure-extension-access --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
810
 
811
  return configf
812
 
 
892
  br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
893
  return br
894
 
895
+ print("Loading...")
896
 
897
  os.chdir(OUTPUT_DIR)
898
  call('rm -r feature_extractor .git', shell=True)
899
  clear_output()
900
  call('git init', shell=True)
901
  call('git lfs install --system --skip-repo', shell=True)
902
+ call('git remote add -f origin https://huggingface.co/stabilityai/stable-diffusion-2-1', shell=True)
903
  call('git config core.sparsecheckout true', shell=True)
904
  call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
905
  call('git pull origin main', shell=True)
 
915
  - text-to-image
916
  - stable-diffusion
917
  ---
918
+ ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with TheLastBen's fast-DreamBooth notebook
919
 
 
920
  '''
921
  #Save the readme to a file
922
  readme_file = open("README.md", "w")