TheLastBen committed on
Commit
6f159ac
1 Parent(s): 8d31d21

Update Scripts/mainpaperspacev2.py

Browse files
Files changed (1) hide show
  1. Scripts/mainpaperspacev2.py +32 -85
Scripts/mainpaperspacev2.py CHANGED
@@ -25,16 +25,7 @@ import numpy as np
25
  def Deps(force_reinstall):
26
 
27
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
28
- os.chdir('/notebooks')
29
- if not os.path.exists('Latest_Notebooks'):
30
- call('mkdir Latest_Notebooks', shell=True)
31
- else:
32
- call('rm -r Latest_Notebooks', shell=True)
33
- call('mkdir Latest_Notebooks', shell=True)
34
- os.chdir('/notebooks/Latest_Notebooks')
35
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
36
- call('rm Notebooks.txt', shell=True)
37
- os.chdir('/notebooks')
38
  print('Modules and notebooks updated, dependencies already installed')
39
 
40
  else:
@@ -43,18 +34,7 @@ def Deps(force_reinstall):
43
  if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
44
  os.chdir('/usr/local/lib/python3.9/dist-packages')
45
  call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
46
-
47
- os.chdir('/notebooks')
48
- if not os.path.exists('Latest_Notebooks'):
49
- call('mkdir Latest_Notebooks', shell=True)
50
- else:
51
- call('rm -r Latest_Notebooks', shell=True)
52
- call('mkdir Latest_Notebooks', shell=True)
53
- os.chdir('/notebooks/Latest_Notebooks')
54
- call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
55
- call('rm Notebooks.txt', shell=True)
56
- os.chdir('/notebooks')
57
-
58
  if not os.path.exists('/models'):
59
  call('mkdir /models', shell=True)
60
  if not os.path.exists('/notebooks/models'):
@@ -79,11 +59,25 @@ def Deps(force_reinstall):
79
  clear_output()
80
 
81
  done()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
 
83
 
84
  def downloadmodel_hfv2(Path_to_HuggingFace):
85
  import wget
86
-
87
  if os.path.exists('/models/stable-diffusion-custom'):
88
  call("rm -r /models/stable-diffusion-custom", shell=True)
89
  clear_output()
@@ -115,22 +109,18 @@ def downloadmodel_hfv2(Path_to_HuggingFace):
115
  time.sleep(5)
116
 
117
 
118
-
119
 
120
  def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
121
  import wget
122
  os.chdir('/models')
123
  clear_output()
124
  if os.path.exists(str(CKPT_Path)):
 
125
  if Custom_Model_Version=='512':
126
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
127
- clear_output()
128
- call('python convertodiff.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
129
  elif Custom_Model_Version=='768':
130
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
131
- clear_output()
132
- call('python convertodiff.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
133
- call('rm convertodiff.py', shell=True)
134
  if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
135
  os.chdir('/notebooks')
136
  clear_output()
@@ -139,7 +129,6 @@ def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
139
  print('Conversion error')
140
  os.chdir('/notebooks')
141
  time.sleep(5)
142
-
143
  else:
144
  while not os.path.exists(str(CKPT_Path)):
145
  print('Wrong path, use the file explorer to copy the path')
@@ -269,7 +258,7 @@ def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
269
  if n!="000":
270
  f(int(n))
271
  print('Using the model '+ mdls[int(n)]+" ...")
272
- time.sleep(8)
273
  else:
274
  print('Skipping the intermediary checkpoints.')
275
 
@@ -287,23 +276,22 @@ def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
287
  call("wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py", shell=True)
288
  clear_output()
289
  print('Session found, loading the trained model ...')
290
- call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
291
 
292
  elif Model_Version=='768':
293
  call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
294
  clear_output()
295
  print('Session found, loading the trained model ...')
296
- call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
297
 
298
- call('rm /notebooks/convertodiff.py', shell=True)
299
 
300
  if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
301
  resumev2=True
302
  clear_output()
303
  print('Session loaded.')
304
  else:
305
- if not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
306
- print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
307
 
308
  elif not os.path.exists(str(SESSION_DIR)):
309
  call('mkdir -p '+INSTANCE_DIR, shell=True)
@@ -357,8 +345,6 @@ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDE
357
  upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
358
  done()
359
 
360
-
361
-
362
 
363
  def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
364
 
@@ -404,8 +390,6 @@ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_
404
  for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
405
  call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
406
 
407
-
408
-
409
  elif IMAGES_FOLDER_OPTIONAL =="":
410
  up=""
411
  for filename, file in uploader.value.items():
@@ -639,7 +623,6 @@ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Enc
639
  call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
640
  '+Style+' \
641
  '+extrnlcptn+' \
642
- --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
643
  --image_captions_filename \
644
  --train_only_unet \
645
  --Session_dir='+SESSION_DIR+' \
@@ -778,9 +761,9 @@ def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_l
778
  os.chdir('/notebooks')
779
  call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
780
  time.sleep(2)
781
- call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
782
  time.sleep(2)
783
- srv= getoutput('cat /notebooks/srvr.txt')
784
 
785
  for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
786
  if line.strip().startswith('self.server_name ='):
@@ -795,10 +778,7 @@ def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_l
795
  line = ''
796
  sys.stdout.write(line)
797
 
798
- call('rm /notebooks/srv.txt', shell=True)
799
- call('rm /notebooks/srvr.txt', shell=True)
800
-
801
-
802
 
803
  os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
804
  call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
@@ -806,7 +786,7 @@ def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_l
806
  os.chdir('/notebooks/sd/stable-diffusion-webui')
807
  clear_output()
808
 
809
- configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --enable-insecure-extension-access --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
810
 
811
  return configf
812
 
@@ -880,33 +860,14 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
880
  api = HfApi()
881
  your_username = api.whoami(token=hf_token)["name"]
882
 
883
- if(Save_concept_to == "Public_Library"):
884
- repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
885
- #Join the Concepts Library organization if you aren't part of it already
886
- call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
887
- else:
888
- repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
889
  output_dir = f'/notebooks/models/'+INSTANCE_NAME
890
 
891
  def bar(prg):
 
892
  br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
893
  return br
894
 
895
- print("Loading...")
896
-
897
- os.chdir(OUTPUT_DIR)
898
- call('rm -r feature_extractor .git', shell=True)
899
- clear_output()
900
- call('git init', shell=True)
901
- call('git lfs install --system --skip-repo', shell=True)
902
- call('git remote add -f origin https://huggingface.co/stabilityai/stable-diffusion-2-1', shell=True)
903
- call('git config core.sparsecheckout true', shell=True)
904
- call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
905
- call('git pull origin main', shell=True)
906
- call('rm -r .git', shell=True)
907
- os.chdir('/notebooks')
908
- clear_output()
909
-
910
  print(bar(1))
911
 
912
  readme_text = f'''---
@@ -937,14 +898,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
937
  token=hf_token
938
  )
939
 
940
- api.upload_folder(
941
- folder_path=OUTPUT_DIR+"/feature_extractor",
942
- path_in_repo="feature_extractor",
943
- repo_id=repo_id,
944
- token=hf_token
945
- )
946
-
947
- clear_output()
948
  print(bar(8))
949
 
950
  api.upload_folder(
@@ -954,7 +907,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
954
  token=hf_token
955
  )
956
 
957
- clear_output()
958
  print(bar(9))
959
 
960
  api.upload_folder(
@@ -964,7 +916,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
964
  token=hf_token
965
  )
966
 
967
- clear_output()
968
  print(bar(12))
969
 
970
  api.upload_folder(
@@ -974,7 +925,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
974
  token=hf_token
975
  )
976
 
977
- clear_output()
978
  print(bar(13))
979
 
980
  api.upload_folder(
@@ -984,7 +934,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
984
  token=hf_token
985
  )
986
 
987
- clear_output()
988
  print(bar(21))
989
 
990
  api.upload_folder(
@@ -994,7 +943,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
994
  token=hf_token
995
  )
996
 
997
- clear_output()
998
  print(bar(23))
999
 
1000
  api.upload_file(
@@ -1004,7 +952,6 @@ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, O
1004
  token=hf_token
1005
  )
1006
 
1007
- clear_output()
1008
  print(bar(25))
1009
 
1010
  print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
 
25
  def Deps(force_reinstall):
26
 
27
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
28
+ ntbk()
 
 
 
 
 
 
 
 
 
29
  print('Modules and notebooks updated, dependencies already installed')
30
 
31
  else:
 
34
  if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
35
  os.chdir('/usr/local/lib/python3.9/dist-packages')
36
  call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
37
+ ntbk()
 
 
 
 
 
 
 
 
 
 
 
38
  if not os.path.exists('/models'):
39
  call('mkdir /models', shell=True)
40
  if not os.path.exists('/notebooks/models'):
 
59
  clear_output()
60
 
61
  done()
62
+
63
+
64
+ def ntbk():
65
+
66
+ os.chdir('/notebooks')
67
+ if not os.path.exists('Latest_Notebooks'):
68
+ call('mkdir Latest_Notebooks', shell=True)
69
+ else:
70
+ call('rm -r Latest_Notebooks', shell=True)
71
+ call('mkdir Latest_Notebooks', shell=True)
72
+ os.chdir('/notebooks/Latest_Notebooks')
73
+ call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
74
+ call('rm Notebooks.txt', shell=True)
75
+ os.chdir('/notebooks')
76
 
77
 
78
  def downloadmodel_hfv2(Path_to_HuggingFace):
79
  import wget
80
+
81
  if os.path.exists('/models/stable-diffusion-custom'):
82
  call("rm -r /models/stable-diffusion-custom", shell=True)
83
  clear_output()
 
109
  time.sleep(5)
110
 
111
 
 
112
 
113
  def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
114
  import wget
115
  os.chdir('/models')
116
  clear_output()
117
  if os.path.exists(str(CKPT_Path)):
118
+ wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
119
  if Custom_Model_Version=='512':
120
+ call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
 
 
121
  elif Custom_Model_Version=='768':
122
+ call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
123
+ call('rm convertodiffv2.py', shell=True)
 
 
124
  if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
125
  os.chdir('/notebooks')
126
  clear_output()
 
129
  print('Conversion error')
130
  os.chdir('/notebooks')
131
  time.sleep(5)
 
132
  else:
133
  while not os.path.exists(str(CKPT_Path)):
134
  print('Wrong path, use the file explorer to copy the path')
 
258
  if n!="000":
259
  f(int(n))
260
  print('Using the model '+ mdls[int(n)]+" ...")
261
+ time.sleep(4)
262
  else:
263
  print('Skipping the intermediary checkpoints.')
264
 
 
276
  call("wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py", shell=True)
277
  clear_output()
278
  print('Session found, loading the trained model ...')
279
+ call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
280
 
281
  elif Model_Version=='768':
282
  call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
283
  clear_output()
284
  print('Session found, loading the trained model ...')
285
+ call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
286
 
287
+ call('rm convertodiff.py', shell=True)
288
 
289
  if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
290
  resumev2=True
291
  clear_output()
292
  print('Session loaded.')
293
  else:
294
+ print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
 
295
 
296
  elif not os.path.exists(str(SESSION_DIR)):
297
  call('mkdir -p '+INSTANCE_DIR, shell=True)
 
345
  upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
346
  done()
347
 
 
 
348
 
349
  def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
350
 
 
390
  for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
391
  call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
392
 
 
 
393
  elif IMAGES_FOLDER_OPTIONAL =="":
394
  up=""
395
  for filename, file in uploader.value.items():
 
623
  call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
624
  '+Style+' \
625
  '+extrnlcptn+' \
 
626
  --image_captions_filename \
627
  --train_only_unet \
628
  --Session_dir='+SESSION_DIR+' \
 
761
  os.chdir('/notebooks')
762
  call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
763
  time.sleep(2)
764
+ call("grep -o 'https[^ ]*' srv.txt >srvr.txt", shell=True)
765
  time.sleep(2)
766
+ srv= getoutput('cat srvr.txt')
767
 
768
  for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
769
  if line.strip().startswith('self.server_name ='):
 
778
  line = ''
779
  sys.stdout.write(line)
780
 
781
+ call('rm srv.txt srvr.txt', shell=True)
 
 
 
782
 
783
  os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
784
  call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
 
786
  os.chdir('/notebooks/sd/stable-diffusion-webui')
787
  clear_output()
788
 
789
+ configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --enable-insecure-extension-access --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
790
 
791
  return configf
792
 
 
860
  api = HfApi()
861
  your_username = api.whoami(token=hf_token)["name"]
862
 
863
+ repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
 
 
 
 
 
864
  output_dir = f'/notebooks/models/'+INSTANCE_NAME
865
 
866
  def bar(prg):
867
+ clear_output()
868
  br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
869
  return br
870
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
871
  print(bar(1))
872
 
873
  readme_text = f'''---
 
898
  token=hf_token
899
  )
900
 
 
 
 
 
 
 
 
 
901
  print(bar(8))
902
 
903
  api.upload_folder(
 
907
  token=hf_token
908
  )
909
 
 
910
  print(bar(9))
911
 
912
  api.upload_folder(
 
916
  token=hf_token
917
  )
918
 
 
919
  print(bar(12))
920
 
921
  api.upload_folder(
 
925
  token=hf_token
926
  )
927
 
 
928
  print(bar(13))
929
 
930
  api.upload_folder(
 
934
  token=hf_token
935
  )
936
 
 
937
  print(bar(21))
938
 
939
  api.upload_folder(
 
943
  token=hf_token
944
  )
945
 
 
946
  print(bar(23))
947
 
948
  api.upload_file(
 
952
  token=hf_token
953
  )
954
 
 
955
  print(bar(25))
956
 
957
  print("Your concept was saved successfully at https://huggingface.co/"+repo_id)