TheLastBen committed
Commit 537cdef
1 Parent(s): 74eab38

Update Scripts/mainpaperspacev1.py

Files changed (1)
  1. Scripts/mainpaperspacev1.py  +40 -37
Scripts/mainpaperspacev1.py CHANGED
@@ -51,7 +51,7 @@ def Deps(force_reinstall):
     call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
     call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
     os.chdir('/notebooks')
-    call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
+    call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
     if not os.path.exists('/notebooks/diffusers'):
         call('ln -s /diffusers /notebooks', shell=True)
     call("rm -r /deps", shell=True)
@@ -112,19 +112,21 @@ def downloadmodel_hf(Path_to_HuggingFace):
 
 
 
-def downloadmodel_pth(CKPT_Path):
+def downloadmodel_pth(CKPT_Path, safetensors):
+
+    sftnsr=""
+    if safetensors:
+        sftnsr="--from_safetensors"
+
     import wget
     os.chdir('/notebooks')
     clear_output()
     if os.path.exists(str(CKPT_Path)):
-        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
-        call('unzip -o -q refmdlz', shell=True)
-        call('rm -f refmdlz', shell=True)
-        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
+        call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
+        call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+CKPT_Path+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
         clear_output()
-        call('python convertodiffv1.py '+CKPT_Path+' /models/stable-diffusion-custom --v1', shell=True)
-        call('rm convertodiffv1.py', shell=True)
-        call('rm -r refmdl', shell=True)
+
+        call('rm config.yaml', shell=True)
         if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
             clear_output()
             done()
@@ -137,23 +139,28 @@ def downloadmodel_pth(CKPT_Path):
             time.sleep(5)
 
 
-def downloadmodel_lnk(CKPT_Link):
+def downloadmodel_lnk(CKPT_Link, safetensors):
+
+    sftnsr=""
+    if not safetensors:
+        modelnm="model.ckpt"
+    else:
+        modelnm="model.safetensors"
+        sftnsr="--from_safetensors"
+
     import wget
     os.chdir('/notebooks')
-    call("gdown --fuzzy " +CKPT_Link+ " -O /models/model.ckpt", shell=True)
-
-    if os.path.exists('/models/model.ckpt'):
-        if os.path.getsize("/models/model.ckpt") > 1810671599:
-            wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
-            call('unzip -o -q refmdlz', shell=True)
-            call('rm -f refmdlz', shell=True)
-            wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
+    call("gdown --fuzzy " +CKPT_Link+ " -O /models/"+modelnm, shell=True)
+
+    if os.path.exists("/models/"+modelnm):
+        if os.path.getsize("/models/"+modelnm) > 1810671599:
+            call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
+            call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelnm+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
             clear_output()
-            call('python convertodiffv1.py /models/model.ckpt /models/stable-diffusion-custom --v1', shell=True)
-            call('rm convertodiffv1.py', shell=True)
-            call('rm -r refmdl', shell=True)
+
+            call('rm config.yaml', shell=True)
             if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-                call('rm /models/model.ckpt', shell=True)
+                call('rm /models/'+modelnm, shell=True)
                 clear_output()
                 done()
             else:
@@ -161,7 +168,7 @@ def downloadmodel_lnk(CKPT_Link):
                 print('Conversion error')
                 time.sleep(5)
     else:
-        while os.path.getsize('/models/model.ckpt') < 1810671599:
+        while os.path.getsize("/models/"+modelnm) < 1810671599:
             print('Wrong link, check that the link is valid')
             time.sleep(5)
 
@@ -172,10 +179,10 @@ def dl(Path_to_HuggingFace, CKPT_Path, CKPT_Link):
         downloadmodel_hf(Path_to_HuggingFace)
         MODEL_NAME="/models/stable-diffusion-custom"
     elif CKPT_Path !="":
-        downloadmodel_pth(CKPT_Path)
+        downloadmodel_pth(CKPT_Path, safetensors)
         MODEL_NAME="/models/stable-diffusion-custom"
     elif CKPT_Link !="":
-        downloadmodel_lnk(CKPT_Link)
+        downloadmodel_lnk(CKPT_Link, safetensors)
        MODEL_NAME="/models/stable-diffusion-custom"
     else:
         MODEL_NAME="/datasets/stable-diffusion-diffusers/stable-diffusion-v1-5"
@@ -262,14 +269,11 @@ def sess(Session_Name, Session_Link_optional, MODEL_NAME):
 
     elif os.path.exists(MDLPTH):
         print('Session found, loading the trained model ...')
-        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
-        call('unzip -o -q refmdlz', shell=True, stdout=open('/dev/null', 'w'))
-        call('rm -f refmdlz', shell=True, stdout=open('/dev/null', 'w'))
-        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
-        call('python convertodiffv1.py '+MDLPTH+' '+OUTPUT_DIR+' --v1', shell=True)
-        call('rm convertodiffv1.py', shell=True)
-        call('rm -r refmdl', shell=True)
+        call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
+        call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MDLPTH+' --dump_path '+OUTPUT_DIR+' --original_config_file config.yaml', shell=True)
+        clear_output()
 
+        call('rm config.yaml', shell=True)
         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
             resume=True
             clear_output()
@@ -333,8 +337,6 @@ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDE
     done()
 
 
-
-
 def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
 
 
@@ -615,8 +617,9 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
         --lr_scheduler="linear" \
         --lr_warmup_steps=0 \
         --max_train_steps='+str(Training_Steps), shell=True)
-
-    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
+
+
+    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
         clear_output()
         if resuming=="Yes":
             print('Resuming Training...')
@@ -675,7 +678,7 @@ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encod
         time.sleep(8)
 
     if UNet_Training_Steps!=0:
-        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
+        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
 
     if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0 :
         print('Nothing to do')
 
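Note: this commit drops the bundled refmdlz/convertodiffv1.py conversion helpers and switches every checkpoint-loading path (CKPT_Path, CKPT_Link, and session resume) to diffusers' own convert_original_stable_diffusion_to_diffusers.py script, with an optional --from_safetensors flag for .safetensors checkpoints. Below is a minimal standalone sketch of the new conversion step, assuming a hypothetical checkpoint path and the /diffusers clone created by Deps(); the commands mirror the calls added in the diff, not a separately documented API.

# Sketch only: CKPT_Path and the output folder are placeholders.
from subprocess import call

CKPT_Path = '/models/model.ckpt'  # hypothetical input checkpoint (.ckpt or .safetensors)
sftnsr = '--from_safetensors' if CKPT_Path.endswith('.safetensors') else ''

# Fetch the SD v1 inference config that the converter expects.
call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)

# Convert the original checkpoint into a diffusers-format folder.
call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py'
     ' --checkpoint_path ' + CKPT_Path +
     ' --dump_path /models/stable-diffusion-custom'
     ' --original_config_file config.yaml ' + sftnsr, shell=True)

# Remove the temporary config, as the updated script does.
call('rm config.yaml', shell=True)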