TheLastBen committed
Commit e505dda
1 Parent(s): d5daa45

Create mainpaperspacev2.py

Files changed (1)
  1. Scripts/mainpaperspacev2.py +1279 -0
Scripts/mainpaperspacev2.py ADDED
@@ -0,0 +1,1279 @@
from IPython.display import clear_output, display
from subprocess import call, getoutput
import ipywidgets as widgets
import io
from PIL import Image, ImageDraw
import fileinput
import time
import os
from os import listdir
from os.path import isfile
from tqdm import tqdm
import gdown
import random
import sys
import cv2
from io import BytesIO
import requests
from collections import defaultdict
from math import log, sqrt
import numpy as np

def Deps(force_reinstall):

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        os.chdir('/notebooks')
        if not os.path.exists('Latest_Notebooks'):
            call('mkdir Latest_Notebooks', shell=True)
        else:
            call('rm -r Latest_Notebooks', shell=True)
            call('mkdir Latest_Notebooks', shell=True)
        os.chdir('/notebooks/Latest_Notebooks')
        call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
        call('rm Notebooks.txt', shell=True)
        os.chdir('/notebooks')
        print('Modules and notebooks updated, dependencies already installed')

    else:
        print('Installing the dependencies...')
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            # remove the preinstalled packages that the pps.tar.zst archive replaces
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))

        os.chdir('/notebooks')
        if not os.path.exists('Latest_Notebooks'):
            call('mkdir Latest_Notebooks', shell=True)
        else:
            call('rm -r Latest_Notebooks', shell=True)
            call('mkdir Latest_Notebooks', shell=True)
        os.chdir('/notebooks/Latest_Notebooks')
        call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
        call('rm Notebooks.txt', shell=True)
        os.chdir('/notebooks')
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        # point the transformers cache at persistent storage
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()

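# Illustrative usage (editor's sketch, not part of the original script; the
# flag value is a placeholder): the environment setup is a single call from
# the notebook's first cell, and a True flag forces a clean reinstall:
#
#   Deps(force_reinstall=False)
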
def downloadmodel_hfv2(Path_to_HuggingFace):
    import wget

    if os.path.exists('/models/stable-diffusion-custom'):
        call("rm -r /models/stable-diffusion-custom", shell=True)
    clear_output()

    if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
        with open("/notebooks/Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe = f'https://USER:{token}@'
    else:
        authe = "https://"

    clear_output()
    call("mkdir /models/stable-diffusion-custom", shell=True)
    os.chdir("/models/stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # sparse-checkout: pull only the diffusers folders, skip the .safetensors duplicates
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.bin'):
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)

def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
    import wget
    os.chdir('/models')
    clear_output()
    if os.path.exists(str(CKPT_Path)):
        # the converter is downloaded as convertodiffv2.py, so call it under that name
        if Custom_Model_Version == '512':
            wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
            clear_output()
            call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
        elif Custom_Model_Version == '768':
            wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
            clear_output()
            call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
        call('rm convertodiffv2.py', shell=True)
        if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            os.chdir('/notebooks')
            clear_output()
            done()
        while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            os.chdir('/notebooks')
            time.sleep(5)

    else:
        while not os.path.exists(str(CKPT_Path)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/notebooks')
            time.sleep(5)

def downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version):
    import wget
    os.chdir('/models')
    call("gdown --fuzzy " +CKPT_Link+ " -O model.ckpt", shell=True)

    if os.path.exists('model.ckpt'):
        if os.path.getsize("model.ckpt") > 1810671599:
            wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
            if Custom_Model_Version == '512':
                call('python convertodiffv2.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
            elif Custom_Model_Version == '768':
                call('python convertodiffv2.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
            call('rm convertodiffv2.py', shell=True)
            if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                call('rm model.ckpt', shell=True)
                os.chdir('/notebooks')
                clear_output()
                done()
            else:
                while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/notebooks')
                    time.sleep(5)
        else:
            # a file smaller than ~1.8 GB is not a full v2 checkpoint
            while os.path.getsize('/models/model.ckpt') < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/notebooks')
                time.sleep(5)

def dlv2(Path_to_HuggingFace, CKPT_Path, CKPT_Link, Model_Version, Custom_Model_Version):

    if Path_to_HuggingFace != "":
        downloadmodel_hfv2(Path_to_HuggingFace)
        MODEL_NAMEv2 = "/models/stable-diffusion-custom"
    elif CKPT_Path != "":
        downloadmodel_pthv2(CKPT_Path, Custom_Model_Version)
        MODEL_NAMEv2 = "/models/stable-diffusion-custom"
    elif CKPT_Link != "":
        downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version)
        MODEL_NAMEv2 = "/models/stable-diffusion-custom"
    else:
        if Model_Version == "512":
            MODEL_NAMEv2 = "dataset"
            print('Using the original V2-512 model')
        elif Model_Version == "768":
            MODEL_NAMEv2 = "/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"
            print('Using the original V2-768 model')
        else:
            MODEL_NAMEv2 = ""
            print('Wrong model version')

    return MODEL_NAMEv2

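# Illustrative usage (editor's sketch, values are placeholders): dlv2 checks
# the three custom-model sources in order (HF repo path, local CKPT path,
# direct link) and falls back to the stock v2 model picked by Model_Version:
#
#   MODEL_NAMEv2 = dlv2(Path_to_HuggingFace="", CKPT_Path="", CKPT_Link="",
#                       Model_Version="768", Custom_Model_Version="768")
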
def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
    import gdown
    os.chdir('/notebooks')
    PT = ""

    while Session_Name == "":
        print('Input the Session Name:')
        Session_Name = input("")
    Session_Name = Session_Name.replace(" ", "_")

    WORKSPACE = '/notebooks/Fast-Dreambooth'

    if Session_Link_optional != "":
        print('Downloading session...')
        if not os.path.exists(str(WORKSPACE+'/Sessions')):
            call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
            time.sleep(1)
        os.chdir(WORKSPACE+'/Sessions')
        gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
        os.chdir(Session_Name)
        # the downloaded session stores its folders as zip archives; replace
        # any stale folders with the freshly unzipped ones
        call("rm -r instance_images", shell=True)
        call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r concept_images", shell=True)
        call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r captions", shell=True)
        call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    INSTANCE_NAME = Session_Name
    OUTPUT_DIR = "/models/"+Session_Name
    SESSION_DIR = WORKSPACE+"/Sessions/"+Session_Name
    CONCEPT_DIR = SESSION_DIR+"/concept_images"
    INSTANCE_DIR = SESSION_DIR+"/instance_images"
    CAPTIONS_DIR = SESSION_DIR+'/captions'
    MDLPTH = str(SESSION_DIR+"/"+Session_Name+'.ckpt')
    resumev2 = False

    if os.path.exists(str(SESSION_DIR)):
        mdls = [ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1] == "ckpt"]
        if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):

            def f(n):
                k = 0
                for i in mdls:
                    if k == n:
                        call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
                    k = k+1

            k = 0
            print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')

            for i in mdls:
                print(str(k)+'- '+i)
                k = k+1
            n = input()
            while int(n) > k-1:
                n = input()
            if n != "000":
                f(int(n))
                print('Using the model '+ mdls[int(n)]+" ...")
                time.sleep(8)
            else:
                print('Skipping the intermediary checkpoints.')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous model, using the original model or the custom downloaded model')
        if MODEL_NAMEv2 == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed to uploading instance images')

    elif os.path.exists(MDLPTH):
        print('Session found, loading the trained model ...')
        if Model_Version == '512':
            call("wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py", shell=True)
            clear_output()
            print('Session found, loading the trained model ...')
            call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)

        elif Model_Version == '768':
            call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
            clear_output()
            print('Session found, loading the trained model ...')
            call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)

        call('rm /notebooks/convertodiff.py', shell=True)

        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            resumev2 = True
            clear_output()
            print('Session loaded.')
        else:
            print('Conversion error, if the error persists, remove the CKPT file from the current session folder')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMEv2 == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2

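# Illustrative usage (editor's sketch, placeholder session name): sessv2
# returns the full set of session paths, which the notebook unpacks in one
# assignment and hands to the later cells:
#
#   PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, \
#   CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2 = \
#       sessv2("my_session", "", "768", MODEL_NAMEv2)
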
def done():
    done = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check'
    )
    display(done)

def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):

    uploader = widgets.FileUpload(description="Choose images", accept='image/*', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )

    out = widgets.Output()

    def up(Upload):
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
            done()

    if IMAGES_FOLDER_OPTIONAL == "":
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
        done()

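# Illustrative usage (editor's sketch, placeholder values): with no optional
# folder, uplder shows the upload widget; with a folder path it imports the
# images directly:
#
#   uplder(Remove_existing_instance_images=True, Crop_images=True,
#          Crop_size=512, IMAGES_FOLDER_OPTIONAL="",
#          INSTANCE_DIR=INSTANCE_DIR, CAPTIONS_DIR=CAPTIONS_DIR, ren=False)
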
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):

    if os.path.exists(CAPTIONS_DIR+"off"):
        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
        time.sleep(2)

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL != "":
        # caption .txt files travel with the images; move them aside first
        if any(file.endswith('.txt') for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                image = file
                if file.size != (Crop_size, Crop_size):
                    # crop_image returns a list of crops; keep the first one
                    image = crop_image(file, Crop_size)[0]
                if extension.lower() == "jpg":
                    image.save(new_path_with_file, format="JPEG", quality=100)
                else:
                    image.save(new_path_with_file, format=extension.upper())

        else:
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL == "":
        for filename, file in uploader.value.items():
            if filename.split(".")[-1] == "txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(file['content'].decode())
        up = [(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1] != "txt"]
        if Crop_images:
            for filename, file_info in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                img = Image.open(io.BytesIO(file_info['content']))
                extension = filename.split(".")[-1]

                if extension.lower() == "jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                image = file
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)[0]
                if extension.lower() == "jpg":
                    image.save(new_path_with_file, format="JPEG", quality=100)
                else:
                    image.save(new_path_with_file, format=extension.upper())

        else:
            for filename, file_info in tqdm(uploader.value.items(), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                img = Image.open(io.BytesIO(file_info['content']))
                extension = filename.split(".")[-1]

                if extension.lower() == "jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    if ren:
        i = 0
        for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
            extension = filename.split(".")[-1]
            new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
            call('mv "'+os.path.join(INSTANCE_DIR, filename)+'" "'+new_path_with_file+'"', shell=True)
            i = i+1

    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')

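# Note (editor's comment): upld pairs each image with a caption by filename,
# so "dog-1.jpg" is captioned by a "dog-1.txt" placed in CAPTIONS_DIR; spaces
# in filenames are replaced with dashes so the shell commands above stay safe.
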
def caption(CAPTIONS_DIR, INSTANCE_DIR):

    if os.path.exists(CAPTIONS_DIR+"off"):
        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
        time.sleep(2)

    clear_output()

    def Caption(path):
        if path != "Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext = os.path.splitext(os.path.basename(path))[-1][1:]
            if ext.lower() == "jpg":
                ext = "JPEG"

            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                text = ""

            # render a low-quality 420px preview next to the caption box
            img = Image.open(os.path.join(INSTANCE_DIR, path))
            img = img.resize((420, 420))
            image_bytes = BytesIO()
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            image = widgets.Image(
                value=image_data,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)

    out = widgets.Output()

    def click(change):
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))

def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):

    if resumev2 and not Resume_Training:
        print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model. yes or no?')
        while True:
            ansres = input('')
            if ansres == 'no':
                Resume_Training = True
                break
            elif ansres == 'yes':
                Resume_Training = False
                resumev2 = False
                break

    while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    if os.path.exists(CAPTIONS_DIR+"off"):
        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
        time.sleep(2)

    MODELT_NAME = MODEL_NAMEv2

    Seed = random.randint(1, 999999)

    Style = ""
    if Style_Training:
        Style = "--Style"

    extrnlcptn = ""
    if External_Captions:
        extrnlcptn = "--external_captions"

    precision = "fp16"

    # gradient checkpointing trades speed for VRAM; skip it at low resolutions
    GCUNET = "--gradient_checkpointing"
    if Resolution <= 640:
        GCUNET = ""

    resuming = ""
    if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        MODELT_NAME = OUTPUT_DIR
        print('Resuming Training...')
        resuming = "Yes"
    elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        print('Previous model not found, training a new model...')
        MODELT_NAME = MODEL_NAMEv2
        while MODEL_NAMEv2 == "":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)

    trnonltxt = ""
    if UNet_Training_Steps == 0:
        trnonltxt = "--train_only_text_encoder"

    Enable_text_encoder_training = True
    Enable_Text_Encoder_Concept_Training = True

    if Text_Encoder_Training_Steps == 0 or External_Captions:
        Enable_text_encoder_training = False
    else:
        stptxt = Text_Encoder_Training_Steps

    if Text_Encoder_Concept_Training_Steps == 0:
        Enable_Text_Encoder_Concept_Training = False
    else:
        stptxtc = Text_Encoder_Concept_Training_Steps

    if Save_Checkpoint_Every is None:
        Save_Checkpoint_Every = 1
    stp = 0
    if Start_saving_from_the_step is None:
        Start_saving_from_the_step = 0
    if Start_saving_from_the_step < 200:
        Start_saving_from_the_step = Save_Checkpoint_Every
    stpsv = Start_saving_from_the_step
    if Save_Checkpoint_Every_n_Steps:
        stp = Save_Checkpoint_Every

    def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+trnonltxt+' \
        --train_text_encoder \
        --image_captions_filename \
        --dump_only_text_encoder \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution=512 \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="polynomial" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
        clear_output()
        if resuming == "Yes":
            print('Resuming Training...')
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+Style+' \
        '+extrnlcptn+' \
        --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
        --image_captions_filename \
        --train_only_unet \
        --Session_dir='+SESSION_DIR+' \
        --save_starting_step='+str(stpsv)+' \
        --save_n_steps='+str(stp)+' \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GCUNET+' \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="polynomial" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    if Enable_text_encoder_training:
        print('Training the text encoder...')
        if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
            call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
        dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)

    if Enable_Text_Encoder_Concept_Training:
        if os.path.exists(CONCEPT_DIR) and os.listdir(CONCEPT_DIR) != []:
            clear_output()
            if resuming == "Yes":
                print('Resuming Training...')
            print('Training the text encoder on the concept...')
            dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
        else:
            clear_output()
            if resuming == "Yes":
                print('Resuming Training...')
            print('No concept images found, skipping concept training...')
            Text_Encoder_Concept_Training_Steps = 0
            time.sleep(8)

    if UNet_Training_Steps != 0:
        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

    if UNet_Training_Steps == 0 and Text_Encoder_Concept_Training_Steps == 0 and External_Captions:
        print('Nothing to do')
    else:
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
            clear_output()
            if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
                clear_output()
                print("DONE, the CKPT model is in the session's folder")
            else:
                print("Something went wrong")
        else:
            print("Something went wrong")

    return resumev2

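# Illustrative usage (editor's sketch; step counts and learning rates are
# placeholders, not recommendations): the notebook's training cell forwards
# its form fields straight into dbtrainv2:
#
#   resumev2 = dbtrainv2(Resume_Training=False, UNet_Training_Steps=1500,
#                        UNet_Learning_Rate=2e-6, Text_Encoder_Training_Steps=350,
#                        Text_Encoder_Concept_Training_Steps=0,
#                        Text_Encoder_Learning_Rate=1e-6, Style_Training=False,
#                        Resolution=768, MODEL_NAMEv2=MODEL_NAMEv2,
#                        SESSION_DIR=SESSION_DIR, INSTANCE_DIR=INSTANCE_DIR,
#                        CONCEPT_DIR=CONCEPT_DIR, CAPTIONS_DIR=CAPTIONS_DIR,
#                        External_Captions=False, INSTANCE_NAME=INSTANCE_NAME,
#                        Session_Name=Session_Name, OUTPUT_DIR=OUTPUT_DIR, PT=PT,
#                        resumev2=resumev2, Save_Checkpoint_Every_n_Steps=False,
#                        Start_saving_from_the_step=500, Save_Checkpoint_Every=500)
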
def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel):

    if Previous_Session_Name != "":
        print("Loading a previous session model")
        mdldir = '/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
        path_to_trained_model = mdldir+"/"+Previous_Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in the previous session")
            time.sleep(5)

    elif Custom_Path != "":
        print("Loading model from a custom path")
        path_to_trained_model = Custom_Path

        while not os.path.exists(path_to_trained_model):
            print("Wrong Path")
            time.sleep(5)

    else:
        print("Loading the trained model")
        mdldir = '/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
        path_to_trained_model = mdldir+"/"+Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in this session")
            time.sleep(5)

    auth = f"--gradio-auth {User}:{Password}"
    if User == "" or Password == "":
        auth = ""

    os.chdir('/notebooks')
    if not os.path.exists('/notebooks/sd/stablediffusion'):
        call('wget -q -O sd_rep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_rep.tar.zst', shell=True)
        call('tar --zstd -xf sd_rep.tar.zst', shell=True)
        call('rm sd_rep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()

    if not os.path.exists('/usr/lib/node_modules/localtunnel'):
        call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))

    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    share = ''
    if not Use_localtunnel:
        share = '--share'
    else:
        os.chdir('/notebooks')
        call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
        time.sleep(2)
        call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
        time.sleep(2)
        srv = getoutput('cat /notebooks/srvr.txt')

        # patch gradio's blocks.py so the UI answers through the localtunnel
        # URL (srv[8:] strips the leading "https://")
        for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
            if line.strip().startswith('self.server_name ='):
                line = f'        self.server_name = "{srv[8:]}"\n'
            if line.strip().startswith('self.server_port ='):
                line = '        self.server_port = 443\n'
            if line.strip().startswith('self.protocol = "https"'):
                line = '        self.protocol = "https"\n'
            if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
                line = ''
            if line.strip().startswith('else "http"'):
                line = ''
            sys.stdout.write(line)

        call('rm /notebooks/srv.txt', shell=True)
        call('rm /notebooks/srvr.txt', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
    call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
    call("sed -i 's@/content/gdrive/MyDrive/sd/stablediffusion@/notebooks/sd/stablediffusion@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    configf = "--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share

    return configf

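# Illustrative usage (editor's sketch, placeholder arguments; the launch line
# assumes the webui entry point, which may differ): test() only prepares the
# A1111 webui and returns its command-line flags, which the notebook then
# passes to the UI process, e.g.:
#
#   configf = test("", "", Session_Name, User="", Password="", Use_localtunnel=False)
#   call('python /notebooks/sd/stable-diffusion-webui/webui.py ' + configf, shell=True)
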
def clean():

    Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out = widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
            else:
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s, d, out)
    else:
        print("NOTHING TO REMOVE")

def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):

    from slugify import slugify
    from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
    from huggingface_hub import create_repo
    from IPython.display import display_markdown

    if Name_of_your_concept == "":
        Name_of_your_concept = Session_Name
    Name_of_your_concept = Name_of_your_concept.replace(" ", "-")

    if hf_token_write == "":
        print('Your Hugging Face write access token : ')
        hf_token_write = input()

    hf_token = hf_token_write

    api = HfApi()
    your_username = api.whoami(token=hf_token)["name"]

    if Save_concept_to == "Public_Library":
        repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
        # Join the Concepts Library organization if you aren't part of it already
        call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
    else:
        repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
    output_dir = '/notebooks/models/'+INSTANCE_NAME

    def bar(prg):
        br = "Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
        return br

    print("Loading...")

    # restore the feature_extractor folder from the base model via sparse checkout
    os.chdir(OUTPUT_DIR)
    call('rm -r feature_extractor .git', shell=True)
    clear_output()
    call('git init', shell=True)
    call('git lfs install --system --skip-repo', shell=True)
    call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/stabilityai/stable-diffusion-2-1"', shell=True)
    call('git config core.sparsecheckout true', shell=True)
    call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
    call('git pull origin main', shell=True)
    call('rm -r .git', shell=True)
    os.chdir('/notebooks')
    clear_output()

    print(bar(1))

    readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### {Name_of_your_concept} Dreambooth model trained by {your_username} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
'''
    # Save the readme to a file
    with open("README.md", "w") as readme_file:
        readme_file.write(readme_text)

    operations = [
        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
        CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
    ]
    create_repo(repo_id, private=True, token=hf_token)

    api.create_commit(
        repo_id=repo_id,
        operations=operations,
        commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
        token=hf_token
    )

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/feature_extractor",
        path_in_repo="feature_extractor",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(8))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/scheduler",
        path_in_repo="scheduler",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(9))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/text_encoder",
        path_in_repo="text_encoder",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(12))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/tokenizer",
        path_in_repo="tokenizer",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(13))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/unet",
        path_in_repo="unet",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(21))

    api.upload_folder(
        folder_path=OUTPUT_DIR+"/vae",
        path_in_repo="vae",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(23))

    api.upload_file(
        path_or_fileobj=OUTPUT_DIR+"/model_index.json",
        path_in_repo="model_index.json",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(25))

    print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
    done()

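# Illustrative usage (editor's sketch, placeholder name and token): pushes the
# diffusers folders plus the session CKPT to a new private repo, either under
# your account or the sd-dreambooth-library organization:
#
#   hfv2("my-concept", "My_Profile", "hf_xxx", INSTANCE_NAME, OUTPUT_DIR,
#        Session_Name, MDLPTH)
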
def crop_image(im, size):

    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point

    def image_face_points(im, settings):

        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        # try progressively broader Haar cascades until one finds something
        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                                                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []

    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # mask out the lower 10% of the image (often watermarks or captions)
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points

    def image_entropy_points(im, settings):
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        # slide a crop-sized window along the long axis and keep the crop
        # with the highest entropy
        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if e > e_max:
                e_max = e
                crop_best = list(crop_current)

            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

    def image_entropy(im):
        # greyscale image entropy
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))

    def poi_average(pois, settings):
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)

    def is_landscape(w, h):
        return w > h

    def is_portrait(w, h):
        return h > w

    def is_square(w, h):
        return w == h

    class PointOfInterest:
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # scale the image so the short side matches the crop size
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
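

# Illustrative usage (editor's sketch, placeholder path): crop_image returns a
# one-element list holding the focal-point-centered square crop:
#
#   img = Image.open('/notebooks/example.jpg')
#   crop_image(img, 512)[0].save('/notebooks/example-512.jpg')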