TheLastBen committed
Commit 152e69f
1 Parent(s): d74b49a

Create mainpaperspacev2.py

Files changed (1)
  1. mainpaperspacev2.py +1255 -0
mainpaperspacev2.py ADDED
@@ -0,0 +1,1255 @@
+ from IPython.display import clear_output, display
+ from subprocess import call, getoutput
+ import ipywidgets as widgets
+ import io
+ from PIL import Image, ImageDraw
+ import fileinput
+ import time
+ import os
+ from os import listdir
+ from os.path import isfile
+ from tqdm import tqdm
+ import random
+ import sys
+ import cv2
+ from io import BytesIO
+ import requests
+ from collections import defaultdict
+ from math import log, sqrt
+ import numpy as np
+
+
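+ # Install the apt and python dependencies (prebuilt tarball from the author's
+ # Hugging Face repo) and link /models and /diffusers into /notebooks; skipped
+ # when already present unless force_reinstall is set.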
+ def Deps(force_reinstall):
+
+     if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
+         print('Dependencies already installed')
+     else:
+         print('Installing the dependencies...')
+         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
+         if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
+             os.chdir('/usr/local/lib/python3.9/dist-packages')
+             call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         if not os.path.exists('/models'):
+             call('mkdir /models', shell=True)
+         if not os.path.exists('/notebooks/models'):
+             call('ln -s /models /notebooks', shell=True)
+         if os.path.exists('/deps'):
+             call("rm -r /deps", shell=True)
+         call('mkdir /deps', shell=True)
+         if not os.path.exists('cache'):
+             call('mkdir cache', shell=True)
+         os.chdir('/deps')
+         call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
+         call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
+         call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
+         os.chdir('/notebooks')
+         call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
+         if not os.path.exists('/notebooks/diffusers'):
+             call('ln -s /diffusers /notebooks', shell=True)
+         call("rm -r /deps", shell=True)
+         os.chdir('/notebooks')
+         clear_output()
+
+     done()
+
+
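+ # Fetch a diffusers-format model from the Hugging Face hub into
+ # /models/stable-diffusion-custom using a git sparse checkout.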
+ def downloadmodel_hfv2(Path_to_HuggingFace):
+     import wget
+     os.chdir('/models')
+     if os.path.exists('stable-diffusion-custom'):
+         call("rm -r stable-diffusion-custom", shell=True)
+
+     if os.path.exists('/content/gdrive/MyDrive/Fast-Dreambooth/token.txt'):
+         with open("/content/gdrive/MyDrive/Fast-Dreambooth/token.txt") as f:
+             token = f.read()
+         authe=f'https://USER:{token}@'
+     else:
+         authe="https://"
+
+     call("mkdir stable-diffusion-custom", shell=True)
+     os.chdir("stable-diffusion-custom")
+     call("git init", shell=True)
+     call("git lfs install --system --skip-repo", shell=True)
+     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
+     call("git config core.sparsecheckout true", shell=True)
+     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
+     call("git pull origin main", shell=True)
+     # go back to /models so the checks below resolve against the checkout folder
+     os.chdir('/models')
+     if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+         call("rm -r stable-diffusion-custom/.git", shell=True)
+         clear_output()
+         done()
+     else:
+         while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             print('Check the link you provided')
+             time.sleep(5)
+     os.chdir('/notebooks')
+
+
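+ # Convert a local v2 .ckpt file to diffusers format with the repo's
+ # convertodiffv2.py converter (512 or 768 reference model).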
+ def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
+     import wget
+     os.chdir('/models')
+     clear_output()
+     if os.path.exists(str(CKPT_Path)):
+         # save the converter under the same name used by the calls below
+         if Custom_Model_Version=='512':
+             wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', 'convertodiff.py')
+             clear_output()
+             call('python convertodiff.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
+         elif Custom_Model_Version=='768':
+             wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', 'convertodiff.py')
+             clear_output()
+             call('python convertodiff.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
+         call('rm convertodiff.py', shell=True)
+         if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             clear_output()
+             done()
+         else:
+             call('rm -r stable-diffusion-custom', shell=True)
+             while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 print('Conversion error')
+                 time.sleep(5)
+     else:
+         while not os.path.exists(str(CKPT_Path)):
+             print('Wrong path, use the file explorer to copy the path')
+             time.sleep(5)
+     os.chdir('/notebooks')
+
+
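+ # Download a v2 .ckpt from a direct or gdrive link with gdown, then convert it
+ # to diffusers format like downloadmodel_pthv2.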
+ def downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version):
+     import wget
+     import gdown
+     os.chdir('/models')
+     # download the checkpoint once with gdown's python API (fuzzy handles share links)
+     gdown.download(url=CKPT_Link, output="model.ckpt", quiet=False, fuzzy=True)
+
+     if os.path.exists('model.ckpt'):
+         if os.path.getsize("model.ckpt") > 1810671599:
+             if Custom_Model_Version=='512':
+                 wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', 'convertodiff.py')
+                 clear_output()
+                 call('python convertodiff.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
+             elif Custom_Model_Version=='768':
+                 wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', 'convertodiff.py')
+                 clear_output()
+                 call('python convertodiff.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
+             call('rm convertodiff.py', shell=True)
+             call('rm model.ckpt', shell=True)
+             if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 clear_output()
+                 done()
+             else:
+                 call('rm -r stable-diffusion-custom', shell=True)
+                 while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                     print('Conversion error')
+                     time.sleep(5)
+         else:
+             # a full v2 checkpoint is over ~1.8 GB; anything smaller is a bad download
+             while os.path.getsize('model.ckpt') < 1810671599:
+                 print('Wrong link, check that the link is valid')
+                 time.sleep(5)
+     os.chdir('/notebooks')
+
+
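+ # Dispatch to the right downloader and return the model path used for training;
+ # the literal "dataset" appears to select the stock v2 model downstream.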
+ def dlv2(Path_to_HuggingFace, CKPT_Path, CKPT_Link, Model_Version, Custom_Model_Version):
+
+     if Path_to_HuggingFace != "":
+         downloadmodel_hfv2(Path_to_HuggingFace)
+         MODEL_NAMEv2="/models/stable-diffusion-custom"
+     elif CKPT_Path !="":
+         downloadmodel_pthv2(CKPT_Path, Custom_Model_Version)
+         MODEL_NAMEv2="/models/stable-diffusion-custom"
+     elif CKPT_Link !="":
+         downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version)
+         MODEL_NAMEv2="/models/stable-diffusion-custom"
+     else:
+         if Model_Version=="512":
+             MODEL_NAMEv2="dataset"
+             print('Using the original V2-512 model')
+         elif Model_Version=="768":
+             MODEL_NAMEv2="dataset"
+             print('Using the original V2-768 model')
+         else:
+             MODEL_NAMEv2=""
+             print('Wrong model version')
+
+     return MODEL_NAMEv2
+
+
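+ # Create or resume a Dreambooth session: set up the session folders, optionally
+ # pull a session from a gdrive link, and convert a previous CKPT back to diffusers.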
+ def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
+
+     import gdown
+     os.chdir('/notebooks')
+     PT=""
+
+     while Session_Name=="":
+         print('Input the Session Name:')
+         Session_Name=input("")
+     Session_Name=Session_Name.replace(" ","_")
+
+     WORKSPACE='/notebooks/Fast-Dreambooth'
+
+     if Session_Link_optional != "":
+         print('Downloading session...')
+         if not os.path.exists(str(WORKSPACE+'/Sessions')):
+             call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
+             time.sleep(1)
+         os.chdir(WORKSPACE+'/Sessions')
+         gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
+         os.chdir(Session_Name)
+         # the downloaded session ships zipped folders; replace any extracted copies
+         call("rm -r instance_images", shell=True)
+         call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r concept_images", shell=True)
+         call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r captions", shell=True)
+         call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         clear_output()
+
+     INSTANCE_NAME=Session_Name
+     OUTPUT_DIR="/models/"+Session_Name
+     SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
+     CONCEPT_DIR=SESSION_DIR+"/concept_images"
+     INSTANCE_DIR=SESSION_DIR+"/instance_images"
+     CAPTIONS_DIR=SESSION_DIR+'/captions'
+     MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
+     resumev2=False
+
+     if os.path.exists(str(SESSION_DIR)):
+         mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
+         if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
+
+             def f(n):
+                 k=0
+                 for i in mdls:
+                     if k==n:
+                         call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
+                     k=k+1
+
+             k=0
+             print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')
+
+             for i in mdls:
+                 print(str(k)+'- '+i)
+                 k=k+1
+             n=input()
+             while int(n)>k-1:
+                 n=input()
+             if n!="000":
+                 f(int(n))
+                 print('Using the model '+ mdls[int(n)]+" ...")
+                 time.sleep(8)
+             else:
+                 print('Skipping the intermediary checkpoints.')
+
+     if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
+         print('Loading session with no previous model, using the original model or the custom downloaded model')
+         if MODEL_NAMEv2=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session Loaded, proceed to uploading instance images')
+
+     elif os.path.exists(MDLPTH):
+         print('Session found, loading the trained model ...')
+         if Model_Version=='512':
+             call("wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py", shell=True)
+             clear_output()
+             print('Session found, loading the trained model ...')
+             call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
+
+         elif Model_Version=='768':
+             call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
+             clear_output()
+             print('Session found, loading the trained model ...')
+             call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
+
+         call('rm /notebooks/convertodiff.py', shell=True)
+
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+             resumev2=True
+             clear_output()
+             print('Session loaded.')
+         else:
+             print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
+
+     elif not os.path.exists(str(SESSION_DIR)):
+         call('mkdir -p '+INSTANCE_DIR, shell=True)
+         print('Creating session...')
+         if MODEL_NAMEv2=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session created, proceed to uploading instance images')
+
+     return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
+
+
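+ # Show a green "Done!" confirmation button.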
+ def done():
+     done = widgets.Button(
+         description='Done!',
+         disabled=True,
+         button_style='success',
+         tooltip='',
+         icon='check'
+     )
+     display(done)
+
+
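+ # Build the upload widget; falls back to copying from IMAGES_FOLDER_OPTIONAL
+ # when a folder path is given instead of uploaded files.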
+ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
+
+     uploader = widgets.FileUpload(description="Choose images", accept='image/*', multiple=True)
+     Upload = widgets.Button(
+         description='Upload',
+         disabled=False,
+         button_style='info',
+         tooltip='Click to upload the chosen instance images',
+         icon=''
+     )
+
+     def up(Upload):
+         with out:
+             uploader.close()
+             Upload.close()
+             upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+             done()
+     out=widgets.Output()
+
+     if IMAGES_FOLDER_OPTIONAL=="":
+         Upload.on_click(up)
+         display(uploader, Upload, out)
+     else:
+         upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+         done()
+
+
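+ # Move uploaded or copied images (and .txt captions) into the session folders,
+ # optionally square-cropping to Crop_size and renaming files to remove spaces.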
+ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
+
+     if os.path.exists(CAPTIONS_DIR+"off"):
+         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
+         time.sleep(2)
+
+     if Remove_existing_instance_images:
+         if os.path.exists(str(INSTANCE_DIR)):
+             call("rm -r " +INSTANCE_DIR, shell=True)
+         if os.path.exists(str(CAPTIONS_DIR)):
+             call("rm -r " +CAPTIONS_DIR, shell=True)
+
+     if not os.path.exists(str(INSTANCE_DIR)):
+         call("mkdir -p " +INSTANCE_DIR, shell=True)
+     if not os.path.exists(str(CAPTIONS_DIR)):
+         call("mkdir -p " +CAPTIONS_DIR, shell=True)
+
+     if IMAGES_FOLDER_OPTIONAL != "":
+         # move caption files out of the image folder first
+         if any(file.endswith('.txt') for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
+             call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
+         if Crop_images:
+             os.chdir(str(IMAGES_FOLDER_OPTIONAL))
+             call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+             os.chdir('/notebooks')
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 extension = filename.split(".")[-1]
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
+                 # crop_image returns a list containing the cropped result
+                 image = crop_image(file, Crop_size)[0] if file.size != (Crop_size, Crop_size) else file
+                 if extension.lower() == "jpg":
+                     image.save(new_path_with_file, format="JPEG", quality=100)
+                 else:
+                     image.save(new_path_with_file, format=extension.upper())
+
+         else:
+             # copy the whole folder once instead of once per file
+             call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
+
+     elif IMAGES_FOLDER_OPTIONAL == "":
+         for filename, file in uploader.value.items():
+             if filename.split(".")[-1]=="txt":
+                 with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
+                     f.write(file['content'].decode())
+         up=[(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1]!="txt"]
+         if Crop_images:
+             for filename, file_info in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 img = Image.open(io.BytesIO(file_info['content']))
+                 extension = filename.split(".")[-1]
+
+                 if extension.lower() == "jpg":
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(new_path_with_file)
+                 # only resave when a crop was actually needed
+                 if file.size != (Crop_size, Crop_size):
+                     image = crop_image(file, Crop_size)[0]
+                     if extension.lower() == "jpg":
+                         image.save(new_path_with_file, format="JPEG", quality=100)
+                     else:
+                         image.save(new_path_with_file, format=extension.upper())
+
+         else:
+             # iterate the image entries only, caption files were written above
+             for filename, file_info in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 img = Image.open(io.BytesIO(file_info['content']))
+
+                 extension = filename.split(".")[-1]
+
+                 if extension.lower() == "jpg":
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+     if ren:
+         i=0
+         for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
+             extension = filename.split(".")[-1]
+             new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
+             call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
+             i=i+1
+
+     os.chdir(INSTANCE_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir(CAPTIONS_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir('/notebooks')
+
+
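+ # Interactive caption editor: select an instance image, view a 420px preview
+ # and edit/save its .txt caption.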
+ def caption(CAPTIONS_DIR, INSTANCE_DIR):
+
+     if os.path.exists(CAPTIONS_DIR+"off"):
+         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
+         time.sleep(2)
+
+     clear_output()
+
+     def Caption(path):
+         if path!="Select an instance image to caption":
+
+             name = os.path.splitext(os.path.basename(path))[0]
+             ext = os.path.splitext(os.path.basename(path))[-1][1:]
+             if ext.lower()=="jpg":
+                 ext = "JPEG"
+
+             if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+             else:
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write("")
+                 text = ""
+
+             # render a small, low-quality preview to keep the widget payload light
+             img = Image.open(os.path.join(INSTANCE_DIR,path))
+             img = img.resize((420, 420))
+             image_bytes = BytesIO()
+             img.save(image_bytes, format=ext, quality=10)
+             image_bytes.seek(0)
+             image_data = image_bytes.read()
+             img = image_data
+             image = widgets.Image(
+                 value=img,
+                 width=420,
+                 height=420
+             )
+             text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
+
+             def update_text(text):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write(text)
+
+             button = widgets.Button(description='Save', button_style='success')
+             button.on_click(lambda b: update_text(text_area.value))
+
+             return widgets.VBox([widgets.HBox([image, text_area, button])])
+
+     paths = os.listdir(INSTANCE_DIR)
+     widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
+
+     out = widgets.Output()
+
+     def click(change):
+         with out:
+             out.clear_output()
+             display(Caption(change.new))
+
+     widgets_l.observe(click, names='value')
+     display(widgets.HBox([widgets_l, out]))
+
+
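+ # Main training entry point: optionally trains the text encoder (and the
+ # concept), then the UNet, and finally converts the result to a single CKPT.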
+ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
+
+     if resumev2 and not Resume_Training:
+         print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model. yes or no?')
+         while True:
+             ansres=input('')
+             if ansres=='no':
+                 Resume_Training = True
+                 break
+             elif ansres=='yes':
+                 Resume_Training = False
+                 resumev2 = False
+                 break
+
+     while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
+         print('No model found, use the "Model Download" cell to download a model.')
+         time.sleep(5)
+
+     if os.path.exists(CAPTIONS_DIR+"off"):
+         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
+         time.sleep(2)
+
+     MODELT_NAME=MODEL_NAMEv2
+
+     Seed=random.randint(1, 999999)
+
+     Style=""
+     if Style_Training:
+         Style="--Style"
+
+     extrnlcptn=""
+     if External_Captions:
+         extrnlcptn="--external_captions"
+
+     precision="fp16"
+
+     # gradient checkpointing is only needed at higher resolutions
+     GCUNET="--gradient_checkpointing"
+     if Resolution<=640:
+         GCUNET=""
+
+     resuming=""
+     if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         MODELT_NAME=OUTPUT_DIR
+         print('Resuming Training...')
+         resuming="Yes"
+     elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         print('Previous model not found, training a new model...')
+         MODELT_NAME=MODEL_NAMEv2
+         while MODEL_NAMEv2=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+             time.sleep(5)
+
+     trnonltxt=""
+     if UNet_Training_Steps==0:
+         trnonltxt="--train_only_text_encoder"
+
+     Enable_text_encoder_training= True
+     Enable_Text_Encoder_Concept_Training= True
+
+     if Text_Encoder_Training_Steps==0 or External_Captions:
+         Enable_text_encoder_training= False
+     else:
+         stptxt=Text_Encoder_Training_Steps
+
+     if Text_Encoder_Concept_Training_Steps==0:
+         Enable_Text_Encoder_Concept_Training= False
+     else:
+         stptxtc=Text_Encoder_Concept_Training_Steps
+
+     if Save_Checkpoint_Every is None:
+         Save_Checkpoint_Every=1
+     stp=0
+     if Start_saving_from_the_step is None:
+         Start_saving_from_the_step=0
+     if Start_saving_from_the_step < 200:
+         Start_saving_from_the_step=Save_Checkpoint_Every
+     stpsv=Start_saving_from_the_step
+     if Save_Checkpoint_Every_n_Steps:
+         stp=Save_Checkpoint_Every
+
+     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+trnonltxt+' \
+         --train_text_encoder \
+         --image_captions_filename \
+         --dump_only_text_encoder \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution=512 \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 --gradient_checkpointing \
+         --use_8bit_adam \
+         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
+         --lr_scheduler="polynomial" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
+         clear_output()
+         if resuming=="Yes":
+             print('Resuming Training...')
+         print('Training the UNet...')
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+Style+' \
+         '+extrnlcptn+' \
+         --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
+         --image_captions_filename \
+         --train_only_unet \
+         --Session_dir='+SESSION_DIR+' \
+         --save_starting_step='+str(stpsv)+' \
+         --save_n_steps='+str(stp)+' \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution='+str(Resolution)+' \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 '+GCUNET+' \
+         --use_8bit_adam \
+         --learning_rate='+str(UNet_Learning_Rate)+' \
+         --lr_scheduler="polynomial" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     if Enable_text_encoder_training:
+         print('Training the text encoder...')
+         if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
+             call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
+         dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
+
+     if Enable_Text_Encoder_Concept_Training:
+         if os.path.exists(CONCEPT_DIR) and os.listdir(CONCEPT_DIR)!=[]:
+             clear_output()
+             if resuming=="Yes":
+                 print('Resuming Training...')
+             print('Training the text encoder on the concept...')
+             dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
+         else:
+             clear_output()
+             if resuming=="Yes":
+                 print('Resuming Training...')
+             print('No concept images found, skipping concept training...')
+             Text_Encoder_Concept_Training_Steps=0
+             time.sleep(8)
+
+     if UNet_Training_Steps!=0:
+         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
+
+     if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions:
+         print('Nothing to do')
+     else:
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+             # convert the trained diffusers weights back to a single CKPT in the session folder
+             call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
+             clear_output()
+             if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
+                 clear_output()
+                 print("DONE, the CKPT model is in the session's folder")
+             else:
+                 print("Something went wrong")
+         else:
+             print("Something went wrong")
+
+     return resumev2
+
+
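+ # Prepare the A1111 webui for testing a trained model: clone the repos, set up
+ # gradio sharing or a localtunnel, and pick the right v2 inference config.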
+ def testv2(Custom_Path, Previous_Session_Name, Session_Name, Model_Version, User, Password, Use_localtunnel):
+
+     if Previous_Session_Name!="":
+         print("Loading a previous session model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
+         path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in the previous session")
+             time.sleep(5)
+
+     elif Custom_Path!="":
+         print("Loading model from a custom path")
+         path_to_trained_model=Custom_Path
+
+         while not os.path.exists(path_to_trained_model):
+             print("Wrong Path")
+             time.sleep(5)
+
+     else:
+         print("Loading the trained model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
+         path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in this session")
+             time.sleep(5)
+
+     auth=f"--gradio-auth {User}:{Password}"
+     if User =="" or Password=="":
+         auth=""
+
+     os.chdir('/notebooks')
+     if not os.path.exists('sd_db'):
+         call('mkdir sd_db', shell=True)
+     os.chdir('/notebooks/sd_db')
+     call('git clone --depth 1 --branch main https://github.com/Stability-AI/stablediffusion', shell=True)
+     call('git clone --depth 1 --branch Paperspacedb https://github.com/TheLastBen/stable-diffusion-webui', shell=True)
+     clear_output()
+
+     if not os.path.exists('/notebooks/sd_db/stablediffusion/src/k-diffusion/k_diffusion'):
+         call('mkdir /notebooks/sd_db/stablediffusion/src', shell=True)
+         os.chdir('/notebooks/sd_db/stablediffusion/src')
+         call('git clone -q --depth 1 --no-tags https://github.com/TheLastBen/taming-transformers.git', shell=True)
+         call('git clone -q --depth 1 https://github.com/salesforce/BLIP', shell=True)
+         call('git clone -q --depth 1 https://github.com/sczhou/CodeFormer', shell=True)
+         call('git clone -q --depth 1 --branch master https://github.com/crowsonkb/k-diffusion', shell=True)
+
+     if not os.path.exists('/usr/lib/node_modules/localtunnel'):
+         call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))
+
+     share=''
+     if not Use_localtunnel:
+         share='--share'
+         call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/blocks.py', shell=True)
+
+     else:
+         share=''
+         os.chdir('/notebooks')
+         # start a localtunnel on gradio's port and capture its public URL
+         call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
+         time.sleep(2)
+         call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
+         time.sleep(2)
+         srv= getoutput('cat /notebooks/srvr.txt')
+
+         # patch gradio so it reports the tunnel hostname instead of the local address
+         for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
+             # keep the original indentation when rewriting a line
+             indent = line[:len(line)-len(line.lstrip())]
+             if line.strip().startswith('self.server_name ='):
+                 line = indent + f'self.server_name = "{srv[8:]}"\n'
+             if line.strip().startswith('self.server_port ='):
+                 line = indent + 'self.server_port = 443\n'
+             if line.strip().startswith('self.protocol = "https"'):
+                 line = indent + 'self.protocol = "https"\n'
+             if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
+                 line = ''
+             if line.strip().startswith('else "http"'):
+                 line = ''
+             sys.stdout.write(line)
+
+         call('rm /notebooks/srv.txt', shell=True)
+         call('rm /notebooks/srvr.txt', shell=True)
+
+     os.chdir('/notebooks/sd_db/stable-diffusion-webui')
+     print('')
+     call('git pull', shell=True)
+     clear_output()
+
+     if Model_Version == "768":
+         configf="--config /notebooks/sd_db/stablediffusion/configs/stable-diffusion/v2-inference-v.yaml --no-half"
+     elif Model_Version == "512":
+         configf="--config /notebooks/sd_db/stablediffusion/configs/stable-diffusion/v2-inference.yaml --no-half"
+     else:
+         configf="--config /notebooks/sd_db/stablediffusion/configs/stable-diffusion/v2-inference-v.yaml --no-half"
+
+     return path_to_trained_model, configf, auth
+
+
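+ # Widget to delete a stored session (and its converted model) from the storage.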
+ def clean():
+
+     Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+
+     s = widgets.Select(
+         options=Sessions,
+         rows=5,
+         description='',
+         disabled=False
+     )
+
+     out=widgets.Output()
+
+     d = widgets.Button(
+         description='Remove',
+         disabled=False,
+         button_style='warning',
+         tooltip='Remove the selected session',
+         icon='warning'
+     )
+
+     def rem(d):
+         with out:
+             if s.value is not None:
+                 clear_output()
+                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
+                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
+                 if os.path.exists('/notebooks/models/'+s.value):
+                     call('rm -r /notebooks/models/'+s.value, shell=True)
+                 s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+             else:
+                 d.close()
+                 s.close()
+                 clear_output()
+                 print("NOTHING TO REMOVE")
+
+     d.on_click(rem)
+     if s.value is not None:
+         display(s,d,out)
+     else:
+         print("NOTHING TO REMOVE")
+
+
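+ # Upload the trained diffusers model to the Hugging Face hub, folder by folder,
+ # with a simple text progress bar.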
+ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
+
+     from slugify import slugify
+     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
+     from huggingface_hub import create_repo
+     from IPython.display import display_markdown
+
+     if(Name_of_your_concept == ""):
+         Name_of_your_concept = Session_Name
+     Name_of_your_concept = Name_of_your_concept.replace(" ","-")
+
+     if hf_token_write =="":
+         print('Your Hugging Face write access token : ')
+         hf_token_write=input()
+
+     hf_token = hf_token_write
+
+     api = HfApi()
+     your_username = api.whoami(token=hf_token)["name"]
+
+     if(Save_concept_to == "Public_Library"):
+         repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
+         # Join the Concepts Library organization if you aren't part of it already
+         call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
+     else:
+         repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
+     output_dir = '/notebooks/models/'+INSTANCE_NAME
+
+     def bar(prg):
+         br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
+         return br
+
+     print("Loading...")
+
+     # fetch the stock feature_extractor via a sparse checkout so the uploaded
+     # repo is a complete diffusers model
+     os.chdir(OUTPUT_DIR)
+     call('rm -r feature_extractor .git', shell=True)
+     clear_output()
+     call('git init', shell=True)
+     call('git lfs install --system --skip-repo', shell=True)
+     call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/stabilityai/stable-diffusion-2-1"', shell=True)
+     call('git config core.sparsecheckout true', shell=True)
+     call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
+     call('git pull origin main', shell=True)
+     call('rm -r .git', shell=True)
+     os.chdir('/notebooks')
+     clear_output()
+
+     print(bar(1))
+
+     readme_text = f'''---
+ license: creativeml-openrail-m
+ tags:
+ - text-to-image
+ - stable-diffusion
+ ---
+ ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
+
+ Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
+ '''
+     # Save the readme to a file
+     with open("README.md", "w") as readme_file:
+         readme_file.write(readme_text)
+
+     operations = [
+         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
+         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
+     ]
+     create_repo(repo_id, private=True, token=hf_token)
+
+     api.create_commit(
+         repo_id=repo_id,
+         operations=operations,
+         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
+         token=hf_token
+     )
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/feature_extractor",
+         path_in_repo="feature_extractor",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(8))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/scheduler",
+         path_in_repo="scheduler",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(9))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/text_encoder",
+         path_in_repo="text_encoder",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(12))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/tokenizer",
+         path_in_repo="tokenizer",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(13))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/unet",
+         path_in_repo="unet",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(21))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/vae",
+         path_in_repo="vae",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(23))
+
+     api.upload_file(
+         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
+         path_in_repo="model_index.json",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(25))
+
+     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
+     done()
+
+
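+ # Smart square crop (same approach as the A1111 autocrop helper): scores face,
+ # corner and entropy points of interest and crops around their weighted center.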
+ def crop_image(im, size):
+
+     GREEN = "#0F0"
+     BLUE = "#00F"
+     RED = "#F00"
+
+     def focal_point(im, settings):
+         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
+         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
+         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
+
+         pois = []
+
+         weight_pref_total = 0
+         if len(corner_points) > 0:
+             weight_pref_total += settings.corner_points_weight
+         if len(entropy_points) > 0:
+             weight_pref_total += settings.entropy_points_weight
+         if len(face_points) > 0:
+             weight_pref_total += settings.face_points_weight
+
+         corner_centroid = None
+         if len(corner_points) > 0:
+             corner_centroid = centroid(corner_points)
+             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+             pois.append(corner_centroid)
+
+         entropy_centroid = None
+         if len(entropy_points) > 0:
+             entropy_centroid = centroid(entropy_points)
+             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+             pois.append(entropy_centroid)
+
+         face_centroid = None
+         if len(face_points) > 0:
+             face_centroid = centroid(face_points)
+             face_centroid.weight = settings.face_points_weight / weight_pref_total
+             pois.append(face_centroid)
+
+         average_point = poi_average(pois, settings)
+
+         return average_point
+
+     def image_face_points(im, settings):
+
+         np_im = np.array(im)
+         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+         tries = [
+             [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+         ]
+         for t in tries:
+             classifier = cv2.CascadeClassifier(t[0])
+             minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
+             try:
+                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+                                                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+             except Exception:
+                 continue
+
+             if len(faces) > 0:
+                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+                 return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+         return []
+
+     def image_corner_points(im, settings):
+         grayscale = im.convert("L")
+
+         # grey out the bottom 10% so corner detection ignores watermarks/borders there
+         gd = ImageDraw.Draw(grayscale)
+         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+
+         np_im = np.array(grayscale)
+
+         points = cv2.goodFeaturesToTrack(
+             np_im,
+             maxCorners=100,
+             qualityLevel=0.04,
+             minDistance=min(grayscale.width, grayscale.height)*0.06,
+             useHarrisDetector=False,
+         )
+
+         if points is None:
+             return []
+
+         focal_points = []
+         for point in points:
+             x, y = point.ravel()
+             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+
+         return focal_points
+
+     def image_entropy_points(im, settings):
+         landscape = im.height < im.width
+         portrait = im.height > im.width
+         if landscape:
+             move_idx = [0, 2]
+             move_max = im.size[0]
+         elif portrait:
+             move_idx = [1, 3]
+             move_max = im.size[1]
+         else:
+             return []
+
+         # slide a crop-sized window along the long axis and keep the most entropic spot
+         e_max = 0
+         crop_current = [0, 0, settings.crop_width, settings.crop_height]
+         crop_best = crop_current
+         while crop_current[move_idx[1]] < move_max:
+             crop = im.crop(tuple(crop_current))
+             e = image_entropy(crop)
+
+             if (e > e_max):
+                 e_max = e
+                 crop_best = list(crop_current)
+
+             crop_current[move_idx[0]] += 4
+             crop_current[move_idx[1]] += 4
+
+         x_mid = int(crop_best[0] + settings.crop_width/2)
+         y_mid = int(crop_best[1] + settings.crop_height/2)
+
+         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
+
+     def image_entropy(im):
+         # greyscale image entropy
+         band = np.asarray(im.convert("1"), dtype=np.uint8)
+         hist, _ = np.histogram(band, bins=range(0, 256))
+         hist = hist[hist > 0]
+         return -np.log2(hist / hist.sum()).sum()
+
+     def centroid(pois):
+         x = [poi.x for poi in pois]
+         y = [poi.y for poi in pois]
+         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
+
+     def poi_average(pois, settings):
+         weight = 0.0
+         x = 0.0
+         y = 0.0
+         for poi in pois:
+             weight += poi.weight
+             x += poi.x * poi.weight
+             y += poi.y * poi.weight
+         avg_x = round(weight and x / weight)
+         avg_y = round(weight and y / weight)
+
+         return PointOfInterest(avg_x, avg_y)
+
+     def is_landscape(w, h):
+         return w > h
+
+     def is_portrait(w, h):
+         return h > w
+
+     def is_square(w, h):
+         return w == h
+
+     class PointOfInterest:
+         def __init__(self, x, y, weight=1.0, size=10):
+             self.x = x
+             self.y = y
+             self.weight = weight
+             self.size = size
+
+         def bounding(self, size):
+             return [
+                 self.x - size//2,
+                 self.y - size//2,
+                 self.x + size//2,
+                 self.y + size//2
+             ]
+
+     class Settings:
+         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
+             self.crop_width = crop_width
+             self.crop_height = crop_height
+             self.corner_points_weight = corner_points_weight
+             self.entropy_points_weight = entropy_points_weight
+             self.face_points_weight = face_points_weight
+
+     settings = Settings(
+         crop_width = size,
+         crop_height = size,
+         face_points_weight = 0.9,
+         entropy_points_weight = 0.15,
+         corner_points_weight = 0.5,
+     )
+
+     # scale the image so the short side matches the crop size
+     scale_by = 1
+     if is_landscape(im.width, im.height):
+         scale_by = settings.crop_height / im.height
+     elif is_portrait(im.width, im.height):
+         scale_by = settings.crop_width / im.width
+     elif is_square(im.width, im.height):
+         if is_square(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_landscape(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_portrait(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_height / im.height
+
+     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
+     im_debug = im.copy()
+
+     focus = focal_point(im_debug, settings)
+
+     # take the focal point and turn it into crop coordinates that try to center over the focal
+     # point but then get adjusted back into the frame
+     y_half = int(settings.crop_height / 2)
+     x_half = int(settings.crop_width / 2)
+
+     x1 = focus.x - x_half
+     if x1 < 0:
+         x1 = 0
+     elif x1 + settings.crop_width > im.width:
+         x1 = im.width - settings.crop_width
+
+     y1 = focus.y - y_half
+     if y1 < 0:
+         y1 = 0
+     elif y1 + settings.crop_height > im.height:
+         y1 = im.height - settings.crop_height
+
+     x2 = x1 + settings.crop_width
+     y2 = y1 + settings.crop_height
+
+     crop = [x1, y1, x2, y2]
+
+     results = []
+
+     results.append(im.crop(tuple(crop)))
+
+     return results