TheLastBen committed
Commit d74b49a · 1 Parent(s): eebb9e6

Create mainpaperspacev1.py

Files changed (1)
  1. mainpaperspacev1.py +1247 -0
mainpaperspacev1.py ADDED
@@ -0,0 +1,1247 @@
+ from IPython.display import clear_output, display
+ from subprocess import call, getoutput
+ import ipywidgets as widgets
+ import io
+ from PIL import Image, ImageDraw
+ import fileinput
+ import time
+ import os
+ from os import listdir
+ from os.path import isfile
+ from tqdm import tqdm
+ import random
+ import sys
+ import cv2
+ import gdown  # used by downloadmodel_lnk and sess below
+ from io import BytesIO
+ import requests
+ from collections import defaultdict
+ from math import log, sqrt
+ import numpy as np
+
+
+
+ def Deps(force_reinstall):
+
+     if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
+         print('Dependencies already installed')
+     else:
+         print('Installing the dependencies...')
+         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
+         if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
+             os.chdir('/usr/local/lib/python3.9/dist-packages')
+             call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         if not os.path.exists('/models'):
+             call('mkdir /models', shell=True)
+         if not os.path.exists('/notebooks/models'):
+             call('ln -s /models /notebooks', shell=True)
+         if os.path.exists('/deps'):
+             call("rm -r /deps", shell=True)
+         call('mkdir /deps', shell=True)
+         if not os.path.exists('cache'):
+             call('mkdir cache', shell=True)
+         os.chdir('/deps')
+         call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
+         call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
+         call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
+         os.chdir('/notebooks')
+         call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
+         if not os.path.exists('/notebooks/diffusers'):
+             call('ln -s /diffusers /notebooks', shell=True)
+         call("rm -r /deps", shell=True)
+         os.chdir('/notebooks')
+         clear_output()
+
+     done()
+
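+ # Illustrative usage (a sketch, not part of the original notebook cells): the
+ # setup cell would typically call Deps once per machine; force_reinstall=True
+ # redoes the whole environment setup.
+ #   Deps(force_reinstall=False)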
+
+
+
+ def downloadmodel_hf(Path_to_HuggingFace):
+     import wget
+
+     if os.path.exists('/notebooks/stable-diffusion-custom'):
+         call("rm -r /notebooks/stable-diffusion-custom", shell=True)
+     clear_output()
+
+     if os.path.exists('/content/gdrive/MyDrive/Fast-Dreambooth/token.txt'):
+         with open("/content/gdrive/MyDrive/Fast-Dreambooth/token.txt") as f:
+             token = f.read()
+         authe=f'https://USER:{token}@'
+     else:
+         authe="https://"
+
+     os.chdir('/notebooks')
+     clear_output()
+     call("mkdir /notebooks/stable-diffusion-custom", shell=True)
+     os.chdir("/notebooks/stable-diffusion-custom")
+     call("git init", shell=True)
+     call("git lfs install --system --skip-repo", shell=True)
+     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
+     call("git config core.sparsecheckout true", shell=True)
+     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
+     call("git pull origin main", shell=True)
+     if os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+         call("rm -r /notebooks/stable-diffusion-custom/.git", shell=True)
+         call("rm -r /notebooks/stable-diffusion-custom/model_index.json", shell=True)
+         wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')
+         os.chdir('/notebooks')
+         clear_output()
+         done()
+     else:
+         while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             print('Check the link you provided')
+             time.sleep(5)
+
+
+ def downloadmodel_pth(CKPT_Path):
+     import wget
+     os.chdir('/notebooks')
+     clear_output()
+     if os.path.exists(str(CKPT_Path)):
+         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
+         call('unzip -o -q refmdlz', shell=True)
+         call('rm -f refmdlz', shell=True)
+         wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
+         clear_output()
+         call('python /notebooks/convertodiffv1.py '+CKPT_Path+' /models/stable-diffusion-custom --v1', shell=True)
+         call('rm /notebooks/convertodiffv1.py', shell=True)
+         call('rm -r /notebooks/refmdl', shell=True)
+         if os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             clear_output()
+             done()
+         else:
+             call('rm -r /notebooks/stable-diffusion-custom', shell=True)
+             while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 print('Conversion error')
+                 time.sleep(5)
+     else:
+         while not os.path.exists(str(CKPT_Path)):
+             print('Wrong path, use the file explorer to copy the path')
+             time.sleep(5)
+
+
+ def downloadmodel_lnk(CKPT_Link):
+     import wget
+     os.chdir('/notebooks')
+     gdown.download(url=CKPT_Link, output="model.ckpt", quiet=False, fuzzy=True)
+
+     if os.path.exists('/notebooks/model.ckpt'):
+         if os.path.getsize("/notebooks/model.ckpt") > 1810671599:
+             wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
+             call('unzip -o -q refmdlz', shell=True)
+             call('rm -f refmdlz', shell=True)
+             wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
+             clear_output()
+             call('python /notebooks/convertodiffv1.py /notebooks/model.ckpt /models/stable-diffusion-custom --v1', shell=True)
+             call('rm /notebooks/convertodiffv1.py', shell=True)
+             call('rm -r /notebooks/refmdl', shell=True)
+             if os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 clear_output()
+                 done()
+             else:
+                 call('rm -r /notebooks/stable-diffusion-custom', shell=True)
+                 while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                     print('Conversion error')
+                     time.sleep(5)
+         else:
+             while os.path.getsize('/notebooks/model.ckpt') < 1810671599:
+                 print('Wrong link, check that the link is valid')
+                 time.sleep(5)
+
+
+ def dl(Path_to_HuggingFace, CKPT_Path, CKPT_Link):
+
+     if Path_to_HuggingFace != "":
+         downloadmodel_hf(Path_to_HuggingFace)
+         MODEL_NAME="/notebooks/stable-diffusion-custom"
+     elif CKPT_Path !="":
+         downloadmodel_pth(CKPT_Path)
+         MODEL_NAME="/notebooks/stable-diffusion-custom"
+     elif CKPT_Link !="":
+         downloadmodel_lnk(CKPT_Link)
+         MODEL_NAME="/notebooks/stable-diffusion-custom"
+     else:
+         MODEL_NAME="dataset"
+         print('Using the original V1.5 model')
+
+     return MODEL_NAME
+
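+ # Illustrative usage (assumed repo name, not from the original file): pick
+ # exactly one source and leave the other two arguments empty; the returned path
+ # feeds the later training cells.
+ #   MODEL_NAME = dl(Path_to_HuggingFace="runwayml/stable-diffusion-v1-5", CKPT_Path="", CKPT_Link="")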
+
+ def sess(Session_Name, Session_Link_optional, MODEL_NAME):
+     import wget
+     os.chdir('/notebooks')
+     PT=""
+
+     while Session_Name=="":
+         print('Input the Session Name:')
+         Session_Name=input("")
+     Session_Name=Session_Name.replace(" ","_")
+
+     WORKSPACE='/notebooks/Fast-Dreambooth'
+
+     if Session_Link_optional != "":
+         print('Downloading session...')
+         if not os.path.exists(str(WORKSPACE+'/Sessions')):
+             call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
+             time.sleep(1)
+         os.chdir(WORKSPACE+'/Sessions')
+         gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
+         os.chdir(Session_Name)
+         call("rm -r instance_images", shell=True)
+         call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r concept_images", shell=True)
+         call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r captions", shell=True)
+         call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         clear_output()
+
+     INSTANCE_NAME=Session_Name
+     OUTPUT_DIR="/models/"+Session_Name
+     SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
+     CONCEPT_DIR=SESSION_DIR+"/concept_images"
+     INSTANCE_DIR=SESSION_DIR+"/instance_images"
+     CAPTIONS_DIR=SESSION_DIR+'/captions'
+     MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
+     resume=False
+
+     if os.path.exists(str(SESSION_DIR)):
+         mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
+         if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
+
+             def f(n):
+                 k=0
+                 for i in mdls:
+                     if k==n:
+                         call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
+                     k=k+1
+
+             k=0
+             print('No final checkpoint model found; select which intermediary checkpoint to use (enter only the number, 000 to skip):\n')
+
+             for i in mdls:
+                 print(str(k)+'- '+i)
+                 k=k+1
+             n=input()
+             while int(n)>k-1:
+                 n=input()
+             if n!="000":
+                 f(int(n))
+                 print('Using the model '+ mdls[int(n)]+" ...")
+                 time.sleep(8)
+                 clear_output()
+             else:
+                 print('Skipping the intermediary checkpoints.')
+
+     if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
+         print('Loading session with no previous model, using the original model or the custom downloaded model')
+         if MODEL_NAME=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session Loaded, proceed to uploading instance images')
+
+     elif os.path.exists(MDLPTH):
+         print('Session found, loading the trained model ...')
+         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
+         call('unzip -o -q refmdlz', shell=True, stdout=open('/dev/null', 'w'))
+         call('rm -f refmdlz', shell=True, stdout=open('/dev/null', 'w'))
+         wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
+         call('python /notebooks/convertodiffv1.py '+MDLPTH+' '+OUTPUT_DIR+' --v1', shell=True)
+         call('rm /notebooks/convertodiffv1.py', shell=True)
+         call('rm -r /notebooks/refmdl', shell=True)
+
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+             resume=True
+             clear_output()
+             print('Session loaded.')
+         else:
+             print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
+
+     elif not os.path.exists(str(SESSION_DIR)):
+         call('mkdir -p '+INSTANCE_DIR, shell=True)
+         print('Creating session...')
+         if MODEL_NAME=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session created, proceed to uploading instance images')
+
+     return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume
+
+
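+ # Illustrative usage (assumed session name): creates or resumes a session and
+ # returns the paths the later cells rely on.
+ #   PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, \
+ #   INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume = sess("my_session", "", MODEL_NAME)
+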
+ def done():
+     done = widgets.Button(
+         description='Done!',
+         disabled=True,
+         button_style='success',
+         tooltip='',
+         icon='check'
+     )
+     display(done)
+
+
+ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
+
+     uploader = widgets.FileUpload(description="Choose images", accept='image/*', multiple=True)
+     Upload = widgets.Button(
+         description='Upload',
+         disabled=False,
+         button_style='info',
+         tooltip='Click to upload the chosen instance images',
+         icon=''
+     )
+
+     def up(Upload):
+         with out:
+             uploader.close()
+             Upload.close()
+             upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+             done()
+     out=widgets.Output()
+
+     if IMAGES_FOLDER_OPTIONAL=="":
+         Upload.on_click(up)
+         display(uploader, Upload, out)
+     else:
+         upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+         done()
+
+
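+ # Illustrative usage (assumed values): with IMAGES_FOLDER_OPTIONAL empty, a
+ # file-upload widget is displayed; otherwise the given folder is processed
+ # directly by upld below.
+ #   uplder(Remove_existing_instance_images=True, Crop_images=True, Crop_size=512,
+ #          IMAGES_FOLDER_OPTIONAL="", INSTANCE_DIR=INSTANCE_DIR, CAPTIONS_DIR=CAPTIONS_DIR, ren=True)
+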
+ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
+
+     if os.path.exists(CAPTIONS_DIR+"off"):
+         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
+         time.sleep(2)
+
+     if Remove_existing_instance_images:
+         if os.path.exists(str(INSTANCE_DIR)):
+             call("rm -r " +INSTANCE_DIR, shell=True)
+         if os.path.exists(str(CAPTIONS_DIR)):
+             call("rm -r " +CAPTIONS_DIR, shell=True)
+
+     if not os.path.exists(str(INSTANCE_DIR)):
+         call("mkdir -p " +INSTANCE_DIR, shell=True)
+     if not os.path.exists(str(CAPTIONS_DIR)):
+         call("mkdir -p " +CAPTIONS_DIR, shell=True)
+
+     if IMAGES_FOLDER_OPTIONAL !="":
+         if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
+             call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
+         if Crop_images:
+             os.chdir(str(IMAGES_FOLDER_OPTIONAL))
+             call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+             os.chdir('/notebooks')
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
+                 width, height = file.size
+                 image = file
+                 if file.size !=(Crop_size, Crop_size):
+                     image=crop_image(file, Crop_size)
+                     if extension.upper() in ("JPG", "JPEG"):
+                         image[0].save(new_path_with_file, format="JPEG", quality = 100)
+                     else:
+                         image[0].save(new_path_with_file, format=extension.upper())
+                 else:
+                     call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
+         else:
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
+
+     elif IMAGES_FOLDER_OPTIONAL =="":
+         up=""
+         for filename, file in uploader.value.items():
+             if filename.split(".")[-1]=="txt":
+                 with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
+                     f.write(file['content'].decode())
+         up=[(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1]!="txt"]
+         if Crop_images:
+             for filename, file_info in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 img = Image.open(io.BytesIO(file_info['content']))
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+
+                 if extension.upper() in ("JPG", "JPEG"):
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(new_path_with_file)
+                 width, height = file.size
+                 image = img
+                 if file.size !=(Crop_size, Crop_size):
+                     image=crop_image(file, Crop_size)
+                     if extension.upper() in ("JPG", "JPEG"):
+                         image[0].save(new_path_with_file, format="JPEG", quality = 100)
+                     else:
+                         image[0].save(new_path_with_file, format=extension.upper())
+
+         else:
+             for filename, file_info in tqdm(uploader.value.items(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 img = Image.open(io.BytesIO(file_info['content']))
+
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+
+                 if extension.upper() in ("JPG", "JPEG"):
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+     if ren:
+         i=0
+         for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
+             extension = filename.split(".")[-1]
+             identifier=filename.split(".")[0]
+             new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
+             call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
+             i=i+1
+
+     os.chdir(INSTANCE_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir(CAPTIONS_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir('/notebooks')
+
+
+ def caption(CAPTIONS_DIR, INSTANCE_DIR):
+
+     if os.path.exists(CAPTIONS_DIR+"off"):
+         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
+         time.sleep(2)
+
+     paths=""
+     out=""
+     widgets_l=""
+     clear_output()
+     def Caption(path):
+         if path!="Select an instance image to caption":
+
+             name = os.path.splitext(os.path.basename(path))[0]
+             ext=os.path.splitext(os.path.basename(path))[-1][1:]
+             if ext.lower()=="jpg":
+                 ext="JPEG"
+
+             if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+             else:
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write("")
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+
+             img=Image.open(os.path.join(INSTANCE_DIR,path))
+             img=img.resize((420, 420))
+             image_bytes = BytesIO()
+             img.save(image_bytes, format=ext, quality=10)
+             image_bytes.seek(0)
+             image_data = image_bytes.read()
+             img= image_data
+             image = widgets.Image(
+                 value=img,
+                 width=420,
+                 height=420
+             )
+             text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
+
+             def update_text(text):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write(text)
+
+             button = widgets.Button(description='Save', button_style='success')
+             button.on_click(lambda b: update_text(text_area.value))
+
+             return widgets.VBox([widgets.HBox([image, text_area, button])])
+
+     paths = os.listdir(INSTANCE_DIR)
+     widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
+
+     out = widgets.Output()
+
+     def click(change):
+         with out:
+             out.clear_output()
+             display(Caption(change.new))
+
+     widgets_l.observe(click, names='value')
+     display(widgets.HBox([widgets_l, out]))
+
+
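+ # Illustrative usage: opens the caption-editor widget over the uploaded
+ # instance images, saving one .txt file per image.
+ #   caption(CAPTIONS_DIR, INSTANCE_DIR)
+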
+ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
+
+     if resume and not Resume_Training:
+         print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model. yes or no?')
+         while True:
+             ansres=input('')
+             if ansres=='no':
+                 Resume_Training = True
+                 break
+             elif ansres=='yes':
+                 Resume_Training = False
+                 resume= False
+                 break
+
+     while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
+         print('No model found, use the "Model Download" cell to download a model.')
+         time.sleep(5)
+
+     if os.path.exists(CAPTIONS_DIR+"off"):
+         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
+         time.sleep(2)
+
+     MODELT_NAME=MODEL_NAME
+
+     Seed=random.randint(1, 999999)
+
+     Style=""
+     if Style_Training:
+         Style="--Style"
+
+     extrnlcptn=""
+     if External_Captions:
+         extrnlcptn="--external_captions"
+
+     precision="fp16"
+
+     GCUNET="--gradient_checkpointing"
+     if Resolution<=640:
+         GCUNET=""
+
+     resuming=""
+     if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         MODELT_NAME=OUTPUT_DIR
+         print('Resuming Training...')
+         resuming="Yes"
+     elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         print('Previous model not found, training a new model...')
+         MODELT_NAME=MODEL_NAME
+         while MODEL_NAME=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+             time.sleep(5)
+
+     trnonltxt=""
+     if UNet_Training_Steps==0:
+         trnonltxt="--train_only_text_encoder"
+
+     Enable_text_encoder_training= True
+     Enable_Text_Encoder_Concept_Training= True
+
+     if Text_Encoder_Training_Steps==0 or External_Captions:
+         Enable_text_encoder_training= False
+     else:
+         stptxt=Text_Encoder_Training_Steps
+
+     if Text_Encoder_Concept_Training_Steps==0:
+         Enable_Text_Encoder_Concept_Training= False
+     else:
+         stptxtc=Text_Encoder_Concept_Training_Steps
+
+     if Save_Checkpoint_Every is None:
+         Save_Checkpoint_Every=1
+     stp=0
+     if Start_saving_from_the_step is None:
+         Start_saving_from_the_step=0
+     if (Start_saving_from_the_step < 200):
+         Start_saving_from_the_step=Save_Checkpoint_Every
+     stpsv=Start_saving_from_the_step
+     if Save_Checkpoint_Every_n_Steps:
+         stp=Save_Checkpoint_Every
+
+     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+trnonltxt+' \
+         --train_text_encoder \
+         --image_captions_filename \
+         --dump_only_text_encoder \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution=512 \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 --gradient_checkpointing \
+         --use_8bit_adam \
+         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
+         --lr_scheduler="polynomial" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
+         clear_output()
+         if resuming=="Yes":
+             print('Resuming Training...')
+         print('Training the UNet...')
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+Style+' \
+         '+extrnlcptn+' \
+         --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
+         --image_captions_filename \
+         --train_only_unet \
+         --Session_dir='+SESSION_DIR+' \
+         --save_starting_step='+str(stpsv)+' \
+         --save_n_steps='+str(stp)+' \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution='+str(Resolution)+' \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 '+GCUNET+' \
+         --use_8bit_adam \
+         --learning_rate='+str(UNet_Learning_Rate)+' \
+         --lr_scheduler="polynomial" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     if Enable_text_encoder_training:
+         print('Training the text encoder...')
+         if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
+             call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
+         dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
+
+     if Enable_Text_Encoder_Concept_Training:
+         if os.path.exists(CONCEPT_DIR):
+             if os.listdir(CONCEPT_DIR)!=[]:
+                 clear_output()
+                 if resuming=="Yes":
+                     print('Resuming Training...')
+                 print('Training the text encoder on the concept...')
+                 dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
+             else:
+                 clear_output()
+                 if resuming=="Yes":
+                     print('Resuming Training...')
+                 print('No concept images found, skipping concept training...')
+                 Text_Encoder_Concept_Training_Steps=0
+                 time.sleep(8)
+         else:
+             clear_output()
+             if resuming=="Yes":
+                 print('Resuming Training...')
+             print('No concept images found, skipping concept training...')
+             Text_Encoder_Concept_Training_Steps=0
+             time.sleep(8)
+
+     if UNet_Training_Steps!=0:
+         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
+
+     if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions:
+         print('Nothing to do')
+     else:
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+             call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
+             clear_output()
+             if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
+                 clear_output()
+                 print("DONE, the CKPT model is in the session's folder")
+             else:
+                 print("Something went wrong")
+         else:
+             print("Something went wrong")
+
+     return resume
+
+
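+ # Illustrative usage (assumed hyperparameters, not a recommendation from the
+ # original file): trains the UNet for 1500 steps and the text encoder for 350
+ # steps at 512px, saving a checkpoint every 500 steps from step 500.
+ #   resume = dbtrain(Resume_Training=False, UNet_Training_Steps=1500, UNet_Learning_Rate=2e-6,
+ #                    Text_Encoder_Training_Steps=350, Text_Encoder_Concept_Training_Steps=0,
+ #                    Text_Encoder_Learning_Rate=1e-6, Style_Training=False, Resolution=512,
+ #                    MODEL_NAME=MODEL_NAME, SESSION_DIR=SESSION_DIR, INSTANCE_DIR=INSTANCE_DIR,
+ #                    CONCEPT_DIR=CONCEPT_DIR, CAPTIONS_DIR=CAPTIONS_DIR, External_Captions=False,
+ #                    INSTANCE_NAME=INSTANCE_NAME, Session_Name=Session_Name, OUTPUT_DIR=OUTPUT_DIR,
+ #                    PT=PT, resume=resume, Save_Checkpoint_Every_n_Steps=True,
+ #                    Start_saving_from_the_step=500, Save_Checkpoint_Every=500)
+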
+ def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel):
+
+     if Previous_Session_Name!="":
+         print("Loading a previous session model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
+         path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in the previous session")
+             time.sleep(5)
+
+     elif Custom_Path!="":
+         print("Loading model from a custom path")
+         path_to_trained_model=Custom_Path
+
+         while not os.path.exists(path_to_trained_model):
+             print("Wrong Path")
+             time.sleep(5)
+
+     else:
+         print("Loading the trained model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
+         path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in this session")
+             time.sleep(5)
+
+     auth=f"--gradio-auth {User}:{Password}"
+     if User =="" or Password=="":
+         auth=""
+
+     os.chdir('/notebooks')
+     if not os.path.exists('sd_db'):
+         call('mkdir sd_db', shell=True)
+     os.chdir('/notebooks/sd_db')
+     call('git clone --depth 1 --branch main https://github.com/Stability-AI/stablediffusion', shell=True)
+     call('git clone --depth 1 --branch Paperspacedb https://github.com/TheLastBen/stable-diffusion-webui', shell=True)
+     clear_output()
+
+     if not os.path.exists('/notebooks/sd_db/stablediffusion/src/k-diffusion/k_diffusion'):
+         call('mkdir /notebooks/sd_db/stablediffusion/src', shell=True)
+         os.chdir('/notebooks/sd_db/stablediffusion/src')
+         call('git clone -q --depth 1 --no-tags https://github.com/TheLastBen/taming-transformers.git', shell=True)
+         call('git clone -q --depth 1 https://github.com/salesforce/BLIP', shell=True)
+         call('git clone -q --depth 1 https://github.com/sczhou/CodeFormer', shell=True)
+         call('git clone -q --depth 1 --branch master https://github.com/crowsonkb/k-diffusion', shell=True)
+
+     if not os.path.exists('/usr/lib/node_modules/localtunnel'):
+         call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))
+
+     share=''
+     if not Use_localtunnel:
+         share='--share'
+         call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/blocks.py', shell=True)
+
+     else:
+         share=''
+         os.chdir('/notebooks')
+         call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
+         time.sleep(2)
+         call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
+         time.sleep(2)
+         srv= getoutput('cat /notebooks/srvr.txt')
+
+         for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
+             if line.strip().startswith('self.server_name ='):
+                 line = f'            self.server_name = "{srv[8:]}"\n'
+             if line.strip().startswith('self.server_port ='):
+                 line = '            self.server_port = 443\n'
+             if line.strip().startswith('self.protocol = "https"'):
+                 line = '            self.protocol = "https"\n'
+             if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
+                 line = ''
+             if line.strip().startswith('else "http"'):
+                 line = ''
+             sys.stdout.write(line)
+
+         call('rm /notebooks/srv.txt', shell=True)
+         call('rm /notebooks/srvr.txt', shell=True)
+
+     os.chdir('/notebooks/sd_db/stable-diffusion-webui')
+     print('')
+     call('git pull', shell=True)
+
+     clear_output()
+
+     return path_to_trained_model, auth, share
+
+
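+ # Illustrative usage (assumed session name and empty credentials): prepares the
+ # A1111 webui and returns the checkpoint path plus the gradio auth/share flags
+ # used when launching it.
+ #   path_to_trained_model, auth, share = test(Custom_Path="", Previous_Session_Name="",
+ #                                             Session_Name="my_session", User="", Password="",
+ #                                             Use_localtunnel=False)
+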
+ def clean():
+
+     Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+
+     s = widgets.Select(
+         options=Sessions,
+         rows=5,
+         description='',
+         disabled=False
+     )
+
+     out=widgets.Output()
+
+     d = widgets.Button(
+         description='Remove',
+         disabled=False,
+         button_style='warning',
+         tooltip='Remove the selected session',
+         icon='warning'
+     )
+
+     def rem(d):
+         with out:
+             if s.value is not None:
+                 clear_output()
+                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
+                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
+                 if os.path.exists('/notebooks/models/'+s.value):
+                     call('rm -r /notebooks/models/'+s.value, shell=True)
+                 s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+             else:
+                 d.close()
+                 s.close()
+                 clear_output()
+                 print("NOTHING TO REMOVE")
+
+     d.on_click(rem)
+     if s.value is not None:
+         display(s,d,out)
+     else:
+         print("NOTHING TO REMOVE")
+
+
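+ # Illustrative usage: shows a widget listing stored sessions with a Remove
+ # button; deleting a session also deletes its converted model folder.
+ #   clean()
+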
+ def hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
+
+     from slugify import slugify
+     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
+     from huggingface_hub import create_repo
+     from IPython.display import display_markdown
+
+     if(Name_of_your_concept == ""):
+         Name_of_your_concept = Session_Name
+     Name_of_your_concept=Name_of_your_concept.replace(" ","-")
+
+     if hf_token_write =="":
+         print('Your Hugging Face write access token : ')
+         hf_token_write=input()
+
+     hf_token = hf_token_write
+
+     api = HfApi()
+     your_username = api.whoami(token=hf_token)["name"]
+
+     if(Save_concept_to == "Public_Library"):
+         repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
+         #Join the Concepts Library organization if you aren't part of it already
+         call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
+     else:
+         repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
+     output_dir = '/notebooks/models/'+INSTANCE_NAME
+
+     def bar(prg):
+         br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
+         return br
+
+     print("Loading...")
+
+     os.chdir(OUTPUT_DIR)
+     call('rm -r safety_checker feature_extractor .git', shell=True)
+     call('rm model_index.json', shell=True)
+     call('git init', shell=True)
+     call('git lfs install --system --skip-repo', shell=True)
+     call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/runwayml/stable-diffusion-v1-5"', shell=True)
+     call('git config core.sparsecheckout true', shell=True)
+     call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json" > .git/info/sparse-checkout', shell=True)
+     call('git pull origin main', shell=True)
+     call('rm -r .git', shell=True)
+     os.chdir('/notebooks')
+
+     print(bar(1))
+
+     readme_text = f'''---
+ license: creativeml-openrail-m
+ tags:
+ - text-to-image
+ - stable-diffusion
+ ---
+ ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
+
+ Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
+ Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
+ '''
+     #Save the readme to a file
+     readme_file = open("README.md", "w")
+     readme_file.write(readme_text)
+     readme_file.close()
+
+     operations = [
+         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
+         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
+     ]
+     create_repo(repo_id, private=True, token=hf_token)
+
+     api.create_commit(
+         repo_id=repo_id,
+         operations=operations,
+         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
+         token=hf_token
+     )
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/feature_extractor",
+         path_in_repo="feature_extractor",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(4))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/safety_checker",
+         path_in_repo="safety_checker",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(8))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/scheduler",
+         path_in_repo="scheduler",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(9))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/text_encoder",
+         path_in_repo="text_encoder",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(12))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/tokenizer",
+         path_in_repo="tokenizer",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(13))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/unet",
+         path_in_repo="unet",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(21))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/vae",
+         path_in_repo="vae",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(23))
+
+     api.upload_file(
+         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
+         path_in_repo="model_index.json",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     clear_output()
+     print(bar(25))
+
+     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
+     done()
+
+
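+ # Illustrative usage (hypothetical token placeholder, not a real credential):
+ # uploads the trained diffusers model and CKPT to a private repo under your
+ # Hugging Face account.
+ #   hf(Name_of_your_concept="", Save_concept_to="", hf_token_write="hf_xxx",
+ #      INSTANCE_NAME=INSTANCE_NAME, OUTPUT_DIR=OUTPUT_DIR, Session_Name=Session_Name, MDLPTH=MDLPTH)
+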
+ def crop_image(im, size):
+
+     GREEN = "#0F0"
+     BLUE = "#00F"
+     RED = "#F00"
+
+     def focal_point(im, settings):
+         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
+         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
+         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
+
+         pois = []
+
+         weight_pref_total = 0
+         if len(corner_points) > 0:
+             weight_pref_total += settings.corner_points_weight
+         if len(entropy_points) > 0:
+             weight_pref_total += settings.entropy_points_weight
+         if len(face_points) > 0:
+             weight_pref_total += settings.face_points_weight
+
+         corner_centroid = None
+         if len(corner_points) > 0:
+             corner_centroid = centroid(corner_points)
+             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+             pois.append(corner_centroid)
+
+         entropy_centroid = None
+         if len(entropy_points) > 0:
+             entropy_centroid = centroid(entropy_points)
+             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+             pois.append(entropy_centroid)
+
+         face_centroid = None
+         if len(face_points) > 0:
+             face_centroid = centroid(face_points)
+             face_centroid.weight = settings.face_points_weight / weight_pref_total
+             pois.append(face_centroid)
+
+         average_point = poi_average(pois, settings)
+
+         return average_point
+
+     def image_face_points(im, settings):
+
+         np_im = np.array(im)
+         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+         tries = [
+             [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+         ]
+         for t in tries:
+             classifier = cv2.CascadeClassifier(t[0])
+             minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
+             try:
+                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+                                                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+             except Exception:
+                 continue
+
+             if len(faces) > 0:
+                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+                 return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+         return []
+
+     def image_corner_points(im, settings):
+         grayscale = im.convert("L")
+
+         # naive attempt at preventing focal points from collecting at watermarks near the bottom
+         gd = ImageDraw.Draw(grayscale)
+         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+
+         np_im = np.array(grayscale)
+
+         points = cv2.goodFeaturesToTrack(
+             np_im,
+             maxCorners=100,
+             qualityLevel=0.04,
+             minDistance=min(grayscale.width, grayscale.height)*0.06,
+             useHarrisDetector=False,
+         )
+
+         if points is None:
+             return []
+
+         focal_points = []
+         for point in points:
+             x, y = point.ravel()
+             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+
+         return focal_points
+
+     def image_entropy_points(im, settings):
+         landscape = im.height < im.width
+         portrait = im.height > im.width
+         if landscape:
+             move_idx = [0, 2]
+             move_max = im.size[0]
+         elif portrait:
+             move_idx = [1, 3]
+             move_max = im.size[1]
+         else:
+             return []
+
+         e_max = 0
+         crop_current = [0, 0, settings.crop_width, settings.crop_height]
+         crop_best = crop_current
+         while crop_current[move_idx[1]] < move_max:
+             crop = im.crop(tuple(crop_current))
+             e = image_entropy(crop)
+
+             if (e > e_max):
+                 e_max = e
+                 crop_best = list(crop_current)
+
+             crop_current[move_idx[0]] += 4
+             crop_current[move_idx[1]] += 4
+
+         x_mid = int(crop_best[0] + settings.crop_width/2)
+         y_mid = int(crop_best[1] + settings.crop_height/2)
+
+         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
+
+     def image_entropy(im):
+         # greyscale image entropy
+         # band = np.asarray(im.convert("L"))
+         band = np.asarray(im.convert("1"), dtype=np.uint8)
+         hist, _ = np.histogram(band, bins=range(0, 256))
+         hist = hist[hist > 0]
+         return -np.log2(hist / hist.sum()).sum()
+
+     def centroid(pois):
+         x = [poi.x for poi in pois]
+         y = [poi.y for poi in pois]
+         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
+
+     def poi_average(pois, settings):
+         weight = 0.0
+         x = 0.0
+         y = 0.0
+         for poi in pois:
+             weight += poi.weight
+             x += poi.x * poi.weight
+             y += poi.y * poi.weight
+         avg_x = round(weight and x / weight)
+         avg_y = round(weight and y / weight)
+
+         return PointOfInterest(avg_x, avg_y)
+
+     def is_landscape(w, h):
+         return w > h
+
+     def is_portrait(w, h):
+         return h > w
+
+     def is_square(w, h):
+         return w == h
+
+     class PointOfInterest:
+         def __init__(self, x, y, weight=1.0, size=10):
+             self.x = x
+             self.y = y
+             self.weight = weight
+             self.size = size
+
+         def bounding(self, size):
+             return [
+                 self.x - size//2,
+                 self.y - size//2,
+                 self.x + size//2,
+                 self.y + size//2
+             ]
+
+     class Settings:
+         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
+             self.crop_width = crop_width
+             self.crop_height = crop_height
+             self.corner_points_weight = corner_points_weight
+             self.entropy_points_weight = entropy_points_weight
+             self.face_points_weight = face_points_weight
+
+     settings = Settings(
+         crop_width = size,
+         crop_height = size,
+         face_points_weight = 0.9,
+         entropy_points_weight = 0.15,
+         corner_points_weight = 0.5,
+     )
+
+     scale_by = 1
+     if is_landscape(im.width, im.height):
+         scale_by = settings.crop_height / im.height
+     elif is_portrait(im.width, im.height):
+         scale_by = settings.crop_width / im.width
+     elif is_square(im.width, im.height):
+         if is_square(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_landscape(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_portrait(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_height / im.height
+
+     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
+     im_debug = im.copy()
+
+     focus = focal_point(im_debug, settings)
+
+     # take the focal point and turn it into crop coordinates that try to center over the focal
+     # point but then get adjusted back into the frame
+     y_half = int(settings.crop_height / 2)
+     x_half = int(settings.crop_width / 2)
+
+     x1 = focus.x - x_half
+     if x1 < 0:
+         x1 = 0
+     elif x1 + settings.crop_width > im.width:
+         x1 = im.width - settings.crop_width
+
+     y1 = focus.y - y_half
+     if y1 < 0:
+         y1 = 0
+     elif y1 + settings.crop_height > im.height:
+         y1 = im.height - settings.crop_height
+
+     x2 = x1 + settings.crop_width
+     y2 = y1 + settings.crop_height
+
+     crop = [x1, y1, x2, y2]
+
+     results = []
+
+     results.append(im.crop(tuple(crop)))
+
+     return results
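+
+ # Illustrative usage (a sketch, assuming an image file such as "photo.png"
+ # exists): returns a list containing a single size x size crop centered on the
+ # detected focal point (faces weighted highest, then corners, then entropy).
+ #   cropped = crop_image(Image.open("photo.png"), 512)[0]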