TheLastBen committed on
Commit
d5daa45
1 Parent(s): e450b04

Create mainpaperspacev1.py

Files changed (1)
  1. Scripts/mainpaperspacev1.py +1271 -0
Scripts/mainpaperspacev1.py ADDED
@@ -0,0 +1,1271 @@
from IPython.display import clear_output
from subprocess import call, getoutput
from IPython.display import display
import ipywidgets as widgets
import io
from PIL import Image, ImageDraw
import fileinput
import time
import os
from os import listdir
from os.path import isfile
from tqdm import tqdm
import gdown
import random
import sys
import cv2
from io import BytesIO
import requests
from collections import defaultdict
from math import log, sqrt
import numpy as np


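# Set up the Paperspace environment: refresh the latest notebooks, install the
# apt/pip dependencies from TheLastBen's repos, and symlink /models and
# /diffusers into /notebooks. The install is skipped when safetensors is
# already present, unless force_reinstall is set.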
def Deps(force_reinstall):

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        os.chdir('/notebooks')
        if not os.path.exists('Latest_Notebooks'):
            call('mkdir Latest_Notebooks', shell=True)
        else:
            call('rm -r Latest_Notebooks', shell=True)
            call('mkdir Latest_Notebooks', shell=True)
        os.chdir('/notebooks/Latest_Notebooks')
        call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
        call('rm Notebooks.txt', shell=True)
        os.chdir('/notebooks')
        print('Modules and notebooks updated, dependencies already installed')

    else:
        print('Installing the dependencies...')
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))

        os.chdir('/notebooks')
        if not os.path.exists('Latest_Notebooks'):
            call('mkdir Latest_Notebooks', shell=True)
        else:
            call('rm -r Latest_Notebooks', shell=True)
            call('mkdir Latest_Notebooks', shell=True)
        os.chdir('/notebooks/Latest_Notebooks')
        call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
        call('rm Notebooks.txt', shell=True)
        os.chdir('/notebooks')
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        os.chdir('/notebooks')
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()

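# Download a diffusers-format model from the Hugging Face Hub into
# /models/stable-diffusion-custom using a sparse git checkout (skipping
# .safetensors files), authenticating with the saved token when available.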
def downloadmodel_hf(Path_to_HuggingFace):
    import wget

    if os.path.exists('/models/stable-diffusion-custom'):
        call("rm -r /models/stable-diffusion-custom", shell=True)
    clear_output()

    if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
        with open("/notebooks/Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir /models/stable-diffusion-custom", shell=True)
    os.chdir("/models/stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        call("rm -r /models/stable-diffusion-custom/.git", shell=True)
        call("rm -r /models/stable-diffusion-custom/model_index.json", shell=True)
        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')
        os.chdir('/notebooks')
        clear_output()
        done()

    while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)

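# Convert a local .ckpt checkpoint to the diffusers layout under
# /models/stable-diffusion-custom, using the repo's convertodiffv1.py script.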
def downloadmodel_pth(CKPT_Path):
    import wget
    os.chdir('/notebooks')
    clear_output()
    if os.path.exists(str(CKPT_Path)):
        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
        call('unzip -o -q refmdlz', shell=True)
        call('rm -f refmdlz', shell=True)
        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
        clear_output()
        call('python /notebooks/convertodiffv1.py '+CKPT_Path+' /models/stable-diffusion-custom --v1', shell=True)
        call('rm /notebooks/convertodiffv1.py', shell=True)
        call('rm -r /notebooks/refmdl', shell=True)
        if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            clear_output()
            done()
        while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            time.sleep(5)

    else:
        while not os.path.exists(str(CKPT_Path)):
            print('Wrong path, use the colab file explorer to copy the path')
            time.sleep(5)

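# Download a .ckpt from a direct/gdrive link with gdown, then convert it to
# the diffusers layout. The ~1.8 GB size check is presumably there to catch
# incomplete or invalid downloads before attempting conversion.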
def downloadmodel_lnk(CKPT_Link):
    import wget
    os.chdir('/notebooks')
    call("gdown --fuzzy " +CKPT_Link+ " -O /models/model.ckpt", shell=True)

    if os.path.exists('/models/model.ckpt'):
        if os.path.getsize("/models/model.ckpt") > 1810671599:
            wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
            call('unzip -o -q refmdlz', shell=True)
            call('rm -f refmdlz', shell=True)
            wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
            clear_output()
            call('python /notebooks/convertodiffv1.py /models/model.ckpt /models/stable-diffusion-custom --v1', shell=True)
            call('rm /notebooks/convertodiffv1.py', shell=True)
            call('rm -r /notebooks/refmdl', shell=True)
            if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                call('rm -r /models/model.ckpt', shell=True)
                clear_output()
                done()
            else:
                while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    time.sleep(5)
        else:
            while os.path.getsize('/models/model.ckpt') < 1810671599:
                print('Wrong link, check that the link is valid')
                time.sleep(5)

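# Pick the model source in priority order: HF repo path, local .ckpt path,
# direct link; otherwise fall back to the stock v1.5 copy under /datasets.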
def dl(Path_to_HuggingFace, CKPT_Path, CKPT_Link):

    if Path_to_HuggingFace != "":
        downloadmodel_hf(Path_to_HuggingFace)
        MODEL_NAME="/models/stable-diffusion-custom"
    elif CKPT_Path != "":
        downloadmodel_pth(CKPT_Path)
        MODEL_NAME="/models/stable-diffusion-custom"
    elif CKPT_Link != "":
        downloadmodel_lnk(CKPT_Link)
        MODEL_NAME="/models/stable-diffusion-custom"
    else:
        MODEL_NAME="/datasets/stable-diffusion-diffusers/stable-diffusion-v1-5"
        print('Using the original V1.5 model')

    return MODEL_NAME

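# Create or restore a Dreambooth session folder under
# /notebooks/Fast-Dreambooth/Sessions: optionally pull a session archive from
# a gdrive link, offer intermediary checkpoints when the final one is missing,
# and convert a found .ckpt back to diffusers so training can resume.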
def sess(Session_Name, Session_Link_optional, MODEL_NAME):
    import wget, gdown
    os.chdir('/notebooks')
    PT=""

    while Session_Name=="":
        print('Input the Session Name:')
        Session_Name=input("")
    Session_Name=Session_Name.replace(" ","_")

    WORKSPACE='/notebooks/Fast-Dreambooth'

    if Session_Link_optional != "":
        print('Downloading session...')
        if not os.path.exists(str(WORKSPACE+'/Sessions')):
            call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
            time.sleep(1)
        os.chdir(WORKSPACE+'/Sessions')
        gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
        os.chdir(Session_Name)
        call("rm -r instance_images", shell=True)
        call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r concept_images", shell=True)
        call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
        call("rm -r captions", shell=True)
        call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    INSTANCE_NAME=Session_Name
    OUTPUT_DIR="/models/"+Session_Name
    SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
    CONCEPT_DIR=SESSION_DIR+"/concept_images"
    INSTANCE_DIR=SESSION_DIR+"/instance_images"
    CAPTIONS_DIR=SESSION_DIR+'/captions'
    MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
    resume=False

    if os.path.exists(str(SESSION_DIR)):
        mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
        if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):

            def f(n):
                k=0
                for i in mdls:
                    if k==n:
                        call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
                    k=k+1

            k=0
            print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number (000 to skip):\n')

            for i in mdls:
                print(str(k)+'- '+i)
                k=k+1
            n=input()
            while int(n)>k-1:
                n=input()
            if n!="000":
                f(int(n))
                print('Using the model '+ mdls[int(n)]+" ...")
                time.sleep(8)
                clear_output()
            else:
                print('Skipping the intermediary checkpoints.')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous model, using the original model or the custom downloaded model')
        if MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed to uploading instance images')

    elif os.path.exists(MDLPTH):
        print('Session found, loading the trained model ...')
        wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/refmdlz')
        call('unzip -o -q refmdlz', shell=True, stdout=open('/dev/null', 'w'))
        call('rm -f refmdlz', shell=True, stdout=open('/dev/null', 'w'))
        wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv1.py')
        call('python /notebooks/convertodiffv1.py '+MDLPTH+' '+OUTPUT_DIR+' --v1', shell=True)
        call('rm /notebooks/convertodiffv1.py', shell=True)
        call('rm -r /notebooks/refmdl', shell=True)

        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            resume=True
            clear_output()
            print('Session loaded.')
        else:
            print('Conversion error, if the error persists, remove the CKPT file from the current session folder')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume


def done():
    done = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check'
    )
    display(done)

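# Instance-image upload UI: shows a FileUpload widget when no folder path is
# given, otherwise imports images straight from IMAGES_FOLDER_OPTIONAL.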
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):

    uploader = widgets.FileUpload(description="Choose images", accept='image/*', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )

    out=widgets.Output()

    def up(Upload):
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
            done()

    if IMAGES_FOLDER_OPTIONAL=="":
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
        done()

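# Copy the chosen images (and any .txt captions) into the session folders,
# optionally smart-cropping them to Crop_size with crop_image() and renaming
# files to remove spaces.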
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):

    if os.path.exists(CAPTIONS_DIR+"off"):
        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
        time.sleep(2)

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL != "":
        if any(file.endswith('.txt') for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                if file.size != (Crop_size, Crop_size):
                    # crop_image returns a list whose first element is the cropped image
                    image = crop_image(file, Crop_size)[0]
                    if extension.upper() in ("JPG", "JPEG"):
                        image.save(new_path_with_file, format="JPEG", quality=100)
                    else:
                        image.save(new_path_with_file, format=extension.upper())
                else:
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)

        else:
            # copy the entire folder in one go
            call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL == "":
        for filename, file in uploader.value.items():
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(file['content'].decode())
        up=[(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1]!="txt"]
        if Crop_images:
            for filename, file_info in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                img = Image.open(io.BytesIO(file_info['content']))
                extension = filename.split(".")[-1]

                if extension.upper() in ("JPG", "JPEG"):
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)[0]
                    if extension.upper() in ("JPG", "JPEG"):
                        image.save(new_path_with_file, format="JPEG", quality=100)
                    else:
                        image.save(new_path_with_file, format=extension.upper())

        else:
            for filename, file_info in tqdm(uploader.value.items(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                img = Image.open(io.BytesIO(file_info['content']))
                extension = filename.split(".")[-1]

                if extension.upper() in ("JPG", "JPEG"):
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    if ren:
        i=0
        for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
            extension = filename.split(".")[-1]
            new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
            call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
            i=i+1

    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')

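# Simple caption editor: pick an instance image from a list, view a 420px
# preview, and edit/save the matching .txt file in CAPTIONS_DIR.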
def caption(CAPTIONS_DIR, INSTANCE_DIR):

    if os.path.exists(CAPTIONS_DIR+"off"):
        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
        time.sleep(2)

    paths=""
    out=""
    widgets_l=""
    clear_output()

    def Caption(path):
        if path!="Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext=os.path.splitext(os.path.basename(path))[-1][1:]
            if ext.lower()=="jpg":
                ext="JPEG"

            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                text = ""

            img=Image.open(os.path.join(INSTANCE_DIR,path))
            img=img.resize((420, 420))
            image_bytes = BytesIO()
            # low quality keeps the preview payload small
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            img= image_data
            image = widgets.Image(
                value=img,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)

    out = widgets.Output()

    def click(change):
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))

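# Run the Dreambooth training loop: optionally train the text encoder (plus a
# separate pass on concept images), then the UNet, via the repo's
# train_dreambooth_pps.py, and finally convert the result to a single .ckpt.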
def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):

    if resume and not Resume_Training:
        print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model. yes or no?')
        while True:
            ansres=input('')
            if ansres=='no':
                Resume_Training = True
                break
            elif ansres=='yes':
                Resume_Training = False
                resume= False
                break

    while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    if os.path.exists(CAPTIONS_DIR+"off"):
        call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
        time.sleep(2)

    MODELT_NAME=MODEL_NAME

    Seed=random.randint(1, 999999)

    Style=""
    if Style_Training:
        Style="--Style"

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"

    GCUNET="--gradient_checkpointing"
    if Resolution<=640:
        GCUNET=""

    resuming=""
    if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        MODELT_NAME=OUTPUT_DIR
        print('Resuming Training...')
        resuming="Yes"
    elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
        print('Previous model not found, training a new model...')
        MODELT_NAME=MODEL_NAME
        while MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)

    trnonltxt=""
    if UNet_Training_Steps==0:
        trnonltxt="--train_only_text_encoder"

    Enable_text_encoder_training= True
    Enable_Text_Encoder_Concept_Training= True

    if Text_Encoder_Training_Steps==0 or External_Captions:
        Enable_text_encoder_training= False
    else:
        stptxt=Text_Encoder_Training_Steps

    if Text_Encoder_Concept_Training_Steps==0:
        Enable_Text_Encoder_Concept_Training= False
    else:
        stptxtc=Text_Encoder_Concept_Training_Steps

    if Save_Checkpoint_Every is None:
        Save_Checkpoint_Every=1
    stp=0
    if Start_saving_from_the_step is None:
        Start_saving_from_the_step=0
    if Start_saving_from_the_step < 200:
        Start_saving_from_the_step=Save_Checkpoint_Every
    stpsv=Start_saving_from_the_step
    if Save_Checkpoint_Every_n_Steps:
        stp=Save_Checkpoint_Every

    def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+trnonltxt+' \
        --train_text_encoder \
        --image_captions_filename \
        --dump_only_text_encoder \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution=512 \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 --gradient_checkpointing \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="polynomial" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
        clear_output()
        if resuming=="Yes":
            print('Resuming Training...')
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
        '+Style+' \
        '+extrnlcptn+' \
        --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
        --image_captions_filename \
        --train_only_unet \
        --Session_dir='+SESSION_DIR+' \
        --save_starting_step='+str(stpsv)+' \
        --save_n_steps='+str(stp)+' \
        --pretrained_model_name_or_path='+MODELT_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --instance_prompt='+PT+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GCUNET+' \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="polynomial" \
        --lr_warmup_steps=0 \
        --max_train_steps='+str(Training_Steps), shell=True)

    if Enable_text_encoder_training:
        print('Training the text encoder...')
        if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
            call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
        dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)

    if Enable_Text_Encoder_Concept_Training:
        if os.path.exists(CONCEPT_DIR) and os.listdir(CONCEPT_DIR)!=[]:
            clear_output()
            if resuming=="Yes":
                print('Resuming Training...')
            print('Training the text encoder on the concept...')
            dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
        else:
            clear_output()
            if resuming=="Yes":
                print('Resuming Training...')
            print('No concept images found, skipping concept training...')
            Text_Encoder_Concept_Training_Steps=0
            time.sleep(8)

    if UNet_Training_Steps!=0:
        train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)

    if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and External_Captions:
        print('Nothing to do')
    else:
        if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
            call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
            clear_output()
            if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
                clear_output()
                print("DONE, the CKPT model is in the session's folder")
            else:
                print("Something went wrong")
        else:
            print("Something went wrong")

    return resume

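# Launch the AUTOMATIC1111 web UI against a trained .ckpt (current session,
# a previous session, or a custom path), wiring gradio through localtunnel
# unless --share is used, and return the command-line flags for the UI.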
def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel):

    if Previous_Session_Name!="":
        print("Loading a previous session model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
        path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in the previous session")
            time.sleep(5)

    elif Custom_Path!="":
        print("Loading model from a custom path")
        path_to_trained_model=Custom_Path

        while not os.path.exists(path_to_trained_model):
            print("Wrong Path")
            time.sleep(5)

    else:
        print("Loading the trained model")
        mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
        path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'

        while not os.path.exists(path_to_trained_model):
            print("There is no trained model in this session")
            time.sleep(5)

    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    os.chdir('/notebooks')
    if not os.path.exists('/notebooks/sd/stablediffusion'):
        call('wget -q -O sd_rep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_rep.tar.zst', shell=True)
        call('tar --zstd -xf sd_rep.tar.zst', shell=True)
        call('rm sd_rep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.chdir('/notebooks')
    clear_output()

    if not os.path.exists('/usr/lib/node_modules/localtunnel'):
        call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))

    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    if not Use_localtunnel:
        share='--share'
    else:
        share=''
        os.chdir('/notebooks')
        call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
        time.sleep(2)
        call("grep -o 'https[^ ]*' /notebooks/srv.txt >srvr.txt", shell=True)
        time.sleep(2)
        srv= getoutput('cat /notebooks/srvr.txt')

        for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
            if line.strip().startswith('self.server_name ='):
                line = f'            self.server_name = "{srv[8:]}"\n'
            if line.strip().startswith('self.server_port ='):
                line = '            self.server_port = 443\n'
            if line.strip().startswith('self.protocol = "https"'):
                line = '            self.protocol = "https"\n'
            if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
                line = ''
            if line.strip().startswith('else "http"'):
                line = ''
            sys.stdout.write(line)

        call('rm /notebooks/srv.txt', shell=True)
        call('rm /notebooks/srvr.txt', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
    call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
    call("sed -i 's@/content/gdrive/MyDrive/sd/stablediffusion@/notebooks/sd/stablediffusion@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    configf="--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share

    return configf

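# Session manager widget: select a session under Fast-Dreambooth/Sessions and
# delete it (and its converted model folder) from storage.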
def clean():

    Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out=widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

            else:
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s,d,out)
    else:
        print("NOTHING TO REMOVE")

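# Upload the trained diffusers model to the Hugging Face Hub: restore the
# safety_checker/feature_extractor from the runwayml v1.5 repo, write a model
# card, create the repo, and push the .ckpt plus each component folder.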
def hf(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):

    from slugify import slugify
    from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
    from huggingface_hub import create_repo
    from IPython.display import display_markdown

    if(Name_of_your_concept == ""):
        Name_of_your_concept = Session_Name
    Name_of_your_concept=Name_of_your_concept.replace(" ","-")

    if hf_token_write =="":
        print('Your Hugging Face write access token : ')
        hf_token_write=input()

    hf_token = hf_token_write

    api = HfApi()
    your_username = api.whoami(token=hf_token)["name"]

    if(Save_concept_to == "Public_Library"):
        repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
        #Join the Concepts Library organization if you aren't part of it already
        call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
    else:
        repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
    output_dir = '/notebooks/models/'+INSTANCE_NAME

    def bar(prg):
        br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
        return br

    print("Loading...")

    os.chdir(OUTPUT_DIR)
    call('rm -r safety_checker feature_extractor .git', shell=True)
    call('rm model_index.json', shell=True)
    call('git init', shell=True)
    call('git lfs install --system --skip-repo', shell=True)
    call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/runwayml/stable-diffusion-v1-5"', shell=True)
    call('git config core.sparsecheckout true', shell=True)
    call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json" > .git/info/sparse-checkout', shell=True)
    call('git pull origin main', shell=True)
    call('rm -r .git', shell=True)
    os.chdir('/notebooks')

    print(bar(1))

    readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---
### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook

Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb)
'''
    #Save the readme to a file
    with open("README.md", "w") as readme_file:
        readme_file.write(readme_text)

    operations = [
        CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
        CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
    ]
    create_repo(repo_id, private=True, token=hf_token)

    api.create_commit(
        repo_id=repo_id,
        operations=operations,
        commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
        token=hf_token
    )

    # push each component folder, updating the progress bar after each one
    for folder, prg in (("feature_extractor", 4), ("safety_checker", 8), ("scheduler", 9),
                        ("text_encoder", 12), ("tokenizer", 13), ("unet", 21), ("vae", 23)):
        api.upload_folder(
            folder_path=OUTPUT_DIR+"/"+folder,
            path_in_repo=folder,
            repo_id=repo_id,
            token=hf_token
        )
        clear_output()
        print(bar(prg))

    api.upload_file(
        path_or_fileobj=OUTPUT_DIR+"/model_index.json",
        path_in_repo="model_index.json",
        repo_id=repo_id,
        token=hf_token
    )

    clear_output()
    print(bar(25))

    print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
    done()

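# Focal-point cropping (similar to AUTOMATIC1111's autocrop helper): score
# corner features, entropy, and detected faces, average the weighted points of
# interest, and crop a size x size square centered as close to that focus as
# the frame allows. Returns a single-element list with the cropped image.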
def crop_image(im, size):

    GREEN = "#0F0"
    BLUE = "#00F"
    RED = "#F00"

    def focal_point(im, settings):
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point

    def image_face_points(im, settings):

        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []

    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points

    def image_entropy_points(im, settings):
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

    def image_entropy(im):
        # greyscale image entropy
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))

    def poi_average(pois, settings):
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)

    def is_landscape(w, h):
        return w > h

    def is_portrait(w, h):
        return h > w

    def is_square(w, h):
        return w == h

    class PointOfInterest:
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results