TheLastBen committed on
Commit 9b9fb8f
1 Parent(s): 8b18963

Update Scripts/mainpaperspacev2_311.py

Files changed (1)
  1. Scripts/mainpaperspacev2_311.py +1346 -0
Scripts/mainpaperspacev2_311.py CHANGED
@@ -0,0 +1,1346 @@
+ from IPython.display import clear_output
+ from subprocess import call, getoutput
+ from IPython.display import display
+ import ipywidgets as widgets
+ import io
+ from PIL import Image, ImageDraw, ImageOps
+ import fileinput
+ import time
+ import os
+ from os import listdir
+ from os.path import isfile
+ from tqdm import tqdm
+ import gdown
+ import random
+ import sys
+ import cv2
+ from io import BytesIO
+ import requests
+ from collections import defaultdict
+ from math import log, sqrt
+ import numpy as np
+ from subprocess import check_output
+ import six
+ import re
+
+ from urllib.parse import urlparse, parse_qs, unquote
+ from urllib.request import urlopen, Request
+ import tempfile
+
+
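+ # Installs the apt/pip dependencies and refreshes the notebooks; when gradio is
+ # already installed and force_reinstall is False, only the notebooks are updated.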
+ def Deps(force_reinstall):
+
+     if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
+         ntbk()
+         os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+         os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+         print('Modules and notebooks updated, dependencies already installed')
+
+     else:
+         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
+         ntbk()
+         if not os.path.exists('/models'):
+             call('mkdir /models', shell=True)
+         if not os.path.exists('/notebooks/models'):
+             call('ln -s /models /notebooks', shell=True)
+         if os.path.exists('/deps'):
+             call("rm -r /deps", shell=True)
+         call('mkdir /deps', shell=True)
+         if not os.path.exists('cache'):
+             call('mkdir cache', shell=True)
+         os.chdir('/deps')
+         call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
+         call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
+         depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
+         call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+         os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+         call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
+         if not os.path.exists('/notebooks/diffusers'):
+             call('ln -s /diffusers /notebooks', shell=True)
+         call("rm -r /deps", shell=True)
+         os.chdir('/notebooks')
+         clear_output()
+
+     done()
+
+
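+ # Streams the file at `url` to `dst`, showing a tqdm progress bar sized from the
+ # Content-Length header when the server provides one.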
+ def depsinst(url, dst):
+     file_size = None
+     req = Request(url, headers={"User-Agent": "torch.hub"})
+     u = urlopen(req)
+     meta = u.info()
+     if hasattr(meta, 'getheaders'):
+         content_length = meta.getheaders("Content-Length")
+     else:
+         content_length = meta.get_all("Content-Length")
+     if content_length is not None and len(content_length) > 0:
+         file_size = int(content_length[0])
+
+     with tqdm(total=file_size, disable=False, mininterval=0.5,
+               bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
+         with open(dst, "wb") as f:
+             while True:
+                 buffer = u.read(8192)
+                 if len(buffer) == 0:
+                     break
+                 f.write(buffer)
+                 pbar.update(len(buffer))
+
+
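+ # Re-downloads the notebooks listed in Notebooks.txt into /notebooks/Latest_Notebooks.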
+ def ntbk():
+
+     os.chdir('/notebooks')
+     if not os.path.exists('Latest_Notebooks'):
+         call('mkdir Latest_Notebooks', shell=True)
+     else:
+         call('rm -r Latest_Notebooks', shell=True)
+         call('mkdir Latest_Notebooks', shell=True)
+     os.chdir('/notebooks/Latest_Notebooks')
+     call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
+     call('rm Notebooks.txt', shell=True)
+     os.chdir('/notebooks')
+
+
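+ # Sparse-clones a diffusers-format SD v2 model from a Hugging Face repo into
+ # /models/stable-diffusion-custom, using the saved token for private repos.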
+ def downloadmodel_hfv2(Path_to_HuggingFace):
+     import wget
+
+     if os.path.exists('/models/stable-diffusion-custom'):
+         call("rm -r /models/stable-diffusion-custom", shell=True)
+     clear_output()
+
+     if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
+         with open("/notebooks/Fast-Dreambooth/token.txt") as f:
+             token = f.read()
+         authe=f'https://USER:{token}@'
+     else:
+         authe="https://"
+
+     clear_output()
+     call("mkdir /models/stable-diffusion-custom", shell=True)
+     os.chdir("/models/stable-diffusion-custom")
+     call("git init", shell=True)
+     call("git lfs install --system --skip-repo", shell=True)
+     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
+     call("git config core.sparsecheckout true", shell=True)
+     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
+     call("git pull origin main", shell=True)
+     if os.path.exists('unet/diffusion_pytorch_model.bin'):
+         call("rm -r .git", shell=True)
+         os.chdir('/notebooks')
+         clear_output()
+         done()
+     while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+         print('Check the link you provided')
+         os.chdir('/notebooks')
+         time.sleep(5)
+
+
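+ # Converts a local ckpt/safetensors checkpoint to diffusers format, first detecting
+ # with det.py whether it is a V2.1-512px or V2.1-768px model.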
+ def downloadmodel_path_v2(MODEL_PATH):
+
+     modelname=os.path.basename(MODEL_PATH)
+     sftnsr=""
+     if modelname.split('.')[-1]=='safetensors':
+         sftnsr="--from_safetensors"
+
+     import wget
+     os.chdir('/models')
+     clear_output()
+     if os.path.exists(str(MODEL_PATH)):
+
+         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
+         print('Detecting model version...')
+         Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+MODEL_PATH, shell=True).decode('utf-8').replace('\n', '')
+         clear_output()
+         print(''+Custom_Model_Version+' Detected')
+         call('rm det.py', shell=True)
+
+         if Custom_Model_Version=='V2.1-512px':
+             call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
+             call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)
+
+         elif Custom_Model_Version=='V2.1-768px':
+             call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
+             call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
+
+         call('rm convertodiffv2.py', shell=True)
+         if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             clear_output()
+             done()
+         while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             print('Conversion error')
+             os.chdir('/workspace')
+             time.sleep(5)
+     else:
+         while not os.path.exists(str(MODEL_PATH)):
+             print('Wrong path, use the file explorer to copy the path')
+             os.chdir('/workspace')
+             time.sleep(5)
+
+
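+ # Downloads a checkpoint from a direct, CivitAI or Google Drive link, resolves the
+ # real filename, then detects the model version and converts it to diffusers format.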
+ def downloadmodel_link_v2(MODEL_LINK):
+
+     import wget
+     import gdown
+     from gdown.download import get_url_from_gdrive_confirmation
+
+     def getsrc(url):
+         parsed_url = urlparse(url)
+         if parsed_url.netloc == 'civitai.com':
+             src='civitai'
+         elif parsed_url.netloc == 'drive.google.com':
+             src='gdrive'
+         elif parsed_url.netloc == 'huggingface.co':
+             src='huggingface'
+         else:
+             src='others'
+         return src
+
+     src=getsrc(MODEL_LINK)
+
+     def get_name(url, gdrive):
+         if not gdrive:
+             response = requests.get(url, allow_redirects=False)
+             if "Location" in response.headers:
+                 redirected_url = response.headers["Location"]
+                 quer = parse_qs(urlparse(redirected_url).query)
+                 if "response-content-disposition" in quer:
+                     disp_val = quer["response-content-disposition"][0].split(";")
+                     for vals in disp_val:
+                         if vals.strip().startswith("filename="):
+                             filenm=unquote(vals.split("=", 1)[1].strip())
+                             return filenm.replace("\"","")
+         else:
+             headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
+             lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
+             res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
+             res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
+             content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
+             filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
+             return filenm
+
+     if src=='civitai':
+         modelname=get_name(MODEL_LINK, False)
+     elif src=='gdrive':
+         modelname=get_name(MODEL_LINK, True)
+     else:
+         modelname=os.path.basename(MODEL_LINK)
+
+     sftnsr=""
+     if modelname.split('.')[-1]!='safetensors':
+         modelnm="model.ckpt"
+     else:
+         modelnm="model.safetensors"
+         sftnsr="--from_safetensors"
+
+     os.chdir('/models')
+     call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelnm, shell=True)
+
+     if os.path.exists(modelnm):
+         if os.path.getsize(modelnm) > 1810671599:
+
+             wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
+             print('Detecting model version...')
+             Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+modelnm, shell=True).decode('utf-8').replace('\n', '')
+             clear_output()
+             print(''+Custom_Model_Version+' Detected')
+             call('rm det.py', shell=True)
+
+             if Custom_Model_Version=='V2.1-512px':
+                 call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
+                 call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)
+
+             elif Custom_Model_Version=='V2.1-768px':
+                 call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
+                 call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
+             call('rm convertodiffv2.py', shell=True)
+
+             if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 call('rm '+modelnm, shell=True)
+                 os.chdir('/workspace')
+                 clear_output()
+                 done()
+             else:
+                 while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                     print('Conversion error')
+                     os.chdir('/workspace')
+                     time.sleep(5)
+         else:
+             while os.path.getsize(modelnm) < 1810671599:
+                 print('Wrong link, check that the link is valid')
+                 os.chdir('/workspace')
+                 time.sleep(5)
+
+
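+ # Dispatches to the right download helper and returns the path of the model to train from.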
+ def dlsv2(Path_to_HuggingFace, Model_Path, Model_Link, Model_Version):
+
+     if Path_to_HuggingFace != "":
+         downloadmodel_hfv2(Path_to_HuggingFace)
+         MODEL_NAMEv2="/models/stable-diffusion-custom"
+     elif Model_Path !="":
+         downloadmodel_path_v2(Model_Path)
+         MODEL_NAMEv2="/models/stable-diffusion-custom"
+     elif Model_Link !="":
+         downloadmodel_link_v2(Model_Link)
+         MODEL_NAMEv2="/models/stable-diffusion-custom"
+     else:
+         if Model_Version=="512":
+             MODEL_NAMEv2="/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base"
+             print('Using the original V2-512 model')
+         elif Model_Version=="768":
+             MODEL_NAMEv2="/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"
+             print('Using the original V2-768 model')
+         else:
+             MODEL_NAMEv2=""
+             print('Wrong model version')
+
+     return MODEL_NAMEv2
+
+
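+ # Creates or restores a training session: optionally downloads it from a link, unzips
+ # its images/captions, and converts any existing trained CKPT back to diffusers format
+ # so training can resume from it.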
+ def sessv2(Session_Name, Session_Link_optional, MODEL_NAMEv2):
+     import gdown
+     import wget
+     os.chdir('/notebooks')
+     PT=""
+
+     while Session_Name=="":
+         print('Input the Session Name:')
+         Session_Name=input("")
+     Session_Name=Session_Name.replace(" ","_")
+
+     WORKSPACE='/notebooks/Fast-Dreambooth'
+
+     if Session_Link_optional != "":
+         print('Downloading session...')
+         if not os.path.exists(str(WORKSPACE+'/Sessions')):
+             call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
+             time.sleep(1)
+         os.chdir(WORKSPACE+'/Sessions')
+         gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
+         os.chdir(Session_Name)
+         call("rm -r instance_images", shell=True)
+         call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r concept_images", shell=True)
+         call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r captions", shell=True)
+         call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         clear_output()
+
+     INSTANCE_NAME=Session_Name
+     OUTPUT_DIR="/models/"+Session_Name
+     SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
+     CONCEPT_DIR=SESSION_DIR+"/concept_images"
+     INSTANCE_DIR=SESSION_DIR+"/instance_images"
+     CAPTIONS_DIR=SESSION_DIR+'/captions'
+     MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
+     resumev2=False
+
+     if os.path.exists(str(SESSION_DIR)):
+         mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
+         if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
+
+             def f(n):
+                 k=0
+                 for i in mdls:
+                     if k==n:
+                         call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
+                     k=k+1
+
+             k=0
+             print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')
+
+             for i in mdls:
+                 print(str(k)+'- '+i)
+                 k=k+1
+             n=input()
+             while int(n)>k-1:
+                 n=input()
+             if n!="000":
+                 f(int(n))
+                 print('Using the model '+ mdls[int(n)]+" ...")
+                 time.sleep(4)
+             else:
+                 print('Skipping the intermediary checkpoints.')
+
+     if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
+         print('Loading session with no previous model, using the original model or the custom downloaded model')
+         if MODEL_NAMEv2=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session Loaded, proceed to uploading instance images')
+
+     elif os.path.exists(MDLPTH):
+         print('Session found, loading the trained model ...')
+
+         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
+         print('Detecting model version...')
+         Model_Version=check_output('python det.py --MODEL_PATH '+MDLPTH, shell=True).decode('utf-8').replace('\n', '')
+         clear_output()
+         print(''+Model_Version+' Detected')
+         call('rm det.py', shell=True)
+
+         if Model_Version=='V2.1-512px':
+             call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
+             call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
+         elif Model_Version=='V2.1-768px':
+             call('wget -q -O convertodiff.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
+             call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
+         clear_output()
+         call('rm convertodiff.py', shell=True)
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+             resumev2=True
+             clear_output()
+             print('Session loaded.')
+         else:
+             print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
+
+     elif not os.path.exists(str(SESSION_DIR)):
+         call('mkdir -p '+INSTANCE_DIR, shell=True)
+         print('Creating session...')
+         if MODEL_NAMEv2=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session created, proceed to uploading instance images')
+
+     return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
+
+
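+ # Shows a disabled "Done!" button as a completion indicator.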
+ def done():
+     done = widgets.Button(
+         description='Done!',
+         disabled=True,
+         button_style='success',
+         tooltip='',
+         icon='check'
+     )
+     display(done)
+
+
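+ # Displays the file-upload widget (or uses IMAGES_FOLDER_OPTIONAL directly)
+ # and hands the chosen files to upld().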
+ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
+
+     if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
+
+     uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
+     Upload = widgets.Button(
+         description='Upload',
+         disabled=False,
+         button_style='info',
+         tooltip='Click to upload the chosen instance images',
+         icon=''
+     )
+
+     def up(Upload):
+         with out:
+             uploader.close()
+             Upload.close()
+             upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+             done()
+     out=widgets.Output()
+
+     if IMAGES_FOLDER_OPTIONAL=="":
+         Upload.on_click(up)
+         display(uploader, Upload, out)
+     else:
+         upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+         done()
+
+
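+ # Copies the instance images and .txt captions into the session, optionally
+ # cropping images to Crop_size and renaming files to a uniform scheme.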
+ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
+
+     if Remove_existing_instance_images:
+         if os.path.exists(str(INSTANCE_DIR)):
+             call("rm -r " +INSTANCE_DIR, shell=True)
+         if os.path.exists(str(CAPTIONS_DIR)):
+             call("rm -r " +CAPTIONS_DIR, shell=True)
+
+     if not os.path.exists(str(INSTANCE_DIR)):
+         call("mkdir -p " +INSTANCE_DIR, shell=True)
+     if not os.path.exists(str(CAPTIONS_DIR)):
+         call("mkdir -p " +CAPTIONS_DIR, shell=True)
+
+     if IMAGES_FOLDER_OPTIONAL !="":
+
+         if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
+             call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)
+
+         if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
+             call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
+         if Crop_images:
+             os.chdir(str(IMAGES_FOLDER_OPTIONAL))
+             call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+             os.chdir('/notebooks')
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
+                 file=file.convert("RGB")
+                 file=ImageOps.exif_transpose(file)
+                 width, height = file.size
+                 if file.size !=(Crop_size, Crop_size):
+                     image=crop_image(file, Crop_size)
+                     if extension.upper()=="JPG":
+                         image[0].save(new_path_with_file, format="JPEG", quality = 100)
+                     else:
+                         image[0].save(new_path_with_file, format=extension.upper())
+                 else:
+                     call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
+
+         else:
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
+
+     elif IMAGES_FOLDER_OPTIONAL =="":
+         up=""
+         for file in uploader.value:
+             filename = file['name']
+             if filename.split(".")[-1]=="txt":
+                 with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
+                     f.write(bytes(file['content']).decode())
+         up=[file for file in uploader.value if not file['name'].endswith('.txt')]
+         if Crop_images:
+             for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 filename = file['name']
+                 img = Image.open(io.BytesIO(file['content']))
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+                 img=img.convert("RGB")
+                 img=ImageOps.exif_transpose(img)
+
+                 if extension.upper()=="JPG":
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(new_path_with_file)
+                 width, height = file.size
+                 if file.size !=(Crop_size, Crop_size):
+                     image=crop_image(file, Crop_size)
+                     if extension.upper()=="JPG":
+                         image[0].save(new_path_with_file, format="JPEG", quality = 100)
+                     else:
+                         image[0].save(new_path_with_file, format=extension.upper())
+
+         else:
+             for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 filename = file['name']
+                 img = Image.open(io.BytesIO(file['content']))
+                 img=img.convert("RGB")
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+
+                 if extension.upper()=="JPG":
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+     if ren:
+         i=0
+         for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
+             extension = filename.split(".")[-1]
+             identifier=filename.split(".")[0]
+             new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
+             call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
+             i=i+1
+
+     os.chdir(INSTANCE_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir(CAPTIONS_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir('/notebooks')
+
+
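+ # Simple ipywidgets caption editor: select an instance image, edit its .txt caption,
+ # and save it back to CAPTIONS_DIR.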
+ def caption(CAPTIONS_DIR, INSTANCE_DIR):
+
+     paths=""
+     out=""
+     widgets_l=""
+     clear_output()
+     def Caption(path):
+         if path!="Select an instance image to caption":
+
+             name = os.path.splitext(os.path.basename(path))[0]
+             ext=os.path.splitext(os.path.basename(path))[-1][1:]
+             if ext=="jpg" or ext=="JPG":
+                 ext="JPEG"
+
+             if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+             else:
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write("")
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+
+             img=Image.open(os.path.join(INSTANCE_DIR,path))
+             img=img.convert("RGB")
+             img=img.resize((420, 420))
+             image_bytes = BytesIO()
+             img.save(image_bytes, format=ext, quality=10)
+             image_bytes.seek(0)
+             image_data = image_bytes.read()
+             img= image_data
+             image = widgets.Image(
+                 value=img,
+                 width=420,
+                 height=420
+             )
+             text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
+
+             def update_text(text):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write(text)
+
+             button = widgets.Button(description='Save', button_style='success')
+             button.on_click(lambda b: update_text(text_area.value))
+
+             return widgets.VBox([widgets.HBox([image, text_area, button])])
+
+     paths = os.listdir(INSTANCE_DIR)
+     widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
+
+     out = widgets.Output()
+
+     def click(change):
+         with out:
+             out.clear_output()
+             display(Caption(change.new))
+
+     widgets_l.observe(click, names='value')
+     display(widgets.HBox([widgets_l, out]))
+
+
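+ # Main v2 training entry point: trains the text encoder and/or the UNet with the
+ # chosen step counts and learning rates, then converts the result to a CKPT file
+ # in the session folder.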
+ def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
+
+     if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
+     if os.path.exists(CONCEPT_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+CONCEPT_DIR+'/.ipynb_checkpoints', shell=True)
+     if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)
+
+     if resumev2 and not Resume_Training:
+         print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model. yes or no?')
+         while True:
+             ansres=input('')
+             if ansres=='no':
+                 Resume_Training = True
+                 resumev2= False
+                 break
+             elif ansres=='yes':
+                 Resume_Training = False
+                 resumev2= False
+                 break
+
+     while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
+         print('No model found, use the "Model Download" cell to download a model.')
+         time.sleep(5)
+
+     MODELT_NAME=MODEL_NAMEv2
+
+     Seed=random.randint(1, 999999)
+
+     ofstnse=""
+     if Offset_Noise:
+         ofstnse="--offset_noise"
+
+     extrnlcptn=""
+     if External_Captions:
+         extrnlcptn="--external_captions"
+
+     precision="fp16"
+
+     resuming=""
+     if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         MODELT_NAME=OUTPUT_DIR
+         print('Resuming Training...')
+         resuming="Yes"
+     elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         print('Previous model not found, training a new model...')
+         MODELT_NAME=MODEL_NAMEv2
+         while MODEL_NAMEv2=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+             time.sleep(5)
+
+     trnonltxt=""
+     if UNet_Training_Steps==0:
+         trnonltxt="--train_only_text_encoder"
+
+     Enable_text_encoder_training= True
+     Enable_Text_Encoder_Concept_Training= True
+
+     if Text_Encoder_Training_Steps==0:
+         Enable_text_encoder_training= False
+     else:
+         stptxt=Text_Encoder_Training_Steps
+
+     if Text_Encoder_Concept_Training_Steps==0:
+         Enable_Text_Encoder_Concept_Training= False
+     else:
+         stptxtc=Text_Encoder_Concept_Training_Steps
+
+     if Save_Checkpoint_Every is None:
+         Save_Checkpoint_Every=1
+     stp=0
+     if Start_saving_from_the_step is None:
+         Start_saving_from_the_step=0
+     if (Start_saving_from_the_step < 200):
+         Start_saving_from_the_step=Save_Checkpoint_Every
+     stpsv=Start_saving_from_the_step
+     if Save_Checkpoint_Every_n_Steps:
+         stp=Save_Checkpoint_Every
+
+     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+trnonltxt+' \
+         '+extrnlcptn+' \
+         '+ofstnse+' \
+         --train_text_encoder \
+         --image_captions_filename \
+         --dump_only_text_encoder \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --captions_dir='+CAPTIONS_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution='+str(Resolution)+' \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 --gradient_checkpointing \
+         --use_8bit_adam \
+         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
+         --lr_scheduler="linear" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
+         clear_output()
+         if resuming=="Yes":
+             print('Resuming Training...')
+         print('Training the UNet...')
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+extrnlcptn+' \
+         '+ofstnse+' \
+         --image_captions_filename \
+         --train_only_unet \
+         --Session_dir='+SESSION_DIR+' \
+         --save_starting_step='+str(stpsv)+' \
+         --save_n_steps='+str(stp)+' \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --captions_dir='+CAPTIONS_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution='+str(Resolution)+' \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 --gradient_checkpointing \
+         --use_8bit_adam \
+         --learning_rate='+str(UNet_Learning_Rate)+' \
+         --lr_scheduler="linear" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     if Enable_text_encoder_training:
+         print('Training the text encoder...')
+         if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
+             call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
+         dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
+
+     if Enable_Text_Encoder_Concept_Training:
+         if os.path.exists(CONCEPT_DIR):
+             if os.listdir(CONCEPT_DIR)!=[]:
+                 clear_output()
+                 if resuming=="Yes":
+                     print('Resuming Training...')
+                 print('Training the text encoder on the concept...')
+                 dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
+             else:
+                 clear_output()
+                 if resuming=="Yes":
+                     print('Resuming Training...')
+                 print('No concept images found, skipping concept training...')
+                 Text_Encoder_Concept_Training_Steps=0
+                 time.sleep(8)
+         else:
+             clear_output()
+             if resuming=="Yes":
+                 print('Resuming Training...')
+             print('No concept images found, skipping concept training...')
+             Text_Encoder_Concept_Training_Steps=0
+             time.sleep(8)
+
+     if UNet_Training_Steps!=0:
+         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
+
+     if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0:
+         print('Nothing to do')
+     else:
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+
+             call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
+             clear_output()
+             if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
+                 clear_output()
+                 print("DONE, the CKPT model is in the session's folder")
+             else:
+                 print("Something went wrong")
+
+         else:
+             print("Something went wrong")
+
+     return resumev2
+
+
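+ # Prepares the AUTOMATIC1111 web UI (clone/update, gradio patches for the Paperspace
+ # proxy URL) and returns the launch arguments pointing at the trained model.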
+ def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
+
+     if Previous_Session_Name!="":
+         print("Loading a previous session model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
+         path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in the previous session")
+             time.sleep(5)
+
+     elif Custom_Path!="":
+         print("Loading model from a custom path")
+         path_to_trained_model=Custom_Path
+
+         while not os.path.exists(path_to_trained_model):
+             print("Wrong Path")
+             time.sleep(5)
+
+     else:
+         print("Loading the trained model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
+         path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in this session")
+             time.sleep(5)
+
+     auth=f"--gradio-auth {User}:{Password}"
+     if User =="" or Password=="":
+         auth=""
+
+     os.chdir('/notebooks')
+     if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
+         call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
+         call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
+         call('rm sd_mrep.tar.zst', shell=True)
+
+     os.chdir('/notebooks/sd')
+     if not os.path.exists('stable-diffusion-webui'):
+         call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
+
+     os.chdir('/notebooks/sd/stable-diffusion-webui/')
+     call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
+     print('')
+     call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('git pull', shell=True, stdout=open('/dev/null', 'w'))
+     os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
+     call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     os.chdir('/notebooks')
+     clear_output()
+
+     call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
+
+     localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
+
+     for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
+         if line.strip().startswith('self.server_name ='):
+             line = f'            self.server_name = "{localurl}"\n'
+         if line.strip().startswith('self.protocol = "https"'):
+             line = '            self.protocol = "https"\n'
+         if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
+             line = ''
+         if line.strip().startswith('else "http"'):
+             line = ''
+         sys.stdout.write(line)
+
+     os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
+
+     call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
+     call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
+     call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
+
+     os.chdir('/notebooks/sd/stable-diffusion-webui')
+     clear_output()
+
+     configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth
+
+     return configf
+
+
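+ # Widget to delete a selected session (and its model folder) from storage.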
+ def clean():
+
+     Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+
+     s = widgets.Select(
+         options=Sessions,
+         rows=5,
+         description='',
+         disabled=False
+     )
+
+     out=widgets.Output()
+
+     d = widgets.Button(
+         description='Remove',
+         disabled=False,
+         button_style='warning',
+         tooltip='Remove the selected session',
+         icon='warning'
+     )
+
+     def rem(d):
+         with out:
+             if s.value is not None:
+                 clear_output()
+                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
+                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
+                 if os.path.exists('/notebooks/models/'+s.value):
+                     call('rm -r /notebooks/models/'+s.value, shell=True)
+                 s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+
+             else:
+                 d.close()
+                 s.close()
+                 clear_output()
+                 print("NOTHING TO REMOVE")
+
+     d.on_click(rem)
+     if s.value is not None:
+         display(s,d,out)
+     else:
+         print("NOTHING TO REMOVE")
+
+
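+ # Uploads the trained diffusers model, the CKPT file and a README model card
+ # to a private Hugging Face repo under the user's account.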
+ def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
+
+     from slugify import slugify
+     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
+     from huggingface_hub import create_repo
+     from IPython.display import display_markdown
+
+     if(Name_of_your_concept == ""):
+         Name_of_your_concept = Session_Name
+     Name_of_your_concept=Name_of_your_concept.replace(" ","-")
+
+     if hf_token_write =="":
+         print('Your Hugging Face write access token : ')
+         hf_token_write=input()
+
+     hf_token = hf_token_write
+
+     api = HfApi()
+     your_username = api.whoami(token=hf_token)["name"]
+
+     repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
+     output_dir = f'/notebooks/models/'+INSTANCE_NAME
+
+     def bar(prg):
+         clear_output()
+         br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
+         return br
+
+     print(bar(1))
+
+     readme_text = f'''---
+ license: creativeml-openrail-m
+ tags:
+ - text-to-image
+ - stable-diffusion
+ ---
+ ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with TheLastBen's fast-DreamBooth notebook
+
+ '''
+     # Save the readme to a file
+     readme_file = open("README.md", "w")
+     readme_file.write(readme_text)
+     readme_file.close()
+
+     operations = [
+         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
+         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt",path_or_fileobj=MDLPTH)
+     ]
+     create_repo(repo_id,private=True, token=hf_token)
+
+     api.create_commit(
+         repo_id=repo_id,
+         operations=operations,
+         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
+         token=hf_token
+     )
+
+     print(bar(8))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/scheduler",
+         path_in_repo="scheduler",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(9))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/text_encoder",
+         path_in_repo="text_encoder",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(12))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/tokenizer",
+         path_in_repo="tokenizer",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(13))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/unet",
+         path_in_repo="unet",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(21))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/vae",
+         path_in_repo="vae",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(23))
+
+     api.upload_file(
+         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
+         path_in_repo="model_index.json",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(25))
+
+     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
+     done()
+
+
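+ # Smart-crops `im` to a size x size square centered on a focal point computed from
+ # detected faces, corner features and local entropy.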
+ def crop_image(im, size):
+
+     GREEN = "#0F0"
+     BLUE = "#00F"
+     RED = "#F00"
+
+     def focal_point(im, settings):
+         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
+         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
+         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
+
+         pois = []
+
+         weight_pref_total = 0
+         if len(corner_points) > 0:
+             weight_pref_total += settings.corner_points_weight
+         if len(entropy_points) > 0:
+             weight_pref_total += settings.entropy_points_weight
+         if len(face_points) > 0:
+             weight_pref_total += settings.face_points_weight
+
+         corner_centroid = None
+         if len(corner_points) > 0:
+             corner_centroid = centroid(corner_points)
+             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+             pois.append(corner_centroid)
+
+         entropy_centroid = None
+         if len(entropy_points) > 0:
+             entropy_centroid = centroid(entropy_points)
+             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+             pois.append(entropy_centroid)
+
+         face_centroid = None
+         if len(face_points) > 0:
+             face_centroid = centroid(face_points)
+             face_centroid.weight = settings.face_points_weight / weight_pref_total
+             pois.append(face_centroid)
+
+         average_point = poi_average(pois, settings)
+
+         return average_point
+
+     def image_face_points(im, settings):
+
+         np_im = np.array(im)
+         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+         tries = [
+             [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+         ]
+         for t in tries:
+             classifier = cv2.CascadeClassifier(t[0])
+             minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
+             try:
+                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+             except Exception:
+                 continue
+
+             if len(faces) > 0:
+                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+                 return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+         return []
+
+     def image_corner_points(im, settings):
+         grayscale = im.convert("L")
+
+         gd = ImageDraw.Draw(grayscale)
+         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+
+         np_im = np.array(grayscale)
+
+         points = cv2.goodFeaturesToTrack(
+             np_im,
+             maxCorners=100,
+             qualityLevel=0.04,
+             minDistance=min(grayscale.width, grayscale.height)*0.06,
+             useHarrisDetector=False,
+         )
+
+         if points is None:
+             return []
+
+         focal_points = []
+         for point in points:
+             x, y = point.ravel()
+             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+
+         return focal_points
+
+     def image_entropy_points(im, settings):
+         landscape = im.height < im.width
+         portrait = im.height > im.width
+         if landscape:
+             move_idx = [0, 2]
+             move_max = im.size[0]
+         elif portrait:
+             move_idx = [1, 3]
+             move_max = im.size[1]
+         else:
+             return []
+
+         e_max = 0
+         crop_current = [0, 0, settings.crop_width, settings.crop_height]
+         crop_best = crop_current
+         while crop_current[move_idx[1]] < move_max:
+             crop = im.crop(tuple(crop_current))
+             e = image_entropy(crop)
+
+             if (e > e_max):
+                 e_max = e
+                 crop_best = list(crop_current)
+
+             crop_current[move_idx[0]] += 4
+             crop_current[move_idx[1]] += 4
+
+         x_mid = int(crop_best[0] + settings.crop_width/2)
+         y_mid = int(crop_best[1] + settings.crop_height/2)
+
+         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
+
+     def image_entropy(im):
+         # greyscale image entropy
+         # band = np.asarray(im.convert("L"))
+         band = np.asarray(im.convert("1"), dtype=np.uint8)
+         hist, _ = np.histogram(band, bins=range(0, 256))
+         hist = hist[hist > 0]
+         return -np.log2(hist / hist.sum()).sum()
+
+     def centroid(pois):
+         x = [poi.x for poi in pois]
+         y = [poi.y for poi in pois]
+         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
+
+     def poi_average(pois, settings):
+         weight = 0.0
+         x = 0.0
+         y = 0.0
+         for poi in pois:
+             weight += poi.weight
+             x += poi.x * poi.weight
+             y += poi.y * poi.weight
+         avg_x = round(weight and x / weight)
+         avg_y = round(weight and y / weight)
+
+         return PointOfInterest(avg_x, avg_y)
+
+     def is_landscape(w, h):
+         return w > h
+
+     def is_portrait(w, h):
+         return h > w
+
+     def is_square(w, h):
+         return w == h
+
+     class PointOfInterest:
+         def __init__(self, x, y, weight=1.0, size=10):
+             self.x = x
+             self.y = y
+             self.weight = weight
+             self.size = size
+
+         def bounding(self, size):
+             return [
+                 self.x - size//2,
+                 self.y - size//2,
+                 self.x + size//2,
+                 self.y + size//2
+             ]
+
+     class Settings:
+         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
+             self.crop_width = crop_width
+             self.crop_height = crop_height
+             self.corner_points_weight = corner_points_weight
+             self.entropy_points_weight = entropy_points_weight
+             self.face_points_weight = face_points_weight
+
+     settings = Settings(
+         crop_width = size,
+         crop_height = size,
+         face_points_weight = 0.9,
+         entropy_points_weight = 0.15,
+         corner_points_weight = 0.5,
+     )
+
+     scale_by = 1
+     if is_landscape(im.width, im.height):
+         scale_by = settings.crop_height / im.height
+     elif is_portrait(im.width, im.height):
+         scale_by = settings.crop_width / im.width
+     elif is_square(im.width, im.height):
+         if is_square(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_landscape(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_portrait(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_height / im.height
+
+     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
+     im_debug = im.copy()
+
+     focus = focal_point(im_debug, settings)
+
+     # take the focal point and turn it into crop coordinates that try to center over the focal
+     # point but then get adjusted back into the frame
+     y_half = int(settings.crop_height / 2)
+     x_half = int(settings.crop_width / 2)
+
+     x1 = focus.x - x_half
+     if x1 < 0:
+         x1 = 0
+     elif x1 + settings.crop_width > im.width:
+         x1 = im.width - settings.crop_width
+
+     y1 = focus.y - y_half
+     if y1 < 0:
+         y1 = 0
+     elif y1 + settings.crop_height > im.height:
+         y1 = im.height - settings.crop_height
+
+     x2 = x1 + settings.crop_width
+     y2 = y1 + settings.crop_height
+
+     crop = [x1, y1, x2, y2]
+
+     results = []
+
+     results.append(im.crop(tuple(crop)))
+
+     return results