TheLastBen commited on
Commit
2a72a40
1 Parent(s): 6b35131

Upload sdxllorapps.py

Browse files
Files changed (1) hide show
  1. sdxllorapps.py +1122 -0
sdxllorapps.py ADDED
@@ -0,0 +1,1122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput, Popen
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ import random
13
+ import sys
14
+ from io import BytesIO
15
+ import requests
16
+ from collections import defaultdict
17
+ from math import log, sqrt
18
+ import numpy as np
19
+ import sys
20
+ import fileinput
21
+ import six
22
+ import base64
23
+ import re
24
+
25
+ from urllib.parse import urlparse, parse_qs, unquote
26
+ import urllib.request
27
+ from urllib.request import urlopen, Request
28
+
29
+ import tempfile
30
+ from tqdm import tqdm
31
+
32
+
33
+
34
+
35
def Deps(force_reinstall):
    """Install (or refresh) the Paperspace environment for SDXL LoRA training.

    force_reinstall -- when True, redo the full dependency install even if
    safetensors is already present in site-packages.
    """

    # Fast path: deps already installed (safetensors present) and no forced reinstall.
    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        ntbk()  # refresh the Latest_Notebooks folder
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        # Wipe the preinstalled (wrong-version) packages before unpacking the prebuilt bundle.
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            call("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        # /models lives on the persistent volume; symlink it into /notebooks.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # System-level (.deb) dependencies listed in aptdeps.txt.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        # Prebuilt python dependency bundle, extracted over the filesystem root.
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        call('tar -C / --zstd -xf ppsdeps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        # Point the transformers cache at persistent storage.
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        # TheLastBen's diffusers fork provides the SDXL training scripts used later.
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        # Silence python warnings globally by patching warnings.py in place.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.9/warnings.py", shell=True)
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq gradio==3.41.2', shell=True, stdout=open('/dev/null', 'w'))
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()  # show the green "Done!" widget
79
+
80
+
81
def depsinst(url, dst):
    """Download *url* to file *dst*, showing an 'Installing dependencies' progress bar.

    The total size is taken from the Content-Length header when present;
    otherwise tqdm runs without a known total.
    """
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # Context manager guarantees the connection is closed (the old version leaked it).
    with urlopen(req) as u:
        meta = u.info()
        # getheaders() only exists on legacy message objects; get_all() is the py3 API.
        if hasattr(meta, 'getheaders'):
            content_length = meta.getheaders("Content-Length")
        else:
            content_length = meta.get_all("Content-Length")
        file_size = int(content_length[0]) if content_length else None

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
            # 'with open' closes the file; the previous explicit f.close() was redundant.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if not buffer:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
103
+
104
+
105
+
106
def dwn(url, dst, msg):
    """Download *url* to file *dst* with a tqdm progress bar labeled *msg*.

    Same logic as depsinst() but with a caller-supplied bar label
    (e.g. '1/4' while fetching SDXL model shards).
    """
    req = Request(url, headers={"User-Agent": "torch.hub"})
    # Context manager guarantees the connection is closed (the old version leaked it).
    with urlopen(req) as u:
        meta = u.info()
        # getheaders() only exists on legacy message objects; get_all() is the py3 API.
        if hasattr(meta, 'getheaders'):
            content_length = meta.getheaders("Content-Length")
        else:
            content_length = meta.get_all("Content-Length")
        file_size = int(content_length[0]) if content_length else None

        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
            # 'with open' closes the file; the previous explicit f.close() was redundant.
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if not buffer:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
128
+
129
+
130
+
131
+
132
def ntbk():
    """Replace the Latest_Notebooks folder with the current PPS notebook set."""
    os.chdir('/notebooks')
    # Always end up with a fresh, empty Latest_Notebooks directory.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
        call('mkdir Latest_Notebooks', shell=True)
    else:
        call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # Notebooks.txt lists the URLs of the notebooks to fetch.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
144
+
145
+
146
+
147
+
148
def ntbks():
    """Replace the Latest_Notebooks folder with the current RNPD notebook set."""
    os.chdir('/notebooks')
    folder = 'Latest_Notebooks'
    if not os.path.exists(folder):
        call('mkdir Latest_Notebooks', shell=True)
    else:
        # Recreate the folder so stale notebooks are dropped.
        call('rm -r Latest_Notebooks', shell=True)
        call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
160
+
161
def done():
    """Display a disabled green 'Done!' confirmation button."""
    # Renamed the local so it no longer shadows this function's own name.
    finished_btn = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check'
    )
    display(finished_btn)
170
+
171
+
172
+
173
def mdlvxl():
    """Fetch the official SDXL 1.0 base model into /notebooks/stable-diffusion-XL.

    Uses a sparse git checkout for the small config files and dwn() for the
    large .safetensors weights (downloaded individually with progress bars).
    """

    os.chdir('/notebooks')

    # A folder without the unet weights is a broken partial download: start over.
    if os.path.exists('stable-diffusion-XL') and not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
        call('rm -r stable-diffusion-XL', shell=True)
    if not os.path.exists('stable-diffusion-XL'):
        print('Downloading SDXL model...')
        call('mkdir stable-diffusion-XL', shell=True)
        os.chdir('stable-diffusion-XL')
        call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git remote add -f origin  https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # Sparse checkout: configs/tokenizers only; exclude all weight files (pulled below).
        call('echo -e "\nscheduler\ntext_encoder\ntext_encoder_2\ntokenizer\ntokenizer_2\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.bin\n!*.onnx*\n!*.xml" > .git/info/sparse-checkout', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # The four large shards, fetched directly with progress bars.
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.safetensors', 'text_encoder/model.safetensors', '1/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.safetensors', 'text_encoder_2/model.safetensors', '2/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae/diffusion_pytorch_model.safetensors', 'vae/diffusion_pytorch_model.safetensors', '3/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.safetensors', 'unet/diffusion_pytorch_model.safetensors', '4/4')
        call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()
        # NOTE(review): this loops forever if the download failed; the user must fix access and restart.
        while not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
            print('Invalid HF token, make sure you have access to the model')
            time.sleep(8)
        if os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
            print('Using SDXL model')
    else:
        print('Using SDXL model')

    # Drop the "force_upcast" key from the VAE config (fp16-friendly VAE).
    call("sed -i 's@\"force_upcast.*@@' /notebooks/stable-diffusion-XL/vae/config.json", shell=True)
205
+
206
+
207
+
208
def downloadmodel_hfxl(Path_to_HuggingFace):
    """Clone a diffusers-format model repo from the HF hub into stable-diffusion-custom.

    Path_to_HuggingFace -- 'user/repo' path on huggingface.co.
    Uses the token stored by a previous cell (Fast-Dreambooth/token.txt) for
    authenticated (gated/private) repos when available.
    """

    os.chdir('/notebooks')
    if os.path.exists('stable-diffusion-custom'):
        call("rm -r stable-diffusion-custom", shell=True)
    clear_output()

    # Build the https prefix, embedding the saved HF token if one exists.
    if os.path.exists('Fast-Dreambooth/token.txt'):
        with open("Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir stable-diffusion-custom", shell=True)
    os.chdir("stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin  '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # Pull only the pipeline components; skip safetensors/fp16 bins handled by LFS.
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.fp16.bin" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.safetensors'):
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    # NOTE(review): deliberate infinite loop to hold the cell until the user fixes the link.
    while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.safetensors'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
240
+
241
+
242
+
243
def downloadmodel_link_xl(MODEL_LINK):
    """Download a single-file SDXL checkpoint from a URL and convert it to diffusers.

    Supports civitai, Google Drive, Hugging Face and generic direct links.
    The converted pipeline is written to /notebooks/stable-diffusion-custom.
    """

    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the host so we know how to resolve the real filename.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        """Resolve the served filename: via redirect headers (civitai) or gdown scraping (Drive)."""
        if not gdrive:
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            # Second request follows Drive's "can't scan for viruses" confirmation page.
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    os.chdir('/notebooks')
    if src=='huggingface':
        dwn(MODEL_LINK, modelname,'Downloading the Model')
    else:
        call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelname, shell=True)

    if os.path.exists(modelname):
        # ~1.8 GB sanity floor: anything smaller cannot be a full SDXL checkpoint.
        if os.path.getsize(modelname) > 1810671599:

            print('Converting to diffusers...')
            call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelname+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

            if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                os.chdir('/notebooks')
                clear_output()
                done()
            else:
                # NOTE(review): deliberate infinite loop holding the cell on failure.
                while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/notebooks')
                    time.sleep(5)
    else:
        while os.path.getsize(modelname) < 1810671599:
            print('Wrong link, check that the link is valid')
            os.chdir('/notebooks')
            time.sleep(5)
318
+
319
+
320
+
321
def downloadmodel_path_xl(MODEL_PATH):
    """Convert a local single-file SDXL checkpoint to a diffusers pipeline.

    MODEL_PATH -- filesystem path to a .safetensors checkpoint; the output
    pipeline goes to /notebooks/stable-diffusion-custom.
    """

    import wget
    os.chdir('/notebooks')
    clear_output()
    if os.path.exists(str(MODEL_PATH)):

        print('Converting to diffusers...')
        call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

        if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            clear_output()
            done()
        # NOTE(review): deliberate infinite loop holding the cell until restart on failure.
        while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            os.chdir('/notebooks')
            time.sleep(5)
    else:
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/notebooks')
            time.sleep(5)
343
+
344
+
345
+
346
+
347
def dls_xl(Path_to_HuggingFace, MODEL_PATH, MODEL_LINK):
    """Dispatch to the appropriate model-download helper and return the model dir.

    Priority: HF repo path > local file path > URL > official SDXL base.
    """
    os.chdir('/notebooks')

    custom_dir = "/notebooks/stable-diffusion-custom"

    # Early returns replace the original if/elif cascade; behavior is identical.
    if Path_to_HuggingFace != "":
        downloadmodel_hfxl(Path_to_HuggingFace)
        return custom_dir

    if MODEL_PATH != "":
        downloadmodel_path_xl(MODEL_PATH)
        return custom_dir

    if MODEL_LINK != "":
        downloadmodel_link_xl(MODEL_LINK)
        return custom_dir

    # Nothing supplied: fall back to the official SDXL base model.
    mdlvxl()
    return "/notebooks/stable-diffusion-XL"
370
+
371
+
372
+
373
def sess_xl(Session_Name, MODEL_NAMExl):
    """Create or load a training session and return its directory layout.

    Session_Name  -- session identifier; prompted for interactively if empty.
    MODEL_NAMExl  -- path of the downloaded base model ('' if none yet).

    Returns (WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
             INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl).

    Fixes: removed a duplicated `if MODEL_NAMExl==""` check that sat inside the
    *else* of the same condition (unreachable), plus the unused PT local and
    unused gdown/wget imports.
    """
    os.chdir('/notebooks')

    # Keep prompting until a non-empty name is given; spaces become underscores.
    while Session_Name == "":
        print('Input the Session Name:')
        Session_Name = input("")
        Session_Name = Session_Name.replace(" ", "_")

    WORKSPACE = '/notebooks/Fast-Dreambooth'

    INSTANCE_NAME = Session_Name
    OUTPUT_DIR = "/notebooks/models/" + Session_Name
    SESSION_DIR = WORKSPACE + "/Sessions/" + Session_Name
    INSTANCE_DIR = SESSION_DIR + "/instance_images"
    CAPTIONS_DIR = SESSION_DIR + '/captions'
    # Previously trained LoRA weights for this session, if any.
    MDLPTH = str(SESSION_DIR + "/" + Session_Name + '.safetensors')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous LoRa model')
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p ' + INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    else:
        # Session exists and already holds a trained LoRA model.
        print('Session Loaded, proceed')

    return WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl
416
+
417
+
418
+
419
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR):
    """Show an image-upload widget (or ingest a folder directly) for instance images.

    If IMAGES_FOLDER_OPTIONAL is set, skips the widget and processes the folder
    immediately; otherwise displays a FileUpload widget plus an Upload button.
    """

    # Jupyter checkpoint folders would otherwise be picked up as images.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )

    def up(Upload):
        # Button callback: tear down the widgets, then run the actual ingestion.
        with out:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
            done()
    out=widgets.Output()

    if IMAGES_FOLDER_OPTIONAL=="":
        # Interactive path: wait for the user to click Upload.
        Upload.on_click(up)
        display(uploader, Upload, out)
    else:
        # Folder path given: ingest right away, no widgets needed.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
        done()
448
+
449
+
450
+
451
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader):
    """Ingest instance images (and optional .txt captions) into the session folders.

    Sources: either a local folder (IMAGES_FOLDER_OPTIONAL) or the in-memory
    contents of the FileUpload widget. Optionally center-crops each image to
    Crop_size x Crop_size via crop_image() (defined elsewhere in this file).
    """

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL !="":

        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Caption sidecar files move to CAPTIONS_DIR before images are processed.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            # Replace spaces in filenames first (the training script dislikes them).
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                file=file.convert("RGB")
                # Respect EXIF orientation before measuring/cropping.
                file=ImageOps.exif_transpose(file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

                else:
                    # Already the right size: plain copy keeps the original bytes.
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)

        else:
            # NOTE(review): this copies the whole folder once per file; one cp would suffice.
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL =="":
        up=""
        # Write out caption files first, then filter them out of the image list.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        up=[file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                img=img.convert("RGB")
                img=ImageOps.exif_transpose(img)

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                # Reopen the saved file and crop it in place when needed.
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                width, height = file.size
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

        else:
            for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img=img.convert("RGB")
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    # Final pass: dash-replace spaces in both output folders.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')
550
+
551
+
552
+
553
+
554
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: select an instance image, preview it, edit its caption.

    Captions live in CAPTIONS_DIR as <image-stem>.txt; a missing caption file is
    created empty on first selection.

    Fixes over the previous revision:
    - `if ext=="jpg" or "JPG"` was always true, so every image was re-encoded
      as JPEG regardless of its real format; now only jpg maps to "JPEG"
      (the name PIL's encoder registry actually uses).
    - `qualiy=10` was a typo: PIL silently ignores unknown save() kwargs, so
      the intended low preview quality never applied; corrected to quality=10.
    - Removed the pointless `paths=""/out=""/widgets_l=""` placeholder strings.
    """
    clear_output()

    def Caption(path):
        # Builds the preview+editor widgets for one selected image.
        if path != "Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext = os.path.splitext(os.path.basename(path))[-1][1:]
            # PIL registers the JPEG encoder under "JPEG", not "JPG"/"jpg".
            if ext.lower() == "jpg":
                ext = "JPEG"

            caption_path = CAPTIONS_DIR + "/" + name + '.txt'
            if os.path.exists(caption_path):
                with open(caption_path, 'r') as f:
                    text = f.read()
            else:
                # First visit: create an empty caption file.
                with open(caption_path, 'w') as f:
                    f.write("")
                text = ""

            img = Image.open(os.path.join(INSTANCE_DIR, path))
            img = img.convert("RGB")
            img = img.resize((420, 420))
            image_bytes = BytesIO()
            # Low quality is fine here: this is only a 420px preview.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            image = widgets.Image(
                value=image_data,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                # Persist the edited caption.
                with open(caption_path, 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"] + paths, rows=25)

    out = widgets.Output()

    def click(change):
        # Selection callback: re-render the editor for the newly chosen image.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
616
+
617
+
618
+
619
def dbtrainxl(Unet_Training_Epochs, Text_Encoder_Training_Epochs, Unet_Learning_Rate, Text_Encoder_Learning_Rate, dim, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, ofstnselvl, Save_VRAM):
    """Run SDXL LoRA training: optional text-encoder pass, then the UNet pass.

    Shells out to the training scripts in TheLastBen's diffusers fork via
    `accelerate launch`. Success is detected by the presence of
    SESSION_DIR/<Session_Name>.safetensors afterwards.
    """

    # Jupyter checkpoint folders would be treated as training data.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # Fresh random seed each run.
    Seed=random.randint(1, 999999)

    # Optional CLI flags, empty string when disabled.
    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    GC=''
    if Save_VRAM:
        GC='--gradient_checkpointing'

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"

    def train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # Textual-inversion style pass over the text encoder(s).
        print('Training the Text Encoder...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_TI.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    def train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # LoRA pass over the UNet; produces the final .safetensors.
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_lora.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Unet_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    # Text-encoder training only happens when UNet training is also requested.
    if Unet_Training_Epochs!=0:
        if Text_Encoder_Training_Epochs!=0:
            train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Text_Encoder_Training_Epochs)
            clear_output()
        train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Unet_Training_Epochs)
    else :
        print('Nothing to do')

    # The training script writes the LoRA into the session folder on success.
    if os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
        clear_output()
        print("DONE, the LoRa model is in the session's folder")
    else:
        print("Something went wrong")
712
+
713
+
714
+
715
+
716
def sdcmf(MDLPTH):
    """Install/update ComfyUI and wire in the session's LoRA and the SDXL base model.

    MDLPTH -- path to the trained LoRA .safetensors; its parent folder gets
    symlinked into ComfyUI's models/loras.
    """

    from slugify import slugify
    from huggingface_hub import HfApi, CommitOperationAdd, create_repo

    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    os.chdir('ComfyUI')
    # Discard local edits so the pull below is clean.
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)

    if os.path.exists(MDLPTH):
        call('ln -s '+os.path.dirname(MDLPTH)+' models/loras', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # clean_symlinks is defined elsewhere in this file; presumably prunes dead links — verify.
    clean_symlinks('models/loras')

    # The SDXL base checkpoint ships on the Paperspace /datasets volume.
    if not os.path.exists('models/checkpoints/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/checkpoints', shell=True)

    # Patch ComfyUI's startup banner to print the Paperspace proxy URL instead.
    localurl="https://tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
    call("sed -i 's@print(\"To see the GUI go to: http://{}:{}\".format(address, port))@print(\"\u2714 Connected\")\\n            print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
744
+
745
+
746
def test(MDLPTH, User, Password):
    """Prepare the AUTOMATIC1111 webui for testing the trained LoRA.

    MDLPTH: path to the session's LoRA .safetensors (its folder is linked
    into models/Lora). User/Password: optional gradio basic-auth pair; if
    either is empty, auth is disabled.

    Returns the command-line flag string to launch the webui with.
    Side effects: downloads/updates repos, creates checkpoint/LoRA symlinks,
    and patches the system-wide gradio blocks.py to serve over the
    Paperspace HTTPS proxy.
    """


    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    os.chdir('/notebooks')
    # One-time download of the pre-packaged dependency tree.
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    # Discard local patches (the sed edits below) so checkout/pull are clean,
    # then re-apply them further down.
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    clear_output()


    if not os.path.exists('models/Stable-diffusion/sd_xl_base_1.0.safetensors'):
        # Shared read-only SDXL base checkpoint from the Paperspace dataset mount.
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/Stable-diffusion', shell=True)


    if os.path.exists(MDLPTH):
        # Link the session folder into models/Lora; failures are discarded
        # (e.g. the link already exists).
        call('ln -s '+os.path.dirname(MDLPTH)+' models/Lora', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    # Remove dangling symlinks left by deleted sessions.
    clean_symlinks('models/Lora')

    # Replace the installed gradio blocks.py with a known-good patched copy...
    call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # ...then rewrite it in place so gradio binds/advertises the public
    # Paperspace FQDN over HTTPS instead of the local address.
    for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)


    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the pre-extracted stablediffusion sources and fix
    # the relative src paths it expects.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    # Strip a PEP 604 return annotation that breaks on Python 3.9.
    call("sed -i 's@-> Network | None@@g' /notebooks/sd/stable-diffusion-webui/extensions-builtin/Lora/network.py", shell=True)

    # Preload a convenient quicksettings list into the webui defaults.
    call("sed -i 's@\"quicksettings\": OptionInfo(.*@\"quicksettings\": OptionInfo(\"sd_model_checkpoint, sd_vae, CLIP_stop_at_last_layers, inpainting_mask_weight, initial_noise_multiplier\", \"Quicksettings list\"),@' /notebooks/sd/stable-diffusion-webui/modules/shared.py", shell=True)
    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Launch flags: port 6006 is the port the Paperspace tensorboard proxy forwards.
    configf="--disable-console-progressbars --no-gradio-queue --upcast-sampling --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors "+auth

    return configf
811
+
812
+
813
+
814
+
815
def clean():
    """Interactive session remover.

    Shows an ipywidgets Select of saved Dreambooth sessions plus a Remove
    button; clicking the button deletes the selected session directory and
    any matching converted-model folder under /notebooks/models, then
    refreshes the list. Prints "NOTHING TO REMOVE" when no session exists.
    """

    Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out=widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        # Fixed typo: was "Removet the selected session"
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        # Button callback: delete the selected session (and its model folder
        # if one was converted), then refresh the Select options.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")


            else:
                # Nothing selected anymore: tear down the widgets.
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s,d,out)
    else:
        print("NOTHING TO REMOVE")
858
+
859
+
860
+
861
def crop_image(im, size):
    """Smart-crop a PIL image *im* to a size x size square.

    The image is first scaled so the crop fits along its short side, then a
    focal point is estimated by blending three detectors — Haar-cascade face
    detection, goodFeaturesToTrack corners, and a sliding-window entropy
    scan — and the crop window is centered on that point, clamped to the
    image bounds.

    Returns a single-element list containing the cropped PIL image.
    Fix vs. original: the bare `except:` around detectMultiScale is narrowed
    to `except Exception:` so KeyboardInterrupt/SystemExit are no longer
    swallowed; unused debug color constants removed. All other logic is
    unchanged.
    """

    import cv2

    def focal_point(im, settings):
        # Run each detector only if its weight is enabled, then combine the
        # per-detector centroids, re-normalizing weights over the detectors
        # that actually returned points.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point


    def image_face_points(im, settings):
        # Try a series of Haar cascades (eyes first, then faces, then upper
        # body); the first cascade that detects anything wins.
        # NOTE(review): the PIL image is RGB but the conversion uses
        # COLOR_BGR2GRAY, which only slightly skews the channel weighting for
        # grayscale — confirm intent before changing.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                # Was a bare `except:`; still skip cascades that fail to run,
                # but let KeyboardInterrupt/SystemExit propagate.
                continue

            if len(faces) > 0:
                # Convert (x, y, w, h) boxes to one POI per face at the box
                # center, weighted equally.
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []


    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        # Equal weight per corner so many corners don't overpower other detectors.
        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points


    def image_entropy_points(im, settings):
        # Slide the crop window along the long axis and keep the position with
        # maximum entropy; square images get no entropy POI.
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            # Step the window 4px at a time.
            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]


    def image_entropy(im):
        # greyscale image entropy
        # band = np.asarray(im.convert("L"))
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        # Unweighted mean of the POI coordinates.
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))


    def poi_average(pois, settings):
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        # `weight and x / weight` guards against division by zero when no
        # detector produced points (returns 0).
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)


    def is_landscape(w, h):
        return w > h


    def is_portrait(w, h):
        return h > w


    def is_square(w, h):
        return w == h


    class PointOfInterest:
        # A weighted point candidate for the crop center.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            # Square box of side `size` centered on the point.
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        # Crop geometry plus per-detector blend weights.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    # Faces dominate; entropy is a weak tie-breaker.
    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale so the crop exactly spans the short side of the image.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    # Defensive copy: focal-point detection reads this, not the crop source.
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
1113
+
1114
+
1115
+
1116
def clean_symlinks(path):
    """Remove broken (dangling) symbolic links directly under *path*.

    Fix: the original tested `os.path.exists(os.readlink(lnk))`, which
    resolves a *relative* link target against the current working directory
    — deleting valid relative links and potentially keeping broken ones.
    `os.path.exists(lnk)` follows the link itself and is False exactly when
    the link dangles.
    """
    for item in os.listdir(path):
        lnk = os.path.join(path, item)
        # islink() is true even when the target is gone; exists() follows
        # the link, so the pair identifies dangling links only.
        if os.path.islink(lnk) and not os.path.exists(lnk):
            os.remove(lnk)
1121
+
1122
+