TheLastBen commited on
Commit
897d41c
1 Parent(s): 4012b71

Update Scripts/sdxllorapps_311.py

Browse files
Files changed (1) hide show
  1. Scripts/sdxllorapps_311.py +1144 -0
Scripts/sdxllorapps_311.py CHANGED
@@ -0,0 +1,1144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput, Popen
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw, ImageOps
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ import random
13
+ import sys
14
+ from io import BytesIO
15
+ import requests
16
+ from collections import defaultdict
17
+ from math import log, sqrt
18
+ import numpy as np
19
+ import sys
20
+ import fileinput
21
+ import six
22
+ import base64
23
+ import re
24
+ import cv2
25
+
26
+ from urllib.parse import urlparse, parse_qs, unquote
27
+ import urllib.request
28
+ from urllib.request import urlopen, Request
29
+
30
+ import tempfile
31
+ from tqdm import tqdm
32
+
33
+
34
+
35
+
36
def Deps(force_reinstall):
    """Install (or verify) the notebook's runtime dependencies.

    force_reinstall -- when True, re-download and re-extract the dependency
    bundle even if a previous installation is detected.

    Side effects: writes to /models, /deps, /notebooks and the system
    site-packages; sets TORCH_HOME / PYTHONWARNINGS / TF_CPP_MIN_LOG_LEVEL.
    """

    # A gradio install in dist-packages is used as the marker that the
    # dependency bundle was already extracted on this machine.
    if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
        ntbk()
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
        ntbk()
        # /models is shared storage; symlink it into the notebook workspace.
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        # /deps is a scratch area for the apt packages and the tar bundle.
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # Fetch and install the apt-level dependencies listed upstream.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
        # Pre-built python dependency bundle, extracted over the root fs.
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
        call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers==0.18.1', shell=True, stdout=open('/dev/null', 'w'))
        # Fork of diffusers that ships the SDXL LoRa training scripts.
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
        os.environ['PYTHONWARNINGS'] = 'ignore'
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        # Patch the interpreter's warnings module to silence all warning text.
        call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
76
+
77
+
78
+
79
+
80
def depsinst(url, dst):
    """Download the dependency archive at *url* to local path *dst*,
    showing an 'Installing dependencies' progress bar.

    This was a byte-for-byte duplicate of dwn() with a hard-coded label;
    it now delegates to dwn(), whose bar_format
    (msg + ' |{bar:20}| {percentage:3.0f}%') reproduces the original
    output exactly.
    """
    dwn(url, dst, 'Installing dependencies')
102
+
103
+
104
+
105
def dwn(url, dst, msg):
    """Stream *url* into local file *dst* with a tqdm progress bar labeled *msg*.

    The bar total comes from the Content-Length header when the server
    provides one; otherwise the bar runs without a known total.
    """
    file_size = None
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Python 2 message objects exposed getheaders(); Python 3 uses get_all().
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
        # The with-block owns the file handle; the original also called
        # f.close() explicitly inside the block, which was redundant.
        with open(dst, "wb") as f:
            # Copy in 8 KiB chunks until EOF (read() returns b'').
            while buffer := u.read(8192):
                f.write(buffer)
                pbar.update(len(buffer))
127
+
128
+
129
+
130
+
131
def ntbk():
    """Refresh /notebooks/Latest_Notebooks from the PPS notebook list."""
    os.chdir('/notebooks')
    # Start from an empty folder regardless of whether one already exists.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # Notebooks.txt is a wget input file listing the notebook URLs to fetch.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
143
+
144
+
145
+
146
+
147
def ntbks():
    """Refresh /notebooks/Latest_Notebooks from the RNPD notebook list."""
    os.chdir('/notebooks')
    # Wipe any previous copy so the folder is rebuilt from scratch.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # The txt file is a wget manifest of notebook URLs; remove it after use.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
159
+
160
def done():
    """Display a disabled green 'Done!' confirmation button."""
    # Renamed the local so it no longer shadows this function's own name.
    btn = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(btn)
169
+
170
+
171
+
172
def mdlvxl():
    """Download the stock SDXL base 1.0 model into /notebooks/stable-diffusion-XL.

    Uses a sparse git checkout for the small config files and direct
    downloads (dwn) for the four large weight files, then patches the VAE
    config to drop the force_upcast entry.
    """
    os.chdir('/notebooks')

    # A folder missing the UNet weights is a broken partial download: wipe it.
    if os.path.exists('stable-diffusion-XL') and not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
        call('rm -r stable-diffusion-XL', shell=True)
    if not os.path.exists('stable-diffusion-XL'):
        print('Downloading SDXL model...')
        call('mkdir stable-diffusion-XL', shell=True)
        os.chdir('stable-diffusion-XL')
        call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git remote add -f origin https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # Sparse-checkout everything except the large binary weight files.
        call('echo -e "\nscheduler\ntext_encoder\ntext_encoder_2\ntokenizer\ntokenizer_2\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.bin\n!*.onnx*\n!*.xml\n!*.msgpack" > .git/info/sparse-checkout', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # The four big weight files are fetched directly with a progress bar.
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder/model.safetensors', 'text_encoder/model.safetensors', '1/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/text_encoder_2/model.safetensors', 'text_encoder_2/model.safetensors', '2/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/vae/diffusion_pytorch_model.safetensors', 'vae/diffusion_pytorch_model.safetensors', '3/4')
        dwn('https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/unet/diffusion_pytorch_model.safetensors', 'unet/diffusion_pytorch_model.safetensors', '4/4')
        call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()

    # If the UNet weights are still missing the download failed; loop so the
    # user notices instead of silently continuing with a broken model.
    while not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.safetensors'):
        print('Invalid HF token, make sure you have access to the model')
        time.sleep(8)
    # BUG FIX: the original had `if exists: print(...) else: print(...)` with
    # identical messages in both branches; collapsed to one print.
    print('Using SDXL model')

    # Drop force_upcast from the VAE config (training runs in fp16).
    call("sed -i 's@\"force_upcast.*@@' /notebooks/stable-diffusion-XL/vae/config.json", shell=True)
204
+
205
+
206
+
207
def downloadmodel_hfxl(Path_to_HuggingFace):
    """Clone a custom diffusers-format model from a HuggingFace repo into
    /notebooks/stable-diffusion-custom.

    Path_to_HuggingFace -- repo path like 'user/model-name'.
    If a saved HF token exists it is embedded in the remote URL for
    private-repo access. On failure the function loops forever printing an
    error so the user notices (notebook-style flow control).
    """

    os.chdir('/notebooks')
    # Any previous custom model is discarded and re-cloned.
    if os.path.exists('stable-diffusion-custom'):
        call("rm -r stable-diffusion-custom", shell=True)
    clear_output()

    # Embed the saved HF token (if any) into the clone URL for auth.
    if os.path.exists('Fast-Dreambooth/token.txt'):
        with open("Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir stable-diffusion-custom", shell=True)
    os.chdir("stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # Sparse-checkout the diffusers layout, excluding safetensors/fp16 dupes.
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.fp16.bin" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    # The UNet weights file is the success marker for the whole clone.
    if os.path.exists('unet/diffusion_pytorch_model.safetensors'):
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    # Deliberate infinite loop on failure: keeps printing until the user
    # interrupts and fixes the link.
    while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.safetensors'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
239
+
240
+
241
+
242
def downloadmodel_link_xl(MODEL_LINK):
    """Download a custom SDXL checkpoint from a direct link (civitai,
    Google Drive, HuggingFace or any other URL) and convert it to the
    diffusers layout under stable-diffusion-custom.
    """

    import wget
    import gdown
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the host so the right download / naming strategy is used.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        # Resolve the real checkpoint filename from the download redirect
        # (civitai) or from Google Drive's Content-Disposition header.
        if not gdrive:
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            # Second request passes Drive's "large file" confirmation page.
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    os.chdir('/notebooks')
    # HuggingFace links stream directly; everything else goes through gdown.
    if src=='huggingface':
        dwn(MODEL_LINK, modelname,'Downloading the Model')
    else:
        call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelname, shell=True)

    if os.path.exists(modelname):
        # ~1.8 GB threshold: anything smaller is assumed to be an error page
        # or truncated download rather than a real SDXL checkpoint.
        if os.path.getsize(modelname) > 1810671599:

            print('Converting to diffusers...')
            call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelname+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

            if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                os.chdir('/notebooks')
                clear_output()
                done()
            else:
                # Deliberate infinite loop: surface the failure to the user.
                while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/notebooks')
                    time.sleep(5)
        else:
            # Same pattern: loop forever until the user fixes the link.
            while os.path.getsize(modelname) < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/notebooks')
                time.sleep(5)
317
+
318
+
319
+
320
def downloadmodel_path_xl(MODEL_PATH):
    """Convert a local SDXL checkpoint file at MODEL_PATH to the diffusers
    layout under stable-diffusion-custom.

    On a bad path or failed conversion the function loops forever printing
    an error (notebook-style flow control) instead of raising.
    """

    import wget
    os.chdir('/notebooks')
    clear_output()
    if os.path.exists(str(MODEL_PATH)):

        print('Converting to diffusers...')
        call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

        # The converted UNet weights are the success marker.
        if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            clear_output()
            done()
        # If conversion succeeded this loop never runs; otherwise it keeps
        # printing until the user intervenes.
        while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
            print('Conversion error')
            os.chdir('/notebooks')
            time.sleep(5)
    else:
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/notebooks')
            time.sleep(5)
342
+
343
+
344
+
345
+
346
def dls_xl(Path_to_HuggingFace, MODEL_PATH, MODEL_LINK):
    """Dispatch to the appropriate model-download routine and return the
    resulting model directory.

    Priority: HuggingFace repo path, then local file path, then direct
    link; with all three empty, the stock SDXL base model is used.
    """
    os.chdir('/notebooks')

    custom_dir = "/notebooks/stable-diffusion-custom"

    if Path_to_HuggingFace != "":
        downloadmodel_hfxl(Path_to_HuggingFace)
        return custom_dir

    if MODEL_PATH != "":
        downloadmodel_path_xl(MODEL_PATH)
        return custom_dir

    if MODEL_LINK != "":
        downloadmodel_link_xl(MODEL_LINK)
        return custom_dir

    # No custom source given: fall back to the stock SDXL base model.
    mdlvxl()
    return "/notebooks/stable-diffusion-XL"
369
+
370
+
371
+
372
def sess_xl(Session_Name, MODEL_NAMExl):
    """Create or load a Dreambooth training session and return its paths.

    Session_Name  -- session identifier; prompted for interactively if empty,
                     spaces replaced with underscores.
    MODEL_NAMExl  -- path of the downloaded base model; empty means the user
                     skipped the "Model Download" cell.

    Returns (WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
             INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl).
    """
    # Imported so a missing package fails here, early; not used directly.
    import gdown
    import wget
    os.chdir('/notebooks')
    PT = ""

    # Keep prompting until a non-empty session name is supplied.
    while Session_Name == "":
        print('Input the Session Name:')
        Session_Name = input("")
    Session_Name = Session_Name.replace(" ", "_")

    WORKSPACE = '/notebooks/Fast-Dreambooth'

    INSTANCE_NAME = Session_Name
    OUTPUT_DIR = "/notebooks/models/"+Session_Name
    SESSION_DIR = WORKSPACE+"/Sessions/"+Session_Name
    INSTANCE_DIR = SESSION_DIR+"/instance_images"
    CAPTIONS_DIR = SESSION_DIR+'/captions'
    # Path where the trained LoRa weights for this session are saved.
    MDLPTH = str(SESSION_DIR+"/"+Session_Name+'.safetensors')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        print('Loading session with no previous LoRa model')
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed')

    elif not os.path.exists(str(SESSION_DIR)):
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMExl == "":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')
            # BUG FIX: the original re-tested MODEL_NAMExl=="" inside this
            # else branch, which can never be true here; dead check removed.

    else:
        # Session folder exists and already has a LoRa model.
        print('Session Loaded, proceed')

    return WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl
415
+
416
+
417
+
418
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR):
    """Show the image-upload UI, or ingest a server-side folder directly.

    With IMAGES_FOLDER_OPTIONAL empty, a FileUpload widget and an Upload
    button are displayed; clicking the button hands the selection to upld().
    Otherwise upld() is called immediately on the given folder.
    """
    # Jupyter checkpoint folders would otherwise be ingested as images.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    uploader = widgets.FileUpload(description="Choose images", accept='image/*, .txt', multiple=True)
    Upload = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon='',
    )
    output_area = widgets.Output()

    def on_upload_clicked(Upload):
        # Runs inside the output widget so upld's progress bars render there.
        with output_area:
            uploader.close()
            Upload.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
            done()

    if IMAGES_FOLDER_OPTIONAL == "":
        Upload.on_click(on_upload_clicked)
        display(uploader, Upload, output_area)
    else:
        # A server-side folder was supplied: no widget interaction needed.
        upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader)
        done()
447
+
448
+
449
+
450
def upld(Remove_existing_instance_images, Crop_images, Crop_size, Resize_to_1024_and_keep_aspect_ratio, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader):
    """Populate INSTANCE_DIR (images) and CAPTIONS_DIR (.txt captions) from
    either a server-side folder (IMAGES_FOLDER_OPTIONAL) or the FileUpload
    widget (uploader), optionally center-cropping to Crop_size.
    """

    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL != "":

        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Sidecar caption files next to the images move into CAPTIONS_DIR.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)

        if Crop_images:
            # Replace spaces in filenames first; they break the shell cp below.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                file = file.convert("RGB")
                file = ImageOps.exif_transpose(file)
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)
                    # BUG FIX: the original compared extension.upper() against
                    # lowercase "jpg" (always False); use a case-insensitive test.
                    if extension.lower() == "jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality=100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())
                else:
                    # Already the right size: plain copy, no re-encode.
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
        else:
            # BUG FIX: the original ran `cp -r FOLDER/. INSTANCE_DIR` once per
            # file inside a tqdm loop, copying the whole folder N times.
            # One copy produces the identical final state.
            call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL == "":
        # First pass: persist any uploaded .txt caption files.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1] == "txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        up = [file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                img = img.convert("RGB")
                img = ImageOps.exif_transpose(img)

                if extension.lower() == "jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                # Re-open the saved file and crop it in place if needed.
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                if file.size != (Crop_size, Crop_size):
                    image = crop_image(file, Crop_size)
                    if extension.lower() == "jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality=100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())
        else:
            for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                img = img.convert("RGB")
                extension = filename.split(".")[-1]

                if extension.lower() == "jpg":
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    # Normalize remaining spaces in filenames in both output folders.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')

    if Resize_to_1024_and_keep_aspect_ratio and not Crop_images:
        resize_keep_aspect(INSTANCE_DIR)
552
+
553
+
554
+
555
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: a Select list of instance images; picking
    one shows a 420x420 preview with an editable caption (stored as
    CAPTIONS_DIR/<name>.txt) and a Save button.
    """

    paths = ""
    out = ""
    widgets_l = ""
    clear_output()

    def Caption(path):
        # Build the preview + editor for one selected image.
        if path != "Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext = os.path.splitext(os.path.basename(path))[-1][1:]
            # BUG FIX: the original tested `ext=="jpg" or "JPG"`, which is
            # always truthy, so every preview was re-encoded as JPEG. Only
            # normalize real jpg/jpeg extensions to PIL's "JPEG" format name.
            if ext.lower() in ("jpg", "jpeg"):
                ext = "JPEG"

            # Load the existing caption, creating an empty file on first view.
            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                text = ""

            img = Image.open(os.path.join(INSTANCE_DIR, path))
            img = img.convert("RGB")
            img = img.resize((420, 420))
            image_bytes = BytesIO()
            # BUG FIX: the original passed a misspelled `qualiy=` keyword
            # that PIL silently ignored; it has been dropped.
            img.save(image_bytes, format=ext)
            image_bytes.seek(0)
            image = widgets.Image(
                value=image_bytes.read(),
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                # Overwrite the caption file with the textarea contents.
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)

    out = widgets.Output()

    def click(change):
        # Re-render the editor pane whenever the selection changes.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
617
+
618
+
619
+
620
def dbtrainxl(Unet_Training_Epochs, Text_Encoder_Training_Epochs, Unet_Learning_Rate, Text_Encoder_Learning_Rate, dim, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, ofstnselvl, Save_VRAM, Intermediary_Save_Epoch):
    """Run SDXL LoRa training via `accelerate launch`: an optional
    text-encoder pass (train_dreambooth_sdxl_TI.py) followed by the UNet
    pass (train_dreambooth_sdxl_lora.py). Success is detected by the
    presence of SESSION_DIR/<Session_Name>.safetensors afterwards.
    """

    # Jupyter checkpoint folders would otherwise be picked up as data.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # Fresh random seed per run; shared by both training passes.
    Seed=random.randint(1, 999999)

    # Each optional feature becomes either a CLI flag or an empty string.
    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    GC=''
    if Save_VRAM:
        GC='--gradient_checkpointing'

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    precision="fp16"

    def train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # Text-encoder (textual-inversion style) pass.
        print('Training the Text Encoder...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_TI.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    def train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # UNet LoRa pass; --saves enables intermediary checkpoints.
        print('Training the UNet...')
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_sdxl_lora.py \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --saves='+Intermediary_Save_Epoch+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(Unet_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    # Text-encoder pass only runs when both epoch counts are non-zero;
    # the UNet pass runs whenever Unet_Training_Epochs is non-zero.
    if Unet_Training_Epochs!=0:
        if Text_Encoder_Training_Epochs!=0:
            train_only_text(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Text_Encoder_Training_Epochs)
            clear_output()
        train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=Unet_Training_Epochs)
    else :
        print('Nothing to do')

    # The trained LoRa file in the session folder is the success marker.
    if os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
        clear_output()
        print("DONE, the LoRa model is in the session's folder")
    else:
        print("Something went wrong")
714
+
715
+
716
+
717
+
718
def sdcmf(MDLPTH):
    """Install or update ComfyUI under /notebooks and wire it to this
    session: symlink the session's LoRa folder and the shared SDXL base
    checkpoint, and patch server.py to print the Paperspace URL.
    """

    from slugify import slugify
    from huggingface_hub import HfApi, CommitOperationAdd, create_repo

    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    os.chdir('ComfyUI')
    # Discard any local modifications before pulling the latest revision.
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)

    # Expose this session's LoRa folder inside ComfyUI's loras directory.
    if os.path.exists(MDLPTH):
        call('ln -s '+os.path.dirname(MDLPTH)+' models/loras', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    clean_symlinks('models/loras')

    # Shared read-only SDXL base checkpoint from the Paperspace dataset mount.
    if not os.path.exists('models/checkpoints/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/checkpoints', shell=True)

    # Rewrite ComfyUI's startup log line to print the externally reachable
    # Paperspace URL instead of the local address.
    localurl="https://tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
    call("sed -i 's@logging.info(\"To see the GUI go to: {}://{}:{}\".format(scheme, address, port))@print(\"\u2714 Connected\")\\n            print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
746
+
747
+
748
def test(MDLPTH, User, Password):
    """Install/update AUTOMATIC1111's webui and build its launch arguments.

    MDLPTH:   path to the trained LoRA file; its parent directory is symlinked
              into models/Lora so the webui can load it.
    User,
    Password: gradio auth credentials; auth is disabled when either is empty.

    Returns the command-line argument string (configf) used to launch the
    webui against the SDXL base checkpoint. Heavy side effects: git clones,
    wget downloads, in-place patching of the installed gradio package.
    """


    auth=f"--gradio-auth {User}:{Password}"
    if User =="" or Password=="":
        auth=""

    os.chdir('/notebooks')
    # One-time download of the pre-packed model/repo dependencies archive.
    if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
        call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
        call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
        call('rm sd_mrep.tar.zst', shell=True)

    os.chdir('/notebooks/sd')
    if not os.path.exists('stable-diffusion-webui'):
        call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)

    # Bring the checkout to a clean, up-to-date master.
    os.chdir('/notebooks/sd/stable-diffusion-webui/')
    call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
    print('')
    call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    call('git pull', shell=True, stdout=open('/dev/null', 'w'))
    os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
    call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
    clear_output()


    # Link the shared SDXL base checkpoint from the Paperspace dataset mount.
    if not os.path.exists('models/Stable-diffusion/sd_xl_base_1.0.safetensors'):
        call('ln -s /datasets/stable-diffusion-xl/sd_xl_base_1.0.safetensors models/Stable-diffusion', shell=True)


    # Expose the session's LoRA folder inside the webui (output silenced:
    # the link may already exist).
    if os.path.exists(MDLPTH):
        call('ln -s '+os.path.dirname(MDLPTH)+' models/Lora', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

    clean_symlinks('models/Lora')  # prune links whose targets were removed

    # Replace the installed gradio blocks.py with a known-good copy before
    # patching it below.
    call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)

    localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')

    # In-place patch: force gradio to advertise the public Paperspace URL
    # over https and drop the lines that would override the scheme.
    for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
        if line.strip().startswith('self.server_name ='):
            line = f' self.server_name = "{localurl}"\n'
        if line.strip().startswith('self.protocol = "https"'):
            line = ' self.protocol = "https"\n'
        if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
            line = ''
        if line.strip().startswith('else "http"'):
            line = ''
        sys.stdout.write(line)


    os.chdir('/notebooks/sd/stable-diffusion-webui/modules')

    # Point the webui at the pre-extracted stablediffusion sources.
    call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
    call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)

    os.chdir('/notebooks/sd/stable-diffusion-webui')
    clear_output()

    # Final launch arguments: listen on port 6006 (proxied by Paperspace),
    # load the SDXL base checkpoint, optionally append gradio auth.
    configf="--disable-console-progressbars --no-gradio-queue --upcast-sampling --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt /notebooks/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors "+auth

    return configf
812
+
813
+
814
+
815
+
816
def clean():
    """Interactive widget to delete a saved Dreambooth session from storage.

    Displays a Select listing /notebooks/Fast-Dreambooth/Sessions plus a
    Remove button; clicking removes the selected session folder (and the
    matching /notebooks/models/<session> folder when present) and refreshes
    the list. Returns None; output happens through ipywidgets/display.
    """

    Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out=widgets.Output()

    d = widgets.Button(
        description='Remove',
        # NOTE(review): "Removet" is a typo in this user-facing tooltip.
        tooltip='Removet the selected session',
        disabled=False,
        button_style='warning',
        icon='warning'
    )

    # Button callback: delete the currently selected session, then refresh
    # the Select options so the removed entry disappears from the list.
    def rem(d):
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")


            else:
                # Nothing selected: tear the widgets down instead of deleting.
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s,d,out)
    else:
        print("NOTHING TO REMOVE")
859
+
860
+
861
+
862
def crop_image(im, size):
    """Smart-crop a PIL image to a ``size`` x ``size`` square.

    A focal point is estimated from three weighted detectors (Haar-cascade
    face/eye/body detection, corner features, and a sliding entropy window);
    the image is first scaled so the crop square fits along its short side,
    then cropped as close to the focal point as the frame allows.

    im:   PIL.Image — assumed 3-channel RGB (the cv2 conversions below expect
          3 channels; TODO confirm callers never pass RGBA/greyscale).
    size: side length in pixels of the square crop.

    Returns a single-element list containing the cropped PIL.Image.
    """

    import cv2  # local import: cv2 is only needed when cropping is used

    def focal_point(im, settings):
        # Run each detector only when its weight is enabled, then merge the
        # per-detector centroids into one weighted average point.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        # Normalise weights over only the detectors that found something.
        weight_pref_total = 0
        if len(corner_points) > 0:
            weight_pref_total += settings.corner_points_weight
        if len(entropy_points) > 0:
            weight_pref_total += settings.entropy_points_weight
        if len(face_points) > 0:
            weight_pref_total += settings.face_points_weight

        corner_centroid = None
        if len(corner_points) > 0:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        entropy_centroid = None
        if len(entropy_points) > 0:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        face_centroid = None
        if len(face_points) > 0:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        average_point = poi_average(pois, settings)

        return average_point


    def image_face_points(im, settings):
        # NOTE(review): np.array(PIL) yields RGB but COLOR_BGR2GRAY assumes
        # BGR; the grey weights end up swapped, which is close enough for
        # cascade detection — confirm before reusing elsewhere.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        # Cascades tried in order; second element is the minimum feature size
        # as a fraction of the smallest image side.
        tries = [
            [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
            [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
        ]
        for t in tries:
            classifier = cv2.CascadeClassifier(t[0])
            minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                    minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:
                # was a bare `except:` — keep the best-effort skip but stop
                # swallowing KeyboardInterrupt/SystemExit
                continue

            if len(faces) > 0:
                # First cascade that matches anything wins; one POI per rect,
                # centered, with equal share of the total weight.
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
        return []


    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height)*0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        # Equal weight per corner so the centroid is an unweighted mean.
        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))

        return focal_points


    def image_entropy_points(im, settings):
        # Slide a crop-sized window along the long axis in 4 px steps and
        # keep the window with the highest entropy; square images have no
        # slack to slide over, so return no points.
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if (e > e_max):
                e_max = e
                crop_best = list(crop_current)

            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        # Single POI at the center of the best window.
        x_mid = int(crop_best[0] + settings.crop_width/2)
        y_mid = int(crop_best[1] + settings.crop_height/2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]


    def image_entropy(im):
        # greyscale image entropy over a 1-bit conversion
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        # Unweighted mean position of the points of interest.
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))


    def poi_average(pois, settings):
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        # `weight and ...` guards the division when no detector fired (weight == 0).
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)


    def is_landscape(w, h):
        return w > h


    def is_portrait(w, h):
        return h > w


    def is_square(w, h):
        return w == h


    class PointOfInterest:
        # Weighted 2-D point; `size` is only meaningful for debug drawing.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            return [
                self.x - size//2,
                self.y - size//2,
                self.x + size//2,
                self.y + size//2
            ]

    class Settings:
        # Bag of crop dimensions and per-detector weights.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width = size,
        crop_height = size,
        face_points_weight = 0.9,
        entropy_points_weight = 0.15,
        corner_points_weight = 0.5,
    )

    # Scale so the crop square exactly spans the image's short side.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
    im_debug = im.copy()

    focus = focal_point(im_debug, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    results = []

    results.append(im.crop(tuple(crop)))

    return results
1114
+
1115
+
1116
+
1117
def resize_keep_aspect(DIR, min_dimension=1024):
    """Resize every image in DIR in place so its smaller side is ``min_dimension`` px.

    Aspect ratio is preserved; the result overwrites the original file with
    lossless PNG compression settings (Lanczos interpolation). Only files
    ending in .png/.jpg/.jpeg/.webp are touched; unreadable files are skipped.

    DIR:           directory containing the images.
    min_dimension: target length of the shorter side (default 1024, the
                   previous hard-coded value — backward compatible).
    """
    # cv2 was referenced here without being imported; the sibling
    # crop_image() imports it locally, so do the same (harmless if a
    # top-level import also exists).
    import cv2

    for filename in os.listdir(DIR):
        if filename.lower().endswith(('.png', '.jpg', '.jpeg', '.webp')):
            image = cv2.imread(os.path.join(DIR, filename))

            # cv2.imread returns None on unreadable/corrupt files; skip them
            # instead of crashing on .shape below.
            if image is None:
                continue

            org_height, org_width = image.shape[0], image.shape[1]

            # Scale the shorter side up/down to min_dimension, keeping ratio.
            if org_width < org_height:
                new_width = min_dimension
                new_height = int(org_height * (min_dimension / org_width))
            else:
                new_height = min_dimension
                new_width = int(org_width * (min_dimension / org_height))

            resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LANCZOS4)

            cv2.imwrite(os.path.join(DIR, filename), resized_image, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])
1137
+
1138
+
1139
+
1140
def clean_symlinks(path):
    """Delete broken (dangling) symlinks directly under *path*.

    path: directory to scan; non-link entries and links whose targets still
          exist are left untouched. Returns None.
    """
    for entry in os.listdir(path):
        link = os.path.join(path, entry)
        # os.path.exists() follows the symlink and returns False when the
        # target is gone. The old check — os.path.exists(os.readlink(link)) —
        # resolved a *relative* readlink() target against the CWD instead of
        # the link's own directory, wrongly deleting valid relative symlinks.
        if os.path.islink(link) and not os.path.exists(link):
            os.remove(link)