TheLastBen commited on
Commit
b70edda
1 Parent(s): fc0e984

Upload 4 files

Browse files
Scripts/mainpaperspaceA1111.py CHANGED
@@ -19,6 +19,7 @@ def Deps(force_reinstall):
19
 
20
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
21
  ntbk()
 
22
  print('Modules and notebooks updated, dependencies already installed')
23
 
24
  else:
 
19
 
20
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
21
  ntbk()
22
+ call('pip install --root-user-action=ignore --disable-pip-version-check -qq ./diffusers', shell=True, stdout=open('/dev/null', 'w'))
23
  print('Modules and notebooks updated, dependencies already installed')
24
 
25
  else:
Scripts/mainpaperspacev1.py CHANGED
@@ -31,6 +31,7 @@ def Deps(force_reinstall):
31
 
32
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
33
  ntbk()
 
34
  print('Modules and notebooks updated, dependencies already installed')
35
 
36
  else:
 
31
 
32
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
33
  ntbk()
34
+ call('pip install --root-user-action=ignore --disable-pip-version-check -qq ./diffusers', shell=True, stdout=open('/dev/null', 'w'))
35
  print('Modules and notebooks updated, dependencies already installed')
36
 
37
  else:
Scripts/mainpaperspacev2.py CHANGED
@@ -32,6 +32,7 @@ def Deps(force_reinstall):
32
 
33
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
34
  ntbk()
 
35
  print('Modules and notebooks updated, dependencies already installed')
36
 
37
  else:
 
32
 
33
  if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
34
  ntbk()
35
+ call('pip install --root-user-action=ignore --disable-pip-version-check -qq ./diffusers', shell=True, stdout=open('/dev/null', 'w'))
36
  print('Modules and notebooks updated, dependencies already installed')
37
 
38
  else:
Scripts/sdxllorapps.py ADDED
@@ -0,0 +1,1054 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from IPython.display import clear_output
2
+ from subprocess import call, getoutput, Popen
3
+ from IPython.display import display
4
+ import ipywidgets as widgets
5
+ import io
6
+ from PIL import Image, ImageDraw
7
+ import fileinput
8
+ import time
9
+ import os
10
+ from os import listdir
11
+ from os.path import isfile
12
+ import random
13
+ import sys
14
+ from io import BytesIO
15
+ import requests
16
+ from collections import defaultdict
17
+ from math import log, sqrt
18
+ import numpy as np
19
+ import sys
20
+ import fileinput
21
+ from subprocess import check_output
22
+ import six
23
+ import base64
24
+
25
+ from urllib.parse import urlparse, parse_qs, unquote
26
+ import urllib.request
27
+ from urllib.request import urlopen, Request
28
+
29
+ import tempfile
30
+ from tqdm import tqdm
31
+
32
+
33
+
34
+
35
def Deps(force_reinstall):
    """Install (or verify) the runtime dependencies for the SDXL LoRA notebook.

    force_reinstall -- when True, reinstall everything even if the marker
    package (safetensors) is already present under dist-packages.

    Side effects: heavy shell work — pip/dpkg installs, symlinks under
    /notebooks, removal and re-extraction of /deps.
    """
    # Local import keeps this fix self-contained; DEVNULL avoids leaking an
    # open /dev/null file descriptor on every call() below.
    from subprocess import DEVNULL

    if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
        # Fast path: dependencies already installed, just refresh notebooks
        # and diffusers. Bug fix: the original called ntbk(), which is not
        # defined in this file — the helper defined below is ntbks().
        ntbks()
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers -U', shell=True, stdout=DEVNULL)
        print('Modules and notebooks updated, dependencies already installed')

    else:
        call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=DEVNULL)
        if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
            # Remove the stock torch/vision/PIL/... so the pinned versions
            # from the ppsdeps archive below take their place.
            os.chdir('/usr/local/lib/python3.9/dist-packages')
            call("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=DEVNULL)
        ntbks()
        if not os.path.exists('/models'):
            call('mkdir /models', shell=True)
        if not os.path.exists('/notebooks/models'):
            call('ln -s /models /notebooks', shell=True)
        if os.path.exists('/deps'):
            call("rm -r /deps", shell=True)
        call('mkdir /deps', shell=True)
        if not os.path.exists('cache'):
            call('mkdir cache', shell=True)
        os.chdir('/deps')
        # apt packages listed in aptdeps.txt, installed via dpkg.
        call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
        call('dpkg -i *.deb', shell=True, stdout=DEVNULL)
        # Pre-built python dependency archive, extracted over /.
        depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
        call('tar -C / --zstd -xf ppsdeps.tar.zst', shell=True, stdout=DEVNULL)
        # Redirect the transformers cache into persistent notebook storage.
        call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
        os.chdir('/notebooks')
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq diffusers -U', shell=True, stdout=DEVNULL)
        call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=DEVNULL, stderr=DEVNULL)
        call('pip install --root-user-action=ignore --disable-pip-version-check -qq tomesd gradio==3.32', shell=True, stdout=DEVNULL)
        if not os.path.exists('/notebooks/diffusers'):
            call('ln -s /diffusers /notebooks', shell=True)
        call("rm -r /deps", shell=True)
        os.chdir('/notebooks')
        clear_output()

    done()
75
+
76
def depsinst(url, dst):
    """Stream *url* to the file *dst*, showing an 'Installing dependencies' bar.

    Sends a torch.hub User-Agent. The progress-bar total is taken from the
    Content-Length response header when the server provides one.

    Fixes: the redundant f.close() inside the with-block is gone, and the
    HTTP response object is now closed when the download ends or fails.
    """
    req = Request(url, headers={"User-Agent": "torch.hub"})
    u = urlopen(req)
    meta = u.info()
    # Python 2's message objects had getheaders(); Python 3 uses get_all().
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    file_size = None
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    try:
        with tqdm(total=file_size, disable=False, mininterval=0.5,
                  bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
            with open(dst, "wb") as f:
                while True:
                    buffer = u.read(8192)
                    if not buffer:
                        break
                    f.write(buffer)
                    pbar.update(len(buffer))
    finally:
        # The with-blocks close pbar and f; the response is ours to close.
        u.close()
98
+
99
+
100
def dwn2(url, dst, msg, auth):
    """Download *url* to *dst* with a tqdm bar labelled *msg*.

    auth -- HuggingFace token; when non-empty it is sent as HTTP Basic
    credentials with user "USER".

    Bug fix: the original only built `req` when auth was non-empty, so
    calling with an empty token raised NameError at urlopen(req).
    """
    headers = {"User-Agent": "torch.hub"}
    if auth != "":
        credentials = base64.b64encode(f"USER:{auth}".encode('utf-8')).decode('utf-8')
        headers['Authorization'] = f'Basic {credentials}'
    req = Request(url, headers=headers)

    file_size = None
    u = urlopen(req)
    meta = u.info()
    # Python 2's message objects had getheaders(); Python 3 uses get_all().
    if hasattr(meta, 'getheaders'):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])

    with tqdm(total=file_size, disable=False, mininterval=0.5,
              bar_format=msg+' |{bar:20}| {percentage:3.0f}%') as pbar:
        with open(dst, "wb") as f:
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                pbar.update(len(buffer))
126
+
127
+
128
def ntbks():
    """Refresh /notebooks/Latest_Notebooks with the current notebook set
    listed in the RNPD dataset's Notebooks.txt manifest."""
    os.chdir('/notebooks')
    # Start from a clean folder: drop any previous copy, then recreate it.
    if os.path.exists('Latest_Notebooks'):
        call('rm -r Latest_Notebooks', shell=True)
    call('mkdir Latest_Notebooks', shell=True)
    os.chdir('/notebooks/Latest_Notebooks')
    # wget -i downloads every URL listed in the manifest file.
    call('wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt', shell=True)
    call('rm Notebooks.txt', shell=True)
    os.chdir('/notebooks')
140
+
141
def done():
    """Display a green, disabled 'Done!' button as a completion marker."""
    marker = widgets.Button(
        description='Done!',
        disabled=True,
        button_style='success',
        tooltip='',
        icon='check',
    )
    display(marker)
150
+
151
+
152
+
153
def mdlvxl(Huggingface_token):
    """Download the gated stable-diffusion-xl-base-0.9 weights into
    /notebooks/stable-diffusion-XL.

    The repo skeleton comes from a sparse git checkout (configs, tokenizers,
    scheduler); the four large .bin weight files are fetched directly with
    dwn2, which sends Huggingface_token as HTTP basic auth. If the token has
    no access, the unet weights never appear and the warning loop spins.
    """
    os.chdir('/notebooks')

    # A folder without the unet weights is a broken partial download: redo it.
    if os.path.exists('stable-diffusion-XL') and not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.bin'):
        call('rm -r stable-diffusion-XL', shell=True)
    if not os.path.exists('stable-diffusion-XL'):
        print('Downloading SDXL model...')  # typo fix: was "Downlading"
        call('mkdir stable-diffusion-XL', shell=True)
        os.chdir('stable-diffusion-XL')
        call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git remote add -f origin https://USER:'+Huggingface_token+'@huggingface.co/stabilityai/stable-diffusion-xl-base-0.9', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        # Sparse checkout: take the repo layout but skip the big weight blobs,
        # which are fetched individually below.
        call('echo -e "\nscheduler\ntext_encoder\ntext_encoder_2\ntokenizer\ntokenizer_2\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!diffusion_pytorch_model.bin\n!pytorch_model.bin\n!*.fp16.bin" > .git/info/sparse-checkout', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        dwn2('https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/text_encoder/pytorch_model.bin', 'text_encoder/pytorch_model.bin', '1/4', Huggingface_token)
        dwn2('https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/text_encoder_2/pytorch_model.bin', 'text_encoder_2/pytorch_model.bin', '2/4', Huggingface_token)
        dwn2('https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.bin', '3/4', Huggingface_token)
        dwn2('https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.bin', '4/4', Huggingface_token)
        call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
        os.chdir('/notebooks')
        clear_output()
        while not os.path.exists('/notebooks/stable-diffusion-XL/unet/diffusion_pytorch_model.bin'):
            print('Invalid HF token, make sure you have access to the model')
            time.sleep(8)

    # The original's trailing if/else printed the same message in both
    # branches; collapsed into a single print.
    print('Using SDXL model')
183
+
184
+
185
+
186
def downloadmodel_hfxl(Path_to_HuggingFace):
    """Clone a diffusers-format model repo from HuggingFace into
    /notebooks/stable-diffusion-custom using a sparse git checkout.

    Path_to_HuggingFace -- the "owner/repo" suffix appended to huggingface.co/.
    Uses the token stored in Fast-Dreambooth/token.txt (when present) as
    basic-auth credentials in the remote URL. If the pull never produces the
    unet weights, the final loop prints a warning forever.
    """
    os.chdir('/notebooks')
    if os.path.exists('stable-diffusion-custom'):
        # Start clean: drop any previously downloaded custom model.
        call("rm -r stable-diffusion-custom", shell=True)
    clear_output()

    # Embed the stored HF token (if any) into the clone URL as basic auth.
    if os.path.exists('Fast-Dreambooth/token.txt'):
        with open("Fast-Dreambooth/token.txt") as f:
            token = f.read()
        authe=f'https://USER:{token}@'
    else:
        authe="https://"

    clear_output()
    call("mkdir stable-diffusion-custom", shell=True)
    os.chdir("stable-diffusion-custom")
    call("git init", shell=True)
    call("git lfs install --system --skip-repo", shell=True)
    call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
    call("git config core.sparsecheckout true", shell=True)
    # Sparse checkout: take the diffusers layout, skip safetensors/fp16 blobs.
    call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors\n!*.fp16.bin" > .git/info/sparse-checkout', shell=True)
    call("git pull origin main", shell=True)
    if os.path.exists('unet/diffusion_pytorch_model.bin'):
        # Success: drop git metadata and signal completion.
        call("rm -r .git", shell=True)
        os.chdir('/notebooks')
        clear_output()
        done()
    # If the pull failed (bad link / no access) this loop never exits; the
    # user is expected to interrupt the cell and fix the input.
    while not os.path.exists('/notebooks/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
        print('Check the link you provided')
        os.chdir('/notebooks')
        time.sleep(5)
218
+
219
+
220
+
221
def downloadmodel_link_xl(MODEL_LINK, Huggingface_token):
    """Download a checkpoint from a civitai / Google Drive / HuggingFace /
    direct link into /notebooks and convert it to a diffusers folder named
    stable-diffusion-custom.

    Huggingface_token is only used for huggingface.co links (basic auth via
    dwn2). Loops forever with a warning on conversion failure or a too-small
    (invalid) download.
    """
    import wget
    import gdown
    import re  # bug fix: re.search is used below but re was never imported
    from gdown.download import get_url_from_gdrive_confirmation

    def getsrc(url):
        # Classify the host to pick the right download strategy.
        parsed_url = urlparse(url)
        if parsed_url.netloc == 'civitai.com':
            src='civitai'
        elif parsed_url.netloc == 'drive.google.com':
            src='gdrive'
        elif parsed_url.netloc == 'huggingface.co':
            src='huggingface'
        else:
            src='others'
        return src

    src=getsrc(MODEL_LINK)

    def get_name(url, gdrive):
        """Resolve the real filename behind a redirecting download link.
        NOTE(review): returns None when a civitai redirect carries no
        content-disposition filename — confirm callers tolerate that."""
        if not gdrive:
            response = requests.get(url, allow_redirects=False)
            if "Location" in response.headers:
                redirected_url = response.headers["Location"]
                quer = parse_qs(urlparse(redirected_url).query)
                if "response-content-disposition" in quer:
                    disp_val = quer["response-content-disposition"][0].split(";")
                    for vals in disp_val:
                        if vals.strip().startswith("filename="):
                            filenm=unquote(vals.split("=", 1)[1].strip())
                            return filenm.replace("\"","")
        else:
            headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
            lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
            res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
            res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
            content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
            filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
            return filenm

    if src=='civitai':
        modelname=get_name(MODEL_LINK, False)
    elif src=='gdrive':
        modelname=get_name(MODEL_LINK, True)
    else:
        modelname=os.path.basename(MODEL_LINK)

    os.chdir('/notebooks')
    if src=='huggingface':
        dwn2(MODEL_LINK, modelname,'Downloading the Model', Huggingface_token)
    else:
        call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelname, shell=True)

    if os.path.exists(modelname):
        # ~1.8 GB threshold: anything smaller is an error page, not weights.
        if os.path.getsize(modelname) > 1810671599:

            print('Converting to diffusers...')
            call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelname+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

            if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                os.chdir('/notebooks')
                clear_output()
                done()
            else:
                while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
                    print('Conversion error')
                    os.chdir('/notebooks')
                    time.sleep(5)
        else:
            while os.path.getsize(modelname) < 1810671599:
                print('Wrong link, check that the link is valid')
                os.chdir('/notebooks')
                time.sleep(5)
296
+
297
+
298
+
299
def downloadmodel_path_xl(MODEL_PATH):
    """Convert the local checkpoint at MODEL_PATH into a diffusers folder at
    /notebooks/stable-diffusion-custom. Loops with a warning if the path is
    wrong or the conversion fails."""
    import wget
    os.chdir('/notebooks')
    clear_output()

    if not os.path.exists(str(MODEL_PATH)):
        # Bad path: warn forever so the user notices and fixes the cell input.
        while not os.path.exists(str(MODEL_PATH)):
            print('Wrong path, use the file explorer to copy the path')
            os.chdir('/notebooks')
            time.sleep(5)
    else:
        print('Converting to diffusers...')
        call('python /notebooks/diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path stable-diffusion-custom --from_safetensors', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))

        converted = 'stable-diffusion-custom/unet/diffusion_pytorch_model.bin'
        if os.path.exists(converted):
            clear_output()
            done()
        while not os.path.exists(converted):
            print('Conversion error')
            os.chdir('/notebooks')
            time.sleep(5)
321
+
322
+
323
+
324
+
325
def dls_xl(Huggingface_token, Path_to_HuggingFace, MODEL_PATH, MODEL_LINK):
    """Dispatch the model download by whichever source the user filled in
    (HF repo path > local path > direct link > default SDXL base) and return
    the resulting model directory path."""
    os.chdir('/notebooks')

    # Custom-model sources all land in the same folder; first match wins.
    if Path_to_HuggingFace != "":
        downloadmodel_hfxl(Path_to_HuggingFace)
        return "/notebooks/stable-diffusion-custom"

    if MODEL_PATH != "":
        downloadmodel_path_xl(MODEL_PATH)
        return "/notebooks/stable-diffusion-custom"

    if MODEL_LINK != "":
        downloadmodel_link_xl(MODEL_LINK, Huggingface_token)
        return "/notebooks/stable-diffusion-custom"

    # No custom source: fall back to the gated SDXL base model.
    if not os.path.exists('stable-diffusion-XL/unet/diffusion_pytorch_model.bin'):
        # The gated repo needs a token; prompt if the cell left it empty.
        if Huggingface_token == "":
            Huggingface_token = input('Your Huggingface Token: ')
        mdlvxl(Huggingface_token)
    else:
        # Already downloaded: mdlvxl just confirms and prints the status.
        mdlvxl('')
    return "/notebooks/stable-diffusion-XL"
354
+
355
+
356
def sess_xl(Session_Name, MODEL_NAMExl):
    """Create or load a LoRA training session and return its path set.

    Session_Name -- session identifier (prompted for when empty; spaces are
    replaced with underscores).
    MODEL_NAMExl -- path of the downloaded base model, or "" when missing.

    Returns (WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
    INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl).

    Fixes: removed the unused `import gdown` / `import wget` (gdown is a
    hard third-party import), the unused PT variable, and an unreachable
    duplicate MODEL_NAMExl=="" check inside the else branch.
    """
    os.chdir('/notebooks')

    while Session_Name=="":
        print('Input the Session Name:')
        Session_Name=input("")
    Session_Name=Session_Name.replace(" ","_")

    WORKSPACE='/notebooks/Fast-Dreambooth'

    INSTANCE_NAME=Session_Name
    OUTPUT_DIR="/notebooks/models/"+Session_Name
    SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
    INSTANCE_DIR=SESSION_DIR+"/instance_images"
    CAPTIONS_DIR=SESSION_DIR+'/captions'
    MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.safetensors')

    if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
        # Session folder exists but no trained LoRA yet.
        print('Loading session with no previous LoRa model')
        if MODEL_NAMExl=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session Loaded, proceed')

    elif not os.path.exists(str(SESSION_DIR)):
        # Brand-new session: create the instance-image folder tree.
        call('mkdir -p '+INSTANCE_DIR, shell=True)
        print('Creating session...')
        if MODEL_NAMExl=="":
            print('No model found, use the "Model Download" cell to download a model.')
        else:
            print('Session created, proceed to uploading instance images')

    else:
        # Session folder and a previous LoRA model both exist.
        print('Session Loaded, proceed')

    return WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMExl
399
+
400
+
401
+
402
def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR):
    """Collect instance images: either show an upload widget (when no source
    folder is given) or immediately import from IMAGES_FOLDER_OPTIONAL.
    All heavy lifting is delegated to upld()."""
    # Jupyter checkpoint folders would otherwise be treated as images.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)

    picker = widgets.FileUpload(description="Choose images", accept='image/*, .txt', multiple=True)
    upload_btn = widgets.Button(
        description='Upload',
        disabled=False,
        button_style='info',
        tooltip='Click to upload the chosen instance images',
        icon=''
    )
    out = widgets.Output()

    def _on_upload(_):
        # Freeze the widgets, then process whatever was selected.
        with out:
            picker.close()
            upload_btn.close()
            upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, picker)
            done()

    if IMAGES_FOLDER_OPTIONAL=="":
        upload_btn.on_click(_on_upload)
        display(picker, upload_btn, out)
    else:
        upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, picker)
        done()
431
+
432
+
433
+
434
def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader):
    """Import instance images (and .txt captions) into INSTANCE_DIR /
    CAPTIONS_DIR, either from a local folder or from the upload widget,
    optionally cropping each image to Crop_size x Crop_size.

    NOTE(review): crop_image is defined elsewhere in the project; from its
    use here it appears to return a sequence whose first element is the
    cropped PIL image — confirm against its definition.
    """
    from tqdm import tqdm
    # Optionally wipe previous session images/captions before importing.
    if Remove_existing_instance_images:
        if os.path.exists(str(INSTANCE_DIR)):
            call("rm -r " +INSTANCE_DIR, shell=True)
        if os.path.exists(str(CAPTIONS_DIR)):
            call("rm -r " +CAPTIONS_DIR, shell=True)

    if not os.path.exists(str(INSTANCE_DIR)):
        call("mkdir -p " +INSTANCE_DIR, shell=True)
    if not os.path.exists(str(CAPTIONS_DIR)):
        call("mkdir -p " +CAPTIONS_DIR, shell=True)

    if IMAGES_FOLDER_OPTIONAL !="":
        # --- Folder-based import path ---
        if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
            call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)

        # Caption sidecar files move to CAPTIONS_DIR first.
        if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
            call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
        if Crop_images:
            # Replace spaces in filenames; spaces break the shell commands below.
            os.chdir(str(IMAGES_FOLDER_OPTIONAL))
            call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
            os.chdir('/notebooks')
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]
                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
                width, height = file.size
                image = file
                if file.size !=(Crop_size, Crop_size):
                    # Crop, then save via the cropped image (JPEGs forced to RGB).
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0] = image[0].convert("RGB")
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())
                else:
                    # Already the right size: plain copy is enough.
                    call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)

        else:
            # No cropping requested: bulk-copy the whole folder.
            for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)

    elif IMAGES_FOLDER_OPTIONAL =="":
        # --- Widget-based import path ---
        up=""
        # Write caption .txt payloads first, then keep only the images.
        for file in uploader.value:
            filename = file['name']
            if filename.split(".")[-1]=="txt":
                with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
                    f.write(bytes(file['content']).decode())
        up=[file for file in uploader.value if not file['name'].endswith('.txt')]
        if Crop_images:
            for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))
                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]

                # Save the raw upload first, then re-open and crop if needed.
                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img=img.convert("RGB")
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

                new_path_with_file = os.path.join(INSTANCE_DIR, filename)
                file = Image.open(new_path_with_file)
                width, height = file.size
                image = img
                if file.size !=(Crop_size, Crop_size):
                    image=crop_image(file, Crop_size)
                    if extension.upper()=="JPG" or extension.upper()=="jpg":
                        image[0].save(new_path_with_file, format="JPEG", quality = 100)
                    else:
                        image[0].save(new_path_with_file, format=extension.upper())

        else:
            for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
                filename = file['name']
                img = Image.open(io.BytesIO(file['content']))

                extension = filename.split(".")[-1]
                identifier=filename.split(".")[0]

                if extension.upper()=="JPG" or extension.upper()=="jpg":
                    img=img.convert("RGB")
                    img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
                else:
                    img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())

    # Final pass: strip spaces from any remaining image/caption filenames.
    os.chdir(INSTANCE_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir(CAPTIONS_DIR)
    call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
    os.chdir('/notebooks')
534
+
535
+
536
+
537
def caption(CAPTIONS_DIR, INSTANCE_DIR):
    """Interactive caption editor: a selectable list of instance images on
    the left and, for the selected image, a 420x420 preview, an editable
    caption textarea, and a Save button writing CAPTIONS_DIR/<name>.txt.
    """
    clear_output()

    def Caption(path):
        """Build the preview + editor widget row for one image filename."""
        if path!="Select an instance image to caption":

            name = os.path.splitext(os.path.basename(path))[0]
            ext=os.path.splitext(os.path.basename(path))[-1][1:]
            # Bug fix: the original tested `ext=="jpg" or "JPG"`, which is
            # always true, so every preview was re-encoded as JPEG.
            # PIL's format name for .jpg files is "JPEG".
            if ext.upper()=="JPG":
                ext="JPEG"

            # Load the existing caption; create an empty file when missing.
            if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
                    text = f.read()
            else:
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write("")
                text = ""

            # Down-res preview only; the original image is never modified.
            img=Image.open(os.path.join(INSTANCE_DIR,path))
            img=img.convert("RGB")
            img=img.resize((420, 420))
            image_bytes = BytesIO()
            # Bug fix: 'qualiy' was a typo and was silently ignored by PIL.
            img.save(image_bytes, format=ext, quality=10)
            image_bytes.seek(0)
            image_data = image_bytes.read()
            image = widgets.Image(
                value=image_data,
                width=420,
                height=420
            )
            text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})

            def update_text(text):
                # Persist the edited caption as <name>.txt next to the image.
                with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
                    f.write(text)

            button = widgets.Button(description='Save', button_style='success')
            button.on_click(lambda b: update_text(text_area.value))

            return widgets.VBox([widgets.HBox([image, text_area, button])])

    paths = os.listdir(INSTANCE_DIR)
    widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
    out = widgets.Output()

    def click(change):
        # Re-render the editor pane whenever a new image is selected.
        with out:
            out.clear_output()
            display(Caption(change.new))

    widgets_l.observe(click, names='value')
    display(widgets.HBox([widgets_l, out]))
599
+
600
+
601
+
602
def dbtrainxl(Resume_Training, UNet_Training_Epochs, UNet_Learning_Rate, dim, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, ofstnselvl, Save_VRAM):
    """Launch SDXL LoRA UNet training via `accelerate launch` on the
    train_dreambooth_rnpd_sdxl_lora.py script, then report whether the
    session's .safetensors LoRA file was produced.

    Boolean toggles are turned into command-line flags (offset noise,
    gradient checkpointing, external captions, resume). Blocks in a warning
    loop when no base model is available and training is not resuming.
    """
    # Jupyter checkpoint folders would otherwise be picked up as data.
    if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
    if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
        call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)

    # No base model and not resuming: warn forever until the user fixes it.
    while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
        print('No model found, use the "Model Download" cell to download a model.')
        time.sleep(5)

    # Fresh random seed per run.
    Seed=random.randint(1, 999999)

    ofstnse=""
    if Offset_Noise:
        ofstnse="--offset_noise"

    GC=''
    if Save_VRAM:
        GC='--gradient_checkpointing'

    extrnlcptn=""
    if External_Captions:
        extrnlcptn="--external_captions"

    # Mixed-precision mode passed to the training script.
    precision="bf16"

    resume=""
    if Resume_Training and os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
        resume="--resume"

        print('Resuming Training...')
    elif Resume_Training and not os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
        while MODEL_NAME=="":
            print('No model found, use the "Model Download" cell to download a model.')
            time.sleep(5)
        print('Previous model not found, training a new model...')

    def train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs):
        # Builds the full accelerate command; resume/GC/dim/ofstnselvl/
        # CAPTIONS_DIR/UNet_Learning_Rate are taken from the enclosing scope.
        call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_rnpd_sdxl_lora.py \
        '+resume+' \
        '+ofstnse+' \
        '+extrnlcptn+' \
        --dim='+str(dim)+' \
        --ofstnselvl='+str(ofstnselvl)+' \
        --image_captions_filename \
        --Session_dir='+SESSION_DIR+' \
        --pretrained_model_name_or_path='+MODEL_NAME+' \
        --instance_data_dir='+INSTANCE_DIR+' \
        --output_dir='+OUTPUT_DIR+' \
        --captions_dir='+CAPTIONS_DIR+' \
        --seed='+str(Seed)+' \
        --resolution='+str(Resolution)+' \
        --mixed_precision='+str(precision)+' \
        --train_batch_size=1 \
        --gradient_accumulation_steps=1 '+GC+ ' \
        --use_8bit_adam \
        --learning_rate='+str(UNet_Learning_Rate)+' \
        --lr_scheduler="cosine" \
        --lr_warmup_steps=0 \
        --num_train_epochs='+str(Training_Epochs), shell=True)

    if UNet_Training_Epochs!=0:
        train_only_unet(SESSION_DIR, MODEL_NAME, INSTANCE_DIR, OUTPUT_DIR, Seed, Resolution, ofstnse, extrnlcptn, precision, Training_Epochs=UNet_Training_Epochs)
    else :
        print('Nothing to do')

    # Success is detected by the LoRA file the training script writes.
    if os.path.exists(SESSION_DIR+'/'+Session_Name+'.safetensors'):
        clear_output()
        print("DONE, the LoRa model is in the session's folder")
    else:
        print("Something went wrong")
679
+
680
+
681
+
682
+
683
def sd(MDLPTH):
    """Install or update ComfyUI under /notebooks and copy the trained LoRA
    (MDLPTH) into its loras folder; patches ComfyUI's startup banner so it
    prints the reachable Paperspace URL instead of the local address.

    Returns None. Fixes: the original ended with `return restored`, a name
    that was never defined, so every call raised NameError; the unused
    slugify/huggingface_hub imports and unused podid variable are removed.
    """
    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    os.chdir('/notebooks')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    os.chdir('ComfyUI')
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)
    os.chdir('/notebooks')
    if os.path.exists(MDLPTH):
        call('cp '+MDLPTH+' ComfyUI/models/loras', shell=True)

    # Patch the ComfyUI startup banner with the Paperspace-reachable URL.
    localurl="https://tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
    call("sed -i 's@print(\"To see the GUI go to: http://{}:{}\".format(address, port))@print(\"\u2714 Connected\")\\n print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
709
+
710
+
711
+
712
+
713
def sdcmf(MDLPTH, Download_SDXL_Model, Huggingface_token):
    """Install/update ComfyUI, copy the trained LoRA (MDLPTH) into its loras
    folder, and optionally download the gated SDXL base checkpoint (needs a
    HF token with access). Also patches ComfyUI's startup banner to print
    the reachable Paperspace URL.
    """
    # NOTE(review): these two imports are unused in this function.
    from slugify import slugify
    from huggingface_hub import HfApi, CommitOperationAdd, create_repo

    os.chdir('/notebooks')

    print('Installing/Updating the repo...')
    if not os.path.exists('ComfyUI'):
        call('git clone -q --depth 1 https://github.com/comfyanonymous/ComfyUI', shell=True)

    os.chdir('ComfyUI')
    call('git reset --hard', shell=True)
    print('')
    call('git pull', shell=True)

    if os.path.exists(MDLPTH):
        call('cp '+MDLPTH+' models/loras', shell=True)

    if Download_SDXL_Model and not os.path.exists('models/checkpoints/sd_xl_base_0.9.safetensors'):
        # The gated repo needs a token; prompt if the cell left it empty.
        if Huggingface_token=="":
            Huggingface_token=input('Your Huggingface Token: ')

        mdllnk= 'https://huggingface.co/stabilityai/stable-diffusion-xl-base-0.9/resolve/main/sd_xl_base_0.9.safetensors'
        try:
            # Probe the URL with the token first so we fail fast on 401/403
            # instead of downloading an error page.
            creds = base64.b64encode(f"USER:{Huggingface_token}".encode('utf-8')).decode('utf-8')
            req=urllib.request.Request(mdllnk)
            req.add_header('Authorization', f'Basic {creds}')
            urllib.request.urlopen(req)
            dwn2(mdllnk, 'models/checkpoints/sd_xl_base_0.9.safetensors','Downloading the Model', Huggingface_token)
        except urllib.error.HTTPError as e:
            print('The token provided has no access to the model, skipping model download...')
    else:
        print('Model already exists, skipping download...')

    # Patch the ComfyUI startup banner with the Paperspace-reachable URL.
    localurl="https://tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
    call("sed -i 's@print(\"To see the GUI go to: http://{}:{}\".format(address, port))@print(\"\u2714 Connected\")\\n print(\""+localurl+"\")@' /notebooks/ComfyUI/server.py", shell=True)
    os.chdir('/notebooks')
752
+
753
+
754
+
755
+
756
+
757
def clean():
    """Interactive session cleaner: pick a Dreambooth session and delete it.

    Renders an ipywidgets Select of the folders under
    /notebooks/Fast-Dreambooth/Sessions plus a Remove button; clicking the
    button deletes the selected session folder (and its /notebooks/models
    counterpart when present) and refreshes the list.  Returns None.

    BUGFIX: tooltip typo 'Removet' -> 'Remove'.
    NOTE(review): session names are concatenated into shell commands
    unquoted — names containing spaces/quotes would break the rm; the rest
    of the file uses the same convention, so left as-is.
    """

    Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")

    s = widgets.Select(
        options=Sessions,
        rows=5,
        description='',
        disabled=False
    )

    out = widgets.Output()

    d = widgets.Button(
        description='Remove',
        disabled=False,
        button_style='warning',
        tooltip='Remove the selected session',
        icon='warning'
    )

    def rem(d):
        # Button callback: delete the currently selected session, or tear the
        # widgets down when nothing is selectable anymore.
        with out:
            if s.value is not None:
                clear_output()
                print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
                call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
                if os.path.exists('/notebooks/models/'+s.value):
                    call('rm -r /notebooks/models/'+s.value, shell=True)
                # Refresh the list so the removed session disappears.
                s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
            else:
                d.close()
                s.close()
                clear_output()
                print("NOTHING TO REMOVE")

    d.on_click(rem)
    if s.value is not None:
        display(s, d, out)
    else:
        print("NOTHING TO REMOVE")
803
def crop_image(im, size):
    """Scale *im* and crop it to a ``size`` x ``size`` square centred on an
    automatically detected focal point (A1111-style autocrop).

    The focal point blends up to three detectors, weighted and normalised
    over whichever ones returned points:
      - corner/feature points (cv2.goodFeaturesToTrack),
      - a sliding-window entropy maximum,
      - Haar-cascade face/eye/body detections.

    im   -- PIL image (needs the file-level ``np`` and ``ImageDraw`` imports)
    size -- edge length in pixels of the square crop
    Returns a single-element list containing the cropped PIL image.

    BUGFIXES vs. original: bare ``except:`` narrowed to ``except Exception``
    (no longer swallows KeyboardInterrupt/SystemExit); removed unused
    GREEN/BLUE/RED constants, dead ``*_centroid = None`` pre-assignments,
    the unused ``im_debug`` copy, and the results-list boilerplate.
    """

    import cv2

    def focal_point(im, settings):
        # Run only the detectors whose weight is enabled.
        corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
        entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
        face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []

        pois = []

        # Normalise weights over the detectors that actually found points.
        weight_pref_total = 0
        if corner_points:
            weight_pref_total += settings.corner_points_weight
        if entropy_points:
            weight_pref_total += settings.entropy_points_weight
        if face_points:
            weight_pref_total += settings.face_points_weight

        if corner_points:
            corner_centroid = centroid(corner_points)
            corner_centroid.weight = settings.corner_points_weight / weight_pref_total
            pois.append(corner_centroid)

        if entropy_points:
            entropy_centroid = centroid(entropy_points)
            entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
            pois.append(entropy_centroid)

        if face_points:
            face_centroid = centroid(face_points)
            face_centroid.weight = settings.face_points_weight / weight_pref_total
            pois.append(face_centroid)

        return poi_average(pois, settings)

    def image_face_points(im, settings):
        # NOTE(review): cv2.COLOR_BGR2GRAY on a PIL (RGB) array swaps the
        # channel weights; kept as-is since detection tolerates it.
        np_im = np.array(im)
        gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

        # (cascade file, minimum detection size as a fraction of the short side)
        tries = [
            [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
            [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
        ]
        for cascade_file, min_frac in tries:
            classifier = cv2.CascadeClassifier(cascade_file)
            minsize = int(min(im.width, im.height) * min_frac)  # at least N percent of the smallest side
            try:
                faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                                                    minNeighbors=7, minSize=(minsize, minsize),
                                                    flags=cv2.CASCADE_SCALE_IMAGE)
            except Exception:  # BUGFIX: was a bare except
                continue

            if len(faces) > 0:
                rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
                return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2,
                                        size=abs(r[0] - r[2]), weight=1 / len(rects)) for r in rects]
        return []

    def image_corner_points(im, settings):
        grayscale = im.convert("L")

        # naive attempt at preventing focal points from collecting at watermarks near the bottom
        gd = ImageDraw.Draw(grayscale)
        gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")

        np_im = np.array(grayscale)

        points = cv2.goodFeaturesToTrack(
            np_im,
            maxCorners=100,
            qualityLevel=0.04,
            minDistance=min(grayscale.width, grayscale.height) * 0.06,
            useHarrisDetector=False,
        )

        if points is None:
            return []

        focal_points = []
        for point in points:
            x, y = point.ravel()
            focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))

        return focal_points

    def image_entropy_points(im, settings):
        # Slide a crop-sized window along the long axis, keeping the
        # position with the highest entropy.  Square images have no slack.
        landscape = im.height < im.width
        portrait = im.height > im.width
        if landscape:
            move_idx = [0, 2]
            move_max = im.size[0]
        elif portrait:
            move_idx = [1, 3]
            move_max = im.size[1]
        else:
            return []

        e_max = 0
        crop_current = [0, 0, settings.crop_width, settings.crop_height]
        crop_best = crop_current
        while crop_current[move_idx[1]] < move_max:
            crop = im.crop(tuple(crop_current))
            e = image_entropy(crop)

            if e > e_max:
                e_max = e
                crop_best = list(crop_current)

            crop_current[move_idx[0]] += 4
            crop_current[move_idx[1]] += 4

        x_mid = int(crop_best[0] + settings.crop_width / 2)
        y_mid = int(crop_best[1] + settings.crop_height / 2)

        return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]

    def image_entropy(im):
        # 1-bit image entropy; only histogram bins 0 and 1 are populated.
        band = np.asarray(im.convert("1"), dtype=np.uint8)
        hist, _ = np.histogram(band, bins=range(0, 256))
        hist = hist[hist > 0]
        return -np.log2(hist / hist.sum()).sum()

    def centroid(pois):
        x = [poi.x for poi in pois]
        y = [poi.y for poi in pois]
        return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois))

    def poi_average(pois, settings):
        # Weighted mean of the points; ``weight and x / weight`` guards the
        # all-zero-weight case (round(0) == 0).
        weight = 0.0
        x = 0.0
        y = 0.0
        for poi in pois:
            weight += poi.weight
            x += poi.x * poi.weight
            y += poi.y * poi.weight
        avg_x = round(weight and x / weight)
        avg_y = round(weight and y / weight)

        return PointOfInterest(avg_x, avg_y)

    def is_landscape(w, h):
        return w > h

    def is_portrait(w, h):
        return h > w

    def is_square(w, h):
        return w == h

    class PointOfInterest:
        # A weighted 2-D point; ``size`` is only used by ``bounding``.
        def __init__(self, x, y, weight=1.0, size=10):
            self.x = x
            self.y = y
            self.weight = weight
            self.size = size

        def bounding(self, size):
            # [left, top, right, bottom] box of the given size centred here.
            return [
                self.x - size // 2,
                self.y - size // 2,
                self.x + size // 2,
                self.y + size // 2
            ]

    class Settings:
        # Crop geometry plus per-detector weights.
        def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5,
                     entropy_points_weight=0.5, face_points_weight=0.5):
            self.crop_width = crop_width
            self.crop_height = crop_height
            self.corner_points_weight = corner_points_weight
            self.entropy_points_weight = entropy_points_weight
            self.face_points_weight = face_points_weight

    settings = Settings(
        crop_width=size,
        crop_height=size,
        face_points_weight=0.9,
        entropy_points_weight=0.15,
        corner_points_weight=0.5,
    )

    # Scale so the shorter edge matches the crop size and the window fits.
    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))

    focus = focal_point(im, settings)

    # Centre the crop window on the focal point, then clamp it back inside
    # the frame.
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    return [im.crop((x1, y1, x2, y2))]