TheLastBen committed
Commit
a66d873
1 Parent(s): 53462ae

Delete mainpaperspacev2.py

Files changed (1)
  1. mainpaperspacev2.py +0 -1255
mainpaperspacev2.py DELETED
@@ -1,1255 +0,0 @@
- from IPython.display import clear_output, display
- from subprocess import call, getoutput
- import ipywidgets as widgets
- import io
- from PIL import Image, ImageDraw
- import fileinput
- import time
- import os
- from os import listdir
- from os.path import isfile
- from tqdm import tqdm
- import gdown
- import random
- import sys
- import cv2
- from io import BytesIO
- import requests
- from collections import defaultdict
- from math import log, sqrt
- import numpy as np
-
-
- def Deps(force_reinstall):
-
-     # Skip reinstalling if the unpacked dependency snapshot is already present.
-     if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
-         print('Modules updated, dependencies already installed')
-     else:
-         print('Installing the dependencies...')
-         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
-         if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
-             os.chdir('/usr/local/lib/python3.9/dist-packages')
-             call("rm -r torch torch-1.12.0+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
-         os.chdir('/notebooks')
-         if not os.path.exists('/models'):
-             call('mkdir /models', shell=True)
-         if not os.path.exists('/notebooks/models'):
-             call('ln -s /models /notebooks', shell=True)
-         if os.path.exists('/deps'):
-             call("rm -r /deps", shell=True)
-         call('mkdir /deps', shell=True)
-         if not os.path.exists('cache'):
-             call('mkdir cache', shell=True)
-         os.chdir('/deps')
-         # Pre-built apt packages plus a zstd snapshot of the Python dependencies.
-         call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
-         call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
-         call('wget -q https://huggingface.co/TheLastBen/dependencies/resolve/main/pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
-         call('tar -C / --zstd -xf pps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
-         call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
-         os.chdir('/notebooks')
-         call("git clone --depth 1 -q --branch updt https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'))
-         if not os.path.exists('/notebooks/diffusers'):
-             call('ln -s /diffusers /notebooks', shell=True)
-         call("rm -r /deps", shell=True)
-         os.chdir('/notebooks')
-         clear_output()
-
-     done()
-
-
- def downloadmodel_hfv2(Path_to_HuggingFace):
-     import wget
-
-     if os.path.exists('/models/stable-diffusion-custom'):
-         call("rm -r /models/stable-diffusion-custom", shell=True)
-     clear_output()
-
-     if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
-         with open("/notebooks/Fast-Dreambooth/token.txt") as f:
-             token = f.read()
-         authe = f'https://USER:{token}@'
-     else:
-         authe = "https://"
-
-     clear_output()
-     # Sparse-checkout only the folders needed by the diffusers pipeline.
-     call("mkdir /models/stable-diffusion-custom", shell=True)
-     os.chdir("/models/stable-diffusion-custom")
-     call("git init", shell=True)
-     call("git lfs install --system --skip-repo", shell=True)
-     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
-     call("git config core.sparsecheckout true", shell=True)
-     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
-     call("git pull origin main", shell=True)
-     if os.path.exists('unet/diffusion_pytorch_model.bin'):
-         call("rm -r .git", shell=True)
-         os.chdir('/notebooks')
-         clear_output()
-         done()
-     while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-         print('Check the link you provided')
-         os.chdir('/notebooks')
-         time.sleep(5)
-
-
- def downloadmodel_pthv2(CKPT_Path, Custom_Model_Version):
-     import wget
-     os.chdir('/models')
-     clear_output()
-     if os.path.exists(str(CKPT_Path)):
-         wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
-         clear_output()
-         if Custom_Model_Version == '512':
-             call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
-         elif Custom_Model_Version == '768':
-             call('python convertodiffv2.py '+CKPT_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
-         call('rm convertodiffv2.py', shell=True)
-         if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-             os.chdir('/notebooks')
-             clear_output()
-             done()
-         while not os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-             print('Conversion error')
-             os.chdir('/notebooks')
-             time.sleep(5)
-
-     else:
-         while not os.path.exists(str(CKPT_Path)):
-             print('Wrong path, use the file explorer to copy the path')
-             os.chdir('/notebooks')
-             time.sleep(5)
-
-
131
- def downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version):
132
- import wget
133
- os.chdir('/models')
134
- call("gdown --fuzzy " +CKPT_Link+ " -O model.ckpt", shell=True)
135
-
136
- if os.path.exists('model.ckpt'):
137
- if os.path.getsize("model.ckpt") > 1810671599:
138
- wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py')
139
- if Custom_Model_Version=='512':
140
- call('python convertodiffv2.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
141
- elif Custom_Model_Version=='768':
142
- call('python convertodiffv2.py model.ckpt stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
143
- call('rm convertodiffv2.py', shell=True)
144
- if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
145
- call('rm model.ckpt', shell=True)
146
- os.chdir('/notebooks')
147
- clear_output()
148
- done()
149
- else:
150
- while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
151
- print('Conversion error')
152
- os.chdir('/notebooks')
153
- time.sleep(5)
154
- else:
155
- while os.path.getsize('/models/model.ckpt') < 1810671599:
156
- print('Wrong link, check that the link is valid')
157
- os.chdir('/notebooks')
158
- time.sleep(5)
159
-
160
-
161
-
162
-
- def dlv2(Path_to_HuggingFace, CKPT_Path, CKPT_Link, Model_Version, Custom_Model_Version):
-
-     if Path_to_HuggingFace != "":
-         downloadmodel_hfv2(Path_to_HuggingFace)
-         MODEL_NAMEv2 = "/models/stable-diffusion-custom"
-     elif CKPT_Path != "":
-         downloadmodel_pthv2(CKPT_Path, Custom_Model_Version)
-         MODEL_NAMEv2 = "/models/stable-diffusion-custom"
-     elif CKPT_Link != "":
-         downloadmodel_lnkv2(CKPT_Link, Custom_Model_Version)
-         MODEL_NAMEv2 = "/models/stable-diffusion-custom"
-     else:
-         if Model_Version == "512":
-             MODEL_NAMEv2 = "dataset"
-             print('Using the original V2-512 model')
-         elif Model_Version == "768":
-             MODEL_NAMEv2 = "/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"
-             print('Using the original V2-768 model')
-         else:
-             MODEL_NAMEv2 = ""
-             print('Wrong model version')
-
-     return MODEL_NAMEv2
-
-
- def sessv2(Session_Name, Session_Link_optional, Model_Version, MODEL_NAMEv2):
-     os.chdir('/notebooks')
-     PT = ""
-
-     while Session_Name == "":
-         print('Input the Session Name:')
-         Session_Name = input("")
-     Session_Name = Session_Name.replace(" ", "_")
-
-     WORKSPACE = '/notebooks/Fast-Dreambooth'
-
-     if Session_Link_optional != "":
-         print('Downloading session...')
-         if not os.path.exists(str(WORKSPACE+'/Sessions')):
-             call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
-             time.sleep(1)
-         os.chdir(WORKSPACE+'/Sessions')
-         gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
-         os.chdir(Session_Name)
-         call("rm -r instance_images", shell=True)
-         call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
-         call("rm -r concept_images", shell=True)
-         call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
-         call("rm -r captions", shell=True)
-         call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
-         os.chdir('/notebooks')
-         clear_output()
-
-     INSTANCE_NAME = Session_Name
-     OUTPUT_DIR = "/models/"+Session_Name
-     SESSION_DIR = WORKSPACE+"/Sessions/"+Session_Name
-     CONCEPT_DIR = SESSION_DIR+"/concept_images"
-     INSTANCE_DIR = SESSION_DIR+"/instance_images"
-     CAPTIONS_DIR = SESSION_DIR+'/captions'
-     MDLPTH = str(SESSION_DIR+"/"+Session_Name+'.ckpt')
-     resumev2 = False
-
-     if os.path.exists(str(SESSION_DIR)):
-         mdls = [ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1] == "ckpt"]
-         if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
-
-             def f(n):
-                 k = 0
-                 for i in mdls:
-                     if k == n:
-                         call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
-                     k = k+1
-
-             k = 0
-             print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')
-
-             for i in mdls:
-                 print(str(k)+'- '+i)
-                 k = k+1
-             n = input()
-             while int(n) > k-1:
-                 n = input()
-             if n != "000":
-                 f(int(n))
-                 print('Using the model '+ mdls[int(n)]+" ...")
-                 time.sleep(8)
-             else:
-                 print('Skipping the intermediary checkpoints.')
-
-     if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
-         print('Loading session with no previous model, using the original model or the custom downloaded model')
-         if MODEL_NAMEv2 == "":
-             print('No model found, use the "Model Download" cell to download a model.')
-         else:
-             print('Session Loaded, proceed to uploading instance images')
-
-     elif os.path.exists(MDLPTH):
-         print('Session found, loading the trained model ...')
-         if Model_Version == '512':
-             call("wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py", shell=True)
-             clear_output()
-             print('Session found, loading the trained model ...')
-             call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
-
-         elif Model_Version == '768':
-             call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
-             clear_output()
-             print('Session found, loading the trained model ...')
-             call('python /notebooks/convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
-
-         call('rm /notebooks/convertodiff.py', shell=True)
-
-         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-             resumev2 = True
-             clear_output()
-             print('Session loaded.')
-         else:
-             print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
-
-     elif not os.path.exists(str(SESSION_DIR)):
-         call('mkdir -p '+INSTANCE_DIR, shell=True)
-         print('Creating session...')
-         if MODEL_NAMEv2 == "":
-             print('No model found, use the "Model Download" cell to download a model.')
-         else:
-             print('Session created, proceed to uploading instance images')
-
-     return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
-
-
- def done():
-     btn = widgets.Button(
-         description='Done!',
-         disabled=True,
-         button_style='success',
-         tooltip='',
-         icon='check'
-     )
-     display(btn)
-
-
- def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
-
-     uploader = widgets.FileUpload(description="Choose images", accept='image/*', multiple=True)
-     Upload = widgets.Button(
-         description='Upload',
-         disabled=False,
-         button_style='info',
-         tooltip='Click to upload the chosen instance images',
-         icon=''
-     )
-     out = widgets.Output()
-
-     def up(Upload):
-         with out:
-             uploader.close()
-             Upload.close()
-             upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
-             done()
-
-     if IMAGES_FOLDER_OPTIONAL == "":
-         Upload.on_click(up)
-         display(uploader, Upload, out)
-     else:
-         upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
-         done()
-
-
- def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
-
-     # Re-enable the captions folder if it was previously switched off.
-     if os.path.exists(CAPTIONS_DIR+"off"):
-         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
-         time.sleep(2)
-
-     if Remove_existing_instance_images:
-         if os.path.exists(str(INSTANCE_DIR)):
-             call("rm -r " +INSTANCE_DIR, shell=True)
-         if os.path.exists(str(CAPTIONS_DIR)):
-             call("rm -r " +CAPTIONS_DIR, shell=True)
-
-     if not os.path.exists(str(INSTANCE_DIR)):
-         call("mkdir -p " +INSTANCE_DIR, shell=True)
-     if not os.path.exists(str(CAPTIONS_DIR)):
-         call("mkdir -p " +CAPTIONS_DIR, shell=True)
-
-     if IMAGES_FOLDER_OPTIONAL != "":
-         if any(file.endswith('.txt') for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
-             call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
-         if Crop_images:
-             os.chdir(str(IMAGES_FOLDER_OPTIONAL))
-             call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
-             os.chdir('/notebooks')
-             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 extension = filename.split(".")[-1]
-                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
-                 file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
-                 if file.size != (Crop_size, Crop_size):
-                     image = crop_image(file, Crop_size)
-                     if extension.upper() in ("JPG", "JPEG"):
-                         image[0].save(new_path_with_file, format="JPEG", quality=100)
-                     else:
-                         image[0].save(new_path_with_file, format=extension.upper())
-                 else:
-                     call("cp '"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"' "+INSTANCE_DIR, shell=True)
-
-         else:
-             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
-
-     elif IMAGES_FOLDER_OPTIONAL == "":
-         # Uploaded .txt files become captions, everything else is treated as an image.
-         for filename, file in uploader.value.items():
-             if filename.split(".")[-1] == "txt":
-                 with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
-                     f.write(file['content'].decode())
-         up = [(filename, file) for filename, file in uploader.value.items() if filename.split(".")[-1] != "txt"]
-         if Crop_images:
-             for filename, file_info in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 img = Image.open(io.BytesIO(file_info['content']))
-                 extension = filename.split(".")[-1]
-
-                 if extension.upper() in ("JPG", "JPEG"):
-                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
-                 else:
-                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
-
-                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
-                 file = Image.open(new_path_with_file)
-                 if file.size != (Crop_size, Crop_size):
-                     image = crop_image(file, Crop_size)
-                     if extension.upper() in ("JPG", "JPEG"):
-                         image[0].save(new_path_with_file, format="JPEG", quality=100)
-                     else:
-                         image[0].save(new_path_with_file, format=extension.upper())
-
-         else:
-             for filename, file_info in tqdm(uploader.value.items(), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 img = Image.open(io.BytesIO(file_info['content']))
-                 extension = filename.split(".")[-1]
-
-                 if extension.upper() in ("JPG", "JPEG"):
-                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
-                 else:
-                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
-
-     if ren:
-         i = 0
-         for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
-             extension = filename.split(".")[-1]
-             new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
-             call('mv "'+os.path.join(INSTANCE_DIR, filename)+'" "'+new_path_with_file+'"', shell=True)
-             i = i+1
-
-     # Replace spaces in filenames, which break the training scripts.
-     os.chdir(INSTANCE_DIR)
-     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
-     os.chdir(CAPTIONS_DIR)
-     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
-     os.chdir('/notebooks')
-
-
- def caption(CAPTIONS_DIR, INSTANCE_DIR):
-
-     if os.path.exists(CAPTIONS_DIR+"off"):
-         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
-         time.sleep(2)
-
-     clear_output()
-
-     def Caption(path):
-         if path != "Select an instance image to caption":
-
-             name = os.path.splitext(os.path.basename(path))[0]
-             ext = os.path.splitext(os.path.basename(path))[-1][1:]
-             if ext.lower() == "jpg":
-                 ext = "JPEG"  # PIL expects "JPEG", not "JPG"
-
-             if os.path.exists(CAPTIONS_DIR+"/"+name+'.txt'):
-                 with open(CAPTIONS_DIR+"/"+name+'.txt', 'r') as f:
-                     text = f.read()
-             else:
-                 with open(CAPTIONS_DIR+"/"+name+'.txt', 'w') as f:
-                     f.write("")
-                 text = ""
-
-             img = Image.open(os.path.join(INSTANCE_DIR, path))
-             img = img.resize((420, 420))
-             image_bytes = BytesIO()
-             img.save(image_bytes, format=ext, quality=10)
-             image_bytes.seek(0)
-             image = widgets.Image(
-                 value=image_bytes.read(),
-                 width=420,
-                 height=420
-             )
-             text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
-
-             def update_text(text):
-                 with open(CAPTIONS_DIR+"/"+name+'.txt', 'w') as f:
-                     f.write(text)
-
-             button = widgets.Button(description='Save', button_style='success')
-             button.on_click(lambda b: update_text(text_area.value))
-
-             return widgets.VBox([widgets.HBox([image, text_area, button])])
-
-     paths = os.listdir(INSTANCE_DIR)
-     widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
-
-     out = widgets.Output()
-
-     def click(change):
-         with out:
-             out.clear_output()
-             display(Caption(change.new))
-
-     widgets_l.observe(click, names='value')
-     display(widgets.HBox([widgets_l, out]))
-
-
- def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Style_Training, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
-
-     if resumev2 and not Resume_Training:
-         print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model. yes or no?')
-         while True:
-             ansres = input('')
-             if ansres == 'no':
-                 Resume_Training = True
-                 break
-             elif ansres == 'yes':
-                 Resume_Training = False
-                 resumev2 = False
-                 break
-
-     while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
-         print('No model found, use the "Model Download" cell to download a model.')
-         time.sleep(5)
-
-     if os.path.exists(CAPTIONS_DIR+"off"):
-         call('mv '+CAPTIONS_DIR+"off"+' '+CAPTIONS_DIR, shell=True)
-         time.sleep(2)
-
-     MODELT_NAME = MODEL_NAMEv2
-
-     Seed = random.randint(1, 999999)
-
-     Style = ""
-     if Style_Training:
-         Style = "--Style"
-
-     extrnlcptn = ""
-     if External_Captions:
-         extrnlcptn = "--external_captions"
-
-     precision = "fp16"
-
-     # Gradient checkpointing is only worth the slowdown at higher resolutions.
-     GCUNET = "--gradient_checkpointing"
-     if Resolution <= 640:
-         GCUNET = ""
-
-     resuming = ""
-     if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-         MODELT_NAME = OUTPUT_DIR
-         print('Resuming Training...')
-         resuming = "Yes"
-     elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-         print('Previous model not found, training a new model...')
-         MODELT_NAME = MODEL_NAMEv2
-         while MODEL_NAMEv2 == "":
-             print('No model found, use the "Model Download" cell to download a model.')
-             time.sleep(5)
-
-     trnonltxt = ""
-     if UNet_Training_Steps == 0:
-         trnonltxt = "--train_only_text_encoder"
-
-     Enable_text_encoder_training = True
-     Enable_Text_Encoder_Concept_Training = True
-
-     if Text_Encoder_Training_Steps == 0 or External_Captions:
-         Enable_text_encoder_training = False
-     else:
-         stptxt = Text_Encoder_Training_Steps
-
-     if Text_Encoder_Concept_Training_Steps == 0:
-         Enable_Text_Encoder_Concept_Training = False
-     else:
-         stptxtc = Text_Encoder_Concept_Training_Steps
-
-     if Save_Checkpoint_Every is None:
-         Save_Checkpoint_Every = 1
-     stp = 0
-     if Start_saving_from_the_step is None:
-         Start_saving_from_the_step = 0
-     if Start_saving_from_the_step < 200:
-         Start_saving_from_the_step = Save_Checkpoint_Every
-     stpsv = Start_saving_from_the_step
-     if Save_Checkpoint_Every_n_Steps:
-         stp = Save_Checkpoint_Every
-
-     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
-         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
-         '+trnonltxt+' \
-         --train_text_encoder \
-         --image_captions_filename \
-         --dump_only_text_encoder \
-         --pretrained_model_name_or_path='+MODELT_NAME+' \
-         --instance_data_dir='+INSTANCE_DIR+' \
-         --output_dir='+OUTPUT_DIR+' \
-         --instance_prompt='+PT+' \
-         --seed='+str(Seed)+' \
-         --resolution=512 \
-         --mixed_precision='+str(precision)+' \
-         --train_batch_size=1 \
-         --gradient_accumulation_steps=1 --gradient_checkpointing \
-         --use_8bit_adam \
-         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
-         --lr_scheduler="polynomial" \
-         --lr_warmup_steps=0 \
-         --max_train_steps='+str(Training_Steps), shell=True)
-
-     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps):
-         clear_output()
-         if resuming == "Yes":
-             print('Resuming Training...')
-         print('Training the UNet...')
-         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
-         '+Style+' \
-         '+extrnlcptn+' \
-         --stop_text_encoder_training='+str(Text_Encoder_Training_Steps)+' \
-         --image_captions_filename \
-         --train_only_unet \
-         --Session_dir='+SESSION_DIR+' \
-         --save_starting_step='+str(stpsv)+' \
-         --save_n_steps='+str(stp)+' \
-         --pretrained_model_name_or_path='+MODELT_NAME+' \
-         --instance_data_dir='+INSTANCE_DIR+' \
-         --output_dir='+OUTPUT_DIR+' \
-         --instance_prompt='+PT+' \
-         --seed='+str(Seed)+' \
-         --resolution='+str(Resolution)+' \
-         --mixed_precision='+str(precision)+' \
-         --train_batch_size=1 \
-         --gradient_accumulation_steps=1 '+GCUNET+' \
-         --use_8bit_adam \
-         --learning_rate='+str(UNet_Learning_Rate)+' \
-         --lr_scheduler="polynomial" \
-         --lr_warmup_steps=0 \
-         --max_train_steps='+str(Training_Steps), shell=True)
-
-     if Enable_text_encoder_training:
-         print('Training the text encoder...')
-         if os.path.exists(OUTPUT_DIR+'/text_encoder_trained'):
-             call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
-         dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
-
-     if Enable_Text_Encoder_Concept_Training:
-         if os.path.exists(CONCEPT_DIR) and os.listdir(CONCEPT_DIR) != []:
-             clear_output()
-             if resuming == "Yes":
-                 print('Resuming Training...')
-             print('Training the text encoder on the concept...')
-             dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
-         else:
-             clear_output()
-             if resuming == "Yes":
-                 print('Resuming Training...')
-             print('No concept images found, skipping concept training...')
-             Text_Encoder_Concept_Training_Steps = 0
-             time.sleep(8)
-
-     if UNet_Training_Steps != 0:
-         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, Style, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
-
-     if UNet_Training_Steps == 0 and Text_Encoder_Concept_Training_Steps == 0 and External_Captions:
-         print('Nothing to do')
-     else:
-         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-             call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
-             clear_output()
-             if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
-                 clear_output()
-                 print("DONE, the CKPT model is in the session's folder")
-             else:
-                 print("Something went wrong")
-         else:
-             print("Something went wrong")
-
-     return resumev2
-
-
- def test(Custom_Path, Previous_Session_Name, Session_Name, User, Password, Use_localtunnel):
-
-     if Previous_Session_Name != "":
-         print("Loading a previous session model")
-         mdldir = '/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
-         path_to_trained_model = mdldir+"/"+Previous_Session_Name+'.ckpt'
-
-         while not os.path.exists(path_to_trained_model):
-             print("There is no trained model in the previous session")
-             time.sleep(5)
-
-     elif Custom_Path != "":
-         print("Loading model from a custom path")
-         path_to_trained_model = Custom_Path
-
-         while not os.path.exists(path_to_trained_model):
-             print("Wrong Path")
-             time.sleep(5)
-
-     else:
-         print("Loading the trained model")
-         mdldir = '/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
-         path_to_trained_model = mdldir+"/"+Session_Name+'.ckpt'
-
-         while not os.path.exists(path_to_trained_model):
-             print("There is no trained model in this session")
-             time.sleep(5)
-
-     auth = f"--gradio-auth {User}:{Password}"
-     if User == "" or Password == "":
-         auth = ""
-
-     os.chdir('/notebooks')
-     if not os.path.exists('/notebooks/sd/stablediffusion'):
-         call('wget -q -O sd_rep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_rep.tar.zst', shell=True)
-         call('tar --zstd -xf sd_rep.tar.zst', shell=True)
-         call('rm sd_rep.tar.zst', shell=True)
-
-     os.chdir('/notebooks/sd')
-     if not os.path.exists('stable-diffusion-webui'):
-         call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
-
-     os.chdir('/notebooks/sd/stable-diffusion-webui/')
-     call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
-     print('')
-     call('git pull', shell=True, stdout=open('/dev/null', 'w'))
-     os.chdir('/notebooks')
-     clear_output()
-
-     if not os.path.exists('/usr/lib/node_modules/localtunnel'):
-         call('npm install -g localtunnel --silent', shell=True, stdout=open('/dev/null', 'w'))
-
-     share = ''
-     call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
-
-     if not Use_localtunnel:
-         share = '--share'
-     else:
-         os.chdir('/notebooks')
-         call('nohup lt --port 7860 > srv.txt 2>&1 &', shell=True)
-         time.sleep(2)
-         call("grep -o 'https[^ ]*' /notebooks/srv.txt > srvr.txt", shell=True)
-         time.sleep(2)
-         srv = getoutput('cat /notebooks/srvr.txt')
-
-         # Patch gradio's blocks.py in place so it serves through the localtunnel URL.
-         for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
-             if line.strip().startswith('self.server_name ='):
-                 line = f'        self.server_name = "{srv[8:]}"\n'
-             if line.strip().startswith('self.server_port ='):
-                 line = '        self.server_port = 443\n'
-             if line.strip().startswith('self.protocol = "https"'):
-                 line = '        self.protocol = "https"\n'
-             if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
-                 line = ''
-             if line.strip().startswith('else "http"'):
-                 line = ''
-             sys.stdout.write(line)
-
-         call('rm /notebooks/srv.txt', shell=True)
-         call('rm /notebooks/srvr.txt', shell=True)
-
-     os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
-     call('wget -q -O paths.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/paths.py', shell=True)
-     call("sed -i 's@/content/gdrive/MyDrive/sd/stablediffusion@/notebooks/sd/stablediffusion@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
-     os.chdir('/notebooks/sd/stable-diffusion-webui')
-     clear_output()
-
-     configf = "--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --xformers --medvram --skip-version-check --ckpt "+path_to_trained_model+" "+auth+" "+share
-
-     return configf
-
-
- def clean():
-
-     Sessions = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
-
-     s = widgets.Select(
-         options=Sessions,
-         rows=5,
-         description='',
-         disabled=False
-     )
-
-     out = widgets.Output()
-
-     d = widgets.Button(
-         description='Remove',
-         disabled=False,
-         button_style='warning',
-         tooltip='Remove the selected session',
-         icon='warning'
-     )
-
-     def rem(d):
-         with out:
-             if s.value is not None:
-                 clear_output()
-                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
-                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
-                 if os.path.exists('/notebooks/models/'+s.value):
-                     call('rm -r /notebooks/models/'+s.value, shell=True)
-                 s.options = os.listdir("/notebooks/Fast-Dreambooth/Sessions")
-             else:
-                 d.close()
-                 s.close()
-                 clear_output()
-                 print("NOTHING TO REMOVE")
-
-     d.on_click(rem)
-     if s.value is not None:
-         display(s, d, out)
-     else:
-         print("NOTHING TO REMOVE")
-
-
- def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
-
-     from slugify import slugify
-     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
-     from huggingface_hub import create_repo
-     from IPython.display import display_markdown
-
-     if Name_of_your_concept == "":
-         Name_of_your_concept = Session_Name
-     Name_of_your_concept = Name_of_your_concept.replace(" ", "-")
-
-     if hf_token_write == "":
-         print('Your Hugging Face write access token : ')
-         hf_token_write = input()
-
-     hf_token = hf_token_write
-
-     api = HfApi()
-     your_username = api.whoami(token=hf_token)["name"]
-
-     if Save_concept_to == "Public_Library":
-         repo_id = f"sd-dreambooth-library/{slugify(Name_of_your_concept)}"
-         # Join the Concepts Library organization if you aren't part of it already
-         call("curl -X POST -H 'Authorization: Bearer '"+hf_token+" -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX", shell=True)
-     else:
-         repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
-     output_dir = '/notebooks/models/'+INSTANCE_NAME
-
-     def bar(prg):
-         br = "Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
-         return br
-
-     print("Loading...")
-
-     # Fetch the stock feature_extractor via a sparse checkout before uploading.
-     os.chdir(OUTPUT_DIR)
-     call('rm -r feature_extractor .git', shell=True)
-     clear_output()
-     call('git init', shell=True)
-     call('git lfs install --system --skip-repo', shell=True)
-     call('git remote add -f origin "https://USER:'+hf_token+'@huggingface.co/stabilityai/stable-diffusion-2-1"', shell=True)
-     call('git config core.sparsecheckout true', shell=True)
-     call('echo -e "\nfeature_extractor" > .git/info/sparse-checkout', shell=True)
-     call('git pull origin main', shell=True)
-     call('rm -r .git', shell=True)
-     os.chdir('/notebooks')
-     clear_output()
-
-     print(bar(1))
-
-     readme_text = f'''---
- license: creativeml-openrail-m
- tags:
- - text-to-image
- - stable-diffusion
- ---
- ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook
-
- Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb)
- '''
-     # Save the readme to a file
-     with open("README.md", "w") as readme_file:
-         readme_file.write(readme_text)
-
-     operations = [
-         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
-         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
-     ]
-     create_repo(repo_id, private=True, token=hf_token)
-
-     api.create_commit(
-         repo_id=repo_id,
-         operations=operations,
-         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
-         token=hf_token
-     )
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/feature_extractor",
-         path_in_repo="feature_extractor",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(8))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/scheduler",
-         path_in_repo="scheduler",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(9))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/text_encoder",
-         path_in_repo="text_encoder",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(12))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/tokenizer",
-         path_in_repo="tokenizer",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(13))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/unet",
-         path_in_repo="unet",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(21))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/vae",
-         path_in_repo="vae",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(23))
-
-     api.upload_file(
-         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
-         path_in_repo="model_index.json",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     clear_output()
-     print(bar(25))
-
-     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
-     done()
-
-
- def crop_image(im, size):
-
-     GREEN = "#0F0"
-     BLUE = "#00F"
-     RED = "#F00"
-
-     def focal_point(im, settings):
-         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
-         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
-         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
-
-         pois = []
-
-         weight_pref_total = 0
-         if len(corner_points) > 0:
-             weight_pref_total += settings.corner_points_weight
-         if len(entropy_points) > 0:
-             weight_pref_total += settings.entropy_points_weight
-         if len(face_points) > 0:
-             weight_pref_total += settings.face_points_weight
-
-         corner_centroid = None
-         if len(corner_points) > 0:
-             corner_centroid = centroid(corner_points)
-             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
-             pois.append(corner_centroid)
-
-         entropy_centroid = None
-         if len(entropy_points) > 0:
-             entropy_centroid = centroid(entropy_points)
-             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
-             pois.append(entropy_centroid)
-
-         face_centroid = None
-         if len(face_points) > 0:
-             face_centroid = centroid(face_points)
-             face_centroid.weight = settings.face_points_weight / weight_pref_total
-             pois.append(face_centroid)
-
-         average_point = poi_average(pois, settings)
-
-         return average_point
-
-     def image_face_points(im, settings):
-
-         np_im = np.array(im)
-         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
-
-         tries = [
-             [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
-         ]
-         for t in tries:
-             classifier = cv2.CascadeClassifier(t[0])
-             minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
-             try:
-                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
-                                                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
-             except Exception:
-                 continue
-
-             if len(faces) > 0:
-                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
-                 return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
-         return []
-
-     def image_corner_points(im, settings):
-         grayscale = im.convert("L")
-
-         # Mask out the bottom 10% of the image, which often holds watermarks or captions.
-         gd = ImageDraw.Draw(grayscale)
-         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
-
-         np_im = np.array(grayscale)
-
-         points = cv2.goodFeaturesToTrack(
-             np_im,
-             maxCorners=100,
-             qualityLevel=0.04,
-             minDistance=min(grayscale.width, grayscale.height)*0.06,
-             useHarrisDetector=False,
-         )
-
-         if points is None:
-             return []
-
-         focal_points = []
-         for point in points:
-             x, y = point.ravel()
-             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
-
-         return focal_points
-
-     def image_entropy_points(im, settings):
-         landscape = im.height < im.width
-         portrait = im.height > im.width
-         if landscape:
-             move_idx = [0, 2]
-             move_max = im.size[0]
-         elif portrait:
-             move_idx = [1, 3]
-             move_max = im.size[1]
-         else:
-             return []
-
-         # Slide a crop window along the long axis and keep the highest-entropy position.
-         e_max = 0
-         crop_current = [0, 0, settings.crop_width, settings.crop_height]
-         crop_best = crop_current
-         while crop_current[move_idx[1]] < move_max:
-             crop = im.crop(tuple(crop_current))
-             e = image_entropy(crop)
-
-             if e > e_max:
-                 e_max = e
-                 crop_best = list(crop_current)
-
-             crop_current[move_idx[0]] += 4
-             crop_current[move_idx[1]] += 4
-
-         x_mid = int(crop_best[0] + settings.crop_width/2)
-         y_mid = int(crop_best[1] + settings.crop_height/2)
-
-         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
-
-     def image_entropy(im):
-         # greyscale image entropy
-         # band = np.asarray(im.convert("L"))
-         band = np.asarray(im.convert("1"), dtype=np.uint8)
-         hist, _ = np.histogram(band, bins=range(0, 256))
-         hist = hist[hist > 0]
-         return -np.log2(hist / hist.sum()).sum()
-
-     def centroid(pois):
-         x = [poi.x for poi in pois]
-         y = [poi.y for poi in pois]
-         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
-
-     def poi_average(pois, settings):
-         weight = 0.0
-         x = 0.0
-         y = 0.0
-         for poi in pois:
-             weight += poi.weight
-             x += poi.x * poi.weight
-             y += poi.y * poi.weight
-         avg_x = round(weight and x / weight)
-         avg_y = round(weight and y / weight)
-
-         return PointOfInterest(avg_x, avg_y)
-
-     def is_landscape(w, h):
-         return w > h
-
-     def is_portrait(w, h):
-         return h > w
-
-     def is_square(w, h):
-         return w == h
-
-     class PointOfInterest:
-         def __init__(self, x, y, weight=1.0, size=10):
-             self.x = x
-             self.y = y
-             self.weight = weight
-             self.size = size
-
-         def bounding(self, size):
-             return [
-                 self.x - size//2,
-                 self.y - size//2,
-                 self.x + size//2,
-                 self.y + size//2
-             ]
-
-     class Settings:
-         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
-             self.crop_width = crop_width
-             self.crop_height = crop_height
-             self.corner_points_weight = corner_points_weight
-             self.entropy_points_weight = entropy_points_weight
-             self.face_points_weight = face_points_weight
-
-     settings = Settings(
-         crop_width = size,
-         crop_height = size,
-         face_points_weight = 0.9,
-         entropy_points_weight = 0.15,
-         corner_points_weight = 0.5,
-     )
-
-     scale_by = 1
-     if is_landscape(im.width, im.height):
-         scale_by = settings.crop_height / im.height
-     elif is_portrait(im.width, im.height):
-         scale_by = settings.crop_width / im.width
-     elif is_square(im.width, im.height):
-         if is_square(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_width / im.width
-         elif is_landscape(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_width / im.width
-         elif is_portrait(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_height / im.height
-
-     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
-     im_debug = im.copy()
-
-     focus = focal_point(im_debug, settings)
-
-     # take the focal point and turn it into crop coordinates that try to center over the focal
-     # point but then get adjusted back into the frame
-     y_half = int(settings.crop_height / 2)
-     x_half = int(settings.crop_width / 2)
-
-     x1 = focus.x - x_half
-     if x1 < 0:
-         x1 = 0
-     elif x1 + settings.crop_width > im.width:
-         x1 = im.width - settings.crop_width
-
-     y1 = focus.y - y_half
-     if y1 < 0:
-         y1 = 0
-     elif y1 + settings.crop_height > im.height:
-         y1 = im.height - settings.crop_height
-
-     x2 = x1 + settings.crop_width
-     y2 = y1 + settings.crop_height
-
-     crop = [x1, y1, x2, y2]
-
-     results = []
-
-     results.append(im.crop(tuple(crop)))
-
-     return results
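
For context, the deleted module was driven cell by cell from the notebook. A minimal sketch of the intended call order, assuming the Paperspace environment the file targets; the argument values below are illustrative assumptions, not part of this commit:

    # Hypothetical driver cells; values are placeholders.
    Deps(force_reinstall=False)

    # Pick a base model (empty strings fall through to the stock v2 checkpoints).
    MODEL_NAMEv2 = dlv2(Path_to_HuggingFace="", CKPT_Path="", CKPT_Link="",
                        Model_Version="768", Custom_Model_Version="768")

    # Create or load a session; uplder/caption/dbtrainv2/test follow the same pattern.
    (PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR,
     CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2,
     resumev2) = sessv2("my_session", "", "768", MODEL_NAMEv2)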