TheLastBen committed on
Commit
9b9604b
1 Parent(s): d990b9b

Delete mainpaperspacev2.py

Files changed (1)
  1. mainpaperspacev2.py +0 -1469
mainpaperspacev2.py DELETED
@@ -1,1469 +0,0 @@
- from IPython.display import clear_output, display
- from subprocess import call, getoutput, check_output
- import ipywidgets as widgets
- import io
- from PIL import Image, ImageDraw
- import fileinput
- import time
- import os
- from os import listdir
- from os.path import isfile
- from tqdm import tqdm
- import gdown
- import random
- import sys
- import cv2
- from io import BytesIO
- import requests
- from collections import defaultdict
- from math import log, sqrt
- import numpy as np
-
- from urllib.parse import urlparse, parse_qs, unquote
- from urllib.request import urlopen, Request
- import tempfile
-
-
- def Deps(force_reinstall):
-
-     if not force_reinstall and os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
-         ntbk()
-         call('pip install --root-user-action=ignore --disable-pip-version-check -qq ./diffusers', shell=True, stdout=open('/dev/null', 'w'))
-         os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
-         os.environ['PYTHONWARNINGS'] = 'ignore'
-         print('Modules and notebooks updated, dependencies already installed')
-
-     else:
-         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
-         if not os.path.exists('/usr/local/lib/python3.9/dist-packages/safetensors'):
-             os.chdir('/usr/local/lib/python3.9/dist-packages')
-             call("rm -r torch torch-1.12.1+cu116.dist-info torchaudio* torchvision* PIL Pillow* transformers* numpy* gdown*", shell=True, stdout=open('/dev/null', 'w'))
-         ntbk()
-         if not os.path.exists('/models'):
-             call('mkdir /models', shell=True)
-         if not os.path.exists('/notebooks/models'):
-             call('ln -s /models /notebooks', shell=True)
-         if os.path.exists('/deps'):
-             call("rm -r /deps", shell=True)
-         call('mkdir /deps', shell=True)
-         if not os.path.exists('cache'):
-             call('mkdir cache', shell=True)
-         os.chdir('/deps')
-         call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
-         call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
-         depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps.tar.zst", "/deps/ppsdeps.tar.zst")
-         call('tar -C / --zstd -xf ppsdeps.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
-         call("sed -i 's@~/.cache@/notebooks/cache@' /usr/local/lib/python3.9/dist-packages/transformers/utils/hub.py", shell=True)
-         os.chdir('/notebooks')
-         call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
-         os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
-         os.environ['PYTHONWARNINGS'] = 'ignore'
-         if not os.path.exists('/notebooks/diffusers'):
-             call('ln -s /diffusers /notebooks', shell=True)
-         call("rm -r /deps", shell=True)
-         os.chdir('/notebooks')
-         clear_output()
-
-     done()
-
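- # Usage sketch (assumption: run from a Paperspace notebook cell):
- # Deps(force_reinstall=False) reuses an already-extracted environment and only
- # refreshes the notebooks; Deps(True) wipes /deps and reinstalls everything.
-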
- def depsinst(url, dst):
-     file_size = None
-     req = Request(url, headers={"User-Agent": "torch.hub"})
-     u = urlopen(req)
-     meta = u.info()
-     if hasattr(meta, 'getheaders'):
-         content_length = meta.getheaders("Content-Length")
-     else:
-         content_length = meta.get_all("Content-Length")
-     if content_length is not None and len(content_length) > 0:
-         file_size = int(content_length[0])
-
-     with tqdm(total=file_size, disable=False, mininterval=0.5,
-               bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
-         with open(dst, "wb") as f:
-             while True:
-                 buffer = u.read(8192)
-                 if len(buffer) == 0:
-                     break
-                 f.write(buffer)
-                 pbar.update(len(buffer))
-
- def ntbk():
-
-     os.chdir('/notebooks')
-     if not os.path.exists('Latest_Notebooks'):
-         call('mkdir Latest_Notebooks', shell=True)
-     else:
-         call('rm -r Latest_Notebooks', shell=True)
-         call('mkdir Latest_Notebooks', shell=True)
-     os.chdir('/notebooks/Latest_Notebooks')
-     call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
-     call('rm Notebooks.txt', shell=True)
-     os.chdir('/notebooks')
-
- def downloadmodel_hfv2(Path_to_HuggingFace):
-
-     if os.path.exists('/models/stable-diffusion-custom'):
-         call("rm -r /models/stable-diffusion-custom", shell=True)
-     clear_output()
-
-     if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
-         with open("/notebooks/Fast-Dreambooth/token.txt") as f:
-             token = f.read()
-         authe=f'https://USER:{token}@'
-     else:
-         authe="https://"
-
-     clear_output()
-     call("mkdir /models/stable-diffusion-custom", shell=True)
-     os.chdir("/models/stable-diffusion-custom")
-     call("git init", shell=True)
-     call("git lfs install --system --skip-repo", shell=True)
-     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
-     call("git config core.sparsecheckout true", shell=True)
-     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nfeature_extractor\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
-     call("git pull origin main", shell=True)
-     if os.path.exists('unet/diffusion_pytorch_model.bin'):
-         call("rm -r .git", shell=True)
-         os.chdir('/notebooks')
-         clear_output()
-         done()
-     while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-         print('Check the link you provided')
-         os.chdir('/notebooks')
-         time.sleep(5)
-
-
- def downloadmodel_pthv2(Model_Path, safetensors):
-
-     sftnsr=""
-     if safetensors:
-         sftnsr="--from_safetensors"
-
-     import wget
-     os.chdir('/models')
-     clear_output()
-     if os.path.exists(str(Model_Path)):
-
-         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
-         print('Detecting model version...')
-         Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+Model_Path, shell=True).decode('utf-8').replace('\n', '')
-         clear_output()
-         print(Custom_Model_Version+' Detected')
-         call('rm det.py', shell=True)
-
-         if Custom_Model_Version=='V2.1-512px':
-             call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
-             call('python convertodiffv2.py '+Model_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)
-         elif Custom_Model_Version=='V2.1-768px':
-             call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
-             call('python convertodiffv2.py '+Model_Path+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
-
-         call('rm convertodiffv2.py', shell=True)
-         if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-             os.chdir('/notebooks')
-             clear_output()
-             done()
-         while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-             print('Conversion error')
-             os.chdir('/notebooks')
-             time.sleep(5)
-     else:
-         while not os.path.exists(str(Model_Path)):
-             print('Wrong path, use the file explorer to copy the path')
-             os.chdir('/notebooks')
-             time.sleep(5)
-
-
- def downloadmodel_path_v2(MODEL_PATH):
-
-     modelname=os.path.basename(MODEL_PATH)
-     sftnsr=""
-     if modelname.split('.')[-1]=='safetensors':
-         sftnsr="--from_safetensors"
-
-     import wget
-     os.chdir('/models')
-     clear_output()
-     if os.path.exists(str(MODEL_PATH)):
-
-         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
-         print('Detecting model version...')
-         Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+MODEL_PATH, shell=True).decode('utf-8').replace('\n', '')
-         clear_output()
-         print(Custom_Model_Version+' Detected')
-         call('rm det.py', shell=True)
-
-         if Custom_Model_Version=='V2.1-512px':
-             call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
-             call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)
-
-         elif Custom_Model_Version=='V2.1-768px':
-             call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
-             call('python convertodiffv2.py '+MODEL_PATH+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
-
-         call('rm convertodiffv2.py', shell=True)
-         if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-             clear_output()
-             done()
-         while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-             print('Conversion error')
-             os.chdir('/notebooks')
-             time.sleep(5)
-     else:
-         while not os.path.exists(str(MODEL_PATH)):
-             print('Wrong path, use the file explorer to copy the path')
-             os.chdir('/notebooks')
-             time.sleep(5)
-
-
- def downloadmodel_lnkv2(Model_Link, safetensors):
-
-     sftnsr=""
-     if not safetensors:
-         modelnm="model.ckpt"
-     else:
-         modelnm="model.safetensors"
-         sftnsr="--from_safetensors"
-
-     import wget
-     os.chdir('/models')
-     call("gdown --fuzzy "+Model_Link+" -O /models/"+modelnm, shell=True)
-
-     if os.path.exists("/models/"+modelnm):
-         if os.path.getsize("/models/"+modelnm) > 1810671599:
-
-             wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
-             print('Detecting model version...')
-             Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+modelnm, shell=True).decode('utf-8').replace('\n', '')
-             clear_output()
-             print(Custom_Model_Version+' Detected')
-             call('rm det.py', shell=True)
-
-             if Custom_Model_Version=='V2.1-512px':
-                 call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
-                 call('python convertodiffv2.py /models/'+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)
-
-             elif Custom_Model_Version=='V2.1-768px':
-                 call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
-                 call('python convertodiffv2.py /models/'+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
-             call('rm convertodiffv2.py', shell=True)
-
-             if os.path.exists('stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-                 call('rm /models/'+modelnm, shell=True)
-                 os.chdir('/notebooks')
-                 clear_output()
-                 done()
-             else:
-                 while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-                     print('Conversion error')
-                     os.chdir('/notebooks')
-                     time.sleep(5)
-         else:
-             while os.path.getsize("/models/"+modelnm) < 1810671599:
-                 print('Wrong link, check that the link is valid')
-                 os.chdir('/notebooks')
-                 time.sleep(5)
-
-
- def downloadmodel_link_v2(MODEL_LINK):
-
-     import re
-     import wget
-     import gdown
-     from gdown.download import get_url_from_gdrive_confirmation
-
-     def getsrc(url):
-         parsed_url = urlparse(url)
-         if parsed_url.netloc == 'civitai.com':
-             src='civitai'
-         elif parsed_url.netloc == 'drive.google.com':
-             src='gdrive'
-         elif parsed_url.netloc == 'huggingface.co':
-             src='huggingface'
-         else:
-             src='others'
-         return src
-
-     src=getsrc(MODEL_LINK)
-
-     def get_name(url, gdrive):
-         if not gdrive:
-             response = requests.get(url, allow_redirects=False)
-             if "Location" in response.headers:
-                 redirected_url = response.headers["Location"]
-                 quer = parse_qs(urlparse(redirected_url).query)
-                 if "response-content-disposition" in quer:
-                     disp_val = quer["response-content-disposition"][0].split(";")
-                     for vals in disp_val:
-                         if vals.strip().startswith("filename="):
-                             filenm=unquote(vals.split("=", 1)[1].strip())
-                             return filenm.replace("\"","")
-         else:
-             headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
-             lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
-             res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
-             res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
-             content_disposition = unquote(res.headers["Content-Disposition"])
-             filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
-             return filenm
-
-     if src=='civitai':
-         modelname=get_name(MODEL_LINK, False)
-     elif src=='gdrive':
-         modelname=get_name(MODEL_LINK, True)
-     else:
-         modelname=os.path.basename(MODEL_LINK)
-
-     sftnsr=""
-     if modelname.split('.')[-1]!='safetensors':
-         modelnm="model.ckpt"
-     else:
-         modelnm="model.safetensors"
-         sftnsr="--from_safetensors"
-
-     os.chdir('/models')
-     call("gdown --fuzzy "+MODEL_LINK+" -O "+modelnm, shell=True)
-
-     if os.path.exists(modelnm):
-         if os.path.getsize(modelnm) > 1810671599:
-
-             wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
-             print('Detecting model version...')
-             Custom_Model_Version=check_output('python det.py '+sftnsr+' --MODEL_PATH '+modelnm, shell=True).decode('utf-8').replace('\n', '')
-             clear_output()
-             print(Custom_Model_Version+' Detected')
-             call('rm det.py', shell=True)
-
-             if Custom_Model_Version=='V2.1-512px':
-                 call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2.py', shell=True)
-                 call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1-base '+sftnsr, shell=True)
-
-             elif Custom_Model_Version=='V2.1-768px':
-                 call('wget -q -O convertodiffv2.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
-                 call('python convertodiffv2.py '+modelnm+' stable-diffusion-custom --v2 --reference_model stabilityai/stable-diffusion-2-1 '+sftnsr, shell=True)
-             call('rm convertodiffv2.py', shell=True)
-
-             if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-                 call('rm '+modelnm, shell=True)
-                 os.chdir('/notebooks')
-                 clear_output()
-                 done()
-             else:
-                 while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
-                     print('Conversion error')
-                     os.chdir('/notebooks')
-                     time.sleep(5)
-         else:
-             while os.path.getsize(modelnm) < 1810671599:
-                 print('Wrong link, check that the link is valid')
-                 os.chdir('/notebooks')
-                 time.sleep(5)
-
-
- def dlv2(Path_to_HuggingFace, Model_Path, Model_Link, Model_Version, safetensors):
-
-     if Path_to_HuggingFace != "":
-         downloadmodel_hfv2(Path_to_HuggingFace)
-         MODEL_NAMEv2="/models/stable-diffusion-custom"
-     elif Model_Path != "":
-         downloadmodel_pthv2(Model_Path, safetensors)
-         MODEL_NAMEv2="/models/stable-diffusion-custom"
-     elif Model_Link != "":
-         downloadmodel_lnkv2(Model_Link, safetensors)
-         MODEL_NAMEv2="/models/stable-diffusion-custom"
-     else:
-         if Model_Version=="512":
-             MODEL_NAMEv2="/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base"
-             print('Using the original V2-512 model')
-         elif Model_Version=="768":
-             MODEL_NAMEv2="/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"
-             print('Using the original V2-768 model')
-         else:
-             MODEL_NAMEv2=""
-             print('Wrong model version')
-
-     return MODEL_NAMEv2
-
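- # Usage sketch (hypothetical arguments): the first non-empty source wins, in
- # the order HuggingFace repo > local path > direct link > stock model, e.g.
- #   MODEL_NAMEv2 = dlv2('', '', '', '512', False)
- #   # -> '/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base'
-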
- def dlsv2(Path_to_HuggingFace, Model_Path, Model_Link, Model_Version):
-
-     if Path_to_HuggingFace != "":
-         downloadmodel_hfv2(Path_to_HuggingFace)
-         MODEL_NAMEv2="/models/stable-diffusion-custom"
-     elif Model_Path != "":
-         # this variant has no safetensors flag, so default to CKPT handling
-         downloadmodel_pthv2(Model_Path, False)
-         MODEL_NAMEv2="/models/stable-diffusion-custom"
-     elif Model_Link != "":
-         downloadmodel_lnkv2(Model_Link, False)
-         MODEL_NAMEv2="/models/stable-diffusion-custom"
-     else:
-         if Model_Version=="512":
-             MODEL_NAMEv2="/datasets/stable-diffusion-v2-1-base-diffusers/stable-diffusion-2-1-base"
-             print('Using the original V2-512 model')
-         elif Model_Version=="768":
-             MODEL_NAMEv2="/datasets/stable-diffusion-v2-1/stable-diffusion-2-1"
-             print('Using the original V2-768 model')
-         else:
-             MODEL_NAMEv2=""
-             print('Wrong model version')
-
-     return MODEL_NAMEv2
-
- def sessv2(Session_Name, Session_Link_optional, MODEL_NAMEv2):
-     import gdown
-     import wget
-     os.chdir('/notebooks')
-     PT=""
-
-     while Session_Name=="":
-         print('Input the Session Name:')
-         Session_Name=input("")
-     Session_Name=Session_Name.replace(" ","_")
-
-     WORKSPACE='/notebooks/Fast-Dreambooth'
-
-     if Session_Link_optional != "":
-         print('Downloading session...')
-         if not os.path.exists(str(WORKSPACE+'/Sessions')):
-             call("mkdir -p "+WORKSPACE+"/Sessions", shell=True)
-             time.sleep(1)
-         os.chdir(WORKSPACE+'/Sessions')
-         gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
-         os.chdir(Session_Name)
-         call("rm -r instance_images", shell=True)
-         call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
-         call("rm -r concept_images", shell=True)
-         call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
-         call("rm -r captions", shell=True)
-         call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
-         os.chdir('/notebooks')
-         clear_output()
-
-     INSTANCE_NAME=Session_Name
-     OUTPUT_DIR="/models/"+Session_Name
-     SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
-     CONCEPT_DIR=SESSION_DIR+"/concept_images"
-     INSTANCE_DIR=SESSION_DIR+"/instance_images"
-     CAPTIONS_DIR=SESSION_DIR+'/captions'
-     MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
-     resumev2=False
-
-     if os.path.exists(str(SESSION_DIR)):
-         mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
-         if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
-
-             def f(n):
-                 k=0
-                 for i in mdls:
-                     if k==n:
-                         call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
-                     k=k+1
-
-             k=0
-             print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')
-
-             for i in mdls:
-                 print(str(k)+'- '+i)
-                 k=k+1
-             n=input()
-             while int(n)>k-1:
-                 n=input()
-             if n!="000":
-                 f(int(n))
-                 print('Using the model '+mdls[int(n)]+" ...")
-                 time.sleep(4)
-             else:
-                 print('Skipping the intermediary checkpoints.')
-
-     if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
-         print('Loading session with no previous model, using the original model or the custom downloaded model')
-         if MODEL_NAMEv2=="":
-             print('No model found, use the "Model Download" cell to download a model.')
-         else:
-             print('Session Loaded, proceed to uploading instance images')
-
-     elif os.path.exists(MDLPTH):
-         print('Session found, loading the trained model ...')
-
-         wget.download('https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/det.py')
-         print('Detecting model version...')
-         Model_Version=check_output('python det.py --MODEL_PATH '+MDLPTH, shell=True).decode('utf-8').replace('\n', '')
-         clear_output()
-         print(Model_Version+' Detected')
-         call('rm det.py', shell=True)
-
-         if Model_Version=='V2.1-512px':
-             call('wget -q -O convertodiff.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/convertodiffv2.py', shell=True)
-             call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1-base', shell=True)
-         elif Model_Version=='V2.1-768px':
-             call('wget -q -O convertodiff.py https://github.com/TheLastBen/fast-stable-diffusion/raw/main/Dreambooth/convertodiffv2-768.py', shell=True)
-             call('python convertodiff.py '+MDLPTH+' '+OUTPUT_DIR+' --v2 --reference_model stabilityai/stable-diffusion-2-1', shell=True)
-         clear_output()
-         call('rm convertodiff.py', shell=True)
-         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-             resumev2=True
-             clear_output()
-             print('Session loaded.')
-         else:
-             print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
-
-     elif not os.path.exists(str(SESSION_DIR)):
-         call('mkdir -p '+INSTANCE_DIR, shell=True)
-         print('Creating session...')
-         if MODEL_NAMEv2=="":
-             print('No model found, use the "Model Download" cell to download a model.')
-         else:
-             print('Session created, proceed to uploading instance images')
-
-     return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2
-
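- # Usage sketch (hypothetical session name): sessv2 returns every path the
- # later cells rely on:
- #   PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, \
- #   CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAMEv2, resumev2 = \
- #       sessv2('my_session', '', MODEL_NAMEv2)
-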
- def done():
-     done = widgets.Button(
-         description='Done!',
-         disabled=True,
-         button_style='success',
-         tooltip='',
-         icon='check'
-     )
-     display(done)
-
-
- def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
-
-     if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
-         call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
-
-     uploader = widgets.FileUpload(description="Choose images", accept='image/*, .txt', multiple=True)
-     Upload = widgets.Button(
-         description='Upload',
-         disabled=False,
-         button_style='info',
-         tooltip='Click to upload the chosen instance images',
-         icon=''
-     )
-
-     def up(Upload):
-         with out:
-             uploader.close()
-             Upload.close()
-             upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
-             done()
-
-     out=widgets.Output()
-
-     if IMAGES_FOLDER_OPTIONAL=="":
-         Upload.on_click(up)
-         display(uploader, Upload, out)
-     else:
-         upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
-         done()
-
-
- def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
-
-     if Remove_existing_instance_images:
-         if os.path.exists(str(INSTANCE_DIR)):
-             call("rm -r "+INSTANCE_DIR, shell=True)
-         if os.path.exists(str(CAPTIONS_DIR)):
-             call("rm -r "+CAPTIONS_DIR, shell=True)
-
-     if not os.path.exists(str(INSTANCE_DIR)):
-         call("mkdir -p "+INSTANCE_DIR, shell=True)
-     if not os.path.exists(str(CAPTIONS_DIR)):
-         call("mkdir -p "+CAPTIONS_DIR, shell=True)
-
-     if IMAGES_FOLDER_OPTIONAL != "":
-
-         if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
-             call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)
-
-         if any(file.endswith('.txt') for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
-             call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
-         if Crop_images:
-             os.chdir(str(IMAGES_FOLDER_OPTIONAL))
-             call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
-             os.chdir('/notebooks')
-             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 extension = filename.split(".")[-1]
-                 identifier = filename.split(".")[0]
-                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
-                 file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
-                 width, height = file.size
-                 image = file
-                 if file.size != (Crop_size, Crop_size):
-                     image = crop_image(file, Crop_size)
-                     if extension.upper()=="JPG":
-                         image[0] = image[0].convert("RGB")
-                         image[0].save(new_path_with_file, format="JPEG", quality=100)
-                     else:
-                         image[0].save(new_path_with_file, format=extension.upper())
-                 else:
-                     call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
-
-         else:
-             call("cp -r "+IMAGES_FOLDER_OPTIONAL+"/. "+INSTANCE_DIR, shell=True)
-
-     elif IMAGES_FOLDER_OPTIONAL == "":
-         up=""
-         for file in uploader.value:
-             filename = file['name']
-             if filename.split(".")[-1]=="txt":
-                 with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
-                     f.write(bytes(file['content']).decode())
-         up=[file for file in uploader.value if not file['name'].endswith('.txt')]
-         if Crop_images:
-             for file in tqdm(up, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 filename = file['name']
-                 img = Image.open(io.BytesIO(file['content']))
-                 extension = filename.split(".")[-1]
-                 identifier = filename.split(".")[0]
-
-                 if extension.upper()=="JPG":
-                     img=img.convert("RGB")
-                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
-                 else:
-                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
-
-                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
-                 file = Image.open(new_path_with_file)
-                 width, height = file.size
-                 image = img
-                 if file.size != (Crop_size, Crop_size):
-                     image = crop_image(file, Crop_size)
-                     if extension.upper()=="JPG":
-                         image[0].save(new_path_with_file, format="JPEG", quality=100)
-                     else:
-                         image[0].save(new_path_with_file, format=extension.upper())
-
-         else:
-             for file in tqdm(uploader.value, bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
-                 filename = file['name']
-                 img = Image.open(io.BytesIO(file['content']))
-
-                 extension = filename.split(".")[-1]
-                 identifier = filename.split(".")[0]
-
-                 if extension.upper()=="JPG":
-                     img=img.convert("RGB")
-                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality=100)
-                 else:
-                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
-
-     if ren:
-         i=0
-         for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format='  |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
-             extension = filename.split(".")[-1]
-             identifier = filename.split(".")[0]
-             new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
-             call('mv "'+os.path.join(INSTANCE_DIR, filename)+'" "'+new_path_with_file+'"', shell=True)
-             i=i+1
-
-     os.chdir(INSTANCE_DIR)
-     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
-     os.chdir(CAPTIONS_DIR)
-     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
-     os.chdir('/notebooks')
-
-
- def caption(CAPTIONS_DIR, INSTANCE_DIR):
-
-     paths=""
-     out=""
-     widgets_l=""
-     clear_output()
-
-     def Caption(path):
-         if path!="Select an instance image to caption":
-
-             name = os.path.splitext(os.path.basename(path))[0]
-             ext = os.path.splitext(os.path.basename(path))[-1][1:]
-             if ext.lower()=="jpg":
-                 ext="JPEG"
-
-             if os.path.exists(CAPTIONS_DIR+"/"+name+'.txt'):
-                 with open(CAPTIONS_DIR+"/"+name+'.txt', 'r') as f:
-                     text = f.read()
-             else:
-                 with open(CAPTIONS_DIR+"/"+name+'.txt', 'w') as f:
-                     f.write("")
-                 text = ""
-
-             img=Image.open(os.path.join(INSTANCE_DIR, path))
-             img=img.convert("RGB")
-             img=img.resize((420, 420))
-             image_bytes = BytesIO()
-             img.save(image_bytes, format=ext, quality=10)
-             image_bytes.seek(0)
-             image_data = image_bytes.read()
-             img = image_data
-             image = widgets.Image(
-                 value=img,
-                 width=420,
-                 height=420
-             )
-             text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
-
-             def update_text(text):
-                 with open(CAPTIONS_DIR+"/"+name+'.txt', 'w') as f:
-                     f.write(text)
-
-             button = widgets.Button(description='Save', button_style='success')
-             button.on_click(lambda b: update_text(text_area.value))
-
-             return widgets.VBox([widgets.HBox([image, text_area, button])])
-
-     paths = os.listdir(INSTANCE_DIR)
-     widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
-
-     out = widgets.Output()
-
-     def click(change):
-         with out:
-             out.clear_output()
-             display(Caption(change.new))
-
-     widgets_l.observe(click, names='value')
-     display(widgets.HBox([widgets_l, out]))
-
-
- def dbtrainv2(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAMEv2, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resumev2, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
-
-     if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
-         call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
-     if os.path.exists(CONCEPT_DIR+"/.ipynb_checkpoints"):
-         call('rm -r '+CONCEPT_DIR+'/.ipynb_checkpoints', shell=True)
-     if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
-         call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)
-
-     if resumev2 and not Resume_Training:
-         print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume training the previous model. yes or no?')
-         while True:
-             ansres=input('')
-             if ansres=='no':
-                 Resume_Training = True
-                 resumev2 = False
-                 break
-             elif ansres=='yes':
-                 Resume_Training = False
-                 resumev2 = False
-                 break
-
-     while not Resume_Training and not os.path.exists(MODEL_NAMEv2+'/unet/diffusion_pytorch_model.bin'):
-         print('No model found, use the "Model Download" cell to download a model.')
-         time.sleep(5)
-
-     MODELT_NAME=MODEL_NAMEv2
-
-     Seed=random.randint(1, 999999)
-
-     ofstnse=""
-     if Offset_Noise:
-         ofstnse="--offset_noise"
-
-     extrnlcptn=""
-     if External_Captions:
-         extrnlcptn="--external_captions"
-
-     precision="fp16"
-
-     resuming=""
-     if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-         MODELT_NAME=OUTPUT_DIR
-         print('Resuming Training...')
-         resuming="Yes"
-     elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-         print('Previous model not found, training a new model...')
-         MODELT_NAME=MODEL_NAMEv2
-         while MODEL_NAMEv2=="":
-             print('No model found, use the "Model Download" cell to download a model.')
-             time.sleep(5)
-
-     trnonltxt=""
-     if UNet_Training_Steps==0:
-         trnonltxt="--train_only_text_encoder"
-
-     Enable_text_encoder_training = True
-     Enable_Text_Encoder_Concept_Training = True
-
-     if Text_Encoder_Training_Steps==0:
-         Enable_text_encoder_training = False
-     else:
-         stptxt=Text_Encoder_Training_Steps
-
-     if Text_Encoder_Concept_Training_Steps==0:
-         Enable_Text_Encoder_Concept_Training = False
-     else:
-         stptxtc=Text_Encoder_Concept_Training_Steps
-
-     if Save_Checkpoint_Every is None:
-         Save_Checkpoint_Every=1
-     stp=0
-     if Start_saving_from_the_step is None:
-         Start_saving_from_the_step=0
-     if Start_saving_from_the_step < 200:
-         Start_saving_from_the_step=Save_Checkpoint_Every
-     stpsv=Start_saving_from_the_step
-     if Save_Checkpoint_Every_n_Steps:
-         stp=Save_Checkpoint_Every
-
-     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
-         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
-         '+trnonltxt+' \
-         '+extrnlcptn+' \
-         '+ofstnse+' \
-         --train_text_encoder \
-         --image_captions_filename \
-         --dump_only_text_encoder \
-         --pretrained_model_name_or_path='+MODELT_NAME+' \
-         --instance_data_dir='+INSTANCE_DIR+' \
-         --output_dir='+OUTPUT_DIR+' \
-         --captions_dir='+CAPTIONS_DIR+' \
-         --instance_prompt='+PT+' \
-         --seed='+str(Seed)+' \
-         --resolution='+str(Resolution)+' \
-         --mixed_precision='+str(precision)+' \
-         --train_batch_size=1 \
-         --gradient_accumulation_steps=1 --gradient_checkpointing \
-         --use_8bit_adam \
-         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
-         --lr_scheduler="linear" \
-         --lr_warmup_steps=0 \
-         --max_train_steps='+str(Training_Steps), shell=True)
-
-     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
-         clear_output()
-         if resuming=="Yes":
-             print('Resuming Training...')
-         print('Training the UNet...')
-         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
-         '+extrnlcptn+' \
-         '+ofstnse+' \
-         --image_captions_filename \
-         --train_only_unet \
-         --Session_dir='+SESSION_DIR+' \
-         --save_starting_step='+str(stpsv)+' \
-         --save_n_steps='+str(stp)+' \
-         --pretrained_model_name_or_path='+MODELT_NAME+' \
-         --instance_data_dir='+INSTANCE_DIR+' \
-         --output_dir='+OUTPUT_DIR+' \
-         --captions_dir='+CAPTIONS_DIR+' \
-         --instance_prompt='+PT+' \
-         --seed='+str(Seed)+' \
-         --resolution='+str(Resolution)+' \
-         --mixed_precision='+str(precision)+' \
-         --train_batch_size=1 \
-         --gradient_accumulation_steps=1 --gradient_checkpointing \
-         --use_8bit_adam \
-         --learning_rate='+str(UNet_Learning_Rate)+' \
-         --lr_scheduler="linear" \
-         --lr_warmup_steps=0 \
-         --max_train_steps='+str(Training_Steps), shell=True)
-
-     if Enable_text_encoder_training:
-         print('Training the text encoder...')
-         if os.path.exists(OUTPUT_DIR+'/text_encoder_trained'):
-             call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
-         dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
-
-     if Enable_Text_Encoder_Concept_Training:
-         if os.path.exists(CONCEPT_DIR):
-             if os.listdir(CONCEPT_DIR)!=[]:
-                 clear_output()
-                 if resuming=="Yes":
-                     print('Resuming Training...')
-                 print('Training the text encoder on the concept...')
-                 dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
-             else:
-                 clear_output()
-                 if resuming=="Yes":
-                     print('Resuming Training...')
-                 print('No concept images found, skipping concept training...')
-                 Text_Encoder_Concept_Training_Steps=0
-                 time.sleep(8)
-         else:
-             clear_output()
-             if resuming=="Yes":
-                 print('Resuming Training...')
-             print('No concept images found, skipping concept training...')
-             Text_Encoder_Concept_Training_Steps=0
-             time.sleep(8)
-
-     if UNet_Training_Steps!=0:
-         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
-
-     if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0:
-         print('Nothing to do')
-     else:
-         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
-
-             call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
-             clear_output()
-             if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
-                 clear_output()
-                 print("DONE, the CKPT model is in the session's folder")
-             else:
-                 print("Something went wrong")
-
-         else:
-             print("Something went wrong")
-
-     return resumev2
-
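- # Flow note: dbtrainv2 trains the text encoder first (dump_only_textenc), then
- # optionally the text encoder on concept images, then the UNet, and finally
- # converts the diffusers folder back to a single CKPT with convertosdv2.py;
- # the returned resumev2 flag lets a later cell resume this session.
-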
- def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
-
-     if Previous_Session_Name!="":
-         print("Loading a previous session model")
-         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
-         path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
-
-         while not os.path.exists(path_to_trained_model):
-             print("There is no trained model in the previous session")
-             time.sleep(5)
-
-     elif Custom_Path!="":
-         print("Loading model from a custom path")
-         path_to_trained_model=Custom_Path
-
-         while not os.path.exists(path_to_trained_model):
-             print("Wrong Path")
-             time.sleep(5)
-
-     else:
-         print("Loading the trained model")
-         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
-         path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
-
-         while not os.path.exists(path_to_trained_model):
-             print("There is no trained model in this session")
-             time.sleep(5)
-
-     auth=f"--gradio-auth {User}:{Password}"
-     if User=="" or Password=="":
-         auth=""
-
-     os.chdir('/notebooks')
-     if not os.path.exists('/notebooks/sd/stablediffusiond'):  # reset later
-         call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
-         call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
-         call('rm sd_mrep.tar.zst', shell=True)
-
-     os.chdir('/notebooks/sd')
-     if not os.path.exists('stable-diffusion-webui'):
-         call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
-
-     os.chdir('/notebooks/sd/stable-diffusion-webui/')
-     call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
-     print('')
-     call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
-     call('git pull', shell=True, stdout=open('/dev/null', 'w'))
-     os.chdir('/notebooks')
-     clear_output()
-
-     call('wget -q -O /usr/local/lib/python3.9/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
-
-     localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
-
-     for line in fileinput.input('/usr/local/lib/python3.9/dist-packages/gradio/blocks.py', inplace=True):
-         if line.strip().startswith('self.server_name ='):
-             line = f'            self.server_name = "{localurl}"\n'
-         if line.strip().startswith('self.protocol = "https"'):
-             line = '            self.protocol = "https"\n'
-         if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
-             line = ''
-         if line.strip().startswith('else "http"'):
-             line = ''
-         sys.stdout.write(line)
-
-     os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
-
-     call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
-     call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
-     call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
-
-     call("sed -i 's@-> Network | None@@g' /notebooks/sd/stable-diffusion-webui/extensions-builtin/Lora/network.py", shell=True)
-     call("sed -i 's@from scipy.ndimage.filters@from scipy.ndimage@g' /usr/local/lib/python3.9/dist-packages/basicsr/metrics/niqe.py", shell=True)
-
-     call("sed -i 's@\"quicksettings\": OptionInfo(.*@\"quicksettings\": OptionInfo(\"sd_model_checkpoint, sd_vae, CLIP_stop_at_last_layers, inpainting_mask_weight, initial_noise_multiplier\", \"Quicksettings list\"),@' /notebooks/sd/stable-diffusion-webui/modules/shared.py", shell=True)
-     os.chdir('/notebooks/sd/stable-diffusion-webui')
-     clear_output()
-
-     configf="--disable-console-progressbars --no-gradio-queue --upcast-sampling --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth
-
-     return configf
-
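- # Usage sketch (assumption: a separate notebook cell launches the webui with
- # the returned flag string; the launch command below is hypothetical):
- #   configf = testui('', '', 'my_session', '', '')
- #   call('python webui.py ' + configf, shell=True)
-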
- def clean():
-
-     Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
-
-     s = widgets.Select(
-         options=Sessions,
-         rows=5,
-         description='',
-         disabled=False
-     )
-
-     out=widgets.Output()
-
-     d = widgets.Button(
-         description='Remove',
-         disabled=False,
-         button_style='warning',
-         tooltip='Remove the selected session',
-         icon='warning'
-     )
-
-     def rem(d):
-         with out:
-             if s.value is not None:
-                 clear_output()
-                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
-                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
-                 if os.path.exists('/notebooks/models/'+s.value):
-                     call('rm -r /notebooks/models/'+s.value, shell=True)
-                 s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
-             else:
-                 d.close()
-                 s.close()
-                 clear_output()
-                 print("NOTHING TO REMOVE")
-
-     d.on_click(rem)
-     if s.value is not None:
-         display(s, d, out)
-     else:
-         print("NOTHING TO REMOVE")
-
-
- def hfv2(Name_of_your_concept, Save_concept_to, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
-
-     from slugify import slugify
-     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
-     from huggingface_hub import create_repo
-     from IPython.display import display_markdown
-
-     if Name_of_your_concept == "":
-         Name_of_your_concept = Session_Name
-     Name_of_your_concept=Name_of_your_concept.replace(" ","-")
-
-     if hf_token_write == "":
-         print('Your Hugging Face write access token : ')
-         hf_token_write=input()
-
-     hf_token = hf_token_write
-
-     api = HfApi()
-     your_username = api.whoami(token=hf_token)["name"]
-
-     repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
-     output_dir = '/notebooks/models/'+INSTANCE_NAME
-
-     def bar(prg):
-         clear_output()
-         br="Uploading to HuggingFace : |"+'█' * prg + ' ' * (25-prg)+'| '+str(prg*4)+"%"
-         return br
-
-     print(bar(1))
-
-     readme_text = f'''---
- license: creativeml-openrail-m
- tags:
- - text-to-image
- - stable-diffusion
- ---
- ### {Name_of_your_concept} Dreambooth model trained by {your_username} with TheLastBen's fast-DreamBooth notebook
-
- '''
-     # Save the readme to a file
-     with open("README.md", "w") as readme_file:
-         readme_file.write(readme_text)
-
-     operations = [
-         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
-         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt", path_or_fileobj=MDLPTH)
-     ]
-     create_repo(repo_id, private=True, token=hf_token)
-
-     api.create_commit(
-         repo_id=repo_id,
-         operations=operations,
-         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
-         token=hf_token
-     )
-
-     print(bar(8))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/scheduler",
-         path_in_repo="scheduler",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     print(bar(9))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/text_encoder",
-         path_in_repo="text_encoder",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     print(bar(12))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/tokenizer",
-         path_in_repo="tokenizer",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     print(bar(13))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/unet",
-         path_in_repo="unet",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     print(bar(21))
-
-     api.upload_folder(
-         folder_path=OUTPUT_DIR+"/vae",
-         path_in_repo="vae",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     print(bar(23))
-
-     api.upload_file(
-         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
-         path_in_repo="model_index.json",
-         repo_id=repo_id,
-         token=hf_token
-     )
-
-     print(bar(25))
-
-     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
-     done()
-
-
- def crop_image(im, size):
-
-     GREEN = "#0F0"
-     BLUE = "#00F"
-     RED = "#F00"
-
-     def focal_point(im, settings):
-         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
-         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
-         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
-
-         pois = []
-
-         weight_pref_total = 0
-         if len(corner_points) > 0:
-             weight_pref_total += settings.corner_points_weight
-         if len(entropy_points) > 0:
-             weight_pref_total += settings.entropy_points_weight
-         if len(face_points) > 0:
-             weight_pref_total += settings.face_points_weight
-
-         corner_centroid = None
-         if len(corner_points) > 0:
-             corner_centroid = centroid(corner_points)
-             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
-             pois.append(corner_centroid)
-
-         entropy_centroid = None
-         if len(entropy_points) > 0:
-             entropy_centroid = centroid(entropy_points)
-             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
-             pois.append(entropy_centroid)
-
-         face_centroid = None
-         if len(face_points) > 0:
-             face_centroid = centroid(face_points)
-             face_centroid.weight = settings.face_points_weight / weight_pref_total
-             pois.append(face_centroid)
-
-         average_point = poi_average(pois, settings)
-
-         return average_point
-
-     def image_face_points(im, settings):
-
-         np_im = np.array(im)
-         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
-
-         tries = [
-             [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
-             [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
-         ]
-         for t in tries:
-             classifier = cv2.CascadeClassifier(t[0])
-             minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
-             try:
-                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
-                                                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
-             except Exception:
-                 continue
-
-             if len(faces) > 0:
-                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
-                 return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
-         return []
-
-     def image_corner_points(im, settings):
-         grayscale = im.convert("L")
-
-         gd = ImageDraw.Draw(grayscale)
-         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
-
-         np_im = np.array(grayscale)
-
-         points = cv2.goodFeaturesToTrack(
-             np_im,
-             maxCorners=100,
-             qualityLevel=0.04,
-             minDistance=min(grayscale.width, grayscale.height)*0.06,
-             useHarrisDetector=False,
-         )
-
-         if points is None:
-             return []
-
-         focal_points = []
-         for point in points:
-             x, y = point.ravel()
-             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
-
-         return focal_points
-
-     def image_entropy_points(im, settings):
-         landscape = im.height < im.width
-         portrait = im.height > im.width
-         if landscape:
-             move_idx = [0, 2]
-             move_max = im.size[0]
-         elif portrait:
-             move_idx = [1, 3]
-             move_max = im.size[1]
-         else:
-             return []
-
-         e_max = 0
-         crop_current = [0, 0, settings.crop_width, settings.crop_height]
-         crop_best = crop_current
-         while crop_current[move_idx[1]] < move_max:
-             crop = im.crop(tuple(crop_current))
-             e = image_entropy(crop)
-
-             if e > e_max:
-                 e_max = e
-                 crop_best = list(crop_current)
-
-             crop_current[move_idx[0]] += 4
-             crop_current[move_idx[1]] += 4
-
-         x_mid = int(crop_best[0] + settings.crop_width/2)
-         y_mid = int(crop_best[1] + settings.crop_height/2)
-
-         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
-
-     def image_entropy(im):
-         # greyscale image entropy
-         # band = np.asarray(im.convert("L"))
-         band = np.asarray(im.convert("1"), dtype=np.uint8)
-         hist, _ = np.histogram(band, bins=range(0, 256))
-         hist = hist[hist > 0]
-         return -np.log2(hist / hist.sum()).sum()
-
-     def centroid(pois):
-         x = [poi.x for poi in pois]
-         y = [poi.y for poi in pois]
-         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
-
-     def poi_average(pois, settings):
-         weight = 0.0
-         x = 0.0
-         y = 0.0
-         for poi in pois:
-             weight += poi.weight
-             x += poi.x * poi.weight
-             y += poi.y * poi.weight
-         avg_x = round(weight and x / weight)
-         avg_y = round(weight and y / weight)
-
-         return PointOfInterest(avg_x, avg_y)
-
-     def is_landscape(w, h):
-         return w > h
-
-     def is_portrait(w, h):
-         return h > w
-
-     def is_square(w, h):
-         return w == h
-
-     class PointOfInterest:
-         def __init__(self, x, y, weight=1.0, size=10):
-             self.x = x
-             self.y = y
-             self.weight = weight
-             self.size = size
-
-         def bounding(self, size):
-             return [
-                 self.x - size//2,
-                 self.y - size//2,
-                 self.x + size//2,
-                 self.y + size//2
-             ]
-
-     class Settings:
-         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
-             self.crop_width = crop_width
-             self.crop_height = crop_height
-             self.corner_points_weight = corner_points_weight
-             self.entropy_points_weight = entropy_points_weight
-             self.face_points_weight = face_points_weight
-
-     settings = Settings(
-         crop_width=size,
-         crop_height=size,
-         face_points_weight=0.9,
-         entropy_points_weight=0.15,
-         corner_points_weight=0.5,
-     )
-
-     scale_by = 1
-     if is_landscape(im.width, im.height):
-         scale_by = settings.crop_height / im.height
-     elif is_portrait(im.width, im.height):
-         scale_by = settings.crop_width / im.width
-     elif is_square(im.width, im.height):
-         if is_square(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_width / im.width
-         elif is_landscape(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_width / im.width
-         elif is_portrait(settings.crop_width, settings.crop_height):
-             scale_by = settings.crop_height / im.height
-
-     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
-     im_debug = im.copy()
-
-     focus = focal_point(im_debug, settings)
-
-     # take the focal point and turn it into crop coordinates that try to center over the focal
-     # point but then get adjusted back into the frame
-     y_half = int(settings.crop_height / 2)
-     x_half = int(settings.crop_width / 2)
-
-     x1 = focus.x - x_half
-     if x1 < 0:
-         x1 = 0
-     elif x1 + settings.crop_width > im.width:
-         x1 = im.width - settings.crop_width
-
-     y1 = focus.y - y_half
-     if y1 < 0:
-         y1 = 0
-     elif y1 + settings.crop_height > im.height:
-         y1 = im.height - settings.crop_height
-
-     x2 = x1 + settings.crop_width
-     y2 = y1 + settings.crop_height
-
-     crop = [x1, y1, x2, y2]
-
-     results = []
-
-     results.append(im.crop(tuple(crop)))
-
-     return results
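-
-
- # A minimal smoke test (assumption: executed as a script, never on import);
- # the synthetic test image is hypothetical, just to exercise the focal crop.
- if __name__ == '__main__':
-     test_im = Image.new('RGB', (768, 512), '#888')
-     ImageDraw.Draw(test_im).ellipse([500, 150, 700, 350], fill='#fff')
-     cropped = crop_image(test_im, 512)[0]
-     print(cropped.size)  # expected: (512, 512)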