TheLastBen committed
Commit 0b86f0a
1 Parent(s): 9ab2841

Update Scripts/mainpaperspacev1_311.py

Files changed (1)
  1. Scripts/mainpaperspacev1_311.py +1329 -0
Scripts/mainpaperspacev1_311.py CHANGED
@@ -0,0 +1,1329 @@
+ from IPython.display import clear_output
+ from subprocess import call, getoutput
+ from IPython.display import display
+ import ipywidgets as widgets
+ import io
+ from PIL import Image, ImageDraw, ImageOps
+ import fileinput
+ import time
+ import os
+ from os import listdir
+ from os.path import isfile
+ from tqdm import tqdm
+ import gdown
+ import random
+ import sys
+ import cv2
+ from io import BytesIO
+ import requests
+ from collections import defaultdict
+ from math import log, sqrt
+ import numpy as np
+ import six
+ import re
+
+ from urllib.parse import urlparse, parse_qs, unquote
+ from urllib.request import urlopen, Request
+ import tempfile
+
+
+
+
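+ # Helper module for TheLastBen's fast-DreamBooth Paperspace notebook (SD v1.5,
+ # Python 3.11); each function below backs one notebook cell and reports problems
+ # through printed messages rather than exceptions.
+
+ # Deps(): dependency cell. On a fresh machine it installs the apt packages and a
+ # prebuilt site-packages archive; when gradio is already present (and
+ # force_reinstall is off) it only refreshes the notebooks via ntbk().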
+ def Deps(force_reinstall):
+
+     if not force_reinstall and os.path.exists('/usr/local/lib/python3.11/dist-packages/gradio'):
+         ntbk()
+         os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+         os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+         print('Modules and notebooks updated, dependencies already installed')
+
+     else:
+         call("pip install --root-user-action=ignore --no-deps -q accelerate==0.12.0", shell=True, stdout=open('/dev/null', 'w'))
+         ntbk()
+         if not os.path.exists('/models'):
+             call('mkdir /models', shell=True)
+         if not os.path.exists('/notebooks/models'):
+             call('ln -s /models /notebooks', shell=True)
+         if os.path.exists('/deps'):
+             call("rm -r /deps", shell=True)
+         call('mkdir /deps', shell=True)
+         if not os.path.exists('cache'):
+             call('mkdir cache', shell=True)
+         os.chdir('/deps')
+         call('wget -q -i https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dependencies/aptdeps.txt', shell=True)
+         call('dpkg -i *.deb', shell=True, stdout=open('/dev/null', 'w'))
+         depsinst("https://huggingface.co/TheLastBen/dependencies/resolve/main/ppsdeps_311.tar.zst", "/deps/ppsdeps_311.tar.zst")
+         call('tar -C / --zstd -xf ppsdeps_311.tar.zst', shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         call("git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers /diffusers", shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+         os.environ['TORCH_HOME'] = '/notebooks/cache/torch'
+         os.environ['PYTHONWARNINGS'] = 'ignore'
+         os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+         call("sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.11/warnings.py", shell=True)
+         if not os.path.exists('/notebooks/diffusers'):
+             call('ln -s /diffusers /notebooks', shell=True)
+         call("rm -r /deps", shell=True)
+         os.chdir('/notebooks')
+         clear_output()
+
+     done()
+
+
+
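+ # depsinst(): streams the archive at `url` to `dst` in 8 KB chunks, sizing the
+ # tqdm bar from the Content-Length header when the server provides one.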
+ def depsinst(url, dst):
+     file_size = None
+     req = Request(url, headers={"User-Agent": "torch.hub"})
+     u = urlopen(req)
+     meta = u.info()
+     if hasattr(meta, 'getheaders'):
+         content_length = meta.getheaders("Content-Length")
+     else:
+         content_length = meta.get_all("Content-Length")
+     if content_length is not None and len(content_length) > 0:
+         file_size = int(content_length[0])
+
+     with tqdm(total=file_size, disable=False, mininterval=0.5,
+               bar_format='Installing dependencies |{bar:20}| {percentage:3.0f}%') as pbar:
+         with open(dst, "wb") as f:
+             while True:
+                 buffer = u.read(8192)
+                 if len(buffer) == 0:
+                     break
+                 f.write(buffer)
+                 pbar.update(len(buffer))
+
+
+ def ntbk():
+
+     os.chdir('/notebooks')
+     if not os.path.exists('Latest_Notebooks'):
+         call('mkdir Latest_Notebooks', shell=True)
+     else:
+         call('rm -r Latest_Notebooks', shell=True)
+         call('mkdir Latest_Notebooks', shell=True)
+     os.chdir('/notebooks/Latest_Notebooks')
+     call('wget -q -i https://huggingface.co/datasets/TheLastBen/PPS/raw/main/Notebooks.txt', shell=True)
+     call('rm Notebooks.txt', shell=True)
+     os.chdir('/notebooks')
+
+
+
+
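+ # downloadmodel_hf(): pulls a diffusers-format model from the Hugging Face Hub
+ # with a sparse git checkout (scheduler, text_encoder, tokenizer, unet, vae and
+ # model_index.json only, .safetensors excluded), authenticating with the token
+ # saved by the notebook if one exists.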
+ def downloadmodel_hf(Path_to_HuggingFace):
+     import wget
+
+     if os.path.exists('/models/stable-diffusion-custom'):
+         call("rm -r /models/stable-diffusion-custom", shell=True)
+     clear_output()
+
+     if os.path.exists('/notebooks/Fast-Dreambooth/token.txt'):
+         with open("/notebooks/Fast-Dreambooth/token.txt") as f:
+             token = f.read()
+         authe=f'https://USER:{token}@'
+     else:
+         authe="https://"
+
+     clear_output()
+     call("mkdir /models/stable-diffusion-custom", shell=True)
+     os.chdir("/models/stable-diffusion-custom")
+     call("git init", shell=True)
+     call("git lfs install --system --skip-repo", shell=True)
+     call('git remote add -f origin '+authe+'huggingface.co/'+Path_to_HuggingFace, shell=True)
+     call("git config core.sparsecheckout true", shell=True)
+     call('echo -e "\nscheduler\ntext_encoder\ntokenizer\nunet\nvae\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
+     call("git pull origin main", shell=True)
+     if os.path.exists('unet/diffusion_pytorch_model.bin'):
+         call("rm -r .git", shell=True)
+         call("rm model_index.json", shell=True)
+         wget.download('https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/Dreambooth/model_index.json')
+         os.chdir('/notebooks')
+         clear_output()
+         done()
+     while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+         print('Check the link you provided')
+         os.chdir('/notebooks')
+         time.sleep(5)
+
+
+
+
+ def downloadmodel_path(MODEL_PATH):
+
+     modelname=os.path.basename(MODEL_PATH)
+     sftnsr=""
+     if modelname.split('.')[-1]=='safetensors':
+         sftnsr="--from_safetensors"
+
+     import wget
+     os.chdir('/notebooks')
+     clear_output()
+     if os.path.exists(str(MODEL_PATH)):
+         call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
+         call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MODEL_PATH+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
+         clear_output()
+         call('rm config.yaml', shell=True)
+         if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             clear_output()
+             done()
+         while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+             print('Conversion error')
+             time.sleep(5)
+
+     else:
+         while not os.path.exists(str(MODEL_PATH)):
+             print('Wrong path, use the file explorer to copy the path')
+             time.sleep(5)
+
+
+
+
+ def downloadmodel_link(MODEL_LINK):
+
+     import wget
+     import gdown
+     from gdown.download import get_url_from_gdrive_confirmation
+
+
+     def getsrc(url):
+         parsed_url = urlparse(url)
+         if parsed_url.netloc == 'civitai.com':
+             src='civitai'
+         elif parsed_url.netloc == 'drive.google.com':
+             src='gdrive'
+         elif parsed_url.netloc == 'huggingface.co':
+             src='huggingface'
+         else:
+             src='others'
+         return src
+
+     src=getsrc(MODEL_LINK)
+
+     def get_name(url, gdrive):
+         if not gdrive:
+             response = requests.get(url, allow_redirects=False)
+             if "Location" in response.headers:
+                 redirected_url = response.headers["Location"]
+                 quer = parse_qs(urlparse(redirected_url).query)
+                 if "response-content-disposition" in quer:
+                     disp_val = quer["response-content-disposition"][0].split(";")
+                     for vals in disp_val:
+                         if vals.strip().startswith("filename="):
+                             filenm=unquote(vals.split("=", 1)[1].strip())
+                             return filenm.replace("\"","")
+         else:
+             headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"}
+             lnk="https://drive.google.com/uc?id={id}&export=download".format(id=url[url.find("/d/")+3:url.find("/view")])
+             res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
+             res = requests.session().get(get_url_from_gdrive_confirmation(res.text), headers=headers, stream=True, verify=True)
+             content_disposition = six.moves.urllib_parse.unquote(res.headers["Content-Disposition"])
+             filenm = re.search(r"filename\*=UTF-8''(.*)", content_disposition).groups()[0].replace(os.path.sep, "_")
+             return filenm
+
+     if src=='civitai':
+         modelname=get_name(MODEL_LINK, False)
+     elif src=='gdrive':
+         modelname=get_name(MODEL_LINK, True)
+     else:
+         modelname=os.path.basename(MODEL_LINK)
+
+     sftnsr=""
+     if modelname.split('.')[-1]!='safetensors':
+         modelnm="model.ckpt"
+     else:
+         modelnm="model.safetensors"
+         sftnsr="--from_safetensors"
+
+     os.chdir('/notebooks')
+     call("gdown --fuzzy " +MODEL_LINK+ " -O "+modelnm, shell=True)
+
+     if os.path.exists(modelnm):
+         if os.path.getsize(modelnm) > 1810671599:
+             call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
+             call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+modelnm+' --dump_path /models/stable-diffusion-custom --original_config_file config.yaml '+sftnsr, shell=True)
+             clear_output()
+             call('rm config.yaml', shell=True)
+             if os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                 call('rm '+modelnm, shell=True)
+                 clear_output()
+                 done()
+             else:
+                 while not os.path.exists('/models/stable-diffusion-custom/unet/diffusion_pytorch_model.bin'):
+                     print('Conversion error')
+                     time.sleep(5)
+         else:
+             while os.path.getsize(modelnm) < 1810671599:
+                 print('Wrong link, check that the link is valid')
+                 time.sleep(5)
+
+
+
+
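+ # dls(): dispatcher behind the "Model Download" cell; tries a HF repo path, then
+ # a local checkpoint path, then a direct/gdrive/civitai link, and otherwise
+ # falls back to the stock v1.5 weights mounted under /datasets.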
+ def dls(Path_to_HuggingFace, Model_Path, Model_Link):
+
+     if Path_to_HuggingFace != "":
+         downloadmodel_hf(Path_to_HuggingFace)
+         MODEL_NAME="/models/stable-diffusion-custom"
+     elif Model_Path !="":
+         downloadmodel_path(Model_Path)
+         MODEL_NAME="/models/stable-diffusion-custom"
+     elif Model_Link !="":
+         downloadmodel_link(Model_Link)
+         MODEL_NAME="/models/stable-diffusion-custom"
+     else:
+         MODEL_NAME="/datasets/stable-diffusion-diffusers/stable-diffusion-v1-5"
+         print('Using the original V1.5 model')
+
+     return MODEL_NAME
+
+
+
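+ # sess(): creates or restores a session folder under
+ # /notebooks/Fast-Dreambooth/Sessions (optionally fetched from a Google Drive
+ # link) and, when a trained .ckpt is found there, converts it back to diffusers
+ # format so training can resume from it.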
+ def sess(Session_Name, Session_Link_optional, MODEL_NAME):
+     import wget, gdown
+     os.chdir('/notebooks')
+     PT=""
+
+     while Session_Name=="":
+         print('Input the Session Name:')
+         Session_Name=input("")
+     Session_Name=Session_Name.replace(" ","_")
+
+     WORKSPACE='/notebooks/Fast-Dreambooth'
+
+     if Session_Link_optional != "":
+         print('Downloading session...')
+         if not os.path.exists(str(WORKSPACE+'/Sessions')):
+             call("mkdir -p " +WORKSPACE+ "/Sessions", shell=True)
+             time.sleep(1)
+         os.chdir(WORKSPACE+'/Sessions')
+         gdown.download_folder(url=Session_Link_optional, output=Session_Name, quiet=True, remaining_ok=True, use_cookies=False)
+         os.chdir(Session_Name)
+         call("rm -r instance_images", shell=True)
+         call("unzip instance_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r concept_images", shell=True)
+         call("unzip concept_images.zip", shell=True, stdout=open('/dev/null', 'w'))
+         call("rm -r captions", shell=True)
+         call("unzip captions.zip", shell=True, stdout=open('/dev/null', 'w'))
+         os.chdir('/notebooks')
+         clear_output()
+
+     INSTANCE_NAME=Session_Name
+     OUTPUT_DIR="/models/"+Session_Name
+     SESSION_DIR=WORKSPACE+"/Sessions/"+Session_Name
+     CONCEPT_DIR=SESSION_DIR+"/concept_images"
+     INSTANCE_DIR=SESSION_DIR+"/instance_images"
+     CAPTIONS_DIR=SESSION_DIR+'/captions'
+     MDLPTH=str(SESSION_DIR+"/"+Session_Name+'.ckpt')
+     resume=False
+
+     if os.path.exists(str(SESSION_DIR)):
+         mdls=[ckpt for ckpt in listdir(SESSION_DIR) if ckpt.split(".")[-1]=="ckpt"]
+         if not os.path.exists(MDLPTH) and '.ckpt' in str(mdls):
+
+             def f(n):
+                 k=0
+                 for i in mdls:
+                     if k==n:
+                         call('mv '+SESSION_DIR+'/'+i+' '+MDLPTH, shell=True)
+                     k=k+1
+
+             k=0
+             print('No final checkpoint model found, select which intermediary checkpoint to use, enter only the number, (000 to skip):\n')
+
+             for i in mdls:
+                 print(str(k)+'- '+i)
+                 k=k+1
+             n=input()
+             while int(n)>k-1:
+                 n=input()
+             if n!="000":
+                 f(int(n))
+                 print('Using the model '+ mdls[int(n)]+" ...")
+                 time.sleep(4)
+                 clear_output()
+             else:
+                 print('Skipping the intermediary checkpoints.')
+
+
+     if os.path.exists(str(SESSION_DIR)) and not os.path.exists(MDLPTH):
+         print('Loading session with no previous model, using the original model or the custom downloaded model')
+         if MODEL_NAME=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session Loaded, proceed to uploading instance images')
+
+     elif os.path.exists(MDLPTH):
+         print('Session found, loading the trained model ...')
+         call('wget -q -O config.yaml https://github.com/CompVis/stable-diffusion/raw/main/configs/stable-diffusion/v1-inference.yaml', shell=True)
+         call('python /diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path '+MDLPTH+' --dump_path '+OUTPUT_DIR+' --original_config_file config.yaml', shell=True)
+         clear_output()
+
+         call('rm config.yaml', shell=True)
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+             resume=True
+             clear_output()
+             print('Session loaded.')
+         else:
+             print('Conversion error, if the error persists, remove the CKPT file from the current session folder')
+
+     elif not os.path.exists(str(SESSION_DIR)):
+         call('mkdir -p '+INSTANCE_DIR, shell=True)
+         print('Creating session...')
+         if MODEL_NAME=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+         else:
+             print('Session created, proceed to uploading instance images')
+
+     return PT, WORKSPACE, Session_Name, INSTANCE_NAME, OUTPUT_DIR, SESSION_DIR, CONCEPT_DIR, INSTANCE_DIR, CAPTIONS_DIR, MDLPTH, MODEL_NAME, resume
+
+
+
+ def done():
+     done = widgets.Button(
+         description='Done!',
+         disabled=True,
+         button_style='success',
+         tooltip='',
+         icon='check'
+     )
+     display(done)
+
+
+
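+ # uplder()/upld(): instance-image upload cell; accepts either a server-side
+ # folder path or files from the ipywidgets uploader, routes .txt captions to
+ # CAPTIONS_DIR, optionally auto-crops every image to Crop_size via crop_image(),
+ # and replaces spaces in filenames.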
+ def uplder(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, ren):
+
+     if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
+
+
+     uploader = widgets.FileUpload(description="Choose images",accept='image/*, .txt', multiple=True)
+     Upload = widgets.Button(
+         description='Upload',
+         disabled=False,
+         button_style='info',
+         tooltip='Click to upload the chosen instance images',
+         icon=''
+     )
+
+
+     def up(Upload):
+         with out:
+             uploader.close()
+             Upload.close()
+             upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+             done()
+     out=widgets.Output()
+
+     if IMAGES_FOLDER_OPTIONAL=="":
+         Upload.on_click(up)
+         display(uploader, Upload, out)
+     else:
+         upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren)
+         done()
+
+
+ def upld(Remove_existing_instance_images, Crop_images, Crop_size, IMAGES_FOLDER_OPTIONAL, INSTANCE_DIR, CAPTIONS_DIR, uploader, ren):
+
+
+     if Remove_existing_instance_images:
+         if os.path.exists(str(INSTANCE_DIR)):
+             call("rm -r " +INSTANCE_DIR, shell=True)
+         if os.path.exists(str(CAPTIONS_DIR)):
+             call("rm -r " +CAPTIONS_DIR, shell=True)
+
+
+     if not os.path.exists(str(INSTANCE_DIR)):
+         call("mkdir -p " +INSTANCE_DIR, shell=True)
+     if not os.path.exists(str(CAPTIONS_DIR)):
+         call("mkdir -p " +CAPTIONS_DIR, shell=True)
+
+
+     if IMAGES_FOLDER_OPTIONAL !="":
+
+         if os.path.exists(IMAGES_FOLDER_OPTIONAL+"/.ipynb_checkpoints"):
+             call('rm -r '+IMAGES_FOLDER_OPTIONAL+'/.ipynb_checkpoints', shell=True)
+
+         if any(file.endswith('.{}'.format('txt')) for file in os.listdir(IMAGES_FOLDER_OPTIONAL)):
+             call('mv '+IMAGES_FOLDER_OPTIONAL+'/*.txt '+CAPTIONS_DIR, shell=True)
+         if Crop_images:
+             os.chdir(str(IMAGES_FOLDER_OPTIONAL))
+             call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+             os.chdir('/notebooks')
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(IMAGES_FOLDER_OPTIONAL+"/"+filename)
+                 file=file.convert("RGB")
+                 file=ImageOps.exif_transpose(file)
+                 width, height = file.size
+                 if file.size !=(Crop_size, Crop_size):
+                     image=crop_image(file, Crop_size)
+                     if extension.upper()=="JPG":
+                         image[0].save(new_path_with_file, format="JPEG", quality = 100)
+                     else:
+                         image[0].save(new_path_with_file, format=extension.upper())
+
+                 else:
+                     call("cp \'"+IMAGES_FOLDER_OPTIONAL+"/"+filename+"\' "+INSTANCE_DIR, shell=True)
+
+         else:
+             for filename in tqdm(os.listdir(IMAGES_FOLDER_OPTIONAL), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 call("cp -r " +IMAGES_FOLDER_OPTIONAL+"/. " +INSTANCE_DIR, shell=True)
+
+     elif IMAGES_FOLDER_OPTIONAL =="":
+         up=""
+         for file in uploader.value:
+             filename = file['name']
+             if filename.split(".")[-1]=="txt":
+                 with open(CAPTIONS_DIR+'/'+filename, 'w') as f:
+                     f.write(bytes(file['content']).decode())
+         up=[file for file in uploader.value if not file['name'].endswith('.txt')]
+         if Crop_images:
+             for file in tqdm(up, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 filename = file['name']
+                 img = Image.open(io.BytesIO(file['content']))
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+                 img=img.convert("RGB")
+                 img=ImageOps.exif_transpose(img)
+
+                 if extension.upper()=="JPG":
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+                 new_path_with_file = os.path.join(INSTANCE_DIR, filename)
+                 file = Image.open(new_path_with_file)
+                 width, height = file.size
+                 if file.size !=(Crop_size, Crop_size):
+                     image=crop_image(file, Crop_size)
+                     if extension.upper()=="JPG":
+                         image[0].save(new_path_with_file, format="JPEG", quality = 100)
+                     else:
+                         image[0].save(new_path_with_file, format=extension.upper())
+
+         else:
+             for file in tqdm(uploader.value, bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Uploaded'):
+                 filename = file['name']
+                 img = Image.open(io.BytesIO(file['content']))
+                 img=img.convert("RGB")
+                 extension = filename.split(".")[-1]
+                 identifier=filename.split(".")[0]
+
+                 if extension.upper()=="JPG":
+                     img.save(INSTANCE_DIR+"/"+filename, format="JPEG", quality = 100)
+                 else:
+                     img.save(INSTANCE_DIR+"/"+filename, format=extension.upper())
+
+     if ren:
+         i=0
+         for filename in tqdm(os.listdir(INSTANCE_DIR), bar_format=' |{bar:15}| {n_fmt}/{total_fmt} Renamed'):
+             extension = filename.split(".")[-1]
+             identifier=filename.split(".")[0]
+             new_path_with_file = os.path.join(INSTANCE_DIR, "conceptimagedb"+str(i)+"."+extension)
+             call('mv "'+os.path.join(INSTANCE_DIR,filename)+'" "'+new_path_with_file+'"', shell=True)
+             i=i+1
+
+     os.chdir(INSTANCE_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir(CAPTIONS_DIR)
+     call('find . -name "* *" -type f | rename ' "'s/ /-/g'", shell=True)
+     os.chdir('/notebooks')
+
+
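+ # caption(): minimal ipywidgets caption editor; selecting an instance image
+ # shows a 420px preview next to a textarea bound to its .txt file in
+ # CAPTIONS_DIR, with a Save button to write it back.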
+ def caption(CAPTIONS_DIR, INSTANCE_DIR):
+
+     paths=""
+     out=""
+     widgets_l=""
+     clear_output()
+     def Caption(path):
+         if path!="Select an instance image to caption":
+
+             name = os.path.splitext(os.path.basename(path))[0]
+             ext=os.path.splitext(os.path.basename(path))[-1][1:]
+             if ext.upper()=="JPG":
+                 ext="JPEG"
+
+             if os.path.exists(CAPTIONS_DIR+"/"+name + '.txt'):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+             else:
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write("")
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'r') as f:
+                     text = f.read()
+
+             img=Image.open(os.path.join(INSTANCE_DIR,path))
+             img=img.convert("RGB")
+             img=img.resize((420, 420))
+             image_bytes = BytesIO()
+             img.save(image_bytes, format=ext, quality=10)
+             image_bytes.seek(0)
+             image_data = image_bytes.read()
+             img= image_data
+             image = widgets.Image(
+                 value=img,
+                 width=420,
+                 height=420
+             )
+             text_area = widgets.Textarea(value=text, description='', disabled=False, layout={'width': '300px', 'height': '120px'})
+
+
+             def update_text(text):
+                 with open(CAPTIONS_DIR+"/"+name + '.txt', 'w') as f:
+                     f.write(text)
+
+             button = widgets.Button(description='Save', button_style='success')
+             button.on_click(lambda b: update_text(text_area.value))
+
+             return widgets.VBox([widgets.HBox([image, text_area, button])])
+
+
+     paths = os.listdir(INSTANCE_DIR)
+     widgets_l = widgets.Select(options=["Select an instance image to caption"]+paths, rows=25)
+
+
+     out = widgets.Output()
+
+     def click(change):
+         with out:
+             out.clear_output()
+             display(Caption(change.new))
+
+     widgets_l.observe(click, names='value')
+     display(widgets.HBox([widgets_l, out]))
+
+
+
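+ # dbtrain(): training cell; launches train_dreambooth_pps.py through accelerate,
+ # first dumping a trained text encoder (optionally also one trained on the
+ # concept images), then training the UNet with periodic checkpoint saves, and
+ # finally converting the result to a single .ckpt in the session folder.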
+ def dbtrain(Resume_Training, UNet_Training_Steps, UNet_Learning_Rate, Text_Encoder_Training_Steps, Text_Encoder_Concept_Training_Steps, Text_Encoder_Learning_Rate, Offset_Noise, Resolution, MODEL_NAME, SESSION_DIR, INSTANCE_DIR, CONCEPT_DIR, CAPTIONS_DIR, External_Captions, INSTANCE_NAME, Session_Name, OUTPUT_DIR, PT, resume, Save_Checkpoint_Every_n_Steps, Start_saving_from_the_step, Save_Checkpoint_Every):
+
+     if os.path.exists(INSTANCE_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+INSTANCE_DIR+'/.ipynb_checkpoints', shell=True)
+     if os.path.exists(CONCEPT_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+CONCEPT_DIR+'/.ipynb_checkpoints', shell=True)
+     if os.path.exists(CAPTIONS_DIR+"/.ipynb_checkpoints"):
+         call('rm -r '+CAPTIONS_DIR+'/.ipynb_checkpoints', shell=True)
+
+     if resume and not Resume_Training:
+         print('Overwrite your previously trained model? Answering "yes" will train a new model, answering "no" will resume the training of the previous model (yes/no):')
+         while True:
+             ansres=input('')
+             if ansres=='no':
+                 Resume_Training = True
+                 resume= False
+                 break
+             elif ansres=='yes':
+                 Resume_Training = False
+                 resume= False
+                 break
+
+     while not Resume_Training and not os.path.exists(MODEL_NAME+'/unet/diffusion_pytorch_model.bin'):
+         print('No model found, use the "Model Download" cell to download a model.')
+         time.sleep(5)
+
+     MODELT_NAME=MODEL_NAME
+
+     Seed=random.randint(1, 999999)
+
+     ofstnse=""
+     if Offset_Noise:
+         ofstnse="--offset_noise"
+
+     extrnlcptn=""
+     if External_Captions:
+         extrnlcptn="--external_captions"
+
+     precision="fp16"
+
+
+     resuming=""
+     if Resume_Training and os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         MODELT_NAME=OUTPUT_DIR
+         print('Resuming Training...')
+         resuming="Yes"
+     elif Resume_Training and not os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+         print('Previous model not found, training a new model...')
+         MODELT_NAME=MODEL_NAME
+         while MODEL_NAME=="":
+             print('No model found, use the "Model Download" cell to download a model.')
+             time.sleep(5)
+
+
+     trnonltxt=""
+     if UNet_Training_Steps==0:
+         trnonltxt="--train_only_text_encoder"
+
+     Enable_text_encoder_training= True
+     Enable_Text_Encoder_Concept_Training= True
+
+
+     if Text_Encoder_Training_Steps==0:
+         Enable_text_encoder_training= False
+     else:
+         stptxt=Text_Encoder_Training_Steps
+
+     if Text_Encoder_Concept_Training_Steps==0:
+         Enable_Text_Encoder_Concept_Training= False
+     else:
+         stptxtc=Text_Encoder_Concept_Training_Steps
+
+
+     if Save_Checkpoint_Every is None:
+         Save_Checkpoint_Every=1
+     stp=0
+     if Start_saving_from_the_step is None:
+         Start_saving_from_the_step=0
+     if Start_saving_from_the_step < 200:
+         Start_saving_from_the_step=Save_Checkpoint_Every
+     stpsv=Start_saving_from_the_step
+     if Save_Checkpoint_Every_n_Steps:
+         stp=Save_Checkpoint_Every
+
+
+     def dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps):
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+trnonltxt+' \
+         '+extrnlcptn+' \
+         '+ofstnse+' \
+         --train_text_encoder \
+         --image_captions_filename \
+         --dump_only_text_encoder \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --captions_dir='+CAPTIONS_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution='+str(Resolution)+' \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 --gradient_checkpointing \
+         --use_8bit_adam \
+         --learning_rate='+str(Text_Encoder_Learning_Rate)+' \
+         --lr_scheduler="linear" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+
+     def train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps):
+         clear_output()
+         if resuming=="Yes":
+             print('Resuming Training...')
+         print('Training the UNet...')
+         call('accelerate launch /notebooks/diffusers/examples/dreambooth/train_dreambooth_pps.py \
+         '+extrnlcptn+' \
+         '+ofstnse+' \
+         --image_captions_filename \
+         --train_only_unet \
+         --Session_dir='+SESSION_DIR+' \
+         --save_starting_step='+str(stpsv)+' \
+         --save_n_steps='+str(stp)+' \
+         --pretrained_model_name_or_path='+MODELT_NAME+' \
+         --instance_data_dir='+INSTANCE_DIR+' \
+         --output_dir='+OUTPUT_DIR+' \
+         --captions_dir='+CAPTIONS_DIR+' \
+         --instance_prompt='+PT+' \
+         --seed='+str(Seed)+' \
+         --resolution='+str(Resolution)+' \
+         --mixed_precision='+str(precision)+' \
+         --train_batch_size=1 \
+         --gradient_accumulation_steps=1 --gradient_checkpointing \
+         --use_8bit_adam \
+         --learning_rate='+str(UNet_Learning_Rate)+' \
+         --lr_scheduler="linear" \
+         --lr_warmup_steps=0 \
+         --max_train_steps='+str(Training_Steps), shell=True)
+
+     if Enable_text_encoder_training:
+         print('Training the text encoder...')
+         if os.path.exists(OUTPUT_DIR+'/'+'text_encoder_trained'):
+             call('rm -r '+OUTPUT_DIR+'/text_encoder_trained', shell=True)
+         dump_only_textenc(trnonltxt, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxt)
+
+     if Enable_Text_Encoder_Concept_Training:
+         if os.path.exists(CONCEPT_DIR):
+             if os.listdir(CONCEPT_DIR)!=[]:
+                 clear_output()
+                 if resuming=="Yes":
+                     print('Resuming Training...')
+                 print('Training the text encoder on the concept...')
+                 dump_only_textenc(trnonltxt, MODELT_NAME, CONCEPT_DIR, OUTPUT_DIR, PT, Seed, precision, Training_Steps=stptxtc)
+             else:
+                 clear_output()
+                 if resuming=="Yes":
+                     print('Resuming Training...')
+                 print('No concept images found, skipping concept training...')
+                 Text_Encoder_Concept_Training_Steps=0
+                 time.sleep(8)
+         else:
+             clear_output()
+             if resuming=="Yes":
+                 print('Resuming Training...')
+             print('No concept images found, skipping concept training...')
+             Text_Encoder_Concept_Training_Steps=0
+             time.sleep(8)
+
+     if UNet_Training_Steps!=0:
+         train_only_unet(stp, stpsv, SESSION_DIR, MODELT_NAME, INSTANCE_DIR, OUTPUT_DIR, Text_Encoder_Training_Steps, PT, Seed, Resolution, extrnlcptn, precision, Training_Steps=UNet_Training_Steps)
+
+     if UNet_Training_Steps==0 and Text_Encoder_Concept_Training_Steps==0 and Text_Encoder_Training_Steps==0:
+         print('Nothing to do')
+     else:
+         if os.path.exists(OUTPUT_DIR+'/unet/diffusion_pytorch_model.bin'):
+
+             call('python /notebooks/diffusers/scripts/convertosdv2.py --fp16 '+OUTPUT_DIR+' '+SESSION_DIR+'/'+Session_Name+'.ckpt', shell=True)
+             clear_output()
+             if os.path.exists(SESSION_DIR+"/"+INSTANCE_NAME+'.ckpt'):
+                 clear_output()
+                 print("DONE, the CKPT model is in the session's folder")
+             else:
+                 print("Something went wrong")
+
+         else:
+             print("Something went wrong")
+
+     return resume
+
+
+
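+ # testui(): prepares the AUTOMATIC1111 webui for testing the trained checkpoint:
+ # unpacks/updates the repo, patches gradio's blocks.py so the UI is served
+ # through the Paperspace tensorboard-* proxy domain, and returns the launch
+ # flags (with optional --gradio-auth) pointing --ckpt at the trained model.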
+ def testui(Custom_Path, Previous_Session_Name, Session_Name, User, Password):
+
+
+     if Previous_Session_Name!="":
+         print("Loading a previous session model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Previous_Session_Name
+         path_to_trained_model=mdldir+"/"+Previous_Session_Name+'.ckpt'
+
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in the previous session")
+             time.sleep(5)
+
+     elif Custom_Path!="":
+         print("Loading model from a custom path")
+         path_to_trained_model=Custom_Path
+
+
+         while not os.path.exists(path_to_trained_model):
+             print("Wrong Path")
+             time.sleep(5)
+
+     else:
+         print("Loading the trained model")
+         mdldir='/notebooks/Fast-Dreambooth/Sessions/'+Session_Name
+         path_to_trained_model=mdldir+"/"+Session_Name+'.ckpt'
+
+
+         while not os.path.exists(path_to_trained_model):
+             print("There is no trained model in this session")
+             time.sleep(5)
+
+     auth=f"--gradio-auth {User}:{Password}"
+     if User =="" or Password=="":
+         auth=""
+
+     os.chdir('/notebooks')
+     if not os.path.exists('/notebooks/sd/stablediffusiond'): #reset later
+         call('wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst', shell=True)
+         call('tar --zstd -xf sd_mrep.tar.zst', shell=True)
+         call('rm sd_mrep.tar.zst', shell=True)
+
+     os.chdir('/notebooks/sd')
+     if not os.path.exists('stable-diffusion-webui'):
+         call('git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui', shell=True)
+
+     os.chdir('/notebooks/sd/stable-diffusion-webui/')
+     call('git reset --hard', shell=True, stdout=open('/dev/null', 'w'))
+     print('')
+     call('git checkout master', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('git pull', shell=True, stdout=open('/dev/null', 'w'))
+     os.makedirs('/notebooks/sd/stable-diffusion-webui/repositories', exist_ok=True)
+     call('git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets /notebooks/sd/stable-diffusion-webui/repositories/stable-diffusion-webui-assets', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     os.chdir('/notebooks')
+     clear_output()
+
+     call('wget -q -O /usr/local/lib/python3.11/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py', shell=True)
+
+     localurl="tensorboard-"+os.environ.get('PAPERSPACE_FQDN')
+
+     for line in fileinput.input('/usr/local/lib/python3.11/dist-packages/gradio/blocks.py', inplace=True):
+         if line.strip().startswith('self.server_name ='):
+             line = f'            self.server_name = "{localurl}"\n'
+         if line.strip().startswith('self.protocol = "https"'):
+             line = '            self.protocol = "https"\n'
+         if line.strip().startswith('if self.local_url.startswith("https") or self.is_colab'):
+             line = ''
+         if line.strip().startswith('else "http"'):
+             line = ''
+         sys.stdout.write(line)
+
+
+     os.chdir('/notebooks/sd/stable-diffusion-webui/modules')
+
+     call("sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/notebooks/sd/stablediffusion\"]@' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
+     call("sed -i 's@\.\.\/@src/@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
+     call("sed -i 's@src\/generative-models@generative-models@g' /notebooks/sd/stable-diffusion-webui/modules/paths.py", shell=True)
+
+     os.chdir('/notebooks/sd/stable-diffusion-webui')
+     clear_output()
+
+     configf="--disable-console-progressbars --no-gradio-queue --no-hashing --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --xformers --enable-insecure-extension-access --port 6006 --listen --skip-version-check --ckpt "+path_to_trained_model+" "+auth
+
+     return configf
+
+
+
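+ # clean(): small widget to delete a saved session (and its converted diffusers
+ # model under /notebooks/models) from storage.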
+ def clean():
+
+     Sessions=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+
+     s = widgets.Select(
+         options=Sessions,
+         rows=5,
+         description='',
+         disabled=False
+     )
+
+     out=widgets.Output()
+
+     d = widgets.Button(
+         description='Remove',
+         disabled=False,
+         button_style='warning',
+         tooltip='Remove the selected session',
+         icon='warning'
+     )
+
+     def rem(d):
+         with out:
+             if s.value is not None:
+                 clear_output()
+                 print("THE SESSION "+s.value+" HAS BEEN REMOVED FROM THE STORAGE")
+                 call('rm -r /notebooks/Fast-Dreambooth/Sessions/'+s.value, shell=True)
+                 if os.path.exists('/notebooks/models/'+s.value):
+                     call('rm -r /notebooks/models/'+s.value, shell=True)
+                 s.options=os.listdir("/notebooks/Fast-Dreambooth/Sessions")
+
+
+             else:
+                 d.close()
+                 s.close()
+                 clear_output()
+                 print("NOTHING TO REMOVE")
+
+     d.on_click(rem)
+     if s.value is not None:
+         display(s,d,out)
+     else:
+         print("NOTHING TO REMOVE")
+
+
+
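+ # hf(): publishes the trained model to a private Hugging Face repo: restores
+ # safety_checker/feature_extractor/model_index.json from runwayml/stable-diffusion-v1-5
+ # via a sparse checkout, then commits a README, the .ckpt and each diffusers
+ # component folder while bar() renders a rough progress bar.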
+ def hf(Name_of_your_concept, hf_token_write, INSTANCE_NAME, OUTPUT_DIR, Session_Name, MDLPTH):
+
+     from slugify import slugify
+     from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
+     from huggingface_hub import create_repo
+     from IPython.display import display_markdown
+
+
+     if Name_of_your_concept == "":
+         Name_of_your_concept = Session_Name
+     Name_of_your_concept=Name_of_your_concept.replace(" ","-")
+
+
+
+     if hf_token_write =="":
+         print('Your Hugging Face write access token : ')
+         hf_token_write=input()
+
+     hf_token = hf_token_write
+
+     api = HfApi()
+     your_username = api.whoami(token=hf_token)["name"]
+
+     repo_id = f"{your_username}/{slugify(Name_of_your_concept)}"
+     output_dir = '/notebooks/models/'+INSTANCE_NAME
+
+     def bar(prg):
+         clear_output()
+         br="Uploading to HuggingFace : " '|'+'█' * prg + ' ' * (25-prg)+'| ' +str(prg*4)+ "%"
+         return br
+
+     print("Loading...")
+
+     os.chdir(OUTPUT_DIR)
+     call('rm -r safety_checker feature_extractor .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('rm model_index.json', shell=True)
+     call('git init', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('git lfs install --system --skip-repo', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('git remote add -f origin https://huggingface.co/runwayml/stable-diffusion-v1-5', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('git config core.sparsecheckout true', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('echo -e "\nfeature_extractor\nsafety_checker\nmodel_index.json\n!*.safetensors" > .git/info/sparse-checkout', shell=True)
+     call('git pull origin main', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     call('rm -r .git', shell=True, stdout=open('/dev/null', 'w'), stderr=open('/dev/null', 'w'))
+     os.chdir('/notebooks')
+
+     print(bar(1))
+
+     readme_text = f'''---
+ license: creativeml-openrail-m
+ tags:
+ - text-to-image
+ - stable-diffusion
+ ---
+ ### {Name_of_your_concept} Dreambooth model trained by {api.whoami(token=hf_token)["name"]} with TheLastBen's fast-DreamBooth notebook
+
+ '''
+     # Save the readme to a file
+     with open("README.md", "w") as readme_file:
+         readme_file.write(readme_text)
+
+     operations = [
+         CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
+         CommitOperationAdd(path_in_repo=f"{Session_Name}.ckpt",path_or_fileobj=MDLPTH)
+     ]
+     create_repo(repo_id,private=True, token=hf_token)
+
+     api.create_commit(
+         repo_id=repo_id,
+         operations=operations,
+         commit_message=f"Upload the concept {Name_of_your_concept} embeds and token",
+         token=hf_token
+     )
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/feature_extractor",
+         path_in_repo="feature_extractor",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(4))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/safety_checker",
+         path_in_repo="safety_checker",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(8))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/scheduler",
+         path_in_repo="scheduler",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(9))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/text_encoder",
+         path_in_repo="text_encoder",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(12))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/tokenizer",
+         path_in_repo="tokenizer",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(13))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/unet",
+         path_in_repo="unet",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(21))
+
+     api.upload_folder(
+         folder_path=OUTPUT_DIR+"/vae",
+         path_in_repo="vae",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(23))
+
+     api.upload_file(
+         path_or_fileobj=OUTPUT_DIR+"/model_index.json",
+         path_in_repo="model_index.json",
+         repo_id=repo_id,
+         token=hf_token
+     )
+
+     print(bar(25))
+
+     print("Your concept was saved successfully at https://huggingface.co/"+repo_id)
+     done()
+
+
+
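+ # crop_image(): focal-point auto-crop (appears adapted from the AUTOMATIC1111
+ # webui's autocrop module): scales the image so its short side matches `size`,
+ # scores face detections (OpenCV Haar cascades), corner features and entropy,
+ # and returns a list with one size x size crop centered on the weighted focus.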
+ def crop_image(im, size):
+
+     GREEN = "#0F0"
+     BLUE = "#00F"
+     RED = "#F00"
+
+     def focal_point(im, settings):
+         corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
+         entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
+         face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
+
+         pois = []
+
+         weight_pref_total = 0
+         if len(corner_points) > 0:
+             weight_pref_total += settings.corner_points_weight
+         if len(entropy_points) > 0:
+             weight_pref_total += settings.entropy_points_weight
+         if len(face_points) > 0:
+             weight_pref_total += settings.face_points_weight
+
+         corner_centroid = None
+         if len(corner_points) > 0:
+             corner_centroid = centroid(corner_points)
+             corner_centroid.weight = settings.corner_points_weight / weight_pref_total
+             pois.append(corner_centroid)
+
+         entropy_centroid = None
+         if len(entropy_points) > 0:
+             entropy_centroid = centroid(entropy_points)
+             entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
+             pois.append(entropy_centroid)
+
+         face_centroid = None
+         if len(face_points) > 0:
+             face_centroid = centroid(face_points)
+             face_centroid.weight = settings.face_points_weight / weight_pref_total
+             pois.append(face_centroid)
+
+         average_point = poi_average(pois, settings)
+
+         return average_point
+
+
+     def image_face_points(im, settings):
+
+         np_im = np.array(im)
+         gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
+
+         tries = [
+             [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
+             [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+         ]
+         for t in tries:
+             classifier = cv2.CascadeClassifier(t[0])
+             minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
+             try:
+                 faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
+                     minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+             except:
+                 continue
+
+             if len(faces) > 0:
+                 rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
+                 return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+         return []
+
+
+     def image_corner_points(im, settings):
+         grayscale = im.convert("L")
+
+         # naive attempt at preventing focal points from collecting at watermarks near the bottom
+         gd = ImageDraw.Draw(grayscale)
+         gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+
+         np_im = np.array(grayscale)
+
+         points = cv2.goodFeaturesToTrack(
+             np_im,
+             maxCorners=100,
+             qualityLevel=0.04,
+             minDistance=min(grayscale.width, grayscale.height)*0.06,
+             useHarrisDetector=False,
+         )
+
+         if points is None:
+             return []
+
+         focal_points = []
+         for point in points:
+             x, y = point.ravel()
+             focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+
+         return focal_points
+
+
+     def image_entropy_points(im, settings):
+         landscape = im.height < im.width
+         portrait = im.height > im.width
+         if landscape:
+             move_idx = [0, 2]
+             move_max = im.size[0]
+         elif portrait:
+             move_idx = [1, 3]
+             move_max = im.size[1]
+         else:
+             return []
+
+         e_max = 0
+         crop_current = [0, 0, settings.crop_width, settings.crop_height]
+         crop_best = crop_current
+         while crop_current[move_idx[1]] < move_max:
+             crop = im.crop(tuple(crop_current))
+             e = image_entropy(crop)
+
+             if (e > e_max):
+                 e_max = e
+                 crop_best = list(crop_current)
+
+             crop_current[move_idx[0]] += 4
+             crop_current[move_idx[1]] += 4
+
+         x_mid = int(crop_best[0] + settings.crop_width/2)
+         y_mid = int(crop_best[1] + settings.crop_height/2)
+
+         return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
+
+
+     def image_entropy(im):
+         # greyscale image entropy
+         # band = np.asarray(im.convert("L"))
+         band = np.asarray(im.convert("1"), dtype=np.uint8)
+         hist, _ = np.histogram(band, bins=range(0, 256))
+         hist = hist[hist > 0]
+         return -np.log2(hist / hist.sum()).sum()
+
+     def centroid(pois):
+         x = [poi.x for poi in pois]
+         y = [poi.y for poi in pois]
+         return PointOfInterest(sum(x)/len(pois), sum(y)/len(pois))
+
+
+     def poi_average(pois, settings):
+         weight = 0.0
+         x = 0.0
+         y = 0.0
+         for poi in pois:
+             weight += poi.weight
+             x += poi.x * poi.weight
+             y += poi.y * poi.weight
+         avg_x = round(weight and x / weight)
+         avg_y = round(weight and y / weight)
+
+         return PointOfInterest(avg_x, avg_y)
+
+
+     def is_landscape(w, h):
+         return w > h
+
+
+     def is_portrait(w, h):
+         return h > w
+
+
+     def is_square(w, h):
+         return w == h
+
+
+     class PointOfInterest:
+         def __init__(self, x, y, weight=1.0, size=10):
+             self.x = x
+             self.y = y
+             self.weight = weight
+             self.size = size
+
+         def bounding(self, size):
+             return [
+                 self.x - size//2,
+                 self.y - size//2,
+                 self.x + size//2,
+                 self.y + size//2
+             ]
+
+     class Settings:
+         def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5):
+             self.crop_width = crop_width
+             self.crop_height = crop_height
+             self.corner_points_weight = corner_points_weight
+             self.entropy_points_weight = entropy_points_weight
+             self.face_points_weight = face_points_weight
+
+     settings = Settings(
+         crop_width = size,
+         crop_height = size,
+         face_points_weight = 0.9,
+         entropy_points_weight = 0.15,
+         corner_points_weight = 0.5,
+     )
+
+     scale_by = 1
+     if is_landscape(im.width, im.height):
+         scale_by = settings.crop_height / im.height
+     elif is_portrait(im.width, im.height):
+         scale_by = settings.crop_width / im.width
+     elif is_square(im.width, im.height):
+         if is_square(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_landscape(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_width / im.width
+         elif is_portrait(settings.crop_width, settings.crop_height):
+             scale_by = settings.crop_height / im.height
+
+     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
+     im_debug = im.copy()
+
+     focus = focal_point(im_debug, settings)
+
+     # take the focal point and turn it into crop coordinates that try to center over the focal
+     # point but then get adjusted back into the frame
+     y_half = int(settings.crop_height / 2)
+     x_half = int(settings.crop_width / 2)
+
+     x1 = focus.x - x_half
+     if x1 < 0:
+         x1 = 0
+     elif x1 + settings.crop_width > im.width:
+         x1 = im.width - settings.crop_width
+
+     y1 = focus.y - y_half
+     if y1 < 0:
+         y1 = 0
+     elif y1 + settings.crop_height > im.height:
+         y1 = im.height - settings.crop_height
+
+     x2 = x1 + settings.crop_width
+     y2 = y1 + settings.crop_height
+
+     crop = [x1, y1, x2, y2]
+
+     results = []
+
+     results.append(im.crop(tuple(crop)))
+
+     return results