Hanna Skairipa committed on
Commit
ed4851b
1 Parent(s): 0ca8cd3
Files changed (1)
  1. app.py +490 -142
app.py CHANGED
@@ -1,146 +1,494 @@
-import gradio as gr
-import numpy as np
-import random
-from diffusers import DiffusionPipeline
-import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
-if torch.cuda.is_available():
-    torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
-    pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
-else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
-
-    return image
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]

-css="""
-#col-container {
-    margin: 0 auto;
-    max-width: 520px;
-}
 """
 
-if torch.cuda.is_available():
-    power_device = "GPU"
  else:
-    power_device = "CPU"
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(f"""
-        # Text-to-Image Gradio Template
-        Currently running on {power_device}.
-        """)
-
-        with gr.Row():
-
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0)
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=512,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=12,
-                    step=1,
-                    value=2,
-                )
-
-        gr.Examples(
-            examples = examples,
-            inputs = [prompt]
-        )
-
-    run_button.click(
-        fn = infer,
-        inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs = [result]
-    )
-
-demo.queue().launch()
+# -*- coding: utf-8 -*-
+"""stable_diffusion_1_5_webui.ipynb

+Automatically generated by Colab.
+
+Original file is located at
+    https://colab.research.google.com/github/PinkQween/Discord-AI-bot-advanced/blob/main/stable_diffusion_1_5_webui.ipynb
+
+# AUTOMATIC1111 Stable Diffusion WebUI 1.5 + ChilloutMix Checkpoint
+
+For generating AI images.
+
+> [@nang](https://github.com/nathan-149)
+> References: [@wibus-wee](https://github.com/wibus-wee) [camenduru/stable-diffusion-webui-colab](https://github.com/camenduru/stable-diffusion-webui-colab)
  """
 
+# Commented out IPython magic to ensure Python compatibility.
+from IPython.display import display
+import ipywidgets as widgets
+import requests
+
+endpoint = 'https://civitai.com/api/v1/models'
+checkpointName = 'Chilloutmix'
+checkpointID = '' #@param {type:"string"}
+checkpointURL = '' #@param {type:"string"}
+
+#@markdown ---
+
+#@markdown ## **LoRAs**
+
+#@markdown Korean Doll Likeness `v1.0`
+koreanDollLikenessv10 = True #@param {type:"boolean"}
+#@markdown Korean Doll Likeness `v1.5`
+koreanDollLikenessv15 = True #@param {type:"boolean"}
+#@markdown Korean Doll Likeness `v2.0`
+koreanDollLikenessv20 = True #@param {type:"boolean"}
+#@markdown St. Louis (Luxurious Wheels) (Azur Lane)
+stLouis = True #@param {type:"boolean"}
+#@markdown Girls' Frontline-OTs-14"lightning"
+girlsFrontlineOTs14Lightning = True #@param {type:"boolean"}
+#@markdown Yae Miko | Realistic Genshin (Mixed)
+yaeMikoRealisticGenshinMixed = True #@param {type:"boolean"}
+#@markdown Fashion Girl
+fashionGirl = True #@param {type:"boolean"}
+#@markdown Cute Girl mix4
+cuteGirlMix4 = True #@param {type:"boolean"}
+#@markdown More LoRA? (Enter the URL of the LoRA file; use commas to separate multiple URLs)
+moreLoRA = "" #@param {type:"string"}
+
+#@markdown ---
+
+#@markdown ## **Textual Inversion**
+
+#@markdown Ulzzang-6500 (Korean doll aesthetic)
+ulzzang6500 = True #@param {type:"boolean"}
+#@markdown Pure Eros Face
+pureErosFace = True #@param {type:"boolean"}
+
+
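+# The tables below map each selectable checkpoint, LoRA, and textual-inversion toggle to
+# either a Civitai model id (int or id string), a pinned {'id': ..., 'version': ...} pair,
+# or a direct download URL; ids are resolved against the Civitai endpoint defined above.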
+defaultCheckpoint = {
+    'Chilloutmix': '6424',
+    'Sunshinemix': '9291',
+    'grapefruit_hentai': '2583'
+}
+
+loraDownloadIDs = {
+    'koreanDollLikenessv10': 'https://huggingface.co/Kanbara/doll-likeness-series/resolve/main/koreanDollLikeness_v10.safetensors',
+    'koreanDollLikenessv15': 'https://huggingface.co/Kanbara/doll-likeness-series/resolve/main/koreanDollLikeness_v15.safetensors',
+    'koreanDollLikenessv20': 'https://huggingface.co/Kanbara/doll-likeness-series/resolve/main/koreanDollLikeness_v20.safetensors',
+    'stLouis': 6669,
+    'girlsFrontlineOTs14Lightning': 6525,
+    'yaeMikoRealisticGenshinMixed': 8484,
+    # 'chilloutMixss': 10850,
+    'fashionGirl': {
+        "id": 8217,
+        "version": "v4.5"
+    },
+    'cuteGirlMix4': 14171,
+}
+
+textualInversionDownloadIDs = {
+    'ulzzang6500': 8109,
+    'pureErosFace': 4514,
+}
+
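+# Build the list of Civitai model ids to offer, then render an ipywidgets form:
+# one version dropdown per model (populated from GET {endpoint}/{id}) plus a text box
+# for every direct checkpoint / LoRA URL, confirmed with the "Use it!" button below.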
+downloadIds = []
+
+if checkpointID != '':
+    downloadIds = checkpointID.split(',')
+if checkpointName != 'others':
+    downloadIds.append(defaultCheckpoint[checkpointName])
+
+globalDropdowns = []
+globalVerions = []
+globalNames = []
+globalTexts = []
+checkpoints = []
+downloadLinks = []
+customLoras = []
+loraDownloadLinks = []
+
+def text_on_submit(change):
+    checkpoints[checkpoints.index(change['old'])] = change['new']
+
+if checkpointURL != '':
+    _downloadLinks = checkpointURL.split(',')
+    for _downloadLink in _downloadLinks:
+        checkpoints.append(_downloadLink.split('/')[-1])
+        downloadLinks.append(_downloadLink)
+        text = widgets.Text(value=_downloadLink.split('/')[-1], description=_downloadLink.split('/')[-1], disabled=False)
+        text.observe(text_on_submit, names='value')
+        form = widgets.VBox([text])
+        display(form)
+if moreLoRA != '':
+    _downloadLinks = moreLoRA.split(',')
+    for _downloadLink in _downloadLinks:
+        customLoras.append(_downloadLink)
+        loraDownloadLinks.append(_downloadLink)
+        text = widgets.Text(value=_downloadLink.split('/')[-1], description=_downloadLink.split('/')[-1], disabled=False)
+        text.observe(text_on_submit, names='value')
+        form = widgets.VBox([text])
+        display(form)
+
+
+def showVerionOptions(downloadId):
+    res = requests.get(endpoint + '/' + downloadId).json()
+    globalNames.append(res['name'])
+    versions = res['modelVersions']
+    globalVerions.append(versions)
+    options = []
+    for version in versions:
+        options.append(version['files'][0]['name'])
+    dropdown = widgets.Dropdown(options=options, description=res['name'])
+    globalDropdowns.append(dropdown)
+    form = widgets.VBox([dropdown])
+    display(form)
+
+for downloadId in downloadIds:
+    showVerionOptions(downloadId)
+
+def on_button_clicked(b):
+    downloadLink = None
+    for dropdown in globalDropdowns:
+        checkpoint = dropdown.value
+        versions = globalVerions[globalDropdowns.index(dropdown)]
+        for version in versions:
+            if version['files'][0]['name'] == checkpoint:
+                downloadLink = version['files'][0]['downloadUrl']
+                break
+        if downloadLink is None:
+            print('Error: downloadLink not assigned')
+            return
+        checkpoints.append(checkpoint)
+        downloadLinks.append(downloadLink)
+
+    print("Selected checkpoints: " + str(checkpoints) + " <===> " + str(downloadLinks))
+    print("Custom LoRAs (only those entered via moreLoRA are listed): " + str(customLoras) + " <===> " + str(loraDownloadLinks))
+    # %store checkpoints
+    # %store downloadLinks
+
+
+button = widgets.Button(description='Use it!')
+button.on_click(on_button_clicked)
+display(button)
+
+# Commented out IPython magic to ensure Python compatibility.
+#@title 2. Check GPU & Dev Environment
+
+import os, subprocess
+paperspace_m4000 = False
+#@markdown Paperspace platform?
+isPaperspace = False #@param {type:"boolean"}
+appPrefix = 'stable' + '-' + 'diffusion' + '-' + 'webui' # app prefix
+sdModelPrefix = 'Stable' + '-' + 'diffusion'
+ecosystemUIPrefix = "sd-webui"
+
+try:
+    subprocess.run(['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'], stdout=subprocess.PIPE)
+    if 'M4000' in subprocess.run(['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'], stdout=subprocess.PIPE).stdout.decode('utf-8'):
+        print("WARNING: You're using a Quadro M4000 GPU; xformers won't work.")
+        paperspace_m4000 = True
+        isPaperspace = True
+    else:
+        print("Your GPU is suitable - " + subprocess.run(['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'], stdout=subprocess.PIPE).stdout.decode('utf-8') + ".")
+    print("Platform: Paperspace" if isPaperspace else "Platform: Colab")
+except:
+    print("No GPU appears to be available. Please check your runtime type.")
+    exit()
+
+rootDir = isPaperspace and '/tmp' or '/content'
+diffusionWebUIInstalled = os.path.exists(rootDir + '/' + appPrefix)
+# %store rootDir
+# %store paperspace_m4000
+# %store isPaperspace
+# %store diffusionWebUIInstalled
+
+# Commented out IPython magic to ensure Python compatibility.
+import requests
+#@title 3. Install dependencies and extensions
+
+#@markdown ## **Extensions**
+
+#@markdown xformers
+xformersInstall = True #@param {type:"boolean"}
+#@markdown ControlNet
+controlNetExtension = False #@param {type:"boolean"}
+#@markdown OpenPose Editor
+openPoseExtension = False #@param {type:"boolean"}
+#@markdown Civitai Browser
+civitaiBrowserExtension = False #@param {type:"boolean"}
+#@markdown HuggingFace
+huggingFaceExtension = False #@param {type:"boolean"}
+#@markdown Images Browser
+imagesBrowserExtension = False #@param {type:"boolean"}
+#@markdown Additional Networks
+additionalNetworksExtension = True #@param {type:"boolean"}
+#@markdown Deforum
+deforumExtension = False #@param {type:"boolean"}
+#@markdown Kohya sd-scripts
+kohyaExtension = False #@param {type:"boolean"}
+#@markdown DreamBooth
+dreamBoothExtension = False #@param {type:"boolean"}
+
+#@markdown ---
+
+#@markdown ## **Others**
+
+#@markdown Styles CSV File URL
+stylesCSVFileURL = "" #@param {type:"string"}
+#@markdown Use [Cloudflared](https://github.com/cloudflare/cloudflared) instead of Gradio share
+useCloudflared = False #@param {type:"boolean"}
+
+#@markdown ---
+
+#@markdown ## **Startup Options**
+
+#@markdown API Support
+apiSupport = True #@param {type:"boolean"}
+#@markdown CORS Allow Origins
+corsAllowOrigins = "*" #@param {type:"string"}
+
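+# Helpers that resolve a model entry to {'url': ..., 'name': ...} via the Civitai API:
+# getLatestModelDownloadURL() picks the newest listed version, getSpecificModelDownloadURL()
+# pins a named version, and entries that are already URLs are passed through unchanged;
+# lookups that fail are skipped (None). For example, getSpecificModelDownloadURL(8217, "v4.5")
+# resolves the pinned Fashion Girl entry above to its file name and downloadUrl.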
+def getLatestModelDownloadURL(id):
+    try:
+        if type(id) == int:
+            res = requests.get(endpoint + '/' + str(id)).json()
+            latest = res['modelVersions'][0]
+            downloadLink = latest['files'][0]['downloadUrl']
+            name = latest['files'][0]['name']
+            return {
+                'url': downloadLink,
+                'name': name
+            }
+        else:
+            return {
+                'url': id,
+                'name': id.split('/')[-1]
+            }
+    except:
+        print("Lora model " + str(id) + " not found. Skip.")
+        return None
+
+def getSpecificModelDownloadURL(id, version):
+    try:
+        if type(id) == int:
+            res = requests.get(endpoint + '/' + str(id)).json()
+            for modelVersion in res['modelVersions']:
+                if modelVersion['name'] == version:
+                    # if modelVersion["baseModel"] != "SD 1.5":
+                    #     print("Lora model " + str(id) + " is not SD 1.5, may not work. Skip.")
+                    #     return None
+                    downloadLink = modelVersion['files'][0]['downloadUrl']
+                    name = modelVersion['files'][0]['name']
+                    return {
+                        'url': downloadLink,
+                        'name': name
+                    }
+        else:
+            return {
+                'url': id,
+                'name': id.split('/')[-1]
+            }
+    except:
+        print("Lora model " + str(id) + " version " + version + " not found. Skip.")
+        return None
+
+def getLoraDownloadURLs():
+    downloadURLs = []
+    for key in loraDownloadIDs:
+        if not eval(key): # skip if not selected
+            continue
+        if type(loraDownloadIDs[key]) is int:
+            downloadURLs.append(getLatestModelDownloadURL(loraDownloadIDs[key]))
+        elif type(loraDownloadIDs[key]) is dict: # {'id': 123, 'version': 'v1.0'}
+            downloadURLs.append(getSpecificModelDownloadURL(loraDownloadIDs[key]['id'], loraDownloadIDs[key]['version']))
+        elif type(loraDownloadIDs[key]) is str: # url
+            downloadURLs.append({ 'url': loraDownloadIDs[key], 'name': loraDownloadIDs[key].split('/')[-1] })
+    downloadURLs = [x for x in downloadURLs if x is not None] # remove None
+    for custom in customLoras:
+        downloadURLs.append({ 'url': loraDownloadLinks[customLoras.index(custom)], 'name': custom })
+    return downloadURLs
+
+def getTextualInversionDownloadURLs():
+    downloadURLs = []
+    for key in textualInversionDownloadIDs:
+        if not eval(key): # skip if not selected
+            continue
+        if type(textualInversionDownloadIDs[key]) is int:
+            downloadURLs.append(getLatestModelDownloadURL(textualInversionDownloadIDs[key]))
+        elif type(textualInversionDownloadIDs[key]) is dict: # {'id': 123, 'version': 'v1.0'}
+            downloadURLs.append(getSpecificModelDownloadURL(textualInversionDownloadIDs[key]['id'], textualInversionDownloadIDs[key]['version']))
+        elif type(textualInversionDownloadIDs[key]) is str: # url
+            downloadURLs.append({ 'url': textualInversionDownloadIDs[key], 'name': textualInversionDownloadIDs[key].split('/')[-1] })
+    downloadURLs = [x for x in downloadURLs if x is not None]
+    return downloadURLs
+
+
+loraDownloadURLs = getLoraDownloadURLs()
+textualInversionDownloadURLs = getTextualInversionDownloadURLs()
+
+
+# %store -r paperspace_m4000
+# %store -r isPaperspace
+# %store -r rootDir
+# %store -r checkpoints
+# %store -r downloadLinks
+# %store -r diffusionWebUIInstalled
+
+import subprocess
+
+!apt-get -y install -qq aria2
+ariaInstalled = False
+
+try:
+    subprocess.run(['aria2c', '--version'], stdout=subprocess.PIPE)
+    ariaInstalled = True
+except:
+    pass
+
+!pip install -q torch==2.0.0+cu118 torchvision==0.15.1+cu118 torchaudio==2.0.1+cu118 torchtext==0.15.1 torchdata==0.6.0 --extra-index-url https://download.pytorch.org/whl/cu118 -U
+!pip install -q xformers==0.0.19 triton==2.0.0 -U
+
+!git clone https://github.com/AUTOMATIC1111/{appPrefix} {rootDir}/{appPrefix}
+!git clone https://huggingface.co/embed/negative {rootDir}/{appPrefix}/embeddings/negative
+!git clone https://huggingface.co/embed/lora {rootDir}/{appPrefix}/models/Lora/positive
+!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth -d {rootDir}/{appPrefix}/models/ESRGAN -o 4x-UltraSharp.pth
+!git clone https://github.com/Bing-su/{ecosystemUIPrefix}-tunnels {rootDir}/{appPrefix}/extensions/{ecosystemUIPrefix}-tunnels
+!git clone https://github.com/thomasasfk/{ecosystemUIPrefix}-aspect-ratio-helper {rootDir}/{appPrefix}/extensions/{ecosystemUIPrefix}-aspect-ratio-helper
+!wget https://raw.githubusercontent.com/camenduru/{appPrefix}-scripts/main/run_n_times.py -O {rootDir}/{appPrefix}/scripts/run_n_times.py
+if deforumExtension:
+    !git clone https://github.com/deforum-art/deforum-for-automatic1111-webui {rootDir}/{appPrefix}/extensions/deforum-for-automatic1111-webui
+if imagesBrowserExtension:
+    !git clone https://github.com/AlUlkesh/{appPrefix}-images-browser {rootDir}/{appPrefix}/extensions/{appPrefix}-images-browser
+if huggingFaceExtension:
+    !git clone https://github.com/camenduru/{appPrefix}-huggingface {rootDir}/{appPrefix}/extensions/{appPrefix}-huggingface
+if civitaiBrowserExtension:
+    !git clone https://github.com/Vetchems/sd-civitai-browser {rootDir}/{appPrefix}/extensions/sd-civitai-browser
+if openPoseExtension:
+    !git clone https://github.com/fkunn1326/openpose-editor {rootDir}/{appPrefix}/extensions/openpose-editor
+if controlNetExtension:
+    !git clone https://github.com/Mikubill/{ecosystemUIPrefix}-controlnet {rootDir}/{appPrefix}/extensions/{ecosystemUIPrefix}-controlnet
+if additionalNetworksExtension:
+    !git clone https://github.com/kohya-ss/{ecosystemUIPrefix}-additional-networks {rootDir}/{appPrefix}/extensions/{ecosystemUIPrefix}-additional-networks
+if kohyaExtension:
+    !git clone https://github.com/ddpn08/kohya-sd-scripts-webui.git {rootDir}/{appPrefix}/extensions/kohya-sd-scripts-webui
+if dreamBoothExtension:
+    !git clone https://github.com/d8ahazard/sd_dreambooth_extension {rootDir}/{appPrefix}/extensions/sd_dreambooth_extension
+
+if isPaperspace:
+    # %cd /{appPrefix}
  else:
+    # %cd {rootDir}/{appPrefix}
+
+
+webuiControlNetModels = [
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_canny-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_depth-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_hed-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_mlsd-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_normal-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_openpose-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_scribble-fp16.safetensors",
+    "https://huggingface.co/webui/ControlNet-modules-safetensors/resolve/main/control_seg-fp16.safetensors",
+]
+annotatorLink = [
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/hand_pose_model.pth",
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/body_pose_model.pth",
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/dpt_hybrid-midas-501f0c75.pt",
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/mlsd_large_512_fp32.pth",
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/mlsd_tiny_512_fp32.pth",
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/network-bsds500.pth",
+    "https://huggingface.co/ckpt/ControlNet/resolve/main/upernet_global_small.pth",
+]
+
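+# Download helpers below: prefer aria2c (16 parallel connections per file) when it is
+# available, otherwise fall back to wget; both accept a single URL/filename or parallel lists.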
+def ariaDownload(downloadLink, checkpoint, path):
+    if (type(downloadLink) == list and type(checkpoint) == list):
+        for i in downloadLink:
+            !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {i} -d {path} -o {checkpoint[downloadLink.index(i)]}
+    else:
+        !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M {downloadLink} -d {path} -o {checkpoint}
+def wgetDownload(downloadLink, checkpoint, path):
+    if (type(downloadLink) == list and type(checkpoint) == list):
+        for i in downloadLink:
+            !wget -c {i} -P {path} -O {checkpoint[downloadLink.index(i)]}
+    else:
+        !wget -c {downloadLink} -P {path} -O {checkpoint}
+def autoDetectDownload(downloadLink, checkpoint, path):
+    if ariaInstalled:
+        ariaDownload(downloadLink, checkpoint, path)
+    else:
+        wgetDownload(downloadLink, checkpoint, path)
+
+autoDetectDownload(downloadLinks, checkpoints, rootDir + "/" + appPrefix + "/models/" + sdModelPrefix)
+if controlNetExtension:
+    for model in webuiControlNetModels:
+        autoDetectDownload(model, model.split('/')[-1], rootDir + "/" + appPrefix + "/extensions/" + ecosystemUIPrefix + "-controlnet/models")
+    for model in annotatorLink:
+        autoDetectDownload(model, model.split('/')[-1], rootDir + "/" + appPrefix + "/extensions/" + ecosystemUIPrefix + "-controlnet/annotator")
+for model in loraDownloadURLs:
+    autoDetectDownload(model["url"], model["name"], rootDir + "/" + appPrefix + "/models/Lora")
+for model in textualInversionDownloadURLs:
+    autoDetectDownload(model["url"], model["name"], rootDir + "/" + appPrefix + "/embeddings")
+if stylesCSVFileURL:
+    # autoDetectDownload(stylesCSVFileURL, "styles.csv", rootDir + "/{appPrefix}")
+    autoDetectDownload(stylesCSVFileURL, "styles.csv", rootDir + "/" + appPrefix)
+
+if additionalNetworksExtension:
+    !ln -s {rootDir}/{appPrefix}/models/Lora {rootDir}/{appPrefix}/extensions/{ecosystemUIPrefix}-additional-networks/models/lora
+
+
+diffusionWebUIInstalled = True
+# %store diffusionWebUIInstalled
+
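+# The sed call below patches modules/shared.py so the WebUI's quick-settings list also
+# exposes the VAE selector and CLIP skip (sd_vae, CLIP_stop_at_last_layers).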
+# %cd {rootDir}/{appPrefix}
+!sed -i -e 's/\"sd_model_checkpoint\"\,/\"sd_model_checkpoint\,sd_vae\,CLIP_stop_at_last_layers\"\,/g' {rootDir}/{appPrefix}/modules/shared.py
+
+if dreamBoothExtension:
+    !export REQS_FILE="./extensions/sd_dreambooth_extension/requirements.txt"
+
+backgroundProcess = ''
+
+if useCloudflared:
+    backgroundProcess = 'cloudflared tunnel --url 127.0.0.1:7860'
+    # !cloudflared tunnel --url 127.0.0.1:7860
+if backgroundProcess:
+    backgroundProcess = backgroundProcess + ' &'
+
+if useCloudflared:
+    !echo "Installing cloudflared"
+    !curl -Lo /usr/bin/cloudflared https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 && chmod +x /usr/bin/cloudflared
+
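+# Fetch the ChilloutMix checkpoint plus the SD 1.4 base checkpoint into models/Stable-diffusion;
+# note the hard-coded /content paths here assume a Colab runtime rather than the rootDir chosen above.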
+#@title Download Chilloutmix Checkpoint
+
+checkpoint = 'chilloutmix.safetensors' #@param ["chilloutmix.safetensors"]
+
+downloadLink = 'https://huggingface.co/naonovn/chilloutmix_NiPrunedFp32Fix/resolve/main/chilloutmix_NiPrunedFp32Fix.safetensors' #@param
+
+
+!wget -c {downloadLink} -O /content/stable-diffusion-webui/models/Stable-diffusion/{checkpoint}
+!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/ckpt/sd14/resolve/main/sd-v1-4.ckpt -d /content/stable-diffusion-webui/models/Stable-diffusion -o sd-v1-4.ckpt
+
+#@title Download More Specific Loras
+
+loraLinks = dict((
+    ('koreanDollLikeness_v15.safetensors', 'https://civitai.com/api/download/models/31284'),
+    ('koreanDollLikeness_v20.safetensors', 'https://civitai.com/api/download/models/31284'),
+    ('xswltry1.safetensors', 'https://civitai.com/api/download/models/29131'),
+    ('liyuuLora_liyuuV1.safetensors', 'https://civitai.com/api/download/models/11885'),
+    ('aiBeautyIthlinni_ithlinniV1.safetensors', 'https://civitai.com/api/download/models/19671'),
+    ('Cute_girl_mix4.safetensors', 'https://civitai.com/api/download/models/16677'),
+    ('breastinclassBetter_v141.safetensors', 'https://civitai.com/api/download/models/23250'),
+    ('chilloutmixss_xss10.safetensors', 'https://huggingface.co/HankChang/chilloutmixss_xss10/resolve/main/chilloutmixss_xss10.safetensors'),
+    ('legspread10.safetensors', 'https://civitai.com/api/download/models/29760'),
+))
+
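+# koreanDollLikeness_v15 and _v20 above both point at the same Civitai file id (31284);
+# each entry is fetched directly into models/Lora by the wget loop below.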
+
+for lora, link in loraLinks.items():
+    print('\nKey: %s' % lora)
+    print('Value: %s' % link)
+    !wget -c {link} -O /content/stable-diffusion-webui/models/Lora/{lora}
+
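+# Final cell: restore the stored variables, change into the WebUI directory, re-apply the
+# shared.py quick-settings patch, then start launch.py (optionally behind a cloudflared tunnel)
+# with the xformers, API, CORS, and share flags driven by the toggles above.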
+# Commented out IPython magic to ensure Python compatibility.
+# Restore variables if needed
+# %store -r paperspace_m4000
+# %store -r rootDir
+# %store -r diffusionWebUIInstalled
+
+# Navigate to application directory
+# %cd {rootDir}/{appPrefix}
+
+# Modify shared.py if necessary
+!sed -i -e 's/\"sd_model_checkpoint\"\,/\"sd_model_checkpoint\,sd_vae\,CLIP_stop_at_last_layers\"\,/g' {rootDir}/{appPrefix}/modules/shared.py
+
+# Launch Python application
+!{backgroundProcess} python launch.py --enable-insecure-extension-access {'' if paperspace_m4000 and not xformersInstall else '--xformers'} {'--api --cors-allow-origins "*" --listen' if corsAllowOrigins else ''} {'--share' if not backgroundProcess else ''} --gradio-queue --api