JoPmt committed
Commit d3abce5
1 Parent(s): e28acc2

Update app.py

Files changed (1)
  1. app.py +228 -66
app.py CHANGED
@@ -1,25 +1,30 @@
- import gradio as gr
- from PIL import Image
  import cv2
- import os, random
  import numpy as np
  from transformers import pipeline
- import PIL.Image
  from diffusers.utils import load_image
- from accelerate import Accelerator
- from diffusers import StableDiffusionPipeline
- import torch
- from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
  from controlnet_aux import OpenposeDetector
-
  accelerator = Accelerator(cpu=True)
 
  models =[
  "runwayml/stable-diffusion-v1-5",
  "prompthero/openjourney-v4",
  "CompVis/stable-diffusion-v1-4",
  "stabilityai/stable-diffusion-2-1",
- "stablediffusionapi/disney-pixal-cartoon",
  "stablediffusionapi/edge-of-realism",
  "MirageML/fantasy-scene",
  "wavymulder/lomo-diffusion",
@@ -39,7 +44,6 @@ models =[
  "RayHell/popupBook-diffusion",
  "MirageML/lowpoly-world",
  "deadman44/SD_Photoreal_Merged_Models",
- "Conflictx/CGI_Animation",
  "johnslegers/epic-diffusion",
  "tilake/China-Chic-illustration",
  "wavymulder/modelshoot",
@@ -59,10 +63,8 @@ models =[
  "digiplay/RealismEngine_v1",
  "digiplay/AIGEN_v1.4_diffusers",
  "stablediffusionapi/dreamshaper-v6",
- "JackAnon/GorynichMix",
  "p1atdev/liminal-space-diffusion",
  "nadanainone/gigaschizonegs",
- "darkVOYAGE/dvMJv4",
  "lckidwell/album-cover-style",
  "axolotron/ice-cream-animals",
  "perion/ai-avatar",
@@ -77,82 +79,242 @@ models =[
  "Akumetsu971/SD_Samurai_Anime_Model",
  "Bojaxxx/Fantastic-Mr-Fox-Diffusion",
  "sd-dreambooth-library/original-character-cyclps",
- ##"AIArtsChannel/steampunk-diffusion",
  ]
 
- sdulers =[
- "UniPCMultistepScheduler",
- "DDIMScheduler",
- "DDPMScheduler",
- "DDIMInverseScheduler",
- "CMStochasticIterativeScheduler",
- "DEISMultistepScheduler",
- "DPMSolverMultistepInverse",
- "DPMSolverMultistepScheduler",
- "DPMSolverSDEScheduler",
- "DPMSolverSinglestepScheduler",
- "EulerAncestralDiscreteScheduler",
- "EulerDiscreteScheduler",
- "HeunDiscreteScheduler",
- "IPNDMScheduler",
- "KarrasVeScheduler",
- "KDPM2AncestralDiscreteScheduler",
- "KDPM2DiscreteScheduler",
- "LMSDiscreteScheduler",
- "PNDMScheduler",
- "RePaintScheduler",
- "ScoreSdeVeScheduler",
- "ScoreSdeVpScheduler",
- "VQDiffusionScheduler",
- ]
 
- openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
- controlnet = [
-     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32),
-     ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32),
- ]
 
- generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 83647))
 
- def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
-     apol = []
-     modal_id = ""+modal_id+""
-     dula = ""+dula+""
-     pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(modal_id, use_safetensors=False,torch_dtype=torch.float32, safety_checker=None))
-     pope.unet.to(memory_format=torch.channels_last)
-     pope = accelerator.prepare(pope.to("cpu"))
-     pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modal_id, use_safetensors=False,controlnet=controlnet,torch_dtype=torch.float32,safety_checker=None))
-     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-     pipe.unet.to(memory_format=torch.channels_last)
-     pipe = accelerator.prepare(pipe.to("cpu"))
-     tilage = pope(prompt,num_inference_steps=5,height=512,width=512,generator=generator).images[0]
-     ##tilage.save('./til.png', 'PNG')
      cannyimage = np.array(tilage)
      low_threshold = 100
      high_threshold = 200
      cannyimage = cv2.Canny(cannyimage, low_threshold, high_threshold)
      zero_start = cannyimage.shape[1] // 4
      zero_end = zero_start + cannyimage.shape[1] // 2
      cannyimage[:, zero_start:zero_end] = 0
      cannyimage = cannyimage[:, :, None]
      cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
      canny_image = Image.fromarray(cannyimage)
-     ##canny_image.save('./can.png', 'PNG')
      pose_image = load_image(mput).resize((512, 512))
-     ##pose_image.save('./pos.png', 'PNG')
      openpose_image = openpose(pose_image)
-     ##openpose_image.save('./fin.png','PNG')
      images = [openpose_image, canny_image]
-     imoge = pipe([prompt]*2,images,num_inference_steps=stips,generator=generator,negative_prompt=[neg_prompt]*2,controlnet_conditioning_scale=[blip, blop])
-     for i, imge in enumerate(imoge["images"]):
          apol.append(imge)
      apol.append(openpose_image)
-     apol.append(cannyimage)
      apol.append(canny_image)
      apol.append(tilage)
      return apol
 
- iface = gr.Interface(fn=plex,inputs=[gr.Image(type="filepath"), gr.Textbox(label="prompt"), gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"), gr.Slider(label="infer_steps", value=5, minimum=1, step=1, maximum=5), gr.Dropdown(choices=models, value=models[0], type="value", label="select a model"), gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"), gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.1, step=0.1, maximum=1), gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.1, step=0.1, maximum=1)], outputs=gr.Gallery(columns=2,rows=3), title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo", description="by JoPmt.")
  iface.queue(max_size=1,api_open=False)
- iface.launch(max_threads=1)
 
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, StableDiffusionPipeline
+ import torch
  import cv2
  import numpy as np
  from transformers import pipeline
+ import gradio as gr
+ from PIL import Image
  from diffusers.utils import load_image
+ import os, random, gc, re, json, time, shutil, glob
+ import PIL.Image
+ import tqdm
  from controlnet_aux import OpenposeDetector
+ from accelerate import Accelerator
+ from huggingface_hub import HfApi, list_models, InferenceClient, ModelCard, RepoCard, upload_folder, hf_hub_download, HfFileSystem
+ HfApi=HfApi()
+ HF_TOKEN=os.getenv("HF_TOKEN")
+ HF_HUB_DISABLE_TELEMETRY=1
+ DO_NOT_TRACK=1
+ HF_HUB_ENABLE_HF_TRANSFER=0
  accelerator = Accelerator(cpu=True)
+ InferenceClient=InferenceClient()
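Note on the added flags: bare assignments like HF_HUB_DISABLE_TELEMETRY=1 only bind module-level Python names; they do not set process environment variables, so huggingface_hub never sees them. A minimal sketch of the likely intent (same variable names, standard os.environ usage; values must be strings):

    import os

    # Environment variables must be set on os.environ (as strings)
    # before huggingface_hub reads them.
    os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
    os.environ["DO_NOT_TRACK"] = "1"
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"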
 
  models =[
  "runwayml/stable-diffusion-v1-5",
  "prompthero/openjourney-v4",
  "CompVis/stable-diffusion-v1-4",
  "stabilityai/stable-diffusion-2-1",
 
  "stablediffusionapi/edge-of-realism",
  "MirageML/fantasy-scene",
  "wavymulder/lomo-diffusion",
 
  "RayHell/popupBook-diffusion",
  "MirageML/lowpoly-world",
  "deadman44/SD_Photoreal_Merged_Models",
 
  "johnslegers/epic-diffusion",
  "tilake/China-Chic-illustration",
  "wavymulder/modelshoot",
 
  "digiplay/RealismEngine_v1",
  "digiplay/AIGEN_v1.4_diffusers",
  "stablediffusionapi/dreamshaper-v6",
 
  "p1atdev/liminal-space-diffusion",
  "nadanainone/gigaschizonegs",
 
  "lckidwell/album-cover-style",
  "axolotron/ice-cream-animals",
  "perion/ai-avatar",
 
  "Akumetsu971/SD_Samurai_Anime_Model",
  "Bojaxxx/Fantastic-Mr-Fox-Diffusion",
  "sd-dreambooth-library/original-character-cyclps",
 
  ]
+ loris=[]
+ apol=[]
 
+ def smdls(models):
+     models=models
+     mtlst=HfApi.list_models(filter="diffusers:StableDiffusionPipeline",limit=500,full=True,)
+     if mtlst:
+         for nea in mtlst:
+             vmh=""+str(nea.id)+""
+             models.append(vmh)
+     return models
+ 
+ def sldls(loris):
+     loris=loris
+     ltlst=HfApi.list_models(filter="stable-diffusion",search="lora",limit=500,full=True,)
+     if ltlst:
+         for noa in ltlst:
+             lmh=""+str(noa.id)+""
+             loris.append(lmh)
+     return loris
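The two helpers above fill the model and LoRA dropdowns by querying the Hub. A condensed sketch of the same query pattern, assuming a huggingface_hub version where list_models accepts these arguments:

    from huggingface_hub import HfApi

    api = HfApi()
    # Repos loadable with StableDiffusionPipeline
    sd_models = [m.id for m in api.list_models(filter="diffusers:StableDiffusionPipeline", limit=500)]
    # LoRA repos found via a tag filter plus free-text search
    loras = [m.id for m in api.list_models(filter="stable-diffusion", search="lora", limit=500)]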
+ 
+ def chdr(apol,prompt,modil,los,stips,fnamo,gaul):
+     try:
+         type="SD_controlnet"
+         tre='./tmpo/'+fnamo+'.json'
+         tra='./tmpo/'+fnamo+'_0.png'
+         trm='./tmpo/'+fnamo+'_1.png'
+         trv='./tmpo/'+fnamo+'_pose.png'
+         trh='./tmpo/'+fnamo+'_canny.png'
+         trg='./tmpo/'+fnamo+'_cann_im.png'
+         trq='./tmpo/'+fnamo+'_tilage.png'
+         flng=["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
+         flng=[itm[::-1] for itm in flng]
+         ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
+         if re.search(ptn, prompt, re.IGNORECASE):
+             print("onon buddy")
+         else:
+             dobj={'img_name':fnamo,'model':modil,'lora':los,'prompt':prompt,'steps':stips,'type':type}
+             with open(tre, 'w') as f:
+                 json.dump(dobj, f)
+             HfApi.upload_folder(repo_id="JoPmt/hf_community_images",folder_path="./tmpo",repo_type="dataset",path_in_repo="./",token=HF_TOKEN)
+             dobj={'img_name':fnamo,'model':modil,'lora':los,'prompt':prompt,'steps':stips,'type':type,'haed':gaul,}
+             with open(tre, 'w') as f:
+                 json.dump(dobj, f)
+             HfApi.upload_folder(repo_id="JoPmt/Tst_datast_imgs",folder_path="./tmpo",repo_type="dataset",path_in_repo="./",token=HF_TOKEN)
+         try:
+             for pgn in glob.glob('./tmpo/*.png'):
+                 os.remove(pgn)
+             for jgn in glob.glob('./tmpo/*.json'):
+                 os.remove(jgn)
+             del tre
+             del tra
+             del trm
+             del trv
+             del trh
+             del trg
+             del trq
+         except:
+             print("cant")
+     except:
+         print("failed to umake obj")
+ 
+ def crll(dnk):
+     lix=""
+     lotr=HfApi.list_files_info(repo_id=""+dnk+"",repo_type="model")
+     for flre in list(lotr):
+         fllr=[]
+         gar=re.match(r'.+(\.pt|\.ckpt|\.bin|\.safetensors)$', flre.path)
+         yir=re.search(r'[^/]+$', flre.path)
+         if gar:
+             fllr.append(""+str(yir.group(0))+"")
+             lix=""+fllr[-1]+""
+         else:
+             lix=""
+     return lix
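crll picks a loadable weight filename out of a LoRA repo. A sketch of the same idea using list_repo_files (list_files_info also exists in huggingface_hub but was later deprecated); unlike crll, this version does not reset the result when a non-weight file follows a match:

    import re
    from huggingface_hub import HfApi

    def find_weight_file(repo_id):
        # Basename of the last .pt/.ckpt/.bin/.safetensors file, or ""
        name = ""
        for path in HfApi().list_repo_files(repo_id=repo_id, repo_type="model"):
            if re.match(r".+(\.pt|\.ckpt|\.bin|\.safetensors)$", path):
                name = re.search(r"[^/]+$", path).group(0)
        return name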
+ 
+ def plax(gaul,req: gr.Request):
+     gaul=str(req.headers)
+     return gaul
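plex, just below, scrapes trigger phrases out of model cards with a lookbehind alternation. A standalone sketch of that regex against a toy card text (the card text here is invented for illustration):

    import re

    card_text = "Trigger words: analog style to trigger the effect."
    ptn = (r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|'
           r'(?<=You should use))\s*(.*?)\s*(?=to trigger)')
    m = re.search(ptn, card_text, re.IGNORECASE)
    if m:
        print(m.group(1))  # -> "analog style"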
+ 
+ def plex(prompt,mput,neg_prompt,modil,stips,scaly,csal,csbl,nut,wei,hei,los,loca,gaul,progress=gr.Progress(track_tqdm=True)):
+     gc.collect()
+     adi=""
+     ldi=""
+ 
+     openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+     controlnet = [
+         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32),
+         ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32),
+     ]
+     try:
+         crda=ModelCard.load(""+modil+"")
+         card=ModelCard.load(""+modil+"").data.to_dict().get("instance_prompt")
+         cerd=ModelCard.load(""+modil+"").data.to_dict().get("custom_prompt")
+         cird=ModelCard.load(""+modil+"").data.to_dict().get("lora_prompt")
+         mtch=re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*(.*?)\s*(?=to trigger)', crda.text, re.IGNORECASE)
+         moch=re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*([^.]*)', crda.text, re.IGNORECASE)
+         if moch:
+             adi+=""+str(moch.group(1))+", "
+         else:
+             print("no floff trigger")
+         if mtch:
+             adi+=""+str(mtch.group(1))+", "
+         else:
+             print("no fluff trigger")
+         if card:
+             adi+=""+str(card)+", "
+         else:
+             print("no instance")
+         if cerd:
+             adi+=""+str(cerd)+", "
+         else:
+             print("no custom")
+         if cird:
+             adi+=""+str(cird)+", "
+         else:
+             print("no lora")
+     except:
+         print("no card")
+     try:
+         pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(""+modil+"", use_safetensors=False,torch_dtype=torch.float32, safety_checker=None))
+         pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(""+modil+"", use_safetensors=False,controlnet=controlnet,torch_dtype=torch.float32,safety_checker=None))
+     except:
+         gc.collect()
+         pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(""+modil+"", use_safetensors=True,torch_dtype=torch.float32, safety_checker=None))
+         pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(""+modil+"", use_safetensors=True,controlnet=controlnet,torch_dtype=torch.float32,safety_checker=None))
+     if los:
+         try:
+             lrda=ModelCard.load(""+los+"")
+             lard=ModelCard.load(""+los+"").data.to_dict().get("instance_prompt")
+             lerd=ModelCard.load(""+los+"").data.to_dict().get("custom_prompt")
+             lird=ModelCard.load(""+los+"").data.to_dict().get("stable-diffusion")
+             ltch=re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*(.*?)\s*(?=to trigger)', lrda.text, re.IGNORECASE)
+             loch=re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*([^.]*)', lrda.text, re.IGNORECASE)
+             if loch and lird:
+                 ldi+=""+str(loch.group(1))+", "
+             else:
+                 print("no lloff trigger")
+             if ltch and lird:
+                 ldi+=""+str(ltch.group(1))+", "
+             else:
+                 print("no lluff trigger")
+             if lard and lird:
+                 ldi+=""+str(lard)+", "
+             else:
+                 print("no instance")
+                 ldi+=""
+             if lerd and lird:
+                 ldi+=""+str(lerd)+", "
+             else:
+                 print("no custom")
+                 ldi+=""
+         except:
+             print("no trigger")
+         try:
+             pope.load_lora_weights(""+los+"", weight_name=""+str(crll(los))+"",)
+             pope.fuse_lora(fuse_unet=True,fuse_text_encoder=False)
+         except:
+             print("no can do")
+     else:
+         los=""
+     pope.unet.to(memory_format=torch.channels_last)
+     pope = accelerator.prepare(pope.to("cpu"))
+     pipe.unet.to(memory_format=torch.channels_last)
+     pipe = accelerator.prepare(pipe.to("cpu"))
+     gc.collect()
+     apol=[]
+     height=hei
+     width=wei
+     prompt=""+str(adi)+""+str(ldi)+""+prompt+""
+     negative_prompt=""+neg_prompt+""
+     lora_scale=loca
+     if nut == 0:
+         nm = random.randint(1, 2147483616)
+         while nm % 32 != 0:
+             nm = random.randint(1, 2147483616)
+     else:
+         nm=nut
+     generator = torch.Generator(device="cpu").manual_seed(nm)
+     tilage = pope(prompt,num_inference_steps=5,height=height,width=width,generator=generator,cross_attention_kwargs={"scale": lora_scale}).images[0]
      cannyimage = np.array(tilage)
      low_threshold = 100
      high_threshold = 200
+     fnamo=""+str(int(time.time()))+""
      cannyimage = cv2.Canny(cannyimage, low_threshold, high_threshold)
+     cammyimage=Image.fromarray(cannyimage).save('./tmpo/'+fnamo+'_canny.png', 'PNG')
      zero_start = cannyimage.shape[1] // 4
      zero_end = zero_start + cannyimage.shape[1] // 2
      cannyimage[:, zero_start:zero_end] = 0
      cannyimage = cannyimage[:, :, None]
      cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
      canny_image = Image.fromarray(cannyimage)
      pose_image = load_image(mput).resize((512, 512))
      openpose_image = openpose(pose_image)
      images = [openpose_image, canny_image]
+     omage=pipe([prompt]*2,images,num_inference_steps=stips,generator=generator,negative_prompt=[neg_prompt]*2,controlnet_conditioning_scale=[csal, csbl])
+     for i, imge in enumerate(omage["images"]):
          apol.append(imge)
+         imge.save('./tmpo/'+fnamo+'_'+str(i)+'.png', 'PNG')
      apol.append(openpose_image)
+     apol.append(cammyimage)
      apol.append(canny_image)
      apol.append(tilage)
+     openpose_image.save('./tmpo/'+fnamo+'_pose.png', 'PNG')
+     canny_image.save('./tmpo/'+fnamo+'_cann_im.png', 'PNG')
+     tilage.save('./tmpo/'+fnamo+'_tilage.png', 'PNG')
+     chdr(apol,prompt,modil,los,stips,fnamo,gaul)
      return apol
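The heart of plex is conditioning one StableDiffusionControlNetPipeline on an OpenPose map and a Canny map at once, with the middle band of the Canny image zeroed so the pose signal dominates there. A condensed, self-contained sketch of that dual-ControlNet pattern (standard diffusers usage; the base model and input path are placeholders, and CPU inference is slow):

    import cv2
    import numpy as np
    import torch
    from PIL import Image
    from controlnet_aux import OpenposeDetector
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
    from diffusers.utils import load_image

    controlnets = [
        ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32),
        ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32),
    ]
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnets,
        torch_dtype=torch.float32, safety_checker=None)

    source = load_image("input.png").resize((512, 512))  # placeholder path
    pose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")(source)
    canny = cv2.Canny(np.array(source), 100, 200)
    w = canny.shape[1]
    canny[:, w // 4 : w // 4 + w // 2] = 0               # blank the middle band
    canny = Image.fromarray(np.stack([canny] * 3, axis=2))

    # One conditioning image and one scale per ControlNet, in order
    image = pipe("a prompt", [pose, canny], num_inference_steps=20,
                 controlnet_conditioning_scale=[0.5, 0.5]).images[0]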
 
+ def aip(ill,api_name="/run"):
+     return
+ def pit(ill,api_name="/predict"):
+     return
+ 
+ with gr.Blocks(theme=random.choice([gr.themes.Monochrome(),gr.themes.Base.from_hub("gradio/seafoam"),gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"),gr.themes.Glass(),gr.themes.Base(),]),analytics_enabled=False) as iface:
+     ##iface.description="Running on cpu, very slow! by JoPmt."
+     out=gr.Gallery(label="Generated Output Image", columns=1)
+     inut=gr.Textbox(label="Prompt")
+     mput=gr.Image(type="filepath")
+     gaul=gr.Textbox(visible=False)
+     inot=gr.Dropdown(choices=smdls(models),value=random.choice(models), type="value")
+     btn=gr.Button("GENERATE")
+     with gr.Accordion("Advanced Settings", open=False):
+         inlt=gr.Dropdown(choices=sldls(loris),value=None, type="value")
+         inet=gr.Textbox(label="Negative_prompt", value="low quality, bad quality,")
+         inyt=gr.Slider(label="Num inference steps",minimum=1,step=1,maximum=30,value=20)
+         inat=gr.Slider(label="Guidance_scale",minimum=1,step=1,maximum=20,value=7)
+         csal=gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.1, step=0.1, maximum=1)
+         csbl=gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.1, step=0.1, maximum=1)
+         loca=gr.Slider(label="Lora scale",minimum=0.1,step=0.1,maximum=0.9,value=0.5)
+         indt=gr.Slider(label="Manual seed (leave 0 for random)",minimum=0,step=32,maximum=2147483616,value=0)
+         inwt=gr.Slider(label="Width",minimum=512,step=32,maximum=1024,value=512)
+         inht=gr.Slider(label="Height",minimum=512,step=32,maximum=1024,value=512)
+ 
+     btn.click(fn=plax,inputs=gaul,outputs=gaul).then(fn=plex, outputs=[out], inputs=[inut,mput,inet,inot,inyt,inat,csal,csbl,indt,inwt,inht,inlt,loca,gaul])
+ 
  iface.queue(max_size=1,api_open=False)
+ iface.launch(max_threads=20,inline=False,show_api=False)
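The button is wired through two chained events: plax first captures the request headers into the hidden gaul textbox, then plex runs with that value. A minimal sketch of the .click().then() pattern in gr.Blocks (handler bodies are placeholders):

    import gradio as gr

    def grab_headers(_, req: gr.Request):
        # Step 1: stash the request headers in a hidden component
        return str(req.headers)

    def generate(prompt, headers):
        # Step 2: runs only after grab_headers has finished
        return f"{prompt} (headers captured: {bool(headers)})"

    with gr.Blocks() as demo:
        hidden = gr.Textbox(visible=False)
        prompt = gr.Textbox(label="Prompt")
        out = gr.Textbox(label="Output")
        btn = gr.Button("GENERATE")
        btn.click(fn=grab_headers, inputs=hidden, outputs=hidden).then(
            fn=generate, inputs=[prompt, hidden], outputs=out)

    demo.queue(max_size=1)
    demo.launch()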