from diffusers import DiffusionPipeline
import torch
import gradio as gr
from PIL import Image
import os, random, gc, re, json, time, shutil, glob
from accelerate import Accelerator
from huggingface_hub import HfApi, list_models, InferenceClient, ModelCard, RepoCard, upload_folder, hf_hub_download, HfFileSystem

# Hub API client (an instance, so the class name is not shadowed) and token for dataset uploads.
hf_api = HfApi()
HF_TOKEN = os.getenv("HF_TOKEN")
# Telemetry opt-outs only take effect as environment variables, not as module-level names.
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["DO_NOT_TRACK"] = "1"
# Everything runs on CPU in this Space.
accelerator = Accelerator(cpu=True)
inference_client = InferenceClient()  # currently unused
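# The generation and upload paths below write PNGs and JSON metadata into ./tmpo.
# Assumption: the scratch directory may be missing on a fresh start, so create it up front.
os.makedirs("./tmpo", exist_ok=True)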
models =[
"runwayml/stable-diffusion-v1-5",
"prompthero/openjourney-v4",
"CompVis/stable-diffusion-v1-4",
"stabilityai/stable-diffusion-2-1",
"stablediffusionapi/edge-of-realism",
"MirageML/fantasy-scene",
"wavymulder/lomo-diffusion",
"sd-dreambooth-library/fashion",
"DucHaiten/DucHaitenDreamWorld",
"VegaKH/Ultraskin",
"kandinsky-community/kandinsky-2-1",
"MirageML/lowpoly-cyberpunk",
"thehive/everyjourney-sdxl-0.9-finetuned",
"plasmo/woolitize-768sd1-5",
"plasmo/food-crit",
"johnslegers/epic-diffusion-v1.1",
"Fictiverse/ElRisitas",
"robotjung/SemiRealMix",
"herpritts/FFXIV-Style",
"prompthero/linkedin-diffusion",
"RayHell/popupBook-diffusion",
"MirageML/lowpoly-world",
"deadman44/SD_Photoreal_Merged_Models",
"johnslegers/epic-diffusion",
"tilake/China-Chic-illustration",
"wavymulder/modelshoot",
"prompthero/openjourney-lora",
"Fictiverse/Stable_Diffusion_VoxelArt_Model",
"darkstorm2150/Protogen_v2.2_Official_Release",
"hassanblend/HassanBlend1.5.1.2",
"hassanblend/hassanblend1.4",
"nitrosocke/redshift-diffusion",
"prompthero/openjourney-v2",
"nitrosocke/Arcane-Diffusion",
"Lykon/DreamShaper",
"wavymulder/Analog-Diffusion",
"nitrosocke/mo-di-diffusion",
"dreamlike-art/dreamlike-diffusion-1.0",
"dreamlike-art/dreamlike-photoreal-2.0",
"digiplay/RealismEngine_v1",
"digiplay/AIGEN_v1.4_diffusers",
"stablediffusionapi/dreamshaper-v6",
"p1atdev/liminal-space-diffusion",
"nadanainone/gigaschizonegs",
"lckidwell/album-cover-style",
"axolotron/ice-cream-animals",
"perion/ai-avatar",
"digiplay/GhostMix",
"ThePioneer/MISA",
"TheLastBen/froggy-style-v21-768",
"FloydianSound/Nixeu_Diffusion_v1-5",
"kakaobrain/karlo-v1-alpha-image-variations",
"digiplay/PotoPhotoRealism_v1",
"ConsistentFactor/Aurora-By_Consistent_Factor",
"rim0/quadruped_mechas",
"Akumetsu971/SD_Samurai_Anime_Model",
"Bojaxxx/Fantastic-Mr-Fox-Diffusion",
"sd-dreambooth-library/original-character-cyclps",
]
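# Note: this curated list is only a seed; smdls() below extends it at UI build time
# with public StableDiffusionPipeline repos discovered on the Hub.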
loris = []
apol = []

def smdls(models):
    # Extend the curated model list with public StableDiffusionPipeline repos from the Hub.
    mtlst = hf_api.list_models(filter="diffusers:StableDiffusionPipeline", limit=500, full=True)
    if mtlst:
        for nea in mtlst:
            models.append(str(nea.id))
    return models

def sldls(loris):
    # Collect public stable-diffusion LoRA repos to offer as optional adapters.
    ltlst = hf_api.list_models(filter="stable-diffusion", search="lora", limit=500, full=True)
    if ltlst:
        for noa in ltlst:
            loris.append(str(noa.id))
    return loris
def chdr(apol, prompt, modil, los, stips, fnamo, gaul):
    # Write generation metadata next to the saved image and upload both to the community datasets.
    try:
        typ = "SD"
        tre = './tmpo/' + fnamo + '.json'
        tra = './tmpo/' + fnamo + '.png'
        # Reversed blocklist of disallowed prompt terms, un-reversed at runtime for the filter below.
        flng = ["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
        flng = [itm[::-1] for itm in flng]
        ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
        if re.search(ptn, prompt, re.IGNORECASE):
            print("onon buddy")
        else:
            dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': typ}
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/hf_community_images", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
            dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': typ, 'haed': gaul}
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/Tst_datast_imgs", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
        try:
            # Clean up the scratch directory once the upload attempt is done.
            for pgn in glob.glob('./tmpo/*.png'):
                os.remove(pgn)
            for jgn in glob.glob('./tmpo/*.json'):
                os.remove(jgn)
            del tre
            del tra
        except OSError:
            print("cant")
    except Exception:
        print("failed to make obj")
def crll(dnk):
    # Pick a loadable LoRA weight file (.pt/.ckpt/.bin/.safetensors) from the given repo.
    lix = ""
    lotr = hf_api.list_files_info(repo_id=dnk, repo_type="model")
    for flre in list(lotr):
        gar = re.match(r'.+(\.pt|\.ckpt|\.bin|\.safetensors)$', flre.path)
        yir = re.search(r'[^/]+$', flre.path)
        if gar and yir:
            # Keep the basename of the last matching weight file; do not reset it
            # when a non-weight file happens to come later in the listing.
            lix = str(yir.group(0))
    return lix

def plax(gaul, req: gr.Request):
    # Capture the request headers so they can be stored with the generation metadata.
    gaul = str(req.headers)
    return gaul
def plex(prompt, neg_prompt, modil, stips, scaly, nut, wei, hei, los, loca, gaul, progress=gr.Progress(track_tqdm=True)):
    gc.collect()
    adi = ""
    ldi = ""
    # Pull trigger words / instance prompts from the model card so they can be prepended to the prompt.
    try:
        crda = ModelCard.load(modil)
        card_data = crda.data.to_dict()
        card = card_data.get("instance_prompt")
        cerd = card_data.get("custom_prompt")
        cird = card_data.get("lora_prompt")
        mtch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*(.*?)\s*(?=to trigger)', crda.text, re.IGNORECASE)
        moch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*([^.]*)', crda.text, re.IGNORECASE)
        if moch:
            adi += str(moch.group(1)) + ", "
        else:
            print("no floff trigger")
        if mtch:
            adi += str(mtch.group(1)) + ", "
        else:
            print("no fluff trigger")
        if card:
            adi += str(card) + ", "
        else:
            print("no instance")
        if cerd:
            adi += str(cerd) + ", "
        else:
            print("no custom")
        if cird:
            adi += str(cird) + ", "
        else:
            print("no lora")
    except Exception:
        print("no card")
    # Try progressively more permissive loading configurations until one succeeds.
    # (Chaining the calls with `or` never falls through on exceptions, so use an explicit loop.)
    pipe = None
    load_configs = [
        dict(torch_dtype=torch.bfloat16, variant="fp16", use_safetensors=True),
        dict(torch_dtype=torch.float32, variant="fp32", use_safetensors=True),
        dict(torch_dtype=torch.bfloat16, variant="fp16", use_safetensors=False),
        dict(torch_dtype=torch.float32, variant="fp32", use_safetensors=False),
        dict(torch_dtype=torch.float32, variant=None, use_safetensors=True),
        dict(torch_dtype=torch.float32, variant=None, use_safetensors=False),
    ]
    for cfg in load_configs:
        try:
            pipe = accelerator.prepare(DiffusionPipeline.from_pretrained(modil, safety_checker=None, **cfg))
            break
        except Exception:
            gc.collect()
    if los:
        # If a LoRA repo was selected, harvest its trigger words and fuse its weights into the pipeline.
        try:
            lrda = ModelCard.load(los)
            lora_data = lrda.data.to_dict()
            lard = lora_data.get("instance_prompt")
            lerd = lora_data.get("custom_prompt")
            lird = lora_data.get("stable-diffusion")
            ltch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*(.*?)\s*(?=to trigger)', lrda.text, re.IGNORECASE)
            loch = re.search(r'(?:(?<=trigger words:)|(?<=trigger:)|(?<=You could use)|(?<=You should use))\s*([^.]*)', lrda.text, re.IGNORECASE)
            if loch and lird:
                ldi += str(loch.group(1)) + ", "
            else:
                print("no lloff trigger")
            if ltch and lird:
                ldi += str(ltch.group(1)) + ", "
            else:
                print("no lluff trigger")
            if lard and lird:
                ldi += str(lard) + ", "
            else:
                print("no instance")
            if lerd and lird:
                ldi += str(lerd) + ", "
            else:
                print("no custom")
        except Exception:
            print("no trigger")
        try:
            pipe.load_lora_weights(los, weight_name=str(crll(los)))
            pipe.fuse_lora(fuse_unet=True, fuse_text_encoder=False)
        except Exception:
            print("no can do")
    else:
        los = ""
    pipe.unet.to(memory_format=torch.channels_last)
    pipe.to("cpu")
    gc.collect()
    apol = []
    lora_scale = loca
    # Seed handling: 0 means pick a random seed (kept as a multiple of 32, matching the seed slider's step).
    if nut == 0:
        nm = random.randint(1, 2147483616)
        while nm % 32 != 0:
            nm = random.randint(1, 2147483616)
    else:
        nm = nut
    generator = torch.Generator(device="cpu").manual_seed(nm)
    image = pipe(prompt=str(adi) + str(ldi) + prompt, negative_prompt=neg_prompt, generator=generator, num_inference_steps=stips, guidance_scale=scaly, width=wei, height=hei, cross_attention_kwargs={"scale": lora_scale})
    for a, imze in enumerate(image.images):
        apol.append(imze)
        fnamo = str(int(time.time()))
        imze.save('./tmpo/' + fnamo + '.png', 'PNG')
    chdr(apol, prompt, modil, los, stips, fnamo, gaul)
    return apol
def aip(ill,api_name="/run"):
return
def pit(ill,api_name="/predict"):
return
with gr.Blocks(theme=random.choice([gr.themes.Monochrome(), gr.themes.Base.from_hub("gradio/seafoam"), gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"), gr.themes.Glass(), gr.themes.Base()]), analytics_enabled=False) as iface:
    iface.description = "Running on cpu, very slow! by JoPmt."
    out = gr.Gallery(label="Generated Output Image", columns=1)
    inut = gr.Textbox(label="Prompt")
    gaul = gr.Textbox(visible=False)
    inot = gr.Dropdown(choices=smdls(models), value=random.choice(models), type="value")
    btn = gr.Button("GENERATE")
    with gr.Accordion("Advanced Settings", open=False):
        inlt = gr.Dropdown(choices=sldls(loris), value=None, type="value")
        inet = gr.Textbox(label="Negative prompt", value="low quality, bad quality,")
        inyt = gr.Slider(label="Num inference steps", minimum=1, step=1, maximum=30, value=20)
        inat = gr.Slider(label="Guidance scale", minimum=1, step=1, maximum=20, value=7)
        loca = gr.Slider(label="Lora scale", minimum=0.1, step=0.1, maximum=0.9, value=0.5)
        indt = gr.Slider(label="Manual seed (leave 0 for random)", minimum=0, step=32, maximum=2147483616, value=0)
        inwt = gr.Slider(label="Width", minimum=512, step=32, maximum=1024, value=512)
        inht = gr.Slider(label="Height", minimum=512, step=32, maximum=1024, value=512)
    # Capture the request headers first, then run generation with all UI inputs.
    btn.click(fn=plax, inputs=gaul, outputs=gaul).then(
        fn=plex, outputs=[out], inputs=[inut, inet, inot, inyt, inat, indt, inwt, inht, inlt, loca, gaul])

iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=10, inline=False, show_api=False)