JoPmt committed on
Commit
ba9faca
1 Parent(s): 547eb1a

Update app.py

Files changed (1)
  1. app.py +113 -19
app.py CHANGED
@@ -1,35 +1,129 @@
- from PIL import Image
- import PIL.Image
  import cv2
- import gradio as gr
  import numpy as np
- import torch, os, random
- from accelerate import Accelerator
  from transformers import pipeline
  from diffusers.utils import load_image
- from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
-
  accelerator = Accelerator(cpu=True)
  pope_prior = accelerator.prepare(KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float32))
  pope_prior = pope_prior.to("cpu")
  pope = accelerator.prepare(KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float32))
  pope = pope.to("cpu")
- generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 4876364))

- def plex(img, cook, one, two, three):
-     goof = load_image(img).resize((512, 512))
      prompt = cook
-     negative_prior_prompt = "lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature"
-     img_emb = pope_prior(prompt=prompt, guidance_scale=0.85, num_inference_steps=5, generator=generator)
-     negative_emb = pope_prior(prompt=negative_prior_prompt, guidance_scale=1, num_inference_steps=5, generator=generator)
-     imags = pope(image_embeds=img_emb.image_embeds,negative_image_embeds=negative_emb.image_embeds,num_inference_steps=5,generator=generator,height=512,width=512,).images[0]
      images_texts = [cook, goof, imags]
      weights = [one, two, three]
      primpt = ""
-     prior_out = pope_prior.interpolate(images_texts, weights, num_inference_steps=5)
-     imas = pope(**prior_out, height=512, width=512, num_inference_steps=5).images[0]
-     return imas

- iface = gr.Interface(fn=plex,inputs=[gr.Image(label="drop", type="filepath"), gr.Textbox(label="prompt"), gr.Slider(label="Text Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.5), gr.Slider(label="Your Image Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.5),gr.Slider(label="Generated Image Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.3)], outputs=gr.Image(), title="Ksky22 Cntrl Gdd Interp", description="ksky22 Cntrl Gdd Interp")
  iface.queue(max_size=1,api_open=False)
- iface.launch(max_threads=1)
 
+ from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
+ import torch
  import cv2
  import numpy as np
  from transformers import pipeline
+ import gradio as gr
+ from PIL import Image
  from diffusers.utils import load_image
+ import os, random, gc, re, json, time, shutil, glob
+ import PIL.Image
+ import tqdm
+ from accelerate import Accelerator
+ from huggingface_hub import HfApi, InferenceClient, ModelCard, RepoCard, upload_folder, hf_hub_download, HfFileSystem
+ HfApi=HfApi()
+ HF_TOKEN=os.getenv("HF_TOKEN")
+ HF_HUB_DISABLE_TELEMETRY=1
+ DO_NOT_TRACK=1
+ HF_HUB_ENABLE_HF_TRANSFER=0
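+ # Load the Kandinsky 2.2 prior and decoder pipelines on CPU in float32, with channels_last memory layout.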
  accelerator = Accelerator(cpu=True)
+ InferenceClient=InferenceClient()
+
+ apol=[]
+
  pope_prior = accelerator.prepare(KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float32))
+ pope_prior.prior.to(memory_format=torch.channels_last)
  pope_prior = pope_prior.to("cpu")
  pope = accelerator.prepare(KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float32))
+ pope.unet.to(memory_format=torch.channels_last)
  pope = pope.to("cpu")
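
+ # chdr: screens the prompt against a blocklist (stored reversed), writes a JSON metadata
+ # record next to the generated PNGs in ./tmpo, uploads that folder to two dataset repos,
+ # then deletes the temporary files.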
 
+ def chdr(apol,prompt,modil,stips,fnamo,gaul):
+     try:
+         type="KNDSK22_INTERP"
+         tre='./tmpo/'+fnamo+'.json'
+         tra='./tmpo/'+fnamo+'_0.png'
+         trm='./tmpo/'+fnamo+'_half.png'
+         flng=["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
+         flng=[itm[::-1] for itm in flng]
+         ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
+         if re.search(ptn, prompt, re.IGNORECASE):
+             print("onon buddy")
+         else:
+             dobj={'img_name':fnamo,'model':modil,'lora':"",'prompt':prompt,'steps':stips,'type':type}
+             with open(tre, 'w') as f:
+                 json.dump(dobj, f)
+             HfApi.upload_folder(repo_id="JoPmt/hf_community_images",folder_path="./tmpo",repo_type="dataset",path_in_repo="./",token=HF_TOKEN)
+             dobj={'img_name':fnamo,'model':modil,'lora':"",'prompt':prompt,'steps':stips,'type':type,'haed':gaul,}
+             with open(tre, 'w') as f:
+                 json.dump(dobj, f)
+             HfApi.upload_folder(repo_id="JoPmt/Tst_datast_imgs",folder_path="./tmpo",repo_type="dataset",path_in_repo="./",token=HF_TOKEN)
+         try:
+             for pgn in glob.glob('./tmpo/*.png'):
+                 os.remove(pgn)
+             for jgn in glob.glob('./tmpo/*.json'):
+                 os.remove(jgn)
+             del tre
+             del tra
+             del trm
+         except:
+             print("cant")
+     except:
+         print("failed to make obj")
+
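+ # plax: stores the caller's request headers in the hidden textbox so they can be logged with the image metadata.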
+ def plax(gaul,req: gr.Request):
+     gaul=str(req.headers)
+     return gaul
+
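+ # plex: main generation routine. It embeds the prompt and negative prompt with the prior, decodes a first
+ # image, then interpolates [text, uploaded image, generated image] with the three guide weights and decodes the blend.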
+ def plex(cook, img, neg_prompt, stips, prior_stps, itr_stps, one, two, three, nut, wit, het, gaul, progress=gr.Progress(track_tqdm=True)):
+     gc.collect()
+     apol=[]
+     modil="kandinsky-community/kandinsky-2-2-prior,kandinsky-community/kandinsky-2-2-decoder"
+     goof = load_image(img).resize((wit, het))
      prompt = cook
+     negative_prior_prompt = neg_prompt
+     nm=0
+     fnamo=""+str(int(time.time()))+""
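+     # Seed handling: 0 means draw a random seed (re-drawn until it is a multiple of 32), otherwise use the slider value.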
+     if nut == 0:
+         nm = random.randint(1, 2147483616)
+         while nm % 32 != 0:
+             nm = random.randint(1, 2147483616)
+     else:
+         nm=nut
+     generator = torch.Generator(device="cpu").manual_seed(nm)
+
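+     # Prior pass: embed the prompt and the negative prompt, then decode a first image from those embeddings.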
+     img_emb = pope_prior(prompt=prompt, guidance_scale=one, num_inference_steps=prior_stps, generator=generator)
+     negative_emb = pope_prior(prompt=negative_prior_prompt, guidance_scale=1, num_inference_steps=prior_stps)
+     imags = pope(image_embeds=img_emb.image_embeds,negative_image_embeds=negative_emb.image_embeds,num_inference_steps=stips,generator=generator,height=het,width=wit).images[0]
      images_texts = [cook, goof, imags]
      weights = [one, two, three]
      primpt = ""
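+     # Interpolate the text prompt, the uploaded image, and the freshly generated image using the three guide weights, then decode the blended embedding.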
+     prior_out = pope_prior.interpolate(images_texts, weights, num_inference_steps=itr_stps)
+     imas = pope(**prior_out, height=het, width=wit, num_inference_steps=stips)
+     for i, imge in enumerate(imas["images"]):
+         apol.append(imge)
+         imge.save('./tmpo/'+fnamo+'_'+str(i)+'.png', 'PNG')
+     imags.save('./tmpo/'+fnamo+'_half.png', 'PNG')
+     apol.append(imags)
+
+     chdr(apol,prompt,modil,stips,fnamo,gaul)
+     return apol
+
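+ # Placeholder no-op functions.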
+ def aip(ill,api_name="/run"):
+     return
+ def pit(ill,api_name="/predict"):
+     return
+
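+ # Gradio Blocks UI: prompt, image upload, and advanced sliders; the button first records request headers via plax, then runs plex.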
+ with gr.Blocks(theme=random.choice([gr.themes.Monochrome(),gr.themes.Base.from_hub("gradio/seafoam"),gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"),gr.themes.Glass(),gr.themes.Base(),]),analytics_enabled=False) as iface:
+     ##iface.description="Running on cpu, very slow! by JoPmt."
+     out=gr.Gallery(label="Generated Output Image", columns=1)
+     inut=gr.Textbox(label="Prompt")
+     mput=gr.Image(label="drop", type="filepath")
+     gaul=gr.Textbox(visible=False)
+     btn=gr.Button("GENERATE")
+     with gr.Accordion("Advanced Settings", open=False):
+         inet=gr.Textbox(label="Negative_prompt", value="lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature")
+         inyt=gr.Slider(label="Num inference steps",minimum=1,step=1,maximum=30,value=10)
+         ihop=gr.Slider(label="Num prior inference steps",minimum=1,step=1,maximum=10,value=5)
+         ihip=gr.Slider(label="Num prior interpolation steps",minimum=1,step=1,maximum=10,value=5)
+         inat=gr.Slider(label="Text Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.5)
+         csal=gr.Slider(label="Your Image Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.5)
+         csbl=gr.Slider(label="Generated Image Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.3)
+         indt=gr.Slider(label="Manual seed (leave 0 for random)",minimum=0,step=32,maximum=2147483616,value=0)
+         inwt=gr.Slider(label="Width",minimum=256,step=32,maximum=1024,value=768)
+         inht=gr.Slider(label="Height",minimum=256,step=32,maximum=1024,value=768)
+
+     btn.click(fn=plax,inputs=gaul,outputs=gaul).then(fn=plex, outputs=[out], inputs=[inut,mput,inet,inyt,ihop,ihip,inat,csal,csbl,indt,inwt,inht,gaul])

  iface.queue(max_size=1,api_open=False)
+ iface.launch(max_threads=20,inline=False,show_api=False)