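# Kandinsky 2.2 text/image interpolation demo for a Gradio Space.
# Runs the prior and decoder pipelines on CPU and logs generated images
# and metadata to Hugging Face dataset repos.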
from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
import torch
import cv2
import numpy as np
from transformers import pipeline
import gradio as gr
from PIL import Image
from diffusers.utils import load_image
import os, random, gc, re, json, time, shutil, glob
import PIL.Image
import tqdm
from accelerate import Accelerator
from huggingface_hub import HfApi, InferenceClient, ModelCard, RepoCard, upload_folder, hf_hub_download, HfFileSystem
hf_api = HfApi()
HF_TOKEN = os.getenv("HF_TOKEN")
# These switches only take effect as environment variables; assigning plain
# module-level names does nothing. (Ideally they would be set before the
# huggingface_hub import.)
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["DO_NOT_TRACK"] = "1"
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"
accelerator = Accelerator(cpu=True)
inference_client = InferenceClient()
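
# The rest of the script saves images and JSON into ./tmpo before uploading;
# create the folder up front (assumption: it is not shipped with the repo).
os.makedirs("./tmpo", exist_ok=True)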

apol=[]

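# Load the Kandinsky 2.2 prior (text/image -> image embeddings) and decoder
# (image embeddings -> pixels); both stay in float32 on CPU.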
pope_prior = accelerator.prepare(KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float32))
pope_prior.prior.to(memory_format=torch.channels_last)
pope_prior = pope_prior.to("cpu")
pope = accelerator.prepare(KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float32))
pope.unet.to(memory_format=torch.channels_last)
pope = pope.to("cpu")

def chdr(apol, prompt, modil, stips, fnamo, gaul):
    # Write generation metadata to JSON, upload the ./tmpo folder to the Hub
    # dataset repos, then clear the temporary files.
    try:
        gen_type = "KNDSK22_INTERP"
        los = ""
        tre = './tmpo/' + fnamo + '.json'
        tra = './tmpo/' + fnamo + '_0.png'
        trm = './tmpo/' + fnamo + '_half.png'
        # Blocklist kept reversed in the source; restore it before matching.
        flng = ["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
        flng = [itm[::-1] for itm in flng]
        ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
        if re.search(ptn, prompt, re.IGNORECASE):
            # Prompt matched the blocklist: skip the public community upload.
            print("onon buddy")
        else:
            dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': gen_type}
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/hf_community_images", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
        dobj = {'img_name': fnamo, 'model': modil, 'lora': los, 'prompt': prompt, 'steps': stips, 'type': gen_type, 'haed': gaul}
        with open(tre, 'w') as f:
            json.dump(dobj, f)
        hf_api.upload_folder(repo_id="JoPmt/Tst_datast_imgs", folder_path="./tmpo", repo_type="dataset", path_in_repo="./", token=HF_TOKEN)
        try:
            for pgn in glob.glob('./tmpo/*.png'):
                os.remove(pgn)
            for jgn in glob.glob('./tmpo/*.json'):
                os.remove(jgn)
            del tre, tra, trm
        except Exception:
            print("cant")
    except Exception:
        print("failed to make obj")

def plax(gaul, req: gr.Request):
    # Replace the hidden textbox value with the caller's request headers.
    gaul = str(req.headers)
    return gaul

def plex(cook, img, neg_prompt, stips, prior_stps, itr_stps, one, two, three, nut, wit, het, gaul, progress=gr.Progress(track_tqdm=True)):
    gc.collect()
    apol = []
    modil = "kandinsky-community/kandinsky-2-2-prior,kandinsky-community/kandinsky-2-2-decoder"
    goof = load_image(img).resize((wit, het))
    prompt = cook
    negative_prior_prompt = neg_prompt
    fnamo = str(int(time.time()))
    if nut == 0:
        # No manual seed supplied: pick a random seed that is a multiple of 32.
        nm = random.randint(1, 2147483616)
        while nm % 32 != 0:
            nm = random.randint(1, 2147483616)
    else:
        nm = nut
    generator = torch.Generator(device="cpu").manual_seed(nm)

    # First render an intermediate image from the text prompt alone, then
    # interpolate between the prompt, the uploaded image and that intermediate
    # image using the three guide weights.
    img_emb = pope_prior(prompt=prompt, guidance_scale=one, num_inference_steps=prior_stps, generator=generator)
    negative_emb = pope_prior(prompt=negative_prior_prompt, guidance_scale=1, num_inference_steps=prior_stps)
    imags = pope(image_embeds=img_emb.image_embeds, negative_image_embeds=negative_emb.image_embeds, num_inference_steps=stips, generator=generator, height=het, width=wit).images[0]
    images_texts = [cook, goof, imags]
    weights = [one, two, three]
    prior_out = pope_prior.interpolate(images_texts, weights, num_inference_steps=itr_stps)
    imas = pope(**prior_out, height=het, width=wit, num_inference_steps=stips)
    for i, imge in enumerate(imas["images"]):
        apol.append(imge)
        imge.save('./tmpo/' + fnamo + '_' + str(i) + '.png', 'PNG')
    imags.save('./tmpo/' + fnamo + '_half.png', 'PNG')
    apol.append(imags)
    chdr(apol, prompt, modil, stips, fnamo, gaul)
    return apol

def aip(ill, api_name="/run"):
    # Placeholder API endpoint; intentionally a no-op.
    return

def pit(ill, api_name="/predict"):
    # Placeholder API endpoint; intentionally a no-op.
    return

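# Gradio UI: prompt and image inputs plus advanced sliders for steps,
# guidance weights, seed and output size; results land in a one-column gallery.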
with gr.Blocks(theme=random.choice([gr.themes.Monochrome(),gr.themes.Base.from_hub("gradio/seafoam"),gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"),gr.themes.Glass(),gr.themes.Base(),]),analytics_enabled=False) as iface:
    # iface.description="Running on cpu, very slow! by JoPmt."
    out=gr.Gallery(label="Generated Output Image", columns=1)
    inut=gr.Textbox(label="Prompt")
    mput=gr.Image(label="drop", type="filepath")
    gaul=gr.Textbox(visible=False)
    btn=gr.Button("GENERATE")
    with gr.Accordion("Advanced Settings", open=False):
        inet=gr.Textbox(label="Negative prompt", value="lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature")
        inyt=gr.Slider(label="Num inference steps",minimum=1,step=1,maximum=30,value=10)
        ihop=gr.Slider(label="Num prior inference steps",minimum=1,step=1,maximum=10,value=5)
        ihip=gr.Slider(label="Num prior interpolation steps",minimum=1,step=1,maximum=10,value=5)
        inat=gr.Slider(label="Text Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.5)
        csal=gr.Slider(label="Your Image Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.5)
        csbl=gr.Slider(label="Generated Image Guide",minimum=0.01,step=0.01,maximum=0.99,value=0.3)
        indt=gr.Slider(label="Manual seed (leave 0 for random)",minimum=0,step=32,maximum=2147483616,value=0)
        inwt=gr.Slider(label="Width",minimum=256,step=32,maximum=1024,value=768)
        inht=gr.Slider(label="Height",minimum=256,step=32,maximum=1024,value=768)
    
    btn.click(fn=plax,inputs=gaul,outputs=gaul).then(fn=plex, outputs=[out], inputs=[inut,mput,inet,inyt,ihop,ihip,inat,csal,csbl,indt,inwt,inht,gaul])

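# Single-slot queue with the programmatic API kept closed; the Space only
# serves the web UI.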
iface.queue(max_size=1,api_open=False)
iface.launch(max_threads=20,inline=False,show_api=False)