import os
# Opt out of telemetry before the Hub libraries are imported; these settings only take
# effect as environment variables (plain Python assignments would do nothing).
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"
os.environ["DO_NOT_TRACK"] = "1"
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "0"

from diffusers import StableDiffusionLDM3DPipeline, DDIMScheduler
import torch
import gradio as gr
import random, gc, re, json, time, glob
from accelerate import Accelerator
from huggingface_hub import HfApi

hf_api = HfApi()
HF_TOKEN = os.getenv("HF_TOKEN")
accelerator = Accelerator(cpu=True)
# Generated images and their metadata JSON are staged here before upload.
os.makedirs("./tmpo", exist_ok=True)

apol=[]

# Load the Intel LDM3D panorama pipeline in bfloat16 and run it on CPU with a DDIM scheduler.
pipe = accelerator.prepare(StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-pano", torch_dtype=torch.bfloat16, variant=None, use_safetensors=False, safety_checker=None))
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.unet.to(memory_format=torch.channels_last)
pipe.to("cpu")
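# Each pipe() call below returns an LDM3D output whose "rgb" and "depth" entries are lists
# of PIL images (one per prompt in the batch); plex() saves every pair under ./tmpo.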

def chdr(apol,prompt,modil,stips,fnamo,gaul):
    # Upload the generated files in ./tmpo plus a metadata JSON to Hub dataset repos, then
    # clean the folder up. Flagged prompts are kept out of the community dataset.
    try:
        typ="LDM3D"
        los=""
        tre='./tmpo/'+fnamo+'.json'
        # Blocklist stored reversed; un-reverse the words before building the regex.
        flng=["yssup", "sllab", "stsaerb", "sinep", "selppin", "ssa", "tnuc", "mub", "kcoc", "kcid", "anigav", "dekan", "edun", "slatineg", "xes", "nrop", "stit", "ttub", "bojwolb", "noitartenep", "kcuf", "kcus", "kcil", "elttil", "gnuoy", "thgit", "lrig", "etitep", "dlihc", "yxes"]
        flng=[itm[::-1] for itm in flng]
        ptn = r"\b" + r"\b|\b".join(flng) + r"\b"
        if re.search(ptn, prompt, re.IGNORECASE):
            print("onon buddy")
        else:
            dobj={'img_name':fnamo,'model':modil,'lora':los,'prompt':prompt,'steps':stips,'type':typ}
            with open(tre, 'w') as f:
                json.dump(dobj, f)
            hf_api.upload_folder(repo_id="JoPmt/hf_community_images",folder_path="./tmpo",repo_type="dataset",path_in_repo="./",token=HF_TOKEN)
        # A second copy, including the request headers, goes to a separate logging dataset.
        dobj={'img_name':fnamo,'model':modil,'lora':los,'prompt':prompt,'steps':stips,'type':typ,'haed':gaul}
        with open(tre, 'w') as f:
            json.dump(dobj, f)
        hf_api.upload_folder(repo_id="JoPmt/Tst_datast_imgs",folder_path="./tmpo",repo_type="dataset",path_in_repo="./",token=HF_TOKEN)
        try:
            # Remove the staged files once they have been uploaded.
            for pgn in glob.glob('./tmpo/*.png'):
                os.remove(pgn)
            for jgn in glob.glob('./tmpo/*.json'):
                os.remove(jgn)
        except Exception:
            print("cant")
    except Exception:
        print("failed to make obj")

def plax(gaul,req: gr.Request):
    # Capture the incoming request headers so they can be logged alongside the metadata.
    gaul=str(req.headers)
    return gaul

def plex(prompt,neg_prompt,stips,nut,wit,het,gaul,progress=gr.Progress(track_tqdm=True)):
    # Generate two panoramas (RGB + depth each), save them to ./tmpo, log them via chdr(),
    # and return the images for the gallery.
    gc.collect()
    apol=[]
    modil="Intel/ldm3d-pano"
    fnamo=str(int(time.time()))
    prompt="360 view of a "+prompt
    if nut == 0:
        # Random seed drawn as a multiple of 32, matching the seed slider's step size.
        nm = random.randint(1, 2147483616 // 32) * 32
    else:
        nm=int(nut)
    generator = torch.Generator(device="cpu").manual_seed(nm)
    # Cast slider values to int before handing them to the pipeline.
    image = pipe(prompt=[prompt]*2, negative_prompt=[neg_prompt]*2, generator=generator, guidance_scale=7.0, num_inference_steps=int(stips), height=int(het), width=int(wit))
    for a, imze in enumerate(image["rgb"]):
        apol.append(imze)
        imze.save('./tmpo/'+fnamo+'_rgb_'+str(a)+'.png', 'PNG')
    for b, imbe in enumerate(image["depth"]):
        apol.append(imbe)
        imbe.save('./tmpo/'+fnamo+'_dep_'+str(b)+'.png', 'PNG')
    chdr(apol,prompt,modil,stips,fnamo,gaul)
    return apol

def aip(ill,api_name="/run"):
    # Placeholder stubs; neither function is wired to any Gradio event below.
    return
def pit(ill,api_name="/predict"):
    return
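
# UI: a theme is picked at random each launch. The hidden "gaul" textbox carries the request
# headers from plax() into plex() via the .then() chained onto the GENERATE button.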

with gr.Blocks(theme=random.choice([gr.themes.Monochrome(),gr.themes.Base.from_hub("gradio/seafoam"),gr.themes.Base.from_hub("freddyaboulton/dracula_revamped"),gr.themes.Glass(),gr.themes.Base(),]),analytics_enabled=False) as iface:
    # iface.description="Running on cpu, very slow! by JoPmt."
    out=gr.Gallery(label="Generated Output Image", columns=1)
    inut=gr.Textbox(label="Prompt")
    gaul=gr.Textbox(visible=False)
    btn=gr.Button("GENERATE")
    with gr.Accordion("Advanced Settings", open=False):
        inet=gr.Textbox(label="Negative prompt", value="lowres,text,bad quality,low quality,jpeg artifacts,ugly,bad hands,bad face,blurry,bad eyes,watermark,signature")
        inyt=gr.Slider(label="Num inference steps",minimum=1,step=1,maximum=30,value=20)
        indt=gr.Slider(label="Manual seed (leave 0 for random)",minimum=0,step=32,maximum=2147483616,value=0)
        inwt=gr.Slider(label="Width",minimum=256,step=32,maximum=1024,value=1024)
        inht=gr.Slider(label="Height",minimum=256,step=32,maximum=1024,value=512)
    
    btn.click(fn=plax,inputs=gaul,outputs=gaul).then(fn=plex, outputs=[out], inputs=[inut,inet,inyt,indt,inwt,inht,gaul])

iface.queue(max_size=1,api_open=False)
iface.launch(max_threads=20,inline=False,show_api=False)
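
# Optional local smoke test (a sketch, not part of the app): bypass the UI and call plex()
# directly with made-up arguments. The "{}" stands in for the request headers that plax()
# would normally supply. Uncomment to run once, with the Gradio server stopped.
# images = plex("mountain lake at sunset", "lowres, blurry", 2, 0, 1024, 512, "{}")
# for i, img in enumerate(images):
#     img.save("preview_" + str(i) + ".png")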