import gradio as gr
from models import models
from PIL import Image
import requests
import uuid
import io
import base64
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image
base_url=f'https://omnibus-top-20-img-img.hf.space/file='
loaded_model=[]
for i,model in enumerate(models):
    try:
        loaded_model.append(gr.load(f'models/{model}'))
    except Exception as e:
        print(e)
        pass
print(loaded_model)
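# base img2img pipeline used by run_dif (CPU float32, safetensors weights)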
pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True)
def load_model(model_drop):
    # reload the base pipeline; returned so a caller can swap it in (model_drop is currently unused)
    pipeline = AutoPipelineForImage2Image.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float32, use_safetensors=True)
    return pipeline
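# run_dif: run each tile from the input gallery through the img2img pipeline with the
# given prompt, streaming results to the output gallery as they finish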
def run_dif(prompt,im_path,model_drop,cnt):
    print(f'im_path:: {im_path}')
    print(f'im_path0:: {im_path.root[0]}')
    out_box=[]
    # assumes the raw Gradio Gallery payload: items live under .root, each with a local file path at .image.path
    for i,ea in enumerate(im_path.root):
        print(f'ea:: {ea}')
        print(f'impath:: {ea.image.path}')
        #url = base_url+ea
        #init_image = load_image(url)
        init_image=load_image(ea.image.path)
        #prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
        # pass prompt and image to pipeline
        #image = pipeline(prompt, image=init_image, strength=0.8,guidance_scale=8.0,negative_prompt=negative_prompt,num_inference_steps=50).images[0]
        image = pipeline(prompt, image=init_image, strength=0.2,guidance_scale=8.0,num_inference_steps=2).images[0]
        #make_image_grid([init_image, image], rows=1, cols=2)
        out_box.append(image)
        yield out_box,""
def run_dif_old(out_prompt,model_drop,cnt):
    p_seed=""
    out_box=[]
    out_html=""
    #for i,ea in enumerate(loaded_model):
    for i in range(int(cnt)):
        p_seed+=" "
        try:
            model=loaded_model[int(model_drop)]
            out_img=model(out_prompt+p_seed)
            print(out_img)
            out_box.append(out_img)
        except Exception as e:
            print(e)
            out_html=str(e)
            pass
        yield out_box,out_html
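# run_dif_og: original variant that fetches the generated file from the companion
# omnibus-top-20 Space over HTTP and builds an HTML grid of base64-embedded previews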
def run_dif_og(out_prompt,model_drop,cnt):
    out_box=[]
    out_html=""
    #for i,ea in enumerate(loaded_model):
    for i in range(int(cnt)):
        try:
            #print (ea)
            model=loaded_model[int(model_drop)]
            out_img=model(out_prompt)
            print(out_img)
            url=f'https://omnibus-top-20.hf.space/file={out_img}'
            print(url)
            uid = uuid.uuid4()
            #urllib.request.urlretrieve(image, 'tmp.png')
            #out=Image.open('tmp.png')
            r = requests.get(url, stream=True)
            if r.status_code == 200:
                img_buffer = io.BytesIO(r.content)
                print(f'bytes:: {img_buffer}')
                str_equivalent_image = base64.b64encode(img_buffer.getvalue()).decode()
                img_tag = "<img src='data:image/png;base64," + str_equivalent_image + "'/>"
                out_html+=f"<div class='img_class'><a href='https://huggingface.co/{models[int(model_drop)]}'>{models[int(model_drop)]}</a><br>"+img_tag+"</div>"
                out = Image.open(io.BytesIO(r.content))
                out_box.append(out)
            html_out = "<div class='grid_class'>"+out_html+"</div>"
            yield out_box,html_out
        except Exception as e:
            out_html+=str(e)
            html_out = "<div class='grid_class'>"+out_html+"</div>"
            yield out_box,html_out
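# thread_dif: single-request (non-generator) version of run_dif_og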
def thread_dif(out_prompt,mod):
    out_box=[]
    out_html=""
    #for i,ea in enumerate(loaded_model):
    try:
        model=loaded_model[int(mod)]
        out_img=model(out_prompt)
        print(out_img)
        url=f'https://omnibus-top-20.hf.space/file={out_img}'
        print(url)
        uid = uuid.uuid4()
        #urllib.request.urlretrieve(image, 'tmp.png')
        #out=Image.open('tmp.png')
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            img_buffer = io.BytesIO(r.content)
            print(f'bytes:: {img_buffer}')
            str_equivalent_image = base64.b64encode(img_buffer.getvalue()).decode()
            img_tag = "<img src='data:image/png;base64," + str_equivalent_image + "'/>"
            #out_html+=f"<div class='img_class'><a href='https://huggingface.co/models/{models[i]}'>{models[i]}</a><br>"+img_tag+"</div>"
            out = Image.open(io.BytesIO(r.content))
            out_box.append(out)
        else:
            out_html=str(r.status_code)
        html_out = "<div class='grid_class'>"+out_html+"</div>"
        return out_box,html_out
    except Exception as e:
        out_html=str(e)
        #out_html+=str(e)
        html_out = "<div class='grid_class'>"+out_html+"</div>"
        return out_box,html_out
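# CSS for the HTML preview grid: a flex row of fixed-minimum-width image cells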
css="""
.grid_class{
display:flex;
height:100%;
}
.img_class{
min-width:200px;
}
"""
def load_im(img):
    im_box=[]
    im = Image.open(img)
    width, height = im.size
    new_w=int(width/10)
    new_h=new_w
    w=0
    h=0
    for i in range(int(height/new_h)):
        print(i)
        for b in range(10):
            print(b)
            # Setting the points for cropped image
            left = w
            top = h
            right = left+new_w
            bottom = top+new_h
            # Cropped image of above dimension
            # (It will not change original image)
            im1 = im.crop((left, top, right, bottom))
            im_box.append(im1)
            w+=new_w
            yield im_box,[]
        h+=new_h
        w=0
    yield im_box,im_box
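# UI: prompt box with run/stop buttons, an image upload with an "Image Grid" button,
# model dropdown and count, plus galleries for the grid tiles and generated images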
with gr.Blocks(css=css) as app:
    with gr.Row():
        with gr.Column():
            inp=gr.Textbox(label="Prompt")
            with gr.Row():
                btn=gr.Button()
                stop_btn=gr.Button()
        with gr.Column():
            inp_im=gr.Image(type='filepath')
            im_btn=gr.Button("Image Grid")
    with gr.Row():
        model_drop=gr.Dropdown(label="Models", choices=models, type='index', value=models[0])
        cnt = gr.Number(value=1)
    out_html=gr.HTML()
    outp=gr.Gallery(columns=10)
    fingal=gr.Gallery(columns=10)
    im_list=gr.Textbox()
    im_btn.click(load_im,inp_im,[outp,im_list])
    go_btn = btn.click(run_dif,[inp,outp,model_drop,cnt],[fingal,out_html])
    stop_btn.click(None,None,None,cancels=[go_btn])
app.queue().launch()