"""Gradio demo: image-to-image generation via a local diffusers pipeline,
plus helpers that call remote HF-Space model endpoints.

NOTE(review): this file was recovered from a whitespace-mangled source; the
HTML fragments built below were stripped of their tags in transit and have
been reconstructed to match the .grid_class/.img_class CSS defined here —
confirm against the deployed Space.
"""
import base64
import io
import uuid
from concurrent.futures import ThreadPoolExecutor

import gradio as gr
import requests
import torch
from PIL import Image
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import make_image_grid, load_image

from models import models

# Base URL used to resolve a Gradio filepath into a fetchable image URL.
base_url = 'https://omnibus-top-20-img-img.hf.space/file='

# Eagerly load each remote model endpoint; failures are logged and skipped,
# so `loaded_model` may be shorter than `models`.
loaded_model = []
for model_name in models:
    try:
        loaded_model.append(gr.load(f'models/{model_name}'))
    except Exception as e:
        print(e)
print(loaded_model)

# Module-level img2img pipeline shared by the handlers below.
pipeline = AutoPipelineForImage2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float32,
    use_safetensors=True,
)


def load_model(model_drop):
    """Reload the shared img2img pipeline.

    FIX: the original assigned a function-local `pipeline` that was thrown
    away on return; rebind the module-level one instead.
    (`model_drop` is currently unused — the checkpoint is hard-coded.)
    """
    global pipeline
    pipeline = AutoPipelineForImage2Image.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float32,
        use_safetensors=True,
    )


def run_dif(prompt, im_path, model_drop, cnt):
    """Run the local img2img pipeline on the uploaded image.

    Returns (list_of_images, html_string) for the Gallery/HTML outputs.
    FIX: the original overwrote the user's `prompt` with a hard-coded
    "Astronaut in a jungle..." test string; the UI prompt is now used.
    """
    out_box = []
    init_image = load_image(base_url + im_path)
    # strength/guidance tuned for a quick demo; num_inference_steps is very
    # low (2) to keep the CPU run fast — raise for quality.
    image = pipeline(
        prompt,
        image=init_image,
        strength=0.8,
        guidance_scale=8.0,
        num_inference_steps=2,
    ).images[0]
    out_box.append(image)
    return out_box, ""


def run_dif_old(out_prompt, model_drop, cnt):
    """Yield up to `cnt` images from the selected remote model.

    A trailing space is appended to the prompt each round so repeated calls
    to the same endpoint produce (potentially) different cached results.
    Yields (images_so_far, error_html) after every attempt.
    """
    p_seed = ""
    out_box = []
    out_html = ""
    for _ in range(int(cnt)):
        p_seed += " "
        try:
            model = loaded_model[int(model_drop)]
            out_img = model(out_prompt + p_seed)
            print(out_img)
            out_box.append(out_img)
        except Exception as e:
            print(e)
            out_html = str(e)
        yield out_box, out_html


def run_dif_og(out_prompt, model_drop, cnt):
    """Generate `cnt` images via the remote Space and stream them back.

    Fetches each generated file over HTTP, embeds it as a base64 <img> tag,
    and yields (images, html) progressively.
    FIX: `range(cnt)` crashed when `cnt` arrived as a float from gr.Number;
    FIX: the HTML label used `models[i]` (the loop counter) instead of the
    model actually selected in the dropdown.
    """
    out_box = []
    out_html = ""
    for _ in range(int(cnt)):
        try:
            model = loaded_model[int(model_drop)]
            out_img = model(out_prompt)
            print(out_img)
            url = f'https://omnibus-top-20.hf.space/file={out_img}'
            print(url)
            uid = uuid.uuid4()  # kept from original; currently unused
            r = requests.get(url, stream=True)
            if r.status_code == 200:
                img_buffer = io.BytesIO(r.content)
                str_equivalent_image = base64.b64encode(
                    img_buffer.getvalue()
                ).decode()
                img_tag = (
                    "<img class='img_class' "
                    f"src='data:image/png;base64,{str_equivalent_image}'>"
                )
                out_html += f"<div>{models[int(model_drop)]}</div>{img_tag}"
                out_box.append(Image.open(io.BytesIO(r.content)))
            yield out_box, f"<div class='grid_class'>{out_html}</div>"
        except Exception as e:
            out_html += str(e)
            yield out_box, f"<div class='grid_class'>{out_html}</div>"


def thread_dif(out_prompt, mod):
    """Worker: generate one image with model index `mod`.

    Returns (images, html). Designed to be run concurrently by
    `start_threads`.
    FIX: removed `print(ea)` — `ea` was never defined and raised NameError;
    FIX: `out_html = r.status_code` stored an int that later crashed string
    concatenation; it is now stringified.
    """
    out_box = []
    out_html = ""
    try:
        model = loaded_model[int(mod)]
        out_img = model(out_prompt)
        print(out_img)
        url = f'https://omnibus-top-20.hf.space/file={out_img}'
        print(url)
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            out_box.append(Image.open(io.BytesIO(r.content)))
        else:
            out_html = str(r.status_code)
        return out_box, f"<div class='grid_class'>{out_html}</div>"
    except Exception as e:
        out_html = str(e)
        return out_box, f"<div class='grid_class'>{out_html}</div>"


def start_threads(prompt):
    """Run `thread_dif` for models 0 and 1 concurrently.

    FIX: the original called `threading.Thread(...).result()` —
    `threading` was never imported and Thread objects have no `result()`.
    A ThreadPoolExecutor provides real futures with the intended API.
    """
    with ThreadPoolExecutor(max_workers=2) as ex:
        f1 = ex.submit(thread_dif, prompt, 0)
        f2 = ex.submit(thread_dif, prompt, 1)
        a1, a2 = f1.result()
        b1, b2 = f2.result()  # second result currently unused, as before
    return a1, a2


css = """
.grid_class{
  display:flex;
  height:100%;
}
.img_class{
  min-width:200px;
}
"""

with gr.Blocks(css=css) as app:
    with gr.Row():
        with gr.Column():
            inp = gr.Textbox(label="Prompt")
            btn = gr.Button()
            inp_im = gr.Image(type='filepath')
        with gr.Row():
            model_drop = gr.Dropdown(
                label="Models", choices=models, type='index', value=models[0]
            )
            cnt = gr.Number(value=1)
    out_html = gr.HTML()
    outp = gr.Gallery()
    btn.click(run_dif, [inp, inp_im, model_drop, cnt], [outp, out_html])

app.launch()