import gradio as gr
import random
import os
import io
import base64
from PIL import Image
import shortuuid

# Load the hosted Spaces as callable interfaces
latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
rudalle = gr.Interface.load("spaces/multimodalart/rudalle")
#guided = gr.Interface.load("spaces/EleutherAI/clip-guided-diffusion")

def text2image_latent(text, steps, width, height, images, diversity):
    results = latent(text, steps, width, height, images, diversity)
    image_paths = []
    # results[1] holds base64-encoded PNGs; decode each one and save it to disk
    for image in results[1]:
        image_str = image[0].replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        temp_dir = './tmp'
        os.makedirs(temp_dir, exist_ok=True)
        image_path = f'{temp_dir}/{shortuuid.uuid()}.png'
        img.save(image_path)
        image_paths.append(image_path)
    return results[0], image_paths

def text2image_rudalle(text, aspect, model):
    image = rudalle(text, aspect, model)[0]
    return image

#def text2image_guided(text):
#    image = guided(text, None, 10, 600, 0, 0, 0, random.randint(0, 2147483647), None, 50, 32)
#    return image[0]

css_mt = {"margin-top": "1em"}

with gr.Blocks() as mindseye:
    gr.Markdown("""

# MindsEye Lite
### run multiple text-to-image models in one place

MindsEye Lite orchestrates multiple text-to-image Hugging Face Spaces in one convenient space, so you can try different models. This work carries the spirit of MindsEye Beta, a tool to run multiple models with a single UI, but adjusted to the current hardware limitations of Spaces. MindsEye Lite was created by @multimodalart; keep up with the latest multimodal AI art news and consider supporting us on Patreon.

") gr.Markdown("") text = gr.inputs.Textbox(placeholder="Try writing something..", label="Prompt") with gr.Column(): with gr.Row(): with gr.Tabs(): with gr.TabItem("Latent Diffusion"): steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate",default=45,maximum=50,minimum=1,step=1) width = gr.inputs.Slider(label="Width", default=256, step=32, maximum=256, minimum=32) height = gr.inputs.Slider(label="Height", default=256, step=32, maximum = 256, minimum=32) images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4) diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=15.0) get_image_latent = gr.Button("Generate Image",css=css_mt) with gr.TabItem("ruDALLE"): aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"],default="Square") model = gr.inputs.Dropdown(label="Model", choices=["Surrealism","Realism", "Emoji"], default="Surrealism") get_image_rudalle = gr.Button("Generate Image",css=css_mt) with gr.TabItem("VQGAN+CLIP"): pass with gr.TabItem("V-Diffusion"): pass with gr.Row(): with gr.Tabs(): with gr.TabItem("Image output"): image = gr.outputs.Image() with gr.TabItem("Gallery output"): gallery = gr.Gallery(label="Individual images") get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=[image,gallery]) get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=image) mindseye.launch()