# Duplicated from the Hugging Face Space Omnibus/maximum_multiplier_places
# (commit 69aee09). Original file size: 12.6 kB.
import gradio as gr
import os
import sys
from pathlib import Path
# Hugging Face model repo ids offered in the model dropdown.
# The dropdown is created with type="index", so the POSITION of each entry
# here must line up one-to-one with the loaders in `models2` below.
models = [
    "johnslegers/epic-diffusion-v1.1",
    "andite/anything-v4.0",
    "runwayml/stable-diffusion-v1-5",
    "claudfuen/photorealistic-fuen-v1",
    "naclbit/trinart_stable_diffusion_v2",
    "nitrosocke/Arcane-Diffusion",
    "nitrosocke/archer-diffusion",
    "nitrosocke/elden-ring-diffusion",
    "nitrosocke/redshift-diffusion",
    "nitrosocke/spider-verse-diffusion",
    "nitrosocke/mo-di-diffusion",
    "nitrosocke/classic-anim-diffusion",
    "dreamlike-art/dreamlike-diffusion-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "wavymulder/wavyfusion",
    "wavymulder/Analog-Diffusion",
    "prompthero/midjourney-v4-diffusion",
    "prompthero/openjourney",
    "dallinmackay/Van-Gogh-diffusion",
    "hakurei/waifu-diffusion",
    "DGSpitzer/Cyberpunk-Anime-Diffusion",
    "Fictiverse/Stable_Diffusion_BalloonArt_Model",
    "dallinmackay/Tron-Legacy-diffusion",
    "AstraliteHeart/pony-diffusion",
    "nousr/robo-diffusion",
    "CompVis/stable-diffusion-v1-4",
]
# Used only as the initial label text on the eight output images below.
current_model = models[0]
# All eight prompt generators point at the exact same MagicPrompt Space, so
# load the remote interface ONCE and alias it, instead of performing eight
# identical network fetches at startup as the original code did.
# The names text_gen1..text_gen8 are kept because the text_it* handlers
# reference them as default-parameter values.
text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
text_gen2 = text_gen1
text_gen3 = text_gen1
text_gen4 = text_gen1
text_gen5 = text_gen1
text_gen6 = text_gen1
text_gen7 = text_gen1
text_gen8 = text_gen1
# One text-to-image proxy per entry in `models`, built by comprehension so
# every dropdown index is covered.
# BUG FIX: the original hand-written list stopped at models[25] and omitted
# models[26] ("CompVis/stable-diffusion-v1-4"), so picking the last dropdown
# entry raised IndexError inside the send_it* handlers (the dropdown uses
# type="index" over the FULL `models` list).
models2 = [
    gr.Interface.load(f"models/{model_id}", live=True, preprocess=True)
    for model_id in models
]
def text_it1(inputs, text_gen1=text_gen1):
    """Expand the short prompt into a generated "magic" prompt for slot 1."""
    return text_gen1(inputs)

def text_it2(inputs, text_gen2=text_gen2):
    """Expand the short prompt into a generated "magic" prompt for slot 2."""
    return text_gen2(inputs)

def text_it3(inputs, text_gen3=text_gen3):
    """Expand the short prompt into a generated "magic" prompt for slot 3."""
    return text_gen3(inputs)

def text_it4(inputs, text_gen4=text_gen4):
    """Expand the short prompt into a generated "magic" prompt for slot 4."""
    return text_gen4(inputs)

def text_it5(inputs, text_gen5=text_gen5):
    """Expand the short prompt into a generated "magic" prompt for slot 5."""
    return text_gen5(inputs)

def text_it6(inputs, text_gen6=text_gen6):
    """Expand the short prompt into a generated "magic" prompt for slot 6."""
    return text_gen6(inputs)

def text_it7(inputs, text_gen7=text_gen7):
    """Expand the short prompt into a generated "magic" prompt for slot 7."""
    return text_gen7(inputs)

def text_it8(inputs, text_gen8=text_gen8):
    """Expand the short prompt into a generated "magic" prompt for slot 8."""
    return text_gen8(inputs)
def set_model(current_model):
    """Relabel all eight output images after a dropdown change.

    `current_model` arrives as an int because the dropdown was created with
    type="index". Returns one gr.update per output component (output1..8).
    """
    chosen = models[current_model]
    return tuple(gr.update(label=chosen) for _ in range(8))
def send_it1(inputs, model_choice):
    """Render the slot-1 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it2(inputs, model_choice):
    """Render the slot-2 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it3(inputs, model_choice):
    """Render the slot-3 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it4(inputs, model_choice):
    """Render the slot-4 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it5(inputs, model_choice):
    """Render the slot-5 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it6(inputs, model_choice):
    """Render the slot-6 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it7(inputs, model_choice):
    """Render the slot-7 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)

def send_it8(inputs, model_choice):
    """Render the slot-8 image: run the model at index `model_choice` on `inputs`."""
    return models2[model_choice](inputs)
# No custom CSS; kept so the gr.Blocks(css=...) call below stays unchanged.
css = """"""
# NOTE(review): this whole builder targets gradio 3.x — gr.Interface.load,
# gr.Pil, Column(style=...), and queue(concurrency_count=...) were removed
# in gradio 4; confirm the pinned gradio version before upgrading.
with gr.Blocks(css=css) as myface:
    # Twitter/OpenGraph embed metadata for sharing the Space as a player card.
    gr.HTML("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="twitter:card" content="player"/>
<meta name="twitter:site" content=""/>
<meta name="twitter:player" content="https://omnibus-maximum-multiplier-places.hf.space"/>
<meta name="twitter:player:stream" content="https://omnibus-maximum-multiplier-places.hf.space"/>
<meta name="twitter:player:width" content="100%"/>
<meta name="twitter:player:height" content="600"/>
<meta property="og:title" content="Embedded Live Viewer"/>
<meta property="og:description" content="Tweet Genie - A Huggingface Space"/>
<meta property="og:image" content="https://cdn.glitch.global/80dbe92e-ce75-44af-84d5-74a2e21e9e55/omnicard.png?v=1676772531627"/>
<!--<meta http-equiv="refresh" content="0; url=https://huggingface.co/spaces/corbt/tweet-genie">-->
</head>
</html>
""")
    # Header row: title / description / misc. standalone tools (none of the
    # components in the "Tools" tab are wired to any event handler).
    with gr.Row():
        with gr.Tab("Title"):
            gr.HTML(""" <title>Maximum Multiplier</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;">
              <h1>Everything</h1>
              <br><br><h4>It just does a lot of things at the same time</h4>
            """)
        with gr.Tab("Description"):
            gr.HTML("""<div style="text-align:center;">
            <h4>As many Text-to-Image Models as I can fit here</h4><br>
            <h4>Suggest more up in the "Community" button</h4>
            </div>""")
        with gr.Tab("Tools"):
            with gr.Tab("View"):
                with gr.Row():
                    with gr.Column(style="width=50%, height=70%"):
                        gr.Pil(label="Crop")
                    with gr.Column(style="width=50%, height=70%"):
                        gr.Pil(label="Crop")
            with gr.Tab("Draw"):
                with gr.Column(style="width=50%, height=70%"):
                    gr.Pil(label="Crop")
                with gr.Column(style="width=50%, height=70%"):
                    gr.Pil(label="Draw")
                gr.ImagePaint(label="Draw")
            with gr.Tab("Text"):
                with gr.Row():
                    with gr.Column(scale=50):
                        gr.Textbox(label="", lines=8, interactive=True)
                    with gr.Column(scale=50):
                        gr.Textbox(label="", lines=8, interactive=True)
            with gr.Tab("Color Picker"):
                with gr.Row():
                    with gr.Column(scale=50):
                        gr.ColorPicker(label="Color", interactive=True)
                    with gr.Column(scale=50):
                        gr.ImagePaint(label="Draw", interactive=True)
    # Control row: prompt entry, model selection, and the three action buttons.
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(label="Short Prompt", lines=2)
            # Model selection dropdown. type="index" means handlers receive
            # the selected POSITION (an int), which indexes `models`/`models2`.
            model_name1 = gr.Dropdown(show_label=False, choices=[m for m in models], type="index", value=current_model, interactive=True)
        with gr.Column():
            use_short = gr.Button("Use Short Prompt")
            see_prompts = gr.Button("Generate Magic Prompts")
            run = gr.Button("Launch")
    # Main grid: 8 image outputs interleaved with the 8 editable prompt boxes
    # (magic1..magic8) that feed them.
    with gr.Tab("Main"):
        with gr.Row():
            output1 = gr.Image(label=(f"{current_model}"))
            output2 = gr.Image(label=(f"{current_model}"))
            output3 = gr.Image(label=(f"{current_model}"))
            output4 = gr.Image(label=(f"{current_model}"))
        with gr.Row():
            magic1 = gr.Textbox(lines=4)
            magic2 = gr.Textbox(lines=4)
            magic3 = gr.Textbox(lines=4)
            magic4 = gr.Textbox(lines=4)
        with gr.Row():
            output5 = gr.Image(label=(f"{current_model}"))
            output6 = gr.Image(label=(f"{current_model}"))
            output7 = gr.Image(label=(f"{current_model}"))
            output8 = gr.Image(label=(f"{current_model}"))
        with gr.Row():
            magic5 = gr.Textbox(lines=4)
            magic6 = gr.Textbox(lines=4)
            magic7 = gr.Textbox(lines=4)
            magic8 = gr.Textbox(lines=4)

    def short_prompt(inputs):
        """Identity passthrough: copy the short prompt verbatim into a magic box."""
        return (inputs)

    # Changing the dropdown relabels all eight output images with the new name.
    model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6, output7, output8])
    # "Launch" renders from the magic1..magic8 boxes (NOT from input_text), so
    # the boxes must be populated first via use_short or see_prompts.
    # Eight separate click bindings let the eight generations run independently.
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    run.click(send_it2, inputs=[magic2, model_name1], outputs=[output2])
    run.click(send_it3, inputs=[magic3, model_name1], outputs=[output3])
    run.click(send_it4, inputs=[magic4, model_name1], outputs=[output4])
    run.click(send_it5, inputs=[magic5, model_name1], outputs=[output5])
    run.click(send_it6, inputs=[magic6, model_name1], outputs=[output6])
    run.click(send_it7, inputs=[magic7, model_name1], outputs=[output7])
    run.click(send_it8, inputs=[magic8, model_name1], outputs=[output8])
    # "Use Short Prompt" copies the raw short prompt into every magic box.
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic2)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic3)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic4)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic5)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic6)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic7)
    use_short.click(short_prompt, inputs=[input_text], outputs=magic8)
    # "Generate Magic Prompts" fills each magic box from the MagicPrompt Space.
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it2, inputs=[input_text], outputs=magic2)
    see_prompts.click(text_it3, inputs=[input_text], outputs=magic3)
    see_prompts.click(text_it4, inputs=[input_text], outputs=magic4)
    see_prompts.click(text_it5, inputs=[input_text], outputs=magic5)
    see_prompts.click(text_it6, inputs=[input_text], outputs=magic6)
    see_prompts.click(text_it7, inputs=[input_text], outputs=magic7)
    see_prompts.click(text_it8, inputs=[input_text], outputs=magic8)

# Queue events and start the server (gradio 3.x style; see NOTE above).
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)