import gradio as gr
import os
import sys
from pathlib import Path
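# Model repos (Hugging Face Hub IDs) exposed in the model dropdown below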
models = [
    "Yntec/photoMovieRealistic",
    "Yntec/a-ZovyaRPGV3VAE",
    "Yntec/a-ZoviaRPGArtistV2VAE",
    "Yntec/InsaneRealisticCVAE",
    "Yntec/Lunar",
    "Yntec/LunarLuma",
    "Yntec/QToriReloaded",
    "Yntec/Ninja-Diffusers",
    "Yntec/ChildrenStoriesAnime",
    "Yntec/DucHaitenAnime768",
    "Yntec/DucHaitenClassicAnime768",
    "Yntec/WesternAnimation",
    "Yntec/GOLDFish",
    "Yntec/Rainbowsphere",
    "Yntec/DreamAnything",
    "Yntec/yabalMixTrue25D_v2_VAE",
    "dreamlike-art/dreamlike-anime-1.0",
    "Yntec/DucHaiten-Retro-Diffusers",
    "Hemlok/QuinceMix",
    "Yntec/KIDSILLUSTRATIONS",
    "Yntec/COOLKIDSV2",
    "Yntec/Pavo-Mix-Diffusers",
    "digiplay/KawaiiRealisticAnimeMix_A0.3",
    "Ojimi/anime-kawai-diffusion",
    "LottePeisch/RevAnimated-Diffusers",
    "Linaqruf/animagine-xl",
    "Envvi/Inkpunk-Diffusion",
    "Birchlabs/waifu-diffusion-xl-unofficial",
    "xiaolxl/GuoFeng3",
    "digiplay/AI-infinity-V1-fp16",
    "Yntec/photoMovieX",
    "Yntec/OrangeRemix",
    "digiplay/majicMIX_realistic_v6",
    "digiplay/LuckyStrikeMix1.05_Lovelylady",
    "digiplay/nk15_diffusers",
    "digiplay/TWingshadow_v1.2",
    "digiplay/bluePencilRealistic_v01",
    "digiplay/LemonCreami",
    "digiplay/LunarDiffusion_v1.27",
    "Linaqruf/hermitage-xl",
    "ddPn08/SwimInLatent",
    "hogiahien/LoliV5-edited",
    "NoCrypt/SomethingV2_2",
    "botp/Realistic_Vision_V1.4",
    "digiplay/AbsoluteReality_v1.8.1",
    "coreml/coreml-realisticVision-v50VAE_cn",
]
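# Default model shown when the app starts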
current_model = models[0]

# Prompt-expansion helper loaded from another Space; used by the "Expand the prompt" button below
text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
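# Wrap each model repo as a callable Interface so send_it1 can run whichever one is selected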
models2 = [
    gr.Interface.load(f"models/{model}", live=True, preprocess=False)
    for model in models
]
def text_it1(inputs, text_gen1=text_gen1):
    # Expand a short prompt idea into a longer prompt via the MagicPrompt Space
    return text_gen1(inputs)

def set_model(current_model):
    # Relabel the output image with the newly selected model's name
    current_model = models[current_model]
    return gr.update(label=f"{current_model}")

def send_it1(inputs, model_choice):
    # Generate an image by calling the Interface for the chosen model
    proc1 = models2[model_choice]
    return proc1(inputs)
css="""""" | |
with gr.Blocks(css=css) as myface: | |
gr.HTML() | |
with gr.Row(): | |
with gr.Tab("说明"): | |
gr.HTML(""" <title>多模型加速生成</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;"> | |
<h1>输入文字生成图像</h1> | |
<br><h4>第一次导入模型可能需要200秒生成</h4> | |
<br><h4>之后一般只需20秒左右</h4>""") | |
with gr.Tab("内置模型解释"): | |
gr.HTML(""" <title>在内置模型中生成图片可能出现的问题</title><div style="text-align: center; max-width: 1500px; margin: 0 auto;"> | |
<h4>1. 使用Envvi/Inkpunk-Diffusion时,需要在提示词中加上“nvinkpunk”</h4> | |
<h4>2. 使用digiplay/majicMIX_realistic_v6时,在提示词中加上“HI-RES”效果更好</h4> | |
<h4>3. 使用digiplay/LuckyStrikeMix1.05_Lovelylady时,在提示词中加上“photorealism”,“8k”等关键词,效果更好</h4>""") | |
with gr.Row(): | |
with gr.Column(scale=100): | |
magic1=gr.Textbox(label="输入提示词:", lines=4, placeholder = "英文") | |
run=gr.Button("生成图像") | |
with gr.Row(): | |
with gr.Column(scale=100): | |
model_name1 = gr.Dropdown(label="选择模型", choices=[m for m in models], type="index", value=current_model, interactive=True) | |
with gr.Row(): | |
with gr.Column(style="width=1600px"): | |
output1=gr.Image(label=(f"{current_model}"), width=600, height=600) | |
with gr.Row(): | |
with gr.Column(scale=50): | |
gr.HTML("""<h1>没有想法?</h1>""") | |
input_text=gr.Textbox(label="输入提示词点子:",lines=2, placeholder = "英文") | |
use_short=gr.Button("使用精简提示词(使用框中提示词)") | |
see_prompts=gr.Button("扩充框内提示词") | |
def short_prompt(inputs): | |
return(inputs) | |
model_name1.change(set_model,inputs=model_name1,outputs=[output1]) | |
run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1]) | |
use_short.click(short_prompt,inputs=[input_text],outputs=magic1) | |
see_prompts.click(text_it1,inputs=[input_text],outputs=magic1) | |
myface.queue(concurrency_count=200) | |
myface.launch(inline=True, show_api=False, max_threads=400) |