import gradio as gr
import requests
import time
import json
from contextlib import closing
from websocket import create_connection
from deep_translator import GoogleTranslator
from langdetect import detect
import os
from PIL import Image
import io
import base64
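
# Text-to-image Gradio Space: prompts are sent to an external Stable Diffusion API
# (endpoints read from the url_sd1 / url_sd2 / url_sd3 environment variables), Russian
# prompts are auto-translated to English first, and a separate remote service
# (url_up / url_up_f) handles optional upscaling with GFPGAN / CodeFormer face restoration.
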
def flip_text(prompt, negative_prompt, task, steps, sampler, cfg_scale, seed):
    # Log the raw request parameters.
    result = {"prompt": prompt, "negative_prompt": negative_prompt, "task": task,
              "steps": steps, "sampler": sampler, "cfg_scale": cfg_scale, "seed": seed}
    print(result)

    # Translate Russian prompts to English before they are sent to the API.
    language = detect(prompt)
    if language == 'ru':
        prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
        print(prompt)

    cfg = int(cfg_scale)
    steps = int(steps)
    seed = int(seed)

    # Generation, polling and result endpoints are supplied via environment variables.
    url_sd1 = os.getenv("url_sd1")
    url_sd2 = os.getenv("url_sd2")
    url_sd3 = os.getenv("url_sd3")

    # Map the UI model name to the URL-encoded checkpoint identifier expected by the API.
    models = {
        'Realistic Vision 5.0': 'Realistic_Vision_V5.0.safetensors+%5B614d1063%5D',
        'Dreamshaper 8': 'dreamshaper_8.safetensors+%5B9d40847d%5D',
        'Deliberate 3': 'deliberate_v3.safetensors+%5Bafd9d2d4%5D',
        'Analog Diffusion': 'analog-diffusion-1.0.ckpt+%5B9ca13f02%5D',
        'Lyriel 1.6': 'lyriel_v16.safetensors+%5B68fceea2%5D',
        "Elldreth's Vivid Mix": 'elldreths-vivid-mix.safetensors+%5B342d9d26%5D',
        'Anything V5': 'anything-v4.5-pruned.ckpt+%5B65745d25%5D',
        'Openjourney V4': 'openjourney_V4.ckpt+%5Bca2f377f%5D',
        'AbsoluteReality 1.8.1': 'absolutereality_v181.safetensors+%5B3d9d4d2b%5D',
        'epiCRealism v5': 'epicrealism_naturalSinRC1VAE.safetensors+%5B90a4c676%5D',
        'CyberRealistic 3.3': 'cyberrealistic_v33.safetensors+%5B82b0d085%5D',
        'ToonYou 6': 'toonyou_beta6.safetensors+%5B980f6b15%5D',
    }
    model = models[task]

    # Submit the generation job and remember its id.
    r = requests.get(
        f'{url_sd1}{prompt}&model={model}&negative_prompt={negative_prompt}'
        f'&steps={steps}&cfg={cfg}&seed={seed}&sampler={sampler}&aspect_ratio=square',
        timeout=10)
    job = r.json()['job']

    # Poll the job status up to 10 times, 2 s apart, and return the image URL on success.
    for _ in range(10):
        time.sleep(2)
        r2 = requests.get(f'{url_sd2}{job}', timeout=10)
        status = r2.json()['status']
        if status == 'succeeded':
            return f'{url_sd3}{job}.png'
        if status == 'failed':
            return None
    return None

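# The upscaler posts the generated image (as a base64 data URI) to an external
# Gradio-style API endpoint (url_up, fn_index 81) together with the scale factor,
# the chosen upscaling method and the GFPGAN / CodeFormer visibility weights, then
# builds the result URL from the file name returned by that service (url_up_f).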
def mirror(image_output, scale_by, method, gfpgan, codeformer):
    url_up = os.getenv("url_up")
    url_up_f = os.getenv("url_up_f")
    scale_by = int(scale_by)
    gfpgan = int(gfpgan)
    codeformer = int(codeformer)

    # Encode the generated image as a base64 data URI.
    with open(image_output, "rb") as image_file:
        encoded_string2 = base64.b64encode(image_file.read()).decode('utf-8')
    encoded_string2 = "data:image/png;base64," + encoded_string2

    data = {"fn_index": 81,
            "data": [0, 0, encoded_string2, None, "", "", True, gfpgan, codeformer, 0,
                     scale_by, 512, 512, None, method, "None", 1, False, [], "", ""],
            "session_hash": ""}
    print(data)
    r = requests.post(f"{url_up}", json=data, timeout=100)
    print(r.text)

    # The response contains the upscaled file's name; build its download URL.
    ph = f"{url_up_f}" + str(r.json()['data'][0][0]['name'])
    return ph

css = """ | |
#generate { | |
width: 100%; | |
background: #e253dd !important; | |
border: none; | |
border-radius: 50px; | |
outline: none !important; | |
color: white; | |
} | |
#generate:hover { | |
background: #de6bda !important; | |
outline: none !important; | |
color: #fff; | |
} | |
#image_output { | |
display: flex; | |
justify-content: center; | |
} | |
footer {visibility: hidden !important;} | |
#image_output { | |
height: 100% !important; | |
} | |
""" | |
with gr.Blocks(css=css) as demo:
    with gr.Tab("Базовые настройки"):  # "Basic settings"
        with gr.Row():
            prompt = gr.Textbox(placeholder="Введите описание изображения...", show_label=True, label='Описание изображения:', lines=3)  # "Image description"
        with gr.Row():
            task = gr.Radio(interactive=True, value="Deliberate 3", show_label=True, label="Модель нейросети:",  # "Model"
                            choices=["AbsoluteReality 1.8.1", "Elldreth's Vivid Mix", "Anything V5", "Openjourney V4", "Analog Diffusion",
                                     "Lyriel 1.6", "Realistic Vision 5.0", "Dreamshaper 8", "epiCRealism v5",
                                     "CyberRealistic 3.3", "ToonYou 6", "Deliberate 3"])
    with gr.Tab("Расширенные настройки"):  # "Advanced settings"
        with gr.Row():
            negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=True, label='Negative Prompt:', lines=3, value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry")
        with gr.Row():
            sampler = gr.Dropdown(value="DPM++ SDE Karras", show_label=True, label="Sampling Method:", choices=[
                "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"])
        with gr.Row():
            steps = gr.Slider(show_label=True, label="Sampling Steps:", minimum=1, maximum=30, value=25, step=1)
        with gr.Row():
            cfg_scale = gr.Slider(show_label=True, label="CFG Scale:", minimum=1, maximum=20, value=7, step=1)
        with gr.Row():
            seed = gr.Number(show_label=True, label="Seed:", minimum=-1, maximum=1000000, value=-1, step=1)
    with gr.Tab("Настройки апскейлинга"):  # "Upscaling settings"
        with gr.Column():
            with gr.Row():
                scale_by = gr.Number(show_label=True, label="Во сколько раз увеличить:", minimum=1, maximum=4, value=2, step=1)  # "Upscale factor"
            with gr.Row():
                method = gr.Dropdown(show_label=True, value="ESRGAN_4x", label="Алгоритм увеличения", choices=["ScuNET GAN", "SwinIR 4x", "ESRGAN_4x", "R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"])  # "Upscaling algorithm"
        with gr.Column():
            with gr.Row():
                gfpgan = gr.Slider(show_label=True, label="Эффект GFPGAN", minimum=0, maximum=1, value=0, step=0.1)  # "GFPGAN effect"
            with gr.Row():
                codeformer = gr.Slider(show_label=True, label="Эффект CodeFormer", minimum=0, maximum=1, value=0, step=0.1)  # "CodeFormer effect"
    with gr.Column():
        text_button = gr.Button("Сгенерировать изображение", variant='primary', elem_id="generate")  # "Generate image"
    with gr.Column():
        image_output = gr.Image(show_label=True, show_download_button=True, interactive=True, label='Результат:', elem_id='image_output', type='filepath')  # "Result"
    text_button.click(flip_text, inputs=[prompt, negative_prompt, task, steps, sampler, cfg_scale, seed], outputs=image_output)
    img2img_b = gr.Button("Увеличить изображение", variant='secondary')  # "Upscale image"
    image_i2i = gr.Image(show_label=True, label='Увеличенное изображение:')  # "Upscaled image"
    img2img_b.click(mirror, inputs=[image_output, scale_by, method, gfpgan, codeformer], outputs=image_i2i)

demo.queue(concurrency_count=24)
demo.launch()