import gradio as gr
import requests
import time
import json
from contextlib import closing
from websocket import create_connection
from deep_translator import GoogleTranslator
from langdetect import detect
import os
from PIL import Image
import io
import base64
import re
from gradio_client import Client
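# This Space wires a Gradio UI to several external Stable Diffusion backends.
# flip_text() turns a text prompt into an image via the backend selected by
# `task`; mirror() sends a generated image to a separate upscaling backend.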
def flip_text(prompt, negative_prompt, task, steps, sampler, cfg_scale, seed):
    result = {"prompt": prompt, "negative_prompt": negative_prompt, "task": task, "steps": steps, "sampler": sampler, "cfg_scale": cfg_scale, "seed": seed}
    print(result)
    # Translate Russian prompts to English before sending them to the backends.
    try:
        language = detect(prompt)
        if language == 'ru':
            prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(prompt)
    except Exception:
        pass
    # Keep only Latin/Cyrillic letters and whitespace in the prompt.
    prompt = re.sub(r'[^a-zA-Zа-яА-Я\s]', '', prompt)
    cfg = int(cfg_scale)
    steps = int(steps)
    seed = int(seed)
    width = 1024
    height = 1024
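    # Backend endpoints and the HF token are kept in Space secrets / environment
    # variables (url_sd1..url_sd6, hf_token) rather than hard-coded in this file.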
    url_sd1 = os.getenv("url_sd1")
    url_sd2 = os.getenv("url_sd2")
    url_sd3 = os.getenv("url_sd3")
    url_sd4 = os.getenv("url_sd4")
    print("--3-->", url_sd3)
    print("--4-->", url_sd4)
    url_sd5 = os.getenv("url_sd5")
    url_sd6 = os.getenv("url_sd6")
    hf_token = os.getenv("hf_token")
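    # "Playground v2" and "OpenDalle v1.1" are served by remote Gradio Spaces and
    # called through gradio_client; the positional arguments passed to predict()
    # have to match each remote Space's /run API signature.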
if task == "Playground v2": | |
playground = str(os.getenv("playground")) | |
client = Client(playground, hf_token=hf_token) | |
result = client.predict(prompt, "", False, 220, 1024, 1024, 3, True, api_name="/run") | |
return result[0][0]['image'] | |
if task == "OpenDalle v1.1": | |
headers = { | |
'authority': 'mrfakename-opendallev1-1-gpu-demo.hf.space', | |
'accept': 'text/event-stream', | |
'accept-language': 'ru,en;q=0.9,la;q=0.8,ja;q=0.7', | |
'cache-control': 'no-cache', | |
'referer': 'https://mrfakename-opendallev1-1-gpu-demo.hf.space/?__theme=light', | |
'sec-ch-ua': '"Not_A Brand";v="8", "Chromium";v="120", "YaBrowser";v="24.1", "Yowser";v="2.5"', | |
'sec-ch-ua-mobile': '?0', | |
'sec-ch-ua-platform': '"Windows"', | |
'sec-fetch-dest': 'empty', | |
'sec-fetch-mode': 'cors', | |
'sec-fetch-site': 'same-origin', | |
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 YaBrowser/24.1.0.0 Safari/537.36', | |
} | |
opendalle = str(os.getenv("opendalle")) | |
client = Client(opendalle, headers=headers) | |
result = client.predict(prompt, "", "", "", False, False, False, 999, 1024, 1024, 5, 5, 25, 25, False, api_name="/run") | |
return result | |
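    # Default SDXL path: talk to a Gradio-style websocket queue on url_sd3
    # (send fn_index/session_hash, wait for "process_starts", then read the
    # base64-encoded image from the completed event). On failure, fall back
    # to the url_sd4 backend in the except branch below.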
    try:
        with closing(create_connection(f"{url_sd3}", timeout=60)) as conn:
            conn.send('{"fn_index":3,"session_hash":""}')
            conn.send(f'{{"data":["{prompt}, 4k photo","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry",7.5,"(No style)"],"event_data":null,"fn_index":3,"session_hash":""}}')
            while True:
                status = json.loads(conn.recv())['msg']
                if status == 'estimation':
                    continue
                if status == 'process_starts':
                    break
            photo = json.loads(conn.recv())['output']['data'][0][0]
            photo = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
            photo = Image.open(io.BytesIO(base64.decodebytes(bytes(photo, "utf-8"))))
            return photo
        #data = {"inputs":f"{prompt}, 4k photo","options":{"negative_prompt":"[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","width":1024,"height":1024,"guidance_scale":7,"num_inference_steps":35}}
        #response = requests.post(f'{url_sd5}', json=data)
        #print(response.text)
        #print(response.json()['image']['file_name'])
        #file_name = response.json()['image']['file_name']
        #photo = f"{url_sd6}{file_name}.png"
        #return photo
    except Exception:
        # Fallback backend (url_sd4) using the same websocket queue protocol.
        with closing(create_connection(f"{url_sd4}", timeout=60)) as conn:
            conn.send('{"fn_index":0,"session_hash":""}')
            conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","dreamshaperXL10_alpha2.safetensors [c8afe2ef]",30,"DPM++ 2M Karras",7,1024,1024,-1],"event_data":null,"fn_index":0,"session_hash":""}}')
            # Skip the queue/status messages, then read the completed event.
            conn.recv()
            conn.recv()
            conn.recv()
            conn.recv()
            photo = json.loads(conn.recv())['output']['data'][0]
            photo = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
            photo = Image.open(io.BytesIO(base64.decodebytes(bytes(photo, "utf-8"))))
            return photo
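    # The block below is an older, fully commented-out fallback that used the
    # public Prodia SDXL Space; it is kept for reference only.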
    #except:
    #    try:
    #        client = Client("https://prodia-sdxl-stable-diffusion-xl.hf.space")
    #        result = client.predict(prompt,"[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","sd_xl_base_1.0.safetensors [be9edd61]",25,"DPM++ 2M Karras",7,1024,1024,-1,fn_index=0)
    #        return result
    #    except:
    #        print("n_2")
    #        print(url_sd4)
    #        with closing(create_connection(f"{url_sd4}", timeout=60)) as conn:
    #            conn.send('{"fn_index":0,"session_hash":""}')
    #            conn.send(f'{{"data":["{prompt}","[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry","dreamshaperXL10_alpha2.safetensors [c8afe2ef]",30,"DPM++ 2M Karras",7,1024,1024,-1],"event_data":null,"fn_index":0,"session_hash":""}}')
    #            conn.recv()
    #            conn.recv()
    #            conn.recv()
    #            conn.recv()
    #            photo = json.loads(conn.recv())['output']['data'][0]
    #            photo = photo.replace('data:image/jpeg;base64,', '').replace('data:image/png;base64,', '')
    #            photo = Image.open(io.BytesIO(base64.decodebytes(bytes(photo, "utf-8"))))
    #            return photo
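    # flipp() appears to target a full AUTOMATIC1111-style WebUI exposed at
    # url_sd1 (the long fn_index 231 payload mirrors its txt2img call), but it
    # is never invoked anywhere in this file.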
    def flipp():
        if task == 'Stable Diffusion XL 1.0':
            model = 'sd_xl_base_1.0'
        if task == 'Crystal Clear XL':
            model = '[3d] crystalClearXL_ccxl_97637'
        if task == 'Juggernaut XL':
            model = '[photorealistic] juggernautXL_version2_113240'
        if task == 'DreamShaper XL':
            model = '[base model] dreamshaperXL09Alpha_alpha2Xl10_91562'
        if task == 'SDXL Niji':
            model = '[midjourney] sdxlNijiV51_sdxlNijiV51_112807'
        if task == 'Cinemax SDXL':
            model = '[movie] cinemaxAlphaSDXLCinema_alpha1_107473'
        if task == 'NightVision XL':
            model = '[photorealistic] nightvisionXLPhotorealisticPortrait_beta0702Bakedvae_113098'
        print("n_3")
        negative = negative_prompt
        try:
            with closing(create_connection(f"{url_sd1}")) as conn:
                conn.send('{"fn_index":231,"session_hash":""}')
                conn.send(f'{{"data":["task()","{prompt}","{negative}",[],{steps},"{sampler}",false,false,1,1,{cfg},{seed},-1,0,0,0,false,{width},{height},false,0.7,2,"Lanczos",0,0,0,"Use same sampler","","",[],"None",true,"{model}","Automatic",null,null,null,false,false,"positive","comma",0,false,false,"","Seed","",[],"Nothing","",[],"Nothing","",[],true,false,false,false,0,null,null,false,null,null,false,null,null,false,50,[],"","",""],"event_data":null,"fn_index":231,"session_hash":""}}')
                print(conn.recv())
                print(conn.recv())
                print(conn.recv())
                print(conn.recv())
                photo = f"{url_sd2}" + str(json.loads(conn.recv())['output']['data'][0][0]["name"])
                return photo
        except Exception:
            return None
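# mirror() upscales a generated image: the file is base64-encoded and posted to
# an external backend (url_up, fn_index 81), which presumably exposes an
# AUTOMATIC1111-style "Extras" upscaling endpoint; the result is returned as a
# URL built from url_up_f.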
def mirror(image_output, scale_by, method, gfpgan, codeformer):
    url_up = os.getenv("url_up")
    url_up_f = os.getenv("url_up_f")
    print("~~ up", url_up)
    print("~~ f", url_up_f)
    scale_by = int(scale_by)
    # GFPGAN/CodeFormer strengths come from 0-1 sliders, so keep them fractional.
    gfpgan = float(gfpgan)
    codeformer = float(codeformer)
    # Encode the generated image as a base64 data URI for the upscaling backend.
    with open(image_output, "rb") as image_file:
        encoded_string2 = base64.b64encode(image_file.read()).decode("utf-8")
    encoded_string2 = "data:image/png;base64," + encoded_string2
    data = {"fn_index": 81, "data": [0, 0, encoded_string2, None, "", "", True, gfpgan, codeformer, 0, scale_by, 512, 512, None, method, "None", 1, False, [], "", ""], "session_hash": ""}
    print(data)
    r = requests.post(f"{url_up}", json=data, timeout=100)
    print(r.text)
    ph = f"{url_up_f}" + str(r.json()['data'][0][0]['name'])
    return ph
css = """ | |
#generate { | |
width: 100%; | |
background: #e253dd !important; | |
border: none; | |
border-radius: 50px; | |
outline: none !important; | |
color: white; | |
} | |
#generate:hover { | |
background: #de6bda !important; | |
outline: none !important; | |
color: #fff; | |
} | |
footer {visibility: hidden !important;} | |
#image_output { | |
height: 100% !important; | |
} | |
""" | |
with gr.Blocks(css=css) as demo:
    with gr.Tab("Базовые настройки"):  # "Basic settings"
        with gr.Row():
            prompt = gr.Textbox(placeholder="Введите описание изображения...", show_label=True, label='Описание изображения:', lines=3)  # image description prompt
        with gr.Row():
            task = gr.Radio(interactive=True, value="Stable Diffusion XL 1.0", show_label=True, label="Модель нейросети:",  # model selector
                            choices=['Stable Diffusion XL 1.0', 'Crystal Clear XL',
                                     'Juggernaut XL', 'DreamShaper XL',
                                     'SDXL Niji', 'Cinemax SDXL', 'NightVision XL',
                                     'Playground v2', 'OpenDalle v1.1'])
    with gr.Tab("Расширенные настройки"):  # "Advanced settings"
        with gr.Row():
            negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=True, label='Negative Prompt:', lines=3, value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry")
        with gr.Row():
            sampler = gr.Dropdown(value="DPM++ SDE Karras", show_label=True, label="Sampling Method:", choices=[
                "Euler", "Euler a", "Heun", "DPM++ 2M", "DPM++ SDE", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"])
        with gr.Row():
            steps = gr.Slider(show_label=True, label="Sampling Steps:", minimum=1, maximum=50, value=35, step=1)
        with gr.Row():
            cfg_scale = gr.Slider(show_label=True, label="CFG Scale:", minimum=1, maximum=20, value=7, step=1)
        with gr.Row():
            seed = gr.Number(show_label=True, label="Seed:", minimum=-1, maximum=1000000, value=-1, step=1)
    with gr.Tab("Настройки апскейлинга"):  # "Upscaling settings"
        with gr.Column():
            with gr.Row():
                scale_by = gr.Number(show_label=True, label="Во сколько раз увеличить:", minimum=1, maximum=2, value=2, step=1)  # upscale factor
            with gr.Row():
                method = gr.Dropdown(show_label=True, value="ESRGAN_4x", label="Алгоритм увеличения", choices=["ScuNET GAN", "SwinIR 4x", "ESRGAN_4x", "R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"])  # upscaling algorithm
        with gr.Column():
            with gr.Row():
                gfpgan = gr.Slider(show_label=True, label="Эффект GFPGAN (для улучшения лица)", minimum=0, maximum=1, value=0, step=0.1)  # GFPGAN face-restoration strength
            with gr.Row():
                codeformer = gr.Slider(show_label=True, label="Эффект CodeFormer (для улучшения лица)", minimum=0, maximum=1, value=0, step=0.1)  # CodeFormer face-restoration strength
    with gr.Column():
        text_button = gr.Button("Сгенерировать изображение", variant='primary', elem_id="generate")  # "Generate image"
    with gr.Column():
        image_output = gr.Image(show_download_button=True, interactive=False, label='Результат:', elem_id='image_output', type='filepath')  # "Result"
    text_button.click(flip_text, inputs=[prompt, negative_prompt, task, steps, sampler, cfg_scale, seed], outputs=image_output)
    img2img_b = gr.Button("Увеличить изображение", variant='secondary')  # "Upscale image"
    image_i2i = gr.Image(show_label=True, label='Увеличенное изображение:')  # "Upscaled image"
    img2img_b.click(mirror, inputs=[image_output, scale_by, method, gfpgan, codeformer], outputs=image_i2i)

demo.queue(concurrency_count=12)
demo.launch()
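# Note: queue(concurrency_count=...) is the Gradio 3.x API; if this Space is
# upgraded to Gradio 4.x, the equivalent setting is queue(default_concurrency_limit=...).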