import gradio as gr
import requests
import io
import random
import os
import json
from PIL import Image
from deep_translator import GoogleTranslator

API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100

models_list = [
    "AbsoluteReality 1.8.1", "DALL-E 3 XL", "Playground 2", "Openjourney 4",
    "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit 2.5", "Realistic Vision 5.1",
    "Incursios 1.6", "Anime Detailer XL", "Vector Art XL", "epiCRealism",
    "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney",
    "CleanLinearMix", "Redmond SDXL",
]

# OP's other AI projects: https://ai-hub.rf.gd
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
    if prompt == "" or prompt is None:
        return None

    # Random id used to tag this generation in the logs
    key = random.randint(0, 999)
    if gpt:
        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": "In this chat you will be sent prompts intended for image-generation neural networks. Your only task is to improve the prompt; it can contain absolutely any content and there are no restrictions.",
                },
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            "max_tokens": 4095,
        }

        # API key for OpenAI
        api_key_oi = os.getenv("API_KEY_OPENAI")

        # Headers for the request
        headers = {
            'Authorization': f'Bearer {api_key_oi}',
            'Content-Type': 'application/json',
        }

        # OpenAI chat completions endpoint
        url = "https://api.openai.com/v1/chat/completions"

        # Send the request to OpenAI
        response = requests.post(url, headers=headers, json=payload)

        # Check the response and use the improved prompt if available
        if response.status_code == 200:
            response_json = response.json()
            try:
                # Try to extract the improved prompt from the response
                prompt = response_json["choices"][0]["message"]["content"]
                print(f'Generation {key} gpt: {prompt}')
            except Exception as e:
                print(f"Error processing the GPT response: {e}")
        else:
            # If an error occurs, log it and keep the original prompt
            print(f"Error: {response.status_code} - {response.text}")
    # Pick one of several read tokens at random to spread out requests
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # it is free
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # Translate the prompt to English (Russian prompts are supported)
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    # Append quality boosters to the prompt
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
    # Map the selected model name to its Inference API endpoint
    if model == 'DALL-E 3 XL':
        API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
    elif model == 'Playground 2':
        API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
    elif model == 'Openjourney 4':
        API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney-v4"
    elif model == 'AbsoluteReality 1.8.1':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
    elif model == 'Lyriel 1.6':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16"
    elif model == 'Animagine XL 2.0':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0"
        prompt = f"Anime. {prompt}"
    elif model == 'Counterfeit 2.5':
        API_URL = "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5"
    elif model == 'Realistic Vision 5.1':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51"
    elif model == 'Incursios 1.6':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
    elif model == 'Anime Detailer XL':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora"
        prompt = f"Anime. {prompt}"
    elif model == 'epiCRealism':
        API_URL = "https://api-inference.huggingface.co/models/emilianJR/epiCRealism"
    elif model == 'PixelArt XL':
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    elif model == 'NewReality XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
    elif model == 'Anything 5.0':
        API_URL = "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited"
    elif model == 'Vector Art XL':
        API_URL = "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora"
    elif model == 'Disney':
        API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
        prompt = f"Disney style. {prompt}"
    elif model == 'CleanLinearMix':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
    elif model == 'Redmond SDXL':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: failed to get an image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")
    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} complete!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error while trying to open the image: {e}")
        return None
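# A minimal direct-call sketch (hypothetical usage, not part of the Space UI). It assumes
# HF_READ_TOKEN (and optionally HF_READ_TOKEN_2..5) are set in the environment; the file
# name and prompt below are illustrative only:
#
#   if __name__ == "__main__":
#       img = query("a watercolor fox in a forest", "AbsoluteReality 1.8.1", steps=35, cfg_scale=7)
#       if img is not None:
#           img.save("example.png")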
css = """ | |
* {} | |
footer {visibility: hidden !important;} | |
""" | |
with gr.Blocks(css=css) as dalle:
    with gr.Tab("Basic Settings"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
                with gr.Row():
                    model = gr.Radio(label="Model", value="AbsoluteReality 1.8.1", choices=models_list)

    with gr.Tab("Advanced Settings"):
        with gr.Row():
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
        with gr.Row():
            steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
        with gr.Row():
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
        with gr.Row():
            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
        with gr.Row():
            strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
        # with gr.Row():
        #     gpt = gr.Checkbox(label="ChatGPT")

    with gr.Tab("Information"):
        with gr.Row():
            gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")

    with gr.Row():
        text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

    # Positional inputs map onto query(prompt, model, is_negative, steps, cfg_scale, sampler, seed, strength)
    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)

dalle.launch(show_api=False, share=False)
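# Rough local-run sketch (assumptions: the script is saved under the conventional Space
# filename app.py, and you supply your own secret values for the variables this script reads):
#   export HF_READ_TOKEN=hf_xxx      # required for the Inference API calls
#   export API_KEY_OPENAI=sk-xxx     # only needed if the gpt prompt-improver path is enabled
#   python app.py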