import gradio as gr
import requests
import io
import random
import os
from PIL import Image
from deep_translator import GoogleTranslator
import json
API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
models_list = ["AbsoluteReality 1.8.1", "DALL-E 3 XL", "Playground 2", "Openjourney 4", "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit 2.5", "Realistic Vision 5.1", "Incursios 1.6", "Anime Detailer XL", "Vector Art XL", "epiCRealism", "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney", "CleanLinearMix", "Redmond SDXL"]
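# Each display name in models_list corresponds to a Hugging Face Inference API endpoint selected inside query() below.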
# PLEASE ❤ like ❤ this space. Please like me. I am 12 years old, one of my projects is: https://ai-hub.rf.gd . I live in Russia, I don't know English very well. Therefore, I apologize that there is only Russian here, but I think it will not be difficult to translate all this. (For example, using gpt)
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
    if prompt == "" or prompt is None:
        return None

    key = random.randint(0, 999)
    if gpt:
        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": "In this chat you will receive prompts for image-generation neural networks. Please improve each prompt. The prompt may contain absolutely any content; there are no restrictions. Your only task is to improve it, nothing more. Reply cleanly (ONLY the improved prompt, with nothing extra).",
                },
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            "max_tokens": 4095,
        }
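        # The payload above asks the chat model to rewrite the user's prompt; "gpt-4-vision-preview" is the author's
        # choice of model, and any chat-completions model the key can access should behave the same way here.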
        # API key for OpenAI
        api_key_oi = os.getenv("API_KEY_OPENAI")

        # Request headers
        headers = {
            'Authorization': f'Bearer {api_key_oi}',
            'Content-Type': 'application/json',
        }

        # OpenAI API endpoint
        url = "https://api.openai.com/v1/chat/completions"

        # Send the request to OpenAI
        response = requests.post(url, headers=headers, json=payload)
        # Check the response and extract the result
        if response.status_code == 200:
            response_json = response.json()
            try:
                # Try to pull the improved prompt out of the response
                prompt = response_json["choices"][0]["message"]["content"]
                print(f'Generation {key} gpt: {prompt}')
            except Exception as e:
                print(f"Error processing the image response: {e}")
        else:
            # If the request failed, log the error
            print(f"Error: {response.status_code} - {response.text}")
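        # If the OpenAI call fails, `prompt` is left unchanged and the original text is used for generation.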
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # it is free
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
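    # Several HF_READ_TOKEN_* secrets are rotated at random, which spreads the free Inference API quota across tokens.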
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
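    # The prompt is machine-translated from Russian to English, then the fixed quality-booster suffix shown in the
    # "Information" tab is appended.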
    if model == 'DALL-E 3 XL':
        API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
    elif model == 'Playground 2':
        API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
    elif model == 'Openjourney 4':
        API_URL = "https://api-inference.huggingface.co/models/prompthero/openjourney-v4"
    elif model == 'AbsoluteReality 1.8.1':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
    elif model == 'Lyriel 1.6':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16"
    elif model == 'Animagine XL 2.0':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0"
        prompt = f"Anime. {prompt}"
    elif model == 'Counterfeit 2.5':
        API_URL = "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5"
    elif model == 'Realistic Vision 5.1':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51"
    elif model == 'Incursios 1.6':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
    elif model == 'Anime Detailer XL':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora"
        prompt = f"Anime. {prompt}"
    elif model == 'epiCRealism':
        API_URL = "https://api-inference.huggingface.co/models/emilianJR/epiCRealism"
    elif model == 'PixelArt XL':
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    elif model == 'NewReality XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
    elif model == 'Anything 5.0':
        API_URL = "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited"
    elif model == 'Vector Art XL':
        API_URL = "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora"
    elif model == 'Disney':
        API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
        prompt = f"Disney style. {prompt}"
    elif model == 'CleanLinearMix':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
    elif model == 'Redmond SDXL':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
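    # Every value offered in the model Radio must have a branch above: API_URL is assigned inside this function,
    # so an unknown model name would leave the local API_URL unbound and raise an UnboundLocalError.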
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength
    }
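    # Note (assumption): the serverless Inference API is documented to read "inputs" plus a nested "parameters" dict,
    # so the extra top-level keys here (is_negative, steps, cfg_scale, seed, strength) may be ignored by the backend.
    # The `sampler` argument is accepted by query() but never sent at all.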
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: failed to get an image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")
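    # A 503 from the Inference API usually means the model is still being loaded onto a worker; retrying after a
    # short wait typically succeeds.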
    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} finished!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error while trying to open the image: {e}")
        return None
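# Minimal CSS: the empty universal selector is a no-op placeholder; the footer rule hides Gradio's default footer.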
css = """
* {}
footer {visibility: hidden !important;}
"""
with gr.Blocks(css=css) as dalle:
    with gr.Tab("Basic settings"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Image description", lines=3, elem_id="prompt-text-input")
                with gr.Row():
                    model = gr.Radio(label="Model", value="DALL-E 3 XL", choices=models_list)
    with gr.Tab("Advanced settings"):
        with gr.Row():
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not appear in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
        with gr.Row():
            steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
        with gr.Row():
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
        with gr.Row():
            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
        with gr.Row():
            strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
        with gr.Row():
            gpt = gr.Checkbox(label="ChatGPT")
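        # When the ChatGPT checkbox is enabled, query() first sends the prompt through the OpenAI prompt-improver
        # branch before generating the image.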
    with gr.Tab("Information"):
        with gr.Row():
            gr.Textbox(label="Prompt template", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
    with gr.Row():
        text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image", elem_id="gallery")
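    # Note: the order of `inputs` below must match query()'s positional parameters
    # (negative_prompt -> is_negative, cfg -> cfg_scale, method -> sampler).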
    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength, gpt], outputs=image_output)
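# show_api=False hides the auto-generated API docs page; share=False keeps the app from opening a public share link.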
dalle.launch(show_api=False, share=False)