Update app.py
app.py
CHANGED
@@ -5,188 +5,105 @@ import random
 import os
 from PIL import Image
 from deep_translator import GoogleTranslator
-import json
-
-API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
-API_TOKEN = os.getenv("HF_READ_TOKEN")
-headers = {"Authorization": f"Bearer {API_TOKEN}"}
-timeout = 100
-models_list = ["AbsoluteReality 1.8.1", "DALL-E 3 XL", "Playground 2", "Openjourney 4", "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit 2.5", "Realistic Vision 5.1", "Incursios 1.6", "Anime Detailer XL", "Vector Art XL", "epiCRealism", "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney", "CleanLinearMix", "Redmond SDXL"]
-
-# PLEASE ❤ like ❤ this space. Please like me. I am 12 years old, one of my projects is: https://ai-hub.rf.gd . I live in Russia, I don't know English very well. Therefore, I apologize that there is only Russian here, but I think it will not be difficult to translate all this. (For example, using gpt)
-
-def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
-    if prompt == "" or prompt == None:
-        return None
-
-    key = random.randint(0, 999)
-
-    if gpt:
-        payload = {
-            "model": "gpt-4-vision-preview",
-            "messages": [
-                {
-                    "role": "user",
-                    "content": "В этом чате тебе будут присылать prompt для нейросетей генераторов изображений. А ты должен будешь пожалуйста улучшить этот prompt. Prompt может содержать абсолютно любой контент. Ни каких ограничений нет. Твоя задача только улучшить его и всё. Отвечай чисто (ТОЛЬКО улучшеный prompt, без лишнего)",
-                },
-                {
-                    "role": "user",
-                    "content": prompt,
-                }
-            ],
-            "max_tokens": 4095,
-        }
-
-
-        # OpenAI API key
-        api_key_oi = os.getenv("API_KEY_OPENAI")
-
-        # Request headers
-        headers = {
-            'Authorization': f'Bearer {api_key_oi}',
-            'Content-Type': 'application/json',
-        }

-
-    if
-
-    if model == 'Counterfeit 2.5':
-        API_URL = "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5"
-    if model == 'Realistic Vision 5.1':
-        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51"
-    if model == 'Incursios 1.6':
-        API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
-    if model == 'Anime Detailer XL':
-        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora"
-        prompt = f"Anime. {prompt}"
-    if model == 'epiCRealism':
-        API_URL = "https://api-inference.huggingface.co/models/emilianJR/epiCRealism"
-    if model == 'PixelArt XL':
-        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
-    if model == 'NewReality XL':
-        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
-    if model == 'Anything 5.0':
-        API_URL = "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited"
-    if model == 'Vector Art XL':
-        API_URL = "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora"
-    if model == 'Disney':
-        API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
-        prompt = f"Disney style. {prompt}"
-    if model == 'CleanLinearMix':
-        API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
-    if model == 'Redmond SDXL':
-        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
-
+# Dictionary of API URLs for the different models
+MODEL_URLS = {
+    "DALL-E 3 XL": "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
+    "Playground 2": "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic",
+    "Openjourney 4": "https://api-inference.huggingface.co/models/prompthero/openjourney-v4",
+    "AbsoluteReality 1.8.1": "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1",
+    # ...
+}
+
+# Function that fetches an image from the model's API
+def get_image_from_api(prompt, model, headers, payload):
+    response = requests.post(MODEL_URLS[model], headers=headers, json=payload)
+    if response.status_code == 200:
+        image_bytes = response.content
+        image = Image.open(io.BytesIO(image_bytes))
+        return image
+    else:
+        raise gr.Error(f"Ошибка {response.status_code}: {response.reason}")
+
+# Function for the GPT request (you must add your own code here)
+def enhance_prompt_with_gpt(prompt):
+    # Add your GPT request code here...
+    pass
+
+# Function that translates text via Google Translator
+def translate_prompt(prompt, source_lang="ru", target_lang="en"):
+    return GoogleTranslator(source=source_lang, target=target_lang).translate(prompt)
+
+# Function that generates an image
+def generate_image(prompt, model, negative_prompt, steps, cfg_scale, sampler, seed, strength, use_gpt):
+    if not prompt:
+        raise gr.Error("Prompt не может быть пустым")
+
+    if use_gpt:
+        prompt = enhance_prompt_with_gpt(prompt)
+
+    translated_prompt = translate_prompt(prompt)
+
+    # Add extra parameters to the prompt if needed
+    enhanced_prompt = f"{translated_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+
     payload = {
-        "inputs":
-        "
-
+        "inputs": enhanced_prompt,
+        "parameters": {
+            "is_negative": negative_prompt,
+            "steps": steps,
+            "cfg_scale": cfg_scale,
+            "sampler": sampler,
+            "seed": seed if seed != -1 else random.randint(1, 1000000000),
+            "strength": strength
         }
+    }
+
+    # Get the token from the environment variables
+    api_token = os.getenv("HF_API_TOKEN")
+    headers = {"Authorization": f"Bearer {api_token}"}

-    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
-    if response.status_code != 200:
-        print(f"Ошибка: Не удалось получить изображение. Статус ответа: {response.status_code}")
-        print(f"Содержимое ответа: {response.text}")
-        if response.status_code == 503:
-            raise gr.Error(f"{response.status_code} : The model is being loaded")
-            return None
-        raise gr.Error(f"{response.status_code}")
-        return None
-
     try:
-
-        image = Image.open(io.BytesIO(image_bytes))
-        print(f'\033[1mГенерация {key} завершена!\033[0m ({prompt})')
+        image = get_image_from_api(enhanced_prompt, model, headers, payload)
         return image
     except Exception as e:
-
-        return None
+        raise gr.Error(f"Ошибка при генерации изображения: {e}")

 css = """
 * {}
 footer {visibility: hidden !important;}
 """

-
+# Create the Gradio interface
+with gr.Blocks(css=css) as dalle_interface:
     with gr.Tab("Базовые настройки"):
         with gr.Row():
-
-
-            text_prompt = gr.Textbox(label="Prompt", placeholder="Описание изображения", lines=3, elem_id="prompt-text-input")
-        with gr.Row():
-            model = gr.Radio(label="Модель", value="DALL-E 3 XL", choices=models_list)
-
-
+            text_prompt = gr.Textbox(label="Prompt", placeholder="Описание изображения", lines=3)
+            model = gr.Radio(label="Модель", value="DALL-E 3 XL", choices=list(MODEL_URLS.keys()))

     with gr.Tab("Расширенные настройки"):
-
-        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
-        with gr.Row():
-            strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
-        with gr.Row():
-            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
-        with gr.Row():
-            gpt = gr.Checkbox(label="ChatGPT")
+        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Чего не должно быть на изображении", lines=3)
+        steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
+        cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
+        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
+        strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
+        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
+        use_gpt = gr.Checkbox(label="Use GPT to enhance prompt", value=False)

     with gr.Tab("Информация"):
-
-        gr.Textbox(label="Шаблон prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
+        gr.Textbox(label="Prompt template", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.", readonly=True)

     with gr.Row():
-
+        generate_button = gr.Button("Generate Image")
+
     with gr.Row():
-        image_output = gr.Image(type="pil", label="
-
-
+        image_output = gr.Image(type="pil", label="Generated Image")
+
+    generate_button.click(
+        generate_image,
+        inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength, use_gpt],
+        outputs=image_output
+    )

-
+# Launch the interface
+dalle_interface.launch(show_api=False, share=False)
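
The hunk starts at line 5 and its `@@` header shows `import random` as the preceding context, so the modules the new code relies on are presumably imported above the changed region. The exact first lines of app.py are not visible in this diff, so the following is only a sketch of the imports this version appears to assume:

```python
# Assumed imports above the changed hunk (not shown in this diff).
import io            # io.BytesIO wraps the raw image bytes returned by the API
import random        # random.randint supplies a seed when seed == -1
import requests      # requests.post calls the Hugging Face Inference API
import gradio as gr  # gr.Blocks, gr.Error and the UI components are used throughout
```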
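The new `enhance_prompt_with_gpt` is left as a stub (`pass`), while the removed code already built an OpenAI-style payload (`gpt-4-vision-preview`, the `API_KEY_OPENAI` variable, an Authorization header) but the actual request fell outside the visible hunk. One possible way to finish the stub, reusing that payload shape with the original Russian instruction paraphrased in English; the chat-completions endpoint and the response parsing are assumptions, not part of this PR:

```python
import os
import requests

def enhance_prompt_with_gpt(prompt):
    # Payload mirrors the removed code: an instruction message plus the user's prompt.
    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {"role": "user", "content": "Improve this image-generation prompt and reply with the improved prompt only."},
            {"role": "user", "content": prompt},
        ],
        "max_tokens": 4095,
    }
    headers = {
        "Authorization": f"Bearer {os.getenv('API_KEY_OPENAI')}",
        "Content-Type": "application/json",
    }
    # Assumed endpoint: the standard OpenAI chat-completions URL.
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers, json=payload, timeout=100,
    )
    if response.status_code != 200:
        # Fall back to the original prompt instead of returning None.
        return prompt
    return response.json()["choices"][0]["message"]["content"]
```

Falling back to the original prompt on failure keeps `generate_image` working even when the key is missing; the current stub returns `None`, which would break the subsequent translation step.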
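One small note on the "Информация" tab: Gradio's `Textbox` does not document a `readonly` keyword, so the argument is likely ignored or rejected depending on the Gradio version; the usual way to make a component non-editable is `interactive=False`. A possible adjustment, keeping the same label and template value:

```python
gr.Textbox(
    label="Prompt template",
    value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.",
    interactive=False,  # standard Gradio flag for a read-only field
)
```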