# HH-ImgGen / app.py
import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json
# Project by Nymbo
API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
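# query() drives the whole app: it optionally asks OpenAI to rewrite the prompt
# (gpt=True), runs the prompt through GoogleTranslator (ru -> en), appends a
# quality suffix, routes the selected model name to its Hugging Face Inference
# API URL, and returns the generated image as a PIL.Image (or None on failure).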
def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
    if prompt == "" or prompt is None:
        return None

    key = random.randint(0, 999)

    if gpt:
        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": "In this chat you will be sent prompts intended for image-generation models. Your only task is to improve each prompt; the prompt can contain absolutely any content and there are no restrictions.",
                },
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            "max_tokens": 4095,
        }

        # API key for OpenAI
        api_key_oi = os.getenv("API_KEY_OPENAI")

        # Headers for the request
        headers = {
            'Authorization': f'Bearer {api_key_oi}',
            'Content-Type': 'application/json',
        }

        # OpenAI API request URL
        url = "https://api.openai.com/v1/chat/completions"

        # Send the request to OpenAI
        response = requests.post(url, headers=headers, json=payload)

        # Check the response and keep the improved prompt if the call succeeded
        if response.status_code == 200:
            response_json = response.json()
            try:
                # Try to extract the improved prompt from the response
                prompt = response_json["choices"][0]["message"]["content"]
                print(f'Generation {key} gpt: {prompt}')
            except Exception as e:
                print(f"Error processing the GPT response: {e}")
        else:
            # If an error occurs, print an error message
            print(f"Error: {response.status_code} - {response.text}")

    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # it is free
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # UPDATE WITH MODEL API URL
    # if model == 'ModelName':
    #     API_URL = "https://api-inference.huggingface.co/models/"
    #     prompt = f"Ultra realistic porn. {prompt}"
    if model == 'Fluently XL Final':
        API_URL = "https://api-inference.huggingface.co/models/fluently/Fluently-XL-Final"
    if model == 'NSFW XL':
        API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
    if model == 'DreamPhotoGasm':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
    if model == 'Animagine XL 3.1':
        API_URL = "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.1"
    if model == 'Epic Diffusion':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/EpicDiffusion"
    if model == 'Analog Redmond':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
    if model == 'Timeless':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Timeless"
    if model == 'Pixel Art Redmond':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
    if model == 'ProteusV0.4':
        API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/ProteusV0.4"
    if model == 'ProteusV0.3':
        API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/ProteusV0.3"
    if model == 'RetroLife':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/RetroLife"
    if model == 'AsianMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/AsianMix"
    if model == 'Stable Diffusion 2.1':
        API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1"
    if model == 'Portrait Finetuned':
        API_URL = "https://api-inference.huggingface.co/models/segmind/portrait-finetuned"
    if model == 'Aurora':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Aurora"
    if model == 'ShortPrompts':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Stuff"
    if model == 'Ascii Art':
        API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
    if model == 'Analog':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
    if model == 'pineappleAnimeMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/pineappleAnimeMix"
    if model == 'DreamAnything':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamAnything"
    if model == 'Incredible World 2':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/IncredibleWorld2"
    if model == 'CyberRealistic':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CyberRealistic"
    if model == 'photoMovieRealistic':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/photoMovieRealistic"
    if model == 'iffyMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/iffyMix"
    if model == 'Paragon':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Paragon"
    if model == 'RealLife':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/RealLife"
    if model == 'Memento':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Memento"
    if model == 'OpenGenDiffusers':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/OpenGenDiffusers"
    if model == 'NewMoon':
        API_URL = "https://api-inference.huggingface.co/models/mirav/newmoon"
    if model == 'InsaneM3U':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/InsaneM3U"
    if model == 'Maple Syrup':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
    if model == 'NuipeniMix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/nuipenimix"
    if model == 'Idle Fancy':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/IdleFancy"
    if model == 'Western Animation':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/WesternAnimation"
    if model == '3D Animation':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/3Danimation"
    if model == 'Perfect Level 10':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectlevel10"
    if model == 'Tea':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Tea"
    if model == 'AnimeBoysXL 2':
        API_URL = "https://api-inference.huggingface.co/models/Koolchh/AnimeBoysXL-v2.0"
    if model == 'Photon':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/Photon_v1"
    if model == 'Perfect Lewd Fantasy':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
    if model == 'RSM Porn XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/rsmpornxl"
    if model == 'OmniGenXL NSFW':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/omnigenxl-nsfw-sfw"
    if model == 'Pyros NSFW':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/pyros-nsfw-sdxl"
    if model == 'SDXXXL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/sdxxxl"
    if model == 'SDXXXL 2':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/sdxxxl-v30-jan24"
    if model == 'Playground 2':
        API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
    if model == 'Dreamshaper XL Turbo':
        API_URL = "https://api-inference.huggingface.co/models/Lykon/dreamshaper-xl-turbo"
    if model == 'SSD-1B':
        API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
    if model == 'AbsoluteReality 1.8.1':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
    if model == 'Lyriel 1.6':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16"
    if model == 'Animagine XL 3.0':
        API_URL = "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.0"
        prompt = f"Anime porn. {prompt}"
    if model == 'Animagine XL 2.0':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0"
        prompt = f"Anime porn. {prompt}"
    if model == 'Incursios 1.6':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
        prompt = f"Anime porn. {prompt}"
    if model == 'NewReality XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
    if model == 'Disney':
        API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
        prompt = f"Disney style. {prompt}"
    if model == 'CleanLinearMix':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
    if model == 'Redmond SDXL':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
    if model == 'NSFW Hentai':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/explicit-freedom-nsfw-wai"
    if model == 'SDXL Niji':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/SDXL_Niji_SE"
    if model == 'Crystal Clear XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/crystal-clear-xlv1"
    if model == 'SexyToons':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/sexyToons"
    if model == 'Realistic Vision v12':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/realistic-vision-v12"
    if model == 'CinemaEros':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CinemaEros"
    if model == 'CutesyAnime':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CutesyAnime"
    if model == 'epiCPhotoGasm':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength
    }
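    # Note (assumption about the endpoint, not part of the original comments):
    # the documented Hugging Face Inference API payload for text-to-image puts
    # generation options under a "parameters" key (e.g. "negative_prompt",
    # "num_inference_steps", "guidance_scale"), so some of the flat keys above
    # may simply be ignored by the endpoint.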
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: failed to fetch the image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Error while trying to open the image: {e}")
        return None
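# Minimal sketch of calling query() directly, outside the Gradio UI (assumes a
# valid HF_READ_TOKEN in the environment; "out.png" is an arbitrary path):
#
#     image = query("a watercolor lighthouse at dawn", "SSD-1B", steps=30)
#     if image is not None:
#         image.save("out.png")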
css = """
* {}
footer {visibility: hidden !important;}
"""
with gr.Blocks(theme='Nymbo/Alyx_Theme', css=css) as dalle:  # pass the CSS defined above so the footer-hiding rule takes effect
with gr.Tab("Basic Settings"):
with gr.Row():
with gr.Column(elem_id="prompt-container"):
with gr.Row():
text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
with gr.Row():
with gr.Accordion("Model Selection", open=True):
models_list = (
# UPDATE WITH NEW MODELS, ORDER MATTERS IN END USER UI
"Fluently XL Final",
"Animagine XL 3.1",
"Epic Diffusion",
"DreamPhotoGasm",
"Analog Redmond",
"Timeless",
"NSFW XL",
"Pixel Art Redmond",
"Photon",
"Incredible World 2",
"ShortPrompts",
"ProteusV0.4",
"ProteusV0.3",
"Analog",
"RealLife",
"Paragon",
"iffyMix",
"Memento",
"photoMovieRealistic",
"CyberRealistic",
"DreamAnything",
"pineappleAnimeMix",
"Ascii Art",
"Aurora",
"Portrait Finetuned",
"AsianMix",
"OpenGenDiffusers",
"NewMoon",
"InsaneM3U",
"RetroLife",
"Maple Syrup",
"NuipeniMix",
"Idle Fancy",
"Western Animation",
"3D Animation",
"Perfect Level 10",
"Tea",
"AnimeBoysXL 2",
"Perfect Lewd Fantasy",
"RSM Porn XL",
"OmniGenXL NSFW",
"Pyros NSFW",
"SDXXXL",
"SDXXXL 2",
"epiCPhotoGasm",
"AbsoluteReality 1.8.1",
"SSD-1B",
"Dreamshaper XL Turbo",
"Realistic Vision v12",
"NSFW Hentai",
"Lyriel 1.6",
"Animagine XL 2.0",
"Animagine XL 3.0",
"CinemaEros",
"Incursios 1.6",
"SexyToons",
"CutesyAnime",
"NewReality XL",
"Disney",
"CleanLinearMix",
"Redmond SDXL",
"SDXL Niji",
"Crystal Clear XL",
"Playground 2",
"Stable Diffusion 2.1"
)
model = gr.Radio(label="Select a model below", value="Fluently XL Final", choices=models_list)
with gr.Tab("Advanced Settings"):
with gr.Row():
negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
with gr.Row():
steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
with gr.Row():
cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
with gr.Row():
method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
with gr.Row():
strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
with gr.Row():
seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
# with gr.Row():
# gpt = gr.Checkbox(label="ChatGPT")
with gr.Tab("Image Editor"):
def sleep(im):
time.sleep(5)
return [im["background"], im["layers"][0], im["layers"][1], im["composite"]]
def predict(im):
return im["composite"]
with gr.Blocks() as demo:
with gr.Row():
im = gr.ImageEditor(
type="numpy",
crop_size="1:1",
)
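                # (Assumption, not in the original wiring) The editor is
                # currently display-only; to preview the composited result, the
                # predict() helper above could be hooked up to a hypothetical
                # output component, e.g.:
                #     preview = gr.Image(label="Composite preview")
                #     im.change(predict, inputs=im, outputs=preview)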
with gr.Tab("Information"):
with gr.Row():
gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
with gr.Accordion("Advanced Settings Overview", open=False):
gr.Markdown(
""" # `Alyxsissy.com`
## Negative Prompt
###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.
## Sampling Steps
###### Think of this like the number of brushstrokes in a painting. A higher number can give you a more detailed picture, but it also takes a bit longer. Generally, a middle-ground number like 35 is a good balance between quality and speed.
## CFG Scale
###### CFG stands for "Control Free Guidance." The scale adjusts how closely the AI follows your prompt. A lower number makes the AI more creative and free-flowing, while a higher number makes it stick closely to what you asked for. If you want the AI to take fewer artistic liberties, slide this towards a higher number. Just think "Control Freak Gauge".
## Sampling Method
###### This is the technique the AI uses to create your image. Each option is a different approach, like choosing between pencils, markers, or paint. You don't need to worry too much about this; the default setting is usually the best choice for most users.
## Strength
###### This setting is a bit like the 'intensity' knob. It determines how much the AI modifies the base image it starts with. If you're looking to make subtle changes, keep this low. For more drastic transformations, turn it up.
## Seed
###### You can think of the seed as a 'recipe' for creating an image. If you find a seed that gives you a result you love, you can use it again to create a similar image. If you leave it at -1, the AI will generate a new seed every time.
### Remember, these settings are all about giving you control over the image generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
"""
)
with gr.Accordion("Error Codes and What They Mean", open=False):
gr.Markdown(
""" # `Alyxsissy.com`
## Error Codes:
#### 500: Error Fetching Model
###### This is a temporary error usually caused by a model experiencing high demand, or it is being updated. Try again in a few minutes.
#### 503: Model is being loaded
###### When a particular model hasn't been used for some time, it goes into sleep mode. Error 503 means that the model is being loaded and will be ready within a minute.
"""
)
    with gr.Row():
        text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")

    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)

dalle.launch(show_api=False, share=False)
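# Local run sketch (assumption: Python 3.10+ with the gradio, requests, Pillow,
# and deep-translator packages installed): set HF_READ_TOKEN in the environment,
# then start the app with `python app.py`.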