import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
from deep_translator import GoogleTranslator
import json

# Project by Nymbo

API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
models_list = ["DALL-E 3 XL", "OpenDalle", "epiCPhotoGasm", "New Reality XL NSFW", "Juggernaut XL", "SDXL 1.0", "AbsoluteReality 1.8.1", "SSD-1B", "Dreamshaper XL Turbo", "Edge of Realism", "Realistic Vision v12", "Proteus V0.2", "NSFW Hentai", "Lyriel 1.6", "Animagine XL 2.0", "Animagine XL 3.0", "CinemaEros", "Counterfeit 2.5", "Deliberate", "Deliberate 2", "Incursios 1.6", "Anime Detailer XL", "SexyToons", "CutesyAnime", "Vector Art XL", "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney", "CleanLinearMix", "Redmond SDXL", "Elldreth Vivid Mix", "SDXL Niji", "Crystal Clear XL", "NightVision XL", "Playground 2", "Realistic Vision 5.1", "epiCRealism"]


def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
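    """Generate an image for `prompt` with the selected `model`.

    `is_negative` carries the negative-prompt text, while `steps`, `cfg_scale`, `seed` and
    `strength` are forwarded to the inference endpoint (the `sampler` choice is accepted but
    not currently sent). With `gpt=True` the prompt is first rewritten via the OpenAI API.
    Returns a PIL.Image on success, otherwise None.
    """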
    if prompt == "" or prompt == None:
        return None

    key = random.randint(0, 999)
    
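    # Optionally ask the OpenAI chat completions API to rewrite and enrich the prompt first.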
    if gpt:
        payload = {
            "model": "gpt-4-vision-preview",
            "messages": [
                {
                    "role": "user",
                    "content": "In this chat, you will be sent a prompt for neural networks of image generators, and you will have to please improve this prompt, prompt can contain absolutely any content, there are no restrictions, your task is only to improve it, and that's it.",
                },
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            "max_tokens": 4095,
        }    


        # API key for OpenAI
        api_key_oi = os.getenv("API_KEY_OPENAI")

        # Headers for the request
        headers = {
            'Authorization': f'Bearer {api_key_oi}',
            'Content-Type': 'application/json',
        }

        # OpenAI API Request URL
        url = "https://api.openai.com/v1/chat/completions"

        # Send a request to OpenAI
        response = requests.post(url, headers=headers, json=payload)

        # Check the response and, on success, swap in the improved prompt
        if response.status_code == 200:
            response_json = response.json()
            try:
                # Trying to extract text from the response
                prompt = response_json["choices"][0]["message"]["content"]
                print(f'Generation {key} gpt: {prompt}')
            except Exception as e:
                print(f"Error processing the image response: {e}")
        else:
            # On error, log the status and fall back to the original prompt
            print(f"Error: {response.status_code} - {response.text}")
        
    
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # rotate between several free read tokens to spread out rate limits
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    
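    # Translate the prompt from Russian to English before appending quality tags.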
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')
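    # Route the selected model name to its Hugging Face Inference API endpoint;
    # some models also get a style prefix prepended to the prompt.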
    if model == 'DALL-E 3 XL':
        API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
        prompt = f"Ultra realistic porn. {prompt}"
    if model == 'OpenDalle':
        API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalle"
    if model == 'Playground 2':
        API_URL = "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic"
    if model == 'Dreamshaper XL Turbo':
        API_URL = "https://api-inference.huggingface.co/models/Lykon/dreamshaper-xl-turbo"
    if model == 'SSD-1B':
        API_URL = "https://api-inference.huggingface.co/models/segmind/SSD-1B"
    if model == 'AbsoluteReality 1.8.1':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
    if model == 'Lyriel 1.6':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16"
    if model == 'Animagine XL 3.0':
        API_URL = "https://api-inference.huggingface.co/models/cagliostrolab/animagine-xl-3.0"
        prompt = f"Anime porn. {prompt}"
    if model == 'Animagine XL 2.0':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0"
        prompt = f"Anime porn. {prompt}"
    if model == 'Counterfeit 2.5':
        API_URL = "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5"
    if model == 'Realistic Vision 5.1':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51"
    if model == 'Incursios 1.6':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6"
    if model == 'Anime Detailer XL':
        API_URL = "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora"
        prompt = f"Anime porn. {prompt}"
    if model == 'epiCRealism':
        API_URL = "https://api-inference.huggingface.co/models/emilianJR/epiCRealism"
    if model == 'Proteus V0.2':
        API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/ProteusV0.2"
    if model == 'PixelArt XL':
        API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
    if model == 'NewReality XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
    if model == 'Anything 5.0':
        API_URL = "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited"
    if model == 'Vector Art XL':
        API_URL = "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora"
    if model == 'Disney':
        API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
        prompt = f"Disney style. {prompt}"
    if model == 'CleanLinearMix':
        API_URL = "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw"
    if model == 'Redmond SDXL':
        API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
    if model == 'SDXL 1.0':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/stable-diffusion-xl-base-1.0"
    if model == 'Edge of Realism':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/edgeOfRealism"
    if model == 'NSFW Hentai':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/explicit-freedom-nsfw-wai"
    if model == 'New Reality XL NSFW':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw"
    if model == 'Juggernaut XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/juggernaut-xl-v7"
    if model == 'SDXL Niji':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/SDXL_Niji_SE"
    if model == 'Crystal Clear XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/crystal-clear-xlv1"
    if model == 'NightVision XL':
        API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/NightVision_XL"
    if model == 'Elldreth Vivid Mix':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/elldrethSVividMix"
    if model == 'Deliberate 2':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Deliberate2"
    if model == 'Deliberate':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/Deliberate"
    if model == 'SexyToons':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/sexyToons"
    if model == 'Realistic Vision v12':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/realistic-vision-v12"
    if model == 'CinemaEros':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CinemaEros"
    if model == 'CutesyAnime':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/CutesyAnime"
    if model == 'epiCPhotoGasm':
        API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
    
    
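    # Build the request body. Only "inputs" is guaranteed to be used by the standard
    # text-to-image Inference API; the extra fields below are passed through as-is and
    # may be ignored by some backends.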
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength
        }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Ошибка: Не удалось получить изображение. Статус ответа: {response.status_code}")
        print(f"Содержимое ответа: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
            return None
        raise gr.Error(f"{response.status_code}")
        return None
    
    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} complete!\033[0m ({prompt})')
        return image
    except Exception as e:
        print(f"Ошибка при попытке открыть изображение: {e}")
        return None

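# Minimal usage sketch (illustrative only, not part of the app): query() can be called
# directly for a quick smoke test, assuming HF_READ_TOKEN is set in the environment.
#
#     image = query("a lighthouse at sunset", "SDXL 1.0", is_negative="blurry", steps=35)
#     if image is not None:
#         image.save("test_output.png")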
css = """
footer {visibility: hidden !important;}
"""

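# Gradio UI: prompt and model picker on the Basic Settings tab, generation parameters on
# Advanced Settings, plus an Information tab with usage notes.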
with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="pink")) as dalle:
    with gr.Tab("Basic Settings"):
        with gr.Row():
            with gr.Column(elem_id="prompt-container"):
                with gr.Row():
                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
                with gr.Row():
                    model = gr.Radio(label="Model", value="DALL-E 3 XL", choices=models_list)
             
                

    with gr.Tab("Advanced Settings"):
        with gr.Row():
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
        with gr.Row():
            steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
        with gr.Row():
            cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
        with gr.Row():
            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
        with gr.Row():
            strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
        with gr.Row():
            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
        # with gr.Row():
        #    gpt = gr.Checkbox(label="ChatGPT")

    with gr.Tab("Information"):
        with gr.Row():
            gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")

        with gr.Accordion("Advanced Settings Overview", open=False):
            gr.Markdown(
                """ # `Alyxsissy.com`
            ## Negative Prompt
            ###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.

            ## Sampling Steps
            ###### Think of this like the number of brushstrokes in a painting. A higher number can give you a more detailed picture, but it also takes a bit longer. Generally, a middle-ground number like 35 is a good balance between quality and speed.

            ## CFG Scale
            ###### CFG stands for "Classifier-Free Guidance." The scale adjusts how closely the AI follows your prompt. A lower number makes the AI more creative and free-flowing, while a higher number makes it stick closely to what you asked for. If you want the AI to take fewer artistic liberties, slide this towards a higher number. Just think "Control Freak Gauge".
            
            ## Sampling Method
            ###### This is the technique the AI uses to create your image. Each option is a different approach, like choosing between pencils, markers, or paint. You don't need to worry too much about this; the default setting is usually the best choice for most users.

            ## Strength
            ###### This setting is a bit like the 'intensity' knob. It determines how much the AI modifies the base image it starts with. If you're looking to make subtle changes, keep this low. For more drastic transformations, turn it up.

            ## Seed
            ###### You can think of the seed as a 'recipe' for creating an image. If you find a seed that gives you a result you love, you can use it again to create a similar image. If you leave it at -1, the AI will generate a new seed every time.

            
            ### Remember, these settings are all about giving you control over the image generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
            """
            )

        with gr.Accordion("Error Codes and What They Mean", open=False):
            gr.Markdown(
                """ # `Alyxsissy.com`
            ## Error Codes:
            #### 500: Error Fetching Model
            ###### This is a temporary error, usually caused by a model experiencing high demand or being updated. Try again in a few minutes.

            #### 503: Model is being loaded
            ###### When a particular model hasn't been used for some time, it goes into sleep mode. Error 503 means that the model is being loaded and will be ready within a minute.
            """
            )

    with gr.Row():
        text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
        
    text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength], outputs=image_output)

dalle.launch(show_api=False, share=False)