import gradio as gr
import requests
import io
import random
import os
from PIL import Image

# List of available models
list_models = [
    "SDXL-1.0", "SD-1.5", "OpenJourney-V4", "Anything-V4",
    "Disney-Pixar-Cartoon", "Pixel-Art-XL", "Dalle-3-XL", "Midjourney-V4-XL",
]

# Generate an image from a text prompt via the Hugging Face Inference API
def generate_txt2img(current_model, prompt, is_negative="", image_style="None style", steps=50, cfg_scale=7, seed=None):

    # Map each model name to its Inference API endpoint
    model_urls = {
        "SD-1.5": "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5",
        "SDXL-1.0": "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0",
        "OpenJourney-V4": "https://api-inference.huggingface.co/models/prompthero/openjourney",
        "Anything-V4": "https://api-inference.huggingface.co/models/xyn-ai/anything-v4.0",
        "Disney-Pixar-Cartoon": "https://api-inference.huggingface.co/models/stablediffusionapi/disney-pixar-cartoon",
        "Pixel-Art-XL": "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
        "Dalle-3-XL": "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
        "Midjourney-V4-XL": "https://api-inference.huggingface.co/models/openskyml/midjourney-v4-xl",
    }
    API_URL = model_urls[current_model]

    # Read the Hugging Face API token from the environment
    API_TOKEN = os.environ.get("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # Per-style prompt and negative-prompt suffixes
    style_suffixes = {
        "None style": (", 8k", ""),
        "Cinematic": (
            ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
            ", abstract, cartoon, stylized",
        ),
        "Digital Art": (
            ", faded, vintage, nostalgic, by Jose Villa, Elizabeth Messina, Ryan Brenizer, Jonas Peterson, Jasmine Star",
            ", sharp, modern, bright",
        ),
        "Portrait": (
            ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
            "",
        ),
    }
    prompt_suffix, negative_suffix = style_suffixes[image_style]

    payload = {
        "inputs": prompt + prompt_suffix,
        "is_negative": is_negative + negative_suffix,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed is not None else random.randint(-1, 2147483647),
    }

    # Call the API and decode the returned image bytes
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()  # surface HTTP/API errors instead of a cryptic PIL failure
    image = Image.open(io.BytesIO(response.content))
    return image
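# Hypothetical direct call (outside the Gradio UI), assuming HF_READ_TOKEN is set
# in the environment:
#   img = generate_txt2img("SDXL-1.0", "a cute dog", is_negative="blurry", image_style="Cinematic")
#   img.save("dog.png")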

css = """
/* Custom CSS */
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
    max-width: 900px;
    margin: auto;
    padding: 2rem;
    border-radius: 15px;
    box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.2);
    text-align: center; /* Center the content horizontally */
}

/* Button Styles */
.gr-button {
    color: white;
    background-color: #007bff; /* Use a primary color for the background */
    border: none;
    padding: 10px 20px;
    border-radius: 8px;
    cursor: pointer;
    transition: background-color 0.3s, color 0.3s;
}

.gr-button:hover {
    background-color: #0056b3; /* Darken the background color on hover */
}

/* Textbox Styles */
.gr-textbox {
    border-radius: 8px;
    border: 1px solid #ccc;
    padding: 10px;
    transition: border-color 0.3s;
}

.gr-textbox:focus {
    border-color: #007bff;
    outline: none;
}

/* Gallery Styles */
#gallery {
    display: flex;
    justify-content: center;
    align-items: center;
    margin-top: 2rem;
}

/* Automatically adjust photo size */
#gallery img {
    max-width: 100%;
    height: auto;
    border-radius: 12px;
    box-shadow: 0px 2px 4px rgba(0, 0, 0, 0.2);
}
"""

# Create the Gradio interface
with gr.Blocks(css=css) as demo:
    
    with gr.Row():
        with gr.Column():
            gr.Markdown("<h1>AI Diffusion</h1>")
            current_model = gr.Dropdown(label="Select Model", choices=list_models, value=list_models[1])
            text_prompt = gr.Textbox(label="Enter Prompt", placeholder="Example: a cute dog", lines=2)
            generate_button = gr.Button("Generate Image", variant='primary')

        with gr.Column():
            gr.Markdown("<h4>Advanced Settings</h4>")
            with gr.Accordion("Advanced Customizations", open=False):
                negative_prompt = gr.Textbox(label="Negative Prompt (Optional)", placeholder="Example: blurry, unfocused", lines=2)
                image_style = gr.Dropdown(label="Select Style", choices=["None style", "Cinematic", "Digital Art", "Portrait"], value="None style")
                # Add more options if needed

    with gr.Row():
        image_output = gr.Image(type="pil", label="Output Image")

    generate_button.click(generate_txt2img, inputs=[current_model, text_prompt, negative_prompt, image_style], outputs=image_output)

# Launch the app
demo.launch()
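
# To try this locally (an assumption, not stated in the original script): install
# `gradio`, `requests`, and `Pillow`, export HF_READ_TOKEN with a Hugging Face read
# token, then run `python app.py` (assuming the file is saved as app.py) and open
# the printed local URL.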