import gradio as gr
import requests
import io
import random
import os
from PIL import Image

# List of available models
list_models = [
    "SDXL 1.0", "SD 1.5", "OpenJourney", "Anything V4.0",
    "Disney Pixar Cartoon", "Pixel Art XL", "Dalle 3 XL",
    "Midjourney V4 XL", "Open Diffusion V1", "SSD 1B",
    "Segmind Vega", "Animagine XL-2.0", "Animagine XL-3.0",
    "OpenDalle", "OpenDalle V1.1", "PlaygroundV2 1024px aesthetic",
]
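
# Note: these display names must match the keys of the model-to-endpoint mapping
# inside generate_txt2img below.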

# Function to generate images from text
def generate_txt2img(current_model, prompt, is_negative="", image_style="None style", steps=50, cfg_scale=7, seed=None):
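    """Query the Hugging Face Inference API for `current_model` and return the generated PIL image.

    `is_negative` holds the negative-prompt text, `image_style` selects a prompt-suffix preset,
    and `seed` falls back to a random value when None.
    """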

    # Map each display name to its Hugging Face Inference API endpoint.
    api_base = "https://api-inference.huggingface.co/models/"
    model_to_repo = {
        "SD 1.5": "runwayml/stable-diffusion-v1-5",
        "SDXL 1.0": "stabilityai/stable-diffusion-xl-base-1.0",
        "OpenJourney": "prompthero/openjourney",
        "Anything V4.0": "xyn-ai/anything-v4.0",
        "Disney Pixar Cartoon": "stablediffusionapi/disney-pixar-cartoon",
        "Pixel Art XL": "nerijs/pixel-art-xl",
        "Dalle 3 XL": "openskyml/dalle-3-xl",
        "Midjourney V4 XL": "openskyml/midjourney-v4-xl",
        "Open Diffusion V1": "openskyml/open-diffusion-v1",
        "SSD 1B": "segmind/SSD-1B",
        "Segmind Vega": "segmind/Segmind-Vega",
        "Animagine XL-2.0": "Linaqruf/animagine-xl-2.0",
        "Animagine XL-3.0": "cagliostrolab/animagine-xl-3.0",
        "OpenDalle": "dataautogpt3/OpenDalle",
        "OpenDalle V1.1": "dataautogpt3/OpenDalleV1.1",
        "PlaygroundV2 1024px aesthetic": "playgroundai/playground-v2-1024px-aesthetic",
    }
    if current_model not in model_to_repo:
        raise gr.Error(f"Unknown model: {current_model}")
    API_URL = api_base + model_to_repo[current_model]

    # Read the Hugging Face API token from the environment (e.g. a Space secret named HF_READ_TOKEN)
    API_TOKEN = os.environ.get("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # Style presets: each style appends a suffix to the prompt and, optionally, to the negative prompt.
    style_prompt_suffix = {
        "None style": ", 8k",
        "Cinematic": ", realistic, detailed, textured, skin, hair, eyes, by Alex Huguet, Mike Hill, Ian Spriggs, JaeCheol Park, Marek Denko",
        "Digital Art": ", faded, vintage, nostalgic, by Jose Villa, Elizabeth Messina, Ryan Brenizer, Jonas Peterson, Jasmine Star",
        "Portrait": ", soft light, sharp, exposure blend, medium shot, bokeh, (hdr:1.4), high contrast, (cinematic, teal and orange:0.85), (muted colors, dim colors, soothing tones:1.3), low saturation, (hyperdetailed:1.2), (noir:0.4), (natural skin texture, hyperrealism, soft light, sharp:1.2)",
    }
    style_negative_suffix = {
        "None style": "",
        "Cinematic": ", abstract, cartoon, stylized",
        "Digital Art": ", sharp, modern, bright",
        "Portrait": "",
    }

    payload = {
        "inputs": prompt + style_prompt_suffix.get(image_style, ""),
        "is_negative": is_negative + style_negative_suffix.get(image_style, ""),
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed is not None else random.randint(-1, 2147483647),
    }

    # Call the Inference API; surface HTTP errors instead of trying to decode a non-image response.
    response = requests.post(API_URL, headers=headers, json=payload, timeout=180)
    response.raise_for_status()
    image = Image.open(io.BytesIO(response.content))
    return image
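
# Example usage (assumes HF_READ_TOKEN is set in the environment):
#   image = generate_txt2img("SDXL 1.0", "A blue jay standing on a basket of macarons", "", "Cinematic")
#   image.save("blue_jay.png")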

# Function to read CSS from file
def read_css_from_file(filename):
    with open(filename, "r") as file:
        return file.read()

# Read CSS from file
css = read_css_from_file("style.css")

PTI_SD_DESCRIPTION = '''
<div id="content_align">
  <span style="color:darkred;font-size:32px;font-weight:bold">  
    Stable Diffusion Models Image Generation
  </span>
</div>
<div id="content_align">
  <span style="color:blue;font-size:16px;font-weight:bold">  
    Generate images directly from text prompts (no parameter tuning required)
  </span>
</div>
<div id="content_align" style="margin-top: 10px;">
</div>
'''

# Prompt examples
prompt_examples = [
    "A blue jay standing on a large basket of rainbow macarons.",
    "A dog looking curiously in the mirror, seeing a cat.",
#    "A robot couple fine dining with Eiffel Tower in the background.",
#    "A chrome-plated duck with a golden beak arguing with an angry turtle in a forest.",
#    "A transparent sculpture of a duck made out of glass. The sculpture is in front of a painting of a landscape.",
#    "A cute corgi lives in a house made out of sushi.",
#    "A single beam of light enter the room from the ceiling. The beam of light is illuminating an easel. On the easel there is a Rembrandt painting of a raccoon.",
#    "A photo of a Corgi dog riding a bike in Times Square. It is wearing sunglasses and a beach hat."
]


# Creating Gradio interface
with gr.Blocks(css=css) as demo:
    gr.Markdown(PTI_SD_DESCRIPTION)
    with gr.Row():   
        with gr.Column():
            current_model = gr.Dropdown(label="Select Model", choices=list_models, value=list_models[1])
            text_prompt = gr.Textbox(label="Input Prompt", placeholder="Example: A blue jay", lines=2)
            # Attach clickable example prompts to the prompt textbox
            gr.Examples(examples=prompt_examples, inputs=text_prompt)
        with gr.Column():
            negative_prompt = gr.Textbox(label="Negative Prompt (optional)", placeholder="Example: blurry, unfocused", lines=2)
            image_style = gr.Dropdown(label="Select Style", choices=["None style", "Cinematic", "Digital Art", "Portrait"], value="None style")
                
        generate_button = gr.Button("Generate Image", variant='primary')
        
    with gr.Row():
        image_output = gr.Image(type="pil", label="Image Output")

    generate_button.click(generate_txt2img, inputs=[current_model, text_prompt, negative_prompt, image_style], outputs=image_output)
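    # steps, cfg_scale, and seed are not exposed in the UI, so the defaults in generate_txt2img are used.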

# Launch the app
demo.launch()