import gradio as gr
import requests
import io
import random
import os
import numpy as np
from PIL import Image
from deep_translator import GoogleTranslator

#----------Start of theme----------
theme = gr.themes.Soft(
    primary_hue="zinc",
    secondary_hue="stone",
    font=[gr.themes.GoogleFont('Kavivanar'), 'system-ui', 'sans-serif'],
    font_mono=[gr.themes.GoogleFont('Source Code Pro'), gr.themes.GoogleFont('Inconsolata'), 'monospace'],
).set(
    body_background_fill='*primary_100',
    body_text_color='secondary_600',
    body_text_color_subdued='*primary_500',
    body_text_weight='500',
    background_fill_primary='*primary_100',
    background_fill_secondary='*secondary_200',
    color_accent='*primary_300',
    border_color_accent_subdued='*primary_400',
    border_color_primary='*primary_400',
    block_background_fill='*primary_300',
    block_border_width='*panel_border_width',
    block_info_text_color='*primary_700',
    block_info_text_size='*text_md',
    panel_background_fill='*primary_200',
    accordion_text_color='*primary_600',
    slider_color='*primary_500',
    table_text_color='*primary_600',
    input_background_fill='*primary_50',
    input_background_fill_focus='*primary_100',
    button_primary_background_fill='*primary_500',
    button_primary_background_fill_hover='*primary_400',
    button_primary_text_color='*primary_50',
    button_primary_text_color_hover='*primary_100',
    button_cancel_background_fill='*primary_500',
    button_cancel_background_fill_hover='*primary_400'
)
#----------End of theme----------

# Hugging Face Inference API credentials and default request settings
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100  # seconds to wait for each API response

def flip_image(x):
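    """Mirror the input image horizontally (used by the "Flip Image" tab)."""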
    return np.fliplr(x)

def clear():
    return None

def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras", seed=-1, strength=100, width=896, height=1152):
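    """Send a text-to-image request to the HF Inference API for the chosen LoRA/model and return (image, seed)."""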
    if prompt == "" or prompt == None:
        return None

    if lora_id.strip() == "" or lora_id == None:
        lora_id = "black-forest-labs/FLUX.1-dev"

    key = random.randint(0, 999)  # short id used only to tag this generation's log lines

    API_URL = "https://api-inference.huggingface.co/models/"+ lora_id.strip()

    # Pick a read token (the list allows rotating between several tokens if more are added)
    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength,
        "parameters": {
            "width": width,  # Pass the width to the API
            "height": height  # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, seed

examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]

css = """
#app-container {
    max-width: 930px;
    margin-left: auto;
    margin-right: auto;
}
".gradio-container {background: url('file=abstract.jpg')}
   
"""
with gr.Blocks(theme=theme, css=css, elem_id="app-container") as app:
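    # Two-tab UI: "Text to Image" (FLUX.1-dev + optional LoRA) and a simple "Flip Image" utility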
    gr.HTML("<center><h6>🎨 FLUX.1-Dev with LoRA 🇬🇧</h6></center>")
    with gr.Tab("Text to Image"):
        with gr.Column(elem_id="app-container"):
            with gr.Row():
                with gr.Column(elem_id="prompt-container"):
                    with gr.Row():
                        text_prompt = gr.Textbox(label="Image Prompt", placeholder="Enter a prompt here", lines=2, show_copy_button = True, elem_id="prompt-text-input")
                    with gr.Row():
                        with gr.Accordion("🎨 Lora trigger words", open=False):
                        		gr.Markdown("""
                                    - **sdxl-realistic**: szn style
                                    - **stylesdxl-cyberpunk**: szn style
                                    - **Flux-Super-Realism-LoRA**: Super Realism
                                    - **surreal-harmony**: Surreal Harmony
                                    - **extremely-detailed**: extremely detailed
                                    - **dark-fantasy**: Dark Fantasy
                                    - **Flux.1-Dev-LoRA-HDR-Realism**: HDR
                                    - **jules-bastien-lepage-style**: Jules Bastien Lepage Style
                                    - **john-singer-sargent-style**: John Singer Sargent Style
                                    - **alphonse-mucha-style**: Alphonse Mucha Style
                                    - **ultra-realistic-illustration**: ultra realistic illustration
                                    - **eye-catching**: eye-catching
                                    - **john-constable-style**: John Constable Style
                                    - **film-noir**: in the style of FLMNR
                                    - **flux-lora-pro-headshot**: PROHEADSHOT
                        		""")                       
                            
                    with gr.Row():
                        custom_lora = gr.Dropdown([" ", "jwu114/lora-sdxl-realistic", "issaccyj/lora-sdxl-cyberpunk", "strangerzonehf/Flux-Super-Realism-LoRA", "hugovntr/flux-schnell-realism", "fofr/sdxl-deep-down", "KappaNeuro/surreal-harmony", "ntc-ai/SDXL-LoRA-slider.extremely-detailed", "prithivMLmods/Canopus-LoRA-Flux-FaceRealism", "KappaNeuro/dark-fantasy", "prithivMLmods/Flux.1-Dev-LoRA-HDR-Realism", "KappaNeuro/jules-bastien-lepage-style", "KappaNeuro/john-singer-sargent-style", "KappaNeuro/alphonse-mucha-style", "ntc-ai/SDXL-LoRA-slider.ultra-realistic-illustration", "ntc-ai/SDXL-LoRA-slider.eye-catching", "KappaNeuro/john-constable-style", "dvyio/flux-lora-film-noir", "dvyio/flux-lora-pro-headshot"], label="Custom LoRA",)
                    with gr.Row():
                        with gr.Accordion("⚙️ Advanced Settings", open=False, elem_id="settings-container"):
                            negative_prompt = gr.Textbox(label="Negative Prompt", lines=5, placeholder="What should not be in the image", value=" Bad anatomy, Bad proportions, Deformed, Disconnected limbs, Disfigured, Extra arms, Extra limbs, Extra hands, Fused fingers, Gross proportions, Long neck, Malformed limbs, Mutated, Mutated hands, Mutated limbs, Missing arms, Missing fingers, Poorly drawn hands, Poorly drawn face ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, deformed, body out of frame, bad anatomy, watermark, signature, cut off, low contrast, underexposed, overexposed, bad art, beginner, amateur, distorted face, blurry, draft, grainy, text, logo, watermark, banner, extra digits, signature, unprompted-nsfw")
                            with gr.Row():
                                width = gr.Slider(label="Image Width", value=896, minimum=64, maximum=1216, step=32)
                                height = gr.Slider(label="Image Height", value=1152, minimum=64, maximum=1216, step=32)
                            steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
                            cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                            method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "DEIS", "LMS", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "Euler", "Euler CFG PP", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "DDIM", "PLMS", "UniPC", "UniPC BH2"])
                            strength = gr.Slider(label="Prompt Strength", value=100, minimum=0, maximum=100, step=1)
                            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                    with gr.Row():
                        with gr.Accordion("🫘Seed", open=False):
                            seed_output = gr.Textbox(label="Seed Used", show_copy_button = True, elem_id="seed-output")
                            
            with gr.Row():
                text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
                clr_button = gr.Button("Clear Prompt", variant="primary", elem_id="clear_button")
                clr_button.click(lambda: gr.Textbox(value=""), None, text_prompt)
                
            with gr.Row():
                image_output = gr.Image(type="pil", label="Image Output", format="png", elem_id="gallery")
            with gr.Row():
                clear_btn = gr.Button(value="Clear Image", variant="primary", elem_id="clear_button")
                clear_btn.click(clear, inputs=[], outputs=[image_output])
    
            gr.Examples(
                examples = examples,
                inputs = [text_prompt],
            )
            
    text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])

    with gr.Tab("Flip Image"):
        with gr.Row():
            flip_input = gr.Image()
            flip_output = gr.Image(format="png")
        with gr.Row():
            flip_button = gr.Button("Run", variant='primary')
            flip_button.click(flip_image, inputs=flip_input, outputs=flip_output, concurrency_limit=None)
            
app.queue()  # set up the request queue with default parameters

if __name__ == "__main__":
    app.launch(show_api=False, share=False)