shweaung committed • b8e8a65
1 Parent(s): 451e2fe
Update app.py
app.py
CHANGED
@@ -3,10 +3,8 @@ import requests
 import io
 import random
 import os
-import time
 from PIL import Image
 from deep_translator import GoogleTranslator
-import json
 
 API_TOKEN = os.getenv("HF_READ_TOKEN")
 headers = {"Authorization": f"Bearer {API_TOKEN}"}
@@ -25,22 +23,25 @@ article_text = """
 </div>
 """
 
-def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1, width=1024, height=1024):
-    if
+def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1, width=1024, height=1024, output_format="PNG"):
+    if prompt == "" or prompt == None:
         return None
 
-
+    if lora_id.strip() == "" or lora_id == None:
+        lora_id = "black-forest-labs/FLUX.1-dev"
+
     key = random.randint(0, 999)
 
-    API_URL =
+    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
 
-
-
+    API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
+    headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
 
     # If seed is -1, generate a random seed and use it
-
+    if randomize_seed:
+        seed = random.randint(1, 4294967296)
 
     payload = {
         "inputs": prompt,
@@ -55,7 +56,8 @@ def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1
 
     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
     if response.status_code != 200:
-        print(f"Error: Failed to get image.
+        print(f"Error: Failed to get image. Response status: {response.status_code}")
+        print(f"Response content: {response.text}")
         if response.status_code == 503:
             raise gr.Error(f"{response.status_code} : The model is being loaded")
         raise gr.Error(f"{response.status_code}")
@@ -64,7 +66,12 @@ def query(lora_id, prompt, steps=28, cfg_scale=3.5, randomize_seed=True, seed=-1
         image_bytes = response.content
         image = Image.open(io.BytesIO(image_bytes))
         print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
-
+
+        # Convert to specified format (JPEG or PNG)
+        img_byte_arr = io.BytesIO()
+        image.save(img_byte_arr, format=output_format)
+        img_byte_arr.seek(0)
+        return Image.open(img_byte_arr), seed, seed
     except Exception as e:
         print(f"Error when trying to open the image: {e}")
         return None
@@ -89,23 +96,36 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
     with gr.Column(elem_id="app-container"):
         with gr.Row():
            with gr.Column(elem_id="prompt-container"):
-
-
-                with gr.
-
-
-
-
-
-
-
-
-
-
+                with gr.Row():
+                    text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here ( English ααα―αα¬αα±αΈααα―α·ααα«αααΊ ) ", lines=2, elem_id="prompt-text-input")
+                with gr.Row():
+                    custom_lora = gr.Textbox(label="Custom Model", info="Model path (Walone LoRA Library ααΎα¬ Model path αα»α¬αΈαααα―ααΊαα«αααΊ)", placeholder="shweaung/mawc-cc")
+                with gr.Row():
+                    with gr.Accordion("Advanced Settings", open=False):
+                        with gr.Row():
+                            width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=8)
+                            height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=8)
+                            seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=4294967296, step=1)
+                            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                        with gr.Row():
+                            steps = gr.Slider(label="Sampling steps", value=28, minimum=1, maximum=100, step=1)
+                            cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
+                            output_format = gr.Dropdown(label="Output Format", choices=["JPEG", "PNG"], value="PNG")
+
+        with gr.Row():
+            text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
+        with gr.Row():
+            image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
+        with gr.Row():
+            seed_output = gr.Textbox(label="Seed Used", show_copy_button=True, elem_id="seed-output")
+
         gr.Markdown(article_text)
 
-        gr.Examples(
+        gr.Examples(
+            examples=examples,
+            inputs=[text_prompt],
+        )
 
-    text_button.click(query, inputs=[custom_lora, text_prompt, steps, cfg, randomize_seed, seed, width, height], outputs=[image_output, seed_output, seed])
+    text_button.click(query, inputs=[custom_lora, text_prompt, steps, cfg, randomize_seed, seed, width, height, output_format], outputs=[image_output, seed_output, seed])
 
 app.launch(show_api=False, share=True)
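For reference, here is a minimal standalone sketch of the request path the updated query() now follows: fall back to black-forest-labs/FLUX.1-dev when no custom LoRA path is given, build the serverless Inference API URL from the model id, POST the prompt with the HF_READ_TOKEN, and decode the returned bytes with PIL. The helper name, the fixed timeout value, and the payload trimmed down to "inputs" are illustrative assumptions; in the committed code the payload also carries steps, cfg_scale, seed, width and height, which sit outside these hunks.

import io
import os

import requests
from PIL import Image


def generate_image(lora_id: str, prompt: str) -> Image.Image:
    # Hypothetical helper mirroring the committed query() flow; not part of app.py.
    # Fall back to the base FLUX model when no custom LoRA path is supplied.
    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"

    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

    # Only "inputs" is visible in the diff; the real payload also includes
    # the generation parameters (steps, cfg_scale, seed, width, height).
    payload = {"inputs": prompt}

    response = requests.post(API_URL, headers=headers, json=payload, timeout=100)  # timeout value assumed
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))

# Hypothetical usage: generate_image("shweaung/mawc-cc", "a wooden house by a river")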
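The other user-facing addition is the Output Format dropdown. Below is a small sketch of that post-processing step under an assumed helper name; in app.py the same logic runs inline at the end of query(), which returns the image plus the seed twice so Gradio can fill the "Seed Used" textbox and write the value back to the seed slider.

import io

from PIL import Image


def finalize(image: Image.Image, seed: int, output_format: str = "PNG"):
    # Hypothetical helper; in the commit this logic runs inline inside query().
    # Re-encode the generated image in memory as JPEG or PNG.
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format=output_format)
    img_byte_arr.seek(0)
    # The image is returned with the seed twice: one copy feeds the
    # "Seed Used" textbox, the other updates the seed slider.
    return Image.open(img_byte_arr), seed, seed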