Nech-C committed
Commit ce1a537 · 1 Parent(s): 3ef0357

feat: enhance UI

Files changed (6):
  1. .python-version +1 -0
  2. app.py +53 -39
  3. makefile +4 -0
  4. mock.py +16 -0
  5. pyproject.toml +15 -0
  6. uv.lock +0 -0
.python-version ADDED
@@ -0,0 +1 @@
+3.10
app.py CHANGED
@@ -6,27 +6,32 @@ import spaces
 from transformers import CLIPTokenizer
 
 import torch
-from diffusers import StableDiffusionXLPipeline
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "Ine007/waiNSFWIllustrious_v140"
-
-torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
-
-pipe = StableDiffusionXLPipeline.from_pretrained(
-    model_repo_id,
-    torch_dtype=torch_dtype,
-    use_safetensors=True,
-    add_watermarker=None,
-).to(device)
-tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
-
-if torch.cuda.is_available():
-    torch.backends.cuda.matmul.allow_tf32 = True
+DEV_MODE = os.getenv("DEV_MODE", "0") == "1"
+if DEV_MODE:
+    from mock import MockPipe
+    pipe = MockPipe()
+    device = "cpu"
+else:
+    from diffusers import StableDiffusionXLPipeline
+    device = "cuda"
+    model_repo_id = "Ine007/waiNSFWIllustrious_v140"
+
+    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+    pipe = StableDiffusionXLPipeline.from_pretrained(
+        model_repo_id,
+        torch_dtype=torch_dtype,
+        use_safetensors=True,
+        add_watermarker=None,
+    ).to(device)
+
+    if torch.cuda.is_available():
+        torch.backends.cuda.matmul.allow_tf32 = True
 
+tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
+MAX_IMAGE_SIZE = 1344
 
 # ---- helpers ----
 def apply_preset(preset):
@@ -37,7 +42,7 @@ def apply_preset(preset):
         "1152×896 (landscape)": (1152, 896),
         "768×1344 (portrait, lighter)": (768, 1344),
     }
-    return mapping.get(preset, (768, 768))
+    return mapping.get(preset, (1024, 768))
 
 @spaces.GPU
 def infer(
@@ -120,11 +125,11 @@ examples = [
 ]
 
 css = """
-#col-container { margin: 0 auto; max-width: 1100px; width: 100%; padding: 0 12px; }
+#col-container { margin: 0 auto; max-width: 1250px; width: 100%; padding: 0 12px; }
 #left-col { position: sticky; top: 12px; align-self: start; }
 
 /* responsive image (FIT view) */
-#result_fit img { max-height: 85vh; width: auto; height: auto; }
+#result_fit img { max-height: 95vh; width: auto; height: auto; }
 
 /* 'Hide controls' toggle only visible on small screens */
 #hide_controls_row { display: none; }
@@ -138,21 +143,20 @@ with gr.Blocks(css=css) as demo:
     history_state = gr.State([])
 
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("# SDXL Text-to-Image (waiNSFWIllustrious_v140)")
+
 
         with gr.Row():
             # LEFT: controls
             with gr.Column(scale=1, elem_id="left-col"):
+                gr.Markdown("# SDXL Text-to-Image (waiNSFWIllustrious_v140)")
                 with gr.Row(elem_id="hide_controls_row"):
                     hide_controls_cb = gr.Checkbox(label="Hide controls (mobile)", value=False)
                 with gr.Group(visible=True) as controls_group:
                     prompt = gr.Text(
                         label="Prompt",
-                        show_label=False,
                         lines=2,
                         max_lines=6,
                         placeholder="Enter your prompt",
-                        container=False,
                         scale=1,
                         min_width=0,
                         autofocus=True,
@@ -161,17 +165,34 @@
                         label="Negative prompt",
                         max_lines=1,
                         placeholder="Enter a negative prompt",
-                        value="blurry, low quality, watermark",
+                        value="blurry, low quality, watermark, monochrome, text",
+                    )
+                    # generations = gr.Slider(
+                    #     label="Generations",
+                    #     minimum=1,
+                    #     maximum=4,
+                    #     step=1,
+                    #     value=1,
+                    #     info="Generates images one-by-one to avoid OOM."
+                    # )
+                    generations = gr.Slider(
+                        label="Generations",
+                        maximum=5,
+                        minimum=1,
+                        step=1,
+                        value=1,
+                        info="Control how many images are generated sequentially.",
+                    )
+                    no_rescale_cb = gr.Checkbox(
+                        label="Do not rescale to fit screen",
+                        value=False,
+                        info="Uncheck = fit preview to screen (default).",
+                        visible=True
                     )
                     run_button = gr.Button("Run", variant="primary")
 
             # RIGHT: image + toggle
             with gr.Column(scale=2):
-                no_rescale_cb = gr.Checkbox(
-                    label="Do not rescale to fit screen",
-                    value=False,
-                    info="Uncheck = fit preview to screen (default).",
-                )
                 # two image views: FIT (responsive) and RAW (no scaling)
                 result_fit = gr.Image(label="Result", show_label=False, elem_id="result_fit", visible=True)
                 result_raw = gr.Image(label="Result (original size)", show_label=False, visible=False)
@@ -181,27 +202,20 @@
                 with gr.Row():
                     size_preset = gr.Dropdown(
                         ["768×768 (square)", "1024×1024", "832×1216 (portrait)", "1152×896 (landscape)", "768×1344 (portrait, lighter)"],
-                        value="768×768 (square)",
+                        value="1024×1024",
                         label="Size preset",
                     )
-                    generations = gr.Slider(
-                        label="Generations",
-                        minimum=1,
-                        maximum=4,
-                        step=1,
-                        value=1,
-                        info="Generates images one-by-one to avoid OOM."
-                    )
+
 
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
                 with gr.Row():
-                    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
-                    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768)
+                    width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
+                    height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
 
                 with gr.Row():
-                    guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=12.0, step=0.1, value=5.5)
+                    guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=12.0, step=0.1, value=6)
                     num_inference_steps = gr.Slider(label="Number of inference steps", minimum=5, maximum=75, step=1, value=25)
 
             gr.Examples(examples=examples, inputs=[prompt])
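Note on the DEV_MODE switch: MockPipe mirrors the call signature and output shape of the diffusers pipeline, so the rest of app.py can drive either one unchanged. A minimal sketch of that shared calling convention (the prompt text and output filename are illustrative; the seeded torch.Generator is standard diffusers practice, and the actual body of infer is outside this diff):

import torch
from mock import MockPipe

pipe = MockPipe()  # stand-in for StableDiffusionXLPipeline when DEV_MODE=1

generator = torch.Generator("cpu").manual_seed(42)  # reproducible seed on the real pipe
out = pipe(
    prompt="a lighthouse at dusk",
    negative_prompt="blurry, low quality, watermark, monochrome, text",
    guidance_scale=6.0,
    num_inference_steps=25,
    width=1024,
    height=1024,
    generator=generator,
)
out.images[0].save("preview.png")  # MockOut exposes .images like the diffusers output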
makefile ADDED
@@ -0,0 +1,4 @@
+export DEV_MODE=1
+
+dev-gr:
+	uv run gradio app.py
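Because the export sits at the top level of the makefile, it applies to every recipe, so `make dev-gr` launches the app on the mock path. One detail of the gate in app.py worth noting: only the literal string "1" enables dev mode. A quick sketch of that behavior (a hypothetical check, not part of this commit):

import os

# app.py treats anything other than the literal "1" as production mode:
for value in ("1", "0", "", "true"):
    os.environ["DEV_MODE"] = value
    mode = "mock" if os.getenv("DEV_MODE", "0") == "1" else "sdxl"
    print(repr(value), "->", mode)  # only "1" selects the mock pipe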
mock.py ADDED
@@ -0,0 +1,16 @@
+from PIL import Image, ImageDraw, ImageFont
+
+class MockOut:
+    def __init__(self, img): self.images = [img]
+
+class MockPipe:
+    def __call__(self, prompt, negative_prompt=None, guidance_scale=6.0,
+                 num_inference_steps=25, width=1024, height=768, generator=None):
+
+        img = Image.new("RGB", (int(width), int(height)), (230, 230, 230))
+        draw = ImageDraw.Draw(img)
+        text = (prompt or "").strip()[:160] or "MOCK IMAGE"
+
+        draw.rectangle([(10, 10), (int(width)-10, 90)], outline=(0,0,0), width=2)
+        draw.text((20, 20), f"[MOCK]\n{text}", fill=(0,0,0))
+        return MockOut(img)
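MockOut only needs to duck-type the one attribute the diffusers output also exposes, .images. A quick sanity check of that contract, useful when iterating on the UI layout (assumes only Pillow is installed, per pyproject.toml; the filename is illustrative):

from mock import MockPipe

out = MockPipe()(prompt="layout test", width=832, height=1216)
img = out.images[0]
assert img.size == (832, 1216)   # the mock honors the requested dimensions
img.save("layout_test.png")      # grey canvas with the prompt rendered in a box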
pyproject.toml ADDED
@@ -0,0 +1,15 @@
+[project]
+name = "wainsfwillustrious-v140"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "diffusers>=0.34.0",
+    "gradio>=5.42.0",
+    "pillow>=11.3.0",
+    "safetensors>=0.6.2",
+    "spaces>=0.40.0",
+    "torch>=2.8.0",
+    "transformers>=4.55.0",
+]
uv.lock ADDED
The diff for this file is too large to render. See raw diff