Molbap HF Staff committed on
Commit 09407c4 · 1 Parent(s): c073d08
Files changed (1)
  1. app.py +230 -121
app.py CHANGED
@@ -1,48 +1,76 @@
- import os
- import sys
  import re
- import json
- import time
- import threading
  import subprocess
  from pathlib import Path
 
  import gradio as gr
  import pandas as pd
  import torch
  import spaces
 
- # ---------------------------
- # Markdown rendering (Option A)
- # ---------------------------
-
- def _make_md_markdownit():
-     # Prefer markdown-it-py + mdit-py-plugins if available
      from importlib import import_module
      from markdown_it import MarkdownIt
-     md = MarkdownIt("gfm-like")
 
-     # Version-agnostic plugin shims
-     foot_mod = import_module("mdit_py_plugins.footnote")
-     foot = getattr(foot_mod, "footnote", None) or getattr(foot_mod, "footnote_plugin")
-     md.use(foot)
 
-     tl_mod = import_module("mdit_py_plugins.tasklists")
-     tasklists = getattr(tl_mod, "tasklists", None) or getattr(tl_mod, "tasklists_plugin")
-     md.use(tasklists)
 
-     cont_mod = import_module("mdit_py_plugins.container")
-     container = getattr(cont_mod, "container", None) or getattr(cont_mod, "container_plugin")
      try:
-         md.use(container, "details")
-     except TypeError:
-         md.use(lambda m: container(m, name="details"))
-     return md
-
- def _make_md_pythonmarkdown():
-     # Fallback: Python-Markdown + PyMdown
-     import markdown as md
-     exts = [
          "extra",  # tables + fenced code
          "footnotes",
          "admonition",
@@ -51,13 +79,20 @@ def _make_md_pythonmarkdown():
          "pymdownx.superfences",
          "pymdownx.tasklist",
      ]
-     ext_cfg = {"pymdownx.tasklist": {"custom_checkbox": True}, "toc": {"permalink": True}}
-     return ("python-markdown", exts, ext_cfg, md)
-
- try:
-     _md_engine = ("markdown-it", _make_md_markdownit())
- except Exception:
-     _md_engine = _make_md_pythonmarkdown()
 
  def _obsidian_rewrites(text: str) -> str:
      # 1) Obsidian image embeds: ![[img.png]] -> ![](file=content/img.png)
@@ -83,47 +118,45 @@ def _obsidian_rewrites(text: str) -> str:
      return text
 
 
- def md_to_html(text: str) -> str:
      text = _obsidian_rewrites(text)
-     if _md_engine[0] == "markdown-it":
-         md = _md_engine[1]
-         return md.render(text)
      else:
-         tag, exts, cfg, md = _md_engine
-         return md.markdown(text, extensions=exts, extension_configs=cfg, output_format="html5")
-
- def render_article(md_path: str, inserts: dict[str, callable]):
-     raw = Path(md_path).read_text(encoding="utf-8") if Path(md_path).exists() else f"**Missing article**: `{md_path}`."
-     parts = re.split(r"\{\{([A-Z_]+)\}\}", raw)
-     with gr.Column():
-         for i, part in enumerate(parts):
-             if i % 2 == 0:
-                 # Wrap prose in an article container for scoped CSS
-                 gr.HTML(f'<div class="article">{md_to_html(part)}</div>')
-             else:
-                 (inserts.get(part) or (lambda: gr.HTML(f"<p><em>Unknown insert: {part}</em></p>")))()
-
- def old_render_article(md_path: str, inserts: dict[str, callable]):
-     raw = ""
-     path = Path(md_path)
-     if path.exists():
-         raw = path.read_text(encoding="utf-8")
      else:
-         raw = f"**Missing article**: `{md_path}` not found.\n\nCreate it in your Space repo."
-
      # Split on {{TOKEN}} markers (e.g., {{ALLOC_PLOT}})
-     parts = re.split(r"\{\{([A-Z_]+)\}\}", raw)
      with gr.Column():
-         for i, part in enumerate(parts):
-             if i % 2 == 0:
-                 gr.HTML(md_to_html(part))
              else:
-                 build = inserts.get(part)
-                 if build is None:
-                     gr.HTML(f"<p><em>Unknown insert: {part}</em></p>")
                  else:
-                     build()
 
  # ---------------------------
  # Terminal (safe, simplified)
@@ -187,60 +220,65 @@ def build_attn_vis():
  # Transformers caching allocator warmup (time vs MiB plot)
  # -------------------------------------------------------
 
- from transformers import AutoModelForCausalLM, modeling_utils as MU  # noqa: E402
 
  def _measure_load_timeline(model_id: str, disable_warmup: bool):
      """Measure memory usage during model loading with/without cache warmup."""
-     orig = getattr(MU, "caching_allocator_warmup", None)
-     if disable_warmup and orig is not None:
-         MU.caching_allocator_warmup = lambda *a, **k: None  # type: ignore[attr-defined]
 
      try:
          device = "cuda" if torch.cuda.is_available() else "cpu"
-         tl = []
 
-         def sample(start_t, stop_evt):
-             while not stop_evt.is_set():
                  if device == "cuda":
                      torch.cuda.synchronize()
                      # Use max memory to capture peaks better
-                     alloc = torch.cuda.max_memory_allocated()
                      torch.cuda.reset_peak_memory_stats()
                  else:
-                     alloc = 0
-                 tl.append({"t": time.perf_counter() - start_t, "MiB": alloc / (1024**2)})
                  time.sleep(0.02)  # Sample more frequently
 
          if device == "cuda":
              torch.cuda.empty_cache()
              torch.cuda.reset_peak_memory_stats()
-             initial_mem = torch.cuda.memory_allocated()
          else:
-             initial_mem = 0
 
-         start = time.perf_counter()
-         stop_evt = threading.Event()
-         th = threading.Thread(target=sample, args=(start, stop_evt), daemon=True)
-         th.start()
 
          # Load model with appropriate settings
-         kwargs = {"low_cpu_mem_usage": True}
          if device == "cuda":
-             kwargs.update({
                  "torch_dtype": torch.float16,
                  "device_map": "cuda:0"
              })
 
-         model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
 
-         stop_evt.set()
-         th.join()
 
          # Final memory measurement
          if device == "cuda":
              torch.cuda.synchronize()
-             final_mem = torch.cuda.memory_allocated()
-             tl.append({"t": time.perf_counter() - start, "MiB": final_mem / (1024**2)})
 
          # Clean up
          del model
@@ -248,43 +286,56 @@ def _measure_load_timeline(model_id: str, disable_warmup: bool):
          torch.cuda.empty_cache()
          torch.cuda.ipc_collect()
 
-         return tl
      finally:
-         if orig is not None:
-             MU.caching_allocator_warmup = orig  # restore
 
  @spaces.GPU(duration=240)
- def profile_warmup(model_id: str):
      if not torch.cuda.is_available():
          # Create dummy data for CPU demo
-         import numpy as np
-         t_points = np.linspace(0, 5, 50)
-         base_mem = np.cumsum(np.random.exponential(50, 50))
-         warmup_on = [{"t": t, "MiB": mem, "mode": "warmup ON"} for t, mem in zip(t_points, base_mem * 0.8)]
-         warmup_off = [{"t": t, "MiB": mem, "mode": "warmup OFF"} for t, mem in zip(t_points, base_mem)]
-         return pd.DataFrame(warmup_on + warmup_off)
 
      try:
-         on_data = _measure_load_timeline(model_id, disable_warmup=False)
-         off_data = _measure_load_timeline(model_id, disable_warmup=True)
 
          # Create DataFrame with better labeling
-         rows = [{"t": r["t"], "MiB": r["MiB"], "mode": "🚀 Warmup ON (Optimized)"} for r in on_data] + \
-                [{"t": r["t"], "MiB": r["MiB"], "mode": "📈 Warmup OFF (Standard)"} for r in off_data]
 
-         df = pd.DataFrame(rows)
 
-         # Add summary stats if we have data
-         if len(on_data) > 0 and len(off_data) > 0:
-             on_peak = max(r["MiB"] for r in on_data)
-             off_peak = max(r["MiB"] for r in off_data)
-             savings = ((off_peak - on_peak) / off_peak * 100) if off_peak > 0 else 0
-             print(f"Memory savings: {savings:.1f}% (Peak: {on_peak:.0f} MiB vs {off_peak:.0f} MiB)")
 
-         return df
-     except Exception as e:
-         print(f"Error profiling {model_id}: {e}")
-         # Return empty DataFrame on error
          return pd.DataFrame(columns=["t", "MiB", "mode"])
 
  def build_alloc_plot():
@@ -317,7 +368,7 @@ def build_alloc_plot():
      )
 
      gr.Markdown("**Note**: This demo requires GPU access. The warmup feature reduces peak memory usage during model loading.")
-     go.click(profile_warmup, inputs=[model], outputs=plot)
 
  # ---------------------------
  # Optional FastRTC preview
@@ -335,10 +386,14 @@ def build_fastrtc():
      if not HAS_FASTRTC:
          gr.Markdown("Install `fastrtc` to enable this section.")
          return
      with gr.Group():
          gr.Markdown("Camera loopback using FastRTC WebRTC. Extend with streaming handlers later.")
-         rtc = WebRTC(mode="send-receive", modality="video")
-         rtc.stream(ReplyOnPause(_echo_video), inputs=[rtc], outputs=[rtc], time_limit=60)
 
  # ---------------------------
  # Image display functions
@@ -547,6 +602,60 @@ hr { border: 0; border-top: 1px solid var(--border-color); margin: 2rem 0; }
      margin-bottom: 0.5rem !important;
  }
 
  """
 
  with gr.Blocks(css=CSS, fill_height=True, title="Interactive Blog — Transformers Feature Showcase") as demo:
 
@@ -1,48 +1,76 @@
+ # Standard library imports
  import re
  import subprocess
+ import threading
+ import time
  from pathlib import Path
 
+ # Third-party imports
  import gradio as gr
+ import numpy as np
  import pandas as pd
  import torch
  import spaces
+ from transformers import AutoModelForCausalLM
+ from transformers import modeling_utils as transformers_modeling
 
+ # Optional imports for markdown processing
+ try:
      from importlib import import_module
      from markdown_it import MarkdownIt
+     HAS_MARKDOWN_IT = True
+ except ImportError:
+     HAS_MARKDOWN_IT = False
 
+ try:
+     import markdown
+     HAS_PYTHON_MARKDOWN = True
+ except ImportError:
+     HAS_PYTHON_MARKDOWN = False
 
+ try:
+     from fastrtc import WebRTC, ReplyOnPause
+     HAS_FASTRTC = True
+ except ImportError:
+     HAS_FASTRTC = False
 
+ # ---------------------------
+ # Markdown rendering (Option A)
+ # ---------------------------
+
+ def _create_markdownit_renderer():
+     """Create markdown-it renderer with plugins if available."""
+     if not HAS_MARKDOWN_IT:
+         return None
+
      try:
+         markdown_parser = MarkdownIt("gfm-like")
+
+         # Version-agnostic plugin loading
+         footnote_module = import_module("mdit_py_plugins.footnote")
+         footnote_plugin = getattr(footnote_module, "footnote", None) or getattr(footnote_module, "footnote_plugin")
+         markdown_parser.use(footnote_plugin)
+
+         tasklist_module = import_module("mdit_py_plugins.tasklists")
+         tasklist_plugin = getattr(tasklist_module, "tasklists", None) or getattr(tasklist_module, "tasklists_plugin")
+         markdown_parser.use(tasklist_plugin)
+
+         container_module = import_module("mdit_py_plugins.container")
+         container_plugin = getattr(container_module, "container", None) or getattr(container_module, "container_plugin")
+         try:
+             markdown_parser.use(container_plugin, "details")
+         except TypeError:
+             markdown_parser.use(lambda m: container_plugin(m, name="details"))
+         return markdown_parser
+     except Exception:
+         return None
+
+ def _create_python_markdown_config():
+     """Create Python-Markdown configuration as fallback."""
+     if not HAS_PYTHON_MARKDOWN:
+         return None
+
+     extensions = [
          "extra",  # tables + fenced code
          "footnotes",
          "admonition",
 
@@ -51,13 +79,20 @@ def _make_md_pythonmarkdown():
          "pymdownx.superfences",
          "pymdownx.tasklist",
      ]
+     extension_config = {
+         "pymdownx.tasklist": {"custom_checkbox": True},
+         "toc": {"permalink": True}
+     }
+     return ("python-markdown", extensions, extension_config, markdown)
+
+ # Initialize markdown engine
+ markdown_renderer = _create_markdownit_renderer()
+ if markdown_renderer:
+     markdown_engine = ("markdown-it", markdown_renderer)
+ else:
+     markdown_engine = _create_python_markdown_config()
+     if not markdown_engine:
+         raise ImportError("No markdown processor available")
 
  def _obsidian_rewrites(text: str) -> str:
      # 1) Obsidian image embeds: ![[img.png]] -> ![](file=content/img.png)
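For the fallback path, the tuple returned above is consumed by `markdown_to_html` further down. In isolation, the Python-Markdown call it drives looks like this; a minimal sketch assuming the `markdown` and `pymdown-extensions` packages are installed:

```python
import markdown  # Python-Markdown; pymdown-extensions provides pymdownx.*

html = markdown.markdown(
    "- [x] done\n- [ ] pending",
    extensions=["pymdownx.tasklist"],
    extension_configs={"pymdownx.tasklist": {"custom_checkbox": True}},
    output_format="html5",
)
print(html)  # the items render as checkbox <li> elements
```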
 
@@ -83,47 +118,45 @@ def _obsidian_rewrites(text: str) -> str:
      return text
 
 
+ def markdown_to_html(text: str) -> str:
+     """Convert markdown text to HTML using the configured renderer."""
      text = _obsidian_rewrites(text)
+
+     if markdown_engine[0] == "markdown-it":
+         renderer = markdown_engine[1]
+         return renderer.render(text)
      else:
+         engine_type, extensions, extension_config, markdown_module = markdown_engine
+         return markdown_module.markdown(
+             text,
+             extensions=extensions,
+             extension_configs=extension_config,
+             output_format="html5"
+         )
 
+ def render_article(article_path: str, component_inserts: dict[str, callable]):
+     """Render article from markdown with embedded interactive components."""
+     if Path(article_path).exists():
+         raw_content = Path(article_path).read_text(encoding="utf-8")
      else:
+         raw_content = f"**Missing article**: `{article_path}` not found."
+
      # Split on {{TOKEN}} markers (e.g., {{ALLOC_PLOT}})
+     content_parts = re.split(r"\{\{([A-Z_]+)\}\}", raw_content)
+
      with gr.Column():
+         for index, part in enumerate(content_parts):
+             if index % 2 == 0:
+                 # Render markdown content wrapped in article container
+                 html_content = markdown_to_html(part)
+                 gr.HTML(f'<div class="article">{html_content}</div>')
              else:
+                 # Render interactive component or show error
+                 component_builder = component_inserts.get(part)
+                 if component_builder is None:
+                     gr.HTML(f"<p><em>Unknown component: {part}</em></p>")
                  else:
+                     component_builder()
 
  # ---------------------------
  # Terminal (safe, simplified)
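`render_article` relies on a stdlib detail worth calling out: when the pattern passed to `re.split` contains a capturing group, the captured text is kept in the result list, so prose lands at even indices and component tokens at odd ones. A quick self-contained check:

```python
import re

parts = re.split(r"\{\{([A-Z_]+)\}\}", "Intro prose. {{ALLOC_PLOT}} Closing prose.")
print(parts)  # ['Intro prose. ', 'ALLOC_PLOT', ' Closing prose.']
# Even indices are markdown, odd indices are tokens, hence `index % 2 == 0`.
```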
 
@@ -187,60 +220,65 @@ def build_attn_vis():
  # Transformers caching allocator warmup (time vs MiB plot)
  # -------------------------------------------------------
 
 
  def _measure_load_timeline(model_id: str, disable_warmup: bool):
      """Measure memory usage during model loading with/without cache warmup."""
+     original_warmup_func = getattr(transformers_modeling, "caching_allocator_warmup", None)
+     if disable_warmup and original_warmup_func is not None:
+         transformers_modeling.caching_allocator_warmup = lambda *args, **kwargs: None
 
      try:
          device = "cuda" if torch.cuda.is_available() else "cpu"
+         timeline_data = []
 
+         def sample_memory(start_time, stop_event):
+             while not stop_event.is_set():
                  if device == "cuda":
                      torch.cuda.synchronize()
                      # Use max memory to capture peaks better
+                     allocated_memory = torch.cuda.max_memory_allocated()
                      torch.cuda.reset_peak_memory_stats()
                  else:
+                     allocated_memory = 0
+                 timeline_data.append({
+                     "t": time.perf_counter() - start_time,
+                     "MiB": allocated_memory / (1024**2)
+                 })
                  time.sleep(0.02)  # Sample more frequently
 
          if device == "cuda":
              torch.cuda.empty_cache()
              torch.cuda.reset_peak_memory_stats()
+             initial_memory = torch.cuda.memory_allocated()
          else:
+             initial_memory = 0
 
+         start_time = time.perf_counter()
+         stop_event = threading.Event()
+         memory_thread = threading.Thread(target=sample_memory, args=(start_time, stop_event), daemon=True)
+         memory_thread.start()
 
          # Load model with appropriate settings
+         model_kwargs = {"low_cpu_mem_usage": True}
          if device == "cuda":
+             model_kwargs.update({
                  "torch_dtype": torch.float16,
                  "device_map": "cuda:0"
              })
 
+         model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
 
+         stop_event.set()
+         memory_thread.join()
 
          # Final memory measurement
          if device == "cuda":
              torch.cuda.synchronize()
+             final_memory = torch.cuda.memory_allocated()
+             timeline_data.append({
+                 "t": time.perf_counter() - start_time,
+                 "MiB": final_memory / (1024**2)
+             })
 
          # Clean up
          del model
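The sampler in this hunk is a general pattern: poll a metric on a daemon thread until the main thread signals an `Event`. A stripped-down, CUDA-free sketch of the same mechanics (the names here are illustrative, not from the diff):

```python
import threading
import time

def sample_in_background(read_metric, interval=0.02):
    """Collect (elapsed_seconds, value) pairs until stop_event is set."""
    samples, stop_event = [], threading.Event()
    start = time.perf_counter()

    def loop():
        while not stop_event.is_set():
            samples.append((time.perf_counter() - start, read_metric()))
            time.sleep(interval)

    thread = threading.Thread(target=loop, daemon=True)
    thread.start()
    return samples, stop_event, thread

samples, stop_event, thread = sample_in_background(lambda: 0)
time.sleep(0.1)  # stand-in for the from_pretrained() call being profiled
stop_event.set()
thread.join()
print(f"collected {len(samples)} samples")
```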
 
@@ -248,43 +286,56 @@ def _measure_load_timeline(model_id: str, disable_warmup: bool):
          torch.cuda.empty_cache()
          torch.cuda.ipc_collect()
 
+         return timeline_data
      finally:
+         if original_warmup_func is not None:
+             transformers_modeling.caching_allocator_warmup = original_warmup_func
 
  @spaces.GPU(duration=240)
+ def profile_warmup_comparison(model_id: str):
+     """Profile memory usage with and without cache warmup."""
      if not torch.cuda.is_available():
          # Create dummy data for CPU demo
+         time_points = np.linspace(0, 5, 50)
+         base_memory = np.cumsum(np.random.exponential(50, 50))
+         warmup_enabled_data = [
+             {"t": t, "MiB": mem, "mode": "🚀 Warmup ON (Optimized)"}
+             for t, mem in zip(time_points, base_memory * 0.8)
+         ]
+         warmup_disabled_data = [
+             {"t": t, "MiB": mem, "mode": "📈 Warmup OFF (Standard)"}
+             for t, mem in zip(time_points, base_memory)
+         ]
+         return pd.DataFrame(warmup_enabled_data + warmup_disabled_data)
 
      try:
+         warmup_enabled_timeline = _measure_load_timeline(model_id, disable_warmup=False)
+         warmup_disabled_timeline = _measure_load_timeline(model_id, disable_warmup=True)
 
          # Create DataFrame with better labeling
+         all_data = []
+         all_data.extend([
+             {"t": entry["t"], "MiB": entry["MiB"], "mode": "🚀 Warmup ON (Optimized)"}
+             for entry in warmup_enabled_timeline
+         ])
+         all_data.extend([
+             {"t": entry["t"], "MiB": entry["MiB"], "mode": "📈 Warmup OFF (Standard)"}
+             for entry in warmup_disabled_timeline
+         ])
 
+         result_dataframe = pd.DataFrame(all_data)
 
+         # Calculate and log memory savings
+         if warmup_enabled_timeline and warmup_disabled_timeline:
+             peak_with_warmup = max(entry["MiB"] for entry in warmup_enabled_timeline)
+             peak_without_warmup = max(entry["MiB"] for entry in warmup_disabled_timeline)
+             if peak_without_warmup > 0:
+                 savings_percent = ((peak_without_warmup - peak_with_warmup) / peak_without_warmup * 100)
+                 print(f"Memory savings: {savings_percent:.1f}% (Peak: {peak_with_warmup:.0f} MiB vs {peak_without_warmup:.0f} MiB)")
 
+         return result_dataframe
+     except Exception as error:
+         print(f"Error profiling {model_id}: {error}")
          return pd.DataFrame(columns=["t", "MiB", "mode"])
 
  def build_alloc_plot():
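`build_alloc_plot`'s body falls outside this diff, but the DataFrame columns above ("t", "MiB", "mode") and the `go.click` wiring in the next hunk imply a layout along these lines. A hypothetical sketch only; the `model`, `go`, and `plot` names come from the click handler, and the default model ID is assumed:

```python
import gradio as gr

def build_alloc_plot_sketch():
    # Hypothetical reconstruction, not the committed implementation.
    model = gr.Textbox(label="Model ID", value="openai-community/gpt2")  # assumed default
    plot = gr.LinePlot(x="t", y="MiB", color="mode", label="Load timeline")
    go = gr.Button("Profile warmup")
    go.click(profile_warmup_comparison, inputs=[model], outputs=plot)
```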
 
@@ -317,7 +368,7 @@ def build_alloc_plot():
      )
 
      gr.Markdown("**Note**: This demo requires GPU access. The warmup feature reduces peak memory usage during model loading.")
+     go.click(profile_warmup_comparison, inputs=[model], outputs=plot)
 
  # ---------------------------
  # Optional FastRTC preview
 
@@ -335,10 +386,14 @@ def build_fastrtc():
      if not HAS_FASTRTC:
          gr.Markdown("Install `fastrtc` to enable this section.")
          return
+
+     def echo_video_frame(frame):
+         yield frame
+
      with gr.Group():
          gr.Markdown("Camera loopback using FastRTC WebRTC. Extend with streaming handlers later.")
+         webrtc_component = WebRTC(mode="send-receive", modality="video")
+         webrtc_component.stream(ReplyOnPause(echo_video_frame), inputs=[webrtc_component], outputs=[webrtc_component], time_limit=60)
 
  # ---------------------------
  # Image display functions
 
@@ -547,6 +602,60 @@ hr { border: 0; border-top: 1px solid var(--border-color); margin: 2rem 0; }
      margin-bottom: 0.5rem !important;
  }
 
+ /* Fix contrast for all interactive components */
+ .gr-form, .gr-panel, .gr-block {
+     background: #ffffff !important;
+     border: 1px solid var(--border-color) !important;
+     border-radius: 8px !important;
+ }
+
+ /* Fix text inputs */
+ .gr-textbox input {
+     background: #ffffff !important;
+     color: #1f2937 !important;
+     border: 1px solid var(--border-color) !important;
+     font-weight: 500 !important;
+ }
+
+ /* Fix all labels */
+ .gr-form label, .gr-panel label, .gr-block label {
+     color: #374151 !important;
+     font-weight: 600 !important;
+ }
+
+ /* Fix info text */
+ .gr-form .gr-info, .gr-panel .gr-info {
+     color: #6b7280 !important;
+     font-weight: 500 !important;
+ }
+
+ /* Fix plot styling */
+ .gr-plot {
+     border: 1px solid var(--border-color) !important;
+     border-radius: 8px !important;
+     background: #ffffff !important;
+ }
+
+ /* Fix any remaining low contrast text */
+ .gradio-container * {
+     color: inherit !important;
+ }
+
+ /* Ensure all text in components has good contrast */
+ .gr-form *, .gr-panel *, .gr-block * {
+     color: #1f2937 !important;
+ }
+
+ /* Fix markdown in components */
+ .gr-markdown {
+     color: #1f2937 !important;
+ }
+
+ .gr-markdown h1, .gr-markdown h2, .gr-markdown h3, .gr-markdown h4 {
+     color: #111827 !important;
+     font-weight: 600 !important;
+ }
+
  """
 
  with gr.Blocks(css=CSS, fill_height=True, title="Interactive Blog — Transformers Feature Showcase") as demo: