Yulu1 commited on
Commit
abd0497
·
verified ·
1 Parent(s): 955fd11

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +605 -0
app.py ADDED
@@ -0,0 +1,605 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import re
import json
import time
import traceback
from pathlib import Path
from typing import Dict, Any, List, Optional, Tuple

import pandas as pd
import gradio as gr
import papermill as pm

# Optional LLM (HuggingFace Inference API)
# Imported defensively: the app still works (keyword fallback) without it.
try:
    from huggingface_hub import InferenceClient
except Exception:
    InferenceClient = None

# =========================================================
# CONFIG
# =========================================================

# Directory containing this file; every path below is anchored here.
BASE_DIR = Path(__file__).resolve().parent

# Notebook filenames, overridable via environment variables.
NB1 = os.environ.get("NB1", "pythonanalysis.ipynb").strip()
NB2 = os.environ.get("NB2", "ranalysis.ipynb").strip()

# Executed-notebook copies land in runs/; pipeline outputs in artifacts/,
# split by producing language (py/r) and artifact kind (figures/tables).
RUNS_DIR = BASE_DIR / "runs"
ART_DIR = BASE_DIR / "artifacts"
PY_FIG_DIR = ART_DIR / "py" / "figures"
PY_TAB_DIR = ART_DIR / "py" / "tables"
R_FIG_DIR = ART_DIR / "r" / "figures"
R_TAB_DIR = ART_DIR / "r" / "tables"

# Tunables (seconds / rows / characters), all env-overridable.
PAPERMILL_TIMEOUT = int(os.environ.get("PAPERMILL_TIMEOUT", "1800"))
MAX_PREVIEW_ROWS = int(os.environ.get("MAX_FILE_PREVIEW_ROWS", "50"))
MAX_LOG_CHARS = int(os.environ.get("MAX_LOG_CHARS", "8000"))

# LLM settings: a non-empty HF_API_KEY enables the LLM path in the AI tab.
HF_API_KEY = os.environ.get("HF_API_KEY", "").strip()
MODEL_NAME = os.environ.get("MODEL_NAME", "deepseek-ai/DeepSeek-R1").strip()
HF_PROVIDER = os.environ.get("HF_PROVIDER", "novita").strip()

# LLM is active only when both the key and the client library are available.
LLM_ENABLED = bool(HF_API_KEY) and InferenceClient is not None
llm_client = (
    InferenceClient(provider=HF_PROVIDER, api_key=HF_API_KEY)
    if LLM_ENABLED
    else None
)
def ensure_dirs():
    """Create every run/artifact directory that does not already exist."""
    required = (RUNS_DIR, ART_DIR, PY_FIG_DIR, PY_TAB_DIR, R_FIG_DIR, R_TAB_DIR)
    for directory in required:
        directory.mkdir(parents=True, exist_ok=True)
def stamp():
    """Return a sortable timestamp for run filenames, e.g. '20240131-235959'."""
    fmt = "%Y%m%d-%H%M%S"
    return time.strftime(fmt)
def tail(text: str, n: int = MAX_LOG_CHARS) -> str:
    """Return at most the last *n* characters of *text* ('' if text is falsy)."""
    safe = text if text else ""
    return safe[-n:]
def _ls(dir_path: Path, exts: Tuple[str, ...]) -> List[str]:
    """Sorted filenames in *dir_path* whose lowercased suffix is in *exts*.

    Returns [] when the directory is missing (e.g. pipeline not run yet).
    """
    if not dir_path.is_dir():
        return []
    names = [
        entry.name
        for entry in dir_path.iterdir()
        if entry.is_file() and entry.suffix.lower() in exts
    ]
    return sorted(names)
def _read_csv(path: Path) -> pd.DataFrame:
    """Preview-load *path*: only the first MAX_PREVIEW_ROWS rows are read."""
    frame = pd.read_csv(path, nrows=MAX_PREVIEW_ROWS)
    return frame
def _read_json(path: Path):
    """Parse *path* as UTF-8 JSON and return the decoded object."""
    raw = path.read_text(encoding="utf-8")
    return json.loads(raw)
def artifacts_index() -> Dict[str, Any]:
    """Snapshot of currently available artifact filenames, keyed by scope.

    Shape: {"python": {"figures": [...], "tables": [...]}, "r": {...}}.
    """
    image_exts = (".png", ".jpg", ".jpeg")
    table_exts = (".csv", ".json")
    scopes = {
        "python": (PY_FIG_DIR, PY_TAB_DIR),
        "r": (R_FIG_DIR, R_TAB_DIR),
    }
    return {
        scope: {
            "figures": _ls(fig_dir, image_exts),
            "tables": _ls(tab_dir, table_exts),
        }
        for scope, (fig_dir, tab_dir) in scopes.items()
    }
88
+ # =========================================================
89
+ # PIPELINE RUNNERS
90
+ # =========================================================
91
+
def run_notebook(nb_name: str) -> str:
    """Execute *nb_name* with papermill, saving the executed copy under runs/.

    Returns a short status string; missing notebooks produce an ERROR string
    instead of raising (callers format this directly into the UI log).
    """
    ensure_dirs()
    source = BASE_DIR / nb_name
    if not source.exists():
        return f"ERROR: {nb_name} not found."
    target = RUNS_DIR / f"run_{stamp()}_{nb_name}"
    pm.execute_notebook(
        input_path=str(source),
        output_path=str(target),
        cwd=str(BASE_DIR),
        log_output=True,
        progress_bar=False,
        # Persist progress after each cell so partial runs are inspectable.
        request_save_on_cell_execute=True,
        execution_timeout=PAPERMILL_TIMEOUT,
    )
    return f"Executed {nb_name}"
def run_pythonanalysis() -> str:
    """Run the Python notebook and summarize the python-scope artifacts."""
    try:
        status = run_notebook(NB1)
        produced = artifacts_index()["python"]
        figure_list = ", ".join(produced["figures"]) or "(none)"
        table_list = ", ".join(produced["tables"]) or "(none)"
        return f"OK {status}\n\nFigures: {figure_list}\nTables: {table_list}"
    except Exception as e:
        # Keep the tail of the traceback so the UI log stays readable.
        return f"FAILED {e}\n\n{traceback.format_exc()[-2000:]}"
def run_ranalysis() -> str:
    """Run the R notebook and summarize the r-scope artifacts."""
    try:
        status = run_notebook(NB2)
        produced = artifacts_index()["r"]
        figure_list = ", ".join(produced["figures"]) or "(none)"
        table_list = ", ".join(produced["tables"]) or "(none)"
        return f"OK {status}\n\nFigures: {figure_list}\nTables: {table_list}"
    except Exception as e:
        # Keep the tail of the traceback so the UI log stays readable.
        return f"FAILED {e}\n\n{traceback.format_exc()[-2000:]}"
def run_full_pipeline() -> str:
    """Run both notebooks in order and return one combined log string."""
    bar = "=" * 50
    step1_header = [bar, "STEP 1/2: Python Analysis", bar]
    step2_header = [bar, "STEP 2/2: R Analysis", bar]
    # The blank entry between steps becomes an empty line in the joined log.
    return "\n".join(
        [*step1_header, run_pythonanalysis(), "", *step2_header, run_ranalysis()]
    )
153
+ # =========================================================
154
+ # GALLERY LOADERS
155
+ # =========================================================
156
+
def _load_all_figures() -> List[Tuple[str, str]]:
    """Return (filepath, caption) pairs for the Gallery component.

    Fix: previously only ``*.png`` was globbed, while ``artifacts_index``
    advertises ``.png/.jpg/.jpeg`` — JPEG figures never appeared in the
    gallery. Now every advertised image type is collected, per scope,
    in sorted filename order (matching the old ordering for PNGs).
    """
    image_exts = (".png", ".jpg", ".jpeg")
    items: List[Tuple[str, str]] = []
    for label, fig_dir in (("Python", PY_FIG_DIR), ("R", R_FIG_DIR)):
        if not fig_dir.is_dir():
            # Pipeline not run yet for this scope; nothing to show.
            continue
        images = sorted(
            p for p in fig_dir.iterdir()
            if p.is_file() and p.suffix.lower() in image_exts
        )
        for p in images:
            items.append((str(p), f"{label} | {p.stem.replace('_', ' ').title()}"))
    return items
def _load_table_safe(path: Path) -> pd.DataFrame:
    """Load a CSV/JSON artifact; on any failure return a one-row error frame."""
    try:
        if path.suffix != ".json":
            return _read_csv(path)
        payload = _read_json(path)
        # A lone JSON object becomes a single-row frame; lists map directly.
        rows = [payload] if isinstance(payload, dict) else payload
        return pd.DataFrame(rows)
    except Exception as e:
        return pd.DataFrame([{"error": str(e)}])
def refresh_gallery():
    """Handler for the Refresh button on the Gallery tab.

    Returns (gallery_items, dropdown_update, preview_dataframe).
    """
    figures = _load_all_figures()
    idx = artifacts_index()

    # Flatten available tables into "scope/filename" dropdown choices.
    table_choices = [
        f"{scope}/{name}"
        for scope in ("python", "r")
        for name in idx[scope]["tables"]
    ]

    # Preselect and preview the first table, when one exists.
    preview = pd.DataFrame()
    selected = table_choices[0] if table_choices else None
    if selected is not None:
        scope, filename = selected.split("/", 1)
        folder = PY_TAB_DIR if scope == "python" else R_TAB_DIR
        preview = _load_table_safe(folder / filename)

    return (
        figures if figures else [],
        gr.update(choices=table_choices, value=selected),
        preview,
    )
def on_table_select(choice: str):
    """Resolve a 'scope/filename' dropdown choice to a preview DataFrame.

    Bad or stale choices yield a one-row hint/error frame instead of raising.
    """
    if not choice or "/" not in choice:
        return pd.DataFrame([{"hint": "Select a table above."}])
    scope, name = choice.split("/", 1)
    folder = {"python": PY_TAB_DIR, "r": R_TAB_DIR}.get(scope)
    if not folder:
        return pd.DataFrame([{"error": f"Unknown scope: {scope}"}])
    target = folder / name
    if not target.exists():
        return pd.DataFrame([{"error": f"File not found: {target}"}])
    return _load_table_safe(target)
217
+ # =========================================================
218
+ # KPI LOADER
219
+ # =========================================================
220
+
def load_kpis() -> Dict[str, Any]:
    """Return the first readable kpis.json, or {} when none exists/parses."""
    candidates = (PY_TAB_DIR / "kpis.json", PY_FIG_DIR / "kpis.json")
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return _read_json(candidate)
        except Exception:
            # Best effort: a corrupt file falls through to the next candidate.
            continue
    return {}
231
+ # =========================================================
232
+ # AI DASHBOARD (Tab 3) -- LLM picks what to display
233
+ # =========================================================
234
+
# System-prompt template for the AI Dashboard tab. str.format() fills
# {artifacts_json} and {kpis_json}; the doubled braces {{...}} survive
# formatting as the literal JSON-shape example shown to the model.
DASHBOARD_SYSTEM = """You are an AI dashboard assistant for a book-sales analytics app.
The user asks questions or requests about their data. You have access to pre-computed
artifacts from Python and R analysis pipelines.

AVAILABLE ARTIFACTS (only reference ones that exist):
{artifacts_json}

KPI SUMMARY: {kpis_json}

YOUR JOB:
1. Answer the user's question conversationally using the KPIs and your knowledge of the artifacts.
2. At the END of your response, output a JSON block (fenced with ```json ... ```) that tells
the dashboard which artifact to display. The JSON must have this shape:
{{"show": "figure"|"table"|"none", "scope": "python"|"r", "filename": "..."}}

- Use "show": "figure" to display a chart image.
- Use "show": "table" to display a CSV/JSON table.
- Use "show": "none" if no artifact is relevant.

RULES:
- If the user asks about sales trends or forecasting by title, show sales_trends or arima figures.
- If the user asks about sentiment, show sentiment figure or sentiment_counts table.
- If the user asks about R regression, the R notebook focuses on forecasting, show accuracy_table.csv.
- If the user asks about forecast accuracy or model comparison, show accuracy_table.csv or forecast_compare.png.
- If the user asks about top sellers, show top_titles_by_units_sold.csv.
- If the user asks a general data question, pick the most relevant artifact.
- Keep your answer concise (2-4 sentences), then the JSON block.
"""
# Preferred directive form: a fenced ```json ... ``` block (group 1 = object).
JSON_BLOCK_RE = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)
# Fallback: any bare brace-delimited object that mentions a "show" key.
FALLBACK_JSON_RE = re.compile(r"\{[^{}]*\"show\"[^{}]*\}", re.DOTALL)
def _parse_display_directive(text: str) -> Dict[str, str]:
    """Extract the display directive JSON from an LLM reply.

    Tries the fenced ```json block first, then the bare-object fallback;
    returns {"show": "none"} when neither matches or parses.
    """
    attempts = ((JSON_BLOCK_RE, 1), (FALLBACK_JSON_RE, 0))
    for pattern, group_index in attempts:
        match = pattern.search(text)
        if match is None:
            continue
        try:
            return json.loads(match.group(group_index))
        except json.JSONDecodeError:
            # Malformed candidate: fall through to the next pattern.
            continue
    return {"show": "none"}
283
+
def _clean_response(text: str) -> str:
    """Remove the fenced ```json directive block before showing the reply."""
    without_directive = JSON_BLOCK_RE.sub("", text)
    return without_directive.strip()
def ai_chat(user_msg: str, history: list):
    """Chat handler for the AI Dashboard tab.

    Routes the message to the LLM (when configured) or to the keyword
    fallback, then resolves the returned display directive into an artifact.

    Returns a Gradio 4-tuple: (new_history, cleared_textbox_value,
    figure_path_or_None, dataframe_or_None).
    """
    # Empty/whitespace submission: leave history and outputs unchanged.
    if not user_msg or not user_msg.strip():
        return history, "", None, None

    idx = artifacts_index()
    kpis = load_kpis()

    if not LLM_ENABLED:
        # No API key / client library: degrade to keyword matching.
        reply, directive = _keyword_fallback(user_msg, idx, kpis)
    else:
        # Inject the live artifact listing and KPIs into the system prompt.
        system = DASHBOARD_SYSTEM.format(
            artifacts_json=json.dumps(idx, indent=2),
            kpis_json=json.dumps(kpis, indent=2) if kpis else "(no KPIs yet, run the pipeline first)",
        )
        msgs = [{"role": "system", "content": system}]
        # Keep only the last 6 entries to bound prompt size.
        # NOTE(review): assumes history entries are {"role", "content"} dicts
        # (messages format) — confirm the Chatbot is configured accordingly.
        for entry in (history or [])[-6:]:
            msgs.append(entry)
        msgs.append({"role": "user", "content": user_msg})

        try:
            r = llm_client.chat_completion(
                model=MODEL_NAME,
                messages=msgs,
                temperature=0.3,
                max_tokens=600,
                stream=False,
            )
            # The client may return a dict or an object; handle both shapes.
            raw = (
                r["choices"][0]["message"]["content"]
                if isinstance(r, dict)
                else r.choices[0].message.content
            )
            directive = _parse_display_directive(raw)
            reply = _clean_response(raw)
        except Exception as e:
            # Any LLM failure degrades to the keyword matcher; both the error
            # and the fallback answer end up in the visible reply.
            reply = f"LLM error: {e}. Falling back to keyword matching."
            reply_fb, directive = _keyword_fallback(user_msg, idx, kpis)
            reply += "\n\n" + reply_fb

    # Resolve artifact paths
    fig_out = None
    tab_out = None
    show = directive.get("show", "none")
    scope = directive.get("scope", "")
    fname = directive.get("filename", "")

    if show == "figure" and scope and fname:
        base = {"python": PY_FIG_DIR, "r": R_FIG_DIR}.get(scope)
        if base and (base / fname).exists():
            fig_out = str(base / fname)
        else:
            # Surface the miss in the chat rather than failing silently.
            reply += f"\n\n*(Could not find figure: {scope}/{fname})*"

    if show == "table" and scope and fname:
        base = {"python": PY_TAB_DIR, "r": R_TAB_DIR}.get(scope)
        if base and (base / fname).exists():
            tab_out = _load_table_safe(base / fname)
        else:
            reply += f"\n\n*(Could not find table: {scope}/{fname})*"

    # Append the turn in messages format (user + assistant entries).
    new_history = (history or []) + [
        {"role": "user", "content": user_msg},
        {"role": "assistant", "content": reply},
    ]

    return new_history, "", fig_out, tab_out
def _keyword_fallback(msg: str, idx: Dict, kpis: Dict) -> Tuple[str, Dict]:
    """Simple keyword matcher when LLM is unavailable.

    Returns (reply_text, display_directive); the directive mirrors the
    {"show", "scope", "filename"} shape the LLM is asked to emit.
    Branch order matters: the first matching keyword group wins.
    NOTE(review): the hard-coded filenames assume the notebooks write
    exactly these artifact names — confirm against the notebook outputs.
    """
    msg_lower = msg.lower()

    # No artifacts in either scope: point the user at the Pipeline tab.
    if not any(idx[s]["figures"] or idx[s]["tables"] for s in ("python", "r")):
        return (
            "No artifacts found yet. Please run the pipeline first (Tab 1), "
            "then come back here to explore the results.",
            {"show": "none"},
        )

    # One-line KPI summary appended to most replies (empty when no KPIs).
    kpi_text = ""
    if kpis:
        total = kpis.get("total_units_sold", 0)
        kpi_text = (
            f"Quick summary: **{kpis.get('n_titles', '?')}** book titles across "
            f"**{kpis.get('n_months', '?')}** months, with **{total:,.0f}** total units sold."
        )

    if any(w in msg_lower for w in ["trend", "sales trend", "monthly sale"]):
        return (
            f"Here are the sales trends for sampled titles. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "sales_trends_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["sentiment", "review", "positive", "negative"]):
        return (
            f"Here is the sentiment distribution across sampled book titles. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "sentiment_distribution_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["arima", "forecast", "predict"]):
        # Prefer the R comparison plot when the user hints at model comparison
        # and the plot actually exists; otherwise fall back to Python ARIMA.
        if "compar" in msg_lower or "ets" in msg_lower or "accuracy" in msg_lower:
            if "forecast_compare.png" in idx.get("r", {}).get("figures", []):
                return (
                    "Here is the ARIMA+Fourier vs ETS forecast comparison from the R analysis.",
                    {"show": "figure", "scope": "r", "filename": "forecast_compare.png"},
                )
        return (
            f"Here are the ARIMA forecasts for sampled titles from the Python analysis. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "arima_forecasts_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["regression", "lm", "coefficient", "price effect", "rating effect"]):
        return (
            "The R notebook focuses on forecasting rather than regression. "
            "Here is the forecast accuracy comparison instead.",
            {"show": "table", "scope": "r", "filename": "accuracy_table.csv"},
        )

    if any(w in msg_lower for w in ["top", "best sell", "popular", "rank"]):
        return (
            f"Here are the top-selling titles by units sold. {kpi_text}",
            {"show": "table", "scope": "python", "filename": "top_titles_by_units_sold.csv"},
        )

    if any(w in msg_lower for w in ["accuracy", "benchmark", "rmse", "mape"]):
        return (
            "Here is the forecast accuracy comparison (ARIMA+Fourier vs ETS) from the R analysis.",
            {"show": "table", "scope": "r", "filename": "accuracy_table.csv"},
        )

    if any(w in msg_lower for w in ["r analysis", "r output", "r result"]):
        # Only answered when the comparison figure exists; otherwise the
        # message falls through to the later branches / default reply.
        if "forecast_compare.png" in idx.get("r", {}).get("figures", []):
            return (
                "Here is the main R output: forecast model comparison plot.",
                {"show": "figure", "scope": "r", "filename": "forecast_compare.png"},
            )

    if any(w in msg_lower for w in ["dashboard", "overview", "summary", "kpi"]):
        return (
            f"Dashboard overview: {kpi_text}\n\nAsk me about sales trends, sentiment, forecasts, "
            "forecast accuracy, or top sellers to see specific visualizations.",
            {"show": "table", "scope": "python", "filename": "df_dashboard.csv"},
        )

    # Default
    return (
        f"I can show you various analyses. {kpi_text}\n\n"
        "Try asking about: **sales trends**, **sentiment**, **ARIMA forecasts**, "
        "**forecast accuracy**, **top sellers**, or **dashboard overview**.",
        {"show": "none"},
    )
# =========================================================
# UI
# =========================================================

# Make sure all artifact directories exist before the UI references them.
ensure_dirs()
def load_css() -> str:
    """Return the contents of style.css next to this file, or '' if absent."""
    stylesheet = BASE_DIR / "style.css"
    if stylesheet.exists():
        return stylesheet.read_text(encoding="utf-8")
    return ""
# Top-level UI definition: three tabs (runner, gallery, AI dashboard).
with gr.Blocks(title="RX12 Workshop App") as demo:

    # App header.
    gr.Markdown(
        "# RX12 - Intro to Python and R - Workshop App\n"
        "*The app to integrate the three notebooks in to get a functioning blueprint of the group project's final product*",
        elem_id="escp_title",
    )

    # ===========================================================
    # TAB 1 -- Pipeline Runner
    # ===========================================================
    with gr.Tab("Pipeline Runner"):
        # Empty placeholder (renders nothing).
        gr.Markdown(
        )

        with gr.Row():
            with gr.Column(scale=1):
                btn_nb1 = gr.Button(
                    "Step 1: Python Analysis",
                    variant="secondary",
                )
                # Empty placeholder (renders nothing).
                gr.Markdown(
                )
            with gr.Column(scale=1):
                btn_nb2 = gr.Button(
                    "Step 2: R Analysis",
                    variant="secondary",
                )
                # Empty placeholder (renders nothing).
                gr.Markdown(
                )

        with gr.Row():
            btn_all = gr.Button(
                "Run All 2 Steps",
                variant="primary",
            )

        # Shared log box: each button's handler overwrites it with its output.
        run_log = gr.Textbox(
            label="Execution Log",
            lines=18,
            max_lines=30,
            interactive=False,
        )

        btn_nb1.click(run_pythonanalysis, outputs=[run_log])
        btn_nb2.click(run_ranalysis, outputs=[run_log])
        btn_all.click(run_full_pipeline, outputs=[run_log])

    # ===========================================================
    # TAB 2 -- Results Gallery
    # ===========================================================
    with gr.Tab("Results Gallery"):
        gr.Markdown(
            "### All generated artifacts\n\n"
            "After running the pipeline, click **Refresh** to load all figures and tables. "
            "Figures are shown in the gallery; select a table from the dropdown to inspect it."
        )

        refresh_btn = gr.Button("Refresh Gallery", variant="primary")

        gr.Markdown("#### Figures")
        gallery = gr.Gallery(
            label="All Figures (Python + R)",
            columns=2,
            height=480,
            object_fit="contain",
        )

        gr.Markdown("#### Tables")
        # Choices start empty; refresh_gallery populates them at click time.
        table_dropdown = gr.Dropdown(
            label="Select a table to view",
            choices=[],
            interactive=True,
        )
        table_display = gr.Dataframe(
            label="Table Preview",
            interactive=False,
        )

        refresh_btn.click(
            refresh_gallery,
            outputs=[gallery, table_dropdown, table_display],
        )
        table_dropdown.change(
            on_table_select,
            inputs=[table_dropdown],
            outputs=[table_display],
        )

    # ===========================================================
    # TAB 3 -- AI Dashboard
    # ===========================================================
    with gr.Tab('"AI" Dashboard'):
        gr.Markdown(
            "### Ask questions, get visualisations\n\n"
            "Describe what you want to see and the AI will pick the right chart or table. "
            + (
                "*LLM is active.*"
                if LLM_ENABLED
                else "*No API key detected \u2014 using keyword matching. "
                "Set `HF_API_KEY` in Space secrets for full LLM support.*"
            )
        )

        with gr.Row(equal_height=True):
            with gr.Column(scale=1):
                # NOTE(review): ai_chat appends {"role", "content"} dicts, which
                # requires the Chatbot's messages format — confirm this gradio
                # version defaults to it (otherwise pass type="messages").
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=380,
                )
                user_input = gr.Textbox(
                    label="Ask about your data",
                    placeholder="e.g. Show me sales trends / What drives revenue? / Compare forecast models",
                    lines=1,
                )
                gr.Examples(
                    examples=[
                        "Show me the sales trends",
                        "What does the sentiment look like?",
                        "Which titles sell the most?",
                        "Show the forecast accuracy comparison",
                        "Compare the ARIMA and ETS forecasts",
                        "Give me a dashboard overview",
                    ],
                    inputs=user_input,
                )

            with gr.Column(scale=1):
                ai_figure = gr.Image(
                    label="Visualisation",
                    height=350,
                )
                ai_table = gr.Dataframe(
                    label="Data Table",
                    interactive=False,
                )

        # Enter submits: clears the textbox (2nd output) and updates the rest.
        user_input.submit(
            ai_chat,
            inputs=[user_input, chatbot],
            outputs=[chatbot, user_input, ai_figure, ai_table],
        )
# Port resolution: PORT wins, then GRADIO_SERVER_PORT, else 7860 (Gradio default).
PORT = int(os.environ.get("PORT", os.environ.get("GRADIO_SERVER_PORT", "7860")))

demo.launch(
    server_name="0.0.0.0",  # listen on all interfaces (needed on Spaces/containers)
    server_port=PORT,
    # NOTE(review): css is normally a gr.Blocks(...) argument, not launch() —
    # confirm this gradio version accepts it here.
    css=load_css(),
    allowed_paths=[str(BASE_DIR)],  # let Gradio serve artifact files under the app dir
)