Yulu1 committed on
Commit
0019fbb
·
verified ·
1 Parent(s): b8b7cf0

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +608 -0
app.py ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import re
import json
import time
import traceback
from pathlib import Path
from typing import Dict, Any, List, Optional, Tuple

import pandas as pd
import gradio as gr
import papermill as pm

# Optional LLM (HuggingFace Inference API)
# Imported defensively: the app still works (keyword fallback) without it.
try:
    from huggingface_hub import InferenceClient
except Exception:
    InferenceClient = None

# =========================================================
# CONFIG
# =========================================================
# All knobs are environment-overridable so the app can be tuned
# (e.g. in a HF Space) without editing code.

BASE_DIR = Path(__file__).resolve().parent

# Notebook filenames executed by the pipeline (relative to BASE_DIR).
NB1 = os.environ.get("NB1", "pythonanalysis.ipynb").strip()
NB2 = os.environ.get("NB2", "ranalysis.ipynb").strip()

# Output layout: executed notebooks go to runs/, generated figures and
# tables to artifacts/{py,r}/{figures,tables}.
RUNS_DIR = BASE_DIR / "runs"
ART_DIR = BASE_DIR / "artifacts"
PY_FIG_DIR = ART_DIR / "py" / "figures"
PY_TAB_DIR = ART_DIR / "py" / "tables"
R_FIG_DIR = ART_DIR / "r" / "figures"
R_TAB_DIR = ART_DIR / "r" / "tables"

PAPERMILL_TIMEOUT = int(os.environ.get("PAPERMILL_TIMEOUT", "1800"))  # seconds per cell
MAX_PREVIEW_ROWS = int(os.environ.get("MAX_FILE_PREVIEW_ROWS", "50"))  # table preview cap
MAX_LOG_CHARS = int(os.environ.get("MAX_LOG_CHARS", "8000"))  # log tail size

# LLM access for the AI Dashboard tab (Tab 3).
HF_API_KEY = os.environ.get("HF_API_KEY", "").strip()
MODEL_NAME = os.environ.get("MODEL_NAME", "deepseek-ai/DeepSeek-R1").strip()
HF_PROVIDER = os.environ.get("HF_PROVIDER", "novita").strip()

# LLM is used only when a key is configured AND huggingface_hub imported.
LLM_ENABLED = bool(HF_API_KEY) and InferenceClient is not None
llm_client = (
    InferenceClient(provider=HF_PROVIDER, api_key=HF_API_KEY)
    if LLM_ENABLED
    else None
)
54
def ensure_dirs():
    """Create every runs/artifacts directory used by the app (idempotent)."""
    required = (RUNS_DIR, ART_DIR, PY_FIG_DIR, PY_TAB_DIR, R_FIG_DIR, R_TAB_DIR)
    for directory in required:
        directory.mkdir(parents=True, exist_ok=True)
57
+
58
def stamp():
    """Return the current local time formatted as YYYYMMDD-HHMMSS."""
    now = time.localtime()
    return time.strftime("%Y%m%d-%H%M%S", now)
60
+
61
def tail(text: str, n: int = MAX_LOG_CHARS) -> str:
    """Return at most the last *n* characters of *text* ('' for falsy input)."""
    if not text:
        return ""
    return text[-n:]
63
+
64
+ def _ls(dir_path: Path, exts: Tuple[str, ...]) -> List[str]:
65
+ if not dir_path.is_dir():
66
+ return []
67
+ return sorted(p.name for p in dir_path.iterdir() if p.is_file() and p.suffix.lower() in exts)
68
+
69
def _read_csv(path: Path) -> pd.DataFrame:
    """Load a CSV preview, capped at MAX_PREVIEW_ROWS rows to keep the UI responsive."""
    return pd.read_csv(path, nrows=MAX_PREVIEW_ROWS)
71
+
72
+ def _read_json(path: Path):
73
+ with path.open(encoding="utf-8") as f:
74
+ return json.load(f)
75
+
76
def artifacts_index() -> Dict[str, Any]:
    """Map each pipeline scope ('python'/'r') to its figure and table file names."""
    figure_exts = (".png", ".jpg", ".jpeg")
    table_exts = (".csv", ".json")
    index: Dict[str, Any] = {}
    for scope, fig_dir, tab_dir in (
        ("python", PY_FIG_DIR, PY_TAB_DIR),
        ("r", R_FIG_DIR, R_TAB_DIR),
    ):
        index[scope] = {
            "figures": _ls(fig_dir, figure_exts),
            "tables": _ls(tab_dir, table_exts),
        }
    return index
87
+
88
+ # =========================================================
89
+ # PIPELINE RUNNERS
90
+ # =========================================================
91
+
92
def run_notebook(nb_name: str) -> str:
    """Execute one notebook with papermill and return a one-line status string.

    The executed copy is written to runs/run_<timestamp>_<name> so each run
    is preserved. Returns an "ERROR: ..." string (does not raise) when the
    input notebook is missing; papermill errors propagate to the caller.
    """
    ensure_dirs()
    nb_in = BASE_DIR / nb_name
    if not nb_in.exists():
        return f"ERROR: {nb_name} not found."

    nb_out = RUNS_DIR / f"run_{stamp()}_{nb_name}"

    pm.execute_notebook(
        input_path=str(nb_in),
        output_path=str(nb_out),
        # Run with the app directory as CWD so notebooks write into artifacts/.
        cwd=str(BASE_DIR),
        log_output=True,
        progress_bar=False,
        # Persist the output notebook after every cell so partial progress
        # survives a crash or timeout.
        request_save_on_cell_execute=True,
        execution_timeout=PAPERMILL_TIMEOUT,
    )

    return f"Executed {nb_name}"
111
+
112
+
113
def run_pythonanalysis() -> str:
    """Execute the Python notebook (NB1) and report its artifacts; never raises."""
    try:
        log = run_notebook(NB1)
        artifacts = artifacts_index()["python"]
        figure_list = ", ".join(artifacts["figures"]) or "(none)"
        table_list = ", ".join(artifacts["tables"]) or "(none)"
        return f"OK {log}\n\nFigures: {figure_list}\nTables: {table_list}"
    except Exception as e:
        return f"FAILED {e}\n\n{traceback.format_exc()[-2000:]}"
126
+
127
def run_ranalysis() -> str:
    """Execute the R notebook (NB2) and report its artifacts; never raises."""
    try:
        log = run_notebook(NB2)
        artifacts = artifacts_index()["r"]
        figure_list = ", ".join(artifacts["figures"]) or "(none)"
        table_list = ", ".join(artifacts["tables"]) or "(none)"
        return f"OK {log}\n\nFigures: {figure_list}\nTables: {table_list}"
    except Exception as e:
        return f"FAILED {e}\n\n{traceback.format_exc()[-2000:]}"
140
+
141
+
142
def run_full_pipeline() -> str:
    """Run both notebooks in order and return a combined, banner-separated log."""
    banner = "=" * 50
    sections = [
        banner,
        "STEP 1/2: Python Analysis",
        banner,
        run_pythonanalysis(),
        "",
        banner,
        "STEP 2/2: R Analysis",
        banner,
        run_ranalysis(),
    ]
    return "\n".join(sections)
154
+
155
+
156
+ # =========================================================
157
+ # GALLERY LOADERS
158
+ # =========================================================
159
+
160
def _load_all_figures() -> List[Tuple[str, str]]:
    """Return (filepath, caption) pairs for every PNG figure: Python first, then R."""
    def _caption(prefix: str, path: Path) -> str:
        return f"{prefix} | {path.stem.replace('_', ' ').title()}"

    items: List[Tuple[str, str]] = []
    items.extend((str(p), _caption("Python", p)) for p in sorted(PY_FIG_DIR.glob("*.png")))
    items.extend((str(p), _caption("R", p)) for p in sorted(R_FIG_DIR.glob("*.png")))
    return items
168
+
169
+
170
def _load_table_safe(path: Path) -> pd.DataFrame:
    """Read a CSV/JSON artifact as a DataFrame; failures become a one-row error frame."""
    try:
        if path.suffix != ".json":
            return _read_csv(path)
        obj = _read_json(path)
        # A single JSON object becomes a one-row frame; lists pass through.
        records = [obj] if isinstance(obj, dict) else obj
        return pd.DataFrame(records)
    except Exception as e:
        return pd.DataFrame([{"error": str(e)}])
180
+
181
+
182
def refresh_gallery():
    """Called when user clicks Refresh on Gallery tab.

    Returns a 3-tuple matching the tab's outputs:
    (gallery items, dropdown update with 'scope/name' choices, preview DataFrame).
    """
    figures = _load_all_figures()
    idx = artifacts_index()

    # Build table choices as "scope/name" strings so on_table_select can
    # split them back into a directory + filename.
    table_choices = []
    for scope in ("python", "r"):
        for name in idx[scope]["tables"]:
            table_choices.append(f"{scope}/{name}")

    # Default: show first table if available
    default_df = pd.DataFrame()
    if table_choices:
        parts = table_choices[0].split("/", 1)
        base = PY_TAB_DIR if parts[0] == "python" else R_TAB_DIR
        default_df = _load_table_safe(base / parts[1])

    return (
        figures if figures else [],
        # gr.update mutates the existing Dropdown in place (choices + selection).
        gr.update(choices=table_choices, value=table_choices[0] if table_choices else None),
        default_df,
    )
205
+
206
+
207
def on_table_select(choice: str):
    """Resolve a 'scope/filename' dropdown value to a previewable DataFrame."""
    if not choice or "/" not in choice:
        return pd.DataFrame([{"hint": "Select a table above."}])
    scope, _, name = choice.partition("/")
    base = {"python": PY_TAB_DIR, "r": R_TAB_DIR}.get(scope)
    if base is None:
        return pd.DataFrame([{"error": f"Unknown scope: {scope}"}])
    path = base / name
    if not path.exists():
        return pd.DataFrame([{"error": f"File not found: {path}"}])
    return _load_table_safe(path)
218
+
219
+
220
+ # =========================================================
221
+ # KPI LOADER
222
+ # =========================================================
223
+
224
def load_kpis() -> Dict[str, Any]:
    """Load kpis.json from its known locations; return {} when absent or unreadable."""
    candidates = (PY_TAB_DIR / "kpis.json", PY_FIG_DIR / "kpis.json")
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return _read_json(candidate)
        except Exception:
            # Unreadable file: fall through to the next candidate location.
            continue
    return {}
232
+
233
+
234
+ # =========================================================
235
+ # AI DASHBOARD (Tab 3) -- LLM picks what to display
236
+ # =========================================================
237
+
238
# System prompt template for the AI Dashboard. Filled via str.format with
# artifacts_json / kpis_json (hence the doubled braces around the JSON shape,
# which escape format()).
DASHBOARD_SYSTEM = """You are an AI dashboard assistant for a book-sales analytics app.
The user asks questions or requests about their data. You have access to pre-computed
artifacts from Python and R analysis pipelines.

AVAILABLE ARTIFACTS (only reference ones that exist):
{artifacts_json}

KPI SUMMARY: {kpis_json}

YOUR JOB:
1. Answer the user's question conversationally using the KPIs and your knowledge of the artifacts.
2. At the END of your response, output a JSON block (fenced with ```json ... ```) that tells
the dashboard which artifact to display. The JSON must have this shape:
{{"show": "figure"|"table"|"none", "scope": "python"|"r", "filename": "..."}}

- Use "show": "figure" to display a chart image.
- Use "show": "table" to display a CSV/JSON table.
- Use "show": "none" if no artifact is relevant.

RULES:
- If the user asks about sales trends or forecasting by title, show sales_trends or arima figures.
- If the user asks about sentiment, show sentiment figure or sentiment_counts table.
- If the user asks about R regression, the R notebook focuses on forecasting, show accuracy_table.csv.
- If the user asks about forecast accuracy or model comparison, show accuracy_table.csv or forecast_compare.png.
- If the user asks about top sellers, show top_titles_by_units_sold.csv.
- If the user asks a general data question, pick the most relevant artifact.
- Keep your answer concise (2-4 sentences), then the JSON block.
"""

# Primary extractor: a ```json fenced object in the model reply.
JSON_BLOCK_RE = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)
# Fallback: a bare flat JSON object containing a "show" key.
FALLBACK_JSON_RE = re.compile(r"\{[^{}]*\"show\"[^{}]*\}", re.DOTALL)
269
+
270
+
271
+ def _parse_display_directive(text: str) -> Dict[str, str]:
272
+ m = JSON_BLOCK_RE.search(text)
273
+ if m:
274
+ try:
275
+ return json.loads(m.group(1))
276
+ except json.JSONDecodeError:
277
+ pass
278
+ m = FALLBACK_JSON_RE.search(text)
279
+ if m:
280
+ try:
281
+ return json.loads(m.group(0))
282
+ except json.JSONDecodeError:
283
+ pass
284
+ return {"show": "none"}
285
+
286
+
287
+ def _clean_response(text: str) -> str:
288
+ """Strip the JSON directive block from the displayed response."""
289
+ return JSON_BLOCK_RE.sub("", text).strip()
290
+
291
+
292
def ai_chat(user_msg: str, history: list):
    """Chat function for the AI Dashboard tab.

    Returns (new_history, cleared_textbox, figure_path_or_None,
    dataframe_or_None) matching the tab's four outputs. History entries
    are {"role": ..., "content": ...} dicts (OpenAI-style messages format;
    NOTE(review): the gr.Chatbot above is created without type="messages" —
    confirm the installed Gradio version accepts dict entries).
    """
    if not user_msg or not user_msg.strip():
        return history, "", None, None

    idx = artifacts_index()
    kpis = load_kpis()

    if not LLM_ENABLED:
        # No API key / no huggingface_hub: deterministic keyword routing.
        reply, directive = _keyword_fallback(user_msg, idx, kpis)
    else:
        system = DASHBOARD_SYSTEM.format(
            artifacts_json=json.dumps(idx, indent=2),
            kpis_json=json.dumps(kpis, indent=2) if kpis else "(no KPIs yet, run the pipeline first)",
        )
        msgs = [{"role": "system", "content": system}]
        # Keep only the last 6 turns to bound prompt size.
        for entry in (history or [])[-6:]:
            msgs.append(entry)
        msgs.append({"role": "user", "content": user_msg})

        try:
            r = llm_client.chat_completion(
                model=MODEL_NAME,
                messages=msgs,
                temperature=0.3,
                max_tokens=600,
                stream=False,
            )
            # Response shape differs by huggingface_hub version: dict vs object.
            raw = (
                r["choices"][0]["message"]["content"]
                if isinstance(r, dict)
                else r.choices[0].message.content
            )
            directive = _parse_display_directive(raw)
            reply = _clean_response(raw)
        except Exception as e:
            # Any LLM failure degrades gracefully to the keyword matcher.
            reply = f"LLM error: {e}. Falling back to keyword matching."
            reply_fb, directive = _keyword_fallback(user_msg, idx, kpis)
            reply += "\n\n" + reply_fb

    # Resolve artifact paths
    fig_out = None
    tab_out = None
    show = directive.get("show", "none")
    scope = directive.get("scope", "")
    fname = directive.get("filename", "")

    if show == "figure" and scope and fname:
        base = {"python": PY_FIG_DIR, "r": R_FIG_DIR}.get(scope)
        if base and (base / fname).exists():
            fig_out = str(base / fname)
        else:
            # Surface hallucinated/missing filenames in the chat instead of failing.
            reply += f"\n\n*(Could not find figure: {scope}/{fname})*"

    if show == "table" and scope and fname:
        base = {"python": PY_TAB_DIR, "r": R_TAB_DIR}.get(scope)
        if base and (base / fname).exists():
            tab_out = _load_table_safe(base / fname)
        else:
            reply += f"\n\n*(Could not find table: {scope}/{fname})*"

    new_history = (history or []) + [
        {"role": "user", "content": user_msg},
        {"role": "assistant", "content": reply},
    ]

    # Second element "" clears the input textbox.
    return new_history, "", fig_out, tab_out
359
+
360
+
361
def _keyword_fallback(msg: str, idx: Dict, kpis: Dict) -> Tuple[str, Dict]:
    """Simple keyword matcher when LLM is unavailable.

    Returns (reply_text, display_directive) with the same directive shape
    the LLM path produces. Branch order matters: the first matching
    keyword group wins.
    """
    msg_lower = msg.lower()

    # Nothing generated yet: point the user at the pipeline tab.
    if not any(idx[s]["figures"] or idx[s]["tables"] for s in ("python", "r")):
        return (
            "No artifacts found yet. Please run the pipeline first (Tab 1), "
            "then come back here to explore the results.",
            {"show": "none"},
        )

    # One-line KPI summary appended to most replies when kpis.json exists.
    # assumes total_units_sold is numeric — the :,.0f format would raise on a
    # string; TODO confirm against the notebook that writes kpis.json.
    kpi_text = ""
    if kpis:
        total = kpis.get("total_units_sold", 0)
        kpi_text = (
            f"Quick summary: **{kpis.get('n_titles', '?')}** book titles across "
            f"**{kpis.get('n_months', '?')}** months, with **{total:,.0f}** total units sold."
        )

    if any(w in msg_lower for w in ["trend", "sales trend", "monthly sale"]):
        return (
            f"Here are the sales trends for sampled titles. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "sales_trends_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["sentiment", "review", "positive", "negative"]):
        return (
            f"Here is the sentiment distribution across sampled book titles. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "sentiment_distribution_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["arima", "forecast", "predict"]):
        # Prefer the R comparison plot when the question is about model comparison.
        if "compar" in msg_lower or "ets" in msg_lower or "accuracy" in msg_lower:
            if "forecast_compare.png" in idx.get("r", {}).get("figures", []):
                return (
                    "Here is the ARIMA+Fourier vs ETS forecast comparison from the R analysis.",
                    {"show": "figure", "scope": "r", "filename": "forecast_compare.png"},
                )
        return (
            f"Here are the ARIMA forecasts for sampled titles from the Python analysis. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "arima_forecasts_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["regression", "lm", "coefficient", "price effect", "rating effect"]):
        return (
            "The R notebook focuses on forecasting rather than regression. "
            "Here is the forecast accuracy comparison instead.",
            {"show": "table", "scope": "r", "filename": "accuracy_table.csv"},
        )

    if any(w in msg_lower for w in ["top", "best sell", "popular", "rank"]):
        return (
            f"Here are the top-selling titles by units sold. {kpi_text}",
            {"show": "table", "scope": "python", "filename": "top_titles_by_units_sold.csv"},
        )

    if any(w in msg_lower for w in ["accuracy", "benchmark", "rmse", "mape"]):
        return (
            "Here is the forecast accuracy comparison (ARIMA+Fourier vs ETS) from the R analysis.",
            {"show": "table", "scope": "r", "filename": "accuracy_table.csv"},
        )

    # NOTE: this branch only returns when forecast_compare.png exists;
    # otherwise control deliberately falls through to later branches.
    if any(w in msg_lower for w in ["r analysis", "r output", "r result"]):
        if "forecast_compare.png" in idx.get("r", {}).get("figures", []):
            return (
                "Here is the main R output: forecast model comparison plot.",
                {"show": "figure", "scope": "r", "filename": "forecast_compare.png"},
            )

    if any(w in msg_lower for w in ["dashboard", "overview", "summary", "kpi"]):
        return (
            f"Dashboard overview: {kpi_text}\n\nAsk me about sales trends, sentiment, forecasts, "
            "forecast accuracy, or top sellers to see specific visualizations.",
            {"show": "table", "scope": "python", "filename": "df_dashboard.csv"},
        )

    # Default: list what the matcher understands, display nothing.
    return (
        f"I can show you various analyses. {kpi_text}\n\n"
        "Try asking about: **sales trends**, **sentiment**, **ARIMA forecasts**, "
        "**forecast accuracy**, **top sellers**, or **dashboard overview**.",
        {"show": "none"},
    )
444
+
445
+
446
+ # =========================================================
447
+ # UI
448
+ # =========================================================
449
+
450
+ ensure_dirs()
451
+
452
def load_css() -> str:
    """Return the contents of style.css next to this file, or '' when absent."""
    css_path = BASE_DIR / "style.css"
    if not css_path.exists():
        return ""
    return css_path.read_text(encoding="utf-8")
455
+
456
+
457
+ with gr.Blocks(title="RX12 Workshop App") as demo:
458
+
459
+ gr.Markdown(
460
+ "# RX12 - Intro to Python and R - Workshop App\n"
461
+ "*The app to integrate the three notebooks in to get a functioning blueprint of the group project's final product*",
462
+ elem_id="escp_title",
463
+ )
464
+
465
+ # ===========================================================
466
+ # TAB 1 -- Pipeline Runner
467
+ # ===========================================================
468
+ with gr.Tab("Pipeline Runner"):
469
+ gr.Markdown(
470
+ )
471
+
472
+ with gr.Row():
473
+ with gr.Column(scale=1):
474
+ btn_nb1 = gr.Button(
475
+ "Step 1: Python Analysis",
476
+ variant="secondary",
477
+ )
478
+ gr.Markdown(
479
+ )
480
+ with gr.Column(scale=1):
481
+ btn_nb2 = gr.Button(
482
+ "Step 2: R Analysis",
483
+ variant="secondary",
484
+ )
485
+ gr.Markdown(
486
+ )
487
+
488
+ with gr.Row():
489
+ btn_all = gr.Button(
490
+ "Run All 2 Steps",
491
+ variant="primary",
492
+ )
493
+
494
+ run_log = gr.Textbox(
495
+ label="Execution Log",
496
+ lines=18,
497
+ max_lines=30,
498
+ interactive=False,
499
+ )
500
+
501
+ btn_nb1.click(run_pythonanalysis, outputs=[run_log])
502
+ btn_nb2.click(run_ranalysis, outputs=[run_log])
503
+ btn_all.click(run_full_pipeline, outputs=[run_log])
504
+
505
+ # ===========================================================
506
+ # TAB 2 -- Results Gallery
507
+ # ===========================================================
508
+ with gr.Tab("Results Gallery"):
509
+ gr.Markdown(
510
+ "### All generated artifacts\n\n"
511
+ "After running the pipeline, click **Refresh** to load all figures and tables. "
512
+ "Figures are shown in the gallery; select a table from the dropdown to inspect it."
513
+ )
514
+
515
+ refresh_btn = gr.Button("Refresh Gallery", variant="primary")
516
+
517
+ gr.Markdown("#### Figures")
518
+ gallery = gr.Gallery(
519
+ label="All Figures (Python + R)",
520
+ columns=2,
521
+ height=480,
522
+ object_fit="contain",
523
+ )
524
+
525
+ gr.Markdown("#### Tables")
526
+ table_dropdown = gr.Dropdown(
527
+ label="Select a table to view",
528
+ choices=[],
529
+ interactive=True,
530
+ )
531
+ table_display = gr.Dataframe(
532
+ label="Table Preview",
533
+ interactive=False,
534
+ )
535
+
536
+ refresh_btn.click(
537
+ refresh_gallery,
538
+ outputs=[gallery, table_dropdown, table_display],
539
+ )
540
+ table_dropdown.change(
541
+ on_table_select,
542
+ inputs=[table_dropdown],
543
+ outputs=[table_display],
544
+ )
545
+
546
+ # ===========================================================
547
+ # TAB 3 -- AI Dashboard
548
+ # ===========================================================
549
+ with gr.Tab('"AI" Dashboard'):
550
+ gr.Markdown(
551
+ "### Ask questions, get visualisations\n\n"
552
+ "Describe what you want to see and the AI will pick the right chart or table. "
553
+ + (
554
+ "*LLM is active.*"
555
+ if LLM_ENABLED
556
+ else "*No API key detected \u2014 using keyword matching. "
557
+ "Set `HF_API_KEY` in Space secrets for full LLM support.*"
558
+ )
559
+ )
560
+
561
+ with gr.Row(equal_height=True):
562
+ with gr.Column(scale=1):
563
+ chatbot = gr.Chatbot(
564
+ label="Conversation",
565
+ height=380,
566
+ )
567
+ user_input = gr.Textbox(
568
+ label="Ask about your data",
569
+ placeholder="e.g. Show me sales trends / What drives revenue? / Compare forecast models",
570
+ lines=1,
571
+ )
572
+ gr.Examples(
573
+ examples=[
574
+ "Show me the sales trends",
575
+ "What does the sentiment look like?",
576
+ "Which titles sell the most?",
577
+ "Show the forecast accuracy comparison",
578
+ "Compare the ARIMA and ETS forecasts",
579
+ "Give me a dashboard overview",
580
+ ],
581
+ inputs=user_input,
582
+ )
583
+
584
+ with gr.Column(scale=1):
585
+ ai_figure = gr.Image(
586
+ label="Visualisation",
587
+ height=350,
588
+ )
589
+ ai_table = gr.Dataframe(
590
+ label="Data Table",
591
+ interactive=False,
592
+ )
593
+
594
+ user_input.submit(
595
+ ai_chat,
596
+ inputs=[user_input, chatbot],
597
+ outputs=[chatbot, user_input, ai_figure, ai_table],
598
+ )
599
+
600
+
601
# Port resolution: explicit PORT wins, then GRADIO_SERVER_PORT, then 7860 (HF Spaces default).
PORT = int(os.environ.get("PORT", os.environ.get("GRADIO_SERVER_PORT", "7860")))

# NOTE(review): css= is normally a gr.Blocks(...) constructor argument, not a
# launch() argument — confirm the installed Gradio version accepts it here.
demo.launch(
    server_name="0.0.0.0",
    server_port=PORT,
    css=load_css(),
    # Allow serving files (figures) from the app directory to the browser.
    allowed_paths=[str(BASE_DIR)],
)