Yulu1 committed on
Commit
02111ce
·
verified ·
1 Parent(s): d0fe910

Upload 12 files

Browse files
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ background_top.png filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Image for a Gradio Space that executes both Python and R notebooks via papermill.
FROM python:3.10-slim

ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Bind Gradio to all interfaces on the HF Spaces default port.
ENV GRADIO_SERVER_NAME=0.0.0.0
ENV GRADIO_SERVER_PORT=7860

# System deps: R + compilers + common R pkg build deps
RUN apt-get update && apt-get install -y --no-install-recommends \
    r-base \
    r-base-dev \
    build-essential \
    curl \
    git \
    libcurl4-openssl-dev \
    libssl-dev \
    libxml2-dev \
    && rm -rf /var/lib/apt/lists/*

# Install required R packages
RUN R -e "install.packages(c('forecast','ggplot2','jsonlite','readr','dplyr','tidyr','stringr','lubridate','broom'), repos='https://cloud.r-project.org')"

WORKDIR /app
COPY . /app

# Python deps (from requirements.txt)
RUN pip install --no-cache-dir -r requirements.txt

# Notebook execution deps
RUN pip install --no-cache-dir notebook ipykernel papermill

# Pre-install packages that the notebooks install via !pip install
# so papermill doesn't waste time or fail on them at runtime:
# datacreation.ipynb: beautifulsoup4 pandas matplotlib seaborn numpy textblob
# pythonanalysis.ipynb: pandas matplotlib seaborn numpy textblob faker transformers vaderSentiment
# Most are already in requirements.txt; add the extras:
RUN pip install --no-cache-dir textblob faker transformers

# Register the Python kernel papermill will target by name.
RUN python -m ipykernel install --user --name python3 --display-name "Python 3"

# R deps for notebook execution via papermill (IRkernel)
RUN R -e "install.packages('IRkernel', repos='https://cloud.r-project.org/')"
RUN R -e "IRkernel::installspec(user = FALSE)"

EXPOSE 7860

CMD ["python", "app.py"]
README.md ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: RX12WorkshopApp
3
+ emoji: 📊
4
+ colorFrom: blue
5
+ colorTo: red
6
+ sdk: docker
7
+ pinned: false
8
+ ---
9
+
10
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,622 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import json
4
+ import time
5
+ import traceback
6
+ from pathlib import Path
7
+ from typing import Dict, Any, List, Optional, Tuple
8
+
9
+ import pandas as pd
10
+ import gradio as gr
11
+ import papermill as pm
12
+
13
+ # Optional LLM (HuggingFace Inference API)
14
+ try:
15
+ from huggingface_hub import InferenceClient
16
+ except Exception:
17
+ InferenceClient = None
18
+
19
+ # =========================================================
20
+ # CONFIG
21
+ # =========================================================
22
+
23
+ BASE_DIR = Path(__file__).resolve().parent
24
+
25
+ NB1 = os.environ.get("NB1", "datacreation.ipynb").strip()
26
+ NB2 = os.environ.get("NB2", "pythonanalysis.ipynb").strip()
27
+ NB3 = os.environ.get("NB3", "ranalysis.ipynb").strip()
28
+
29
+ RUNS_DIR = BASE_DIR / "runs"
30
+ ART_DIR = BASE_DIR / "artifacts"
31
+ PY_FIG_DIR = ART_DIR / "py" / "figures"
32
+ PY_TAB_DIR = ART_DIR / "py" / "tables"
33
+ R_FIG_DIR = ART_DIR / "r" / "figures"
34
+ R_TAB_DIR = ART_DIR / "r" / "tables"
35
+
36
+ PAPERMILL_TIMEOUT = int(os.environ.get("PAPERMILL_TIMEOUT", "1800"))
37
+ MAX_PREVIEW_ROWS = int(os.environ.get("MAX_FILE_PREVIEW_ROWS", "50"))
38
+ MAX_LOG_CHARS = int(os.environ.get("MAX_LOG_CHARS", "8000"))
39
+
40
+ HF_API_KEY = os.environ.get("HF_API_KEY", "").strip()
41
+ MODEL_NAME = os.environ.get("MODEL_NAME", "deepseek-ai/DeepSeek-R1").strip()
42
+ HF_PROVIDER = os.environ.get("HF_PROVIDER", "novita").strip()
43
+
44
+ LLM_ENABLED = bool(HF_API_KEY) and InferenceClient is not None
45
+ llm_client = (
46
+ InferenceClient(provider=HF_PROVIDER, api_key=HF_API_KEY)
47
+ if LLM_ENABLED
48
+ else None
49
+ )
50
+
51
+ # =========================================================
52
+ # HELPERS
53
+ # =========================================================
54
+
55
def ensure_dirs():
    """Create every run/artifact directory the app writes to (idempotent)."""
    required = (RUNS_DIR, ART_DIR, PY_FIG_DIR, PY_TAB_DIR, R_FIG_DIR, R_TAB_DIR)
    for directory in required:
        directory.mkdir(parents=True, exist_ok=True)
58
+
59
def stamp():
    """Return a filesystem-friendly timestamp such as '20240131-235959'."""
    return time.strftime("%Y%m%d-%H%M%S")
61
+
62
def tail(text: str, n: int = MAX_LOG_CHARS) -> str:
    """Return at most the last *n* characters of *text* (None becomes '')."""
    safe = text or ""
    return safe[-n:]
64
+
65
+ def _ls(dir_path: Path, exts: Tuple[str, ...]) -> List[str]:
66
+ if not dir_path.is_dir():
67
+ return []
68
+ return sorted(p.name for p in dir_path.iterdir() if p.is_file() and p.suffix.lower() in exts)
69
+
70
def _read_csv(path: Path) -> pd.DataFrame:
    """Load a capped preview of *path* (first MAX_PREVIEW_ROWS rows only)."""
    return pd.read_csv(path, nrows=MAX_PREVIEW_ROWS)
72
+
73
+ def _read_json(path: Path):
74
+ with path.open(encoding="utf-8") as f:
75
+ return json.load(f)
76
+
77
def artifacts_index() -> Dict[str, Any]:
    """Catalogue the generated artifacts, grouped by pipeline ('python' / 'r').

    Each scope maps to {"figures": [...], "tables": [...]} of file names.
    """
    image_exts = (".png", ".jpg", ".jpeg")
    table_exts = (".csv", ".json")
    index: Dict[str, Any] = {}
    for scope, fig_dir, tab_dir in (
        ("python", PY_FIG_DIR, PY_TAB_DIR),
        ("r", R_FIG_DIR, R_TAB_DIR),
    ):
        index[scope] = {
            "figures": _ls(fig_dir, image_exts),
            "tables": _ls(tab_dir, table_exts),
        }
    return index
88
+
89
+ # =========================================================
90
+ # PIPELINE RUNNERS
91
+ # =========================================================
92
+
93
def run_notebook(nb_name: str) -> str:
    """Execute *nb_name* with papermill, saving an output copy under runs/.

    Returns a short status string; execution failures propagate as whatever
    papermill raises (callers wrap this call in try/except).
    """
    ensure_dirs()
    source = BASE_DIR / nb_name
    if not source.exists():
        return f"ERROR: {nb_name} not found."
    # Timestamped copy so successive runs never clobber each other.
    target = RUNS_DIR / f"run_{stamp()}_{nb_name}"
    pm.execute_notebook(
        input_path=str(source),
        output_path=str(target),
        cwd=str(BASE_DIR),
        log_output=True,
        progress_bar=False,
        request_save_on_cell_execute=True,
        execution_timeout=PAPERMILL_TIMEOUT,
    )
    return f"Executed {nb_name}"
109
+
110
+
111
def run_datacreation() -> str:
    """Run the data-creation notebook and report the CSVs now present in /app."""
    try:
        status = run_notebook(NB1)
        csv_names = sorted(f.name for f in BASE_DIR.glob("*.csv"))
        listing = "\n".join(f" - {name}" for name in csv_names)
        return f"OK {status}\n\nCSVs now in /app:\n" + listing
    except Exception as exc:
        return f"FAILED {exc}\n\n{traceback.format_exc()[-2000:]}"
118
+
119
+
120
def run_pythonanalysis() -> str:
    """Run the Python analysis notebook and summarise the artifacts it produced."""
    try:
        status = run_notebook(NB2)
        produced = artifacts_index()["python"]
        fig_list = ", ".join(produced["figures"]) or "(none)"
        tab_list = ", ".join(produced["tables"]) or "(none)"
        return (
            f"OK {status}\n\n"
            f"Figures: {fig_list}\n"
            f"Tables: {tab_list}"
        )
    except Exception as exc:
        return f"FAILED {exc}\n\n{traceback.format_exc()[-2000:]}"
133
+
134
+
135
def run_r() -> str:
    """Run the R analysis notebook and summarise the artifacts it produced."""
    try:
        status = run_notebook(NB3)
        produced = artifacts_index()["r"]
        fig_list = ", ".join(produced["figures"]) or "(none)"
        tab_list = ", ".join(produced["tables"]) or "(none)"
        return (
            f"OK {status}\n\n"
            f"Figures: {fig_list}\n"
            f"Tables: {tab_list}"
        )
    except Exception as exc:
        return f"FAILED {exc}\n\n{traceback.format_exc()[-2000:]}"
148
+
149
+
150
def run_full_pipeline() -> str:
    """Run all three notebooks in order and return one combined log string."""
    divider = "=" * 50
    steps = (
        ("STEP 1/3: Data Creation (web scraping + synthetic data)", run_datacreation),
        ("STEP 2/3: Python Analysis (sentiment, ARIMA, dashboard)", run_pythonanalysis),
        ("STEP 3/3: R Analysis (ETS/ARIMA forecasting)", run_r),
    )
    chunks: List[str] = []
    for heading, step in steps:
        chunks.extend([divider, heading, divider, step(), ""])
    chunks.pop()  # no blank line after the final step, matching the original log shape
    return "\n".join(chunks)
167
+
168
+
169
+ # =========================================================
170
+ # GALLERY LOADERS
171
+ # =========================================================
172
+
173
def _load_all_figures() -> List[Tuple[str, str]]:
    """Return (filepath, caption) pairs for every pipeline PNG, Python ones first."""
    def _caption(prefix: str, path: Path) -> str:
        # e.g. "Python | Sales Trends Sampled Titles"
        return f"{prefix} | {path.stem.replace('_', ' ').title()}"

    pairs = [(str(p), _caption("Python", p)) for p in sorted(PY_FIG_DIR.glob("*.png"))]
    pairs += [(str(p), _caption("R", p)) for p in sorted(R_FIG_DIR.glob("*.png"))]
    return pairs
181
+
182
+
183
def _load_table_safe(path: Path) -> pd.DataFrame:
    """Read a CSV/JSON artifact as a DataFrame; failures become a one-row error table."""
    try:
        if path.suffix != ".json":
            return _read_csv(path)
        payload = _read_json(path)
        if isinstance(payload, dict):
            # A single JSON object becomes a one-row frame.
            return pd.DataFrame([payload])
        return pd.DataFrame(payload)
    except Exception as exc:
        return pd.DataFrame([{"error": str(exc)}])
193
+
194
+
195
def refresh_gallery():
    """Refresh handler for the Gallery tab.

    Returns (gallery items, dropdown update, preview DataFrame); the preview
    shows the first available table, or an empty frame when none exist.
    """
    figures = _load_all_figures()
    index = artifacts_index()

    # "scope/name" labels for the dropdown, Python tables listed before R.
    choices = [
        f"{scope}/{name}"
        for scope in ("python", "r")
        for name in index[scope]["tables"]
    ]

    preview = pd.DataFrame()
    selected = None
    if choices:
        selected = choices[0]
        scope, filename = selected.split("/", 1)
        table_dir = PY_TAB_DIR if scope == "python" else R_TAB_DIR
        preview = _load_table_safe(table_dir / filename)

    return (
        figures if figures else [],
        gr.update(choices=choices, value=selected),
        preview,
    )
218
+
219
+
220
def on_table_select(choice: str):
    """Load the table picked from the 'scope/name' dropdown value.

    Invalid selections produce a one-row hint/error DataFrame instead of raising.
    """
    if not choice or "/" not in choice:
        return pd.DataFrame([{"hint": "Select a table above."}])
    scope, name = choice.split("/", 1)
    table_dirs = {"python": PY_TAB_DIR, "r": R_TAB_DIR}
    base = table_dirs.get(scope)
    if base is None:
        return pd.DataFrame([{"error": f"Unknown scope: {scope}"}])
    target = base / name
    if not target.exists():
        return pd.DataFrame([{"error": f"File not found: {target}"}])
    return _load_table_safe(target)
231
+
232
+
233
+ # =========================================================
234
+ # KPI LOADER
235
+ # =========================================================
236
+
237
def load_kpis() -> Dict[str, Any]:
    """Return the KPI summary JSON if the pipeline produced one, else {}.

    Checks the tables dir first, then the figures dir (legacy location);
    an unreadable candidate is skipped rather than raising.
    """
    candidates = (PY_TAB_DIR / "kpis.json", PY_FIG_DIR / "kpis.json")
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return _read_json(candidate)
        except Exception:
            continue  # corrupt/unreadable file: try the next location
    return {}
245
+
246
+
247
+ # =========================================================
248
+ # AI DASHBOARD (Tab 3) -- LLM picks what to display
249
+ # =========================================================
250
+
251
+ DASHBOARD_SYSTEM = """You are an AI dashboard assistant for a book-sales analytics app.
252
+ The user asks questions or requests about their data. You have access to pre-computed
253
+ artifacts from Python and R analysis pipelines.
254
+
255
+ AVAILABLE ARTIFACTS (only reference ones that exist):
256
+ {artifacts_json}
257
+
258
+ KPI SUMMARY: {kpis_json}
259
+
260
+ YOUR JOB:
261
+ 1. Answer the user's question conversationally using the KPIs and your knowledge of the artifacts.
262
+ 2. At the END of your response, output a JSON block (fenced with ```json ... ```) that tells
263
+ the dashboard which artifact to display. The JSON must have this shape:
264
+ {{"show": "figure"|"table"|"none", "scope": "python"|"r", "filename": "..."}}
265
+
266
+ - Use "show": "figure" to display a chart image.
267
+ - Use "show": "table" to display a CSV/JSON table.
268
+ - Use "show": "none" if no artifact is relevant.
269
+
270
+ RULES:
271
+ - If the user asks about sales trends or forecasting by title, show sales_trends or arima figures.
272
+ - If the user asks about sentiment, show sentiment figure or sentiment_counts table.
273
+ - If the user asks about R regression, the R notebook focuses on forecasting, show accuracy_table.csv.
274
+ - If the user asks about forecast accuracy or model comparison, show accuracy_table.csv or forecast_compare.png.
275
+ - If the user asks about top sellers, show top_titles_by_units_sold.csv.
276
+ - If the user asks a general data question, pick the most relevant artifact.
277
+ - Keep your answer concise (2-4 sentences), then the JSON block.
278
+ """
279
+
280
+ JSON_BLOCK_RE = re.compile(r"```json\s*(\{.*?\})\s*```", re.DOTALL)
281
+ FALLBACK_JSON_RE = re.compile(r"\{[^{}]*\"show\"[^{}]*\}", re.DOTALL)
282
+
283
+
284
def _parse_display_directive(text: str) -> Dict[str, str]:
    """Extract the display-directive JSON the LLM appends to its reply.

    Tries the fenced ```json block first, then any bare {..."show"...}
    object; returns {"show": "none"} when neither is present or parseable.
    """
    for pattern, group_idx in ((JSON_BLOCK_RE, 1), (FALLBACK_JSON_RE, 0)):
        match = pattern.search(text)
        if not match:
            continue
        try:
            return json.loads(match.group(group_idx))
        except json.JSONDecodeError:
            continue
    return {"show": "none"}
298
+
299
+
300
def _clean_response(text: str) -> str:
    """Remove the fenced JSON directive so users only see the prose answer."""
    without_directive = JSON_BLOCK_RE.sub("", text)
    return without_directive.strip()
303
+
304
+
305
def ai_chat(user_msg: str, history: list):
    """Chat handler for the AI Dashboard tab.

    Returns a 4-tuple matching the Gradio outputs:
    (updated history, cleared textbox value, figure path or None,
    DataFrame or None).

    NOTE(review): history entries are role/content dicts, which requires the
    Chatbot component to use the "messages" format — confirm against the
    gr.Chatbot construction in the UI.
    """
    # Empty/whitespace input: leave everything unchanged, just clear the box.
    if not user_msg or not user_msg.strip():
        return history, "", None, None

    idx = artifacts_index()
    kpis = load_kpis()

    if not LLM_ENABLED:
        # No API key / no huggingface_hub: deterministic keyword matching.
        reply, directive = _keyword_fallback(user_msg, idx, kpis)
    else:
        # Inject the live artifact catalogue and KPIs into the system prompt
        # so the model only references files that actually exist.
        system = DASHBOARD_SYSTEM.format(
            artifacts_json=json.dumps(idx, indent=2),
            kpis_json=json.dumps(kpis, indent=2) if kpis else "(no KPIs yet, run the pipeline first)",
        )
        msgs = [{"role": "system", "content": system}]
        # Keep only the last 6 turns to bound prompt size.
        for entry in (history or [])[-6:]:
            msgs.append(entry)
        msgs.append({"role": "user", "content": user_msg})

        try:
            r = llm_client.chat_completion(
                model=MODEL_NAME,
                messages=msgs,
                temperature=0.3,
                max_tokens=600,
                stream=False,
            )
            # chat_completion may return a dict or a typed response object
            # depending on huggingface_hub version; handle both shapes.
            raw = (
                r["choices"][0]["message"]["content"]
                if isinstance(r, dict)
                else r.choices[0].message.content
            )
            directive = _parse_display_directive(raw)
            reply = _clean_response(raw)
        except Exception as e:
            # Any LLM failure degrades gracefully to the keyword matcher.
            reply = f"LLM error: {e}. Falling back to keyword matching."
            reply_fb, directive = _keyword_fallback(user_msg, idx, kpis)
            reply += "\n\n" + reply_fb

    # Resolve artifact paths
    fig_out = None
    tab_out = None
    show = directive.get("show", "none")
    scope = directive.get("scope", "")
    fname = directive.get("filename", "")

    # The directive names a figure: resolve it against the scope's figure dir,
    # and surface a visible note in the reply if the file does not exist.
    if show == "figure" and scope and fname:
        base = {"python": PY_FIG_DIR, "r": R_FIG_DIR}.get(scope)
        if base and (base / fname).exists():
            fig_out = str(base / fname)
        else:
            reply += f"\n\n*(Could not find figure: {scope}/{fname})*"

    # Same resolution for tables, loaded defensively into a DataFrame.
    if show == "table" and scope and fname:
        base = {"python": PY_TAB_DIR, "r": R_TAB_DIR}.get(scope)
        if base and (base / fname).exists():
            tab_out = _load_table_safe(base / fname)
        else:
            reply += f"\n\n*(Could not find table: {scope}/{fname})*"

    # Append this exchange in openai-style message format.
    new_history = (history or []) + [
        {"role": "user", "content": user_msg},
        {"role": "assistant", "content": reply},
    ]

    return new_history, "", fig_out, tab_out
372
+
373
+
374
def _keyword_fallback(msg: str, idx: Dict, kpis: Dict) -> Tuple[str, Dict]:
    """Simple keyword matcher when LLM is unavailable.

    Returns (reply text, display directive) in the same shape the LLM path
    produces. Branches are checked in order; the first match wins.

    NOTE(review): matching is plain substring search on the lowercased
    message, so e.g. "lm" also matches inside words like "film" — confirm
    whether stricter word-boundary matching is wanted.
    """
    msg_lower = msg.lower()

    # Nothing generated yet in either pipeline: point the user at Tab 1.
    if not any(idx[s]["figures"] or idx[s]["tables"] for s in ("python", "r")):
        return (
            "No artifacts found yet. Please run the pipeline first (Tab 1), "
            "then come back here to explore the results.",
            {"show": "none"},
        )

    # Short KPI sentence appended to most replies (empty when no kpis.json).
    kpi_text = ""
    if kpis:
        total = kpis.get("total_units_sold", 0)
        kpi_text = (
            f"Quick summary: **{kpis.get('n_titles', '?')}** book titles across "
            f"**{kpis.get('n_months', '?')}** months, with **{total:,.0f}** total units sold."
        )

    if any(w in msg_lower for w in ["trend", "sales trend", "monthly sale"]):
        return (
            f"Here are the sales trends for sampled titles. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "sales_trends_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["sentiment", "review", "positive", "negative"]):
        return (
            f"Here is the sentiment distribution across sampled book titles. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "sentiment_distribution_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["arima", "forecast", "predict"]):
        # Prefer the R comparison plot when the user asks to compare models
        # and that figure actually exists; otherwise fall back to Python ARIMA.
        if "compar" in msg_lower or "ets" in msg_lower or "accuracy" in msg_lower:
            if "forecast_compare.png" in idx.get("r", {}).get("figures", []):
                return (
                    "Here is the ARIMA+Fourier vs ETS forecast comparison from the R analysis.",
                    {"show": "figure", "scope": "r", "filename": "forecast_compare.png"},
                )
        return (
            f"Here are the ARIMA forecasts for sampled titles from the Python analysis. {kpi_text}",
            {"show": "figure", "scope": "python", "filename": "arima_forecasts_sampled_titles.png"},
        )

    if any(w in msg_lower for w in ["regression", "lm", "coefficient", "price effect", "rating effect"]):
        return (
            "The R notebook focuses on forecasting rather than regression. "
            "Here is the forecast accuracy comparison instead.",
            {"show": "table", "scope": "r", "filename": "accuracy_table.csv"},
        )

    if any(w in msg_lower for w in ["top", "best sell", "popular", "rank"]):
        return (
            f"Here are the top-selling titles by units sold. {kpi_text}",
            {"show": "table", "scope": "python", "filename": "top_titles_by_units_sold.csv"},
        )

    if any(w in msg_lower for w in ["accuracy", "benchmark", "rmse", "mape"]):
        return (
            "Here is the forecast accuracy comparison (ARIMA+Fourier vs ETS) from the R analysis.",
            {"show": "table", "scope": "r", "filename": "accuracy_table.csv"},
        )

    # Note: when the comparison figure is missing, this branch has no return
    # and deliberately falls through to the checks below.
    if any(w in msg_lower for w in ["r analysis", "r output", "r result"]):
        if "forecast_compare.png" in idx.get("r", {}).get("figures", []):
            return (
                "Here is the main R output: forecast model comparison plot.",
                {"show": "figure", "scope": "r", "filename": "forecast_compare.png"},
            )

    if any(w in msg_lower for w in ["dashboard", "overview", "summary", "kpi"]):
        return (
            f"Dashboard overview: {kpi_text}\n\nAsk me about sales trends, sentiment, forecasts, "
            "forecast accuracy, or top sellers to see specific visualizations.",
            {"show": "table", "scope": "python", "filename": "df_dashboard.csv"},
        )

    # Default
    return (
        f"I can show you various analyses. {kpi_text}\n\n"
        "Try asking about: **sales trends**, **sentiment**, **ARIMA forecasts**, "
        "**forecast accuracy**, **top sellers**, or **dashboard overview**.",
        {"show": "none"},
    )
457
+
458
+
459
+ # =========================================================
460
+ # UI
461
+ # =========================================================
462
+
463
+ ensure_dirs()
464
+
465
def load_css() -> str:
    """Return the contents of style.css next to this file, or '' if absent."""
    stylesheet = BASE_DIR / "style.css"
    if not stylesheet.exists():
        return ""
    return stylesheet.read_text(encoding="utf-8")
468
+
469
+
470
# Build the three-tab Gradio UI and launch it.
#
# Fixes:
# - `Blocks.launch()` has no `css` keyword; custom CSS must be passed to the
#   `gr.Blocks(...)` constructor, otherwise launch raises TypeError at startup.
# - `ai_chat` stores history as openai-style {"role", "content"} dicts, so the
#   Chatbot component must be created with type="messages".
with gr.Blocks(title="RX12 Workshop App", css=load_css()) as demo:

    gr.Markdown(
        "# RX12 - Intro to Python and R - Workshop App\n"
        "*The app to integrate the three notebooks in to get a functioning blueprint of the group project's final product*",
        elem_id="escp_title",
    )

    # ===========================================================
    # TAB 1 -- Pipeline Runner
    # ===========================================================
    with gr.Tab("Pipeline Runner"):
        gr.Markdown(  # placeholder kept for layout/spacing
        )

        with gr.Row():
            with gr.Column(scale=1):
                btn_nb1 = gr.Button(
                    "Step 1: Data Creation",
                    variant="secondary",
                )
                gr.Markdown(  # placeholder kept for layout/spacing
                )
            with gr.Column(scale=1):
                btn_nb2 = gr.Button(
                    "Step 2a: Python Analysis",
                    variant="secondary",
                )
                gr.Markdown(  # placeholder kept for layout/spacing
                )
            with gr.Column(scale=1):
                btn_r = gr.Button(
                    "Step 2b: R Analysis",
                    variant="secondary",
                )
                gr.Markdown(  # placeholder kept for layout/spacing
                )

        with gr.Row():
            btn_all = gr.Button(
                "Run All 3 Steps",
                variant="primary",
            )

        run_log = gr.Textbox(
            label="Execution Log",
            lines=18,
            max_lines=30,
            interactive=False,
        )

        # All four buttons write their status into the shared log box.
        btn_nb1.click(run_datacreation, outputs=[run_log])
        btn_nb2.click(run_pythonanalysis, outputs=[run_log])
        btn_r.click(run_r, outputs=[run_log])
        btn_all.click(run_full_pipeline, outputs=[run_log])

    # ===========================================================
    # TAB 2 -- Results Gallery
    # ===========================================================
    with gr.Tab("Results Gallery"):
        gr.Markdown(
            "### All generated artifacts\n\n"
            "After running the pipeline, click **Refresh** to load all figures and tables. "
            "Figures are shown in the gallery; select a table from the dropdown to inspect it."
        )

        refresh_btn = gr.Button("Refresh Gallery", variant="primary")

        gr.Markdown("#### Figures")
        gallery = gr.Gallery(
            label="All Figures (Python + R)",
            columns=2,
            height=480,
            object_fit="contain",
        )

        gr.Markdown("#### Tables")
        table_dropdown = gr.Dropdown(
            label="Select a table to view",
            choices=[],  # populated by refresh_gallery
            interactive=True,
        )
        table_display = gr.Dataframe(
            label="Table Preview",
            interactive=False,
        )

        refresh_btn.click(
            refresh_gallery,
            outputs=[gallery, table_dropdown, table_display],
        )
        table_dropdown.change(
            on_table_select,
            inputs=[table_dropdown],
            outputs=[table_display],
        )

    # ===========================================================
    # TAB 3 -- AI Dashboard
    # ===========================================================
    with gr.Tab('"AI" Dashboard'):
        gr.Markdown(
            "### Ask questions, get visualisations\n\n"
            "Describe what you want to see and the AI will pick the right chart or table. "
            + (
                "*LLM is active.*"
                if LLM_ENABLED
                else "*No API key detected \u2014 using keyword matching. "
                "Set `HF_API_KEY` in Space secrets for full LLM support.*"
            )
        )

        with gr.Row(equal_height=True):
            with gr.Column(scale=1):
                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=380,
                    # ai_chat appends {"role": ..., "content": ...} dicts.
                    type="messages",
                )
                user_input = gr.Textbox(
                    label="Ask about your data",
                    placeholder="e.g. Show me sales trends / What drives revenue? / Compare forecast models",
                    lines=1,
                )
                gr.Examples(
                    examples=[
                        "Show me the sales trends",
                        "What does the sentiment look like?",
                        "Which titles sell the most?",
                        "Show the forecast accuracy comparison",
                        "Compare the ARIMA and ETS forecasts",
                        "Give me a dashboard overview",
                    ],
                    inputs=user_input,
                )

            with gr.Column(scale=1):
                ai_figure = gr.Image(
                    label="Visualisation",
                    height=350,
                )
                ai_table = gr.Dataframe(
                    label="Data Table",
                    interactive=False,
                )

        user_input.submit(
            ai_chat,
            inputs=[user_input, chatbot],
            outputs=[chatbot, user_input, ai_figure, ai_table],
        )


# allowed_paths lets Gradio serve artifact files (figures) from the app dir.
demo.launch(allowed_paths=[str(BASE_DIR)])
background_bottom.png ADDED
background_mid.png ADDED
background_top.png ADDED

Git LFS Details

  • SHA256: 27e963d20dbb7ae88368fb527d475c85ef0de3df63d8f0d7d5e2af7403a5b365
  • Pointer size: 131 Bytes
  • Size of remote file: 726 kB
datacreation.ipynb ADDED
@@ -0,0 +1,1087 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "4ba6aba8"
7
+ },
8
+ "source": [
9
+ "# 🤖 **Data Collection, Creation, Storage, and Processing**\n"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "jpASMyIQMaAq"
16
+ },
17
+ "source": [
18
+ "## **1.** 📦 Install required packages"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": 1,
24
+ "metadata": {
25
+ "colab": {
26
+ "base_uri": "https://localhost:8080/"
27
+ },
28
+ "id": "f48c8f8c",
29
+ "outputId": "13d0dd5e-82c6-489f-b1f0-e970186a4eb7"
30
+ },
31
+ "outputs": [
32
+ {
33
+ "output_type": "stream",
34
+ "name": "stdout",
35
+ "text": [
36
+ "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.12/dist-packages (4.13.5)\n",
37
+ "Requirement already satisfied: pandas in /usr/local/lib/python3.12/dist-packages (2.2.2)\n",
38
+ "Requirement already satisfied: matplotlib in /usr/local/lib/python3.12/dist-packages (3.10.0)\n",
39
+ "Requirement already satisfied: seaborn in /usr/local/lib/python3.12/dist-packages (0.13.2)\n",
40
+ "Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (2.0.2)\n",
41
+ "Requirement already satisfied: textblob in /usr/local/lib/python3.12/dist-packages (0.19.0)\n",
42
+ "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (2.8.3)\n",
43
+ "Requirement already satisfied: typing-extensions>=4.0.0 in /usr/local/lib/python3.12/dist-packages (from beautifulsoup4) (4.15.0)\n",
44
+ "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.12/dist-packages (from pandas) (2.9.0.post0)\n",
45
+ "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.2)\n",
46
+ "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.12/dist-packages (from pandas) (2025.3)\n",
47
+ "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.3.3)\n",
48
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (0.12.1)\n",
49
+ "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (4.61.1)\n",
50
+ "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (1.4.9)\n",
51
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (26.0)\n",
52
+ "Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (11.3.0)\n",
53
+ "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib) (3.3.2)\n",
54
+ "Requirement already satisfied: nltk>=3.9 in /usr/local/lib/python3.12/dist-packages (from textblob) (3.9.1)\n",
55
+ "Requirement already satisfied: click in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (8.3.1)\n",
56
+ "Requirement already satisfied: joblib in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (1.5.3)\n",
57
+ "Requirement already satisfied: regex>=2021.8.3 in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (2025.11.3)\n",
58
+ "Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (from nltk>=3.9->textblob) (4.67.3)\n",
59
+ "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)\n"
60
+ ]
61
+ }
62
+ ],
63
+ "source": [
64
+ "!pip install beautifulsoup4 pandas matplotlib seaborn numpy textblob"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "markdown",
69
+ "metadata": {
70
+ "id": "lquNYCbfL9IM"
71
+ },
72
+ "source": [
73
+ "## **2.** ⛏ Web-scrape all book titles, prices, and ratings from books.toscrape.com"
74
+ ]
75
+ },
76
+ {
77
+ "cell_type": "markdown",
78
+ "metadata": {
79
+ "id": "0IWuNpxxYDJF"
80
+ },
81
+ "source": [
82
+ "### *a. Initial setup*\n",
83
+ "Define the base url of the website you will scrape as well as how and what you will scrape"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": 2,
89
+ "metadata": {
90
+ "id": "91d52125"
91
+ },
92
+ "outputs": [],
93
+ "source": [
94
+ "import requests\n",
95
+ "from bs4 import BeautifulSoup\n",
96
+ "import pandas as pd\n",
97
+ "import time\n",
98
+ "\n",
99
+ "base_url = \"https://books.toscrape.com/catalogue/page-{}.html\"\n",
100
+ "headers = {\"User-Agent\": \"Mozilla/5.0\"}\n",
101
+ "\n",
102
+ "titles, prices, ratings = [], [], []"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "markdown",
107
+ "metadata": {
108
+ "id": "oCdTsin2Yfp3"
109
+ },
110
+ "source": [
111
+ "### *b. Fill titles, prices, and ratings from the web pages*"
112
+ ]
113
+ },
114
+ {
115
+ "cell_type": "code",
116
+ "execution_count": 3,
117
+ "metadata": {
118
+ "id": "xqO5Y3dnYhxt"
119
+ },
120
+ "outputs": [],
121
+ "source": [
122
+ "# Loop through all 50 pages\n",
123
+ "for page in range(1, 51):\n",
124
+ " url = base_url.format(page)\n",
125
+ " response = requests.get(url, headers=headers)\n",
126
+ " soup = BeautifulSoup(response.content, \"html.parser\")\n",
127
+ " books = soup.find_all(\"article\", class_=\"product_pod\")\n",
128
+ "\n",
129
+ " for book in books:\n",
130
+ " titles.append(book.h3.a[\"title\"])\n",
131
+ " prices.append(float(book.find(\"p\", class_=\"price_color\").text[1:]))\n",
132
+ " ratings.append(book.p.get(\"class\")[1])\n",
133
+ "\n",
134
+ " time.sleep(0.5) # polite scraping delay"
135
+ ]
136
+ },
137
+ {
138
+ "cell_type": "markdown",
139
+ "metadata": {
140
+ "id": "T0TOeRC4Yrnn"
141
+ },
142
+ "source": [
143
+ "### *c. ✋🏻🛑⛔️ Create a dataframe df_books that contains the now complete \"title\", \"price\", and \"rating\" objects*"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "code",
148
+ "execution_count": 4,
149
+ "metadata": {
150
+ "id": "l5FkkNhUYTHh"
151
+ },
152
+ "outputs": [],
153
+ "source": [
154
+ "# 🗂️ Create DataFrame\n",
155
+ "df_books = pd.DataFrame({\n",
156
+ " \"title\": titles,\n",
157
+ " \"price\": prices,\n",
158
+ " \"rating\": ratings\n",
159
+ "})"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "markdown",
164
+ "metadata": {
165
+ "id": "duI5dv3CZYvF"
166
+ },
167
+ "source": [
168
+ "### *d. Save web-scraped dataframe either as a CSV or Excel file*"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "code",
173
+ "execution_count": 5,
174
+ "metadata": {
175
+ "id": "lC1U_YHtZifh"
176
+ },
177
+ "outputs": [],
178
+ "source": [
179
+ "# 💾 Save to CSV\n",
180
+ "df_books.to_csv(\"books_data.csv\", index=False)\n",
181
+ "\n",
182
+ "# 💾 Or save to Excel\n",
183
+ "# df_books.to_excel(\"books_data.xlsx\", index=False)"
184
+ ]
185
+ },
186
+ {
187
+ "cell_type": "markdown",
188
+ "metadata": {
189
+ "id": "qMjRKMBQZlJi"
190
+ },
191
+ "source": [
192
+ "### *e. ✋🏻🛑⛔️ View first few lines*"
193
+ ]
194
+ },
195
+ {
196
+ "cell_type": "code",
197
+ "execution_count": 6,
198
+ "metadata": {
199
+ "colab": {
200
+ "base_uri": "https://localhost:8080/",
201
+ "height": 0
202
+ },
203
+ "id": "O_wIvTxYZqCK",
204
+ "outputId": "349b36b0-c008-4fd5-d4a4-dba38ae18337"
205
+ },
206
+ "outputs": [
207
+ {
208
+ "output_type": "execute_result",
209
+ "data": {
210
+ "text/plain": [
211
+ " title price rating\n",
212
+ "0 A Light in the Attic 51.77 Three\n",
213
+ "1 Tipping the Velvet 53.74 One\n",
214
+ "2 Soumission 50.10 One\n",
215
+ "3 Sharp Objects 47.82 Four\n",
216
+ "4 Sapiens: A Brief History of Humankind 54.23 Five"
217
+ ],
218
+ "text/html": [
219
+ "\n",
220
+ " <div id=\"df-04c87660-4415-45e9-ad3b-3fa19d9402c2\" class=\"colab-df-container\">\n",
221
+ " <div>\n",
222
+ "<style scoped>\n",
223
+ " .dataframe tbody tr th:only-of-type {\n",
224
+ " vertical-align: middle;\n",
225
+ " }\n",
226
+ "\n",
227
+ " .dataframe tbody tr th {\n",
228
+ " vertical-align: top;\n",
229
+ " }\n",
230
+ "\n",
231
+ " .dataframe thead th {\n",
232
+ " text-align: right;\n",
233
+ " }\n",
234
+ "</style>\n",
235
+ "<table border=\"1\" class=\"dataframe\">\n",
236
+ " <thead>\n",
237
+ " <tr style=\"text-align: right;\">\n",
238
+ " <th></th>\n",
239
+ " <th>title</th>\n",
240
+ " <th>price</th>\n",
241
+ " <th>rating</th>\n",
242
+ " </tr>\n",
243
+ " </thead>\n",
244
+ " <tbody>\n",
245
+ " <tr>\n",
246
+ " <th>0</th>\n",
247
+ " <td>A Light in the Attic</td>\n",
248
+ " <td>51.77</td>\n",
249
+ " <td>Three</td>\n",
250
+ " </tr>\n",
251
+ " <tr>\n",
252
+ " <th>1</th>\n",
253
+ " <td>Tipping the Velvet</td>\n",
254
+ " <td>53.74</td>\n",
255
+ " <td>One</td>\n",
256
+ " </tr>\n",
257
+ " <tr>\n",
258
+ " <th>2</th>\n",
259
+ " <td>Soumission</td>\n",
260
+ " <td>50.10</td>\n",
261
+ " <td>One</td>\n",
262
+ " </tr>\n",
263
+ " <tr>\n",
264
+ " <th>3</th>\n",
265
+ " <td>Sharp Objects</td>\n",
266
+ " <td>47.82</td>\n",
267
+ " <td>Four</td>\n",
268
+ " </tr>\n",
269
+ " <tr>\n",
270
+ " <th>4</th>\n",
271
+ " <td>Sapiens: A Brief History of Humankind</td>\n",
272
+ " <td>54.23</td>\n",
273
+ " <td>Five</td>\n",
274
+ " </tr>\n",
275
+ " </tbody>\n",
276
+ "</table>\n",
277
+ "</div>\n",
278
+ " <div class=\"colab-df-buttons\">\n",
279
+ "\n",
280
+ " <div class=\"colab-df-container\">\n",
281
+ " <button class=\"colab-df-convert\" onclick=\"convertToInteractive('df-04c87660-4415-45e9-ad3b-3fa19d9402c2')\"\n",
282
+ " title=\"Convert this dataframe to an interactive table.\"\n",
283
+ " style=\"display:none;\">\n",
284
+ "\n",
285
+ " <svg xmlns=\"http://www.w3.org/2000/svg\" height=\"24px\" viewBox=\"0 -960 960 960\">\n",
286
+ " <path d=\"M120-120v-720h720v720H120Zm60-500h600v-160H180v160Zm220 220h160v-160H400v160Zm0 220h160v-160H400v160ZM180-400h160v-160H180v160Zm440 0h160v-160H620v160ZM180-180h160v-160H180v160Zm440 0h160v-160H620v160Z\"/>\n",
287
+ " </svg>\n",
288
+ " </button>\n",
289
+ "\n",
290
+ " <style>\n",
291
+ " .colab-df-container {\n",
292
+ " display:flex;\n",
293
+ " gap: 12px;\n",
294
+ " }\n",
295
+ "\n",
296
+ " .colab-df-convert {\n",
297
+ " background-color: #E8F0FE;\n",
298
+ " border: none;\n",
299
+ " border-radius: 50%;\n",
300
+ " cursor: pointer;\n",
301
+ " display: none;\n",
302
+ " fill: #1967D2;\n",
303
+ " height: 32px;\n",
304
+ " padding: 0 0 0 0;\n",
305
+ " width: 32px;\n",
306
+ " }\n",
307
+ "\n",
308
+ " .colab-df-convert:hover {\n",
309
+ " background-color: #E2EBFA;\n",
310
+ " box-shadow: 0px 1px 2px rgba(60, 64, 67, 0.3), 0px 1px 3px 1px rgba(60, 64, 67, 0.15);\n",
311
+ " fill: #174EA6;\n",
312
+ " }\n",
313
+ "\n",
314
+ " .colab-df-buttons div {\n",
315
+ " margin-bottom: 4px;\n",
316
+ " }\n",
317
+ "\n",
318
+ " [theme=dark] .colab-df-convert {\n",
319
+ " background-color: #3B4455;\n",
320
+ " fill: #D2E3FC;\n",
321
+ " }\n",
322
+ "\n",
323
+ " [theme=dark] .colab-df-convert:hover {\n",
324
+ " background-color: #434B5C;\n",
325
+ " box-shadow: 0px 1px 3px 1px rgba(0, 0, 0, 0.15);\n",
326
+ " filter: drop-shadow(0px 1px 2px rgba(0, 0, 0, 0.3));\n",
327
+ " fill: #FFFFFF;\n",
328
+ " }\n",
329
+ " </style>\n",
330
+ "\n",
331
+ " <script>\n",
332
+ " const buttonEl =\n",
333
+ " document.querySelector('#df-04c87660-4415-45e9-ad3b-3fa19d9402c2 button.colab-df-convert');\n",
334
+ " buttonEl.style.display =\n",
335
+ " google.colab.kernel.accessAllowed ? 'block' : 'none';\n",
336
+ "\n",
337
+ " async function convertToInteractive(key) {\n",
338
+ " const element = document.querySelector('#df-04c87660-4415-45e9-ad3b-3fa19d9402c2');\n",
339
+ " const dataTable =\n",
340
+ " await google.colab.kernel.invokeFunction('convertToInteractive',\n",
341
+ " [key], {});\n",
342
+ " if (!dataTable) return;\n",
343
+ "\n",
344
+ " const docLinkHtml = 'Like what you see? Visit the ' +\n",
345
+ " '<a target=\"_blank\" href=https://colab.research.google.com/notebooks/data_table.ipynb>data table notebook</a>'\n",
346
+ " + ' to learn more about interactive tables.';\n",
347
+ " element.innerHTML = '';\n",
348
+ " dataTable['output_type'] = 'display_data';\n",
349
+ " await google.colab.output.renderOutput(dataTable, element);\n",
350
+ " const docLink = document.createElement('div');\n",
351
+ " docLink.innerHTML = docLinkHtml;\n",
352
+ " element.appendChild(docLink);\n",
353
+ " }\n",
354
+ " </script>\n",
355
+ " </div>\n",
356
+ "\n",
357
+ "\n",
358
+ " </div>\n",
359
+ " </div>\n"
360
+ ],
361
+ "application/vnd.google.colaboratory.intrinsic+json": {
362
+ "type": "dataframe",
363
+ "variable_name": "df_books",
364
+ "summary": "{\n \"name\": \"df_books\",\n \"rows\": 1000,\n \"fields\": [\n {\n \"column\": \"title\",\n \"properties\": {\n \"dtype\": \"string\",\n \"num_unique_values\": 999,\n \"samples\": [\n \"The Grownup\",\n \"Persepolis: The Story of a Childhood (Persepolis #1-2)\",\n \"Ayumi's Violin\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"price\",\n \"properties\": {\n \"dtype\": \"number\",\n \"std\": 14.446689669952772,\n \"min\": 10.0,\n \"max\": 59.99,\n \"num_unique_values\": 903,\n \"samples\": [\n 19.73,\n 55.65,\n 46.31\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n },\n {\n \"column\": \"rating\",\n \"properties\": {\n \"dtype\": \"category\",\n \"num_unique_values\": 5,\n \"samples\": [\n \"One\",\n \"Two\",\n \"Four\"\n ],\n \"semantic_type\": \"\",\n \"description\": \"\"\n }\n }\n ]\n}"
365
+ }
366
+ },
367
+ "metadata": {},
368
+ "execution_count": 6
369
+ }
370
+ ],
371
+ "source": [
372
+ "df_books.head()"
373
+ ]
374
+ },
375
+ {
376
+ "cell_type": "markdown",
377
+ "metadata": {
378
+ "id": "p-1Pr2szaqLk"
379
+ },
380
+ "source": [
381
+ "## **3.** 🧩 Create a meaningful connection between real & synthetic datasets"
382
+ ]
383
+ },
384
+ {
385
+ "cell_type": "markdown",
386
+ "metadata": {
387
+ "id": "SIaJUGIpaH4V"
388
+ },
389
+ "source": [
390
+ "### *a. Initial setup*"
391
+ ]
392
+ },
393
+ {
394
+ "cell_type": "code",
395
+ "execution_count": 7,
396
+ "metadata": {
397
+ "id": "-gPXGcRPuV_9"
398
+ },
399
+ "outputs": [],
400
+ "source": [
401
+ "import numpy as np\n",
402
+ "import random\n",
403
+ "from datetime import datetime\n",
404
+ "import warnings\n",
405
+ "\n",
406
+ "warnings.filterwarnings(\"ignore\")\n",
407
+ "random.seed(2025)\n",
408
+ "np.random.seed(2025)"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "markdown",
413
+ "metadata": {
414
+ "id": "pY4yCoIuaQqp"
415
+ },
416
+ "source": [
417
+ "### *b. Generate popularity scores based on rating (with some randomness) with a generate_popularity_score function*"
418
+ ]
419
+ },
420
+ {
421
+ "cell_type": "code",
422
+ "execution_count": 8,
423
+ "metadata": {
424
+ "id": "mnd5hdAbaNjz"
425
+ },
426
+ "outputs": [],
427
+ "source": [
428
+ "def generate_popularity_score(rating):\n",
429
+ " base = {\"One\": 2, \"Two\": 3, \"Three\": 3, \"Four\": 4, \"Five\": 4}.get(rating, 3)\n",
430
+ " trend_factor = random.choices([-1, 0, 1], weights=[1, 3, 2])[0]\n",
431
+ " return int(np.clip(base + trend_factor, 1, 5))"
432
+ ]
433
+ },
434
+ {
435
+ "cell_type": "markdown",
436
+ "metadata": {
437
+ "id": "n4-TaNTFgPak"
438
+ },
439
+ "source": [
440
+ "### *c. ✋🏻🛑⛔️ Run the function to create a \"popularity_score\" column from \"rating\"*"
441
+ ]
442
+ },
443
+ {
444
+ "cell_type": "code",
445
+ "execution_count": 9,
446
+ "metadata": {
447
+ "id": "V-G3OCUCgR07"
448
+ },
449
+ "outputs": [],
450
+ "source": [
451
+ "df_books[\"popularity_score\"] = df_books[\"rating\"].apply(generate_popularity_score)"
452
+ ]
453
+ },
454
+ {
455
+ "cell_type": "markdown",
456
+ "metadata": {
457
+ "id": "HnngRNTgacYt"
458
+ },
459
+ "source": [
460
+ "### *d. Decide on the sentiment_label based on the popularity score with a get_sentiment function*"
461
+ ]
462
+ },
463
+ {
464
+ "cell_type": "code",
465
+ "execution_count": 10,
466
+ "metadata": {
467
+ "id": "kUtWmr8maZLZ"
468
+ },
469
+ "outputs": [],
470
+ "source": [
471
+ "def get_sentiment(popularity_score):\n",
472
+ " if popularity_score <= 2:\n",
473
+ " return \"negative\"\n",
474
+ " elif popularity_score == 3:\n",
475
+ " return \"neutral\"\n",
476
+ " else:\n",
477
+ " return \"positive\""
478
+ ]
479
+ },
480
+ {
481
+ "cell_type": "markdown",
482
+ "metadata": {
483
+ "id": "HF9F9HIzgT7Z"
484
+ },
485
+ "source": [
486
+ "### *e. ✋🏻🛑⛔️ Run the function to create a \"sentiment_label\" column from \"popularity_score\"*"
487
+ ]
488
+ },
489
+ {
490
+ "cell_type": "code",
491
+ "execution_count": 11,
492
+ "metadata": {
493
+ "id": "tafQj8_7gYCG"
494
+ },
495
+ "outputs": [],
496
+ "source": [
497
+ "df_books[\"sentiment_label\"] = df_books[\"popularity_score\"].apply(get_sentiment)"
498
+ ]
499
+ },
500
+ {
501
+ "cell_type": "markdown",
502
+ "metadata": {
503
+ "id": "T8AdKkmASq9a"
504
+ },
505
+ "source": [
506
+ "## **4.** 📈 Generate synthetic book sales data of 18 months"
507
+ ]
508
+ },
509
+ {
510
+ "cell_type": "markdown",
511
+ "metadata": {
512
+ "id": "OhXbdGD5fH0c"
513
+ },
514
+ "source": [
515
+ "### *a. Create a generate_sales_profile function that would generate sales patterns based on sentiment_label (with some randomness)*"
516
+ ]
517
+ },
518
+ {
519
+ "cell_type": "code",
520
+ "execution_count": 12,
521
+ "metadata": {
522
+ "id": "qkVhYPXGbgEn"
523
+ },
524
+ "outputs": [],
525
+ "source": [
526
+ "def generate_sales_profile(sentiment):\n",
527
+ " months = pd.date_range(end=datetime.today(), periods=18, freq=\"M\")\n",
528
+ "\n",
529
+ " if sentiment == \"positive\":\n",
530
+ " base = random.randint(200, 300)\n",
531
+ " trend = np.linspace(base, base + random.randint(20, 60), len(months))\n",
532
+ " elif sentiment == \"negative\":\n",
533
+ " base = random.randint(20, 80)\n",
534
+ " trend = np.linspace(base, base - random.randint(10, 30), len(months))\n",
535
+ " else: # neutral\n",
536
+ " base = random.randint(80, 160)\n",
537
+ " trend = np.full(len(months), base + random.randint(-10, 10))\n",
538
+ "\n",
539
+ " seasonality = 10 * np.sin(np.linspace(0, 3 * np.pi, len(months)))\n",
540
+ " noise = np.random.normal(0, 5, len(months))\n",
541
+ " monthly_sales = np.clip(trend + seasonality + noise, a_min=0, a_max=None).astype(int)\n",
542
+ "\n",
543
+ " return list(zip(months.strftime(\"%Y-%m\"), monthly_sales))"
544
+ ]
545
+ },
546
+ {
547
+ "cell_type": "markdown",
548
+ "metadata": {
549
+ "id": "L2ak1HlcgoTe"
550
+ },
551
+ "source": [
552
+ "### *b. Run the function as part of building sales_data*"
553
+ ]
554
+ },
555
+ {
556
+ "cell_type": "code",
557
+ "execution_count": 13,
558
+ "metadata": {
559
+ "id": "SlJ24AUafoDB"
560
+ },
561
+ "outputs": [],
562
+ "source": [
563
+ "sales_data = []\n",
564
+ "for _, row in df_books.iterrows():\n",
565
+ " records = generate_sales_profile(row[\"sentiment_label\"])\n",
566
+ " for month, units in records:\n",
567
+ " sales_data.append({\n",
568
+ " \"title\": row[\"title\"],\n",
569
+ " \"month\": month,\n",
570
+ " \"units_sold\": units,\n",
571
+ " \"sentiment_label\": row[\"sentiment_label\"]\n",
572
+ " })"
573
+ ]
574
+ },
575
+ {
576
+ "cell_type": "markdown",
577
+ "metadata": {
578
+ "id": "4IXZKcCSgxnq"
579
+ },
580
+ "source": [
581
+ "### *c. ✋🏻🛑⛔️ Create a df_sales DataFrame from sales_data*"
582
+ ]
583
+ },
584
+ {
585
+ "cell_type": "code",
586
+ "execution_count": 14,
587
+ "metadata": {
588
+ "id": "wcN6gtiZg-ws"
589
+ },
590
+ "outputs": [],
591
+ "source": [
592
+ "df_sales = pd.DataFrame(sales_data)"
593
+ ]
594
+ },
595
+ {
596
+ "cell_type": "markdown",
597
+ "metadata": {
598
+ "id": "EhIjz9WohAmZ"
599
+ },
600
+ "source": [
601
+ "### *d. Save df_sales as synthetic_sales_data.csv & view first few lines*"
602
+ ]
603
+ },
604
+ {
605
+ "cell_type": "code",
606
+ "execution_count": 15,
607
+ "metadata": {
608
+ "colab": {
609
+ "base_uri": "https://localhost:8080/"
610
+ },
611
+ "id": "MzbZvLcAhGaH",
612
+ "outputId": "c692bb04-7263-4115-a2ba-c72fe0180722"
613
+ },
614
+ "outputs": [
615
+ {
616
+ "output_type": "stream",
617
+ "name": "stdout",
618
+ "text": [
619
+ " title month units_sold sentiment_label\n",
620
+ "0 A Light in the Attic 2024-08 100 neutral\n",
621
+ "1 A Light in the Attic 2024-09 109 neutral\n",
622
+ "2 A Light in the Attic 2024-10 102 neutral\n",
623
+ "3 A Light in the Attic 2024-11 107 neutral\n",
624
+ "4 A Light in the Attic 2024-12 108 neutral\n"
625
+ ]
626
+ }
627
+ ],
628
+ "source": [
629
+ "df_sales.to_csv(\"synthetic_sales_data.csv\", index=False)\n",
630
+ "\n",
631
+ "print(df_sales.head())"
632
+ ]
633
+ },
634
+ {
635
+ "cell_type": "markdown",
636
+ "metadata": {
637
+ "id": "7g9gqBgQMtJn"
638
+ },
639
+ "source": [
640
+ "## **5.** 🎯 Generate synthetic customer reviews"
641
+ ]
642
+ },
643
+ {
644
+ "cell_type": "markdown",
645
+ "metadata": {
646
+ "id": "Gi4y9M9KuDWx"
647
+ },
648
+ "source": [
649
+ "### *a. ✋🏻🛑⛔️ Ask ChatGPT to create a list of 50 distinct generic book review texts for the sentiment labels \"positive\", \"neutral\", and \"negative\" called synthetic_reviews_by_sentiment*"
650
+ ]
651
+ },
652
+ {
653
+ "cell_type": "code",
654
+ "execution_count": 16,
655
+ "metadata": {
656
+ "id": "b3cd2a50"
657
+ },
658
+ "outputs": [],
659
+ "source": [
660
+ "synthetic_reviews_by_sentiment = {\n",
661
+ " \"positive\": [\n",
662
+ " \"A compelling and heartwarming read that stayed with me long after I finished.\",\n",
663
+ " \"Brilliantly written! The characters were unforgettable and the plot was engaging.\",\n",
664
+ " \"One of the best books I've read this year — inspiring and emotionally rich.\",\n",
665
+ " \"The author's storytelling was vivid and powerful. Highly recommended!\",\n",
666
+ " \"An absolute masterpiece. I couldn't put it down from start to finish.\",\n",
667
+ " \"Gripping, intelligent, and beautifully crafted — I loved every page.\",\n",
668
+ " \"The emotional depth and layered narrative were just perfect.\",\n",
669
+ " \"A thought-provoking journey with stunning character development.\",\n",
670
+ " \"Everything about this book just clicked. A top-tier read!\",\n",
671
+ " \"A flawless blend of emotion, intrigue, and style. Truly impressive.\",\n",
672
+ " \"Absolutely stunning work of fiction. Five stars from me.\",\n",
673
+ " \"Remarkably executed with breathtaking prose.\",\n",
674
+ " \"The pacing was perfect and I was hooked from page one.\",\n",
675
+ " \"Heartfelt and hopeful — a story well worth telling.\",\n",
676
+ " \"A vivid journey through complex emotions and stunning imagery.\",\n",
677
+ " \"This book had soul. Every word felt like it mattered.\",\n",
678
+ " \"It delivered more than I ever expected. Powerful and wise.\",\n",
679
+ " \"The characters leapt off the page and into my heart.\",\n",
680
+ " \"I could see every scene clearly in my mind — beautifully descriptive.\",\n",
681
+ " \"Refreshing, original, and impossible to forget.\",\n",
682
+ " \"A radiant celebration of resilience and love.\",\n",
683
+ " \"Powerful themes handled with grace and insight.\",\n",
684
+ " \"An unforgettable literary experience.\",\n",
685
+ " \"The best book club pick we’ve had all year.\",\n",
686
+ " \"A layered, lyrical narrative that resonates deeply.\",\n",
687
+ " \"Surprising, profound, and deeply humane.\",\n",
688
+ " \"One of those rare books I wish I could read again for the first time.\",\n",
689
+ " \"Both epic and intimate — a perfect balance.\",\n",
690
+ " \"It reads like a love letter to the human spirit.\",\n",
691
+ " \"Satisfying and uplifting with a memorable ending.\",\n",
692
+ " \"This novel deserves every bit of praise it gets.\",\n",
693
+ " \"Introspective, emotional, and elegantly composed.\",\n",
694
+ " \"A tour de force in contemporary fiction.\",\n",
695
+ " \"Left me smiling, teary-eyed, and completely fulfilled.\",\n",
696
+ " \"A novel with the rare ability to entertain and enlighten.\",\n",
697
+ " \"Incredibly moving. I highlighted so many lines.\",\n",
698
+ " \"A smart, sensitive take on relationships and identity.\",\n",
699
+ " \"You feel wiser by the end of it.\",\n",
700
+ " \"A gorgeously crafted tale about hope and second chances.\",\n",
701
+ " \"Poignant and real — a beautiful escape.\",\n",
702
+ " \"Brims with insight and authenticity.\",\n",
703
+ " \"Compelling characters and a satisfying plot.\",\n",
704
+ " \"An empowering and important read.\",\n",
705
+ " \"Elegantly crafted and deeply humane.\",\n",
706
+ " \"Taut storytelling that never lets go.\",\n",
707
+ " \"Each chapter offered a new treasure.\",\n",
708
+ " \"Lyrical writing that stays with you.\",\n",
709
+ " \"A wonderful blend of passion and thoughtfulness.\",\n",
710
+ " \"Uplifting, honest, and completely engrossing.\",\n",
711
+ " \"This one made me believe in storytelling again.\"\n",
712
+ " ],\n",
713
+ " \"neutral\": [\n",
714
+ " \"An average book — not great, but not bad either.\",\n",
715
+ " \"Some parts really stood out, others felt a bit flat.\",\n",
716
+ " \"It was okay overall. A decent way to pass the time.\",\n",
717
+ " \"The writing was fine, though I didn’t fully connect with the story.\",\n",
718
+ " \"Had a few memorable moments but lacked depth in some areas.\",\n",
719
+ " \"A mixed experience — neither fully engaging nor forgettable.\",\n",
720
+ " \"There was potential, but it didn't quite come together for me.\",\n",
721
+ " \"A reasonable effort that just didn’t leave a lasting impression.\",\n",
722
+ " \"Serviceable but not something I'd go out of my way to recommend.\",\n",
723
+ " \"Not much to dislike, but not much to rave about either.\",\n",
724
+ " \"It had its strengths, though they didn’t shine consistently.\",\n",
725
+ " \"I’m on the fence — parts were enjoyable, others not so much.\",\n",
726
+ " \"The book had a unique concept but lacked execution.\",\n",
727
+ " \"A middle-of-the-road read.\",\n",
728
+ " \"Engaging at times, but it lost momentum.\",\n",
729
+ " \"Would have benefited from stronger character development.\",\n",
730
+ " \"It passed the time, but I wouldn't reread it.\",\n",
731
+ " \"The plot had some holes that affected immersion.\",\n",
732
+ " \"Mediocre pacing made it hard to stay invested.\",\n",
733
+ " \"Satisfying in parts, underwhelming in others.\",\n",
734
+ " \"Neutral on this one — didn’t love it or hate it.\",\n",
735
+ " \"Fairly forgettable but with glimpses of promise.\",\n",
736
+ " \"The themes were solid, but not well explored.\",\n",
737
+ " \"Competent, just not compelling.\",\n",
738
+ " \"Had moments of clarity and moments of confusion.\",\n",
739
+ " \"I didn’t regret reading it, but I wouldn’t recommend it.\",\n",
740
+ " \"Readable, yet uninspired.\",\n",
741
+ " \"There was a spark, but it didn’t ignite.\",\n",
742
+ " \"A slow burn that didn’t quite catch fire.\",\n",
743
+ " \"I expected more nuance given the premise.\",\n",
744
+ " \"A safe, inoffensive choice.\",\n",
745
+ " \"Some parts lagged, others piqued my interest.\",\n",
746
+ " \"Decent, but needed polish.\",\n",
747
+ " \"Moderately engaging but didn’t stick the landing.\",\n",
748
+ " \"It simply lacked that emotional punch.\",\n",
749
+ " \"Just fine — no better, no worse.\",\n",
750
+ " \"Some thoughtful passages amid otherwise dry writing.\",\n",
751
+ " \"I appreciated the ideas more than the execution.\",\n",
752
+ " \"Struggled with cohesion.\",\n",
753
+ " \"Solidly average.\",\n",
754
+ " \"Good on paper, flat in practice.\",\n",
755
+ " \"A few bright spots, but mostly dim.\",\n",
756
+ " \"The kind of book that fades from memory.\",\n",
757
+ " \"It scratched the surface but didn’t dig deep.\",\n",
758
+ " \"Standard fare with some promise.\",\n",
759
+ " \"Okay, but not memorable.\",\n",
760
+ " \"Had potential that went unrealized.\",\n",
761
+ " \"Could have been tighter, sharper, deeper.\",\n",
762
+ " \"A blend of mediocrity and mild interest.\",\n",
763
+ " \"I kept reading, but barely.\"\n",
764
+ " ],\n",
765
+ " \"negative\": [\n",
766
+ " \"I struggled to get through this one — it just didn’t grab me.\",\n",
767
+ " \"The plot was confusing and the characters felt underdeveloped.\",\n",
768
+ " \"Disappointing. I had high hopes, but they weren't met.\",\n",
769
+ " \"Uninspired writing and a story that never quite took off.\",\n",
770
+ " \"Unfortunately, it was dull and predictable throughout.\",\n",
771
+ " \"The pacing dragged and I couldn’t find anything compelling.\",\n",
772
+ " \"This felt like a chore to read — lacked heart and originality.\",\n",
773
+ " \"Nothing really worked for me in this book.\",\n",
774
+ " \"A frustrating read that left me unsatisfied.\",\n",
775
+ " \"I kept hoping it would improve, but it never did.\",\n",
776
+ " \"The characters didn’t feel real, and the dialogue was forced.\",\n",
777
+ " \"I couldn't connect with the story at all.\",\n",
778
+ " \"A slow, meandering narrative with little payoff.\",\n",
779
+ " \"Tried too hard to be deep, but just felt empty.\",\n",
780
+ " \"The tone was uneven and confusing.\",\n",
781
+ " \"Way too repetitive and lacking progression.\",\n",
782
+ " \"The ending was abrupt and unsatisfying.\",\n",
783
+ " \"No emotional resonance — I felt nothing throughout.\",\n",
784
+ " \"I expected much more, but this fell flat.\",\n",
785
+ " \"Poorly edited and full of clichés.\",\n",
786
+ " \"The premise was interesting, but poorly executed.\",\n",
787
+ " \"Just didn’t live up to the praise.\",\n",
788
+ " \"A disjointed mess from start to finish.\",\n",
789
+ " \"Overly long and painfully dull.\",\n",
790
+ " \"Dialogue that felt robotic and unrealistic.\",\n",
791
+ " \"A hollow shell of what it could’ve been.\",\n",
792
+ " \"It lacked a coherent structure.\",\n",
793
+ " \"More confusing than complex.\",\n",
794
+ " \"Reading it felt like a task, not a treat.\",\n",
795
+ " \"There was no tension, no emotion — just words.\",\n",
796
+ " \"Characters with no motivation or development.\",\n",
797
+ " \"The plot twists were nonsensical.\",\n",
798
+ " \"Regret buying this book.\",\n",
799
+ " \"Nothing drew me in, nothing made me stay.\",\n",
800
+ " \"Too many subplots and none were satisfying.\",\n",
801
+ " \"Tedious and unimaginative.\",\n",
802
+ " \"Like reading a rough draft.\",\n",
803
+ " \"Disjointed, distant, and disappointing.\",\n",
804
+ " \"A lot of buildup with no payoff.\",\n",
805
+ " \"I don’t understand the hype.\",\n",
806
+ " \"This book simply didn’t work.\",\n",
807
+ " \"Forgettable in every sense.\",\n",
808
+ " \"More effort should’ve gone into editing.\",\n",
809
+ " \"The story lost its way early on.\",\n",
810
+ " \"It dragged endlessly.\",\n",
811
+ " \"I kept checking how many pages were left.\",\n",
812
+ " \"This lacked vision and clarity.\",\n",
813
+ " \"I expected substance — got fluff.\",\n",
814
+ " \"It failed to make me care.\"\n",
815
+ " ]\n",
816
+ "}"
817
+ ]
818
+ },
819
+ {
820
+ "cell_type": "markdown",
821
+ "metadata": {
822
+ "id": "fQhfVaDmuULT"
823
+ },
824
+ "source": [
825
+ "### *b. Generate 10 reviews per book using random sampling from the corresponding 50*"
826
+ ]
827
+ },
828
+ {
829
+ "cell_type": "code",
830
+ "execution_count": 17,
831
+ "metadata": {
832
+ "id": "l2SRc3PjuTGM"
833
+ },
834
+ "outputs": [],
835
+ "source": [
836
+ "review_rows = []\n",
837
+ "for _, row in df_books.iterrows():\n",
838
+ " title = row['title']\n",
839
+ " sentiment_label = row['sentiment_label']\n",
840
+ " review_pool = synthetic_reviews_by_sentiment[sentiment_label]\n",
841
+ " sampled_reviews = random.sample(review_pool, 10)\n",
842
+ " for review_text in sampled_reviews:\n",
843
+ " review_rows.append({\n",
844
+ " \"title\": title,\n",
845
+ " \"sentiment_label\": sentiment_label,\n",
846
+ " \"review_text\": review_text,\n",
847
+ " \"rating\": row['rating'],\n",
848
+ " \"popularity_score\": row['popularity_score']\n",
849
+ " })"
850
+ ]
851
+ },
852
+ {
853
+ "cell_type": "markdown",
854
+ "metadata": {
855
+ "id": "bmJMXF-Bukdm"
856
+ },
857
+ "source": [
858
+ "### *c. Create the final dataframe df_reviews & save it as synthetic_book_reviews.csv*"
859
+ ]
860
+ },
861
+ {
862
+ "cell_type": "code",
863
+ "execution_count": 18,
864
+ "metadata": {
865
+ "id": "ZUKUqZsuumsp"
866
+ },
867
+ "outputs": [],
868
+ "source": [
869
+ "df_reviews = pd.DataFrame(review_rows)\n",
870
+ "df_reviews.to_csv(\"synthetic_book_reviews.csv\", index=False)"
871
+ ]
872
+ },
873
+ {
874
+ "cell_type": "code",
875
+ "execution_count": 19,
876
+ "metadata": {
877
+ "colab": {
878
+ "base_uri": "https://localhost:8080/"
879
+ },
880
+ "id": "3946e521",
881
+ "outputId": "514d7bef-0488-4933-b03c-953b9e8a7f66"
882
+ },
883
+ "outputs": [
884
+ {
885
+ "output_type": "stream",
886
+ "name": "stdout",
887
+ "text": [
888
+ "✅ Wrote synthetic_title_level_features.csv\n",
889
+ "✅ Wrote synthetic_monthly_revenue_series.csv\n"
890
+ ]
891
+ }
892
+ ],
893
+ "source": [
894
+ "\n",
895
+ "# ============================================================\n",
896
+ "# ✅ Create \"R-ready\" derived inputs (root-level files)\n",
897
+ "# ============================================================\n",
898
+ "# These two files make the R notebook robust and fast:\n",
899
+ "# 1) synthetic_title_level_features.csv -> regression-ready, one row per title\n",
900
+ "# 2) synthetic_monthly_revenue_series.csv -> forecasting-ready, one row per month\n",
901
+ "\n",
902
+ "import numpy as np\n",
903
+ "\n",
904
+ "def _safe_num(s):\n",
905
+ " return pd.to_numeric(\n",
906
+ " pd.Series(s).astype(str).str.replace(r\"[^0-9.]\", \"\", regex=True),\n",
907
+ " errors=\"coerce\"\n",
908
+ " )\n",
909
+ "\n",
910
+ "# --- Clean book metadata (price/rating) ---\n",
911
+ "df_books_r = df_books.copy()\n",
912
+ "if \"price\" in df_books_r.columns:\n",
913
+ " df_books_r[\"price\"] = _safe_num(df_books_r[\"price\"])\n",
914
+ "if \"rating\" in df_books_r.columns:\n",
915
+ " df_books_r[\"rating\"] = _safe_num(df_books_r[\"rating\"])\n",
916
+ "\n",
917
+ "df_books_r[\"title\"] = df_books_r[\"title\"].astype(str).str.strip()\n",
918
+ "\n",
919
+ "# --- Clean sales ---\n",
920
+ "df_sales_r = df_sales.copy()\n",
921
+ "df_sales_r[\"title\"] = df_sales_r[\"title\"].astype(str).str.strip()\n",
922
+ "df_sales_r[\"month\"] = pd.to_datetime(df_sales_r[\"month\"], errors=\"coerce\")\n",
923
+ "df_sales_r[\"units_sold\"] = _safe_num(df_sales_r[\"units_sold\"])\n",
924
+ "\n",
925
+ "# --- Clean reviews ---\n",
926
+ "df_reviews_r = df_reviews.copy()\n",
927
+ "df_reviews_r[\"title\"] = df_reviews_r[\"title\"].astype(str).str.strip()\n",
928
+ "df_reviews_r[\"sentiment_label\"] = df_reviews_r[\"sentiment_label\"].astype(str).str.lower().str.strip()\n",
929
+ "if \"rating\" in df_reviews_r.columns:\n",
930
+ " df_reviews_r[\"rating\"] = _safe_num(df_reviews_r[\"rating\"])\n",
931
+ "if \"popularity_score\" in df_reviews_r.columns:\n",
932
+ " df_reviews_r[\"popularity_score\"] = _safe_num(df_reviews_r[\"popularity_score\"])\n",
933
+ "\n",
934
+ "# --- Sentiment shares per title (from reviews) ---\n",
935
+ "sent_counts = (\n",
936
+ " df_reviews_r.groupby([\"title\", \"sentiment_label\"])\n",
937
+ " .size()\n",
938
+ " .unstack(fill_value=0)\n",
939
+ ")\n",
940
+ "for lab in [\"positive\", \"neutral\", \"negative\"]:\n",
941
+ " if lab not in sent_counts.columns:\n",
942
+ " sent_counts[lab] = 0\n",
943
+ "\n",
944
+ "sent_counts[\"total_reviews\"] = sent_counts[[\"positive\", \"neutral\", \"negative\"]].sum(axis=1)\n",
945
+ "den = sent_counts[\"total_reviews\"].replace(0, np.nan)\n",
946
+ "sent_counts[\"share_positive\"] = sent_counts[\"positive\"] / den\n",
947
+ "sent_counts[\"share_neutral\"] = sent_counts[\"neutral\"] / den\n",
948
+ "sent_counts[\"share_negative\"] = sent_counts[\"negative\"] / den\n",
949
+ "sent_counts = sent_counts.reset_index()\n",
950
+ "\n",
951
+ "# --- Sales aggregation per title ---\n",
952
+ "sales_by_title = (\n",
953
+ " df_sales_r.dropna(subset=[\"title\"])\n",
954
+ " .groupby(\"title\", as_index=False)\n",
955
+ " .agg(\n",
956
+ " months_observed=(\"month\", \"nunique\"),\n",
957
+ " avg_units_sold=(\"units_sold\", \"mean\"),\n",
958
+ " total_units_sold=(\"units_sold\", \"sum\"),\n",
959
+ " )\n",
960
+ ")\n",
961
+ "\n",
962
+ "# --- Title-level features (join sales + books + sentiment) ---\n",
963
+ "df_title = (\n",
964
+ " sales_by_title\n",
965
+ " .merge(df_books_r[[\"title\", \"price\", \"rating\"]], on=\"title\", how=\"left\")\n",
966
+ " .merge(sent_counts[[\"title\", \"share_positive\", \"share_neutral\", \"share_negative\", \"total_reviews\"]],\n",
967
+ " on=\"title\", how=\"left\")\n",
968
+ ")\n",
969
+ "\n",
970
+ "df_title[\"avg_revenue\"] = df_title[\"avg_units_sold\"] * df_title[\"price\"]\n",
971
+ "df_title[\"total_revenue\"] = df_title[\"total_units_sold\"] * df_title[\"price\"]\n",
972
+ "\n",
973
+ "df_title.to_csv(\"synthetic_title_level_features.csv\", index=False)\n",
974
+ "print(\"✅ Wrote synthetic_title_level_features.csv\")\n",
975
+ "\n",
976
+ "# --- Monthly revenue series (proxy: units_sold * price) ---\n",
977
+ "monthly_rev = (\n",
978
+ " df_sales_r.merge(df_books_r[[\"title\", \"price\"]], on=\"title\", how=\"left\")\n",
979
+ ")\n",
980
+ "monthly_rev[\"revenue\"] = monthly_rev[\"units_sold\"] * monthly_rev[\"price\"]\n",
981
+ "\n",
982
+ "df_monthly = (\n",
983
+ " monthly_rev.dropna(subset=[\"month\"])\n",
984
+ " .groupby(\"month\", as_index=False)[\"revenue\"]\n",
985
+ " .sum()\n",
986
+ " .rename(columns={\"revenue\": \"total_revenue\"})\n",
987
+ " .sort_values(\"month\")\n",
988
+ ")\n",
989
+ "# if revenue is all NA (e.g., missing price), fallback to units_sold as a teaching proxy\n",
990
+ "if df_monthly[\"total_revenue\"].notna().sum() == 0:\n",
991
+ " df_monthly = (\n",
992
+ " df_sales_r.dropna(subset=[\"month\"])\n",
993
+ " .groupby(\"month\", as_index=False)[\"units_sold\"]\n",
994
+ " .sum()\n",
995
+ " .rename(columns={\"units_sold\": \"total_revenue\"})\n",
996
+ " .sort_values(\"month\")\n",
997
+ " )\n",
998
+ "\n",
999
+ "df_monthly[\"month\"] = pd.to_datetime(df_monthly[\"month\"], errors=\"coerce\").dt.strftime(\"%Y-%m-%d\")\n",
1000
+ "df_monthly.to_csv(\"synthetic_monthly_revenue_series.csv\", index=False)\n",
1001
+ "print(\"✅ Wrote synthetic_monthly_revenue_series.csv\")\n"
1002
+ ]
1003
+ },
1004
+ {
1005
+ "cell_type": "markdown",
1006
+ "metadata": {
1007
+ "id": "RYvGyVfXuo54"
1008
+ },
1009
+ "source": [
1010
+ "### *d. ✋🏻🛑⛔️ View the first few lines*"
1011
+ ]
1012
+ },
1013
+ {
1014
+ "cell_type": "code",
1015
+ "execution_count": 20,
1016
+ "metadata": {
1017
+ "colab": {
1018
+ "base_uri": "https://localhost:8080/"
1019
+ },
1020
+ "id": "xfE8NMqOurKo",
1021
+ "outputId": "191730ba-d5e2-4df7-97d2-99feb0b704af"
1022
+ },
1023
+ "outputs": [
1024
+ {
1025
+ "output_type": "stream",
1026
+ "name": "stdout",
1027
+ "text": [
1028
+ " title sentiment_label \\\n",
1029
+ "0 A Light in the Attic neutral \n",
1030
+ "1 A Light in the Attic neutral \n",
1031
+ "2 A Light in the Attic neutral \n",
1032
+ "3 A Light in the Attic neutral \n",
1033
+ "4 A Light in the Attic neutral \n",
1034
+ "\n",
1035
+ " review_text rating popularity_score \n",
1036
+ "0 Had potential that went unrealized. Three 3 \n",
1037
+ "1 The themes were solid, but not well explored. Three 3 \n",
1038
+ "2 It simply lacked that emotional punch. Three 3 \n",
1039
+ "3 Serviceable but not something I'd go out of my... Three 3 \n",
1040
+ "4 Standard fare with some promise. Three 3 \n"
1041
+ ]
1042
+ }
1043
+ ],
1044
+ "source": [
1045
+ "print(df_reviews.head())"
1046
+ ]
1047
+ }
1048
+ ],
1049
+ "metadata": {
1050
+ "colab": {
1051
+ "collapsed_sections": [
1052
+ "jpASMyIQMaAq",
1053
+ "lquNYCbfL9IM",
1054
+ "0IWuNpxxYDJF",
1055
+ "oCdTsin2Yfp3",
1056
+ "T0TOeRC4Yrnn",
1057
+ "duI5dv3CZYvF",
1058
+ "qMjRKMBQZlJi",
1059
+ "p-1Pr2szaqLk",
1060
+ "SIaJUGIpaH4V",
1061
+ "pY4yCoIuaQqp",
1062
+ "n4-TaNTFgPak",
1063
+ "HnngRNTgacYt",
1064
+ "HF9F9HIzgT7Z",
1065
+ "T8AdKkmASq9a",
1066
+ "OhXbdGD5fH0c",
1067
+ "L2ak1HlcgoTe",
1068
+ "4IXZKcCSgxnq",
1069
+ "EhIjz9WohAmZ",
1070
+ "Gi4y9M9KuDWx",
1071
+ "fQhfVaDmuULT",
1072
+ "bmJMXF-Bukdm",
1073
+ "RYvGyVfXuo54"
1074
+ ],
1075
+ "provenance": []
1076
+ },
1077
+ "kernelspec": {
1078
+ "display_name": "Python 3",
1079
+ "name": "python3"
1080
+ },
1081
+ "language_info": {
1082
+ "name": "python"
1083
+ }
1084
+ },
1085
+ "nbformat": 4,
1086
+ "nbformat_minor": 0
1087
+ }
gitattributes ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ ESCP_BANNER[[:space:]](2).png filter=lfs diff=lfs merge=lfs -text
37
+ background.png filter=lfs diff=lfs merge=lfs -text
38
+ background_top.png filter=lfs diff=lfs merge=lfs -text
pythonanalysis.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
ranalysis.ipynb ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "75fd9cc6",
6
+ "metadata": {
7
+ "id": "75fd9cc6"
8
+ },
9
+ "source": [
10
+ "# **🤖 Benchmarking & Modeling**"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "id": "fb807724",
16
+ "metadata": {
17
+ "id": "fb807724"
18
+ },
19
+ "source": [
20
+ "## **1.** 📦 Setup"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": null,
26
+ "id": "d40cd131",
27
+ "metadata": {
28
+ "id": "d40cd131"
29
+ },
30
+ "outputs": [],
31
+ "source": [
32
+ "\n",
33
+ "# Install required packages (already uncommented here — comment this line out after the first run):\n",
34
+ "install.packages(c(\"readr\",\"dplyr\",\"stringr\",\"tidyr\",\"lubridate\",\"ggplot2\",\"forecast\",\"broom\",\"jsonlite\"), repos=\"https://cloud.r-project.org\")\n",
35
+ "\n",
36
+ "suppressPackageStartupMessages({\n",
37
+ " library(readr)\n",
38
+ " library(dplyr)\n",
39
+ " library(stringr)\n",
40
+ " library(tidyr)\n",
41
+ " library(lubridate)\n",
42
+ " library(ggplot2)\n",
43
+ " library(forecast)\n",
44
+ " library(broom)\n",
45
+ " library(jsonlite)\n",
46
+ "})"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "markdown",
51
+ "id": "f01d02e7",
52
+ "metadata": {
53
+ "id": "f01d02e7"
54
+ },
55
+ "source": [
56
+ "## **2.** ✅️ Load & inspect inputs"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": null,
62
+ "id": "29e8f6ce",
63
+ "metadata": {
64
+ "colab": {
65
+ "base_uri": "https://localhost:8080/"
66
+ },
67
+ "id": "29e8f6ce",
68
+ "outputId": "5a1bda1c-c58d-43d0-c85e-db5041c8bc49"
69
+ },
70
+ "outputs": [
71
+ {
72
+ "output_type": "stream",
73
+ "name": "stdout",
74
+ "text": [
75
+ "Loaded: 1000 rows (title-level), 18 rows (monthly)\n"
76
+ ]
77
+ }
78
+ ],
79
+ "source": [
80
+ "\n",
81
+ "must_exist <- function(path, label) {\n",
82
+ " if (!file.exists(path)) stop(paste0(\"Missing \", label, \": \", path))\n",
83
+ "}\n",
84
+ "\n",
85
+ "TITLE_PATH <- \"synthetic_title_level_features.csv\"\n",
86
+ "MONTH_PATH <- \"synthetic_monthly_revenue_series.csv\"\n",
87
+ "\n",
88
+ "must_exist(TITLE_PATH, \"TITLE_PATH\")\n",
89
+ "must_exist(MONTH_PATH, \"MONTH_PATH\")\n",
90
+ "\n",
91
+ "df_title <- read_csv(TITLE_PATH, show_col_types = FALSE)\n",
92
+ "df_month <- read_csv(MONTH_PATH, show_col_types = FALSE)\n",
93
+ "\n",
94
+ "cat(\"Loaded:\", nrow(df_title), \"rows (title-level),\", nrow(df_month), \"rows (monthly)\\n\")"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": null,
101
+ "id": "9fd04262",
102
+ "metadata": {
103
+ "colab": {
104
+ "base_uri": "https://localhost:8080/"
105
+ },
106
+ "id": "9fd04262",
107
+ "outputId": "5f031538-96be-4758-904d-9201ec3c3ea7"
108
+ },
109
+ "outputs": [
110
+ {
111
+ "output_type": "stream",
112
+ "name": "stdout",
113
+ "text": [
114
+ "\u001b[90m# A tibble: 1 × 6\u001b[39m\n",
115
+ " n na_avg_revenue na_price na_rating na_share_pos na_share_neg\n",
116
+ " \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m \u001b[3m\u001b[90m<int>\u001b[39m\u001b[23m\n",
117
+ "\u001b[90m1\u001b[39m \u001b[4m1\u001b[24m000 0 0 \u001b[4m1\u001b[24m000 0 0\n",
118
+ "Monthly rows after parsing: 18 \n"
119
+ ]
120
+ }
121
+ ],
122
+ "source": [
123
+ "\n",
124
+ "# ---------- helpers ----------\n",
125
+ "safe_num <- function(x) {\n",
126
+ " # strips anything that is not digit or dot\n",
127
+ " suppressWarnings(as.numeric(str_replace_all(as.character(x), \"[^0-9.]\", \"\")))\n",
128
+ "}\n",
129
+ "\n",
130
+ "parse_rating <- function(x) {\n",
131
+ " # Accept: 4, \"4\", \"4.0\", \"4/5\", \"4 out of 5\", \"⭐⭐⭐⭐\", etc.\n",
132
+ " x <- as.character(x)\n",
133
+ " x <- str_replace_all(x, \"⭐\", \"\")\n",
134
+ " x <- str_to_lower(x)\n",
135
+ " x <- str_replace_all(x, \"stars?\", \"\")\n",
136
+ " x <- str_replace_all(x, \"out of\", \"/\")\n",
137
+ " x <- str_replace_all(x, \"\\\\s+\", \"\")\n",
138
+ " x <- str_replace_all(x, \"[^0-9./]\", \"\")\n",
139
+ " suppressWarnings(as.numeric(str_extract(x, \"^[0-9.]+\")))\n",
140
+ "}\n",
141
+ "\n",
142
+ "parse_month <- function(x) {\n",
143
+ " x <- as.character(x)\n",
144
+ " # try YYYY-MM-DD, then YYYY-MM\n",
145
+ " out <- suppressWarnings(ymd(x))\n",
146
+ " if (mean(is.na(out)) > 0.5) out <- suppressWarnings(ymd(paste0(x, \"-01\")))\n",
147
+ " na_idx <- which(is.na(out))\n",
148
+ " if (length(na_idx) > 0) out[na_idx] <- suppressWarnings(ymd(paste0(x[na_idx], \"-01\")))\n",
149
+ " out\n",
150
+ "}\n",
151
+ "\n",
152
+ "# ---------- normalize keys ----------\n",
153
+ "df_title <- df_title %>% mutate(title = str_squish(as.character(title)))\n",
154
+ "df_month <- df_month %>% mutate(month = as.character(month))\n",
155
+ "\n",
156
+ "# ---------- parse numeric columns defensively ----------\n",
157
+ "need_cols_title <- c(\"title\",\"avg_revenue\",\"total_revenue\",\"price\",\"rating\",\"share_positive\",\"share_negative\",\"share_neutral\")\n",
158
+ "missing_title <- setdiff(need_cols_title, names(df_title))\n",
159
+ "if (length(missing_title) > 0) stop(paste0(\"df_title missing columns: \", paste(missing_title, collapse=\", \")))\n",
160
+ "\n",
161
+ "df_title <- df_title %>%\n",
162
+ " mutate(\n",
163
+ " avg_revenue = safe_num(avg_revenue),\n",
164
+ " total_revenue = safe_num(total_revenue),\n",
165
+ " price = safe_num(price),\n",
166
+ " rating = parse_rating(rating),\n",
167
+ " share_positive = safe_num(share_positive),\n",
168
+ " share_negative = safe_num(share_negative),\n",
169
+ " share_neutral = safe_num(share_neutral)\n",
170
+ " )\n",
171
+ "\n",
172
+ "# basic sanity stats\n",
173
+ "hyg <- df_title %>%\n",
174
+ " summarise(\n",
175
+ " n = n(),\n",
176
+ " na_avg_revenue = sum(is.na(avg_revenue)),\n",
177
+ " na_price = sum(is.na(price)),\n",
178
+ " na_rating = sum(is.na(rating)),\n",
179
+ " na_share_pos = sum(is.na(share_positive)),\n",
180
+ " na_share_neg = sum(is.na(share_negative))\n",
181
+ " )\n",
182
+ "\n",
183
+ "print(hyg)\n",
184
+ "\n",
185
+ "# monthly parsing\n",
186
+ "need_cols_month <- c(\"month\",\"total_revenue\")\n",
187
+ "missing_month <- setdiff(need_cols_month, names(df_month))\n",
188
+ "if (length(missing_month) > 0) stop(paste0(\"df_month missing columns: \", paste(missing_month, collapse=\", \")))\n",
189
+ "\n",
190
+ "df_month2 <- df_month %>%\n",
191
+ " mutate(\n",
192
+ " month = parse_month(month),\n",
193
+ " total_revenue = safe_num(total_revenue)\n",
194
+ " ) %>%\n",
195
+ " filter(!is.na(month)) %>%\n",
196
+ " arrange(month)\n",
197
+ "\n",
198
+ "cat(\"Monthly rows after parsing:\", nrow(df_month2), \"\\n\")"
199
+ ]
200
+ },
201
+ {
202
+ "cell_type": "markdown",
203
+ "id": "b8971bc4",
204
+ "metadata": {
205
+ "id": "b8971bc4"
206
+ },
207
+ "source": [
208
+ "## **3.** 💾 Folder for R outputs for Hugging Face"
209
+ ]
210
+ },
211
+ {
212
+ "cell_type": "code",
213
+ "execution_count": null,
214
+ "id": "dfaa06b1",
215
+ "metadata": {
216
+ "colab": {
217
+ "base_uri": "https://localhost:8080/"
218
+ },
219
+ "id": "dfaa06b1",
220
+ "outputId": "73f6437a-39f4-4968-f88a-99f10a3fd8ae"
221
+ },
222
+ "outputs": [
223
+ {
224
+ "output_type": "stream",
225
+ "name": "stdout",
226
+ "text": [
227
+ "R outputs will be written to: /content/artifacts/r \n"
228
+ ]
229
+ }
230
+ ],
231
+ "source": [
232
+ "\n",
233
+ "ART_DIR <- \"artifacts\"\n",
234
+ "R_FIG_DIR <- file.path(ART_DIR, \"r\", \"figures\")\n",
235
+ "R_TAB_DIR <- file.path(ART_DIR, \"r\", \"tables\")\n",
236
+ "\n",
237
+ "dir.create(R_FIG_DIR, recursive = TRUE, showWarnings = FALSE)\n",
238
+ "dir.create(R_TAB_DIR, recursive = TRUE, showWarnings = FALSE)\n",
239
+ "\n",
240
+ "cat(\"R outputs will be written to:\", normalizePath(file.path(ART_DIR, \"r\"), winslash = \"/\"), \"\\n\")"
242
+ ]
243
+ },
244
+ {
245
+ "cell_type": "markdown",
246
+ "id": "f880c72d",
247
+ "metadata": {
248
+ "id": "f880c72d"
249
+ },
250
+ "source": [
251
+ "## **4.** 🔮 Forecast book sales benchmarking with `accuracy()`"
252
+ ]
253
+ },
254
+ {
255
+ "cell_type": "markdown",
256
+ "source": [
257
+ "We benchmark **three** models on a holdout window (last *h* months):\n",
258
+ "- ARIMA + Fourier (seasonality upgrade)\n",
259
+ "- ETS\n",
260
+ "- Naive baseline\n",
261
+ "\n",
262
+ "Then we export:\n",
263
+ "- `accuracy_table.csv`\n",
264
+ "- `forecast_compare.png`\n",
265
+ "- `rmse_comparison.png`"
266
+ ],
267
+ "metadata": {
268
+ "id": "R0JZlzKegmzW"
269
+ },
270
+ "id": "R0JZlzKegmzW"
271
+ },
272
+ {
273
+ "cell_type": "code",
274
+ "execution_count": null,
275
+ "id": "62e87992",
276
+ "metadata": {
277
+ "colab": {
278
+ "base_uri": "https://localhost:8080/"
279
+ },
280
+ "id": "62e87992",
281
+ "outputId": "73b36487-a25d-4bb9-cf80-8d5a654a2f0d"
282
+ },
283
+ "outputs": [
284
+ {
285
+ "output_type": "stream",
286
+ "name": "stdout",
287
+ "text": [
288
+ "✅ Saved: artifacts/r/tables/accuracy_table.csv\n",
289
+ "✅ Saved: artifacts/r/figures/rmse_comparison.png\n"
290
+ ]
291
+ },
292
+ {
293
+ "output_type": "display_data",
294
+ "data": {
295
+ "text/html": [
296
+ "<strong>agg_record_872216040:</strong> 2"
297
+ ],
298
+ "text/markdown": "**agg_record_872216040:** 2",
299
+ "text/latex": "\\textbf{agg\\textbackslash{}\\_record\\textbackslash{}\\_872216040:} 2",
300
+ "text/plain": [
301
+ "agg_record_872216040 \n",
302
+ " 2 "
303
+ ]
304
+ },
305
+ "metadata": {}
306
+ },
307
+ {
308
+ "output_type": "stream",
309
+ "name": "stdout",
310
+ "text": [
311
+ "✅ Saved: artifacts/r/figures/forecast_compare.png\n"
312
+ ]
313
+ }
314
+ ],
315
+ "source": [
316
+ "\n",
317
+ "# Build monthly ts\n",
318
+ "start_year <- year(min(df_month2$month, na.rm = TRUE))\n",
319
+ "start_mon <- month(min(df_month2$month, na.rm = TRUE))\n",
320
+ "\n",
321
+ "y <- ts(df_month2$total_revenue, frequency = 12, start = c(start_year, start_mon))\n",
322
+ "\n",
323
+ "# holdout size: min(6, 20% of series), at least 1\n",
324
+ "h_test <- min(6, max(1, floor(length(y) / 5)))\n",
325
+ "train_ts <- head(y, length(y) - h_test)\n",
326
+ "test_ts <- tail(y, h_test)\n",
327
+ "\n",
328
+ "# Model A: ARIMA + Fourier\n",
329
+ "K <- 2\n",
330
+ "xreg_train <- fourier(train_ts, K = K)\n",
331
+ "fit_arima <- auto.arima(train_ts, xreg = xreg_train)\n",
332
+ "xreg_future <- fourier(train_ts, K = K, h = h_test)\n",
333
+ "fc_arima <- forecast(fit_arima, xreg = xreg_future, h = h_test)\n",
334
+ "\n",
335
+ "# Model B: ETS\n",
336
+ "fit_ets <- ets(train_ts)\n",
337
+ "fc_ets <- forecast(fit_ets, h = h_test)\n",
338
+ "\n",
339
+ "# Model C: Naive baseline\n",
340
+ "fc_naive <- naive(train_ts, h = h_test)\n",
341
+ "\n",
342
+ "# accuracy() tables\n",
343
+ "acc_arima <- as.data.frame(accuracy(fc_arima, test_ts))\n",
344
+ "acc_ets <- as.data.frame(accuracy(fc_ets, test_ts))\n",
345
+ "acc_naive <- as.data.frame(accuracy(fc_naive, test_ts))\n",
346
+ "\n",
347
+ "accuracy_tbl <- bind_rows(\n",
348
+ " acc_arima %>% mutate(model = \"ARIMA+Fourier\"),\n",
349
+ " acc_ets %>% mutate(model = \"ETS\"),\n",
350
+ " acc_naive %>% mutate(model = \"Naive\")\n",
351
+ ") %>% relocate(model)\n",
352
+ "\n",
353
+ "write_csv(accuracy_tbl, file.path(R_TAB_DIR, \"accuracy_table.csv\"))\n",
354
+ "cat(\"✅ Saved: artifacts/r/tables/accuracy_table.csv\\n\")\n",
355
+ "\n",
356
+ "# RMSE bar chart\n",
357
+ "p_rmse <- ggplot(accuracy_tbl, aes(x = reorder(model, RMSE), y = RMSE)) +\n",
358
+ " geom_col() +\n",
359
+ " coord_flip() +\n",
360
+ " labs(title = \"Forecast model comparison (RMSE on holdout)\", x = \"\", y = \"RMSE\") +\n",
361
+ " theme_minimal()\n",
362
+ "\n",
363
+ "ggsave(file.path(R_FIG_DIR, \"rmse_comparison.png\"), p_rmse, width = 8, height = 4, dpi = 160)\n",
364
+ "cat(\"✅ Saved: artifacts/r/figures/rmse_comparison.png\\n\")\n",
365
+ "\n",
366
+ "# Side-by-side forecast plots (simple, no extra deps)\n",
367
+ "png(file.path(R_FIG_DIR, \"forecast_compare.png\"), width = 1200, height = 500)\n",
368
+ "par(mfrow = c(1, 3))\n",
369
+ "plot(fc_arima, main = \"ARIMA + Fourier\", xlab = \"Time\", ylab = \"Total revenue\"); lines(test_ts, col = \"black\")\n",
370
+ "plot(fc_ets, main = \"ETS\", xlab = \"Time\", ylab = \"Total revenue\"); lines(test_ts, col = \"black\")\n",
371
+ "plot(fc_naive, main = \"Naive\", xlab = \"Time\", ylab = \"Total revenue\"); lines(test_ts, col = \"black\")\n",
372
+ "dev.off()\n",
373
+ "cat(\"✅ Saved: artifacts/r/figures/forecast_compare.png\\n\")"
374
+ ]
375
+ },
376
+ {
377
+ "cell_type": "markdown",
378
+ "id": "30bc017b",
379
+ "metadata": {
380
+ "id": "30bc017b"
381
+ },
382
+ "source": [
383
+ "## **5.** 💾 Some R metadata for Hugging Face"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": null,
389
+ "id": "645cb12b",
390
+ "metadata": {
391
+ "colab": {
392
+ "base_uri": "https://localhost:8080/"
393
+ },
394
+ "id": "645cb12b",
395
+ "outputId": "c00c26da-7d27-4c78-a296-aa33807495d4"
396
+ },
397
+ "outputs": [
398
+ {
399
+ "output_type": "stream",
400
+ "name": "stdout",
401
+ "text": [
402
+ "✅ Saved: artifacts/r/tables/r_meta.json\n",
403
+ "DONE. R artifacts written to: artifacts/r \n"
404
+ ]
405
+ }
406
+ ],
407
+ "source": [
408
+ "# =========================================================\n",
409
+ "# Metadata export (aligned with current notebook objects)\n",
410
+ "# =========================================================\n",
411
+ "\n",
412
+ "meta <- list(\n",
413
+ "\n",
414
+ " # ---------------------------\n",
415
+ " # Dataset footprint\n",
416
+ " # ---------------------------\n",
417
+ " n_titles = nrow(df_title),\n",
418
+ " n_months = nrow(df_month2),\n",
419
+ "\n",
420
+ " # ---------------------------\n",
421
+ " # Forecasting info\n",
422
+ " # (only if these objects exist in your forecasting section)\n",
423
+ " # ---------------------------\n",
424
+ " forecasting = list(\n",
425
+ " holdout_h = h_test,\n",
426
+ " arima_order = forecast::arimaorder(fit_arima),\n",
427
+ " ets_method = fit_ets$method\n",
428
+ " )\n",
429
+ ")\n",
430
+ "\n",
431
+ "jsonlite::write_json(\n",
432
+ " meta,\n",
433
+ " path = file.path(R_TAB_DIR, \"r_meta.json\"),\n",
434
+ " pretty = TRUE,\n",
435
+ " auto_unbox = TRUE\n",
436
+ ")\n",
437
+ "\n",
438
+ "cat(\"✅ Saved: artifacts/r/tables/r_meta.json\\n\")\n",
439
+ "cat(\"DONE. R artifacts written to:\", file.path(ART_DIR, \"r\"), \"\\n\")\n"
440
+ ]
441
+ }
442
+ ],
443
+ "metadata": {
444
+ "colab": {
445
+ "provenance": [],
446
+ "collapsed_sections": [
447
+ "f01d02e7",
448
+ "b8971bc4",
449
+ "f880c72d",
450
+ "30bc017b"
451
+ ]
452
+ },
453
+ "kernelspec": {
454
+ "name": "ir",
455
+ "display_name": "R"
456
+ },
457
+ "language_info": {
458
+ "name": "R"
459
+ }
460
+ },
461
+ "nbformat": 4,
462
+ "nbformat_minor": 5
463
+ }
requirements.txt ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio==6.0.0
2
+ pandas>=2.0.0
3
+ numpy>=1.24.0
4
+ matplotlib>=3.7.0
5
+ seaborn>=0.13.0
6
+ statsmodels>=0.14.0
7
+ scikit-learn>=1.3.0
8
+ papermill>=2.5.0
9
+ nbformat>=5.9.0
10
+ pillow>=10.0.0
11
+ requests>=2.31.0
12
+ beautifulsoup4>=4.12.0
13
+ vaderSentiment>=3.3.2
14
+ huggingface_hub>=0.20.0
15
+ textblob>=0.18.0
16
+ faker>=20.0.0
style.css ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* --- Target the Gradio app wrapper for backgrounds --- */
2
+ gradio-app,
3
+ .gradio-app,
4
+ .main,
5
+ #app,
6
+ [data-testid="app"] {
7
+ background-color: rgb(40,9,109) !important;
8
+ background-image:
9
+ url('https://huggingface.co/spaces/escp/rx12workshoptemplate/resolve/main/background_top.png'),
10
+ url('https://huggingface.co/spaces/escp/rx12workshoptemplate/resolve/main/background_mid.png') !important;
11
+ background-position:
12
+ top center,
13
+ 0 913px !important;
14
+ background-repeat:
15
+ no-repeat,
16
+ repeat-y !important;
17
+ background-size:
18
+ 100% auto,
19
+ 100% auto !important;
20
+ min-height: 100vh !important;
21
+ }
22
+
23
+ /* --- Fallback on html/body --- */
24
+ html, body {
25
+ background-color: rgb(40,9,109) !important;
26
+ margin: 0 !important;
27
+ padding: 0 !important;
28
+ min-height: 100vh !important;
29
+ }
30
+
31
+ /* --- Fixed bottom banner using ::after on body --- */
32
+ body::after {
33
+ content: '' !important;
34
+ position: fixed !important;
35
+ bottom: 0 !important;
36
+ left: 0 !important;
37
+ right: 0 !important;
38
+ height: 130px !important;
39
+ background-image: url('https://huggingface.co/spaces/escp/rx12workshoptemplate/resolve/main/background_bottom.png') !important;
40
+ background-size: 100% 100% !important;
41
+ background-repeat: no-repeat !important;
42
+ background-position: bottom center !important;
43
+ pointer-events: none !important;
44
+ z-index: 9999 !important;
45
+ }
46
+
47
+ /* --- Main container --- */
48
+ .gradio-container {
49
+ max-width: 1400px !important;
50
+ width: 94vw !important;
51
+ margin: 0 auto !important;
52
+ padding-top: 220px !important;
53
+ padding-bottom: 150px !important;
54
+ background: transparent !important;
55
+ }
56
+
57
+ /* --- Title in ESCP gold --- */
58
+ #escp_title h1 {
59
+ color: rgb(242,198,55) !important;
60
+ font-size: 3rem !important;
61
+ font-weight: 800 !important;
62
+ text-align: center !important;
63
+ margin: 0 0 12px 0 !important;
64
+ }
65
+
66
+ /* --- Subtitle --- */
67
+ #escp_title p, #escp_title em {
68
+ color: rgba(255,255,255,0.85) !important;
69
+ text-align: center !important;
70
+ }
71
+
72
+ /* --- Tab bar background --- */
73
+ .tabs > .tab-nav,
74
+ .tab-nav,
75
+ div[role="tablist"],
76
+ .svelte-tabs > .tab-nav {
77
+ background: rgba(40,9,109,0.6) !important;
78
+ border-radius: 10px 10px 0 0 !important;
79
+ padding: 4px !important;
80
+ }
81
+
82
+ /* --- ALL tab buttons: force white text --- */
83
+ .tabs > .tab-nav button,
84
+ .tab-nav button,
85
+ div[role="tablist"] button,
86
+ button[role="tab"],
87
+ .svelte-tabs button,
88
+ .tab-nav > button,
89
+ .tabs button {
90
+ color: #ffffff !important;
91
+ font-weight: 600 !important;
92
+ border: none !important;
93
+ background: transparent !important;
94
+ padding: 10px 20px !important;
95
+ border-radius: 8px 8px 0 0 !important;
96
+ opacity: 1 !important;
97
+ }
98
+
99
+ /* --- Selected tab: ESCP gold --- */
100
+ .tabs > .tab-nav button.selected,
101
+ .tab-nav button.selected,
102
+ button[role="tab"][aria-selected="true"],
103
+ button[role="tab"].selected,
104
+ div[role="tablist"] button[aria-selected="true"],
105
+ .svelte-tabs button.selected {
106
+ color: rgb(242,198,55) !important;
107
+ background: rgba(255,255,255,0.12) !important;
108
+ }
109
+
110
+ /* --- Unselected tabs: ensure visibility --- */
111
+ .tabs > .tab-nav button:not(.selected),
112
+ .tab-nav button:not(.selected),
113
+ button[role="tab"][aria-selected="false"],
114
+ button[role="tab"]:not(.selected),
115
+ div[role="tablist"] button:not([aria-selected="true"]) {
116
+ color: #ffffff !important;
117
+ opacity: 1 !important;
118
+ }
119
+
120
+ /* --- White card panels --- */
121
+ .gradio-container .gr-block,
122
+ .gradio-container .gr-box,
123
+ .gradio-container .gr-panel,
124
+ .gradio-container .gr-group {
125
+ background: #ffffff !important;
126
+ border-radius: 10px !important;
127
+ }
128
+
129
+ /* --- Tab content area --- */
130
+ .tabitem {
131
+ background: rgba(255,255,255,0.95) !important;
132
+ border-radius: 0 0 10px 10px !important;
133
+ padding: 16px !important;
134
+ }
135
+
136
+ /* --- Inputs --- */
137
+ .gradio-container input,
138
+ .gradio-container textarea,
139
+ .gradio-container select {
140
+ background: #ffffff !important;
141
+ border: 1px solid #d1d5db !important;
142
+ border-radius: 8px !important;
143
+ }
144
+
145
+ /* --- Buttons: ESCP purple primary --- */
146
+ .gradio-container button:not([role="tab"]) {
147
+ font-weight: 600 !important;
148
+ padding: 10px 16px !important;
149
+ border-radius: 10px !important;
150
+ }
151
+
152
+ button.primary {
153
+ background-color: rgb(40,9,109) !important;
154
+ color: #ffffff !important;
155
+ border: none !important;
156
+ }
157
+
158
+ button.primary:hover {
159
+ background-color: rgb(60,20,140) !important;
160
+ }
161
+
162
+ button.secondary {
163
+ background-color: #ffffff !important;
164
+ color: rgb(40,9,109) !important;
165
+ border: 2px solid rgb(40,9,109) !important;
166
+ }
167
+
168
+ button.secondary:hover {
169
+ background-color: rgb(240,238,250) !important;
170
+ }
171
+
172
+ /* --- Dataframes --- */
173
+ [data-testid="dataframe"] {
174
+ background-color: #ffffff !important;
175
+ border-radius: 10px !important;
176
+ }
177
+
178
+ table {
179
+ font-size: 0.85rem !important;
180
+ }
181
+
182
+ /* --- Chatbot (AI Dashboard tab) --- */
183
+ .gr-chatbot {
184
+ min-height: 380px !important;
185
+ background-color: #ffffff !important;
186
+ border-radius: 12px !important;
187
+ }
188
+
189
+ .gr-chatbot .message.user {
190
+ background-color: rgb(232,225,250) !important;
191
+ border-radius: 12px !important;
192
+ }
193
+
194
+ .gr-chatbot .message.bot {
195
+ background-color: #f3f4f6 !important;
196
+ border-radius: 12px !important;
197
+ }
198
+
199
+ /* --- Gallery --- */
200
+ .gallery {
201
+ background: #ffffff !important;
202
+ border-radius: 10px !important;
203
+ }
204
+
205
+ /* --- Log textbox --- */
206
+ textarea {
207
+ font-family: monospace !important;
208
+ font-size: 0.8rem !important;
209
+ }
210
+
211
+ /* --- Markdown headings inside tabs --- */
212
+ .tabitem h3 {
213
+ color: rgb(40,9,109) !important;
214
+ font-weight: 700 !important;
215
+ }
216
+
217
+ .tabitem h4 {
218
+ color: #374151 !important;
219
+ }
220
+
221
+ /* --- Examples row (AI Dashboard) --- */
222
+ .examples-row button {
223
+ background: rgb(240,238,250) !important;
224
+ color: rgb(40,9,109) !important;
225
+ border: 1px solid rgb(40,9,109) !important;
226
+ border-radius: 8px !important;
227
+ font-size: 0.85rem !important;
228
+ }
229
+
230
+ .examples-row button:hover {
231
+ background: rgb(232,225,250) !important;
232
+ }
233
+
234
+ /* --- Header / footer: transparent over banner --- */
235
+ header, header *,
236
+ footer, footer * {
237
+ background: transparent !important;
238
+ box-shadow: none !important;
239
+ }
240
+
241
+ footer a, footer button,
242
+ header a, header button {
243
+ background: transparent !important;
244
+ border: none !important;
245
+ box-shadow: none !important;
246
+ }
247
+
248
+ #footer, #footer *,
249
+ [class*="footer"], [class*="footer"] *,
250
+ [class*="chip"], [class*="pill"], [class*="chip"] *, [class*="pill"] * {
251
+ background: transparent !important;
252
+ border: none !important;
253
+ box-shadow: none !important;
254
+ }
255
+
256
+ [data-testid*="api"], [data-testid*="settings"],
257
+ [id*="api"], [id*="settings"],
258
+ [class*="api"], [class*="settings"],
259
+ [class*="bottom"], [class*="toolbar"], [class*="controls"] {
260
+ background: transparent !important;
261
+ box-shadow: none !important;
262
+ }
263
+
264
+ [data-testid*="api"] *, [data-testid*="settings"] *,
265
+ [id*="api"] *, [id*="settings"] *,
266
+ [class*="api"] *, [class*="settings"] * {
267
+ background: transparent !important;
268
+ box-shadow: none !important;
269
+ }
270
+
271
+ section footer {
272
+ background: transparent !important;
273
+ }
274
+
275
+ section footer button,
276
+ section footer a {
277
+ background: transparent !important;
278
+ background-color: transparent !important;
279
+ border: none !important;
280
+ box-shadow: none !important;
281
+ color: white !important;
282
+ }
283
+
284
+ section footer button:hover,
285
+ section footer button:focus,
286
+ section footer a:hover,
287
+ section footer a:focus {
288
+ background: transparent !important;
289
+ background-color: transparent !important;
290
+ box-shadow: none !important;
291
+ }
292
+
293
+ section footer button,
294
+ section footer button * {
295
+ background: transparent !important;
296
+ background-color: transparent !important;
297
+ background-image: none !important;
298
+ box-shadow: none !important;
299
+ filter: none !important;
300
+ }
301
+
302
+ section footer button::before,
303
+ section footer button::after {
304
+ background: transparent !important;
305
+ background-color: transparent !important;
306
+ background-image: none !important;
307
+ box-shadow: none !important;
308
+ filter: none !important;
309
+ }
310
+
311
+ section footer a,
312
+ section footer a * {
313
+ background: transparent !important;
314
+ background-color: transparent !important;
315
+ box-shadow: none !important;
316
+ }
317
+
318
+ .gradio-container footer button,
319
+ .gradio-container footer button *,
320
+ .gradio-container .footer button,
321
+ .gradio-container .footer button * {
322
+ background: transparent !important;
323
+ background-color: transparent !important;
324
+ background-image: none !important;
325
+ box-shadow: none !important;
326
+ }
327
+
328
+ .gradio-container footer button::before,
329
+ .gradio-container footer button::after,
330
+ .gradio-container .footer button::before,
331
+ .gradio-container .footer button::after {
332
+ background: transparent !important;
333
+ background-color: transparent !important;
334
+ background-image: none !important;
335
+ box-shadow: none !important;
336
+ }