userIdc2024 committed on
Commit
483b020
·
verified ·
1 Parent(s): 6395137

Upload 32 files

Browse files
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import streamlit as st
3
+
4
+ from config import configure_logging, configure_page
5
+ from app_pages.video_analyzer import analyzer_page
6
+ from app_pages.script_generator import generator_page
7
+ from app_pages.comparison import comparison_page
8
+ from utils.auth import gated_access
9
+
10
def main():
    """Entry point for the Streamlit UI: configure, gate access, then dispatch to a page."""
    configure_logging()
    configure_page()

    # Stop rendering entirely until the access gate has been passed.
    if not gated_access():
        return

    app_mode = st.selectbox("Select App Mode", ["Video Analyser", "Script Generator", "Comparison"], index=0)

    # Dispatch table replaces the if/elif/else chain; any unknown mode
    # falls through to the comparison page, exactly as the else branch did.
    pages = {
        "Video Analyser": analyzer_page,
        "Script Generator": generator_page,
    }
    pages.get(app_mode, comparison_page)()

if __name__ == "__main__":
    launch_logger = logging.getLogger(__name__)
    try:
        launch_logger.info("Launching Streamlit app...")
        main()
    except Exception:
        # Last-resort guard: record the traceback instead of crashing silently.
        launch_logger.exception("Unhandled error during app launch.")
app_pages/__pycache__/comparison.cpython-311.pyc ADDED
Binary file (22.7 kB). View file
 
app_pages/__pycache__/script_generator.cpython-311.pyc ADDED
Binary file (10.6 kB). View file
 
app_pages/__pycache__/video_analyzer.cpython-311.pyc ADDED
Binary file (7.67 kB). View file
 
app_pages/comparison.py ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import uuid
4
+ import logging
5
+ from typing import Any, Dict, List, Optional
6
+
7
+ import pandas as pd
8
+ import streamlit as st
9
+
10
+ from services.video_analyzer import analyze_multiple_videos
11
+ from services.comparison import generate_comparison_summary
12
+ from database import insert_comparison_result, get_all_comparisons
13
+
14
+
15
# ---------- Logging Setup ----------

# Module-scoped logger; level is driven by the LOG_LEVEL env var (default INFO).
LOGGER_NAME = "app_pages.comparison"
logger = logging.getLogger(LOGGER_NAME)
if not logger.handlers:
    # Configure root handler once
    logging.basicConfig(
        level=os.environ.get("LOG_LEVEL", "INFO"),
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    )
    logger.setLevel(os.environ.get("LOG_LEVEL", "INFO"))
26
+
27
+
28
def _log_exception(context: str, exc: Exception) -> None:
    """Log exceptions with context and show a clean UI error.

    Args:
        context: Short label naming the operation that failed.
        exc: The caught exception; its traceback is recorded via logger.exception.
    """
    # logger.exception captures the full traceback of the active exception.
    logger.exception("Exception in %s: %s", context, exc)
    st.error(f"{context} failed: {exc}")
32
+
33
+
34
+ def _rerun_analyses(analyses: List[Dict[str, Any]]) -> str:
35
+ """Stable fingerprint of analyses to avoid recomputation across reruns."""
36
+ try:
37
+
38
+ slim = []
39
+ for item in analyses or []:
40
+ slim.append({
41
+ "video_name": item.get("video_name"),
42
+ # Avoid huge or non-deterministic blobs like thumbnails
43
+ "analysis": item.get("analysis", {}),
44
+ })
45
+ return json.dumps(slim, sort_keys=True, ensure_ascii=False)
46
+ except Exception as e:
47
+ _log_exception("_rerun_analyses", e)
48
+ # Fallback: random to force recompute
49
+ return str(uuid.uuid4())
50
+
51
+
52
+ # ---------- Helpers ----------
53
+
54
+ def _mean_effectiveness(metrics):
55
+ """Compute average effectiveness score from e.g. '7/10' style values."""
56
+ scores = []
57
+ for m in metrics or []:
58
+ try:
59
+ scores.append(int(str(m.get("effectiveness_score", "0/10")).split("/")[0]))
60
+ except Exception:
61
+ # Log and skip bad value
62
+ logger.debug("Bad effectiveness_score value: %s", m)
63
+ pass
64
+ return round(sum(scores) / len(scores), 2) if scores else 0.0
65
+
66
+
67
def compare_analyses(analyses):
    """Build a structured dict from per-video analyses for tabular display.

    Returns a dict with parallel lists keyed by aspect (hooks, frameworks,
    audiences, metrics_summary, improvements); one entry per video.
    A failing item is logged and skipped so the rest still render.
    """
    result = {
        key: [] for key in ("hooks", "frameworks", "audiences", "metrics_summary", "improvements")
    }
    for entry in analyses:
        try:
            name = entry["video_name"]
            analysis = entry["analysis"]
            hook = analysis.get("hook", {}) or {}
            video_info = analysis.get("video_analysis", {}) or {}

            result["hooks"].append({
                "video": name,
                "hook_text": hook.get("hook_text"),
                "principle": hook.get("principle"),
            })
            result["frameworks"].append({
                "video": name,
                "framework_analysis": analysis.get("framework_analysis"),
            })
            result["audiences"].append({
                "video": name,
                "audience": video_info.get("target_audience"),
            })
            # Collapse per-timestamp metrics into one average score per video.
            result["metrics_summary"].append({
                "video": name,
                "avg_score": _mean_effectiveness(video_info.get("video_metrics", []) or []),
            })
            result["improvements"].append({
                "video": name,
                "recommendations": analysis.get("timestamp_improvements", []) or [],
            })
        except Exception as e:
            _log_exception("compare_analyses (per-item)", e)
    return result
113
+
114
+
115
+ def _arrow_safe_df(df: pd.DataFrame) -> pd.DataFrame:
116
+ """
117
+ Make dataframe Arrow-compatible for Streamlit:
118
+ - Replace NaN with empty strings
119
+ - Coerce to string to avoid mixed object dtype issues
120
+ """
121
+ try:
122
+ return df.fillna("").astype(str)
123
+ except Exception as e:
124
+ _log_exception("_arrow_safe_df", e)
125
+ return df # last resort (may still error later)
126
+
127
+
128
def _ensure_state_keys():
    """Seed session_state with this page's defaults; existing values are kept.

    Underscore-prefixed keys are internal bookkeeping (fingerprint, run counter,
    last action/tab) rather than user-facing data.
    """
    defaults = {
        "comparison_prompt": "Compare these videos",
        "analyses": None,
        "summary": None,
        "comparison_dict": None,
        "_analyses_fp": None,
        "_run_no": 0,
        "_last_action": None,
        "_last_tab": None,
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
143
+
144
+
145
def _log_run_header(selected_tab: str):
    """Bump the rerun counter and log one trace line so the rerun lifecycle is visible."""
    state = st.session_state
    state["_run_no"] = int(state.get("_run_no", 0)) + 1
    logger.info(
        "RERUN #%d | tab=%s | analyses=%s | summary=%s | table=%s | last_action=%s",
        state["_run_no"],
        selected_tab,
        state.get("analyses") is not None,
        state.get("summary") is not None,
        state.get("comparison_dict") is not None,
        state.get("_last_action"),
    )
162
+
163
+
164
+
165
def _save_comparison_callback():
    """Button callback: persist the current comparison to the DB once per click."""
    try:
        state = st.session_state
        analyses = state.get("analyses")
        table = state.get("comparison_dict")
        summary = state.get("summary")

        logger.info("Save callback invoked | analyses=%s | table=%s | summary=%s",
                    analyses is not None, table is not None, summary is not None)

        # Refuse to write a partial record: all three artifacts must be present.
        if not analyses or not table or summary is None:
            st.warning("Nothing to save yet. Please run a comparison first.")
            logger.warning("Save aborted: missing data (analyses/table/summary).")
            return

        names = [entry["video_name"] for entry in analyses]
        thumbs = {entry["video_name"]: entry.get("thumbnail", "") for entry in analyses}

        logger.info("Inserting comparison_result | videos=%s", names)
        insert_comparison_result(
            video_name="comparison_result",
            video_names=names,
            user_prompt=state.get("comparison_prompt", ""),
            response={"comparison_table": table, "summary": summary},
            thumbnails=thumbs
        )
        st.success("Comparison saved to database!")
        # Marker consumed by the page body to show a post-save confirmation.
        state["_last_action"] = "saved_to_db"
        logger.info("Save completed successfully.")

    except Exception as e:
        _log_exception("_save_comparison_callback", e)
197
+
198
+
199
+ # ---------- Page ----------
200
+
201
def comparison_page():
    """Streamlit page: compare 2-5 uploaded videos, or browse saved comparisons.

    State flow (all in st.session_state):
      - "analyses": raw per-video analysis results.
      - "_analyses_fp": fingerprint of the analyses; a changed fingerprint
        invalidates the cached "summary" and "comparison_dict".
      - "_last_action": set to "saved_to_db" by the save callback so the
        post-save rerun can show a confirmation.
    """
    _ensure_state_keys()

    selected_tab = st.sidebar.radio("Select Mode", ["Comparison", "History"], index=0, key="tab_radio")

    # Trace tab switches and emit one rerun header per script execution.
    if st.session_state.get("_last_tab") != selected_tab:
        logger.info("Tab change: %s -> %s", st.session_state.get("_last_tab"), selected_tab)
        st.session_state["_last_tab"] = selected_tab
    _log_run_header(selected_tab)

    if selected_tab == "Comparison":
        st.subheader("Video Comparison")

        num_videos = st.slider("Select Number of Videos to Compare", 2, 5, 2, key="num_videos_slider")

        # One uploader widget per requested slot; only filled slots are collected.
        uploaded_videos = []
        for i in range(num_videos):
            try:
                file = st.file_uploader(
                    f"Upload Video {i+1}",
                    type=["mp4", "mov", "avi", "mkv"],
                    key=f"video_{i}"
                )
                if file:
                    uploaded_videos.append(file)
            except Exception as e:
                _log_exception(f"file_uploader[{i}]", e)

        # Run comparison
        if st.button("Run Comparison", use_container_width=True, key="run_comparison_btn"):
            logger.info("Run Comparison clicked | uploaded=%d / expected=%d", len(uploaded_videos), num_videos)
            if len(uploaded_videos) < num_videos:
                st.error("Please upload all videos before running comparison.")
                logger.warning("Run Comparison aborted: insufficient uploads.")
            else:
                try:
                    with st.spinner("Analyzing videos..."):
                        analyses = analyze_multiple_videos(uploaded_videos)
                    # Fresh analyses invalidate the cached summary and table.
                    st.session_state["analyses"] = analyses
                    st.session_state["_analyses_fp"] = _rerun_analyses(analyses)
                    st.session_state["summary"] = None
                    st.session_state["comparison_dict"] = None
                    logger.info("Analyses computed. Set rerun and cleared summary/table.")
                except Exception as e:
                    _log_exception("analyze_multiple_videos", e)

        analyses = st.session_state.get("analyses")
        if analyses:
            st.divider()
            st.subheader("Comparison")

            # Snapshot the fingerprint pair once; both sections below compare
            # against the same cached_fp even though each updates _analyses_fp.
            current_fp = _rerun_analyses(analyses)
            cached_fp = st.session_state.get("_analyses_fp")

            # ---- Summary ----
            st.markdown("#### Comparison Summary")
            try:
                # Regenerate only when missing or when the analyses changed.
                if st.session_state.get("summary") is None or current_fp != cached_fp:
                    logger.info("Generating summary (fp changed? %s)", current_fp != cached_fp)
                    with st.spinner("Generating comparison..."):
                        summary = generate_comparison_summary(
                            analyses,
                            st.session_state.get("comparison_prompt", "Compare these videos")
                        )
                    st.session_state["summary"] = summary
                    st.session_state["_analyses_fp"] = current_fp
                else:
                    logger.info("Reusing cached summary.")
                st.markdown(st.session_state["summary"])
            except Exception as e:
                _log_exception("generate_comparison_summary", e)

            # ---- Structured Comparison ----
            st.markdown("#### Structured Comparison")
            try:
                if st.session_state.get("comparison_dict") is None or current_fp != cached_fp:
                    logger.info("Building comparison table (fp changed? %s)", current_fp != cached_fp)
                    comparison = compare_analyses(analyses)

                    # Pivot the parallel aspect lists into one column per video.
                    comparison_dict = {}
                    for hook, fw, aud, met in zip(
                        comparison["hooks"],
                        comparison["frameworks"],
                        comparison["audiences"],
                        comparison["metrics_summary"]
                    ):
                        video = hook["video"]
                        comparison_dict[video] = {
                            "Hook Text": hook.get("hook_text", ""),
                            "Principle": hook.get("principle", ""),
                            "Framework Analysis": fw.get("framework_analysis", ""),
                            "Target Audience": aud.get("audience", ""),
                            "Avg Score": met.get("avg_score", ""),
                        }

                    st.session_state["comparison_dict"] = comparison_dict
                    st.session_state["_analyses_fp"] = current_fp
                else:
                    logger.info("Reusing cached comparison table.")

                # Display a string-coerced copy; export the original values as CSV.
                df_horizontal = pd.DataFrame(st.session_state["comparison_dict"])
                df_display = _arrow_safe_df(df_horizontal.copy())
                st.dataframe(df_display, use_container_width=True)

                csv_data = df_horizontal.to_csv(index=True).encode("utf-8")
                st.download_button(
                    "Download CSV",
                    data=csv_data,
                    file_name="comparison_results.csv",
                    mime="text/csv",
                    use_container_width=True,
                    key="download_current_csv_btn"
                )
            except Exception as e:
                _log_exception("Structured Comparison section", e)

            # on_click runs before the rerun, so the save happens exactly once.
            st.button(
                "Save to DB",
                use_container_width=True,
                key="save_to_db_btn",
                on_click=_save_comparison_callback
            )

            # Optional: show last action (helps verify post-save rerun flow)
            if st.session_state.get("_last_action") == "saved_to_db":
                st.info("Saved to DB")

        else:
            logger.info("No analyses in session_state yet.")

    else:
        # ---------- History ----------
        logger.info("Entering History tab.")
        try:
            history_items = get_all_comparisons(limit=20)
            logger.info("Fetched %d history items.", len(history_items) if history_items else 0)
        except Exception as e:
            _log_exception("get_all_comparisons", e)
            history_items = []

        if history_items:
            # Titles for sidebar selection; fall back to raw values when
            # created_at is not a datetime (strftime would raise).
            try:
                titles = [
                    f"{item['video_name']} ({item['created_at'].strftime('%Y-%m-%d %H:%M')})"
                    for item in history_items
                ]
            except Exception:
                titles = [
                    f"{item.get('video_name', 'comparison_result')} ({item.get('created_at')})"
                    for item in history_items
                ]

            try:
                selected = st.sidebar.radio("History Items", titles, index=0, key="history_select_radio")
                idx = titles.index(selected)
                selected_data = history_items[idx]
                logger.info("History selection: index=%d title=%s", idx, selected)
            except Exception as e:
                _log_exception("History selection radio", e)
                selected_data = history_items[0]

            st.subheader("Comparison Result")

            # Thumbnails row, one column per compared video.
            try:
                if "video_names" in selected_data:
                    st.markdown("### Compared Videos")
                    cols = st.columns(len(selected_data["video_names"]))
                    for i, name in enumerate(selected_data["video_names"]):
                        with cols[i]:
                            thumb = selected_data.get("thumbnails", {}).get(name, "")
                            if thumb:
                                # Thumbnails are stored as base64-encoded JPEG strings.
                                st.image("data:image/jpeg;base64," + thumb, width=120)
                            st.caption(name)
            except Exception as e:
                _log_exception("Rendering thumbnails", e)

            # Response content
            response = selected_data.get("response", {}) or {}

            # Summary
            try:
                if "summary" in response:
                    st.markdown("### Comparison Summary")
                    st.markdown(response["summary"])
            except Exception as e:
                _log_exception("Rendering history summary", e)

            # Table
            try:
                if "comparison_table" in response:
                    st.markdown("### Structured Comparison")
                    df_hist = pd.DataFrame(response["comparison_table"])
                    df_hist_display = _arrow_safe_df(df_hist.copy())
                    st.dataframe(df_hist_display, use_container_width=True)

                    csv_hist = df_hist.to_csv(index=True).encode("utf-8")
                    st.download_button(
                        "Download CSV",
                        data=csv_hist,
                        file_name="past_comparison.csv",
                        mime="text/csv",
                        use_container_width=True,
                        key="download_history_csv_btn"
                    )
            except Exception as e:
                _log_exception("Rendering history table", e)

        else:
            st.sidebar.info("No saved comparisons yet.")
            st.info("No saved history available.")
            logger.info("History tab: no items.")
419
+
app_pages/script_generator.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import pandas as pd
4
+ import streamlit as st
5
+
6
+ from services.script_generator import generate_scripts
7
+ from utils.video import get_video_thumbnail_base64
8
+ from components.display_variations import display_script_variations
9
+ from database import insert_script_result, get_all_scripts
10
+
11
def generator_page():
    """Streamlit page: generate ad scripts from an uploaded video, with history browsing.

    Session state used: "scripts" (list of generation rounds), "video_name",
    "video_path", "thumbnail", and "meta" (the text inputs used for generation,
    reused by the "Generate More Scripts" flow).
    """
    selected_tab = st.sidebar.radio("Select Mode", ["Script Generator", "History"], index=0)

    if selected_tab == "Script Generator":
        st.subheader("Script Generator")

        uploaded_video = st.file_uploader(
            "Upload Video or ZIP (max 3 videos)",
            type=['mp4','mov','avi','mkv','zip']
        )
        script_duration = st.slider("Script Duration (seconds)", 0, 180, 60, 5)
        num_scripts = st.slider("Number of Scripts", 1, 5, 3)

        st.markdown("Additional Information")
        offer_details = st.text_area("Offer Details", placeholder="e.g., Solar installation with $0 down payment...")
        target_audience = st.text_area("Target Audience", placeholder="e.g., 40+ homeowners with high electricity bills...")
        specific_hooks = st.text_area("Specific Hooks to Test", placeholder="e.g., Government rebate angle...")
        additional_context = st.text_area("Additional Context", placeholder="Compliance requirements, brand guidelines...")

        script_button = st.button("Generate Scripts", use_container_width=True)
        if script_button and uploaded_video:
            # Persist the upload to disk so the generator can read it by path.
            # NOTE(review): delete=False means the temp file is never removed — confirm cleanup policy.
            with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_video.name)[1]) as tmp:
                tmp.write(uploaded_video.read())
                video_path = tmp.name
            with st.spinner("Generating scripts..."):
                st.session_state.setdefault("scripts", [])
                result = generate_scripts(
                    video_path,
                    offer_details,
                    target_audience,
                    specific_hooks,
                    additional_context,
                    num_scripts=num_scripts,
                    duration=script_duration
                )
                if result and "script_variations" in result:
                    # Each generation is appended as a new round so earlier
                    # rounds remain visible across reruns.
                    st.session_state["scripts"].append({
                        "prompt_used": "Initial Generation",
                        "variations": result["script_variations"]
                    })
                    st.session_state["video_name"] = uploaded_video.name
                    st.session_state["video_path"] = video_path
                    st.session_state["thumbnail"] = get_video_thumbnail_base64(video_path)
                    # Inputs are cached so "Generate More Scripts" reuses them.
                    st.session_state["meta"] = {
                        "offer_details": offer_details,
                        "target_audience": target_audience,
                        "specific_hook": specific_hooks,
                        "additional_context": additional_context
                    }

        if "scripts" in st.session_state and st.session_state["scripts"]:
            # Render every round generated so far, newest last.
            for round_idx, round_data in enumerate(st.session_state["scripts"], 1):
                st.markdown(f"### Generation Round {round_idx}")
                st.text_input("Prompt used:", round_data["prompt_used"], disabled=True, key=f"prompt_{round_idx}")
                for i, variation in enumerate(round_data["variations"], 1):
                    st.markdown(f"#### Variation {i}: {variation.get('variation_name','Var')}")
                    df = pd.DataFrame(variation.get("script_table", []))
                    st.table(df)

            st.divider()
            save_button = st.button("Save to DB", use_container_width=True)
            if save_button:
                try:
                    # Persists all rounds plus the metadata captured at generation time.
                    insert_script_result(
                        video_name=st.session_state.get("video_name", "unknown"),
                        offer_details=st.session_state["meta"].get("offer_details", ""),
                        target_audience=st.session_state["meta"].get("target_audience", ""),
                        specific_hook=st.session_state["meta"].get("specific_hook", ""),
                        additional_context=st.session_state["meta"].get("additional_context", ""),
                        response=st.session_state["scripts"],
                        thumbnail=st.session_state.get("thumbnail", "")
                    )
                    st.success("Scripts saved to database!")
                except Exception as e:
                    st.error(f"Failed to save scripts: {e}")

            st.subheader("Generate More Scripts")
            more_num = st.slider("How many more scripts?", 1, 5, 1, key="more_scripts_slider")
            more_prompt = st.text_area("Required Prompt", placeholder="Add specific guidance")
            if st.button("Generate More Scripts", use_container_width=True):
                if not more_prompt.strip():
                    st.error("Please provide a prompt before generating more scripts.")
                else:
                    video_path = st.session_state.get("video_path")
                    if not video_path:
                        st.error("No video available. Please upload again.")
                    else:
                        with st.spinner("Generating more scripts..."):
                            # Reuse the cached inputs; the extra prompt is appended
                            # to the original additional context.
                            extra_result = generate_scripts(
                                video_path,
                                st.session_state["meta"]["offer_details"],
                                st.session_state["meta"]["target_audience"],
                                st.session_state["meta"]["specific_hook"],
                                st.session_state["meta"]["additional_context"] + "\n\n" + more_prompt,
                                num_scripts=more_num,
                                duration=script_duration
                            )
                            if extra_result and "script_variations" in extra_result:
                                st.session_state["scripts"].append({
                                    "prompt_used": more_prompt,
                                    "variations": extra_result["script_variations"]
                                })

    else:
        # ---------- History ----------
        history_items = get_all_scripts(limit=20)
        if history_items:
            video_titles = [
                f"{item['video_name']} ({item['created_at'].strftime('%Y-%m-%d %H:%M ')})"
                for item in history_items
            ]
            selected = st.sidebar.radio("History Items", video_titles, index=0)
            idx = video_titles.index(selected)
            selected_data = history_items[idx]

            st.subheader(f"Scripts for: {selected_data['video_name']}")
            if selected_data.get("thumbnail"):
                # Thumbnails are stored as base64-encoded JPEG strings.
                st.image("data:image/jpeg;base64," + selected_data["thumbnail"], width=150)

            json_response = selected_data.get("response")
            if json_response:
                if isinstance(json_response, list):
                    # Re-render each saved round and accumulate tables for CSV export.
                    all_tables = []
                    for round_idx, round_data in enumerate(json_response, 1):
                        st.markdown(f"### Generation Round {round_idx}")
                        st.text_input("Prompt used:", round_data.get("prompt_used", "N/A"), disabled=True)
                        for i, variation in enumerate(round_data.get("variations", []), 1):
                            st.markdown(f"#### Variation {i}: {variation.get('variation_name','Var')}")
                            df = pd.DataFrame(variation.get("script_table", []))
                            st.table(df)
                            if not df.empty:
                                # Tag rows so the combined CSV keeps provenance.
                                df["Variation"] = variation.get("variation_name", f"Var{i}")
                                df["Round"] = round_idx
                                all_tables.append(df)

                    if all_tables:
                        csv_scripts = pd.concat(all_tables, ignore_index=True).to_csv(index=False)
                        st.download_button(
                            "Download CSV",
                            data=csv_scripts,
                            file_name=f"{selected_data['video_name']}_scripts.csv",
                            mime="text/csv",
                            use_container_width=True
                        )
        else:
            st.info("No saved history available.")
app_pages/video_analyzer.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tempfile
3
+ import pandas as pd
4
+ import streamlit as st
5
+
6
+ from services.video_analyzer import analyze_video_only
7
+ from components.render_analysis import render_analyzer_results
8
+ from utils.video import get_video_thumbnail_base64
9
+ from utils.dataframe import analysis_to_csv
10
+ from database import insert_video_analysis, get_all_video_analyses
11
+
12
def analyzer_page():
    """Streamlit page: run the video analyzer on an upload, or browse saved analyses.

    Session state used: "analysis" (analyzer output dict), "video_name",
    "video_path", "thumbnail" — all set when a run completes so results
    survive Streamlit reruns.
    """
    selected_tab = st.sidebar.radio("Select Mode", ["Video Analyser", "History"], index=0)

    if selected_tab == "Video Analyser":
        st.subheader(" Video Analyser")
        uploaded_video = st.file_uploader("Upload Video",
                                          type=['mp4','mov','avi','mkv'],
                                          help="Upload a video for analysis")
        analyse_button = st.button("Run Analysis", use_container_width=True)

        if uploaded_video and analyse_button:
            # Persist the upload to disk so the analyzer can read it by path.
            # NOTE(review): delete=False means the temp file is never removed — confirm cleanup policy.
            with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_video.name)[1]) as tmp:
                tmp.write(uploaded_video.read())
                video_path = tmp.name
            with st.spinner("Analyzing video..."):
                st.session_state["analysis"] = analyze_video_only(video_path)
                st.session_state["video_name"] = uploaded_video.name
                st.session_state["video_path"] = video_path
                st.session_state["thumbnail"] = get_video_thumbnail_base64(video_path)

        if "analysis" in st.session_state and st.session_state["analysis"]:
            render_analyzer_results(st.session_state["analysis"])

            col1, col2 = st.columns(2)
            with col1:
                # Flatten each analysis section into one tagged DataFrame so a
                # single CSV can hold storyboard, script, metrics and improvements.
                analysis = st.session_state["analysis"]
                frames = []
                if "storyboard" in analysis:
                    df_storyboard = pd.DataFrame(analysis["storyboard"])
                    df_storyboard["section"] = "Storyboard"
                    frames.append(df_storyboard)
                if "script" in analysis:
                    df_script = pd.DataFrame(analysis["script"])
                    df_script["section"] = "Script"
                    frames.append(df_script)
                if "video_analysis" in analysis and "video_metrics" in analysis["video_analysis"]:
                    df_metrics = pd.DataFrame(analysis["video_analysis"]["video_metrics"])
                    df_metrics["section"] = "Metrics"
                    frames.append(df_metrics)
                if "timestamp_improvements" in analysis:
                    df_improvements = pd.DataFrame(analysis["timestamp_improvements"])
                    df_improvements["section"] = "Improvements"
                    frames.append(df_improvements)

                if frames:
                    csv_content = pd.concat(frames, ignore_index=True).to_csv(index=False)
                    st.download_button(
                        "Download CSV",
                        data=csv_content,
                        file_name=f"{st.session_state.get('video_name','analysis')}.csv",
                        mime="text/csv",
                        use_container_width=True
                    )
                else:
                    st.info("No tabular data available for CSV export.")

            with col2:
                if st.button("Save to DB", use_container_width=True):
                    try:
                        insert_video_analysis(
                            video_name=st.session_state.get("video_name", "unknown"),
                            response=st.session_state["analysis"],
                            thumbnail=st.session_state.get("thumbnail", "")
                        )
                        st.success("Analysis saved to database ")
                    except Exception as e:
                        st.error(f"Failed to save analysis: {e}")

    else:
        # ---------- History ----------
        history_items = get_all_video_analyses(limit=20)
        if history_items:
            video_titles = [
                f"{item['video_name']} ({item['created_at'].strftime('%Y-%m-%d %H:%M')})"
                for item in history_items
            ]
            selected = st.sidebar.radio("History Items", video_titles, index=0)
            idx = video_titles.index(selected)
            selected_data = history_items[idx]

            st.subheader(f"Analysis for: {selected_data['video_name']}")
            if selected_data.get("thumbnail"):
                # Thumbnails are stored as base64-encoded JPEG strings.
                st.image("data:image/jpeg;base64," + selected_data["thumbnail"], width=150)

            json_response = selected_data.get("response")
            if json_response:
                tabs = st.tabs(["Video Analysis"])
                with tabs[0]:
                    render_analyzer_results(json_response)
                try:
                    csv_data = analysis_to_csv(json_response)
                    st.download_button(
                        "Download CSV",
                        data=csv_data,
                        file_name=f"{selected_data['video_name']}_analysis.csv",
                        mime="text/csv",
                        use_container_width=True
                    )
                except Exception as e:
                    st.error(f"CSV export failed: {e}")
        else:
            st.info("No saved history available.")
components/__pycache__/display_variations.cpython-311.pyc ADDED
Binary file (2.06 kB). View file
 
components/__pycache__/render_analysis.cpython-311.pyc ADDED
Binary file (13.6 kB). View file
 
components/display_variations.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import streamlit as st
3
+
4
def display_script_variations(json_data: dict):
    """Render each script variation as a table and offer one combined CSV download.

    Args:
        json_data: Generator output; must contain a non-empty "script_variations"
            list, where each variation may carry "variation_name" and "script_table".

    Shows a UI error and returns early when no variations are available.
    """
    if not json_data or "script_variations" not in json_data:
        st.error("No script variations found")
        return
    variations = json_data["script_variations"]
    if not variations:
        # Guard: pd.concat([]) raises ValueError on an empty list, so an
        # empty "script_variations" list must be rejected up front.
        st.error("No script variations found")
        return
    for i, variation in enumerate(variations, 1):
        st.markdown(f"### Variation {i}: {variation.get('variation_name','Var')}")
        df = pd.DataFrame(variation.get("script_table", []))
        st.table(df)
    # Combine every variation into one CSV, tagging rows with the variation name.
    csv_content = pd.concat(
        [pd.DataFrame(v.get("script_table", []))
         .assign(Variation=v.get("variation_name", f"Var{i+1}"))
         for i, v in enumerate(variations)],
        ignore_index=True
    ).to_csv(index=False)
    st.download_button("Download CSV", data=csv_content,
                       file_name="scripts.csv", mime="text/csv")
components/render_analysis.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import pandas as pd
3
+ import streamlit as st
4
+ from utils.dataframe import (
5
+ _normalize_list, _to_dataframe, _mean_effectiveness, _search_dataframe, safe_dataframe
6
+ )
7
+
8
def render_analyzer_results(analysis: dict, prefix: str = "") -> None:
    """Render a full analyzer result: metric cards, summary, snapshot, and data tabs.

    Args:
        analysis: Analyzer output dict (keys like "video_analysis", "storyboard",
            "script", "hook", "timestamp_improvements").
        prefix: Namespace for widget keys so the component can be rendered
            more than once on a page without key collisions.
    """
    if not isinstance(analysis, dict) or not analysis:
        st.warning("No analysis available.")
        return

    # Inline CSS for the card-style metric/section containers below.
    st.markdown("""
    <style>
    .metric-card {background: #0f172a; padding: 14px 16px; border-radius: 14px; border: 1px solid #1f2937;}
    .section-card {background: #0b1220; padding: 18px; border-radius: 14px; border: 1px solid #1f2937;}
    .label {font-size: 12px; color: #94a3b8; margin-bottom: 6px;}
    .value {font-size: 16px; color: #e2e8f0;}
    </style>
    """, unsafe_allow_html=True)

    # Normalize possibly-missing sections to empty containers.
    va = analysis.get("video_analysis", {}) or {}
    storyboard = analysis.get("storyboard", []) or []
    script = analysis.get("script", []) or []
    metrics = va.get("video_metrics", []) or []
    mean_score = _mean_effectiveness(metrics)

    # Top row: four headline metric cards.
    mcol1, mcol2, mcol3, mcol4 = st.columns([1,1,1,1])
    with mcol1:
        st.markdown(f'<div class="metric-card"><div class="label">Scenes</div><div class="value">{len(storyboard)}</div></div>', unsafe_allow_html=True)
    with mcol2:
        st.markdown(f'<div class="metric-card"><div class="label">Dialogue Lines</div><div class="value">{len(script)}</div></div>', unsafe_allow_html=True)
    with mcol3:
        st.markdown(f'<div class="metric-card"><div class="label">Avg Effectiveness</div><div class="value">{mean_score}/10</div></div>', unsafe_allow_html=True)
    with mcol4:
        st.markdown(f'<div class="metric-card"><div class="label">Improvements</div><div class="value">{len(analysis.get("timestamp_improvements", []) or [])}</div></div>', unsafe_allow_html=True)

    # Middle row: executive summary (left) and quick snapshot (right).
    colA, colB = st.columns([1.3,1])
    with colA:
        with st.container():
            st.markdown("### Executive Summary")
            c1, c2 = st.columns(2)
            with c1:
                with st.expander("Brief", expanded=True):
                    st.write(analysis.get("brief", "N/A"))
                with st.expander("Caption Details", expanded=False):
                    st.write(analysis.get("caption_details", "N/A"))
            with c2:
                hook = analysis.get("hook", {}) or {}
                with st.expander("Hook", expanded=True):
                    st.markdown(f"**Opening:** {hook.get('hook_text','N/A')}")
                    st.markdown(f"**Principle:** {hook.get('principle','N/A')}")
                    adv = _normalize_list(hook.get("advantages"))
                    if adv:
                        st.markdown("**Advantages:**")
                        st.markdown("\n".join([f"- {a}" for a in adv]))
            st.divider()
            st.markdown("### Narrative & Copy Frameworks")
            with st.expander("Framework Analysis", expanded=True):
                st.write(analysis.get("framework_analysis", "N/A"))

    with colB:
        st.markdown("### Snapshot")
        with st.container():
            st.caption("Top Drivers")
            # NOTE(review): stray '</div>' with no opening tag in the next three
            # markdown strings — looks like leftover markup; confirm and remove.
            st.markdown(f'{va.get("effectiveness_factors","N/A")}</div>', unsafe_allow_html=True)
            st.markdown("")
        with st.container():
            st.caption("Psychological Triggers")
            st.markdown(f'{va.get("psychological_triggers","N/A")}</div>', unsafe_allow_html=True)
            st.markdown("")
        with st.container():
            st.caption("Target Audience")
            st.markdown(f'{va.get("target_audience","N/A")}</div>', unsafe_allow_html=True)

    st.divider()
    tabs = st.tabs(["Storyboard", "Script", "Scored Metrics", "Improvements", "Raw JSON"])

    with tabs[0]:
        # Widget keys are namespaced with `prefix` to avoid duplicate-key errors.
        q = st.text_input("Search storyboard", key=f"{prefix}_storyboard")
        if storyboard:
            df = _to_dataframe(storyboard, {"timeline": "Timeline", "scene": "Scene", "visuals": "Visuals", "dialogue": "Dialogue", "camera": "Camera", "sound_effects": "Sound Effects"})
            df = _search_dataframe(df, q)
            st.dataframe(safe_dataframe(df), use_container_width=True, height=480)
        else:
            st.info("No storyboard available.")

    with tabs[1]:
        q2 = st.text_input("Search script", key=f"{prefix}_script")
        if script:
            df = _to_dataframe(script, {"timeline": "Timeline", "dialogue": "Dialogue"})
            df = _search_dataframe(df, q2)
            st.dataframe(safe_dataframe(df), use_container_width=True, height=480)
        else:
            st.info("No script breakdown available.")

    with tabs[2]:
        q3 = st.text_input("Search metrics", key=f"{prefix}_metrics")
        if metrics:
            dfm = _to_dataframe(metrics, {"timestamp": "Timestamp", "element": "Element", "current_approach": "Current Approach", "effectiveness_score": "Effectiveness Score", "notes": "Notes"})
            dfm = _search_dataframe(dfm, q3)
            # NOTE(review): other tabs wrap in safe_dataframe() before display;
            # this one does not — confirm whether that is intentional.
            st.dataframe(dfm, use_container_width=True, height=480)
        else:
            st.info("No video metrics available.")

    with tabs[3]:
        improvements = analysis.get("timestamp_improvements", []) or []
        q4 = st.text_input("Search improvements", key=f"{prefix}_improvements")
        if improvements:
            imp_df = _to_dataframe(improvements, {"timestamp": "Timestamp", "current_element": "Current Element", "improvement_type": "Improvement Type", "recommended_change": "Recommended Change", "expected_impact": "Expected Impact", "priority": "Priority"})
            if "Priority" in imp_df.columns:
                # Ordered categorical so High sorts before Medium before Low;
                # values outside this set become NaN under astype.
                order = pd.CategoricalDtype(["High", "Medium", "Low"], ordered=True)
                imp_df["Priority"] = imp_df["Priority"].astype(order)
                if "Timestamp" in imp_df.columns:
                    imp_df = imp_df.sort_values(["Priority", "Timestamp"])
            imp_df = _search_dataframe(imp_df, q4)
            st.dataframe(imp_df, use_container_width=True, height=480)
        else:
            st.info("No timestamp-based improvements available.")

    with tabs[4]:
        pretty = json.dumps(analysis, indent=2, ensure_ascii=False)
        st.code(pretty, language="json")
        st.download_button("Download JSON", data=pretty.encode("utf-8"), file_name="ad_analysis.json", mime="application/json", use_container_width=True)
config.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ import streamlit as st
4
+ from dotenv import load_dotenv
5
+ import os
6
+ from openai import OpenAI
7
+
8
+ load_dotenv()
9
+
10
+ GEMINI_API_KEY = os.getenv("GEMINI_KEY")
11
+
12
def configure_gemini() -> "OpenAI":
    """Build an OpenAI-SDK client pointed at Gemini's OpenAI-compatible endpoint.

    Returns:
        OpenAI: a configured client.

    Raises:
        RuntimeError: if GEMINI_KEY is not present in the environment.
    """
    # Re-read the environment on every call so a key exported after module
    # import is still honored (the old module-level snapshot went stale).
    api_key = os.getenv("GEMINI_KEY")
    if not api_key:
        # Fixed: the original message started with a stray leading space.
        raise RuntimeError("GEMINI_KEY is not set in environment variables.")
    return OpenAI(
        api_key=api_key,
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
    )
20
+
21
+
22
def configure_logging():
    """Install a basic root-logger configuration: INFO level, timestamped lines."""
    log_format = "%(asctime)s [%(levelname)s] %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_format)
27
+
28
def configure_page():
    """Configure the Streamlit page chrome: title, icon, and wide layout."""
    # NOTE(review): Streamlit requires set_page_config before any other st call;
    # app.py invokes this at the top of main(), which satisfies that.
    st.set_page_config(page_title="Video App", page_icon="🎬", layout="wide")
database.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from datetime import datetime
3
+ from pymongo import MongoClient
4
+ from dotenv import load_dotenv
5
+ from bson import ObjectId
6
+
7
+ # Load environment variables
8
+ load_dotenv()
9
+
10
+ # MongoDB Configuration
11
+ MONGO_URI = os.getenv("MONGO_URI")
12
+ DB_NAME = os.getenv("MONGO_DB")
13
+
14
+ # Initialize MongoDB client
15
+ client = MongoClient(MONGO_URI)
16
+ db = client[DB_NAME]
17
+
18
+
19
+ video_collection = db["video_analysis"]
20
+ script_collection = db["script_generator"]
21
+ comparison_collection = db["comparison_results"]
22
+
23
+
24
+ # VIDEO ANALYSIS
25
def insert_video_analysis(video_name: str, response: dict, thumbnail: str):
    """Persist one video-analysis result; return the new document id as a string."""
    record = {
        "video_name": video_name,
        "response": response,
        "thumbnail": thumbnail,
        "created_at": datetime.now(),
        "lob": "test",
    }
    inserted = video_collection.insert_one(record)
    return str(inserted.inserted_id)
36
+
37
+
38
def get_all_video_analyses(limit: int = 20):
    """Return up to *limit* saved video analyses, newest first."""
    cursor = video_collection.find().sort("created_at", -1).limit(limit)
    return list(cursor)
41
+
42
+ #SCRIPT GENERATOR
43
def insert_script_result(
    video_name: str,
    offer_details: str,
    target_audience: str,
    specific_hook: str,
    additional_context: str,
    response: dict,
    thumbnail: str
):
    """Persist a script-generation run with all of its inputs; return the id as str."""
    record = {
        "video_name": video_name,
        "offer_details": offer_details,
        "target_audience": target_audience,
        "specific_hook": specific_hook,
        "additional_context": additional_context,
        "response": response,
        "thumbnail": thumbnail,
        # Tag so get_all_scripts() can filter this collection by document kind.
        "type": "script",
        "created_at": datetime.now(),
        "lob": "test",
    }
    inserted = script_collection.insert_one(record)
    return str(inserted.inserted_id)
67
+
68
+
69
def get_all_scripts(limit: int = 20):
    """Return up to *limit* saved script generations, newest first."""
    script_filter = {"type": "script"}
    cursor = script_collection.find(script_filter).sort("created_at", -1).limit(limit)
    return list(cursor)
72
+
73
+
74
+
75
+ # COMPARISON
76
def insert_comparison_result(
    video_names: list,
    user_prompt: str,
    response: dict,
    video_name: str = "comparison_result",
    thumbnails: dict = None
):
    """Persist a multi-video comparison result; return the new id as a string."""
    record = {
        "video_name": video_name,
        "video_names": video_names,
        "user_prompt": user_prompt,
        "response": response,
        "type": "comparison",
        # Normalize a missing thumbnails mapping to an empty dict.
        "thumbnails": thumbnails or {},
        "created_at": datetime.now(),
        "lob": "test",
    }
    inserted = comparison_collection.insert_one(record)
    return str(inserted.inserted_id)
96
+
97
+
98
+
99
def get_all_comparisons(limit: int = 20):
    """Return up to *limit* saved comparison results, newest first."""
    cursor = comparison_collection.find().sort("created_at", -1).limit(limit)
    return list(cursor)
102
+
103
+
104
+
105
def get_result_by_id(collection, doc_id):
    """Look up a single document by its id (str or ObjectId) in *collection*."""
    id_filter = {"_id": ObjectId(doc_id)}
    return collection.find_one(id_filter)
108
+
109
+
110
def delete_result_by_id(collection, doc_id):
    """Delete one document by id; return True iff a document was removed."""
    outcome = collection.delete_one({"_id": ObjectId(doc_id)})
    return outcome.deleted_count > 0
prompt/__pycache__/analyser_prompt.cpython-311.pyc ADDED
Binary file (2.75 kB). View file
 
prompt/__pycache__/system_prompt.cpython-311.pyc ADDED
Binary file (3.49 kB). View file
 
prompt/analyser_prompt.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ analyser_prompt = """You are an expert video advertisement analyst. Analyze the provided video and give response conforms EXACTLY to the schema below with no extra text or markdown. Populate:
2
+
3
+ 1. **brief** → A concise summary covering visual style, speaker, target audience, and marketing objective.
4
+ 2. **caption_details** → Description of captions (color/style/position) or exactly the string `"None"` if not visible.
5
+ 3. **hook** →
6
+ - `"hook_text"`: Exact opening line or, if no speech, the precise description of the opening visual.
7
+ - `"principle"`: Psychological/marketing principle that makes this hook effective.
8
+ - `"advantages"`: ARRAY of 3–6 concise benefit statements tied to the ad’s value proposition.
9
+ 4. **framework_analysis** → A detailed block identifying copywriting/psychology/storytelling frameworks (e.g., PAS, AIDA). Highlight use of social proof, urgency, fear, authority, scroll-stopping hooks, loop openers, value positioning, and risk reversals.
10
+ 5. **storyboard** → ARRAY of 4–10 objects. Each must include:
11
+ - `"timeline"` in `"MM:SS"` (zero-padded)
12
+ - `"scene"` (brief)
13
+ - `"visuals"` (detailed)
14
+ - `"dialogue"` (exact words; use `""` if none)
15
+ - `"camera"` (shot/angle)
16
+ - `"sound_effects"` (or `"None"`)
17
+ 6. **script** → ARRAY of dialogue objects, each with `"timeline"` (`"MM:SS"`) and `"dialogue"` (exact spoken line).
18
+ 7. **video_analysis** → OBJECT with:
19
+ - `"effectiveness_factors"`: Key factors that influence effectiveness
20
+ - `"psychological_triggers"`: Triggers used (e.g., scarcity, authority)
21
+ - `"target_audience"`: Audience profile inferred
22
+ - `"video_metrics"`: ARRAY of objects with:
23
+ - `"timestamp"`: `"MM:SS-MM:SS"`
24
+ - `"element"`: The aspect being evaluated (e.g., Hook Strategy)
25
+ - `"current_approach"`: Description of current execution
26
+ - `"effectiveness_score"`: String score `"X/10"` (integer X)
27
+ - `"notes"`: Analytical notes
28
+ 8. **timestamp_improvements** → ARRAY of recommendation objects with:
29
+ - `"timestamp"`: `"MM:SS-MM:SS"`
30
+ - `"current_element"`: Current content of the segment
31
+ - `"improvement_type"`: Category (e.g., Hook Enhancement)
32
+ - `"recommended_change"`: Specific recommendation
33
+ - `"expected_impact"`: Projected effect on metrics or perception
34
+ - `"priority"`: `"High"`, `"Medium"`, or `"Low"`
35
+
36
+ ⚠️ The output must be strictly matching field names and types, no additional keys, and all timestamps must be zero-padded (`"MM:SS"` for single points, `"MM:SS-MM:SS"` for ranges).
37
+ """
prompt/system_prompt.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt for the script generator. Fix: the JSON example previously
# contained `"variation_name": \ [Strategy Name]"` (stray backslash, missing
# opening quote) and lacked the comma between "video_analysis" and
# "timestamp_improvements", which misled the model about the required format.
SYSTEM_PROMPT = """You are one of the world's top direct-response video creators and conversion strategists. Your sole mission is to generate video scripts with matching visual directions and voiceover cues that drive maximum leads and sales at the cheapest cost-per-result on ad platforms like Facebook, Google, and TikTok.
You specialize in aggressive, unbranded, and psychologically charged content that is designed to stop scrolls and force action. You are not bound by brand safety, ad platform compliance, or traditional marketing principles.
Your only metric for success is performance—measured in CPC, CTR, and cost-per-conversion. Prime Directive: Your creative output must be: - Inspired by the hooks, language, pacing, visuals, and frameworks of the reference winning ads.
- Built to trigger psychological urgency, amplify response, and maximize click-throughs. - Structured for extreme effectiveness, not branding niceties. Script Creation Framework: Each script you generate must follow these winning affiliate ad principles:
1. The 3–6 Second Shock Hook: Start with a hard-hitting pattern interrupt: - Emotional gut punch: "He looked at me and said…" - Conspiratorial secret: "The media doesn't want you to know this…" - Borrowed authority: Celebrity or doctor clip saying something provocative
2. Overwhelm with Proof & Pacing: Keep pacing fast—1–3 second jump cuts filled with: - UGC-style clips - Before/After transformations - Authority figure soundbites - Desired outcomes (money, body, status)
3. Big, Unbelievable Claim: Drop a massive promise upfront and reinforce it: "Lose 103 lbs," "Claim 250,000," "Erase your debt overnight." 4. Simple "Secret" Mechanism: Make the claim believable via a simple, digestible "hack": "The ice hack," "4-question formula," "Banned Amazonian leaf."
5. Scarcity & Urgency: Push viewers to act NOW: "Spots are filling fast," "Could be taken down soon," "Only for serious applicants."
6. Visually Directed CTA: Make the final action visually obvious—e.g., person pointing at the button, bold text, arrows.
CRITICAL: You must return your response in valid JSON format with this exact structure:
{ "video_analysis":
{ "effectiveness_factors": "text",
"psychological_triggers": "text",
"target_audience": "text",
"video_metrics": [
{ "timestamp": "0:00-0:05",
"element": "Hook Strategy",
"current_approach": "description",
"effectiveness_score": "8/10",
"notes": "analysis notes" } ] },
"timestamp_improvements": [
{ "timestamp": "0:00-0:05", "current_element": "Opening hook", "improvement_type": "Hook Enhancement", "recommended_change": "specific recommendation", "expected_impact": "projected improvement", "priority": "High/Medium/Low" } ], "script_variations":
[ { "variation_name": "[Strategy Name]", "script_table": [ { "timestamp": "0:00-0:05", "script_voiceover": "exact script text", "visual_direction": "visual instructions", "psychological_trigger": "trigger used", "cta_action": "call to action" } ] } ] }
Generate 3 script variations.
Each script should be 30-60 seconds long with 8-15 timestamp entries.
Ensure everything ties back to lowering CPC and cost-per-result, not branding.
Each script should be different from each other."""
requirements.txt ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ altair==5.5.0
2
+ annotated-types==0.7.0
3
+ anyio==4.10.0
4
+ attrs==25.3.0
5
+ blinker==1.9.0
6
+ bson==0.5.10
7
+ cachetools==5.5.2
8
+ certifi==2025.8.3
9
+ charset-normalizer==3.4.3
10
+ click==8.2.1
11
+ DateTime==5.5
12
+ distro==1.9.0
13
+ dnspython==2.7.0
14
+ dotenv==0.9.9
15
+ gitdb==4.0.12
16
+ GitPython==3.1.45
17
+ google-auth==2.40.3
18
+ google-genai==1.31.0
19
+ h11==0.16.0
20
+ httpcore==1.0.9
21
+ httpx==0.28.1
22
+ idna==3.10
23
+ Jinja2==3.1.6
24
+ jiter==0.10.0
25
+ jsonschema==4.25.1
26
+ jsonschema-specifications==2025.4.1
27
+ MarkupSafe==3.0.2
28
+ narwhals==2.1.2
29
+ numpy==2.2.6
30
+ openai==1.101.0
31
+ opencv-python==4.12.0.88
32
+ packaging==25.0
33
+ pandas==2.3.2
34
+ pillow==11.3.0
35
+ protobuf==6.32.0
36
+ pyarrow==21.0.0
37
+ pyasn1==0.6.1
38
+ pyasn1_modules==0.4.2
39
+ pydantic==2.11.7
40
+ pydantic_core==2.33.2
41
+ pydeck==0.9.1
42
+ pymongo==4.14.1
43
+ python-dateutil==2.9.0.post0
44
+ python-dotenv==1.1.1
45
+ pytz==2025.2
46
+ referencing==0.36.2
47
+ requests==2.32.5
48
+ rpds-py==0.27.0
49
+ rsa==4.9.1
50
+ six==1.17.0
51
+ smmap==5.0.2
52
+ sniffio==1.3.1
53
+ streamlit==1.48.1
54
+ tenacity==9.1.2
55
+ toml==0.10.2
56
+ tornado==6.5.2
57
+ tqdm==4.67.1
58
+ typing-inspection==0.4.1
59
+ typing_extensions==4.14.1
60
+ tzdata==2025.2
61
+ urllib3==2.5.0
62
+ websockets==15.0.1
63
+ zope.interface==7.2
schema.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Any, List, Literal
2
+ from pydantic import BaseModel, constr
3
+
4
# Constrained string aliases shared by the models below (enforced by pydantic).
Timestamp = constr(pattern=r'^\d{2}:\d{2}$')  # "MM:SS", zero-padded
RangeTimestamp = constr(pattern=r'^\d{2}:\d{2}-\d{2}:\d{2}$')  # "MM:SS-MM:SS"
Score010 = constr(pattern=r'^(?:10|[0-9])\/10$')  # "X/10" with integer X in 0..10

class Hook(BaseModel):
    """Opening-hook breakdown: the exact line/visual plus why it works."""
    hook_text: str  # exact opening line, or description of the opening visual
    principle: str  # psychological/marketing principle behind the hook
    advantages: List[str]  # concise benefit statements (prompt asks for 3-6)

class StoryboardItem(BaseModel):
    """One storyboard beat pinned to a single "MM:SS" point."""
    timeline: Timestamp
    scene: str
    visuals: str
    dialogue: str  # exact words; "" when the beat has no speech
    camera: str
    sound_effects: str  # the string "None" when absent, per the prompt contract

class ScriptLine(BaseModel):
    """A single spoken line pinned to a "MM:SS" timestamp."""
    timeline: Timestamp
    dialogue: str

class VideoMetric(BaseModel):
    """Effectiveness rating for one "MM:SS-MM:SS" segment of the ad."""
    timestamp: RangeTimestamp
    element: str
    current_approach: str
    effectiveness_score: Score010  # "X/10"
    notes: str

class VideoAnalysis(BaseModel):
    """Narrative analysis plus per-segment metric rows."""
    effectiveness_factors: str
    psychological_triggers: str
    target_audience: str
    video_metrics: List[VideoMetric]

class TimestampImprovement(BaseModel):
    """A recommended change for one segment, with a priority level."""
    timestamp: RangeTimestamp
    current_element: str
    improvement_type: str
    recommended_change: str
    expected_impact: str
    priority: Literal["High", "Medium", "Low"]

class AdAnalysis(BaseModel):
    """Top-level response schema validated by services.video_analyzer."""
    brief: str
    caption_details: str  # caption styling description, or literally "None"
    hook: Hook
    framework_analysis: str
    storyboard: List[StoryboardItem]
    script: List[ScriptLine]
    video_analysis: VideoAnalysis
    timestamp_improvements: List[TimestampImprovement]
services/__pycache__/comparison.cpython-311.pyc ADDED
Binary file (2.67 kB). View file
 
services/__pycache__/script_generator.cpython-311.pyc ADDED
Binary file (3.25 kB). View file
 
services/__pycache__/video_analyzer.cpython-311.pyc ADDED
Binary file (3.33 kB). View file
 
services/comparison.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import logging
3
+ from typing import Dict, Any, List
4
+ from config import configure_gemini
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
def generate_comparison_summary(analyses: List[Dict[str, Any]], user_prompt: str) -> str:
    """Ask Gemini to compare several per-video analyses and return prose.

    Args:
        analyses: dicts carrying at least "video_name" and "analysis" keys
            (the shape produced by services.video_analyzer.analyze_multiple_videos).
        user_prompt: the user's comparison question, interpolated into the request.

    Returns:
        The model's text summary, or the fixed string
        "Failed to generate comparison summary." when the API call raises
        (the exception is logged, never propagated).
    """
    client = configure_gemini()

    # Serialize only the fields the model needs for the comparison.
    analyses_json = json.dumps(
        [{"video": a["video_name"], "analysis": a["analysis"]} for a in analyses],
        indent=2
    )

    system_prompt = """You are an expert video ad strategist.
    Compare multiple video ad analyses and return a clear, structured comparison.
    Highlight:
    - Hooks and opening strategies
    - Copywriting / psychology frameworks
    - Target audience differences
    - Average effectiveness scores
    - Major timestamp improvements
    - Strengths & weaknesses of each video

    Always structure output into sections and provide actionable insights.
    """

    user_message = f"""
    Here are the analyses for multiple videos:

    {analyses_json}

    Now, based on this data, {user_prompt}.
    """

    try:
        resp = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_message}
            ]
        )
        return resp.choices[0].message.content
    except Exception:
        logger.exception("Comparison summary generation failed")
        return "Failed to generate comparison summary."
services/script_generator.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import logging
4
+ from typing import Dict, Any
5
+
6
+ from config import configure_gemini
7
+ from prompt.system_prompt import SYSTEM_PROMPT
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
def generate_scripts(
    video_path: str,
    offer_details: str,
    target_audience: str,
    specific_hooks: str,
    additional_context: str,
    num_scripts: int = 3,
    duration: int = 60
) -> Dict[str, Any]:
    """Generate direct-response ad script variations via Gemini.

    Returns the parsed JSON response dict, or {} when the API call or JSON
    parsing fails (errors are logged, never raised to the caller).

    NOTE(review): video_path is accepted but never used — only text context
    reaches the API, so the "video analysis" sections the prompt requests
    cannot be grounded in the actual footage. Confirm whether a video-upload
    step is missing here.
    """
    client = configure_gemini()
    try:
        # The user prompt carries all user-supplied context; the exact JSON
        # output shape is dictated by SYSTEM_PROMPT.
        user_prompt = f"""
        Generate {num_scripts} high-converting direct response script variations,
        each about {duration} seconds long.

        CONTEXT TO FOLLOW:
        - Offer Details: {offer_details}
        - Target Audience: {target_audience}
        - Specific Hooks: {specific_hooks}

        ADDITIONAL CONTEXT:
        {additional_context}

        You must reflect this additional context in:
        - The script tone, CTA, visuals
        - Compliance or branding constraints
        - Any assumptions about audience or product

        Failure to include this will be considered incomplete.

        Please provide a comprehensive analysis including:

        1. DETAILED VIDEO ANALYSIS with timestamp-based metrics:
        - Break down the video into 5-10 second segments
        - Rate each segment's effectiveness (1-10 scale)
        - Identify specific elements (hook, transition, proof, CTA, etc.)

        2. TIMESTAMP-BASED IMPROVEMENTS:
        - Specific recommendations for each time segment
        - Priority level for each improvement
        - Expected impact of implementing changes

        3. SCRIPT VARIATIONS:
        - Create 2-3 complete script variations
        - Each with timestamp-by-timestamp breakdown
        - Different psychological triggers and approaches

        IMPORTANT: Return only valid JSON in the exact format specified in the system prompt. Analyze the video second-by-second for maximum detail."""
        resp = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt}
            ],
            # Ask the OpenAI-compatible endpoint to emit a JSON object.
            response_format={"type": "json_object"}
        )
        raw = resp.choices[0].message.content or "{}"
        # json.loads may raise on malformed output; caught below -> {}.
        return json.loads(raw)
    except Exception:
        logger.exception("Script generation failed")
        return {}
services/video_analyzer.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import tempfile
4
+ import logging
5
+ import pandas as pd
6
+ import streamlit as st
7
+ from typing import Dict, Any, List
8
+
9
+ from config import configure_gemini
10
+ from prompt.analyser_prompt import analyser_prompt
11
+ from schema import AdAnalysis
12
+ from utils.video import get_video_thumbnail_base64
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
def analyze_video_only(video_path: str) -> Dict[str, Any]:
    """Run the ad-analysis prompt for one video and return an AdAnalysis-shaped dict.

    Returns {} on API failure. If the response fails AdAnalysis validation,
    falls back to returning the raw parsed JSON as-is.

    NOTE(review): only the file *name* is sent to the model — the video bytes
    are never uploaded, so the model cannot actually see the footage. Confirm
    whether an upload/file API step is missing before trusting these analyses.
    """
    client = configure_gemini()
    try:
        resp = client.chat.completions.create(
            model="gemini-2.0-flash",
            messages=[
                {"role": "system", "content": analyser_prompt},
                {"role": "user", "content": f"Analyze this video file: {os.path.basename(video_path)}"}
            ],
            response_format={"type": "json_object"}
        )
        raw = resp.choices[0].message.content or ""
        try:
            # Preferred path: strict schema validation via pydantic.
            model_obj = AdAnalysis.model_validate_json(raw)
            return model_obj.model_dump()
        except Exception:
            # Lenient fallback: accept whatever JSON the model produced.
            return json.loads(raw)
    except Exception:
        logger.exception("Video analysis failed")
        return {}
36
+
37
def analyze_multiple_videos(video_files: List[st.runtime.uploaded_file_manager.UploadedFile]) -> List[Dict[str, Any]]:
    """Analyze each uploaded video and return per-video result dicts.

    Each result carries "video_name", "analysis" (AdAnalysis-shaped dict or {})
    and "thumbnail" (base64 JPEG or "").

    Fix: the NamedTemporaryFile is created with delete=False so it can be
    re-opened by OpenCV, but the original never removed it afterwards —
    leaking one temp file per uploaded video per run. The file is now removed
    in a finally block once analysis and thumbnail extraction are done.
    """
    results = []
    for file in video_files:
        suffix = os.path.splitext(file.name)[1]
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            tmp.write(file.read())
            video_path = tmp.name

        try:
            analysis = analyze_video_only(video_path)
            thumbnail_b64 = get_video_thumbnail_base64(video_path)
        finally:
            # Best-effort cleanup; a failed unlink must not mask the result.
            try:
                os.remove(video_path)
            except OSError:
                logger.warning("Could not remove temp file %s", video_path)

        results.append({
            "video_name": file.name,
            "analysis": analysis,
            "thumbnail": thumbnail_b64
        })
    return results
utils/.DS_Store ADDED
Binary file (6.15 kB). View file
 
utils/__pycache__/auth.cpython-311.pyc ADDED
Binary file (1.91 kB). View file
 
utils/__pycache__/dataframe.cpython-311.pyc ADDED
Binary file (5.34 kB). View file
 
utils/__pycache__/video.cpython-311.pyc ADDED
Binary file (1.38 kB). View file
 
utils/auth.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ import streamlit as st
4
+
5
logger = logging.getLogger(__name__)

def check_token(user_token: str) -> tuple[bool, str]:
    """Validate *user_token* against the ACCESS_TOKEN environment variable.

    Returns:
        (ok, error_message): error_message is "" on success, a user-facing
        message otherwise.
    """
    import hmac  # local import: only needed for the constant-time compare

    ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
    if not ACCESS_TOKEN:
        logger.critical("ACCESS_TOKEN not set in environment.")
        return False, "Server error: Access token not configured."
    # Fix: plain `==` short-circuits on the first differing byte, leaking the
    # match position via timing; compare_digest is constant-time.
    if hmac.compare_digest(user_token.encode("utf-8"), ACCESS_TOKEN.encode("utf-8")):
        logger.info("Access token validated successfully.")
        return True, ""
    logger.warning("Invalid access token attempt.")
    return False, "Invalid token."
17
+
18
def gated_access() -> bool:
    """Render a token gate; return True once the user is authenticated.

    The auth flag lives in st.session_state so it survives Streamlit reruns.
    Returns False while the gate is still being shown (caller should stop
    rendering the rest of the app).
    """
    if "authenticated" not in st.session_state:
        st.session_state["authenticated"] = False

    if not st.session_state["authenticated"]:
        st.markdown("## Access Required")
        token_input = st.text_input("Enter Access Token", type="password")
        if st.button("Unlock App"):
            ok, error_msg = check_token(token_input)
            if ok:
                st.session_state["authenticated"] = True
                # Rerun immediately so the gate disappears on unlock.
                st.rerun()
            else:
                st.error(error_msg)
        return False
    return True
utils/dataframe.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import pandas as pd
3
+ from typing import Any, Dict, List
4
+
5
def safe_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """Return a copy of *df* with every column cast to str.

    Used before st.dataframe so mixed/arrow-unfriendly dtypes render safely.

    Fix: the original looped over columns assigning back into *df*, mutating
    the caller's DataFrame in place as a side effect. DataFrame.astype returns
    a new frame, leaving the input untouched.
    """
    return df.astype(str)
9
+
10
def analysis_to_csv(analysis: Dict[str, Any]) -> str:
    """Flatten one analysis dict into a single CSV string ("" when empty).

    Rows from storyboard, script, video metrics and improvements are stacked
    with a leading "Section" label column.
    """
    sections = [
        ("Storyboard", analysis.get("storyboard", [])),
        ("Script", analysis.get("script", [])),
        ("Metrics", analysis.get("video_analysis", {}).get("video_metrics", [])),
        ("Improvements", analysis.get("timestamp_improvements", [])),
    ]
    rows = [
        {"Section": label, **item}
        for label, items in sections
        for item in items
    ]
    if not rows:
        return ""
    return pd.DataFrame(rows).to_csv(index=False)
24
+
25
+ def _normalize_list(value: Any) -> List[str]:
26
+ if value is None:
27
+ return []
28
+ if isinstance(value, list):
29
+ return [str(v) for v in value]
30
+ return [s for s in str(value).splitlines() if s.strip()]
31
+
32
+ def _to_dataframe(items: Any, columns_map: Dict[str, str]) -> pd.DataFrame:
33
+ if not isinstance(items, list) or not items:
34
+ return pd.DataFrame(columns=list(columns_map.values()))
35
+ df = pd.DataFrame(items)
36
+ df = df.rename(columns=columns_map)
37
+ ordered_cols = [columns_map[k] for k in columns_map.keys() if columns_map[k] in df.columns]
38
+ df = df.reindex(columns=ordered_cols)
39
+ return df
40
+
41
+ def _mean_effectiveness(metrics: List[Dict[str, Any]]) -> float:
42
+ if not metrics:
43
+ return 0.0
44
+ scores = []
45
+ for m in metrics:
46
+ s = str(m.get("effectiveness_score", "0/10")).split("/")[0]
47
+ try:
48
+ scores.append(int(s))
49
+ except Exception:
50
+ pass
51
+ return round(sum(scores) / len(scores), 2) if scores else 0.0
52
+
53
+ def _search_dataframe(df: pd.DataFrame, query: str) -> pd.DataFrame:
54
+ if not query or df.empty:
55
+ return df
56
+ mask = pd.Series([False]*len(df))
57
+ for col in df.columns:
58
+ mask = mask | df[col].astype(str).str.contains(query, case=False, na=False)
59
+ return df[mask]
utils/video.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import base64
3
+ import logging
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
def get_video_thumbnail_base64(video_path: str, time_sec: int = 1) -> str:
    """Grab one frame at *time_sec* seconds and return it as a base64 JPEG.

    Returns "" on any failure: unopenable file, seek past end of video, or a
    failed JPEG encode.

    Fixes:
    - cv2.imencode's success flag was ignored; a failed encode fed None into
      b64encode and surfaced as a spurious logged exception.
    - cap.release() is now in a finally block so the capture handle is not
      leaked when read/seek raises.
    """
    cap = None
    try:
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            return ""
        cap.set(cv2.CAP_PROP_POS_MSEC, time_sec * 1000)
        grabbed, frame = cap.read()
        if not grabbed:
            return ""
        encoded, buffer = cv2.imencode(".jpg", frame)
        if not encoded:
            return ""
        return base64.b64encode(buffer).decode("utf-8")
    except Exception:
        logger.exception("Thumbnail extraction failed")
        return ""
    finally:
        if cap is not None:
            cap.release()