Update app.py
app.py CHANGED

@@ -21,7 +21,7 @@ import requests
 from bs4 import BeautifulSoup
 from markdownify import markdownify as md
 from readability import Document
-from urllib.parse import …
+from urllib.parse import urlparse
 from ddgs import DDGS
 from PIL import Image
 from huggingface_hub import InferenceClient
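
Note: the changed import now brings in urlparse from the standard library's urllib.parse; for reference, it splits a URL into components:

    from urllib.parse import urlparse

    parts = urlparse("https://example.com/docs/page?q=1")
    print(parts.scheme, parts.netloc, parts.path)  # https example.com /docs/page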

@@ -301,19 +301,26 @@ def Fetch_Webpage( # <-- MCP tool #1 (Fetch)
     - Clean formatting without navigation/sidebar elements
     - Length controlled by verbosity setting
     """
+    _log_call_start("Fetch_Webpage", url=url, verbosity=verbosity)
     if not url or not url.strip():
-        …
+        result = "Please enter a valid URL."
+        _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+        return result

     try:
         resp = _http_get_enhanced(url)
         resp.raise_for_status()
     except requests.exceptions.RequestException as e:
-        …
+        result = f"An error occurred: {e}"
+        _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+        return result

     final_url = str(resp.url)
     ctype = resp.headers.get("Content-Type", "")
     if "html" not in ctype.lower():
-        …
+        result = f"Unsupported content type for extraction: {ctype or 'unknown'}"
+        _log_call_end("Fetch_Webpage", _truncate_for_log(result))
+        return result

     # Decode to text
     resp.encoding = resp.encoding or resp.apparent_encoding
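
Note: every early exit added in this commit follows the same triplet — build result, log it with _log_call_end, then return it. A hypothetical helper (not part of app.py, shown only to name the pattern) would read:

    def _logged_return(func_name: str, result: str) -> str:
        # Hypothetical wrapper; app.py inlines these steps at each early exit.
        _log_call_end(func_name, _truncate_for_log(result))
        return result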

@@ -325,11 +332,13 @@ def Fetch_Webpage( # <-- MCP tool #1 (Fetch)

     # Apply verbosity-based truncation
     if verbosity == "Brief":
-        …
+        result = _truncate_markdown(markdown_content, 1000)
     elif verbosity == "Standard":
-        …
+        result = _truncate_markdown(markdown_content, 3000)
     else:  # "Full"
-        …
+        result = markdown_content
+    _log_call_end("Fetch_Webpage", f"markdown_chars={len(result)}")
+    return result


 # ============================================
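
Note: _truncate_markdown is defined elsewhere in app.py and does not appear in this diff. A minimal sketch of the assumed behavior (cap output at a character budget, cut at a word boundary) — not the actual implementation:

    def _truncate_markdown(markdown: str, max_chars: int) -> str:
        # Assumed semantics only; the real helper may trim differently.
        if len(markdown) <= max_chars:
            return markdown
        return markdown[:max_chars].rsplit(" ", 1)[0] + "\n\n[Content truncated]"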

@@ -364,6 +373,51 @@ class RateLimiter:
 _search_rate_limiter = RateLimiter(requests_per_minute=20)
 _fetch_rate_limiter = RateLimiter(requests_per_minute=25)

+# ==============================
+# Logging Helpers (print I/O to terminal)
+# ==============================
+
+def _truncate_for_log(value: str, limit: int = 500) -> str:
+    """Truncate long strings for concise terminal logging."""
+    if len(value) <= limit:
+        return value
+    return value[:limit - 1] + "…"
+
+
+def _serialize_input(val):  # type: ignore[return-any]
+    """Best-effort compact serialization of arbitrary input values for logging."""
+    try:
+        if isinstance(val, (str, int, float, bool)) or val is None:
+            return val
+        if isinstance(val, (list, tuple)):
+            return [_serialize_input(v) for v in list(val)[:10]] + (["…"] if len(val) > 10 else [])  # type: ignore[index]
+        if isinstance(val, dict):
+            out = {}
+            for i, (k, v) in enumerate(val.items()):
+                if i >= 12:
+                    out["…"] = "…"
+                    break
+                out[str(k)] = _serialize_input(v)
+            return out
+        return repr(val)[:120]
+    except Exception:
+        return "<unserializable>"
+
+
+def _log_call_start(func_name: str, **kwargs) -> None:
+    try:
+        compact = {k: _serialize_input(v) for k, v in kwargs.items()}
+        print(f"[TOOL CALL] {func_name} inputs: {json.dumps(compact, ensure_ascii=False)[:800]}", flush=True)
+    except Exception as e:  # pragma: no cover - logging safety
+        print(f"[TOOL CALL] {func_name} (failed to log inputs: {e})", flush=True)
+
+
+def _log_call_end(func_name: str, output_desc: str) -> None:
+    try:
+        print(f"[TOOL RESULT] {func_name} output: {output_desc}", flush=True)
+    except Exception as e:  # pragma: no cover
+        print(f"[TOOL RESULT] {func_name} (failed to log output: {e})", flush=True)
+
 def Search_DuckDuckGo( # <-- MCP tool #2 (DDG Search)
     query: Annotated[str, "The search query (supports operators like site:, quotes, OR)."],
     max_results: Annotated[int, "Number of results to return (1–20)."] = 5,
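
Note: given the format strings above, a typical call pair prints like this (illustrative values; _log_call_start also relies on a module-level import json in app.py):

    _log_call_start("Search_DuckDuckGo", query="site:python.org asyncio", max_results=5)
    # [TOOL CALL] Search_DuckDuckGo inputs: {"query": "site:python.org asyncio", "max_results": 5}

    _log_call_end("Search_DuckDuckGo", "results=5 chars=1432")
    # [TOOL RESULT] Search_DuckDuckGo output: results=5 chars=1432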

@@ -385,8 +439,11 @@ def Search_DuckDuckGo( # <-- MCP tool #2 (DDG Search)
     Returns:
         str: Search results in readable format with titles, URLs, and snippets as a numbered list.
     """
+    _log_call_start("Search_DuckDuckGo", query=query, max_results=max_results)
     if not query or not query.strip():
-        …
+        result = "No search query provided. Please enter a search term."
+        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        return result

     # Validate max_results
     max_results = max(1, min(20, max_results))

@@ -407,11 +464,14 @@ def Search_DuckDuckGo( # <-- MCP tool #2 (DDG Search)
             error_msg = "Search timed out. Please try again with a simpler query."
         elif "network" in str(e).lower() or "connection" in str(e).lower():
             error_msg = "Network connection error. Please check your internet connection and try again."
-        …
-        …
+        result = f"Error: {error_msg}"
+        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        return result

     if not raw:
-        …
+        result = f"No results found for query: {query}"
+        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        return result

     results = []


@@ -432,7 +492,9 @@ def Search_DuckDuckGo( # <-- MCP tool #2 (DDG Search)
         results.append(result_obj)

     if not results:
-        …
+        result = f"No valid results found for query: {query}"
+        _log_call_end("Search_DuckDuckGo", _truncate_for_log(result))
+        return result

     # Format output in readable format
     lines = [f"Found {len(results)} search results for: {query}\n"]

@@ -442,7 +504,9 @@ def Search_DuckDuckGo( # <-- MCP tool #2 (DDG Search)
         if result['snippet']:
             lines.append(f" Summary: {result['snippet']}")
         lines.append("")  # Empty line between results
-    …
+    result = "\n".join(lines)
+    _log_call_end("Search_DuckDuckGo", f"results={len(results)} chars={len(result)}")
+    return result


 # ======================================

@@ -460,18 +524,23 @@ def Execute_Python(code: Annotated[str, "Python source code to run; stdout is ca…
     str: Combined stdout produced by the code, or the exception text if
         execution failed.
     """
+    _log_call_start("Execute_Python", code=_truncate_for_log(code or "", 300))
     if code is None:
-        …
+        result = "No code provided."
+        _log_call_end("Execute_Python", result)
+        return result

     old_stdout = sys.stdout
     redirected_output = sys.stdout = StringIO()
     try:
         exec(code)
-        …
+        result = redirected_output.getvalue()
     except Exception as e:
-        …
+        result = str(e)
     finally:
         sys.stdout = old_stdout
+    _log_call_end("Execute_Python", _truncate_for_log(result))
+    return result


 # ==========================
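
Note: Execute_Python captures stdout by swapping sys.stdout for a StringIO buffer. The same pattern, runnable standalone (run_and_capture is a made-up name for illustration):

    import sys
    from io import StringIO

    def run_and_capture(code: str) -> str:
        old_stdout = sys.stdout
        buf = sys.stdout = StringIO()   # everything print()ed lands in buf
        try:
            exec(code)
            result = buf.getvalue()
        except Exception as e:
            result = str(e)
        finally:
            sys.stdout = old_stdout     # always restore the real stdout
        return result

    print(run_and_capture("print(2 + 2)"))  # -> 4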

@@ -637,7 +706,12 @@ def Generate_Speech( # <-- MCP tool #4 (Generate Speech)
     - Can generate audio of any length - no 30 second limit!
     - Use List_Kokoro_Voices() MCP tool to discover all available voice options.
     """
+    _log_call_start("Generate_Speech", text=_truncate_for_log(text, 200), speed=speed, voice=voice)
     if not text or not text.strip():
+        try:
+            _log_call_end("Generate_Speech", "error=empty text")
+        finally:
+            pass
         raise gr.Error("Please provide non-empty text to synthesize.")

     _init_kokoro()

@@ -651,29 +725,29 @@ def Generate_Speech( # <-- MCP tool #4 (Generate Speech)
     # Process ALL segments for longer audio generation
     audio_segments = []
     pack = pipeline.load_voice(voice)
-
+
     try:
         # Get all segments first to show progress for long text
         segments = list(pipeline(text, voice, speed))
         total_segments = len(segments)
-
+
         # Iterate through ALL segments instead of just the first one
         for segment_idx, (text_chunk, ps, _) in enumerate(segments):
             ref_s = pack[len(ps) - 1]
             try:
                 audio = model(ps, ref_s, float(speed))
                 audio_segments.append(audio.detach().cpu().numpy())
-
+
                 # For very long text (>10 segments), show progress every few segments
                 if total_segments > 10 and (segment_idx + 1) % 5 == 0:
                     print(f"Progress: Generated {segment_idx + 1}/{total_segments} segments...")
-
+
             except Exception as e:
                 raise gr.Error(f"Error generating audio for segment {segment_idx + 1}: {str(e)}")
-
+
         if not audio_segments:
             raise gr.Error("No audio was generated (empty synthesis result).")
-
+
         # Concatenate all segments to create the complete audio
         if len(audio_segments) == 1:
             final_audio = audio_segments[0]
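
Note: the multi-segment branch of the concatenation sits outside this hunk; assuming the usual numpy approach (np.concatenate is an assumption, not shown in the diff), the step amounts to:

    import numpy as np

    audio_segments = [np.zeros(24_000, dtype=np.float32), np.zeros(12_000, dtype=np.float32)]
    final_audio = audio_segments[0] if len(audio_segments) == 1 else np.concatenate(audio_segments)
    print(len(final_audio) / 24_000)  # duration at Kokoro's 24 kHz rate -> 1.5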

@@ -683,13 +757,16 @@ def Generate_Speech( # <-- MCP tool #4 (Generate Speech)
         duration = len(final_audio) / 24_000
         if total_segments > 1:
             print(f"Completed: {total_segments} segments concatenated into {duration:.1f} seconds of audio")
-
-        # …
+
+        # Success logging & return
+        _log_call_end("Generate_Speech", f"samples={final_audio.shape[0]} duration_sec={len(final_audio)/24_000:.2f}")
         return 24_000, final_audio
-
-    except gr.Error:
-        …
+
+    except gr.Error as e:
+        _log_call_end("Generate_Speech", f"gr_error={str(e)}")
+        raise  # Re-raise
     except Exception as e:
+        _log_call_end("Generate_Speech", f"error={str(e)[:120]}")
         raise gr.Error(f"Error during speech generation: {str(e)}")

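
Note: the new except gr.Error branch matters because of handler order — catching gr.Error first and re-raising preserves the user-facing message instead of letting the generic handler re-wrap it. Shape of the flow (assuming gradio is imported as gr, as in app.py):

    try:
        ...
    except gr.Error:
        raise                                   # keep the original error text
    except Exception as e:
        raise gr.Error(f"Error during speech generation: {e}")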

@@ -884,7 +961,9 @@ def Generate_Image( # <-- MCP tool #5 (Generate Image)
     Error modes:
     - Raises gr.Error with a user-friendly message on auth/model/load errors.
     """
+    _log_call_start("Generate_Image", prompt=_truncate_for_log(prompt, 200), model_id=model_id, steps=steps, cfg_scale=cfg_scale, seed=seed, size=f"{width}x{height}")
     if not prompt or not prompt.strip():
+        _log_call_end("Generate_Image", "error=empty prompt")
         raise gr.Error("Please provide a non-empty prompt.")

     # Slightly enhance prompt for quality (kept consistent with Serverless space)

@@ -907,6 +986,7 @@ def Generate_Image( # <-- MCP tool #5 (Generate Image)
             guidance_scale=cfg_scale,
             seed=seed if seed != -1 else random.randint(1, 1_000_000_000),
         )
+        _log_call_end("Generate_Image", f"provider={provider} size={image.size}")
         return image
     except Exception as e:  # try next provider, transform last one to friendly error
         last_error = e
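
Note: the context implies a try-providers-in-order loop around the generation call; a hedged sketch of that shape (providers and make_image are placeholders, not app.py's actual values):

    def generate_with_fallback(providers, make_image):
        last_error = None
        for provider in providers:
            try:
                return make_image(provider)   # stand-in for the InferenceClient call
            except Exception as e:
                last_error = e                # remember the failure, try the next provider
        raise RuntimeError(f"Image generation failed: {last_error}")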

@@ -920,6 +1000,7 @@ def Generate_Image( # <-- MCP tool #5 (Generate Image)
         raise gr.Error("The model is warming up. Please try again shortly.")
     if "401" in msg or "403" in msg:
         raise gr.Error("Authentication failed. Set HF_READ_TOKEN environment variable with access to the model.")
+    _log_call_end("Generate_Image", f"error={_truncate_for_log(msg, 200)}")
     raise gr.Error(f"Image generation failed: {msg}")


@@ -1038,7 +1119,9 @@ def Generate_Video( # <-- MCP tool #6 (Generate Video)
     Error modes:
     - Raises gr.Error with a user-friendly message on auth/model/load errors or unsupported parameters.
     """
+    _log_call_start("Generate_Video", prompt=_truncate_for_log(prompt, 160), model_id=model_id, steps=steps, cfg_scale=cfg_scale, fps=fps, duration=duration, size=f"{width}x{height}")
     if not prompt or not prompt.strip():
+        _log_call_end("Generate_Video", "error=empty prompt")
         raise gr.Error("Please provide a non-empty prompt.")

     if not HF_VIDEO_TOKEN:

@@ -1103,6 +1186,11 @@ def Generate_Video( # <-- MCP tool #6 (Generate Video)

         # Save output to an .mp4
         path = _write_video_tmp(result, suffix=".mp4")
+        try:
+            size = os.path.getsize(path)
+        except Exception:
+            size = -1
+        _log_call_end("Generate_Video", f"provider={provider} path={os.path.basename(path)} bytes={size}")
         return path
     except Exception as e:
         last_error = e

@@ -1115,6 +1203,7 @@ def Generate_Video( # <-- MCP tool #6 (Generate Video)
         raise gr.Error("The model is warming up. Please try again shortly.")
     if "401" in msg or "403" in msg:
         raise gr.Error("Authentication failed or not permitted. Set HF_READ_TOKEN/HF_TOKEN with inference access.")
+    _log_call_end("Generate_Video", f"error={_truncate_for_log(msg, 200)}")
     raise gr.Error(f"Video generation failed: {msg}")