diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -9,73 +9,114 @@ import numpy as np import sys import subprocess import json +from pygments import highlight +from pygments.lexers import PythonLexer +from pygments.formatters import HtmlFormatter +import base64 +from transformers import pipeline +import torch import re import shutil import time -from datetime import datetime +from datetime import datetime, timedelta import streamlit.components.v1 as components import uuid +import platform import pandas as pd import plotly.express as px +import markdown import zipfile +import contextlib +import threading import traceback +from io import StringIO, BytesIO -# Set up logging +# Set up enhanced logging logging.basicConfig( level=logging.INFO, - format="%(asctime)s %(levelname)s %(name)s – %(message)s", - handlers=[logging.StreamHandler()] + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + handlers=[ + logging.StreamHandler() + ] ) logger = logging.getLogger(__name__) -# Available quality presets and formats -QUALITY_PRESETS = ["480p", "720p", "1080p", "4K", "8K"] -FPS_OPTIONS = [24, 30, 60, 120] -OUTPUT_FORMATS = { - "MP4 Video": "mp4", - "GIF Animation": "gif", - "WebM Video": "webm", - "PNG Sequence (ZIP)": "png_sequence", - "SVG Image": "svg" -} - -# Model configurations +# Model configuration mapping for different API requirements and limits MODEL_CONFIGS = { - "gpt-4o": {}, - "gpt-4o-mini": {}, - "gpt-4.1": {}, - "gpt-4.1-mini": {}, - "o1": {}, - "o1-mini": {}, - "default": {} + "DeepSeek-V3-0324": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek", "warning": None}, + "DeepSeek-R1": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "DeepSeek", "warning": None}, + "Llama-4-Scout-17B-16E-Instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta", "warning": None}, + "Llama-4-Maverick-17B-128E-Instruct-FP8": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Meta", "warning": None}, + "gpt-4o-mini": {"max_tokens": 15000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None}, + "gpt-4o": {"max_tokens": 16000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None}, + "gpt-4.1": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None}, + "gpt-4.1-mini": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None}, + "gpt-4.1-nano": {"max_tokens": 32768, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None}, + "o3-mini": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None}, + "o1": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None}, + "o1-mini": {"max_completion_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None}, + "o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None}, + "Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft", "warning": None}, + "Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", 
"api_version": None, "category": "Mistral", "warning": None}, + "Codestral-2501": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None}, + # Default configuration for other models + "default": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Other", "warning": None} } -# Try to import st_ace +# Try to import Streamlit Ace try: from streamlit_ace import st_ace ACE_EDITOR_AVAILABLE = True except ImportError: ACE_EDITOR_AVAILABLE = False - logger.warning("streamlit-ace not available, using text_area") + logger.warning("streamlit-ace not available, falling back to standard text editor") -def get_secret(env_var): - val = os.environ.get(env_var) - if not val: - logger.warning(f"Secret '{env_var}' not set") - return val +def prepare_api_params(messages, model_name): + """Create appropriate API parameters based on model configuration""" + # Get model configuration + config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"]) + + # Base parameters common to all models + api_params = { + "messages": messages, + "model": model_name + } + + # Add the appropriate token parameter based on model's parameter name + token_param = config["param_name"] + token_value = config[token_param] # Get the actual value from the config + + # Add the parameter to the API params + api_params[token_param] = token_value + + return api_params, config + +# New functions for accessing secrets and password verification +def get_secret(github_token_api): + """Retrieve a secret from HuggingFace Spaces environment variables""" + secret_value = os.environ.get(github_token_api) + if not secret_value: + logger.warning(f"Secret '{github_token_api}' not found") + return None + return secret_value def check_password(): - pwd = get_secret("password") - if not pwd: - st.error("Admin password not configured") + """Returns True if the user entered the correct password""" + # Get the password from secrets + correct_password = get_secret("password") + if not correct_password: + st.error("Admin password not configured in HuggingFace Spaces secrets") return False - if "pwd_ok" not in st.session_state: - st.session_state.pwd_ok = False - if not st.session_state.pwd_ok: - entry = st.text_input("Enter password", type="password") - if entry: - if entry == pwd: - st.session_state.pwd_ok = True + + # Password input + if "password_entered" not in st.session_state: + st.session_state.password_entered = False + + if not st.session_state.password_entered: + password = st.text_input("Enter password to access AI features", type="password") + if password: + if password == correct_password: + st.session_state.password_entered = True return True else: st.error("Incorrect password") @@ -84,301 +125,2960 @@ def check_password(): return True def ensure_packages(): - reqs = { - "manim": "0.17.3", - "Pillow": "9.0.0", - "numpy": "1.22.0", - "plotly": "5.14.0", - "pandas": "2.0.0", - "python-pptx": "0.6.21", - "fpdf": "1.7.2", - "matplotlib": "3.5.0", - "seaborn": "0.11.2", - "scipy": "1.7.3", - "streamlit-ace": "0.1.1" + required_packages = { + 'manim': '0.17.3', + 'Pillow': '9.0.0', + 'numpy': '1.22.0', + 'transformers': '4.30.0', + 'torch': '2.0.0', + 'pygments': '2.15.1', + 'streamlit-ace': '0.1.1', + 'pydub': '0.25.1', # For audio processing + 'plotly': '5.14.0', # For timeline editor + 'pandas': '2.0.0', # For data manipulation + 'python-pptx': '0.6.21', # For PowerPoint export + 'markdown': '3.4.3', # For markdown processing + 'fpdf': '1.7.2', # For PDF 
generation + 'matplotlib': '3.5.0', # For Python script runner + 'seaborn': '0.11.2', # For enhanced visualizations + 'scipy': '1.7.3', # For scientific computations + 'huggingface_hub': '0.16.0', # For Hugging Face API } - missing = {} - for pkg, ver in reqs.items(): - try: - __import__(pkg if pkg != "Pillow" else "PIL") - except ImportError: - missing[pkg] = ver - if not missing: + + with st.spinner("Checking required packages..."): + # First, quickly check if packages are already installed + missing_packages = {} + for package, version in required_packages.items(): + try: + # Try to import the package to check if it's available + if package == 'manim': + import manim + elif package == 'Pillow': + import PIL + elif package == 'numpy': + import numpy + elif package == 'transformers': + import transformers + elif package == 'torch': + import torch + elif package == 'pygments': + import pygments + elif package == 'streamlit-ace': + # This one is trickier, we already handle it with ACE_EDITOR_AVAILABLE flag + pass + elif package == 'pydub': + import pydub + elif package == 'plotly': + import plotly + elif package == 'pandas': + import pandas + elif package == 'python-pptx': + import pptx + elif package == 'markdown': + import markdown + elif package == 'fpdf': + import fpdf + elif package == 'matplotlib': + import matplotlib + elif package == 'seaborn': + import seaborn + elif package == 'scipy': + import scipy + elif package == 'huggingface_hub': + import huggingface_hub + except ImportError: + missing_packages[package] = version + + # If no packages are missing, return success immediately + if not missing_packages: + logger.info("All required packages already installed.") + return True + + # If there are missing packages, install them with progress reporting + progress_bar = st.progress(0) + status_text = st.empty() + + for i, (package, version) in enumerate(missing_packages.items()): + try: + progress = (i / len(missing_packages)) + progress_bar.progress(progress) + status_text.text(f"Installing {package}...") + + result = subprocess.run( + [sys.executable, "-m", "pip", "install", f"{package}>={version}"], + capture_output=True, + text=True + ) + + if result.returncode != 0: + st.error(f"Failed to install {package}: {result.stderr}") + logger.error(f"Package installation failed: {package}") + return False + + except Exception as e: + st.error(f"Error installing {package}: {str(e)}") + logger.error(f"Package installation error: {str(e)}") + return False + + progress_bar.progress(1.0) + status_text.text("All packages installed successfully!") + time.sleep(0.5) + progress_bar.empty() + status_text.empty() return True - bar = st.progress(0) - txt = st.empty() - for i, (pkg, ver) in enumerate(missing.items()): - txt.text(f"Installing {pkg}...") - res = subprocess.run([sys.executable, "-m", "pip", "install", f"{pkg}>={ver}"], capture_output=True, text=True) - if res.returncode != 0: - st.error(f"Failed to install {pkg}") - return False - bar.progress((i + 1) / len(missing)) - txt.empty() - return True -def extract_scene_class_name(code): - m = re.findall(r"class\s+(\w+)\s*\([^)]*Scene", code) - return m[0] if m else "MyScene" - -def generate_manim_preview(code): - icons = [] - if "Circle" in code: icons.append("⭕") - if "Square" in code: icons.append("🔲") - if "MathTex" in code or "Tex" in code: icons.append("📊") - if "Text" in code: icons.append("📝") - if "Axes" in code: icons.append("📈") - preview_icons = "".join(icons) or "đŸŽŦ" - return f""" -
- <div>
- <h4>Preview</h4>
- <div>{preview_icons}</div>
- <p>Full render for accurate output</p>
- </div>
- """ - -def generate_manim_video(code, fmt, quality, fps, audio_path=None): - temp_dir = tempfile.mkdtemp(prefix="manim_") - scene = extract_scene_class_name(code) - scene_file = os.path.join(temp_dir, "scene.py") - with open(scene_file, "w") as f: - f.write(code) - qflags = {"480p":"-ql","720p":"-qm","1080p":"-qh","4K":"-qk","8K":"-qp"} - qf = qflags.get(quality, "-qm") - cmd = ["manim", scene_file, scene, qf, f"--format={fmt}", f"--fps={fps}"] - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) - progress = st.progress(0) - log = st.empty() - total = None - for line in process.stdout: - log.code(line) - m = re.search(r"(\d+)\s*/\s*(\d+)", line) - if m: - cur, tot = map(int, m.groups()) - total = tot - progress.progress(min(0.99, cur / tot)) +def install_custom_packages(package_list): + """Install custom packages specified by the user without page refresh""" + if not package_list.strip(): + return True, "No packages specified" + + # Split and clean package list + packages = [pkg.strip() for pkg in package_list.split(',') if pkg.strip()] + + if not packages: + return True, "No valid packages specified" + + status_placeholder = st.sidebar.empty() + progress_bar = st.sidebar.progress(0) + + results = [] + success = True + + for i, package in enumerate(packages): + try: + progress = (i / len(packages)) + progress_bar.progress(progress) + status_placeholder.text(f"Installing {package}...") + + result = subprocess.run( + [sys.executable, "-m", "pip", "install", package], + capture_output=True, + text=True + ) + + if result.returncode != 0: + error_msg = f"Failed to install {package}: {result.stderr}" + results.append(error_msg) + logger.error(error_msg) + success = False + else: + results.append(f"Successfully installed {package}") + logger.info(f"Successfully installed custom package: {package}") + + except Exception as e: + error_msg = f"Error installing {package}: {str(e)}" + results.append(error_msg) + logger.error(error_msg) + success = False + + progress_bar.progress(1.0) + status_placeholder.text("Installation complete!") + time.sleep(0.5) + progress_bar.empty() + status_placeholder.empty() + + return success, "\n".join(results) + +@st.cache_resource(ttl=3600) +def init_ai_models_direct(): + """Direct implementation using the exact pattern from the example code""" + try: + # Get token from secrets + token = get_secret("github_token_api") + if not token: + st.error("GitHub token not found in secrets. Please add 'github_token_api' to your HuggingFace Spaces secrets.") + return None + + # Log what we're doing - for debugging + logger.info(f"Initializing AI model with token: {token[:5]}...") + + # Use exact imports as in your example + import os + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.core.credentials import AzureKeyCredential + + # Use exact endpoint as in your example + endpoint = "https://models.inference.ai.azure.com" + + # Use default model + model_name = "gpt-4o" + + # Create client exactly as in your example + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(token), + ) + + # Return the necessary information + return { + "client": client, + "model_name": model_name, + "endpoint": endpoint + } + except ImportError as ie: + st.error(f"Import error: {str(ie)}. 
Please make sure azure-ai-inference is installed.") + logger.error(f"Import error: {str(ie)}") + return None + except Exception as e: + st.error(f"Error initializing AI model: {str(e)}") + logger.error(f"Initialization error: {str(e)}") + return None + +def suggest_code_completion(code_snippet, models): + """Generate code completion using the AI model""" + if not models: + st.error("AI models not properly initialized.") + return None + + try: + # Create the prompt + prompt = f"""Write a complete Manim animation scene based on this code or idea: +{code_snippet} + +The code should be a complete, working Manim animation that includes: +- Proper Scene class definition +- Constructor with animations +- Proper use of self.play() for animations +- Proper wait times between animations + +Here's the complete Manim code: +""" + + with st.spinner("AI is generating your animation code..."): + # Get the current model name and base URL + model_name = models["model_name"] + + # Convert message to the appropriate format based on model category + config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"]) + category = config.get("category", "Other") + + if category == "OpenAI": + # Import OpenAI client + from openai import OpenAI + + # Get token + token = get_secret("github_token_api") + + # Create or get client + if "openai_client" not in models: + client = OpenAI( + base_url="https://models.github.ai/inference", + api_key=token + ) + models["openai_client"] = client + else: + client = models["openai_client"] + + # For OpenAI models, we need role-based messages + messages = [ + {"role": "system", "content": "You are an expert in Manim animations."}, + {"role": "user", "content": prompt} + ] + + # Create params + params = { + "messages": messages, + "model": model_name + } + + # Add token parameter + token_param = config["param_name"] + params[token_param] = config[token_param] + + # Make API call + response = client.chat.completions.create(**params) + completed_code = response.choices[0].message.content + + else: + # Use Azure client + from azure.ai.inference.models import UserMessage + + # Convert message format for Azure + messages = [UserMessage(prompt)] + api_params, _ = prepare_api_params(messages, model_name) + + # Make API call with Azure client + response = models["client"].complete(**api_params) + completed_code = response.choices[0].message.content + + # Process the code + if "```python" in completed_code: + completed_code = completed_code.split("```python")[1].split("```")[0] + elif "```" in completed_code: + completed_code = completed_code.split("```")[1].split("```")[0] + + # Add Scene class if missing + if "Scene" not in completed_code: + completed_code = f"""from manim import * + +class MyScene(Scene): + def construct(self): + {completed_code}""" + + return completed_code + + except Exception as e: + st.error(f"Error generating code: {str(e)}") + st.code(traceback.format_exc()) + return None + +def check_model_freshness(): + """Check if models need to be reloaded based on TTL""" + if 'ai_models' not in st.session_state or st.session_state.ai_models is None: + return False + + if 'last_loaded' not in st.session_state.ai_models: + return False + + last_loaded = datetime.fromisoformat(st.session_state.ai_models['last_loaded']) + ttl_hours = 1 # 1 hour TTL + + return datetime.now() - last_loaded < timedelta(hours=ttl_hours) + +def extract_scene_class_name(python_code): + """Extract the scene class name from Python code.""" + import re + scene_classes = 
re.findall(r'class\s+(\w+)\s*\([^)]*Scene[^)]*\)', python_code) + + if scene_classes: + # Return the first scene class found + return scene_classes[0] + else: + # If no scene class is found, use a default name + return "MyScene" + +def suggest_code_completion(code_snippet, models): + if not models or "code_model" not in models: + st.error("AI models not properly initialized") + return None + + try: + prompt = f"""Write a complete Manim animation scene based on this code or idea: +{code_snippet} + +The code should be a complete, working Manim animation that includes: +- Proper Scene class definition +- Constructor with animations +- Proper use of self.play() for animations +- Proper wait times between animations + +Here's the complete Manim code: +```python +""" + with st.spinner("AI is generating your animation code..."): + response = models["code_model"]( + prompt, + max_length=1024, + do_sample=True, + temperature=0.2, + top_p=0.95, + top_k=50, + num_return_sequences=1, + truncation=True, + pad_token_id=50256 + ) + + if not response or not response[0].get('generated_text'): + st.error("No valid completion generated") + return None + + completed_code = response[0]['generated_text'] + if "```python" in completed_code: + completed_code = completed_code.split("```python")[1].split("```")[0] + + if "Scene" not in completed_code: + completed_code = f"""from manim import * + +class MyScene(Scene): + def construct(self): + {completed_code}""" + + return completed_code + except Exception as e: + st.error(f"Error suggesting code: {str(e)}") + logger.error(f"Code suggestion error: {str(e)}") + return None + +# Quality presets +QUALITY_PRESETS = { + "480p": {"resolution": "480p", "fps": "30"}, + "720p": {"resolution": "720p", "fps": "30"}, + "1080p": {"resolution": "1080p", "fps": "60"}, + "4K": {"resolution": "2160p", "fps": "60"}, + "8K": {"resolution": "4320p", "fps": "60"} # Added 8K option +} + +# Animation speeds +ANIMATION_SPEEDS = { + "Slow": 0.5, + "Normal": 1.0, + "Fast": 2.0, + "Very Fast": 3.0 +} + +# Export formats +EXPORT_FORMATS = { + "MP4 Video": "mp4", + "GIF Animation": "gif", + "WebM Video": "webm", + "PNG Image Sequence": "png_sequence", + "SVG Image": "svg" +} + +# FPS options +FPS_OPTIONS = [15, 24, 30, 60, 120] + +def highlight_code(code): + formatter = HtmlFormatter(style='monokai') + highlighted = highlight(code, PythonLexer(), formatter) + return highlighted, formatter.get_style_defs() + +def generate_manim_preview(python_code): + """Generate a lightweight preview of the Manim animation""" + try: + # Extract scene components for preview + scene_objects = [] + if "Circle" in python_code: + scene_objects.append("circle") + if "Square" in python_code: + scene_objects.append("square") + if "MathTex" in python_code or "Tex" in python_code: + scene_objects.append("equation") + if "Text" in python_code: + scene_objects.append("text") + if "Axes" in python_code: + scene_objects.append("graph") + if "ThreeDScene" in python_code or "ThreeDAxes" in python_code: + scene_objects.append("3D scene") + if "Sphere" in python_code: + scene_objects.append("sphere") + if "Cube" in python_code: + scene_objects.append("cube") + + # Generate a more detailed visual preview based on extracted objects + object_icons = { + "circle": "⭕", + "square": "🔲", + "equation": "📊", + "text": "📝", + "graph": "📈", + "3D scene": "🧊", + "sphere": "🌐", + "cube": "🧊" + } + + icon_html = "" + for obj in scene_objects: + if obj in object_icons: + icon_html += f'{object_icons[obj]}' + + preview_html = f""" +
+ <div>
+ <h4>Animation Preview</h4>
+ <div>
+ {icon_html if icon_html else 'đŸŽŦ'}
+ </div>
+ <p>Scene contains: {', '.join(scene_objects) if scene_objects else 'No detected objects'}</p>
+ <p>Full rendering required for accurate preview</p>
+ </div>
+ """ + return preview_html + except Exception as e: + logger.error(f"Preview generation error: {str(e)}") + return f""" +
+ <div>
+ <h4>Preview Error</h4>
+ <p>{str(e)}</p>
+ </div>
+ """ + +def prepare_audio_for_manim(audio_file, target_dir): + """Process audio file and return path for use in Manim""" + try: + # Create audio directory if it doesn't exist + audio_dir = os.path.join(target_dir, "audio") + os.makedirs(audio_dir, exist_ok=True) + + # Generate a unique filename + filename = f"audio_{int(time.time())}.mp3" + output_path = os.path.join(audio_dir, filename) + + # Save audio file + with open(output_path, "wb") as f: + f.write(audio_file.getvalue()) + + return output_path + except Exception as e: + logger.error(f"Audio processing error: {str(e)}") + return None + +def mp4_to_gif(mp4_path, output_path, fps=15): + """Convert MP4 to GIF using ffmpeg as a backup when Manim fails""" + try: + # Use ffmpeg for conversion with optimized settings + command = [ + "ffmpeg", + "-i", mp4_path, + "-vf", f"fps={fps},scale=640:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse", + "-loop", "0", + output_path + ] + + # Run the conversion + result = subprocess.run(command, capture_output=True, text=True) + + if result.returncode != 0: + logger.error(f"FFmpeg conversion error: {result.stderr}") + return None + + return output_path + + except Exception as e: + logger.error(f"GIF conversion error: {str(e)}") + return None + +def generate_manim_video(python_code, format_type, quality_preset, animation_speed=1.0, audio_path=None, fps=None): + temp_dir = None + progress_placeholder = st.empty() + status_placeholder = st.empty() + log_placeholder = st.empty() + video_data = None # Initialize video data variable + + try: + if not python_code or not format_type: + raise ValueError("Missing required parameters") + + # Create temporary directory + temp_dir = tempfile.mkdtemp(prefix="manim_render_") + + # Extract the scene class name from the code + scene_class = extract_scene_class_name(python_code) + logger.info(f"Detected scene class: {scene_class}") + + # If audio is provided, we need to modify the code to include it + if audio_path: + # Check if the code already has a with_sound decorator + if "with_sound" not in python_code: + # Add the necessary import + if "from manim.scene.scene_file_writer import SceneFileWriter" not in python_code: + python_code = "from manim.scene.scene_file_writer import SceneFileWriter\n" + python_code + + # Add sound to the scene + scene_def_pattern = f"class {scene_class}\\(.*?\\):" + scene_def_match = re.search(scene_def_pattern, python_code) + + if scene_def_match: + scene_def = scene_def_match.group(0) + scene_def_with_sound = f"@with_sound(\"{audio_path}\")\n{scene_def}" + python_code = python_code.replace(scene_def, scene_def_with_sound) + else: + logger.warning("Could not find scene definition to add audio") + + # Write the code to a file + scene_file = os.path.join(temp_dir, "scene.py") + with open(scene_file, "w", encoding="utf-8") as f: + f.write(python_code) + + # Map quality preset to Manim quality flag + quality_map = { + "480p": "-ql", # Low quality + "720p": "-qm", # Medium quality + "1080p": "-qh", # High quality + "4K": "-qk", # 4K quality + "8K": "-qp" # 8K quality (production quality) + } + quality_flag = quality_map.get(quality_preset, "-qm") + + # Handle special formats + if format_type == "png_sequence": + # For PNG sequence, we need additional flags + format_arg = "--format=png" + extra_args = ["--save_pngs"] + elif format_type == "svg": + # For SVG, we need a different format + format_arg = "--format=svg" + extra_args = [] else: - p = re.search(r"(\d+)%", line) - if p: - progress.progress(min(0.99, int(p.group(1)) / 
100)) - process.wait() - progress.progress(1.0) - # locate output - out_path = None - for root, _, files in os.walk(temp_dir): - for f in files: - if f.endswith(f".{fmt}") or (fmt=="png_sequence" and f.endswith(".zip")): - out_path = os.path.join(root, f) + # Standard video formats + format_arg = f"--format={format_type}" + extra_args = [] + + # Add custom FPS if specified + if fps is not None: + extra_args.append(f"--fps={fps}") + + # Show status and create progress bar + status_placeholder.info(f"Rendering {scene_class} with {quality_preset} quality...") + progress_bar = progress_placeholder.progress(0) + + # Build command + command = [ + "manim", + scene_file, + scene_class, + quality_flag, + format_arg + ] + command.extend(extra_args) + + logger.info(f"Running command: {' '.join(command)}") + + # Execute the command + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True + ) + + # Track output + full_output = [] + output_file_path = None + mp4_output_path = None # Track MP4 output for GIF fallback + total_frames = None + current_frame = 0 + + while True: + line = process.stdout.readline() + if not line and process.poll() is not None: break - data = None - if out_path and os.path.exists(out_path): - with open(out_path, "rb") as f: - data = f.read() - shutil.rmtree(temp_dir) - if data: - size_mb = len(data) / (1024 * 1024) - return data, f"✅ Done ({size_mb:.1f} MB)" + + full_output.append(line) + log_placeholder.code("".join(full_output[-10:])) + + # Try to extract total frames information + if "Render animations with total frames:" in line: + try: + total_frames = int(line.split("Render animations with total frames:")[1].strip().split()[0]) + logger.info(f"Total frames to render: {total_frames}") + except: + pass + + # Update progress bar based on frame information or percentage + if "Rendering frame" in line and total_frames: + try: + # Extract current frame number + frame_match = re.search(r"Rendering frame (\d+)", line) + if frame_match: + current_frame = int(frame_match.group(1)) + # Calculate progress as current frame / total frames + progress = min(0.99, current_frame / total_frames) + progress_bar.progress(progress) + # Update status with frame information + status_placeholder.info(f"Rendering {scene_class}: Frame {current_frame}/{total_frames} ({int(progress*100)}%)") + except: + pass + elif "%" in line: + try: + # Fallback to percentage if available + percent = float(line.split("%")[0].strip().split()[-1]) + progress_bar.progress(min(0.99, percent / 100)) + except: + pass + + # Try to capture the output file path from Manim's output + if "File ready at" in line: + try: + # Combine next few lines to get the full path + path_parts = [] + path_parts.append(line.split("File ready at")[-1].strip()) + + # Read up to 5 more lines to get the complete path + for _ in range(5): + additional_line = process.stdout.readline() + if additional_line: + full_output.append(additional_line) + path_parts.append(additional_line.strip()) + if additional_line.strip().endswith(('.mp4', '.gif', '.webm', '.svg')): + break + + # Join all parts and clean up + potential_path = ''.join(path_parts).replace("'", "").strip() + # Look for path pattern surrounded by quotes + path_match = re.search(r'([\'"]?)((?:/|[a-zA-Z]:\\).*?\.(?:mp4|gif|webm|svg))(\1)', potential_path) + if path_match: + output_file_path = path_match.group(2) + logger.info(f"Found output path in logs: {output_file_path}") + + # Track MP4 file for potential GIF fallback + if 
output_file_path.endswith('.mp4'): + mp4_output_path = output_file_path + except Exception as e: + logger.error(f"Error parsing output path: {str(e)}") + + # Wait for the process to complete + process.wait() + progress_bar.progress(1.0) + + # IMPORTANT: Wait a moment for file system to catch up + time.sleep(3) + + # Special handling for GIF format - if Manim failed to generate a GIF but we have an MP4 + if format_type == "gif" and (not output_file_path or not os.path.exists(output_file_path)) and mp4_output_path and os.path.exists(mp4_output_path): + status_placeholder.info("GIF generation via Manim failed. Trying FFmpeg conversion...") + + # Generate a GIF using FFmpeg + gif_output_path = os.path.join(temp_dir, f"{scene_class}_converted.gif") + gif_path = mp4_to_gif(mp4_output_path, gif_output_path, fps=fps if fps else 15) + + if gif_path and os.path.exists(gif_path): + output_file_path = gif_path + logger.info(f"Successfully converted MP4 to GIF using FFmpeg: {gif_path}") + + # For PNG sequence, we need to collect the PNGs + if format_type == "png_sequence": + # Find the PNG directory + png_dirs = [] + search_dirs = [ + os.path.join(os.getcwd(), "media", "images", scene_class, "Animations"), + os.path.join(temp_dir, "media", "images", scene_class, "Animations"), + "/tmp/media/images", + ] + + for search_dir in search_dirs: + if os.path.exists(search_dir): + for root, dirs, _ in os.walk(search_dir): + for d in dirs: + if os.path.exists(os.path.join(root, d)): + png_dirs.append(os.path.join(root, d)) + + if png_dirs: + # Get the newest directory + newest_dir = max(png_dirs, key=os.path.getctime) + + # Create a zip file with all PNGs + png_files = [f for f in os.listdir(newest_dir) if f.endswith('.png')] + if png_files: + zip_path = os.path.join(temp_dir, f"{scene_class}_pngs.zip") + + with zipfile.ZipFile(zip_path, 'w') as zipf: + for png in png_files: + png_path = os.path.join(newest_dir, png) + zipf.write(png_path, os.path.basename(png_path)) + + with open(zip_path, 'rb') as f: + video_data = f.read() + + logger.info(f"Created PNG sequence zip: {zip_path}") + else: + logger.error("No PNG files found in directory") + else: + logger.error("No PNG directories found") + elif output_file_path and os.path.exists(output_file_path): + # For other formats, read the output file directly + with open(output_file_path, 'rb') as f: + video_data = f.read() + logger.info(f"Read output file from path: {output_file_path}") + else: + # If we didn't find the output path, search for files + search_paths = [ + os.path.join(os.getcwd(), "media", "videos"), + os.path.join(os.getcwd(), "media", "videos", "scene"), + os.path.join(os.getcwd(), "media", "videos", scene_class), + "/tmp/media/videos", + temp_dir, + os.path.join(temp_dir, "media", "videos"), + ] + + # Add quality-specific paths + for quality in ["480p30", "720p30", "1080p60", "2160p60", "4320p60"]: + search_paths.append(os.path.join(os.getcwd(), "media", "videos", "scene", quality)) + search_paths.append(os.path.join(os.getcwd(), "media", "videos", scene_class, quality)) + + # For SVG format + if format_type == "svg": + search_paths.extend([ + os.path.join(os.getcwd(), "media", "designs"), + os.path.join(os.getcwd(), "media", "designs", scene_class), + ]) + + # Find all output files in the search paths + output_files = [] + for search_path in search_paths: + if os.path.exists(search_path): + for root, _, files in os.walk(search_path): + for file in files: + if file.endswith(f".{format_type}") and "partial" not in file: + file_path = os.path.join(root, 
file) + if os.path.exists(file_path): + output_files.append(file_path) + logger.info(f"Found output file: {file_path}") + + if output_files: + # Get the newest file + latest_file = max(output_files, key=os.path.getctime) + with open(latest_file, 'rb') as f: + video_data = f.read() + logger.info(f"Read output from file search: {latest_file}") + + # If the format is GIF but we got an MP4, try to convert it + if format_type == "gif" and latest_file.endswith('.mp4'): + gif_output_path = os.path.join(temp_dir, f"{scene_class}_converted.gif") + gif_path = mp4_to_gif(latest_file, gif_output_path, fps=fps if fps else 15) + + if gif_path and os.path.exists(gif_path): + with open(gif_path, 'rb') as f: + video_data = f.read() + logger.info(f"Successfully converted MP4 to GIF using FFmpeg: {gif_path}") + + # If we got output data, return it + if video_data: + file_size_mb = len(video_data) / (1024 * 1024) + + # Clear placeholders + progress_placeholder.empty() + status_placeholder.empty() + log_placeholder.empty() + + return video_data, f"✅ Animation generated successfully! ({file_size_mb:.1f} MB)" + else: + output_str = ''.join(full_output) + logger.error(f"No output files found. Full output: {output_str}") + + # Check if we have an MP4 but need a GIF (special handling for GIF issues) + if format_type == "gif": + # Try one more aggressive search for any MP4 file + mp4_files = [] + for search_path in [os.getcwd(), temp_dir, "/tmp"]: + for root, _, files in os.walk(search_path): + for file in files: + if file.endswith('.mp4') and scene_class.lower() in file.lower(): + mp4_path = os.path.join(root, file) + if os.path.exists(mp4_path) and os.path.getsize(mp4_path) > 0: + mp4_files.append(mp4_path) + + if mp4_files: + newest_mp4 = max(mp4_files, key=os.path.getctime) + logger.info(f"Found MP4 for GIF conversion: {newest_mp4}") + + # Convert to GIF + gif_output_path = os.path.join(temp_dir, f"{scene_class}_converted.gif") + gif_path = mp4_to_gif(newest_mp4, gif_output_path, fps=fps if fps else 15) + + if gif_path and os.path.exists(gif_path): + with open(gif_path, 'rb') as f: + video_data = f.read() + + # Clear placeholders + progress_placeholder.empty() + status_placeholder.empty() + log_placeholder.empty() + + file_size_mb = len(video_data) / (1024 * 1024) + return video_data, f"✅ Animation converted to GIF successfully! ({file_size_mb:.1f} MB)" + + return None, f"❌ Error: No output files were generated.\n\nMakim output:\n{output_str[:500]}..." 
+ + except Exception as e: + logger.error(f"Error: {str(e)}") + import traceback + logger.error(traceback.format_exc()) + + if progress_placeholder: + progress_placeholder.empty() + if status_placeholder: + status_placeholder.error(f"Rendering Error: {str(e)}") + if log_placeholder: + log_placeholder.empty() + + return None, f"❌ Error: {str(e)}" + + finally: + # CRITICAL: Only cleanup after we've captured the output data + if temp_dir and os.path.exists(temp_dir) and video_data is not None: + try: + shutil.rmtree(temp_dir) + logger.info(f"Cleaned up temp dir: {temp_dir}") + except Exception as e: + logger.error(f"Failed to clean temp dir: {str(e)}") + +def detect_input_calls(code): + """Detect input() calls in Python code to prepare for handling""" + input_calls = [] + lines = code.split('\n') + for i, line in enumerate(lines): + if 'input(' in line and not line.strip().startswith('#'): + # Try to extract the prompt if available + prompt_match = re.search(r'input\([\'"](.+?)[\'"]\)', line) + prompt = prompt_match.group(1) if prompt_match else f"Input for line {i+1}" + input_calls.append({"line": i+1, "prompt": prompt}) + return input_calls + +def run_python_script(code, inputs=None, timeout=60): + """Execute a Python script and capture output, handling input calls""" + result = { + "stdout": "", + "stderr": "", + "exception": None, + "plots": [], + "dataframes": [], + "execution_time": 0 + } + + # Replace input() calls with predefined values if provided + if inputs and len(inputs) > 0: + # Modify the code to use predefined inputs instead of waiting for user input + modified_code = """ +# Input values provided by the user +__INPUT_VALUES = {} +__INPUT_INDEX = 0 + +# Override the built-in input function +def input(prompt=''): + global __INPUT_INDEX + print(prompt, end='') + if __INPUT_INDEX < len(__INPUT_VALUES): + value = __INPUT_VALUES[__INPUT_INDEX] + __INPUT_INDEX += 1 + print(value) # Echo the input + return value else: - return None, "❌ No output generated" + print("\\n[WARNING] No more predefined inputs available, using empty string") + return "" -def main(): - st.set_page_config("Manim Studio", "đŸŽŦ", layout="wide") - if "inited" not in st.session_state: - st.session_state.update({ - "inited": True, - "code": "", - "video_data": None, - "status": "", - "settings": {"quality":"720p","fps":30,"format":"mp4"}, - "pwd_ok": False, - "custom_model": "gpt-4o", - "generated_code": "" +""".format(inputs) + + code = modified_code + code + + # Create a tempdir for script execution + with tempfile.TemporaryDirectory() as temp_dir: + # Path for saving plots + plot_dir = os.path.join(temp_dir, 'plots') + os.makedirs(plot_dir, exist_ok=True) + + # Files for capturing stdout and stderr + stdout_file = os.path.join(temp_dir, 'stdout.txt') + stderr_file = os.path.join(temp_dir, 'stderr.txt') + + # Add plot saving code + if 'matplotlib' in code or 'plt' in code: + if 'import matplotlib.pyplot as plt' not in code and 'from matplotlib import pyplot as plt' not in code: + code = "import matplotlib.pyplot as plt\n" + code + + # Add code to save plots + save_plots_code = """ +# Save all figures +import matplotlib.pyplot as plt +import os + +__figures = plt.get_fignums() +for __i, __num in enumerate(__figures): + __fig = plt.figure(__num) + __fig.savefig(os.path.join('{}', f'plot_{{__i}}.png')) +""".format(plot_dir.replace('\\', '\\\\')) + + code += "\n" + save_plots_code + + # Add dataframe display code if pandas is used + if 'pandas' in code or 'pd.' 
in code or 'DataFrame' in code: + if 'import pandas as pd' not in code and 'from pandas import' not in code: + code = "import pandas as pd\n" + code + + # Add code to save dataframe info + dataframes_code = """ +# Capture DataFrames +import pandas as pd +import json +import io +import os + +__globals_dict = globals() +__dataframes = [] +for __var_name, __var_val in __globals_dict.items(): + if isinstance(__var_val, pd.DataFrame) and not __var_name.startswith('__'): + try: + # Save basic info + __df_info = { + "name": __var_name, + "shape": __var_val.shape, + "columns": list(__var_val.columns), + "preview_html": __var_val.head().to_html() + } + with open(os.path.join('{}', f'df_{{__var_name}}.json'), 'w') as __f: + json.dump(__df_info, __f) + except: + pass +""".format(temp_dir.replace('\\', '\\\\')) + + code += "\n" + dataframes_code + + # Create the script file + script_path = os.path.join(temp_dir, 'script.py') + with open(script_path, 'w') as f: + f.write(code) + + # Execute with timeout + start_time = time.time() + try: + # Run the script with stdout and stderr redirection + with open(stdout_file, 'w') as stdout_f, open(stderr_file, 'w') as stderr_f: + process = subprocess.Popen( + [sys.executable, script_path], + stdout=stdout_f, + stderr=stderr_f, + cwd=temp_dir + ) + + try: + process.wait(timeout=timeout) + except subprocess.TimeoutExpired: + process.kill() + result["stderr"] += f"\nScript execution timed out after {timeout} seconds." + result["exception"] = "TimeoutError" + return result + + # Read the output + with open(stdout_file, 'r') as f: + result["stdout"] = f.read() + + with open(stderr_file, 'r') as f: + result["stderr"] = f.read() + + # Collect plots + if os.path.exists(plot_dir): + plot_files = sorted([f for f in os.listdir(plot_dir) if f.endswith('.png')]) + for plot_file in plot_files: + with open(os.path.join(plot_dir, plot_file), 'rb') as f: + result["plots"].append(f.read()) + + # Collect dataframes + df_files = [f for f in os.listdir(temp_dir) if f.startswith('df_') and f.endswith('.json')] + for df_file in df_files: + with open(os.path.join(temp_dir, df_file), 'r') as f: + result["dataframes"].append(json.load(f)) + + # Calculate execution time + result["execution_time"] = time.time() - start_time + + except Exception as e: + result["exception"] = str(e) + result["stderr"] += f"\nError executing script: {str(e)}" + + return result + +def display_python_script_results(result): + """Display the results from the Python script execution""" + if not result: + st.error("No results to display.") + return + + # Display execution time + st.info(f"Execution completed in {result['execution_time']:.2f} seconds") + + # Display any errors + if result["exception"]: + st.error(f"Exception occurred: {result['exception']}") + + if result["stderr"]: + st.error("Errors:") + st.code(result["stderr"], language="bash") + + # Display plots if any + if result["plots"]: + st.markdown("### Plots") + cols = st.columns(min(3, len(result["plots"]))) + for i, plot_data in enumerate(result["plots"]): + cols[i % len(cols)].image(plot_data, use_column_width=True) + + # Display dataframes if any + if result["dataframes"]: + st.markdown("### DataFrames") + for df_info in result["dataframes"]: + with st.expander(f"{df_info['name']} - {df_info['shape'][0]} rows × {df_info['shape'][1]} columns"): + st.markdown(df_info["preview_html"], unsafe_allow_html=True) + + # Display standard output + if result["stdout"]: + st.markdown("### Standard Output") + st.code(result["stdout"], language="bash") + +def 
parse_animation_steps(python_code): + """Parse Manim code to extract animation steps for timeline editor""" + animation_steps = [] + + # Look for self.play calls in the code + play_calls = re.findall(r'self\.play\((.*?)\)', python_code, re.DOTALL) + wait_calls = re.findall(r'self\.wait\((.*?)\)', python_code, re.DOTALL) + + # Extract animation objects from play calls + for i, play_call in enumerate(play_calls): + # Parse the arguments to self.play() + animations = [arg.strip() for arg in play_call.split(',')] + + # Get wait time after this animation if available + wait_time = 1.0 # Default wait time + if i < len(wait_calls): + wait_match = re.search(r'(\d+\.?\d*)', wait_calls[i]) + if wait_match: + wait_time = float(wait_match.group(1)) + + # Add to animation steps + animation_steps.append({ + "id": i+1, + "type": "play", + "animations": animations, + "duration": wait_time, + "start_time": sum([step.get("duration", 1.0) for step in animation_steps]), + "code": f"self.play({play_call})" }) + + return animation_steps + +def generate_code_from_timeline(animation_steps, original_code): + """Generate Manim code from the timeline data""" + # Extract the class definition and setup + class_match = re.search(r'(class\s+\w+\s*\([^)]*\)\s*:.*?def\s+construct\s*\(\s*self\s*\)\s*:)', original_code, re.DOTALL) + + if not class_match: + return original_code # Can't find proper structure to modify + + setup_code = class_match.group(1) + + # Build the new construct method + new_code = [setup_code] + indent = " " # Standard Manim indentation + + # Add each animation step in order + for step in sorted(animation_steps, key=lambda x: x["id"]): + new_code.append(f"{indent}{step['code']}") + if "duration" in step and step["duration"] > 0: + new_code.append(f"{indent}self.wait({step['duration']})") + + # Add any code that might come after animations + end_match = re.search(r'(#\s*End\s+of\s+animations.*?$)', original_code, re.DOTALL) + if end_match: + new_code.append(end_match.group(1)) + + # Combine the code parts with proper indentation + return "\n".join(new_code) + +def create_timeline_editor(code): + """Create an interactive timeline editor for animation sequences""" + st.markdown("### đŸŽžī¸ Animation Timeline Editor") + + if not code: + st.warning("Add animation code first to use the timeline editor.") + return code + + # Parse animation steps from the code + animation_steps = parse_animation_steps(code) + + if not animation_steps: + st.warning("No animation steps detected in your code.") + return code + + # Convert to DataFrame for easier manipulation + df = pd.DataFrame(animation_steps) + + # Create an interactive Gantt chart with plotly + st.markdown("#### Animation Timeline") + st.markdown("Drag timeline elements to reorder or resize to change duration") + + # Create the Gantt chart + fig = px.timeline( + df, + x_start="start_time", + x_end=df["start_time"] + df["duration"], + y="id", + color="type", + hover_name="animations", + labels={"id": "Step", "start_time": "Time (seconds)"} + ) + + # Make it interactive + fig.update_layout( + height=400, + xaxis=dict( + title="Time (seconds)", + rangeslider_visible=True + ) + ) + + # Add buttons and interactivity + timeline_chart = st.plotly_chart(fig, use_container_width=True) + + # Control panel + st.markdown("#### Timeline Controls") + controls_col1, controls_col2, controls_col3 = st.columns(3) + + with controls_col1: + selected_step = st.selectbox( + "Select Step to Edit:", + options=list(range(1, len(animation_steps) + 1)), + format_func=lambda x: f"Step 
{x}" + ) + + with controls_col2: + new_duration = st.number_input( + "Duration (seconds):", + min_value=0.1, + max_value=10.0, + value=float(df[df["id"] == selected_step]["duration"].values[0]), + step=0.1 + ) + + with controls_col3: + step_action = st.selectbox( + "Action:", + options=["Update Duration", "Move Up", "Move Down", "Delete Step"] + ) + + apply_btn = st.button("Apply Change", key="apply_timeline_change") + + # Handle timeline modifications + if apply_btn: + modified = False + + if step_action == "Update Duration": + # Update the duration of the selected step + idx = df[df["id"] == selected_step].index[0] + df.at[idx, "duration"] = new_duration + modified = True + + elif step_action == "Move Up" and selected_step > 1: + # Swap with the step above + idx1 = df[df["id"] == selected_step].index[0] + idx2 = df[df["id"] == selected_step - 1].index[0] + + # Swap IDs to maintain order + df.at[idx1, "id"], df.at[idx2, "id"] = selected_step - 1, selected_step + modified = True + + elif step_action == "Move Down" and selected_step < len(animation_steps): + # Swap with the step below + idx1 = df[df["id"] == selected_step].index[0] + idx2 = df[df["id"] == selected_step + 1].index[0] + + # Swap IDs to maintain order + df.at[idx1, "id"], df.at[idx2, "id"] = selected_step + 1, selected_step + modified = True + + elif step_action == "Delete Step": + # Remove the selected step + df = df[df["id"] != selected_step] + # Reindex remaining steps + new_ids = list(range(1, len(df) + 1)) + df["id"] = new_ids + modified = True + + if modified: + # Recalculate start times + df = df.sort_values("id") + cumulative_time = 0 + for idx, row in df.iterrows(): + df.at[idx, "start_time"] = cumulative_time + cumulative_time += row["duration"] + + # Regenerate animation code + animation_steps = df.to_dict('records') + new_code = generate_code_from_timeline(animation_steps, code) + + st.success("Timeline updated! 
Code has been regenerated.") + return new_code + + # Visual keyframe editor + st.markdown("#### Visual Keyframe Editor") + st.markdown("Add keyframes for smooth property transitions") + + keyframe_obj = st.selectbox( + "Select object to animate:", + options=[f"Object {i+1}" for i in range(5)] # Placeholder for actual objects + ) + + keyframe_prop = st.selectbox( + "Select property:", + options=["position", "scale", "rotation", "opacity", "color"] + ) + + # Keyframe timeline visualization + keyframe_times = [0, 1, 2, 3, 4] # Placeholder + keyframe_values = [0, 0.5, 0.8, 0.2, 1.0] # Placeholder + + keyframe_df = pd.DataFrame({ + "time": keyframe_times, + "value": keyframe_values + }) + + keyframe_fig = px.line( + keyframe_df, + x="time", + y="value", + markers=True, + title=f"{keyframe_prop.capitalize()} Keyframes" + ) + + keyframe_fig.update_layout( + xaxis_title="Time (seconds)", + yaxis_title="Value", + height=250 + ) + + st.plotly_chart(keyframe_fig, use_container_width=True) + + keyframe_col1, keyframe_col2, keyframe_col3 = st.columns(3) + with keyframe_col1: + keyframe_time = st.number_input("Time (s)", min_value=0.0, max_value=10.0, value=0.0, step=0.1) + with keyframe_col2: + keyframe_value = st.number_input("Value", min_value=0.0, max_value=1.0, value=0.0, step=0.1) + with keyframe_col3: + add_keyframe = st.button("Add Keyframe") + + # Return the original code or modified code + return code + +def export_to_educational_format(video_data, format_type, animation_title, explanation_text, temp_dir): + """Export animation to various educational formats""" + try: + if format_type == "powerpoint": + # Make sure python-pptx is installed + try: + import pptx + from pptx.util import Inches + except ImportError: + logger.error("python-pptx not installed") + subprocess.run([sys.executable, "-m", "pip", "install", "python-pptx"], check=True) + import pptx + from pptx.util import Inches + + # Create PowerPoint presentation + prs = pptx.Presentation() + + # Title slide + title_slide = prs.slides.add_slide(prs.slide_layouts[0]) + title_slide.shapes.title.text = animation_title + title_slide.placeholders[1].text = "Created with Manim Animation Studio" + + # Video slide + video_slide = prs.slides.add_slide(prs.slide_layouts[5]) + video_slide.shapes.title.text = "Animation" + + # Save video to temp file + video_path = os.path.join(temp_dir, "animation.mp4") + with open(video_path, "wb") as f: + f.write(video_data) + + # Add video to slide + try: + left = Inches(1) + top = Inches(1.5) + width = Inches(8) + height = Inches(4.5) + video_slide.shapes.add_movie(video_path, left, top, width, height) + except Exception as e: + logger.error(f"Error adding video to PowerPoint: {str(e)}") + # Fallback to adding a picture with link + img_path = os.path.join(temp_dir, "thumbnail.png") + # Generate thumbnail with ffmpeg + subprocess.run([ + "ffmpeg", "-i", video_path, "-ss", "00:00:01.000", + "-vframes", "1", img_path + ], check=True) + + if os.path.exists(img_path): + pic = video_slide.shapes.add_picture(img_path, left, top, width, height) + video_slide.shapes.add_textbox(left, top + height + Inches(0.5), width, Inches(0.5)).text_frame.text = "Click to play video (exported separately)" + + # Explanation slide + if explanation_text: + text_slide = prs.slides.add_slide(prs.slide_layouts[1]) + text_slide.shapes.title.text = "Explanation" + text_slide.placeholders[1].text = explanation_text + + # Save presentation + output_path = os.path.join(temp_dir, f"{animation_title.replace(' ', '_')}.pptx") + 
prs.save(output_path) + + # Read the file to return it + with open(output_path, "rb") as f: + return f.read(), "powerpoint" + + elif format_type == "html": + # Create interactive HTML animation + html_template = """ + + + + {title} + + + + +

+ <h1>{title}</h1>
+ <video controls>
+ <source src="data:video/mp4;base64,{video_base64}" type="video/mp4">
+ </video>
+ <div>
+ <h2>Explanation</h2>
+ {explanation_html}
+ </div>
+ + + + + """ + + # Convert video data to base64 + video_base64 = base64.b64encode(video_data).decode('utf-8') + + # Convert markdown explanation to HTML + explanation_html = markdown.markdown(explanation_text) if explanation_text else "

<p>No explanation provided.</p>
" + + # Format the HTML template + html_content = html_template.format( + title=animation_title, + video_base64=video_base64, + explanation_html=explanation_html + ) + + # Save to file + output_path = os.path.join(temp_dir, f"{animation_title.replace(' ', '_')}.html") + with open(output_path, "w", encoding="utf-8") as f: + f.write(html_content) + + # Read the file to return it + with open(output_path, "rb") as f: + return f.read(), "html" + + elif format_type == "sequence": + # Generate animation sequence with explanatory text + # Make sure FPDF is installed + try: + from fpdf import FPDF + except ImportError: + logger.error("fpdf not installed") + subprocess.run([sys.executable, "-m", "pip", "install", "fpdf"], check=True) + from fpdf import FPDF + + # Save video temporarily + temp_video_path = os.path.join(temp_dir, "temp_video.mp4") + with open(temp_video_path, "wb") as f: + f.write(video_data) + + # Create frames directory + frames_dir = os.path.join(temp_dir, "frames") + os.makedirs(frames_dir, exist_ok=True) + + # Extract frames using ffmpeg (assuming it's installed) + frame_count = 5 # Number of key frames to extract + try: + subprocess.run([ + "ffmpeg", + "-i", temp_video_path, + "-vf", f"select=eq(n\\,0)+eq(n\\,{frame_count//4})+eq(n\\,{frame_count//2})+eq(n\\,{frame_count*3//4})+eq(n\\,{frame_count-1})", + "-vsync", "0", + os.path.join(frames_dir, "frame_%03d.png") + ], check=True) + except Exception as e: + logger.error(f"Error extracting frames: {str(e)}") + # Try a simpler approach + subprocess.run([ + "ffmpeg", + "-i", temp_video_path, + "-r", "1", # 1 frame per second + os.path.join(frames_dir, "frame_%03d.png") + ], check=True) + + # Parse explanation text into segments (assuming sections divided by ##) + explanation_segments = explanation_text.split("##") if explanation_text else ["No explanation provided."] + + # Create a PDF with frames and explanations + pdf = FPDF() + pdf.set_auto_page_break(auto=True, margin=15) + + # Title page + pdf.add_page() + pdf.set_font("Arial", "B", 20) + pdf.cell(190, 10, animation_title, ln=True, align="C") + pdf.ln(10) + pdf.set_font("Arial", "", 12) + pdf.cell(190, 10, "Animation Sequence with Explanations", ln=True, align="C") + + # Add each frame with explanation + frame_files = sorted([f for f in os.listdir(frames_dir) if f.endswith('.png')]) + + for i, frame_file in enumerate(frame_files): + pdf.add_page() + + # Add frame image + frame_path = os.path.join(frames_dir, frame_file) + pdf.image(frame_path, x=10, y=10, w=190) + + # Add explanation text + pdf.ln(140) # Move below the image + pdf.set_font("Arial", "B", 12) + pdf.cell(190, 10, f"Step {i+1}", ln=True) + pdf.set_font("Arial", "", 10) + + # Use the corresponding explanation segment if available + explanation = explanation_segments[min(i, len(explanation_segments)-1)] + pdf.multi_cell(190, 5, explanation.strip()) + + # Save PDF + output_path = os.path.join(temp_dir, f"{animation_title.replace(' ', '_')}_sequence.pdf") + pdf.output(output_path) + + # Read the file to return it + with open(output_path, "rb") as f: + return f.read(), "pdf" + + return None, None + + except Exception as e: + logger.error(f"Educational export error: {str(e)}") + import traceback + logger.error(traceback.format_exc()) + return None, None + +def main(): + # Initialize session state variables if they don't exist + if 'init' not in st.session_state: + st.session_state.init = True + st.session_state.video_data = None + st.session_state.status = None + st.session_state.ai_models = None + 
st.session_state.generated_code = "" + st.session_state.code = "" + st.session_state.temp_code = "" + st.session_state.editor_key = str(uuid.uuid4()) + st.session_state.packages_checked = False # Track if packages were already checked + st.session_state.audio_path = None + st.session_state.image_paths = [] + st.session_state.custom_library_result = "" + st.session_state.python_script = "import matplotlib.pyplot as plt\nimport numpy as np\n\n# Example: Create a simple plot\nx = np.linspace(0, 10, 100)\ny = np.sin(x)\n\nplt.figure(figsize=(10, 6))\nplt.plot(x, y, 'b-', label='sin(x)')\nplt.title('Sine Wave')\nplt.xlabel('x')\nplt.ylabel('sin(x)')\nplt.grid(True)\nplt.legend()\n" + st.session_state.python_result = None + st.session_state.active_tab = 0 # Track currently active tab + st.session_state.settings = { + "quality": "720p", + "format_type": "mp4", + "animation_speed": "Normal", + "fps": 30 # Default FPS + } + st.session_state.password_entered = False # Track password authentication + st.session_state.custom_model = "gpt-4o" # Default model + st.session_state.first_load_complete = False # Prevent refreshes on first load + st.session_state.pending_tab_switch = None # Track pending tab switches + + # Page configuration with improved layout + st.set_page_config( + page_title="Manim Animation Studio", + page_icon="đŸŽŦ", + layout="wide", + initial_sidebar_state="expanded" + ) + + # Custom CSS for improved UI + st.markdown(""" + + """, unsafe_allow_html=True) + + # Header + st.markdown(""" +
+ <div>
+ <h1>đŸŽŦ Manim Animation Studio</h1>
+ <p>Create mathematical animations with Manim</p>
+ </div>
+ """, unsafe_allow_html=True) + + # Check for packages ONLY ONCE per session + if not st.session_state.packages_checked: if ensure_packages(): st.session_state.packages_checked = True else: - return - - tabs = st.tabs([ - "✨ Editor", "🤖 AI Assistant", "🎨 Assets", - "đŸŽžī¸ Timeline", "🎓 Export", "🐍 Python Runner" - ]) - - # Sidebar settings + st.error("Failed to install required packages. Please try again.") + st.stop() + + # Create main tabs - LaTeX tab removed + tab_names = ["✨ Editor", "🤖 AI Assistant", "🎨 Assets", "đŸŽžī¸ Timeline", "🎓 Educational Export", "🐍 Python Runner"] + tabs = st.tabs(tab_names) + + # Sidebar for rendering settings and custom libraries with st.sidebar: - st.markdown("## âš™ī¸ Render Settings") - q = st.selectbox("Quality", QUALITY_PRESETS, index=QUALITY_PRESETS.index(st.session_state["settings"]["quality"])) - f = st.selectbox("FPS", FPS_OPTIONS, index=FPS_OPTIONS.index(st.session_state["settings"]["fps"])) - fmt_disp = st.selectbox("Format", list(OUTPUT_FORMATS.keys()), - index=list(OUTPUT_FORMATS.values()).index(st.session_state["settings"]["format"])) - st.session_state["settings"] = {"quality": q, "fps": f, "format": OUTPUT_FORMATS[fmt_disp]} - - # Editor tab + # Rendering settings section + st.markdown("## âš™ī¸ Rendering Settings") + + col1, col2 = st.columns(2) + with col1: + quality = st.selectbox( + "đŸŽ¯ Quality", + options=list(QUALITY_PRESETS.keys()), + index=list(QUALITY_PRESETS.keys()).index(st.session_state.settings["quality"]), + key="quality_select" + ) + + with col2: + format_type_display = st.selectbox( + "đŸ“Ļ Format", + options=list(EXPORT_FORMATS.keys()), + index=list(EXPORT_FORMATS.values()).index(st.session_state.settings["format_type"]) + if st.session_state.settings["format_type"] in EXPORT_FORMATS.values() else 0, + key="format_select_display" + ) + # Convert display name to actual format value + format_type = EXPORT_FORMATS[format_type_display] + + # Add FPS control + fps = st.selectbox( + "đŸŽžī¸ FPS", + options=FPS_OPTIONS, + index=FPS_OPTIONS.index(st.session_state.settings["fps"]) if st.session_state.settings["fps"] in FPS_OPTIONS else 2, # Default to 30 FPS (index 2) + key="fps_select" + ) + + animation_speed = st.selectbox( + "⚡ Speed", + options=list(ANIMATION_SPEEDS.keys()), + index=list(ANIMATION_SPEEDS.keys()).index(st.session_state.settings["animation_speed"]), + key="speed_select" + ) + + # Apply the settings without requiring a button + st.session_state.settings = { + "quality": quality, + "format_type": format_type, + "animation_speed": animation_speed, + "fps": fps + } + + # Custom libraries section + st.markdown("## 📚 Custom Libraries") + st.markdown("Enter additional Python packages needed for your animations (comma-separated):") + + custom_libraries = st.text_area( + "Libraries to install", + placeholder="e.g., scipy, networkx, matplotlib", + key="custom_libraries" + ) + + if st.button("Install Libraries", key="install_libraries_btn"): + success, result = install_custom_packages(custom_libraries) + st.session_state.custom_library_result = result + + if success: + st.success("Installation complete!") + else: + st.error("Installation failed for some packages.") + + if st.session_state.custom_library_result: + with st.expander("Installation Results"): + st.code(st.session_state.custom_library_result) + + # EDITOR TAB with tabs[0]: - c1, c2 = st.columns([3,2]) - with c1: + col1, col2 = st.columns([3, 2]) + + with col1: st.markdown("### 📝 Animation Editor") + + # Toggle between upload and type + editor_mode = 
st.radio( + "Choose how to input your code:", + ["Type Code", "Upload File"], + key="editor_mode" + ) + + if editor_mode == "Upload File": + uploaded_file = st.file_uploader("Upload Manim Python File", type=["py"], key="code_uploader") + if uploaded_file: + code_content = uploaded_file.getvalue().decode("utf-8") + if code_content.strip(): # Only update if file has content + st.session_state.code = code_content + st.session_state.temp_code = code_content + + # Code editor if ACE_EDITOR_AVAILABLE: - st.session_state.code = st_ace( - value=st.session_state.code, + current_code = st.session_state.code if hasattr(st.session_state, 'code') and st.session_state.code else "" + st.session_state.temp_code = st_ace( + value=current_code, language="python", theme="monokai", min_lines=20, - key="ace" + key=f"ace_editor_{st.session_state.editor_key}" ) else: - st.session_state.code = st.text_area( - "Manim Code", - value=st.session_state.code, - height=400 + current_code = st.session_state.code if hasattr(st.session_state, 'code') and st.session_state.code else "" + st.session_state.temp_code = st.text_area( + "Manim Python Code", + value=current_code, + height=400, + key=f"code_textarea_{st.session_state.editor_key}" ) - if st.button("🚀 Generate Animation"): - if not st.session_state.code.strip(): - st.error("Enter code first") + + # Update code in session state if it changed + if st.session_state.temp_code != st.session_state.code: + st.session_state.code = st.session_state.temp_code + + # Generate button (use a form to prevent page reloads) + generate_btn = st.button("🚀 Generate Animation", use_container_width=True, key="generate_btn") + if generate_btn: + if not st.session_state.code: + st.error("Please enter some code before generating animation") else: - data, msg = generate_manim_video( - st.session_state.code, - st.session_state["settings"]["format"], - st.session_state["settings"]["quality"], - st.session_state["settings"]["fps"] - ) - st.session_state.video_data = data - st.session_state.status = msg - with c2: + # Extract scene class name + scene_class = extract_scene_class_name(st.session_state.code) + + # If no valid scene class found, add a basic one + if scene_class == "MyScene" and "class MyScene" not in st.session_state.code: + default_scene = """ +class MyScene(Scene): + def construct(self): + text = Text("Default Scene") + self.play(Write(text)) + self.wait(2) +""" + st.session_state.code += default_scene + st.session_state.temp_code = st.session_state.code + st.warning("No scene class found. Added a default scene.") + + with st.spinner("Generating animation..."): + video_data, status = generate_manim_video( + st.session_state.code, + st.session_state.settings["format_type"], + st.session_state.settings["quality"], + ANIMATION_SPEEDS[st.session_state.settings["animation_speed"]], + st.session_state.audio_path, + st.session_state.settings["fps"] + ) + st.session_state.video_data = video_data + st.session_state.status = status + + with col2: + st.markdown("### đŸ–Ĩī¸ Preview & Output") + + # Preview container if st.session_state.code: - components.html(generate_manim_preview(st.session_state.code), height=250) + with st.container(): + st.markdown("
", unsafe_allow_html=True) + preview_html = generate_manim_preview(st.session_state.code) + components.html(preview_html, height=250) + st.markdown("
", unsafe_allow_html=True) + + # Generated output display if st.session_state.video_data: - fmt = st.session_state["settings"]["format"] - if fmt == "png_sequence": - st.download_button("âŦ‡ī¸ Download ZIP", st.session_state.video_data, "animation.zip", "application/zip") - elif fmt == "svg": + # Different handling based on format type + format_type = st.session_state.settings["format_type"] + + if format_type == "png_sequence": + st.info("PNG sequence generated successfully. Use the download button to get the ZIP file.") + + # Add download button for ZIP + st.download_button( + label="âŦ‡ī¸ Download PNG Sequence (ZIP)", + data=st.session_state.video_data, + file_name=f"manim_pngs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip", + mime="application/zip", + use_container_width=True + ) + elif format_type == "svg": + # Display SVG preview try: - svg = st.session_state.video_data.decode("utf-8") - components.html(svg, height=400) - except: - st.error("Cannot display SVG") - st.download_button("âŦ‡ī¸ Download SVG", st.session_state.video_data, "animation.svg", "image/svg+xml") + svg_data = st.session_state.video_data.decode('utf-8') + components.html(svg_data, height=400) + except Exception as e: + st.error(f"Error displaying SVG: {str(e)}") + + # Download button for SVG + st.download_button( + label="âŦ‡ī¸ Download SVG", + data=st.session_state.video_data, + file_name=f"manim_animation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.svg", + mime="image/svg+xml", + use_container_width=True + ) else: - st.video(st.session_state.video_data, format=fmt) - st.download_button(f"âŦ‡ī¸ Download {fmt.upper()}", st.session_state.video_data, - f"animation.{fmt}", f"video/{fmt}") + # Standard video display for MP4, GIF, WebM + try: + st.video(st.session_state.video_data, format=format_type) + except Exception as e: + st.error(f"Error displaying video: {str(e)}") + # Fallback for GIF if st.video fails + if format_type == "gif": + st.markdown("GIF preview:") + gif_b64 = base64.b64encode(st.session_state.video_data).decode() + st.markdown(f'animation', unsafe_allow_html=True) + + # Add download button + st.download_button( + label=f"âŦ‡ī¸ Download {format_type.upper()}", + data=st.session_state.video_data, + file_name=f"manim_animation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.{format_type}", + mime=f"{'image' if format_type == 'gif' else 'video'}/{format_type}", + use_container_width=True + ) + if st.session_state.status: - if st.session_state.status.startswith("❌"): + if "Error" in st.session_state.status: st.error(st.session_state.status) + + # Show troubleshooting tips + with st.expander("🔍 Troubleshooting Tips"): + st.markdown(""" + ### Common Issues: + 1. **Syntax Errors**: Check your Python code for any syntax issues + 2. **Missing Scene Class**: Ensure your code contains a scene class that extends Scene + 3. **High Resolution Issues**: Try a lower quality preset for complex animations + 4. **Memory Issues**: For 4K animations, reduce complexity or try again + 5. **Format Issues**: Some formats require specific Manim configurations + 6. 
**GIF Generation**: If GIF doesn't work, try MP4 and we'll convert it automatically + + ### Example Code: + ```python + from manim import * + + class MyScene(Scene): + def construct(self): + circle = Circle(color=RED) + self.play(Create(circle)) + self.wait(1) + ``` + """) else: st.success(st.session_state.status) - # AI Assistant tab + # AI ASSISTANT TAB with tabs[1]: - st.markdown("### 🤖 AI Assistant") - if not check_password(): - return - model = st.selectbox("Select AI Model", list(MODEL_CONFIGS.keys()), - index=list(MODEL_CONFIGS.keys()).index(st.session_state.custom_model)) - st.session_state.custom_model = model - token = get_secret("github_token_api") - if st.button("Test Connection"): - from azure.ai.inference import ChatCompletionsClient - from azure.core.credentials import AzureKeyCredential - client = ChatCompletionsClient( - endpoint="https://models.inference.ai.azure.com", - credential=AzureKeyCredential(token) - ) - from azure.ai.inference.models import UserMessage - resp = client.complete(**{"messages":[UserMessage("Hello")], "model":model, "max_tokens":1000}) - if resp.choices: - st.success("✅ Connected") - st.session_state.ai_client = client + st.markdown("### 🤖 AI Animation Assistant") + + # Check password before allowing access + if check_password(): + # Debug section + with st.expander("🔧 Debug Connection"): + st.markdown("Test the AI model connection directly") + + if st.button("Test API Connection", key="test_api_btn"): + with st.spinner("Testing API connection..."): + try: + # Get token from secrets + token = get_secret("github_token_api") + if not token: + st.error("GitHub token not found in secrets") + st.stop() + + # Get model details + model_name = st.session_state.custom_model + config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"]) + category = config.get("category", "Other") + + if category == "OpenAI": + # Use OpenAI client for GitHub AI models + try: + from openai import OpenAI + except ImportError: + st.error("OpenAI package not installed. Please run 'pip install openai'") + st.stop() + + # Create OpenAI client with GitHub AI endpoint + client = OpenAI( + base_url="https://models.github.ai/inference", + api_key=token, + ) + + # For GitHub AI models, ensure the model_name includes the publisher + # If it doesn't have a publisher prefix, add "openai/" + if "/" not in model_name: + full_model_name = f"openai/{model_name}" + st.info(f"Using full model name: {full_model_name}") + else: + full_model_name = model_name + + # Prepare parameters based on model configuration + params = { + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello, this is a connection test."} + ], + "model": full_model_name + } + + # Add appropriate token parameter + token_param = config["param_name"] + params[token_param] = config[token_param] + + # Make API call + response = client.chat.completions.create(**params) + + # Check if response is valid + if response and response.choices and len(response.choices) > 0: + test_response = response.choices[0].message.content + st.success(f"✅ Connection successful! 
Response: {test_response[:50]}...") + + # Save working connection to session state + st.session_state.ai_models = { + "openai_client": client, + "model_name": full_model_name, # Store the full model name + "endpoint": "https://models.github.ai/inference", + "last_loaded": datetime.now().isoformat(), + "category": category + } + else: + st.error("❌ API returned an empty response") + + elif category == "Azure" or category in ["DeepSeek", "Meta", "Microsoft", "Mistral", "Other"]: + # Use Azure client for Azure API models + try: + from azure.ai.inference import ChatCompletionsClient + from azure.ai.inference.models import SystemMessage, UserMessage + from azure.core.credentials import AzureKeyCredential + except ImportError: + st.error("Azure AI packages not installed. Please run 'pip install azure-ai-inference azure-core'") + st.stop() + + # Define endpoint + endpoint = "https://models.inference.ai.azure.com" + + # Prepare API parameters + messages = [UserMessage("Hello, this is a connection test.")] + api_params, config = prepare_api_params(messages, model_name) + + # Create client with appropriate API version + api_version = config.get("api_version") + if api_version: + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(token), + api_version=api_version + ) + else: + client = ChatCompletionsClient( + endpoint=endpoint, + credential=AzureKeyCredential(token), + ) + + # Test with the prepared parameters + response = client.complete(**api_params) + + # Check if response is valid + if response and response.choices and len(response.choices) > 0: + test_response = response.choices[0].message.content + st.success(f"✅ Connection successful! Response: {test_response[:50]}...") + + # Save working connection to session state + st.session_state.ai_models = { + "client": client, + "model_name": model_name, + "endpoint": endpoint, + "last_loaded": datetime.now().isoformat(), + "category": category, + "api_version": api_version + } + else: + st.error("❌ API returned an empty response") + + else: + st.error(f"Unsupported model category: {category}") + + except ImportError as ie: + st.error(f"Module import error: {str(ie)}") + st.info("Try installing required packages: openai, azure-ai-inference and azure-core") + except Exception as e: + st.error(f"❌ API test failed: {str(e)}") + import traceback + st.code(traceback.format_exc()) + + # Model selection with enhanced UI + st.markdown("### 🤖 Model Selection") + st.markdown("Select an AI model for generating animation code:") + + # Group models by category for better organization + model_categories = {} + for model_name in MODEL_CONFIGS: + if model_name != "default": + category = MODEL_CONFIGS[model_name].get("category", "Other") + if category not in model_categories: + model_categories[category] = [] + model_categories[category].append(model_name) + + # Create tabbed interface for model categories + category_tabs = st.tabs(sorted(model_categories.keys())) + + for i, category in enumerate(sorted(model_categories.keys())): + with category_tabs[i]: + for model_name in sorted(model_categories[category]): + config = MODEL_CONFIGS[model_name] + is_selected = model_name == st.session_state.custom_model + warning = config.get("warning") + + # Create styled card for each model + warning_html = f'

<div>âš ī¸ {warning}</div>' if warning else ""
+
+                        st.markdown(f"""
+                        <div>
+                            <h4>{model_name}</h4>
+                            <p>Max Tokens: {config.get(config['param_name'], 'Unknown')}</p>
+                            <p>Category: {config['category']}</p>
+                            <p>API Version: {config['api_version'] if config['api_version'] else 'Default'}</p>
+                            {warning_html}
+                        </div>
+ """, unsafe_allow_html=True) + + # Button to select this model + button_label = "Selected ✓" if is_selected else "Select Model" + if st.button(button_label, key=f"model_{model_name}", disabled=is_selected): + st.session_state.custom_model = model_name + if st.session_state.ai_models and 'model_name' in st.session_state.ai_models: + st.session_state.ai_models['model_name'] = model_name + st.rerun() + + # Display current model selection + st.info(f"🤖 **Currently using: {st.session_state.custom_model}**") + + # Add a refresh button to update model connection + if st.button("🔄 Refresh Model Connection", key="refresh_model_connection"): + if st.session_state.ai_models and 'client' in st.session_state.ai_models: + try: + # Test connection with minimal prompt + from azure.ai.inference.models import UserMessage + model_name = st.session_state.custom_model + + # Prepare parameters + messages = [UserMessage("Hello")] + api_params, config = prepare_api_params(messages, model_name) + + # Check if we need a new client with specific API version + if config["api_version"] and config["api_version"] != st.session_state.ai_models.get("api_version"): + # Create version-specific client if needed + token = get_secret("github_token_api") + from azure.ai.inference import ChatCompletionsClient + from azure.core.credentials import AzureKeyCredential + + client = ChatCompletionsClient( + endpoint=st.session_state.ai_models["endpoint"], + credential=AzureKeyCredential(token), + api_version=config["api_version"] + ) + response = client.complete(**api_params) + + # Update session state with the new client + st.session_state.ai_models["client"] = client + st.session_state.ai_models["api_version"] = config["api_version"] + else: + response = st.session_state.ai_models["client"].complete(**api_params) + + st.success(f"✅ Connection to {model_name} successful!") + st.session_state.ai_models["model_name"] = model_name + + except Exception as e: + st.error(f"❌ Connection error: {str(e)}") + st.info("Please try the Debug Connection section to re-initialize the API connection.") + + # AI code generation + if st.session_state.ai_models and "client" in st.session_state.ai_models: + st.markdown("
", unsafe_allow_html=True) + st.markdown("#### Generate Animation from Description") + st.write("Describe the animation you want to create, or provide partial code to complete.") + + # Predefined animation ideas dropdown + animation_ideas = [ + "Select an idea...", + "Create a 3D animation showing a sphere morphing into a torus", + "Show a visual proof of the Pythagorean theorem", + "Visualize a Fourier transform converting a signal from time domain to frequency domain", + "Create an animation explaining neural network forward propagation", + "Illustrate the concept of integration with area under a curve" + ] + + selected_idea = st.selectbox( + "Try one of these ideas", + options=animation_ideas + ) + + prompt_value = selected_idea if selected_idea != "Select an idea..." else "" + + code_input = st.text_area( + "Your Prompt or Code", + value=prompt_value, + placeholder="Example: Create an animation that shows a circle morphing into a square while changing color from red to blue", + height=150 + ) + + if st.button("Generate Animation Code", key="gen_ai_code"): + if code_input: + with st.spinner("AI is generating your animation code..."): + try: + # Get the client and model name + client = st.session_state.ai_models["client"] + model_name = st.session_state.ai_models["model_name"] + + # Create the prompt + prompt = f"""Write a complete Manim animation scene based on this code or idea: + {code_input} + + The code should be a complete, working Manim animation that includes: + - Proper Scene class definition + - Constructor with animations + - Proper use of self.play() for animations + - Proper wait times between animations + + Here's the complete Manim code: + """ + + # Prepare API parameters + from azure.ai.inference.models import UserMessage + messages = [UserMessage(prompt)] + api_params, config = prepare_api_params(messages, model_name) + + # Make the API call with proper parameters + response = client.complete(**api_params) + + # Process the response + if response and response.choices and len(response.choices) > 0: + completed_code = response.choices[0].message.content + + # Extract code from markdown if present + if "```python" in completed_code: + completed_code = completed_code.split("```python")[1].split("```")[0] + elif "```" in completed_code: + completed_code = completed_code.split("```")[1].split("```")[0] + + # Add Scene class if missing + if "Scene" not in completed_code: + completed_code = f"""from manim import * + + class MyScene(Scene): + def construct(self): + {completed_code}""" + + # Store the generated code + st.session_state.generated_code = completed_code + else: + st.error("Failed to generate code. API returned an empty response.") + except Exception as e: + st.error(f"Error generating code: {str(e)}") + import traceback + st.code(traceback.format_exc()) + else: + st.warning("Please enter a description or prompt first") + + + # AI generated code display and actions + if "generated_code" in st.session_state and st.session_state.generated_code: + st.markdown("
", unsafe_allow_html=True) + st.markdown("#### Generated Animation Code") + st.code(st.session_state.generated_code, language="python") + + col_ai1, col_ai2 = st.columns(2) + with col_ai1: + if st.button("Use This Code", key="use_gen_code"): + st.session_state.code = st.session_state.generated_code + st.session_state.temp_code = st.session_state.generated_code + # Set pending tab switch to editor tab + st.session_state.pending_tab_switch = 0 + st.rerun() + + with col_ai2: + if st.button("Render Preview", key="render_preview"): + with st.spinner("Rendering preview..."): + video_data, status = generate_manim_video( + st.session_state.generated_code, + "mp4", + "480p", # Use lowest quality for preview + ANIMATION_SPEEDS["Normal"], + fps=st.session_state.settings["fps"] + ) + + if video_data: + st.video(video_data) + st.download_button( + label="Download Preview", + data=video_data, + file_name=f"manim_preview_{int(time.time())}.mp4", + mime="video/mp4" + ) + else: + st.error(f"Failed to generate preview: {status}") + st.markdown("
", unsafe_allow_html=True) else: - st.error("❌ No response") - if "ai_client" in st.session_state: - prompt = st.text_area("Describe animation or provide code") - if st.button("Generate Code"): - from azure.ai.inference.models import UserMessage - msgs = [UserMessage(f"Write a complete Manim scene:\n{prompt}")] - resp = st.session_state.ai_client.complete(**{"messages":msgs, "model":model, "max_tokens":8000}) - if resp.choices: - code = resp.choices[0].message.content - if "```python" in code: - code = code.split("```python")[1].split("```")[0] - st.session_state.generated_code = code - else: - st.error("No code returned") - if st.session_state.generated_code: - st.code(st.session_state.generated_code, language="python") - if st.button("Use This Code"): - st.session_state.code = st.session_state.generated_code - st.experimental_rerun() - - # Assets tab + st.warning("AI models not initialized. Please use the Debug Connection section to test API connectivity.") + else: + st.info("Please enter the correct password to access AI features") + + # ASSETS TAB with tabs[2]: - st.markdown("### 🎨 Assets") - imgs = st.file_uploader("Upload Images", type=["png","jpg","jpeg","svg"], accept_multiple_files=True) - if imgs: - adir = os.path.join(os.getcwd(),"manim_assets","images") - os.makedirs(adir, exist_ok=True) - for up in imgs: - ext = up.name.split(".")[-1] - fn = f"img_{int(time.time())}_{uuid.uuid4().hex[:6]}.{ext}" - path = os.path.join(adir, fn) - with open(path,"wb") as f: f.write(up.getvalue()) - st.image(path, caption=up.name, width=150) - if st.button(f"Use {up.name}"): - snippet = f"""image = ImageMobject(r"{path}") + st.markdown("### 🎨 Asset Management") + + asset_col1, asset_col2 = st.columns([1, 1]) + + with asset_col1: + # Image uploader section + st.markdown("#### 📸 Image Assets") + st.markdown("Upload images to use in your animations:") + + # Allow multiple image uploads + uploaded_images = st.file_uploader( + "Upload Images", + type=["jpg", "png", "jpeg", "svg"], + accept_multiple_files=True, + key="image_uploader_tab" + ) + + if uploaded_images: + # Create a unique image directory if it doesn't exist + image_dir = os.path.join(os.getcwd(), "manim_assets", "images") + os.makedirs(image_dir, exist_ok=True) + + # Process each uploaded image + for uploaded_image in uploaded_images: + # Generate a unique filename and save the image + file_extension = uploaded_image.name.split(".")[-1] + unique_filename = f"image_{int(time.time())}_{uuid.uuid4().hex[:8]}.{file_extension}" + image_path = os.path.join(image_dir, unique_filename) + + with open(image_path, "wb") as f: + f.write(uploaded_image.getvalue()) + + # Store the path in session state + if "image_paths" not in st.session_state: + st.session_state.image_paths = [] + + # Check if this image was already added + image_already_added = False + for img in st.session_state.image_paths: + if img["name"] == uploaded_image.name: + image_already_added = True + break + + if not image_already_added: + st.session_state.image_paths.append({ + "name": uploaded_image.name, + "path": image_path + }) + + # Display uploaded images in a grid + st.markdown("##### Uploaded Images:") + image_cols = st.columns(3) + + for i, img_info in enumerate(st.session_state.image_paths[-len(uploaded_images):]): + with image_cols[i % 3]: + try: + img = Image.open(img_info["path"]) + st.image(img, caption=img_info["name"], width=150) + + # Show code snippet for this specific image + if st.button(f"Use {img_info['name']}", key=f"use_img_{i}"): + image_code = f""" +# Load and 
display image +image = ImageMobject(r"{img_info['path']}") +image.scale(2) # Adjust size as needed self.play(FadeIn(image)) self.wait(1) """ - st.session_state.code += "\n" + snippet - st.experimental_rerun() - aud = st.file_uploader("Upload Audio", type=["mp3","wav","ogg"]) - if aud: - adir = os.path.join(os.getcwd(),"manim_assets","audio") - os.makedirs(adir, exist_ok=True) - fn = f"audio_{int(time.time())}.{aud.name.split('.')[-1]}" - path = os.path.join(adir, fn) - with open(path,"wb") as f: f.write(aud.getvalue()) - st.audio(aud) - st.success("Audio uploaded") - - # Timeline tab + if not st.session_state.code: + base_code = """from manim import * + +class ImageScene(Scene): + def construct(self): +""" + st.session_state.code = base_code + "\n " + image_code.replace("\n", "\n ") + else: + st.session_state.code += "\n" + image_code + + st.session_state.temp_code = st.session_state.code + st.success(f"Added {img_info['name']} to your code!") + + # Set pending tab switch to editor tab + st.session_state.pending_tab_switch = 0 + st.rerun() + except Exception as e: + st.error(f"Error loading image {img_info['name']}: {e}") + + # Display previously uploaded images + if st.session_state.image_paths: + with st.expander("Previously Uploaded Images"): + # Group images by 3 in each row + for i in range(0, len(st.session_state.image_paths), 3): + prev_cols = st.columns(3) + for j in range(3): + if i+j < len(st.session_state.image_paths): + img_info = st.session_state.image_paths[i+j] + with prev_cols[j]: + try: + img = Image.open(img_info["path"]) + st.image(img, caption=img_info["name"], width=100) + st.markdown(f"
Path: {img_info['path']}
", unsafe_allow_html=True) + except: + st.markdown(f"**{img_info['name']}**") + st.markdown(f"
Path: {img_info['path']}
", unsafe_allow_html=True) + + with asset_col2: + # Audio uploader section + st.markdown("#### đŸŽĩ Audio Assets") + st.markdown("Upload audio files for background or narration:") + + uploaded_audio = st.file_uploader("Upload Audio", type=["mp3", "wav", "ogg"], key="audio_uploader") + + if uploaded_audio: + # Create a unique audio directory if it doesn't exist + audio_dir = os.path.join(os.getcwd(), "manim_assets", "audio") + os.makedirs(audio_dir, exist_ok=True) + + # Generate a unique filename and save the audio + file_extension = uploaded_audio.name.split(".")[-1] + unique_filename = f"audio_{int(time.time())}.{file_extension}" + audio_path = os.path.join(audio_dir, unique_filename) + + with open(audio_path, "wb") as f: + f.write(uploaded_audio.getvalue()) + + # Store the path in session state + st.session_state.audio_path = audio_path + + # Display audio player + st.audio(uploaded_audio) + + st.markdown(f""" +
<div>
+                    <p>Audio: {uploaded_audio.name}</p>
+                    <p>Path: {audio_path}</p>
+                </div>
+ """, unsafe_allow_html=True) + + # Two options for audio usage + st.markdown("#### Add Audio to Your Animation") + + option = st.radio( + "Choose how to use audio:", + ["Background Audio", "Generate Audio from Text"] + ) + + if option == "Background Audio": + st.markdown("##### Code to add background audio:") + + # For with_sound decorator + audio_code1 = f""" +# Add this import at the top of your file +from manim.scene.scene_file_writer import SceneFileWriter + +# Add this decorator before your scene class +@with_sound("{audio_path}") +class YourScene(Scene): + def construct(self): + # Your animation code here +""" + st.code(audio_code1, language="python") + + if st.button("Use This Audio in Animation", key="use_audio_btn"): + st.success("Audio set for next render!") + + elif option == "Generate Audio from Text": + # Text-to-speech input + tts_text = st.text_area( + "Enter text for narration", + placeholder="Type the narration text here...", + height=100 + ) + + if st.button("Create Narration", key="create_narration_btn"): + try: + # Use basic TTS (placeholder for actual implementation) + st.warning("Text-to-speech feature requires additional setup. Using uploaded audio instead.") + st.session_state.audio_path = audio_path + st.success("Audio set for next render!") + except Exception as e: + st.error(f"Error creating narration: {str(e)}") + + # TIMELINE EDITOR TAB with tabs[3]: - st.markdown("### đŸŽžī¸ Timeline Editor") - st.info("Adjust code manually – timeline UI coming soon.") + # New code for reordering animation steps + updated_code = create_timeline_editor(st.session_state.code) + + # If code was modified by the timeline editor, update the session state + if updated_code != st.session_state.code: + st.session_state.code = updated_code + st.session_state.temp_code = updated_code - # Export tab + # EDUCATIONAL EXPORT TAB with tabs[4]: - st.markdown("### 🎓 Export") - st.warning("Export features coming soon.") + st.markdown("### 🎓 Educational Export Options") + + # Check if we have an animation to export + if not st.session_state.video_data: + st.warning("Generate an animation first before using educational export features.") + else: + st.markdown("Create various educational assets from your animation:") + + # Animation title and explanation + animation_title = st.text_input("Animation Title", value="Manim Animation", key="edu_title") + + st.markdown("#### Explanation Text") + st.markdown("Add explanatory text to accompany your animation. Use markdown formatting.") + st.markdown("Use ## to separate explanation sections for step-by-step sequence export.") + + explanation_text = st.text_area( + "Explanation (markdown supported)", + height=150, + placeholder="Explain your animation here...\n\n## Step 1\nIntroduction to the concept...\n\n## Step 2\nNext, we demonstrate..." 
+ ) + + # Export format selection + edu_format = st.selectbox( + "Export Format", + options=["PowerPoint Presentation", "Interactive HTML", "Explanation Sequence PDF"] + ) + + # Format-specific options + if edu_format == "PowerPoint Presentation": + st.info("Creates a PowerPoint file with your animation and explanation text.") + + elif edu_format == "Interactive HTML": + st.info("Creates an interactive HTML webpage with playback controls and explanation.") + include_controls = st.checkbox("Include interactive controls", value=True) + + elif edu_format == "Explanation Sequence PDF": + st.info("Creates a PDF with key frames and step-by-step explanations.") + frame_count = st.slider("Number of key frames", min_value=3, max_value=10, value=5) + + # Export button + if st.button("Export Educational Material", key="export_edu_btn"): + with st.spinner(f"Creating {edu_format}..."): + # Map selected format to internal format type + format_map = { + "PowerPoint Presentation": "powerpoint", + "Interactive HTML": "html", + "Explanation Sequence PDF": "sequence" + } + + # Create a temporary directory for export + temp_export_dir = tempfile.mkdtemp(prefix="manim_edu_export_") + + # Process the export + exported_data, file_type = export_to_educational_format( + st.session_state.video_data, + format_map[edu_format], + animation_title, + explanation_text, + temp_export_dir + ) + + if exported_data: + # File extension mapping + ext_map = { + "powerpoint": "pptx", + "html": "html", + "pdf": "pdf" + } + + # Download button + ext = ext_map.get(file_type, "zip") + filename = f"{animation_title.replace(' ', '_')}.{ext}" + + st.success(f"{edu_format} created successfully!") + st.download_button( + label=f"âŦ‡ī¸ Download {edu_format}", + data=exported_data, + file_name=filename, + mime=f"application/{ext}", + use_container_width=True + ) + + # For HTML, also offer to open in browser + if file_type == "html": + html_path = os.path.join(temp_export_dir, filename) + st.markdown(f"[🌐 Open in browser](file://{html_path})", unsafe_allow_html=True) + else: + st.error(f"Failed to create {edu_format}. 
Check logs for details.") + + # Show usage examples and tips + with st.expander("Usage Tips"): + st.markdown(""" + ### Educational Export Tips + + **PowerPoint Presentations** + - Great for lectures and classroom presentations + - Animation will autoplay when clicked + - Add detailed explanations in notes section + + **Interactive HTML** + - Perfect for websites and online learning platforms + - Students can control playback speed and navigation + - Mobile-friendly for learning on any device + + **Explanation Sequence** + - Ideal for printed materials and study guides + - Use ## headers to mark different explanation sections + - Each section will be paired with a key frame + """) - # Python Runner tab + # PYTHON RUNNER TAB with tabs[5]: - st.markdown("### 🐍 Python Runner") - code = st.text_area("Script", height=300, key="py_code") - inputs = [] - for i, line in enumerate(code.split("\n"), 1): - if "input(" in line: - prompt = re.search(r'input\(["\'](.+?)["\']\)', line) - label = prompt.group(1) if prompt else f"Input line {i}" - inputs.append(st.text_input(label, key=f"in_{i}")) - timeout = st.slider("Timeout (s)", 5, 300, 30) - if st.button("Run"): - temp = tempfile.NamedTemporaryFile(delete=False, suffix=".py") - temp.write(code.encode()) - temp.flush() - proc = subprocess.Popen([sys.executable, temp.name], - stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - text=True) - # feed inputs - out, err = proc.communicate(input="\n".join(inputs), timeout=timeout) - if err: - st.error(err) - if out: - st.code(out) - temp.close() + st.markdown("### 🐍 Python Script Runner") + st.markdown("Execute Python scripts and visualize the results directly.") + + # Predefined example scripts + example_scripts = { + "Select an example...": "", + "Basic Matplotlib Plot": """import matplotlib.pyplot as plt +import numpy as np + +# Create data +x = np.linspace(0, 10, 100) +y = np.sin(x) + +# Create plot +plt.figure(figsize=(10, 6)) +plt.plot(x, y, 'b-', label='sin(x)') +plt.title('Sine Wave') +plt.xlabel('x') +plt.ylabel('sin(x)') +plt.grid(True) +plt.legend() +""", + "User Input Example": """# This example demonstrates how to handle user input +name = input("Enter your name: ") +age = int(input("Enter your age: ")) + +print(f"Hello, {name}! In 10 years, you'll be {age + 10} years old.") + +# Let's get some numbers and calculate the average +num_count = int(input("How many numbers would you like to average? 
")) +total = 0 + +for i in range(num_count): + num = float(input(f"Enter number {i+1}: ")) + total += num + +average = total / num_count +print(f"The average of your {num_count} numbers is: {average}") +""", + "Pandas DataFrame": """import pandas as pd +import numpy as np + +# Create a sample dataframe +data = { + 'Name': ['Alice', 'Bob', 'Charlie', 'David', 'Emma'], + 'Age': [25, 30, 35, 40, 45], + 'Salary': [50000, 60000, 70000, 80000, 90000], + 'Department': ['HR', 'IT', 'Finance', 'Marketing', 'Engineering'] +} + +df = pd.DataFrame(data) + +# Display the dataframe +print("Sample DataFrame:") +print(df) + +# Basic statistics +print("\\nSummary Statistics:") +print(df.describe()) + +# Filtering +print("\\nEmployees older than 30:") +print(df[df['Age'] > 30]) +""", + "Seaborn Visualization": """import matplotlib.pyplot as plt +import seaborn as sns +import numpy as np +import pandas as pd + +# Set the style +sns.set_style("whitegrid") + +# Create sample data +np.random.seed(42) +data = np.random.randn(100, 3) +df = pd.DataFrame(data, columns=['A', 'B', 'C']) +df['category'] = pd.Categorical(['Group 1'] * 50 + ['Group 2'] * 50) + +# Create a paired plot +sns.pairplot(df, hue='category', palette='viridis') + +# Create another plot +plt.figure(figsize=(10, 6)) +sns.violinplot(x='category', y='A', data=df, palette='magma') +plt.title('Distribution of A by Category') +""" + } + + # Select example script + selected_example = st.selectbox("Select an example script:", options=list(example_scripts.keys())) + + # Python code editor + if selected_example != "Select an example..." and selected_example in example_scripts: + python_code = example_scripts[selected_example] + else: + python_code = st.session_state.python_script + + if ACE_EDITOR_AVAILABLE: + python_code = st_ace( + value=python_code, + language="python", + theme="monokai", + min_lines=15, + key=f"python_editor_{st.session_state.editor_key}" + ) + else: + python_code = st.text_area( + "Python Code", + value=python_code, + height=400, + key=f"python_textarea_{st.session_state.editor_key}" + ) + + # Store script in session state (without clearing existing code) + st.session_state.python_script = python_code + + # Check for input() calls + input_calls = detect_input_calls(python_code) + user_inputs = [] + + if input_calls: + st.markdown("### Input Values") + st.info(f"This script contains {len(input_calls)} input() calls. 
Please provide values below:") + + for i, input_call in enumerate(input_calls): + user_input = st.text_input( + f"{input_call['prompt']} (Line {input_call['line']})", + key=f"input_{i}" + ) + user_inputs.append(user_input) + + # Options and execution + col1, col2 = st.columns([2, 1]) + + with col1: + timeout_seconds = st.slider("Execution Timeout (seconds)", 5, 3600, 30) + + with col2: + run_btn = st.button("â–ļī¸ Run Script", use_container_width=True) + + if run_btn: + with st.spinner("Executing Python script..."): + result = run_python_script(python_code, inputs=user_inputs, timeout=timeout_seconds) + st.session_state.python_result = result + + # Display results + if st.session_state.python_result: + display_python_script_results(st.session_state.python_result) + + # Option to insert plots into Manim animation + if st.session_state.python_result["plots"]: + with st.expander("Add Plots to Manim Animation"): + st.markdown("Select a plot to include in your Manim animation:") + + plot_cols = st.columns(min(3, len(st.session_state.python_result["plots"]))) + + for i, plot_data in enumerate(st.session_state.python_result["plots"]): + # Create a unique temporary file for each plot + with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp: + tmp.write(plot_data) + plot_path = tmp.name + + # Display the plot with selection button + with plot_cols[i % len(plot_cols)]: + st.image(plot_data, use_column_width=True) + if st.button(f"Use Plot {i+1}", key=f"use_plot_{i}"): + # Create code to include this plot in Manim + plot_code = f""" +# Import the plot image +plot_image = ImageMobject(r"{plot_path}") +plot_image.scale(2) # Adjust size as needed +self.play(FadeIn(plot_image)) +self.wait(1) +""" + # Insert into editor code + if st.session_state.code: + st.session_state.code += "\n" + plot_code + st.session_state.temp_code = st.session_state.code + st.success(f"Plot {i+1} added to your animation code!") + # Set pending tab switch to editor tab + st.session_state.pending_tab_switch = 0 + st.rerun() + else: + basic_scene = f"""from manim import * + +class PlotScene(Scene): + def construct(self): + {plot_code} +""" + st.session_state.code = basic_scene + st.session_state.temp_code = basic_scene + st.success(f"Created new scene with Plot {i+1}!") + # Set pending tab switch to editor tab + st.session_state.pending_tab_switch = 0 + st.rerun() + + # Provide option to save the script + if st.button("📄 Save This Script", key="save_script_btn"): + # Generate a unique filename + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + script_filename = f"script_{timestamp}.py" + + # Offer download button for the script + st.download_button( + label="âŦ‡ī¸ Download Script", + data=python_code, + file_name=script_filename, + mime="text/plain" + ) + + # Show advanced examples and tips + with st.expander("Python Script Runner Tips"): + st.markdown(""" + ### Python Script Runner Tips + + **What can I run?** + - Any Python code that doesn't require direct UI interaction + - Libraries like Matplotlib, NumPy, Pandas, SciPy, etc. + - Data processing and visualization code + - Scripts that ask for user input (now supported!) 
+ + **What can't I run?** + - Streamlit, Gradio, Dash, or other web UIs + - Long-running operations (timeout will occur) + - Code that requires file access outside the temporary environment + + **Working with visualizations:** + - All Matplotlib/Seaborn plots will be automatically captured + - Pandas DataFrames are detected and displayed as tables + - Use `print()` to show text output + + **Handling user input:** + - The app detects input() calls and automatically creates text fields + - Input values you provide will be passed to the script when it runs + - Type conversion (like int(), float()) is preserved + + **Adding to animations:** + - Charts and plots can be directly added to your Manim animations + - Generated images will be properly scaled for your animation + - Perfect for educational content combining data and animations + """) + + # Help section + with st.sidebar.expander("â„šī¸ Help & Info"): + st.markdown(""" + ### About Manim Animation Studio + + This app allows you to create mathematical animations using Manim, + an animation engine for explanatory math videos. + + ### Example Code + + ```python + from manim import * + + class SimpleExample(Scene): + def construct(self): + circle = Circle(color=BLUE) + self.play(Create(circle)) + + square = Square(color=RED).next_to(circle, RIGHT) + self.play(Create(square)) + + text = Text("Manim Animation").next_to(VGroup(circle, square), DOWN) + self.play(Write(text)) + + self.wait(2) + ``` + """) + + # Handle tab switching with session state to prevent refresh loop + if st.session_state.pending_tab_switch is not None: + st.session_state.active_tab = st.session_state.pending_tab_switch + st.session_state.pending_tab_switch = None + + # Set tabs active state + for i, tab in enumerate(tabs): + if i == st.session_state.active_tab: + tab.active = True + + # Mark first load as complete to prevent unnecessary refreshes + if not st.session_state.first_load_complete: + st.session_state.first_load_complete = True if __name__ == "__main__": - main() + main() \ No newline at end of file
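
The Python Script Runner tips above say the app scans a script for `input()` calls and builds matching text fields before execution. The `detect_input_calls` helper itself is not shown in this diff, so the sketch below is only an assumption of how such detection could work; the function name, the regex, and the returned `prompt`/`line` keys are illustrative, chosen to mirror how the runner tab consumes them.

```python
import re

def detect_input_calls_sketch(source: str):
    """Find input() calls in a script and report their prompts and line numbers.

    Hypothetical stand-in for the app's detect_input_calls helper; the real
    implementation may differ.
    """
    # Match input("..."), input('...'), bare input(), and f-string prompts.
    pattern = re.compile(r"""input\(\s*(?:f?["']([^"']*)["'])?\s*\)""")
    calls = []
    for lineno, line in enumerate(source.splitlines(), start=1):
        for match in pattern.finditer(line):
            # Fall back to a generic label when the call has no literal prompt.
            prompt = match.group(1) or f"Input at line {lineno}"
            calls.append({"prompt": prompt, "line": lineno})
    return calls

if __name__ == "__main__":
    demo = 'name = input("Enter your name: ")\nage = int(input("Enter your age: "))'
    print(detect_input_calls_sketch(demo))
```

Under these assumptions, each detected call maps one-to-one onto a Streamlit text input, and the collected values can be joined with newlines and fed to the script's stdin when it runs.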