import gradio as gr import os import json import requests import smtplib import sqlite3 from datetime import datetime from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from algebra_solver import AlgebraSolver # Get Hugging Face token from environment variable HF_TOKEN = os.getenv("HF_TOKEN") # Database configuration DATABASE_FILE = "feedback.db" # Email configuration (private) FEEDBACK_EMAIL = "mahdi@brightmind-ai.com" SMTP_SERVER = "smtp.gmail.com" SMTP_PORT = 587 SMTP_USERNAME = os.getenv("SMTP_USERNAME", "") SMTP_PASSWORD = os.getenv("SMTP_PASSWORD", "") # Debug token detection print(f"🔍 Token detection debug:") print(f" - HF_TOKEN exists: {HF_TOKEN is not None}") print(f" - HF_TOKEN length: {len(HF_TOKEN) if HF_TOKEN else 0}") print(f" - HF_TOKEN starts with hf_: {HF_TOKEN.startswith('hf_') if HF_TOKEN else False}") print(f" - SMTP configured: {SMTP_USERNAME != '' and SMTP_PASSWORD != ''}") def generate_lesson_plan_with_progress(topic, subject, grade_level, duration, difficulty="intermediate"): """Generate a lesson plan with progress updates""" # Show progress bar progress_html = """
🤖 Generating your lesson plan...
Analyzing requirements and creating content...
""" yield progress_html, "" print(f"🚀 Starting lesson plan generation for: {topic}") print(f"📚 Subject: {subject}, Grade: {grade_level}, Duration: {duration}min") # Try Hugging Face API first if token is available if HF_TOKEN: print("🤖 Token found! Attempting Hugging Face API call...") try: prompt = f"""Create a comprehensive lesson plan for: Topic: {topic} Subject: {subject} Grade Level: {grade_level} Duration: {duration} minutes Difficulty: {difficulty} Include: 1. Learning objectives 2. Activities with time allocations 3. Materials needed 4. Assessment methods 5. Differentiation strategies 6. Educational standards 7. Real-world connections 8. Extension activities Make it practical, engaging, and ready for classroom use.""" result = call_hugging_face_api_content(prompt) if result: print("✅ Successfully generated with Hugging Face API") # Show completion and return result completion_html = """
✅ Lesson Plan Generated Successfully!
""" yield completion_html, result else: print("❌ API call returned no content, falling back to algorithms") raise Exception("No content returned from API") except Exception as e: print(f"❌ Hugging Face API failed: {e}") print("🔄 Falling back to educational algorithms...") # Show fallback progress fallback_html = """
🔄 Using educational algorithms...
""" yield fallback_html, "" result = generate_with_algorithms(topic, subject, grade_level, duration, difficulty) print("✅ Generated with educational algorithms") # Show completion completion_html = """
✅ Lesson Plan Generated with Educational Algorithms!
""" yield completion_html, result else: print("⚠️ No Hugging Face token found, using educational algorithms...") # Show progress for algorithms algo_html = """
📚 Using educational algorithms...
""" yield algo_html, "" result = generate_with_algorithms(topic, subject, grade_level, duration, difficulty) print("✅ Generated with educational algorithms") # Show completion completion_html = """
✅ Lesson Plan Generated Successfully!
""" yield completion_html, result def generate_lesson_plan(topic, subject, grade_level, duration, difficulty="intermediate"): """Generate a lesson plan using Hugging Face API or fallback algorithms""" print(f"🚀 Starting lesson plan generation for: {topic}") print(f"📚 Subject: {subject}, Grade: {grade_level}, Duration: {duration}min") # Try Hugging Face API first if token is available if HF_TOKEN: print("🤖 Token found! Attempting Hugging Face API call...") try: result = generate_with_hugging_face(topic, subject, grade_level, duration, difficulty) print("✅ Successfully generated with Hugging Face API") return result except Exception as e: print(f"❌ Hugging Face API failed: {e}") print("🔄 Falling back to educational algorithms...") else: print("⚠️ No Hugging Face token found, using educational algorithms...") # Fallback to educational algorithms result = generate_with_algorithms(topic, subject, grade_level, duration, difficulty) print("✅ Generated with educational algorithms") return result def typewriter_effect(text, status_message): """Create typewriter effect for displaying content""" import time # Show typewriter container typewriter_html = f"""
📖 {status_message}
""" yield typewriter_html, "" # Typewriter effect displayed_text = "" for i, char in enumerate(text): displayed_text += char if char == '\n': displayed_text += " " # Add indentation for new lines # Update the content updated_html = f"""
📖 {status_message}
{displayed_text}|
""" yield updated_html, "" time.sleep(0.02) # Adjust speed here # Final result without cursor - just return the text content yield "", text def generate_content_with_progress(topic, subject, grade_level, difficulty, content_type, content_length): """Generate educational content with progress updates""" # Show progress bar progress_html = """
📝 Generating your educational content...
Creating materials tailored to your specifications...
""" yield progress_html, "" print(f"🚀 Starting content generation for: {topic}") print(f"📚 Subject: {subject}, Grade: {grade_level}, Type: {content_type}") # Try Hugging Face API first if token is available if HF_TOKEN: print("🤖 Token found! Attempting Hugging Face API call for content generation...") print(f"🔑 Token length: {len(HF_TOKEN)}") print(f"🔑 Token starts with hf_: {HF_TOKEN.startswith('hf_')}") try: # Use the enhanced content agent print("🎨 Creating content generation agent...") agent_prompt = create_content_agent(topic, subject, grade_level, difficulty, content_type, content_length) print(f"📝 Sending agent prompt to API...") ai_content = call_hugging_face_api_content(agent_prompt, content_length) if ai_content: print("✅ Content generation API call successful!") print(f"📄 Generated content length: {len(ai_content)} characters") # Process content with image placeholders and multimedia print("🖼️ Processing content with multimedia elements...") print(f"📄 Content before processing: {len(ai_content)} characters") enhanced_content = process_content_with_images(ai_content, topic, content_type) print(f"📄 Enhanced content length: {len(enhanced_content)} characters") print(f"📄 First 200 chars: {enhanced_content[:200]}...") print(f"📄 Last 200 chars: ...{enhanced_content[-200:]}") yield "", enhanced_content return else: print("❌ Content generation API call returned no content") print("🔄 Falling back to template content...") except Exception as e: print(f"❌ Content generation API error: {str(e)}") print("🔄 Falling back to template content...") else: print("❌ No HF_TOKEN available for content generation") print("🔄 Using fallback content...") # Enhanced fallback content with proper text rendering if content_type == "Worksheets": fallback_content = f"""# **{topic} in {subject}: Educational Worksheet** **Grade Level:** {grade_level} | **Subject:** {subject} | **Difficulty:** {difficulty} --- ## **Introduction** {topic} is an important topic in {subject} that helps students develop critical thinking and problem-solving skills. In this worksheet, you'll explore {topic} through engaging activities and real-world examples. --- ## **Learning Objectives** By the end of this worksheet, you will be able to: - Understand the basic concepts of {topic} - Apply {topic} knowledge to solve problems - Connect {topic} to real-world situations - Demonstrate understanding through assessment activities {generate_real_image(f"Visual introduction to {topic} concepts", topic, content_type)} --- ## **Learning Objectives** By the end of this worksheet, you will be able to: - Use variables to represent unknown quantities in science problems - Solve equations related to speed, density, and chemical reactions - Translate word problems into algebraic equations - Check your solutions for accuracy ✅ **Self-check:** Rate your confidence (1-5) before starting: --- ## **Practice Activities** ### **1. Basic Concepts** Test your understanding of {topic} with these questions. **Example Question:** What is the most important aspect of {topic}? **Answer:** [Write your answer here] **Practice Questions:** 1. How does {topic} relate to {subject}? **Answer:** _________________________________ 2. What are the key characteristics of {topic}? **Answer:** _________________________________ 3. Can you give an example of {topic} in real life? **Answer:** _________________________________ --- ## **Application Problems** ### **2. Real-World Connections** Apply your knowledge of {topic} to solve these problems. **Problem 1:** Describe how {topic} is used in {subject}. **Solution:** _________________________________ **Problem 2:** What would happen if {topic} didn't exist? **Solution:** _________________________________ {generate_real_image(f"Diagram showing {topic} key concepts", topic, content_type)} --- ## **Critical Thinking Questions** ### **3. Analysis and Evaluation** Think critically about {topic} and answer these questions. 1. **Analysis:** What are the main components of {topic}? **Your Answer:** _________________________________ 2. **Evaluation:** How important is {topic} in {subject}? **Your Answer:** _________________________________ 3. **Synthesis:** How does {topic} connect to other topics in {subject}? **Your Answer:** _________________________________ --- ## **Self-Assessment** Rate your understanding of {topic}: - I can explain the basic concepts of {topic} - I can identify examples of {topic} in real life - I can apply {topic} knowledge to solve problems - I can connect {topic} to other subjects **Reflection:** What was the most interesting thing you learned about {topic}? **Your Answer:** _________________________________ {generate_real_image(f"Real-world examples of {topic} in action", topic, content_type)} --- ## **Self-Assessment** ✅ **Rate your understanding (1-5):** - I can set up equations from word problems. - I can solve for unknown variables. - I can check my answers for accuracy. 📌 **Reflection:** What was the hardest part? {generate_real_image(f"Visual assessment rubric for {topic}", topic, content_type)} --- ## **Answer Key** ### **Practice Activities** 1. **Answer:** {topic} relates to {subject} by [provide specific connection] 2. **Answer:** Key characteristics include [list main features] 3. **Answer:** Real-life example: [provide concrete example] ### **Application Problems** 1. **Answer:** {topic} is used in {subject} to [explain application] 2. **Answer:** Without {topic}, [explain consequences] ### **Critical Thinking Questions** 1. **Answer:** Main components are [list components] 2. **Answer:** Importance level: [explain significance] 3. **Answer:** Connections include [list related topics] --- ### **📢 Teacher's Notes** - **Extension Activity:** Have students research additional examples of {topic} in {subject} - **Assessment:** Use the self-assessment section to gauge student understanding - **Differentiation:** Provide additional resources for advanced students {generate_real_image(f"Summary chart of {topic} concepts", topic, content_type)} --- **Total Words:** ~1,200 | **Sections:** 6 | **Format:** Educational Worksheet ✅ **Worksheet Complete!** Ready for print or digital use.""" else: # Generic fallback for other content types fallback_content = f"""# {content_type}: {topic} ## Learning Objectives By the end of this {content_type.lower()}, students will be able to: - Understand the basic concepts of {topic} - Apply knowledge through practical exercises - Demonstrate comprehension through assessment ## Grade Level: {grade_level} ## Subject: {subject} ## Difficulty: {difficulty} --- ## Introduction This {content_type.lower()} is designed for {grade_level} students studying {subject}. The content focuses on {topic} at a {difficulty.lower()} level. {generate_real_image(f"Visual introduction to {topic} concepts", topic, content_type)} ## Main Content ### Section 1: Understanding {topic} [Content will be generated based on the specific topic and grade level] {generate_real_image(f"Diagram showing {topic} key concepts", topic, content_type)} ### Section 2: Practical Applications [Real-world examples and applications] {generate_real_image(f"Real-world examples of {topic} in action", topic, content_type)} ### Section 3: Activities and Exercises [Hands-on activities appropriate for {grade_level}] {generate_real_image(f"Step-by-step activity instructions for {topic}", topic, content_type)} ## Assessment [Questions and exercises to test understanding] {generate_real_image(f"Visual assessment rubric for {topic}", topic, content_type)} ## Additional Resources [Links and references for further learning] --- *Generated for {grade_level} {subject} students - {difficulty} level*""" # Process fallback content with multimedia elements enhanced_fallback = process_content_with_images(fallback_content, topic, content_type) # Yield the final content (this will replace the progress bar) yield "", enhanced_fallback def generate_quiz_with_progress(topic, subject, grade_level, question_count, question_types): """Generate a quiz with progress updates""" # Show progress bar progress_html = """
🎯 Generating your quiz...
Creating questions and answers...
""" yield progress_html, "" print(f"🎯 Starting quiz generation for: {topic}") print(f"📚 Subject: {subject}, Grade: {grade_level}, Questions: {question_count}") print(f"📝 Question types: {', '.join(question_types)}") # Try Hugging Face API first if token is available if HF_TOKEN: print("🤖 Token found! Attempting Hugging Face API call for quiz...") try: prompt = f"""Create a comprehensive quiz about "{topic}" for {grade_level} students studying {subject}. Requirements: - Generate exactly {question_count} questions - Include these question types: {', '.join(question_types)} - Make questions appropriate for {grade_level} grade level - Ensure questions are specific to "{topic}" - Include correct answers and explanations - Format as a structured quiz with clear questions, options, and explanations - Make it engaging and educational Format the response as a structured quiz with clear questions, options, and explanations.""" result = call_hugging_face_api_content(prompt) if result: print("✅ Successfully generated quiz with Hugging Face API") # Show completion and return result completion_html = """
✅ Quiz Generated Successfully!
""" yield completion_html, result else: print("❌ API call returned no content, falling back to algorithms") raise Exception("No content returned from API") except Exception as e: print(f"❌ Hugging Face API failed: {e}") print("🔄 Falling back to educational algorithms...") # Show fallback progress fallback_html = """
🔄 Using educational algorithms...
""" yield fallback_html, "" result = generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types) print("✅ Generated quiz with educational algorithms") # Show completion completion_html = """
✅ Quiz Generated with Educational Algorithms!
""" yield completion_html, result else: print("⚠️ No Hugging Face token found, using educational algorithms...") # Show progress for algorithms algo_html = """
🎯 Using educational algorithms...
""" yield algo_html, "" result = generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types) print("✅ Generated quiz with educational algorithms") # Show completion completion_html = """
✅ Quiz Generated Successfully!
""" yield completion_html, result def generate_quiz(topic, subject, grade_level, question_count, question_types): """Generate a quiz using Hugging Face API or fallback algorithms""" print(f"🎯 Starting quiz generation for: {topic}") print(f"📚 Subject: {subject}, Grade: {grade_level}, Questions: {question_count}") print(f"📝 Question types: {', '.join(question_types)}") # Try Hugging Face API first if token is available if HF_TOKEN: print("🤖 Token found! Attempting Hugging Face API call for quiz...") try: result = generate_quiz_with_hugging_face(topic, subject, grade_level, question_count, question_types) print("✅ Successfully generated quiz with Hugging Face API") return result except Exception as e: print(f"❌ Hugging Face API failed: {e}") print("🔄 Falling back to educational algorithms...") else: print("⚠️ No Hugging Face token found, using educational algorithms...") # Fallback to educational algorithms result = generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types) print("✅ Generated quiz with educational algorithms") return result def generate_with_hugging_face(topic, subject, grade_level, duration, difficulty): """Generate lesson plan using Hugging Face Inference Providers API""" print(f"🌐 Making API call to Hugging Face Inference Providers...") print(f"🔗 Endpoint: https://router.huggingface.co/v1/chat/completions") print(f"🔑 Token available: {HF_TOKEN is not None}") prompt = f"""Create a comprehensive lesson plan for: Topic: {topic} Subject: {subject} Grade Level: {grade_level} Duration: {duration} minutes Difficulty: {difficulty} Include: 1. Learning objectives 2. Activities with time allocations 3. Materials needed 4. Assessment methods 5. Differentiation strategies 6. Educational standards Format as a structured lesson plan.""" print(f"📝 Prompt length: {len(prompt)}") headers = { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json" } data = { "model": "deepseek-ai/DeepSeek-V3-0324", "messages": [ { "role": "user", "content": prompt } ], "max_tokens": 4000, "temperature": 0.7 } print(f"📡 Sending API request...") try: response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=data, timeout=30 ) print(f"📡 API Response Status: {response.status_code}") if response.status_code == 200: result = response.json() print(f"✅ API Response received: {type(result)}") print(f"📄 Response content length: {len(str(result))}") if "choices" in result and len(result["choices"]) > 0: content = result["choices"][0]["message"]["content"] else: content = "" print(f"📝 Generated content length: {len(content)}") if content and len(content) > 50: # Ensure we got meaningful content return format_lesson_plan(content, topic, subject, grade_level, duration, True) else: print("⚠️ API returned empty or too short content, falling back to algorithms") raise Exception("Empty or insufficient content from API") else: error_text = response.text print(f"❌ API Error: {response.status_code} - {error_text}") raise Exception(f"API request failed: {response.status_code} - {error_text}") except requests.exceptions.Timeout: print("⏰ API request timed out, falling back to algorithms") raise Exception("API request timeout") except requests.exceptions.RequestException as e: print(f"🌐 Network error: {e}, falling back to algorithms") raise Exception(f"Network error: {e}") except Exception as e: print(f"❌ Unexpected error: {e}, falling back to algorithms") raise Exception(f"Unexpected error: {e}") def generate_with_algorithms(topic, subject, grade_level, duration, difficulty): """Generate lesson plan using educational algorithms""" grade = int(grade_level.split('-')[0]) if '-' in grade_level else int(grade_level) # Generate objectives based on Bloom's Taxonomy objectives = generate_objectives(topic, grade) # Generate activities activities = generate_activities(topic, int(duration), grade) # Generate materials materials = generate_materials(subject, grade) # Generate assessment assessment = generate_assessment(grade) # Generate differentiation differentiation = generate_differentiation() lesson_plan = { "topic": topic, "subject": subject, "grade_level": grade_level, "duration": duration, "difficulty": difficulty, "objectives": objectives, "activities": activities, "materials": materials, "assessment": assessment, "differentiation": differentiation, "ai_generated": False, "generated_at": datetime.now().strftime("%Y-%m-%d") } return format_lesson_plan_from_dict(lesson_plan) def generate_objectives(topic, grade): """Generate learning objectives based on grade level""" if grade <= 2: return [ f"Students will identify key concepts about {topic}", f"Students will demonstrate understanding through hands-on activities" ] elif grade <= 5: return [ f"Students will explain the main concepts of {topic}", f"Students will apply knowledge through practical exercises", f"Students will analyze information about {topic}" ] elif grade <= 8: return [ f"Students will analyze and evaluate information about {topic}", f"Students will synthesize knowledge through creative projects", f"Students will demonstrate critical thinking skills" ] else: return [ f"Students will critically analyze complex concepts in {topic}", f"Students will synthesize information from multiple sources", f"Students will create original work demonstrating mastery" ] def generate_activities(topic, duration, grade): """Generate activities with time allocations""" activities = [] # Introduction (15% of time) intro_time = max(5, int(duration * 0.15)) activities.append({ "name": f"Introduction to {topic}", "duration": f"{intro_time} minutes", "description": f"Engage students with a hook activity related to {topic}" }) # Main activities (60% of time) main_time = int(duration * 0.6) if grade <= 2: activities.append({ "name": "Hands-on Exploration", "duration": f"{main_time} minutes", "description": f"Interactive exploration of {topic} through manipulatives and visual aids" }) elif grade <= 5: activities.append({ "name": "Guided Discovery", "duration": f"{int(main_time * 0.6)} minutes", "description": f"Structured exploration of {topic} with teacher guidance" }) activities.append({ "name": "Collaborative Learning", "duration": f"{int(main_time * 0.4)} minutes", "description": f"Group work to deepen understanding of {topic}" }) else: activities.append({ "name": "Independent Research", "duration": f"{int(main_time * 0.4)} minutes", "description": f"Students research aspects of {topic} independently" }) activities.append({ "name": "Discussion and Analysis", "duration": f"{int(main_time * 0.6)} minutes", "description": f"Class discussion analyzing different perspectives on {topic}" }) # Practice (20% of time) practice_time = int(duration * 0.2) activities.append({ "name": "Practice and Application", "duration": f"{practice_time} minutes", "description": f"Students apply their knowledge through exercises related to {topic}" }) return activities def generate_materials(subject, grade): """Generate materials based on subject and grade""" materials = ["Whiteboard or chart paper", "Markers or chalk"] if grade <= 2: materials.extend(["Visual aids and pictures", "Manipulatives or hands-on objects", "Colored pencils and paper"]) elif grade <= 5: materials.extend(["Textbooks or reference materials", "Worksheets or activity sheets", "Art supplies for projects"]) elif grade <= 8: materials.extend(["Research materials (books, articles)", "Technology devices (if available)", "Presentation materials"]) else: materials.extend(["Advanced reference materials", "Technology for research and presentation", "Writing materials for essays or reports"]) # Subject-specific materials if subject.lower() == "science": materials.extend(["Science equipment or models", "Safety materials (if needed)"]) elif subject.lower() == "math": materials.extend(["Calculators (if appropriate)", "Graph paper or rulers"]) elif subject.lower() == "history": materials.extend(["Historical documents or primary sources", "Maps or timelines"]) return materials def generate_assessment(grade): """Generate assessment methods based on grade level""" if grade <= 2: return ["Observation of student participation", "Simple oral questions and answers", "Drawing or visual representation of learning"] elif grade <= 5: return ["Quick quiz or exit ticket", "Student presentations or demonstrations", "Portfolio of completed work"] elif grade <= 8: return ["Written reflection or journal entry", "Group project evaluation", "Peer assessment activities"] else: return ["Essay or written analysis", "Research project presentation", "Peer review and self-assessment"] def generate_differentiation(): """Generate differentiation strategies""" return { "struggling": [ "Provide additional visual aids and examples", "Break down complex concepts into smaller parts", "Offer one-on-one support during activities", "Use simplified language and vocabulary" ], "advanced": [ "Provide extension activities and challenges", "Encourage independent research and exploration", "Assign leadership roles in group activities", "Offer opportunities for creative expression" ], "ell": [ "Use visual supports and gestures", "Provide vocabulary lists and definitions", "Pair with native speakers for support", "Use multimedia resources when available" ] } def format_lesson_plan(content, topic, subject, grade_level, duration, ai_generated): """Format lesson plan from AI response""" return f""" # Lesson Plan: {topic} **Subject:** {subject} **Grade Level:** {grade_level} **Duration:** {duration} minutes **Generated:** {datetime.now().strftime("%Y-%m-%d")} **AI Generated:** {'Yes' if ai_generated else 'No'} --- ## Content {content} --- *Generated by BrightMind AI - Educational Technology Platform* """ def format_lesson_plan_from_dict(lesson_plan): """Format lesson plan from dictionary""" content = f""" # Lesson Plan: {lesson_plan['topic']} **Subject:** {lesson_plan['subject']} **Grade Level:** {lesson_plan['grade_level']} **Duration:** {lesson_plan['duration']} minutes **Difficulty:** {lesson_plan['difficulty']} **Generated:** {lesson_plan['generated_at']} **AI Generated:** {'Yes' if lesson_plan['ai_generated'] else 'No'} --- ## Learning Objectives """ for i, objective in enumerate(lesson_plan['objectives'], 1): content += f"{i}. {objective}\n" content += "\n## Activities\n\n" for activity in lesson_plan['activities']: content += f"**{activity['name']}** ({activity['duration']})\n" content += f"{activity['description']}\n\n" content += "## Materials Needed\n\n" for material in lesson_plan['materials']: content += f"• {material}\n" content += "\n## Assessment Methods\n\n" for i, assessment in enumerate(lesson_plan['assessment'], 1): content += f"{i}. {assessment}\n" content += "\n## Differentiation Strategies\n\n" for category, strategies in lesson_plan['differentiation'].items(): content += f"**{category.title()} Learners:**\n" for strategy in strategies: content += f"• {strategy}\n" content += "\n" content += "\n---\n*Generated by BrightMind AI - Educational Technology Platform*" return content def generate_quiz_with_hugging_face(topic, subject, grade_level, question_count, question_types): """Generate quiz using Hugging Face Inference Providers API""" print(f"🌐 Making API call to Hugging Face Inference Providers for quiz...") print(f"🔗 Endpoint: https://router.huggingface.co/v1/chat/completions") print(f"🔑 Token available: {HF_TOKEN is not None}") prompt = f"""Create a quiz about "{topic}" for {grade_level} students studying {subject}. Requirements: - Generate exactly {question_count} questions - Include these question types: {', '.join(question_types)} - Make questions appropriate for {grade_level} grade level - Ensure questions are specific to "{topic}" - Include correct answers and explanations Format the response as a structured quiz with clear questions, options, and explanations.""" print(f"📝 Prompt length: {len(prompt)}") headers = { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json" } data = { "model": "deepseek-ai/DeepSeek-V3-0324", "messages": [ { "role": "user", "content": prompt } ], "max_tokens": 4000, "temperature": 0.7 } print(f"📡 Sending API request...") try: response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=data, timeout=30 ) print(f"📡 API Response Status: {response.status_code}") if response.status_code == 200: result = response.json() print(f"✅ API Response received: {type(result)}") print(f"📄 Response content length: {len(str(result))}") if "choices" in result and len(result["choices"]) > 0: content = result["choices"][0]["message"]["content"] else: content = "" print(f"📝 Generated content length: {len(content)}") if content and len(content) > 50: # Ensure we got meaningful content return format_quiz(content, topic, subject, grade_level, question_count, True) else: print("⚠️ API returned empty or too short content, falling back to algorithms") raise Exception("Empty or insufficient content from API") else: error_text = response.text print(f"❌ API Error: {response.status_code} - {error_text}") raise Exception(f"API request failed: {response.status_code} - {error_text}") except requests.exceptions.Timeout: print("⏰ API request timed out, falling back to algorithms") raise Exception("API request timeout") except requests.exceptions.RequestException as e: print(f"🌐 Network error: {e}, falling back to algorithms") raise Exception(f"Network error: {e}") except Exception as e: print(f"❌ Unexpected error: {e}, falling back to algorithms") raise Exception(f"Unexpected error: {e}") def generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types): """Generate quiz using educational algorithms""" questions = [] grade = int(grade_level.split('-')[0]) if '-' in grade_level else int(grade_level) for i in range(int(question_count)): question_type = question_types[i % len(question_types)] question = generate_question_by_type(question_type, topic, subject, grade, i + 1) questions.append(question) quiz = { "topic": topic, "subject": subject, "grade_level": grade_level, "question_count": int(question_count), "questions": questions, "generated_at": datetime.now().strftime("%Y-%m-%d"), "ai_generated": False } return format_quiz_from_dict(quiz) def generate_question_by_type(question_type, topic, subject, grade, question_number): """Generate a specific type of question""" if question_type == "Multiple Choice": return generate_multiple_choice_question(topic, subject, grade, question_number) elif question_type == "True/False": return generate_true_false_question(topic, subject, grade, question_number) elif question_type == "Short Answer": return generate_short_answer_question(topic, subject, grade, question_number) elif question_type == "Fill in the Blank": return generate_fill_in_blank_question(topic, subject, grade, question_number) else: return generate_multiple_choice_question(topic, subject, grade, question_number) def generate_multiple_choice_question(topic, subject, grade, question_number): """Generate a multiple choice question""" templates = [ { "question": f"What is the main concept of {topic}?", "options": [ f"A fundamental principle in {subject}", "A type of animal", "A mathematical formula", "A historical event" ], "correct": "A", "explanation": f"This question tests basic understanding of {topic} in the context of {subject}." }, { "question": f"Which of the following best describes {topic}?", "options": [ "A complex system with multiple components", "A simple process with one step", "A type of building material", "A musical instrument" ], "correct": "A", "explanation": f"{topic} involves multiple interconnected elements that work together." }, { "question": f"How does {topic} relate to {subject}?", "options": [ f"It provides a foundation for understanding {subject} concepts", "It is unrelated to the subject", "It only applies to advanced students", "It is only theoretical" ], "correct": "A", "explanation": f"{topic} helps students understand broader concepts in {subject}." } ] template = templates[question_number % len(templates)] return { "type": "Multiple Choice", "question": template["question"], "options": template["options"], "correct_answer": template["correct"], "explanation": template["explanation"] } def generate_true_false_question(topic, subject, grade, question_number): """Generate a true/false question""" templates = [ { "question": f"{topic} is an important concept in {subject}.", "correct": "True", "explanation": f"{topic} is indeed a fundamental concept in {subject} that students need to understand." }, { "question": f"{topic} can only be learned through memorization.", "correct": "False", "explanation": f"{topic} is best learned through understanding, practice, and application, not just memorization." }, { "question": f"Understanding {topic} helps students in other areas of {subject}.", "correct": "True", "explanation": f"Knowledge of {topic} provides a foundation for understanding related concepts in {subject}." } ] template = templates[question_number % len(templates)] return { "type": "True/False", "question": template["question"], "correct_answer": template["correct"], "explanation": template["explanation"] } def generate_short_answer_question(topic, subject, grade, question_number): """Generate a short answer question""" templates = [ { "question": f"Explain what {topic} means in your own words.", "correct_answer": f"Student should demonstrate understanding of {topic} concepts", "explanation": f"This question allows students to express their understanding of {topic} in their own words." }, { "question": f"How does {topic} relate to {subject}?", "correct_answer": f"Student should explain the connection between {topic} and {subject}", "explanation": f"This question tests students' ability to make connections between concepts." }, { "question": f"What are the key components of {topic}?", "correct_answer": f"Student should identify the main elements of {topic}", "explanation": f"This question tests students' ability to break down complex concepts." } ] template = templates[question_number % len(templates)] return { "type": "Short Answer", "question": template["question"], "correct_answer": template["correct_answer"], "explanation": template["explanation"] } def generate_fill_in_blank_question(topic, subject, grade, question_number): """Generate a fill in the blank question""" templates = [ { "question": f"{topic} is important because it helps us understand ______.", "correct_answer": f"{subject} concepts", "explanation": f"{topic} provides a foundation for understanding broader {subject} concepts." }, { "question": f"The main purpose of {topic} is to ______.", "correct_answer": f"enhance learning in {subject}", "explanation": f"{topic} serves to improve students' understanding of {subject}." }, { "question": f"Students learn {topic} to better understand ______.", "correct_answer": f"complex {subject} concepts", "explanation": f"{topic} helps students grasp more advanced {subject} topics." } ] template = templates[question_number % len(templates)] return { "type": "Fill in the Blank", "question": template["question"], "correct_answer": template["correct_answer"], "explanation": template["explanation"] } def format_quiz(content, topic, subject, grade_level, question_count, ai_generated): """Format quiz from AI response""" return f""" # Quiz: {topic} **Subject:** {subject} **Grade Level:** {grade_level} **Questions:** {question_count} **Generated:** {datetime.now().strftime("%Y-%m-%d")} **AI Generated:** {'Yes' if ai_generated else 'No'} --- ## Quiz Content {content} --- *Generated by BrightMind AI - Educational Technology Platform* """ def format_quiz_from_dict(quiz): """Format quiz from dictionary""" content = f""" # Quiz: {quiz['topic']} **Subject:** {quiz['subject']} **Grade Level:** {quiz['grade_level']} **Questions:** {quiz['question_count']} **Generated:** {quiz['generated_at']} **AI Generated:** {'Yes' if quiz['ai_generated'] else 'No'} --- ## Questions """ for i, question in enumerate(quiz['questions'], 1): content += f"### Question {i}: {question['type']}\n\n" content += f"**{question['question']}**\n\n" if question['type'] == 'Multiple Choice': for j, option in enumerate(question['options']): content += f"{chr(65 + j)}. {option}\n" content += f"\n**Correct Answer:** {question['correct_answer']}\n\n" elif question['type'] == 'True/False': content += f"**Answer:** {question['correct_answer']}\n\n" elif question['type'] == 'Short Answer': content += f"**Expected Answer:** {question['correct_answer']}\n\n" elif question['type'] == 'Fill in the Blank': content += f"**Answer:** {question['correct_answer']}\n\n" content += f"**Explanation:** {question['explanation']}\n\n" content += "---\n\n" content += "\n*Generated by BrightMind AI - Educational Technology Platform*" return content def send_feedback_email(feedback_type, rating, comments, user_email=""): """Send feedback email to admin""" if not SMTP_USERNAME or not SMTP_PASSWORD: print("⚠️ SMTP credentials not configured, storing feedback locally") # Store feedback locally if no email configured feedback_data = { "timestamp": datetime.now().isoformat(), "feedback_type": feedback_type, "rating": rating, "comments": comments, "user_email": user_email } # Save to local file try: with open("feedback_log.json", "a") as f: f.write(json.dumps(feedback_data) + "\n") return "✅ Feedback saved successfully! (Email not configured)" except Exception as e: return f"❌ Error saving feedback: {str(e)}" try: # Create message msg = MIMEMultipart() msg['From'] = SMTP_USERNAME msg['To'] = FEEDBACK_EMAIL msg['Subject'] = f"BrightMind AI Feedback - {feedback_type}" # Create email body body = f""" New Feedback Received from BrightMind AI Platform Feedback Type: {', '.join(feedback_type) if isinstance(feedback_type, list) else feedback_type} Rating: {rating}/5 User Email: {user_email if user_email else 'Not provided'} Comments: {comments} Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} --- This feedback was automatically sent from the BrightMind AI platform. """ msg.attach(MIMEText(body, 'plain')) # Send email server = smtplib.SMTP(SMTP_SERVER, SMTP_PORT) server.starttls() server.login(SMTP_USERNAME, SMTP_PASSWORD) text = msg.as_string() server.sendmail(SMTP_USERNAME, FEEDBACK_EMAIL, text) server.quit() print(f"✅ Feedback email sent successfully to {FEEDBACK_EMAIL}") return "✅ Thank you! Your feedback has been sent successfully." except Exception as e: print(f"❌ Error sending feedback email: {str(e)}") return f"❌ Error sending feedback: {str(e)}" def submit_feedback(feedback_type, rating, comments, user_email): """Handle feedback submission""" if not feedback_type: return "❌ Please select at least one feedback type." if not rating: return "❌ Please provide a rating." if not comments.strip(): return "❌ Please provide your comments." # Save to database first db_success = save_feedback_to_db(feedback_type, rating, comments, user_email) if not db_success: return "❌ Error saving feedback to database. Please try again." # Try to send email (optional) email_result = send_feedback_email(feedback_type, rating, comments, user_email) if "✅" in email_result: return "✅ Thank you! Your feedback has been saved and sent successfully." else: return "✅ Thank you! Your feedback has been saved successfully. (Email notification not available)" def init_database(): """Initialize the SQLite database""" try: conn = sqlite3.connect(DATABASE_FILE) cursor = conn.cursor() # Create feedback table cursor.execute(''' CREATE TABLE IF NOT EXISTS feedback ( id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp TEXT NOT NULL, feedback_type TEXT NOT NULL, rating INTEGER NOT NULL, comments TEXT NOT NULL, user_email TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP ) ''') conn.commit() conn.close() print("✅ Database initialized successfully") return True except Exception as e: print(f"❌ Database initialization error: {e}") return False def save_feedback_to_db(feedback_type, rating, comments, user_email=""): """Save feedback to SQLite database""" try: conn = sqlite3.connect(DATABASE_FILE) cursor = conn.cursor() # Convert feedback_type list to string feedback_type_str = ', '.join(feedback_type) if isinstance(feedback_type, list) else str(feedback_type) cursor.execute(''' INSERT INTO feedback (timestamp, feedback_type, rating, comments, user_email) VALUES (?, ?, ?, ?, ?) ''', ( datetime.now().isoformat(), feedback_type_str, rating, comments, user_email )) conn.commit() conn.close() print("✅ Feedback saved to database successfully") return True except Exception as e: print(f"❌ Database save error: {e}") return False def get_feedback_stats(): """Get feedback statistics for admin""" try: conn = sqlite3.connect(DATABASE_FILE) cursor = conn.cursor() # Get total feedback count cursor.execute('SELECT COUNT(*) FROM feedback') total_feedback = cursor.fetchone()[0] # Get average rating cursor.execute('SELECT AVG(rating) FROM feedback') avg_rating = cursor.fetchone()[0] avg_rating = round(avg_rating, 2) if avg_rating else 0 # Get recent feedback (last 5) cursor.execute(''' SELECT timestamp, feedback_type, rating, comments, user_email FROM feedback ORDER BY created_at DESC LIMIT 5 ''') recent_feedback = cursor.fetchall() conn.close() return { 'total_feedback': total_feedback, 'avg_rating': avg_rating, 'recent_feedback': recent_feedback } except Exception as e: print(f"❌ Database stats error: {e}") return { 'total_feedback': 0, 'avg_rating': 0, 'recent_feedback': [] } def view_feedback_admin(): """Admin function to view all feedback""" try: conn = sqlite3.connect(DATABASE_FILE) cursor = conn.cursor() cursor.execute(''' SELECT id, timestamp, feedback_type, rating, comments, user_email, created_at FROM feedback ORDER BY created_at DESC ''') feedback_data = cursor.fetchall() conn.close() if not feedback_data: return "No feedback found in database." # Format feedback data result = "# 📊 Feedback Database\n\n" result += f"**Total Feedback Entries:** {len(feedback_data)}\n\n" for entry in feedback_data: id_val, timestamp, feedback_type, rating, comments, user_email, created_at = entry result += f"## Entry #{id_val}\n" result += f"**Date:** {timestamp}\n" result += f"**Type:** {feedback_type}\n" result += f"**Rating:** {rating}/5 ⭐\n" result += f"**Comments:** {comments}\n" result += f"**Email:** {user_email if user_email else 'Not provided'}\n" result += f"**Created:** {created_at}\n" result += "---\n\n" return result except Exception as e: return f"❌ Error accessing database: {str(e)}" # Albert Chatbot Functions def call_hugging_face_api(prompt): """Call Hugging Face API for Albert chatbot""" if not HF_TOKEN: return None headers = { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json" } data = { "model": "deepseek-ai/DeepSeek-V3-0324", "messages": [ { "role": "user", "content": prompt } ], "max_tokens": 200, "temperature": 0.7 } try: response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=data, timeout=30 ) if response.status_code == 200: result = response.json() return result['choices'][0]['message']['content'] else: print(f"❌ Albert API error: {response.status_code}") return None except Exception as e: print(f"❌ Albert API error: {str(e)}") return None def call_hugging_face_api_content(prompt, content_length="Medium (3-5 pages)"): """Call Hugging Face API for content generation with dynamic tokens based on length""" if not HF_TOKEN: return None # Set max_tokens based on content length token_limits = { "Short (1-2 pages)": 2000, "Medium (3-5 pages)": 4000, "Long (6+ pages)": 6000 } max_tokens = token_limits.get(content_length, 4000) headers = { "Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json" } data = { "model": "deepseek-ai/DeepSeek-V3-0324", "messages": [ { "role": "user", "content": prompt } ], "max_tokens": max_tokens, "temperature": 0.7 } try: print(f"🌐 Making API call for content generation...") print(f"🔗 Endpoint: https://router.huggingface.co/v1/chat/completions") print(f"🔑 Token available: {HF_TOKEN is not None}") print(f"📝 Prompt length: {len(prompt)} characters") response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=data, timeout=60 # Longer timeout for content generation ) print(f"📊 Response status: {response.status_code}") if response.status_code == 200: result = response.json() content = result['choices'][0]['message']['content'] print(f"✅ Content generation successful! Length: {len(content)} characters") return content else: print(f"❌ Content API error: {response.status_code}") print(f"❌ Response: {response.text}") return None except Exception as e: print(f"❌ Content API error: {str(e)}") return None def generate_image_with_huggingface(prompt, topic, content_type): """Generate image using Hugging Face with better size management""" try: if not HF_TOKEN: return None headers = {"Authorization": f"Bearer {HF_TOKEN}"} # Use more stable models and smaller sizes working_models = [ "stabilityai/stable-diffusion-xl-base-1.0", "runwayml/stable-diffusion-v1-5", "CompVis/stable-diffusion-v1-4" ] # Create more focused prompt clean_prompt = f"educational diagram {topic}, simple illustration, clean, minimal" for model_name in working_models: try: payload = { "inputs": clean_prompt, "parameters": { "num_inference_steps": 20, "guidance_scale": 7.0, "width": 512, # Smaller size "height": 384 # Smaller size } } response = requests.post( f"https://api-inference.huggingface.co/models/{model_name}", headers=headers, json=payload, timeout=30 ) if response.status_code == 200: content_length = len(response.content) if content_length > 5000: # Valid image data try: # Check if response is JSON error json_response = response.json() if "error" in json_response: continue except: # Not JSON, should be image data import base64 try: image_base64 = base64.b64encode(response.content).decode('utf-8') # Limit size to prevent display issues if len(image_base64) > 150000: # ~112KB limit print(f"⚠️ Image too large ({len(image_base64)} chars), skipping") continue return f"data:image/png;base64,{image_base64}" except Exception as e: print(f"❌ Base64 encoding error: {str(e)}") continue elif response.status_code == 503: print(f"Model {model_name} loading...") continue else: print(f"HTTP {response.status_code} for {model_name}") continue except Exception as model_error: print(f"Error with {model_name}: {str(model_error)}") continue return None except Exception as e: print(f"Critical error in image generation: {str(e)}") return None def get_educational_image_url(topic, description, content_type): """Get educational image URL using Hugging Face generation first, then fallbacks""" try: # Create a detailed prompt for image generation image_prompt = f"Educational illustration of {topic}, {description}, {content_type}, clean diagram, colorful, professional, suitable for middle school students" print(f"🎨 Generating image with prompt: {image_prompt}") # Try to generate image with Hugging Face first generated_image = generate_image_with_huggingface(image_prompt, topic, content_type) if generated_image: print(f"✅ Generated image successfully") return generated_image # Fallback to reliable sources if generation fails print("🔄 Image generation failed, using fallback sources") educational_sources = [ # 1. Lorem Picsum (reliable placeholder) f"https://picsum.photos/800/600?random={hash(topic) % 1000}", # 2. Placeholder with educational styling f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+Educational+Content", # 3. Educational diagram generator f"https://via.placeholder.com/800x600/4f46e5/ffffff?text={content_type.replace(' ', '+')}+{topic.replace(' ', '+')}", ] # Try each fallback source for i, url in enumerate(educational_sources): try: print(f"🔍 Trying fallback source {i+1}: {url}") response = requests.head(url, timeout=5) if response.status_code == 200: print(f"✅ Found working fallback image: {url}") return url except: continue # Final fallback return f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+{content_type.replace(' ', '+')}" except Exception as e: print(f"❌ Image URL generation error: {str(e)}") return f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+{content_type.replace(' ', '+')}" def generate_real_image(description, topic, content_type): """Generate actual image for educational content with better error handling""" try: # Try to generate image with Hugging Face image_data = generate_image_with_huggingface(description, topic, content_type) if image_data and image_data.startswith('data:image'): # Check image size and compress if too large if len(image_data) > 100000: # If larger than ~75KB print(f"🖼️ Image too large ({len(image_data)} chars), using fallback") return generate_image_fallback(description, topic, content_type) # Ensure proper format if not image_data.startswith('data:image/png;base64,'): if 'base64,' in image_data: base64_part = image_data.split('base64,', 1)[1] image_data = f"data:image/png;base64,{base64_part}" # Create HTML with better error handling safe_description = description.replace('"', '"').replace("'", ''') # Use simpler HTML structure for better compatibility return f'''
{safe_description}

{safe_description}

''' else: print(f"🖼️ No valid image data, using fallback for: {description}") return generate_image_fallback(description, topic, content_type) except Exception as e: print(f"❌ Image generation error: {str(e)}") return generate_image_fallback(description, topic, content_type) def generate_image_fallback(description, topic, content_type): """Generate a styled text placeholder for images""" return f'''
📸

{description}

Topic: {topic} | Type: {content_type}
''' def fix_malformed_image_html(content): """Fix malformed image HTML with better error handling""" import re # Only fix truly malformed img tags, preserve valid ones # Remove img tags that have malformed src attributes (not starting with data: or http) content = re.sub(r']*src="(?!data:|http)[^"]*"[^>]*>', r'
📸 Image placeholder
', content) return content def generate_image_placeholder(description, topic, content_type): """Generate image placeholder with description for educational content (fallback)""" # Create a structured image placeholder that can be replaced with real images image_placeholder = f"""
🖼️

📸 Image Placeholder

Description: {description}

Topic: {topic}

Content Type: {content_type}

💡 This image would enhance the learning experience
""" return image_placeholder def create_content_agent(topic, subject, grade_level, difficulty, content_type, content_length): """Enhanced content generation agent with multimedia capabilities""" # Define image suggestions based on content type and topic image_suggestions = { "Worksheets": [ f"Visual diagram showing {topic} concepts", f"Step-by-step illustration for {topic} problems", f"Interactive chart demonstrating {topic} principles" ], "Handouts": [ f"Infographic summarizing {topic} key points", f"Visual timeline of {topic} development", f"Diagram showing {topic} relationships" ], "Study Guides": [ f"Concept map of {topic} topics", f"Visual summary of {topic} main ideas", f"Flowchart showing {topic} processes" ], "Activities": [ f"Step-by-step visual instructions for {topic} activity", f"Before and after examples of {topic} work", f"Visual checklist for {topic} completion" ], "Presentations": [ f"Title slide with {topic} theme", f"Key concept visualization for {topic}", f"Summary slide with {topic} highlights" ], "Lesson Materials": [ f"Opening visual hook for {topic} lesson", f"Visual examples of {topic} in real life", f"Closing visual summary of {topic} learning" ] } # Get image suggestions for this content type images = image_suggestions.get(content_type, [f"Educational illustration about {topic}"]) # Define content type specific formatting and structure content_type_formats = { "Worksheets": { "structure": "Interactive worksheet with numbered problems, answer spaces, and self-assessment", "formatting": "Use checkboxes, fill-in-the-blank spaces, numbered problems, and answer keys", "sections": ["Introduction", "Learning Objectives", "Practice Problems", "Word Problems", "Self-Assessment", "Answer Key"], "special_elements": "Include answer spaces, checkboxes for completion, and step-by-step problem solving" }, "Handouts": { "structure": "Reference handout with key concepts, formulas, and quick lookup information", "formatting": "Use bullet points, highlighted boxes, tables, and clear headings", "sections": ["Overview", "Key Concepts", "Important Formulas", "Examples", "Quick Reference", "Additional Resources"], "special_elements": "Include summary tables, highlighted key terms, and visual diagrams" }, "Study Guides": { "structure": "Comprehensive study guide with summaries, review materials, and practice questions", "formatting": "Use numbered lists, checkboxes, summary tables, and review checklists", "sections": ["Topic Summary", "Key Points", "Important Concepts", "Practice Questions", "Review Checklist", "Test Tips"], "special_elements": "Include memory aids, mnemonics, and test-taking strategies" }, "Activities": { "structure": "Hands-on activity with detailed instructions, materials list, and observation sheets", "formatting": "Use numbered steps, safety notes, observation tables, and discussion questions", "sections": ["Activity Overview", "Materials Needed", "Step-by-Step Instructions", "Observations", "Discussion Questions", "Extensions"], "special_elements": "Include safety considerations, data collection sheets, and reflection questions" }, "Presentations": { "structure": "Presentation slides with talking points, visual elements, and interactive components", "formatting": "Use slide format with bullet points, speaker notes, and visual transitions", "sections": ["Title Slide", "Agenda", "Key Points", "Examples", "Summary", "Q&A"], "special_elements": "Include speaker notes, interactive polls, and visual cues" }, "Lesson Materials": { "structure": "Complete lesson materials for teachers with instructions, activities, and assessments", "formatting": "Include teacher tips, timing suggestions, differentiation options, and rubrics", "sections": ["Lesson Overview", "Learning Objectives", "Teacher Notes", "Student Activities", "Assessment", "Resources"], "special_elements": "Include pacing guides, differentiation strategies, and assessment rubrics" } } format_info = content_type_formats.get(content_type, content_type_formats["Worksheets"]) # Define length-specific constraints length_constraints = { "Short (1-2 pages)": { "max_sections": 4, "max_words": 800, "instruction": "Keep content VERY concise and focused. Maximum 4 sections. Use bullet points and brief explanations. Avoid lengthy examples." }, "Medium (3-5 pages)": { "max_sections": 6, "max_words": 1500, "instruction": "Provide comprehensive coverage with moderate detail. Include 5-6 sections with examples and practice." }, "Long (6+ pages)": { "max_sections": 8, "max_words": 2500, "instruction": "Create extensive content with detailed explanations, multiple examples, and comprehensive practice sections." } } length_info = length_constraints.get(content_length, length_constraints["Medium (3-5 pages)"]) # Create enhanced prompt for agent-based generation agent_prompt = f"""You are an advanced educational content generation agent. Create comprehensive educational {content_type.lower()} with multimedia integration. TOPIC: {topic} SUBJECT: {subject} GRADE LEVEL: {grade_level} DIFFICULTY: {difficulty} CONTENT TYPE: {content_type} LENGTH: {content_length} LENGTH CONSTRAINTS (CRITICAL - MUST FOLLOW): - Target Length: {content_length} - Maximum Sections: {length_info['max_sections']} - Maximum Words: {length_info['max_words']} - Instruction: {length_info['instruction']} CONTENT TYPE SPECIFIC REQUIREMENTS: Structure: {format_info['structure']} Formatting: {format_info['formatting']} Sections: {', '.join(format_info['sections'])} Special Elements: {format_info['special_elements']} AGENT INSTRUCTIONS: 1. Generate rich, engaging educational content specifically formatted for {content_type} 2. STRICTLY follow the length constraints: {length_info['instruction']} 3. Include specific image placeholders with detailed descriptions 4. Create interactive elements and visual aids appropriate for {content_type} 5. Structure content for maximum learning impact using {content_type} best practices 6. Include multimedia integration points 7. Apply {content_type}-specific formatting and layout 8. KEEP CONTENT WITHIN {length_info['max_words']} WORDS MAXIMUM 9. IMPORTANT: DO NOT USE LaTeX expressions. Use simple HTML/markdown formatting instead: - For fractions: use "a/b" or "a over b" instead of \frac{{a}}{{b}} - For math: use plain text like "Speed = Distance/Time" instead of \text{{Speed}} = \frac{{\text{{Distance}}}}{{\text{{Time}}}} - For fill-in-blanks: use "_____" instead of \\_\\_\\_ - For answer boxes: use "[ANSWER: ___]" instead of \\boxed{{\\_\\_}} - For subscripts: use "H2O" instead of H_2O - For superscripts: use "x²" instead of x^2 CONTENT STRUCTURE FOR {content_type.upper()}: {chr(10).join([f"- {section}" for section in format_info['sections']])} IMAGE INTEGRATION: Include these specific image suggestions: {chr(10).join([f"- {img}" for img in images])} FORMAT REQUIREMENTS FOR {content_type.upper()}: - Use markdown formatting appropriate for {content_type} - Include image placeholders: [IMAGE: description] - Add interactive elements: [INTERACTIVE: description] - Include multimedia links: [MEDIA: description] - Use engaging headers and subheaders - Include callout boxes and highlights - Apply {format_info['formatting']} - NO LaTeX expressions - use simple HTML/markdown only - For math: use plain text like "Speed = Distance/Time" - For fractions: use "a/b" or "a over b" - For blanks: use "_____" (5 underscores) - For answer boxes: use "[ANSWER: ___]" SPECIAL FORMATTING FOR {content_type.upper()}: {format_info['special_elements']} Make the content visually rich, educationally sound, and ready for multimedia integration with {content_type}-specific formatting.""" return agent_prompt def convert_math_to_html(text): """Convert LaTeX-style math expressions to HTML""" import re # Handle LaTeX math delimiters first # Convert \[ ... \] to display math text = re.sub(r'\\\[([^\]]+)\\\]', r'
\1
', text) # Convert \( ... \) to inline math text = re.sub(r'\\\(([^)]+)\\\)', r'\1', text) # Handle fractions: \frac{a}{b} text = re.sub(r'\\frac\{([^}]+)\}\{([^}]+)\}', r'
\1
\2
', text) # Handle simple fractions like 120/2 text = re.sub(r'(\d+)/(\d+)', r'
\1
\2
', text) # Handle superscripts: ^{text} text = re.sub(r'\^\{([^}]+)\}', r'\1', text) text = re.sub(r'\^(\w)', r'\1', text) # Handle subscripts: _{text} text = re.sub(r'_\{([^}]+)\}', r'\1', text) text = re.sub(r'_(\w)', r'\1', text) # Handle \text{} commands first (before other processing) text = re.sub(r'\\text\{([^}]+)\}', r'\1', text) # Handle \boxed{} commands text = re.sub(r'\\boxed\{([^}]+)\}', r'\1', text) # Handle underscore patterns that are not subscripts (like \_\_\_) text = re.sub(r'\\_+', r' ', text) # Re-process fractions after text commands are handled text = re.sub(r'\\frac\{([^}]+)\}\{([^}]+)\}', r'
\1
\2
', text) # Handle common math symbols text = text.replace('\\times', '×') text = text.replace('\\rightarrow', '→') text = text.replace('\\leftarrow', '←') text = text.replace('\\leq', '≤') text = text.replace('\\geq', '≥') text = text.replace('\\neq', '≠') text = text.replace('\\approx', '≈') text = text.replace('\\pm', '±') text = text.replace('\\sqrt', '√') text = text.replace('\\pi', 'π') return text def solve_algebra_problem(problem_type, *args): """Solve algebra problems interactively""" try: if problem_type == "speed": distance, time = args result = algebra_solver.solve_speed_problem(float(distance), float(time)) elif problem_type == "volume": mass, density = args result = algebra_solver.solve_volume_problem(float(mass), float(density)) elif problem_type == "linear": a, b, c = args result = algebra_solver.solve_linear_equation(float(a), float(b), float(c)) elif problem_type == "quadratic": h, coefficient = args result = algebra_solver.solve_quadratic_equation(float(h), float(coefficient)) else: return "❌ Unknown problem type" if "error" in result: return f"❌ Error: {result['error']}" # Format the solution nicely solution_text = f"✅ **{result['solution']}**\n\n" if "steps" in result: solution_text += "**Steps:**\n" for i, step in enumerate(result["steps"], 1): solution_text += f"{i}. {step}\n" elif "formula" in result: solution_text += f"**Formula:** {result['formula']}\n" solution_text += f"**Calculation:** {result['calculation']}\n" return solution_text except Exception as e: return f"❌ Error solving problem: {str(e)}" def apply_content_styling(content, content_type): """Apply simple and robust styling to educational content""" import re # Skip aggressive HTML cleaning to preserve image content # Only fix specific known issues without removing valid content # Fix only empty div style tags content = re.sub(r'
]*>
', '', content) # Skip most HTML fixes to preserve content integrity # Skip aggressive attribute cleaning to preserve content # Return content as-is for Markdown rendering styled_content = content # Apply Markdown-friendly styling styled_content = re.sub(r'\[ANSWER: ([^\]]+)\]', r'**Answer:** \1', styled_content) styled_content = re.sub(r'___+', r'_________________', styled_content) styled_content = re.sub(r'\[ \]', r'☐', styled_content) styled_content = re.sub(r'☑', r'☑', styled_content) styled_content = re.sub(r'\[(\d+)\]', r'**\1**', styled_content) return styled_content def process_content_with_images(content, topic, content_type): """Process content with improved image handling""" import re print(f"🔍 Starting process_content_with_images - input length: {len(content)}") # Convert LaTeX math first content = convert_math_to_html(content) print(f"🔍 After convert_math_to_html - length: {len(content)}") # Process images with size limits image_patterns = [ r'\[IMAGE:\s*([^\]]+)\]', r'IMAGE:\s*([^\n<]+?)(?=\s*<|$|\n)', ] total_images = 0 for pattern in image_patterns: images = re.findall(pattern, content, re.MULTILINE) total_images += len(images) # Limit number of images to prevent performance issues max_images = 3 for i, image_desc in enumerate(images[:max_images]): if i >= max_images: # Replace remaining image placeholders with simple text remaining_pattern = f'[IMAGE: {image_desc}]' content = content.replace(remaining_pattern, f'\n**📸 Image:** {image_desc.strip()}\n') continue image_html = generate_real_image(image_desc.strip(), topic, content_type) print(f"🖼️ Generated image HTML length: {len(image_html)}") print(f"🖼️ Image HTML preview: {image_html[:200]}...") # Replace with generated content content = content.replace(f'[IMAGE: {image_desc}]', image_html) content = content.replace(f'IMAGE: {image_desc}', image_html) # Clean up any malformed HTML content = fix_malformed_image_html(content) print(f"🔍 After fix_malformed_image_html - length: {len(content)}") # Apply styling content = apply_content_styling(content, content_type) print(f"🔍 After apply_content_styling - length: {len(content)}") return content def generate_albert_response(user_message, user_name, age_group, chat_history): """Generate Albert's response using AI or fallback""" # Try Hugging Face API first if token is available if HF_TOKEN: try: # Create Albert's personality prompt based on age group age_emojis = { "K-2": "🌟", "3-5": "🚀", "6-8": "⚡", "9-12": "🎓", "Adult": "💡" } age_emoji = age_emojis.get(age_group, "🌟") # Build context from chat history context = "" if chat_history and len(chat_history) > 1: context = "\n\nPrevious conversation:\n" for i, (user_msg, albert_msg) in enumerate(chat_history[-3:]): # Last 3 exchanges if user_msg and albert_msg: context += f"User: {user_msg}\nAlbert: {albert_msg}\n" prompt = f"""You are Albert 🧠, a fun and encouraging educational chatbot! User Info: - Name: {user_name} - Age Group: {age_group} {age_emoji} Your personality: - Use lots of emojis and fun expressions! 😊 - Be encouraging and positive - Explain things in a simple, engaging way - Ask follow-up questions to keep learning going - Use age-appropriate language for {age_group} - Be enthusiastic about learning! - Remember our conversation and build on previous topics! {context} Current question: {user_message} Respond as Albert with enthusiasm, emojis, and helpful explanations! Keep it concise (2-3 sentences max).""" # Call Hugging Face API directly for Albert ai_content = call_hugging_face_api(prompt) if ai_content: return ai_content except Exception as e: print(f"❌ Albert API error: {str(e)}") # Fallback responses based on common questions fallback_responses = { "hello": f"Hi {user_name}! 🧠✨ I'm Albert, your learning buddy! What would you like to learn about today? 😊", "hi": f"Hey there {user_name}! 🎉 Ready for some fun learning? Ask me anything! 🚀", "help": f"Of course {user_name}! 🧠 I'm here to help you understand any topic! Just ask me a question and I'll explain it in a fun way! 😊✨", "math": f"Math is awesome {user_name}! 🔢✨ Let me know what specific math topic you're curious about and I'll make it super fun to understand! 🎯", "science": f"Science is fascinating {user_name}! 🔬🌟 Tell me what science topic you want to explore and I'll explain it with cool examples! 🚀", "english": f"Language is powerful {user_name}! 📚💫 What English topic would you like to learn about? I'll make it engaging and fun! ✨", "history": f"History is like a time machine {user_name}! ⏰🎭 What historical period or event interests you? I'll bring it to life! 🌟", "art": f"Art is creativity in action {user_name}! 🎨✨ What kind of art or creative topic would you like to explore? Let's get creative! 🚀" } # Check for keywords in the message message_lower = user_message.lower() for keyword, response in fallback_responses.items(): if keyword in message_lower: return response # Default encouraging response return f"That's a great question {user_name}! 🧠✨ I'd love to help you understand that! Can you tell me more about what specific part you'd like to learn about? I'm here to make learning fun! 😊🚀" def chat_with_albert(user_message, user_name, age_group, chat_history): """Handle the chat conversation with Albert""" if not user_message.strip(): return chat_history, "" # Add user message to history if chat_history is None: chat_history = [] # Add user message chat_history.append([user_message, None]) # Generate Albert's response albert_response = generate_albert_response(user_message, user_name, age_group, chat_history) # Add Albert's response chat_history[-1][1] = f"🧠 Albert: {albert_response}" return chat_history, "" # Initialize database on startup init_database() # Initialize algebra solver algebra_solver = AlgebraSolver() # ==================================== # MATHMIND CHATBOT FUNCTIONS # ==================================== import PyPDF2 import io import base64 import re from typing import List, Dict, Any # Global variables for MathMind mathmind_conversation_history = [] mathmind_pdf_context = "" uploaded_pdfs = [] # List of dicts: {"name": str, "content": str, "summary": str, "timestamp": str} def validate_question_is_math_related(question: str) -> bool: """Check if a question is math-related""" math_keywords = [ 'mathematics', 'math', 'algebra', 'geometry', 'calculus', 'trigonometry', 'arithmetic', 'equation', 'formula', 'theorem', 'proof', 'function', 'derivative', 'integral', 'polynomial', 'fraction', 'decimal', 'percentage', 'statistics', 'probability', 'graph', 'coordinate', 'angle', 'triangle', 'circle', 'square', 'rectangle', 'volume', 'area', 'perimeter', 'solve', 'calculate', 'compute', 'add', 'subtract', 'multiply', 'divide', 'plus', 'minus', 'times', 'equals', 'number', 'digit', 'sum', 'product', 'quotient', 'ratio', 'proportion', 'slope', 'intercept', 'variable', 'constant' ] # Also check for mathematical symbols and patterns math_patterns = [ r'\d+\s*[\+\-\*/\=]\s*\d+', # Basic math operations r'[xy]\s*[\+\-\*/\=]', # Variables in equations r'\b\d+\s*[xy]\b', # Coefficients with variables r'π|pi', # Pi r'√|sqrt', # Square root r'\^|\*\*', # Exponents r'sin|cos|tan|log', # Trig/log functions ] question_lower = question.lower() # Check for math keywords for keyword in math_keywords: if keyword in question_lower: return True # Check for math patterns for pattern in math_patterns: if re.search(pattern, question_lower): return True return False def validate_pdf_content_with_llm(pdf_text: str) -> tuple: """Use LLM to intelligently check if PDF content is math-related and generate summary""" if not HF_TOKEN: # Fallback to keyword-based validation if no token return validate_pdf_content_fallback(pdf_text), "Content validation completed" # Truncate text for LLM analysis (first 2000 characters should be enough) analysis_text = pdf_text[:2000] if len(pdf_text) > 2000 else pdf_text validation_prompt = f"""Analyze the following document content and determine: 1. Is this content primarily related to mathematics? (YES/NO) 2. What specific math topics are covered? (if any) 3. Provide a brief summary of the mathematical content. Document content: {analysis_text} Respond in this exact format: MATH_RELATED: [YES/NO] TOPICS: [list main math topics separated by commas, or "None" if not math-related] SUMMARY: [brief 1-2 sentence summary of the mathematical content, or explanation why it's not math-related] Be strict - only respond YES if the content is clearly about mathematics, mathematical concepts, math problems, or math education.""" headers = {"Authorization": f"Bearer {HF_TOKEN}"} try: payload = { "model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "user", "content": validation_prompt}], "max_tokens": 300, "temperature": 0.1 # Low temperature for consistent analysis } response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=payload, timeout=30 ) if response.status_code == 200: result = response.json() if "choices" in result and len(result["choices"]) > 0: llm_response = result["choices"][0]["message"]["content"] # Parse LLM response is_math_related = "YES" in llm_response.upper() and "MATH_RELATED: YES" in llm_response.upper() # Extract topics and summary topics = "General mathematics" summary = "Math content detected" lines = llm_response.split('\n') for line in lines: if line.startswith('TOPICS:'): topics = line.replace('TOPICS:', '').strip() elif line.startswith('SUMMARY:'): summary = line.replace('SUMMARY:', '').strip() if is_math_related: detailed_summary = f"📊 **Math Topics:** {topics}\n📝 **Summary:** {summary}" return True, detailed_summary else: return False, f"📄 **Analysis:** {summary}" # Fallback if LLM call fails return validate_pdf_content_fallback(pdf_text), "Content validation completed (fallback method)" except Exception as e: print(f"LLM validation error: {str(e)}") return validate_pdf_content_fallback(pdf_text), "Content validation completed (fallback method)" def validate_pdf_content_fallback(pdf_text: str) -> bool: """Fallback keyword-based validation""" math_keywords = [ 'mathematics', 'math', 'algebra', 'geometry', 'calculus', 'trigonometry', 'arithmetic', 'equation', 'formula', 'theorem', 'proof', 'function', 'derivative', 'integral', 'polynomial', 'fraction', 'decimal', 'percentage', 'statistics', 'probability', 'graph', 'coordinate', 'angle', 'triangle', 'circle', 'square', 'rectangle', 'volume', 'area', 'perimeter' ] text_lower = pdf_text.lower() math_score = sum(1 for keyword in math_keywords if keyword in text_lower) return math_score >= 3 def generate_pdf_summary(pdf_text: str) -> str: """Generate a quick summary of math PDF content""" # Extract key math topics mentioned math_topics = [] topic_keywords = { 'Algebra': ['algebra', 'equation', 'variable', 'solve', 'polynomial', 'linear'], 'Geometry': ['geometry', 'triangle', 'circle', 'angle', 'area', 'volume', 'perimeter'], 'Calculus': ['calculus', 'derivative', 'integral', 'limit', 'function'], 'Trigonometry': ['trigonometry', 'sin', 'cos', 'tan', 'sine', 'cosine'], 'Statistics': ['statistics', 'probability', 'data', 'mean', 'median', 'mode'], 'Arithmetic': ['addition', 'subtraction', 'multiplication', 'division', 'fraction'] } text_lower = pdf_text.lower() for topic, keywords in topic_keywords.items(): if any(keyword in text_lower for keyword in keywords): math_topics.append(topic) # Generate summary if math_topics: topics_str = ", ".join(math_topics[:3]) # Limit to first 3 topics summary = f"📊 **Math Topics Detected:** {topics_str}" if len(math_topics) > 3: summary += f" and {len(math_topics) - 3} more" else: summary = "📊 **Math Content:** General mathematics material" # Add word count word_count = len(pdf_text.split()) summary += f"\n📄 **Content Length:** ~{word_count} words" return summary def extract_pdf_text(pdf_file) -> tuple: """Extract text from uploaded PDF and validate if it's math-related using LLM""" try: if pdf_file is None: return "", False, "No file uploaded" # Read PDF content pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_file)) text = "" for page in pdf_reader.pages: text += page.extract_text() + "\n" if not text.strip(): return "", False, "❌ Could not extract text from PDF. Please ensure it's a text-based PDF." # Use LLM to validate if it's math-related and get summary is_math_related, analysis_summary = validate_pdf_content_with_llm(text) if not is_math_related: return "", False, f"⚠️ I only accept materials that are related to mathematics. Please upload a math-related PDF (homework, textbook, worksheets, etc.) for me to help you with.\n\n{analysis_summary}" # Return text with LLM-generated summary return text, True, f"✅ Math PDF successfully processed!\n\n{analysis_summary}" except Exception as e: return "", False, f"❌ Error processing PDF: {str(e)}" def apply_content_guardrails(response: str) -> str: """Apply guardrails to prevent inappropriate content""" # List of inappropriate content patterns inappropriate_patterns = [ r'\b(hate|racist|sexist|discriminat)\w*\b', r'\b(kill|murder|suicide|harm yourself)\b', r'\b(stupid|idiot|dumb)\b', r'\b(nazi|fascist)\b', ] # Check for inappropriate content response_lower = response.lower() for pattern in inappropriate_patterns: if re.search(pattern, response_lower): return """I'm here to help you learn mathematics in a positive and supportive way. Let's focus on your math questions and keep our conversation educational and respectful. What math topic would you like to explore today? 📚✨""" return response def generate_math_image(description: str) -> str: """Generate mathematical diagrams or shapes using text-to-image""" try: if not HF_TOKEN: return "📊 [Mathematical diagram would be generated here]" headers = {"Authorization": f"Bearer {HF_TOKEN}"} # Enhanced prompt for mathematical content math_prompt = f"mathematical diagram, {description}, clean educational illustration, white background, clear labels, geometric shapes, mathematical notation" models = [ "stabilityai/stable-diffusion-xl-base-1.0", "runwayml/stable-diffusion-v1-5" ] for model in models: try: payload = { "inputs": math_prompt, "parameters": { "num_inference_steps": 25, "guidance_scale": 7.5, "width": 512, "height": 512 } } response = requests.post( f"https://api-inference.huggingface.co/models/{model}", headers=headers, json=payload, timeout=30 ) if response.status_code == 200 and len(response.content) > 1000: image_base64 = base64.b64encode(response.content).decode('utf-8') return f"""
{description}

{description}

""" except Exception as e: continue return f"📊 [Mathematical diagram: {description}]" except Exception as e: return f"📊 [Mathematical diagram: {description}]" def format_latex_response(text: str) -> str: """Minimal cleanup to avoid corrupting LaTeX expressions""" import re # Very minimal approach - just clean up obvious issues formatted_text = text # Only fix the most basic malformed patterns formatted_text = formatted_text.replace('Area=$$π Area=$$π $r^{2}$ $$', 'Area = $\\pi r^2$') formatted_text = formatted_text.replace('$$π', '$\\pi$') formatted_text = formatted_text.replace('π$$', '$\\pi$') # Remove empty dollar signs only formatted_text = re.sub(r'\$\s*\$', '', formatted_text) # Don't try to add LaTeX formatting - let the API handle it properly return formatted_text def mathmind_chat_with_typing(message: str, grade_level: str, chat_history: List, pdf_context_display: str) -> tuple: """MathMind chat with typing effect and question validation""" import time global mathmind_conversation_history, mathmind_pdf_context if not message.strip(): return chat_history, "" # Add user message to conversation history mathmind_conversation_history.append({ "role": "user", "content": f"[Grade: {grade_level}] {message}" }) # Show typing indicator chat_history.append([message, "🤔 *thinking...*"]) # Generate response with context response = call_hugging_face_math_api(message, mathmind_conversation_history, mathmind_pdf_context, grade_level) # Format LaTeX expressions response = format_latex_response(response) # Add assistant response to history mathmind_conversation_history.append({ "role": "assistant", "content": response }) # Update chat history with real response chat_history[-1] = [message, response] return chat_history, "" def check_document_relevance(question: str, pdf_context: str) -> tuple: """Use LLM to check if uploaded documents can answer the question""" if not HF_TOKEN or not pdf_context.strip(): return False, "", "No documents uploaded or no API token" # Extract document names from context doc_names = [] for line in pdf_context.split('\n'): if line.startswith('=== Content from ') and line.endswith(' ==='): doc_name = line.replace('=== Content from ', '').replace(' ===', '') doc_names.append(doc_name) relevance_prompt = f"""Analyze the following question and document content to determine if the documents contain enough information to answer the question. Question: {question} Document Content: {pdf_context[:2000]} Instructions: 1. Can the uploaded documents provide a complete or partial answer to this question? 2. If yes, which specific document(s) contain the relevant information? 3. If no, the question should be answered from general knowledge. Respond in this exact format: DOCUMENT_CAN_ANSWER: [YES/NO] RELEVANT_DOCUMENTS: [list document names separated by commas, or "NONE"] EXPLANATION: [brief explanation of why documents can or cannot answer the question] Be strict - only respond YES if the documents actually contain specific information that directly addresses the question.""" headers = {"Authorization": f"Bearer {HF_TOKEN}"} try: payload = { "model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "user", "content": relevance_prompt}], "max_tokens": 200, "temperature": 0.1 # Low temperature for consistent analysis } response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=payload, timeout=30 ) if response.status_code == 200: result = response.json() if "choices" in result and len(result["choices"]) > 0: llm_response = result["choices"][0]["message"]["content"] # Parse LLM response can_answer = "YES" in llm_response.upper() and "DOCUMENT_CAN_ANSWER: YES" in llm_response.upper() relevant_docs = "" lines = llm_response.split('\n') for line in lines: if line.startswith('RELEVANT_DOCUMENTS:'): relevant_docs = line.replace('RELEVANT_DOCUMENTS:', '').strip() break return can_answer, relevant_docs, llm_response return False, "", "LLM analysis failed" except Exception as e: print(f"Document relevance check error: {str(e)}") return False, "", f"Error: {str(e)}" def call_hugging_face_math_api(prompt: str, conversation_history: List[Dict], pdf_context: str = "", grade_level: str = "6-8") -> str: """Call Hugging Face API for math-specific responses""" if not HF_TOKEN: return "❌ Hugging Face token not found. Please configure HF_TOKEN." # Ensure pdf_context is a string if hasattr(pdf_context, 'value'): pdf_context = pdf_context.value elif not isinstance(pdf_context, str): pdf_context = str(pdf_context) if pdf_context else "" # Map grade levels to age ranges and language complexity grade_mapping = { "K-2": {"age": "5-8 years old", "language": "very simple words, basic counting, shapes, colors"}, "3-5": {"age": "8-11 years old", "language": "simple explanations, basic operations, real-world examples"}, "6-8": {"age": "11-14 years old", "language": "clear explanations with some technical terms, algebra basics"}, "9-12": {"age": "14-18 years old", "language": "more complex concepts, proper mathematical terminology"}, "College": {"age": "18+ years old", "language": "advanced mathematical concepts and formal notation"} } grade_info = grade_mapping.get(grade_level, grade_mapping["6-8"]) # Check if documents can answer the question can_use_docs, relevant_docs, analysis = check_document_relevance(prompt, pdf_context) # Debug logging print(f"🔍 Document relevance check:") print(f" Question: {prompt[:100]}...") print(f" Can use docs: {can_use_docs}") print(f" Relevant docs: {relevant_docs}") print(f" Analysis: {analysis[:200]}...") # Determine response approach and context if can_use_docs and pdf_context.strip(): response_approach = f"Use the uploaded document(s) to answer. Specifically mention: {relevant_docs}" context_to_use = pdf_context print(f"📄 Using document context from: {relevant_docs}") else: response_approach = "Answer from your general knowledge. Use phrases like 'According to my understanding...' or 'Based on what I know...'" context_to_use = "No relevant document context - use general knowledge" print(f"🧠 Using general knowledge (no relevant docs found)") # Build context-aware interactive prompt context_prompt = f"""You are an enthusiastic, interactive educational tutor for {grade_info['age']} students. While you specialize in mathematics, you can help with various educational topics and make learning exciting and memorable! PERSONALITY & INTERACTION STYLE: - Be conversational, friendly, and genuinely excited about learning - Ask engaging follow-up questions to keep the conversation going - Share fascinating real-world connections and "did you know?" facts - Use age-appropriate humor and analogies for {grade_info['age']} students - Encourage curiosity with questions like "What do you think happens if...?" or "Can you guess why...?" - Celebrate student insights with enthusiasm: "Excellent thinking!" or "That's a great question!" GRADE-SPECIFIC ENGAGEMENT: {f"- Use simple, relatable examples from everyday life (pizza, toys, playground)" if grade_level == "K-2" else ""} {f"- Connect to video games, sports, and fun activities they enjoy" if grade_level == "3-5" else ""} {f"- Reference pop culture, social media, and things they find cool" if grade_level == "6-8" else ""} {f"- Use real-world applications like engineering, finance, and technology" if grade_level == "9-12" else ""} {f"- Discuss advanced applications in science, research, and professional fields" if grade_level == "College" else ""} INTERACTIVE TEACHING APPROACH: 1. Start with a hook - something surprising, funny, or amazing about the topic 2. Explain the concept using {grade_info['language']} 3. Give a practical, relatable example 4. Ask a thought-provoking question to check understanding 5. Share a fun fact or real-world application 6. End with an engaging question to continue the conversation CONTENT RULES: - NO generic introductions like "I'm MathMind" - Use SIMPLE, CLEAN LaTeX: $\\pi r^2$ (good), NOT complicated nested expressions - Write math clearly: Area = $\\pi r^2$ where A is area, r is radius - NEVER use malformed LaTeX like $$π or multiple $ signs - Include emojis sparingly but effectively (🤔💡🎯✨) - Make every response feel like a conversation, not a lecture - Always end with a question or invitation to explore more - Keep LaTeX expressions SHORT and SIMPLE to avoid rendering issues RESPONSE APPROACH: {response_approach} Document Context: {context_to_use[:1000] if context_to_use != "No relevant document context - use general knowledge" else "No relevant document context - use general knowledge"} Recent Conversation: {chr(10).join([f"Student: {msg['content']}" if msg['role'] == 'user' else f"You: {msg['content']}" for msg in conversation_history[-4:]])} Student's Current Question: {prompt} Respond with enthusiasm and curiosity, making this math concept come alive for a {grade_info['age']} student! Remember to ask engaging follow-up questions.""" headers = {"Authorization": f"Bearer {HF_TOKEN}"} try: payload = { "model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "user", "content": context_prompt}], "max_tokens": 800, "temperature": 0.7 } response = requests.post( "https://router.huggingface.co/v1/chat/completions", headers=headers, json=payload, timeout=60 ) if response.status_code == 200: result = response.json() if "choices" in result and len(result["choices"]) > 0: raw_response = result["choices"][0]["message"]["content"] # Apply guardrails safe_response = apply_content_guardrails(raw_response) # Format with LaTeX formatted_response = format_latex_response(safe_response) return formatted_response return "❌ Sorry, I'm having trouble connecting right now. Please try again!" except Exception as e: return f"❌ Error: {str(e)}" def mathmind_chat(message: str, grade_level: str, chat_history: List, pdf_context_display: str) -> tuple: """Main MathMind chat function with memory and context""" global mathmind_conversation_history, mathmind_pdf_context if not message.strip(): return chat_history, "" # Add user message to conversation history mathmind_conversation_history.append({ "role": "user", "content": f"[Grade: {grade_level}] {message}" }) # Generate response with context (use global variable, not the display parameter) response = call_hugging_face_math_api(message, mathmind_conversation_history, mathmind_pdf_context, grade_level) # Add assistant response to history mathmind_conversation_history.append({ "role": "assistant", "content": response }) # Update chat history for display chat_history.append([message, response]) return chat_history, "" def clear_mathmind_chat(): """Clear MathMind conversation history""" global mathmind_conversation_history mathmind_conversation_history = [] return [], "" def update_pdf_context(pdf_file): """Add PDF to MathMind context (supports multiple files)""" global mathmind_pdf_context, uploaded_pdfs if pdf_file is None: return "No PDF uploaded", generate_context_display() # Handle both file objects and bytes data if hasattr(pdf_file, 'read'): # It's a file object, read the bytes pdf_data = pdf_file.read() file_name = getattr(pdf_file, 'name', 'uploaded_file.pdf') else: # It's already bytes data pdf_data = pdf_file file_name = 'uploaded_file.pdf' # Check if file already exists for existing_pdf in uploaded_pdfs: if existing_pdf["name"] == file_name: return f"⚠️ File '{file_name}' is already uploaded", generate_context_display() text, is_valid, message = extract_pdf_text(pdf_data) if is_valid: # Add to uploaded files list uploaded_pdfs.append({ "name": file_name, "content": text, "summary": message.split('\n\n', 1)[1] if '\n\n' in message else "Math content", "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M") }) # Update combined context update_combined_context() return message, generate_context_display() else: return message, generate_context_display() def update_combined_context(): """Update the combined PDF context from all uploaded files""" global mathmind_pdf_context if not uploaded_pdfs: mathmind_pdf_context = "" return # Combine all PDF contexts with file attribution context_parts = [] for pdf in uploaded_pdfs: context_parts.append(f"=== Content from {pdf['name']} ===\n{pdf['content']}\n") mathmind_pdf_context = "\n".join(context_parts) def generate_context_display(): """Generate HTML display for current PDF context""" if not uploaded_pdfs: return "

No PDF context loaded

" html_parts = [f"

📚 Loaded Documents ({len(uploaded_pdfs)}):

"] for i, pdf in enumerate(uploaded_pdfs, 1): # Create a clean card for each document card_html = f"""
📄 {i}. {pdf['name']}

⏰ Added: {pdf['timestamp']}

{pdf['summary'][:300]}{'...' if len(pdf['summary']) > 300 else ''}
""" html_parts.append(card_html) # Add instruction for removing files if len(uploaded_pdfs) > 0: html_parts.append("""

💡 Tip: To remove individual documents, you can upload new ones to replace them, or use "Remove All Documents" below to clear everything.

""") return "".join(html_parts) def remove_pdf_by_name(file_name: str): """Remove a specific PDF from context""" global uploaded_pdfs uploaded_pdfs = [pdf for pdf in uploaded_pdfs if pdf["name"] != file_name] update_combined_context() return f"✅ Removed '{file_name}' from context", generate_context_display() def remove_pdf_context(): """Remove all PDF context""" global mathmind_pdf_context, uploaded_pdfs mathmind_pdf_context = "" uploaded_pdfs = [] return "✅ All PDF context removed", "No PDF context loaded" # Create Gradio interface with custom CSS with gr.Blocks( title="BrightMind AI - Educational Content Generator", theme=gr.themes.Soft(), css=""" /* Force Light Mode - Disable Dark Mode */ :root { color-scheme: light only !important; } *, *::before, *::after { color-scheme: light !important; } /* Override any dark mode media queries */ @media (prefers-color-scheme: dark) { * { color-scheme: light !important; background: #ffffff !important; color: #000000 !important; } body, html, .gradio-container, .gradio-app { background: #ffffff !important; color: #000000 !important; } /* Force all Gradio components to light mode */ .gr-button, .gr-textbox, .gr-dropdown, .gr-chatbot, .gr-file, .gr-html, .gr-markdown, .gr-tab-nav, .gr-panel, .gr-form, .gr-box { background: #ffffff !important; color: #000000 !important; border-color: #e2e8f0 !important; } } /* Modern UI Framework */ .gradio-container { font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; background: #ffffff !important; color: #000000 !important; min-height: 100vh; position: relative; overflow: hidden; } /* Animated Background */ .gradio-container::before { content: ''; position: fixed; top: 0; left: 0; width: 100%; height: 100%; background: radial-gradient(circle at 20% 80%, rgba(120, 119, 198, 0.1) 0%, transparent 50%), radial-gradient(circle at 80% 20%, rgba(255, 119, 198, 0.1) 0%, transparent 50%), radial-gradient(circle at 40% 40%, rgba(120, 219, 255, 0.1) 0%, transparent 50%); animation: backgroundShift 15s ease-in-out infinite; z-index: -1; pointer-events: none; } .gradio-container::after { content: ''; position: fixed; top: 0; left: 0; width: 100%; height: 100%; background-image: radial-gradient(2px 2px at 20px 30px, #cbd5e0, transparent), radial-gradient(2px 2px at 40px 70px, #a0aec0, transparent), radial-gradient(1px 1px at 90px 40px, #e2e8f0, transparent), radial-gradient(1px 1px at 130px 80px, #cbd5e0, transparent), radial-gradient(2px 2px at 160px 30px, #a0aec0, transparent); background-repeat: repeat; background-size: 200px 100px; animation: particleFloat 20s linear infinite; z-index: -1; pointer-events: none; opacity: 0.6; } @keyframes backgroundShift { 0%, 100% { transform: translateX(0px) translateY(0px) scale(1); filter: hue-rotate(0deg); } 25% { transform: translateX(-20px) translateY(-10px) scale(1.02); filter: hue-rotate(90deg); } 50% { transform: translateX(10px) translateY(-20px) scale(0.98); filter: hue-rotate(180deg); } 75% { transform: translateX(-10px) translateY(10px) scale(1.01); filter: hue-rotate(270deg); } } @keyframes particleFloat { 0% { transform: translateY(0px) translateX(0px); } 25% { transform: translateY(-30px) translateX(10px); } 50% { transform: translateY(-60px) translateX(-5px); } 75% { transform: translateY(-30px) translateX(15px); } 100% { transform: translateY(0px) translateX(0px); } } /* Modern Card System */ .modern-card { background: linear-gradient(145deg, #ffffff 0%, #f8fafc 100%); border-radius: 24px; padding: 32px; margin: 24px 0; box-shadow: 0 25px 50px rgba(0, 0, 0, 0.1); border: 1px solid rgba(255, 255, 255, 0.2); backdrop-filter: blur(20px); transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1); position: relative; overflow: hidden; } .modern-card::before { content: ''; position: absolute; top: 0; left: 0; right: 0; height: 4px; background: linear-gradient(90deg, #667eea, #764ba2, #f093fb); border-radius: 24px 24px 0 0; } .modern-card:hover { transform: translateY(-8px) scale(1.02); box-shadow: 0 35px 70px rgba(0, 0, 0, 0.15); } /* Enhanced Input Styling - Force Text Visibility */ .gradio-container input, .gradio-container textarea, .gradio-container select, .gradio-container input[type="text"], .gradio-container input[type="email"], .gradio-container input[type="password"], .gradio-container textarea, .gradio-container select { border-radius: 16px !important; border: 2px solid #e2e8f0 !important; padding: 16px 20px !important; font-size: 15px !important; font-weight: 500 !important; transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1) !important; background: #ffffff !important; box-shadow: 0 4px 15px rgba(0, 0, 0, 0.05) !important; color: #1a202c !important; } .gradio-container input::placeholder, .gradio-container textarea::placeholder { color: #718096 !important; opacity: 1 !important; } /* Dropdown and Select Styling - Black text on white background */ .gradio-container select, .gradio-container select option, .gradio-container .dropdown, .gradio-container .dropdown *, .gradio-container .dropdown .wrap, .gradio-container .dropdown .wrap *, .gradio-container .gr-dropdown, .gradio-container .gr-dropdown *, .gradio-container .gr-dropdown select, .gradio-container .gr-dropdown select option { color: #000000 !important; background: #ffffff !important; border: 1px solid #e2e8f0 !important; } /* Force dropdown text visibility with maximum specificity */ .gradio-container .dropdown span, .gradio-container .dropdown div, .gradio-container .dropdown p, .gradio-container .dropdown li, .gradio-container .dropdown a, .gradio-container .gr-dropdown span, .gradio-container .gr-dropdown div, .gradio-container .gr-dropdown p, .gradio-container .gr-dropdown li, .gradio-container .gr-dropdown a { color: #000000 !important; background: transparent !important; } /* Dropdown hover states */ .gradio-container select:hover, .gradio-container .dropdown:hover, .gradio-container .gr-dropdown:hover { background: #f8f9fa !important; border: 1px solid #cbd5e0 !important; } /* Dropdown focus states */ .gradio-container select:focus, .gradio-container .dropdown:focus, .gradio-container .gr-dropdown:focus { background: #ffffff !important; border: 1px solid #667eea !important; outline: none !important; } /* Dropdown options list */ .gradio-container .dropdown .options, .gradio-container .dropdown .options *, .gradio-container .gr-dropdown .options, .gradio-container .gr-dropdown .options * { color: #000000 !important; background: #ffffff !important; } /* Force visibility for dropdown option text */ .gradio-container .dropdown .option, .gradio-container .dropdown .option *, .gradio-container .dropdown .item, .gradio-container .dropdown .item *, .gradio-container .gr-dropdown .option, .gradio-container .gr-dropdown .option *, .gradio-container .gr-dropdown .item, .gradio-container .gr-dropdown .item * { color: #000000 !important; background: transparent !important; } /* Override any conflicting text colors */ .gradio-container .dropdown *[style*="color"], .gradio-container .gr-dropdown *[style*="color"] { color: #000000 !important; } /* Dropdown option hover */ .gradio-container .dropdown .options li:hover, .gradio-container .gr-dropdown .options li:hover { background: #f8f9fa !important; color: #000000 !important; } /* Additional dropdown selectors for comprehensive coverage */ .gradio-container [class*="dropdown"], .gradio-container [class*="select"], .gradio-container [data-testid*="dropdown"], .gradio-container [data-testid*="select"] { color: #000000 !important; background: #ffffff !important; border: 1px solid #e2e8f0 !important; } .gradio-container [class*="dropdown"] *, .gradio-container [class*="select"] *, .gradio-container [data-testid*="dropdown"] *, .gradio-container [data-testid*="select"] * { color: #000000 !important; background: #ffffff !important; } /* Force all form selects to be black on white */ .gradio-container form select, .gradio-container form select option, .gradio-container .form select, .gradio-container .form select option { color: #000000 !important; background: #ffffff !important; border: 1px solid #e2e8f0 !important; } /* Ultimate dropdown text visibility fix */ .gradio-container .dropdown, .gradio-container .gr-dropdown, .gradio-container select { color: #000000 !important; } .gradio-container .dropdown *, .gradio-container .gr-dropdown *, .gradio-container select * { color: #000000 !important; } /* Override any white or transparent text that might be invisible */ .gradio-container .dropdown *[style*="color: white"], .gradio-container .dropdown *[style*="color: #fff"], .gradio-container .dropdown *[style*="color: #ffffff"], .gradio-container .dropdown *[style*="color: transparent"], .gradio-container .gr-dropdown *[style*="color: white"], .gradio-container .gr-dropdown *[style*="color: #fff"], .gradio-container .gr-dropdown *[style*="color: #ffffff"], .gradio-container .gr-dropdown *[style*="color: transparent"] { color: #000000 !important; opacity: 1 !important; } .gradio-container input:focus, .gradio-container textarea:focus, .gradio-container select:focus { border-color: #667eea !important; box-shadow: 0 0 0 4px rgba(102, 126, 234, 0.1), 0 8px 25px rgba(0, 0, 0, 0.1) !important; outline: none !important; transform: translateY(-2px) !important; color: #1a202c !important; background: #ffffff !important; } /* Force text visibility in all text elements */ .gradio-container * { color: inherit !important; } .gradio-container input, .gradio-container textarea, .gradio-container select { color: #1a202c !important; } /* Markdown and output text visibility */ .gradio-container .markdown, .gradio-container .markdown *, .gradio-container .prose, .gradio-container .prose * { color: #1a202c !important; } /* Chat message text visibility */ .gradio-container .chatbot .message, .gradio-container .chatbot .message * { color: #1a202c !important; } /* Override any conflicting styles */ .gradio-container .gr-textbox, .gradio-container .gr-textbox input, .gradio-container .gr-textarea, .gradio-container .gr-textarea textarea, .gradio-container .gr-dropdown, .gradio-container .gr-dropdown select { color: #1a202c !important; background: #ffffff !important; } /* Force visibility for all generated content */ .gradio-container .gr-markdown, .gradio-container .gr-markdown *, .gradio-container .gr-html, .gradio-container .gr-html *, .gradio-container .output, .gradio-container .output *, .gradio-container .result, .gradio-container .result * { color: #1a202c !important; } /* Specific styling for lesson plan and quiz outputs */ .gradio-container .gr-markdown h1, .gradio-container .gr-markdown h2, .gradio-container .gr-markdown h3, .gradio-container .gr-markdown h4, .gradio-container .gr-markdown h5, .gradio-container .gr-markdown h6 { color: #1a202c !important; } .gradio-container .gr-markdown p, .gradio-container .gr-markdown li, .gradio-container .gr-markdown span, .gradio-container .gr-markdown div { color: #1a202c !important; } /* Chat output visibility */ .gradio-container .chatbot .message p, .gradio-container .chatbot .message div, .gradio-container .chatbot .message span { color: #1a202c !important; } /* Dataset/Table text visibility */ .gradio-container .gr-dataset, .gradio-container .gr-dataset *, .gradio-container .gr-dataset table, .gradio-container .gr-dataset table *, .gradio-container .gr-dataset td, .gradio-container .gr-dataset th, .gradio-container .gr-dataset tr { color: #1a202c !important; background: #ffffff !important; } /* Table specific visibility fixes */ .gradio-container table, .gradio-container table *, .gradio-container table td, .gradio-container table th, .gradio-container table tr { color: #1a202c !important; background: #ffffff !important; } /* Examples table visibility */ .gradio-container .examples, .gradio-container .examples *, .gradio-container .examples table, .gradio-container .examples table *, .gradio-container .examples td, .gradio-container .examples th { color: #1a202c !important; background: #ffffff !important; } /* Slider and form control visibility */ .gradio-container .gr-slider, .gradio-container .gr-slider *, .gradio-container .gr-checkbox, .gradio-container .gr-checkbox *, .gradio-container .gr-radio, .gradio-container .gr-radio * { color: #1a202c !important; } /* Label text visibility */ .gradio-container label, .gradio-container .gr-label, .gradio-container .gr-label * { color: #1a202c !important; } /* Force all text to be visible */ .gradio-container div, .gradio-container span, .gradio-container p, .gradio-container td, .gradio-container th, .gradio-container li { color: #1a202c !important; } /* Override any white text that might be invisible */ .gradio-container *[style*="color: white"], .gradio-container *[style*="color: #fff"], .gradio-container *[style*="color: #ffffff"] { color: #1a202c !important; } /* Ensure all interactive elements have visible text */ .gradio-container button, .gradio-container .btn, .gradio-container .gr-button { color: #ffffff !important; } /* Table and dataset specific fixes */ .gradio-container table td, .gradio-container table th, .gradio-container .dataset table td, .gradio-container .dataset table th { color: #1a202c !important; background: transparent !important; } /* Form element text visibility */ .gradio-container .form, .gradio-container .form *, .gradio-container .gr-form, .gradio-container .gr-form * { color: #1a202c !important; } /* Quiz Generator specific fixes */ .gradio-container .gr-slider .wrap, .gradio-container .gr-slider .wrap *, .gradio-container .gr-slider label, .gradio-container .gr-slider .label, .gradio-container .gr-slider .gr-label { color: #1a202c !important; } /* Comprehensive Slider Visibility */ .gradio-container .gr-slider, .gradio-container .gr-slider *, .gradio-container [data-testid="slider"], .gradio-container [data-testid="slider"] *, .gradio-container .slider, .gradio-container .slider * { background: #ffffff !important; border: 1px solid #e2e8f0 !important; border-radius: 8px !important; } /* Slider track visibility */ .gradio-container .gr-slider .track, .gradio-container .gr-slider .rail, .gradio-container .gr-slider .slider-track, .gradio-container [data-testid="slider"] .track, .gradio-container [data-testid="slider"] .rail, .gradio-container .slider .track, .gradio-container .slider .rail { background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; height: 8px !important; border-radius: 4px !important; } /* Slider thumb visibility */ .gradio-container .gr-slider .thumb, .gradio-container .gr-slider .handle, .gradio-container .gr-slider .slider-thumb, .gradio-container [data-testid="slider"] .thumb, .gradio-container [data-testid="slider"] .handle, .gradio-container .slider .thumb, .gradio-container .slider .handle { background: #000000 !important; border: 3px solid #000000 !important; box-shadow: 0 2px 6px rgba(0, 0, 0, 0.5) !important; width: 20px !important; height: 20px !important; border-radius: 50% !important; } /* Slider value display */ .gradio-container .gr-slider .value, .gradio-container .gr-slider .gr-number, .gradio-container .gr-slider input[type="number"], .gradio-container [data-testid="slider"] .value, .gradio-container [data-testid="slider"] input, .gradio-container .slider .value, .gradio-container .slider input { color: #1a202c !important; background: #ffffff !important; border: 1px solid #cbd5e0 !important; } /* Force slider visibility with multiple selectors */ .gradio-container input[type="range"] { -webkit-appearance: none !important; appearance: none !important; background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; height: 8px !important; border-radius: 4px !important; outline: none !important; } .gradio-container input[type="range"]::-webkit-slider-track { -webkit-appearance: none !important; appearance: none !important; background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; height: 8px !important; border-radius: 4px !important; } .gradio-container input[type="range"]::-webkit-slider-thumb { -webkit-appearance: none !important; appearance: none !important; background: #000000 !important; border: 3px solid #000000 !important; box-shadow: 0 2px 6px rgba(0, 0, 0, 0.5) !important; width: 20px !important; height: 20px !important; border-radius: 50% !important; cursor: pointer !important; } .gradio-container input[type="range"]::-moz-range-track { background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; height: 8px !important; border-radius: 4px !important; } .gradio-container input[type="range"]::-moz-range-thumb { background: #000000 !important; border: 3px solid #000000 !important; box-shadow: 0 2px 6px rgba(0, 0, 0, 0.5) !important; width: 20px !important; height: 20px !important; border-radius: 50% !important; cursor: pointer !important; } /* Microsoft Edge and IE */ .gradio-container input[type="range"]::-ms-track { background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; height: 8px !important; border-radius: 4px !important; } .gradio-container input[type="range"]::-ms-thumb { background: #000000 !important; border: 3px solid #000000 !important; box-shadow: 0 2px 6px rgba(0, 0, 0, 0.5) !important; width: 20px !important; height: 20px !important; border-radius: 50% !important; cursor: pointer !important; } /* Number input visibility */ .gradio-container .gr-number input, .gradio-container .gr-number input[type="number"] { color: #1a202c !important; background: #ffffff !important; border: 1px solid #cbd5e0 !important; } /* Checkbox and radio button labels */ .gradio-container .gr-checkbox label, .gradio-container .gr-radio label, .gradio-container .gr-checkbox .label, .gradio-container .gr-radio .label { color: #1a202c !important; } /* Radio button styling - selected state turns black */ .gradio-container .gr-radio input[type="radio"] { appearance: none !important; width: 20px !important; height: 20px !important; border: 2px solid #e2e8f0 !important; border-radius: 50% !important; background: #ffffff !important; cursor: pointer !important; position: relative !important; } .gradio-container .gr-radio input[type="radio"]:checked { background: #000000 !important; border: 2px solid #000000 !important; } .gradio-container .gr-radio input[type="radio"]:checked::after { content: '' !important; position: absolute !important; top: 50% !important; left: 50% !important; width: 8px !important; height: 8px !important; background: #ffffff !important; border-radius: 50% !important; transform: translate(-50%, -50%) !important; } /* Radio button container styling */ .gradio-container .gr-radio { background: #ffffff !important; border: 1px solid #e2e8f0 !important; border-radius: 12px !important; padding: 16px !important; margin: 8px !important; transition: all 0.3s ease !important; } .gradio-container .gr-radio:hover { background: #f8f9fa !important; border: 1px solid #cbd5e0 !important; transform: translateY(-2px) !important; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1) !important; } .gradio-container .gr-radio:has(input[type="radio"]:checked) { background: #000000 !important; border: 1px solid #000000 !important; color: #ffffff !important; } .gradio-container .gr-radio:has(input[type="radio"]:checked) label { color: #ffffff !important; } /* Force all quiz form elements to have visible text */ .gradio-container .quiz-form, .gradio-container .quiz-form *, .gradio-container .quiz-form label, .gradio-container .quiz-form .label, .gradio-container .quiz-form .gr-label { color: #1a202c !important; } /* Override any transparent or invisible text */ .gradio-container *[style*="opacity: 0"], .gradio-container *[style*="opacity:0"], .gradio-container .invisible, .gradio-container .hidden { opacity: 1 !important; color: #1a202c !important; } /* Ensure all text in cards is visible */ .gradio-container .modern-card * { color: #1a202c !important; } /* Specific fixes for quiz tab elements */ .gradio-container .tab-nav .tab-nav button[data-testid="tab-2"] ~ * .gr-slider, .gradio-container .tab-nav .tab-nav button[data-testid="tab-2"] ~ * .gr-number, .gradio-container .tab-nav .tab-nav button[data-testid="tab-2"] ~ * .gr-checkbox, .gradio-container .tab-nav .tab-nav button[data-testid="tab-2"] ~ * .gr-radio { color: #1a202c !important; } /* Force white background for all chat elements */ .gradio-container .gr-chatbot, .gradio-container .gr-chatbot *, .gradio-container .chatbot, .gradio-container .chatbot *, .gradio-container [data-testid="chatbot"], .gradio-container [data-testid="chatbot"] * { background: #ffffff !important; } /* Override any dark themes that might be applied */ .gradio-container .dark, .gradio-container .dark *, .gradio-container [data-theme="dark"], .gradio-container [data-theme="dark"] * { background: #ffffff !important; color: #1a202c !important; } /* Force visibility for all text elements */ .gradio-container * { color: #1a202c !important; } /* Override any white or transparent text */ .gradio-container *[style*="color: white"], .gradio-container *[style*="color: #fff"], .gradio-container *[style*="color: #ffffff"], .gradio-container *[style*="color: transparent"], .gradio-container *[style*="opacity: 0"] { color: #1a202c !important; opacity: 1 !important; } /* Ensure all interactive elements are visible */ .gradio-container .gr-component, .gradio-container .gr-component * { color: #1a202c !important; background: #ffffff !important; } /* Specific fixes for lesson plan examples */ .gradio-container .lesson-examples, .gradio-container .lesson-examples *, .gradio-container .lesson-examples table, .gradio-container .lesson-examples table * { color: #1a202c !important; background: #ffffff !important; } /* Additional slider targeting for all possible selectors */ .gradio-container .wrap .gr-slider, .gradio-container .wrap .gr-slider *, .gradio-container .form .gr-slider, .gradio-container .form .gr-slider *, .gradio-container .block .gr-slider, .gradio-container .block .gr-slider *, .gradio-container .component .gr-slider, .gradio-container .component .gr-slider * { background: #ffffff !important; border: 1px solid #e2e8f0 !important; } /* Target slider elements by their actual HTML structure */ .gradio-container .gr-slider .wrap, .gradio-container .gr-slider .wrap *, .gradio-container .gr-slider .container, .gradio-container .gr-slider .container * { background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; } /* Force visibility for any slider-like elements */ .gradio-container [class*="slider"], .gradio-container [class*="range"], .gradio-container [class*="track"] { background: #e2e8f0 !important; border: 2px solid #cbd5e0 !important; } .gradio-container [class*="thumb"], .gradio-container [class*="handle"] { background: #000000 !important; border: 3px solid #000000 !important; box-shadow: 0 2px 6px rgba(0, 0, 0, 0.5) !important; } /* Premium Button Design */ .gradio-container .btn { border-radius: 16px !important; padding: 16px 32px !important; font-weight: 700 !important; font-size: 15px !important; text-transform: uppercase !important; letter-spacing: 0.5px !important; transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1) !important; border: none !important; position: relative !important; overflow: hidden !important; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; color: white !important; box-shadow: 0 8px 25px rgba(102, 126, 234, 0.3) !important; } .gradio-container .btn::before { content: ''; position: absolute; top: 0; left: -100%; width: 100%; height: 100%; background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.3), transparent); transition: left 0.6s; } .gradio-container .btn:hover::before { left: 100%; } .gradio-container .btn:hover { transform: translateY(-4px) scale(1.05) !important; box-shadow: 0 15px 35px rgba(102, 126, 234, 0.4) !important; } .gradio-container .btn:active { transform: translateY(-2px) scale(1.02) !important; } /* Modern Tab Navigation */ .gradio-container .tab-nav { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important; border-radius: 20px !important; padding: 12px !important; margin-bottom: 32px !important; box-shadow: 0 15px 35px rgba(102, 126, 234, 0.3) !important; backdrop-filter: blur(20px) !important; } .gradio-container .tab-nav button { border-radius: 16px !important; padding: 16px 24px !important; font-weight: 700 !important; font-size: 15px !important; transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1) !important; border: none !important; background: transparent !important; color: rgba(255, 255, 255, 0.8) !important; position: relative !important; overflow: hidden !important; } .gradio-container .tab-nav button::before { content: ''; position: absolute; top: 0; left: 0; right: 0; bottom: 0; background: linear-gradient(135deg, rgba(255, 255, 255, 0.1), rgba(255, 255, 255, 0.05)); opacity: 0; transition: opacity 0.3s ease; } .gradio-container .tab-nav button:hover::before { opacity: 1; } .gradio-container .tab-nav button.selected { background: rgba(255, 255, 255, 0.2) !important; color: white !important; backdrop-filter: blur(20px) !important; box-shadow: 0 8px 25px rgba(0, 0, 0, 0.1) !important; } .gradio-container .tab-nav button:hover { color: white !important; transform: translateY(-2px) !important; } /* Enhanced Chat Interface */ .gradio-container .chatbot { border-radius: 24px !important; border: 2px solid #e2e8f0 !important; background: #ffffff !important; box-shadow: 0 15px 35px rgba(0, 0, 0, 0.1) !important; backdrop-filter: blur(20px) !important; overflow: hidden !important; } .gradio-container .chatbot .message { border-radius: 20px !important; margin: 16px 0 !important; padding: 20px 24px !important; background: #ffffff !important; box-shadow: 0 8px 25px rgba(0, 0, 0, 0.08) !important; transition: all 0.3s ease !important; } .gradio-container .chatbot .message:hover { transform: translateX(4px) !important; box-shadow: 0 12px 30px rgba(0, 0, 0, 0.12) !important; } /* Modern Progress System */ .progress-container { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 24px; padding: 40px; margin: 32px 0; color: white; text-align: center; box-shadow: 0 25px 50px rgba(102, 126, 234, 0.3); backdrop-filter: blur(20px); position: relative; overflow: hidden; } .progress-container::before { content: ''; position: absolute; top: -50%; left: -50%; width: 200%; height: 200%; background: radial-gradient(circle, rgba(255, 255, 255, 0.1) 0%, transparent 70%); animation: rotate 20s linear infinite; } @keyframes rotate { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } } .progress-bar { width: 100%; height: 16px; background: rgba(255, 255, 255, 0.2); border-radius: 8px; overflow: hidden; margin: 24px 0; position: relative; } .progress-fill { height: 100%; background: linear-gradient(90deg, #4facfe 0%, #00f2fe 100%); border-radius: 8px; animation: progress-animation 2s ease-in-out; position: relative; } .progress-fill::after { content: ''; position: absolute; top: 0; left: 0; right: 0; bottom: 0; background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.3), transparent); animation: shimmer 2s infinite; } @keyframes shimmer { 0% { transform: translateX(-100%); } 100% { transform: translateX(100%); } } @keyframes progress-animation { 0% { width: 0%; } 100% { width: 100%; } } .status-text { font-size: 20px; font-weight: 700; margin: 20px 0; text-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); } .step-text { font-size: 18px; opacity: 0.9; margin: 12px 0; font-weight: 500; } /* Modern Header Design */ .modern-header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 24px; padding: 50px; margin: 32px 0; text-align: center; color: white; box-shadow: 0 25px 50px rgba(102, 126, 234, 0.3); position: relative; overflow: hidden; } .modern-header::before { content: ''; position: absolute; top: -50%; left: -50%; width: 200%; height: 200%; background: radial-gradient(circle, rgba(255, 255, 255, 0.1) 0%, transparent 70%); animation: float 6s ease-in-out infinite; } @keyframes float { 0%, 100% { transform: translateY(0px) rotate(0deg); } 50% { transform: translateY(-20px) rotate(180deg); } } .modern-header h1 { font-size: 3rem; font-weight: 800; margin: 0 0 16px 0; text-shadow: 0 4px 8px rgba(0, 0, 0, 0.3); position: relative; z-index: 1; } .modern-header p { font-size: 1.4rem; opacity: 0.9; margin: 0; position: relative; z-index: 1; font-weight: 500; } /* Animation Classes */ .fade-in { animation: fadeIn 0.8s cubic-bezier(0.4, 0, 0.2, 1); } @keyframes fadeIn { from { opacity: 0; transform: translateY(30px); } to { opacity: 1; transform: translateY(0); } } .slide-in-left { animation: slideInLeft 1s cubic-bezier(0.4, 0, 0.2, 1); } @keyframes slideInLeft { from { transform: translateX(-100%); opacity: 0; } to { transform: translateX(0); opacity: 1; } } .slide-in-right { animation: slideInRight 1s cubic-bezier(0.4, 0, 0.2, 1); } @keyframes slideInRight { from { transform: translateX(100%); opacity: 0; } to { transform: translateX(0); opacity: 1; } } .bounce-in { animation: bounceIn 1.2s cubic-bezier(0.68, -0.55, 0.265, 1.55); } @keyframes bounceIn { 0% { transform: scale(0.3); opacity: 0; } 50% { transform: scale(1.05); } 70% { transform: scale(0.9); } 100% { transform: scale(1); opacity: 1; } } /* Legacy Progress Container */ .progress-container { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); border-radius: 15px; padding: 20px; margin: 20px 0; box-shadow: 0 8px 32px rgba(0,0,0,0.1); } .progress-bar { width: 100%; height: 8px; background: rgba(255,255,255,0.2); border-radius: 4px; overflow: hidden; margin: 10px 0; } .progress-fill { height: 100%; background: linear-gradient(90deg, #4facfe 0%, #00f2fe 100%); border-radius: 4px; animation: progress-animation 2s ease-in-out infinite; } @keyframes progress-animation { 0% { width: 0%; } 50% { width: 70%; } 100% { width: 100%; } } .typewriter-container { background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%); border-radius: 15px; padding: 25px; margin: 20px 0; box-shadow: 0 10px 40px rgba(0,0,0,0.1); border-left: 5px solid #667eea; } .typewriter-header { display: flex; align-items: center; margin-bottom: 15px; } .book-icon { font-size: 24px; margin-right: 10px; color: #8b4513; animation: book-flip 2s ease-in-out infinite; } @keyframes book-flip { 0%, 100% { transform: rotateY(0deg); } 50% { transform: rotateY(10deg); } } .typewriter-text { font-family: 'Courier New', monospace; font-size: 16px; line-height: 1.6; color: #2c3e50; white-space: pre-wrap; min-height: 200px; } .cursor { display: inline-block; width: 2px; height: 20px; background: #667eea; animation: blink 1s infinite; } @keyframes blink { 0%, 50% { opacity: 1; } 51%, 100% { opacity: 0; } } .generation-status { text-align: center; color: #667eea; font-weight: 600; margin: 10px 0; } .professional-header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 15px; text-align: center; margin-bottom: 30px; box-shadow: 0 10px 40px rgba(0,0,0,0.1); } .professional-header h1 { margin: 0; font-size: 2.5em; font-weight: 700; } .professional-header p { margin: 10px 0 0 0; font-size: 1.2em; opacity: 0.9; } /* Albert Chat Avatar Styling */ .chatbot .message { margin: 15px 0; padding: 10px; border-radius: 10px; background: rgba(255,255,255,0.1); } .chatbot .message .avatar { width: 300px !important; height: 300px !important; border-radius: 50% !important; margin-right: 20px !important; border: 6px solid #667eea !important; box-shadow: 0 8px 16px rgba(0,0,0,0.3) !important; transition: all 0.3s ease !important; } .chatbot .message .avatar:hover { transform: scale(1.1) !important; box-shadow: 0 8px 16px rgba(102, 126, 234, 0.4) !important; } .chatbot .message .message-content { font-size: 16px; line-height: 1.6; } .chatbot .message .message-content strong { color: #667eea; font-size: 18px; } /* Voice button styling */ .voice-controls { background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); border-radius: 10px; padding: 15px; margin: 10px 0; border-left: 4px solid #4CAF50; } .voice-controls button { background: linear-gradient(135deg, #4CAF50 0%, #45a049 100%) !important; color: white !important; border: none !important; padding: 6px 12px !important; border-radius: 6px !important; cursor: pointer !important; font-weight: 500 !important; font-size: 11px !important; box-shadow: 0 2px 6px rgba(76, 175, 80, 0.3) !important; transition: all 0.3s ease !important; } .voice-controls button:hover { transform: translateY(-2px) !important; box-shadow: 0 6px 16px rgba(76, 175, 80, 0.4) !important; } /* Auto-scroll for chat */ #albert_chat .chatbot { scroll-behavior: smooth !important; } #albert_chat .overflow-y-auto { scroll-behavior: smooth !important; } """ ) as demo: # Modern Header with gr.Row(): gr.HTML("""

🧠 BrightMind AI

Professional Educational Content Generator for Teachers

""") with gr.Tabs(): with gr.TabItem("🧠 Ask Albert"): with gr.Row(): with gr.Column(scale=1): gr.HTML("""

🧠 Meet Albert!

Your friendly learning buddy! 😊

✨ Tell Albert your name & age → Ask any question → Get fun explanations!

""") # Modern Form Section gr.HTML("""

👤 User Setup

""") user_name = gr.Textbox( label="👋 What's your name?", placeholder="Enter your name here...", value="" ) user_age_group = gr.Dropdown( choices=["K-2", "3-5", "6-8", "9-12", "Adult"], label="🎂 What's your age group?", value="6-8" ) gr.HTML("
") # Modern Chat Interface gr.HTML("""

💬 Chat Interface

""") chat_input = gr.Textbox( label="Ask Albert anything!", placeholder="Type your question here...", lines=2 ) with gr.Row(): send_btn = gr.Button("🚀 Send Message", variant="primary") clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary") gr.HTML("
") with gr.Column(scale=2): # Modern Albert Section gr.HTML("""
👨‍🏫

Albert is ready to help!

""") # Modern Chat Display gr.HTML("""

💬 Albert's Chat

""") chat_display = gr.Chatbot( label="", height=400, show_label=False, container=True, bubble_full_width=False, avatar_images=[ "https://img.icons8.com/color/48/000000/user-male-circle.png", # User avatar "https://img.icons8.com/color/48/000000/teacher.png" # Albert avatar (professional teacher) ], elem_id="albert_chat" ) gr.HTML("
") # Typing indicator typing_indicator = gr.HTML( value="", visible=False ) # Context display context_display = gr.HTML( value="" ) # Event handlers for Albert chat def send_message_with_typing(message, name, age_group, history): """Send message with typing effect and context memory""" if not message.strip(): return history, "", "", "" # Add user message to history if history is None: history = [] # Add user message with name history.append([f"{name}: {message}", None]) # Show typing indicator with Albert thinking animation typing_html = """
🧠
Albert is thinking
...
""" # Generate Albert's response with full context albert_response = generate_albert_response(message, name, age_group, history) # Fix any malformed image HTML in the response albert_response = fix_malformed_image_html(albert_response) # Add Albert's response with fast typing effect import time typed_response = "" for i, char in enumerate(albert_response): typed_response += char history[-1][1] = f"{typed_response}|" time.sleep(0.015) # Even faster typing speed # Final response without cursor history[-1][1] = f"{albert_response}" # No context memory display context_html = "" # Add auto-scroll trigger scroll_script = """ """ return history, "", context_html, scroll_script def clear_chat(): return [], "", "", "" def initialize_chat(name, age_group): if not name.strip(): return [], "", "" welcome_message = f"Hi {name}! 🧠✨ I'm Albert, your learning buddy! I'm so excited to help you learn! I'll remember our conversation so you can ask follow-up questions anytime. What would you like to know about today? 😊🚀" return [[None, f"{welcome_message}"]], "", "" # Connect event handlers send_btn.click( fn=send_message_with_typing, inputs=[chat_input, user_name, user_age_group, chat_display], outputs=[chat_display, chat_input, context_display, typing_indicator] ) clear_btn.click( fn=clear_chat, outputs=[chat_display, context_display, context_display, typing_indicator] ) # Initialize chat when name is entered user_name.change( fn=initialize_chat, inputs=[user_name, user_age_group], outputs=[chat_display, context_display, context_display] ) with gr.TabItem("📚 Lesson Plan Generator"): with gr.Row(): with gr.Column(): # Modern Form Section gr.HTML("""

📚 Lesson Plan Generator

Create comprehensive lesson plans with AI assistance

""") topic = gr.Textbox(label="📝 Topic", placeholder="e.g., Photosynthesis, World War II, Algebra") subject = gr.Dropdown( choices=["Science", "Mathematics", "History", "English Language Arts", "Geography", "Art"], label="📚 Subject", value="Science", interactive=True ) grade_level = gr.Dropdown( choices=["K-2", "3-5", "6-8", "9-12"], label="🎓 Grade Level", value="6-8", interactive=True ) duration = gr.Slider( minimum=30, maximum=180, step=15, value=60, label="⏱️ Duration (minutes)" ) difficulty = gr.Dropdown( choices=["Beginner", "Intermediate", "Advanced"], label="🎯 Difficulty", value="Intermediate", interactive=True ) generate_lesson_btn = gr.Button("🚀 Generate Lesson Plan", variant="primary") gr.HTML("
") with gr.Column(): # Modern Output Section gr.HTML("""

📋 Generated Lesson Plan

""") progress_display = gr.HTML("", visible=False) lesson_plan_output = gr.Markdown(label="") gr.HTML("
") # Modern Examples Section gr.HTML("""

💡 Example Topics

Click on any example to get started quickly!

""") gr.Dataset( components=[topic, subject, grade_level, duration, difficulty], samples=[ ["Photosynthesis", "Science", "6-8", 60, "Intermediate"], ["World War II", "History", "9-12", 90, "Advanced"], ["Basic Algebra", "Mathematics", "3-5", 45, "Beginner"] ], ) gr.HTML("
") # Event handler for lesson plan generation generate_lesson_btn.click( fn=generate_lesson_plan_with_progress, inputs=[topic, subject, grade_level, duration, difficulty], outputs=[progress_display, lesson_plan_output], show_progress=True ) # DISABLED: Content Generator Tab # Commented out as requested - keeping all functionality intact for future use """ with gr.TabItem("📝 Content Generator"): with gr.Row(): with gr.Column(scale=1): # Modern Form Section gr.HTML('''

📝 Content Generation

Generate educational materials tailored to your specific needs

''') with gr.Group(): content_topic = gr.Textbox(label="📝 Topic", placeholder="e.g., Photosynthesis, World War II, Algebra") content_subject = gr.Dropdown( choices=["Science", "Mathematics", "History", "English Language Arts", "Geography", "Art"], label="📚 Subject", value="Science", interactive=True ) content_grade_level = gr.Dropdown( choices=["K-2", "3-5", "6-8", "9-12"], label="🎓 Grade Level", value="6-8", interactive=True ) content_difficulty = gr.Dropdown( choices=["Beginner", "Intermediate", "Advanced"], label="🎯 Difficulty", value="Intermediate", interactive=True ) content_type = gr.Dropdown( choices=["Worksheets", "Handouts", "Study Guides", "Activities", "Presentations", "Lesson Materials"], label="📄 Content Type", value="Worksheets", interactive=True ) content_length = gr.Dropdown( choices=["Short (1-2 pages)", "Medium (3-5 pages)", "Long (6+ pages)"], label="📏 Length", value="Medium (3-5 pages)", interactive=True ) generate_content_btn = gr.Button("🚀 Generate Content", variant="primary", size="lg") clear_content_btn = gr.Button("🗑️ Clear", variant="secondary") with gr.Column(scale=2): # Modern Output Section gr.HTML('''

📄 Generated Content

Your educational materials will appear here

''') content_progress_display = gr.HTML("", visible=False) content_output = gr.Markdown(value="*Your educational content will appear here...*") # Content examples content_examples = gr.Dataset( components=[content_topic, content_subject, content_grade_level, content_difficulty, content_type, content_length], samples=[ ["Photosynthesis", "Science", "6-8", "Intermediate", "Worksheets", "Medium (3-5 pages)"], ["World War II", "History", "9-12", "Advanced", "Study Guides", "Long (6+ pages)"], ["Basic Algebra", "Mathematics", "3-5", "Beginner", "Activities", "Short (1-2 pages)"] ], label="💡 Content Examples" ) # Event handlers for content generation generate_content_btn.click( fn=generate_content_with_progress, inputs=[content_topic, content_subject, content_grade_level, content_difficulty, content_type, content_length], outputs=[content_progress_display, content_output], show_progress=True ) clear_content_btn.click( fn=lambda: ("", "Your educational content will appear here..."), inputs=[], outputs=[content_progress_display, content_output] ) """ with gr.TabItem("🎯 Quiz Generator"): with gr.Row(): with gr.Column(): # Modern Form Section gr.HTML("""

🎯 Quiz Generator

Create engaging quizzes with multiple question types

""") quiz_topic = gr.Textbox(label="📝 Quiz Topic", placeholder="e.g., Photosynthesis, World War II, Algebra") quiz_subject = gr.Dropdown( choices=["Science", "Mathematics", "History", "English Language Arts", "Geography", "Art"], label="📚 Subject", value="Science", interactive=True ) quiz_grade_level = gr.Dropdown( choices=["K-2", "3-5", "6-8", "9-12"], label="🎓 Grade Level", value="6-8", interactive=True ) question_count = gr.Slider( minimum=3, maximum=20, step=1, value=5, label="❓ Number of Questions" ) question_types = gr.CheckboxGroup( choices=["Multiple Choice", "True/False", "Short Answer", "Fill in the Blank"], label="📋 Question Types", value=["Multiple Choice", "True/False"] ) generate_quiz_btn = gr.Button("🚀 Generate Quiz", variant="primary") gr.HTML("
") with gr.Column(): # Modern Output Section gr.HTML("""

📋 Generated Quiz

""") quiz_progress_display = gr.HTML(visible=False) quiz_output = gr.Markdown(label="") gr.HTML("
") # Event handlers generate_quiz_btn.click( fn=generate_quiz_with_progress, inputs=[quiz_topic, quiz_subject, quiz_grade_level, question_count, question_types], outputs=[quiz_progress_display, quiz_output], show_progress=True ) # Examples gr.Examples( examples=[ ["Photosynthesis", "Science", "6-8", 5, ["Multiple Choice", "True/False"]], ["World War II", "History", "9-12", 8, ["Multiple Choice", "Short Answer"]], ["Basic Algebra", "Mathematics", "3-5", 4, ["Multiple Choice", "Fill in the Blank"]] ], inputs=[quiz_topic, quiz_subject, quiz_grade_level, question_count, question_types] ) with gr.TabItem("🧮 MathMind"): with gr.Row(): with gr.Column(scale=2): # Modern Chat Interface gr.HTML("""

🧮 MathMind - Interactive Math Tutor

Your personal AI math tutor with memory, context awareness, and interactive learning

""") # Grade Level Selection mathmind_grade = gr.Dropdown( choices=["K-2 (Ages 5-7)", "3-5 (Ages 8-10)", "6-8 (Ages 11-13)", "9-12 (Ages 14-18)", "College (Ages 18+)"], label="🎓 Grade Level", value="6-8 (Ages 11-13)", interactive=True ) # Chat Interface with LaTeX rendering mathmind_chatbot = gr.Chatbot( label="💬 Chat with MathMind", height=400, show_label=True, container=True, latex_delimiters=[ {"left": "$", "right": "$", "display": False}, {"left": "$$", "right": "$$", "display": True} ] ) with gr.Row(): mathmind_msg = gr.Textbox( label="Your Message", placeholder="Ask me anything about math! For example: 'Can you explain fractions?' or 'Help me solve 2x + 5 = 15'", lines=2, scale=4 ) mathmind_send = gr.Button("Send 💬", variant="primary", scale=1) with gr.Row(): mathmind_clear = gr.Button("🗑️ Clear Chat", variant="secondary") mathmind_examples = gr.Button("💡 Example Questions", variant="secondary") with gr.Column(scale=1): # PDF Context Management gr.HTML("""

📚 PDF Context

Upload math PDFs for additional context

""") mathmind_pdf_upload = gr.File( label="📄 Add Math PDFs (Multiple Selection Supported)", file_types=[".pdf"], type="binary", file_count="multiple" ) mathmind_pdf_status = gr.Textbox( label="📋 Upload Status", interactive=False, lines=3 ) mathmind_pdf_context = gr.HTML( label="📖 Loaded Documents", value="

No PDF context loaded

" ) mathmind_remove_all_pdf = gr.Button("🗑️ Remove All Documents", variant="secondary") # Quick Actions gr.HTML("""

🚀 Quick Actions

• Ask for step-by-step solutions

• Request visual diagrams

• Practice with examples

• Upload math PDFs for context

""") # Event Handlers mathmind_send.click( fn=mathmind_chat_with_typing, inputs=[mathmind_msg, mathmind_grade, mathmind_chatbot, mathmind_pdf_context], outputs=[mathmind_chatbot, mathmind_msg], show_progress=True ) mathmind_msg.submit( fn=mathmind_chat_with_typing, inputs=[mathmind_msg, mathmind_grade, mathmind_chatbot, mathmind_pdf_context], outputs=[mathmind_chatbot, mathmind_msg], show_progress=True ) mathmind_clear.click( fn=clear_mathmind_chat, outputs=[mathmind_chatbot, mathmind_msg] ) # PDF management functions def handle_multiple_pdf_upload(pdf_files): """Handle multiple PDF uploads""" if not pdf_files: return "No files uploaded", generate_context_display() # Handle both single file and multiple files files_to_process = pdf_files if isinstance(pdf_files, list) else [pdf_files] status_messages = [] processed_count = 0 for pdf_file in files_to_process: if pdf_file is not None: status, _ = update_pdf_context(pdf_file) if "successfully processed" in status: processed_count += 1 status_messages.append(status.split('\n')[0]) # Get first line only # Create combined status message if processed_count > 0: combined_status = f"✅ {processed_count} PDF(s) successfully processed!\n" + "\n".join(status_messages[-3:]) # Show last 3 messages else: combined_status = "❌ No files were successfully processed:\n" + "\n".join(status_messages[-3:]) return combined_status, generate_context_display() def handle_remove_all(): """Remove all files from context""" status, _ = remove_pdf_context() return status, generate_context_display() # Event handlers mathmind_pdf_upload.change( fn=handle_multiple_pdf_upload, inputs=[mathmind_pdf_upload], outputs=[mathmind_pdf_status, mathmind_pdf_context] ) mathmind_remove_all_pdf.click( fn=handle_remove_all, outputs=[mathmind_pdf_status, mathmind_pdf_context] ) # Example Questions Handler def show_example_questions(): examples = [ "Why is $\\pi$ such a big deal? What makes it so special?", "How do video games actually use math behind the scenes?", "Can you explain fractions using pizza slices? I love pizza!", "What's the coolest thing about triangles that most people don't know?", "Help me solve 2x + 5 = 15 step by step - and tell me why it works!", "How is math used in creating special effects in movies?", "What's the weirdest mathematical fact that will blow my mind?", "Can you show me a math magic trick that I can use to impress friends?", "How do architects use geometry when designing buildings?", "What's the connection between music and mathematics?" ] return [[f"🎯 Try this: {q}", f"Awesome choice! This is going to be fun! 🚀"] for q in examples[:5]] mathmind_examples.click( fn=show_example_questions, outputs=[mathmind_chatbot] ) # Welcome message def mathmind_welcome(): welcome_msg = """🎉 Hey there, future mathematician! Ready to discover how amazing math can be? **Let's get this math party started! 🎊** Try asking me something like: • "Why is $\\pi$ so special?" • "How do video games use math?" • "What's the coolest thing about triangles?" • "Can you help me with fractions using pizza?" Don't be shy - I LOVE curious questions! What's got you puzzled today? 🤓💡""" return [[None, welcome_msg]] # Load welcome message when tab loads demo.load( fn=mathmind_welcome, outputs=[mathmind_chatbot] ) with gr.TabItem("🔧 Admin Dashboard", visible=False): # Hidden by default with gr.Row(): with gr.Column(): gr.Markdown(""" ## 🔧 Admin Dashboard **Access Level**: Administrator Only **Database**: SQLite (feedback.db) **Last Updated**: Real-time """) refresh_btn = gr.Button("🔄 Refresh Data", variant="secondary") view_feedback_btn = gr.Button("📊 View All Feedback", variant="primary") with gr.Column(): admin_output = gr.Markdown(label="Admin Data") # Event handlers for admin def format_stats(): stats = get_feedback_stats() result = f""" # 📊 Feedback Statistics **Total Feedback Entries:** {stats['total_feedback']} **Average Rating:** {stats['avg_rating']}/5 ⭐ **Database Status:** ✅ Active ## Recent Feedback (Last 5) """ for entry in stats['recent_feedback']: timestamp, feedback_type, rating, comments, user_email = entry result += f""" ### Entry from {timestamp} - **Type:** {feedback_type} - **Rating:** {rating}/5 ⭐ - **Comments:** {comments[:100]}{'...' if len(comments) > 100 else ''} - **Email:** {user_email if user_email else 'Not provided'} --- """ return result refresh_btn.click( fn=format_stats, outputs=admin_output ) view_feedback_btn.click( fn=view_feedback_admin, outputs=admin_output ) if __name__ == "__main__": demo.launch()