import gradio as gr
import os
import json
import requests
import smtplib
import sqlite3
from datetime import datetime
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from algebra_solver import AlgebraSolver
# --- Runtime configuration, read once at import time ---

# Get Hugging Face token from environment variable
HF_TOKEN = os.getenv("HF_TOKEN")

# Database configuration
DATABASE_FILE = "feedback.db"

# Email configuration (private)
FEEDBACK_EMAIL = "mahdi@brightmind-ai.com"
SMTP_SERVER = "smtp.gmail.com"
SMTP_PORT = 587
SMTP_USERNAME = os.getenv("SMTP_USERNAME", "")
SMTP_PASSWORD = os.getenv("SMTP_PASSWORD", "")

# Debug token detection — emitted at startup so deployment logs show whether
# the HF token / SMTP credentials were injected (secret values themselves are
# never printed, only presence/length/prefix).
print(f"🔍 Token detection debug:")
print(f" - HF_TOKEN exists: {HF_TOKEN is not None}")
print(f" - HF_TOKEN length: {len(HF_TOKEN) if HF_TOKEN else 0}")
print(f" - HF_TOKEN starts with hf_: {HF_TOKEN.startswith('hf_') if HF_TOKEN else False}")
print(f" - SMTP configured: {SMTP_USERNAME != '' and SMTP_PASSWORD != ''}")
def generate_lesson_plan_with_progress(topic, subject, grade_level, duration, difficulty="intermediate"):
    """Generate a lesson plan with progress updates.

    Generator used by the UI: each ``yield`` is a ``(status_html, plan_text)``
    pair so the front end can show a progress banner while the plan is being
    produced.  Tries the Hugging Face API first when ``HF_TOKEN`` is set;
    on any API failure (or missing token) it falls back to the local
    educational algorithms.  ``call_hugging_face_api_content`` is defined
    elsewhere in this module.
    """
    # Show progress bar
    progress_html = """
🤖 Generating your lesson plan...
Analyzing requirements and creating content...
"""
    yield progress_html, ""
    print(f"🚀 Starting lesson plan generation for: {topic}")
    print(f"📚 Subject: {subject}, Grade: {grade_level}, Duration: {duration}min")
    # Try Hugging Face API first if token is available
    if HF_TOKEN:
        print("🤖 Token found! Attempting Hugging Face API call...")
        try:
            prompt = f"""Create a comprehensive lesson plan for:
Topic: {topic}
Subject: {subject}
Grade Level: {grade_level}
Duration: {duration} minutes
Difficulty: {difficulty}
Include:
1. Learning objectives
2. Activities with time allocations
3. Materials needed
4. Assessment methods
5. Differentiation strategies
6. Educational standards
7. Real-world connections
8. Extension activities
Make it practical, engaging, and ready for classroom use."""
            result = call_hugging_face_api_content(prompt)
            if result:
                print("✅ Successfully generated with Hugging Face API")
                # Show completion and return result
                completion_html = """
✅ Lesson Plan Generated Successfully!
"""
                yield completion_html, result
            else:
                print("❌ API call returned no content, falling back to algorithms")
                # Deliberate raise: routes control into the except branch below,
                # which performs the algorithmic fallback.
                raise Exception("No content returned from API")
        except Exception as e:
            print(f"❌ Hugging Face API failed: {e}")
            print("🔄 Falling back to educational algorithms...")
            # Show fallback progress
            fallback_html = """
🔄 Using educational algorithms...
"""
            yield fallback_html, ""
            result = generate_with_algorithms(topic, subject, grade_level, duration, difficulty)
            print("✅ Generated with educational algorithms")
            # Show completion
            completion_html = """
✅ Lesson Plan Generated with Educational Algorithms!
"""
            yield completion_html, result
    else:
        print("⚠️ No Hugging Face token found, using educational algorithms...")
        # Show progress for algorithms
        algo_html = """
📚 Using educational algorithms...
"""
        yield algo_html, ""
        result = generate_with_algorithms(topic, subject, grade_level, duration, difficulty)
        print("✅ Generated with educational algorithms")
        # Show completion
        completion_html = """
✅ Lesson Plan Generated Successfully!
"""
        yield completion_html, result
def generate_lesson_plan(topic, subject, grade_level, duration, difficulty="intermediate"):
    """Generate a lesson plan using Hugging Face API or fallback algorithms.

    Non-streaming variant: returns the finished plan text directly.  The API
    path is attempted only when ``HF_TOKEN`` is set; any API error drops
    through to the algorithmic fallback.
    """
    print(f"🚀 Starting lesson plan generation for: {topic}")
    print(f"📚 Subject: {subject}, Grade: {grade_level}, Duration: {duration}min")
    if not HF_TOKEN:
        print("⚠️ No Hugging Face token found, using educational algorithms...")
    else:
        print("🤖 Token found! Attempting Hugging Face API call...")
        try:
            plan = generate_with_hugging_face(topic, subject, grade_level, duration, difficulty)
        except Exception as err:
            print(f"❌ Hugging Face API failed: {err}")
            print("🔄 Falling back to educational algorithms...")
        else:
            print("✅ Successfully generated with Hugging Face API")
            return plan
    # Fallback to educational algorithms
    plan = generate_with_algorithms(topic, subject, grade_level, duration, difficulty)
    print("✅ Generated with educational algorithms")
    return plan
def typewriter_effect(text, status_message):
    """Stream *text* one character at a time as a typewriter-style reveal.

    Generator yielding ``(html, final_text)`` pairs: first a header frame,
    then one frame per character (with a trailing ``|`` cursor), and finally
    ``("", text)`` so the caller can swap in the plain content.
    """
    import time
    header = f"""
📖{status_message}
"""
    yield header, ""
    shown = []
    for ch in text:
        shown.append(ch)
        if ch == '\n':
            shown.append(" ")  # Add indentation for new lines
        frame = f"""
📖{status_message}
{''.join(shown)}|
"""
        yield frame, ""
        time.sleep(0.02)  # typing speed
    # Final result without cursor - just return the text content
    yield "", text
def generate_content_with_progress(topic, subject, grade_level, difficulty, content_type, content_length):
    """Generate educational content with progress updates.

    Generator for the UI: yields ``(status_html, content_markdown)`` pairs.
    When ``HF_TOKEN`` is set, builds an agent prompt and calls the Hugging
    Face API; on failure or empty response it falls back to a built-in
    template (worksheet-specific or generic) enhanced with multimedia
    placeholders.  Relies on helpers defined elsewhere in this module:
    ``create_content_agent``, ``call_hugging_face_api_content``,
    ``process_content_with_images``, ``generate_real_image``.
    """
    # Show progress bar
    progress_html = """
📝 Generating your educational content...
Creating materials tailored to your specifications...
"""
    yield progress_html, ""
    print(f"🚀 Starting content generation for: {topic}")
    print(f"📚 Subject: {subject}, Grade: {grade_level}, Type: {content_type}")
    # Try Hugging Face API first if token is available
    if HF_TOKEN:
        print("🤖 Token found! Attempting Hugging Face API call for content generation...")
        print(f"🔑 Token length: {len(HF_TOKEN)}")
        print(f"🔑 Token starts with hf_: {HF_TOKEN.startswith('hf_')}")
        try:
            # Use the enhanced content agent
            print("🎨 Creating content generation agent...")
            agent_prompt = create_content_agent(topic, subject, grade_level, difficulty, content_type, content_length)
            print(f"📝 Sending agent prompt to API...")
            ai_content = call_hugging_face_api_content(agent_prompt, content_length)
            if ai_content:
                print("✅ Content generation API call successful!")
                print(f"📄 Generated content length: {len(ai_content)} characters")
                # Process content with image placeholders and multimedia
                print("🖼️ Processing content with multimedia elements...")
                print(f"📄 Content before processing: {len(ai_content)} characters")
                enhanced_content = process_content_with_images(ai_content, topic, content_type)
                print(f"📄 Enhanced content length: {len(enhanced_content)} characters")
                print(f"📄 First 200 chars: {enhanced_content[:200]}...")
                print(f"📄 Last 200 chars: ...{enhanced_content[-200:]}")
                yield "", enhanced_content
                return
            else:
                print("❌ Content generation API call returned no content")
                print("🔄 Falling back to template content...")
        except Exception as e:
            print(f"❌ Content generation API error: {str(e)}")
            print("🔄 Falling back to template content...")
    else:
        print("❌ No HF_TOKEN available for content generation")
        print("🔄 Using fallback content...")
    # Enhanced fallback content with proper text rendering
    if content_type == "Worksheets":
        # NOTE(review): this template contains a second, algebra/science-specific
        # "Learning Objectives" and "Self-Assessment" section (likely copy-paste
        # leftovers) — confirm intent before trimming.
        fallback_content = f"""# **{topic} in {subject}: Educational Worksheet**
**Grade Level:** {grade_level} | **Subject:** {subject} | **Difficulty:** {difficulty}
---
## **Introduction**
{topic} is an important topic in {subject} that helps students develop critical thinking and problem-solving skills. In this worksheet, you'll explore {topic} through engaging activities and real-world examples.
---
## **Learning Objectives**
By the end of this worksheet, you will be able to:
- Understand the basic concepts of {topic}
- Apply {topic} knowledge to solve problems
- Connect {topic} to real-world situations
- Demonstrate understanding through assessment activities
{generate_real_image(f"Visual introduction to {topic} concepts", topic, content_type)}
---
## **Learning Objectives**
By the end of this worksheet, you will be able to:
- Use variables to represent unknown quantities in science problems
- Solve equations related to speed, density, and chemical reactions
- Translate word problems into algebraic equations
- Check your solutions for accuracy
✅ **Self-check:** Rate your confidence (1-5) before starting:
---
## **Practice Activities**
### **1. Basic Concepts**
Test your understanding of {topic} with these questions.
**Example Question:** What is the most important aspect of {topic}?
**Answer:** [Write your answer here]
**Practice Questions:**
1. How does {topic} relate to {subject}?
**Answer:** _________________________________
2. What are the key characteristics of {topic}?
**Answer:** _________________________________
3. Can you give an example of {topic} in real life?
**Answer:** _________________________________
---
## **Application Problems**
### **2. Real-World Connections**
Apply your knowledge of {topic} to solve these problems.
**Problem 1:** Describe how {topic} is used in {subject}.
**Solution:** _________________________________
**Problem 2:** What would happen if {topic} didn't exist?
**Solution:** _________________________________
{generate_real_image(f"Diagram showing {topic} key concepts", topic, content_type)}
---
## **Critical Thinking Questions**
### **3. Analysis and Evaluation**
Think critically about {topic} and answer these questions.
1. **Analysis:** What are the main components of {topic}?
**Your Answer:** _________________________________
2. **Evaluation:** How important is {topic} in {subject}?
**Your Answer:** _________________________________
3. **Synthesis:** How does {topic} connect to other topics in {subject}?
**Your Answer:** _________________________________
---
## **Self-Assessment**
Rate your understanding of {topic}:
- I can explain the basic concepts of {topic}
- I can identify examples of {topic} in real life
- I can apply {topic} knowledge to solve problems
- I can connect {topic} to other subjects
**Reflection:** What was the most interesting thing you learned about {topic}?
**Your Answer:** _________________________________
{generate_real_image(f"Real-world examples of {topic} in action", topic, content_type)}
---
## **Self-Assessment**
✅ **Rate your understanding (1-5):**
- I can set up equations from word problems.
- I can solve for unknown variables.
- I can check my answers for accuracy.
📌 **Reflection:** What was the hardest part?
{generate_real_image(f"Visual assessment rubric for {topic}", topic, content_type)}
---
## **Answer Key**
### **Practice Activities**
1. **Answer:** {topic} relates to {subject} by [provide specific connection]
2. **Answer:** Key characteristics include [list main features]
3. **Answer:** Real-life example: [provide concrete example]
### **Application Problems**
1. **Answer:** {topic} is used in {subject} to [explain application]
2. **Answer:** Without {topic}, [explain consequences]
### **Critical Thinking Questions**
1. **Answer:** Main components are [list components]
2. **Answer:** Importance level: [explain significance]
3. **Answer:** Connections include [list related topics]
---
### **📢 Teacher's Notes**
- **Extension Activity:** Have students research additional examples of {topic} in {subject}
- **Assessment:** Use the self-assessment section to gauge student understanding
- **Differentiation:** Provide additional resources for advanced students
{generate_real_image(f"Summary chart of {topic} concepts", topic, content_type)}
---
**Total Words:** ~1,200 | **Sections:** 6 | **Format:** Educational Worksheet
✅ **Worksheet Complete!** Ready for print or digital use."""
    else:
        # Generic fallback for other content types
        fallback_content = f"""# {content_type}: {topic}
## Learning Objectives
By the end of this {content_type.lower()}, students will be able to:
- Understand the basic concepts of {topic}
- Apply knowledge through practical exercises
- Demonstrate comprehension through assessment
## Grade Level: {grade_level}
## Subject: {subject}
## Difficulty: {difficulty}
---
## Introduction
This {content_type.lower()} is designed for {grade_level} students studying {subject}. The content focuses on {topic} at a {difficulty.lower()} level.
{generate_real_image(f"Visual introduction to {topic} concepts", topic, content_type)}
## Main Content
### Section 1: Understanding {topic}
[Content will be generated based on the specific topic and grade level]
{generate_real_image(f"Diagram showing {topic} key concepts", topic, content_type)}
### Section 2: Practical Applications
[Real-world examples and applications]
{generate_real_image(f"Real-world examples of {topic} in action", topic, content_type)}
### Section 3: Activities and Exercises
[Hands-on activities appropriate for {grade_level}]
{generate_real_image(f"Step-by-step activity instructions for {topic}", topic, content_type)}
## Assessment
[Questions and exercises to test understanding]
{generate_real_image(f"Visual assessment rubric for {topic}", topic, content_type)}
## Additional Resources
[Links and references for further learning]
---
*Generated for {grade_level} {subject} students - {difficulty} level*"""
    # Process fallback content with multimedia elements
    enhanced_fallback = process_content_with_images(fallback_content, topic, content_type)
    # Yield the final content (this will replace the progress bar)
    yield "", enhanced_fallback
def generate_quiz_with_progress(topic, subject, grade_level, question_count, question_types):
    """Generate a quiz with progress updates.

    Generator for the UI: each ``yield`` is a ``(status_html, quiz_text)``
    pair.  Tries the Hugging Face API when ``HF_TOKEN`` is set; any API
    failure (or missing token) falls back to the local quiz algorithms.
    """
    # Show progress bar
    progress_html = """
🎯 Generating your quiz...
Creating questions and answers...
"""
    yield progress_html, ""
    print(f"🎯 Starting quiz generation for: {topic}")
    print(f"📚 Subject: {subject}, Grade: {grade_level}, Questions: {question_count}")
    print(f"📝 Question types: {', '.join(question_types)}")
    # Try Hugging Face API first if token is available
    if HF_TOKEN:
        print("🤖 Token found! Attempting Hugging Face API call for quiz...")
        try:
            prompt = f"""Create a comprehensive quiz about "{topic}" for {grade_level} students studying {subject}.
Requirements:
- Generate exactly {question_count} questions
- Include these question types: {', '.join(question_types)}
- Make questions appropriate for {grade_level} grade level
- Ensure questions are specific to "{topic}"
- Include correct answers and explanations
- Format as a structured quiz with clear questions, options, and explanations
- Make it engaging and educational
Format the response as a structured quiz with clear questions, options, and explanations."""
            result = call_hugging_face_api_content(prompt)
            if result:
                print("✅ Successfully generated quiz with Hugging Face API")
                # Show completion and return result
                completion_html = """
✅ Quiz Generated Successfully!
"""
                yield completion_html, result
            else:
                print("❌ API call returned no content, falling back to algorithms")
                # Deliberate raise: routes control into the except branch below.
                raise Exception("No content returned from API")
        except Exception as e:
            print(f"❌ Hugging Face API failed: {e}")
            print("🔄 Falling back to educational algorithms...")
            # Show fallback progress
            fallback_html = """
🔄 Using educational algorithms...
"""
            yield fallback_html, ""
            result = generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types)
            print("✅ Generated quiz with educational algorithms")
            # Show completion
            completion_html = """
✅ Quiz Generated with Educational Algorithms!
"""
            yield completion_html, result
    else:
        print("⚠️ No Hugging Face token found, using educational algorithms...")
        # Show progress for algorithms
        algo_html = """
🎯 Using educational algorithms...
"""
        yield algo_html, ""
        result = generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types)
        print("✅ Generated quiz with educational algorithms")
        # Show completion
        completion_html = """
✅ Quiz Generated Successfully!
"""
        yield completion_html, result
def generate_quiz(topic, subject, grade_level, question_count, question_types):
    """Generate a quiz using Hugging Face API or fallback algorithms.

    Non-streaming variant: returns the finished quiz text directly.  The API
    path is attempted only when ``HF_TOKEN`` is set; any API error drops
    through to the algorithmic fallback.
    """
    print(f"🎯 Starting quiz generation for: {topic}")
    print(f"📚 Subject: {subject}, Grade: {grade_level}, Questions: {question_count}")
    print(f"📝 Question types: {', '.join(question_types)}")
    if not HF_TOKEN:
        print("⚠️ No Hugging Face token found, using educational algorithms...")
    else:
        print("🤖 Token found! Attempting Hugging Face API call for quiz...")
        try:
            quiz = generate_quiz_with_hugging_face(topic, subject, grade_level, question_count, question_types)
        except Exception as err:
            print(f"❌ Hugging Face API failed: {err}")
            print("🔄 Falling back to educational algorithms...")
        else:
            print("✅ Successfully generated quiz with Hugging Face API")
            return quiz
    # Fallback to educational algorithms
    quiz = generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types)
    print("✅ Generated quiz with educational algorithms")
    return quiz
def generate_with_hugging_face(topic, subject, grade_level, duration, difficulty):
    """Generate lesson plan using Hugging Face Inference Providers API.

    Raises Exception with a descriptive message on timeout, network failure,
    non-200 response, or empty/too-short content; callers catch it and fall
    back to the local algorithms.
    """
    print(f"🌐 Making API call to Hugging Face Inference Providers...")
    print(f"🔗 Endpoint: https://router.huggingface.co/v1/chat/completions")
    print(f"🔑 Token available: {HF_TOKEN is not None}")
    prompt = f"""Create a comprehensive lesson plan for:
Topic: {topic}
Subject: {subject}
Grade Level: {grade_level}
Duration: {duration} minutes
Difficulty: {difficulty}
Include:
1. Learning objectives
2. Activities with time allocations
3. Materials needed
4. Assessment methods
5. Differentiation strategies
6. Educational standards
Format as a structured lesson plan."""
    print(f"📝 Prompt length: {len(prompt)}")
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "deepseek-ai/DeepSeek-V3-0324",
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "max_tokens": 4000,
        "temperature": 0.7
    }
    print(f"📡 Sending API request...")
    # Keep the try narrow: only the network call can raise requests errors.
    # (Previously one large try ended in a broad `except Exception` that
    # re-caught this function's own deliberate raises — e.g. "API request
    # failed" — and relabeled them "Unexpected error: ...", masking the real
    # failure reason in the logs.)
    try:
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=30
        )
    except requests.exceptions.Timeout:
        print("⏰ API request timed out, falling back to algorithms")
        raise Exception("API request timeout")
    except requests.exceptions.RequestException as e:
        print(f"🌐 Network error: {e}, falling back to algorithms")
        raise Exception(f"Network error: {e}")
    print(f"📡 API Response Status: {response.status_code}")
    if response.status_code != 200:
        error_text = response.text
        print(f"❌ API Error: {response.status_code} - {error_text}")
        raise Exception(f"API request failed: {response.status_code} - {error_text}")
    result = response.json()
    print(f"✅ API Response received: {type(result)}")
    print(f"📄 Response content length: {len(str(result))}")
    # OpenAI-compatible response shape: choices[0].message.content
    if "choices" in result and len(result["choices"]) > 0:
        content = result["choices"][0]["message"]["content"]
    else:
        content = ""
    print(f"📝 Generated content length: {len(content)}")
    if content and len(content) > 50:  # Ensure we got meaningful content
        return format_lesson_plan(content, topic, subject, grade_level, duration, True)
    print("⚠️ API returned empty or too short content, falling back to algorithms")
    raise Exception("Empty or insufficient content from API")
def _parse_grade_level(grade_level, default=1):
    """Best-effort numeric grade from labels like "3", "6-8", or "K-2".

    Takes the first dash-separated token and keeps only its digits; fully
    non-numeric labels (e.g. "K") fall back to *default* instead of raising
    ValueError the way a bare int() conversion did.
    """
    token = str(grade_level).split('-')[0]
    digits = ''.join(ch for ch in token if ch.isdigit())
    return int(digits) if digits else default

def generate_with_algorithms(topic, subject, grade_level, duration, difficulty):
    """Generate lesson plan using educational algorithms (no API needed).

    Assembles objectives, activities, materials, assessment, and
    differentiation from the grade-banded helpers below, then renders the
    result via format_lesson_plan_from_dict.
    """
    grade = _parse_grade_level(grade_level)
    # Generate objectives based on Bloom's Taxonomy
    objectives = generate_objectives(topic, grade)
    # Generate activities
    activities = generate_activities(topic, int(duration), grade)
    # Generate materials
    materials = generate_materials(subject, grade)
    # Generate assessment
    assessment = generate_assessment(grade)
    # Generate differentiation
    differentiation = generate_differentiation()
    lesson_plan = {
        "topic": topic,
        "subject": subject,
        "grade_level": grade_level,
        "duration": duration,
        "difficulty": difficulty,
        "objectives": objectives,
        "activities": activities,
        "materials": materials,
        "assessment": assessment,
        "differentiation": differentiation,
        "ai_generated": False,
        "generated_at": datetime.now().strftime("%Y-%m-%d")
    }
    return format_lesson_plan_from_dict(lesson_plan)
def generate_objectives(topic, grade):
    """Return grade-banded learning objectives for *topic*.

    Bands follow a Bloom's-taxonomy progression: K-2 identify/demonstrate,
    3-5 explain/apply/analyze, 6-8 analyze/evaluate/synthesize, 9+ create.
    """
    bands = (
        (2, [
            f"Students will identify key concepts about {topic}",
            "Students will demonstrate understanding through hands-on activities",
        ]),
        (5, [
            f"Students will explain the main concepts of {topic}",
            "Students will apply knowledge through practical exercises",
            f"Students will analyze information about {topic}",
        ]),
        (8, [
            f"Students will analyze and evaluate information about {topic}",
            "Students will synthesize knowledge through creative projects",
            "Students will demonstrate critical thinking skills",
        ]),
    )
    for ceiling, objectives in bands:
        if grade <= ceiling:
            return objectives
    return [
        f"Students will critically analyze complex concepts in {topic}",
        "Students will synthesize information from multiple sources",
        "Students will create original work demonstrating mastery",
    ]
def generate_activities(topic, duration, grade):
    """Build the lesson's activity sequence with time allocations.

    Time budget: ~15% introduction (minimum 5 minutes), 60% main work
    (split by grade band), 20% practice.
    """
    intro_minutes = max(5, int(duration * 0.15))
    main_minutes = int(duration * 0.6)
    practice_minutes = int(duration * 0.2)

    activities = [{
        "name": f"Introduction to {topic}",
        "duration": f"{intro_minutes} minutes",
        "description": f"Engage students with a hook activity related to {topic}"
    }]

    # Main work block varies by grade band.
    if grade <= 2:
        main_block = [{
            "name": "Hands-on Exploration",
            "duration": f"{main_minutes} minutes",
            "description": f"Interactive exploration of {topic} through manipulatives and visual aids"
        }]
    elif grade <= 5:
        main_block = [
            {
                "name": "Guided Discovery",
                "duration": f"{int(main_minutes * 0.6)} minutes",
                "description": f"Structured exploration of {topic} with teacher guidance"
            },
            {
                "name": "Collaborative Learning",
                "duration": f"{int(main_minutes * 0.4)} minutes",
                "description": f"Group work to deepen understanding of {topic}"
            },
        ]
    else:
        main_block = [
            {
                "name": "Independent Research",
                "duration": f"{int(main_minutes * 0.4)} minutes",
                "description": f"Students research aspects of {topic} independently"
            },
            {
                "name": "Discussion and Analysis",
                "duration": f"{int(main_minutes * 0.6)} minutes",
                "description": f"Class discussion analyzing different perspectives on {topic}"
            },
        ]
    activities.extend(main_block)

    activities.append({
        "name": "Practice and Application",
        "duration": f"{practice_minutes} minutes",
        "description": f"Students apply their knowledge through exercises related to {topic}"
    })
    return activities
def generate_materials(subject, grade):
    """List suggested classroom materials: base items, a grade-band set,
    then subject-specific extras (matched case-insensitively)."""
    materials = ["Whiteboard or chart paper", "Markers or chalk"]
    if grade <= 2:
        band_items = ["Visual aids and pictures", "Manipulatives or hands-on objects", "Colored pencils and paper"]
    elif grade <= 5:
        band_items = ["Textbooks or reference materials", "Worksheets or activity sheets", "Art supplies for projects"]
    elif grade <= 8:
        band_items = ["Research materials (books, articles)", "Technology devices (if available)", "Presentation materials"]
    else:
        band_items = ["Advanced reference materials", "Technology for research and presentation", "Writing materials for essays or reports"]
    materials += band_items
    # Subject-specific materials
    subject_extras = {
        "science": ["Science equipment or models", "Safety materials (if needed)"],
        "math": ["Calculators (if appropriate)", "Graph paper or rulers"],
        "history": ["Historical documents or primary sources", "Maps or timelines"],
    }
    materials += subject_extras.get(subject.lower(), [])
    return materials
def generate_assessment(grade):
    """Return assessment methods appropriate for the grade band."""
    bands = (
        (2, ["Observation of student participation", "Simple oral questions and answers", "Drawing or visual representation of learning"]),
        (5, ["Quick quiz or exit ticket", "Student presentations or demonstrations", "Portfolio of completed work"]),
        (8, ["Written reflection or journal entry", "Group project evaluation", "Peer assessment activities"]),
    )
    for ceiling, methods in bands:
        if grade <= ceiling:
            return methods
    return ["Essay or written analysis", "Research project presentation", "Peer review and self-assessment"]
def generate_differentiation():
    """Return differentiation strategies keyed by learner group.

    Keys: "struggling", "advanced", "ell" (English-language learners).
    """
    strategies = {}
    strategies["struggling"] = [
        "Provide additional visual aids and examples",
        "Break down complex concepts into smaller parts",
        "Offer one-on-one support during activities",
        "Use simplified language and vocabulary",
    ]
    strategies["advanced"] = [
        "Provide extension activities and challenges",
        "Encourage independent research and exploration",
        "Assign leadership roles in group activities",
        "Offer opportunities for creative expression",
    ]
    strategies["ell"] = [
        "Use visual supports and gestures",
        "Provide vocabulary lists and definitions",
        "Pair with native speakers for support",
        "Use multimedia resources when available",
    ]
    return strategies
def format_lesson_plan(content, topic, subject, grade_level, duration, ai_generated):
    """Render an AI-produced lesson plan body inside the standard
    markdown header/footer frame used across the app."""
    generated_on = datetime.now().strftime("%Y-%m-%d")
    lines = [
        "",
        f"# Lesson Plan: {topic}",
        f"**Subject:** {subject}",
        f"**Grade Level:** {grade_level}",
        f"**Duration:** {duration} minutes",
        f"**Generated:** {generated_on}",
        f"**AI Generated:** {'Yes' if ai_generated else 'No'}",
        "---",
        "## Content",
        f"{content}",
        "---",
        "*Generated by BrightMind AI - Educational Technology Platform*",
        "",
    ]
    return "\n".join(lines)
def format_lesson_plan_from_dict(lesson_plan):
    """Render a lesson-plan dict (as built by generate_with_algorithms)
    into the app's standard markdown layout."""
    parts = [f"""
# Lesson Plan: {lesson_plan['topic']}
**Subject:** {lesson_plan['subject']}
**Grade Level:** {lesson_plan['grade_level']}
**Duration:** {lesson_plan['duration']} minutes
**Difficulty:** {lesson_plan['difficulty']}
**Generated:** {lesson_plan['generated_at']}
**AI Generated:** {'Yes' if lesson_plan['ai_generated'] else 'No'}
---
## Learning Objectives
"""]
    parts.extend(f"{idx}. {obj}\n" for idx, obj in enumerate(lesson_plan['objectives'], 1))
    parts.append("\n## Activities\n\n")
    for act in lesson_plan['activities']:
        parts.append(f"**{act['name']}** ({act['duration']})\n")
        parts.append(f"{act['description']}\n\n")
    parts.append("## Materials Needed\n\n")
    parts.extend(f"• {item}\n" for item in lesson_plan['materials'])
    parts.append("\n## Assessment Methods\n\n")
    parts.extend(f"{idx}. {method}\n" for idx, method in enumerate(lesson_plan['assessment'], 1))
    parts.append("\n## Differentiation Strategies\n\n")
    for group, tips in lesson_plan['differentiation'].items():
        parts.append(f"**{group.title()} Learners:**\n")
        parts.extend(f"• {tip}\n" for tip in tips)
        parts.append("\n")
    parts.append("\n---\n*Generated by BrightMind AI - Educational Technology Platform*")
    return "".join(parts)
def generate_quiz_with_hugging_face(topic, subject, grade_level, question_count, question_types):
    """Generate quiz using Hugging Face Inference Providers API.

    Raises Exception with a descriptive message on timeout, network failure,
    non-200 response, or empty/too-short content so callers can fall back to
    the local quiz algorithms.
    """
    print(f"🌐 Making API call to Hugging Face Inference Providers for quiz...")
    print(f"🔗 Endpoint: https://router.huggingface.co/v1/chat/completions")
    print(f"🔑 Token available: {HF_TOKEN is not None}")
    prompt = f"""Create a quiz about "{topic}" for {grade_level} students studying {subject}.
Requirements:
- Generate exactly {question_count} questions
- Include these question types: {', '.join(question_types)}
- Make questions appropriate for {grade_level} grade level
- Ensure questions are specific to "{topic}"
- Include correct answers and explanations
Format the response as a structured quiz with clear questions, options, and explanations."""
    print(f"📝 Prompt length: {len(prompt)}")
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json"
    }
    data = {
        "model": "deepseek-ai/DeepSeek-V3-0324",
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "max_tokens": 4000,
        "temperature": 0.7
    }
    print(f"📡 Sending API request...")
    # Keep the try narrow: only the network call can raise requests errors.
    # (Previously one large try ended in a broad `except Exception` that
    # re-caught this function's own deliberate raises and relabeled them
    # "Unexpected error: ...", masking the real failure reason in the logs.)
    try:
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=30
        )
    except requests.exceptions.Timeout:
        print("⏰ API request timed out, falling back to algorithms")
        raise Exception("API request timeout")
    except requests.exceptions.RequestException as e:
        print(f"🌐 Network error: {e}, falling back to algorithms")
        raise Exception(f"Network error: {e}")
    print(f"📡 API Response Status: {response.status_code}")
    if response.status_code != 200:
        error_text = response.text
        print(f"❌ API Error: {response.status_code} - {error_text}")
        raise Exception(f"API request failed: {response.status_code} - {error_text}")
    result = response.json()
    print(f"✅ API Response received: {type(result)}")
    print(f"📄 Response content length: {len(str(result))}")
    # OpenAI-compatible response shape: choices[0].message.content
    if "choices" in result and len(result["choices"]) > 0:
        content = result["choices"][0]["message"]["content"]
    else:
        content = ""
    print(f"📝 Generated content length: {len(content)}")
    if content and len(content) > 50:  # Ensure we got meaningful content
        return format_quiz(content, topic, subject, grade_level, question_count, True)
    print("⚠️ API returned empty or too short content, falling back to algorithms")
    raise Exception("Empty or insufficient content from API")
def generate_quiz_with_algorithms(topic, subject, grade_level, question_count, question_types):
    """Generate quiz using educational algorithms (no API needed).

    Cycles through the requested question types so each is represented,
    then renders the quiz dict via format_quiz_from_dict.
    """
    # Robust grade parse: first dash-separated token, digits only.  Labels
    # like "K-2" fall back to grade 1 instead of raising ValueError the way
    # a bare int() conversion did.
    token = str(grade_level).split('-')[0]
    digits = ''.join(ch for ch in token if ch.isdigit())
    grade = int(digits) if digits else 1
    count = int(question_count)
    questions = [
        generate_question_by_type(question_types[i % len(question_types)], topic, subject, grade, i + 1)
        for i in range(count)
    ]
    quiz = {
        "topic": topic,
        "subject": subject,
        "grade_level": grade_level,
        "question_count": count,
        "questions": questions,
        "generated_at": datetime.now().strftime("%Y-%m-%d"),
        "ai_generated": False
    }
    return format_quiz_from_dict(quiz)
def generate_question_by_type(question_type, topic, subject, grade, question_number):
    """Dispatch to the builder for *question_type*.

    Unrecognized types fall back to multiple choice.
    """
    builders = {
        "Multiple Choice": generate_multiple_choice_question,
        "True/False": generate_true_false_question,
        "Short Answer": generate_short_answer_question,
        "Fill in the Blank": generate_fill_in_blank_question,
    }
    builder = builders.get(question_type, generate_multiple_choice_question)
    return builder(topic, subject, grade, question_number)
def generate_multiple_choice_question(topic, subject, grade, question_number):
    """Build one templated multiple-choice question about *topic*.

    Cycles through three templates by question number.  In every template
    the first option (A) is the correct one.
    """
    variants = (
        (f"What is the main concept of {topic}?",
         [f"A fundamental principle in {subject}",
          "A type of animal",
          "A mathematical formula",
          "A historical event"],
         f"This question tests basic understanding of {topic} in the context of {subject}."),
        (f"Which of the following best describes {topic}?",
         ["A complex system with multiple components",
          "A simple process with one step",
          "A type of building material",
          "A musical instrument"],
         f"{topic} involves multiple interconnected elements that work together."),
        (f"How does {topic} relate to {subject}?",
         [f"It provides a foundation for understanding {subject} concepts",
          "It is unrelated to the subject",
          "It only applies to advanced students",
          "It is only theoretical"],
         f"{topic} helps students understand broader concepts in {subject}."),
    )
    question, options, explanation = variants[question_number % len(variants)]
    return {
        "type": "Multiple Choice",
        "question": question,
        "options": options,
        "correct_answer": "A",
        "explanation": explanation,
    }
def generate_true_false_question(topic, subject, grade, question_number):
    """Build one templated true/false question about *topic*, cycling
    through three statement templates by question number."""
    variants = (
        (f"{topic} is an important concept in {subject}.", "True",
         f"{topic} is indeed a fundamental concept in {subject} that students need to understand."),
        (f"{topic} can only be learned through memorization.", "False",
         f"{topic} is best learned through understanding, practice, and application, not just memorization."),
        (f"Understanding {topic} helps students in other areas of {subject}.", "True",
         f"Knowledge of {topic} provides a foundation for understanding related concepts in {subject}."),
    )
    question, answer, explanation = variants[question_number % len(variants)]
    return {
        "type": "True/False",
        "question": question,
        "correct_answer": answer,
        "explanation": explanation,
    }
def generate_short_answer_question(topic, subject, grade, question_number):
    """Build one short-answer question dict, cycling templates by question_number.

    ``grade`` is accepted for interface symmetry but unused.
    """
    # Each entry: (prompt, expected-answer guidance, explanation).
    bank = (
        (
            f"Explain what {topic} means in your own words.",
            f"Student should demonstrate understanding of {topic} concepts",
            f"This question allows students to express their understanding of {topic} in their own words.",
        ),
        (
            f"How does {topic} relate to {subject}?",
            f"Student should explain the connection between {topic} and {subject}",
            "This question tests students' ability to make connections between concepts.",
        ),
        (
            f"What are the key components of {topic}?",
            f"Student should identify the main elements of {topic}",
            "This question tests students' ability to break down complex concepts.",
        ),
    )
    prompt, expected, why = bank[question_number % len(bank)]
    return {
        "type": "Short Answer",
        "question": prompt,
        "correct_answer": expected,
        "explanation": why,
    }
def generate_fill_in_blank_question(topic, subject, grade, question_number):
    """Build one fill-in-the-blank question dict, cycling templates.

    ``grade`` is accepted for interface symmetry but unused.
    """
    # Each entry: (sentence with blank, answer, explanation).
    bank = (
        (
            f"{topic} is important because it helps us understand ______.",
            f"{subject} concepts",
            f"{topic} provides a foundation for understanding broader {subject} concepts.",
        ),
        (
            f"The main purpose of {topic} is to ______.",
            f"enhance learning in {subject}",
            f"{topic} serves to improve students' understanding of {subject}.",
        ),
        (
            f"Students learn {topic} to better understand ______.",
            f"complex {subject} concepts",
            f"{topic} helps students grasp more advanced {subject} topics.",
        ),
    )
    sentence, answer, why = bank[question_number % len(bank)]
    return {
        "type": "Fill in the Blank",
        "question": sentence,
        "correct_answer": answer,
        "explanation": why,
    }
def format_quiz(content, topic, subject, grade_level, question_count, ai_generated):
    """Wrap raw quiz content in the standard BrightMind markdown frame.

    Returns a markdown string with a metadata header, the quiz body, and
    the platform footer.
    """
    lines = [
        "",
        f"# Quiz: {topic}",
        f"**Subject:** {subject}",
        f"**Grade Level:** {grade_level}",
        f"**Questions:** {question_count}",
        f"**Generated:** {datetime.now().strftime('%Y-%m-%d')}",
        f"**AI Generated:** {'Yes' if ai_generated else 'No'}",
        "---",
        "## Quiz Content",
        f"{content}",
        "---",
        "*Generated by BrightMind AI - Educational Technology Platform*",
        "",
    ]
    return "\n".join(lines)
def format_quiz_from_dict(quiz):
    """Render a structured quiz dict as markdown.

    Expects keys: topic, subject, grade_level, question_count,
    generated_at, ai_generated, questions (list of question dicts as
    produced by the generate_*_question helpers).
    """
    parts = [
        f"\n# Quiz: {quiz['topic']}\n"
        f"**Subject:** {quiz['subject']}\n"
        f"**Grade Level:** {quiz['grade_level']}\n"
        f"**Questions:** {quiz['question_count']}\n"
        f"**Generated:** {quiz['generated_at']}\n"
        f"**AI Generated:** {'Yes' if quiz['ai_generated'] else 'No'}\n"
        "---\n## Questions\n"
    ]
    for number, question in enumerate(quiz['questions'], 1):
        parts.append(f"### Question {number}: {question['type']}\n\n")
        parts.append(f"**{question['question']}**\n\n")
        kind = question['type']
        if kind == 'Multiple Choice':
            # Letter each option A, B, C, ...
            for j, option in enumerate(question['options']):
                parts.append(f"{chr(65 + j)}. {option}\n")
            parts.append(f"\n**Correct Answer:** {question['correct_answer']}\n\n")
        elif kind == 'True/False':
            parts.append(f"**Answer:** {question['correct_answer']}\n\n")
        elif kind == 'Short Answer':
            parts.append(f"**Expected Answer:** {question['correct_answer']}\n\n")
        elif kind == 'Fill in the Blank':
            parts.append(f"**Answer:** {question['correct_answer']}\n\n")
        parts.append(f"**Explanation:** {question['explanation']}\n\n")
        parts.append("---\n\n")
    parts.append("\n*Generated by BrightMind AI - Educational Technology Platform*")
    return "".join(parts)
def send_feedback_email(feedback_type, rating, comments, user_email=""):
    """Email feedback to the admin mailbox, or log it locally when SMTP is unset.

    When SMTP credentials are missing the feedback is appended as one JSON
    line to ``feedback_log.json``. Returns a user-facing status string
    prefixed with ✅ on success or ❌ on failure.
    """
    if not SMTP_USERNAME or not SMTP_PASSWORD:
        print("⚠️ SMTP credentials not configured, storing feedback locally")
        # Store feedback locally if no email configured.
        feedback_data = {
            "timestamp": datetime.now().isoformat(),
            "feedback_type": feedback_type,
            "rating": rating,
            "comments": comments,
            "user_email": user_email,
        }
        try:
            with open("feedback_log.json", "a") as f:
                f.write(json.dumps(feedback_data) + "\n")
            return "✅ Feedback saved successfully! (Email not configured)"
        except Exception as e:
            return f"❌ Error saving feedback: {str(e)}"
    try:
        # Build the message.
        msg = MIMEMultipart()
        msg['From'] = SMTP_USERNAME
        msg['To'] = FEEDBACK_EMAIL
        msg['Subject'] = f"BrightMind AI Feedback - {feedback_type}"
        body = f"""
New Feedback Received from BrightMind AI Platform
Feedback Type: {', '.join(feedback_type) if isinstance(feedback_type, list) else feedback_type}
Rating: {rating}/5
User Email: {user_email if user_email else 'Not provided'}
Comments: {comments}
Timestamp: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
---
This feedback was automatically sent from the BrightMind AI platform.
"""
        msg.attach(MIMEText(body, 'plain'))
        # Context manager guarantees the SMTP connection is closed even when
        # starttls/login/sendmail raises (the original leaked it on error).
        with smtplib.SMTP(SMTP_SERVER, SMTP_PORT) as server:
            server.starttls()
            server.login(SMTP_USERNAME, SMTP_PASSWORD)
            server.sendmail(SMTP_USERNAME, FEEDBACK_EMAIL, msg.as_string())
        print(f"✅ Feedback email sent successfully to {FEEDBACK_EMAIL}")
        return "✅ Thank you! Your feedback has been sent successfully."
    except Exception as e:
        print(f"❌ Error sending feedback email: {str(e)}")
        return f"❌ Error sending feedback: {str(e)}"
def submit_feedback(feedback_type, rating, comments, user_email):
    """Validate a feedback submission, persist it, then try to email it.

    Returns a user-facing status string. The database write is mandatory;
    the email notification is best-effort on top of it.
    """
    # Guard clauses: reject incomplete submissions up front.
    if not feedback_type:
        return "❌ Please select at least one feedback type."
    if not rating:
        return "❌ Please provide a rating."
    if not comments.strip():
        return "❌ Please provide your comments."
    # The database is the source of truth.
    if not save_feedback_to_db(feedback_type, rating, comments, user_email):
        return "❌ Error saving feedback to database. Please try again."
    email_status = send_feedback_email(feedback_type, rating, comments, user_email)
    if "✅" in email_status:
        return "✅ Thank you! Your feedback has been saved and sent successfully."
    return "✅ Thank you! Your feedback has been saved successfully. (Email notification not available)"
def init_database():
    """Create the SQLite ``feedback`` table if it does not exist.

    Returns True on success, False on any database error.
    """
    conn = None
    try:
        conn = sqlite3.connect(DATABASE_FILE)
        conn.execute('''
            CREATE TABLE IF NOT EXISTS feedback (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                timestamp TEXT NOT NULL,
                feedback_type TEXT NOT NULL,
                rating INTEGER NOT NULL,
                comments TEXT NOT NULL,
                user_email TEXT,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.commit()
        print("✅ Database initialized successfully")
        return True
    except Exception as e:
        print(f"❌ Database initialization error: {e}")
        return False
    finally:
        # Close even when execute()/commit() raises — the original left the
        # connection open on error.
        if conn is not None:
            conn.close()
def save_feedback_to_db(feedback_type, rating, comments, user_email=""):
    """Insert one feedback row into SQLite.

    ``feedback_type`` may be a list (checkbox group) or a plain string.
    Returns True on success, False on any database error.
    """
    conn = None
    try:
        conn = sqlite3.connect(DATABASE_FILE)
        # Checkbox groups arrive as a list; flatten to a comma-joined string.
        feedback_type_str = ', '.join(feedback_type) if isinstance(feedback_type, list) else str(feedback_type)
        conn.execute('''
            INSERT INTO feedback (timestamp, feedback_type, rating, comments, user_email)
            VALUES (?, ?, ?, ?, ?)
        ''', (
            datetime.now().isoformat(),
            feedback_type_str,
            rating,
            comments,
            user_email,
        ))
        conn.commit()
        print("✅ Feedback saved to database successfully")
        return True
    except Exception as e:
        print(f"❌ Database save error: {e}")
        return False
    finally:
        # Close even when execute()/commit() raises — the original left the
        # connection open on error.
        if conn is not None:
            conn.close()
def get_feedback_stats():
    """Return summary stats for the admin dashboard.

    Returns a dict with ``total_feedback`` (int), ``avg_rating`` (float,
    rounded to 2 places, 0 when empty) and ``recent_feedback`` (last 5
    rows). On any error, returns zeroed defaults.
    """
    conn = None
    try:
        conn = sqlite3.connect(DATABASE_FILE)
        cursor = conn.cursor()
        cursor.execute('SELECT COUNT(*) FROM feedback')
        total_feedback = cursor.fetchone()[0]
        # AVG() is NULL on an empty table; coerce to 0.
        cursor.execute('SELECT AVG(rating) FROM feedback')
        avg_rating = cursor.fetchone()[0]
        avg_rating = round(avg_rating, 2) if avg_rating else 0
        cursor.execute('''
            SELECT timestamp, feedback_type, rating, comments, user_email
            FROM feedback
            ORDER BY created_at DESC
            LIMIT 5
        ''')
        recent_feedback = cursor.fetchall()
        return {
            'total_feedback': total_feedback,
            'avg_rating': avg_rating,
            'recent_feedback': recent_feedback,
        }
    except Exception as e:
        print(f"❌ Database stats error: {e}")
        return {
            'total_feedback': 0,
            'avg_rating': 0,
            'recent_feedback': [],
        }
    finally:
        # Close even when a query raises — the original left the connection
        # open on error.
        if conn is not None:
            conn.close()
def view_feedback_admin():
    """Render every feedback row as a markdown report for the admin view.

    Returns a markdown string, a "no feedback" notice, or a ❌-prefixed
    error message.
    """
    conn = None
    try:
        conn = sqlite3.connect(DATABASE_FILE)
        cursor = conn.cursor()
        cursor.execute('''
            SELECT id, timestamp, feedback_type, rating, comments, user_email, created_at
            FROM feedback
            ORDER BY created_at DESC
        ''')
        feedback_data = cursor.fetchall()
        if not feedback_data:
            return "No feedback found in database."
        # Accumulate markdown pieces and join once (avoids quadratic +=).
        parts = [
            "# 📊 Feedback Database\n\n",
            f"**Total Feedback Entries:** {len(feedback_data)}\n\n",
        ]
        for id_val, timestamp, feedback_type, rating, comments, user_email, created_at in feedback_data:
            parts.append(f"## Entry #{id_val}\n")
            parts.append(f"**Date:** {timestamp}\n")
            parts.append(f"**Type:** {feedback_type}\n")
            parts.append(f"**Rating:** {rating}/5 ⭐\n")
            parts.append(f"**Comments:** {comments}\n")
            parts.append(f"**Email:** {user_email if user_email else 'Not provided'}\n")
            parts.append(f"**Created:** {created_at}\n")
            parts.append("---\n\n")
        return "".join(parts)
    except Exception as e:
        return f"❌ Error accessing database: {str(e)}"
    finally:
        # Close even when the query raises — the original left the
        # connection open on error.
        if conn is not None:
            conn.close()
# Albert Chatbot Functions
def call_hugging_face_api(prompt):
    """Send a single-turn chat completion request for the Albert chatbot.

    Returns the model's reply text, or None when no token is configured,
    the HTTP status is not 200, or the request/parse fails.
    """
    if not HF_TOKEN:
        return None
    request_body = {
        "model": "deepseek-ai/DeepSeek-V3-0324",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 200,
        "temperature": 0.7,
    }
    auth_headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    }
    try:
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers=auth_headers,
            json=request_body,
            timeout=30,
        )
        if response.status_code == 200:
            payload = response.json()
            return payload['choices'][0]['message']['content']
        print(f"❌ Albert API error: {response.status_code}")
        return None
    except Exception as e:
        print(f"❌ Albert API error: {str(e)}")
        return None
def call_hugging_face_api_content(prompt, content_length="Medium (3-5 pages)"):
    """Request generated educational content from the HF router endpoint.

    ``content_length`` selects the max_tokens budget. Returns the generated
    text, or None when no token is configured or the request fails.
    """
    if not HF_TOKEN:
        return None
    # Token budget scales with the requested document length.
    token_limits = {
        "Short (1-2 pages)": 2000,
        "Medium (3-5 pages)": 4000,
        "Long (6+ pages)": 6000,
    }
    request_body = {
        "model": "deepseek-ai/DeepSeek-V3-0324",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": token_limits.get(content_length, 4000),
        "temperature": 0.7,
    }
    auth_headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    }
    try:
        print(f"🌐 Making API call for content generation...")
        print(f"🔗 Endpoint: https://router.huggingface.co/v1/chat/completions")
        print(f"🔑 Token available: {HF_TOKEN is not None}")
        print(f"📝 Prompt length: {len(prompt)} characters")
        response = requests.post(
            "https://router.huggingface.co/v1/chat/completions",
            headers=auth_headers,
            json=request_body,
            timeout=60,  # Longer timeout for content generation
        )
        print(f"📊 Response status: {response.status_code}")
        if response.status_code != 200:
            print(f"❌ Content API error: {response.status_code}")
            print(f"❌ Response: {response.text}")
            return None
        content = response.json()['choices'][0]['message']['content']
        print(f"✅ Content generation successful! Length: {len(content)} characters")
        return content
    except Exception as e:
        print(f"❌ Content API error: {str(e)}")
        return None
def generate_image_with_huggingface(prompt, topic, content_type):
    """Try several Stable Diffusion models and return a base64 data URI.

    Returns a ``data:image/png;base64,...`` string on success, or None when
    no token is configured, every model fails, or the encoded image exceeds
    the embedding size cap. Note: ``prompt`` and ``content_type`` are part
    of the interface but the request uses a rebuilt, diagram-focused prompt.
    """
    import base64
    try:
        if not HF_TOKEN:
            return None
        headers = {"Authorization": f"Bearer {HF_TOKEN}"}
        # Well-known checkpoints are more likely to be warm on the inference API.
        working_models = [
            "stabilityai/stable-diffusion-xl-base-1.0",
            "runwayml/stable-diffusion-v1-5",
            "CompVis/stable-diffusion-v1-4",
        ]
        # Short, focused prompt produces cleaner classroom diagrams.
        clean_prompt = f"educational diagram {topic}, simple illustration, clean, minimal"
        for model_name in working_models:
            try:
                payload = {
                    "inputs": clean_prompt,
                    "parameters": {
                        "num_inference_steps": 20,
                        "guidance_scale": 7.0,
                        "width": 512,   # smaller size keeps payloads manageable
                        "height": 384,
                    },
                }
                response = requests.post(
                    f"https://api-inference.huggingface.co/models/{model_name}",
                    headers=headers,
                    json=payload,
                    timeout=30,
                )
                if response.status_code == 200:
                    if len(response.content) > 5000:  # plausibly real image bytes
                        try:
                            # A JSON body here means the API returned an error
                            # object instead of image bytes.
                            json_response = response.json()
                            if "error" in json_response:
                                continue
                        except ValueError:
                            # Not JSON -> raw image bytes (was a bare except;
                            # requests raises a ValueError subclass here).
                            try:
                                image_base64 = base64.b64encode(response.content).decode('utf-8')
                                # Cap embedded size (~112KB) to avoid breaking the UI.
                                if len(image_base64) > 150000:
                                    print(f"⚠️ Image too large ({len(image_base64)} chars), skipping")
                                    continue
                                return f"data:image/png;base64,{image_base64}"
                            except Exception as e:
                                print(f"❌ Base64 encoding error: {str(e)}")
                                continue
                elif response.status_code == 503:
                    print(f"Model {model_name} loading...")
                    continue
                else:
                    print(f"HTTP {response.status_code} for {model_name}")
                    continue
            except Exception as model_error:
                print(f"Error with {model_name}: {str(model_error)}")
                continue
        return None
    except Exception as e:
        print(f"Critical error in image generation: {str(e)}")
        return None
def get_educational_image_url(topic, description, content_type):
    """Return an image for the topic: generated if possible, else a fallback URL.

    Tries Hugging Face generation first; on failure probes a list of
    placeholder services with HEAD requests and returns the first reachable
    URL, falling back to a deterministic placeholder URL.
    """
    try:
        # Rich prompt improves generation quality for classroom material.
        image_prompt = f"Educational illustration of {topic}, {description}, {content_type}, clean diagram, colorful, professional, suitable for middle school students"
        print(f"🎨 Generating image with prompt: {image_prompt}")
        generated_image = generate_image_with_huggingface(image_prompt, topic, content_type)
        if generated_image:
            print(f"✅ Generated image successfully")
            return generated_image
        print("🔄 Image generation failed, using fallback sources")
        educational_sources = [
            # 1. Lorem Picsum (reliable placeholder)
            f"https://picsum.photos/800/600?random={hash(topic) % 1000}",
            # 2. Placeholder with educational styling
            f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+Educational+Content",
            # 3. Educational diagram generator
            f"https://via.placeholder.com/800x600/4f46e5/ffffff?text={content_type.replace(' ', '+')}+{topic.replace(' ', '+')}",
        ]
        for i, url in enumerate(educational_sources):
            try:
                print(f"🔍 Trying fallback source {i+1}: {url}")
                response = requests.head(url, timeout=5)
                if response.status_code == 200:
                    print(f"✅ Found working fallback image: {url}")
                    return url
            except requests.RequestException:
                # Was a bare except: only swallow network errors, not
                # KeyboardInterrupt/SystemExit.
                continue
        # Final fallback when nothing above was reachable.
        return f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+{content_type.replace(' ', '+')}"
    except Exception as e:
        print(f"❌ Image URL generation error: {str(e)}")
        return f"https://via.placeholder.com/800x600/667eea/ffffff?text={topic.replace(' ', '+')}+{content_type.replace(' ', '+')}"
def generate_real_image(description, topic, content_type):
    """Return an HTML snippet embedding a generated image, or a styled fallback.

    NOTE(review): the embedded-HTML literal in the original source was
    garbled; this markup was reconstructed — confirm styling against the
    original rendering. Escaping now uses html.escape instead of manual
    entity replacement.
    """
    import html
    try:
        image_data = generate_image_with_huggingface(description, topic, content_type)
        if image_data and image_data.startswith('data:image'):
            # Oversized base64 payloads slow the UI down; fall back instead.
            if len(image_data) > 100000:
                print(f"🖼️ Image too large ({len(image_data)} chars), using fallback")
                return generate_image_fallback(description, topic, content_type)
            # Normalize the data URI prefix to PNG.
            if not image_data.startswith('data:image/png;base64,'):
                if 'base64,' in image_data:
                    base64_part = image_data.split('base64,', 1)[1]
                    image_data = f"data:image/png;base64,{base64_part}"
            # Escape the caption so quotes/angle brackets cannot break the markup.
            safe_description = html.escape(description, quote=True)
            return (
                '<div style="text-align:center; margin:16px 0;">'
                f'<img src="{image_data}" alt="{safe_description}" '
                'style="max-width:100%; height:auto; border-radius:8px;"/>'
                f'<p style="font-style:italic; color:#555;">{safe_description}</p>'
                '</div>'
            )
        print(f"🖼️ No valid image data, using fallback for: {description}")
        return generate_image_fallback(description, topic, content_type)
    except Exception as e:
        print(f"❌ Image generation error: {str(e)}")
        return generate_image_fallback(description, topic, content_type)
def generate_image_fallback(description, topic, content_type):
    """Return a styled HTML placeholder used when image generation fails.

    NOTE(review): the HTML literal was garbled in the original source; this
    markup is a reconstruction carrying the same visible text (📸 icon,
    description, topic/type line).
    """
    return (
        '<div style="border:2px dashed #667eea; border-radius:8px; '
        'padding:24px; margin:16px 0; text-align:center; background:#f8f9ff;">'
        '<div style="font-size:2em;">📸</div>'
        f'<div style="font-weight:bold; margin:8px 0;">{description}</div>'
        f'<div style="color:#666; font-size:0.9em;">Topic: {topic} | Type: {content_type}</div>'
        '</div>'
    )
def fix_malformed_image_html(content):
    """Replace <img> tags whose src is neither a data URI nor an http(s) URL.

    Valid images (``data:...`` or ``http...`` src) are left untouched; only
    broken src attributes are swapped for a visible placeholder. The regex
    was reconstructed from a garbled source (the ``<img`` prefix was lost).
    """
    import re
    placeholder = (
        '<div style="text-align:center; padding:12px; color:#666;">'
        '📸 Image placeholder</div>'
    )
    # Negative lookahead keeps valid data:/http(s) images intact.
    return re.sub(r'<img[^>]*src="(?!data:|http)[^"]*"[^>]*>', placeholder, content)
def generate_image_placeholder(description, topic, content_type):
    """Return a structured HTML placeholder describing a desired image (fallback).

    NOTE(review): the HTML literal was garbled in the original source; this
    markup is a reconstruction carrying the same visible text (🖼️ icon,
    "📸 Image Placeholder" title, description/topic/type lines, 💡 note).
    """
    return (
        '<div style="border:2px dashed #999; border-radius:8px; padding:20px; '
        'margin:16px 0; text-align:center; background:#fafafa;">'
        '<div style="font-size:2em;">🖼️</div>'
        '<div style="font-weight:bold;">📸 Image Placeholder</div>'
        f'<div><strong>Description:</strong> {description}</div>'
        f'<div><strong>Topic:</strong> {topic}</div>'
        f'<div><strong>Content Type:</strong> {content_type}</div>'
        '<div style="color:#888; font-size:0.9em; margin-top:8px;">'
        '💡 This image would enhance the learning experience</div>'
        '</div>'
    )
def create_content_agent(topic, subject, grade_level, difficulty, content_type, content_length):
    """Compose the LLM prompt driving multimedia educational-content generation.

    Combines per-content-type structure/formatting tables, per-length
    word/section budgets, and suggested image descriptions into a single
    instruction prompt string, which is returned.
    """
    # Suggested illustrations, keyed by content type.
    image_suggestions = {
        "Worksheets": [
            f"Visual diagram showing {topic} concepts",
            f"Step-by-step illustration for {topic} problems",
            f"Interactive chart demonstrating {topic} principles",
        ],
        "Handouts": [
            f"Infographic summarizing {topic} key points",
            f"Visual timeline of {topic} development",
            f"Diagram showing {topic} relationships",
        ],
        "Study Guides": [
            f"Concept map of {topic} topics",
            f"Visual summary of {topic} main ideas",
            f"Flowchart showing {topic} processes",
        ],
        "Activities": [
            f"Step-by-step visual instructions for {topic} activity",
            f"Before and after examples of {topic} work",
            f"Visual checklist for {topic} completion",
        ],
        "Presentations": [
            f"Title slide with {topic} theme",
            f"Key concept visualization for {topic}",
            f"Summary slide with {topic} highlights",
        ],
        "Lesson Materials": [
            f"Opening visual hook for {topic} lesson",
            f"Visual examples of {topic} in real life",
            f"Closing visual summary of {topic} learning",
        ],
    }
    suggested_images = image_suggestions.get(content_type, [f"Educational illustration about {topic}"])
    # Per-content-type structure, formatting rules, sections and extras.
    content_type_formats = {
        "Worksheets": {
            "structure": "Interactive worksheet with numbered problems, answer spaces, and self-assessment",
            "formatting": "Use checkboxes, fill-in-the-blank spaces, numbered problems, and answer keys",
            "sections": ["Introduction", "Learning Objectives", "Practice Problems", "Word Problems", "Self-Assessment", "Answer Key"],
            "special_elements": "Include answer spaces, checkboxes for completion, and step-by-step problem solving",
        },
        "Handouts": {
            "structure": "Reference handout with key concepts, formulas, and quick lookup information",
            "formatting": "Use bullet points, highlighted boxes, tables, and clear headings",
            "sections": ["Overview", "Key Concepts", "Important Formulas", "Examples", "Quick Reference", "Additional Resources"],
            "special_elements": "Include summary tables, highlighted key terms, and visual diagrams",
        },
        "Study Guides": {
            "structure": "Comprehensive study guide with summaries, review materials, and practice questions",
            "formatting": "Use numbered lists, checkboxes, summary tables, and review checklists",
            "sections": ["Topic Summary", "Key Points", "Important Concepts", "Practice Questions", "Review Checklist", "Test Tips"],
            "special_elements": "Include memory aids, mnemonics, and test-taking strategies",
        },
        "Activities": {
            "structure": "Hands-on activity with detailed instructions, materials list, and observation sheets",
            "formatting": "Use numbered steps, safety notes, observation tables, and discussion questions",
            "sections": ["Activity Overview", "Materials Needed", "Step-by-Step Instructions", "Observations", "Discussion Questions", "Extensions"],
            "special_elements": "Include safety considerations, data collection sheets, and reflection questions",
        },
        "Presentations": {
            "structure": "Presentation slides with talking points, visual elements, and interactive components",
            "formatting": "Use slide format with bullet points, speaker notes, and visual transitions",
            "sections": ["Title Slide", "Agenda", "Key Points", "Examples", "Summary", "Q&A"],
            "special_elements": "Include speaker notes, interactive polls, and visual cues",
        },
        "Lesson Materials": {
            "structure": "Complete lesson materials for teachers with instructions, activities, and assessments",
            "formatting": "Include teacher tips, timing suggestions, differentiation options, and rubrics",
            "sections": ["Lesson Overview", "Learning Objectives", "Teacher Notes", "Student Activities", "Assessment", "Resources"],
            "special_elements": "Include pacing guides, differentiation strategies, and assessment rubrics",
        },
    }
    type_format = content_type_formats.get(content_type, content_type_formats["Worksheets"])
    # Word/section budgets by requested document length.
    length_constraints = {
        "Short (1-2 pages)": {
            "max_sections": 4,
            "max_words": 800,
            "instruction": "Keep content VERY concise and focused. Maximum 4 sections. Use bullet points and brief explanations. Avoid lengthy examples.",
        },
        "Medium (3-5 pages)": {
            "max_sections": 6,
            "max_words": 1500,
            "instruction": "Provide comprehensive coverage with moderate detail. Include 5-6 sections with examples and practice.",
        },
        "Long (6+ pages)": {
            "max_sections": 8,
            "max_words": 2500,
            "instruction": "Create extensive content with detailed explanations, multiple examples, and comprehensive practice sections.",
        },
    }
    length_budget = length_constraints.get(content_length, length_constraints["Medium (3-5 pages)"])
    # Pre-render the list fragments interpolated into the prompt below.
    sections_csv = ', '.join(type_format['sections'])
    sections_md = "\n".join(f"- {section}" for section in type_format['sections'])
    images_md = "\n".join(f"- {img}" for img in suggested_images)
    return f"""You are an advanced educational content generation agent. Create comprehensive educational {content_type.lower()} with multimedia integration.
TOPIC: {topic}
SUBJECT: {subject}
GRADE LEVEL: {grade_level}
DIFFICULTY: {difficulty}
CONTENT TYPE: {content_type}
LENGTH: {content_length}
LENGTH CONSTRAINTS (CRITICAL - MUST FOLLOW):
- Target Length: {content_length}
- Maximum Sections: {length_budget['max_sections']}
- Maximum Words: {length_budget['max_words']}
- Instruction: {length_budget['instruction']}
CONTENT TYPE SPECIFIC REQUIREMENTS:
Structure: {type_format['structure']}
Formatting: {type_format['formatting']}
Sections: {sections_csv}
Special Elements: {type_format['special_elements']}
AGENT INSTRUCTIONS:
1. Generate rich, engaging educational content specifically formatted for {content_type}
2. STRICTLY follow the length constraints: {length_budget['instruction']}
3. Include specific image placeholders with detailed descriptions
4. Create interactive elements and visual aids appropriate for {content_type}
5. Structure content for maximum learning impact using {content_type} best practices
6. Include multimedia integration points
7. Apply {content_type}-specific formatting and layout
8. KEEP CONTENT WITHIN {length_budget['max_words']} WORDS MAXIMUM
9. IMPORTANT: DO NOT USE LaTeX expressions. Use simple HTML/markdown formatting instead:
- For fractions: use "a/b" or "a over b" instead of \frac{{a}}{{b}}
- For math: use plain text like "Speed = Distance/Time" instead of \text{{Speed}} = \frac{{\text{{Distance}}}}{{\text{{Time}}}}
- For fill-in-blanks: use "_____" instead of \\_\\_\\_
- For answer boxes: use "[ANSWER: ___]" instead of \\boxed{{\\_\\_}}
- For subscripts: use "H2O" instead of H_2O
- For superscripts: use "x²" instead of x^2
CONTENT STRUCTURE FOR {content_type.upper()}:
{sections_md}
IMAGE INTEGRATION:
Include these specific image suggestions:
{images_md}
FORMAT REQUIREMENTS FOR {content_type.upper()}:
- Use markdown formatting appropriate for {content_type}
- Include image placeholders: [IMAGE: description]
- Add interactive elements: [INTERACTIVE: description]
- Include multimedia links: [MEDIA: description]
- Use engaging headers and subheaders
- Include callout boxes and highlights
- Apply {type_format['formatting']}
- NO LaTeX expressions - use simple HTML/markdown only
- For math: use plain text like "Speed = Distance/Time"
- For fractions: use "a/b" or "a over b"
- For blanks: use "_____" (5 underscores)
- For answer boxes: use "[ANSWER: ___]"
SPECIAL FORMATTING FOR {content_type.upper()}:
{type_format['special_elements']}
Make the content visually rich, educationally sound, and ready for multimedia integration with {content_type}-specific formatting."""
def convert_math_to_html(text):
    """Convert LaTeX-style math markup in *text* to lightweight HTML.

    Handles display/inline delimiters, \\frac and bare numeric fractions,
    sub/superscripts, \\text/\\boxed unwrapping, blank runs and common
    symbols. NOTE(review): the HTML replacement strings were garbled in the
    original source and have been reconstructed; the regex patterns and
    their ordering follow the original.
    """
    import re
    # Stacked-fraction markup shared by both \frac and "a/b" handling.
    frac_html = (
        r'<span style="display:inline-block; text-align:center; vertical-align:middle;">'
        r'<span style="display:block; border-bottom:1px solid;">\1</span>'
        r'<span style="display:block;">\2</span></span>'
    )
    # \[ ... \] display math -> centered block
    text = re.sub(r'\\\[([^\]]+)\\\]', r'<div style="text-align:center;">\1</div>', text)
    # \( ... \) inline math -> unwrap
    text = re.sub(r'\\\(([^)]+)\\\)', r'\1', text)
    # \frac{a}{b} and bare numeric fractions like 120/2
    text = re.sub(r'\\frac\{([^}]+)\}\{([^}]+)\}', frac_html, text)
    text = re.sub(r'(\d+)/(\d+)', frac_html, text)
    # Superscripts: ^{text} and ^x
    text = re.sub(r'\^\{([^}]+)\}', r'<sup>\1</sup>', text)
    text = re.sub(r'\^(\w)', r'<sup>\1</sup>', text)
    # Subscripts: _{text} and _x
    text = re.sub(r'_\{([^}]+)\}', r'<sub>\1</sub>', text)
    text = re.sub(r'_(\w)', r'<sub>\1</sub>', text)
    # \text{...} and \boxed{...} unwrap to their contents
    text = re.sub(r'\\text\{([^}]+)\}', r'\1', text)
    text = re.sub(r'\\boxed\{([^}]+)\}', r'\1', text)
    # Escaped underscore runs (\_\_\_) are leftover blanks; drop them
    text = re.sub(r'\\_+', r'', text)
    # Fractions may appear inside \text{}; process again after unwrapping
    text = re.sub(r'\\frac\{([^}]+)\}\{([^}]+)\}', frac_html, text)
    # Common math symbols
    text = text.replace('\\times', '×')
    text = text.replace('\\rightarrow', '→')
    text = text.replace('\\leftarrow', '←')
    text = text.replace('\\leq', '≤')
    text = text.replace('\\geq', '≥')
    text = text.replace('\\neq', '≠')
    text = text.replace('\\approx', '≈')
    text = text.replace('\\pm', '±')
    text = text.replace('\\sqrt', '√')
    text = text.replace('\\pi', 'π')
    return text
def solve_algebra_problem(problem_type, *args):
    """Solve one interactive algebra problem and format the result.

    Dispatches on ``problem_type`` ("speed", "volume", "linear",
    "quadratic") to the shared ``algebra_solver`` instance. Returns a
    markdown string with the solution and either its steps or its
    formula/calculation, or a ❌-prefixed error message.
    """
    try:
        if problem_type == "speed":
            distance, time = args
            outcome = algebra_solver.solve_speed_problem(float(distance), float(time))
        elif problem_type == "volume":
            mass, density = args
            outcome = algebra_solver.solve_volume_problem(float(mass), float(density))
        elif problem_type == "linear":
            a, b, c = args
            outcome = algebra_solver.solve_linear_equation(float(a), float(b), float(c))
        elif problem_type == "quadratic":
            h, coefficient = args
            outcome = algebra_solver.solve_quadratic_equation(float(h), float(coefficient))
        else:
            return "❌ Unknown problem type"
        if "error" in outcome:
            return f"❌ Error: {outcome['error']}"
        # Assemble the markdown answer: headline first, then either the
        # step list or the formula/calculation pair.
        pieces = [f"✅ **{outcome['solution']}**\n\n"]
        if "steps" in outcome:
            pieces.append("**Steps:**\n")
            pieces.extend(f"{n}. {step}\n" for n, step in enumerate(outcome["steps"], 1))
        elif "formula" in outcome:
            pieces.append(f"**Formula:** {outcome['formula']}\n")
            pieces.append(f"**Calculation:** {outcome['calculation']}\n")
        return "".join(pieces)
    except Exception as e:
        return f"❌ Error solving problem: {str(e)}"
def apply_content_styling(content, content_type):
    """Apply light, Markdown-friendly styling to generated content.

    Deliberately avoids aggressive HTML rewriting so embedded image markup
    survives intact. ``content_type`` is part of the interface but does not
    change the styling rules. NOTE(review): the empty-div cleanup regex was
    garbled in the original source and has been reconstructed; the original
    also contained a no-op ☑→☑ substitution which has been dropped.
    """
    import re
    # Drop empty styled <div> wrappers only; everything else is preserved so
    # the Markdown renderer (and any embedded images) see the content as-is.
    content = re.sub(r'<div style="[^"]*">\s*</div>', '', content)
    styled = content
    # [ANSWER: x] -> bold answer label
    styled = re.sub(r'\[ANSWER: ([^\]]+)\]', r'**Answer:** \1', styled)
    # Normalize underscore runs to a fixed-length answer line
    styled = re.sub(r'___+', r'_________________', styled)
    # [ ] -> empty checkbox glyph
    styled = re.sub(r'\[ \]', r'☐', styled)
    # [3]-style numeric markers -> bold
    styled = re.sub(r'\[(\d+)\]', r'**\1**', styled)
    return styled
def process_content_with_images(content, topic, content_type):
    """Replace [IMAGE: ...] placeholders with generated images (capped at 3).

    Placeholders beyond the cap are downgraded to a plain-text image note to
    keep page weight manageable. Also normalizes LaTeX math, repairs
    malformed image HTML, and applies Markdown styling before returning the
    processed content.
    """
    import re
    print(f"🔍 Starting process_content_with_images - input length: {len(content)}")
    # Convert LaTeX math first so image HTML is not mangled by it later.
    content = convert_math_to_html(content)
    print(f"🔍 After convert_math_to_html - length: {len(content)}")
    image_patterns = [
        r'\[IMAGE:\s*([^\]]+)\]',
        r'IMAGE:\s*([^\n<]+?)(?=\s*<|$|\n)',
    ]
    # Limit number of generated images to prevent performance issues.
    max_images = 3
    rendered = 0
    for pattern in image_patterns:
        for image_desc in re.findall(pattern, content, re.MULTILINE):
            if rendered < max_images:
                replacement = generate_real_image(image_desc.strip(), topic, content_type)
                print(f"🖼️ Generated image HTML length: {len(replacement)}")
                print(f"🖼️ Image HTML preview: {replacement[:200]}...")
                rendered += 1
            else:
                # The original's overflow guard (`if i >= max_images`) was
                # unreachable because the list was already sliced; overflow
                # placeholders now actually become plain text as intended.
                replacement = f'\n**📸 Image:** {image_desc.strip()}\n'
            content = content.replace(f'[IMAGE: {image_desc}]', replacement)
            content = content.replace(f'IMAGE: {image_desc}', replacement)
    # Clean up any malformed HTML left behind.
    content = fix_malformed_image_html(content)
    print(f"🔍 After fix_malformed_image_html - length: {len(content)}")
    # Apply styling last so answer/checkbox markers are normalized.
    content = apply_content_styling(content, content_type)
    print(f"🔍 After apply_content_styling - length: {len(content)}")
    return content
def generate_albert_response(user_message, user_name, age_group, chat_history):
    """Generate Albert's chat reply via the HF API, with canned fallbacks.

    When no token is configured, the API call fails, or it returns nothing,
    falls back to keyword-matched responses and finally a generic
    encouraging message. ``chat_history`` is a list of [user, albert] pairs.
    """
    if HF_TOKEN:
        try:
            # Age-group emoji personalizes the persona prompt.
            age_emojis = {
                "K-2": "🌟",
                "3-5": "🚀",
                "6-8": "⚡",
                "9-12": "🎓",
                "Adult": "💡",
            }
            age_emoji = age_emojis.get(age_group, "🌟")
            # Fold the last few completed exchanges in so Albert keeps context.
            context = ""
            if chat_history and len(chat_history) > 1:
                context = "\n\nPrevious conversation:\n"
                for user_msg, albert_msg in chat_history[-3:]:  # last 3 exchanges
                    if user_msg and albert_msg:
                        context += f"User: {user_msg}\nAlbert: {albert_msg}\n"
            prompt = f"""You are Albert 🧠, a fun and encouraging educational chatbot!
User Info:
- Name: {user_name}
- Age Group: {age_group} {age_emoji}
Your personality:
- Use lots of emojis and fun expressions! 😊
- Be encouraging and positive
- Explain things in a simple, engaging way
- Ask follow-up questions to keep learning going
- Use age-appropriate language for {age_group}
- Be enthusiastic about learning!
- Remember our conversation and build on previous topics!
{context}
Current question: {user_message}
Respond as Albert with enthusiasm, emojis, and helpful explanations! Keep it concise (2-3 sentences max)."""
            # Call Hugging Face API directly for Albert.
            ai_content = call_hugging_face_api(prompt)
            if ai_content:
                return ai_content
        except Exception as e:
            print(f"❌ Albert API error: {str(e)}")
    # Keyword-matched canned responses when the API is unavailable.
    fallback_responses = {
        "hello": f"Hi {user_name}! 🧠✨ I'm Albert, your learning buddy! What would you like to learn about today? 😊",
        "hi": f"Hey there {user_name}! 🎉 Ready for some fun learning? Ask me anything! 🚀",
        "help": f"Of course {user_name}! 🧠 I'm here to help you understand any topic! Just ask me a question and I'll explain it in a fun way! 😊✨",
        "math": f"Math is awesome {user_name}! 🔢✨ Let me know what specific math topic you're curious about and I'll make it super fun to understand! 🎯",
        "science": f"Science is fascinating {user_name}! 🔬🌟 Tell me what science topic you want to explore and I'll explain it with cool examples! 🚀",
        "english": f"Language is powerful {user_name}! 📚💫 What English topic would you like to learn about? I'll make it engaging and fun! ✨",
        "history": f"History is like a time machine {user_name}! ⏰🎭 What historical period or event interests you? I'll bring it to life! 🌟",
        "art": f"Art is creativity in action {user_name}! 🎨✨ What kind of art or creative topic would you like to explore? Let's get creative! 🚀",
    }
    message_lower = user_message.lower()
    for keyword, response in fallback_responses.items():
        if keyword in message_lower:
            return response
    # Default encouraging response when no keyword matches.
    return f"That's a great question {user_name}! 🧠✨ I'd love to help you understand that! Can you tell me more about what specific part you'd like to learn about? I'm here to make learning fun! 😊🚀"
def chat_with_albert(user_message, user_name, age_group, chat_history):
    """Process one chat turn: record the user's message, ask Albert, store his reply.

    Returns the updated history plus an empty string to clear the input box.
    """
    # Ignore blank / whitespace-only submissions.
    if not user_message.strip():
        return chat_history, ""

    history = chat_history if chat_history is not None else []
    # Reserve a slot for the pending reply.
    history.append([user_message, None])

    reply = generate_albert_response(user_message, user_name, age_group, history)
    history[-1][1] = f"🧠 Albert: {reply}"
    return history, ""
# Initialize database on startup
init_database()
# Initialize algebra solver (class imported from algebra_solver at the top of the file)
algebra_solver = AlgebraSolver()
# ====================================
# MATHMIND CHATBOT FUNCTIONS
# ====================================
# Mid-file imports: used only by the MathMind section below.
import PyPDF2
import io
import base64
import re
from typing import List, Dict, Any
# Global (module-level) mutable state for MathMind — shared across all requests.
mathmind_conversation_history = []  # chat turns as {"role": ..., "content": ...} dicts
mathmind_pdf_context = ""  # combined text of all uploaded PDFs (rebuilt by update_combined_context)
uploaded_pdfs = [] # List of dicts: {"name": str, "content": str, "summary": str, "timestamp": str}
def validate_question_is_math_related(question: str) -> bool:
    """Heuristically decide whether a question is about mathematics.

    Keywords are matched as whole words so that e.g. "pizza" no longer
    triggers the 'pi' pattern, "using" no longer triggers 'sin', and
    "address" no longer triggers 'add' (the previous substring matching
    produced such false positives).

    Args:
        question: Raw user question text.

    Returns:
        True if the question looks math-related, False otherwise.
    """
    math_keywords = [
        'mathematics', 'math', 'algebra', 'geometry', 'calculus', 'trigonometry',
        'arithmetic', 'equation', 'formula', 'theorem', 'proof', 'function',
        'derivative', 'integral', 'polynomial', 'fraction', 'decimal', 'percentage',
        'statistics', 'probability', 'graph', 'coordinate', 'angle', 'triangle',
        'circle', 'square', 'rectangle', 'volume', 'area', 'perimeter', 'solve',
        'calculate', 'compute', 'add', 'subtract', 'multiply', 'divide', 'plus',
        'minus', 'times', 'equals', 'number', 'digit', 'sum', 'product', 'quotient',
        'ratio', 'proportion', 'slope', 'intercept', 'variable', 'constant'
    ]
    # Mathematical symbols and structural patterns.
    math_patterns = [
        r'\d+\s*[\+\-\*/\=]\s*\d+',   # basic operations like "2 + 2"
        r'[xy]\s*[\+\-\*/\=]',        # variables in equations
        r'\b\d+\s*[xy]\b',            # coefficients with variables
        r'π|\bpi\b',                  # pi (word-bounded: avoids "pizza")
        r'√|\bsqrt\b',                # square root
        r'\^|\*\*',                   # exponents
        r'\b(?:sin|cos|tan|log)\b',   # trig/log (word-bounded: avoids "using")
    ]
    question_lower = question.lower()
    # Whole-word keyword match avoids substring false positives
    # ("sum" in "assumption", "add" in "address", ...).
    if any(re.search(r'\b' + re.escape(keyword) + r'\b', question_lower)
           for keyword in math_keywords):
        return True
    return any(re.search(pattern, question_lower) for pattern in math_patterns)
def validate_pdf_content_with_llm(pdf_text: str) -> tuple:
"""Use LLM to intelligently check if PDF content is math-related and generate summary.

Returns a (is_math_related: bool, summary_markdown: str) tuple. Falls back
to the keyword heuristic when no HF token is configured or the API call
fails for any reason.
"""
if not HF_TOKEN:
# Fallback to keyword-based validation if no token
return validate_pdf_content_fallback(pdf_text), "Content validation completed"
# Truncate text for LLM analysis (first 2000 characters should be enough)
analysis_text = pdf_text[:2000] if len(pdf_text) > 2000 else pdf_text
validation_prompt = f"""Analyze the following document content and determine:
1. Is this content primarily related to mathematics? (YES/NO)
2. What specific math topics are covered? (if any)
3. Provide a brief summary of the mathematical content.
Document content:
{analysis_text}
Respond in this exact format:
MATH_RELATED: [YES/NO]
TOPICS: [list main math topics separated by commas, or "None" if not math-related]
SUMMARY: [brief 1-2 sentence summary of the mathematical content, or explanation why it's not math-related]
Be strict - only respond YES if the content is clearly about mathematics, mathematical concepts, math problems, or math education."""
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
try:
payload = {
"model": "meta-llama/Meta-Llama-3-8B-Instruct",
"messages": [{"role": "user", "content": validation_prompt}],
"max_tokens": 300,
"temperature": 0.1 # Low temperature for consistent analysis
}
response = requests.post(
"https://router.huggingface.co/v1/chat/completions",
headers=headers,
json=payload,
timeout=30
)
if response.status_code == 200:
result = response.json()
if "choices" in result and len(result["choices"]) > 0:
llm_response = result["choices"][0]["message"]["content"]
# Parse LLM response (the leading '"YES" in' check is redundant given
# the MATH_RELATED check, but harmless).
is_math_related = "YES" in llm_response.upper() and "MATH_RELATED: YES" in llm_response.upper()
# Extract topics and summary; defaults used when lines are missing.
topics = "General mathematics"
summary = "Math content detected"
lines = llm_response.split('\n')
for line in lines:
if line.startswith('TOPICS:'):
topics = line.replace('TOPICS:', '').strip()
elif line.startswith('SUMMARY:'):
summary = line.replace('SUMMARY:', '').strip()
if is_math_related:
detailed_summary = f"📊 **Math Topics:** {topics}\n📝 **Summary:** {summary}"
return True, detailed_summary
else:
return False, f"📄 **Analysis:** {summary}"
# Fallback if LLM call fails (non-200 status or malformed response body)
return validate_pdf_content_fallback(pdf_text), "Content validation completed (fallback method)"
except Exception as e:
print(f"LLM validation error: {str(e)}")
return validate_pdf_content_fallback(pdf_text), "Content validation completed (fallback method)"
def validate_pdf_content_fallback(pdf_text: str) -> bool:
    """Keyword-count heuristic used when the LLM validator is unavailable.

    Returns True when at least three distinct math keywords occur in the text.
    """
    keywords = (
        'mathematics', 'math', 'algebra', 'geometry', 'calculus', 'trigonometry',
        'arithmetic', 'equation', 'formula', 'theorem', 'proof', 'function',
        'derivative', 'integral', 'polynomial', 'fraction', 'decimal', 'percentage',
        'statistics', 'probability', 'graph', 'coordinate', 'angle', 'triangle',
        'circle', 'square', 'rectangle', 'volume', 'area', 'perimeter',
    )
    lowered = pdf_text.lower()
    hits = sum(keyword in lowered for keyword in keywords)
    return hits >= 3
def generate_pdf_summary(pdf_text: str) -> str:
    """Build a short markdown summary (detected topics + word count) for a math PDF."""
    topic_keywords = {
        'Algebra': ['algebra', 'equation', 'variable', 'solve', 'polynomial', 'linear'],
        'Geometry': ['geometry', 'triangle', 'circle', 'angle', 'area', 'volume', 'perimeter'],
        'Calculus': ['calculus', 'derivative', 'integral', 'limit', 'function'],
        'Trigonometry': ['trigonometry', 'sin', 'cos', 'tan', 'sine', 'cosine'],
        'Statistics': ['statistics', 'probability', 'data', 'mean', 'median', 'mode'],
        'Arithmetic': ['addition', 'subtraction', 'multiplication', 'division', 'fraction']
    }
    lowered = pdf_text.lower()
    # A topic counts as present when any of its keywords appears in the text.
    detected = [
        topic for topic, words in topic_keywords.items()
        if any(word in lowered for word in words)
    ]
    if detected:
        summary = f"📊 **Math Topics Detected:** {', '.join(detected[:3])}"
        if len(detected) > 3:
            summary += f" and {len(detected) - 3} more"
    else:
        summary = "📊 **Math Content:** General mathematics material"
    # Append an approximate word count.
    summary += f"\n📄 **Content Length:** ~{len(pdf_text.split())} words"
    return summary
def extract_pdf_text(pdf_file) -> tuple:
"""Extract text from an uploaded PDF and validate that it is math-related.

Returns a (text, is_valid, status_message) tuple; text is "" on any failure.
pdf_file is expected to be raw PDF bytes (it is wrapped in BytesIO below).
"""
try:
if pdf_file is None:
return "", False, "No file uploaded"
# Read PDF content page by page.
pdf_reader = PyPDF2.PdfReader(io.BytesIO(pdf_file))
text = ""
for page in pdf_reader.pages:
text += page.extract_text() + "\n"
# Image-only / scanned PDFs yield no extractable text.
if not text.strip():
return "", False, "❌ Could not extract text from PDF. Please ensure it's a text-based PDF."
# Use LLM to validate if it's math-related and get summary
is_math_related, analysis_summary = validate_pdf_content_with_llm(text)
if not is_math_related:
return "", False, f"⚠️ I only accept materials that are related to mathematics. Please upload a math-related PDF (homework, textbook, worksheets, etc.) for me to help you with.\n\n{analysis_summary}"
# Return text with LLM-generated summary
return text, True, f"✅ Math PDF successfully processed!\n\n{analysis_summary}"
except Exception as e:
# Broad catch: any parsing error is reported back to the UI as a status string.
return "", False, f"❌ Error processing PDF: {str(e)}"
def apply_content_guardrails(response: str) -> str:
    """Return *response* unchanged unless it matches a blocked-content pattern,
    in which case a fixed redirect message is returned instead."""
    blocked_patterns = (
        r'\b(hate|racist|sexist|discriminat)\w*\b',
        r'\b(kill|murder|suicide|harm yourself)\b',
        r'\b(stupid|idiot|dumb)\b',
        r'\b(nazi|fascist)\b',
    )
    lowered = response.lower()
    # Redirect instead of echoing flagged content.
    if any(re.search(pattern, lowered) for pattern in blocked_patterns):
        return """I'm here to help you learn mathematics in a positive and supportive way. Let's focus on your math questions and keep our conversation educational and respectful.
What math topic would you like to explore today? 📚✨"""
    return response
def generate_math_image(description: str) -> str:
"""Generate mathematical diagrams or shapes using text-to-image.

Tries each Stable Diffusion model in turn via the HF inference API and
returns a snippet for the chat; falls back to a text placeholder when no
token is configured or every model fails.
"""
try:
if not HF_TOKEN:
return "📊 [Mathematical diagram would be generated here]"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
# Enhanced prompt for mathematical content
math_prompt = f"mathematical diagram, {description}, clean educational illustration, white background, clear labels, geometric shapes, mathematical notation"
# Candidate models, tried in order until one returns image bytes.
models = [
"stabilityai/stable-diffusion-xl-base-1.0",
"runwayml/stable-diffusion-v1-5"
]
for model in models:
try:
payload = {
"inputs": math_prompt,
"parameters": {
"num_inference_steps": 25,
"guidance_scale": 7.5,
"width": 512,
"height": 512
}
}
response = requests.post(
f"https://api-inference.huggingface.co/models/{model}",
headers=headers,
json=payload,
timeout=30
)
# >1000 bytes heuristically distinguishes real image data from an error body.
if response.status_code == 200 and len(response.content) > 1000:
image_base64 = base64.b64encode(response.content).decode('utf-8')
# NOTE(review): image_base64 is computed but unused below — the returned
# f-string looks like it lost its <img ...> markup; confirm against
# version control before relying on this branch.
return f"""
{description}
"""
except Exception as e:
# Try the next model on any per-model failure.
continue
return f"📊 [Mathematical diagram: {description}]"
except Exception as e:
return f"📊 [Mathematical diagram: {description}]"
def format_latex_response(text: str) -> str:
    """Minimal cleanup to avoid corrupting LaTeX expressions.

    Repairs only a few known malformed fragments and strips empty "$ $"
    pairs; everything else is left for the renderer to handle.
    """
    import re

    replacements = [
        ('Area=$$π Area=$$π $r^{2}$ $$', 'Area = $\\pi r^2$'),
        ('$$π', '$\\pi$'),
        ('π$$', '$\\pi$'),
    ]
    cleaned = text
    for bad, good in replacements:
        cleaned = cleaned.replace(bad, good)
    # Remove empty dollar-sign pairs that would render as stray symbols.
    return re.sub(r'\$\s*\$', '', cleaned)
def mathmind_chat_with_typing(message: str, grade_level: str, chat_history: List, pdf_context_display: str) -> tuple:
    """MathMind chat handler: record the question, query the model, show the reply.

    Appends the user message to the global conversation history, adds a
    temporary "thinking" placeholder row, then replaces it with the model's
    (LaTeX-cleaned) response.

    Returns:
        (updated chat history, "") — the empty string clears the input box.
    """
    global mathmind_conversation_history, mathmind_pdf_context
    if not message.strip():
        return chat_history, ""
    # Robustness fix: Gradio passes None before the chatbot has any history;
    # previously this crashed on .append().
    if chat_history is None:
        chat_history = []
    mathmind_conversation_history.append({
        "role": "user",
        "content": f"[Grade: {grade_level}] {message}"
    })
    # NOTE(review): without `yield`, this placeholder never renders — the
    # function returns only its final state. Converting to a generator would
    # enable a real typing effect. (Also removed a dead `import time`.)
    chat_history.append([message, "🤔 *thinking...*"])
    # Generate the response using the global PDF context (not the display widget).
    response = call_hugging_face_math_api(
        message, mathmind_conversation_history, mathmind_pdf_context, grade_level
    )
    response = format_latex_response(response)
    mathmind_conversation_history.append({"role": "assistant", "content": response})
    # Replace the placeholder with the real response.
    chat_history[-1] = [message, response]
    return chat_history, ""
def check_document_relevance(question: str, pdf_context: str) -> tuple:
"""Use LLM to check if uploaded documents can answer the question.

Returns (can_answer: bool, relevant_doc_names: str, raw_analysis: str).
Defaults to (False, "", reason) when no token/context exists or the call fails.
"""
if not HF_TOKEN or not pdf_context.strip():
return False, "", "No documents uploaded or no API token"
# Extract document names from the "=== Content from X ===" markers that
# update_combined_context() writes into the combined context string.
doc_names = []
for line in pdf_context.split('\n'):
if line.startswith('=== Content from ') and line.endswith(' ==='):
doc_name = line.replace('=== Content from ', '').replace(' ===', '')
doc_names.append(doc_name)
relevance_prompt = f"""Analyze the following question and document content to determine if the documents contain enough information to answer the question.
Question: {question}
Document Content:
{pdf_context[:2000]}
Instructions:
1. Can the uploaded documents provide a complete or partial answer to this question?
2. If yes, which specific document(s) contain the relevant information?
3. If no, the question should be answered from general knowledge.
Respond in this exact format:
DOCUMENT_CAN_ANSWER: [YES/NO]
RELEVANT_DOCUMENTS: [list document names separated by commas, or "NONE"]
EXPLANATION: [brief explanation of why documents can or cannot answer the question]
Be strict - only respond YES if the documents actually contain specific information that directly addresses the question."""
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
try:
payload = {
"model": "meta-llama/Meta-Llama-3-8B-Instruct",
"messages": [{"role": "user", "content": relevance_prompt}],
"max_tokens": 200,
"temperature": 0.1 # Low temperature for consistent analysis
}
response = requests.post(
"https://router.huggingface.co/v1/chat/completions",
headers=headers,
json=payload,
timeout=30
)
if response.status_code == 200:
result = response.json()
if "choices" in result and len(result["choices"]) > 0:
llm_response = result["choices"][0]["message"]["content"]
# Parse LLM response (the leading '"YES" in' check is redundant given
# the DOCUMENT_CAN_ANSWER check, but harmless).
can_answer = "YES" in llm_response.upper() and "DOCUMENT_CAN_ANSWER: YES" in llm_response.upper()
relevant_docs = ""
lines = llm_response.split('\n')
for line in lines:
if line.startswith('RELEVANT_DOCUMENTS:'):
relevant_docs = line.replace('RELEVANT_DOCUMENTS:', '').strip()
break
return can_answer, relevant_docs, llm_response
# Non-200 or malformed body: treat as "documents cannot answer".
return False, "", "LLM analysis failed"
except Exception as e:
print(f"Document relevance check error: {str(e)}")
return False, "", f"Error: {str(e)}"
def call_hugging_face_math_api(prompt: str, conversation_history: List[Dict], pdf_context: str = "", grade_level: str = "6-8") -> str:
"""Call Hugging Face API for math-specific responses.

Builds a grade-aware tutor prompt (optionally grounded in uploaded PDF
context when check_document_relevance says the documents apply), calls the
chat-completions router, then passes the reply through content guardrails
and LaTeX cleanup. Returns an error string (never raises) on failure.
"""
if not HF_TOKEN:
return "❌ Hugging Face token not found. Please configure HF_TOKEN."
# Ensure pdf_context is a string (a Gradio component may be passed by mistake).
if hasattr(pdf_context, 'value'):
pdf_context = pdf_context.value
elif not isinstance(pdf_context, str):
pdf_context = str(pdf_context) if pdf_context else ""
# Map grade levels to age ranges and language complexity
grade_mapping = {
"K-2": {"age": "5-8 years old", "language": "very simple words, basic counting, shapes, colors"},
"3-5": {"age": "8-11 years old", "language": "simple explanations, basic operations, real-world examples"},
"6-8": {"age": "11-14 years old", "language": "clear explanations with some technical terms, algebra basics"},
"9-12": {"age": "14-18 years old", "language": "more complex concepts, proper mathematical terminology"},
"College": {"age": "18+ years old", "language": "advanced mathematical concepts and formal notation"}
}
# Unknown grade strings fall back to the middle-school profile.
grade_info = grade_mapping.get(grade_level, grade_mapping["6-8"])
# Check if documents can answer the question (extra LLM round-trip).
can_use_docs, relevant_docs, analysis = check_document_relevance(prompt, pdf_context)
# Debug logging
print(f"🔍 Document relevance check:")
print(f" Question: {prompt[:100]}...")
print(f" Can use docs: {can_use_docs}")
print(f" Relevant docs: {relevant_docs}")
print(f" Analysis: {analysis[:200]}...")
# Determine response approach and context
if can_use_docs and pdf_context.strip():
response_approach = f"Use the uploaded document(s) to answer. Specifically mention: {relevant_docs}"
context_to_use = pdf_context
print(f"📄 Using document context from: {relevant_docs}")
else:
response_approach = "Answer from your general knowledge. Use phrases like 'According to my understanding...' or 'Based on what I know...'"
context_to_use = "No relevant document context - use general knowledge"
print(f"🧠 Using general knowledge (no relevant docs found)")
# Build context-aware interactive prompt
context_prompt = f"""You are an enthusiastic, interactive educational tutor for {grade_info['age']} students. While you specialize in mathematics, you can help with various educational topics and make learning exciting and memorable!
PERSONALITY & INTERACTION STYLE:
- Be conversational, friendly, and genuinely excited about learning
- Ask engaging follow-up questions to keep the conversation going
- Share fascinating real-world connections and "did you know?" facts
- Use age-appropriate humor and analogies for {grade_info['age']} students
- Encourage curiosity with questions like "What do you think happens if...?" or "Can you guess why...?"
- Celebrate student insights with enthusiasm: "Excellent thinking!" or "That's a great question!"
GRADE-SPECIFIC ENGAGEMENT:
{f"- Use simple, relatable examples from everyday life (pizza, toys, playground)" if grade_level == "K-2" else ""}
{f"- Connect to video games, sports, and fun activities they enjoy" if grade_level == "3-5" else ""}
{f"- Reference pop culture, social media, and things they find cool" if grade_level == "6-8" else ""}
{f"- Use real-world applications like engineering, finance, and technology" if grade_level == "9-12" else ""}
{f"- Discuss advanced applications in science, research, and professional fields" if grade_level == "College" else ""}
INTERACTIVE TEACHING APPROACH:
1. Start with a hook - something surprising, funny, or amazing about the topic
2. Explain the concept using {grade_info['language']}
3. Give a practical, relatable example
4. Ask a thought-provoking question to check understanding
5. Share a fun fact or real-world application
6. End with an engaging question to continue the conversation
CONTENT RULES:
- NO generic introductions like "I'm MathMind"
- Use SIMPLE, CLEAN LaTeX: $\\pi r^2$ (good), NOT complicated nested expressions
- Write math clearly: Area = $\\pi r^2$ where A is area, r is radius
- NEVER use malformed LaTeX like $$π or multiple $ signs
- Include emojis sparingly but effectively (🤔💡🎯✨)
- Make every response feel like a conversation, not a lecture
- Always end with a question or invitation to explore more
- Keep LaTeX expressions SHORT and SIMPLE to avoid rendering issues
RESPONSE APPROACH: {response_approach}
Document Context: {context_to_use[:1000] if context_to_use != "No relevant document context - use general knowledge" else "No relevant document context - use general knowledge"}
Recent Conversation:
{chr(10).join([f"Student: {msg['content']}" if msg['role'] == 'user' else f"You: {msg['content']}" for msg in conversation_history[-4:]])}
Student's Current Question: {prompt}
Respond with enthusiasm and curiosity, making this math concept come alive for a {grade_info['age']} student! Remember to ask engaging follow-up questions."""
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
try:
payload = {
"model": "meta-llama/Meta-Llama-3-8B-Instruct",
"messages": [{"role": "user", "content": context_prompt}],
"max_tokens": 800,
"temperature": 0.7
}
response = requests.post(
"https://router.huggingface.co/v1/chat/completions",
headers=headers,
json=payload,
timeout=60
)
if response.status_code == 200:
result = response.json()
if "choices" in result and len(result["choices"]) > 0:
raw_response = result["choices"][0]["message"]["content"]
# Apply guardrails (blocked content is replaced with a redirect message).
safe_response = apply_content_guardrails(raw_response)
# Format with LaTeX
formatted_response = format_latex_response(safe_response)
return formatted_response
# Non-200 or malformed body: report a user-facing error string.
return "❌ Sorry, I'm having trouble connecting right now. Please try again!"
except Exception as e:
return f"❌ Error: {str(e)}"
def mathmind_chat(message: str, grade_level: str, chat_history: List, pdf_context_display: str) -> tuple:
    """Main MathMind chat function with memory and context.

    Note: the PDF context comes from the module-level variable, not from the
    `pdf_context_display` widget value.
    """
    global mathmind_conversation_history, mathmind_pdf_context

    if not message.strip():
        return chat_history, ""

    # Record the user's turn (grade tag included for the model's benefit).
    mathmind_conversation_history.append(
        {"role": "user", "content": f"[Grade: {grade_level}] {message}"}
    )

    reply = call_hugging_face_math_api(
        message, mathmind_conversation_history, mathmind_pdf_context, grade_level
    )

    # Record the assistant turn and surface it in the chatbot widget.
    mathmind_conversation_history.append({"role": "assistant", "content": reply})
    chat_history.append([message, reply])
    return chat_history, ""
def clear_mathmind_chat():
    """Reset the MathMind conversation memory and clear the chat widgets."""
    global mathmind_conversation_history
    mathmind_conversation_history = []
    # Empty history for the chatbot plus an empty string for the input box.
    return [], ""
def update_pdf_context(pdf_file):
"""Add PDF to MathMind context (supports multiple files).

Returns (status_message, context_display_html). Duplicate file names are
rejected; valid PDFs are appended to the global uploaded_pdfs list and the
combined context string is rebuilt.
"""
global mathmind_pdf_context, uploaded_pdfs
if pdf_file is None:
return "No PDF uploaded", generate_context_display()
# Handle both file objects and bytes data
if hasattr(pdf_file, 'read'):
# It's a file object, read the bytes
pdf_data = pdf_file.read()
file_name = getattr(pdf_file, 'name', 'uploaded_file.pdf')
else:
# It's already bytes data
pdf_data = pdf_file
file_name = 'uploaded_file.pdf'
# Check if file already exists (by name only — contents are not compared)
for existing_pdf in uploaded_pdfs:
if existing_pdf["name"] == file_name:
return f"⚠️ File '{file_name}' is already uploaded", generate_context_display()
text, is_valid, message = extract_pdf_text(pdf_data)
if is_valid:
# Add to uploaded files list
uploaded_pdfs.append({
"name": file_name,
"content": text,
# The summary is everything after the first blank line of the status message.
"summary": message.split('\n\n', 1)[1] if '\n\n' in message else "Math content",
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M")
})
# Update combined context
update_combined_context()
return message, generate_context_display()
else:
# Validation failed: surface the rejection message, leave context untouched.
return message, generate_context_display()
def update_combined_context():
    """Rebuild the global PDF context string from every uploaded file.

    Each file's text is prefixed with an "=== Content from <name> ===" marker
    so downstream code can attribute content to a document. An empty upload
    list yields an empty context string.
    """
    global mathmind_pdf_context
    mathmind_pdf_context = "\n".join(
        f"=== Content from {pdf['name']} ===\n{pdf['content']}\n"
        for pdf in uploaded_pdfs
    )
# NOTE(review): the HTML string literals in this function appear truncated or
# stripped (the multi-line literal after `return "` and the unbalanced closing
# quote on the final append) — reconstruct from version control. Code left
# byte-identical below.
def generate_context_display():
"""Generate HTML display for current PDF context"""
if not uploaded_pdfs:
return "
No PDF context loaded
"
html_parts = [f"
📚 Loaded Documents ({len(uploaded_pdfs)}):
"]
for i, pdf in enumerate(uploaded_pdfs, 1):
# Create a clean card for each document
card_html = f"""
📄 {i}. {pdf['name']}
⏰ Added: {pdf['timestamp']}
{pdf['summary'][:300]}{'...' if len(pdf['summary']) > 300 else ''}
"""
html_parts.append(card_html)
# Add instruction for removing files
if len(uploaded_pdfs) > 0:
html_parts.append("""
💡 Tip: To remove individual documents, you can upload new ones to replace them, or use "Remove All Documents" below to clear everything.
")
# Typing indicator (hidden until a message is being processed).
# NOTE(review): the HTML value strings appear stripped by extraction — confirm
# the original markup in version control.
typing_indicator = gr.HTML(
value="",
visible=False
)
# Context display panel for the Albert chat.
context_display = gr.HTML(
value=""
)
# Event handlers for Albert chat
def send_message_with_typing(message, name, age_group, history):
"""Send message with typing effect and context memory.

Returns (history, cleared_input, context_html, scroll_script).
NOTE(review): this is a plain function (no yield), so the per-character
"typing" loop below only blocks the server (~15 ms per character) without
rendering intermediate frames; typing_html is built but never used.
"""
if not message.strip():
return history, "", "", ""
# Add user message to history
if history is None:
history = []
# Add user message with name
history.append([f"{name}: {message}", None])
# Show typing indicator with Albert thinking animation
typing_html = """
🧠
Albert is thinking
...
"""
# Generate Albert's response with full context
albert_response = generate_albert_response(message, name, age_group, history)
# Fix any malformed image HTML in the response
albert_response = fix_malformed_image_html(albert_response)
# Add Albert's response with fast typing effect
import time
typed_response = ""
for i, char in enumerate(albert_response):
typed_response += char
history[-1][1] = f"{typed_response}|"
time.sleep(0.015) # Even faster typing speed
# Final response without cursor
history[-1][1] = f"{albert_response}"
# No context memory display
context_html = ""
# Add auto-scroll trigger
scroll_script = """
"""
return history, "", context_html, scroll_script
def clear_chat():
    """Reset the Albert chat: history, input box, context panel, typing indicator."""
    empty_history = []
    return empty_history, "", "", ""
def initialize_chat(name, age_group):
    """Seed the chat with Albert's welcome message once a non-blank name is entered."""
    if not name.strip():
        # No name yet — keep everything empty.
        return [], "", ""
    greeting = f"Hi {name}! 🧠✨ I'm Albert, your learning buddy! I'm so excited to help you learn! I'll remember our conversation so you can ask follow-up questions anytime. What would you like to know about today? 😊🚀"
    # History rows are [user, assistant]; None marks an assistant-only message.
    return [[None, greeting]], "", ""
# Connect event handlers
send_btn.click(
fn=send_message_with_typing,
inputs=[chat_input, user_name, user_age_group, chat_display],
outputs=[chat_display, chat_input, context_display, typing_indicator]
)
# BUG FIX: clear_chat() returns (history, input, context, typing) but the
# output list previously repeated context_display and never cleared the
# input box — route the second value to chat_input.
clear_btn.click(
fn=clear_chat,
outputs=[chat_display, chat_input, context_display, typing_indicator]
)
# Initialize chat when name is entered.
# BUG FIX: initialize_chat() returns (history, input, context); the second
# output slot previously duplicated context_display instead of chat_input.
user_name.change(
fn=initialize_chat,
inputs=[user_name, user_age_group],
outputs=[chat_display, chat_input, context_display]
)
# NOTE(review): several string literals in this section appear truncated or
# unbalanced after extraction (the gr.HTML("""...") calls and the disabled
# Content Generator block) — reconstruct from version control. Code left
# byte-identical below.
with gr.TabItem("📚 Lesson Plan Generator"):
with gr.Row():
with gr.Column():
# Modern Form Section
gr.HTML("""
📚 Lesson Plan Generator
Create comprehensive lesson plans with AI assistance
")
# Event handler for lesson plan generation
generate_lesson_btn.click(
fn=generate_lesson_plan_with_progress,
inputs=[topic, subject, grade_level, duration, difficulty],
outputs=[progress_display, lesson_plan_output],
show_progress=True
)
# DISABLED: Content Generator Tab
# Commented out as requested - keeping all functionality intact for future use
"""
with gr.TabItem("📝 Content Generator"):
with gr.Row():
with gr.Column(scale=1):
# Modern Form Section
gr.HTML('''
📝 Content Generation
Generate educational materials tailored to your specific needs
""")
# Event Handlers
# Both the send button and pressing Enter in the textbox submit the message.
mathmind_send.click(
fn=mathmind_chat_with_typing,
inputs=[mathmind_msg, mathmind_grade, mathmind_chatbot, mathmind_pdf_context],
outputs=[mathmind_chatbot, mathmind_msg],
show_progress=True
)
mathmind_msg.submit(
fn=mathmind_chat_with_typing,
inputs=[mathmind_msg, mathmind_grade, mathmind_chatbot, mathmind_pdf_context],
outputs=[mathmind_chatbot, mathmind_msg],
show_progress=True
)
# Clear resets both the chatbot display and the global conversation memory.
mathmind_clear.click(
fn=clear_mathmind_chat,
outputs=[mathmind_chatbot, mathmind_msg]
)
# PDF management functions
def handle_multiple_pdf_upload(pdf_files):
    """Process one or many uploaded PDFs and report a combined status.

    Returns (status_message, refreshed_context_display_html).
    """
    if not pdf_files:
        return "No files uploaded", generate_context_display()

    # Normalise a single upload into a one-element list.
    batch = pdf_files if isinstance(pdf_files, list) else [pdf_files]

    statuses = []
    ok_count = 0
    for item in batch:
        if item is None:
            continue
        status, _ = update_pdf_context(item)
        if "successfully processed" in status:
            ok_count += 1
        statuses.append(status.split('\n')[0])  # keep only the headline

    recent = "\n".join(statuses[-3:])  # show at most the last 3 messages
    if ok_count > 0:
        combined = f"✅ {ok_count} PDF(s) successfully processed!\n" + recent
    else:
        combined = "❌ No files were successfully processed:\n" + recent
    return combined, generate_context_display()
def handle_remove_all():
"""Remove all files from context.

Delegates to remove_pdf_context() (defined elsewhere in this file) and
refreshes the context panel.
"""
status, _ = remove_pdf_context()
return status, generate_context_display()
# Event handlers
# Uploading file(s) processes them and refreshes the status box + context panel.
mathmind_pdf_upload.change(
fn=handle_multiple_pdf_upload,
inputs=[mathmind_pdf_upload],
outputs=[mathmind_pdf_status, mathmind_pdf_context]
)
# "Remove All Documents" clears every uploaded PDF.
mathmind_remove_all_pdf.click(
fn=handle_remove_all,
outputs=[mathmind_pdf_status, mathmind_pdf_context]
)
# Example Questions Handler
def show_example_questions():
    """Return five starter Q&A rows to seed the MathMind chatbot display."""
    examples = [
        "Why is $\\pi$ such a big deal? What makes it so special?",
        "How do video games actually use math behind the scenes?",
        "Can you explain fractions using pizza slices? I love pizza!",
        "What's the coolest thing about triangles that most people don't know?",
        "Help me solve 2x + 5 = 15 step by step - and tell me why it works!",
        "How is math used in creating special effects in movies?",
        "What's the weirdest mathematical fact that will blow my mind?",
        "Can you show me a math magic trick that I can use to impress friends?",
        "How do architects use geometry when designing buildings?",
        "What's the connection between music and mathematics?"
    ]
    # Only the first five examples are surfaced in the chatbot.
    seeded = []
    for question in examples[:5]:
        seeded.append([f"🎯 Try this: {question}",
                       f"Awesome choice! This is going to be fun! 🚀"])
    return seeded
# Clicking the examples button seeds the chatbot with sample questions.
mathmind_examples.click(
fn=show_example_questions,
outputs=[mathmind_chatbot]
)
# Welcome message
def mathmind_welcome():
    """Initial chatbot history: a single assistant-only welcome message."""
    return [[None, """🎉 Hey there, future mathematician! Ready to discover how amazing math can be?
**Let's get this math party started! 🎊**
Try asking me something like:
• "Why is $\\pi$ so special?"
• "How do video games use math?"
• "What's the coolest thing about triangles?"
• "Can you help me with fractions using pizza?"
Don't be shy - I LOVE curious questions! What's got you puzzled today? 🤓💡"""]]
# Load welcome message when tab loads
# demo.load fires once when the UI is first rendered in the browser.
demo.load(
fn=mathmind_welcome,
outputs=[mathmind_chatbot]
)
# Admin tab is hidden from regular users; flip visible=True to expose it.
with gr.TabItem("🔧 Admin Dashboard", visible=False): # Hidden by default
with gr.Row():
with gr.Column():
gr.Markdown("""
## 🔧 Admin Dashboard
**Access Level**: Administrator Only
**Database**: SQLite (feedback.db)
**Last Updated**: Real-time
""")
refresh_btn = gr.Button("🔄 Refresh Data", variant="secondary")
view_feedback_btn = gr.Button("📊 View All Feedback", variant="primary")
with gr.Column():
admin_output = gr.Markdown(label="Admin Data")
# Event handlers for admin
def format_stats():
    """Render feedback statistics from the database as a Markdown report."""
    stats = get_feedback_stats()
    # Header section (literal content kept flush-left to preserve Markdown).
    parts = [f"""
# 📊 Feedback Statistics
**Total Feedback Entries:** {stats['total_feedback']}
**Average Rating:** {stats['avg_rating']}/5 ⭐
**Database Status:** ✅ Active
## Recent Feedback (Last 5)
"""]
    # One card per recent feedback row (tuples from the feedback table).
    for timestamp, feedback_type, rating, comments, user_email in stats['recent_feedback']:
        truncated = f"{comments[:100]}{'...' if len(comments) > 100 else ''}"
        parts.append(f"""
### Entry from {timestamp}
- **Type:** {feedback_type}
- **Rating:** {rating}/5 ⭐
- **Comments:** {truncated}
- **Email:** {user_email if user_email else 'Not provided'}
---
""")
    return "".join(parts)
# Wire the admin buttons to their render functions.
refresh_btn.click(
fn=format_stats,
outputs=admin_output
)
view_feedback_btn.click(
fn=view_feedback_admin,
outputs=admin_output
)
# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
demo.launch()