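"""TinyTroop: a Gradio app that generates synthetic student feedback for teachers.

Pipeline (as implemented below): create student group profiles with Gemini,
simulate first-person responses from each group to the teacher's plan and
materials, synthesize those responses, research teaching strategies with
Tavily, and produce evidence-based recommendations.
"""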
import gradio as gr
import google.generativeai as genai
from typing import TypedDict, List, Dict, Any, Tuple
from pptx import Presentation
import pandas as pd
import os
from pathlib import Path
import re
import json
from PyPDF2 import PdfReader
from tavily import TavilyClient
import tempfile
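# Assumed dependencies (not pinned in this file): gradio, google-generativeai,
# python-pptx, pandas, PyPDF2, tavily-python, and openpyxl for .xlsx support.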
def initialize_apis(google_api_key: str, tavily_api_key: str):
    """Initialize the APIs with provided keys."""
    genai.configure(api_key=google_api_key)
    model = genai.GenerativeModel('gemini-1.5-flash-latest')
    tavily = TavilyClient(api_key=tavily_api_key)
    return model, tavily
def process_step(prompt: str, model, max_retries: int = 2) -> str:
    """Process a single Gemini step with retry logic."""
    for attempt in range(max_retries):
        try:
            response = model.generate_content(prompt)
            return response.text
        except Exception as e:
            if attempt == max_retries - 1:
                return f"Error in processing: {str(e)}"
            continue
def load_document(file_path: str) -> str:
    """Load and extract text from uploaded document."""
    try:
        file_ext = Path(file_path).suffix.lower()
        if file_ext == '.pdf':
            try:
                reader = PdfReader(file_path)
                text_content = []
                for page in reader.pages:
                    text_content.append(page.extract_text())
                return "\n".join(text_content)[:4000]
            except Exception as e:
                print(f"PDF reading failed: {str(e)}")
                return f"Error loading PDF: {str(e)}"
        elif file_ext == '.pptx':
            prs = Presentation(file_path)
            text_content = []
            for slide in prs.slides:
                for shape in slide.shapes:
                    if hasattr(shape, "text"):
                        text_content.append(shape.text)
            return "\n".join(text_content)[:4000]
        elif file_ext in ['.txt', '.md']:
            with open(file_path, 'r', encoding='utf-8') as file:
                return file.read()[:4000]
        elif file_ext in ['.csv', '.xlsx']:
            if file_ext == '.csv':
                df = pd.read_csv(file_path, encoding='utf-8')
            else:
                df = pd.read_excel(file_path)
            return df.to_string()[:4000]
        else:
            return f"Unsupported file type: {file_ext}"
    except Exception as e:
        print(f"Error in load_document: {str(e)}")
        return f"Error loading document: {str(e)}"
def extract_profiles(text: str) -> List[Dict]:
    """Extract individual profiles from the generated text."""
    try:
        sections = re.split(r'(?m)^#{1,2}\s+', text)
        sections = [s.strip() for s in sections if s.strip()]
        profiles = []
        for section in sections:
            profile = {
                'content': section,
                'name': section.split('\n')[0].strip()
            }
            profiles.append(profile)
        return profiles
    except Exception as e:
        print(f"Error extracting profiles: {e}")
        return []
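# Illustrative example (hypothetical input): for text like
# "# Group A\n...details...\n# Group B\n...details...", extract_profiles returns
# [{'name': 'Group A', 'content': 'Group A\n...details...'},
#  {'name': 'Group B', 'content': 'Group B\n...details...'}].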
def simulate_student_response(profile: Dict, focus: str, document_content: str, model) -> str:
    """Simulate detailed student response from their perspective."""
    prompt = f"""
    You are a student with this profile:
    {profile['content']}
    Based on this context:
    {focus}
    And these materials:
    {document_content[:50000]}
    Respond in first person as this student:
    # Emotional Response
    [Express your feelings about this unit/lesson/assessment, your emotional state, and comfort level]
    # Learning Experience
    [Describe your understanding, confusions, and what makes sense or doesn't]
    # Behavioral Response
    [Describe how you're acting in class, your participation, and interactions]
    # Personal Feedback
    [Share what would help you learn better and what you need from the teacher]
    """
    return process_step(prompt, model)
def synthesize_responses(profiles_text: str, all_responses: str, model) -> str:
    """Synthesize student responses to identify key themes and needs."""
    synthesis_prompt = f"""
    Review this student feedback:
    {all_responses[:20000]}
    Provide a detailed synthesis that includes:
    # Key Patterns
    [Identify common themes across responses]
    # Critical Challenges
    [List specific learning obstacles and emotional barriers]
    # Engagement Points
    [Note areas of interest and motivation]
    # Support Gaps
    [Identify unmet needs and required resources]
    Be specific and cite direct evidence from student responses.
    Tag each insight with [Academic], [Emotional], or [Behavioral] for clarity.
    Make sure to cover all student groups.
    """
    return process_step(synthesis_prompt, model)
def conduct_research(queries: List[Dict[str, str]], tavily_client) -> str:
    """Conduct research based on synthesized needs."""
    research_results = []
    for query in queries:
        search_query = query['query'].strip()
        print(f"Researching: {search_query}")
        results = tavily_client.search(
            query=search_query,
            search_depth="advanced",
            include_answer=True,
            include_raw_content=True,
            include_images=False,
            max_tokens=1000,
        )
        query_results = f"""
# Research: {query['area']} - {query['challenge']}
"""
        if 'results' in results:
            for idx, result in enumerate(results['results'][:3], 1):
                query_results += f"""
## Source {idx}
**Title**: {result['title']}
**Summary**: {result['content']}
**URL**: {result['url']}
"""
        elif 'answer' in results:
            query_results += f"""
## AI-Generated Summary
{results['answer']}
## Key Sources:
"""
            for idx, result in enumerate(results.get('results', [])[:2], 1):
                query_results += f"- {result.get('url', 'Source not available')}\n"
        research_results.append(query_results)
    return "\n".join(research_results)
def generate_research_queries(synthesis: str, model) -> List[Dict[str, str]]:
    """Generate specific research queries based on synthesis."""
    query_prompt = f"""
    Based on this synthesis of student responses:
    {synthesis}
    Generate 5 focused educational research queries. Format as JSON list with:
    - 'area': The focus area (Academic/Emotional/Behavioral)
    - 'challenge': The specific challenge
    - 'query': A clear search query for finding teaching strategies
    Example:
    [
        {{
            "area": "Academic",
            "challenge": "Limited English proficiency",
            "query": "effective, evidence-based translanguaging teaching strategies for ESL students"
        }}
    ]
    """
    queries_text = process_step(query_prompt, model)
    queries_text = queries_text.strip()
    # Strip a Markdown code fence if the model wrapped the JSON in one.
    if queries_text.startswith("```"):
        queries_text = re.sub(r"^```(?:json)?\s*|\s*```$", "", queries_text)
    return json.loads(queries_text)
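# Note: json.loads raises a ValueError if the model returns malformed JSON; the
# try/except in run_simulation surfaces that as an error message in the UI.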
def generate_final_recommendations(
    profiles_text: str,
    all_responses: str,
    synthesis: str,
    research_results: str,
    model
) -> str:
    """Generate comprehensive recommendations based on all data."""
    recommendation_prompt = f"""
    Review all this information:
    Original Profiles:
    {profiles_text[:800]}
    Student Responses:
    {all_responses[:800]}
    Response Synthesis:
    {synthesis[:800]}
    Research Findings:
    {research_results[:1500]}
    Provide a comprehensive, evidence-based improvement plan:
    # Executive Summary
    [Brief overview of situation and key findings]
    # Evidence-Based Strategies
    [For each major challenge identified in the synthesis, provide:
    - The specific challenge
    - Research-backed solution
    - Implementation steps
    - Expected outcomes]
    # Implementation Timeline
    [Organize recommendations into:
    - Immediate actions (Next class)
    - Short-term changes (Next week)
    - Long-term adjustments (Next month)]
    # Required Resources
    [List specific materials, tools, or support needed]
    # Success Metrics
    [How to measure improvement for each intervention]
    # Potential Challenges
    [Anticipate implementation obstacles and solutions]
    Format with markdown and focus on practical, actionable steps.
    """
    return process_step(recommendation_prompt, model)
def run_simulation(
    google_api_key: str,
    tavily_api_key: str,
    class_info: str,
    group_descriptions: str,
    focus: str,
    document_path: str,
    progress=gr.Progress()
) -> Tuple[str, str, str, str, str]:
    try:
        # Initialize APIs
        model, tavily_client = initialize_apis(google_api_key, tavily_api_key)
        # Load document
        progress(0.1, desc="Loading document...")
        document_content = load_document(document_path) if document_path else ""
        # Generate initial profiles
        progress(0.2, desc="Creating student profiles...")
        profile_prompt = f"""
        Based on this information:
        Class: {class_info[:500]}
        Groups: {group_descriptions[:500]}
        Create detailed student group profiles. For each group, include:
        # [Group Name]
        ## Learning Profile
        - Academic strengths and challenges
        - Prior knowledge and experience
        - English proficiency
        - Study habits
        ## Psychological Profile
        - Emotional characteristics
        - Motivation factors
        - Self-perception in learning
        - Self-management, executive functioning skills, behavior patterns
        ## Social Profile
        - Interaction patterns
        - Group dynamics
        - Communication style
        ## Cultural Profile
        - Identity markers (gender, race, social class, religion)
        - Relevant cultural characteristics
        ## Support Needs
        - Academic support requirements
        - Emotional support needs
        - Environmental preferences
        Create realistic personas that feel like real students.
        """
        profiles_text = process_step(profile_prompt, model)
        yield profiles_text, gr.update(), gr.update(), gr.update(), gr.update()
        # Extract individual profiles
        profiles = extract_profiles(profiles_text)
        # Simulate responses for each profile
        progress(0.4, desc="Simulating student responses...")
        all_responses = "# Student Responses\n\n"
        for idx, profile in enumerate(profiles, 1):
            progress(0.4 + (0.1 * (idx / len(profiles))), desc=f"Simulating {profile['name']}...")
            response = simulate_student_response(profile, focus, document_content, model)
            all_responses += f"# {profile['name']}\n{response}\n\n"
            yield profiles_text, all_responses, gr.update(), gr.update(), gr.update()
        # Synthesize responses
        progress(0.6, desc="Synthesizing responses...")
        synthesis = synthesize_responses(profiles_text, all_responses, model)
        yield profiles_text, all_responses, synthesis, gr.update(), gr.update()
        # Generate and conduct research
        progress(0.7, desc="Generating research queries...")
        research_queries = generate_research_queries(synthesis, model)
        progress(0.8, desc="Conducting research...")
        research_results = conduct_research(research_queries, tavily_client)
        yield profiles_text, all_responses, synthesis, research_results, gr.update()
        # Generate final recommendations
        progress(0.9, desc="Generating recommendations...")
        recommendations = generate_final_recommendations(
            profiles_text,
            all_responses,
            synthesis,
            research_results,
            model
        )
        progress(1.0, desc="Complete!")
        yield profiles_text, all_responses, synthesis, research_results, recommendations
    except Exception as e:
        error_msg = f"Error: {str(e)}"
        yield error_msg, "Processing failed", "Processing failed", "Processing failed", "Processing failed"
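# run_simulation is a generator: Gradio streams each yield to the five Markdown
# outputs, so partial results appear in the UI as the pipeline progresses.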
def create_gradio_interface():
    with gr.Blocks() as app:
        gr.Markdown("""
        # TinyTroop
        ## Get evidence-based recommendations from synthetic student feedback
        """)
        with gr.Row():
            with gr.Column():
                # Add API key inputs at the top
                with gr.Group():
                    gr.Markdown("### API Configuration")
                    google_api_key = gr.Textbox(
                        label="Google API Key",
                        placeholder="Enter your Google API key",
                        type="password"
                    )
                    tavily_api_key = gr.Textbox(
                        label="Tavily API Key",
                        placeholder="Enter your Tavily API key",
                        type="password"
                    )
                class_info = gr.TextArea(
                    label="Class Information",
                    placeholder="Describe your class (subject, grade level, context, size...)",
                    lines=3
                )
                group_descriptions = gr.TextArea(
                    label="Student Groups",
                    placeholder="Describe the different student groups in your class...",
                    lines=3
                )
                focus = gr.TextArea(
                    label="Learning Objectives",
                    placeholder="Outline the learning objectives, current plan, activities, and/or assessments...",
                    lines=3
                )
                document_path = gr.Textbox(
                    label="Document Path",
                    placeholder="Path to your file (e.g., lesson.pdf)"
                )
                simulate_btn = gr.Button(
                    "Generate Simulation",
                    variant="primary"
                )
            # Outputs in vertical layout
            with gr.Column():
                gr.Markdown("### Student Profiles")
                profiles_output = gr.Markdown(
                    value="Student profiles will appear here..."
                )
                gr.Markdown("### Student Responses")
                responses_output = gr.Markdown(
                    value="Student responses will appear here..."
                )
                gr.Markdown("### Response Synthesis")
                synthesis_output = gr.Markdown(
                    value="Synthesis of responses will appear here..."
                )
                gr.Markdown("### Research Findings")
                research_output = gr.Markdown(
                    value="Research results will appear here..."
                )
                gr.Markdown("### Evidence-Based Recommendations")
                recommendations_output = gr.Markdown(
                    value="Final recommendations will appear here..."
                )
        # Download buttons
        with gr.Row():
            download_profiles = gr.Button("Download Profiles")
            download_responses = gr.Button("Download Responses")
            download_synthesis = gr.Button("Download Synthesis")
            download_research = gr.Button("Download Research")
            download_recommendations = gr.Button("Download Recommendations")
        # Hidden components for download functionality
        profiles_file = gr.File(visible=False)
        responses_file = gr.File(visible=False)
        synthesis_file = gr.File(visible=False)
        research_file = gr.File(visible=False)
        recommendations_file = gr.File(visible=False)
        # Set up event handlers
        simulate_btn.click(
            run_simulation,
            inputs=[
                google_api_key,
                tavily_api_key,
                class_info,
                group_descriptions,
                focus,
                document_path
            ],
            outputs=[
                profiles_output,
                responses_output,
                synthesis_output,
                research_output,
                recommendations_output
            ]
        )
        def create_download(text, filename):
            """Create a downloadable file from text content."""
            # Write the text into a fresh temp directory so the served file
            # keeps its intended name (profiles.md, responses.md, ...).
            temp_dir = Path(tempfile.mkdtemp())
            file_path = temp_dir / filename
            file_path.write_text(text, encoding="utf-8")
            return str(file_path)
        # Set up download button handlers
        download_profiles.click(
            lambda x: create_download(x, "profiles.md"),
            inputs=[profiles_output],
            outputs=[profiles_file]
        )
        download_responses.click(
            lambda x: create_download(x, "responses.md"),
            inputs=[responses_output],
            outputs=[responses_file]
        )
        download_synthesis.click(
            lambda x: create_download(x, "synthesis.md"),
            inputs=[synthesis_output],
            outputs=[synthesis_file]
        )
        download_research.click(
            lambda x: create_download(x, "research.md"),
            inputs=[research_output],
            outputs=[research_file]
        )
        download_recommendations.click(
            lambda x: create_download(x, "recommendations.md"),
            inputs=[recommendations_output],
            outputs=[recommendations_file]
        )
    return app

# Launch the app
if __name__ == "__main__":
    app = create_gradio_interface()
    app.launch(debug=True, share=True, show_error=True)
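# Note: share=True requests a temporary public Gradio link; set it to False for
# local-only use.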