# -*- coding: utf-8 -*-
"""app.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1e_M_kKgA4L3dmmiCjbOrNT3hBfnny_9P

# Core system
"""
# core_system.py - Modified with fixed exam functionality
import os
import json
import datetime
import time
from datetime import timedelta
from typing import List, Dict, Any, Optional

# LLM Integration using LangChain
class LLMService:
    def __init__(self, api_key):
        self.api_key = api_key
        # Changed from ChatOpenAI to ChatGroq
        try:
            from langchain_groq import ChatGroq
            self.chat_model = ChatGroq(
                model="llama3-70b-8192",  # a Groq-compatible model
                temperature=0.2,
                groq_api_key=api_key
            )
        except ImportError:
            # Fall back to direct HTTP calls if langchain_groq is not installed;
            # importing requests here verifies the fallback path is usable.
            import requests
            self.chat_model = None

    def create_chain(self, template: str, output_key: str = "result"):
        """Build an LLMChain for a template (only used when LangChain is available)."""
        if self.chat_model:
            from langchain.prompts import ChatPromptTemplate
            from langchain.chains import LLMChain
            chat_prompt = ChatPromptTemplate.from_template(template)
            return LLMChain(
                llm=self.chat_model,
                prompt=chat_prompt,
                output_key=output_key,
                verbose=True
            )
        return None

    def get_completion(self, prompt: str) -> str:
        if self.chat_model:
            # Invoke the chat model directly instead of routing the prompt through
            # ChatPromptTemplate: prompts here often contain literal braces (the
            # JSON examples), which a template would misread as input variables.
            response = self.chat_model.invoke(prompt)
            return response.content
        else:
            # Direct API call if LangChain is not available
            import requests
            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }
            data = {
                "model": "llama3-70b-8192",
                "messages": [{"role": "user", "content": prompt}],
                "temperature": 0.2,  # keep in sync with the ChatGroq setting above
                "max_tokens": 2048
            }
            response = requests.post(
                "https://api.groq.com/openai/v1/chat/completions",
                headers=headers,
                json=data
            )
            if response.status_code == 200:
                return response.json()["choices"][0]["message"]["content"]
            raise Exception(f"API Error: {response.status_code} - {response.text}")
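
    # Minimal usage sketch (illustrative only; assumes a valid Groq API key in
    # the GROQ_API_KEY environment variable):
    #   svc = LLMService(os.environ["GROQ_API_KEY"])
    #   print(svc.get_completion("Explain list comprehensions in one sentence."))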

    def generate_module_content(self, day: int, topic: str) -> str:
        prompt = f"""
        Create a comprehensive Python programming module for Day {day} covering {topic}.

        The module should follow this structure, in Markdown format:

        # [Module Title]

        ## Introduction
        [A brief introduction to the day's topics]

        ## Section 1: [Section Title]
        [Detailed explanation of concepts]

        ### Code Examples
        ```python
        # Example code with comments
        ```

        ### Practice Exercises
        [2-3 exercises with clear instructions]

        ## Section 2: [Section Title]
        [Repeat the pattern for all relevant topics]

        Make sure the content:
        - Is comprehensive but focused on the day's topic
        - Includes clear examples with comments
        - Has practice exercises that build skills progressively
        - Uses proper Markdown formatting
        """
        return self.get_completion(prompt)

    def generate_exam_questions(self, day: int, topic: str, previous_mistakes: Optional[List[Dict]] = None) -> List[Dict]:
        mistake_context = ""
        if previous_mistakes:
            mistakes = "\n".join([
                f"- Question: {m['question']}\n  Wrong Answer: {m['user_answer']}\n  Correct Answer: {m['correct_answer']}"
                for m in previous_mistakes[:3]
            ])
            mistake_context = f"""
        Include variations of questions related to these previous mistakes:
        {mistakes}
        """
        prompt = f"""
        Create a 1-hour Python exam for Day {day} covering {topic}.
        {mistake_context}
        Include 5 questions with a mix of:
        - Multiple-choice (4 options each)
        - Short-answer (requiring 1-3 lines of text)
        - Coding exercises (simple functions or snippets)

        Return your response as a JSON array where each question is an object with these fields:
        - question_type: "multiple-choice", "short-answer", or "coding"
        - question_text: The full question text
        - options: Array of options (for multiple-choice only)
        - correct_answer: The correct answer or solution
        - explanation: Detailed explanation of the correct answer
        - difficulty: Number from 1 (easiest) to 5 (hardest)

        Example:
        [
            {{
                "question_type": "multiple-choice",
                "question_text": "What is the output of print(3 * '4' + '5')?",
                "options": ["12", "445", "4445", "Error"],
                "correct_answer": "4445",
                "explanation": "The * operator repeats a string, and + concatenates strings",
                "difficulty": 2
            }},
            {{
                "question_type": "coding",
                "question_text": "Write a function that returns the sum of all even numbers in a list.",
                "options": null,
                "correct_answer": "def sum_even(numbers):\\n    return sum(x for x in numbers if x % 2 == 0)",
                "explanation": "This solution uses a generator expression with the sum function to add only even numbers",
                "difficulty": 3
            }}
        ]

        ONLY return the valid JSON array. Do NOT include any explanatory text or code fences.
        """
        result = self.get_completion(prompt)
        # Strip stray code fences the model may add despite the instructions
        result = result.strip()
        if result.startswith("```json"):
            result = result.split("```json", 1)[1]
        if result.endswith("```"):
            result = result.rsplit("```", 1)[0]
        result = result.strip()
        try:
            return json.loads(result)
        except json.JSONDecodeError as e:
            print(f"JSON decode error: {e}")
            print(f"Raw response: {result}")
            # Fall back to a minimal question so the exam can still run
            return [{
                "question_type": "short-answer",
                "question_text": "There was an error generating questions. Please describe what you've learned today.",
                "options": None,
                "correct_answer": "Any reasonable summary",
                "explanation": "This is a backup question",
                "difficulty": 1
            }]

    def evaluate_answer(self, question: Dict, user_answer: str) -> Dict:
        prompt = f"""
        Grade this response to a Python programming question:

        Question Type: {question["question_type"]}
        Question: {question["question_text"]}
        Correct Answer: {question["correct_answer"]}
        Student's Answer: {user_answer}

        Return your evaluation as a JSON object with these fields:
        - is_correct: boolean (true/false)
        - feedback: detailed explanation of what was correct/incorrect
        - correct_solution: the correct solution with explanation, if the answer was wrong

        For coding questions, be somewhat lenient - focus on logical correctness rather than exact syntax matching.
        For multiple-choice questions, the answer must match the correct option.
        For short-answer questions, assess whether the key concepts are present and correct.

        ONLY return the valid JSON object. Do NOT include any explanatory text.
        """
        result = self.get_completion(prompt)
        # Strip stray code fences the model may add despite the instructions
        result = result.strip()
        if result.startswith("```json"):
            result = result.split("```json", 1)[1]
        if result.endswith("```"):
            result = result.rsplit("```", 1)[0]
        result = result.strip()
        try:
            return json.loads(result)
        except json.JSONDecodeError as e:
            print(f"JSON decode error: {e}")
            print(f"Raw response: {result}")
            # Return a fallback evaluation
            return {
                "is_correct": False,
                "feedback": "There was an error evaluating your answer. Please try again.",
                "correct_solution": question["correct_answer"]
            }

    def answer_student_question(self, question: str, context: Optional[str] = None) -> str:
        context_text = f"Context from previous questions: {context}\n\n" if context else ""
        prompt = f"""
        {context_text}You are an expert Python tutor. Answer this student's question clearly, with explanations and examples:

        {question}

        - Use code examples where appropriate
        - Break down complex concepts step by step
        - Be comprehensive but concise
        - Use proper Markdown formatting for code
        """
        return self.get_completion(prompt)

# Content Generator with simplified storage
class ContentGenerator:
    # Topics for each curriculum day, shared by module and exam generation
    DAY_TOPICS = {
        1: "Python fundamentals (variables, data types, control structures)",
        2: "Intermediate Python (functions, modules, error handling)",
        3: "Advanced Python (file I/O, object-oriented programming, key libraries)"
    }

    def __init__(self, api_key):
        self.llm_service = LLMService(api_key)
        # Simplified in-memory storage
        self.modules = []
        self.questions = []
        self.responses = []
        self.chat_logs = []

    def generate_module(self, day: int) -> tuple:
        topic = self.DAY_TOPICS.get(day, "Python programming")
        content = self.llm_service.generate_module_content(day, topic)
        # Extract the title from the generated Markdown, with a sensible default
        title = f"Day {day} Python Module"
        if content.startswith("# "):
            title_line = content.split("\n", 1)[0]
            title = title_line.replace("# ", "").strip()
        # Save to in-memory storage
        module_id = len(self.modules) + 1
        self.modules.append({
            "id": module_id,
            "day": day,
            "title": title,
            "content": content,
            "created_at": datetime.datetime.utcnow()
        })
        return content, module_id

    def generate_exam(self, day: int, module_id: int, previous_mistakes: Optional[List] = None) -> tuple:
        topic = self.DAY_TOPICS.get(day, "Python programming")
        # Generate questions for this day's topics
        try:
            questions_data = self.llm_service.generate_exam_questions(day, topic, previous_mistakes)
            if not questions_data:
                raise ValueError("Failed to generate exam questions")
            saved_questions = []
            for q_data in questions_data:
                question_id = len(self.questions) + 1
                question = {
                    "id": question_id,
                    "module_id": module_id,
                    "question_type": q_data["question_type"],
                    "question_text": q_data["question_text"],
                    "options": q_data.get("options"),
                    "correct_answer": q_data["correct_answer"],
                    "explanation": q_data["explanation"],
                    "difficulty": q_data.get("difficulty", 3)
                }
                self.questions.append(question)
                saved_questions.append(question)
            return questions_data, saved_questions
        except Exception as e:
            print(f"Error generating exam: {str(e)}")
            # Create a simple fallback question so the exam can still proceed
            fallback_question = {
                "question_type": "short-answer",
                "question_text": f"Explain a key concept you learned in Day {day} about {topic}.",
                "options": None,
                "correct_answer": "Any reasonable explanation",
                "explanation": "This is a fallback question due to an error in question generation",
                "difficulty": 2
            }
            question_id = len(self.questions) + 1
            question = {"id": question_id, "module_id": module_id, **fallback_question}
            self.questions.append(question)
            return [fallback_question], [question]

    def grade_response(self, question_id: int, user_answer: str) -> Dict:
        # Look up the question in memory
        question = next((q for q in self.questions if q["id"] == question_id), None)
        if not question:
            return {"error": "Question not found"}
        try:
            feedback_data = self.llm_service.evaluate_answer(question, user_answer)
            # Save the response to in-memory storage
            response_id = len(self.responses) + 1
            self.responses.append({
                "id": response_id,
                "question_id": question_id,
                "user_answer": user_answer,
                "is_correct": feedback_data.get("is_correct", False),
                "feedback": feedback_data.get("feedback", ""),
                "timestamp": datetime.datetime.utcnow()
            })
            return feedback_data
        except Exception as e:
            print(f"Error grading response: {str(e)}")
            # Record the failure and return a fallback evaluation
            response_id = len(self.responses) + 1
            self.responses.append({
                "id": response_id,
                "question_id": question_id,
                "user_answer": user_answer,
                "is_correct": False,
                "feedback": f"Error evaluating answer: {str(e)}",
                "timestamp": datetime.datetime.utcnow()
            })
            return {
                "is_correct": False,
                "feedback": f"Error evaluating answer: {str(e)}",
                "correct_solution": question["correct_answer"]
            }

    def get_previous_mistakes(self, day: int) -> List:
        """Collect mistakes from the previous day to inform adaptive content."""
        if day <= 1:
            return []
        previous_day = day - 1
        # Find the previous day's modules
        previous_modules = [m for m in self.modules if m["day"] == previous_day]
        if not previous_modules:
            return []
        module_ids = [module["id"] for module in previous_modules]
        questions = [q for q in self.questions if q["module_id"] in module_ids]
        if not questions:
            return []
        question_ids = [question["id"] for question in questions]
        incorrect_responses = [
            r for r in self.responses
            if r["question_id"] in question_ids and not r["is_correct"]
        ]
        mistakes = []
        for response in incorrect_responses:
            question = next((q for q in self.questions if q["id"] == response["question_id"]), None)
            if question:
                mistakes.append({
                    "question": question["question_text"],
                    "user_answer": response["user_answer"],
                    "correct_answer": question["correct_answer"]
                })
        return mistakes

    def answer_question(self, user_question: str, related_question_id: Optional[int] = None) -> str:
        # Pull context from the related question, if one was given
        context = None
        if related_question_id:
            question = next((q for q in self.questions if q["id"] == related_question_id), None)
            if question:
                context = (
                    f"Question: {question['question_text']}\n"
                    f"Correct Answer: {question['correct_answer']}\n"
                    f"Explanation: {question['explanation']}"
                )
        response = self.llm_service.answer_student_question(user_question, context)
        # Log the interaction
        chat_log_id = len(self.chat_logs) + 1
        self.chat_logs.append({
            "id": chat_log_id,
            "user_question": user_question,
            "ai_response": response,
            "related_question_id": related_question_id,
            "timestamp": datetime.datetime.utcnow()
        })
        return response
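
# Illustrative end-to-end flow for ContentGenerator (hypothetical; requires a
# live API key and network access):
#   gen = ContentGenerator(api_key)
#   content, module_id = gen.generate_module(day=1)
#   questions_data, saved = gen.generate_exam(day=1, module_id=module_id)
#   feedback = gen.grade_response(saved[0]["id"], "my answer")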

# Learning System Class
class LearningSystem:
    def __init__(self, api_key):
        self.content_generator = ContentGenerator(api_key)
        self.current_day = 1
        self.current_module_id = None
        self.exam_start_time = None
        self.exam_in_progress = False
        self.exam_questions = []
        self.questions_data = []  # Raw question data kept for display

    def generate_day_content(self):
        content, module_id = self.content_generator.generate_module(self.current_day)
        self.current_module_id = module_id
        return content

    def start_exam(self):
        try:
            if not self.current_module_id:
                # Reuse an existing module for this day if one was already generated
                existing_module = next(
                    (m for m in self.content_generator.modules if m["day"] == self.current_day),
                    None
                )
                if existing_module:
                    self.current_module_id = existing_module["id"]
                else:
                    # Otherwise generate the day's content first
                    content, module_id = self.content_generator.generate_module(self.current_day)
                    self.current_module_id = module_id
            # Get previous mistakes for adaptive learning
            previous_mistakes = self.content_generator.get_previous_mistakes(self.current_day)
            # Generate exam questions
            self.questions_data, self.exam_questions = self.content_generator.generate_exam(
                self.current_day,
                self.current_module_id,
                previous_mistakes
            )
            if not self.questions_data or not self.exam_questions:
                return "Failed to generate exam questions. Please try again."
            self.exam_start_time = datetime.datetime.now()
            self.exam_in_progress = True
            # Format the exam for display
            exam_text = f"# Day {self.current_day} Python Exam\n\n"
            exam_text += "**Time Limit:** 1 hour\n"
            exam_text += f"**Start Time:** {self.exam_start_time.strftime('%H:%M:%S')}\n"
            exam_text += f"**End Time:** {(self.exam_start_time + timedelta(hours=1)).strftime('%H:%M:%S')}\n\n"
            # Add an adaptive-learning notice if applicable
            if previous_mistakes:
                exam_text += "**Note:** This exam includes questions based on topics you had difficulty with previously.\n\n"
            for i, question in enumerate(self.questions_data):
                exam_text += f"## Question {i+1}: {question['question_type'].title()}\n\n"
                exam_text += f"{question['question_text']}\n\n"
                if question['question_type'] == "multiple-choice" and question.get('options'):
                    for j, option in enumerate(question['options']):
                        exam_text += f"- {chr(65 + j)}. {option}\n"
                    exam_text += "\n"
            exam_text += "## Instructions for submitting answers:\n\n"
            exam_text += "1. For multiple-choice questions, enter the letter of your answer (A, B, C, or D)\n"
            exam_text += "2. For short-answer questions, write your complete answer\n"
            exam_text += "3. For coding questions, write your complete code solution\n"
            exam_text += "4. **Separate each answer with two line breaks**\n\n"
            return exam_text
        except Exception as e:
            self.exam_in_progress = False
            return f"Error starting exam: {str(e)}"

    def submit_exam(self, answers_text):
        try:
            if not self.exam_in_progress:
                return "No exam is currently in progress. Please start an exam first."
            if not self.exam_questions:
                return "No exam questions available. Please restart the exam."
            # Check the time limit
            current_time = datetime.datetime.now()
            if current_time > self.exam_start_time + timedelta(hours=1):
                time_overrun = current_time - (self.exam_start_time + timedelta(hours=1))
                overrun_minutes = time_overrun.total_seconds() / 60
                time_notice = f"Time limit exceeded by {overrun_minutes:.1f} minutes. Your answers are being processed anyway."
            else:
                time_notice = "Exam completed within the time limit."
            # Split answers by question (double-newline separator)
            answers = [ans.strip() for ans in answers_text.split("\n\n") if ans.strip()]
            feedback_text = f"# Day {self.current_day} Exam Results\n\n"
            feedback_text += f"{time_notice}\n\n"
            correct_count = 0
            total_evaluated = 0
            # If the user provided fewer answers than questions, pad with blanks
            while len(answers) < len(self.exam_questions):
                answers.append("")
            for i, question in enumerate(self.exam_questions):
                answer = answers[i]
                # Handle empty answers
                if not answer:
                    feedback_text += f"## Question {i+1}\n\n"
                    feedback_text += "**Your Answer:** No answer provided\n\n"
                    feedback_text += "**Result:** ❌ Incorrect\n\n"
                    feedback_text += f"**Correct Solution:** {question['correct_answer']}\n\n"
                    total_evaluated += 1
                    continue
                try:
                    # Grade the response
                    feedback = self.content_generator.grade_response(question["id"], answer)
                    total_evaluated += 1
                    # Format the feedback
                    feedback_text += f"## Question {i+1}\n\n"
                    feedback_text += f"**Your Answer:**\n{answer}\n\n"
                    feedback_text += f"**Result:** {'✅ Correct' if feedback.get('is_correct', False) else '❌ Incorrect'}\n\n"
                    feedback_text += f"**Feedback:**\n{feedback.get('feedback', '')}\n\n"
                    if feedback.get('is_correct', False):
                        correct_count += 1
                    else:
                        feedback_text += f"**Correct Solution:**\n{feedback.get('correct_solution', '')}\n\n"
                except Exception as e:
                    feedback_text += f"## Question {i+1}\n\n"
                    feedback_text += f"**Error grading answer:** {str(e)}\n\n"
            # Calculate the score
            score = correct_count / total_evaluated * 100 if total_evaluated > 0 else 0
            feedback_text += f"# Final Score: {score:.1f}%\n\n"
            # Suggestions for improvement
            if score < 100:
                feedback_text += "## Suggestions for Improvement\n\n"
                if score < 60:
                    feedback_text += "- Review the fundamental concepts again\n"
                    feedback_text += "- Practice more with the code examples\n"
                    feedback_text += "- Use the Q&A Sandbox to ask about difficult topics\n"
                elif score < 80:
                    feedback_text += "- Focus on the specific areas where you made mistakes\n"
                    feedback_text += "- Try rewriting the solutions for incorrect answers\n"
                else:
                    feedback_text += "- Great job! Just a few minor issues to review\n"
                    feedback_text += "- Look at the explanations for the few questions you missed\n"
            else:
                feedback_text += "## Excellent Work!\n\n"
                feedback_text += "You've mastered today's content. Ready for the next day's material!\n"
            self.exam_in_progress = False
            return feedback_text
        except Exception as e:
            self.exam_in_progress = False
            return f"Error submitting exam: {str(e)}"

    def answer_sandbox_question(self, question):
        return self.content_generator.answer_question(question)

    def advance_to_next_day(self):
        if self.current_day < 3:
            self.current_day += 1
            self.current_module_id = None
            self.exam_questions = []
            return f"Advanced to Day {self.current_day}."
        return "You have completed the 3-day curriculum."

    def get_learning_progress(self):
        try:
            modules = self.content_generator.modules
            questions = self.content_generator.questions
            responses = self.content_generator.responses
            total_questions = len(questions)
            answered_questions = len(responses)
            correct_answers = sum(1 for r in responses if r["is_correct"])
            accuracy = correct_answers / answered_questions * 100 if answered_questions > 0 else 0
            report = "# Learning Progress Summary\n\n"
            report += "## Overall Statistics\n"
            report += f"- Total modules completed: {len(modules)}\n"
            report += f"- Total questions attempted: {answered_questions}/{total_questions}\n"
            report += f"- Overall accuracy: {accuracy:.1f}%\n\n"
            # Day-by-day progress, including adaptive-learning info
            for day in range(1, 4):
                day_modules = [m for m in modules if m["day"] == day]
                report += f"## Day {day}: "
                if day_modules:
                    report += f"{day_modules[0]['title']}\n"
                    day_module_ids = [m["id"] for m in day_modules]
                    day_questions = [q for q in questions if q["module_id"] in day_module_ids]
                    day_question_ids = [q["id"] for q in day_questions]
                    day_responses = [r for r in responses if r["question_id"] in day_question_ids]
                    day_total = len(day_questions)
                    day_answered = len(day_responses)
                    day_correct = sum(1 for r in day_responses if r["is_correct"])
                    if day_answered > 0:
                        day_accuracy = day_correct / day_answered * 100
                        report += f"- **Exam Score:** {day_accuracy:.1f}%\n"
                    else:
                        report += "- **Exam:** Not taken yet\n"
                    report += f"- Questions attempted: {day_answered}/{day_total}\n"
                    # Show adaptive-learning details
                    if day > 1:
                        previous_mistakes = self.content_generator.get_previous_mistakes(day)
                        if previous_mistakes:
                            report += f"- **Adaptive Learning:** {len(previous_mistakes)} topics from Day {day-1} reinforced\n"
                    # Show exam results if available
                    if day_answered > 0:
                        report += "### Exam Performance\n"
                        # Group results by question type
                        question_types = set(q["question_type"] for q in day_questions)
                        for q_type in question_types:
                            type_question_ids = [q["id"] for q in day_questions if q["question_type"] == q_type]
                            type_responses = [r for r in day_responses if r["question_id"] in type_question_ids]
                            type_correct = sum(1 for r in type_responses if r["is_correct"])
                            if type_responses:
                                type_accuracy = type_correct / len(type_responses) * 100
                                report += f"- **{q_type.title()}:** {type_accuracy:.1f}% correct\n"
                        # Common mistakes
                        incorrect_responses = [r for r in day_responses if not r["is_correct"]]
                        if incorrect_responses:
                            report += "\n### Areas for Improvement\n"
                            for resp in incorrect_responses[:3]:  # Show the top 3 mistakes
                                question = next((q for q in questions if q["id"] == resp["question_id"]), None)
                                if question:
                                    report += f"- **Question:** {question['question_text'][:100]}...\n"
                                    report += f"  **Your Answer:** {resp['user_answer'][:100]}...\n"
                                    report += f"  **Correct Answer:** {question['correct_answer'][:100]}...\n\n"
                else:
                    report += "Not started yet\n"
                report += "\n"
            # Learning recommendations
            report += "## Recommendations\n\n"
            if correct_answers < answered_questions * 0.7:
                report += "- Review the modules before moving to the next day\n"
                report += "- Focus on practicing code examples\n"
                report += "- Use the Q&A Sandbox to clarify difficult concepts\n"
            else:
                report += "- Continue with the current pace\n"
                report += "- Try to implement small projects using what you've learned\n"
            return report
        except Exception as e:
            return f"Error generating progress report: {str(e)}"
"""#gradio""" | |
# Gradio UI - Modified for Google Colab | |
import os | |
import gradio as gr | |
# Note: We're not importing from core_system | |
# Instead, we'll use the classes already defined in the previous cell | |

def create_interface():
    # System initialization
    def initialize_system(api_key_value):
        if not api_key_value or len(api_key_value) < 10:  # Basic validation
            return "Please enter a valid API key.", gr.update(visible=False), None
        try:
            # Test the API connection with a trivial prompt
            test_service = LLMService(api_key_value)
            test_response = test_service.get_completion("Say hello")
            if len(test_response) > 0:
                learning_system = LearningSystem(api_key_value)
                return (
                    "✅ System initialized successfully! You can now use the learning system.",
                    gr.update(visible=True),
                    learning_system
                )
            return "❌ API connection test failed. Please check your API key.", gr.update(visible=False), None
        except Exception as e:
            return f"❌ Error initializing system: {str(e)}", gr.update(visible=False), None

    with gr.Blocks(title="AI-Powered Python Learning System", theme="soft") as interface:
        # Per-session learning system state
        learning_system_state = gr.State(None)
        # Header
        gr.Markdown(
            """
            <div style="text-align: center; margin-bottom: 20px;">
                <h1 style="color: #4a69bd; font-size: 2.5em;">AI-Powered Python Learning System</h1>
                <p style="font-size: 1.2em; color: #444;">Master Python programming with personalized AI tutoring</p>
            </div>
            """
        )
        # API key input - outside the tabs
        with gr.Row():
            # Prefer an API key from the environment, if one is set
            API_KEY = os.environ.get("GROQ_API_KEY", "")
            api_key_input = gr.Textbox(
                label="Enter your Groq API Key",
                placeholder="gsk_...",
                type="password",
                value=API_KEY
            )
            init_btn = gr.Button("Initialize System", variant="primary")
        init_status = gr.Markdown("Enter your Groq API key and click 'Initialize System' to begin.")
        # Main interface container - hidden until initialized
        with gr.Column(visible=False) as main_interface:
            with gr.Tabs():
                # Content & Learning tab
                with gr.Tab("Content & Learning"):
                    with gr.Row():
                        day_display = gr.Markdown("## Current Day: 1")
                    with gr.Row():
                        generate_content_btn = gr.Button("Generate Today's Content", variant="primary")
                        next_day_btn = gr.Button("Advance to Next Day", variant="secondary")
                    content_display = gr.Markdown("Click 'Generate Today's Content' to begin.")
                # Exam tab
                with gr.Tab("Exam"):
                    with gr.Row():
                        start_exam_btn = gr.Button("Start Exam", variant="primary")
                    exam_display = gr.Markdown("Click 'Start Exam' to begin the assessment.")
                    with gr.Row():
                        exam_answers = gr.Textbox(
                            label="Enter your answers (separate each answer with two line breaks)",
                            placeholder="Answer 1\n\nAnswer 2\n\nAnswer 3...",
                            lines=15
                        )
                    submit_exam_btn = gr.Button("Submit Exam", variant="primary")
                    exam_feedback = gr.Markdown("Your exam results will appear here.")
                # Q&A Sandbox tab
                with gr.Tab("Q&A Sandbox"):
                    with gr.Row():
                        question_input = gr.Textbox(
                            label="Ask any question about Python",
                            placeholder="Enter your question here...",
                            lines=3
                        )
                    ask_btn = gr.Button("Ask Question", variant="primary")
                    answer_display = gr.Markdown("Ask a question to get started.")
                # Progress Report tab
                with gr.Tab("Progress Report"):
                    with gr.Row():
                        report_btn = gr.Button("Generate Progress Report", variant="primary")
                    progress_display = gr.Markdown("Click 'Generate Progress Report' to see your learning statistics.")

        # Handler functions that take the learning-system state as input
        def generate_content(learning_system):
            if not learning_system:
                return "Please initialize the system first."
            return learning_system.generate_day_content()

        def advance_day(learning_system):
            if not learning_system:
                return "Please initialize the system first.", "## Current Day: 1"
            result = learning_system.advance_to_next_day()
            return result, f"## Current Day: {learning_system.current_day}"

        def start_exam(learning_system):
            if not learning_system:
                return "Please initialize the system first."
            try:
                return learning_system.start_exam()
            except Exception as e:
                return f"Error starting exam: {str(e)}"

        def submit_exam(learning_system, answers):
            if not learning_system:
                return "Please initialize the system first."
            if not answers.strip():
                return "Please provide answers before submitting."
            try:
                return learning_system.submit_exam(answers)
            except Exception as e:
                return f"Error evaluating exam: {str(e)}"

        def ask_question(learning_system, question):
            if not learning_system:
                return "Please initialize the system first."
            if not question.strip():
                return "Please enter a question."
            try:
                return learning_system.answer_sandbox_question(question)
            except Exception as e:
                return f"Error processing question: {str(e)}"

        def generate_progress_report(learning_system):
            if not learning_system:
                return "Please initialize the system first."
            try:
                return learning_system.get_learning_progress()
            except Exception as e:
                return f"Error generating progress report: {str(e)}"

        # Wire up event handlers
        init_btn.click(
            initialize_system,
            inputs=[api_key_input],
            outputs=[init_status, main_interface, learning_system_state]
        )
        generate_content_btn.click(
            generate_content,
            inputs=[learning_system_state],
            outputs=[content_display]
        )
        next_day_btn.click(
            advance_day,
            inputs=[learning_system_state],
            outputs=[content_display, day_display]
        )
        start_exam_btn.click(
            start_exam,
            inputs=[learning_system_state],
            outputs=[exam_display]
        )
        submit_exam_btn.click(
            submit_exam,
            inputs=[learning_system_state, exam_answers],
            outputs=[exam_feedback]
        )
        ask_btn.click(
            ask_question,
            inputs=[learning_system_state, question_input],
            outputs=[answer_display]
        )
        report_btn.click(
            generate_progress_report,
            inputs=[learning_system_state],
            outputs=[progress_display]
        )
    return interface

# Create and launch the interface.
# In Colab, install Gradio first if needed:
#   !pip install gradio
interface = create_interface()
interface.launch(share=True)
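
# Outside Colab you can usually launch locally instead, e.g. with
# interface.launch(share=False), or pass server_name="0.0.0.0" to serve the
# app on your LAN (both are standard Gradio launch options).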