Commit 48a1ffa · committed by github-actions[bot]
Parent(s): 92bfe31
Auto-deploy backend from GitHub (5c5b127)
Files changed:
- automation_engine.py +1 -1
- main.py +2 -2
- routes/diagnostic.py +1 -1
- routes/quiz_generation_routes.py +140 -60
automation_engine.py
CHANGED
@@ -553,7 +553,7 @@ class MathPulseAutomationEngine:
                 "word_problem",
             ],
             "difficultyDistribution": cfg["dist"],
-            "bloomLevels": ["remember", "understand", "apply"],
+            "bloomLevels": ["remember", "understand", "apply", "analyze"],
             "includeGraphs": False,
             "excludeTopics": [],
             "purpose": "remedial",
main.py
CHANGED
@@ -12814,11 +12814,11 @@ async def evaluate_progress(request: ProgressEvaluateRequest):
 
     applying_level_correct = sum(
         1 for item in request.items
-        if item.get("is_correct", False) and item.get("bloom_level", "") in ("applying", "analyzing"
+        if item.get("is_correct", False) and item.get("bloom_level", "") in ("applying", "analyzing")
    )
    analyzing_level_correct = sum(
        1 for item in request.items
-        if item.get("is_correct", False) and item.get("bloom_level", "") in ("analyzing",
+        if item.get("is_correct", False) and item.get("bloom_level", "") in ("analyzing",)
    )
 
    if prev == "beginning" and score_percent >= 60 and applying_level_correct >= 2:
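Note on the second hunk: restoring the closing parenthesis is not the whole fix; the trailing comma is what makes ("analyzing",) a one-element tuple. Without it, ("analyzing") is just a parenthesized string and `in` degrades to a substring test. A standalone sketch with made-up items (not the real ProgressEvaluateRequest payload):

items = [
    {"is_correct": True, "bloom_level": "analyzing"},
    {"is_correct": True, "bloom_level": "applying"},
    {"is_correct": False, "bloom_level": "analyzing"},
]

# Tuple membership: counts only correct items whose level is exactly "analyzing".
analyzing_level_correct = sum(
    1 for item in items
    if item.get("is_correct", False) and item.get("bloom_level", "") in ("analyzing",)
)
assert analyzing_level_correct == 1

# Pitfall the comma avoids: "analyz" in ("analyzing") evaluates to True,
# because the right-hand side is a plain str and `in` checks substrings.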
routes/diagnostic.py
CHANGED
@@ -319,7 +319,7 @@ Finite Math: FM1-MAT-01, FM2-PROB-01, FM2-PROB-02
 {previous_block}{variance_block}DIFFICULTY DISTRIBUTION (across all 15 questions):
 - Easy (Bloom: remembering / understanding): 6 questions (40%)
 - Medium (Bloom: applying / analyzing): 6 questions (40%)
-- Hard (Bloom:
+- Hard (Bloom: analyzing): 3 questions (20%)
 
 QUESTION RULES:
 1. All questions are 4-option multiple choice (A, B, C, D).
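Note: with the Hard row restored, the distribution adds up for the 15-question diagnostic described in this prompt; a quick check:

easy, medium, hard = 6, 6, 3
assert easy + medium + hard == 15
assert (round(100 * easy / 15), round(100 * hard / 15)) == (40, 20)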
routes/quiz_generation_routes.py
CHANGED
@@ -70,6 +70,10 @@ class QuizQuestion(BaseModel):
     options: Optional[List[str]] = None
     correctAnswer: str
     explanation: str
+    bloomLevel: Optional[str] = None
+    competencyCode: Optional[str] = None
+    points: Optional[int] = None
+    xpReward: Optional[int] = None
 
 
 class QuizGenerationResponse(BaseModel):
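Note: the four new QuizQuestion fields are all Optional with None defaults, so payloads built before this change still validate. A trimmed stand-in model (only the fields visible in this hunk; the model's other fields, declared above this hunk, are omitted):

from typing import List, Optional
from pydantic import BaseModel

class QuizQuestionSketch(BaseModel):
    options: Optional[List[str]] = None
    correctAnswer: str
    explanation: str
    bloomLevel: Optional[str] = None       # new
    competencyCode: Optional[str] = None   # new
    points: Optional[int] = None           # new
    xpReward: Optional[int] = None         # new

# An old-style payload without the new keys still parses:
q = QuizQuestionSketch(correctAnswer="B", explanation="Power rule.")
assert q.bloomLevel is None and q.xpReward is None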
@@ -90,82 +94,129 @@ def _build_quiz_generation_prompt(
     difficulty: str,
     retrieved_context: str,
     variance_seed: Optional[int] = None,
+    competency_code: Optional[str] = None,
+    grade_level: str = "Grade 11/12",
+    lesson_objective: Optional[str] = None,
+    xp_reward: int = 10,
+    points: int = 1,
 ) -> str:
-    """Build the
+    """Build the QuizForge prompt for quiz generation with variance."""
+
+    # Build question type string
+    qt_str = ", ".join(question_types) if question_types else "multiple_choice"
 
     # Build variance instruction based on seed
     variance_instruction = ""
     if variance_seed is not None:
         variance_instruction = f"""
-
-
-
-
-
+IMPORTANT – VARIANCE (seed {variance_seed}):
+- Paraphrase concepts in fresh ways
+- Use different numerical values and scenarios
+- Vary question phrasing and structure
+- Do NOT repeat similar question patterns"""
 
-
+    return f"""You are a precise DepEd-aligned mathematics quiz generator for Filipino Senior High School STEM students ({grade_level}).
 
-
+Generate a "Try It Yourself" quiz for the following lesson.
 
-##
+## LESSON METADATA
+- Title: {lesson_title or topic}
+- DepEd Competency: {competency_code or 'Not specified'}
+- Grade Level: {grade_level}
+- Subject: {subject}
+- Lesson Objective: {lesson_objective or topic}
+
+## RAG CONTEXT (lesson source material)
 {retrieved_context}
 
+## QUIZ CONFIGURATION
+- Number of questions: {question_count}
+- Difficulty: {difficulty}
+- Question type: {qt_str}
+- XP per correct answer: {xp_reward}
+- Point value per question: {points}
+
 ## Instructions
-1. Generate
-2. Question types to use: {
+1. Generate EXACTLY {question_count} questions covering the topic above.
+2. Question types to use: {qt_str}
 3. DISTRIBUTION (for {question_count} questions):
-   -
-   -
-   -
-   -
-4.
-5.
-6.
-7. Provide clear explanations for the correct answer.{variance_instruction}
+   - Include at least 1 "remember" (recall, definitions, fundamental facts)
+   - Include at least 1 "understand" (explain concepts)
+   - Include at least 1 "apply" (real-world context: pesos, jeepney, sari-sari store, barangay)
+   - Difficulty: {difficulty} – appropriate for {grade_level} Filipino STEM students.
+4. Use Filipino-localized context where possible (pesos, jeepney, barangay, sari-sari store, etc.).
+5. Each question must be mathematically accurate and curriculum-aligned.
+6. Provide clear explanations for the correct answer.{variance_instruction}
 
 ## Question Type Rules
-- multiple-choice: 4 options
+- multiple-choice: 4 options as array of objects with "key" and "text" fields, exactly one correct
 - true-false: statement that is either True or False
 - fill-in-blank: question with a single numeric or short text answer
 
-## Output Format
-Return ONLY a valid JSON array. No markdown, no extra text. Format:
+## Output Format (strict JSON array – no markdown, no extra text)
 [
 {{
-    "
-    "
-    "
-    "
-    "
+    "id": "q1",
+    "question_text": "What is the derivative of f(x) = x³?",
+    "type": "multiple_choice",
+    "bloom_level": "remember",
+    "options": [
+      {{ "key": "A", "text": "2x²" }},
+      {{ "key": "B", "text": "3x²" }},
+      {{ "key": "C", "text": "x²" }},
+      {{ "key": "D", "text": "3x" }}
+    ],
+    "correct_answer": "B",
+    "explanation": "Using the power rule: d/dx(xⁿ) = nxⁿ⁻¹. So d/dx(x³) = 3x².",
+    "points": {points},
+    "xp_reward": {xp_reward},
+    "difficulty": "{difficulty}",
+    "competency_code": "{competency_code or 'N/A'}"
 }},
 {{
+    "id": "q2",
+    "question_text": "The sum of angles in a triangle is 180 degrees.",
     "type": "true-false",
-    "
-    "options": [
-
-
+    "bloom_level": "remember",
+    "options": [
+      {{ "key": "A", "text": "True" }},
+      {{ "key": "B", "text": "False" }}
+    ],
+    "correct_answer": "A",
+    "explanation": "By the triangle angle sum theorem, interior angles of any Euclidean triangle sum to 180°.",
+    "points": {points},
+    "xp_reward": {xp_reward},
+    "difficulty": "{difficulty}",
+    "competency_code": "{competency_code or 'N/A'}"
 }},
 {{
-    "
-    "
+    "id": "q3",
+    "question_text": "If f(x) = 2x + 3, then f(4) = ___",
+    "type": "fill_in_blank",
+    "bloom_level": "apply",
     "options": null,
-    "
-    "explanation": "Substitute x = 4: f(4) = 2(4) + 3 = 8 + 3 = 11."
+    "correct_answer": "11",
+    "explanation": "Substitute x = 4: f(4) = 2(4) + 3 = 8 + 3 = 11.",
+    "points": {points},
+    "xp_reward": {xp_reward},
+    "difficulty": "{difficulty}",
+    "competency_code": "{competency_code or 'N/A'}"
 }}
 ]
 
 IMPORTANT:
-- Return ONLY
--
--
--
--
+- Return ONLY a valid JSON array, no markdown fences, no extra text
+- For multiple-choice: options are objects with "key" ("A","B","C","D") and "text" (the answer text)
+- correct_answer must be the KEY ("A","B","C","D") that matches the correct option
+- For fill-in-blank, correct_answer is the exact text that fills the blank
+- Generate FRESH, VARIED questions – no two questions should be identical or nearly identical
+- Spread Bloom's taxonomy: include "remember", "understand", and "apply" level questions"""
 
 
 # ── Response Parser ────────────────────────────────────────────────────
 
 def _parse_quiz_response(text: str, expected_count: int) -> List[Dict[str, Any]]:
-    """Parse and validate
+    """Parse and validate QuizForge quiz generation response."""
     cleaned = text.strip()
 
     # Strip markdown fences
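Note: the parameters ahead of difficulty are not shown in this hunk. The names used below (topic, subject, lesson_title, question_count, question_types) are inferred from the template placeholders and from the call site later in the diff, so treat this invocation sketch as an assumption-laden illustration rather than the exact signature:

prompt = _build_quiz_generation_prompt(
    topic="Derivatives of polynomial functions",
    subject="Basic Calculus",
    lesson_title="The Power Rule",                  # inferred from {lesson_title or topic}
    question_count=3,
    question_types=["multiple_choice", "true-false", "fill_in_blank"],
    difficulty="medium",
    retrieved_context="(formatted RAG chunks would go here)",
    variance_seed=42,
    competency_code="PLACEHOLDER-CODE-01",          # placeholder value, not a real DepEd code
    grade_level="Grade 11/12",
    lesson_objective="Differentiate polynomial functions using the power rule",
    xp_reward=10,
    points=1,
)
# With variance_seed set, the VARIANCE block is appended to the instructions,
# nudging the model away from repeating earlier question patterns.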
@@ -196,31 +247,55 @@ def _parse_quiz_response(text: str, expected_count: int) -> List[Dict[str, Any]]
         if not isinstance(q, dict):
             continue
 
-        #
-        if "
+        # QuizForge required fields
+        if "question_text" not in q or "correct_answer" not in q:
+            logger.warning(f"Question {i} missing required field 'question_text' or 'correct_answer', skipping")
             continue
 
-
+        qtype = q.get("type", "multiple-choice")
+        correct_key = q.get("correct_answer", "")
+
+        # ── Flatten options from [{key, text}] to [text] ──────────────────
+        raw_options = q.get("options")
+        flat_options: Optional[List[str]] = None
+
+        if raw_options and isinstance(raw_options, list):
+            # QuizForge: options is [{key: "A", text: "..."}, ...]
+            if len(raw_options) > 0 and isinstance(raw_options[0], dict) and "text" in raw_options[0]:
+                # Sort by key to maintain consistent ordering (A, B, C, D)
+                def key_sort(opt: Dict[str, str]) -> str:
+                    return opt.get("key", "")
+                sorted_opts = sorted(raw_options, key=key_sort)
+                flat_options = [opt.get("text", "") for opt in sorted_opts]
+            else:
+                # Already flat array of strings
+                flat_options = raw_options
+        elif qtype == "true-false":
+            flat_options = ["True", "False"]
+
+        # ── Map correct_key ("A") → correct_answer TEXT ─────────────────
+        correct_answer_text: str = correct_key
+        if flat_options and isinstance(raw_options, list):
+            # Find the option whose key matches correct_key
+            for opt in raw_options:
+                if isinstance(opt, dict) and opt.get("key") == correct_key:
+                    correct_answer_text = opt.get("text", correct_key)
+                    break
+
+        # ── Build normalized internal record ────────────────────────────
         normalized = {
             "id": i + 1,
-            "type":
-            "question": q["
-            "
+            "type": qtype,
+            "question": q["question_text"],
+            "bloomLevel": q.get("bloom_level", "apply"),
+            "competencyCode": q.get("competency_code"),
+            "correctAnswer": correct_answer_text,
+            "options": flat_options,
             "explanation": q.get("explanation", ""),
+            "points": q.get("points", 1),
+            "xpReward": q.get("xp_reward", 10),
         }
 
-        # Handle options
-        if "options" in q and q["options"]:
-            normalized["options"] = q["options"]
-        elif "choices" in q and q["choices"]:
-            normalized["options"] = q["choices"]
-        else:
-            # For true-false, auto-populate options
-            if normalized["type"] == "true-false":
-                normalized["options"] = ["True", "False"]
-            else:
-                normalized["options"] = None
-
         validated.append(normalized)
 
     if len(validated) < min(expected_count, 3):
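Note: the net effect of the new parsing block is that a QuizForge-style question (keyed options, letter answer) comes out in the flat shape the camelCase model expects. A self-contained restatement of the flatten and key-to-text steps, with illustrative values only:

from typing import Any, Dict, List, Optional, Tuple

def normalize_options(q: Dict[str, Any]) -> Tuple[Optional[List[str]], str]:
    raw_options = q.get("options")
    correct_key = q.get("correct_answer", "")
    flat: Optional[List[str]] = None
    if raw_options and isinstance(raw_options, list):
        if isinstance(raw_options[0], dict) and "text" in raw_options[0]:
            # Keyed objects: sort A-D, then keep just the text.
            flat = [o.get("text", "") for o in sorted(raw_options, key=lambda o: o.get("key", ""))]
        else:
            flat = raw_options  # already a flat list of strings
    elif q.get("type") == "true-false":
        flat = ["True", "False"]
    answer = correct_key
    if flat and isinstance(raw_options, list):
        for o in raw_options:
            if isinstance(o, dict) and o.get("key") == correct_key:
                answer = o.get("text", correct_key)  # map the letter to its option text
                break
    return flat, answer

sample = {
    "type": "multiple_choice",
    "options": [
        {"key": "B", "text": "3x²"}, {"key": "A", "text": "2x²"},
        {"key": "D", "text": "3x"}, {"key": "C", "text": "x²"},
    ],
    "correct_answer": "B",
}
options, answer = normalize_options(sample)
assert options == ["2x²", "3x²", "x²", "3x"]  # sorted by key A, B, C, D
assert answer == "3x²"                         # "B" resolved to its text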
@@ -296,7 +371,7 @@ async def generate_quiz(request: QuizGenerationRequest):
 
     confidence = summarize_retrieval_confidence(chunks)
 
-    # 2. Build generation prompt
+    # 2. Build generation prompt (QuizForge format)
     prompt = _build_quiz_generation_prompt(
         topic=request.topic,
         subject=request.subject,
@@ -306,6 +381,11 @@
         difficulty=request.difficulty,
         retrieved_context=formatted_context,
         variance_seed=request.varianceSeed,
+        competency_code=request.competencyCode,
+        grade_level="Grade 11/12",
+        lesson_objective=request.topic,
+        xp_reward=10,
+        points=1,
     )
 
     # 3. Call DeepSeek with higher temperature for variance
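Note: per the step comments, the endpoint next calls DeepSeek and feeds the completion through _parse_quiz_response, whose normalized keys line up with the extended QuizQuestion model. A glue sketch; llm_call is a stand-in name (the real DeepSeek client and its temperature setting are not shown in this diff), and it assumes QuizQuestion also declares the id, type, and question fields the parser emits:

raw_text = llm_call(prompt)  # stand-in for the project's (async) DeepSeek call
parsed = _parse_quiz_response(raw_text, expected_count=5)  # 5 = however many questions were requested
questions = [QuizQuestion(**item) for item in parsed]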