Commit 9ea3d39 · committed by github-actions[bot]
Parent(s): e756dd1
🚀 Auto-deploy from GitHub (664a608)

Files changed:
- README.md +1 -1
- automation_engine.py +6 -14
- main.py +176 -135
- requirements.txt +2 -1
- tests/test_api.py +44 -61
README.md
CHANGED

@@ -13,7 +13,7 @@ pinned: false
 FastAPI backend for the MathPulse AI educational platform.
 
 ## Models Used
 
-- **
+- **meta-llama/Meta-Llama-3-8B-Instruct** - AI Math Tutor, Learning Path Generation, Daily Insights
 - **facebook/bart-large-mnli** - Student Risk Classification (zero-shot)
 
 ## API Endpoints

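For context on what the swapped chat model serves: the tutor endpoint exercised throughout the diffs below accepts a message plus optional history and returns the tutor's reply in a "response" field. A rough request sketch, assuming a local run of the Space; the base URL and port are placeholders, while the /api/chat path and the field names come from tests/test_api.py in this same commit.

import requests

BASE_URL = "http://localhost:7860"  # placeholder; use the deployed Space URL in practice

resp = requests.post(
    f"{BASE_URL}/api/chat",
    json={"message": "What is 2+2?", "history": []},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # the AI Math Tutor's answer text
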
automation_engine.py
CHANGED

@@ -514,11 +514,9 @@ class MathPulseAutomationEngine:
         weak_topics: List[Dict[str, Any]],
         grade_level: str,
     ) -> Optional[str]:
-        """
+        """Generate a personalised learning path via HF Serverless Inference."""
         try:
-            from main import 
-
-            hf = get_client()
+            from main import call_hf_chat
 
             weakness_lines = ", ".join(at_risk_subjects)
             topic_lines = "\n".join(
@@ -538,8 +536,7 @@ class MathPulseAutomationEngine:
                 "Format as a numbered list. Be specific."
             )
 
-            
-                model=CHAT_MODEL,
+            return call_hf_chat(
                 messages=[
                     {
                         "role": "system",
@@ -553,7 +550,6 @@ class MathPulseAutomationEngine:
                 max_tokens=1500,
                 temperature=0.7,
             )
-            return response.choices[0].message.content or ""
         except Exception as e:
             logger.warning(f"Learning-path AI call failed: {e}")
             return None
@@ -563,11 +559,9 @@ class MathPulseAutomationEngine:
         risk_classifications: Dict[str, Dict[str, Any]],
         weak_topics: List[Dict[str, Any]],
     ) -> Optional[str]:
-        """
+        """Generate teacher intervention recommendations via HF Serverless Inference."""
        try:
-            from main import 
-
-            hf = get_client()
+            from main import call_hf_chat
 
             at_risk = [
                 subj for subj, data in risk_classifications.items()
@@ -592,8 +586,7 @@ class MathPulseAutomationEngine:
                 "Keep response under 300 words, structured with clear sections."
             )
 
-            
-                model=CHAT_MODEL,
+            return call_hf_chat(
                 messages=[
                     {
                         "role": "system",
@@ -607,7 +600,6 @@ class MathPulseAutomationEngine:
                 max_tokens=1000,
                 temperature=0.5,
             )
-            return response.choices[0].message.content or ""
         except Exception as e:
             logger.warning(f"Teacher-intervention AI call failed: {e}")
             return None

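Both automation methods now reduce to the same shape: build the prompt, delegate to call_hf_chat (defined in main.py below), and let the existing except block map failures to None. A condensed sketch of that pattern follows; the helper name generate_with_hf and the system-prompt text are illustrative, and only call_hf_chat plus its messages/max_tokens/temperature arguments come from the diff itself.

import logging
from typing import Optional

logger = logging.getLogger(__name__)


def generate_with_hf(prompt: str, *, max_tokens: int, temperature: float) -> Optional[str]:
    """Illustrative wrapper showing the post-refactor call pattern."""
    try:
        # Lazy import, mirroring the diff (presumably to avoid a circular import).
        from main import call_hf_chat

        return call_hf_chat(
            messages=[
                {"role": "system", "content": "You are MathPulse's planning assistant."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=max_tokens,
            temperature=temperature,
        )
    except Exception as e:
        # The real methods log via the module-level logger and return None on failure.
        logger.warning(f"AI call failed: {e}")
        return None

In the actual methods the prompt is still assembled from at_risk_subjects, weak_topics, and grade_level as before; only the transport to the model changed.
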
main.py
CHANGED

@@ -1,7 +1,8 @@
 """
 MathPulse AI - FastAPI Backend
 AI-powered math tutoring backend using Hugging Face models.
-
+- meta-llama/Meta-Llama-3-8B-Instruct for chat, learning paths, insights, and quiz generation
+  (via HF Serverless Inference API)
 - facebook/bart-large-mnli for student risk classification
 - Multi-method verification system for math accuracy
 - AI-powered Quiz Maker with Bloom's Taxonomy integration
@@ -29,6 +30,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
 import asyncio
 import time
 import uuid
+import requests as http_requests
 import uvicorn
 
 # Event-driven automation engine
@@ -92,11 +94,26 @@ from analytics import (
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger("mathpulse")
 
-HF_TOKEN = os.environ.get("
-
+HF_TOKEN = os.environ.get("HF_TOKEN", os.environ.get("HUGGING_FACE_API_TOKEN", ""))
+
+# Temporarily using Meta-Llama-3-8B-Instruct via HF Serverless Inference API
+# because Qwen/Qwen2.5-Math-7B-Instruct is provider-only (not available on
+# HF serverless). Swap this back once a provider is configured or the model
+# becomes serverless-compatible.
+HF_MATH_MODEL_ID = os.getenv("HF_MATH_MODEL_ID", "meta-llama/Meta-Llama-3-8B-Instruct")
+
+# Alias kept so automation_engine.py (which imports CHAT_MODEL) keeps working.
+CHAT_MODEL = HF_MATH_MODEL_ID
+
 RISK_MODEL = "facebook/bart-large-mnli"
 VERIFICATION_SAMPLES = 3  # Number of samples for self-consistency checking
 
+if not HF_TOKEN:
+    logger.warning(
+        "HF_TOKEN is not set. AI features will fail. "
+        "On HF Spaces this is injected automatically as a secret."
+    )
+
 # ─── FastAPI App ───────────────────────────────────────────────
 
 app = FastAPI(
@@ -198,30 +215,30 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-# ─── Hugging Face 
+# ─── Hugging Face Clients ─────────────────────────────────────
 
+# InferenceClient is kept only for zero-shot classification (BART).
 from huggingface_hub import InferenceClient
 
-
+_zsc_client: Optional[InferenceClient] = None
 
 
 def get_client() -> InferenceClient:
-    """Get or initialize the HuggingFace InferenceClient
-    global 
-    if 
+    """Get or initialize the HuggingFace InferenceClient (used for zero-shot classification only)."""
+    global _zsc_client
+    if _zsc_client is None:
         if not HF_TOKEN:
             raise HTTPException(
                 status_code=500,
-                detail="
+                detail="HF_TOKEN not configured. Set the HF_TOKEN environment variable.",
             )
         for attempt in range(3):
             try:
-                
+                _zsc_client = InferenceClient(
                     token=HF_TOKEN,
                     timeout=60,
-                    provider="auto",  # auto-route to available inference providers
                 )
-                logger.info("
+                logger.info("HF InferenceClient initialized (for zero-shot classification)")
                 break
             except Exception as e:
                 logger.warning(f"HF client init attempt {attempt + 1} failed: {e}")
@@ -231,9 +248,129 @@ def get_client() -> InferenceClient:
                 detail="Failed to initialize AI model client after 3 attempts.",
             )
         time.sleep(2 ** attempt)
-
-
-
+    assert _zsc_client is not None
+    return _zsc_client
+
+
+# ─── HF Serverless Chat Helper (requests-based) ───────────────
+
+
+def call_hf_chat(
+    messages: List[Dict[str, str]],
+    *,
+    max_tokens: int = 2048,
+    temperature: float = 0.2,
+    top_p: float = 0.9,
+    model: Optional[str] = None,
+) -> str:
+    """
+    Call the HF Serverless Inference API (OpenAI-compatible chat endpoint)
+    using plain ``requests``. Retries up to 3 times on 503 (model loading).
+    """
+    if not HF_TOKEN:
+        raise RuntimeError("HF_TOKEN is not set")
+
+    target_model = model or HF_MATH_MODEL_ID
+    url = f"https://api-inference.huggingface.co/models/{target_model}/v1/chat/completions"
+    headers = {
+        "Authorization": f"Bearer {HF_TOKEN}",
+        "Content-Type": "application/json",
+    }
+    payload = {
+        "model": target_model,
+        "messages": messages,
+        "max_tokens": max_tokens,
+        "temperature": temperature,
+        "top_p": top_p,
+    }
+
+    for attempt in range(3):
+        resp = http_requests.post(url, headers=headers, json=payload, timeout=60)
+        if resp.status_code == 503 and attempt < 2:
+            logger.warning(f"HF chat 503 (model loading), retry {attempt + 1}/3")
+            time.sleep(3)
+            continue
+        if resp.status_code != 200:
+            raise RuntimeError(f"HF Inference error {resp.status_code}: {resp.text}")
+
+        data = resp.json()
+        # OpenAI-compatible format: {"choices": [{"message": {"content": "..."}}]}
+        choices = data.get("choices", [])
+        if choices:
+            return (choices[0].get("message", {}).get("content", "") or "").strip()
+
+        raise RuntimeError(f"Unexpected HF response format: {data}")
+
+    raise RuntimeError("HF Inference failed after retries")
+
+
+# ─── Math Tutor Prompt & Wrapper ──────────────────────────────
+
+
+def build_math_tutor_prompt(question: str) -> str:
+    """Build a structured math-tutor prompt for the LLM."""
+    return f"""SYSTEM:
+You are MathPulse Tutor, a precise and patient math tutor for Filipino senior high school STEM students.
+Your job is to:
+1) Understand the student's math question (algebra, functions, graphs, trigonometry, analytic geometry, basic calculus, statistics, or word problems).
+2) Solve the problem step by step, explaining each transformation in simple language.
+3) Show all important equations clearly and avoid skipping algebra steps unless obvious to a Grade 11–12 STEM student.
+4) At the end, restate the final answer explicitly (e.g., "Final answer: x = 3").
+5) If the question is ambiguous or missing information, ask a short clarifying question first instead of guessing.
+6) If the student makes a mistake, point it out gently, explain why it is wrong, and show the correct method.
+7) Never invent new notation or definitions; use standard high-school math notation only.
+8) When there are multiple possible methods, briefly mention alternatives but pick one main method and follow it consistently.
+9) If the computation is long, summarize intermediate results so the student does not get lost.
+10) If the answer depends on approximations, specify whether the result is exact or rounded (and to how many decimal places).
+Speak in clear, concise English. Use short paragraphs and LaTeX-style math when helpful (e.g., x^2 + 3x + 2 = 0).
+If the user question is not about math, politely say that you can only help with math-related questions.
+
+USER:
+Student question:
+{question}
+"""
+
+
+def call_math_tutor_llm(question: str) -> str:
+    """Convenience wrapper: call the HF serverless model with the MathPulse tutor prompt."""
+    if not HF_TOKEN:
+        raise RuntimeError("HF_TOKEN is not set")
+
+    url = f"https://api-inference.huggingface.co/models/{HF_MATH_MODEL_ID}"
+    payload = {
+        "inputs": build_math_tutor_prompt(question),
+        "parameters": {
+            "max_new_tokens": 512,
+            "temperature": 0.2,
+            "top_p": 0.9,
+        },
+    }
+    headers = {
+        "Authorization": f"Bearer {HF_TOKEN}",
+        "Content-Type": "application/json",
+    }
+
+    for attempt in range(3):
+        resp = http_requests.post(url, headers=headers, json=payload, timeout=60)
+        if resp.status_code == 503 and attempt < 2:
+            time.sleep(3)
+            continue
+        if resp.status_code != 200:
+            raise RuntimeError(f"HF Inference error {resp.status_code}: {resp.text}")
+
+        data = resp.json()
+        if isinstance(data, list) and len(data) > 0:
+            generated = data[0].get("generated_text") or data[0].get("output_text")
+            if generated:
+                return generated.strip()
+        elif isinstance(data, dict):
+            generated = data.get("generated_text") or data.get("output_text")
+            if generated:
+                return generated.strip()
+
+        raise RuntimeError(f"Unexpected HF response format: {data}")
+
+    raise RuntimeError("HF Inference failed after retries")
 
 
 # ─── Request/Response Models ──────────────────────────────────
@@ -353,7 +490,7 @@ async def root():
 }
 
 
-# ─── AI Chat Tutor 
+# ─── AI Chat Tutor ─────────────────────────────────────────────
 
 
 MATH_TUTOR_SYSTEM_PROMPT = """You are MathPulse AI, a rigorous and friendly expert math tutor for students. You help with:
@@ -382,10 +519,8 @@ Additional Guidelines:
 
 @app.post("/api/chat", response_model=ChatResponse)
 async def chat_tutor(request: ChatRequest):
-    """AI Math Tutor powered by 
+    """AI Math Tutor powered by HF Serverless Inference (Meta-Llama-3-8B-Instruct)"""
     try:
-        hf = get_client()
-
         messages = [{"role": "system", "content": MATH_TUTOR_SYSTEM_PROMPT}]
 
         # Add conversation history
@@ -395,30 +530,11 @@ async def chat_tutor(request: ChatRequest):
         # Add current message
         messages.append({"role": "user", "content": request.message})
 
-        # 
-
-
-
-
-                response = hf.chat_completion(
-                    model=CHAT_MODEL,
-                    messages=messages,
-                    max_tokens=2048,
-                    temperature=0.2,
-                    top_p=0.9,
-                )
-                answer = response.choices[0].message.content or ""
-                last_err = None
-                break
-            except Exception as hf_err:
-                last_err = hf_err
-                logger.warning(f"HF chat attempt {attempt + 1}/5 failed: {hf_err}")
-                if attempt < 4:
-                    # Longer backoff: 2s, 4s, 8s, 16s to handle cold starts
-                    await asyncio.sleep(2 ** (attempt + 1))
-
-        if last_err is not None:
-            logger.error(f"HF chat failed after 5 attempts: {last_err}")
+        # Call HF serverless with retry (handled inside call_hf_chat)
+        try:
+            answer = call_hf_chat(messages, max_tokens=2048, temperature=0.2, top_p=0.9)
+        except Exception as hf_err:
+            logger.error(f"HF chat failed: {hf_err}")
             raise HTTPException(
                 status_code=502,
                 detail="AI model service is temporarily unavailable. Please try again.",
@@ -476,7 +592,6 @@ async def verify_math_response(
     math problem and check if the final answers agree.
     Returns dict with 'verified' (bool), 'confidence' (str), and 'response'.
     """
-    hf = get_client()
     responses: List[str] = []
     answers: List[Optional[str]] = []
 
@@ -484,14 +599,7 @@ async def verify_math_response(
 
     for i in range(VERIFICATION_SAMPLES):
         try:
-            
-                model=CHAT_MODEL,
-                messages=base_messages,
-                max_tokens=2048,
-                temperature=0.7,
-                top_p=0.9,
-            )
-            text = result.choices[0].message.content or ""
+            text = call_hf_chat(base_messages, max_tokens=2048, temperature=0.7, top_p=0.9)
             responses.append(text)
             answers.append(_extract_final_answer(text))
             logger.info(f"  Sample {i+1} answer: {answers[-1]}")
@@ -554,7 +662,6 @@ async def verify_with_code(problem: str, solution: str) -> Dict[str, Any]:
     Ask the model to generate Python verification code for a math solution,
     execute it safely, and return the verification result.
     """
-    hf = get_client()
 
     prompt = f"""Given this math problem and its proposed solution, write a short Python script that numerically verifies the answer.
 
@@ -571,8 +678,7 @@ Rules:
 Respond with ONLY the Python code, no markdown fences, no explanation."""
 
     try:
-        
-            model=CHAT_MODEL,
+        raw_code = call_hf_chat(
             messages=[
                 {
                     "role": "system",
@@ -583,8 +689,6 @@ Respond with ONLY the Python code, no markdown fences, no explanation."""
             max_tokens=800,
             temperature=0.1,
         )
-
-        raw_code = result.choices[0].message.content or ""
         # Strip markdown code fences if present
        code = re.sub(r"^```(?:python)?\s*\n?", "", raw_code.strip())
         code = re.sub(r"\n?```\s*$", "", code)
@@ -666,7 +770,6 @@ async def llm_judge_verification(problem: str, solution: str) -> Dict[str, Any]:
     solution is correct. Checks formula usage, calculations, and logic.
     Returns dict with 'correct' (bool), 'issues' (list), 'confidence' (float).
     """
-    hf = get_client()
 
     prompt = f"""You are a meticulous math verification expert. Your job is to verify whether the following solution to a math problem is mathematically correct.
 
@@ -689,8 +792,7 @@ Respond with ONLY a JSON object (no markdown, no explanation outside the JSON):
 }}"""
 
     try:
-        
-            model=CHAT_MODEL,
+        raw = call_hf_chat(
             messages=[
                 {
                     "role": "system",
@@ -701,8 +803,6 @@ Respond with ONLY a JSON object (no markdown, no explanation outside the JSON):
             max_tokens=500,
             temperature=0.1,
         )
-
-        raw = result.choices[0].message.content or ""
         # Extract JSON from response
         json_start = raw.find("{")
         json_end = raw.rfind("}") + 1
@@ -928,15 +1028,13 @@ async def predict_risk_batch(request: BatchRiskRequest):
     return results
 
 
-# ─── Learning Path Generation
+# ─── Learning Path Generation ──────────────────────────────────
 
 
 @app.post("/api/learning-path", response_model=LearningPathResponse)
 async def generate_learning_path(request: LearningPathRequest):
     """Generate AI-powered personalized learning path"""
     try:
-        hf = get_client()
-
         prompt = f"""Generate a personalized math learning path for a student with these details:
 - Weak Topics: {', '.join(request.weaknesses)}
 - Grade Level: {request.gradeLevel}
@@ -958,28 +1056,10 @@ Format as a numbered list. Be specific to the math topics mentioned."""
             {"role": "user", "content": prompt},
         ]
 
-
-
-
-
-            try:
-                response = hf.chat_completion(
-                    model=CHAT_MODEL,
-                    messages=messages,
-                    max_tokens=1500,
-                    temperature=0.7,
-                )
-                content = response.choices[0].message.content or ""
-                last_err = None
-                break
-            except Exception as hf_err:
-                last_err = hf_err
-                logger.warning(f"HF learning-path attempt {attempt + 1} failed: {hf_err}")
-                if attempt < 2:
-                    await asyncio.sleep(2 ** attempt)
-
-        if last_err is not None:
-            logger.error(f"HF learning-path failed after 3 attempts: {last_err}")
+        try:
+            content = call_hf_chat(messages, max_tokens=1500, temperature=0.7)
+        except Exception as hf_err:
+            logger.error(f"HF learning-path failed: {hf_err}")
             raise HTTPException(
                 status_code=502,
                 detail="Learning path generation is temporarily unavailable.",
@@ -994,15 +1074,13 @@ Format as a numbered list. Be specific to the math topics mentioned."""
         raise HTTPException(status_code=500, detail=f"Learning path error: {str(e)}")
 
 
-# ─── Daily AI Insights
+# ─── Daily AI Insights ─────────────────────────────────────────
 
 
 @app.post("/api/analytics/daily-insight", response_model=DailyInsightResponse)
 async def daily_insight(request: DailyInsightRequest):
     """Generate daily AI insights for teacher dashboard"""
     try:
-        hf = get_client()
-
         students = request.students
         total = len(students)
         if total == 0:
@@ -1040,28 +1118,10 @@ Keep the response under 200 words. Be specific and practical."""
             {"role": "user", "content": prompt},
         ]
 
-
-
-
-
-            try:
-                response = hf.chat_completion(
-                    model=CHAT_MODEL,
-                    messages=messages,
-                    max_tokens=800,
-                    temperature=0.7,
-                )
-                content = response.choices[0].message.content or ""
-                last_err = None
-                break
-            except Exception as hf_err:
-                last_err = hf_err
-                logger.warning(f"HF daily-insight attempt {attempt + 1} failed: {hf_err}")
-                if attempt < 2:
-                    await asyncio.sleep(2 ** attempt)
-
-        if last_err is not None:
-            logger.error(f"HF daily-insight failed after 3 attempts: {last_err}")
+        try:
+            content = call_hf_chat(messages, max_tokens=800, temperature=0.7)
+        except Exception as hf_err:
+            logger.error(f"HF daily-insight failed: {hf_err}")
             raise HTTPException(
                 status_code=502,
                 detail="AI insight generation is temporarily unavailable.",
@@ -1117,7 +1177,6 @@ async def upload_class_records(file: UploadFile = File(...)):
         raise HTTPException(status_code=400, detail="No data found in uploaded file")
 
     # AI-powered column mapping
-    hf = get_client()
     columns_text = ", ".join(df.columns.tolist())
 
     prompt = f"""I have a spreadsheet with these columns: {columns_text}
@@ -1132,16 +1191,11 @@ Map each column to one of these standard fields (respond as JSON only):
 
 If a column doesn't match any field, skip it. Respond ONLY with a JSON object mapping original column names to field names. Example: {{"Student Name": "name", "ID": "studentId"}}"""
 
-    
-        model=CHAT_MODEL,
+    mapping_text = call_hf_chat(
         messages=[{"role": "user", "content": prompt}],
         max_tokens=300,
         temperature=0.1,
     )
-
-    # Parse AI column mapping
-    raw_content = mapping_response.choices[0].message.content
-    mapping_text = (raw_content or "").strip()
     # Extract JSON from response
     try:
         # Try to find JSON in the response
@@ -1522,12 +1576,11 @@ def _validate_quiz_questions(
 @app.post("/api/quiz/generate", response_model=QuizResponse)
 async def generate_quiz(request: QuizGenerationRequest):
     """
-    Generate an AI-powered quiz
+    Generate an AI-powered quiz via HF Serverless Inference.
     Supports Bloom's Taxonomy integration, multiple question types,
     and graph-based identification questions.
     """
     try:
-        hf = get_client()
 
         # Filter out excluded topics
         effective_topics = [t for t in request.topics if t not in request.excludeTopics]
@@ -1589,15 +1642,7 @@ Remember:
 
         logger.info(f"Generating quiz: {request.numQuestions} questions, topics={effective_topics}")
 
-        
-            model=CHAT_MODEL,
-            messages=messages,
-            max_tokens=4096,
-            temperature=0.3,
-            top_p=0.9,
-        )
-
-        raw_content = response.choices[0].message.content or ""
+        raw_content = call_hf_chat(messages, max_tokens=4096, temperature=0.3, top_p=0.9)
         logger.info(f"Raw quiz response length: {len(raw_content)} chars")
 
         parsed_questions = _parse_quiz_json(raw_content)
@@ -1709,8 +1754,6 @@ async def student_competency(request: StudentCompetencyRequest):
     Returns efficiency scores, competency levels, and recommendations.
     """
     try:
-        hf = get_client()
-
         history = request.quizHistory or []
 
         if not history:
@@ -1786,8 +1829,7 @@ async def student_competency(request: StudentCompetencyRequest):
 
 Focus on actionable recommendations. Be encouraging yet honest."""
 
-        
-            model=CHAT_MODEL,
+        overall_perspective = call_hf_chat(
             messages=[
                 {"role": "system", "content": "You are an educational assessment expert. Be concise and supportive."},
                 {"role": "user", "content": ai_prompt},
@@ -1795,7 +1837,6 @@ Focus on actionable recommendations. Be encouraging yet honest."""
             max_tokens=200,
             temperature=0.3,
         )
-        overall_perspective = ai_response.choices[0].message.content or ""
         if overall_perspective:
             # Add to recommended as a note
             recommended.append(f"AI Insight: {overall_perspective.strip()}")

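The core of the main.py change is call_hf_chat: every chat-style endpoint now funnels through one requests-based helper instead of keeping its own InferenceClient retry loop, while get_client() survives only for the BART zero-shot risk classifier. A minimal sketch of calling the helper directly, outside FastAPI; it assumes main.py is importable and that HF_TOKEN is already exported in the environment, and uses exactly the signature added above.

# Sketch only: needs network access and a valid HF_TOKEN in the environment.
from main import call_hf_chat

if __name__ == "__main__":
    reply = call_hf_chat(
        messages=[
            {"role": "system", "content": "You are a concise math tutor."},
            {"role": "user", "content": "Factor x^2 + 3x + 2."},
        ],
        max_tokens=256,
        temperature=0.2,
    )
    print(reply)

Because the helper targets the OpenAI-compatible /v1/chat/completions route, the payload shape matches what the old chat_completion() calls produced, so each endpoint only had to swap the transport and drop its per-call retry loop.
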
requirements.txt
CHANGED

@@ -1,6 +1,7 @@
 fastapi>=0.104.0
 uvicorn[standard]>=0.24.0
-huggingface-hub>=0.
+huggingface-hub>=0.31.0
+requests>=2.31.0
 pandas>=2.1.0
 openpyxl>=3.1.0
 pdfplumber>=0.10.0

tests/test_api.py
CHANGED

@@ -102,27 +102,15 @@ class FakeClassificationElement:
         self.score = score
 
 
-class FakeChatChoice:
-    """Mimics ChatCompletionOutput.choices[0]."""
-
-    def __init__(self, content: str):
-        self.message = MagicMock(content=content)
-
-
-class FakeChatCompletion:
-    """Mimics InferenceClient.chat_completion() return."""
-
-    def __init__(self, content: str):
-        self.choices = [FakeChatChoice(content)]
-
-
-def make_hf_client(
-    chat_content: str = "The answer is 42.",
+def make_zsc_client(
     classification: list | None = None,
 ):
-    """Create a mock InferenceClient with predictable outputs.
+    """Create a mock InferenceClient with predictable zero-shot outputs.
+
+    Used only for risk-prediction tests (the only endpoint still using
+    ``get_client()`` / ``InferenceClient``).
+    """
     mock_client = MagicMock()
-    mock_client.chat_completion.return_value = FakeChatCompletion(chat_content)
 
     if classification is None:
         classification = [
@@ -162,9 +150,9 @@ class TestHealthEndpoints:
 
 
 class TestChatEndpoint:
-    @patch("main.
-    def test_chat_success(self, 
-        
+    @patch("main.call_hf_chat")
+    def test_chat_success(self, mock_chat):
+        mock_chat.return_value = "Hello! 2+2=4."
         response = client.post("/api/chat", json={
             "message": "What is 2+2?",
             "history": [],
@@ -172,10 +160,9 @@ class TestChatEndpoint:
         assert response.status_code == 200
         assert "4" in response.json()["response"]
 
-    @patch("main.
-    def test_chat_with_history(self, 
-        
-        mock_get.return_value = hf
+    @patch("main.call_hf_chat")
+    def test_chat_with_history(self, mock_chat):
+        mock_chat.return_value = "Yes, that's right."
         response = client.post("/api/chat", json={
             "message": "Is that correct?",
             "history": [
@@ -185,19 +172,17 @@ class TestChatEndpoint:
         })
         assert response.status_code == 200
         # Verify history was included in messages
-        call_args = 
-        messages = call_args.
+        call_args = mock_chat.call_args
+        messages = call_args.args[0] if call_args.args else call_args.kwargs.get("messages", [])
         assert len(messages) >= 3  # system + 2 history + 1 current
 
     def test_chat_missing_message_returns_422(self):
         response = client.post("/api/chat", json={"history": []})
         assert response.status_code == 422
 
-    @patch("main.
-    def test_chat_hf_failure_returns_502(self, 
-        
-        hf.chat_completion.side_effect = Exception("HF API down")
-        mock_get.return_value = hf
+    @patch("main.call_hf_chat")
+    def test_chat_hf_failure_returns_502(self, mock_chat):
+        mock_chat.side_effect = Exception("HF API down")
         response = client.post("/api/chat", json={
             "message": "Hello",
             "history": [],
@@ -211,7 +196,7 @@ class TestChatEndpoint:
 class TestRiskPrediction:
     @patch("main.get_client")
     def test_predict_risk_success(self, mock_get):
-        mock_get.return_value = 
+        mock_get.return_value = make_zsc_client()
         response = client.post("/api/predict-risk", json={
             "engagementScore": 80,
             "avgQuizScore": 75,
@@ -249,7 +234,7 @@ class TestRiskPrediction:
 
     @patch("main.get_client")
     def test_predict_risk_hf_failure(self, mock_get):
-        hf = 
+        hf = make_zsc_client()
         hf.zero_shot_classification.side_effect = Exception("HF down")
         mock_get.return_value = hf
         response = client.post("/api/predict-risk", json={
@@ -262,7 +247,7 @@ class TestRiskPrediction:
 
     @patch("main.get_client")
     def test_batch_risk_prediction(self, mock_get):
-        mock_get.return_value = 
+        mock_get.return_value = make_zsc_client()
         response = client.post("/api/predict-risk/batch", json={
             "students": [
                 {"engagementScore": 80, "avgQuizScore": 75, "attendance": 90, "assignmentCompletion": 85},
@@ -277,9 +262,9 @@ class TestRiskPrediction:
 
 
 class TestLearningPath:
-    @patch("main.
-    def test_learning_path_success(self, 
-        
+    @patch("main.call_hf_chat")
+    def test_learning_path_success(self, mock_chat):
+        mock_chat.return_value = "1. Review fractions\n2. Practice decimals"
         response = client.post("/api/learning-path", json={
             "weaknesses": ["fractions", "decimals"],
             "gradeLevel": "Grade 7",
@@ -299,11 +284,9 @@ class TestLearningPath:
         })
         assert response.status_code == 422
 
-    @patch("main.
-    def test_learning_path_hf_failure(self, 
-        
-        hf.chat_completion.side_effect = Exception("HF down")
-        mock_get.return_value = hf
+    @patch("main.call_hf_chat")
+    def test_learning_path_hf_failure(self, mock_chat):
+        mock_chat.side_effect = Exception("HF down")
         response = client.post("/api/learning-path", json={
             "weaknesses": ["algebra"],
             "gradeLevel": "Grade 8",
@@ -315,9 +298,9 @@ class TestLearningPath:
 
 
 class TestDailyInsight:
-    @patch("main.
-    def test_daily_insight_success(self, 
-        
+    @patch("main.call_hf_chat")
+    def test_daily_insight_success(self, mock_chat):
+        mock_chat.return_value = "Class is doing well."
         response = client.post("/api/analytics/daily-insight", json={
             "students": [
                 {"name": "Alice", "engagementScore": 80, "avgQuizScore": 75, "attendance": 90, "riskLevel": "Low"},
@@ -359,8 +342,8 @@ class TestQuizTopics:
 
 
 class TestQuizGeneration:
-    @patch("main.
-    def test_generate_quiz_success(self, 
+    @patch("main.call_hf_chat")
+    def test_generate_quiz_success(self, mock_chat):
         quiz_json = json.dumps([{
             "questionType": "multiple_choice",
             "question": "What is 2+2?",
@@ -372,7 +355,7 @@ class TestQuizGeneration:
             "points": 1,
             "explanation": "2+2=4",
         }])
-        
+        mock_chat.return_value = quiz_json
 
         response = client.post("/api/quiz/generate", json={
             "topics": ["Arithmetic"],
@@ -390,9 +373,9 @@ class TestQuizGeneration:
         })
         assert response.status_code == 422
 
-    @patch("main.
-    def test_generate_quiz_bad_llm_output(self, 
-        
+    @patch("main.call_hf_chat")
+    def test_generate_quiz_bad_llm_output(self, mock_chat):
+        mock_chat.return_value = "This is not valid JSON at all."
         response = client.post("/api/quiz/generate", json={
             "topics": ["Algebra"],
             "gradeLevel": "Grade 8",
@@ -400,8 +383,8 @@ class TestQuizGeneration:
         })
         assert response.status_code == 500
 
-    @patch("main.
-    def test_preview_quiz(self, 
+    @patch("main.call_hf_chat")
+    def test_preview_quiz(self, mock_chat):
         quiz_json = json.dumps([{
             "questionType": "identification",
             "question": "Define slope.",
@@ -412,7 +395,7 @@ class TestQuizGeneration:
             "points": 1,
             "explanation": "Slope = rise/run.",
         }])
-        
+        mock_chat.return_value = quiz_json
         response = client.post("/api/quiz/preview", json={
             "topics": ["Algebra"],
             "gradeLevel": "Grade 8",
@@ -491,9 +474,9 @@ class TestErrorHandling:
 
 
 class TestStudentCompetency:
-    @patch("main.
-    def test_competency_no_history(self, 
-        
+    @patch("main.call_hf_chat")
+    def test_competency_no_history(self, mock_chat):
+        mock_chat.return_value = ""
         response = client.post("/api/quiz/student-competency", json={
             "studentId": "student123",
             "quizHistory": [],
@@ -503,9 +486,9 @@ class TestStudentCompetency:
         assert data["studentId"] == "student123"
         assert data["competencies"] == []
 
-    @patch("main.
-    def test_competency_with_history(self, 
-        
+    @patch("main.call_hf_chat")
+    def test_competency_with_history(self, mock_chat):
+        mock_chat.return_value = "Good progress overall."
         response = client.post("/api/quiz/student-competency", json={
             "studentId": "student123",
             "quizHistory": [

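All of the test rewrites follow one rule: endpoints that used to need a fake chat-completion object now patch main.call_hf_chat and hand back a plain string, and only the risk-prediction tests still build a mock InferenceClient. The new helper itself can be unit-tested the same way by stubbing main.http_requests.post; the sketch below is hypothetical (it is not part of this diff), and the patched token value is an arbitrary placeholder so the missing-token guard passes.

from unittest.mock import MagicMock, patch

import main  # the module changed in this commit


@patch("main.HF_TOKEN", "test-token")   # placeholder token; call_hf_chat refuses to run without one
@patch("main.http_requests.post")
def test_call_hf_chat_parses_openai_style_payload(mock_post):
    # Simulate the OpenAI-compatible response shape the helper expects.
    fake_resp = MagicMock()
    fake_resp.status_code = 200
    fake_resp.json.return_value = {"choices": [{"message": {"content": "  x = 3  "}}]}
    mock_post.return_value = fake_resp

    out = main.call_hf_chat([{"role": "user", "content": "Solve x + 1 = 4"}])

    assert out == "x = 3"            # call_hf_chat strips surrounding whitespace
    assert mock_post.call_count == 1  # no retries when the first call returns 200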