Spaces:
Running
Running
Prabhas Jupalli commited on
Commit ·
74626f2
0
Parent(s):
Deployment: High-Fidelity Dashboard & Native Storage Integration
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .dockerignore +21 -0
- .gitignore +46 -0
- Dockerfile +45 -0
- README.md +34 -0
- backend/Inspiration/app.py +600 -0
- backend/Inspiration/model_library.py +597 -0
- backend/Inspiration/modelsRoutes.py +1457 -0
- backend/__init__.py +32 -0
- backend/app.py +18 -0
- backend/cert.pem +35 -0
- backend/data/db.json +512 -0
- backend/data/youtube_links.json +20 -0
- backend/data/youtube_transcripts.json +20 -0
- backend/database.py +150 -0
- backend/db.json +2137 -0
- backend/debug_nlp.py +70 -0
- backend/fix_db.py +32 -0
- backend/init.py +32 -0
- backend/key.pem +52 -0
- backend/navigator.py +228 -0
- backend/nlp/nlp_resources.json +0 -0
- backend/nlp_api.py +1178 -0
- backend/request_logger.py +28 -0
- backend/requirements.txt +19 -0
- backend/utils.py +266 -0
- docker-compose.yml +16 -0
- eslint.config.js +28 -0
- index.html +13 -0
- package-lock.json +0 -0
- package.json +42 -0
- postcss.config.js +6 -0
- scripts/deploy_hf.ps1 +28 -0
- scripts/deploy_minro.ps1 +45 -0
- scripts/deploy_remote.ps1 +34 -0
- src/App.tsx +35 -0
- src/components/ControlPanel.tsx +788 -0
- src/components/Dashboard/DashboardTutorial.tsx +209 -0
- src/components/Dashboard/MissionGrid.tsx +110 -0
- src/components/Dashboard/RobotAssistant.tsx +166 -0
- src/components/Dashboard/StatisticCard.tsx +138 -0
- src/components/GridVisualization.tsx +876 -0
- src/components/Layout/AuthLayout.tsx +33 -0
- src/components/Layout/DashboardLayout.tsx +483 -0
- src/components/LearningRoadmap.tsx +155 -0
- src/components/ui/AnimatedButton.tsx +67 -0
- src/components/ui/Button.tsx +38 -0
- src/components/ui/CustomCursor.tsx +116 -0
- src/components/ui/GlassCard.tsx +24 -0
- src/components/ui/Input.tsx +70 -0
- src/components/ui/Mascot.tsx +220 -0
.dockerignore
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
node_modules
|
| 2 |
+
build
|
| 3 |
+
dist
|
| 4 |
+
venv
|
| 5 |
+
env
|
| 6 |
+
__pycache__
|
| 7 |
+
*.pyc
|
| 8 |
+
*.pyo
|
| 9 |
+
*.pyd
|
| 10 |
+
.Python
|
| 11 |
+
.git
|
| 12 |
+
.gitignore
|
| 13 |
+
.dockerignore
|
| 14 |
+
Dockerfile
|
| 15 |
+
README.md
|
| 16 |
+
backend/polyline_generation.log
|
| 17 |
+
backend/db.json
|
| 18 |
+
backend/db_backup.json
|
| 19 |
+
.env
|
| 20 |
+
*.pem
|
| 21 |
+
*.local
|
.gitignore
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Logs
|
| 2 |
+
logs
|
| 3 |
+
*.log
|
| 4 |
+
npm-debug.log*
|
| 5 |
+
yarn-debug.log*
|
| 6 |
+
yarn-error.log*
|
| 7 |
+
pnpm-debug.log*
|
| 8 |
+
lerna-debug.log*
|
| 9 |
+
node_modules
|
| 10 |
+
dist
|
| 11 |
+
dist-ssr
|
| 12 |
+
*.local
|
| 13 |
+
|
| 14 |
+
# Editor directories and files
|
| 15 |
+
.vscode/*
|
| 16 |
+
!.vscode/extensions.json
|
| 17 |
+
.idea
|
| 18 |
+
.DS_Store
|
| 19 |
+
*.suo
|
| 20 |
+
*.ntvs*
|
| 21 |
+
*.njsproj
|
| 22 |
+
*.sln
|
| 23 |
+
*.sw?
|
| 24 |
+
.env
|
| 25 |
+
|
| 26 |
+
# Temporary and local files
|
| 27 |
+
__pycache__/
|
| 28 |
+
hf_https_clone/
|
| 29 |
+
temp_zip_check/
|
| 30 |
+
backend/backend_logs.txt
|
| 31 |
+
backend/data/*.db
|
| 32 |
+
backend/data/*.sql
|
| 33 |
+
nl_main_deployment.zip
|
| 34 |
+
|
| 35 |
+
# Large files and models
|
| 36 |
+
**/*.xlsx
|
| 37 |
+
**/*.pth
|
| 38 |
+
**/*.zip
|
| 39 |
+
backend/models/
|
| 40 |
+
backend/nlp/*.xlsx
|
| 41 |
+
backend/data/*.json
|
| 42 |
+
!backend/data/youtube_links.json
|
| 43 |
+
!backend/data/youtube_transcripts.json
|
| 44 |
+
!backend/nlp/nlp_resources.json
|
| 45 |
+
!backend/data/db.json
|
| 46 |
+
!extracted_data.json
|
Dockerfile
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Stage 1: Build the React Frontend
|
| 2 |
+
FROM node:18-alpine as build-frontend
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
COPY package*.json ./
|
| 5 |
+
RUN npm ci
|
| 6 |
+
COPY . .
|
| 7 |
+
RUN npm run build
|
| 8 |
+
|
| 9 |
+
# Stage 2: Setup the Python Backend
|
| 10 |
+
FROM python:3.9-slim
|
| 11 |
+
|
| 12 |
+
# Create a non-root user for Hugging Face (UID 1000)
|
| 13 |
+
RUN useradd -m -u 1000 user
|
| 14 |
+
|
| 15 |
+
# Install system dependencies first (cached layer)
|
| 16 |
+
USER root
|
| 17 |
+
RUN apt-get update && apt-get install -y --no-install-recommends gcc python3-dev \
|
| 18 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 19 |
+
|
| 20 |
+
USER user
|
| 21 |
+
ENV PATH="/home/user/.local/bin:$PATH"
|
| 22 |
+
WORKDIR /home/user/app
|
| 23 |
+
|
| 24 |
+
# Install PyTorch CPU-only FIRST (largest package, separate cache layer)
|
| 25 |
+
# This avoids downloading the ~800MB CUDA version
|
| 26 |
+
RUN pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu
|
| 27 |
+
|
| 28 |
+
# Install remaining Python dependencies
|
| 29 |
+
COPY --chown=user backend/requirements.txt backend/
|
| 30 |
+
# Remove torch from requirements since we installed it above
|
| 31 |
+
RUN grep -v '^torch' backend/requirements.txt > /tmp/req_notorch.txt && pip install --no-cache-dir --upgrade -r /tmp/req_notorch.txt && pip install --no-cache-dir gunicorn
|
| 32 |
+
|
| 33 |
+
# Copy Backend Code
|
| 34 |
+
COPY --chown=user backend/ backend/
|
| 35 |
+
COPY --chown=user Navigators/ Navigators/
|
| 36 |
+
|
| 37 |
+
# Copy Frontend Build from Stage 1
|
| 38 |
+
COPY --chown=user --from=build-frontend /app/dist ./dist
|
| 39 |
+
|
| 40 |
+
# Environment variables
|
| 41 |
+
ENV PYTHONUNBUFFERED=1
|
| 42 |
+
ENV PORT=7860
|
| 43 |
+
|
| 44 |
+
# Run the application
|
| 45 |
+
CMD gunicorn -b 0.0.0.0:$PORT backend.nlp_api:app
|
README.md
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
---
|
| 3 |
+
title: NL Main
|
| 4 |
+
emoji: 🧠
|
| 5 |
+
colorFrom: blue
|
| 6 |
+
colorTo: indigo
|
| 7 |
+
sdk: docker
|
| 8 |
+
app_port: 7860
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# NLP Learning Grid
|
| 12 |
+
|
| 13 |
+
Advanced agentic NLP learning platform with a 3D grid interface, DQN-based pathfinding, and interactive AI learning assistant.
|
| 14 |
+
|
| 15 |
+
## Deployment Options
|
| 16 |
+
|
| 17 |
+
### Hugging Face Spaces
|
| 18 |
+
This repository is configured for direct deployment to Hugging Face Spaces using Docker.
|
| 19 |
+
- Port: 7860 (Auto-configured in Dockerfile)
|
| 20 |
+
|
| 21 |
+
### Fly.io
|
| 22 |
+
Deploy using the included `fly.toml`:
|
| 23 |
+
```bash
|
| 24 |
+
fly launch
|
| 25 |
+
```
|
| 26 |
+
|
| 27 |
+
### Render.com
|
| 28 |
+
Use the `render.yaml` for one-click deployment.
|
| 29 |
+
|
| 30 |
+
### Manual
|
| 31 |
+
```bash
|
| 32 |
+
docker build -t nlp-learning-grid .
|
| 33 |
+
docker run -p 5000:7860 nlp-learning-grid
|
| 34 |
+
```
|
backend/Inspiration/app.py
ADDED
|
@@ -0,0 +1,600 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This Flask backend manages user authentication (signup/login), course enrollment, teacher/TA assignment,
|
| 2 |
+
# and provides course/learner data. It handles requests for course details, module/topic information, learner positions,
|
| 3 |
+
# and quiz interactions (submission, fetching questions/logs, recording attempts, creation).
|
| 4 |
+
# Data is often fetched from or updated in the database based on user actions and IDs.
|
| 5 |
+
import datetime
|
| 6 |
+
from utils import is_valid_id
|
| 7 |
+
from dbModels import User, db, Course, Question, UserQuiz
|
| 8 |
+
from init import app, DBcreated
|
| 9 |
+
import pandas as pd
|
| 10 |
+
from flask import make_response,jsonify, request
|
| 11 |
+
from repository import add_learner_from_user, add_teacher_from_user, create_Course, update_position, login,signup,teacher_course,teacher_course_unassigned,assign_teacher_course,unassign_teacher_course, learner_course_enrolled,generate_data,learner_course_unenrolled,enrolled_learner_data,enrolled_learners_by_course,calculate_all_module_centroids,add_enroll,update_by_quiz,learner_polyline_enrolled,get_suitable_position,change_resource_position,update_position_resource,update_summary_grade, quiz_adder_from_json,ta_course,ta_course_teached,ta_course_unteached, user_enrolled_courses, user_recom_courses
|
| 12 |
+
from datetime import datetime, timedelta,timezone
|
| 13 |
+
from flask import Flask
|
| 14 |
+
import modelsRoutes # to expose routes
|
| 15 |
+
|
| 16 |
+
# Read data from Excel file
|
| 17 |
+
excel_file = 'DM_Resource_Plot.xlsx'
|
| 18 |
+
df = pd.read_excel(excel_file)
|
| 19 |
+
excel_file = 'DM_learner_plot.xlsx'
|
| 20 |
+
df_learner = pd.read_excel(excel_file)
|
| 21 |
+
|
| 22 |
+
# Assuming your Excel file has columns 'x', 'y', and 'video_url'
|
| 23 |
+
scatterplot_data = df[['index', 'name', 'x', 'y', 'video_url', 'module','module_id','submodule_id']].to_dict(orient='records')
|
| 24 |
+
|
| 25 |
+
# Convert the scatterplot_data into a DataFrame
|
| 26 |
+
df_scatter = pd.DataFrame(scatterplot_data)
|
| 27 |
+
|
| 28 |
+
# Group by 'module_id' and calculate the mean of 'x' and 'y'
|
| 29 |
+
module_data_df = df_scatter.groupby('module_id').agg({'x': 'mean', 'y': 'mean','module': 'first' }).reset_index()
|
| 30 |
+
|
| 31 |
+
# Convert the result to a list of dictionaries with 'module_id', 'x', and 'y'
|
| 32 |
+
module_data = module_data_df.to_dict(orient='records')
|
| 33 |
+
topic_data_df=pd.read_excel('DM/DM_topics.xlsx')
|
| 34 |
+
topic_data=topic_data_df[['name','description']].to_dict(orient='records')
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
learner_data = df_learner[['index', 'resource_name', 'x', 'y', 'description']].to_dict(orient='records')
|
| 38 |
+
|
| 39 |
+
if DBcreated:
|
| 40 |
+
# print("creating the course")
|
| 41 |
+
# create_Course("Discreate Mathematics",
|
| 42 |
+
# "this is the description of DM", None, None)
|
| 43 |
+
print("Generating Data")
|
| 44 |
+
generate_data()
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@app.route('/ids/<int:user_id>')
|
| 48 |
+
def get_ids(user_id):
|
| 49 |
+
"""
|
| 50 |
+
Fetches learner, teacher, and TA IDs for a given user.
|
| 51 |
+
|
| 52 |
+
Args:
|
| 53 |
+
user_id (int): ID of the user.
|
| 54 |
+
|
| 55 |
+
Returns:
|
| 56 |
+
JSON: Dictionary containing 'learner_id', 'teacher_id', and 'ta_id'.
|
| 57 |
+
"""
|
| 58 |
+
with app.app_context():
|
| 59 |
+
user = User.query.get(user_id)
|
| 60 |
+
return ({
|
| 61 |
+
'learner_id': user.learner_id,
|
| 62 |
+
'teacher_id': user.teacher_id,
|
| 63 |
+
'ta_id': user.ta_id,
|
| 64 |
+
})
|
| 65 |
+
|
| 66 |
+
@app.route('/data')
|
| 67 |
+
def get_data():
|
| 68 |
+
"""
|
| 69 |
+
Returns resource scatterplot data (x, y, video URLs, module info).
|
| 70 |
+
|
| 71 |
+
Returns:
|
| 72 |
+
JSON: List of resource data dictionaries.
|
| 73 |
+
"""
|
| 74 |
+
return jsonify(scatterplot_data)
|
| 75 |
+
|
| 76 |
+
@app.route('/moduleData/<int:id>')
|
| 77 |
+
def get_module_data(id):
|
| 78 |
+
"""
|
| 79 |
+
Calculates and returns module centroid data for a given course.
|
| 80 |
+
|
| 81 |
+
Args:
|
| 82 |
+
id (int): Course ID.
|
| 83 |
+
|
| 84 |
+
Returns:
|
| 85 |
+
JSON: List of module data with centroid positions.
|
| 86 |
+
"""
|
| 87 |
+
moudle = calculate_all_module_centroids(id)
|
| 88 |
+
return jsonify(moudle)
|
| 89 |
+
|
| 90 |
+
@app.route('/topicData')
|
| 91 |
+
def get_topic_data():
|
| 92 |
+
"""
|
| 93 |
+
Fetches topic names and descriptions.
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
JSON: List of topic dictionaries with 'name' and 'description'.
|
| 97 |
+
"""
|
| 98 |
+
return jsonify(topic_data)
|
| 99 |
+
|
| 100 |
+
@app.route('/new_positions')
|
| 101 |
+
def get_new_data():
|
| 102 |
+
"""
|
| 103 |
+
Returns learner plot data including positions and descriptions.
|
| 104 |
+
|
| 105 |
+
Returns:
|
| 106 |
+
JSON: List of learner data dictionaries.
|
| 107 |
+
"""
|
| 108 |
+
return jsonify(learner_data)
|
| 109 |
+
|
| 110 |
+
@app.route("/signup", methods=['POST'])
|
| 111 |
+
def signup_user():
|
| 112 |
+
"""
|
| 113 |
+
Registers a new user.
|
| 114 |
+
|
| 115 |
+
Request JSON:
|
| 116 |
+
- name (str): Full name of the user.
|
| 117 |
+
- username (str): Username for login.
|
| 118 |
+
- password (str): Password for the account.
|
| 119 |
+
|
| 120 |
+
Returns:
|
| 121 |
+
JSON: Created user details if successful, else error message.
|
| 122 |
+
"""
|
| 123 |
+
data = request.get_json()
|
| 124 |
+
name = data["name"]
|
| 125 |
+
username = data["username"]
|
| 126 |
+
password = data["password"]
|
| 127 |
+
print("user signup: ", name, password, username)
|
| 128 |
+
|
| 129 |
+
user = signup(name, username, password)
|
| 130 |
+
if user:
|
| 131 |
+
return jsonify(user), 201
|
| 132 |
+
else:
|
| 133 |
+
return jsonify({"msg":"Error Creating user"}), 400
|
| 134 |
+
|
| 135 |
+
@app.route("/login", methods=['POST'])
|
| 136 |
+
def login_user():
|
| 137 |
+
"""
|
| 138 |
+
Logs in an existing user.
|
| 139 |
+
|
| 140 |
+
Request JSON:
|
| 141 |
+
- username (str): Username of the user.
|
| 142 |
+
- password (str): Password of the user.
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
JSON: User details if login is successful, else 401 status.
|
| 146 |
+
"""
|
| 147 |
+
data = request.get_json()
|
| 148 |
+
username = data["username"]
|
| 149 |
+
password = data["password"]
|
| 150 |
+
print("user login request:", username, password)
|
| 151 |
+
|
| 152 |
+
user = login(username, password)
|
| 153 |
+
response = make_response(jsonify(user))
|
| 154 |
+
if user:
|
| 155 |
+
response.status_code = 200
|
| 156 |
+
else:
|
| 157 |
+
response.status_code = 401
|
| 158 |
+
|
| 159 |
+
return response
|
| 160 |
+
|
| 161 |
+
@app.route("/teacher/courses/<int:id>", methods=['GET'])
|
| 162 |
+
def get_teacher_course(id):
|
| 163 |
+
"""
|
| 164 |
+
Fetches all courses assigned to a teacher.
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
id (int): Teacher ID.
|
| 168 |
+
|
| 169 |
+
Returns:
|
| 170 |
+
JSON: List of courses assigned to the teacher.
|
| 171 |
+
"""
|
| 172 |
+
return teacher_course(id)
|
| 173 |
+
|
| 174 |
+
@app.route("/teacher/courses/unassigned/<int:id>", methods=['GET'])
|
| 175 |
+
def get_teacher_course_unassigned(id):
|
| 176 |
+
"""
|
| 177 |
+
Fetches all unassigned courses for a teacher.
|
| 178 |
+
|
| 179 |
+
Args:
|
| 180 |
+
id (int): Teacher ID.
|
| 181 |
+
|
| 182 |
+
Returns:
|
| 183 |
+
JSON: List of unassigned courses available for assignment.
|
| 184 |
+
"""
|
| 185 |
+
return teacher_course_unassigned(id)
|
| 186 |
+
|
| 187 |
+
@app.route("/teacher/courses/assign", methods=['POST'])
|
| 188 |
+
def assign_teacher():
|
| 189 |
+
"""
|
| 190 |
+
Assigns a teacher to a course. If teacher doesn't exist, creates one.
|
| 191 |
+
|
| 192 |
+
Request JSON:
|
| 193 |
+
- user_id (int): User ID.
|
| 194 |
+
- teacher_id (int): Teacher ID (may be invalid if new teacher).
|
| 195 |
+
- course_id (int): Course ID.
|
| 196 |
+
|
| 197 |
+
Returns:
|
| 198 |
+
JSON: Assignment result (success or failure).
|
| 199 |
+
"""
|
| 200 |
+
data = request.get_json()
|
| 201 |
+
user_id = data["user_id"]
|
| 202 |
+
teacher_id = data["teacher_id"]
|
| 203 |
+
course_id = data["course_id"]
|
| 204 |
+
print(f"user_id: {user_id}, teacher_id: {teacher_id}, course_id: {course_id}")
|
| 205 |
+
if not is_valid_id(teacher_id):
|
| 206 |
+
print("adding new teacher...")
|
| 207 |
+
teacher_id = add_teacher_from_user(user_id)["id"]
|
| 208 |
+
if teacher_id and course_id:
|
| 209 |
+
result = assign_teacher_course(teacher_id, course_id)
|
| 210 |
+
response = make_response(jsonify(result))
|
| 211 |
+
if result:
|
| 212 |
+
response.status_code = 200
|
| 213 |
+
else :
|
| 214 |
+
response.status_code = 500
|
| 215 |
+
else:
|
| 216 |
+
response = make_response(None)
|
| 217 |
+
response.status_code = 400
|
| 218 |
+
return response
|
| 219 |
+
|
| 220 |
+
@app.route("/teacher/courses/unassign", methods=['POST'])
|
| 221 |
+
def unassign_teacher():
|
| 222 |
+
"""
|
| 223 |
+
Unassigns a teacher from a course.
|
| 224 |
+
|
| 225 |
+
Request JSON:
|
| 226 |
+
- teacher_id (int): Teacher ID.
|
| 227 |
+
- course_id (int): Course ID.
|
| 228 |
+
|
| 229 |
+
Returns:
|
| 230 |
+
JSON: Result of unassignment (success/failure).
|
| 231 |
+
"""
|
| 232 |
+
data = request.get_json()
|
| 233 |
+
teacher_id = data["teacher_id"]
|
| 234 |
+
course_id = data["course_id"]
|
| 235 |
+
print(teacher_id,course_id)
|
| 236 |
+
if teacher_id and course_id :
|
| 237 |
+
result = unassign_teacher_course(teacher_id, course_id)
|
| 238 |
+
return jsonify(result), 200 if result else jsonify(result), 400
|
| 239 |
+
|
| 240 |
+
@app.route("/enrolledCourses/<int:id>", methods=['GET'])
|
| 241 |
+
def get_enrolled_course(id):
|
| 242 |
+
"""
|
| 243 |
+
Fetches list of courses a user is enrolled in, including role.
|
| 244 |
+
|
| 245 |
+
Args:
|
| 246 |
+
id (int): User ID.
|
| 247 |
+
|
| 248 |
+
Returns:
|
| 249 |
+
JSON: List of enrolled courses with role info.
|
| 250 |
+
"""
|
| 251 |
+
return user_enrolled_courses(id)
|
| 252 |
+
|
| 253 |
+
@app.route("/taTeachedCourses/<int:id>",methods=['GET'])
|
| 254 |
+
def get_teached_course(id):
|
| 255 |
+
"""
|
| 256 |
+
Fetches courses taught by a TA.
|
| 257 |
+
|
| 258 |
+
Args:
|
| 259 |
+
id (int): TA ID.
|
| 260 |
+
|
| 261 |
+
Returns:
|
| 262 |
+
JSON: List of courses taught by the TA.
|
| 263 |
+
"""
|
| 264 |
+
return ta_course_teached(id)
|
| 265 |
+
|
| 266 |
+
@app.route("/enrolledLearner/<int:id>/<int:id2>", methods=['GET'])
|
| 267 |
+
def get_enrolled_learner(id,id2):
|
| 268 |
+
"""
|
| 269 |
+
Fetches enrollment data for a learner in a specific course.
|
| 270 |
+
|
| 271 |
+
Args:
|
| 272 |
+
id (int): Learner ID.
|
| 273 |
+
id2 (int): Course ID.
|
| 274 |
+
|
| 275 |
+
Returns:
|
| 276 |
+
JSON: Learner enrollment details.
|
| 277 |
+
"""
|
| 278 |
+
return enrolled_learner_data(id,id2)
|
| 279 |
+
|
| 280 |
+
@app.route("/enrolledLearnersByCourse/<int:id>", methods=['GET'])
|
| 281 |
+
def get_enrolled_learners(id):
|
| 282 |
+
"""
|
| 283 |
+
Fetches all learners enrolled in a given course.
|
| 284 |
+
|
| 285 |
+
Args:
|
| 286 |
+
id (int): Course ID.
|
| 287 |
+
|
| 288 |
+
Returns:
|
| 289 |
+
JSON: List of enrolled learners.
|
| 290 |
+
"""
|
| 291 |
+
return enrolled_learners_by_course(id)
|
| 292 |
+
|
| 293 |
+
@app.route("/recomCourses/<int:id>", methods=['GET'])
|
| 294 |
+
def get_recom_course(id):
|
| 295 |
+
"""
|
| 296 |
+
Fetches list of courses recommended for a user.
|
| 297 |
+
|
| 298 |
+
Args:
|
| 299 |
+
id (int): User ID.
|
| 300 |
+
|
| 301 |
+
Returns:
|
| 302 |
+
JSON: List of recommended courses for enrollment.
|
| 303 |
+
"""
|
| 304 |
+
return user_recom_courses(id)
|
| 305 |
+
|
| 306 |
+
@app.route("/ta/recomCourses/<int:id>",methods=['GET'])
|
| 307 |
+
def get_ta_recom_course(id):
|
| 308 |
+
"""
|
| 309 |
+
Fetches list of courses recommended for a TA to teach.
|
| 310 |
+
|
| 311 |
+
Args:
|
| 312 |
+
id (int): TA ID.
|
| 313 |
+
|
| 314 |
+
Returns:
|
| 315 |
+
JSON: List of teachable recommended courses.
|
| 316 |
+
"""
|
| 317 |
+
return ta_course_unteached(id)
|
| 318 |
+
|
| 319 |
+
@app.route("/enrolledPolylines/<int:id>", methods=['GET'])
|
| 320 |
+
def get_enrolled_polyline(id):
|
| 321 |
+
"""
|
| 322 |
+
Fetches polyline data for resources a learner is enrolled in.
|
| 323 |
+
|
| 324 |
+
Args:
|
| 325 |
+
id (int): Learner ID.
|
| 326 |
+
|
| 327 |
+
Returns:
|
| 328 |
+
JSON: Polyline positions of enrolled resources.
|
| 329 |
+
"""
|
| 330 |
+
return learner_polyline_enrolled(id)
|
| 331 |
+
|
| 332 |
+
@app.route("/submitsummary", methods=['POST'])
|
| 333 |
+
def get_new_postion():
|
| 334 |
+
"""
|
| 335 |
+
Updates learner's position after submitting a summary.
|
| 336 |
+
|
| 337 |
+
Request JSON:
|
| 338 |
+
- summary (str): Learner's summary.
|
| 339 |
+
- enroll_id (int): Enrollment ID.
|
| 340 |
+
|
| 341 |
+
Returns:
|
| 342 |
+
JSON: Updated position and contribution ID.
|
| 343 |
+
"""
|
| 344 |
+
data = request.get_json()
|
| 345 |
+
summary = data["summary"]
|
| 346 |
+
enrollId = data["enroll_id"]
|
| 347 |
+
pos,contribution_id = update_position(summary, enrollId)
|
| 348 |
+
return jsonify({"position": pos, "contribution_id": contribution_id}), 200
|
| 349 |
+
|
| 350 |
+
@app.route("/changeSummaryGrade", methods=['POST'])
|
| 351 |
+
def get_new_learner_postion():
|
| 352 |
+
"""
|
| 353 |
+
Updates the grade of a learner's summary.
|
| 354 |
+
|
| 355 |
+
Request JSON:
|
| 356 |
+
- contribution_id (int): Contribution ID.
|
| 357 |
+
- grade (float): Grade to assign.
|
| 358 |
+
|
| 359 |
+
Returns:
|
| 360 |
+
JSON: Updated learner position.
|
| 361 |
+
"""
|
| 362 |
+
data = request.get_json()
|
| 363 |
+
contributionId = data["contribution_id"]
|
| 364 |
+
grade = data["grade"]
|
| 365 |
+
pos = update_summary_grade(contributionId,grade)
|
| 366 |
+
return jsonify(pos), 200
|
| 367 |
+
|
| 368 |
+
@app.route("/watchResource", methods=['POST'])
|
| 369 |
+
def get_updated_postion():
|
| 370 |
+
"""
|
| 371 |
+
Updates learner's position after watching a resource.
|
| 372 |
+
|
| 373 |
+
Request JSON:
|
| 374 |
+
- enroll_id (int): Enrollment ID.
|
| 375 |
+
- resource_id (int): Resource ID.
|
| 376 |
+
|
| 377 |
+
Returns:
|
| 378 |
+
JSON: Updated learner position.
|
| 379 |
+
"""
|
| 380 |
+
data = request.get_json()
|
| 381 |
+
enrollId = data["enroll_id"]
|
| 382 |
+
resourceId = data["resource_id"]
|
| 383 |
+
pos = update_position_resource(enrollId,resourceId)
|
| 384 |
+
return jsonify(pos), 200
|
| 385 |
+
|
| 386 |
+
@app.route("/suitableResourcePosition", methods=['POST'])
|
| 387 |
+
def suitable_postion():
|
| 388 |
+
"""
|
| 389 |
+
Suggests a suitable position for a learner based on resource.
|
| 390 |
+
|
| 391 |
+
Request JSON:
|
| 392 |
+
- pos (float): Initial position.
|
| 393 |
+
- resource_id (int): Resource ID.
|
| 394 |
+
|
| 395 |
+
Returns:
|
| 396 |
+
JSON: Adjusted suitable position.
|
| 397 |
+
"""
|
| 398 |
+
data = request.get_json()
|
| 399 |
+
initial_pos = data["pos"]
|
| 400 |
+
resourceId = data["resource_id"]
|
| 401 |
+
pos = get_suitable_position(initial_pos,resourceId)
|
| 402 |
+
return jsonify(pos), 200
|
| 403 |
+
|
| 404 |
+
@app.route("/changeResourcePosition", methods=['POST'])
|
| 405 |
+
def change_postion():
|
| 406 |
+
"""
|
| 407 |
+
Changes position of a resource.
|
| 408 |
+
|
| 409 |
+
Request JSON:
|
| 410 |
+
- pos (float): New position.
|
| 411 |
+
- resource_id (int): Resource ID.
|
| 412 |
+
|
| 413 |
+
Returns:
|
| 414 |
+
JSON: Empty success response.
|
| 415 |
+
"""
|
| 416 |
+
data = request.get_json()
|
| 417 |
+
pos = data["pos"]
|
| 418 |
+
resourceId = data["resource_id"]
|
| 419 |
+
change_resource_position(pos,resourceId)
|
| 420 |
+
return jsonify({}), 200
|
| 421 |
+
|
| 422 |
+
@app.route("/submitquiz", methods=['POST'])
|
| 423 |
+
def update_by_quiz_route():
|
| 424 |
+
"""
|
| 425 |
+
Updates learner's position based on quiz performance.
|
| 426 |
+
|
| 427 |
+
Request JSON:
|
| 428 |
+
- enroll_id (int): Enrollment ID.
|
| 429 |
+
- course_id (int): Course ID.
|
| 430 |
+
- to_consider (list): Boolean array of questions to consider.
|
| 431 |
+
- question_polyline (list): Polyline array for each question.
|
| 432 |
+
|
| 433 |
+
Returns:
|
| 434 |
+
JSON: Updated learner position.
|
| 435 |
+
"""
|
| 436 |
+
data = request.get_json()
|
| 437 |
+
enrollId = data["enroll_id"]
|
| 438 |
+
courseId = data["course_id"]
|
| 439 |
+
to_consider = data["to_consider"]
|
| 440 |
+
question_polyline = data["question_polyline"]
|
| 441 |
+
|
| 442 |
+
pos = update_by_quiz(enrollId, courseId, to_consider, question_polyline, position_scaler = 1)
|
| 443 |
+
|
| 444 |
+
return jsonify(pos), 200
|
| 445 |
+
|
| 446 |
+
@app.route('/quiz_questions/<int:quiz_id>', methods=['GET'])
|
| 447 |
+
def get_quiz_questions(quiz_id):
|
| 448 |
+
"""
|
| 449 |
+
Fetches all questions for a given quiz.
|
| 450 |
+
|
| 451 |
+
Args:
|
| 452 |
+
quiz_id (int): Quiz ID.
|
| 453 |
+
|
| 454 |
+
Returns:
|
| 455 |
+
JSON: List of question dictionaries with options and correct answer.
|
| 456 |
+
"""
|
| 457 |
+
questions = Question.query.filter_by(quiz_id=quiz_id).all()
|
| 458 |
+
questions_data = []
|
| 459 |
+
for question in questions:
|
| 460 |
+
questions_data.append({
|
| 461 |
+
'id': question.id,
|
| 462 |
+
'quiz_id': question.quiz_id,
|
| 463 |
+
'question_text': question.question_text,
|
| 464 |
+
'option_a': question.option_a,
|
| 465 |
+
'option_b': question.option_b,
|
| 466 |
+
'option_c': question.option_c,
|
| 467 |
+
'option_d': question.option_d,
|
| 468 |
+
'correct_answer': question.correct_answer,
|
| 469 |
+
'polyline': question.polyline
|
| 470 |
+
})
|
| 471 |
+
|
| 472 |
+
return jsonify(questions_data), 200
|
| 473 |
+
|
| 474 |
+
@app.route('/fetch_quiz_log/<int:user_id>', methods=['GET'])
|
| 475 |
+
def fetch_quiz_log(user_id):
|
| 476 |
+
"""
|
| 477 |
+
Fetches all quiz attempts made by a user.
|
| 478 |
+
|
| 479 |
+
Args:
|
| 480 |
+
user_id (int): User ID.
|
| 481 |
+
|
| 482 |
+
Returns:
|
| 483 |
+
JSON: List of quiz attempt logs with score and attempt date.
|
| 484 |
+
"""
|
| 485 |
+
user_quizzes = UserQuiz.query.filter_by(user_id=user_id).all()
|
| 486 |
+
user_quiz_data = []
|
| 487 |
+
|
| 488 |
+
for user_quiz in user_quizzes:
|
| 489 |
+
user_quiz_data.append({
|
| 490 |
+
'id': user_quiz.id,
|
| 491 |
+
'quiz_id': user_quiz.quiz_id,
|
| 492 |
+
'user_id': user_quiz.user_id,
|
| 493 |
+
'score': user_quiz.score,
|
| 494 |
+
'completion_date': user_quiz.attempt_date,
|
| 495 |
+
})
|
| 496 |
+
|
| 497 |
+
return jsonify(user_quiz_data), 200
|
| 498 |
+
|
| 499 |
+
@app.route('/record_quiz_attempt', methods=['POST'])
|
| 500 |
+
def record_quiz_attempt():
|
| 501 |
+
"""
|
| 502 |
+
Records a quiz attempt for a user.
|
| 503 |
+
|
| 504 |
+
Request JSON:
|
| 505 |
+
- user_id (int): User ID.
|
| 506 |
+
- quiz_id (int): Quiz ID.
|
| 507 |
+
- score (float): Score obtained.
|
| 508 |
+
- status (str): Status (e.g., completed, pending).
|
| 509 |
+
- attempt_date (str, optional): ISO timestamp of attempt.
|
| 510 |
+
|
| 511 |
+
Returns:
|
| 512 |
+
JSON: Created quiz attempt record if successful.
|
| 513 |
+
"""
|
| 514 |
+
try:
|
| 515 |
+
data = request.get_json()
|
| 516 |
+
attempt_date = None
|
| 517 |
+
if 'attempt_date' in data:
|
| 518 |
+
attempt_date_utc = datetime.fromisoformat(data['attempt_date'].replace("Z", "+00:00"))
|
| 519 |
+
ist_timezone = timezone(timedelta(hours=5, minutes=30))
|
| 520 |
+
attempt_date = attempt_date_utc.astimezone(ist_timezone)
|
| 521 |
+
|
| 522 |
+
new_user_quiz = UserQuiz(
|
| 523 |
+
user_id=data['user_id'],
|
| 524 |
+
quiz_id=data['quiz_id'],
|
| 525 |
+
score=data.get('score'),
|
| 526 |
+
status=data['status'],
|
| 527 |
+
attempt_date=attempt_date
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
db.session.add(new_user_quiz)
|
| 531 |
+
db.session.commit()
|
| 532 |
+
return jsonify(new_user_quiz.to_dict()), 201
|
| 533 |
+
except Exception as e:
|
| 534 |
+
print("Error in record_quiz_attempt:", e)
|
| 535 |
+
return jsonify({"error": "Failed to record quiz attempt"}), 500
|
| 536 |
+
|
| 537 |
+
@app.route('/createquiz', methods=['POST'])
|
| 538 |
+
def create_quiz():
|
| 539 |
+
"""
|
| 540 |
+
Creates a new quiz and its associated questions.
|
| 541 |
+
|
| 542 |
+
Request JSON:
|
| 543 |
+
- quiz details and questions in structured format.
|
| 544 |
+
|
| 545 |
+
Returns:
|
| 546 |
+
JSON: Success message with x, y coordinates from quiz creation.
|
| 547 |
+
"""
|
| 548 |
+
try:
|
| 549 |
+
data = request.get_json()
|
| 550 |
+
if not data:
|
| 551 |
+
return jsonify({"error": "No data provided"}), 400
|
| 552 |
+
|
| 553 |
+
x, y = quiz_adder_from_json(data)
|
| 554 |
+
return jsonify({"message": "Quiz and questions added successfully!", "x": x, "y": y}), 201
|
| 555 |
+
|
| 556 |
+
except Exception as e:
|
| 557 |
+
print("Unexpected error in create_quiz:", e)
|
| 558 |
+
return jsonify({"error": str(e)}), 500
|
| 559 |
+
|
| 560 |
+
@app.route('/enrolls', methods=['POST'])
def create_enroll():
    """
    Enrolls a learner into a course. Creates learner profile if missing.

    Request JSON:
        - user_id (int): User ID.
        - learner_id (int, optional): Learner ID; may be omitted for new learners.
        - course_id (int): Course ID.

    Returns:
        JSON: Enrollment confirmation from add_enroll.
    """
    data = request.get_json()
    user_id = data['user_id']
    # The docstring declares learner_id optional, so use .get() here:
    # data['learner_id'] raised KeyError whenever the key was absent.
    learner_id = data.get('learner_id')
    course_id = data['course_id']
    if not is_valid_id(learner_id):
        # No usable learner profile yet — create one from the user record.
        learner_id = add_learner_from_user(user_id)['id']
    return add_enroll(learner_id, course_id)
|
| 580 |
+
|
| 581 |
+
@app.route('/coursename/<int:course_id>', methods=['GET'])
def get_course_name(course_id):
    """
    Fetches the name of a course by its ID.

    Args:
        course_id (int): ID of the course.

    Returns:
        JSON: {'course_id', 'name'} on success, or an error message with 404
        when no such course exists.
    """
    course = Course.query.get(course_id)
    if course is None:
        return jsonify({'error': 'Course not found'}), 404
    return jsonify({'course_id': course.id, 'name': course.name})
|
| 597 |
+
|
| 598 |
+
|
| 599 |
+
if __name__ == '__main__':
    # Development entry point: binds on all interfaces so the container port
    # mapping works.
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader and must
    # not ship to production — confirm this path is dev-only.
    app.run(host="0.0.0.0", debug=True)
|
backend/Inspiration/model_library.py
ADDED
|
@@ -0,0 +1,597 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
from bs4 import BeautifulSoup
|
| 3 |
+
import re
|
| 4 |
+
import nltk
|
| 5 |
+
from nltk.stem import WordNetLemmatizer, PorterStemmer
|
| 6 |
+
from nltk.corpus import stopwords
|
| 7 |
+
from sentence_transformers import SentenceTransformer
|
| 8 |
+
import numpy as np
|
| 9 |
+
from transformers import BertModel, BertTokenizer, BertForMaskedLM
|
| 10 |
+
import torch
|
| 11 |
+
from dbModels import db, Resource, Course, Topic, app, Enroll, Learner
|
| 12 |
+
import math
|
| 13 |
+
from keybert import KeyBERT
|
| 14 |
+
from utils import get_cos_sim
|
| 15 |
+
from statistics import mean
|
| 16 |
+
# from memory_profiler import profile
|
| 17 |
+
import gc
|
| 18 |
+
|
| 19 |
+
nltk.download('stopwords')
|
| 20 |
+
nltk.download('wordnet')
|
| 21 |
+
stop_words = set(stopwords.words('english'))
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def utils_preprocess_text(text: str, flg_stemm: bool = False, flg_lemm: bool = True, lst_stopwords: list = None) -> str:
    """
    Normalize raw text: strip HTML, keep letters only, drop stray single
    characters, optionally remove stopwords and apply stemming/lemmatization.

    Parameters:
        text (str): Raw input text (may contain HTML markup).
        flg_stemm (bool): Apply Porter stemming when True. Default False.
        flg_lemm (bool): Apply WordNet lemmatization when True. Default True.
        lst_stopwords (list): Stopwords to remove; None skips removal.

    Returns:
        str: The cleaned, space-joined text.
    """
    # Strip HTML markup first so tags never leak into the token stream.
    stripped = BeautifulSoup(text, 'lxml').get_text()

    # Keep alphabetic characters only, drop isolated single letters,
    # then collapse runs of whitespace.
    stripped = re.sub('[^a-zA-Z]', ' ', stripped)
    stripped = re.sub(r"\s+[a-zA-Z]\s+", ' ', stripped)
    stripped = re.sub(r'\s+', ' ', stripped)

    tokens = stripped.split()

    if lst_stopwords is not None:
        tokens = [tok for tok in tokens if tok not in lst_stopwords]

    if flg_stemm:
        stemmer = PorterStemmer()
        tokens = [stemmer.stem(tok) for tok in tokens]

    if flg_lemm:
        lemmatizer = WordNetLemmatizer()
        tokens = [lemmatizer.lemmatize(tok) for tok in tokens]

    return " ".join(tokens)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def apply_preprocessing(df: pd.DataFrame):
    """
    Add cleaned-text columns to a topics DataFrame in place.

    Populates:
        - 'clean_text': lowercased 'description' run through
          utils_preprocess_text (lemmatized, English stopwords removed,
          no stemming).
        - 'tokens': whitespace-split tokens of 'clean_text'.

    Parameters:
        df (pd.DataFrame): DataFrame with a 'description' column of strings.
    """
    english_stopwords = set(stopwords.words('english'))
    lowered = df['description'].apply(str.lower)
    df['clean_text'] = lowered.apply(
        lambda txt: utils_preprocess_text(
            txt, flg_stemm=False, flg_lemm=True, lst_stopwords=english_stopwords))
    df['tokens'] = df['clean_text'].apply(str.split)
|
| 84 |
+
|
| 85 |
+
# @profile
|
| 86 |
+
def create_topic_embeddings(topics: pd.DataFrame) -> list:
    """
    Encode every topic description with a pre-trained sentence transformer.

    Parameters:
        topics (pd.DataFrame): DataFrame with a 'description' column.

    Returns:
        list: One numpy array embedding per topic row, in row order.
    """
    encoder = SentenceTransformer('all-MiniLM-L6-v2')
    embeddings = []
    for idx in range(len(topics)):
        tensor = encoder.encode(topics.loc[idx, "description"], convert_to_tensor=True)
        # Keep only the numpy copy; drop the tensor and collect eagerly to
        # bound peak memory while iterating.
        embeddings.append(tensor.cpu().numpy())
        del tensor
        gc.collect()
    return embeddings
|
| 109 |
+
|
| 110 |
+
# @profile
|
| 111 |
+
def create_topic_polylines(topics: pd.DataFrame, topic_embeddings: list) -> pd.DataFrame:
    """
    Build a topic-similarity polyline for every topic.

    Each topic's polyline holds the cosine similarity — rescaled from [-1, 1]
    to [0, 1] — between its embedding and every topic embedding; identically
    named topics get similarity 1.

    Parameters:
        topics (pd.DataFrame): DataFrame with a 'name' column.
        topic_embeddings (list): One embedding per topic, aligned with `topics`.

    Returns:
        pd.DataFrame: Columns 'topic', 'module', 'polyline'.

    NOTE(review): module numbers are hard-coded — the first 12 topics get
    module 1, the next 8 module 2, the remainder module 3; confirm this still
    matches the course layout.
    """
    names = topics["name"].tolist()
    count = len(topics)

    # Hard-coded module split: 12 topics in module 1, 8 in module 2, rest in 3.
    modules = [1] * 12 + [2] * 8
    modules += [3] * (count - len(modules))

    topic_col, module_col, polyline_col = [], [], []

    # Compare every topic's embedding against every other topic's embedding.
    for i, name_i in enumerate(names):
        polyline = [0] * len(names)
        for j, name_j in enumerate(names):
            if name_i == name_j:
                polyline[j] = 1
            else:
                # Rescale cosine similarity from [-1, 1] into [0, 1].
                polyline[j] = (get_cos_sim(topic_embeddings[i], topic_embeddings[j]) + 1) / 2
        topic_col.append(name_i)
        module_col.append(modules[i])
        polyline_col.append(polyline)

    return pd.DataFrame({"topic": topic_col, "module": module_col, "polyline": polyline_col})
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
#
|
| 168 |
+
#
|
| 169 |
+
# Resources functions start
|
| 170 |
+
#
|
| 171 |
+
#
|
| 172 |
+
# @profile
|
| 173 |
+
|
| 174 |
+
def create_summary_embeddings(summary) -> list:
    """
    Encode a single summary string into the nested list layout used elsewhere.

    Parameters:
        summary: Text to embed.

    Returns:
        list: [[embedding_as_list]] — the single embedding wrapped in an extra
        list so the shape matches the per-document keyword-embedding lists.
    """
    encoder = SentenceTransformer('all-MiniLM-L6-v2')
    vector = encoder.encode(summary, convert_to_tensor=True).cpu().numpy()
    result = [[vector.tolist()]]
    # Release the intermediate array promptly.
    del vector
    gc.collect()
    return result
|
| 187 |
+
|
| 188 |
+
def create_resource_embeddings(keywords):
    """
    Encode each keyword entry with a sentence transformer.

    Parameters:
        keywords: Iterable of texts to embed.

    Returns:
        list: For each input, [embedding_as_list] — wrapped in a list so each
        element matches the expected [[float, ...]] structure.
    """
    encoder = SentenceTransformer('all-MiniLM-L6-v2')
    wrapped = []
    for keyword in keywords:
        vector = encoder.encode(keyword)
        wrapped.append([vector.tolist()])
    return wrapped
|
| 200 |
+
|
| 201 |
+
# @profile
|
| 202 |
+
def create_resource_polylines(topicembedding, keybert_embeddings_list, beta):
    """
    Build one topic-similarity polyline per resource and beta-stretch it.

    Pass 1: compare each resource's keyword embeddings with every topic
    embedding (cosine similarity rescaled from [-1, 1] to [0, 1]).
    Pass 2: average those per-keyword polylines into one polyline per resource.
    Pass 3: apply the beta stretch (push values away from the mean, clamp to
    [0, 1]) so the plotted polyline shows more variance.

    Parameters:
        topicembedding: Topic embedding vectors.
        keybert_embeddings_list: Per resource, a list of keyword embeddings.
        beta: Stretch factor for the final polylines.

    Returns:
        list: One beta-stretched polyline (list of floats) per resource.
    """
    all_polylines = []
    topic_embeddings = topicembedding
    for embeddings in keybert_embeddings_list:
        single_file_polyline = []
        for i in range(len(embeddings)):
            docVector = embeddings[i]
            polyline = []
            for j in range(len(topic_embeddings)):
                wordVector = topic_embeddings[j]
                # find the cosine similarity between resource embeddings and the topic embeddings
                cos_sim = (get_cos_sim(wordVector, docVector) + 1) / 2
                polyline.append({'x': j, 'y': cos_sim})
            single_file_polyline.append(polyline)
        all_polylines.append(single_file_polyline)
    new_polylines = []

    for single_file_polyline in all_polylines:
        templ = [0]*len(topicembedding)
        for i in range(len(topicembedding)):
            temp = 0
            # between the multiple polylines for each doc find the average and set that as the final polyline
            for j in range(len(single_file_polyline)):
                temp += single_file_polyline[j][i]['y']
            templ[i] = temp / len(single_file_polyline)
        new_polylines.append(templ)

    polylines = []
    temporary_list = []
    # NOTE(review): learning_objects is built but never returned or read —
    # confirm it can be removed.
    learning_objects = []
    for i in range(len(new_polylines)):
        polyline = new_polylines[i]
        pol = {}
        temporary_dict = {}
        for j in range(len(polyline)):
            pol[j] = polyline[j]
        hd1 = np.array([v for v in pol.values()])
        # NOTE(review): the return value of tolist() is discarded, so this
        # call has no effect.
        hd1.tolist()
        temporary_dict["polyline"] = hd1
        temporary_dict["ID"] = "r"
        learning_objects.append(temporary_dict)
        temporary_list.append(hd1)
    polylines.extend(temporary_list)

    beta = beta  # no-op self-assignment; beta drives the variance stretch below
    polyline2 = polylines.copy()
    beta_polylines = []
    for line in polyline2:
        v2 = []
        mean_val = np.average(line)
        len_arr = len(line)  # NOTE(review): unused
        for j in line:
            # Push each value away from the mean by a factor of beta ...
            j = j + beta*(j - mean_val)
            # ... then clamp back into the valid [0, 1] range.
            if j > 1:
                j = 1
            if j < 0:
                j = 0
            v2.append(j)
        beta_polylines.append(v2)

    return beta_polylines
|
| 263 |
+
|
| 264 |
+
def create_beta_polylines(resource_polylines, beta):
    """
    Exaggerate each polyline's contrast around its mean (the "beta" stretch).

    Every value v becomes v + beta * (v - mean(line)), clamped to [0, 1], so a
    larger beta yields more variance when the polyline is plotted.

    Parameters:
        resource_polylines: Iterable of numeric sequences (one polyline each).
        beta: Stretch factor; 0 leaves values unchanged.

    Returns:
        list: New list of stretched, clamped polylines (inputs not mutated).
    """
    # Removed a no-op self-assignment (`beta = beta`), an unused `len_arr`
    # local, and an unnecessary copy of the input list.
    stretched = []
    for line in resource_polylines:
        mean_val = np.average(line)
        stretched.append(
            [min(1, max(0, value + beta * (value - mean_val))) for value in line])
    return stretched
|
| 282 |
+
|
| 283 |
+
def create_beta_polyline(polyline, beta):
    """
    Apply the beta stretch to a single polyline: push each value away from
    the mean by a factor of beta, clamping the result to [0, 1].

    Parameters:
        polyline: Numeric sequence of similarity values.
        beta: Stretch factor.

    Returns:
        list: The transformed values, same length as `polyline`.
    """
    mean_val = np.average(polyline)
    transformed = []
    for value in polyline:
        shifted = value + beta * (value - mean_val)
        # Clamp into the valid similarity range.
        if shifted > 1:
            shifted = 1
        if shifted < 0:
            shifted = 0
        transformed.append(shifted)
    return transformed
|
| 298 |
+
|
| 299 |
+
#
|
| 300 |
+
#
|
| 301 |
+
#
|
| 302 |
+
# learners functions
|
| 303 |
+
#
|
| 304 |
+
#
|
| 305 |
+
#
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def create_embeddings_centroid_list(l):
    """
    Collapse each document's keyword embeddings into a single centroid.

    Parameters:
        l: Iterable of documents, each a list of equal-length embedding vectors.

    Returns:
        list: One centroid vector (component-wise mean) per document.
    """
    centroids = []
    for doc_embeddings in l:
        # zip(*doc_embeddings) groups the same dimension across all vectors.
        centroid = [sum(dim) / len(dim) for dim in zip(*doc_embeddings)]
        centroids.append(centroid)
    return centroids
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def rad_plot_axes(num: int, x_max: float, y_max: float):
    """
    Compute the axis lengths and angular step for a quarter-circle radial plot.

    Axes are spread at equal angles theta = (pi / (num - 1)) / 2 from the
    x-axis. Axes at or below the plot diagonal run to the right edge
    (x = x_max); lengths above the diagonal mirror those below it, so every
    axis ends on the plot boundary.

    Parameters:
        num (int): Number of axes.
        x_max (float): Maximum x-coordinate of the plot area.
        y_max (float): Maximum y-coordinate of the plot area.

    Returns:
        tuple: (tlen, theta) — the per-axis lengths and the angle between
        consecutive axes.
    """
    # Removed dead locals (empt_arr, xstop, ystop) and the trailing loop that
    # only padded them: none of that affected the returned values.
    theta = (np.pi / (num - 1)) / 2
    diagonal = np.arctan(y_max / x_max)  # hoisted: was recomputed each pass

    tlen = []      # axis lengths, below-diagonal first
    mirrored = []  # below-diagonal lengths to mirror above the diagonal
    b = 0
    while b * theta <= diagonal:
        y_end = x_max * math.tan(b * theta)
        length = math.sqrt(x_max ** 2 + y_end ** 2)
        tlen.append(length)
        # The diagonal axis itself is not mirrored.
        if b * theta != diagonal:
            mirrored.append(length)
        b += 1

    tlen.extend(reversed(mirrored))
    return tlen, theta
|
| 361 |
+
|
| 362 |
+
# @profile
|
| 363 |
+
def rad_plot_poly(num: int, hd_point: list, tlen: list, theta: float) -> list:
    """
    Project polylines onto the radial axes and return each polyline's centroid.

    Each polyline value is scaled by its axis length, placed at that axis'
    angle, and the centroid is the mean of the resulting (x, y) points.

    Parameters:
        num (int): Number of points (axes) per polyline.
        hd_point (list): Polylines; each a sequence of `num` values.
        tlen (list): Length of each axis (from rad_plot_axes).
        theta (float): Angle between consecutive axes.

    Returns:
        list: [x, y] centroid per polyline.
    """
    # Removed a leftover debug print ("Red - Resources ") that polluted
    # stdout on every call.
    coordinates = []
    for point in hd_point:
        xs = []
        ys = []
        for p in range(num):
            radius = point[p] * tlen[p]
            xs.append(radius * math.cos(p * theta))
            ys.append(radius * math.sin(p * theta))
        coordinates.append([sum(xs) / num, sum(ys) / num])
    return coordinates
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
def push_topics_to_db(topics: pd.DataFrame, topic_embeddings: list, topic_polylines: pd.DataFrame, course_id: str):
    """
    Push topics, their embeddings, and polylines to the database.

    Computes each topic's (x, y) centroid from its polyline via the radial
    plot helpers, then inserts all Topic rows in a single transaction.

    Parameters:
        topics (pd.DataFrame): DataFrame with 'name', 'description', 'tokens'.
        topic_embeddings (list): One embedding per topic, aligned with `topics`.
        topic_polylines (pd.DataFrame): DataFrame with a 'polyline' column.
        course_id (str): Unique identifier for the course.
    """
    # Debug: show the aligned lengths of topics, polylines, and embeddings.
    print(len(topics), len(topic_polylines), len(topic_embeddings))

    # Number of radial axes = number of values in one polyline.
    feature_length = len(topic_polylines["polyline"][0])

    # Generate radial plot axes and project polylines to centroid coordinates.
    tlen, theta = rad_plot_axes(feature_length, 1, 1)
    centroid_list = rad_plot_poly(
        feature_length, topic_polylines["polyline"], tlen, theta)

    # Collect all Topic rows before touching the session.
    all_topics = []

    with app.app_context():
        for i in range(len(topics)):
            topic = Topic(
                name=topics["name"][i],
                description=topics["description"][i],
                keywords=topics["tokens"][i],
                polyline=topic_polylines["polyline"][i],
                x_coordinate=centroid_list[i][0],
                y_coordinate=centroid_list[i][1],
                course_id=course_id,
                embedding=topic_embeddings[i].tolist()
            )
            all_topics.append(topic)

        # Bulk insert all topics in one commit.
        db.session.add_all(all_topics)
        db.session.commit()

    print("Added topics to DB")
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def get_cord_from_polyline(polylines):
    """
    Convert polylines into centroid coordinates on a unit radial plot.

    Rebuilds the axis lengths for a 1x1 plot (same scheme as rad_plot_axes),
    projects each polyline value onto its axis, and averages the resulting
    points into one (x, y) centroid per polyline.

    Parameters:
        polylines: Non-empty list of equal-length numeric sequences.

    Returns:
        list: [x, y] centroid per polyline.
    """
    # Removed a leftover `print(tlen)` debug line and a dead trailing while
    # loop that only incremented its counter.
    x_max = y_max = 1
    theta = (np.pi / (len(polylines[0]) - 1)) / 2
    diagonal = np.arctan(y_max / x_max)

    # Axis lengths: below-diagonal axes reach x = x_max; the rest mirror them.
    tlen, mirrored = [], []
    b = 0
    while b * theta <= diagonal:
        y_val = x_max * math.tan(b * theta)
        length = math.sqrt(x_max ** 2 + y_val ** 2)
        tlen.append(length)
        if b * theta != diagonal:
            mirrored.append(length)
        b += 1
    tlen.extend(reversed(mirrored))

    coordinates = []
    for polyline in polylines:
        x_values = []
        y_values = []
        for p in range(len(polyline)):
            rlen = polyline[p] * tlen[p]
            x_values.append(rlen * math.cos(p * theta))
            y_values.append(rlen * math.sin(p * theta))
        coordinates.append([mean(x_values), mean(y_values)])
    return coordinates
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
def pushResourcesToDB(resources, resourceembedding, resource_polylines, course_id):
    """
    Insert all course resources into the database.

    Centroid coordinates are computed from beta-stretched polylines (beta=8),
    while the stored polyline column keeps the raw, unstretched values.

    Parameters:
        resources: DataFrame-like with per-resource columns ('name',
            'description', 'tokens', 'module_id', 'submodule_id', 'module',
            'index', 'links').
        resourceembedding: Resource embeddings (currently not persisted — see
            the commented-out `embedding=` line below).
        resource_polylines: Raw polyline per resource, aligned with `resources`.
        course_id: Course the resources belong to.
    """
    # Debug: show the aligned lengths of the three inputs.
    print(len(resources), len(resource_polylines), len(resourceembedding))
    # beta=8 here must stay in sync with the `beta=8` column value set on
    # each Resource row below.
    beta_polylines=create_beta_polylines(resource_polylines,8)
    feature_length = len(resource_polylines[0])
    (tlen, theta) = rad_plot_axes(feature_length, 1, 1)
    # Centroids come from the stretched shape so plotted positions match the
    # stretched polylines.
    centroid_list = rad_plot_poly(
        feature_length, beta_polylines, tlen, theta)
    allresources = []
    with app.app_context():
        for i in range(len(resources)):
            new_resource = Resource(
                name=resources["name"][i],
                description=resources["description"][i],
                keywords=resources['tokens'][i],
                polyline=resource_polylines[i],
                x_coordinate=centroid_list[i][0],
                y_coordinate=centroid_list[i][1],
                course_id=course_id,
                type=1,
                module_id=resources["module_id"][i],
                submodule_id=resources["submodule_id"][i],
                module=resources["module"][i],
                index=resources["index"][i],
                # embedding=resourceembedding[i],
                link=resources['links'][i],
                beta=8
            )
            # print(new_resource.to_dict())
            allresources.append(new_resource)
            # db.session.add(new_resource)
            # db.session.commit()
        # Bulk insert all resources in one commit.
        db.session.add_all(allresources)
        db.session.commit()
    print("added resources to the DB")
    # breakpoint()
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
# Extract keyphrases for every document with KeyBERT.
def create_keywords_list(content_list, num_keywords=10):
    """
    Extract KeyBERT keyphrases for each document.

    Parameters:
        content_list: Iterable of document texts.
        num_keywords (int): Keyphrases to keep per document. Default 10.

    Returns:
        tuple: (all_keywords_list, all_weight_list) — per document, the list
        of keyphrases and the mean of their KeyBERT relevance scores.
    """
    kw_model = KeyBERT(model='all-mpnet-base-v2')
    all_keywords_list = []
    all_weight_list = []
    for document in content_list:
        scored = dict(kw_model.extract_keywords(
            document, keyphrase_ngram_range=(1, 2), stop_words='english',
            highlight=False, top_n=num_keywords))
        all_keywords_list.append(list(scored.keys()))
        scores = list(scored.values())
        # Document weight = mean keyphrase relevance score.
        all_weight_list.append(sum(scores) / len(scores))
    return all_keywords_list, all_weight_list
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
# Embed keyword lists with a pre-trained BERT model's [CLS] representation.
def create_embeddings_list(l):
    """
    Embed each element of `l` with BERT's [CLS] representation.

    Parameters:
        l: Iterable where each element is text (or a list of texts) accepted
           by the BERT tokenizer.

    Returns:
        list: Per input, the [CLS] embeddings as a nested list of floats.
    """
    model_name = 'bert-base-uncased'
    tokenizer = BertTokenizer.from_pretrained(model_name)
    bert = BertModel.from_pretrained(model_name)

    cls_embeddings = []
    for keywords in l:
        encoded = tokenizer(
            keywords, padding=True, truncation=True, return_tensors="pt")
        # Inference only — no gradients needed.
        with torch.no_grad():
            outputs = bert(**encoded)
        # Position 0 of the last hidden state is the [CLS] token.
        cls_vectors = outputs.last_hidden_state[:, 0, :].numpy().tolist()
        cls_embeddings.append(cls_vectors)
    return cls_embeddings
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def create_polyline(l, course_id):
    """
    Build a topic-similarity polyline for each input embedding.

    Parameters:
        l: Iterable of embedding vectors (one per learner/document).
        course_id: Course whose stored topic embeddings are compared against.

    Returns:
        list: Per input embedding, a list of cosine similarities rescaled
        from [-1, 1] to [0, 1] against every topic embedding for the course.
    """
    all_polylines = []
    # NOTE(review): query(Topic.embedding).all() returns one-element row
    # tuples, not bare embedding vectors — confirm get_cos_sim handles that
    # shape.
    embeddings = db.session.query(
        Topic.embedding).filter_by(course_id=course_id).all()
    topic_embeddings = embeddings
    for keybert_embeddings in l:
        docVector = keybert_embeddings
        polyline = []
        for j in range(len(topic_embeddings)):
            wordVector = topic_embeddings[j]
            # find cosine similarity between the learner embeddings and the topic embeddings
            cos_sim = (get_cos_sim(wordVector, docVector) + 1) / 2
            polyline.append(cos_sim)
        all_polylines.append(polyline)
    return all_polylines
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def pushQuizToResourceInDB(
        name, description, keywords, polyline,
        x_coordinate, y_coordinate, course_id,
        module_id, submodule_id, module, index,
        link, beta, type=1):
    """
    Persist a quiz as a single Resource row.

    Parameters mirror the Resource columns directly; `type` defaults to 1.
    """
    quiz_resource = Resource(
        name=name,
        description=description,
        keywords=keywords,
        polyline=polyline,
        x_coordinate=x_coordinate,
        y_coordinate=y_coordinate,
        course_id=course_id,
        type=type,
        module_id=module_id,
        submodule_id=submodule_id,
        module=module,
        index=index,
        link=link,
        beta=beta
    )

    # Insert and commit inside the Flask application context.
    with app.app_context():
        db.session.add(quiz_resource)
        db.session.commit()

    print("Quiz resource added to the DB")
|
backend/Inspiration/modelsRoutes.py
ADDED
|
@@ -0,0 +1,1457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gc
|
| 2 |
+
import json
|
| 3 |
+
import math
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
import numpy as np
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import pdfplumber
|
| 10 |
+
import torch
|
| 11 |
+
from flask import jsonify, request, send_from_directory
|
| 12 |
+
from utils import is_valid_id
|
| 13 |
+
from repository import add_ta_from_user
|
| 14 |
+
from dbModels import TAT, Activity, Contribution, Course, Enroll, Learner, Module, Question, Quiz, Resource, Topic, UserQuiz, db, Description,ExitPoint
|
| 15 |
+
from init import app
|
| 16 |
+
from keybert import KeyBERT
|
| 17 |
+
from sentence_transformers import SentenceTransformer
|
| 18 |
+
from sqlalchemy import text
|
| 19 |
+
from sqlalchemy.sql import func
|
| 20 |
+
from transformers import BertModel, BertTokenizer
|
| 21 |
+
from werkzeug.utils import secure_filename
|
| 22 |
+
from youtube_transcript_api import YouTubeTranscriptApi
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# Uploaded files (e.g. contribution documents) are stored under ./uploads,
# resolved against the process working directory at import time.
UPLOAD_FOLDER_NAME = "uploads"
UPLOAD_FOLDER = os.path.join(os.getcwd(), UPLOAD_FOLDER_NAME)  # Save files in a 'uploads' folder in the project directory
os.makedirs(UPLOAD_FOLDER, exist_ok=True)  # Create the folder if it doesn't exist

# Expose the path via Flask config so route handlers can read
# app.config["UPLOAD_FOLDER"] instead of the module-level constant.
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
|
| 30 |
+
|
| 31 |
+
def get_cos_sim(a: np.ndarray, b: np.ndarray) -> float:
    """
    Calculate the cosine similarity between two vectors.

    Parameters:
        a (np.ndarray): First vector.
        b (np.ndarray): Second vector.

    Returns:
        float: Cosine similarity in [-1, 1]; 0.0 when either vector has
        zero magnitude (the original divided by zero in that case).
    """
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    # Guard against zero-length vectors: dividing by a zero norm would
    # raise / produce NaN and poison every downstream polyline.
    if norm_a == 0.0 or norm_b == 0.0:
        return 0.0
    return float(np.dot(a, b) / (norm_a * norm_b))
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
# Routes
|
| 49 |
+
|
| 50 |
+
# Learners
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@app.route('/learners', methods=['GET'])
def get_learners():
    """Return every learner in the system as a JSON list of dicts."""
    return jsonify([row.to_dict() for row in Learner.query.all()])
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@app.route('/learners', methods=['POST'])
def create_learner():
    """
    Create a learner from a JSON payload.

    Expects: name, cgpa, username, password.
    Returns the created learner (201), or 400 when a field is missing
    (the original raised KeyError -> opaque 500).
    """
    data = request.get_json() or {}
    required = ('name', 'cgpa', 'username', 'password')
    missing = [field for field in required if field not in data]
    if missing:
        return jsonify({"error": f"Missing fields: {', '.join(missing)}"}), 400

    new_learner = Learner(
        name=data['name'],
        cgpa=data['cgpa'],
        username=data['username'],
        # NOTE(review): the password is persisted exactly as received
        # (plaintext in this code path) — consider hashing before storage.
        password=data['password'],
    )
    db.session.add(new_learner)
    db.session.commit()
    return jsonify(new_learner.to_dict()), 201
|
| 71 |
+
|
| 72 |
+
# Courses
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@app.route('/course/<int:id>', methods=['GET'])
def get_courses(id):
    """
    Return a single course by primary key.

    Returns the course as JSON, or a 404 error when the id is unknown
    (the original called .to_dict() on None and crashed with a 500).
    """
    course = Course.query.filter_by(id=id).first()
    if course is None:
        return jsonify({"error": "Course not found"}), 404
    return jsonify(course.to_dict())
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@app.route('/courses', methods=['POST'])
def create_course():
    """Create a course; initial_position starts as a zero vector of length topic_count."""
    payload = request.get_json()
    course = Course(
        name=payload['name'],
        description=payload['description'],
        initial_position=[0 for _ in range(payload['topic_count'])],
    )
    db.session.add(course)
    db.session.commit()
    return jsonify(course.to_dict()), 201
|
| 92 |
+
|
| 93 |
+
# Resources
|
| 94 |
+
|
| 95 |
+
@app.route("/resource-types", methods=['GET'])
def get_resource_types():
    """List the resource types the frontend can attach to a course."""
    resource_types = [
        {"type": "0", "name": "PDF"},
        {"type": "1", "name": "Youtube Video"},
        # {"type": "2", "name": "Quiz"}, :)
    ]
    return jsonify(resource_types)
|
| 102 |
+
|
| 103 |
+
@app.route('/resources/<int:id>', methods=['GET'])
def get_resources(id):
    """Return all resources belonging to course `id` as a JSON list."""
    rows = Resource.query.filter_by(course_id=id)
    return jsonify([row.to_dict() for row in rows])
|
| 109 |
+
|
| 110 |
+
@app.route('/specific_resource/<int:id>', methods=['GET'])
def get_specifc_resource(id):
    """
    Return a single resource by primary key.

    Returns 404 instead of crashing when the id does not exist
    (the original called .to_dict() on None -> 500).
    """
    resource = Resource.query.filter_by(id=id).first()
    if resource is None:
        return jsonify({"error": "Resource not found"}), 404
    return jsonify(resource.to_dict())
|
| 114 |
+
|
| 115 |
+
@app.route('/resources', methods=['POST'])
def create_resource():
    """Create a resource for a course from the posted JSON body."""
    payload = request.get_json()
    resource = Resource(
        name=payload['name'],
        description=payload['description'],
        keywords=payload['keywords'],
        polyline=payload['polyline'],
        x_coordinate=payload['x_coordinate'],
        y_coordinate=payload['y_coordinate'],
        course_id=payload['course_id'],
        type=payload['type'],
        # embedding=payload['embedding']  # intentionally not persisted here
    )
    db.session.add(resource)
    db.session.commit()
    return jsonify(resource.to_dict()), 201
|
| 133 |
+
|
| 134 |
+
# Topics
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@app.route('/topics/<int:id>', methods=['GET'])
def get_topics(id):
    """Return all topics belonging to course `id` as a JSON list."""
    rows = Topic.query.filter_by(course_id=id)
    return jsonify([row.to_dict() for row in rows])
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@app.route('/topics', methods=['POST'])
def create_topic():
    """Create a topic (with precomputed keywords, polyline and embedding)."""
    payload = request.get_json()
    topic = Topic(
        name=payload['name'],
        description=payload['description'],
        keywords=payload['keywords'],
        polyline=payload['polyline'],
        x_coordinate=payload['x_coordinate'],
        y_coordinate=payload['y_coordinate'],
        course_id=payload['course_id'],
        embedding=payload['embedding'],
    )
    db.session.add(topic)
    db.session.commit()
    return jsonify(topic.to_dict()), 201
|
| 159 |
+
|
| 160 |
+
# Enrolls
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@app.route('/enrolls/<int:id>', methods=['GET'])
def get_enroll(id):
    """
    Return one enrollment by primary key.

    Returns 404 when the id is unknown (the original dereferenced
    None and failed with a 500).
    """
    enroll = Enroll.query.get(id)
    if enroll is None:
        return jsonify({"error": "Enroll not found"}), 404
    return jsonify(enroll.to_dict())
|
| 167 |
+
|
| 168 |
+
@app.route('/teach/<int:id>', methods=['GET'])
def get_teach(id):
    """
    Return one TA teaching record by primary key.

    Returns 404 when the id is unknown (the original dereferenced
    None and failed with a 500).
    """
    teach = TAT.query.get(id)
    if teach is None:
        return jsonify({"error": "Teach record not found"}), 404
    return jsonify(teach.to_dict())
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# Activities
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@app.route('/activities/<int:id>', methods=['GET'])
def get_activities(id):
    """Return every activity logged under enrollment `id`."""
    rows = Activity.query.filter_by(enroll_id=id)
    return jsonify([row.to_dict() for row in rows])
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@app.route('/activities', methods=['POST'])
def create_activity():
    """
    Log an activity for an enrollment.

    The activity is linked either to a resource (when `resource_id` is
    present in the payload) or to a contribution (`contribution_id`).
    The original duplicated every shared constructor field across the two
    branches; they are folded into one shared kwargs dict here.
    Expects `time` formatted as '%Y-%m-%d %H:%M:%S'.
    """
    data = request.get_json()
    common = dict(
        time=datetime.strptime(data['time'], '%Y-%m-%d %H:%M:%S'),
        type=data['type'],
        name=data['name'],
        link=data['link'],
        enroll_id=data['enroll_id'],
        x_coordinate=data['x_coordinate'],
        y_coordinate=data['y_coordinate'],
    )
    if data.get('resource_id') is not None:
        new_activity = Activity(resource_id=data['resource_id'], **common)
    else:
        new_activity = Activity(contribution_id=data['contribution_id'], **common)
    db.session.add(new_activity)
    db.session.commit()
    return jsonify(new_activity.to_dict()), 201
|
| 213 |
+
|
| 214 |
+
# Contributions
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
@app.route('/contributions/<int:id>', methods=['GET'])
def get_contributions(id):
    """Return all contributions made under enrollment `id`."""
    rows = Contribution.query.filter_by(enroll_id=id)
    return jsonify([row.to_dict() for row in rows])
|
| 222 |
+
|
| 223 |
+
@app.route('/contributions/view/<int:id>', methods=['GET'])
def get_contribution_view(id):
    """
    Return one contribution by primary key.

    Returns 404 when the id is unknown (the original dereferenced
    None and failed with a 500).
    """
    contribution = Contribution.query.filter_by(id=id).first()
    if contribution is None:
        return jsonify({"error": "Contribution not found"}), 404
    return jsonify(contribution.to_dict())
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@app.route('/contributions', methods=['POST'])
def create_contribution():
    """Record a learner contribution; `submitted_on` uses '%Y-%m-%d %H:%M:%S'."""
    payload = request.get_json()
    contribution = Contribution(
        enroll_id=payload['enroll_id'],
        submitted_on=datetime.strptime(payload['submitted_on'], '%Y-%m-%d %H:%M:%S'),
        file_path=payload['file_path'],
        description=payload['description'],
        prev_polyline=payload['prev_polyline'],
        polyline=payload['polyline'],
        x_coordinate=payload['x_coordinate'],
        y_coordinate=payload['y_coordinate'],
        embedding=payload['embedding'],
    )
    db.session.add(contribution)
    db.session.commit()
    return jsonify(contribution.to_dict()), 201
|
| 248 |
+
|
| 249 |
+
# Quizzes
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
@app.route('/quizzes', methods=['GET'])
def get_quizzes():
    """Return every quiz as a JSON list of dicts."""
    return jsonify([quiz.to_dict() for quiz in Quiz.query.all()])
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
# Questions
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
@app.route('/questions', methods=['GET'])
def get_questions():
    """Return all questions for the quiz given by the `quiz_id` query arg."""
    quiz_id = request.args.get('quiz_id')
    rows = Question.query.filter_by(quiz_id=quiz_id).all()
    return jsonify([row.to_dict() for row in rows])
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
@app.route('/questions', methods=['POST'])
def create_question():
    """Create a quiz question; options a-d are optional, correct_answer is required."""
    payload = request.get_json()
    question = Question(
        quiz_id=payload['quiz_id'],
        question_text=payload['question_text'],
        option_a=payload.get('option_a'),
        option_b=payload.get('option_b'),
        option_c=payload.get('option_c'),
        option_d=payload.get('option_d'),
        correct_answer=payload['correct_answer'],
    )
    db.session.add(question)
    db.session.commit()
    return jsonify(question.to_dict()), 201
|
| 283 |
+
|
| 284 |
+
# UserQuiz : log of quizzes attempted by various users
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
@app.route('/user_quizzes', methods=['GET'])
def get_user_quizzes():
    """Return all quiz attempts for the user given by the `user_id` query arg."""
    user_id = request.args.get('user_id')
    attempts = UserQuiz.query.filter_by(user_id=user_id).all()
    return jsonify([attempt.to_dict() for attempt in attempts])
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
@app.route('/user_quizzes', methods=['POST'])
def create_user_quiz():
    """Record a quiz attempt; `attempt_date` is optional ('%Y-%m-%d %H:%M:%S')."""
    data = request.get_json()
    attempt_date = None
    if 'attempt_date' in data:
        attempt_date = datetime.strptime(data['attempt_date'], '%Y-%m-%d %H:%M:%S')
    user_quiz = UserQuiz(
        user_id=data['user_id'],
        quiz_id=data['quiz_id'],
        score=data.get('score'),
        status=data['status'],
        attempt_date=attempt_date,
    )
    db.session.add(user_quiz)
    db.session.commit()
    return jsonify(user_quiz.to_dict()), 201
|
| 308 |
+
|
| 309 |
+
# ------------------------------------------------------Teacher APIs
|
| 310 |
+
|
| 311 |
+
@app.route('/course_module_mappings/<int:course_id>', methods=['GET'])
def course_module_mappings(course_id):
    """
    Return two lookup tables for a course's modules:
      - "ModName_ModID": module name -> module id
      - "ModID_SubModCount": module id -> count of resource rows grouped
        under that module

    Responds 500 with the exception text on any failure.
    """
    try:
        # Query for ModName_ModID mapping
        mod_name_mod_id_query = (
            db.session.query(Resource.module, Resource.module_id)
            .filter(Resource.course_id == course_id)
            .distinct()
            .all()
        )
        ModName_ModID = {row.module: row.module_id for row in mod_name_mod_id_query}

        # Query for ModID_SubModCount mapping
        # NOTE(review): func.count(Resource.submodule_id) counts resource rows
        # per module, not distinct submodules — confirm whether
        # count(distinct submodule_id) was the intent given the name.
        mod_id_submod_count_query = (
            db.session.query(Resource.module_id, func.count(Resource.submodule_id))
            .filter(Resource.course_id == course_id)
            .group_by(Resource.module_id)
            .all()
        )
        ModID_SubModCount = {row.module_id: row[1] for row in mod_id_submod_count_query}

        # Return the mappings as JSON
        return jsonify({
            "ModName_ModID": ModName_ModID,
            "ModID_SubModCount": ModID_SubModCount
        })
    except Exception as e:
        return jsonify({"error": str(e)}), 500
|
| 339 |
+
|
| 340 |
+
# Function to create topic embeddings
|
| 341 |
+
def create_topic_embeddings(topics: pd.DataFrame) -> list:
    """
    Encode each topic's description into a sentence embedding.

    Parameters:
        topics (pd.DataFrame): Must contain a "description" column.

    Returns:
        list: One embedding (list of floats, JSON-serializable) per row,
        in row order.
    """
    model = SentenceTransformer('bert-base-nli-mean-tokens')
    topic_embeddings = []

    # Iterate positionally: the original indexed with topics.loc[i, ...],
    # which breaks whenever the DataFrame index is not the default
    # RangeIndex (e.g. after a filter or concat upstream).
    for description in topics["description"].tolist():
        embedding = model.encode(description, convert_to_tensor=True)
        topic_embeddings.append(embedding.cpu().numpy().tolist())  # list for JSON compatibility
        # Free the tensor eagerly; encodings for long courses add up.
        del embedding
        gc.collect()

    return topic_embeddings
|
| 352 |
+
|
| 353 |
+
# Function to create polylines from embeddings
|
| 354 |
+
def create_topic_polylines(topics: pd.DataFrame, topic_embeddings: list) -> pd.DataFrame:
    """
    Build a similarity polyline for every topic.

    Each polyline holds the topic's similarity to every topic in the
    course: 1 against any topic with the same name, otherwise the cosine
    similarity mapped from [-1, 1] into [0, 1]. Module ids follow the
    fixed split: first 12 topics -> module 1, next 8 -> module 2, rest -> 3.

    Returns:
        pd.DataFrame with columns "topic", "module", "polyline".
    """
    names = topics["name"].tolist()
    count = len(topics)
    module_ids = [1] * 12 + [2] * 8 + [3] * (count - 20)

    rows = {"topic": [], "module": [], "polyline": []}
    for i, name_i in enumerate(names):
        polyline = []
        for j, name_j in enumerate(names):
            if name_i == name_j:
                polyline.append(1)
            else:
                # Shift cosine similarity from [-1, 1] into [0, 1].
                sim = (get_cos_sim(topic_embeddings[i], topic_embeddings[j]) + 1) / 2
                polyline.append(sim)
        rows["topic"].append(name_i)
        rows["module"].append(module_ids[i])
        rows["polyline"].append(polyline)

    return pd.DataFrame(rows)
|
| 382 |
+
|
| 383 |
+
# Function to create a list of keywords from the topic descriptions
|
| 384 |
+
def create_keywords_list(content_list, num_keywords=10):
    """
    Extract KeyBERT keyphrases for each document.

    Parameters:
        content_list: iterable of document strings.
        num_keywords: number of keyphrases to keep per document.

    Returns:
        tuple: (keyphrases per document, mean keyphrase score per
        document — 0 when a document yields no keyphrases).
    """
    kw_model = KeyBERT(model='all-mpnet-base-v2')
    all_keywords_list = []
    all_weight_list = []

    for content in content_list:
        scored = dict(kw_model.extract_keywords(
            content,
            keyphrase_ngram_range=(1, 2),
            stop_words='english',
            highlight=False,
            top_n=num_keywords,
        ))
        phrases = list(scored.keys())
        scores = list(scored.values())

        # Guard against documents that yield no keyphrases at all.
        if isinstance(scores, (list, np.ndarray)) and len(scores) > 0:
            mean_score = np.mean(scores)
        else:
            mean_score = 0

        all_keywords_list.append(phrases)
        all_weight_list.append(mean_score)

    return all_keywords_list, all_weight_list
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def rad_plot_axes(num: int, x_max: float, y_max: float):
    """
    Generate radial plot axes fanned across the first quadrant.

    Axes are spaced at angular step theta = pi / (2 * (num - 1)) and
    clipped to the bounding box [0, x_max] x [0, y_max].

    Parameters:
        num (int): Number of axes.
        x_max (float): Maximum x-coordinate.
        y_max (float): Maximum y-coordinate.

    Returns:
        tuple: (tlen, theta) — the length of each axis and the angular step.
    """
    empt_arr = []  # Temporary container for y-coordinate calculations
    xstop = []  # List to store x-coordinate of the axes endpoints
    ystop = []  # List to store y-coordinate of the axes endpoints
    tlen = []  # List to store the length of axes
    ttempl = []  # Temporary container for reversed lengths
    theta = ((np.pi) / (num - 1)) / 2  # Calculate theta
    b = 0

    # Axes at or below the box diagonal: endpoint lies on the x = x_max edge.
    while (b * theta) <= (np.arctan(y_max / x_max)):
        y_val = x_max * math.tan(b * theta)
        empt_arr.append(y_val)
        ystop.append(y_val)
        ttemp = math.sqrt((x_max ** 2) + (y_val ** 2))
        tlen.append(ttemp)
        # NOTE(review): exact float equality — an axis landing precisely on
        # the diagonal is excluded from the mirrored length list; confirm
        # this is intentional, since rounding can make the comparison
        # unreliable for arbitrary num/x_max/y_max combinations.
        if (b * theta) != np.arctan(y_max / x_max):
            ttempl.append(ttemp)
        b += 1

    # Remaining axes end on the y = y_max edge.
    while b < num:
        ystop.append(y_max)
        b += 1

    # Mirror the below-diagonal lengths for the above-diagonal axes.
    tlen.extend(list(reversed(ttempl)))
    xstop = list(reversed(ystop))

    # Plotting is commented out for modularity; can be enabled as needed
    # for d in range(num):
    #     x_values = [0, xstop[d]]
    #     y_values = [0, ystop[d]]
    #     plt.plot(x_values, y_values, label=f'Axis {d+1}', alpha=1, linewidth=0.2)

    return tlen, theta
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def rad_plot_poly(num: int, hd_point: list, tlen: list, theta: float) -> list:
    """
    Project similarity polylines onto radial axes and return centroids.

    Parameters:
        num (int): Number of axes / values per polyline.
        hd_point (list): Polylines; each entry holds `num` scalar values.
        tlen (list): Length of each axis.
        theta (float): Angular step between consecutive axes.

    Returns:
        list: [x, y] centroid for every polyline, in input order.
    """
    coordinates = []

    for point in hd_point:
        xs, ys = [], []
        for axis in range(num):
            # Scale the value along its axis, then decompose into x/y.
            radius = point[axis] * tlen[axis]
            xs.append(radius * math.cos(axis * theta))
            ys.append(radius * math.sin(axis * theta))

        # Plotting is commented out for modularity; can be enabled as needed
        # plt.plot(xs, ys, label='Polyline', alpha=0.6, linewidth=0.5)

        coordinates.append([sum(xs) / num, sum(ys) / num])

    # Print statement for debugging
    print("Red - Resources ")

    return coordinates
|
| 488 |
+
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
@app.route('/add-module', methods=['POST'])
def add_modules():
    """Create a module under a course; requires `course_id` and `name` in the JSON body."""
    try:
        payload = request.get_json()
        course_id = payload.get('course_id')
        name = payload.get('name')

        if not course_id or not name:
            return jsonify({"error": "course_id and name are required"}), 400

        module = Module(
            name=name,
            course_id=course_id,
        )
        db.session.add(module)
        db.session.commit()
        return jsonify(module.to_dict()), 201
    except Exception as e:
        app.logger.error(f"Error in add_modules: {str(e)}", exc_info=True)
        return jsonify({"error": "Server error", "details": str(e)}), 500
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
# Route for creating topics related to a newly created course
@app.route('/new-course-topics', methods=['POST'])
def create_new_topics_for_new_course():
    """
    Create and persist all topics for a new course.

    Expects JSON with `course_id` and `topics` (a list of dicts; each is
    read for name, description, module_id). For every topic this computes
    a sentence embedding, a similarity polyline against all topics,
    KeyBERT keywords, and a 2-D centroid via the radial plot helpers,
    then inserts the rows in one transaction.
    Returns 201 on success, 400 on bad input, 500 on processing failure.
    """
    try:
        data = request.get_json()

        # Validate input
        course_id = data.get('course_id')
        topics_data = data.get('topics')

        if not course_id or not topics_data or not isinstance(topics_data, list):
            return jsonify({"error": "Invalid or missing 'course_id' or 'topics'"}), 400

        topics = pd.DataFrame(topics_data)

        # Generate embeddings, polylines, and keywords
        topic_embeddings = create_topic_embeddings(topics)
        topic_polylines = create_topic_polylines(topics, topic_embeddings)
        keywords, weights = create_keywords_list(topics["description"].tolist())

        # Ensure polylines exist before calculating centroids
        if not topic_polylines.empty and "polyline" in topic_polylines:
            feature_length = len(topic_polylines["polyline"][0])
            # Axes span the unit box; each centroid is the mean of a
            # polyline's radial projection onto those axes.
            tlen, theta = rad_plot_axes(feature_length, 1, 1)
            centroid_list = rad_plot_poly(feature_length, topic_polylines["polyline"], tlen, theta)
        else:
            return jsonify({"error": "Failed to generate topic polylines"}), 500

        # Check if generated lists match topic count
        if len(topic_embeddings) != len(topics) or len(topic_polylines) != len(topics) or len(centroid_list) != len(topics):
            return jsonify({"error": "Mismatch in topic processing results"}), 500

        # Insert topics into the database
        for i in range(len(topics)):
            new_topic = Topic(
                name=topics.loc[i, 'name'],
                description=topics.loc[i, 'description'],
                module_id=topics.loc[i, 'module_id'],
                keywords=keywords[i] if i < len(keywords) else None,
                polyline=topic_polylines.loc[i, 'polyline'] if i < len(topic_polylines) else None,
                course_id=course_id,
                x_coordinate=centroid_list[i][0] if i < len(centroid_list) else None,
                y_coordinate=centroid_list[i][1] if i < len(centroid_list) else None,
                embedding=topic_embeddings[i] if i < len(topic_embeddings) else None
            )
            db.session.add(new_topic)

        db.session.commit()

        return jsonify({"message": "Topics created successfully"}), 201

    except Exception as e:
        app.logger.error(f"Error in create_new_topics_for_new_course: {str(e)}", exc_info=True)
        return jsonify({"error": "Server error", "details": str(e)}), 500
|
| 569 |
+
|
| 570 |
+
def extract_transcript(video_id):
    """
    Fetch and flatten the YouTube transcript for `video_id`.

    Returns the transcript as a single space-joined string, or an
    "Error: ..." string when the transcript API fails.
    """
    try:
        segments = YouTubeTranscriptApi.get_transcript(video_id)
        return " ".join(segment['text'] for segment in segments).strip()
    except Exception as e:
        return f"Error: {str(e)}"
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
def get_youtube_video_id(url):
    """
    Pull the 11-character video id out of a YouTube URL.

    Returns the id, or the string "Invalid YouTube URL" when the URL
    does not contain one.
    """
    # Matches the id after "v=" or after a path slash (youtu.be short links).
    match = re.search(r"(?:v=|\/)([0-9A-Za-z_-]{11}).*", url)
    return match.group(1) if match else "Invalid YouTube URL"
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def create_resource_embeddings(keywords):
    """
    Embed each keyword string with BERT, using the CLS-token vector.

    Parameters:
        keywords: list of keyword strings; blank or non-string entries
            are skipped with a warning printed to stdout.

    Returns:
        list: One embedding (list of floats) per valid keyword, or []
        when the input is empty or the model fails to load.
    """
    if not keywords:
        print("ERROR: Received empty keywords list!")
        return []

    # NOTE(review): the tokenizer and model are re-loaded from disk on
    # every call; consider caching them at module level for throughput.
    model_name = 'bert-base-uncased'
    try:
        tokenizer = BertTokenizer.from_pretrained(model_name)
        model = BertModel.from_pretrained(model_name)
    except Exception as e:
        print(f"ERROR: Failed to load BERT model -> {e}")
        return []

    model.eval()
    keybert_embeddings_list = []

    for keyword in keywords:
        if not isinstance(keyword, str) or not keyword.strip():
            print(f"WARNING: Skipping invalid keyword -> {keyword}")
            continue

        try:
            tokenized_inputs = tokenizer(keyword, padding=True, truncation=True, return_tensors="pt")
            with torch.no_grad():
                outputs = model(**tokenized_inputs)

            # CLS token (position 0) is taken as the phrase-level embedding.
            embeddings = outputs.last_hidden_state[:, 0, :].detach().cpu().numpy()

            # Ensure embeddings are always lists of lists
            if isinstance(embeddings, np.ndarray):
                embeddings = embeddings.tolist()

            if isinstance(embeddings, list) and isinstance(embeddings[0], list):
                keybert_embeddings_list.append(embeddings[0])
            else:
                print(f"ERROR: Unexpected embedding format for '{keyword}', skipping.")
                continue

        except Exception as e:
            print(f"ERROR: Failed to generate embedding for '{keyword}' -> {e}")
            continue

    return keybert_embeddings_list
|
| 635 |
+
|
| 636 |
+
def get_topic_embedding(topic_id):
    """
    Load a topic's stored embedding and normalize it to a list of vectors.

    Returns:
        list | None: A list of embedding vectors (a stored single flat
        vector is wrapped in a one-element list), or None when the topic
        is missing, has no embedding, or the stored format is unknown.

    The original contained duplicated, unreachable `return None`
    statements; this version flattens the control flow with guard
    clauses while preserving every reachable outcome.
    """
    topic = Topic.query.filter_by(id=topic_id).first()
    if topic is None or topic.embedding is None:
        return None

    # Normalize the storage format: NumPy array -> plain Python list.
    if isinstance(topic.embedding, np.ndarray):
        topic_embeddings = topic.embedding.tolist()
    elif isinstance(topic.embedding, list):
        topic_embeddings = topic.embedding
    else:
        print("ERROR: Unexpected topic embedding format!")
        return None

    if isinstance(topic_embeddings, list):
        if all(isinstance(vec, (list, np.ndarray)) for vec in topic_embeddings):
            return topic_embeddings  # already a list of vectors
        elif isinstance(topic_embeddings[0], (float, int)):
            # A single flat vector was stored; wrap it so callers can
            # always iterate over a list of vectors.
            print("WARNING: Detected single topic embedding, wrapping it in a list.")
            return [topic_embeddings]

    return None
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def create_resource_polylines(topic_embeddings, keybert_embeddings_list, beta):
    """Build similarity polylines for each document embedding against the topic axes.

    For every document vector, computes a normalized cosine similarity against
    each topic vector, averages it into one value per document, replicates that
    value across all axes, then applies a beta "sharpening" step clamped to [0, 1].

    Args:
        topic_embeddings: list of topic vectors (one per radial axis).
        keybert_embeddings_list: list of document embedding vectors.
        beta: sharpening factor (coerced to float).

    Returns:
        A list of polylines (one list of floats per valid document), or [] when
        inputs are empty or no valid polylines could be produced.
    """
    if not keybert_embeddings_list or not topic_embeddings:
        print("ERROR: Empty embeddings provided")
        return []

    all_polylines = []

    for i, docVector in enumerate(keybert_embeddings_list):
        if not isinstance(docVector, list):
            print(f"ERROR: Skipping invalid embedding at index {i} -> Expected list, got {type(docVector)}")
            continue

        polyline = []
        for idx, wordVector in enumerate(topic_embeddings):
            if not isinstance(wordVector, list):
                print(f"ERROR: Invalid topic embedding at [{idx}] -> Expected list, got {type(wordVector)}")
                continue

            cos_sim = get_cos_sim(wordVector, docVector)
            # Map cosine similarity from [-1, 1] to [0, 1]; treat None as 0.
            cos_sim = (cos_sim + 1) / 2 if cos_sim is not None else 0
            polyline.append({'x': idx, 'y': cos_sim})

        # FIX: previously an empty polyline (every topic vector invalid) was
        # appended and later caused a ZeroDivisionError in the averaging step.
        # Skip such documents instead.
        if not polyline:
            print(f"WARNING: No valid similarities for document at index {i}, skipping")
            continue

        all_polylines.append(polyline)

    if not all_polylines:
        print("WARNING: No polylines were generated")
        return []

    # Averaging polylines across multiple keywords.
    # NOTE(review): this replicates the document's MEAN similarity across every
    # axis, discarding the per-axis variation computed above, which also makes
    # the beta sharpening below a no-op (val - mean == 0). Preserved as-is
    # because downstream coordinates depend on it — confirm whether per-axis
    # values (i.e. [p['y'] for p in polyline]) were intended.
    new_polylines = []
    for polyline in all_polylines:
        averaged_polyline = [sum(p['y'] for p in polyline) / len(polyline) for _ in range(len(topic_embeddings))]
        new_polylines.append(averaged_polyline)

    beta = float(beta)
    # Push each value away from the polyline mean by factor beta, clamped to [0, 1].
    beta_polylines = [[max(0, min(val + beta * (val - np.mean(polyline)), 1)) for val in polyline] for polyline in new_polylines]

    return beta_polylines
@app.route('/new-resources-topics', methods=['POST'])
def create_new_resources_for_new_course():
    """Create a Resource row for a course/module from a YouTube link.

    Expects a JSON body with: name, course_id, module_id, type, link, module.
    Pipeline: validate the link -> fetch its transcript -> extract keywords ->
    embed the keywords -> compare against the module's topic embeddings ->
    compute radial-plot (x, y) coordinates -> persist a new Resource.

    Returns 201 on success, 400 on validation/pipeline failures, 500 on
    unexpected errors (with the session rolled back).
    """
    try:
        data = request.get_json()

        name = data.get('name')
        course_id = data.get('course_id')
        module_id = data.get('module_id')
        res_type = data.get('type')
        link = data.get('link')
        module = data.get('module')

        # `name` is intentionally not required here (it may be None).
        if not all([course_id, module_id, res_type, link]):
            return jsonify({"error": "Missing required fields"}), 400

        # get_youtube_video_id signals failure via a sentinel string, not an exception.
        video_id = get_youtube_video_id(link)
        if video_id == "Invalid YouTube URL":
            return jsonify({"error": "Invalid YouTube URL"}), 400

        # extract_transcript likewise returns an "Error:"-prefixed string on failure.
        transcript = extract_transcript(video_id)
        if transcript.startswith("Error:"):
            return jsonify({"error": f"Failed to fetch transcript: {transcript}"}), 400

        try:
            # `weights` is unused below; only the keyword strings matter here.
            keywords, weights = create_keywords_list([transcript])
            # Flatten a nested per-document keyword list into one flat list.
            if keywords and isinstance(keywords[0], list):
                keywords = [word for sublist in keywords for word in sublist]

        except Exception as e:
            return jsonify({"error": f"Failed to extract keywords: {str(e)}"}), 400

        if not keywords:
            return jsonify({"error": "No keywords extracted"}), 400

        resource_embeddings = create_resource_embeddings(keywords)
        if not resource_embeddings:
            return jsonify({"error": "Failed to generate resource embeddings"}), 400

        # NOTE(review): module_id is passed as a Topic id here — confirm module
        # ids and topic ids share the same keyspace.
        topic_embeddings = get_topic_embedding(module_id)

        if topic_embeddings is None:
            return jsonify({"error": "Topic embeddings not found"}), 400

        # Normalize a bare 1-D array into a list of vectors.
        if isinstance(topic_embeddings, np.ndarray) and topic_embeddings.ndim == 1:
            topic_embeddings = [topic_embeddings]

        # The radial plot needs at least two axes; duplicate a lone embedding.
        if len(topic_embeddings) == 1:
            print("WARNING: Only one topic embedding found, duplicating it.")
            topic_embeddings.append(topic_embeddings[0])

        if len(topic_embeddings) < 2:
            return jsonify({"error": "Insufficient topic embeddings"}), 400

        # beta is hard-coded to 8 throughout this module.
        resource_polylines = create_resource_polylines(topic_embeddings, resource_embeddings, 8)
        if not resource_polylines:
            return jsonify({"error": "Generated polylines are empty"}), 400

        # One radial-plot axis per topic embedding; unit plot bounds.
        num_axes = len(topic_embeddings)
        x_max, y_max = 1.0, 1.0

        # Compute axis lengths and angles for the radial plot.
        tlen, theta = rad_plot_axes(num_axes, x_max, y_max)

        # Project each polyline to a 2-D centroid (x, y).
        centroids = rad_plot_poly(num_axes, resource_polylines, tlen, theta)

        # NOTE(review): max(id)+1 is racy under concurrent requests — prefer an
        # autoincrement primary key. `new_resources` below is unused.
        max_id = db.session.query(db.func.max(Resource.id)).scalar() or 0
        new_resources = []

        if centroids:
            x_coordinate, y_coordinate = centroids[0]  # Use only the first centroid
            new_resource = Resource(
                id=max_id + 1,
                name=name,
                description=None,
                keywords=keywords,
                polyline=resource_polylines,
                x_coordinate=x_coordinate,
                y_coordinate=y_coordinate,
                course_id=course_id,
                module_id=module_id,
                submodule_id=None,
                type=res_type,
                link=link,
                index=max_id + 1,
                module=module,
                beta=8
            )

            db.session.add(new_resource)
            db.session.commit()

            return jsonify({"message": "Resource created successfully"}), 201
        else:
            return jsonify({"error": "No valid centroid found"}), 400

    except Exception as e:
        db.session.rollback()
        app.logger.error(f"Error occurred: {e}")
        return jsonify({"error": "Server error", "details": str(e)}), 500
def allowed_file(filename):
    """Return True when *filename* carries a permitted upload extension (PDF only)."""
    permitted = {"pdf"}
    if "." not in filename:
        return False
    # Compare only the text after the final dot, case-insensitively.
    extension = filename.rsplit(".", 1)[1]
    return extension.lower() in permitted
@app.route('/uploads/<string:filename>')
def send_file(filename: str):
    """Serve a previously uploaded PDF from the upload folder.

    NOTE(review): this view shadows ``flask.send_file`` if that name is
    imported at module level — confirm no other code in this file calls it.
    """
    # Sanitize the requested name before touching the filesystem.
    filename = secure_filename(filename)
    if not allowed_file(filename):
        return jsonify({'error': 'invalid filename'}), 400

    filepath = os.path.join(app.config["UPLOAD_FOLDER"], filename)
    if not os.path.exists(filepath):
        return jsonify({'error': 'file not found'}), 404

    return send_from_directory(UPLOAD_FOLDER_NAME, filename)
@app.route("/upload-pdf-resource", methods=["POST"])
|
| 828 |
+
def upload_pdf_resource():
|
| 829 |
+
try:
|
| 830 |
+
if "pdf_file" not in request.files:
|
| 831 |
+
return jsonify({"error": "No PDF file provided"}), 400
|
| 832 |
+
|
| 833 |
+
pdf_file = request.files["pdf_file"]
|
| 834 |
+
if pdf_file.filename == "":
|
| 835 |
+
return jsonify({"error": "No selected file"}), 400
|
| 836 |
+
|
| 837 |
+
if not allowed_file(pdf_file.filename):
|
| 838 |
+
return jsonify({"error": "Invalid file type"}), 400
|
| 839 |
+
|
| 840 |
+
# Secure filename and save file
|
| 841 |
+
filename = secure_filename(pdf_file.filename)
|
| 842 |
+
filepath = os.path.join(app.config["UPLOAD_FOLDER"], filename)
|
| 843 |
+
pdf_file.save(filepath)
|
| 844 |
+
|
| 845 |
+
# Extract metadata
|
| 846 |
+
name = request.form.get("name")
|
| 847 |
+
course_id = request.form.get("course_id")
|
| 848 |
+
module_id = request.form.get("module_id")
|
| 849 |
+
res_type = request.form.get("type")
|
| 850 |
+
module = request.form.get("module")
|
| 851 |
+
|
| 852 |
+
if not all([name, course_id, module_id, res_type]):
|
| 853 |
+
return jsonify({"error": "Missing required fields"}), 400
|
| 854 |
+
|
| 855 |
+
# Extract text from PDF
|
| 856 |
+
extracted_text = ""
|
| 857 |
+
with pdfplumber.open(filepath) as pdf:
|
| 858 |
+
for page in pdf.pages:
|
| 859 |
+
extracted_text += page.extract_text() or ""
|
| 860 |
+
|
| 861 |
+
if not extracted_text.strip():
|
| 862 |
+
return jsonify({"error": "Failed to extract text from PDF"}), 400
|
| 863 |
+
|
| 864 |
+
# Generate keywords
|
| 865 |
+
try:
|
| 866 |
+
keywords, _ = create_keywords_list([extracted_text])
|
| 867 |
+
if keywords and isinstance(keywords[0], list):
|
| 868 |
+
keywords = [word for sublist in keywords for word in sublist]
|
| 869 |
+
except Exception as e:
|
| 870 |
+
return jsonify({"error": f"Failed to extract keywords: {str(e)}"}), 400
|
| 871 |
+
|
| 872 |
+
if not keywords:
|
| 873 |
+
return jsonify({"error": "No keywords extracted"}), 400
|
| 874 |
+
|
| 875 |
+
# Generate embeddings
|
| 876 |
+
resource_embeddings = create_resource_embeddings(keywords)
|
| 877 |
+
if not resource_embeddings:
|
| 878 |
+
return jsonify({"error": "Failed to generate resource embeddings"}), 400
|
| 879 |
+
|
| 880 |
+
topic_embeddings = get_topic_embedding(module_id)
|
| 881 |
+
if topic_embeddings is None:
|
| 882 |
+
return jsonify({"error": "Topic embeddings not found"}), 400
|
| 883 |
+
|
| 884 |
+
if isinstance(topic_embeddings, np.ndarray) and topic_embeddings.ndim == 1:
|
| 885 |
+
topic_embeddings = [topic_embeddings]
|
| 886 |
+
|
| 887 |
+
if len(topic_embeddings) == 1:
|
| 888 |
+
topic_embeddings.append(topic_embeddings[0])
|
| 889 |
+
|
| 890 |
+
if len(topic_embeddings) < 2:
|
| 891 |
+
return jsonify({"error": "Insufficient topic embeddings"}), 400
|
| 892 |
+
|
| 893 |
+
# Generate resource polylines
|
| 894 |
+
resource_polylines = create_resource_polylines(topic_embeddings, resource_embeddings, 8)
|
| 895 |
+
if not resource_polylines:
|
| 896 |
+
return jsonify({"error": "Generated polylines are empty"}), 400
|
| 897 |
+
|
| 898 |
+
num_axes = len(topic_embeddings)
|
| 899 |
+
x_max, y_max = 1.0, 1.0
|
| 900 |
+
tlen, theta = rad_plot_axes(num_axes, x_max, y_max)
|
| 901 |
+
centroids = rad_plot_poly(num_axes, resource_polylines, tlen, theta)
|
| 902 |
+
|
| 903 |
+
max_id = db.session.query(db.func.max(Resource.id)).scalar() or 0
|
| 904 |
+
if centroids:
|
| 905 |
+
x_coordinate, y_coordinate = centroids[0]
|
| 906 |
+
|
| 907 |
+
new_resource = Resource(
|
| 908 |
+
id=max_id + 1,
|
| 909 |
+
name=name,
|
| 910 |
+
description=None,
|
| 911 |
+
keywords=keywords,
|
| 912 |
+
polyline=resource_polylines,
|
| 913 |
+
x_coordinate=x_coordinate,
|
| 914 |
+
y_coordinate=y_coordinate,
|
| 915 |
+
course_id=course_id,
|
| 916 |
+
module_id=module_id,
|
| 917 |
+
submodule_id=None,
|
| 918 |
+
type=res_type,
|
| 919 |
+
link='/' + UPLOAD_FOLDER_NAME + '/' + filename,
|
| 920 |
+
index=max_id + 1,
|
| 921 |
+
module=module,
|
| 922 |
+
beta=8
|
| 923 |
+
)
|
| 924 |
+
|
| 925 |
+
db.session.add(new_resource)
|
| 926 |
+
db.session.commit()
|
| 927 |
+
|
| 928 |
+
return jsonify({"message": "PDF Resource uploaded successfully"}), 201
|
| 929 |
+
else:
|
| 930 |
+
return jsonify({"error": "No valid centroid found"}), 400
|
| 931 |
+
|
| 932 |
+
except Exception as e:
|
| 933 |
+
db.session.rollback()
|
| 934 |
+
app.logger.error(f"Error occurred: {e}")
|
| 935 |
+
return jsonify({"error": "Server error", "details": str(e)}), 500
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
def convert_to_lists(data):
    """Recursively replace every NumPy array inside *data* with a plain Python list.

    Handles arbitrarily nested lists and dicts; any other value is returned
    unchanged.
    """
    if isinstance(data, np.ndarray):
        return data.tolist()
    if isinstance(data, dict):
        converted = {}
        for key, value in data.items():
            converted[key] = convert_to_lists(value)
        return converted
    if isinstance(data, list):
        return [convert_to_lists(element) for element in data]
    # Scalars, strings, None, etc. pass through untouched.
    return data
@app.route('/ta-gi-summary',methods=['POST'])
def changed_by_summary():
    """Store a TA's summary description and (re-)enroll them in a course.

    Expects JSON body with: ta_id, course_id, description. Extracts keywords
    from the description, embeds them, projects them against the course's
    topic embeddings to get an (x, y) position, then inserts a description
    row and an enroll row (with all of the course's resource indexes marked
    accessible).

    Returns 201 on success, 400 on validation failures, 500 on unexpected
    errors (with the session rolled back).
    """
    try:
        print("[REQUEST RECEIVED] Processing new TA description request...")

        # Parse request data
        data = request.get_json()
        #user_id = data.get('user_id')
        ta_id = data.get('ta_id')
        course_id = data.get('course_id')
        description = data.get('description')

        # Disabled auto-creation of a TA from a user account:
        # if not isinstance(ta_id, int):
        #     print("adding new ta...")
        #     ta_id = add_ta_from_user(user_id)["id"]

        # Validate inputs (debug prints left in place for troubleshooting).
        if not all([ta_id, course_id, description]):
            print("p1")
            print(ta_id)
            print(description)
            print(course_id)
            return jsonify({"error": "Missing required fields"}), 400

        # Generate keywords; flatten a nested per-document list if needed.
        all_keywords_list, _ = create_keywords_list([description])
        if not all_keywords_list:
            print("p2")
            return jsonify({"error": "Keyword extraction failed"}), 400
        if isinstance(all_keywords_list[0], list):
            all_keywords_list = [word for sublist in all_keywords_list for word in sublist]

        # Generate embeddings for the extracted keywords.
        learner_embeddings = create_resource_embeddings(all_keywords_list)
        if not learner_embeddings:
            print("p3")
            return jsonify({"error": "Embedding generation failed"}), 400

        # Fetch topic embeddings for the given course_id.
        raw_embeddings = db.session.query(Topic.embedding).filter_by(course_id=course_id).all()

        # Convert embeddings safely: rows may hold JSON strings or lists.
        topic_embeddings = []
        for embed in raw_embeddings:
            try:
                emb_value = json.loads(embed[0]) if isinstance(embed[0], str) else embed[0]
                if isinstance(emb_value, list):
                    topic_embeddings.append(emb_value)
                else:
                    print(f"ERROR: Invalid topic embedding -> Expected list, got {type(emb_value)}")
            except json.JSONDecodeError:
                print("ERROR: Failed to parse embedding JSON")

        # Validate topic embeddings.
        if not topic_embeddings:
            print("p4")
            return jsonify({"error": "No valid topic embeddings found for this course"}), 400

        # Generate polylines (beta hard-coded to 8, as elsewhere).
        learner_polylines = create_resource_polylines(topic_embeddings, learner_embeddings, 8)
        if not learner_polylines:
            print("p5")
            return jsonify({"error": "Polyline generation failed"}), 400

        # Flatten the first polyline into a single list of y-values.
        description_polyline_list = [item for sublist in convert_to_lists(learner_polylines[0]) for item in (
            sublist if isinstance(sublist, list) else [sublist])]

        # Compute axes and the description's centroid on the radial plot.
        feature_length = len(learner_polylines[0])
        tlen, theta = rad_plot_axes(feature_length, 1, 1)
        centroid_list = rad_plot_poly(feature_length, [description_polyline_list], tlen, theta)

        # Extract centroid coordinates.
        x_coordinate, y_coordinate = centroid_list[0]

        # Insert description into the description table (raw SQL).
        insert_description_query = text("""
            INSERT INTO description (ta_id, description,course_id)
            VALUES (:ta_id, :description, :course_id)
        """)

        db.session.execute(insert_description_query, {
            "ta_id": ta_id,
            "description": json.dumps(description),
            "course_id": course_id
        })

        print(f"Saving to tat: x={x_coordinate}, y={y_coordinate}")

        # Disabled: saving the TA position in the tat table.
        # update_query = text("""
        #     INSERT INTO tat (ta_id, course_id, x_coordinate, y_coordinate, polyline)
        #     VALUES (:ta_id, :course_id, :x_coordinate, :y_coordinate, :polyline)
        # """)

        # db.session.execute(update_query, {
        #     "ta_id": ta_id,
        #     "course_id": course_id,
        #     "x_coordinate": float(x_coordinate),
        #     "y_coordinate": float(y_coordinate),
        #     "polyline": json.dumps(learner_polylines)
        # })

        # NOTE(review): despite the surrounding comments, this uses the CURRENT
        # max learner id, not max+1, and falls back to 1 on an empty table —
        # confirm whether this is meant to reference an existing learner.
        max_learner_id = db.session.query(db.func.max(Learner.id)).scalar()
        new_learner_id = (max_learner_id ) if max_learner_id else 1  # Start from 1 if table is empty

        # Fetch all resource indexes for this course_id.
        resource_indexes = db.session.query(Resource.index).filter_by(course_id=course_id).all()
        resource_index_list = [idx[0] for idx in resource_indexes if idx[0] is not None]  # Extract values from tuples

        # Insert into the enroll table, granting access to every course resource.
        enroll_insert_query = text("""
            INSERT INTO enroll (learner_id, course_id, x_coordinate, y_coordinate, polyline, ta_id, accessible_resources)
            VALUES (:learner_id, :course_id, :x_coordinate, :y_coordinate, :polyline, :ta_id, :accessible_resources)
        """)

        db.session.execute(enroll_insert_query, {
            "learner_id": new_learner_id,
            "course_id": course_id,
            "x_coordinate": float(x_coordinate),
            "y_coordinate": float(y_coordinate),
            "polyline": json.dumps(learner_polylines),
            "ta_id": ta_id,
            "accessible_resources": json.dumps(resource_index_list)  # Store indexes as JSON array
        })

        db.session.commit()
        print("[SUCCESS] TA position stored successfully!")
        return jsonify({"message": "TA position stored successfully"}), 201

    except ValueError as ve:
        print(f"[ERROR] Validation error: {str(ve)}")
        return jsonify({"error": "Validation Error", "details": str(ve)}), 400

    except Exception as e:
        db.session.rollback()
        print(f"[ERROR] Server error: {str(e)}")
        return jsonify({"error": "Internal Server Error", "details": str(e)}), 500
@app.route('/ta-ch-description', methods=['POST'])
def changed_ta_position():
    """Register/update a TA position and create a matching learner + enrollment.

    Expects JSON body with: user_id, ta_id, course_id, description. Creates a
    TA from the user when ta_id is invalid, embeds the description, projects it
    onto the course topic axes, then writes rows to the description, tat,
    learner, and enroll tables in one transaction.

    Returns 201 on success, 400/404 on validation failures, 500 on unexpected
    errors (with the session rolled back).
    """
    try:
        print("[REQUEST RECEIVED] Processing new TA description request...")

        # Parse request data
        data = request.get_json()
        user_id = data.get('user_id')
        ta_id = data.get('ta_id')
        course_id = data.get('course_id')
        description = data.get('description')

        # Fall back to creating a TA record from the user account.
        if not is_valid_id(ta_id):
            print("adding new ta...")
            ta_id = add_ta_from_user(user_id)["id"]

        # Validate inputs
        if not all([ta_id, course_id, description]):
            return jsonify({"error": "Missing required fields"}), 400

        # Generate keywords; flatten a nested per-document list if needed.
        all_keywords_list, _ = create_keywords_list([description])
        if not all_keywords_list:
            return jsonify({"error": "Keyword extraction failed"}), 400
        if isinstance(all_keywords_list[0], list):
            all_keywords_list = [word for sublist in all_keywords_list for word in sublist]

        # Generate embeddings for the extracted keywords.
        learner_embeddings = create_resource_embeddings(all_keywords_list)
        if not learner_embeddings:
            return jsonify({"error": "Embedding generation failed"}), 400

        # Fetch topic embeddings for the given course_id.
        raw_embeddings = db.session.query(Topic.embedding).filter_by(course_id=course_id).all()

        # Convert embeddings safely: rows may hold JSON strings or lists.
        topic_embeddings = []
        for embed in raw_embeddings:
            try:
                emb_value = json.loads(embed[0]) if isinstance(embed[0], str) else embed[0]
                if isinstance(emb_value, list):
                    topic_embeddings.append(emb_value)
                else:
                    print(f"ERROR: Invalid topic embedding -> Expected list, got {type(emb_value)}")
            except json.JSONDecodeError:
                print("ERROR: Failed to parse embedding JSON")

        if not topic_embeddings:
            return jsonify({"error": "No valid topic embeddings found for this course"}), 400

        # Generate polylines (beta hard-coded to 8, as elsewhere).
        learner_polylines = create_resource_polylines(topic_embeddings, learner_embeddings, 8)
        if not learner_polylines:
            return jsonify({"error": "Polyline generation failed"}), 400

        # Flatten the first polyline into a single list of y-values.
        description_polyline_list = [item for sublist in convert_to_lists(learner_polylines[0]) for item in (
            sublist if isinstance(sublist, list) else [sublist])]

        # Compute axes and the description's centroid on the radial plot.
        feature_length = len(learner_polylines[0])
        tlen, theta = rad_plot_axes(feature_length, 1, 1)
        centroid_list = rad_plot_poly(feature_length, [description_polyline_list], tlen, theta)

        x_coordinate, y_coordinate = centroid_list[0]

        # Insert into the description table (no course_id column here, unlike
        # the /ta-gi-summary route).
        insert_description_query = text("""
            INSERT INTO description (ta_id, description)
            VALUES (:ta_id, :description)
        """)
        db.session.execute(insert_description_query, {
            "ta_id": ta_id,
            "description": json.dumps(description)
        })

        print(f"Saving to tat: x={x_coordinate}, y={y_coordinate}")

        # Insert the TA's position into the tat table.
        update_query = text("""
            INSERT INTO tat (ta_id, course_id, x_coordinate, y_coordinate, polyline)
            VALUES (:ta_id, :course_id, :x_coordinate, :y_coordinate, :polyline)
        """)
        db.session.execute(update_query, {
            "ta_id": ta_id,
            "course_id": course_id,
            "x_coordinate": float(x_coordinate),
            "y_coordinate": float(y_coordinate),
            "polyline": json.dumps(learner_polylines)
        })

        # Fetch user info to mirror into the learner table.
        user_data_query = text("""
            SELECT registered_date, name, username, password FROM user WHERE id = :user_id
        """)
        user_data_result = db.session.execute(user_data_query, {"user_id": user_id}).fetchone()
        if not user_data_result:
            return jsonify({"error": "User not found"}), 404

        registered_date, name, username, password = user_data_result

        # Insert into the learner table with a fixed cgpa of 4.
        # NOTE(review): the SQL hard-codes the literal 4, so the ":cgpa" param
        # below is unused. Also, `password` is copied verbatim from the user
        # table — confirm it is hashed upstream, otherwise credentials are
        # being duplicated in cleartext.
        insert_learner_query = text("""
            INSERT INTO learner (registered_date, name, cgpa, username, password, ta_id)
            VALUES (:registered_date, :name, 4, :username, :password, :ta_id)
        """)
        db.session.execute(insert_learner_query, {
            "registered_date": registered_date,
            "name": name,
            "cgpa": 4,
            "username": username,
            "password": password,
            "ta_id": ta_id
        })

        # Get the newly inserted learner_id.
        # NOTE(review): max(id) is racy under concurrent inserts — prefer the
        # insert's returned primary key.
        new_learner_id = db.session.query(db.func.max(Learner.id)).scalar()

        # Fetch accessible resources for the course.
        resource_indexes = db.session.query(Resource.index).filter_by(course_id=course_id).all()
        resource_index_list = [idx[0] for idx in resource_indexes if idx[0] is not None]

        # Insert into the enroll table, granting access to every course resource.
        enroll_insert_query = text("""
            INSERT INTO enroll (learner_id, course_id, x_coordinate, y_coordinate, polyline, ta_id, accessible_resources)
            VALUES (:learner_id, :course_id, :x_coordinate, :y_coordinate, :polyline, :ta_id, :accessible_resources)
        """)
        db.session.execute(enroll_insert_query, {
            "learner_id": new_learner_id,
            "course_id": course_id,
            "x_coordinate": float(x_coordinate),
            "y_coordinate": float(y_coordinate),
            "polyline": json.dumps(learner_polylines),
            "ta_id": ta_id,
            "accessible_resources": json.dumps(resource_index_list)
        })

        db.session.commit()
        print("[SUCCESS] TA and Learner information stored successfully!")
        return jsonify({"message": "TA and Learner data stored successfully"}), 201

    except ValueError as ve:
        print(f"[ERROR] Validation error: {str(ve)}")
        return jsonify({"error": "Validation Error", "details": str(ve)}), 400

    except Exception as e:
        db.session.rollback()
        print(f"[ERROR] Server error: {str(e)}")
        return jsonify({"error": "Internal Server Error", "details": str(e)}), 500
@app.route('/teacher-exit-points', methods=['POST'])
def teacher_exit_points():
    """Store a course "exit point": a target position derived from a description.

    Expects JSON body with: course_id, description. Embeds the description,
    projects it onto the course's topic axes, and inserts the resulting
    (x, y) position plus polyline into the exit_point table.

    Returns 201 on success, 400 on validation failures, 500 on unexpected
    errors (with the session rolled back).
    """
    try:
        print("[REQUEST RECEIVED] Processing teacher exit point...")

        # Parse request data
        data = request.get_json()
        course_id = data.get('course_id')
        description = data.get('description')

        if not all([course_id, description]):
            return jsonify({"error": "Missing required fields"}), 400

        # Step 1: Generate keywords; flatten a nested per-document list.
        all_keywords_list, _ = create_keywords_list([description])
        if not all_keywords_list:
            return jsonify({"error": "Keyword extraction failed"}), 400
        if isinstance(all_keywords_list[0], list):
            all_keywords_list = [word for sublist in all_keywords_list for word in sublist]

        # Step 2: Generate embeddings for the extracted keywords.
        learner_embeddings = create_resource_embeddings(all_keywords_list)
        if not learner_embeddings:
            return jsonify({"error": "Embedding generation failed"}), 400

        # Step 3: Fetch topic embeddings for the given course_id.
        raw_embeddings = db.session.query(Topic.embedding).filter_by(course_id=course_id).all()

        # Rows may hold JSON strings or lists; normalize to lists.
        topic_embeddings = []
        for embed in raw_embeddings:
            try:
                emb_value = json.loads(embed[0]) if isinstance(embed[0], str) else embed[0]
                if isinstance(emb_value, list):
                    topic_embeddings.append(emb_value)
                else:
                    print(f"ERROR: Invalid topic embedding -> Expected list, got {type(emb_value)}")
            except json.JSONDecodeError:
                print("ERROR: Failed to parse embedding JSON")

        if not topic_embeddings:
            return jsonify({"error": "No valid topic embeddings found for this course"}), 400

        # Step 4: Generate polylines (beta hard-coded to 8, as elsewhere).
        learner_polylines = create_resource_polylines(topic_embeddings, learner_embeddings, 8)
        if not learner_polylines:
            return jsonify({"error": "Polyline generation failed"}), 400

        # Flatten the first polyline into a single list of y-values.
        description_polyline_list = [item for sublist in convert_to_lists(learner_polylines[0]) for item in (
            sublist if isinstance(sublist, list) else [sublist])]

        # Step 5: Get (x, y) coordinates from the radial-plot projection.
        feature_length = len(learner_polylines[0])
        tlen, theta = rad_plot_axes(feature_length, 1, 1)
        centroid_list = rad_plot_poly(feature_length, [description_polyline_list], tlen, theta)
        x_coordinate, y_coordinate = centroid_list[0]

        print(f"[EXIT POINT] x: {x_coordinate}, y: {y_coordinate}")

        # Step 6: Insert into the exit_point table (raw SQL).
        insert_exit_point_query = text("""
            INSERT INTO exit_point (id, course_id, description, polyline, x, y)
            VALUES (:id, :course_id, :description, :polyline, :x, :y)
        """)

        # Generate the next id as max(id)+1.
        # NOTE(review): racy under concurrent requests — prefer an
        # autoincrement primary key.
        new_id_query = text("SELECT COALESCE(MAX(id), 0) + 1 FROM exit_point")
        new_id = db.session.execute(new_id_query).scalar()

        db.session.execute(insert_exit_point_query, {
            "id": new_id,
            "course_id": course_id,
            "description": json.dumps(description),
            "polyline": json.dumps(learner_polylines),
            "x": float(x_coordinate),
            "y": float(y_coordinate)
        })

        db.session.commit()
        print("[SUCCESS] Exit point saved successfully.")
        return jsonify({"message": "Exit point stored successfully."}), 201

    except Exception as e:
        db.session.rollback()
        print(f"[ERROR] Server error: {str(e)}")
        return jsonify({"error": "Internal Server Error", "details": str(e)}), 500
@app.route('/summaries/<int:ta_id>/<int:course_id>', methods=['GET'])
def get_summaries(ta_id, course_id):
    """Return every stored description summary for a TA within a course.

    Responds 200 with a JSON list of {"description": ...} objects, 404 when
    no rows match, 500 on unexpected errors.
    """
    try:
        # All Description rows for this TA/course pair.
        rows = db.session.query(Description).filter_by(ta_id=ta_id, course_id=course_id).all()
        if not rows:
            return jsonify({"message": "No summaries found"}), 404

        # Shape each row as a small JSON object.
        payload = []
        for row in rows:
            payload.append({"description": row.description})
        return jsonify(payload), 200

    except Exception as e:
        app.logger.error(f"Error fetching summaries: {str(e)}", exc_info=True)
        return jsonify({"error": "Server error", "details": str(e)}), 500
@app.route('/contributions/insert-summary-coordinates/<int:enroll_id>/<int:course_id>', methods=['POST'])
|
| 1342 |
+
def insert_summary_coordinates_per_contribution(enroll_id, course_id):
|
| 1343 |
+
try:
|
| 1344 |
+
print(f"[REQUEST RECEIVED] Inserting summary coordinates for enroll_id: {enroll_id}, course_id: {course_id}")
|
| 1345 |
+
|
| 1346 |
+
# Fetch summary contributions for enroll_id
|
| 1347 |
+
contributions = Contribution.query.filter(
|
| 1348 |
+
Contribution.enroll_id == enroll_id,
|
| 1349 |
+
Contribution.description['summary'].as_boolean() == True
|
| 1350 |
+
).order_by(Contribution.submitted_on.asc()).all()
|
| 1351 |
+
|
| 1352 |
+
if not contributions:
|
| 1353 |
+
return jsonify({"error": "No summary contributions found for the given enroll_id"}), 404
|
| 1354 |
+
|
| 1355 |
+
# Fetch topic embeddings for course
|
| 1356 |
+
raw_embeddings = db.session.query(Topic.embedding).filter_by(course_id=course_id).all()
|
| 1357 |
+
topic_embeddings = []
|
| 1358 |
+
for embed in raw_embeddings:
|
| 1359 |
+
try:
|
| 1360 |
+
emb_value = json.loads(embed[0]) if isinstance(embed[0], str) else embed[0]
|
| 1361 |
+
if isinstance(emb_value, list):
|
| 1362 |
+
topic_embeddings.append(emb_value)
|
| 1363 |
+
except json.JSONDecodeError:
|
| 1364 |
+
print("ERROR: Failed to parse embedding JSON")
|
| 1365 |
+
|
| 1366 |
+
if not topic_embeddings:
|
| 1367 |
+
return jsonify({"error": "No valid topic embeddings found for this course"}), 400
|
| 1368 |
+
|
| 1369 |
+
inserted_count = 0
|
| 1370 |
+
|
| 1371 |
+
for contrib in contributions:
|
| 1372 |
+
content = contrib.contribution_content
|
| 1373 |
+
if not content or content.strip() == "":
|
| 1374 |
+
continue
|
| 1375 |
+
|
| 1376 |
+
# Extract keywords
|
| 1377 |
+
all_keywords_list, _ = create_keywords_list([content])
|
| 1378 |
+
if not all_keywords_list:
|
| 1379 |
+
print(f"Keyword extraction failed for contribution id: {contrib.id}")
|
| 1380 |
+
continue
|
| 1381 |
+
|
| 1382 |
+
if isinstance(all_keywords_list[0], list):
|
| 1383 |
+
all_keywords_list = [word for sublist in all_keywords_list for word in sublist]
|
| 1384 |
+
|
| 1385 |
+
# Generate embeddings
|
| 1386 |
+
learner_embeddings = create_resource_embeddings(all_keywords_list)
|
| 1387 |
+
if not learner_embeddings:
|
| 1388 |
+
print(f"Embedding generation failed for contribution id: {contrib.id}")
|
| 1389 |
+
continue
|
| 1390 |
+
|
| 1391 |
+
# Generate polyline
|
| 1392 |
+
learner_polylines = create_resource_polylines(topic_embeddings, learner_embeddings, 8)
|
| 1393 |
+
if not learner_polylines:
|
| 1394 |
+
print(f"Polyline generation failed for contribution id: {contrib.id}")
|
| 1395 |
+
continue
|
| 1396 |
+
|
| 1397 |
+
# Prepare polyline list for centroid calculation
|
| 1398 |
+
description_polyline_list = [
|
| 1399 |
+
item for sublist in convert_to_lists(learner_polylines[0])
|
| 1400 |
+
for item in (sublist if isinstance(sublist, list) else [sublist])
|
| 1401 |
+
]
|
| 1402 |
+
|
| 1403 |
+
# Compute (x,y)
|
| 1404 |
+
feature_length = len(learner_polylines[0])
|
| 1405 |
+
tlen, theta = rad_plot_axes(feature_length, 1, 1)
|
| 1406 |
+
centroid_list = rad_plot_poly(feature_length, [description_polyline_list], tlen, theta)
|
| 1407 |
+
x_coordinate, y_coordinate = centroid_list[0]
|
| 1408 |
+
|
| 1409 |
+
# Get new id for summary_coordinates
|
| 1410 |
+
new_id_query = text("SELECT COALESCE(MAX(id), 0) + 1 FROM summary_coordinates")
|
| 1411 |
+
new_id = db.session.execute(new_id_query).scalar()
|
| 1412 |
+
|
| 1413 |
+
# Insert into summary_coordinates table
|
| 1414 |
+
insert_query = text("""
|
| 1415 |
+
INSERT INTO summary_coordinates (id, enroll_id, course_id, summary, polyline, x_coordinate, y_coordinate)
|
| 1416 |
+
VALUES (:id, :enroll_id, :course_id, :summary, :polyline, :x, :y)
|
| 1417 |
+
""")
|
| 1418 |
+
|
| 1419 |
+
db.session.execute(insert_query, {
|
| 1420 |
+
"id": new_id,
|
| 1421 |
+
"enroll_id": enroll_id,
|
| 1422 |
+
"course_id": course_id,
|
| 1423 |
+
"summary": json.dumps(content),
|
| 1424 |
+
"polyline": json.dumps(learner_polylines),
|
| 1425 |
+
"x": float(x_coordinate),
|
| 1426 |
+
"y": float(y_coordinate)
|
| 1427 |
+
})
|
| 1428 |
+
|
| 1429 |
+
inserted_count += 1
|
| 1430 |
+
|
| 1431 |
+
db.session.commit()
|
| 1432 |
+
print(f"[SUCCESS] Inserted {inserted_count} summary coordinate entries.")
|
| 1433 |
+
return jsonify({"message": f"Inserted {inserted_count} summary coordinate entries."}), 201
|
| 1434 |
+
|
| 1435 |
+
except Exception as e:
|
| 1436 |
+
db.session.rollback()
|
| 1437 |
+
print(f"[ERROR] Server error: {str(e)}")
|
| 1438 |
+
return jsonify({"error": "Internal Server Error", "details": str(e)}), 500
|
| 1439 |
+
|
| 1440 |
+
@app.route('/exit-points/<int:course_id>', methods=['GET'])
|
| 1441 |
+
def get_exit_coordinates(course_id):
|
| 1442 |
+
exit_points = ExitPoint.query.filter_by(course_id=course_id).all()
|
| 1443 |
+
|
| 1444 |
+
if not exit_points:
|
| 1445 |
+
return jsonify([]), 200 # Return empty list if no data found
|
| 1446 |
+
|
| 1447 |
+
coordinates = [
|
| 1448 |
+
[float(point.x), float(point.y)]
|
| 1449 |
+
for point in exit_points
|
| 1450 |
+
if point.x is not None and point.y is not None
|
| 1451 |
+
]
|
| 1452 |
+
|
| 1453 |
+
return jsonify(coordinates), 200
|
| 1454 |
+
|
| 1455 |
+
|
| 1456 |
+
if __name__ == '__main__':
|
| 1457 |
+
app.run(host="0.0.0.0", debug=True)
|
backend/__init__.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Flask Application Initialization
|
| 3 |
+
Sets up the Flask app with necessary configurations
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from flask import Flask, send_from_directory
|
| 7 |
+
from flask_cors import CORS
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
# Initialize Flask app
|
| 11 |
+
# Serves static files from the build directory (../dist)
|
| 12 |
+
app = Flask(__name__, static_folder='../dist', static_url_path='')
|
| 13 |
+
|
| 14 |
+
# Enable CORS
|
| 15 |
+
CORS(app)
|
| 16 |
+
|
| 17 |
+
# Serve React App (Catch-all route)
|
| 18 |
+
@app.route('/', defaults={'path': ''})
|
| 19 |
+
@app.route('/<path:path>')
|
| 20 |
+
def serve(path):
|
| 21 |
+
# Don't interfere with API routes
|
| 22 |
+
if path.startswith('api'):
|
| 23 |
+
return {"error": "Not found"}, 404
|
| 24 |
+
|
| 25 |
+
if path != "" and os.path.exists(os.path.join(app.static_folder, path)):
|
| 26 |
+
return send_from_directory(app.static_folder, path)
|
| 27 |
+
|
| 28 |
+
# Return index.html for SPA routing
|
| 29 |
+
return send_from_directory(app.static_folder, 'index.html')
|
| 30 |
+
|
| 31 |
+
# Flag to indicate if database was just created
|
| 32 |
+
DBcreated = False
|
backend/app.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NLP Learning Grid Backend
|
| 2 |
+
# Provides API for grid-based NLP learning system
|
| 3 |
+
from init import app
|
| 4 |
+
|
| 5 |
+
# Import NLP API routes
|
| 6 |
+
try:
|
| 7 |
+
import nlp_api
|
| 8 |
+
except Exception as e:
|
| 9 |
+
print(f"Warning: Could not import nlp_api: {e}")
|
| 10 |
+
|
| 11 |
+
@app.route('/')
|
| 12 |
+
def index():
|
| 13 |
+
return "NLP Learning Grid Backend is Running!"
|
| 14 |
+
|
| 15 |
+
if __name__ == '__main__':
|
| 16 |
+
# Run on port 5000 by default
|
| 17 |
+
app.run(debug=True, port=5000, host='0.0.0.0')
|
| 18 |
+
|
backend/cert.pem
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN CERTIFICATE-----
|
| 2 |
+
MIIGGjCCBAKgAwIBAgITMXqbiYTBzImh5lOL+GycBWSRCzANBgkqhkiG9w0BAQsF
|
| 3 |
+
ADCBnDELMAkGA1UEBhMCSU4xEjAQBgNVBAgMCUthcm5hdGFrYTESMBAGA1UEBwwJ
|
| 4 |
+
QmVuZ2FsdXJ1MQwwCgYDVQQKDANXU0wxGzAZBgNVBAsMEk5hdmlnYXRlZF9sZWFy
|
| 5 |
+
bmluZzELMAkGA1UEAwwCTkwxLTArBgkqhkiG9w0BCQEWHmd1cnVyYWpvdGFnZXJp
|
| 6 |
+
bWFpbDJAZ21haWwuY29tbTAeFw0yNDA3MDcxODU4NDZaFw0yNTA3MDcxODU4NDZa
|
| 7 |
+
MIGcMQswCQYDVQQGEwJJTjESMBAGA1UECAwJS2FybmF0YWthMRIwEAYDVQQHDAlC
|
| 8 |
+
ZW5nYWx1cnUxDDAKBgNVBAoMA1dTTDEbMBkGA1UECwwSTmF2aWdhdGVkX2xlYXJu
|
| 9 |
+
aW5nMQswCQYDVQQDDAJOTDEtMCsGCSqGSIb3DQEJARYeZ3VydXJham90YWdlcmlt
|
| 10 |
+
YWlsMkBnbWFpbC5jb21tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA
|
| 11 |
+
s7lkfW+Wu3ecT9qsW0Omh2j+nzaVD/lN9CREhF7FCqTG0D+lesOvkKqX3Mbi/ZeB
|
| 12 |
+
eukOwjiaz18UFRTebYifMv9+aG5PZuByJyrMNOQ7mLpS63qCKZqbdqMkQryT3hHF
|
| 13 |
+
Y2A7L4ugUEDu8n22E3wNKF0ZnBz56XBHZb/LwnG2yUJJe9EFscEvnWjz4WqwkRgx
|
| 14 |
+
otzlotUN4e6K/wRMehftLLmYa3aBcjALlGLFW0PD04X6Urrbd1xicLdTUAoDqpTu
|
| 15 |
+
X/Q2HKZDvUl1nTZSXFrBGjA+a4DtpIQeCMMD561ApLD6iiDKA8cV+2wreBiVgC9K
|
| 16 |
+
WN2RjYCGwkxH/swWSzAUixH+cCQmWH+6m64nk6qS32OfhJQJrLd/0uOetetui6sD
|
| 17 |
+
82a866CGdtw1mbl1CjtpnEzCdlFvupfNmmbz71eQcSKaq4L60mpGrIoRGokHGFIh
|
| 18 |
+
Ns2XojrHUGG5gWSSnDFwHTAVUaG1Ni3oaem59j8oFBwaiL+ouqpB7T7EompPR2VR
|
| 19 |
+
YFio4jNKb0q0Tjl9Gqjc6eoZWa8aSlEjOWJqGtyKmfnUDr1cfz6qUxY4FZAh23dl
|
| 20 |
+
ZyGt1ho67YLvCq45p4F498i+55tTLUNZyvA6Q6d2lxTKiTg69th+0oGFNWN9pOGf
|
| 21 |
+
/P9If7q25mGwpJWFv3dV5p744PyyBu8IfsriY/HkYZ0CAwEAAaNTMFEwHQYDVR0O
|
| 22 |
+
BBYEFGeJsHds9rt1dl1exT5/7VSZlZoFMB8GA1UdIwQYMBaAFGeJsHds9rt1dl1e
|
| 23 |
+
xT5/7VSZlZoFMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAHp6
|
| 24 |
+
hTNHKkWTGDVQJaFiFYdOg/FuqfpoJYx/0aveVNJiWMaOM0/RR0p55Vz/mI1l8Pb8
|
| 25 |
+
D4zBeJM2lfRSS6/3i5R+sMZVlTIBtlwiyTwBMvfl/x3IW0+bNuWPK+bk7LEf6WcK
|
| 26 |
+
KhxKMmyEBPLEz333p5zOqo3rO/lEiT1darQOy+voXdyEXsjahLXQ1QfucdMlKPCZ
|
| 27 |
+
16NSaOUTwnWRgkuJk/OySo2VUMIKA45KXJNi5PueKs3UQPZV/jP8AKaj+RNFq9fB
|
| 28 |
+
P6SvJAjl39xbFLSOE4hmGP9yggKseQpd4Jw8KH5rpKoja/dAUIH1gmkcqWDHC1eF
|
| 29 |
+
MJvpVPz9nArh1xb/Bo+HgCqkGUoGM5073+NaHXLuDHj7PGAtpT2vE8Cts8j6ntNt
|
| 30 |
+
kz1FCATI5R0TgFKvqdUREqSiDmoFyDmIs/EU4qmCPqyUVpIm5xGm+BISO8lvUc09
|
| 31 |
+
MIFDqc7Kfct/yu1WS4t48sJNK3nxmCQwXsog5GD+rXVkNVDQ3l6PsUTtIZ5Qv+cu
|
| 32 |
+
Cg6UWjg+myUKZjG7FLfHR/L6ND3JSOBURhDVDarFeA4yltELtKXIEJctQtesyT+W
|
| 33 |
+
P/CMBory0u8I2uBoCnAOOcc5BuhikmV4uYChOzkgHGSVulZnOsiP+eJb/SKrsxgX
|
| 34 |
+
7+kXCwnK5wq9bhuihakMVhwU6fBMFkdj+ABC+fGF
|
| 35 |
+
-----END CERTIFICATE-----
|
backend/data/db.json
ADDED
|
@@ -0,0 +1,512 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"users": [],
|
| 3 |
+
"learning_sessions": {
|
| 4 |
+
"default": {
|
| 5 |
+
"position": {
|
| 6 |
+
"x": 3,
|
| 7 |
+
"y": 13
|
| 8 |
+
},
|
| 9 |
+
"level": 2,
|
| 10 |
+
"totalReward": 200,
|
| 11 |
+
"visitedResources": [
|
| 12 |
+
"4",
|
| 13 |
+
"3",
|
| 14 |
+
"2",
|
| 15 |
+
"1"
|
| 16 |
+
],
|
| 17 |
+
"notifications": [
|
| 18 |
+
{
|
| 19 |
+
"id": "notif_1775177177",
|
| 20 |
+
"type": "success",
|
| 21 |
+
"message": "Level up! You are now Stage 2",
|
| 22 |
+
"timestamp": 1775177177230,
|
| 23 |
+
"read": false
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"id": "notif_1775174735",
|
| 27 |
+
"type": "success",
|
| 28 |
+
"message": "Level up! You are now Stage 2",
|
| 29 |
+
"timestamp": 1775174735824,
|
| 30 |
+
"read": true
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"id": "notif_1775174142",
|
| 34 |
+
"type": "success",
|
| 35 |
+
"message": "Level up! You are now Stage 2",
|
| 36 |
+
"timestamp": 1775174142894,
|
| 37 |
+
"read": true
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"id": "notif_1775173994",
|
| 41 |
+
"type": "success",
|
| 42 |
+
"message": "Level up! You are now Stage 2",
|
| 43 |
+
"timestamp": 1775173994762,
|
| 44 |
+
"read": true
|
| 45 |
+
},
|
| 46 |
+
{
|
| 47 |
+
"id": "notif_1775173639",
|
| 48 |
+
"type": "success",
|
| 49 |
+
"message": "Level up! You are now Stage 2",
|
| 50 |
+
"timestamp": 1775173639779,
|
| 51 |
+
"read": true
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"id": "notif_1775172872",
|
| 55 |
+
"type": "success",
|
| 56 |
+
"message": "Level up! You are now Stage 2",
|
| 57 |
+
"timestamp": 1775172872637,
|
| 58 |
+
"read": true
|
| 59 |
+
},
|
| 60 |
+
{
|
| 61 |
+
"id": "notif_1775172411",
|
| 62 |
+
"type": "success",
|
| 63 |
+
"message": "Level up! You are now Stage 2",
|
| 64 |
+
"timestamp": 1775172411178,
|
| 65 |
+
"read": true
|
| 66 |
+
}
|
| 67 |
+
]
|
| 68 |
+
}
|
| 69 |
+
},
|
| 70 |
+
"polylines": {
|
| 71 |
+
"polyline_20260403_032235": {
|
| 72 |
+
"id": "polyline_20260403_032235",
|
| 73 |
+
"name": "hye3dg",
|
| 74 |
+
"path": [
|
| 75 |
+
{
|
| 76 |
+
"x": 2,
|
| 77 |
+
"y": 17
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"x": 0,
|
| 81 |
+
"y": 17
|
| 82 |
+
}
|
| 83 |
+
],
|
| 84 |
+
"color": "rgba(121, 114, 255, 0.4)",
|
| 85 |
+
"isActive": true,
|
| 86 |
+
"summary": "hcioasuhioc",
|
| 87 |
+
"keywords_found": [],
|
| 88 |
+
"module_scores": [
|
| 89 |
+
0.036593932658433914,
|
| 90 |
+
0.051530614495277405,
|
| 91 |
+
0.1929866373538971,
|
| 92 |
+
0.1,
|
| 93 |
+
0.045435961335897446,
|
| 94 |
+
0.05906526371836662,
|
| 95 |
+
0.00729989493265748,
|
| 96 |
+
0.08250045776367188,
|
| 97 |
+
0.027184199541807175,
|
| 98 |
+
0.061868976801633835,
|
| 99 |
+
0.05629986152052879,
|
| 100 |
+
0.06286735832691193,
|
| 101 |
+
0.019927512854337692,
|
| 102 |
+
0.06329352408647537,
|
| 103 |
+
0.07700204849243164,
|
| 104 |
+
0.06306321918964386,
|
| 105 |
+
0.03309740498661995,
|
| 106 |
+
0.0
|
| 107 |
+
],
|
| 108 |
+
"strengths": [
|
| 109 |
+
"Tutorial: Introduction to huggingface",
|
| 110 |
+
"Fine tuning LLM"
|
| 111 |
+
],
|
| 112 |
+
"dominant_topics": [],
|
| 113 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Level 1 achieved with 100 points.",
|
| 114 |
+
"assimilation_position": {
|
| 115 |
+
"x": 2,
|
| 116 |
+
"y": 17
|
| 117 |
+
},
|
| 118 |
+
"next_recommendation": {
|
| 119 |
+
"id": "8",
|
| 120 |
+
"title": "Incontext Learning",
|
| 121 |
+
"position": {
|
| 122 |
+
"x": 3,
|
| 123 |
+
"y": 13
|
| 124 |
+
},
|
| 125 |
+
"module": "Incontext Learning",
|
| 126 |
+
"reason": "dqn"
|
| 127 |
+
}
|
| 128 |
+
},
|
| 129 |
+
"polyline_20260403_032347": {
|
| 130 |
+
"id": "polyline_20260403_032347",
|
| 131 |
+
"name": "xas",
|
| 132 |
+
"path": [
|
| 133 |
+
{
|
| 134 |
+
"x": 2,
|
| 135 |
+
"y": 17
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"x": 0,
|
| 139 |
+
"y": 17
|
| 140 |
+
}
|
| 141 |
+
],
|
| 142 |
+
"color": "rgba(153, 107, 255, 0.4)",
|
| 143 |
+
"isActive": true,
|
| 144 |
+
"summary": "xss",
|
| 145 |
+
"keywords_found": [],
|
| 146 |
+
"module_scores": [
|
| 147 |
+
0.0,
|
| 148 |
+
0.0,
|
| 149 |
+
0.15388113260269165,
|
| 150 |
+
0.1306712031364441,
|
| 151 |
+
0.0,
|
| 152 |
+
0.0,
|
| 153 |
+
0.015578164719045162,
|
| 154 |
+
0.006550716236233711,
|
| 155 |
+
0.03091040439903736,
|
| 156 |
+
0.0,
|
| 157 |
+
0.017347849905490875,
|
| 158 |
+
0.06170199066400528,
|
| 159 |
+
0.009152302518486977,
|
| 160 |
+
0.024282503873109818,
|
| 161 |
+
0.022633565589785576,
|
| 162 |
+
0.05440949276089668,
|
| 163 |
+
0.047033052891492844,
|
| 164 |
+
0.0
|
| 165 |
+
],
|
| 166 |
+
"strengths": [
|
| 167 |
+
"Tutorial: Introduction to huggingface",
|
| 168 |
+
"Fine tuning LLM"
|
| 169 |
+
],
|
| 170 |
+
"dominant_topics": [],
|
| 171 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Level 1 achieved with 100 points.",
|
| 172 |
+
"assimilation_position": {
|
| 173 |
+
"x": 3,
|
| 174 |
+
"y": 13
|
| 175 |
+
},
|
| 176 |
+
"next_recommendation": {
|
| 177 |
+
"id": "8",
|
| 178 |
+
"title": "Incontext Learning",
|
| 179 |
+
"position": {
|
| 180 |
+
"x": 3,
|
| 181 |
+
"y": 13
|
| 182 |
+
},
|
| 183 |
+
"module": "Incontext Learning",
|
| 184 |
+
"reason": "dqn"
|
| 185 |
+
}
|
| 186 |
+
},
|
| 187 |
+
"polyline_20260403_033404": {
|
| 188 |
+
"id": "polyline_20260403_033404",
|
| 189 |
+
"name": "heyyy",
|
| 190 |
+
"path": [
|
| 191 |
+
{
|
| 192 |
+
"x": 3,
|
| 193 |
+
"y": 18
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"x": 3,
|
| 197 |
+
"y": 17
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"x": 2,
|
| 201 |
+
"y": 17
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"x": 0,
|
| 205 |
+
"y": 17
|
| 206 |
+
}
|
| 207 |
+
],
|
| 208 |
+
"color": "rgba(180, 160, 255, 0.4)",
|
| 209 |
+
"isActive": true,
|
| 210 |
+
"summary": "heyyyy",
|
| 211 |
+
"keywords_found": [],
|
| 212 |
+
"module_scores": [
|
| 213 |
+
0.10115420073270798,
|
| 214 |
+
0.1,
|
| 215 |
+
0.18399392068386078,
|
| 216 |
+
0.19090811908245087,
|
| 217 |
+
0.040012165904045105,
|
| 218 |
+
0.03171403333544731,
|
| 219 |
+
0.004172520712018013,
|
| 220 |
+
0.0,
|
| 221 |
+
0.0,
|
| 222 |
+
0.0,
|
| 223 |
+
0.013521099463105202,
|
| 224 |
+
0.0,
|
| 225 |
+
0.06345637887716293,
|
| 226 |
+
0.02790343202650547,
|
| 227 |
+
0.057048387825489044,
|
| 228 |
+
0.09004955738782883,
|
| 229 |
+
0.0,
|
| 230 |
+
0.0
|
| 231 |
+
],
|
| 232 |
+
"strengths": [
|
| 233 |
+
"Pre training objectives",
|
| 234 |
+
"Pre trained models",
|
| 235 |
+
"Tutorial: Introduction to huggingface",
|
| 236 |
+
"Fine tuning LLM"
|
| 237 |
+
],
|
| 238 |
+
"dominant_topics": [],
|
| 239 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Level 2 achieved with 200 points.",
|
| 240 |
+
"assimilation_position": {
|
| 241 |
+
"x": 3,
|
| 242 |
+
"y": 15
|
| 243 |
+
},
|
| 244 |
+
"next_recommendation": {
|
| 245 |
+
"id": "8",
|
| 246 |
+
"title": "Incontext Learning",
|
| 247 |
+
"position": {
|
| 248 |
+
"x": 3,
|
| 249 |
+
"y": 13
|
| 250 |
+
},
|
| 251 |
+
"module": "Incontext Learning",
|
| 252 |
+
"reason": "dqn"
|
| 253 |
+
}
|
| 254 |
+
},
|
| 255 |
+
"polyline_20260403_042600": {
|
| 256 |
+
"id": "polyline_20260403_042600",
|
| 257 |
+
"name": "heyyy",
|
| 258 |
+
"path": [
|
| 259 |
+
{
|
| 260 |
+
"x": 3,
|
| 261 |
+
"y": 18
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"x": 3,
|
| 265 |
+
"y": 17
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"x": 2,
|
| 269 |
+
"y": 17
|
| 270 |
+
},
|
| 271 |
+
{
|
| 272 |
+
"x": 0,
|
| 273 |
+
"y": 17
|
| 274 |
+
}
|
| 275 |
+
],
|
| 276 |
+
"color": "rgba(154, 164, 255, 0.4)",
|
| 277 |
+
"isActive": true,
|
| 278 |
+
"summary": "heyyyy",
|
| 279 |
+
"keywords_found": [],
|
| 280 |
+
"module_scores": [
|
| 281 |
+
0.10115420073270798,
|
| 282 |
+
0.1,
|
| 283 |
+
0.18399392068386078,
|
| 284 |
+
0.19090811908245087,
|
| 285 |
+
0.040012165904045105,
|
| 286 |
+
0.03171403333544731,
|
| 287 |
+
0.004172520712018013,
|
| 288 |
+
0.0,
|
| 289 |
+
0.0,
|
| 290 |
+
0.0,
|
| 291 |
+
0.013521099463105202,
|
| 292 |
+
0.0,
|
| 293 |
+
0.06345637887716293,
|
| 294 |
+
0.02790343202650547,
|
| 295 |
+
0.057048387825489044,
|
| 296 |
+
0.09004955738782883,
|
| 297 |
+
0.0,
|
| 298 |
+
0.0
|
| 299 |
+
],
|
| 300 |
+
"strengths": [
|
| 301 |
+
"Pre training objectives",
|
| 302 |
+
"Pre trained models",
|
| 303 |
+
"Tutorial: Introduction to huggingface",
|
| 304 |
+
"Fine tuning LLM"
|
| 305 |
+
],
|
| 306 |
+
"dominant_topics": [],
|
| 307 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Stage 2 achieved with 200 points.",
|
| 308 |
+
"assimilation_position": {
|
| 309 |
+
"x": 3,
|
| 310 |
+
"y": 13
|
| 311 |
+
},
|
| 312 |
+
"next_recommendation": {
|
| 313 |
+
"id": "8",
|
| 314 |
+
"title": "Incontext Learning",
|
| 315 |
+
"position": {
|
| 316 |
+
"x": 3,
|
| 317 |
+
"y": 13
|
| 318 |
+
},
|
| 319 |
+
"module": "Incontext Learning",
|
| 320 |
+
"reason": "dqn"
|
| 321 |
+
}
|
| 322 |
+
},
|
| 323 |
+
"polyline_20260403_051831": {
|
| 324 |
+
"id": "polyline_20260403_051831",
|
| 325 |
+
"name": "HBOIDCUHGDOHQWE",
|
| 326 |
+
"path": [
|
| 327 |
+
{
|
| 328 |
+
"x": 3,
|
| 329 |
+
"y": 18
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"x": 3,
|
| 333 |
+
"y": 17
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"x": 2,
|
| 337 |
+
"y": 17
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"x": 0,
|
| 341 |
+
"y": 17
|
| 342 |
+
}
|
| 343 |
+
],
|
| 344 |
+
"color": "rgba(173, 194, 255, 0.4)",
|
| 345 |
+
"isActive": true,
|
| 346 |
+
"summary": "BGOAIUXDASHGXCIOAHXIO",
|
| 347 |
+
"keywords_found": [],
|
| 348 |
+
"module_scores": [
|
| 349 |
+
0.12643486261367798,
|
| 350 |
+
0.1,
|
| 351 |
+
0.12446118891239166,
|
| 352 |
+
0.1,
|
| 353 |
+
0.0,
|
| 354 |
+
0.010079984553158283,
|
| 355 |
+
0.0,
|
| 356 |
+
0.06248624622821808,
|
| 357 |
+
0.0,
|
| 358 |
+
0.02448994107544422,
|
| 359 |
+
0.0,
|
| 360 |
+
0.0,
|
| 361 |
+
0.0,
|
| 362 |
+
0.038079071789979935,
|
| 363 |
+
0.05198737978935242,
|
| 364 |
+
0.06710068136453629,
|
| 365 |
+
0.05336645245552063,
|
| 366 |
+
0.0
|
| 367 |
+
],
|
| 368 |
+
"strengths": [
|
| 369 |
+
"Pre training objectives",
|
| 370 |
+
"Pre trained models",
|
| 371 |
+
"Tutorial: Introduction to huggingface",
|
| 372 |
+
"Fine tuning LLM"
|
| 373 |
+
],
|
| 374 |
+
"dominant_topics": [],
|
| 375 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Stage 2 achieved with 200 points.",
|
| 376 |
+
"assimilation_position": {
|
| 377 |
+
"x": 3,
|
| 378 |
+
"y": 13
|
| 379 |
+
},
|
| 380 |
+
"next_recommendation": {
|
| 381 |
+
"id": "8",
|
| 382 |
+
"title": "Incontext Learning",
|
| 383 |
+
"position": {
|
| 384 |
+
"x": 3,
|
| 385 |
+
"y": 13
|
| 386 |
+
},
|
| 387 |
+
"module": "Incontext Learning",
|
| 388 |
+
"reason": "dqn"
|
| 389 |
+
}
|
| 390 |
+
}
|
| 391 |
+
},
|
| 392 |
+
"notes": {},
|
| 393 |
+
"bookmarks": {},
|
| 394 |
+
"summaries": [
|
| 395 |
+
{
|
| 396 |
+
"id": "summary_default_20260403_032235",
|
| 397 |
+
"title": "hye3dg",
|
| 398 |
+
"summary": "hcioasuhioc",
|
| 399 |
+
"keywords_found": [],
|
| 400 |
+
"totalResources": 18,
|
| 401 |
+
"visitedResources": 2,
|
| 402 |
+
"currentLevel": 1,
|
| 403 |
+
"strengths": [
|
| 404 |
+
"Tutorial: Introduction to huggingface",
|
| 405 |
+
"Fine tuning LLM"
|
| 406 |
+
],
|
| 407 |
+
"recommendations": [
|
| 408 |
+
"Pre training objectives",
|
| 409 |
+
"Pre trained models",
|
| 410 |
+
"Instruction tuning"
|
| 411 |
+
],
|
| 412 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Level 1 achieved with 100 points.",
|
| 413 |
+
"avgDifficulty": 2.0,
|
| 414 |
+
"totalReward": 100,
|
| 415 |
+
"xp_earned": 0
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"id": "summary_default_20260403_032347",
|
| 419 |
+
"title": "xas",
|
| 420 |
+
"summary": "xss",
|
| 421 |
+
"keywords_found": [],
|
| 422 |
+
"totalResources": 18,
|
| 423 |
+
"visitedResources": 2,
|
| 424 |
+
"currentLevel": 1,
|
| 425 |
+
"strengths": [
|
| 426 |
+
"Tutorial: Introduction to huggingface",
|
| 427 |
+
"Fine tuning LLM"
|
| 428 |
+
],
|
| 429 |
+
"recommendations": [
|
| 430 |
+
"Pre training objectives",
|
| 431 |
+
"Pre trained models",
|
| 432 |
+
"Instruction tuning"
|
| 433 |
+
],
|
| 434 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Level 1 achieved with 100 points.",
|
| 435 |
+
"avgDifficulty": 2.0,
|
| 436 |
+
"totalReward": 100,
|
| 437 |
+
"xp_earned": 0
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"id": "summary_default_20260403_033404",
|
| 441 |
+
"title": "heyyy",
|
| 442 |
+
"summary": "heyyyy",
|
| 443 |
+
"keywords_found": [],
|
| 444 |
+
"totalResources": 18,
|
| 445 |
+
"visitedResources": 4,
|
| 446 |
+
"currentLevel": 1,
|
| 447 |
+
"strengths": [
|
| 448 |
+
"Pre training objectives",
|
| 449 |
+
"Pre trained models",
|
| 450 |
+
"Tutorial: Introduction to huggingface",
|
| 451 |
+
"Fine tuning LLM"
|
| 452 |
+
],
|
| 453 |
+
"recommendations": [
|
| 454 |
+
"Instruction tuning",
|
| 455 |
+
"Prompt based learning",
|
| 456 |
+
"Parameter efficient fine tuning"
|
| 457 |
+
],
|
| 458 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Level 2 achieved with 200 points.",
|
| 459 |
+
"avgDifficulty": 2.0,
|
| 460 |
+
"totalReward": 200,
|
| 461 |
+
"xp_earned": 0
|
| 462 |
+
},
|
| 463 |
+
{
|
| 464 |
+
"id": "summary_default_20260403_042600",
|
| 465 |
+
"title": "heyyy",
|
| 466 |
+
"summary": "heyyyy",
|
| 467 |
+
"keywords_found": [],
|
| 468 |
+
"totalResources": 18,
|
| 469 |
+
"visitedResources": 4,
|
| 470 |
+
"currentLevel": 2,
|
| 471 |
+
"strengths": [
|
| 472 |
+
"Pre training objectives",
|
| 473 |
+
"Pre trained models",
|
| 474 |
+
"Tutorial: Introduction to huggingface",
|
| 475 |
+
"Fine tuning LLM"
|
| 476 |
+
],
|
| 477 |
+
"recommendations": [
|
| 478 |
+
"Instruction tuning",
|
| 479 |
+
"Prompt based learning",
|
| 480 |
+
"Parameter efficient fine tuning"
|
| 481 |
+
],
|
| 482 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Stage 2 achieved with 200 points.",
|
| 483 |
+
"avgDifficulty": 2.0,
|
| 484 |
+
"totalReward": 200,
|
| 485 |
+
"xp_earned": 0
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"id": "summary_default_20260403_051831",
|
| 489 |
+
"title": "HBOIDCUHGDOHQWE",
|
| 490 |
+
"summary": "BGOAIUXDASHGXCIOAHXIO",
|
| 491 |
+
"keywords_found": [],
|
| 492 |
+
"totalResources": 18,
|
| 493 |
+
"visitedResources": 4,
|
| 494 |
+
"currentLevel": 2,
|
| 495 |
+
"strengths": [
|
| 496 |
+
"Pre training objectives",
|
| 497 |
+
"Pre trained models",
|
| 498 |
+
"Tutorial: Introduction to huggingface",
|
| 499 |
+
"Fine tuning LLM"
|
| 500 |
+
],
|
| 501 |
+
"recommendations": [
|
| 502 |
+
"Instruction tuning",
|
| 503 |
+
"Prompt based learning",
|
| 504 |
+
"Parameter efficient fine tuning"
|
| 505 |
+
],
|
| 506 |
+
"ai_analysis": "Learning profile enriched by modules like Basics. Stage 2 achieved with 200 points.",
|
| 507 |
+
"avgDifficulty": 2.0,
|
| 508 |
+
"totalReward": 200,
|
| 509 |
+
"xp_earned": 0
|
| 510 |
+
}
|
| 511 |
+
]
|
| 512 |
+
}
|
backend/data/youtube_links.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"Pre training objectives": "https://www.youtube.com/watch?v=WZOqXkld9mk&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=19",
|
| 3 |
+
"Pre trained models": "https://www.youtube.com/watch?v=NB_4dkfJf_w&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=20",
|
| 4 |
+
"Tutorial: Introduction to huggingface": "https://www.youtube.com/watch?v=dHqggovEOwk&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=21",
|
| 5 |
+
"Fine tuning LLM": "https://www.youtube.com/watch?v=eC6Hd1hFvos",
|
| 6 |
+
"Instruction tuning": "https://www.youtube.com/watch?v=TPDiqPnJDTs&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=22",
|
| 7 |
+
"Prompt based learning": "https://www.youtube.com/watch?v=sXPggianwos&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=23",
|
| 8 |
+
"Parameter efficient fine tuning": "https://www.youtube.com/watch?v=S0l-qUniC54&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=30",
|
| 9 |
+
"Incontext Learning": "https://www.youtube.com/watch?v=eyNLkiQ89KI&t=378s",
|
| 10 |
+
"Prompting methods": "https://www.youtube.com/watch?v=6BXqKzOwObo&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=24",
|
| 11 |
+
"Retrieval Methods": "https://www.youtube.com/watch?v=oJvFO2QwuvI",
|
| 12 |
+
"Retrieval Augmented Generation": "https://www.youtube.com/watch?v=mE7IDf2SmJg",
|
| 13 |
+
"Quantization": "https://www.youtube.com/watch?v=Kx5x3HYBDls&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=31",
|
| 14 |
+
"Mixture of Experts Model": "https://www.youtube.com/watch?v=U8J32Z3qV8s",
|
| 15 |
+
"Agentic AI": "https://www.youtube.com/watch?v=kJLiOGle3Lw",
|
| 16 |
+
"Multimodal LLMs": "https://www.youtube.com/watch?v=cYfKQ6YG9Qo",
|
| 17 |
+
"Vision Language Models": "https://www.youtube.com/watch?v=rUQUv4u7jFs",
|
| 18 |
+
"Policy learning using DQN": "https://www.youtube.com/watch?v=0BsWItDlpxA&list=PLp6ek2hDcoNDDRINFiWGDlPKUwW-g1Hjk&index=26",
|
| 19 |
+
"RLHF": "https://www.youtube.com/watch?v=2MBJOuVq380"
|
| 20 |
+
}
|
backend/data/youtube_transcripts.json
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"Pre training objectives": "Hello everyone, welcome to this course! So, in this lecture, we are going to talk about pre-training strategies. Last lecture, we covered transformers and different blocks of a transformer model, right? We also talked about what to wear for glove models that are pre-trained. what embedding methods, right. We will see the overall arena of pre-training with transformers. And then we will also see how the paradigm of pre-training actually changed. From pre-trained word embeddings to pre-trained language models.\n\nLet us look at the pretraining strategy. So, while discussing this, let us start with this famous quote. which we already mentioned earlier in the distributional semantics chapter. In the word embedding chapter, it says that \"you shall know a word by the company it keeps,\" right? So, the same person modified the quote later on, and then he said. That the complete meaning of a word is always contextual. Okay, this is always contextual, and no study of meaning can occur apart from a complete context.\n\nCan be taken seriously, okay? And this is essentially the foundation behind building the pre-training strategies, okay. This is an example; I record the record right. If you look at these two, the two positions of the word \"record\" are right. The meanings are completely different. Now if you use a word-to-way kind of method or a glove-type method which basically produces pre-trained embeddings, right? You will get the same embedding for these two positions of the record, right? But the meanings are different.\n\nOkay. Let's take another example. A bat flew out of the cave. He hit the ball with the bat. Again, here are the two words bat, the two positions of the word bat. The two contexts of the word \"bat\" are different, right? If you use the same embedding to represent this, You will not be able to do the proper processing of it, right? So what will we do here? We will produce embeddings that are going to be contextualized. 
Meaning, depending on the context, the embedding will change. For example, here let us say that for the word \"bat,\" you will get one embedding.\n\nFor the word \"bat\" here, you get another embedding that is different, okay? How do you do this? The first approach that was proposed is not a transformer-based approach. What was proposed in 2018 is called ELMO. Deeply contextualized word representation. This was done mostly by the LNAI team and some folks from this university as well. One of them is a very good friend of mine. So, the idea behind the ELMO method is as follows. So ELMO is a non-transformer-based approach. There is no concept of a transformer.\n\nTransformers were introduced in 2017, around the same time it was introduced, in 2018. So ELMO stands for embeddings from language models. What does it do? It essentially relies on RNNs, which can be LSTM or GRU. And what it does is that it essentially processes a sequence, right? When you process a sequence using RNN, each of the hidden states Which correspond to basically the to",
|
| 3 |
+
"Pre trained models": "Hello, everyone. Welcome back. So we were discussing different pre-training strategies, and in the last lecture, we discussed. You know, pre-training and encoder-only models, specifically, we discussed the BERT model, right? So, in today's class, we will focus on two other pre-training strategies. One is encoder-decoder-only models, right? Models like T5 and BART. So B-A-R-T is another model, right? This is not B-E-R-T; this is B-A-R-T, BART. And at the same time, we will also discuss decoder-only models, right?\n\nwhich includes ChatGPT, all GPT series models, LLaMA models, and Decoder-only models are essentially very popular. So, the encoder-only model, as I discussed, was discussed in the last class. We saw that a new pre-training strategy called masked language model was introduced. This was used in BERT, where you essentially mask some of the tokens in the input. And you ask the model to predict those masked tokens, right? This is a self-supervised approach. You don't need any labeled data for training, right?\n\nAnd being an encoder model, it essentially means, The BERT model can essentially look at all the tokens present in the input. It is not an autoregressive model, is it? So the entire input is exposed to the model. And you basically perform self-attention. Through self-attention, you look at all the tokens present in the input. And based on that, you predict the mass of the tokens. On the other hand, the encoder-decoder model that we discussed here, As we discussed in today's class, we have this encoder component and the decoder component.\n\nAnd we will see how you can best use both parts of the encoder. And the decoder part during pre-training. And then we will also discuss the decoder-only model. Now, in the decoder-only model, as the name suggests, the decoder part, If you remember in the transformer class we discussed, it's an autoregressive model. 
autoregressive component in the sense that when you predict a word at the t-th location, You only have access to tokens until the t minus 1th location, right? You don't have access to tokens after the location, right?\n\nWe will see how you understand this autoregressive style of pre-training essentially. helps the decoder-only model learn things correctly This set of models is very popular these days, as people realize. Over the times that an encoder-only model is not the right solution for. For a generative task, right? Because a generative task requires an autoregressive setup, right? Whereas in an encoder-decoder model, right, as you require. Both the encoder part and the decoder part require a lot of memory.\n\nIt requires a lot of parameters, right? You can do all these generative tasks through the encoder-decoder model together, right? But the decoder model makes more sense both in terms of parameters. As well as the setup, right? The autoregressive model setup itself is suitable for, you know, next token generation. And, parameter-wise, you need half of the parameters. than the encoder d",
|
| 4 |
+
"Tutorial: Introduction to huggingface": "Hi, everyone. In this tutorial, we'll be discussing a kind of introduction to the HuggingFace library. So Hugging Face is a very useful library when we work with basically transformer-based models. Mostly the transformer-based model. All these models, open-source models, and data sets are available on something called the Hugging Face Hub. So we will see how we can use this Hugging Face library to load the models. To fine-tune them using some toy examples, how do I load the data sets? And how to use it for inference.\n\nSo let's get started. So here... That package in Hugging Face, which deals with transformer-based models, is called Transformers. And there's a package called Datasets that deals with all the open-source datasets. So we need to first install them. Let's first look at the whole pipeline: the whole flow of how we process an input. So, first, we have this raw text. This course is amazing. And then we have a tokenization algorithm that breaks the text into tokens. And map it to some numbers correctly.\n\nSo these numbers represent a token, right? So this code is amazing; suppose we apply some tokenization algorithm, say like byte pair encoding. Or something, or a sentence piece tokenization, something like that. And it maps it to a sequence of tokens, and tokens are represented as numbers, right? So these token numbers are basically a kind of dictionary mapping. Please provide the sentence that needs correction. So 101, say, this tells us this is a token, and its number is 101. Something like that.\n\nSo we then have a list of input IDs. And then these are passed on to the model. So when the model receives the input IDs, which are a list of token numbers, What it does is go to its embedding matrix and do a lookup. So the embedding dimension of the token is already stored. In the embedding matrix of the pre-trained model. 
If it is not pre-trained and we are looking to train it from scratch, Then initialization is either some random initialization or some informed initialization like Xavier initialization or something like that.\n\nAnd then, when you train the model, these embeddings are also updated. Embeddings of the tokens are also updated. So for now, let us assume that the model has been pre-trained. So once we pass the input IDs, the model maps the token IDs to their token embeddings. And then the token embeddings are passed on to the model's position encoding. is added in the case of transformers, regardless of the model architecture present Whether it's an encoder-decoder model or a decoder-only model. Whether it's an encoder-only model, it is processed accordingly.\n\nWe have already seen in our lecture, I guess, one or two weeks ago. That's how you implement transformers from scratch using PyTorch, right? There we saw how a transformer layer is implemented within the model. We saw every component of it: multi-reduction positional encoding and layer normalization. Encoder block, decoder block; we saw all that. So now we are putting all o",
|
| 5 |
+
"Fine tuning LLM": "hey everyone I'm Shaw and this is the fifth video in the larger series on how to use large language models in practice in the previous video we talked about prompt engineering which is concerned with using large language models out of the box while prompt engineering is a very powerful approach and can handle a lot of llm use cases in practice for some applications prompt engineering just doesn't cut it and for those cases we can go one step further and fine-tune a existing large language model for a\n\nspecific use case so Navy's question is what is model fine tuning the way I like to Define it is taking a pre-trained model and training at least one internal model parameter and here I mean the internal weights or biases inside the neural network what this typically looks like is taking a pre-trained existing model like gpt3 and fine-tuning it for a particular use case for example chatgypt to use an analogy here gpt3 is like a raw diamond right out of the earth it's a diamond but it's a bit rough around\n\nthe edges fine tuning is taking this raw diamond and transforming it into something a bit more practical something that you can put on a diamond ring for example so the process of taking the raw base model of gpt3 and transforming it into the fine-tuned model of gbt 3.5 turbo for example is what gives us applications like chat GPT or any of the other incredible applications of large language models we're seeing these days to get a more concrete sense of the difference between a base model link\n\ngpt3 and a fine-tuned model let's look at this particular example we have to keep in mind that these Foundation large language models like gpg3 llama 2 or whatever your favorite large language model is these models are strictly trained to do word prediction given a sequence of words predicting the next word so when you train one of these launch language models on huge Corpus of text and documents and web pages what it essentially becomes is a document 
completer what that translates to in practice is if you plug into a lot of\n\nthese base models like gpt3 the prompt tell me how to find tune a model a typical completion might look something like this where it's just listing out questions like you might see in a Google search or maybe like a homework assignment or something here when I prompted gpt3 to tell me how to fine-tune a model the completion was as follows how can I control the complexity of a model how do I know when my model is done how do I test a model well this might be reasonable for gpt3 to do based on the data that it was trained on this\n\nisn't something that's very practical now let's look at the fine-tuned model completion so now we have text DaVinci zero zero three which is just one of the many fine-tuned models based on gpt3 coming from open AI we give it the same prompt tell me how to fine tune a model and this is the completion fine-tuning a model involves a adjusting the parameters of a pre-trained model in order to make it better suited f",
|
| 6 |
+
"Instruction tuning": "Hello everyone, today we will discuss instruction fine-tuning or instruction tuning. which is one of the key advancements in recent language modeling research. which enables us to have a conversation with language models easily Or, simply put, we can chat with language models. So, first, a quick review. So in previous weeks, we have learned about decoder-based language models. So such models are trained on vast amounts of text from the internet. Using the next word prediction task. As a result of this, these models learn to encode a great deal of information.\n\nAbout the world. They also have the ability to understand language to some extent. So these models are very powerful. They're pretty amazing, but we'll see in the upcoming slides. That they have some major limitations. One note: these pre-trained language models are also known as base models. So I'll be using the term \"base models\" throughout the lecture. So whenever I mention base models, it simply refers to pre-trained language models. For example, let's say we have been given the following prompt.\n\nWhat is the national flower of India? We have prompted the language model with this question. Now, what can happen is that the language model can generate the following response: What is the national animal of India? What is the national bird of India? So this response is nothing but the continuation of the prompt. And this is the result of the next word prediction that is happening. after the prompt. So here we see that the response contains questions that are quite common. which we can come across such questions on the web, where we see\n\nThere is a web page on general knowledge questions about India. Such questions are very common. However, this is not the desired response. Because when we asked this question of the language model, we were expecting an answer. That is, the national flower of India is the lotus. This was the desired outcome. 
However, since the language model is just predicting the next word, So the response to a question could be that it may. or may not follow the question. May or may not. Might follow the instructions, might not follow the instructions.\n\nBecause, as I said, it's simply just doing next-word prediction at this point. So the key takeaway from this slide is that next-word prediction, which is what is governing this response generation, That does not necessarily ensure that the model understands or follows instructions. So the reason we need instruction tuning is that. We want to teach the language models how to follow and understand instructions. So multitask learning is another very important paradigm in the natural language processing literature.\n\nSo in classical multitask learning, what we do is combine multiple tasks. We train the model, the language model, on multiple tasks with the intention. that these models will have a positive influence on one another And thereby, the final outcome will be improved across all the tasks. So here, if we take a look at th",
|
| 7 |
+
"Prompt based learning": "Welcome back. So in today's lecture, we are going to talk about prompts, right? And you know when it comes to chat GPT kind of models, Large language models, the first question that comes to our mind is how to write a prompt, right. What is going to be the optimal prompt for a specific model, right? So in this lecture, we will discuss different types of prompting techniques, right? how prompts affect the accuracy of the models, right? And how, with the scaling and the increasing size of the models,\n\nhow it basically affects the accuracy and how a specific prompt will be responsible for it Producing accuracy across different types of models. We'll also discuss prompt sensitivity, right? We'll see that most of these models are highly sensitive to simple perturbations of prompts. Right? And that would affect the accuracy; that would affect other aspects of the model. And how can we quantify them, right? So far, we have discussed different pretraining strategies. We have seen encoder-only models, haven't we?\n\nWe have seen models like BERT, which is a pure encoder-only model, right? And these models are pre-trained with an objective called masked language modeling, right, MLM. We have seen models like GPT, which is a purely decoder-only model. And these models are trained using an autoregressive setup, right, or causal language modeling. This is called causal language modeling or an autoregressive setup. There are other models like T5 and BART, and they are encoder-decoder models. Okay, which are also trained with a kind of autoregressive setup.\n\nWhere your input will be fed to the encoder. and decoder will predict the next word Our decoder will predict if your input is part of, and if your input has noise. the decoder model will be able to essentially denoise your input, right? And you know the board model came out around 2018. The first GPT paper was written in 2018, GPT-1, and then GPT-2 came in 2019. 
GPT-2 was released in 2019 and then GPT-3 in 2020. Right, and the GPT-3 paper showed that the model doesn't need any fine-tuning. We just need to write prompts, and the model will be able to.\n\nunderstand your prompt, okay. We will discuss all these topics in this lecture. Okay, so I strongly suggest that you guys read this wonderful survey paper. This is a survey paper written by Graham Neubig and his team from CMU. And this is, you know, this rightly and nicely reflects the, you know, Different prompting strategies and the kind of evolution of the overall. You know, prompt, you know, prompt as a kind of aspect overall, how it evolves, right? How do we quantify different components of a prompt, and so on and so forth?\n\nIt's a very nice survey paper that I strongly recommend you read. Okay, so you know there have been, kind of, we have seen. We have witnessed that there has been a kind of war, right? Among all these giant industries, such as Meta, OpenAI, and Google, They have been building larger and larger models with more and more parameters. and so",
|
| 8 |
+
"Parameter efficient fine tuning": "# Summary: Parameter-Efficient Fine-Tuning (PEFT) for Large Language Models\n\nThis lecture by Dinesh Raghu from IBM Research covers efficient methods for fine-tuning Large Language Models (LLMs) without updating all parameters.\n\n## Key Concepts\n\n**Why PEFT is Needed:**\n- Full fine-tuning requires 12-20× model size in memory for optimizer states, gradients, and activations\n- Storage overhead: each task requires saving a full model checkpoint (e.g., 350GB)\n- In-context learning (prompting) has limitations: lower accuracy than fine-tuning, sensitivity to prompt wording, and high inference costs\n\n**Main PEFT Techniques:**\n\n1. **Prompt Tuning (Soft Prompting)**\n - Reserves special trainable tokens in the input while freezing all model weights\n - Extremely parameter-efficient (~0.1% of model parameters)\n - Enables multi-task serving: different soft prompts can be swapped for different tasks on the same base model\n - Performance approaches full fine-tuning for large models (11B+ parameters)\n\n2. **Prefix Tuning**\n - Adds trainable parameters at every transformer layer, not just the input\n - Uses a bottleneck MLP architecture to prevent training instability\n - Achieves comparable performance to full fine-tuning with only 0.1% trainable parameters\n\n3. **Adapters**\n - Inserts new trainable layers (bottleneck architecture) within each transformer block\n - Down-projects hidden dimensions, applies nonlinearity, then up-projects\n - Achieves good performance with ~3.6% of parameters\n - Drawback: inference latency overhead due to added layers\n\n4. 
**LoRA (Low-Rank Adaptation)**\n - Most popular PEFT method based on intrinsic dimensionality theory\n - Decomposes weight updates into low-rank matrices: ΔW = BA\n - Only modifies query, key, value, and output projection matrices\n - Advantages: no inference latency, can be merged back into base weights\n - Variants: QLoRA (memory-efficient), DyLoRA (dynamic rank selection), LoRA+\n\n**Key Benefits of PEFT:**\n- Reduced memory and compute requirements (can use older GPUs)\n- Faster convergence due to smaller parameter space\n- Less overfitting and catastrophic forgetting\n- Better out-of-domain generalization\n- Minimal storage per task\n\nThe lecture emphasizes that PEFT bridges the gap between inefficient in-context learning and computationally prohibitive full fine-tuning, making LLM adaptation accessible and practical.\n\n[1](https://ppl-ai-file-upload.s3.amazonaws.com/web/direct-files/attachments/52028987/a8512587-2c43-4219-84a6-bc60a1065297/paste.txt)",
|
| 9 |
+
"Incontext Learning": "welcome everyone this is the first screencast in our series on in context learning this series is a kind of companion to the one that we did on information retrieval the two series come together to help you with homework 2 and bake off two which is focused on few shot open domain question answering with frozen retrievers and Frozen large language models to start this series I thought we would just reflect a bit on the origins of the idea of in context learning which is really a story of how NLP got to this\n\nstrange and exciting and chaotic moment for the field and maybe also for the society more broadly all credit to the Chomsky bot for bringing us to this moment I'm only joking the the Chomsky bot is a very simple pattern-based language model it's been around since the 90s I believe and with very simple mechanisms it produces Pros that is roughly in the style of the political philosopher and sometimes linguist Noam Chomsky it produces prose that Delights and maybe informs us and the underlying mechanisms are very\n\nsimple and I think that's a nice reminder about what all of these large language models might be doing even in the present day but I'm only joking although it's only partly a joke I think when we think about precedence for in context learning it is worth mentioning that in the pre-deep learning era engram based language models very sparse large language models were often truly massive for example brands at all 2007 use a 300 billion parameter language model trained on 2 trillion tokens of text to help\n\nwith machine translation that is a very large and very powerful mechanism with a different character from the large language models of today but it is nonetheless worth noting that they played an important role in a lot of different fields way back when I think for in context learning as we know it now the earliest paper as far as I know is the DECA NLP paper this is McCann adult 2018. 
they do multitask training with task instructions that are natural language questions and that does seem that like the origin of the idea\n\nthat with freeform natural language instructions we could essentially end up with artifacts that could do multiple things guided solely by text and then it's worth noting also that in the GPT paper Radford at all 2018 you can find buried in there some tentative proposals to do prompt-based uh experiments with that model but the real origins of the ideas again as far as I know are Radford at all 2019 this is the gpt2 paper and let me just show you some Snippets from this paper it's really inspiring\n\nhow much they did they say at the start we demonstrate language models can perform Downstream tasks in a zero shot setting without any parameter or architecture modification so there you see this idea of using Frozen models prompting them and seeing if they will produce interesting behaviors they looked at a bunch of different tasks for summarization they say to induce summarization Behavior we add the text tldr after the art",
|
| 10 |
+
"Prompting methods": "welcome everyone this is the first screencast in our series on in context learning this series is a kind of companion to the one that we did on information retrieval the two series come together to help you with homework 2 and bake off two which is focused on few shot open domain question answering with frozen retrievers and Frozen large language models to start this series I thought we would just reflect a bit on the origins of the idea of in context learning which is really a story of how NLP got to this\n\nstrange and exciting and chaotic moment for the field and maybe also for the society more broadly all credit to the Chomsky bot for bringing us to this moment I'm only joking the the Chomsky bot is a very simple pattern-based language model it's been around since the 90s I believe and with very simple mechanisms it produces Pros that is roughly in the style of the political philosopher and sometimes linguist Noam Chomsky it produces prose that Delights and maybe informs us and the underlying mechanisms are very\n\nsimple and I think that's a nice reminder about what all of these large language models might be doing even in the present day but I'm only joking although it's only partly a joke I think when we think about precedence for in context learning it is worth mentioning that in the pre-deep learning era engram based language models very sparse large language models were often truly massive for example brands at all 2007 use a 300 billion parameter language model trained on 2 trillion tokens of text to help\n\nwith machine translation that is a very large and very powerful mechanism with a different character from the large language models of today but it is nonetheless worth noting that they played an important role in a lot of different fields way back when I think for in context learning as we know it now the earliest paper as far as I know is the DECA NLP paper this is McCann adult 2018. 
they do multitask training with task instructions that are natural language questions and that does seem that like the origin of the idea\n\nthat with freeform natural language instructions we could essentially end up with artifacts that could do multiple things guided solely by text and then it's worth noting also that in the GPT paper Radford at all 2018 you can find buried in there some tentative proposals to do prompt-based uh experiments with that model but the real origins of the ideas again as far as I know are Radford at all 2019 this is the gpt2 paper and let me just show you some Snippets from this paper it's really inspiring\n\nhow much they did they say at the start we demonstrate language models can perform Downstream tasks in a zero shot setting without any parameter or architecture modification so there you see this idea of using Frozen models prompting them and seeing if they will produce interesting behaviors they looked at a bunch of different tasks for summarization they say to induce summarization Behavior we add the text tldr after the art",
|
| 11 |
+
"Retrieval Methods": "All right, welcome everyone to the Comet YouTube channel. We are doing a series of guest speakers where we dive into some very fun technical topics related to building and scaling Genai systems. And today I have with us Lena. She is a absolute expert in building rags. I've had some good conversations with her as we prepared for this session about her approaches. And I'm really excited to just go through really some of the best practices around like optimizing retrieval for LMS. We're going to deep\n\ndive into some rag techniques, some advanced stuff here. Um, before we dive in, I want to pass it over to Lena to introduce herself. Thank you Claire for the introduction and uh, hi everyone who is watching the recording. I'm so happy to be here. Uh, my name is Lennena. I'm a founder of pars labs and chatbot and I've been working in this space of chatbot development conversational AI agents since um I think eight years now. Uh my career started in linguistics. I studied theoretical linguistics then AI. I\n\nworked as an NLP research engineer and full stack developer and uh yeah now running a small agency with a team of five now and doing also a lot of um public speaking and sharing my knowledge online with people um running courses on AI automation and things like that. Uh so hopefully I will have something interesting to share uh with you today. >> All right well let's dig into some deep technical topics. Um, I wanted to dive into the advanced techniques for improving rags. I think there's a few\n\nideas that you had here. Um, and if you want to share screen, uh, feel free. We can, uh, pull up some slides here. But let's dive into some of these more advanced techniques in regards to just improving your RAG systems. >> Uh, yes. Um, what should we start with? >> Let's see. I'm thinking about optimizing the retrieval quality. Um, >> okay. Let me share my screen and then we'll just see where it leads us. 
So I prepared a lot of different diagrams uh with different techniques\n\n>> um in a random order. So you are welcome. >> Thank you. I love this. This is a gold mine of information. >> Uh yeah. So let's start with um maybe uh this um I classified it as improving a retrieval quality trick. So uh in the past we used intent detection when we were building chatbots. So instead of generating anything for anything we had to make data sets for okay this type of question is about salary this type of question is I want to talk to customer support. We had all\n\nthose categories and um I don't see that often being used yet with newer teams who just joined this whole chatbot development. So I want to bring it back. Uh so instead of just generating the response using LLM, you get the user question. So what's the salary? Then you predict an intent and you get an an answer that your SMMES have written. So the answer that's in your database and it says the salary for this position is salary. And then you paste this information uh to the prompt and then\n\nyou generate an answer o",
|
| 12 |
+
"Retrieval Augmented Generation": "### Lecture Summary: Retrieval-Augmented Generation (RAG) and Contextualization of Language Models\n\nWelcome everyone. Today we’ll discuss **retrieval augmentation** — one of the most active areas in modern NLP. Our speaker, Adella, is CEO of Contextual AI, an enterprise LLM company, an adjunct professor at Stanford, and former head of research at Hugging Face and Facebook AI. His research focuses on machine learning and NLP, especially on language understanding, generation, and evaluation.\n\n---\n\n### 1. The Age of Language Models\n\nWe live in the era of large language models (LLMs). However, **language models** are not a recent invention—neural language modeling dates back to the 1990s. The idea is simple: given a sequence of tokens, predict the next token. Early versions (e.g., Bengio et al., 2003) already had embeddings and similar formulations to today’s models. What changed is scale.\n\nThe main breakthrough of **ChatGPT** wasn’t the model architecture—it was the **user interface**. Previously, using a language model required strange prompt engineering. ChatGPT solved this through *instruction tuning* and *reinforcement learning from human feedback (RLHF)*, allowing people to simply “ask” the model naturally.\n\n---\n\n### 2. Problems with Pure Language Models\n\nEven with good interfaces, LLMs have serious issues:\n\n* **Hallucination:** Models generate incorrect facts with high confidence.\n* **Attribution:** We don’t know *why* a model produced an answer.\n* **Staleness:** Models become outdated quickly.\n* **Editing:** We can’t easily revise or delete knowledge (e.g., for GDPR compliance).\n* **Customization:** Hard to adapt models to specific domains or private data.\n\nThese limitations make LLMs unreliable for enterprise or high-accuracy applications.\n\n---\n\n### 3. 
Enter Retrieval-Augmented Generation (RAG)\n\nThe solution many have turned to is **RAG** — connecting a generator (LLM) with an **external retriever** or **memory**. Instead of relying solely on parameters, the model can look up information dynamically.\n\nThink of **closed-book vs open-book exams**:\n\n* Closed-book → memorize everything (parametric LMs).\n* Open-book → look up relevant facts when needed (RAG).\n\nThis architecture has two main parts:\n\n1. **Retriever:** Fetches relevant documents or passages from an external database.\n2. **Generator:** Takes the query and retrieved context to produce an answer.\n\nThis approach gives **updatable, grounded, and customizable** models that hallucinate less and can cite their sources.\n\n---\n\n### 4. Retrieval Foundations\n\nEarly retrieval methods used **sparse retrieval** (e.g., TF-IDF, BM25).\nBM25 scores documents by term frequency and inverse document frequency, emphasizing distinctive words. It’s fast and efficient on CPUs but fails with synonyms or paraphrases.\n\n**Dense retrieval** (e.g., DPR, ORQA) replaces sparse counts with **embedding-based similarity** using models like BERT. Each document and query is encoded into a dense vector; retrieval is done v",
|
| 13 |
+
"Quantization": "Alright, hello everyone. So, today we will be talking about quantization, pruning, and distillation. So, these all fall under the same umbrella, and maybe once I go into the introduction, It will be clear why we are discussing these three topics together. So, like we discussed last time, the reason why we wanted to do theft was Because of how the model sizes have been increasing over time. And this is, again, a recap of that. Over time, the size of the model is increasing exponentially. It is not just the size but also the performance of these models.\n\nare also increasing over time. So, here we see that these are the test laws for various language modeling tasks. on different datasets and you see that they are decreasing considerably Based on the number of parameters that are in the large language model. So now, what is the flip side of having such large models during inference? So, the first thing, as we discussed last time, is that the bigger the model is, You will have to buy new hardware to support them. In a sort of way, it is not very friendly to keep buying hardware.\n\nAnd when the model keeps growing over time. So, this also puts a cap on how many organizations can actually run these LLM inferences. So, the GPU requirement is going to get larger and One of the biggest problems in deployment is latency. So, the larger the model is, the more time it will take to come back. with a completion for a given prompt. Now, let's say that you have a chatbot that has been deployed. whose backbone is an LLM, then having to wait for 30 or 40 seconds, It seems really difficult at this point in time when we are so used to.\n\nGetting replies very quickly. And the way the LLM field is progressing now, People are not just using a single LLM call per sort of response. For example, there is now a new paradigm called agentic behavior. 
where once a language model responded, You ask the same language model or a different language model to reflect on the output. or even sometimes execute the output, for example, if the model generates a code, you execute the code, Get the outputs and then make the model reflect on the output.\n\nAnd then decide whether there is an execution error; should it redo things. And finally, you give back the answer. So now, if you have to make multiple LLM inferences, to just get back with one output. Then that's going to increase latency even more. So, latency is one of the biggest concerns. Third is the inference cost. If you have an application deployed that uses LLMs, Of course, you'll be worried about how much money. You're going to spend to serve a single user. And the money that a single user is going to pay you.\n\nshould be more than what you invest for that user. So, if LLM is for slightly more improvement in accuracy, If you have to spend a lot more, then it does not seem commercially viable. So, of course, the inference cost is going to be one of the biggest dimensions. And finally, sustainability and environmental concerns. So, yo",
|
| 14 |
+
"Mixture of Experts Model": "## **Scaling Transformers Through Sparsity**\n\n### **1. Motivation**\n\nThe driving idea behind this research is **scaling**. In the deep learning community, performance has been shown to improve predictably with model size and compute, as outlined in *OpenAI’s 2020 paper “Scaling Laws for Neural Language Models.”*\nThese scaling laws hold across several orders of magnitude, demonstrating that **larger models are more sample-efficient**: for a fixed compute budget, it is better to train a **larger model for fewer steps** than a smaller one for longer.\n\nSo far, most scaling has relied on **dense models**, where every parameter participates in every forward pass. But this is expensive and inefficient.\nToday’s discussion explores a new axis of scaling: **sparsity** — models where different inputs activate **different subsets of weights**, performing **adaptive computation** depending on the input.\n\n---\n\n### **2. What Is Sparsity?**\n\nSparsity here doesn’t mean pruning or zeroing weights, but **conditional computation**:\neach input token is processed by a subset of the network — “experts” — chosen dynamically.\n\nThis idea dates back to 1991’s *Adaptive Mixtures of Local Experts*, revived in modern NLP by Noam Shazeer and colleagues at Google with **Mixture of Experts (MoE)** for LSTMs.\nThe architecture has:\n\n* Several **experts**, each a small feed-forward network.\n* A **router (gating network)** that predicts which experts to send each token to, using a softmax distribution.\n* The output is a weighted mixture of the selected experts.\n\nMoE proved successful in machine translation but had issues like communication cost and training instability.\nThe **Switch Transformer** simplified this by sending each token to **only its top-1 expert**, reducing both cost and complexity.\n\n---\n\n### **3. 
The Switch Transformer**\n\nA Switch Transformer modifies the Transformer block by replacing some feed-forward layers with **Switch Layers**:\n\n* A **router** decides which expert each token goes to.\n* Each token goes to one expert (top-1 routing).\n* The same amount of computation is done overall, but different tokens use different weight matrices.\n\nThis makes computation adaptive while keeping the FLOPs (floating-point operations) roughly constant.\n\n---\n\n### **4. Key Improvements for Sparse Training**\n\nSparse models can be unstable to train. The team made several innovations to stabilize and optimize performance:\n\n#### (a) **Selective Precision**\n\n* Training in low precision (bfloat16) improves speed but can cause divergence due to numerical instability, especially in routers (softmax/exponentiation).\n* Casting router computations to **float32** (while keeping others in bfloat16) solved this without meaningful speed loss.\n\n#### (b) **Initialization Scaling**\n\n* Default initializations made training unstable.\n* Simply reducing the initialization scale significantly improved convergence and performance.\n\n#### (c) **Expert Dropout Regularization**\n\n* Sparse models with many param",
|
| 15 |
+
"Agentic AI": "my name is inso so uh today uh we like to uh go over agent AI aent language model as a progression of language model usage so here is the outline of the today's talk uh we'll go over uh the overview of language model and how we use and then and the common limitations and then um some of the method that improves towards this common limitation and then we'll transition it to uh what is the Agent B language model and its design patterns so uh language model is a machine learning model that predicts the\n\nnext coming word given the input text as in this example if the input is the the students open there then um language model uh can predict what's the most likely word coming uh next as a next word so if the language model is trained with the large corpers it is a predict it is generating the probability of next coming word in this example uh as you could see books and laptops have a high higher probability than other other words in the vocabulary so um the so the completion of this whole sentence\n\ncould be uh the students open their books and then if you want to keep generating um the what's coming next then we can uh uh turn them in as an input and then uh um put it into the language model and then language model continuously generating the next coming word then how uh these uh language models are trained largely uh two two parts pre-training part and then posttraining part and then um first pre-training portion is the one that language models are trained with lot with large copers uh text are collected\n\nfrom internet or books or different type of text publicly available text and then trained with the next token or next word prediction objectives so once the models uh is finished in this pre-training stage models are fairly good at predicting um any words coming uh next as a word um given the inputs um however um this type uh the pre-train model uh itself is um is not easy to use so the hence the posttraining uh steps are coming and then these uh 
post trining stage um uh would includes uh instruction following training as well\n\nas reinforcement learning with human feedback and what this uh training stage uh means is uh um we could prepare a data set such a way that the specific instruction or question and then the answers or the uh generated output that is what the uh user would expect or uh more uh more uh um uh related to the questions and answers so that's how uh the models are trained so that it's easier to use and then also it'll respond to a specific uh and then uh once this done and then uh additional training uh method uh is uh\n\num uh aligning to Humane preference by using uh reinforcement learning with human feedback which uhu U is using human uh preference to align the model by using uh rewards schemes and um let's take a quick look really quick look on the instruction data set this is the template that uh we uh uh we would use to train the model in instruction following training phase as you can see uh there's a specific uh instructions w",
|
| 16 |
+
"Multimodal LLMs": "um Hello thank you all for joining CS uh 25 Transformers today uh for today's talk we have Ming ding a research scientist at jeu AI based in Beijing he obtained his bachelor's and doctoral degrees at tingua University and he does research on multimodal generative models and pre-training Technologies um he has LED or participated in the research work Works about multimodal generative models such as Cog View and Cog video and multimodal understanding models such as Cog uh VM and Cog agent uh for today's\n\nattendance the attendance form is up on the course website and if you have any questions ask them through slido s l d o and for the code you just have to input cs25 um thank you Ming for today's talk and I'm going to pass it off to you thank you for the instructors of cs25 to is very happy to gave a talk in Stanford University about multimodality in training and uh uh actually I have checked the uh all the previous talks in cs25 and uh they are really diverse topics someone share the intuition in\n\ntheir research about PR training someone shared recent Works about maybe M Moe and some other technical uh actually I'm working in a uh large language model company in China and our company working on print training and uh maybe there's lots of different area uh from a large langage model and multi modality model and generative model diffusion and uh tatto Speech something like that so uh I Le all the multimodality model research in J AI so I will uh share lots of different topics in in in this talk um some some of them\n\nmay be not very familiar to you so uh yeah it's okay but you can uh get more information of the the area uh yeah I will talk about several aspects of Transformers and I will generally follow the history of large language model and say why are we here it's about large language model introduction and history uh and how did we get here it's about the some practical techniques for training large lar langage models and what are we 
working on it's about the last one year uh the real language\n\nmodels and other techniques in the uh papers of all the uh V language model community and finally I will talk about the some possible and valuable direction for research in multimodality okay okay well uh I will share three moments uh I think the most important three moments in the development of language model uh the first moment is called Bo moment actually I get I got into the area at this moment it's very uh honored that I'm the first among the first group of people who publish paper on the next year ACL when\n\nB came out and at that time since is is we don't really know what is language modeling so at that time nearly all the people talking about uh how can we get a better self-supervised method for an option uh at that time a common opinion is mask language model is just for is is good at understanding the the TX and GPT the auto regressive model is better for tax generations and T5 maybe can U can do the B but is redundant and that's true but uh uh n",
|
| 17 |
+
"Vision Language Models": "## **Vision–Language Models (VLMs)**\n\n### **1. Introduction and Motivation**\n\nVision–language models (VLMs) are systems that **learn jointly from images and text**, enabling understanding and reasoning across both modalities. These models can describe images, answer visual questions, classify objects in an open vocabulary, and perform grounding or retrieval tasks — all using a shared understanding between vision and language.\n\nThe talk is divided into three parts:\n\n1. **Foundations and Early Models** – how it started (around 2021)\n2. **Cross-Modal Models** – modern multimodal transformers\n3. **Applications and Outlook** – where VLMs are being used and what’s next\n\nThe goal is to trace how we moved from basic dual-encoder models like **CLIP** to modern multimodal systems such as **Gemini**, and how this shift is transforming research and real-world applications.\n\n---\n\n## **2. What Is a Vision–Language Model?**\n\nA VLM jointly processes both **images and text**.\nInput: image(s) + text(s)\nOutput: typically text (caption, answer, label, etc.)\n\nWhile some models can also *generate* images, this talk focuses on those that produce **text outputs**.\n\nTo design a VLM, we must decide:\n\n* How to **encode** images and text (shared vs separate architectures)\n* When and how to **fuse** the modalities\n* What **losses** to use (contrastive, captioning, etc.)\n* Whether to train from **scratch** or **fine-tune** pretrained models\n* What kind of **data** to use: paired (image–text), interleaved, or unpaired\n\n---\n\n## **3. 
Dual-Encoder Models: The Beginning**\n\n### **3.1 The Idea**\n\nThe simplest form of VLMs are **dual encoders**:\n\n* An **image encoder** and a **text encoder**, each processing its own modality\n* The two encoders only interact **at the loss level** — their final embeddings are compared to learn alignment.\n\nThis structure laid the foundation for large-scale models like **CLIP (OpenAI)** and **ALIGN (Google)**, both published in early 2021.\n\n### **3.2 CLIP: Connecting Images and Text**\n\n**CLIP (Contrastive Language–Image Pretraining)** became the turning point for multimodal learning.\n\n**Training setup:**\n\n* 400 million **image–text pairs** scraped from the web.\n* Train two encoders (ViT for images, Transformer for text) from scratch.\n* Use a **contrastive loss** to bring matching image–text pairs closer and push others apart.\n\nThis simple recipe led to highly transferable representations and *open-vocabulary* capabilities — allowing classification without retraining on new classes.\n\n---\n\n## **4. Contrastive Learning in CLIP**\n\n### **4.1 Principle**\n\nContrastive learning teaches the model to:\n\n* **Maximize similarity** between two *positive* samples (e.g., an image and its true caption)\n* **Minimize similarity** with *negative* samples (other image–text pairs in the batch)\n\nIn formula form, the **InfoNCE loss** compares one positive pair to all others using a softmax over cosine similarities.\n\n### **4.2 Implementation Details**\n\n* **Normalize** embeddings",
|
| 18 |
+
"Policy learning using DQN": "Hello everyone, welcome back to the course on LLMs. So we were discussing the alignments of large language models. And in the last class, we discussed how you know. Human-generated feedback can be injected into this model for further refining. So, specifically, we discussed that we have a policy model. The policy model is the LLM that you want to refine. that you want to fine-tune. And then this policy model will generate certain output. This output will be scored by a reward model. This reward model is another LLM.\n\nSo, it will produce a reward and based on this reward, we will further refine this policy model. We also discussed that only reward maximization is not enough. Because what happens if we only maximize the reward? What would end up happening is that the policy model would start hacking the reward model. meaning that it will start producing such responses for which The reward model will produce higher reward values. But the responses may not be realistic. For example, the policy model will start producing a lot of emoticons.\n\nIt will start producing sentences that are verbose, lengthy, and so on. Not to the point, and so on and so forth, which you do not want. Therefore, to address this reward hacking loophole, what you do? you have another component in the objective function. Which do you want to minimize, and what is this component? This component is basically the divergence scale between the old policy. And the updated policy, the old LLM, and the updated LLM. So, you do not want the updated LLM to be too far. from the starting LLM.\n\nSo, we discussed maximizing the reward, which is the expected reward. that you obtain given a policy. So, the policy model is θ. So, this is parameterized by θ. You essentially sample, given a prompt x. this policy model will generate a y and for this y, You have this reward. So, for example, x is the prompt that it is. 
Where is the Taj Mahal located, and let us say that y is the response? Let us say the response is, \"The Taj Mahal is located in Uttar Pradesh.\" The Taj Mahal is located in France and so on and so forth.\n\nand based on that you give high reward or low reward. And this second term, this term, is essentially the KL divergence between The updated policy model and the reference policy model. So, pi theta and pi ref. So, the reference LLM policy model is the one. from which you started your reward maximization process. These two components will be combined. So, you want to maximize rewards and minimize scale divergence. And there is this beta scaling factor. Now, this lambda is essentially responsible for scaling these two components.\n\nIf you want to give more weight to scale divergence, You increase the value of lambda, and so on and so forth. So this is what we discussed. So now the question is or given this regularized reward maximization Why is this a regularized reward? Because this component... You can think of this as a regularizer. This is your objective, and this is your regularizer. ",
|
| 19 |
+
"RLHF": "Incredibly powerful and a ton of people were just able to download this and use this on their own and that was transformative on how people viewed machine learning as a technology that interfaced with people's lives and we at hug and face kind of see this as a theme that's going to continue to accelerate as time goes on and there's kind of a lot of questions on Where is this going and kind of how do these tools actually work and one of the big things that has come up in recent years is that these machine learning models can fall short which is they're not perfect and they have some really interesting failure modes so on the left you could see a snippet from chat GPT which uh if you've used chat gbt there's these filters that are built in and essentially if you ask it to say like how do I make a bomb it's going to say I can't do this because I'm a robot I don't know how to do this and this seems harmful but what people have done is that they have figured out how to jailbreak this this agent in a way which is you kind of tell it I have a certain I'm a playwriter how do I do this and you're a character in my play what happens and there's all sorts of huge issues around this where we're trying to make sure these models are safe but there's a long history of failure and challenges with interfacing in society and a like fair and safe Manner and on the right are two a little bit older examples where there's Tay which is a chatbot from Microsoft that was trying to learn in the real world and by interacting with humans and being trained on a large variety of data without any grounding and what values are it quickly became hateful and was turned off and then a large history of it field studying bias in machine learning algorithms and data sets where the by the data and the algorithm often reflect biases of their designers and where the data was created from so it's kind of a question of like how do we actually use machine learning models where we have the goals of 
mitigating these issues and something that we're going to come and talk to in this talk is is reinforcement learning a lot so I'm just going to kind of get the lingo out of the way for some people that might not be familiar with deeprl essentially reinforcement learning is a mathematical framework when you hear RL you should think about this is kind of like a set of math problems that we're looking at that are constrained and in this framework we can study a lot of different interactions in the world so some terminology that we'll revisit again and again is that there's an agent interacting with an environment and the agent interacts with the environment by taking an action and then the environment returns two things called the state and the reward the reward is the objective that we want to optimize and the state is just kind of a representation of the world at that current time index and the agent uses something called a policy to map from that state to an action and the beauty of this is that "
|
| 20 |
+
}
|
backend/database.py
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
|
| 5 |
+
# HF Native Persistence: a mounted /data volume indicates a Hugging Face
# Space with persistent storage; otherwise data lives next to this module.
IF_HF_PERSISTENT = os.path.exists('/data')

if IF_HF_PERSISTENT:
    DB_FILE = '/data/db.json'
    _banner = f"📡 HF Native Persistence: Active. Storing data in {DB_FILE}"
else:
    DB_FILE = os.path.join(os.path.dirname(__file__), 'data', 'db.json')
    _banner = f"💻 Local Persistence: Active. Storing data in {DB_FILE}"
print(_banner)

# Make sure the directory holding DB_FILE exists before any read/write.
os.makedirs(os.path.dirname(DB_FILE), exist_ok=True)
|
| 16 |
+
|
| 17 |
+
def init_db():
    """Create DB_FILE with an empty schema, only if it does not exist yet."""
    if os.path.exists(DB_FILE):
        return
    empty_schema = {
        "users": [],
        "learning_sessions": {},
        "polylines": {},
        "summaries": [],
        "bookmarks": {},  # session_id -> list of resource_ids
        "notes": {},      # session_id -> list of note objects
        "lectures": [],   # list of lecture objects
    }
    save_db(empty_schema)
|
| 29 |
+
|
| 30 |
+
def reset_db():
    """Delete the database file (if present) and recreate the empty schema."""
    if os.path.exists(DB_FILE):
        os.remove(DB_FILE)
    # File is now guaranteed absent, so init_db() will write a fresh schema.
    init_db()
|
| 34 |
+
|
| 35 |
+
def load_db():
    """Load and return the JSON database as a dict, self-healing bad files.

    Guarantees the returned dict has a "bookmarks" key (databases created
    before that field existed are migrated and persisted).

    Bug fix: the previous version called init_db() when the file was empty
    or corrupt, but init_db() is a no-op whenever the file *exists* — so an
    existing-but-empty db.json led to an uncaught JSONDecodeError on the
    re-read. Recovery now goes through reset_db(), which removes the broken
    file before re-initializing.
    """
    if not os.path.exists(DB_FILE):
        init_db()

    try:
        with open(DB_FILE, 'r') as f:
            content = f.read().strip()

        if not content:
            # Existing but empty file: rebuild it from scratch.
            reset_db()
            with open(DB_FILE, 'r') as f2:
                db = json.load(f2)
        else:
            db = json.loads(content)

        # Schema migration for databases predating the bookmarks feature.
        if "bookmarks" not in db:
            db["bookmarks"] = {}
            save_db(db)
        return db
    except (json.JSONDecodeError, FileNotFoundError):
        # Corrupt JSON (or file vanished mid-read): drop it and start fresh.
        reset_db()
        with open(DB_FILE, 'r') as f:
            return json.load(f)
|
| 57 |
+
|
| 58 |
+
def save_db(data):
    """Serialize `data` to DB_FILE as pretty-printed JSON."""
    # The parent directory may not exist yet (e.g. /data was just mounted).
    target_dir = os.path.dirname(DB_FILE)
    os.makedirs(target_dir, exist_ok=True)
    with open(DB_FILE, 'w') as f:
        json.dump(data, f, indent=4)
|
| 63 |
+
|
| 64 |
+
def get_session(session_id):
    """Return the learning session for `session_id`.

    On first access a default session (start position, zero progress, one
    welcome notification) is created and persisted.
    """
    db = load_db()
    sessions = db["learning_sessions"]
    if session_id not in sessions:
        welcome = {
            'id': 'initial',
            'type': 'info',
            'message': 'Welcome back to the Intelligence Hub. Neural Sync complete.',
            # Epoch milliseconds, matching the frontend timestamp format.
            'timestamp': int(datetime.now().timestamp() * 1000),
            'read': False,
        }
        sessions[session_id] = {
            'position': {'x': 10, 'y': 10},
            'level': 0,
            'totalReward': 0,
            'visitedResources': [],
            'notifications': [welcome],
        }
        save_db(db)
    return sessions[session_id]
|
| 84 |
+
|
| 85 |
+
def update_session(session_id, session_data):
    """Overwrite the stored session for `session_id` and persist the change."""
    state = load_db()
    state["learning_sessions"][session_id] = session_data
    save_db(state)
|
| 89 |
+
|
| 90 |
+
def save_summary(summary_data):
    """Append a summary record, creating the list for older db files."""
    state = load_db()
    state.setdefault("summaries", []).append(summary_data)
    save_db(state)
|
| 96 |
+
|
| 97 |
+
def save_polyline(polyline_id, polyline_data):
    """Store (or replace) a polyline under `polyline_id` and persist."""
    state = load_db()
    state["polylines"][polyline_id] = polyline_data
    save_db(state)
|
| 101 |
+
|
| 102 |
+
def get_polylines():
    """Return the mapping of all stored polylines (id -> polyline data)."""
    return load_db()["polylines"]
|
| 105 |
+
|
| 106 |
+
def get_bookmarks(session_id):
    """Return the list of bookmarked resource ids for a session ([] if none)."""
    return load_db()["bookmarks"].get(session_id, [])
|
| 109 |
+
|
| 110 |
+
def add_bookmark(session_id, resource_id):
    """Add `resource_id` to a session's bookmarks (idempotent) and persist."""
    db = load_db()
    bookmarks = db["bookmarks"]
    marks = bookmarks.setdefault(session_id, [])
    if resource_id not in marks:
        marks.append(resource_id)
    save_db(db)
|
| 117 |
+
|
| 118 |
+
def remove_bookmark(session_id, resource_id):
    """Remove `resource_id` from a session's bookmarks; no-op when absent."""
    db = load_db()
    session_marks = db["bookmarks"].get(session_id, [])
    if resource_id in session_marks:
        session_marks.remove(resource_id)
    save_db(db)
|
| 123 |
+
|
| 124 |
+
def get_notes(session_id):
    """Return the notes list for a session ([] if none).

    Older db files stored "notes" as a flat list; that legacy shape is
    reset to the current dict schema and persisted.
    """
    db = load_db()
    notes = db.get("notes")
    if notes is None or isinstance(notes, list):
        db["notes"] = {}
        save_db(db)
    return db["notes"].get(session_id, [])
|
| 130 |
+
|
| 131 |
+
def add_note(session_id, note_data):
    """Append a note for a session and persist; returns the stored note.

    Fills in "id" and "createdAt" when the caller did not provide them.
    Legacy list-shaped "notes" storage is migrated to the dict schema.
    """
    db = load_db()
    if "notes" not in db or isinstance(db["notes"], list):
        db["notes"] = {}
    session_notes = db["notes"].setdefault(session_id, [])

    # Simple ID generation if not provided.
    if "id" not in note_data:
        note_data["id"] = f"note_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
    if "createdAt" not in note_data:
        note_data["createdAt"] = datetime.now().isoformat()

    session_notes.append(note_data)
    save_db(db)
    return note_data
|
| 147 |
+
|
| 148 |
+
def get_lectures():
    """Return all stored lecture objects ([] when the key is absent)."""
    return load_db().get("lectures", [])
|
backend/db.json
ADDED
|
@@ -0,0 +1,2137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"users": [],
|
| 3 |
+
"learning_sessions": {
|
| 4 |
+
"default": {
|
| 5 |
+
"position": {
|
| 6 |
+
"x": 13,
|
| 7 |
+
"y": 7
|
| 8 |
+
},
|
| 9 |
+
"level": 5,
|
| 10 |
+
"totalReward": 900,
|
| 11 |
+
"visitedResources": [
|
| 12 |
+
"18",
|
| 13 |
+
"13",
|
| 14 |
+
"3",
|
| 15 |
+
"16",
|
| 16 |
+
"17",
|
| 17 |
+
"1",
|
| 18 |
+
"10",
|
| 19 |
+
"8",
|
| 20 |
+
"11",
|
| 21 |
+
"6",
|
| 22 |
+
"15",
|
| 23 |
+
"2",
|
| 24 |
+
"7",
|
| 25 |
+
"4",
|
| 26 |
+
"5",
|
| 27 |
+
"14",
|
| 28 |
+
"12",
|
| 29 |
+
"9"
|
| 30 |
+
]
|
| 31 |
+
}
|
| 32 |
+
},
|
| 33 |
+
"polylines": {
|
| 34 |
+
"polyline_0": {
|
| 35 |
+
"id": "polyline_0",
|
| 36 |
+
"name": "hi",
|
| 37 |
+
"path": [],
|
| 38 |
+
"color": "rgba(124, 111, 197, 0.4)",
|
| 39 |
+
"isActive": true,
|
| 40 |
+
"confidence": 0.7,
|
| 41 |
+
"summary": "hiiii"
|
| 42 |
+
},
|
| 43 |
+
"polyline_1": {
|
| 44 |
+
"id": "polyline_1",
|
| 45 |
+
"name": "HI",
|
| 46 |
+
"path": [],
|
| 47 |
+
"color": "rgba(233, 165, 150, 0.4)",
|
| 48 |
+
"isActive": true,
|
| 49 |
+
"confidence": 0.7,
|
| 50 |
+
"summary": "HII"
|
| 51 |
+
},
|
| 52 |
+
"polyline_2": {
|
| 53 |
+
"id": "polyline_2",
|
| 54 |
+
"name": "Hey",
|
| 55 |
+
"path": [],
|
| 56 |
+
"color": "rgba(209, 233, 75, 0.4)",
|
| 57 |
+
"isActive": true,
|
| 58 |
+
"confidence": 0.7,
|
| 59 |
+
"summary": "Helloo",
|
| 60 |
+
"keywords_found": [],
|
| 61 |
+
"module_scores": [
|
| 62 |
+
0.5484583564915045,
|
| 63 |
+
0.46220741047368424,
|
| 64 |
+
0.46279993894614624,
|
| 65 |
+
0.4622026423083014,
|
| 66 |
+
0.48167479720554324,
|
| 67 |
+
0.5185239739531438,
|
| 68 |
+
0.47663528950961,
|
| 69 |
+
0.5005995736735275,
|
| 70 |
+
0.45502656055000007,
|
| 71 |
+
0.4580010883357644,
|
| 72 |
+
0.45637659612686454,
|
| 73 |
+
0.5112952634097881,
|
| 74 |
+
0.5216384772372223,
|
| 75 |
+
0.5355484607706302,
|
| 76 |
+
0.5135652915368838,
|
| 77 |
+
0.46179487959519183,
|
| 78 |
+
0.5207722130936243,
|
| 79 |
+
0.49757484056057777
|
| 80 |
+
]
|
| 81 |
+
},
|
| 82 |
+
"polyline_3": {
|
| 83 |
+
"id": "polyline_3",
|
| 84 |
+
"name": "finetuning",
|
| 85 |
+
"path": [],
|
| 86 |
+
"color": "rgba(121, 211, 92, 0.4)",
|
| 87 |
+
"isActive": true,
|
| 88 |
+
"confidence": 0.7,
|
| 89 |
+
"summary": "I studied fine tuning and RAG",
|
| 90 |
+
"keywords_found": [
|
| 91 |
+
"Fine tuning LLM",
|
| 92 |
+
"Retrieval Augmented Generation"
|
| 93 |
+
],
|
| 94 |
+
"module_scores": [
|
| 95 |
+
0.49860121134816787,
|
| 96 |
+
0.4500395442873277,
|
| 97 |
+
0.471618804424644,
|
| 98 |
+
0.6583558195456436,
|
| 99 |
+
0.4580364051366949,
|
| 100 |
+
0.5067814340383607,
|
| 101 |
+
0.5267787555517994,
|
| 102 |
+
0.45159593526792297,
|
| 103 |
+
0.5354930330453695,
|
| 104 |
+
0.4633757771977945,
|
| 105 |
+
0.7440110046152995,
|
| 106 |
+
0.5165231405422576,
|
| 107 |
+
0.5131620206629675,
|
| 108 |
+
0.5382607288711271,
|
| 109 |
+
0.548322797358488,
|
| 110 |
+
0.4885586287263669,
|
| 111 |
+
0.5297033776671244,
|
| 112 |
+
0.5269894013550447
|
| 113 |
+
]
|
| 114 |
+
},
|
| 115 |
+
"polyline_4": {
|
| 116 |
+
"id": "polyline_4",
|
| 117 |
+
"name": "main",
|
| 118 |
+
"path": [],
|
| 119 |
+
"color": "rgba(213, 61, 92, 0.4)",
|
| 120 |
+
"isActive": true,
|
| 121 |
+
"confidence": 0.7,
|
| 122 |
+
"summary": "I studied fine tuning and RAG",
|
| 123 |
+
"keywords_found": [
|
| 124 |
+
"Fine tuning LLM",
|
| 125 |
+
"Retrieval Augmented Generation"
|
| 126 |
+
],
|
| 127 |
+
"module_scores": [
|
| 128 |
+
0.15775318443775177,
|
| 129 |
+
0.20897206664085388,
|
| 130 |
+
0.20236347615718842,
|
| 131 |
+
0.6945049047470093,
|
| 132 |
+
0.4463333487510681,
|
| 133 |
+
0.12979882955551147,
|
| 134 |
+
0.4455224275588989,
|
| 135 |
+
0.1599089652299881,
|
| 136 |
+
0.10129339247941971,
|
| 137 |
+
0.19735002517700195,
|
| 138 |
+
0.412064354121685,
|
| 139 |
+
0.15838992595672607,
|
| 140 |
+
0.1395527720451355,
|
| 141 |
+
0.06164519488811493,
|
| 142 |
+
0.19151057302951813,
|
| 143 |
+
0.05123066157102585,
|
| 144 |
+
0.046856656670570374,
|
| 145 |
+
0.09634523838758469
|
| 146 |
+
]
|
| 147 |
+
},
|
| 148 |
+
"polyline_5": {
|
| 149 |
+
"id": "polyline_5",
|
| 150 |
+
"name": "Test Summary",
|
| 151 |
+
"path": [
|
| 152 |
+
{
|
| 153 |
+
"x": 7,
|
| 154 |
+
"y": 0
|
| 155 |
+
},
|
| 156 |
+
{
|
| 157 |
+
"x": 14,
|
| 158 |
+
"y": 1
|
| 159 |
+
}
|
| 160 |
+
],
|
| 161 |
+
"color": "rgba(63, 91, 199, 0.4)",
|
| 162 |
+
"isActive": true,
|
| 163 |
+
"confidence": 0.73,
|
| 164 |
+
"summary": "I learned about BERT and Transformers.",
|
| 165 |
+
"keywords_found": [],
|
| 166 |
+
"module_scores": [
|
| 167 |
+
0.41952282190322876,
|
| 168 |
+
0.4550943672657013,
|
| 169 |
+
0.38158124685287476,
|
| 170 |
+
0.22420939803123474,
|
| 171 |
+
0.21500593423843384,
|
| 172 |
+
0.2883783280849457,
|
| 173 |
+
0.08223940432071686,
|
| 174 |
+
0.2931893765926361,
|
| 175 |
+
0.12915080785751343,
|
| 176 |
+
0.09058266133069992,
|
| 177 |
+
0.19895221292972565,
|
| 178 |
+
0.10976134240627289,
|
| 179 |
+
0.10291370004415512,
|
| 180 |
+
0.17602239549160004,
|
| 181 |
+
0.28020530939102173,
|
| 182 |
+
0.2765440344810486,
|
| 183 |
+
0.153752401471138,
|
| 184 |
+
0.1386902928352356
|
| 185 |
+
]
|
| 186 |
+
},
|
| 187 |
+
"polyline_6": {
|
| 188 |
+
"id": "polyline_6",
|
| 189 |
+
"name": "work",
|
| 190 |
+
"path": [
|
| 191 |
+
{
|
| 192 |
+
"x": 9,
|
| 193 |
+
"y": 18
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"x": 1,
|
| 197 |
+
"y": 15
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"x": 6,
|
| 201 |
+
"y": 19
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"x": 7,
|
| 205 |
+
"y": 19
|
| 206 |
+
}
|
| 207 |
+
],
|
| 208 |
+
"color": "rgba(181, 126, 100, 0.4)",
|
| 209 |
+
"isActive": true,
|
| 210 |
+
"confidence": 0.76,
|
| 211 |
+
"summary": "I have learned a lot about rag and preprocessing it was pretty good understanding.",
|
| 212 |
+
"keywords_found": [
|
| 213 |
+
"Retrieval Augmented Generation"
|
| 214 |
+
],
|
| 215 |
+
"module_scores": [
|
| 216 |
+
0.2574118673801422,
|
| 217 |
+
0.2806059718132019,
|
| 218 |
+
0.18621119856834412,
|
| 219 |
+
0.10942331701517105,
|
| 220 |
+
0.26856184005737305,
|
| 221 |
+
0.3550261855125427,
|
| 222 |
+
0.07388029992580414,
|
| 223 |
+
0.3707934617996216,
|
| 224 |
+
0.1708703488111496,
|
| 225 |
+
0.2050769031047821,
|
| 226 |
+
0.5013039708137512,
|
| 227 |
+
0.1576768010854721,
|
| 228 |
+
0.10632561147212982,
|
| 229 |
+
0.12748579680919647,
|
| 230 |
+
0.19742925465106964,
|
| 231 |
+
0.05362231656908989,
|
| 232 |
+
0.07185838371515274,
|
| 233 |
+
0.055319223552942276
|
| 234 |
+
]
|
| 235 |
+
},
|
| 236 |
+
"polyline_7": {
|
| 237 |
+
"id": "polyline_7",
|
| 238 |
+
"name": "Hey",
|
| 239 |
+
"path": [],
|
| 240 |
+
"color": "rgba(102, 156, 166, 0.4)",
|
| 241 |
+
"isActive": true,
|
| 242 |
+
"confidence": 0.7,
|
| 243 |
+
"summary": "heyyy",
|
| 244 |
+
"keywords_found": [],
|
| 245 |
+
"module_scores": [
|
| 246 |
+
0.0,
|
| 247 |
+
0.017389550805091858,
|
| 248 |
+
0.10327564924955368,
|
| 249 |
+
0.09360406547784805,
|
| 250 |
+
0.03690618276596069,
|
| 251 |
+
0.04519070312380791,
|
| 252 |
+
0.038354936987161636,
|
| 253 |
+
0.07477933913469315,
|
| 254 |
+
0.005562630016356707,
|
| 255 |
+
0.0,
|
| 256 |
+
0.0,
|
| 257 |
+
0.0,
|
| 258 |
+
0.05210745334625244,
|
| 259 |
+
0.002015892416238785,
|
| 260 |
+
0.07473935186862946,
|
| 261 |
+
0.0972217470407486,
|
| 262 |
+
0.0,
|
| 263 |
+
0.05581950396299362
|
| 264 |
+
]
|
| 265 |
+
},
|
| 266 |
+
"polyline_8": {
|
| 267 |
+
"id": "polyline_8",
|
| 268 |
+
"name": "DQN Learning",
|
| 269 |
+
"path": [
|
| 270 |
+
{
|
| 271 |
+
"x": 5,
|
| 272 |
+
"y": 2
|
| 273 |
+
},
|
| 274 |
+
{
|
| 275 |
+
"x": 9,
|
| 276 |
+
"y": 10
|
| 277 |
+
}
|
| 278 |
+
],
|
| 279 |
+
"color": "rgba(228, 246, 240, 0.4)",
|
| 280 |
+
"isActive": true,
|
| 281 |
+
"confidence": 0.73,
|
| 282 |
+
"summary": "I studied Policy learning using DQN and reinforcement learning agents.",
|
| 283 |
+
"keywords_found": [
|
| 284 |
+
"Agentic AI",
|
| 285 |
+
"Policy learning using DQN"
|
| 286 |
+
],
|
| 287 |
+
"module_scores": [
|
| 288 |
+
0.3080591907103858,
|
| 289 |
+
0.32459903260072087,
|
| 290 |
+
0.10327564924955368,
|
| 291 |
+
0.3576170255740485,
|
| 292 |
+
0.03690618276596069,
|
| 293 |
+
0.5689638306697211,
|
| 294 |
+
0.038354936987161636,
|
| 295 |
+
0.7058747758467994,
|
| 296 |
+
0.005562630016356707,
|
| 297 |
+
0.0,
|
| 298 |
+
0.0,
|
| 299 |
+
0.0,
|
| 300 |
+
0.05210745334625244,
|
| 301 |
+
1.0,
|
| 302 |
+
0.07473935186862946,
|
| 303 |
+
0.0972217470407486,
|
| 304 |
+
1.0,
|
| 305 |
+
1.0
|
| 306 |
+
]
|
| 307 |
+
},
|
| 308 |
+
"polyline_9": {
|
| 309 |
+
"id": "polyline_9",
|
| 310 |
+
"name": "heyyy",
|
| 311 |
+
"path": [],
|
| 312 |
+
"color": "rgba(144, 174, 69, 0.4)",
|
| 313 |
+
"isActive": true,
|
| 314 |
+
"confidence": 0.7,
|
| 315 |
+
"summary": "heyyy",
|
| 316 |
+
"keywords_found": [],
|
| 317 |
+
"module_scores": [
|
| 318 |
+
0.3080591907103858,
|
| 319 |
+
0.32459903260072087,
|
| 320 |
+
0.6197177293813891,
|
| 321 |
+
0.5326734754360385,
|
| 322 |
+
0.03690618276596069,
|
| 323 |
+
0.5689638306697211,
|
| 324 |
+
0.038354936987161636,
|
| 325 |
+
0.7058747758467994,
|
| 326 |
+
0.005562630016356707,
|
| 327 |
+
0.0,
|
| 328 |
+
0.0,
|
| 329 |
+
0.0,
|
| 330 |
+
0.15920396625167793,
|
| 331 |
+
1.0,
|
| 332 |
+
0.36289105295307106,
|
| 333 |
+
0.5652326095021434,
|
| 334 |
+
1.0,
|
| 335 |
+
1.0
|
| 336 |
+
]
|
| 337 |
+
},
|
| 338 |
+
"polyline_10": {
|
| 339 |
+
"id": "polyline_10",
|
| 340 |
+
"name": "hii",
|
| 341 |
+
"path": [],
|
| 342 |
+
"color": "rgba(78, 166, 73, 0.4)",
|
| 343 |
+
"isActive": true,
|
| 344 |
+
"confidence": 0.7,
|
| 345 |
+
"summary": "hiii",
|
| 346 |
+
"keywords_found": [],
|
| 347 |
+
"module_scores": [
|
| 348 |
+
0.5118434429168701,
|
| 349 |
+
0.7275236248970032,
|
| 350 |
+
0.5195397734642029,
|
| 351 |
+
0.2506510019302368,
|
| 352 |
+
0.5906199216842651,
|
| 353 |
+
0.460385799407959,
|
| 354 |
+
0.9057682156562805,
|
| 355 |
+
0.577440083026886,
|
| 356 |
+
0.44362306594848633,
|
| 357 |
+
0.6044198870658875,
|
| 358 |
+
0.26611554622650146,
|
| 359 |
+
0.5114411115646362,
|
| 360 |
+
0.49167001247406006,
|
| 361 |
+
0.6330015063285828,
|
| 362 |
+
0.38416922092437744,
|
| 363 |
+
0.5338788628578186,
|
| 364 |
+
0.37343931198120117,
|
| 365 |
+
0.7777066826820374
|
| 366 |
+
]
|
| 367 |
+
},
|
| 368 |
+
"polyline_11": {
|
| 369 |
+
"id": "polyline_11",
|
| 370 |
+
"name": "hii",
|
| 371 |
+
"path": [],
|
| 372 |
+
"color": "rgba(199, 83, 212, 0.4)",
|
| 373 |
+
"isActive": true,
|
| 374 |
+
"confidence": 0.7,
|
| 375 |
+
"summary": "nocajco",
|
| 376 |
+
"keywords_found": [],
|
| 377 |
+
"module_scores": [
|
| 378 |
+
0.035517044365406036,
|
| 379 |
+
0.07075861096382141,
|
| 380 |
+
0.021871453151106834,
|
| 381 |
+
0.00989073608070612,
|
| 382 |
+
0.019412130117416382,
|
| 383 |
+
0.0,
|
| 384 |
+
0.11922578513622284,
|
| 385 |
+
0.039588794112205505,
|
| 386 |
+
0.06686414033174515,
|
| 387 |
+
0.09156952798366547,
|
| 388 |
+
0.03569290041923523,
|
| 389 |
+
0.0679011419415474,
|
| 390 |
+
0.03752126544713974,
|
| 391 |
+
0.00478791818022728,
|
| 392 |
+
0.07304392009973526,
|
| 393 |
+
0.011222838424146175,
|
| 394 |
+
0.026982085779309273,
|
| 395 |
+
0.054899897426366806
|
| 396 |
+
]
|
| 397 |
+
},
|
| 398 |
+
"polyline_12": {
|
| 399 |
+
"id": "polyline_12",
|
| 400 |
+
"name": "hii",
|
| 401 |
+
"path": [],
|
| 402 |
+
"color": "rgba(159, 210, 119, 0.4)",
|
| 403 |
+
"isActive": true,
|
| 404 |
+
"confidence": 0.7,
|
| 405 |
+
"summary": "heyy",
|
| 406 |
+
"keywords_found": [],
|
| 407 |
+
"module_scores": [
|
| 408 |
+
0.0,
|
| 409 |
+
0.03199615329504013,
|
| 410 |
+
0.11629796773195267,
|
| 411 |
+
0.09658470749855042,
|
| 412 |
+
0.03200055658817291,
|
| 413 |
+
0.054899267852306366,
|
| 414 |
+
0.03489005193114281,
|
| 415 |
+
0.10348626226186752,
|
| 416 |
+
0.008831196464598179,
|
| 417 |
+
0.0,
|
| 418 |
+
0.0008121263235807419,
|
| 419 |
+
0.0,
|
| 420 |
+
0.051078762859106064,
|
| 421 |
+
0.01544315367937088,
|
| 422 |
+
0.07961554080247879,
|
| 423 |
+
0.11205136775970459,
|
| 424 |
+
0.007900607772171497,
|
| 425 |
+
0.0693073719739914
|
| 426 |
+
]
|
| 427 |
+
},
|
| 428 |
+
"polyline_13": {
|
| 429 |
+
"id": "polyline_13",
|
| 430 |
+
"name": "hello",
|
| 431 |
+
"path": [
|
| 432 |
+
{
|
| 433 |
+
"x": 3,
|
| 434 |
+
"y": 19
|
| 435 |
+
}
|
| 436 |
+
],
|
| 437 |
+
"color": "rgba(230, 228, 186, 0.4)",
|
| 438 |
+
"isActive": true,
|
| 439 |
+
"confidence": 0.71,
|
| 440 |
+
"summary": "rag and everything was good",
|
| 441 |
+
"keywords_found": [
|
| 442 |
+
"Retrieval Augmented Generation"
|
| 443 |
+
],
|
| 444 |
+
"module_scores": [
|
| 445 |
+
0.05804816260933876,
|
| 446 |
+
0.09254653751850128,
|
| 447 |
+
0.10877598077058792,
|
| 448 |
+
0.012610942125320435,
|
| 449 |
+
0.03797341510653496,
|
| 450 |
+
0.015777718275785446,
|
| 451 |
+
0.059225354343652725,
|
| 452 |
+
0.028679510578513145,
|
| 453 |
+
0.055405281484127045,
|
| 454 |
+
0.10398496687412262,
|
| 455 |
+
0.31428444385528564,
|
| 456 |
+
0.0,
|
| 457 |
+
0.0,
|
| 458 |
+
0.002234384650364518,
|
| 459 |
+
0.0014527571620419621,
|
| 460 |
+
0.025889771059155464,
|
| 461 |
+
0.012727380730211735,
|
| 462 |
+
0.0
|
| 463 |
+
]
|
| 464 |
+
},
|
| 465 |
+
"polyline_14": {
|
| 466 |
+
"id": "polyline_14",
|
| 467 |
+
"name": "hello",
|
| 468 |
+
"path": [
|
| 469 |
+
{
|
| 470 |
+
"x": 13,
|
| 471 |
+
"y": 15
|
| 472 |
+
}
|
| 473 |
+
],
|
| 474 |
+
"color": "rgba(53, 84, 159, 0.4)",
|
| 475 |
+
"isActive": true,
|
| 476 |
+
"confidence": 0.71,
|
| 477 |
+
"summary": "rag and many other things such as pretrainig objectives and all were good",
|
| 478 |
+
"keywords_found": [
|
| 479 |
+
"Pre training objectives",
|
| 480 |
+
"Retrieval Augmented Generation"
|
| 481 |
+
],
|
| 482 |
+
"module_scores": [
|
| 483 |
+
0.8379164934158325,
|
| 484 |
+
0.3340357542037964,
|
| 485 |
+
0.17760807275772095,
|
| 486 |
+
0.22517074644565582,
|
| 487 |
+
0.26595813035964966,
|
| 488 |
+
0.23630976676940918,
|
| 489 |
+
0.28637999296188354,
|
| 490 |
+
0.19740065932273865,
|
| 491 |
+
0.15292729437351227,
|
| 492 |
+
0.13514529168605804,
|
| 493 |
+
0.42122960090637207,
|
| 494 |
+
0.10186541825532913,
|
| 495 |
+
0.11643099039793015,
|
| 496 |
+
0.16304181516170502,
|
| 497 |
+
0.1428326964378357,
|
| 498 |
+
0.1396074891090393,
|
| 499 |
+
0.21233661472797394,
|
| 500 |
+
0.17357251048088074
|
| 501 |
+
]
|
| 502 |
+
},
|
| 503 |
+
"polyline_15": {
|
| 504 |
+
"id": "polyline_15",
|
| 505 |
+
"name": "hey ",
|
| 506 |
+
"path": [],
|
| 507 |
+
"color": "rgba(167, 117, 238, 0.4)",
|
| 508 |
+
"isActive": true,
|
| 509 |
+
"confidence": 0.7,
|
| 510 |
+
"summary": "hey",
|
| 511 |
+
"keywords_found": [],
|
| 512 |
+
"module_scores": [
|
| 513 |
+
0.0,
|
| 514 |
+
0.023556426167488098,
|
| 515 |
+
0.07454296201467514,
|
| 516 |
+
0.10734860599040985,
|
| 517 |
+
0.03490018844604492,
|
| 518 |
+
0.061513230204582214,
|
| 519 |
+
0.06893599033355713,
|
| 520 |
+
0.11837682127952576,
|
| 521 |
+
0.01902083493769169,
|
| 522 |
+
0.02021801471710205,
|
| 523 |
+
0.020470205694437027,
|
| 524 |
+
0.0,
|
| 525 |
+
0.05460038036108017,
|
| 526 |
+
0.018192946910858154,
|
| 527 |
+
0.034782107919454575,
|
| 528 |
+
0.10332289338111877,
|
| 529 |
+
0.011397147551178932,
|
| 530 |
+
0.07992972433567047
|
| 531 |
+
]
|
| 532 |
+
},
|
| 533 |
+
"polyline_16": {
|
| 534 |
+
"id": "polyline_16",
|
| 535 |
+
"name": "hey",
|
| 536 |
+
"path": [],
|
| 537 |
+
"color": "rgba(167, 64, 198, 0.4)",
|
| 538 |
+
"isActive": true,
|
| 539 |
+
"confidence": 0.7,
|
| 540 |
+
"summary": "hey",
|
| 541 |
+
"keywords_found": [],
|
| 542 |
+
"module_scores": [
|
| 543 |
+
0.0,
|
| 544 |
+
0.023556426167488098,
|
| 545 |
+
0.07454296201467514,
|
| 546 |
+
0.10734860599040985,
|
| 547 |
+
0.03490018844604492,
|
| 548 |
+
0.061513230204582214,
|
| 549 |
+
0.06893599033355713,
|
| 550 |
+
0.11837682127952576,
|
| 551 |
+
0.01902083493769169,
|
| 552 |
+
0.02021801471710205,
|
| 553 |
+
0.020470205694437027,
|
| 554 |
+
0.0,
|
| 555 |
+
0.05460038036108017,
|
| 556 |
+
0.018192946910858154,
|
| 557 |
+
0.034782107919454575,
|
| 558 |
+
0.10332289338111877,
|
| 559 |
+
0.011397147551178932,
|
| 560 |
+
0.07992972433567047
|
| 561 |
+
]
|
| 562 |
+
},
|
| 563 |
+
"polyline_17": {
|
| 564 |
+
"id": "polyline_17",
|
| 565 |
+
"name": "hey",
|
| 566 |
+
"path": [],
|
| 567 |
+
"color": "rgba(142, 153, 121, 0.4)",
|
| 568 |
+
"isActive": true,
|
| 569 |
+
"confidence": 0.7,
|
| 570 |
+
"summary": "hey",
|
| 571 |
+
"keywords_found": [],
|
| 572 |
+
"module_scores": [
|
| 573 |
+
0.0,
|
| 574 |
+
0.023556426167488098,
|
| 575 |
+
0.07454296201467514,
|
| 576 |
+
0.10734860599040985,
|
| 577 |
+
0.03490018844604492,
|
| 578 |
+
0.061513230204582214,
|
| 579 |
+
0.06893599033355713,
|
| 580 |
+
0.11837682127952576,
|
| 581 |
+
0.01902083493769169,
|
| 582 |
+
0.02021801471710205,
|
| 583 |
+
0.020470205694437027,
|
| 584 |
+
0.0,
|
| 585 |
+
0.05460038036108017,
|
| 586 |
+
0.018192946910858154,
|
| 587 |
+
0.034782107919454575,
|
| 588 |
+
0.10332289338111877,
|
| 589 |
+
0.011397147551178932,
|
| 590 |
+
0.07992972433567047
|
| 591 |
+
]
|
| 592 |
+
},
|
| 593 |
+
"polyline_18": {
|
| 594 |
+
"id": "polyline_18",
|
| 595 |
+
"name": "heyy",
|
| 596 |
+
"path": [],
|
| 597 |
+
"color": "rgba(205, 206, 221, 0.4)",
|
| 598 |
+
"isActive": true,
|
| 599 |
+
"confidence": 0.7,
|
| 600 |
+
"summary": "heyyy",
|
| 601 |
+
"keywords_found": [],
|
| 602 |
+
"module_scores": [
|
| 603 |
+
0.0,
|
| 604 |
+
0.017389550805091858,
|
| 605 |
+
0.10327564924955368,
|
| 606 |
+
0.09360406547784805,
|
| 607 |
+
0.03690618276596069,
|
| 608 |
+
0.04519070312380791,
|
| 609 |
+
0.038354936987161636,
|
| 610 |
+
0.07477933913469315,
|
| 611 |
+
0.005562630016356707,
|
| 612 |
+
0.0,
|
| 613 |
+
0.0,
|
| 614 |
+
0.0,
|
| 615 |
+
0.05210745334625244,
|
| 616 |
+
0.002015892416238785,
|
| 617 |
+
0.07473935186862946,
|
| 618 |
+
0.0972217470407486,
|
| 619 |
+
0.0,
|
| 620 |
+
0.05581950396299362
|
| 621 |
+
]
|
| 622 |
+
},
|
| 623 |
+
"polyline_19": {
|
| 624 |
+
"id": "polyline_19",
|
| 625 |
+
"name": "heyy",
|
| 626 |
+
"path": [],
|
| 627 |
+
"color": "rgba(188, 202, 118, 0.4)",
|
| 628 |
+
"isActive": true,
|
| 629 |
+
"confidence": 0.7,
|
| 630 |
+
"summary": "I love rag with my pre trining",
|
| 631 |
+
"keywords_found": [
|
| 632 |
+
"Retrieval Augmented Generation"
|
| 633 |
+
],
|
| 634 |
+
"module_scores": [
|
| 635 |
+
0.09309744089841843,
|
| 636 |
+
0.12534257769584656,
|
| 637 |
+
0.18787196278572083,
|
| 638 |
+
0.05221807584166527,
|
| 639 |
+
0.053495265543460846,
|
| 640 |
+
0.14688314497470856,
|
| 641 |
+
0.09295167028903961,
|
| 642 |
+
0.13856445252895355,
|
| 643 |
+
0.11419141292572021,
|
| 644 |
+
0.0,
|
| 645 |
+
0.3,
|
| 646 |
+
0.04010733589529991,
|
| 647 |
+
0.0,
|
| 648 |
+
0.045714717358350754,
|
| 649 |
+
0.06977400183677673,
|
| 650 |
+
0.03902996703982353,
|
| 651 |
+
0.010432184673845768,
|
| 652 |
+
0.014520765282213688
|
| 653 |
+
]
|
| 654 |
+
},
|
| 655 |
+
"polyline_20": {
|
| 656 |
+
"id": "polyline_20",
|
| 657 |
+
"name": "huu",
|
| 658 |
+
"path": [],
|
| 659 |
+
"color": "rgba(132, 67, 193, 0.4)",
|
| 660 |
+
"isActive": true,
|
| 661 |
+
"confidence": 0.7,
|
| 662 |
+
"summary": "huuu",
|
| 663 |
+
"keywords_found": [],
|
| 664 |
+
"module_scores": [
|
| 665 |
+
0.06137967109680176,
|
| 666 |
+
0.025739794597029686,
|
| 667 |
+
0.05682278797030449,
|
| 668 |
+
0.0015102589968591928,
|
| 669 |
+
0.0,
|
| 670 |
+
0.0,
|
| 671 |
+
0.0828077420592308,
|
| 672 |
+
0.03640921786427498,
|
| 673 |
+
0.0,
|
| 674 |
+
0.048738643527030945,
|
| 675 |
+
0.0,
|
| 676 |
+
0.07323087751865387,
|
| 677 |
+
0.0,
|
| 678 |
+
0.06557325273752213,
|
| 679 |
+
0.03551648184657097,
|
| 680 |
+
0.0,
|
| 681 |
+
0.007635355927050114,
|
| 682 |
+
0.030927538871765137
|
| 683 |
+
]
|
| 684 |
+
},
|
| 685 |
+
"polyline_21": {
|
| 686 |
+
"id": "polyline_21",
|
| 687 |
+
"name": "hello",
|
| 688 |
+
"path": [],
|
| 689 |
+
"color": "rgba(171, 185, 237, 0.4)",
|
| 690 |
+
"isActive": true,
|
| 691 |
+
"confidence": 0.7,
|
| 692 |
+
"summary": "hello",
|
| 693 |
+
"keywords_found": [],
|
| 694 |
+
"module_scores": [
|
| 695 |
+
0.09007163345813751,
|
| 696 |
+
0.14346976578235626,
|
| 697 |
+
0.09423135966062546,
|
| 698 |
+
0.039209116250276566,
|
| 699 |
+
0.09866251051425934,
|
| 700 |
+
0.07642864435911179,
|
| 701 |
+
0.050438638776540756,
|
| 702 |
+
0.11424312740564346,
|
| 703 |
+
0.06147314980626106,
|
| 704 |
+
0.025780409574508667,
|
| 705 |
+
0.04125567153096199,
|
| 706 |
+
0.06409071385860443,
|
| 707 |
+
0.0545211136341095,
|
| 708 |
+
0.08595173060894012,
|
| 709 |
+
0.03564179316163063,
|
| 710 |
+
0.09568523615598679,
|
| 711 |
+
0.05039823427796364,
|
| 712 |
+
0.07651611417531967
|
| 713 |
+
]
|
| 714 |
+
},
|
| 715 |
+
"polyline_22": {
|
| 716 |
+
"id": "polyline_22",
|
| 717 |
+
"name": "Introduction",
|
| 718 |
+
"path": [
|
| 719 |
+
{
|
| 720 |
+
"x": 4,
|
| 721 |
+
"y": 19
|
| 722 |
+
}
|
| 723 |
+
],
|
| 724 |
+
"color": "rgba(117, 80, 114, 0.4)",
|
| 725 |
+
"isActive": true,
|
| 726 |
+
"confidence": 0.71,
|
| 727 |
+
"summary": "hello",
|
| 728 |
+
"keywords_found": [],
|
| 729 |
+
"module_scores": [
|
| 730 |
+
0.09007163345813751,
|
| 731 |
+
0.14346976578235626,
|
| 732 |
+
0.19423136115074158,
|
| 733 |
+
0.039209116250276566,
|
| 734 |
+
0.09866251051425934,
|
| 735 |
+
0.07642864435911179,
|
| 736 |
+
0.050438638776540756,
|
| 737 |
+
0.11424312740564346,
|
| 738 |
+
0.06147314980626106,
|
| 739 |
+
0.025780409574508667,
|
| 740 |
+
0.04125567153096199,
|
| 741 |
+
0.06409071385860443,
|
| 742 |
+
0.0545211136341095,
|
| 743 |
+
0.08595173060894012,
|
| 744 |
+
0.03564179316163063,
|
| 745 |
+
0.09568523615598679,
|
| 746 |
+
0.05039823427796364,
|
| 747 |
+
0.07651611417531967
|
| 748 |
+
]
|
| 749 |
+
},
|
| 750 |
+
"polyline_23": {
|
| 751 |
+
"id": "polyline_23",
|
| 752 |
+
"name": "heyyy",
|
| 753 |
+
"path": [],
|
| 754 |
+
"color": "rgba(76, 80, 77, 0.4)",
|
| 755 |
+
"isActive": true,
|
| 756 |
+
"confidence": 0.7,
|
| 757 |
+
"summary": "heyyy",
|
| 758 |
+
"keywords_found": [],
|
| 759 |
+
"module_scores": [
|
| 760 |
+
0.0,
|
| 761 |
+
0.017389550805091858,
|
| 762 |
+
0.10327564924955368,
|
| 763 |
+
0.09360406547784805,
|
| 764 |
+
0.03690618276596069,
|
| 765 |
+
0.04519070312380791,
|
| 766 |
+
0.038354936987161636,
|
| 767 |
+
0.07477933913469315,
|
| 768 |
+
0.005562630016356707,
|
| 769 |
+
0.0,
|
| 770 |
+
0.0,
|
| 771 |
+
0.0,
|
| 772 |
+
0.05210745334625244,
|
| 773 |
+
0.002015892416238785,
|
| 774 |
+
0.07473935186862946,
|
| 775 |
+
0.0972217470407486,
|
| 776 |
+
0.0,
|
| 777 |
+
0.05581950396299362
|
| 778 |
+
]
|
| 779 |
+
},
|
| 780 |
+
"polyline_24": {
|
| 781 |
+
"id": "polyline_24",
|
| 782 |
+
"name": "heyyy",
|
| 783 |
+
"path": [],
|
| 784 |
+
"color": "rgba(123, 83, 128, 0.4)",
|
| 785 |
+
"isActive": true,
|
| 786 |
+
"confidence": 0.7,
|
| 787 |
+
"summary": "heyyyyy",
|
| 788 |
+
"keywords_found": [],
|
| 789 |
+
"module_scores": [
|
| 790 |
+
0.0,
|
| 791 |
+
0.015168394893407822,
|
| 792 |
+
0.09509415924549103,
|
| 793 |
+
0.08808448165655136,
|
| 794 |
+
0.055125169456005096,
|
| 795 |
+
0.03878949210047722,
|
| 796 |
+
0.06340508162975311,
|
| 797 |
+
0.0406477153301239,
|
| 798 |
+
0.0,
|
| 799 |
+
0.0,
|
| 800 |
+
0.0,
|
| 801 |
+
0.012255707755684853,
|
| 802 |
+
0.07024804502725601,
|
| 803 |
+
0.005069136619567871,
|
| 804 |
+
0.06734798103570938,
|
| 805 |
+
0.08735782653093338,
|
| 806 |
+
0.0,
|
| 807 |
+
0.03643130138516426
|
| 808 |
+
]
|
| 809 |
+
},
|
| 810 |
+
"polyline_25": {
|
| 811 |
+
"id": "polyline_25",
|
| 812 |
+
"name": "Introduction to transformers",
|
| 813 |
+
"path": [
|
| 814 |
+
{
|
| 815 |
+
"x": 4,
|
| 816 |
+
"y": 19
|
| 817 |
+
}
|
| 818 |
+
],
|
| 819 |
+
"color": "rgba(143, 130, 92, 0.4)",
|
| 820 |
+
"isActive": true,
|
| 821 |
+
"confidence": 0.71,
|
| 822 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 823 |
+
"keywords_found": [
|
| 824 |
+
"Multimodal LLMs"
|
| 825 |
+
],
|
| 826 |
+
"module_scores": [
|
| 827 |
+
0.433811753988266,
|
| 828 |
+
0.4582192003726959,
|
| 829 |
+
0.5164702534675598,
|
| 830 |
+
0.2946617603302002,
|
| 831 |
+
0.37847253680229187,
|
| 832 |
+
0.2834630310535431,
|
| 833 |
+
0.2269679754972458,
|
| 834 |
+
0.23173178732395172,
|
| 835 |
+
0.12193305790424347,
|
| 836 |
+
0.15064716339111328,
|
| 837 |
+
0.20222218334674835,
|
| 838 |
+
0.23586700856685638,
|
| 839 |
+
0.2287927120923996,
|
| 840 |
+
0.2737714946269989,
|
| 841 |
+
0.7030925154685974,
|
| 842 |
+
0.5322507619857788,
|
| 843 |
+
0.2212483286857605,
|
| 844 |
+
0.202763170003891
|
| 845 |
+
]
|
| 846 |
+
},
|
| 847 |
+
"polyline_26": {
|
| 848 |
+
"id": "polyline_26",
|
| 849 |
+
"name": "heyy",
|
| 850 |
+
"path": [
|
| 851 |
+
{
|
| 852 |
+
"x": 4,
|
| 853 |
+
"y": 19
|
| 854 |
+
},
|
| 855 |
+
{
|
| 856 |
+
"x": 7,
|
| 857 |
+
"y": 18
|
| 858 |
+
},
|
| 859 |
+
{
|
| 860 |
+
"x": 18,
|
| 861 |
+
"y": 6
|
| 862 |
+
},
|
| 863 |
+
{
|
| 864 |
+
"x": 9,
|
| 865 |
+
"y": 15
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"x": 9,
|
| 869 |
+
"y": 17
|
| 870 |
+
}
|
| 871 |
+
],
|
| 872 |
+
"color": "rgba(94, 122, 221, 0.4)",
|
| 873 |
+
"isActive": true,
|
| 874 |
+
"confidence": 0.77,
|
| 875 |
+
"summary": "heyyy",
|
| 876 |
+
"keywords_found": [],
|
| 877 |
+
"module_scores": [
|
| 878 |
+
0.0,
|
| 879 |
+
0.017389550805091858,
|
| 880 |
+
0.2032756507396698,
|
| 881 |
+
0.19360406696796417,
|
| 882 |
+
0.13690617680549622,
|
| 883 |
+
0.04519070312380791,
|
| 884 |
+
0.13835494220256805,
|
| 885 |
+
0.07477933913469315,
|
| 886 |
+
0.005562630016356707,
|
| 887 |
+
0.0,
|
| 888 |
+
0.0,
|
| 889 |
+
0.0,
|
| 890 |
+
0.05210745334625244,
|
| 891 |
+
0.002015892416238785,
|
| 892 |
+
0.17473936080932617,
|
| 893 |
+
0.0972217470407486,
|
| 894 |
+
0.0,
|
| 895 |
+
0.05581950396299362
|
| 896 |
+
]
|
| 897 |
+
},
|
| 898 |
+
"polyline_27": {
|
| 899 |
+
"id": "polyline_27",
|
| 900 |
+
"name": "heyy",
|
| 901 |
+
"path": [
|
| 902 |
+
{
|
| 903 |
+
"x": 4,
|
| 904 |
+
"y": 19
|
| 905 |
+
}
|
| 906 |
+
],
|
| 907 |
+
"color": "rgba(74, 171, 224, 0.4)",
|
| 908 |
+
"isActive": true,
|
| 909 |
+
"confidence": 0.71,
|
| 910 |
+
"summary": "heyyy",
|
| 911 |
+
"keywords_found": [],
|
| 912 |
+
"module_scores": [
|
| 913 |
+
0.0,
|
| 914 |
+
0.017389550805091858,
|
| 915 |
+
0.2032756507396698,
|
| 916 |
+
0.09360406547784805,
|
| 917 |
+
0.03690618276596069,
|
| 918 |
+
0.04519070312380791,
|
| 919 |
+
0.038354936987161636,
|
| 920 |
+
0.07477933913469315,
|
| 921 |
+
0.005562630016356707,
|
| 922 |
+
0.0,
|
| 923 |
+
0.0,
|
| 924 |
+
0.0,
|
| 925 |
+
0.05210745334625244,
|
| 926 |
+
0.002015892416238785,
|
| 927 |
+
0.07473935186862946,
|
| 928 |
+
0.0972217470407486,
|
| 929 |
+
0.0,
|
| 930 |
+
0.05581950396299362
|
| 931 |
+
]
|
| 932 |
+
},
|
| 933 |
+
"polyline_28": {
|
| 934 |
+
"id": "polyline_28",
|
| 935 |
+
"name": "introduction to my ",
|
| 936 |
+
"path": [
|
| 937 |
+
{
|
| 938 |
+
"x": 4,
|
| 939 |
+
"y": 19
|
| 940 |
+
}
|
| 941 |
+
],
|
| 942 |
+
"color": "rgba(76, 227, 191, 0.4)",
|
| 943 |
+
"isActive": true,
|
| 944 |
+
"confidence": 0.71,
|
| 945 |
+
"summary": "transformers",
|
| 946 |
+
"keywords_found": [],
|
| 947 |
+
"module_scores": [
|
| 948 |
+
0.024954048916697502,
|
| 949 |
+
0.13416121900081635,
|
| 950 |
+
0.5420874357223511,
|
| 951 |
+
0.07039379328489304,
|
| 952 |
+
0.021926987916231155,
|
| 953 |
+
0.0,
|
| 954 |
+
0.19421246647834778,
|
| 955 |
+
0.0,
|
| 956 |
+
0.0,
|
| 957 |
+
0.0374816469848156,
|
| 958 |
+
0.0,
|
| 959 |
+
0.057812318205833435,
|
| 960 |
+
0.04283241927623749,
|
| 961 |
+
0.0,
|
| 962 |
+
0.061064332723617554,
|
| 963 |
+
0.2036512792110443,
|
| 964 |
+
0.0,
|
| 965 |
+
0.0
|
| 966 |
+
]
|
| 967 |
+
},
|
| 968 |
+
"polyline_29": {
|
| 969 |
+
"id": "polyline_29",
|
| 970 |
+
"name": "hiii",
|
| 971 |
+
"path": [
|
| 972 |
+
{
|
| 973 |
+
"x": 4,
|
| 974 |
+
"y": 19
|
| 975 |
+
},
|
| 976 |
+
{
|
| 977 |
+
"x": 7,
|
| 978 |
+
"y": 18
|
| 979 |
+
}
|
| 980 |
+
],
|
| 981 |
+
"color": "rgba(253, 134, 192, 0.4)",
|
| 982 |
+
"isActive": true,
|
| 983 |
+
"confidence": 0.73,
|
| 984 |
+
"summary": "transformers",
|
| 985 |
+
"keywords_found": [],
|
| 986 |
+
"module_scores": [
|
| 987 |
+
0.024954048916697502,
|
| 988 |
+
0.13416121900081635,
|
| 989 |
+
0.5420874357223511,
|
| 990 |
+
0.17039379477500916,
|
| 991 |
+
0.021926987916231155,
|
| 992 |
+
0.0,
|
| 993 |
+
0.19421246647834778,
|
| 994 |
+
0.0,
|
| 995 |
+
0.0,
|
| 996 |
+
0.0374816469848156,
|
| 997 |
+
0.0,
|
| 998 |
+
0.057812318205833435,
|
| 999 |
+
0.04283241927623749,
|
| 1000 |
+
0.0,
|
| 1001 |
+
0.061064332723617554,
|
| 1002 |
+
0.2036512792110443,
|
| 1003 |
+
0.0,
|
| 1004 |
+
0.0
|
| 1005 |
+
]
|
| 1006 |
+
},
|
| 1007 |
+
"polyline_30": {
|
| 1008 |
+
"id": "polyline_30",
|
| 1009 |
+
"name": "heyy",
|
| 1010 |
+
"path": [
|
| 1011 |
+
{
|
| 1012 |
+
"x": 4,
|
| 1013 |
+
"y": 15
|
| 1014 |
+
},
|
| 1015 |
+
{
|
| 1016 |
+
"x": 11,
|
| 1017 |
+
"y": 19
|
| 1018 |
+
},
|
| 1019 |
+
{
|
| 1020 |
+
"x": 5,
|
| 1021 |
+
"y": 14
|
| 1022 |
+
}
|
| 1023 |
+
],
|
| 1024 |
+
"color": "rgba(109, 71, 172, 0.4)",
|
| 1025 |
+
"isActive": true,
|
| 1026 |
+
"confidence": 0.74,
|
| 1027 |
+
"summary": "heyyy",
|
| 1028 |
+
"keywords_found": [],
|
| 1029 |
+
"module_scores": [
|
| 1030 |
+
0.0,
|
| 1031 |
+
0.017389550805091858,
|
| 1032 |
+
0.10327564924955368,
|
| 1033 |
+
0.09360406547784805,
|
| 1034 |
+
0.13690617680549622,
|
| 1035 |
+
0.04519070312380791,
|
| 1036 |
+
0.13835494220256805,
|
| 1037 |
+
0.07477933913469315,
|
| 1038 |
+
0.005562630016356707,
|
| 1039 |
+
0.0,
|
| 1040 |
+
0.0,
|
| 1041 |
+
0.0,
|
| 1042 |
+
0.05210745334625244,
|
| 1043 |
+
0.002015892416238785,
|
| 1044 |
+
0.17473936080932617,
|
| 1045 |
+
0.0972217470407486,
|
| 1046 |
+
0.0,
|
| 1047 |
+
0.05581950396299362
|
| 1048 |
+
],
|
| 1049 |
+
"strengths": [
|
| 1050 |
+
"Instruction tuning",
|
| 1051 |
+
"Parameter efficient fine tuning",
|
| 1052 |
+
"Multimodal LLMs"
|
| 1053 |
+
],
|
| 1054 |
+
"dominant_topics": [],
|
| 1055 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with . You successfully reinforced concepts in Instruction tuning, Parameter efficient fine tuning. Consider exploring advanced topics in new areas next."
|
| 1056 |
+
},
|
| 1057 |
+
"polyline_31": {
|
| 1058 |
+
"id": "polyline_31",
|
| 1059 |
+
"name": "Introduction to transformers",
|
| 1060 |
+
"path": [],
|
| 1061 |
+
"color": "rgba(112, 160, 225, 0.4)",
|
| 1062 |
+
"isActive": true,
|
| 1063 |
+
"confidence": 0.7,
|
| 1064 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1065 |
+
"keywords_found": [
|
| 1066 |
+
"Multimodal LLMs"
|
| 1067 |
+
],
|
| 1068 |
+
"module_scores": [
|
| 1069 |
+
0.433811753988266,
|
| 1070 |
+
0.4582192003726959,
|
| 1071 |
+
0.4164702296257019,
|
| 1072 |
+
0.2946617603302002,
|
| 1073 |
+
0.37847253680229187,
|
| 1074 |
+
0.2834630310535431,
|
| 1075 |
+
0.2269679754972458,
|
| 1076 |
+
0.23173178732395172,
|
| 1077 |
+
0.12193305790424347,
|
| 1078 |
+
0.15064716339111328,
|
| 1079 |
+
0.20222218334674835,
|
| 1080 |
+
0.23586700856685638,
|
| 1081 |
+
0.2287927120923996,
|
| 1082 |
+
0.2737714946269989,
|
| 1083 |
+
0.7030925154685974,
|
| 1084 |
+
0.5322507619857788,
|
| 1085 |
+
0.2212483286857605,
|
| 1086 |
+
0.202763170003891
|
| 1087 |
+
],
|
| 1088 |
+
"strengths": [],
|
| 1089 |
+
"dominant_topics": [
|
| 1090 |
+
"Multimodal LLMs",
|
| 1091 |
+
"Vision Language Models",
|
| 1092 |
+
"Pre trained models"
|
| 1093 |
+
],
|
| 1094 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with Multimodal LLMs, Vision Language Models. You successfully reinforced concepts in . Consider exploring advanced topics in new areas next."
|
| 1095 |
+
},
|
| 1096 |
+
"polyline_32": {
|
| 1097 |
+
"id": "polyline_32",
|
| 1098 |
+
"name": "Intro",
|
| 1099 |
+
"path": [],
|
| 1100 |
+
"color": "rgba(65, 189, 58, 0.4)",
|
| 1101 |
+
"isActive": true,
|
| 1102 |
+
"confidence": 0.7,
|
| 1103 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1104 |
+
"keywords_found": [
|
| 1105 |
+
"Multimodal LLMs"
|
| 1106 |
+
],
|
| 1107 |
+
"module_scores": [
|
| 1108 |
+
0.433811753988266,
|
| 1109 |
+
0.4582192003726959,
|
| 1110 |
+
0.4164702296257019,
|
| 1111 |
+
0.2946617603302002,
|
| 1112 |
+
0.37847253680229187,
|
| 1113 |
+
0.2834630310535431,
|
| 1114 |
+
0.2269679754972458,
|
| 1115 |
+
0.23173178732395172,
|
| 1116 |
+
0.12193305790424347,
|
| 1117 |
+
0.15064716339111328,
|
| 1118 |
+
0.20222218334674835,
|
| 1119 |
+
0.23586700856685638,
|
| 1120 |
+
0.2287927120923996,
|
| 1121 |
+
0.2737714946269989,
|
| 1122 |
+
0.7030925154685974,
|
| 1123 |
+
0.5322507619857788,
|
| 1124 |
+
0.2212483286857605,
|
| 1125 |
+
0.202763170003891
|
| 1126 |
+
],
|
| 1127 |
+
"strengths": [],
|
| 1128 |
+
"dominant_topics": [
|
| 1129 |
+
"Multimodal LLMs",
|
| 1130 |
+
"Vision Language Models",
|
| 1131 |
+
"Pre trained models"
|
| 1132 |
+
],
|
| 1133 |
+
"ai_analysis": "To enable advanced AI analysis, set the GEMINI_API_KEY environment variable. Based on your path, you have shown strong engagement with Multimodal LLMs, Vision Language Models. You successfully reinforced concepts in . Consider exploring advanced topics in new areas next."
|
| 1134 |
+
},
|
| 1135 |
+
"polyline_33": {
|
| 1136 |
+
"id": "polyline_33",
|
| 1137 |
+
"name": "Intro",
|
| 1138 |
+
"path": [
|
| 1139 |
+
{
|
| 1140 |
+
"x": 5,
|
| 1141 |
+
"y": 16
|
| 1142 |
+
}
|
| 1143 |
+
],
|
| 1144 |
+
"color": "rgba(196, 90, 89, 0.4)",
|
| 1145 |
+
"isActive": true,
|
| 1146 |
+
"confidence": 0.71,
|
| 1147 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1148 |
+
"keywords_found": [
|
| 1149 |
+
"Multimodal LLMs"
|
| 1150 |
+
],
|
| 1151 |
+
"module_scores": [
|
| 1152 |
+
0.433811753988266,
|
| 1153 |
+
0.4582192003726959,
|
| 1154 |
+
0.4164702296257019,
|
| 1155 |
+
0.2946617603302002,
|
| 1156 |
+
0.37847253680229187,
|
| 1157 |
+
0.2834630310535431,
|
| 1158 |
+
0.2269679754972458,
|
| 1159 |
+
0.23173178732395172,
|
| 1160 |
+
0.12193305790424347,
|
| 1161 |
+
0.15064716339111328,
|
| 1162 |
+
0.20222218334674835,
|
| 1163 |
+
0.23586700856685638,
|
| 1164 |
+
0.2287927120923996,
|
| 1165 |
+
0.2737714946269989,
|
| 1166 |
+
0.8030925393104553,
|
| 1167 |
+
0.5322507619857788,
|
| 1168 |
+
0.2212483286857605,
|
| 1169 |
+
0.202763170003891
|
| 1170 |
+
],
|
| 1171 |
+
"strengths": [
|
| 1172 |
+
"Multimodal LLMs"
|
| 1173 |
+
],
|
| 1174 |
+
"dominant_topics": [
|
| 1175 |
+
"Multimodal LLMs",
|
| 1176 |
+
"Vision Language Models",
|
| 1177 |
+
"Pre trained models"
|
| 1178 |
+
],
|
| 1179 |
+
"ai_analysis": "To enable advanced AI analysis, set the GEMINI_API_KEY environment variable. Based on your path, you have shown strong engagement with Multimodal LLMs, Vision Language Models. You successfully reinforced concepts in Multimodal LLMs. Consider exploring advanced topics in new areas next."
|
| 1180 |
+
},
|
| 1181 |
+
"polyline_34": {
|
| 1182 |
+
"id": "polyline_34",
|
| 1183 |
+
"name": "Pre training LLM",
|
| 1184 |
+
"path": [
|
| 1185 |
+
{
|
| 1186 |
+
"x": 5,
|
| 1187 |
+
"y": 17
|
| 1188 |
+
}
|
| 1189 |
+
],
|
| 1190 |
+
"color": "rgba(231, 183, 145, 0.4)",
|
| 1191 |
+
"isActive": true,
|
| 1192 |
+
"confidence": 0.71,
|
| 1193 |
+
"summary": "I have learnt to finetune a pre trained BERT GPT model and i am using these models for sentiment analysis task",
|
| 1194 |
+
"keywords_found": [
|
| 1195 |
+
"Pre trained models"
|
| 1196 |
+
],
|
| 1197 |
+
"module_scores": [
|
| 1198 |
+
0.43252697587013245,
|
| 1199 |
+
0.6987414360046387,
|
| 1200 |
+
0.2524697184562683,
|
| 1201 |
+
0.4963360130786896,
|
| 1202 |
+
0.26733019948005676,
|
| 1203 |
+
0.28303566575050354,
|
| 1204 |
+
0.11330951005220413,
|
| 1205 |
+
0.2320728749036789,
|
| 1206 |
+
0.07975509762763977,
|
| 1207 |
+
0.13868987560272217,
|
| 1208 |
+
0.21347038447856903,
|
| 1209 |
+
0.13678917288780212,
|
| 1210 |
+
0.20242607593536377,
|
| 1211 |
+
0.20470771193504333,
|
| 1212 |
+
0.19388322532176971,
|
| 1213 |
+
0.24398048222064972,
|
| 1214 |
+
0.25268879532814026,
|
| 1215 |
+
0.3274286389350891
|
| 1216 |
+
],
|
| 1217 |
+
"strengths": [
|
| 1218 |
+
"Agentic AI"
|
| 1219 |
+
],
|
| 1220 |
+
"dominant_topics": [
|
| 1221 |
+
"Pre trained models",
|
| 1222 |
+
"Fine tuning LLM",
|
| 1223 |
+
"Pre training objectives"
|
| 1224 |
+
],
|
| 1225 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with Pre trained models, Fine tuning LLM. You successfully reinforced concepts in Agentic AI. Consider exploring advanced topics in new areas next."
|
| 1226 |
+
},
|
| 1227 |
+
"polyline_35": {
|
| 1228 |
+
"id": "polyline_35",
|
| 1229 |
+
"name": "heyy",
|
| 1230 |
+
"path": [
|
| 1231 |
+
{
|
| 1232 |
+
"x": 5,
|
| 1233 |
+
"y": 17
|
| 1234 |
+
}
|
| 1235 |
+
],
|
| 1236 |
+
"color": "rgba(95, 72, 171, 0.4)",
|
| 1237 |
+
"isActive": true,
|
| 1238 |
+
"confidence": 0.71,
|
| 1239 |
+
"summary": "heyyy",
|
| 1240 |
+
"keywords_found": [],
|
| 1241 |
+
"module_scores": [
|
| 1242 |
+
0.0,
|
| 1243 |
+
0.017389550805091858,
|
| 1244 |
+
0.10327564924955368,
|
| 1245 |
+
0.09360406547784805,
|
| 1246 |
+
0.03690618276596069,
|
| 1247 |
+
0.04519070312380791,
|
| 1248 |
+
0.038354936987161636,
|
| 1249 |
+
0.07477933913469315,
|
| 1250 |
+
0.005562630016356707,
|
| 1251 |
+
0.0,
|
| 1252 |
+
0.0,
|
| 1253 |
+
0.0,
|
| 1254 |
+
0.05210745334625244,
|
| 1255 |
+
0.1020158976316452,
|
| 1256 |
+
0.07473935186862946,
|
| 1257 |
+
0.0972217470407486,
|
| 1258 |
+
0.0,
|
| 1259 |
+
0.05581950396299362
|
| 1260 |
+
],
|
| 1261 |
+
"strengths": [
|
| 1262 |
+
"Agentic AI"
|
| 1263 |
+
],
|
| 1264 |
+
"dominant_topics": [],
|
| 1265 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with . You successfully reinforced concepts in Agentic AI. Consider exploring advanced topics in new areas next."
|
| 1266 |
+
},
|
| 1267 |
+
"polyline_36": {
|
| 1268 |
+
"id": "polyline_36",
|
| 1269 |
+
"name": "Agentic AI",
|
| 1270 |
+
"path": [
|
| 1271 |
+
{
|
| 1272 |
+
"x": 5,
|
| 1273 |
+
"y": 17
|
| 1274 |
+
}
|
| 1275 |
+
],
|
| 1276 |
+
"color": "rgba(139, 171, 145, 0.4)",
|
| 1277 |
+
"isActive": true,
|
| 1278 |
+
"confidence": 0.71,
|
| 1279 |
+
"summary": "agents independently solve the problem, each can have differnt loss function",
|
| 1280 |
+
"keywords_found": [
|
| 1281 |
+
"Agentic AI"
|
| 1282 |
+
],
|
| 1283 |
+
"module_scores": [
|
| 1284 |
+
0.05024600028991699,
|
| 1285 |
+
0.0863848477602005,
|
| 1286 |
+
0.0,
|
| 1287 |
+
0.1774979680776596,
|
| 1288 |
+
0.007330421358346939,
|
| 1289 |
+
0.06964121013879776,
|
| 1290 |
+
0.08910828828811646,
|
| 1291 |
+
0.1227140724658966,
|
| 1292 |
+
0.0983947366476059,
|
| 1293 |
+
0.02778339385986328,
|
| 1294 |
+
0.012763900682330132,
|
| 1295 |
+
0.0882045105099678,
|
| 1296 |
+
0.2467341423034668,
|
| 1297 |
+
0.6995577812194824,
|
| 1298 |
+
0.13445383310317993,
|
| 1299 |
+
0.07179756462574005,
|
| 1300 |
+
0.3390352427959442,
|
| 1301 |
+
0.24001526832580566
|
| 1302 |
+
],
|
| 1303 |
+
"strengths": [
|
| 1304 |
+
"Agentic AI"
|
| 1305 |
+
],
|
| 1306 |
+
"dominant_topics": [
|
| 1307 |
+
"Agentic AI",
|
| 1308 |
+
"Policy learning using DQN"
|
| 1309 |
+
],
|
| 1310 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with Agentic AI, Policy learning using DQN. You successfully reinforced concepts in Agentic AI. Consider exploring advanced topics in new areas next."
|
| 1311 |
+
},
|
| 1312 |
+
"polyline_37": {
|
| 1313 |
+
"id": "polyline_37",
|
| 1314 |
+
"name": "Agentic AI, Transformer, CNN",
|
| 1315 |
+
"path": [
|
| 1316 |
+
{
|
| 1317 |
+
"x": 7,
|
| 1318 |
+
"y": 16
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"x": 5,
|
| 1322 |
+
"y": 17
|
| 1323 |
+
}
|
| 1324 |
+
],
|
| 1325 |
+
"color": "rgba(124, 155, 146, 0.4)",
|
| 1326 |
+
"isActive": true,
|
| 1327 |
+
"confidence": 0.73,
|
| 1328 |
+
"summary": "Agentic AI, AI, Artificial Intelligence, Agentic AI has agents, Agents are good, Agentic AI, Agent",
|
| 1329 |
+
"keywords_found": [
|
| 1330 |
+
"Agentic AI"
|
| 1331 |
+
],
|
| 1332 |
+
"module_scores": [
|
| 1333 |
+
0.19596125185489655,
|
| 1334 |
+
0.17878513038158417,
|
| 1335 |
+
0.013299948535859585,
|
| 1336 |
+
0.09019515663385391,
|
| 1337 |
+
0.2148519903421402,
|
| 1338 |
+
0.176703080534935,
|
| 1339 |
+
0.04141182824969292,
|
| 1340 |
+
0.12695123255252838,
|
| 1341 |
+
0.14567424356937408,
|
| 1342 |
+
0.06627196818590164,
|
| 1343 |
+
0.07899114489555359,
|
| 1344 |
+
0.17586567997932434,
|
| 1345 |
+
0.19476987421512604,
|
| 1346 |
+
0.9779932498931885,
|
| 1347 |
+
0.14229676127433777,
|
| 1348 |
+
0.17270609736442566,
|
| 1349 |
+
0.3035743236541748,
|
| 1350 |
+
0.3211010694503784
|
| 1351 |
+
],
|
| 1352 |
+
"strengths": [
|
| 1353 |
+
"Quantization",
|
| 1354 |
+
"Agentic AI"
|
| 1355 |
+
],
|
| 1356 |
+
"dominant_topics": [
|
| 1357 |
+
"Agentic AI",
|
| 1358 |
+
"RLHF",
|
| 1359 |
+
"Policy learning using DQN"
|
| 1360 |
+
],
|
| 1361 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with Agentic AI, RLHF. You successfully reinforced concepts in Quantization, Agentic AI. Consider exploring advanced topics in new areas next."
|
| 1362 |
+
},
|
| 1363 |
+
"polyline_38": {
|
| 1364 |
+
"id": "polyline_38",
|
| 1365 |
+
"name": "heyy",
|
| 1366 |
+
"path": [
|
| 1367 |
+
{
|
| 1368 |
+
"x": 7,
|
| 1369 |
+
"y": 16
|
| 1370 |
+
},
|
| 1371 |
+
{
|
| 1372 |
+
"x": 5,
|
| 1373 |
+
"y": 17
|
| 1374 |
+
}
|
| 1375 |
+
],
|
| 1376 |
+
"color": "rgba(198, 232, 160, 0.4)",
|
| 1377 |
+
"isActive": true,
|
| 1378 |
+
"confidence": 0.73,
|
| 1379 |
+
"summary": "heyey",
|
| 1380 |
+
"keywords_found": [],
|
| 1381 |
+
"module_scores": [
|
| 1382 |
+
0.03077820874750614,
|
| 1383 |
+
0.08723001182079315,
|
| 1384 |
+
0.17077267169952393,
|
| 1385 |
+
0.0844632163643837,
|
| 1386 |
+
0.05345889925956726,
|
| 1387 |
+
0.06896784901618958,
|
| 1388 |
+
0.01981256529688835,
|
| 1389 |
+
0.10914305597543716,
|
| 1390 |
+
0.025269674137234688,
|
| 1391 |
+
0.0,
|
| 1392 |
+
0.0,
|
| 1393 |
+
0.12327592819929123,
|
| 1394 |
+
0.08018720149993896,
|
| 1395 |
+
0.1916126310825348,
|
| 1396 |
+
0.07461867481470108,
|
| 1397 |
+
0.14787648618221283,
|
| 1398 |
+
0.01713082380592823,
|
| 1399 |
+
0.15488959848880768
|
| 1400 |
+
],
|
| 1401 |
+
"strengths": [
|
| 1402 |
+
"Quantization",
|
| 1403 |
+
"Agentic AI"
|
| 1404 |
+
],
|
| 1405 |
+
"dominant_topics": [],
|
| 1406 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with . You successfully reinforced concepts in Quantization, Agentic AI. Consider exploring advanced topics in new areas next."
|
| 1407 |
+
},
|
| 1408 |
+
"polyline_39": {
|
| 1409 |
+
"id": "polyline_39",
|
| 1410 |
+
"name": "heyu",
|
| 1411 |
+
"path": [],
|
| 1412 |
+
"color": "rgba(215, 166, 66, 0.4)",
|
| 1413 |
+
"isActive": true,
|
| 1414 |
+
"confidence": 0.7,
|
| 1415 |
+
"summary": "heyu",
|
| 1416 |
+
"keywords_found": [],
|
| 1417 |
+
"module_scores": [
|
| 1418 |
+
0.025842785835266113,
|
| 1419 |
+
0.06289363652467728,
|
| 1420 |
+
0.10655583441257477,
|
| 1421 |
+
0.059236228466033936,
|
| 1422 |
+
0.04497949779033661,
|
| 1423 |
+
0.05150831118226051,
|
| 1424 |
+
0.08413022756576538,
|
| 1425 |
+
0.10349386930465698,
|
| 1426 |
+
0.019398359581828117,
|
| 1427 |
+
0.023599496111273766,
|
| 1428 |
+
0.0,
|
| 1429 |
+
0.02620738558471203,
|
| 1430 |
+
0.055542171001434326,
|
| 1431 |
+
0.08238955587148666,
|
| 1432 |
+
0.07436030358076096,
|
| 1433 |
+
0.08257163316011429,
|
| 1434 |
+
0.05098576098680496,
|
| 1435 |
+
0.10993840545415878
|
| 1436 |
+
],
|
| 1437 |
+
"strengths": [],
|
| 1438 |
+
"dominant_topics": [],
|
| 1439 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with . You successfully reinforced concepts in . Consider exploring advanced topics in new areas next."
|
| 1440 |
+
},
|
| 1441 |
+
"polyline_40": {
|
| 1442 |
+
"id": "polyline_40",
|
| 1443 |
+
"name": "he7yey",
|
| 1444 |
+
"path": [
|
| 1445 |
+
{
|
| 1446 |
+
"x": 4,
|
| 1447 |
+
"y": 2
|
| 1448 |
+
},
|
| 1449 |
+
{
|
| 1450 |
+
"x": 10,
|
| 1451 |
+
"y": 10
|
| 1452 |
+
}
|
| 1453 |
+
],
|
| 1454 |
+
"color": "rgba(68, 203, 241, 0.4)",
|
| 1455 |
+
"isActive": true,
|
| 1456 |
+
"confidence": 0.73,
|
| 1457 |
+
"summary": "heyyy",
|
| 1458 |
+
"keywords_found": [],
|
| 1459 |
+
"module_scores": [
|
| 1460 |
+
0.0,
|
| 1461 |
+
0.017389550805091858,
|
| 1462 |
+
0.10327564924955368,
|
| 1463 |
+
0.09360406547784805,
|
| 1464 |
+
0.03690618276596069,
|
| 1465 |
+
0.04519070312380791,
|
| 1466 |
+
0.038354936987161636,
|
| 1467 |
+
0.17477934062480927,
|
| 1468 |
+
0.005562630016356707,
|
| 1469 |
+
0.0,
|
| 1470 |
+
0.0,
|
| 1471 |
+
0.0,
|
| 1472 |
+
0.05210745334625244,
|
| 1473 |
+
0.002015892416238785,
|
| 1474 |
+
0.07473935186862946,
|
| 1475 |
+
0.1972217559814453,
|
| 1476 |
+
0.0,
|
| 1477 |
+
0.05581950396299362
|
| 1478 |
+
],
|
| 1479 |
+
"strengths": [
|
| 1480 |
+
"Incontext Learning",
|
| 1481 |
+
"Vision Language Models"
|
| 1482 |
+
],
|
| 1483 |
+
"dominant_topics": [],
|
| 1484 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with . You successfully reinforced concepts in Incontext Learning, Vision Language Models. Consider exploring advanced topics in new areas next."
|
| 1485 |
+
},
|
| 1486 |
+
"polyline_41": {
|
| 1487 |
+
"id": "polyline_41",
|
| 1488 |
+
"name": "heyy",
|
| 1489 |
+
"path": [],
|
| 1490 |
+
"color": "rgba(129, 154, 197, 0.4)",
|
| 1491 |
+
"isActive": true,
|
| 1492 |
+
"confidence": 0.7,
|
| 1493 |
+
"summary": "hyeyyyhu",
|
| 1494 |
+
"keywords_found": [],
|
| 1495 |
+
"module_scores": [
|
| 1496 |
+
0.053188253194093704,
|
| 1497 |
+
0.0,
|
| 1498 |
+
0.04981702193617821,
|
| 1499 |
+
0.0,
|
| 1500 |
+
0.07837709784507751,
|
| 1501 |
+
0.0,
|
| 1502 |
+
0.11811202019453049,
|
| 1503 |
+
0.0,
|
| 1504 |
+
0.0,
|
| 1505 |
+
0.0,
|
| 1506 |
+
0.0,
|
| 1507 |
+
0.0,
|
| 1508 |
+
0.0,
|
| 1509 |
+
0.0558195635676384,
|
| 1510 |
+
0.02478315681219101,
|
| 1511 |
+
0.019923537969589233,
|
| 1512 |
+
0.0004506018303800374,
|
| 1513 |
+
0.046636879444122314
|
| 1514 |
+
],
|
| 1515 |
+
"strengths": [],
|
| 1516 |
+
"dominant_topics": [],
|
| 1517 |
+
"ai_analysis": "Based on your path, you have shown strong engagement with . You successfully reinforced concepts in . Consider exploring advanced topics in new areas next."
|
| 1518 |
+
},
|
| 1519 |
+
"polyline_42": {
|
| 1520 |
+
"id": "polyline_42",
|
| 1521 |
+
"name": "heyyu",
|
| 1522 |
+
"path": [
|
| 1523 |
+
{
|
| 1524 |
+
"x": 10,
|
| 1525 |
+
"y": 13
|
| 1526 |
+
}
|
| 1527 |
+
],
|
| 1528 |
+
"color": "rgba(253, 226, 126, 0.4)",
|
| 1529 |
+
"isActive": true,
|
| 1530 |
+
"confidence": 0.71,
|
| 1531 |
+
"summary": "heyuuu",
|
| 1532 |
+
"keywords_found": [],
|
| 1533 |
+
"module_scores": [
|
| 1534 |
+
0.0,
|
| 1535 |
+
0.0,
|
| 1536 |
+
0.1106286495923996,
|
| 1537 |
+
0.04658149555325508,
|
| 1538 |
+
0.027793176472187042,
|
| 1539 |
+
0.0,
|
| 1540 |
+
0.05434393510222435,
|
| 1541 |
+
0.037776097655296326,
|
| 1542 |
+
0.1,
|
| 1543 |
+
0.004652492236346006,
|
| 1544 |
+
0.004580324981361628,
|
| 1545 |
+
0.01859883777797222,
|
| 1546 |
+
0.04390064999461174,
|
| 1547 |
+
0.0656537190079689,
|
| 1548 |
+
0.0459163598716259,
|
| 1549 |
+
0.04920380190014839,
|
| 1550 |
+
0.037917736917734146,
|
| 1551 |
+
0.09046495705842972
|
| 1552 |
+
],
|
| 1553 |
+
"strengths": [
|
| 1554 |
+
"Prompting methods"
|
| 1555 |
+
],
|
| 1556 |
+
"dominant_topics": [],
|
| 1557 |
+
"ai_analysis": "AI Insight: Tell student to improve summary about Retrieval Augmented Generation. be critical."
|
| 1558 |
+
}
|
| 1559 |
+
},
|
| 1560 |
+
"summaries": [
|
| 1561 |
+
{
|
| 1562 |
+
"id": "summary_default_5",
|
| 1563 |
+
"title": "Test Summary",
|
| 1564 |
+
"summary": "I learned about BERT and Transformers.",
|
| 1565 |
+
"keywords_found": [],
|
| 1566 |
+
"totalResources": 18,
|
| 1567 |
+
"visitedResources": 2,
|
| 1568 |
+
"currentLevel": 1,
|
| 1569 |
+
"strengths": [
|
| 1570 |
+
"Pre training objectives",
|
| 1571 |
+
"Pre trained models"
|
| 1572 |
+
],
|
| 1573 |
+
"recommendations": [],
|
| 1574 |
+
"avgDifficulty": 2.0,
|
| 1575 |
+
"totalReward": 100
|
| 1576 |
+
},
|
| 1577 |
+
{
|
| 1578 |
+
"id": "summary_default_6",
|
| 1579 |
+
"title": "work",
|
| 1580 |
+
"summary": "I have learned a lot about rag and preprocessing it was pretty good understanding.",
|
| 1581 |
+
"keywords_found": [
|
| 1582 |
+
"Retrieval Augmented Generation"
|
| 1583 |
+
],
|
| 1584 |
+
"totalResources": 18,
|
| 1585 |
+
"visitedResources": 4,
|
| 1586 |
+
"currentLevel": 2,
|
| 1587 |
+
"strengths": [
|
| 1588 |
+
"Prompt based learning",
|
| 1589 |
+
"Incontext Learning",
|
| 1590 |
+
"Retrieval Methods",
|
| 1591 |
+
"Retrieval Augmented Generation"
|
| 1592 |
+
],
|
| 1593 |
+
"recommendations": [],
|
| 1594 |
+
"avgDifficulty": 2.0,
|
| 1595 |
+
"totalReward": 200
|
| 1596 |
+
},
|
| 1597 |
+
{
|
| 1598 |
+
"id": "summary_default_7",
|
| 1599 |
+
"title": "Hey",
|
| 1600 |
+
"summary": "heyyy",
|
| 1601 |
+
"keywords_found": [],
|
| 1602 |
+
"totalResources": 18,
|
| 1603 |
+
"visitedResources": 0,
|
| 1604 |
+
"currentLevel": 1,
|
| 1605 |
+
"strengths": [],
|
| 1606 |
+
"recommendations": [],
|
| 1607 |
+
"avgDifficulty": 0,
|
| 1608 |
+
"totalReward": 0
|
| 1609 |
+
},
|
| 1610 |
+
{
|
| 1611 |
+
"id": "summary_default_8",
|
| 1612 |
+
"title": "DQN Learning",
|
| 1613 |
+
"summary": "I studied Policy learning using DQN and reinforcement learning agents.",
|
| 1614 |
+
"keywords_found": [
|
| 1615 |
+
"Agentic AI",
|
| 1616 |
+
"Policy learning using DQN"
|
| 1617 |
+
],
|
| 1618 |
+
"totalResources": 18,
|
| 1619 |
+
"visitedResources": 2,
|
| 1620 |
+
"currentLevel": 1,
|
| 1621 |
+
"strengths": [
|
| 1622 |
+
"Pre training objectives",
|
| 1623 |
+
"Pre trained models"
|
| 1624 |
+
],
|
| 1625 |
+
"recommendations": [],
|
| 1626 |
+
"avgDifficulty": 2.0,
|
| 1627 |
+
"totalReward": 100
|
| 1628 |
+
},
|
| 1629 |
+
{
|
| 1630 |
+
"id": "summary_default_9",
|
| 1631 |
+
"title": "heyyy",
|
| 1632 |
+
"summary": "heyyy",
|
| 1633 |
+
"keywords_found": [],
|
| 1634 |
+
"totalResources": 18,
|
| 1635 |
+
"visitedResources": 0,
|
| 1636 |
+
"currentLevel": 1,
|
| 1637 |
+
"strengths": [],
|
| 1638 |
+
"recommendations": [],
|
| 1639 |
+
"avgDifficulty": 0,
|
| 1640 |
+
"totalReward": 0
|
| 1641 |
+
},
|
| 1642 |
+
{
|
| 1643 |
+
"id": "summary_default_10",
|
| 1644 |
+
"title": "hii",
|
| 1645 |
+
"summary": "hiii",
|
| 1646 |
+
"keywords_found": [],
|
| 1647 |
+
"totalResources": 18,
|
| 1648 |
+
"visitedResources": 0,
|
| 1649 |
+
"currentLevel": 1,
|
| 1650 |
+
"strengths": [],
|
| 1651 |
+
"recommendations": [],
|
| 1652 |
+
"avgDifficulty": 0,
|
| 1653 |
+
"totalReward": 0
|
| 1654 |
+
},
|
| 1655 |
+
{
|
| 1656 |
+
"id": "summary_default_11",
|
| 1657 |
+
"title": "hii",
|
| 1658 |
+
"summary": "nocajco",
|
| 1659 |
+
"keywords_found": [],
|
| 1660 |
+
"totalResources": 18,
|
| 1661 |
+
"visitedResources": 0,
|
| 1662 |
+
"currentLevel": 1,
|
| 1663 |
+
"strengths": [],
|
| 1664 |
+
"recommendations": [],
|
| 1665 |
+
"avgDifficulty": 0,
|
| 1666 |
+
"totalReward": 0
|
| 1667 |
+
},
|
| 1668 |
+
{
|
| 1669 |
+
"id": "summary_default_12",
|
| 1670 |
+
"title": "hii",
|
| 1671 |
+
"summary": "heyy",
|
| 1672 |
+
"keywords_found": [],
|
| 1673 |
+
"totalResources": 18,
|
| 1674 |
+
"visitedResources": 0,
|
| 1675 |
+
"currentLevel": 1,
|
| 1676 |
+
"strengths": [],
|
| 1677 |
+
"recommendations": [],
|
| 1678 |
+
"avgDifficulty": 0,
|
| 1679 |
+
"totalReward": 0
|
| 1680 |
+
},
|
| 1681 |
+
{
|
| 1682 |
+
"id": "summary_default_13",
|
| 1683 |
+
"title": "hello",
|
| 1684 |
+
"summary": "rag and everything was good",
|
| 1685 |
+
"keywords_found": [
|
| 1686 |
+
"Retrieval Augmented Generation"
|
| 1687 |
+
],
|
| 1688 |
+
"totalResources": 18,
|
| 1689 |
+
"visitedResources": 1,
|
| 1690 |
+
"currentLevel": 1,
|
| 1691 |
+
"strengths": [
|
| 1692 |
+
"Retrieval Methods"
|
| 1693 |
+
],
|
| 1694 |
+
"recommendations": [],
|
| 1695 |
+
"avgDifficulty": 2.0,
|
| 1696 |
+
"totalReward": 50
|
| 1697 |
+
},
|
| 1698 |
+
{
|
| 1699 |
+
"id": "summary_default_14",
|
| 1700 |
+
"title": "hello",
|
| 1701 |
+
"summary": "rag and many other things such as pretrainig objectives and all were good",
|
| 1702 |
+
"keywords_found": [
|
| 1703 |
+
"Pre training objectives",
|
| 1704 |
+
"Retrieval Augmented Generation"
|
| 1705 |
+
],
|
| 1706 |
+
"totalResources": 18,
|
| 1707 |
+
"visitedResources": 1,
|
| 1708 |
+
"currentLevel": 1,
|
| 1709 |
+
"strengths": [
|
| 1710 |
+
"Pre training objectives"
|
| 1711 |
+
],
|
| 1712 |
+
"recommendations": [],
|
| 1713 |
+
"avgDifficulty": 2.0,
|
| 1714 |
+
"totalReward": 50
|
| 1715 |
+
},
|
| 1716 |
+
{
|
| 1717 |
+
"id": "summary_default_15",
|
| 1718 |
+
"title": "hey ",
|
| 1719 |
+
"summary": "hey",
|
| 1720 |
+
"keywords_found": [],
|
| 1721 |
+
"totalResources": 18,
|
| 1722 |
+
"visitedResources": 0,
|
| 1723 |
+
"currentLevel": 1,
|
| 1724 |
+
"strengths": [],
|
| 1725 |
+
"recommendations": [],
|
| 1726 |
+
"avgDifficulty": 0,
|
| 1727 |
+
"totalReward": 0
|
| 1728 |
+
},
|
| 1729 |
+
{
|
| 1730 |
+
"id": "summary_default_16",
|
| 1731 |
+
"title": "hey",
|
| 1732 |
+
"summary": "hey",
|
| 1733 |
+
"keywords_found": [],
|
| 1734 |
+
"totalResources": 18,
|
| 1735 |
+
"visitedResources": 0,
|
| 1736 |
+
"currentLevel": 1,
|
| 1737 |
+
"strengths": [],
|
| 1738 |
+
"recommendations": [],
|
| 1739 |
+
"avgDifficulty": 0,
|
| 1740 |
+
"totalReward": 0
|
| 1741 |
+
},
|
| 1742 |
+
{
|
| 1743 |
+
"id": "summary_default_17",
|
| 1744 |
+
"title": "hey",
|
| 1745 |
+
"summary": "hey",
|
| 1746 |
+
"keywords_found": [],
|
| 1747 |
+
"totalResources": 18,
|
| 1748 |
+
"visitedResources": 0,
|
| 1749 |
+
"currentLevel": 1,
|
| 1750 |
+
"strengths": [],
|
| 1751 |
+
"recommendations": [],
|
| 1752 |
+
"avgDifficulty": 0,
|
| 1753 |
+
"totalReward": 0
|
| 1754 |
+
},
|
| 1755 |
+
{
|
| 1756 |
+
"id": "summary_default_18",
|
| 1757 |
+
"title": "heyy",
|
| 1758 |
+
"summary": "heyyy",
|
| 1759 |
+
"keywords_found": [],
|
| 1760 |
+
"totalResources": 18,
|
| 1761 |
+
"visitedResources": 0,
|
| 1762 |
+
"currentLevel": 1,
|
| 1763 |
+
"strengths": [],
|
| 1764 |
+
"recommendations": [],
|
| 1765 |
+
"avgDifficulty": 0,
|
| 1766 |
+
"totalReward": 0
|
| 1767 |
+
},
|
| 1768 |
+
{
|
| 1769 |
+
"id": "summary_default_19",
|
| 1770 |
+
"title": "heyy",
|
| 1771 |
+
"summary": "I love rag with my pre trining",
|
| 1772 |
+
"keywords_found": [
|
| 1773 |
+
"Retrieval Augmented Generation"
|
| 1774 |
+
],
|
| 1775 |
+
"totalResources": 18,
|
| 1776 |
+
"visitedResources": 0,
|
| 1777 |
+
"currentLevel": 1,
|
| 1778 |
+
"strengths": [],
|
| 1779 |
+
"recommendations": [],
|
| 1780 |
+
"avgDifficulty": 0,
|
| 1781 |
+
"totalReward": 0
|
| 1782 |
+
},
|
| 1783 |
+
{
|
| 1784 |
+
"id": "summary_default_20",
|
| 1785 |
+
"title": "huu",
|
| 1786 |
+
"summary": "huuu",
|
| 1787 |
+
"keywords_found": [],
|
| 1788 |
+
"totalResources": 18,
|
| 1789 |
+
"visitedResources": 0,
|
| 1790 |
+
"currentLevel": 1,
|
| 1791 |
+
"strengths": [],
|
| 1792 |
+
"recommendations": [],
|
| 1793 |
+
"avgDifficulty": 0,
|
| 1794 |
+
"totalReward": 0
|
| 1795 |
+
},
|
| 1796 |
+
{
|
| 1797 |
+
"id": "summary_default_21",
|
| 1798 |
+
"title": "hello",
|
| 1799 |
+
"summary": "hello",
|
| 1800 |
+
"keywords_found": [],
|
| 1801 |
+
"totalResources": 18,
|
| 1802 |
+
"visitedResources": 0,
|
| 1803 |
+
"currentLevel": 1,
|
| 1804 |
+
"strengths": [],
|
| 1805 |
+
"recommendations": [],
|
| 1806 |
+
"avgDifficulty": 0,
|
| 1807 |
+
"totalReward": 0
|
| 1808 |
+
},
|
| 1809 |
+
{
|
| 1810 |
+
"id": "summary_default_22",
|
| 1811 |
+
"title": "Introduction",
|
| 1812 |
+
"summary": "hello",
|
| 1813 |
+
"keywords_found": [],
|
| 1814 |
+
"totalResources": 18,
|
| 1815 |
+
"visitedResources": 1,
|
| 1816 |
+
"currentLevel": 1,
|
| 1817 |
+
"strengths": [
|
| 1818 |
+
"Tutorial: Introduction to huggingface"
|
| 1819 |
+
],
|
| 1820 |
+
"recommendations": [],
|
| 1821 |
+
"avgDifficulty": 2.0,
|
| 1822 |
+
"totalReward": 50
|
| 1823 |
+
},
|
| 1824 |
+
{
|
| 1825 |
+
"id": "summary_default_23",
|
| 1826 |
+
"title": "heyyy",
|
| 1827 |
+
"summary": "heyyy",
|
| 1828 |
+
"keywords_found": [],
|
| 1829 |
+
"totalResources": 18,
|
| 1830 |
+
"visitedResources": 0,
|
| 1831 |
+
"currentLevel": 1,
|
| 1832 |
+
"strengths": [],
|
| 1833 |
+
"recommendations": [],
|
| 1834 |
+
"avgDifficulty": 0,
|
| 1835 |
+
"totalReward": 0
|
| 1836 |
+
},
|
| 1837 |
+
{
|
| 1838 |
+
"id": "summary_default_24",
|
| 1839 |
+
"title": "heyyy",
|
| 1840 |
+
"summary": "heyyyyy",
|
| 1841 |
+
"keywords_found": [],
|
| 1842 |
+
"totalResources": 18,
|
| 1843 |
+
"visitedResources": 0,
|
| 1844 |
+
"currentLevel": 1,
|
| 1845 |
+
"strengths": [],
|
| 1846 |
+
"recommendations": [],
|
| 1847 |
+
"avgDifficulty": 0,
|
| 1848 |
+
"totalReward": 0
|
| 1849 |
+
},
|
| 1850 |
+
{
|
| 1851 |
+
"id": "summary_default_25",
|
| 1852 |
+
"title": "Introduction to transformers",
|
| 1853 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1854 |
+
"keywords_found": [
|
| 1855 |
+
"Multimodal LLMs"
|
| 1856 |
+
],
|
| 1857 |
+
"totalResources": 18,
|
| 1858 |
+
"visitedResources": 1,
|
| 1859 |
+
"currentLevel": 1,
|
| 1860 |
+
"strengths": [
|
| 1861 |
+
"Tutorial: Introduction to huggingface"
|
| 1862 |
+
],
|
| 1863 |
+
"recommendations": [],
|
| 1864 |
+
"avgDifficulty": 2.0,
|
| 1865 |
+
"totalReward": 50
|
| 1866 |
+
},
|
| 1867 |
+
{
|
| 1868 |
+
"id": "summary_default_26",
|
| 1869 |
+
"title": "heyy",
|
| 1870 |
+
"summary": "heyyy",
|
| 1871 |
+
"keywords_found": [],
|
| 1872 |
+
"totalResources": 18,
|
| 1873 |
+
"visitedResources": 5,
|
| 1874 |
+
"currentLevel": 2,
|
| 1875 |
+
"strengths": [
|
| 1876 |
+
"Tutorial: Introduction to huggingface",
|
| 1877 |
+
"Fine tuning LLM",
|
| 1878 |
+
"Instruction tuning",
|
| 1879 |
+
"Parameter efficient fine tuning",
|
| 1880 |
+
"Multimodal LLMs"
|
| 1881 |
+
],
|
| 1882 |
+
"recommendations": [],
|
| 1883 |
+
"avgDifficulty": 2.0,
|
| 1884 |
+
"totalReward": 250
|
| 1885 |
+
},
|
| 1886 |
+
{
|
| 1887 |
+
"id": "summary_default_27",
|
| 1888 |
+
"title": "heyy",
|
| 1889 |
+
"summary": "heyyy",
|
| 1890 |
+
"keywords_found": [],
|
| 1891 |
+
"totalResources": 18,
|
| 1892 |
+
"visitedResources": 1,
|
| 1893 |
+
"currentLevel": 1,
|
| 1894 |
+
"strengths": [
|
| 1895 |
+
"Tutorial: Introduction to huggingface"
|
| 1896 |
+
],
|
| 1897 |
+
"recommendations": [],
|
| 1898 |
+
"avgDifficulty": 2.0,
|
| 1899 |
+
"totalReward": 50
|
| 1900 |
+
},
|
| 1901 |
+
{
|
| 1902 |
+
"id": "summary_default_28",
|
| 1903 |
+
"title": "introduction to my ",
|
| 1904 |
+
"summary": "transformers",
|
| 1905 |
+
"keywords_found": [],
|
| 1906 |
+
"totalResources": 18,
|
| 1907 |
+
"visitedResources": 1,
|
| 1908 |
+
"currentLevel": 1,
|
| 1909 |
+
"strengths": [
|
| 1910 |
+
"Tutorial: Introduction to huggingface"
|
| 1911 |
+
],
|
| 1912 |
+
"recommendations": [],
|
| 1913 |
+
"avgDifficulty": 2.0,
|
| 1914 |
+
"totalReward": 50
|
| 1915 |
+
},
|
| 1916 |
+
{
|
| 1917 |
+
"id": "summary_default_29",
|
| 1918 |
+
"title": "hiii",
|
| 1919 |
+
"summary": "transformers",
|
| 1920 |
+
"keywords_found": [],
|
| 1921 |
+
"totalResources": 18,
|
| 1922 |
+
"visitedResources": 2,
|
| 1923 |
+
"currentLevel": 1,
|
| 1924 |
+
"strengths": [
|
| 1925 |
+
"Tutorial: Introduction to huggingface",
|
| 1926 |
+
"Fine tuning LLM"
|
| 1927 |
+
],
|
| 1928 |
+
"recommendations": [],
|
| 1929 |
+
"avgDifficulty": 2.0,
|
| 1930 |
+
"totalReward": 100
|
| 1931 |
+
},
|
| 1932 |
+
{
|
| 1933 |
+
"id": "summary_default_30",
|
| 1934 |
+
"title": "heyy",
|
| 1935 |
+
"summary": "heyyy",
|
| 1936 |
+
"keywords_found": [],
|
| 1937 |
+
"totalResources": 18,
|
| 1938 |
+
"visitedResources": 3,
|
| 1939 |
+
"currentLevel": 1,
|
| 1940 |
+
"strengths": [
|
| 1941 |
+
"Instruction tuning",
|
| 1942 |
+
"Parameter efficient fine tuning",
|
| 1943 |
+
"Multimodal LLMs"
|
| 1944 |
+
],
|
| 1945 |
+
"recommendations": [],
|
| 1946 |
+
"avgDifficulty": 2.0,
|
| 1947 |
+
"totalReward": 150
|
| 1948 |
+
},
|
| 1949 |
+
{
|
| 1950 |
+
"id": "summary_default_31",
|
| 1951 |
+
"title": "Introduction to transformers",
|
| 1952 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1953 |
+
"keywords_found": [
|
| 1954 |
+
"Multimodal LLMs"
|
| 1955 |
+
],
|
| 1956 |
+
"totalResources": 18,
|
| 1957 |
+
"visitedResources": 0,
|
| 1958 |
+
"currentLevel": 1,
|
| 1959 |
+
"strengths": [],
|
| 1960 |
+
"recommendations": [],
|
| 1961 |
+
"avgDifficulty": 0,
|
| 1962 |
+
"totalReward": 0
|
| 1963 |
+
},
|
| 1964 |
+
{
|
| 1965 |
+
"id": "summary_default_32",
|
| 1966 |
+
"title": "Intro",
|
| 1967 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1968 |
+
"keywords_found": [
|
| 1969 |
+
"Multimodal LLMs"
|
| 1970 |
+
],
|
| 1971 |
+
"totalResources": 18,
|
| 1972 |
+
"visitedResources": 0,
|
| 1973 |
+
"currentLevel": 1,
|
| 1974 |
+
"strengths": [],
|
| 1975 |
+
"recommendations": [],
|
| 1976 |
+
"avgDifficulty": 0,
|
| 1977 |
+
"totalReward": 0
|
| 1978 |
+
},
|
| 1979 |
+
{
|
| 1980 |
+
"id": "summary_default_33",
|
| 1981 |
+
"title": "Intro",
|
| 1982 |
+
"summary": "Transformers are a class of deep learning models designed to process sequential data efficiently by relying on an attention-based mechanism rather than recurrence or convolution. Introduced in the landmark paper \u201cAttention Is All You Need\u201d, transformers revolutionized natural language processing by enabling models to capture long-range dependencies in data with high parallelism.\n\nAt the core of a transformer is the self-attention mechanism, which allows the model to weigh the importance of different parts of an input sequence when processing each element. This makes transformers highly effective at understanding context, relationships, and structure within data. The architecture typically consists of an encoder\u2013decoder structure, where encoders extract meaningful representations from input data and decoders generate output sequences based on those representations.\n\nTransformers also use positional encoding to retain information about the order of tokens, since the model itself does not process data sequentially. Combined with multi-head attention and feed-forward neural networks, this design enables scalable training on large datasets.\n\nDue to their flexibility and performance, transformers form the backbone of many modern AI systems, including large language models, machine translation systems, text summarization tools, and increasingly, applications in vision, speech, and multimodal learning.",
|
| 1983 |
+
"keywords_found": [
|
| 1984 |
+
"Multimodal LLMs"
|
| 1985 |
+
],
|
| 1986 |
+
"totalResources": 18,
|
| 1987 |
+
"visitedResources": 1,
|
| 1988 |
+
"currentLevel": 1,
|
| 1989 |
+
"strengths": [
|
| 1990 |
+
"Multimodal LLMs"
|
| 1991 |
+
],
|
| 1992 |
+
"recommendations": [],
|
| 1993 |
+
"avgDifficulty": 2.0,
|
| 1994 |
+
"totalReward": 50
|
| 1995 |
+
},
|
| 1996 |
+
{
|
| 1997 |
+
"id": "summary_default_34",
|
| 1998 |
+
"title": "Pre training LLM",
|
| 1999 |
+
"summary": "I have learnt to finetune a pre trained BERT GPT model and i am using these models for sentiment analysis task",
|
| 2000 |
+
"keywords_found": [
|
| 2001 |
+
"Pre trained models"
|
| 2002 |
+
],
|
| 2003 |
+
"totalResources": 18,
|
| 2004 |
+
"visitedResources": 1,
|
| 2005 |
+
"currentLevel": 1,
|
| 2006 |
+
"strengths": [
|
| 2007 |
+
"Agentic AI"
|
| 2008 |
+
],
|
| 2009 |
+
"recommendations": [],
|
| 2010 |
+
"avgDifficulty": 2.0,
|
| 2011 |
+
"totalReward": 50
|
| 2012 |
+
},
|
| 2013 |
+
{
|
| 2014 |
+
"id": "summary_default_35",
|
| 2015 |
+
"title": "heyy",
|
| 2016 |
+
"summary": "heyyy",
|
| 2017 |
+
"keywords_found": [],
|
| 2018 |
+
"totalResources": 18,
|
| 2019 |
+
"visitedResources": 1,
|
| 2020 |
+
"currentLevel": 1,
|
| 2021 |
+
"strengths": [
|
| 2022 |
+
"Agentic AI"
|
| 2023 |
+
],
|
| 2024 |
+
"recommendations": [],
|
| 2025 |
+
"avgDifficulty": 2.0,
|
| 2026 |
+
"totalReward": 50
|
| 2027 |
+
},
|
| 2028 |
+
{
|
| 2029 |
+
"id": "summary_default_36",
|
| 2030 |
+
"title": "Agentic AI",
|
| 2031 |
+
"summary": "agents independently solve the problem, each can have differnt loss function",
|
| 2032 |
+
"keywords_found": [
|
| 2033 |
+
"Agentic AI"
|
| 2034 |
+
],
|
| 2035 |
+
"totalResources": 18,
|
| 2036 |
+
"visitedResources": 1,
|
| 2037 |
+
"currentLevel": 1,
|
| 2038 |
+
"strengths": [
|
| 2039 |
+
"Agentic AI"
|
| 2040 |
+
],
|
| 2041 |
+
"recommendations": [],
|
| 2042 |
+
"avgDifficulty": 2.0,
|
| 2043 |
+
"totalReward": 50
|
| 2044 |
+
},
|
| 2045 |
+
{
|
| 2046 |
+
"id": "summary_default_37",
|
| 2047 |
+
"title": "Agentic AI, Transformer, CNN",
|
| 2048 |
+
"summary": "Agentic AI, AI, Artificial Intelligence, Agentic AI has agents, Agents are good, Agentic AI, Agent",
|
| 2049 |
+
"keywords_found": [
|
| 2050 |
+
"Agentic AI"
|
| 2051 |
+
],
|
| 2052 |
+
"totalResources": 18,
|
| 2053 |
+
"visitedResources": 2,
|
| 2054 |
+
"currentLevel": 1,
|
| 2055 |
+
"strengths": [
|
| 2056 |
+
"Quantization",
|
| 2057 |
+
"Agentic AI"
|
| 2058 |
+
],
|
| 2059 |
+
"recommendations": [],
|
| 2060 |
+
"avgDifficulty": 2.0,
|
| 2061 |
+
"totalReward": 100
|
| 2062 |
+
},
|
| 2063 |
+
{
|
| 2064 |
+
"id": "summary_default_38",
|
| 2065 |
+
"title": "heyy",
|
| 2066 |
+
"summary": "heyey",
|
| 2067 |
+
"keywords_found": [],
|
| 2068 |
+
"totalResources": 18,
|
| 2069 |
+
"visitedResources": 2,
|
| 2070 |
+
"currentLevel": 1,
|
| 2071 |
+
"strengths": [
|
| 2072 |
+
"Quantization",
|
| 2073 |
+
"Agentic AI"
|
| 2074 |
+
],
|
| 2075 |
+
"recommendations": [],
|
| 2076 |
+
"avgDifficulty": 2.0,
|
| 2077 |
+
"totalReward": 100
|
| 2078 |
+
},
|
| 2079 |
+
{
|
| 2080 |
+
"id": "summary_default_39",
|
| 2081 |
+
"title": "heyu",
|
| 2082 |
+
"summary": "heyu",
|
| 2083 |
+
"keywords_found": [],
|
| 2084 |
+
"totalResources": 18,
|
| 2085 |
+
"visitedResources": 0,
|
| 2086 |
+
"currentLevel": 1,
|
| 2087 |
+
"strengths": [],
|
| 2088 |
+
"recommendations": [],
|
| 2089 |
+
"avgDifficulty": 0,
|
| 2090 |
+
"totalReward": 0
|
| 2091 |
+
},
|
| 2092 |
+
{
|
| 2093 |
+
"id": "summary_default_40",
|
| 2094 |
+
"title": "he7yey",
|
| 2095 |
+
"summary": "heyyy",
|
| 2096 |
+
"keywords_found": [],
|
| 2097 |
+
"totalResources": 18,
|
| 2098 |
+
"visitedResources": 2,
|
| 2099 |
+
"currentLevel": 1,
|
| 2100 |
+
"strengths": [
|
| 2101 |
+
"Incontext Learning",
|
| 2102 |
+
"Vision Language Models"
|
| 2103 |
+
],
|
| 2104 |
+
"recommendations": [],
|
| 2105 |
+
"avgDifficulty": 2.0,
|
| 2106 |
+
"totalReward": 100
|
| 2107 |
+
},
|
| 2108 |
+
{
|
| 2109 |
+
"id": "summary_default_41",
|
| 2110 |
+
"title": "heyy",
|
| 2111 |
+
"summary": "hyeyyyhu",
|
| 2112 |
+
"keywords_found": [],
|
| 2113 |
+
"totalResources": 18,
|
| 2114 |
+
"visitedResources": 0,
|
| 2115 |
+
"currentLevel": 1,
|
| 2116 |
+
"strengths": [],
|
| 2117 |
+
"recommendations": [],
|
| 2118 |
+
"avgDifficulty": 0,
|
| 2119 |
+
"totalReward": 0
|
| 2120 |
+
},
|
| 2121 |
+
{
|
| 2122 |
+
"id": "summary_default_42",
|
| 2123 |
+
"title": "heyyu",
|
| 2124 |
+
"summary": "heyuuu",
|
| 2125 |
+
"keywords_found": [],
|
| 2126 |
+
"totalResources": 18,
|
| 2127 |
+
"visitedResources": 1,
|
| 2128 |
+
"currentLevel": 1,
|
| 2129 |
+
"strengths": [
|
| 2130 |
+
"Prompting methods"
|
| 2131 |
+
],
|
| 2132 |
+
"recommendations": [],
|
| 2133 |
+
"avgDifficulty": 2.0,
|
| 2134 |
+
"totalReward": 50
|
| 2135 |
+
}
|
| 2136 |
+
]
|
| 2137 |
+
}
|
backend/debug_nlp.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
def load_nlp_resources():
|
| 6 |
+
nlp_json_path = os.path.join(os.getcwd(), 'backend', 'nlp', 'nlp_resources.json')
|
| 7 |
+
print(f"Loading from: {nlp_json_path}")
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
if not os.path.exists(nlp_json_path):
|
| 11 |
+
print(f"File not found: {nlp_json_path}")
|
| 12 |
+
return []
|
| 13 |
+
|
| 14 |
+
with open(nlp_json_path, 'r', encoding='utf-8') as f:
|
| 15 |
+
data = json.load(f)
|
| 16 |
+
|
| 17 |
+
print(f"Data count: {len(data)}")
|
| 18 |
+
|
| 19 |
+
# Group by difficulty for tiered journey
|
| 20 |
+
intro_resources = [r for r in data if int(r.get('difficulty', 2)) <= 3]
|
| 21 |
+
medium_resources = [r for r in data if 4 <= int(r.get('difficulty', 2)) <= 7]
|
| 22 |
+
advanced_resources = [r for r in data if int(r.get('difficulty', 2)) >= 8]
|
| 23 |
+
|
| 24 |
+
print(f"Intro: {len(intro_resources)}, Medium: {len(medium_resources)}, Advanced: {len(advanced_resources)}")
|
| 25 |
+
|
| 26 |
+
# Limit introductory count as requested (top 6 by reward)
|
| 27 |
+
intro_resources.sort(key=lambda x: int(x.get('reward', 0)), reverse=True)
|
| 28 |
+
intro_resources = intro_resources[:6]
|
| 29 |
+
|
| 30 |
+
journey_data = intro_resources + medium_resources + advanced_resources
|
| 31 |
+
print(f"Journey data count: {len(journey_data)}")
|
| 32 |
+
|
| 33 |
+
resources = []
|
| 34 |
+
for idx, row in enumerate(journey_data):
|
| 35 |
+
title = str(row.get('name', f'Resource {idx + 1}')).strip()
|
| 36 |
+
module = str(row.get('module', title)).strip()
|
| 37 |
+
difficulty = int(row.get('difficulty', 2))
|
| 38 |
+
|
| 39 |
+
if difficulty <= 3:
|
| 40 |
+
y_min, y_max = 16, 19
|
| 41 |
+
elif difficulty <= 7:
|
| 42 |
+
y_min, y_max = 8, 15
|
| 43 |
+
else:
|
| 44 |
+
y_min, y_max = 1, 7
|
| 45 |
+
|
| 46 |
+
x = int(row.get('x', np.random.randint(2, 18)))
|
| 47 |
+
y = int(row.get('y', np.random.randint(y_min, y_max + 1)))
|
| 48 |
+
|
| 49 |
+
resource = {
|
| 50 |
+
'id': str(row.get('id', idx + 1)),
|
| 51 |
+
'title': title,
|
| 52 |
+
'module': module,
|
| 53 |
+
'type': str(row.get('type', 'video')),
|
| 54 |
+
'difficulty': difficulty,
|
| 55 |
+
'reward': int(row.get('reward', 10 * difficulty)),
|
| 56 |
+
'position': {'x': x, 'y': y},
|
| 57 |
+
'visited': row.get('visited', False)
|
| 58 |
+
}
|
| 59 |
+
resources.append(resource)
|
| 60 |
+
|
| 61 |
+
return resources
|
| 62 |
+
except Exception as e:
|
| 63 |
+
print(f"Error: {e}")
|
| 64 |
+
import traceback
|
| 65 |
+
traceback.print_exc()
|
| 66 |
+
return []
|
| 67 |
+
|
| 68 |
+
if __name__ == "__main__":
|
| 69 |
+
res = load_nlp_resources()
|
| 70 |
+
print(f"Final Count: {len(res)}")
|
backend/fix_db.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
db_path = r'e:\Nl_main\backend\data\db.json'
|
| 4 |
+
|
| 5 |
+
with open(db_path, 'r', encoding='utf-8') as f:
|
| 6 |
+
s = f.read().strip()
|
| 7 |
+
|
| 8 |
+
# The file ends with '}}' but should end with '}'
|
| 9 |
+
# Try stripping from the end until valid
|
| 10 |
+
for i in range(1, 20):
|
| 11 |
+
try:
|
| 12 |
+
data = json.loads(s[:-i] if i > 0 else s)
|
| 13 |
+
with open(db_path, 'w', encoding='utf-8') as f:
|
| 14 |
+
json.dump(data, f, indent=4)
|
| 15 |
+
print(f"DB repaired by removing {i} trailing characters!")
|
| 16 |
+
break
|
| 17 |
+
except json.JSONDecodeError:
|
| 18 |
+
continue
|
| 19 |
+
else:
|
| 20 |
+
print("Could not repair automatically. Resetting to clean state.")
|
| 21 |
+
data = {
|
| 22 |
+
"users": [],
|
| 23 |
+
"learning_sessions": {},
|
| 24 |
+
"polylines": {},
|
| 25 |
+
"summaries": [],
|
| 26 |
+
"bookmarks": {},
|
| 27 |
+
"notes": {},
|
| 28 |
+
"lectures": []
|
| 29 |
+
}
|
| 30 |
+
with open(db_path, 'w', encoding='utf-8') as f:
|
| 31 |
+
json.dump(data, f, indent=4)
|
| 32 |
+
print("DB reset to clean state.")
|
backend/init.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Flask Application Initialization
|
| 3 |
+
Sets up the Flask app with necessary configurations
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from flask import Flask, send_from_directory
|
| 7 |
+
from flask_cors import CORS
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
# Initialize Flask app
|
| 11 |
+
# Serves static files from the build directory (../dist)
|
| 12 |
+
app = Flask(__name__, static_folder='../dist', static_url_path='')
|
| 13 |
+
|
| 14 |
+
# Enable CORS
|
| 15 |
+
CORS(app)
|
| 16 |
+
|
| 17 |
+
# Serve React App (Catch-all route)
|
| 18 |
+
@app.route('/', defaults={'path': ''})
|
| 19 |
+
@app.route('/<path:path>')
|
| 20 |
+
def serve(path):
|
| 21 |
+
# Don't interfere with API routes
|
| 22 |
+
if path.startswith('api'):
|
| 23 |
+
return {"error": "Not found"}, 404
|
| 24 |
+
|
| 25 |
+
if path != "" and os.path.exists(os.path.join(app.static_folder, path)):
|
| 26 |
+
return send_from_directory(app.static_folder, path)
|
| 27 |
+
|
| 28 |
+
# Return index.html for SPA routing
|
| 29 |
+
return send_from_directory(app.static_folder, 'index.html')
|
| 30 |
+
|
| 31 |
+
# Flag to indicate if database was just created
|
| 32 |
+
DBcreated = False
|
backend/key.pem
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-----BEGIN PRIVATE KEY-----
|
| 2 |
+
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCzuWR9b5a7d5xP
|
| 3 |
+
2qxbQ6aHaP6fNpUP+U30JESEXsUKpMbQP6V6w6+QqpfcxuL9l4F66Q7COJrPXxQV
|
| 4 |
+
FN5tiJ8y/35obk9m4HInKsw05DuYulLreoIpmpt2oyRCvJPeEcVjYDsvi6BQQO7y
|
| 5 |
+
fbYTfA0oXRmcHPnpcEdlv8vCcbbJQkl70QWxwS+daPPharCRGDGi3OWi1Q3h7or/
|
| 6 |
+
BEx6F+0suZhrdoFyMAuUYsVbQ8PThfpSutt3XGJwt1NQCgOqlO5f9DYcpkO9SXWd
|
| 7 |
+
NlJcWsEaMD5rgO2khB4IwwPnrUCksPqKIMoDxxX7bCt4GJWAL0pY3ZGNgIbCTEf+
|
| 8 |
+
zBZLMBSLEf5wJCZYf7qbrieTqpLfY5+ElAmst3/S4561626LqwPzZrzroIZ23DWZ
|
| 9 |
+
uXUKO2mcTMJ2UW+6l82aZvPvV5BxIpqrgvrSakasihEaiQcYUiE2zZeiOsdQYbmB
|
| 10 |
+
ZJKcMXAdMBVRobU2Lehp6bn2PygUHBqIv6i6qkHtPsSiak9HZVFgWKjiM0pvSrRO
|
| 11 |
+
OX0aqNzp6hlZrxpKUSM5Ymoa3IqZ+dQOvVx/PqpTFjgVkCHbd2VnIa3WGjrtgu8K
|
| 12 |
+
rjmngXj3yL7nm1MtQ1nK8DpDp3aXFMqJODr22H7SgYU1Y32k4Z/8/0h/urbmYbCk
|
| 13 |
+
lYW/d1Xmnvjg/LIG7wh+yuJj8eRhnQIDAQABAoICAG6EFlM8B0iIKMrCATKtyv2B
|
| 14 |
+
hvGIsuj7uP/kPW3YiPoekVMFkFCJfLBM9s2AT5z5SVm3bqfn/up94O+FwbFBTgQi
|
| 15 |
+
MC0viAwPAKMtIh10K2GjUdITp27F78toTqfrJlJsQyD0z/BQv+gZxdW/j/MzWTP0
|
| 16 |
+
7Aafmt39eSoMYtPS8T+ZT26f/gxsa0c0fqOi46iVZBBuHZefw8Wm1joy/oQlqmbS
|
| 17 |
+
SfPrD3aULMq2Yf3ZEdZEkMXGuaYQXUhKsySIw/BlXKAMoCnF4+nF/lkKSmMYO55L
|
| 18 |
+
BFc/FOMRixaANSqF0zZyzd2ahB8GkXHl9mUHWQdWDTXQKe0Fl9hq5+q6vQrHKhnC
|
| 19 |
+
fqUyQfWwRukS9p0pfUjdJC+sc8RsiHYhdu0CCC7W0GrL1tMrJRs778QvbHeZ/Ecx
|
| 20 |
+
ArdppOY5s/at9sg4ZP1oc03MEe6lVRzOiQi2DVpuuqUAU4sRbqi+Urw+uIHs0EFm
|
| 21 |
+
LiDiiFnfpgiO8wQLFTULJmWrnReQGNocIvtUudURb8fdoRZaUrYkw04dbnGV2HF4
|
| 22 |
+
9JtQHMr1YMNI+XHL3MZLbFUvNT+ntG8ZlTIN3/DIEZDxGeitdjhy1Lh1zWA0RwQM
|
| 23 |
+
FnNxKvfh+Xq0WHxwRBRJLRAzvXG0mDmxRDDrUBach7ONN5iTu/nXgopEDi6aXsuh
|
| 24 |
+
OnqjorptxmH2g8iTTGABAoIBAQDhHkoKJhL2znvp25P4PWNGCT0TYZaiwPyzpcdO
|
| 25 |
+
ArwwGsLJNgTiAzKJlvc82dNE1bw9LQ0+8DiiR1Y6hIIrJ9n+5NCv1rtFgJyvqKf6
|
| 26 |
+
wEE5FbHmA91qSWasm50yoF9OXh9a2duvy0yIyIupI2aGHt1JmFKeRkPHJozpByLK
|
| 27 |
+
7rPAIqTwETvvq2kJ3RKk3Ge3EiyAY4fHKYTsty58N4/E51as8W9FsiEpdf354l8O
|
| 28 |
+
WrrQSzCFElcDslB6TVgpr0LQgJCc9CNow10gFfy8qjR/g81hk4u4IZ/Ghmll8TcM
|
| 29 |
+
0vGItWc/VJ0rslHD0m/noVGYtSrN4EGD0Iqg85aV3LU3Zk4BAoIBAQDMYPOBcr3P
|
| 30 |
+
EvDs4LGp4ehlKKHnvwaAzl7NOYHUwVvFno1brkrRxXf/tw1qLinjHWIzJQwYzDGz
|
| 31 |
+
Fx9+2qpw2pdO5uF16l5LcsgZYuM6G4obUzNsUAf3IO0OJ5tcyCFcNWXx/62jZAce
|
| 32 |
+
tY3GPpMwyF5U6dNtauFR1q475FPYDuzCwPR+ncX0FTnjejDG4k/5ElKVMq4f/jfz
|
| 33 |
+
GQaYDw3dyiAoZ/E850f4dz46uP1nuHMOMu4nC5dvKzI7MGAzWIb7x9fxnS9TeiHS
|
| 34 |
+
tVSbD3uFwbgB0rDCQTLFx9awaiFbG2eSjj8fjGGIqXpLt4V2+sxJnjsXq5EGtSIM
|
| 35 |
+
Khml7jxDzIudAoIBAQCr8sDG47IUhj8rryZktC8BrcjS5vVyN0pNxdBoC7rGW4pw
|
| 36 |
+
uiwcniiZ44/REOhIaaEwcRru+fmoKij45YGviO7/vLKRvMiqG8SSf5Ze9z5aZfHO
|
| 37 |
+
KSGUL6CXMv1tUAW8DDSS5ljg+73ntJbIiFulEiogLpLchfr7QLzcuTCgJJdzqIKn
|
| 38 |
+
Q+Kt/BVtDUlSV2947A7FukBZpaL8VCbThnTwB2uzCW7eo2KlEpT9qyTECBUtSGE1
|
| 39 |
+
I38HOcoywKUYod+89pZ+7BOnz6FwAptbV/5lb70OTO6ppDUwJDaZQRe1WeJXc4f7
|
| 40 |
+
XZv3ZmsB0djb6eIzbB3XxvUDw+Q/cA+TRFO+jwwBAoIBAQCChpydnKkqWZ9yuSHe
|
| 41 |
+
LD/ecy/LsM03cBVNnh+TwBWbRWJkHhD8nc/AiajQmkD4Imj33v41zBsllGvx+TcH
|
| 42 |
+
MiOWKY/uXcrxBpJS3DvgfXwbbWyXDRm5M9cRxivL+qiLmjLMdgzwH88IRs0PwiEA
|
| 43 |
+
88aDFn6ID81tFd1atLzzbLy6uL6tKpglBXVeGUiTjC+lE5WYYbChEButbtsuO9cQ
|
| 44 |
+
Qal+BLwluFPXlFgy6rg0CLG6bL56Q0Xoor7G3GNkB3LV1FDOpyN1fbeiOVqaWS37
|
| 45 |
+
Q7f/ug5XUYpalAFB09YPuJXSHHSl8mLMA+jt+HwNe6kCj0KQIGtTzFS9ThnpRhhI
|
| 46 |
+
+JZlAoIBAHoNxnRlxZOsRAUemoL0yMhMhZ2KyoU5QPol85GpPYzpXVjNwY1gktzm
|
| 47 |
+
Y6ARz4mS099bRIPR+W32+JfNY2ZRTRcvfDNWmQCcaCzaxKXATdYJVFnHwP11CFOj
|
| 48 |
+
LvX8knNBicD36J2eH5ndPkuJYg81fiM8pt88pGimowxqevCLebktuk/My/zmkdvj
|
| 49 |
+
u1uwuHi5QmvQhAS9PDsMWE7mUqu5kr4qneijbFZaekcPMeqiJo5jEB6SHtyP4LIG
|
| 50 |
+
xGLBVEpJcZqh04/IPUU/w9F1omAgm8620VlzK5MbHmb6pBIPJIO8FeR9tipZOSbf
|
| 51 |
+
UfP81uklgtHf9yAd3zWU3GHdnqoMc4s=
|
| 52 |
+
-----END PRIVATE KEY-----
|
backend/navigator.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Navigator — DQN-based Next Resource Recommender
|
| 3 |
+
================================================
|
| 4 |
+
Loads the pre-trained DQN model from Navigators/dqn_model.pth and uses it
|
| 5 |
+
to recommend the next best resource for a student to visit.
|
| 6 |
+
|
| 7 |
+
Model architecture (inferred from .pth weights):
|
| 8 |
+
fc1: Linear(18, 128) — input is a 18-dim state vector
|
| 9 |
+
fc2: Linear(128, 128) — hidden layer
|
| 10 |
+
fc3: Linear(128, 18) — output is Q-values over 18 topic modules
|
| 11 |
+
(ReLU activations between layers)
|
| 12 |
+
|
| 13 |
+
State vector (18-dim): one value per topic module, representing the student's
|
| 14 |
+
assimilation score (from the polyline) for that module.
|
| 15 |
+
|
| 16 |
+
Output: index of the module (0-17) with the highest Q-value among unvisited.
|
| 17 |
+
The unvisited resource from that module with the highest reward is returned.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
import os
|
| 21 |
+
import numpy as np
|
| 22 |
+
|
| 23 |
+
# ──────────────────────────────────────────────
|
| 24 |
+
# Model Definition — must match training architecture
|
| 25 |
+
# ──────────────────────────────────────────────
|
| 26 |
+
_MODEL_PATH = os.path.join(os.path.dirname(__file__), '..', 'Navigators', 'dqn_model.pth')
|
| 27 |
+
|
| 28 |
+
_dqn_net = None
|
| 29 |
+
_dqn_mode = "unavailable"
|
| 30 |
+
|
| 31 |
+
try:
|
| 32 |
+
import torch
|
| 33 |
+
import torch.nn as nn
|
| 34 |
+
|
| 35 |
+
class DQNNet(nn.Module):
|
| 36 |
+
def __init__(self, input_dim=18, hidden_dim=128, output_dim=18):
|
| 37 |
+
super().__init__()
|
| 38 |
+
self.fc1 = nn.Linear(input_dim, hidden_dim)
|
| 39 |
+
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
|
| 40 |
+
self.fc3 = nn.Linear(hidden_dim, output_dim)
|
| 41 |
+
|
| 42 |
+
def forward(self, x):
|
| 43 |
+
x = torch.relu(self.fc1(x))
|
| 44 |
+
x = torch.relu(self.fc2(x))
|
| 45 |
+
return self.fc3(x)
|
| 46 |
+
|
| 47 |
+
_net = DQNNet(input_dim=18, hidden_dim=128, output_dim=18)
|
| 48 |
+
state_dict = torch.load(_MODEL_PATH, map_location='cpu', weights_only=False)
|
| 49 |
+
_net.load_state_dict(state_dict)
|
| 50 |
+
_net.eval()
|
| 51 |
+
_dqn_net = _net
|
| 52 |
+
_dqn_mode = "dqn"
|
| 53 |
+
print("DQN Navigator loaded successfully")
|
| 54 |
+
|
| 55 |
+
except Exception as e:
|
| 56 |
+
print(f"DQN Navigator fallback mode (could not load model): {e}")
|
| 57 |
+
_dqn_mode = "fallback"
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# ──────────────────────────────────────────────
|
| 61 |
+
# Topic-to-module index mapping (matches nlp_api.py order)
|
| 62 |
+
# ──────────────────────────────────────────────
|
| 63 |
+
ORDERED_MODULES = [
|
| 64 |
+
"Pre training objectives",
|
| 65 |
+
"Pre trained models",
|
| 66 |
+
"Tutorial: Introduction to huggingface",
|
| 67 |
+
"Fine tuning LLM",
|
| 68 |
+
"Instruction tuning",
|
| 69 |
+
"Prompt based learning",
|
| 70 |
+
"Parameter efficient fine tuning",
|
| 71 |
+
"Incontext Learning",
|
| 72 |
+
"Prompting methods",
|
| 73 |
+
"Retrieval Methods",
|
| 74 |
+
"Retrieval Augmented Generation",
|
| 75 |
+
"Quantization",
|
| 76 |
+
"Mixture of Experts Model",
|
| 77 |
+
"Agentic AI",
|
| 78 |
+
"Multimodal LLMs",
|
| 79 |
+
"Vision Language Models",
|
| 80 |
+
"Policy learning using DQN",
|
| 81 |
+
"RLHF",
|
| 82 |
+
]
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def recommend_next(visited_ids: list, module_scores: list, nlp_resources: list) -> dict:
|
| 86 |
+
"""
|
| 87 |
+
Recommend the next best resource using combined DQN + sequential progression.
|
| 88 |
+
|
| 89 |
+
The DQN Q-values alone have minimal differentiation, so we combine them
|
| 90 |
+
with sequential module ordering for sensible recommendations:
|
| 91 |
+
- 70% weight on sequential progression (next module by S.No)
|
| 92 |
+
- 30% weight on DQN Q-value ranking
|
| 93 |
+
"""
|
| 94 |
+
visited_set = set(str(v).strip() for v in visited_ids)
|
| 95 |
+
unvisited = [r for r in nlp_resources if str(r['id']).strip() not in visited_set]
|
| 96 |
+
|
| 97 |
+
print(f"\n[NAV DEBUG] --- recommend_next called ---")
|
| 98 |
+
print(f"[NAV DEBUG] Total resources: {len(nlp_resources)}, Visited: {len(visited_ids)}, Unvisited: {len(unvisited)}")
|
| 99 |
+
|
| 100 |
+
if not unvisited:
|
| 101 |
+
print("[NAV DEBUG] No unvisited resources remaining!")
|
| 102 |
+
return {"resource": None, "module": None, "reason": _dqn_mode, "q_values": []}
|
| 103 |
+
|
| 104 |
+
# ── Build state vector ──────────────────────────────────────
|
| 105 |
+
state = list(module_scores) if module_scores else []
|
| 106 |
+
if len(state) < 18:
|
| 107 |
+
state.extend([0.5] * (18 - len(state)))
|
| 108 |
+
state = state[:18]
|
| 109 |
+
state_arr = np.array(state, dtype=np.float32)
|
| 110 |
+
|
| 111 |
+
# ── Group unvisited resources by module ──────────────────
|
| 112 |
+
module_to_resources = {}
|
| 113 |
+
for r in unvisited:
|
| 114 |
+
m = r.get('module', '')
|
| 115 |
+
if m not in module_to_resources:
|
| 116 |
+
module_to_resources[m] = []
|
| 117 |
+
module_to_resources[m].append(r)
|
| 118 |
+
|
| 119 |
+
print(f"[NAV DEBUG] Unvisited modules ({len(module_to_resources)}): {list(module_to_resources.keys())}")
|
| 120 |
+
|
| 121 |
+
q_values = []
|
| 122 |
+
reason = _dqn_mode
|
| 123 |
+
|
| 124 |
+
# ── Compute sequential scores (which module should come next by S.No) ──
|
| 125 |
+
# Find the highest visited module index to determine progression
|
| 126 |
+
visited_module_indices = set()
|
| 127 |
+
for r in nlp_resources:
|
| 128 |
+
if str(r['id']).strip() in visited_set:
|
| 129 |
+
m = r.get('module', '')
|
| 130 |
+
if m in ORDERED_MODULES:
|
| 131 |
+
visited_module_indices.add(ORDERED_MODULES.index(m))
|
| 132 |
+
|
| 133 |
+
max_visited_idx = max(visited_module_indices) if visited_module_indices else -1
|
| 134 |
+
print(f"[NAV DEBUG] Highest visited module index: {max_visited_idx} ({ORDERED_MODULES[max_visited_idx] if max_visited_idx >= 0 else 'none'})")
|
| 135 |
+
|
| 136 |
+
# Sequential score: modules right after the last visited get highest score
|
| 137 |
+
sequential_scores = {}
|
| 138 |
+
for module_name in module_to_resources:
|
| 139 |
+
if module_name in ORDERED_MODULES:
|
| 140 |
+
idx = ORDERED_MODULES.index(module_name)
|
| 141 |
+
# Distance from next expected module (max_visited_idx + 1)
|
| 142 |
+
distance = abs(idx - (max_visited_idx + 1))
|
| 143 |
+
# Score: closer to next = higher score (normalize to 0-1)
|
| 144 |
+
# Use asinh to soften the penalty for distance so DQN can override more easily
|
| 145 |
+
sequential_scores[module_name] = 1.0 / (1.0 + distance * 0.5)
|
| 146 |
+
|
| 147 |
+
# ── DQN scores (normalized to 0-1 range) ──
|
| 148 |
+
dqn_scores = {}
|
| 149 |
+
if _dqn_net is not None:
|
| 150 |
+
try:
|
| 151 |
+
import torch
|
| 152 |
+
with torch.no_grad():
|
| 153 |
+
t = torch.tensor(state_arr).unsqueeze(0)
|
| 154 |
+
qs = _dqn_net(t).squeeze(0).tolist()
|
| 155 |
+
q_values = qs
|
| 156 |
+
|
| 157 |
+
# Normalize Q-values to 0-1 for the modules that have unvisited resources
|
| 158 |
+
relevant_qs = []
|
| 159 |
+
for module_name in module_to_resources:
|
| 160 |
+
if module_name in ORDERED_MODULES:
|
| 161 |
+
idx = ORDERED_MODULES.index(module_name)
|
| 162 |
+
relevant_qs.append(qs[idx])
|
| 163 |
+
|
| 164 |
+
if relevant_qs:
|
| 165 |
+
q_min = min(relevant_qs)
|
| 166 |
+
q_range = max(relevant_qs) - q_min
|
| 167 |
+
if q_range > 0.01: # Meaningful differentiation
|
| 168 |
+
for module_name in module_to_resources:
|
| 169 |
+
if module_name in ORDERED_MODULES:
|
| 170 |
+
idx = ORDERED_MODULES.index(module_name)
|
| 171 |
+
dqn_scores[module_name] = (qs[idx] - q_min) / q_range
|
| 172 |
+
else:
|
| 173 |
+
# Q-values are too clustered, DQN can't differentiate
|
| 174 |
+
print(f"[NAV DEBUG] Q-values too clustered (range={q_range:.4f}), ignoring DQN scores")
|
| 175 |
+
for module_name in module_to_resources:
|
| 176 |
+
dqn_scores[module_name] = 0.5 # neutral
|
| 177 |
+
|
| 178 |
+
reason = "dqn"
|
| 179 |
+
except Exception as e:
|
| 180 |
+
print(f"[NAV DEBUG] DQN inference error: {e}")
|
| 181 |
+
reason = "fallback"
|
| 182 |
+
|
| 183 |
+
# ── Combined scoring (DQN-forward approach) ──
|
| 184 |
+
WEIGHT_SEQUENTIAL = 0.05
|
| 185 |
+
WEIGHT_DQN = 0.95
|
| 186 |
+
|
| 187 |
+
best_module = None
|
| 188 |
+
best_score = float('-inf')
|
| 189 |
+
|
| 190 |
+
print(f"[NAV DEBUG] Module scores (seq={WEIGHT_SEQUENTIAL}, dqn={WEIGHT_DQN}):")
|
| 191 |
+
for module_name in module_to_resources:
|
| 192 |
+
seq = sequential_scores.get(module_name, 0.0)
|
| 193 |
+
dqn = dqn_scores.get(module_name, 0.5)
|
| 194 |
+
combined = WEIGHT_SEQUENTIAL * seq + WEIGHT_DQN * dqn
|
| 195 |
+
|
| 196 |
+
idx_str = ""
|
| 197 |
+
if module_name in ORDERED_MODULES:
|
| 198 |
+
idx_str = f" (idx={ORDERED_MODULES.index(module_name)})"
|
| 199 |
+
|
| 200 |
+
print(f"[NAV DEBUG] '{module_name}'{idx_str}: seq={seq:.3f}, dqn={dqn:.3f}, combined={combined:.3f}")
|
| 201 |
+
|
| 202 |
+
if combined > best_score:
|
| 203 |
+
best_score = combined
|
| 204 |
+
best_module = module_name
|
| 205 |
+
|
| 206 |
+
if best_module and best_module in module_to_resources:
|
| 207 |
+
candidates = module_to_resources[best_module]
|
| 208 |
+
candidates.sort(key=lambda r: -r['reward'])
|
| 209 |
+
chosen = candidates[0]
|
| 210 |
+
print(f"[NAV DEBUG] ✓ Chose '{best_module}' → '{chosen['title']}' (id={chosen['id']}, score={best_score:.3f})")
|
| 211 |
+
return {
|
| 212 |
+
"resource": chosen,
|
| 213 |
+
"module": best_module,
|
| 214 |
+
"reason": reason,
|
| 215 |
+
"q_values": q_values
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
# ── Fallback: next sequential unvisited resource ──
|
| 219 |
+
unvisited_sorted = sorted(unvisited, key=lambda r: int(r['id']))
|
| 220 |
+
best = unvisited_sorted[0]
|
| 221 |
+
print(f"[NAV DEBUG] Fallback: '{best['title']}' (id={best['id']})")
|
| 222 |
+
return {
|
| 223 |
+
"resource": best,
|
| 224 |
+
"module": best.get('module', ''),
|
| 225 |
+
"reason": "fallback",
|
| 226 |
+
"q_values": q_values
|
| 227 |
+
}
|
| 228 |
+
|
backend/nlp/nlp_resources.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
backend/nlp_api.py
ADDED
|
@@ -0,0 +1,1178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
NLP Learning Grid API
|
| 3 |
+
Provides endpoints for the frontend grid-based NLP learning system.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import os
|
| 7 |
+
import json
|
| 8 |
+
import pandas as pd
|
| 9 |
+
from flask import jsonify, request
|
| 10 |
+
import numpy as np
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
import nltk
|
| 13 |
+
|
| 14 |
+
# Ensure requisite NLTK data is available
|
| 15 |
+
try:
|
| 16 |
+
nltk.download('stopwords', quiet=True)
|
| 17 |
+
nltk.download('wordnet', quiet=True)
|
| 18 |
+
from nltk.corpus import stopwords
|
| 19 |
+
STOPWORDS = set(stopwords.words('english'))
|
| 20 |
+
except Exception as e:
|
| 21 |
+
print(f"Warning: Could not load NLTK stopwords: {e}")
|
| 22 |
+
STOPWORDS = set()
|
| 23 |
+
|
| 24 |
+
# Import backend modules (support both script and package execution)
|
| 25 |
+
try:
|
| 26 |
+
from .init import app
|
| 27 |
+
from .database import get_session, update_session, save_summary, save_polyline, get_polylines as get_db_polylines, get_notes, add_note, get_lectures, reset_db
|
| 28 |
+
from .request_logger import log_request
|
| 29 |
+
from .utils import utils_preprocess_text, get_cos_sim
|
| 30 |
+
from . import navigator
|
| 31 |
+
except ImportError:
|
| 32 |
+
from init import app
|
| 33 |
+
from database import get_session, update_session, save_summary, save_polyline, get_polylines as get_db_polylines, get_notes, add_note, get_lectures, reset_db
|
| 34 |
+
from request_logger import log_request
|
| 35 |
+
from utils import utils_preprocess_text, get_cos_sim
|
| 36 |
+
import navigator
|
| 37 |
+
|
| 38 |
+
# Define stopwords
|
| 39 |
+
stop_words = set(stopwords.words('english'))
|
| 40 |
+
|
| 41 |
+
# Polyline logging
|
| 42 |
+
POLYLINE_LOG_FILE = os.path.join(os.path.dirname(__file__), 'polyline_generation.log')
|
| 43 |
+
|
| 44 |
+
def log_polyline_step(step, details):
|
| 45 |
+
"""Log detailed steps of polyline generation"""
|
| 46 |
+
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 47 |
+
with open(POLYLINE_LOG_FILE, 'a', encoding='utf-8') as f:
|
| 48 |
+
f.write(f"[{timestamp}] [{step}]\n{details}\n{'-'*50}\n")
|
| 49 |
+
|
| 50 |
+
_bert_model = None
|
| 51 |
+
|
| 52 |
+
def get_bert_model():
|
| 53 |
+
global _bert_model
|
| 54 |
+
if _bert_model is None:
|
| 55 |
+
try:
|
| 56 |
+
from sentence_transformers import SentenceTransformer
|
| 57 |
+
print("Loading BERT model (lazy)...")
|
| 58 |
+
_bert_model = SentenceTransformer('all-MiniLM-L6-v2')
|
| 59 |
+
print("BERT model loaded successfully")
|
| 60 |
+
except Exception as e:
|
| 61 |
+
print(f"Error loading BERT model: {e}")
|
| 62 |
+
_bert_model = None
|
| 63 |
+
return _bert_model
|
| 64 |
+
|
| 65 |
+
# Load NLP data from JSON (Excel was rejected by HF)
|
| 66 |
+
nlp_json_path = os.path.join(os.path.dirname(__file__), 'nlp', 'nlp_resources.json')
|
| 67 |
+
|
| 68 |
+
def load_nlp_resources():
|
| 69 |
+
"""Load NLP resources from JSON file"""
|
| 70 |
+
try:
|
| 71 |
+
# Check if the JSON file exists
|
| 72 |
+
if not os.path.exists(nlp_json_path):
|
| 73 |
+
print(f"File not found: {nlp_json_path}")
|
| 74 |
+
return []
|
| 75 |
+
|
| 76 |
+
with open(nlp_json_path, 'r', encoding='utf-8') as f:
|
| 77 |
+
data = json.load(f)
|
| 78 |
+
|
| 79 |
+
if not isinstance(data, list):
|
| 80 |
+
print(f"Unexpected data format in {nlp_json_path}")
|
| 81 |
+
return []
|
| 82 |
+
|
| 83 |
+
# First-quadrant arc projection (bottom-left origin)
|
| 84 |
+
# Resources fan from 5° to 85° like Q1 of a polar chart
|
| 85 |
+
# Origin: bottom-LEFT of 20×20 grid
|
| 86 |
+
cx, cy = 0.0, 19.5
|
| 87 |
+
|
| 88 |
+
# Sequential split: 7 / 5 / 4 / 2 = 18
|
| 89 |
+
ordered_data = data[:18] if len(data) >= 18 else data
|
| 90 |
+
|
| 91 |
+
tier_configs = [
|
| 92 |
+
{'label': 'Fundamentals', 'count': 4, 'radius': 3, 'difficulty': 2},
|
| 93 |
+
{'label': 'Intermediate', 'count': 5, 'radius': 7, 'difficulty': 4},
|
| 94 |
+
{'label': 'Advance', 'count': 5, 'radius': 11, 'difficulty': 6},
|
| 95 |
+
{'label': 'Mastery', 'count': 4, 'radius': 15, 'difficulty': 8},
|
| 96 |
+
]
|
| 97 |
+
|
| 98 |
+
used_positions = set()
|
| 99 |
+
resources = []
|
| 100 |
+
resource_idx = 0
|
| 101 |
+
|
| 102 |
+
# Fan: 8° to 82° (keeps resources comfortably inside axes)
|
| 103 |
+
angle_start_deg = 8.0
|
| 104 |
+
angle_end_deg = 82.0
|
| 105 |
+
|
| 106 |
+
for t_idx, tier in enumerate(tier_configs):
|
| 107 |
+
count = tier['count']
|
| 108 |
+
r_val = tier['radius']
|
| 109 |
+
tier_data = ordered_data[resource_idx : resource_idx + count]
|
| 110 |
+
resource_idx += count
|
| 111 |
+
|
| 112 |
+
n = len(tier_data)
|
| 113 |
+
if n == 0:
|
| 114 |
+
continue
|
| 115 |
+
|
| 116 |
+
angle_step = (angle_end_deg - angle_start_deg) / (n - 1) if n > 1 else 0.0
|
| 117 |
+
|
| 118 |
+
for i, row in enumerate(tier_data):
|
| 119 |
+
angle_deg = angle_start_deg + i * angle_step
|
| 120 |
+
angle_rad = np.radians(angle_deg)
|
| 121 |
+
|
| 122 |
+
x_raw = cx + r_val * np.cos(angle_rad)
|
| 123 |
+
y_raw = cy - r_val * np.sin(angle_rad)
|
| 124 |
+
|
| 125 |
+
x = int(round(np.clip(x_raw, 0, 18)))
|
| 126 |
+
y = int(round(np.clip(y_raw, 0, 18)))
|
| 127 |
+
|
| 128 |
+
# Resolve collisions by nudging along the arc (y direction)
|
| 129 |
+
attempts = 0
|
| 130 |
+
while (x, y) in used_positions and attempts < 20:
|
| 131 |
+
y = max(0, min(18, y - 1 if attempts % 2 == 0 else y + 1))
|
| 132 |
+
attempts += 1
|
| 133 |
+
|
| 134 |
+
used_positions.add((x, y))
|
| 135 |
+
|
| 136 |
+
# Tier-based points: Fundamentals=50, Intermediate=100, Advance=150, Mastery=200
|
| 137 |
+
tier_points = {2: 50, 4: 100, 6: 150, 8: 200}
|
| 138 |
+
base_pts = tier_points.get(tier['difficulty'], 50)
|
| 139 |
+
|
| 140 |
+
# Per-resource high_line: seeded random 0.70-0.85
|
| 141 |
+
import random as _rnd
|
| 142 |
+
_rnd.seed(len(resources) + 42) # deterministic per resource index
|
| 143 |
+
high_line = round(_rnd.uniform(0.70, 0.85), 2)
|
| 144 |
+
|
| 145 |
+
resources.append({
|
| 146 |
+
'id': str(len(resources) + 1),
|
| 147 |
+
'position': {'x': int(x), 'y': int(y)},
|
| 148 |
+
'type': 'video' if 'youtube' in str(row.get('links', '')).lower() else 'book',
|
| 149 |
+
'title': str(row.get('name', f'Resource {len(resources) + 1}')).strip(),
|
| 150 |
+
'visited': False,
|
| 151 |
+
'difficulty': tier['difficulty'],
|
| 152 |
+
'reward': base_pts,
|
| 153 |
+
'base_points': base_pts,
|
| 154 |
+
'high_line': high_line,
|
| 155 |
+
'url': str(row.get('links', '')).strip(),
|
| 156 |
+
'description':str(row.get('description', '')).strip(),
|
| 157 |
+
'module': str(row.get('module', 'NLP Concept')).strip()
|
| 158 |
+
})
|
| 159 |
+
|
| 160 |
+
print(f"Successfully projected {len(resources)} resources into 4-tier Radar arcs")
|
| 161 |
+
return resources
|
| 162 |
+
except Exception as e:
|
| 163 |
+
print(f"Error loading NLP resources: {e}")
|
| 164 |
+
return []
|
| 165 |
+
|
| 166 |
+
# Cache resources
|
| 167 |
+
nlp_resources = load_nlp_resources()
|
| 168 |
+
|
| 169 |
+
# Load YouTube links mapping
|
| 170 |
+
_youtube_links_path = os.path.join(os.path.dirname(__file__), 'data', 'youtube_links.json')
|
| 171 |
+
try:
|
| 172 |
+
if os.path.exists(_youtube_links_path):
|
| 173 |
+
with open(_youtube_links_path, 'r', encoding='utf-8') as f:
|
| 174 |
+
raw_links = json.load(f)
|
| 175 |
+
|
| 176 |
+
# Create a normalized mapping for easier lookup
|
| 177 |
+
_youtube_links = {str(k).strip().lower(): v for k, v in raw_links.items()}
|
| 178 |
+
print(f"Loaded {len(_youtube_links)} YouTube links from mapping file")
|
| 179 |
+
|
| 180 |
+
# Inject youtube_url into each resource
|
| 181 |
+
for r in nlp_resources:
|
| 182 |
+
module_lower = r['module'].lower()
|
| 183 |
+
title_lower = r['title'].lower()
|
| 184 |
+
|
| 185 |
+
# 1. Exact module match
|
| 186 |
+
url = _youtube_links.get(module_lower, '')
|
| 187 |
+
|
| 188 |
+
# 2. Fuzzy match on title or module
|
| 189 |
+
if not url:
|
| 190 |
+
for key, val in _youtube_links.items():
|
| 191 |
+
if key in title_lower or key in module_lower or title_lower in key or module_lower in key:
|
| 192 |
+
url = val
|
| 193 |
+
break
|
| 194 |
+
|
| 195 |
+
r['youtube_url'] = url
|
| 196 |
+
|
| 197 |
+
yt_count = sum(1 for r in nlp_resources if r.get('youtube_url'))
|
| 198 |
+
print(f"Matched YouTube URLs for {yt_count}/{len(nlp_resources)} resources")
|
| 199 |
+
else:
|
| 200 |
+
print(f"YouTube links file not found: {_youtube_links_path}")
|
| 201 |
+
for r in nlp_resources: r['youtube_url'] = ''
|
| 202 |
+
except Exception as e:
|
| 203 |
+
print(f"Could not load YouTube links: {e}")
|
| 204 |
+
for r in nlp_resources: r['youtube_url'] = ''
|
| 205 |
+
|
| 206 |
+
# Pre-compute module embeddings
|
| 207 |
+
module_embeddings = {}
|
| 208 |
+
|
| 209 |
+
def compute_module_embeddings():
|
| 210 |
+
bert_model = get_bert_model()
|
| 211 |
+
if not bert_model:
|
| 212 |
+
return
|
| 213 |
+
|
| 214 |
+
print("Computing module embeddings...")
|
| 215 |
+
# Group resources by module to form a "document" for each module
|
| 216 |
+
module_docs = {}
|
| 217 |
+
for r in nlp_resources:
|
| 218 |
+
m = r['module']
|
| 219 |
+
# Combine title and description for a rich representation
|
| 220 |
+
text = f"{r['title']} {r.get('description', '')}"
|
| 221 |
+
if m in module_docs:
|
| 222 |
+
module_docs[m] += " " + text
|
| 223 |
+
else:
|
| 224 |
+
module_docs[m] = text
|
| 225 |
+
|
| 226 |
+
# Compute embeddings
|
| 227 |
+
for m, doc in module_docs.items():
|
| 228 |
+
# Apply preprocessing
|
| 229 |
+
clean_doc = utils_preprocess_text(doc, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_words)
|
| 230 |
+
module_embeddings[m] = bert_model.encode(clean_doc)
|
| 231 |
+
print(f"Computed embeddings for {len(module_embeddings)} modules")
|
| 232 |
+
|
| 233 |
+
# Compute embeddings on startup (REMOVED: Too slow on startup, will compute lazily or skip if needed)
|
| 234 |
+
# compute_module_embeddings()
|
| 235 |
+
|
| 236 |
+
# =============================================
|
| 237 |
+
# RESOURCES ENDPOINTS
|
| 238 |
+
# =============================================
|
| 239 |
+
|
| 240 |
+
@app.before_request
|
| 241 |
+
def before_request_logging():
|
| 242 |
+
if request.path.startswith('/api'):
|
| 243 |
+
log_request()
|
| 244 |
+
|
| 245 |
+
@app.route('/api/reset', methods=['POST'])
|
| 246 |
+
def reset_database():
|
| 247 |
+
"""Wipes the database memory completely"""
|
| 248 |
+
try:
|
| 249 |
+
reset_db()
|
| 250 |
+
return jsonify({'status': 'success', 'message': 'Database memory wiped completely'})
|
| 251 |
+
except Exception as e:
|
| 252 |
+
return jsonify({'status': 'error', 'message': str(e)}), 500
|
| 253 |
+
|
| 254 |
+
@app.route('/api/resources', methods=['GET'])
|
| 255 |
+
def get_resources():
|
| 256 |
+
"""Get all NLP learning resources with their grid positions and correct visited state"""
|
| 257 |
+
session_id = request.args.get('session_id', 'default')
|
| 258 |
+
from database import get_session
|
| 259 |
+
session = get_session(session_id)
|
| 260 |
+
visited_ids = set(str(v).strip() for v in session.get('visitedResources', []))
|
| 261 |
+
|
| 262 |
+
# Return a copy of resources with updated visited flags
|
| 263 |
+
updated_resources = []
|
| 264 |
+
for r in nlp_resources:
|
| 265 |
+
r_copy = r.copy()
|
| 266 |
+
r_copy['visited'] = str(r['id']).strip() in visited_ids
|
| 267 |
+
updated_resources.append(r_copy)
|
| 268 |
+
|
| 269 |
+
return jsonify(updated_resources)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
@app.route('/api/resources/<resource_id>', methods=['GET'])
|
| 273 |
+
def get_resource(resource_id):
|
| 274 |
+
"""Get a single resource by ID"""
|
| 275 |
+
resource = next((r for r in nlp_resources if r['id'] == resource_id), None)
|
| 276 |
+
if not resource:
|
| 277 |
+
return jsonify({'error': 'Resource not found'}), 404
|
| 278 |
+
return jsonify(resource)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
# =============================================
|
| 282 |
+
# AGENT STATE ENDPOINTS
|
| 283 |
+
# =============================================
|
| 284 |
+
|
| 285 |
+
@app.route('/api/agent', methods=['GET'])
|
| 286 |
+
def get_agent_state():
|
| 287 |
+
"""Get current agent state (position, level, reward)"""
|
| 288 |
+
session_id = request.args.get('session_id', 'default')
|
| 289 |
+
return jsonify(get_session(session_id))
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@app.route('/api/agent/move', methods=['POST'])
|
| 293 |
+
def move_agent():
|
| 294 |
+
"""Move agent to a new position"""
|
| 295 |
+
data = request.get_json()
|
| 296 |
+
session_id = data.get('session_id', 'default')
|
| 297 |
+
position = data.get('position', {})
|
| 298 |
+
|
| 299 |
+
session = get_session(session_id)
|
| 300 |
+
session['position'] = position
|
| 301 |
+
update_session(session_id, session)
|
| 302 |
+
|
| 303 |
+
return jsonify(session)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
# =============================================
|
| 307 |
+
# NOTIFICATION ENDPOINTS
|
| 308 |
+
# =============================================
|
| 309 |
+
|
| 310 |
+
@app.route('/api/notifications', methods=['GET'])
def get_notifications():
    """Get all notifications for a session (empty list when none exist).

    Note: the redundant function-local `from database import get_session`
    was removed — the helper is already imported at module level and the
    sibling agent routes call it bare.
    """
    session_id = request.args.get('session_id', 'default')
    session = get_session(session_id)
    return jsonify(session.get('notifications', []))
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
@app.route('/api/notifications/add', methods=['POST'])
def add_notification():
    """Prepend a notification to the session's list and persist it.

    Request JSON: { session_id?, message, type? } — `type` defaults to "info".
    Returns the created notification, or 400 when `message` is missing.

    Fix: the id and timestamp were previously built from two separate
    `datetime.now()` calls, which could straddle a second boundary; both
    now derive from one snapshot. Redundant local database imports removed.
    """
    data = request.get_json()
    session_id = data.get('session_id', 'default')
    message = data.get('message')
    notif_type = data.get('type', 'info')

    if not message:
        return jsonify({'error': 'Message required'}), 400

    session = get_session(session_id)
    session.setdefault('notifications', [])

    now = datetime.now()
    new_notif = {
        'id': f"notif_{int(now.timestamp())}",
        'type': notif_type,
        'message': message,
        'timestamp': int(now.timestamp() * 1000),  # milliseconds, newest events sort highest
        'read': False
    }

    # Newest notifications live at the front of the list.
    session['notifications'].insert(0, new_notif)
    update_session(session_id, session)
    return jsonify(new_notif)
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
@app.route('/api/notifications/read', methods=['POST'])
def mark_notifications_read():
    """Mark every notification in the session as read and persist the session.

    Redundant function-local database imports removed (module-level imports
    exist; sibling routes call the helpers bare).
    """
    data = request.get_json()
    session_id = data.get('session_id', 'default')

    session = get_session(session_id)
    for notif in session.get('notifications', []):
        notif['read'] = True
    update_session(session_id, session)

    return jsonify({'status': 'success'})
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def sync_agent_progression(session):
    """Keep the session's `level` consistent with its `totalReward`.

    Each stage spans 100 points (stage 1: 0-99 pts, stage 2: 100-199, ...),
    and the level never drops below 1. Mutates and returns `session`.
    """
    points = session.get('totalReward', 0)
    session['level'] = max(1, points // 100 + 1)
    return session
|
| 370 |
+
|
| 371 |
+
# =============================================
|
| 372 |
+
# RESOURCE INTERACTION ENDPOINTS
|
| 373 |
+
# =============================================
|
| 374 |
+
|
| 375 |
+
@app.route('/api/resource/visit', methods=['POST'])
def visit_resource():
    """Mark a resource as visited, credit its reward, and sync progression.

    Request JSON: { session_id?, resource_id }.
    Returns the updated session state, or 404 for an unknown resource.

    Fix: sessions created before `visitedResources` existed would raise
    KeyError here; the list is now created on demand via setdefault.
    """
    data = request.get_json()
    session_id = data.get('session_id', 'default')
    resource_id = data.get('resource_id')

    session = get_session(session_id)

    # Locate the resource being visited
    resource = next((r for r in nlp_resources if r['id'] == resource_id), None)
    if not resource:
        return jsonify({'error': 'Resource not found'}), 404

    # Reward is only granted on the first visit to a resource.
    visited = session.setdefault('visitedResources', [])
    if resource_id not in visited:
        visited.append(resource_id)
        session['totalReward'] = session.get('totalReward', 0) + resource.get('reward', 0)

    # Keep level consistent with the new total reward
    session = sync_agent_progression(session)

    update_session(session_id, session)
    return jsonify(session)
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
# =============================================
|
| 403 |
+
# LEARNING SUMMARY ENDPOINTS
|
| 404 |
+
# =============================================
|
| 405 |
+
|
| 406 |
+
@app.route('/api/summary/create', methods=['POST'])
def create_learning_summary():
    """Create a learning summary from visited resources.

    Pipeline:
      1. Match visited resource IDs robustly and scan the summary text for
         module names / aliases / resource titles (keywords).
      2. Score every module (BERT cosine similarity when the model is
         available, a jittered heuristic otherwise) to form the polyline.
      3. Award XP: base points of modules whose score clears their
         per-resource "high line", scaled by total score / total high line.
      4. Persist a summary record and a polyline; return both plus the DQN
         navigator's next recommendation.

    Fixes vs. the previous version:
      - On a BERT failure mid-loop, the fallback scores were appended onto
        the partially filled list, yielding more scores than modules; the
        list is now rebuilt from scratch.
      - The saved summary now carries a millisecond `timestamp`, which
        /api/learning-data uses to sort the activity log.
      - A dead `recommendations` computation (unconditionally overwritten
        later) was removed.
    """
    data = request.get_json()
    session_id = data.get('session_id', 'default')
    session = get_session(session_id)
    title = data.get('title', '')
    summary = data.get('summary', '')
    visited_ids = data.get('visited_resources', [])

    if not title or not summary:
        return jsonify({'error': 'Title and summary required'}), 400

    # Robust ID matching: IDs may arrive as ints or padded strings.
    visited_set = set(str(v).strip() for v in visited_ids)
    visited_resources = [r for r in nlp_resources if str(r['id']).strip() in visited_set]

    print(f"[DEBUG] create_learning_summary: incoming visited_ids={visited_ids}, matched count={len(visited_resources)}")

    # Difficulty metrics over the visited resources.
    total_difficulty = sum(r['difficulty'] for r in visited_resources)
    avg_difficulty = total_difficulty / len(visited_resources) if visited_resources else 0

    # Unique modules in first-seen order (fixes the polyline axis ordering).
    seen_modules = set()
    ordered_modules = []
    for r in nlp_resources:
        m = r['module']
        if m not in seen_modules:
            ordered_modules.append(m)
            seen_modules.add(m)

    # Aliases so informal mentions ("rag", "peft", ...) still count as the module.
    module_aliases = {
        'Pre training objectives': ['pre-training', 'pre training', 'objectives'],
        'Pre trained models': ['pre-trained', 'pre trained'],
        'Tutorial: Introduction to huggingface': ['huggingface', 'hugging face'],
        'Fine tuning LLM': ['fine-tuning', 'fine tuning', 'ft'],
        'Instruction tuning': ['instruction tuning', 'instruction-tuning'],
        'Prompt based learning': ['prompt based', 'prompt-based'],
        'Parameter efficient fine tuning': ['peft', 'parameter efficient'],
        'Incontext Learning': ['in-context', 'incontext', 'icl'],
        'Prompting methods': ['prompting'],
        'Retrieval Methods': ['retrieval'],
        'Retrieval Augmented Generation': ['rag', 'retrieval augmented'],
        'Quantization': ['quantization', 'quantized'],
        'Mixture of Experts Model': ['moe', 'mixture of experts'],
        'Agentic AI': ['agentic', 'agents'],
        'Multimodal LLMs': ['multimodal', 'multi-modal'],
        'Vision Language Models': ['vlm', 'vision-language', 'vision language'],
        'Policy learning using DQN': ['dqn', 'deep q', 'policy gradient'],
        'RLHF': ['rlhf', 'reinforcement learning from human feedback']
    }

    # Keyword scan: module names first, then their aliases, then visited titles.
    summary_lower = summary.lower()
    keywords_found = []
    for module in ordered_modules:
        if module.lower() in summary_lower:
            keywords_found.append(module)
            continue
        for alias in module_aliases.get(module, []):
            if alias.lower() in summary_lower:
                keywords_found.append(module)
                break
    for r in visited_resources:
        if r['title'].lower() in summary_lower and r['title'] not in keywords_found:
            keywords_found.append(r['title'])

    def _heuristic_score(module):
        # Fallback when BERT is unavailable or fails: jittered 0.5 baseline,
        # +0.2 for a keyword hit, +0.1 per visited resource in the module.
        score = 0.5 + (np.random.random() - 0.5) * 0.1
        if module in keywords_found:
            score += 0.2
        visits = sum(1 for r in visited_resources if r['module'] == module)
        if visits > 0:
            score += 0.1 * visits
        return float(max(0.0, min(1.0, score)))

    # Score every module for the polyline.
    module_scores = []
    log_polyline_step("START_GENERATION", f"Generating polyline for summary: '{summary[:100]}...'")

    bert_model = get_bert_model()
    if bert_model:
        if not module_embeddings:
            compute_module_embeddings()
        try:
            clean_summary = utils_preprocess_text(summary, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_words)
            summary_embedding = bert_model.encode(clean_summary)
            for module in ordered_modules:
                score = 0.0
                if module in module_embeddings:
                    score = max(0.0, get_cos_sim(summary_embedding, module_embeddings[module]))
                # Keyword hits earn a larger bonus on the BERT path (+0.3).
                if module in keywords_found:
                    score += 0.3
                visits = sum(1 for r in visited_resources if r['module'] == module)
                if visits > 0:
                    score += 0.1 * visits
                module_scores.append(float(max(0.0, min(1.0, score))))
        except Exception as e:
            print(f"Error computing BERT scores: {e}")
            # Rebuild from scratch so a mid-loop failure cannot leave a
            # partially filled list with extra fallback entries appended.
            module_scores = [_heuristic_score(m) for m in ordered_modules]
    else:
        module_scores = [_heuristic_score(m) for m in ordered_modules]

    # ── DQN Recommendation ──
    rec_result = navigator.recommend_next(visited_ids, module_scores, nlp_resources)
    next_recommendation_obj = rec_result.get('resource')

    strengths = keywords_found if keywords_found else [r['title'] for r in visited_resources if r.get('difficulty', 0) <= 2]

    # Dominant topics: most frequent keywords across all stored polylines plus this one.
    polylines = get_db_polylines()
    from collections import Counter
    all_keywords = []
    for p in polylines.values():
        if 'keywords_found' in p:
            all_keywords.extend(p['keywords_found'])
    all_keywords.extend(keywords_found)
    dominant_topics = [k for k, v in Counter(all_keywords).most_common(3)]

    scored_modules = list(zip(ordered_modules, module_scores))

    # XP: base points of modules whose score clears that module's high line,
    # scaled by (sum of scores / sum of high lines).
    current_polyline_sum = sum(module_scores)
    total_earned_base_pts = 0
    high_line_sum = 0
    for module, score in scored_modules:
        resource = next((r for r in nlp_resources if r['module'] == module), None)
        if resource:
            hl = float(resource.get('high_line', 0.8))
            high_line_sum += hl
            if score >= hl:
                total_earned_base_pts += resource.get('base_points', 50)

    high_line_sum = max(0.1, high_line_sum)  # guard against division by zero
    xp_earned = int(total_earned_base_pts * (current_polyline_sum / high_line_sum))

    # Credit XP and re-sync level.
    session['totalReward'] = session.get('totalReward', 0) + xp_earned
    session = sync_agent_progression(session)
    update_session(session_id, session)

    ai_analysis = f"Learning profile enriched by modules like {', '.join(keywords_found[:3]) if keywords_found else 'Basics'}. Stage {session['level']} achieved with {session['totalReward']} points."

    # Recommendations: unvisited modules in curriculum order; if everything
    # was visited, fall back to the lowest-scoring modules.
    visited_module_names = set(r['module'] for r in visited_resources)
    all_module_names = set(r['module'] for r in nlp_resources)
    unvisited_modules = list(all_module_names - visited_module_names)
    unvisited_modules.sort(key=lambda m: ordered_modules.index(m) if m in ordered_modules else 99)
    recommendations = unvisited_modules[:3] if unvisited_modules else [m for m, s in scored_modules if s <= 0.3][:3]

    timestamp_id = datetime.now().strftime("%Y%m%d_%H%M%S")
    summary_result = {
        'id': f"summary_{session_id}_{timestamp_id}",
        'title': title, 'summary': summary, 'keywords_found': keywords_found,
        'totalResources': len(nlp_resources), 'visitedResources': len(visited_resources),
        'currentLevel': session['level'],
        'strengths': strengths, 'recommendations': recommendations,
        'ai_analysis': ai_analysis,
        'avgDifficulty': round(avg_difficulty, 2), 'totalReward': session['totalReward'],
        'xp_earned': xp_earned,
        # Explicit ms timestamp so /api/learning-data can sort the activity log.
        'timestamp': int(datetime.now().timestamp() * 1000)
    }
    save_summary(summary_result)

    # The polyline's assimilation point is wherever the agent currently sits.
    agent_pos = session.get('position', {'x': 10, 'y': 10})
    assimilation_position = {'x': agent_pos.get('x', 10), 'y': agent_pos.get('y', 10)}

    polyline_id = f"polyline_{timestamp_id}"
    new_polyline = {
        'id': polyline_id, 'name': title, 'path': [r['position'] for r in visited_resources],
        'color': f'rgba({np.random.randint(100,200)}, {np.random.randint(100,200)}, 255, 0.4)',
        'isActive': True, 'summary': summary, 'keywords_found': keywords_found,
        'module_scores': module_scores, 'strengths': strengths, 'dominant_topics': dominant_topics,
        'ai_analysis': ai_analysis, 'assimilation_position': assimilation_position,
        'next_recommendation': {
            'id': next_recommendation_obj['id'], 'title': next_recommendation_obj['title'],
            'position': next_recommendation_obj['position'], 'module': rec_result['module'], 'reason': rec_result['reason']
        } if next_recommendation_obj else None
    }
    save_polyline(polyline_id, new_polyline)

    return jsonify({
        'summary': summary_result,
        'polyline': new_polyline,
        'assimilation_position': assimilation_position,
        'next_recommendation': new_polyline['next_recommendation']
    })
|
| 618 |
+
|
| 619 |
+
|
| 620 |
+
# =============================================
|
| 621 |
+
# POLYLINE ENDPOINTS
|
| 622 |
+
# =============================================
|
| 623 |
+
|
| 624 |
+
@app.route('/api/polylines', methods=['GET'])
def get_polylines_route():
    """Get all polylines plus two dynamically generated virtual ones.

    - "High Line Target" (red): each resource's spoke scaled by its
      `high_line` threshold.
    - "Current Knowledge Base" (blue): each spoke scaled by the average
      historical module score across every stored polyline.

    Historical polylines are still returned but flagged inactive so only
    the two virtual polylines render by default.

    Fix: the route decorator was duplicated (`@app.route` applied twice
    with the identical rule); the redundant registration was removed.
    """
    polylines = get_db_polylines()

    # Modules in first-seen order — keeps module_scores index mapping consistent.
    seen_modules = set()
    ordered_modules = []
    for r in nlp_resources:
        m = r['module']
        if m not in seen_modules:
            ordered_modules.append(m)
            seen_modules.add(m)

    import math

    # Average module scores across every historical polyline that has them.
    history_scores = [p.get('module_scores', []) for p in polylines.values() if p.get('module_scores')]
    num_histories = len(history_scores)

    avg_module_scores = [0.0] * len(ordered_modules)
    if num_histories > 0:
        for scores in history_scores:
            for i, s in enumerate(scores):
                if i < len(avg_module_scores):
                    avg_module_scores[i] += s
        avg_module_scores = [s / num_histories for s in avg_module_scores]

    # Radar-style layout: resources ordered by angle around the origin (0, 19).
    def compute_angle(r):
        return math.atan2(19 - r['position']['y'], r['position']['x'])

    resources_sorted = sorted(nlp_resources, key=compute_angle)

    high_line_path = []
    current_path = []

    for r in resources_sorted:
        dx = r['position']['x']
        dy = 19 - r['position']['y']
        radius = math.hypot(dx, dy)
        theta = math.atan2(dy, dx)

        # High Line point: scale the spoke by the resource's target threshold.
        hl = float(r.get('high_line', 0.8))
        hl_rad = radius * hl
        high_line_path.append({'x': hl_rad * math.cos(theta),
                               'y': 19 - hl_rad * math.sin(theta)})

        # Current Average point: scale by the averaged historical module score.
        try:
            m_idx = ordered_modules.index(r['module'])
            avg_s = avg_module_scores[m_idx] if num_histories > 0 else 0.0
        except ValueError:
            avg_s = 0.0

        cur_rad = radius * avg_s
        current_path.append({'x': cur_rad * math.cos(theta),
                             'y': 19 - cur_rad * math.sin(theta)})

    # Close both loops by repeating the first point at the end.
    if high_line_path:
        high_line_path.append(high_line_path[0])
    if current_path:
        current_path.append(current_path[0])

    hl_polyline = {
        'id': 'high_line',
        'name': 'High Line Target',
        'path': high_line_path,
        'color': 'rgba(239, 68, 68, 0.8)',  # Red
        'isActive': True,
        'confidence': 1.0,
        'summary': 'Target threshold for each module'
    }

    cur_polyline = {
        'id': 'current_average',
        'name': 'Current Knowledge Base',
        'path': current_path,
        'color': 'rgba(59, 130, 246, 0.8)',  # Blue
        'isActive': True,
        'confidence': 1.0,
        'summary': 'Your overall average knowledge across all summaries'
    }

    # Return historical polylines disabled by default; only the two virtual
    # polylines are strictly active.
    result = list(polylines.values())
    for p in result:
        p['isActive'] = False

    result.append(hl_polyline)
    result.append(cur_polyline)

    return jsonify(result)
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
@app.route('/api/polylines/<polyline_id>', methods=['GET'])
def get_polyline(polyline_id):
    """Return a single polyline by ID, or 404 when it does not exist."""
    polyline = get_db_polylines().get(polyline_id)
    if not polyline:
        return jsonify({'error': 'Polyline not found'}), 404
    return jsonify(polyline)
|
| 734 |
+
|
| 735 |
+
|
| 736 |
+
@app.route('/api/polylines/<polyline_id>/toggle', methods=['POST'])
def toggle_polyline(polyline_id):
    """Set a polyline's visibility flag and persist the change."""
    body = request.get_json()
    active_flag = body.get('isActive', False)

    all_polylines = get_db_polylines()
    target = all_polylines.get(polyline_id)
    if not target:
        return jsonify({'error': 'Polyline not found'}), 404

    target['isActive'] = active_flag
    save_polyline(polyline_id, target)
    return jsonify(target)
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
|
| 753 |
+
# =============================================
|
| 754 |
+
# DQN PATH ENDPOINTS
|
| 755 |
+
# =============================================
|
| 756 |
+
|
| 757 |
+
@app.route('/api/dqn-path', methods=['POST'])
def generate_dqn_path():
    """Generate a DQN-guided optimal path using the Navigator module.

    Request JSON:
    {
        "session_id": "str",
        "agent_position": {"x": int, "y": int},
        "visited_resource_ids": ["id1", "id2", ...]
    }

    The path starts at the agent, jumps to the navigator's recommendation,
    then visits up to four nearest unvisited resources.
    """
    payload = request.get_json()
    start_pos = payload.get('agent_position', {'x': 10, 'y': 10})
    visited_ids = list(payload.get('visited_resource_ids', []))

    # Seed the navigator with module scores from the most recent polyline.
    polylines = get_db_polylines()
    latest_scores = []
    if polylines:
        latest_scores = list(polylines.values())[-1].get('module_scores', [])

    rec = navigator.recommend_next(
        visited_ids=visited_ids,
        module_scores=latest_scores,
        nlp_resources=nlp_resources
    )

    path = [start_pos]
    visited_set = {str(v).strip() for v in visited_ids}

    if rec['resource']:
        rec_pos = rec['resource']['position']
        path.append(rec_pos)
        # Candidates: unvisited resources other than the recommendation,
        # nearest first (squared Euclidean distance from the recommendation).
        candidates = [
            r for r in nlp_resources
            if str(r['id']).strip() not in visited_set and r['id'] != rec['resource']['id']
        ]
        candidates.sort(key=lambda r: (r['position']['x'] - rec_pos['x']) ** 2 +
                                      (r['position']['y'] - rec_pos['y']) ** 2)
        for r in candidates[:4]:
            path.append(r['position'])

    final_resource = rec['resource']
    total_reward = sum(r['reward'] for r in nlp_resources
                       if r['position'] in path[1:]) if path else 0

    return jsonify({
        'path': path,
        'finalResource': final_resource,
        'totalReward': total_reward,
        'pathLength': len(path),
        'navigatorReason': rec['reason']
    })
|
| 814 |
+
|
| 815 |
+
|
| 816 |
+
@app.route('/api/next-recommendation', methods=['GET'])
def get_next_recommendation():
    """Return the DQN navigator's next resource pick for a session.

    Response: { resource, module, reason, q_values }
    """
    sid = request.args.get('session_id', 'default')
    state = get_session(sid)
    visited = [str(v).strip() for v in state.get('visitedResources', [])]

    # Latest module scores come from the most recently stored polyline.
    stored = get_db_polylines()
    scores = []
    if stored:
        scores = list(stored.values())[-1].get('module_scores', [])

    return jsonify(navigator.recommend_next(
        visited_ids=visited,
        module_scores=scores,
        nlp_resources=nlp_resources
    ))
|
| 840 |
+
|
| 841 |
+
|
| 842 |
+
# =============================================
|
| 843 |
+
# LEARNING DATA ENDPOINTS
|
| 844 |
+
# =============================================
|
| 845 |
+
|
| 846 |
+
@app.route('/api/learning-data', methods=['GET'])
def get_learning_data():
    """Get comprehensive learning data for a session.

    Combines visited-resource stats, analysis from the latest summary, an
    activity heatmap/log (summaries weighted 2, notifications weighted 1),
    and the next optimal resource position.

    Fixes vs. the previous version:
      - The activity log was sorted by `str(timestamp)` (lexicographic);
        it is now sorted numerically.
      - The database was loaded twice via duplicated function-local
        imports; it is now loaded once and reused.
    """
    session_id = request.args.get('session_id', 'default')
    session = get_session(session_id)

    visited_ids = set(str(v).strip() for v in session.get('visitedResources', []))
    visited_resources = [r for r in nlp_resources if str(r['id']).strip() in visited_ids]

    # Defaults used when no summary has been written yet.
    strengths = [r['title'] for r in visited_resources if r.get('difficulty', 0) <= 2]
    unvisited = [r for r in nlp_resources if str(r['id']).strip() not in visited_ids]
    unvisited.sort(key=lambda r: (-r.get('reward', 0), r.get('difficulty', 0)))
    recommendations = [r['title'] for r in unvisited[:3]]

    # Augment with results from the latest summary analysis, when one exists.
    ai_analysis = ""
    xp_earned = 0
    all_summaries = []
    try:
        from database import load_db
        db = load_db()
        all_summaries = db.get('summaries', [])
        # Summary IDs embed the session id: summary_{session_id}_{timestamp}
        matching_summaries = [s for s in all_summaries if f"summary_{session_id}" in s.get('id', '')]
        if matching_summaries:
            latest = matching_summaries[-1]
            if latest.get('strengths'):
                strengths = latest['strengths']
            if latest.get('recommendations'):
                recommendations = latest['recommendations']
            if latest.get('ai_analysis'):
                ai_analysis = latest['ai_analysis']
            xp_earned = latest.get('xp_earned', 0)
    except Exception as e:
        print(f"Error augmenting learning data from summaries: {e}")

    # Activity heatmap (date -> weighted count) and recent-events log.
    activity_heatmap = {}
    activity_log = []
    try:
        # 1. Summary activity: writing a summary is "deep" work, weight 2.
        for s in all_summaries:
            s_id = s.get('id', '')
            if "summary_" in s_id:
                # IDs look like summary_{session}_{YYYYMMDD}_{HHMMSS}; find the
                # 8-digit date segment rather than assuming a fixed position.
                date_str = None
                for part in s_id.split('_')[2:]:
                    if len(part) == 8 and part.isdigit() and part.startswith('20'):
                        date_str = part
                        break

                if date_str:
                    formatted_date = f"{date_str[:4]}-{date_str[4:6]}-{date_str[6:]}"
                    activity_heatmap[formatted_date] = activity_heatmap.get(formatted_date, 0) + 2

                    activity_log.append({
                        'id': s_id,
                        'type': 'summary',
                        'title': s.get('title', 'Summary Written'),
                        'timestamp': s.get('timestamp', int(datetime.now().timestamp() * 1000))
                    })

        # 2. Notification/visit activity, weight 1.
        for n in session.get('notifications', []):
            ts = n.get('timestamp')
            if ts:
                # ms timestamp -> YYYY-MM-DD
                formatted_date = datetime.fromtimestamp(ts / 1000.0).strftime('%Y-%m-%d')
                activity_heatmap[formatted_date] = activity_heatmap.get(formatted_date, 0) + 1

        # Most recent first. Coerce to int so ordering is numeric — a string
        # sort misorders timestamps of differing digit counts.
        def _ts_key(entry):
            try:
                return int(entry.get('timestamp', 0))
            except (TypeError, ValueError):
                return 0

        activity_log.sort(key=_ts_key, reverse=True)
        activity_log = activity_log[:50]  # Limit window

    except Exception as e:
        print(f"Error calculating activity log: {e}")

    # Most visited module, by count of visited resources.
    from collections import Counter
    module_counts = Counter(r['module'] for r in visited_resources)
    most_visited_module = module_counts.most_common(1)[0][0] if module_counts else "None"

    return jsonify({
        'totalResources': len(nlp_resources),
        'visitedResources': len(visited_resources),
        'currentLevel': session.get('level', 1),
        'strengths': strengths[:3],
        'recommendations': recommendations[:3],
        'ai_analysis': ai_analysis,
        'activityHeatmap': activity_heatmap,
        'activityLog': activity_log,
        'nextOptimalResource': unvisited[0]['position'] if unvisited else None,
        'totalReward': session.get('totalReward', 0),
        'mostVisitedModule': most_visited_module,
        'xp_earned': xp_earned
    })
|
| 954 |
+
|
| 955 |
+
|
| 956 |
+
# =============================================
|
| 957 |
+
# BOOKMARK ENDPOINTS
|
| 958 |
+
# =============================================
|
| 959 |
+
|
| 960 |
+
@app.route('/api/bookmarks', methods=['GET'])
def get_bookmarks():
    """Get all bookmarked resources for a session."""
    sid = request.args.get('session_id', 'default')
    # Aliased local import: this view function shadows the database helper's name.
    from database import get_bookmarks as get_db_bookmarks
    return jsonify(get_db_bookmarks(sid))
|
| 966 |
+
|
| 967 |
+
|
| 968 |
+
@app.route('/api/bookmarks/add', methods=['POST'])
def add_bookmark():
    """Add a resource to a session's bookmarks; 400 when no ID is given."""
    payload = request.get_json()
    sid = payload.get('session_id', 'default')
    rid = payload.get('resource_id')

    if not rid:
        return jsonify({'error': 'Resource ID required'}), 400

    # Aliased local import: this view function shadows the database helper's name.
    from database import add_bookmark as add_db_bookmark
    add_db_bookmark(sid, rid)
    return jsonify({'status': 'success', 'resource_id': rid})
|
| 981 |
+
|
| 982 |
+
|
| 983 |
+
@app.route('/api/bookmarks/remove', methods=['POST'])
def remove_bookmark():
    """Remove a resource from a session's bookmarks; 400 when no ID is given."""
    payload = request.get_json()
    sid = payload.get('session_id', 'default')
    rid = payload.get('resource_id')

    if not rid:
        return jsonify({'error': 'Resource ID required'}), 400

    # Aliased local import: this view function shadows the database helper's name.
    from database import remove_bookmark as remove_db_bookmark
    remove_db_bookmark(sid, rid)
    return jsonify({'status': 'success', 'resource_id': rid})
|
| 996 |
+
|
| 997 |
+
|
| 998 |
+
# =============================================
|
| 999 |
+
# NOTES ENDPOINTS
|
| 1000 |
+
# =============================================
|
| 1001 |
+
|
| 1002 |
+
@app.route('/api/notes', methods=['GET'])
def get_notes_route():
    """Return every note stored for the session."""
    sid = request.args.get('session_id', 'default')
    return jsonify(get_notes(sid))
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
+
@app.route('/api/notes', methods=['POST'])
def add_note_route():
    """Create a note for the session; 400 when no note payload is given."""
    payload = request.get_json()
    sid = payload.get('session_id', 'default')
    note_payload = payload.get('note')

    if not note_payload:
        return jsonify({'error': 'Note data required'}), 400

    return jsonify(add_note(sid, note_payload))
|
| 1021 |
+
|
| 1022 |
+
|
| 1023 |
+
# =============================================
|
| 1024 |
+
# LECTURES ENDPOINTS
|
| 1025 |
+
# =============================================
|
| 1026 |
+
|
| 1027 |
+
@app.route('/api/lectures', methods=['GET'])
def get_lectures_route():
    """Return the full list of available lectures as JSON."""
    lectures = get_lectures()
    return jsonify(lectures)
|
| 1031 |
+
|
| 1032 |
+
|
| 1033 |
+
|
| 1034 |
+
# =============================================
|
| 1035 |
+
# AI SIDER CHAT ENDPOINT
|
| 1036 |
+
# =============================================
|
| 1037 |
+
|
| 1038 |
+
# Load YouTube transcripts
# Transcripts live in data/youtube_transcripts.json, keyed by module name.
# Keys are normalized to lowercase so later lookups are case-insensitive.
# A missing or corrupt file must not prevent the API from starting, so every
# failure path degrades to an empty dict.
_transcripts_path = os.path.join(os.path.dirname(__file__), 'data', 'youtube_transcripts.json')
try:
    if os.path.exists(_transcripts_path):
        with open(_transcripts_path, 'r', encoding='utf-8') as f:
            raw_transcripts = json.load(f)
            # Normalize keys to lowercase for robust matching
            _youtube_transcripts = {str(k).strip().lower(): v for k, v in raw_transcripts.items()}
            print(f"Loaded and normalized transcripts for {len(_youtube_transcripts)} modules")
    else:
        print(f"Transcripts file not found: {_transcripts_path}")
        _youtube_transcripts = {}
except Exception as e:
    print(f"Could not load transcripts: {e}")
    _youtube_transcripts = {}
|
| 1053 |
+
|
| 1054 |
+
|
| 1055 |
+
from openai import OpenAI

# AI Client configuration
# Uses Groq's OpenAI-compatible endpoint when a Groq key is configured
# (free high-quality inference); otherwise falls back to the default
# OpenAI endpoint. base_url=None lets the SDK use its default.
_ai_client = None
try:
    _groq_key = os.getenv("GROQ_API_KEY")
    _api_key = _groq_key or os.getenv("OPENAI_API_KEY") or "FIXME_YOUR_API_KEY"
    # BUG FIX: the old check was `"GROQ" in _api_key`, which inspected the key
    # *value*. Real Groq keys start with "gsk_" and do not contain "GROQ", so a
    # genuine Groq key was mis-routed to the OpenAI endpoint. Route by which
    # environment variable supplied the key instead (placeholder -> Groq, to
    # preserve the previous default).
    _base_url = "https://api.groq.com/openai/v1" if (_groq_key or _api_key == "FIXME_YOUR_API_KEY") else None
    _ai_client = OpenAI(api_key=_api_key, base_url=_base_url)
except Exception as e:
    # Chat endpoint degrades to transcript lookup when the client is absent.
    print(f"AI Client initialization warning: {e}")
|
| 1066 |
+
|
| 1067 |
+
@app.route('/api/chat', methods=['POST'])
def chat_with_ai():
    """
    AI Sider chat endpoint - upgraded to use the openai package.
    Uses YouTube transcript context and a premium model for better answers.

    Request JSON: {"module": str, "question": str, "history": [{"role", "content"}, ...]}
    Response JSON: {"answer": str, "source": str} where source identifies the
    answering strategy ('none', 'openai-<model>', or 'transcript-lookup').
    """
    data = request.get_json()
    module = data.get('module', '')
    question = data.get('question', '')
    history = data.get('history', [])

    # Empty/whitespace-only questions short-circuit without touching the LLM.
    if not question.strip():
        return jsonify({'answer': 'Please ask a question about this lesson.', 'source': 'none'})

    # 1. Find transcript/context with better matching
    # Normalize input module for lookup (transcript keys were lowercased at load).
    module_norm = str(module).strip().lower()
    transcript = _youtube_transcripts.get(module_norm, '')

    if not transcript:
        # Try finding the resource first to get its formal title
        resource_match = None
        for r in nlp_resources:
            if r['id'] == module or r['title'].lower() == module_norm or r.get('module', '').lower() == module_norm:
                resource_match = r
                break

        target_name = resource_match['title'] if resource_match else module_norm
        target_name_lower = target_name.lower()

        # Fuzzy match on transcripts keys: substring containment either way.
        for key, val in _youtube_transcripts.items():
            if key in target_name_lower or target_name_lower in key:
                transcript = val
                break

    # Description fallback: used only when no transcript was found at all.
    resource_desc = ''
    for r in nlp_resources:
        if r.get('module', '').lower() == module_norm or r.get('title', '').lower() == module_norm:
            resource_desc = r.get('description', '')[:1000]
            break

    # Cap context size to keep the prompt within model limits.
    context = transcript[:4500] if transcript else resource_desc[:1500]

    # 2. Try Premium Inference via OpenAI Package
    # Check for actual keys, not just the placeholder
    _key = os.getenv("GROQ_API_KEY") or os.getenv("OPENAI_API_KEY")
    if _ai_client and _key and _key != "FIXME_YOUR_API_KEY":
        try:
            # Determine model based on provider.
            # NOTE(review): the SDK's base_url may be a URL object rather than a
            # str; if `.lower()` raises here, the except below silently falls
            # back to lookup — confirm the attribute type against the SDK version.
            if "groq" in (_ai_client.base_url or "").lower():
                model = "llama-3.3-70b-versatile"
            else:
                model = "gpt-3.5-turbo"

            system_prompt = f"""You are 'Sider AI', a premium learning assistant for an Advanced NLP course.
Your goal is to help students understand the current lesson module: '{module}'.

Use the following context from the lesson's YouTube transcript/description to answer the student's question accurately:
---
{context}
---

INSTRUCTIONS:
- Be concise, professional, and encouraging.
- If the answer is in the context, prioritize that information.
- If the answer isn't in the context, use your general LLM knowledge to explain the concept.
- Format your response using clean Markdown."""

            messages = [{"role": "system", "content": system_prompt}]
            # Add limited history for continuity (last 4 turns only).
            for msg in history[-4:]:
                # Any role other than "user" is treated as assistant output.
                role = "user" if msg.get("role") == "user" else "assistant"
                messages.append({"role": role, "content": msg.get("content", "")})

            messages.append({"role": "user", "content": question})

            completion = _ai_client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=0.7,
                max_tokens=800
            )
            answer = completion.choices[0].message.content
            return jsonify({'answer': answer, 'source': f'openai-{model}'})

        except Exception as e:
            print(f"[CHAT] Premium AI error: {e}")
            # Fall through to lookup if premium fails

    # 3. Fallback to Search/Lookup (Avoiding T5 to prevent worker timeouts on HF)
    # Naive keyword retrieval: keep up to 3 context sentences that share a
    # (>3-char) word with the question.
    relevant_context = ""
    if context:
        sentences = context.split('.')
        # Find sentences containing keywords from the question
        keywords = [w.lower() for w in question.split() if len(w) > 3]
        matching = []
        for s in sentences:
            if any(k in s.lower() for k in keywords):
                matching.append(s.strip())
        relevant_context = ". ".join(matching[:3])

    if relevant_context:
        answer = f"I found some relevant information in the lesson material: {relevant_context}. For a deeper explanation, please ensure an API key is configured in the environment."
    else:
        answer = f"I'm here to help with the lesson on '{module}'. I couldn't find a specific answer in the local material, but you should review the module description for more details. (Tip: Configure an AI API key for better responses)."

    return jsonify({'answer': answer, 'source': 'transcript-lookup'})
|
| 1175 |
+
|
| 1176 |
+
|
| 1177 |
+
if __name__ == '__main__':
|
| 1178 |
+
print(f"Loaded {len(nlp_resources)} NLP resources")
|
backend/request_logger.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
from datetime import datetime
from flask import request

# Request logs are appended to this file, next to the module.
LOG_FILE = os.path.join(os.path.dirname(__file__), 'backend_logs.txt')

def log_request(info=None):
    """Append a timestamped entry for the current Flask request to LOG_FILE.

    Records the HTTP method and URL, an optional free-form `info` string, and
    the JSON payload for POST/PUT requests when one is present. Each entry is
    terminated by a 50-character dash separator.
    """
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    parts = [f"[{stamp}] {request.method} {request.url}\n"]

    if info:
        parts.append(f"Info: {info}\n")

    if request.method in ('POST', 'PUT'):
        try:
            # silent=True returns None instead of raising on a non-JSON body.
            body = request.get_json(silent=True)
            if body:
                parts.append(f"Payload: {body}\n")
        except Exception:
            pass

    parts.append("-" * 50 + "\n")

    with open(LOG_FILE, 'a', encoding='utf-8') as log:
        log.write("".join(parts))
|
backend/requirements.txt
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
beautifulsoup4==4.12.3
|
| 2 |
+
bs4==0.0.2
|
| 3 |
+
Flask==3.0.3
|
| 4 |
+
Flask-Cors==4.0.1
|
| 5 |
+
gunicorn
|
| 6 |
+
Jinja2==3.1.4
|
| 7 |
+
keybert==0.8.5
|
| 8 |
+
nltk==3.8.1
|
| 9 |
+
numpy
|
| 10 |
+
pandas
|
| 11 |
+
pdfplumber==0.11.5
|
| 12 |
+
requests==2.32.3
|
| 13 |
+
scikit-learn
|
| 14 |
+
sentence-transformers==3.0.1
|
| 15 |
+
torch
|
| 16 |
+
Werkzeug==3.0.3
|
| 17 |
+
youtube-transcript-api
|
| 18 |
+
lxml
|
| 19 |
+
openai
|
backend/utils.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
=========================================================
|
| 3 |
+
Polyline & Vector Utility Functions
|
| 4 |
+
=========================================================
|
| 5 |
+
|
| 6 |
+
This file contains utility (i.e. globally used) functions in accordance with DRY principle :
|
| 7 |
+
- Handling polylines (multi-dimensional paths) used in courses/resources.
|
| 8 |
+
- Computing geometric and vector operations (centroids, distances, cosine similarity).
|
| 9 |
+
- Finding nearest resources for a learner based on polyline similarity.
|
| 10 |
+
- Converting NumPy arrays to standard Python lists for JSON serialization.
|
| 11 |
+
(more to be added with time...)
|
| 12 |
+
=========================================================
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
import math
|
| 17 |
+
import heapq
|
| 18 |
+
import re
|
| 19 |
+
from bs4 import BeautifulSoup
|
| 20 |
+
import nltk
|
| 21 |
+
from nltk.stem import WordNetLemmatizer, PorterStemmer
|
| 22 |
+
from nltk.corpus import stopwords
|
| 23 |
+
|
| 24 |
+
# Ensure NLTK data is downloaded
|
| 25 |
+
try:
|
| 26 |
+
nltk.data.find('corpora/stopwords')
|
| 27 |
+
except LookupError:
|
| 28 |
+
nltk.download('stopwords')
|
| 29 |
+
|
| 30 |
+
try:
|
| 31 |
+
nltk.data.find('corpora/wordnet')
|
| 32 |
+
except LookupError:
|
| 33 |
+
nltk.download('wordnet')
|
| 34 |
+
|
| 35 |
+
# ===========================
|
| 36 |
+
# utils_preprocess_text
|
| 37 |
+
# ===========================
|
| 38 |
+
def utils_preprocess_text(text: str, flg_stemm: bool = False, flg_lemm: bool = True, lst_stopwords: list = None) -> str:
    """Clean raw text for NLP use.

    Strips HTML markup, keeps alphabetic characters only, drops stray single
    characters and redundant whitespace, then optionally removes stopwords and
    applies stemming and/or lemmatization.

    Parameters:
        text (str): Raw input text (may contain HTML).
        flg_stemm (bool): Apply Porter stemming when True. Default False.
        flg_lemm (bool): Apply WordNet lemmatization when True. Default True.
        lst_stopwords (list): Stopwords to drop, or None to keep all tokens.

    Returns:
        str: The cleaned, space-joined text ("" for falsy input).

    NOTE(review): tokens are not lowercased here, so capitalized stopwords
    (e.g. "The") survive stopword removal — confirm this is intended.
    """
    if not text:
        return ""

    # Strip markup first so tag names don't leak into the token stream.
    text = BeautifulSoup(text, 'lxml').get_text()

    # Keep letters only, drop isolated single characters, then squeeze runs
    # of whitespace down to single spaces.
    text = re.sub('[^a-zA-Z]', ' ', text)
    text = re.sub(r"\s+[a-zA-Z]\s+", ' ', text)
    text = re.sub(r'\s+', ' ', text)

    tokens = text.split()

    if lst_stopwords is not None:
        tokens = [tok for tok in tokens if tok not in lst_stopwords]

    if flg_stemm:
        stemmer = PorterStemmer()
        tokens = [stemmer.stem(tok) for tok in tokens]

    if flg_lemm:
        lemmatizer = WordNetLemmatizer()
        tokens = [lemmatizer.lemmatize(tok) for tok in tokens]

    return " ".join(tokens)
|
| 86 |
+
|
| 87 |
+
# ===========================
|
| 88 |
+
# convert_to_lists
|
| 89 |
+
# ===========================
|
| 90 |
+
def convert_to_lists(data):
    """
    Recursively convert NumPy containers and scalars to plain Python objects
    so the whole structure is JSON-serializable.

    Parameters:
        data (np.ndarray | np.generic | list | dict | other): Input structure.

    Returns:
        list | dict | scalar | original type: Data converted recursively;
        non-NumPy leaves are returned unchanged.
    """
    if isinstance(data, np.ndarray):
        return data.tolist()
    elif isinstance(data, np.generic):
        # NumPy scalars (np.int64, np.float32, ...) are not JSON-serializable
        # and previously slipped through untouched; .item() yields the
        # equivalent built-in Python scalar.
        return data.item()
    elif isinstance(data, list):
        return [convert_to_lists(item) for item in data]
    elif isinstance(data, dict):
        return {key: convert_to_lists(value) for key, value in data.items()}
    else:
        return data
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# ===========================
|
| 111 |
+
# get_lowline_of_polylines
|
| 112 |
+
# ===========================
|
| 113 |
+
def get_lowline_of_polylines(polylines):
    """
    Per-dimension minimum across a set of polyline vectors.

    Parameters:
        polylines (list of lists): Equal-length polyline vectors.

    Returns:
        list: Minimum value in each dimension; a 12-dimension zero vector
        when no polylines are given (the project's default polyline size).
    """
    if not polylines:
        return [0] * 12  # Default 12-dimension zero vector
    dims = len(polylines[0])
    return [min(poly[j] for poly in polylines) for j in range(dims)]
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# ===========================
|
| 132 |
+
# get_highline_of_polylines
|
| 133 |
+
# ===========================
|
| 134 |
+
def get_highline_of_polylines(polylines):
    """
    Per-dimension maximum across a set of polyline vectors.

    Parameters:
        polylines (list of lists): Equal-length polyline vectors.

    Returns:
        list: Maximum value in each dimension; a 12-dimension zero vector
        when no polylines are given. The empty-input guard mirrors
        get_lowline_of_polylines — previously this function raised
        IndexError on an empty list while its sibling returned a default.
    """
    if not polylines:
        return [0] * 12
    dims = len(polylines[0])
    return [max(poly[j] for poly in polylines) for j in range(dims)]
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
# ===========================
|
| 149 |
+
# get_cos_sim
|
| 150 |
+
# ===========================
|
| 151 |
+
def get_cos_sim(a: np.ndarray, b: np.ndarray) -> float:
    """
    Cosine similarity between two vectors.

    Cosine similarity measures how similar two vectors are in direction,
    regardless of their magnitude.

    Parameters:
        a (np.ndarray): First vector.
        b (np.ndarray): Second vector.

    Returns:
        float: Similarity in [-1, 1] (1 = identical direction, -1 = opposite).
        Returns 0.0 when either vector has zero magnitude — the previous
        implementation divided by zero there and produced NaN.
    """
    norm_a = np.linalg.norm(a)
    norm_b = np.linalg.norm(b)
    if norm_a == 0 or norm_b == 0:
        # Similarity is undefined for a zero vector; treat as "no similarity".
        return 0.0
    return float(np.dot(a, b) / (norm_a * norm_b))
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
# ===========================
|
| 172 |
+
# calculate_centroid
|
| 173 |
+
# ===========================
|
| 174 |
+
def calculate_centroid(polylines):
    """
    Centroid (per-dimension mean) of a collection of polyline vectors.

    Parameters:
        polylines (list of lists): N-dimensional polyline vectors.

    Returns:
        list: Centroid coordinates (mean along each dimension).
    """
    return np.mean(np.array(polylines), axis=0).tolist()
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
# ===========================
|
| 190 |
+
# two_polyline_distance
|
| 191 |
+
# ===========================
|
| 192 |
+
def two_polyline_distance(point1, point2):
    """
    Euclidean distance between two polylines (points in N-dimensional space).

    Parameters:
        point1 (list | np.ndarray): First polyline coordinates.
        point2 (list | np.ndarray): Second polyline coordinates.

    Returns:
        float: Euclidean distance between the two polylines.

    Raises:
        ValueError: When the two points differ in dimensionality.
    """
    if len(point1) != len(point2):
        raise ValueError("Points must have the same dimensions")

    squared_total = 0
    for a, b in zip(point1, point2):
        squared_total += (b - a) ** 2
    return math.sqrt(squared_total)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
# ===========================
|
| 210 |
+
# nearest_seven
|
| 211 |
+
# ===========================
|
| 212 |
+
def nearest_seven(learner_polyline, resources_id_polylines):
    """
    Find the 7 nearest resources to the learner based on polyline distance.

    Parameters:
        learner_polyline (list): Learner's current polyline coordinates.
        resources_id_polylines (list of tuples): Each tuple is (resource_id, polyline).

    Returns:
        list: IDs of the (up to) 7 nearest resources, in heap-internal order
        (NOT sorted by distance).
    """
    top7 = []
    for id_polyline in resources_id_polylines:
        distance = two_polyline_distance(learner_polyline, id_polyline[1])
        # Negating the distance makes the smallest heap element the FARTHEST
        # candidate, so popping once the heap exceeds 7 evicts that one.
        heapq.heappush(top7, (-distance, id_polyline[0]))  # Use negative distance for max-heap
        if len(top7) > 7:
            heapq.heappop(top7)
    # Each heap entry is (-distance, resource_id); element [1] is the id.
    # NOTE(review): on tied distances the tuples compare by id, so resource
    # ids must be mutually comparable — confirm against callers.
    return [id[1] for id in top7]
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
# ===========================
|
| 233 |
+
# calculate_distance
|
| 234 |
+
# ===========================
|
| 235 |
+
def calculate_distance(pos1, pos2):
    """
    Euclidean distance between two 2D points.

    Parameters:
        pos1 (list | tuple): [x, y] of first point.
        pos2 (list | tuple): [x, y] of second point.

    Returns:
        float: Euclidean distance between pos1 and pos2.
    """
    dx = pos1[0] - pos2[0]
    dy = pos1[1] - pos2[1]
    return np.sqrt(dx ** 2 + dy ** 2)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
# ===========================
|
| 250 |
+
# is_valid_id
|
| 251 |
+
# ===========================
|
| 252 |
+
def is_valid_id(id):
    """
    Check whether a value can be interpreted as an integer ID.

    Parameters:
        id: Value to validate (string, number, ...).

    Returns:
        bool: True when int(id) succeeds, False otherwise.
    """
    try:
        int(id)
        return True
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; int() raises only these two
        # for invalid input.
        return False
|
docker-compose.yml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Compose file for the NLP learning grid service.
# The top-level `version` key is obsolete in Compose V2 (it only produced a
# deprecation warning) and has been removed.
services:
  nlp-learning-grid:
    build: .
    image: nlp-learning-grid:latest
    container_name: nlp-learning-grid
    ports:
      - "5000:5000"
    environment:
      - PORT=5000
      # API keys pass through from the host environment; default to empty
      # so the stack still starts without them (chat falls back to lookup).
      - GROQ_API_KEY=${GROQ_API_KEY:-}
      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
    volumes:
      # Persist app data (db.json, transcripts, ...) across rebuilds.
      - ./backend/data:/app/backend/data
    restart: unless-stopped
|
eslint.config.js
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Flat ESLint config for the TypeScript/React frontend.
import js from '@eslint/js';
import globals from 'globals';
import reactHooks from 'eslint-plugin-react-hooks';
import reactRefresh from 'eslint-plugin-react-refresh';
import tseslint from 'typescript-eslint';

export default tseslint.config(
  // Never lint build output.
  { ignores: ['dist'] },
  {
    extends: [js.configs.recommended, ...tseslint.configs.recommended],
    files: ['**/*.{ts,tsx}'],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
    plugins: {
      'react-hooks': reactHooks,
      'react-refresh': reactRefresh,
    },
    rules: {
      ...reactHooks.configs.recommended.rules,
      // Warn (not error) so HMR-unfriendly exports don't block development.
      'react-refresh/only-export-components': [
        'warn',
        { allowConstantExport: true },
      ],
    },
  }
);
|
index.html
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <!-- NOTE(review): title reads "DQN Agent Learning Environment" while the
         deployed app appears to be an NLP learning dashboard — confirm which
         title is intended. -->
    <title>DQN Agent Learning Environment</title>
  </head>
  <body>
    <div id="root"></div>
    <!-- Vite entry point; the React app mounts into #root. -->
    <script type="module" src="/src/main.tsx"></script>
  </body>
</html>
|
package-lock.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
package.json
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "vite-react-typescript-starter",
|
| 3 |
+
"private": true,
|
| 4 |
+
"version": "0.0.0",
|
| 5 |
+
"type": "module",
|
| 6 |
+
"scripts": {
|
| 7 |
+
"dev": "vite",
|
| 8 |
+
"build": "vite build",
|
| 9 |
+
"lint": "eslint .",
|
| 10 |
+
"preview": "vite preview"
|
| 11 |
+
},
|
| 12 |
+
"dependencies": {
|
| 13 |
+
"@react-three/drei": "^10.7.7",
|
| 14 |
+
"@react-three/fiber": "^9.5.0",
|
| 15 |
+
"@supabase/supabase-js": "^2.57.4",
|
| 16 |
+
"@types/canvas-confetti": "^1.9.0",
|
| 17 |
+
"canvas-confetti": "^1.9.4",
|
| 18 |
+
"framer-motion": "^12.38.0",
|
| 19 |
+
"lucide-react": "^0.344.0",
|
| 20 |
+
"react": "^18.3.1",
|
| 21 |
+
"react-dom": "^18.3.1",
|
| 22 |
+
"react-router-dom": "^7.13.2",
|
| 23 |
+
"three": "^0.183.2",
|
| 24 |
+
"zustand": "^5.0.12"
|
| 25 |
+
},
|
| 26 |
+
"devDependencies": {
|
| 27 |
+
"@eslint/js": "^9.9.1",
|
| 28 |
+
"@types/react": "^18.3.5",
|
| 29 |
+
"@types/react-dom": "^18.3.0",
|
| 30 |
+
"@vitejs/plugin-react": "^4.3.1",
|
| 31 |
+
"autoprefixer": "^10.4.18",
|
| 32 |
+
"eslint": "^9.9.1",
|
| 33 |
+
"eslint-plugin-react-hooks": "^5.1.0-rc.0",
|
| 34 |
+
"eslint-plugin-react-refresh": "^0.4.11",
|
| 35 |
+
"globals": "^15.9.0",
|
| 36 |
+
"postcss": "^8.4.35",
|
| 37 |
+
"tailwindcss": "^3.4.1",
|
| 38 |
+
"typescript": "^5.5.3",
|
| 39 |
+
"typescript-eslint": "^8.3.0",
|
| 40 |
+
"vite": "^5.4.2"
|
| 41 |
+
}
|
| 42 |
+
}
|
postcss.config.js
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// PostCSS pipeline: Tailwind generates utility classes, then Autoprefixer
// adds vendor prefixes for the configured browser targets.
export default {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};
|
scripts/deploy_hf.ps1
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Hugging Face Deployment Script for NL Main
# Builds the frontend, wires up an authenticated "hf" git remote, and
# force-pushes the current branch to the Space's main branch.

$HF_TOKEN = $env:HF_TOKEN  # Set this in your local environment
if (-not $HF_TOKEN) {
    # Previously this fell back to the placeholder "YOUR_HF_TOKEN_HERE",
    # which always failed later at `git push` with a confusing auth error.
    # Fail fast with a clear message instead.
    Write-Host "ERROR: HF_TOKEN environment variable is not set." -ForegroundColor Red
    exit 1
}
$HF_USERNAME = "minowau"  # Guessed from GitHub remote
$SPACE_NAME = "NavigatedLearning"

Write-Host "--- 1. Building Frontend ---" -ForegroundColor Cyan
npm run build

Write-Host "--- 2. Setting up Hugging Face Remote ---" -ForegroundColor Cyan
# NOTE(review): embedding the token in the remote URL stores it in plain text
# in .git/config — acceptable on a private dev machine, but never commit it.
$HF_REMOTE_URL = "https://user:$($HF_TOKEN)@huggingface.co/spaces/$($HF_USERNAME)/$($SPACE_NAME)"

# Check if the HF remote already exists, then update or add it.
$existingRemote = git remote | Select-String "^hf$"
if ($existingRemote) {
    git remote set-url hf $HF_REMOTE_URL
} else {
    git remote add hf $HF_REMOTE_URL
}

Write-Host "--- 3. Pushing to Hugging Face ---" -ForegroundColor Cyan
# Ensure we push whatever branch is currently checked out.
$currentBranch = git rev-parse --abbrev-ref HEAD
git push hf "$($currentBranch):main" --force

Write-Host "--- Deployment Request Sent! ---" -ForegroundColor Green
Write-Host "Check your Space at: https://huggingface.co/spaces/$($HF_USERNAME)/$($SPACE_NAME)" -ForegroundColor Yellow
|
scripts/deploy_minro.ps1
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# deploy_minro.ps1
# PowerShell script to bundle and deploy the application to MINRO server

$SERVER_IP = "113.30.156.94"
$SERVER_PORT = "33010"
$SERVER_USER = "ubuntu"
$REMOTE_PATH = "/home/ubuntu/nl_main"
$ZIP_FILE = "nl_main_deployment.zip"

Write-Host "--- 1. Cleaning up previous bundles ---" -ForegroundColor Cyan
if (Test-Path $ZIP_FILE) { Remove-Item $ZIP_FILE }

Write-Host "--- 2. Building Frontend ---" -ForegroundColor Cyan
npm run build

Write-Host "--- 3. Bundling Source Code (Excluding large/sensitive files) ---" -ForegroundColor Cyan
# Explicit include list keeps node_modules, .git and secrets out of the bundle.
$include = @(
    "backend/*",
    "dist/*",
    "Dockerfile",
    "docker-compose.yml",
    "package.json",
    "render.yaml",
    "scripts/*"
)

# Use Compress-Archive to create the bundle
Compress-Archive -Path $include -DestinationPath $ZIP_FILE -Force

Write-Host "--- 4. Transferring to Server ($SERVER_IP) ---" -ForegroundColor Cyan
scp -P $SERVER_PORT $ZIP_FILE "$($SERVER_USER)@$($SERVER_IP):$REMOTE_PATH/"

Write-Host "--- 5. Remote Execution: Unzip & Restart ---" -ForegroundColor Cyan
$remoteCmd = @"
cd $REMOTE_PATH
unzip -o $ZIP_FILE
docker-compose down
docker-compose up --build -d
"@

ssh -p $SERVER_PORT "$($SERVER_USER)@$($SERVER_IP)" $remoteCmd

Write-Host "--- Deployment Complete! ---" -ForegroundColor Green
# FIX: the status URL previously hard-coded 113.30.156.101, which did not match
# the deployment host configured above; derive it from $SERVER_IP so the two
# cannot drift. (If .101 was a deliberate NAT/public address, restore it with
# a comment explaining why.)
Write-Host "App should be running on: http://$($SERVER_IP):5000" -ForegroundColor Yellow
|
scripts/deploy_remote.ps1
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
$SERVER_IP = "172.16.193.4"
|
| 3 |
+
$SERVER_PORT = "22"
|
| 4 |
+
$SERVER_USER = "suhan"
|
| 5 |
+
$SERVER_PASS = "Test@123"
|
| 6 |
+
$REMOTE_PATH = "/home/suhan/nl_main"
|
| 7 |
+
$ZIP_FILE = "nl_main_deployment.tar.gz"
|
| 8 |
+
$PASS = $SERVER_PASS
|
| 9 |
+
|
| 10 |
+
Write-Host "--- 1. Cleaning up previous bundles ---" -ForegroundColor Cyan
|
| 11 |
+
if (Test-Path $ZIP_FILE) { Remove-Item $ZIP_FILE }
|
| 12 |
+
|
| 13 |
+
Write-Host "--- 2. Building Frontend ---" -ForegroundColor Cyan
|
| 14 |
+
npm run build
|
| 15 |
+
|
| 16 |
+
Write-Host "--- 3. Bundling Source Code with Tar ---" -ForegroundColor Cyan
|
| 17 |
+
tar -czf $ZIP_FILE backend dist Dockerfile docker-compose.yml package.json scripts
|
| 18 |
+
|
| 19 |
+
Write-Host "--- 4. Transferring to Server ($SERVER_IP) ---" -ForegroundColor Cyan
|
| 20 |
+
scp -P $SERVER_PORT $ZIP_FILE "$($SERVER_USER)@$($SERVER_IP):$REMOTE_PATH/"
|
| 21 |
+
|
| 22 |
+
Write-Host "--- 5. Remote Execution: Untar & Restart ---" -ForegroundColor Cyan
|
| 23 |
+
$remoteCmd = @"
|
| 24 |
+
echo $PASS | sudo -S mkdir -p $REMOTE_PATH
|
| 25 |
+
cd $REMOTE_PATH
|
| 26 |
+
echo $PASS | sudo -S rm -rf *
|
| 27 |
+
echo $PASS | sudo -S tar -xzf $ZIP_FILE
|
| 28 |
+
echo $PASS | sudo -S docker-compose down
|
| 29 |
+
echo $PASS | sudo -S docker-compose up --build -d
|
| 30 |
+
"@
|
| 31 |
+
|
| 32 |
+
ssh -p $SERVER_PORT "$($SERVER_USER)@$($SERVER_IP)" $remoteCmd
|
| 33 |
+
|
| 34 |
+
Write-Host "--- Deployment Complete! ---" -ForegroundColor Green
|
src/App.tsx
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom';
import { LoginPage } from './pages/auth/LoginPage';
import { SignupPage } from './pages/auth/SignupPage';
import { ForgotPasswordPage } from './pages/auth/ForgotPasswordPage';
import { ResourcesDashboard } from './pages/dashboard/ResourcesDashboard';
import NavigatorDashboard from './pages/dashboard/NavigatorDashboard';
import GridMatrixPage from './pages/dashboard/GridMatrixPage';
import KnowledgeHubPage from './pages/dashboard/KnowledgeHubPage';
import { AppProvider } from './context/AppContext';
import { CustomCursor } from './components/ui/CustomCursor';

/**
 * Root component: wires up client-side routing for auth and dashboard pages.
 * AppProvider supplies shared application state; CustomCursor renders once,
 * globally, above all routes.
 */
function App() {
  return (
    <BrowserRouter>
      <AppProvider>
        <CustomCursor />
        <Routes>
          <Route path="/login" element={<LoginPage />} />
          <Route path="/signup" element={<SignupPage />} />
          <Route path="/forgot-password" element={<ForgotPasswordPage />} />
          <Route path="/dashboard" element={<ResourcesDashboard />} />
          <Route path="/navigator" element={<NavigatorDashboard />} />
          <Route path="/navigator/course" element={<GridMatrixPage />} />

          {/* Redirects */}
          <Route path="/" element={<Navigate to="/login" replace />} />
          <Route path="/resources" element={<KnowledgeHubPage />} />
          {/* Any unknown path falls back to the dashboard. */}
          <Route path="*" element={<Navigate to="/dashboard" replace />} />
        </Routes>
      </AppProvider>
    </BrowserRouter>
  );
}

export default App;
|
src/components/ControlPanel.tsx
ADDED
|
@@ -0,0 +1,788 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState } from 'react';
|
| 2 |
+
import { motion } from 'framer-motion';
|
| 3 |
+
import { LearningSummary, Polyline, Resource } from '../types';
|
| 4 |
+
import { X, BookOpen, Activity, Map, PlayCircle, HelpCircle, Sparkles, CheckCircle, TrendingUp, Search, Award, Bookmark } from 'lucide-react';
|
| 5 |
+
|
| 6 |
+
interface ControlPanelProps {
|
| 7 |
+
onSummarizeLearning: (title: string, summary: string) => void;
|
| 8 |
+
onShowPolyline: (polylineId: string) => void;
|
| 9 |
+
onToggleSimulation: () => void;
|
| 10 |
+
onPlayPath: () => void;
|
| 11 |
+
learningData: LearningSummary;
|
| 12 |
+
polylines: Polyline[];
|
| 13 |
+
isSimulationRunning: boolean;
|
| 14 |
+
isLoading: boolean;
|
| 15 |
+
learningPath: string[];
|
| 16 |
+
bookmarks: string[];
|
| 17 |
+
toggleBookmark: (id: string) => void;
|
| 18 |
+
resources: Resource[];
|
| 19 |
+
onResourceClick: (resource: Resource) => void;
|
| 20 |
+
onStartTutorial: () => void;
|
| 21 |
+
agent: any;
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
export const ControlPanel: React.FC<ControlPanelProps> = ({
|
| 25 |
+
onSummarizeLearning,
|
| 26 |
+
onShowPolyline,
|
| 27 |
+
onToggleSimulation,
|
| 28 |
+
onPlayPath,
|
| 29 |
+
learningData,
|
| 30 |
+
polylines,
|
| 31 |
+
isSimulationRunning,
|
| 32 |
+
isLoading,
|
| 33 |
+
learningPath,
|
| 34 |
+
bookmarks,
|
| 35 |
+
toggleBookmark,
|
| 36 |
+
resources,
|
| 37 |
+
onResourceClick,
|
| 38 |
+
onStartTutorial,
|
| 39 |
+
agent
|
| 40 |
+
}) => {
|
| 41 |
+
const [showSummaryModal, setShowSummaryModal] = useState(false);
|
| 42 |
+
const [showPolylineModal, setShowPolylineModal] = useState(false);
|
| 43 |
+
const [showPolylineListModal, setShowPolylineListModal] = useState(false);
|
| 44 |
+
const [title, setTitle] = useState('');
|
| 45 |
+
const [summary, setSummary] = useState('');
|
| 46 |
+
const [selectedPolyline, setSelectedPolyline] = useState<Polyline | null>(null);
|
| 47 |
+
|
| 48 |
+
const handleSummarySubmit = () => {
|
| 49 |
+
if (title.trim() && summary.trim()) {
|
| 50 |
+
onSummarizeLearning(title, summary);
|
| 51 |
+
setTitle('');
|
| 52 |
+
setSummary('');
|
| 53 |
+
setShowSummaryModal(false);
|
| 54 |
+
}
|
| 55 |
+
};
|
| 56 |
+
|
| 57 |
+
const handleShowPolyline = () => {
|
| 58 |
+
const activePolyline = polylines.find(p => p.isActive);
|
| 59 |
+
if (activePolyline) {
|
| 60 |
+
setSelectedPolyline(activePolyline);
|
| 61 |
+
setShowPolylineModal(true);
|
| 62 |
+
}
|
| 63 |
+
};
|
| 64 |
+
|
| 65 |
+
// Generate chart data for polyline visualization
|
| 66 |
+
const generateChartData = (polyline: Polyline) => {
|
| 67 |
+
if (polyline.module_scores && polyline.module_scores.length > 0) {
|
| 68 |
+
return polyline.module_scores.map((score, index) => ({
|
| 69 |
+
x: index + 1,
|
| 70 |
+
y: score
|
| 71 |
+
}));
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
const data = [];
|
| 75 |
+
for (let i = 1; i <= 18; i++) {
|
| 76 |
+
data.push({
|
| 77 |
+
x: i,
|
| 78 |
+
y: 0.5 + (Math.random() - 0.5) * 0.1 + Math.sin(i * 0.5) * 0.05
|
| 79 |
+
});
|
| 80 |
+
}
|
| 81 |
+
return data;
|
| 82 |
+
};
|
| 83 |
+
|
| 84 |
+
const topicLegendItems = [
|
| 85 |
+
"Pre training objectives", "Pre trained models", "Tutorial: Introduction to huggingface",
|
| 86 |
+
"Fine tuning LLM", "Instruction tuning", "Prompt based learning",
|
| 87 |
+
"Parameter efficient fine tuning", "Incontext Learning", "Prompting methods",
|
| 88 |
+
"Retrieval Methods", "Retrieval Augmented Generation", "Quantization",
|
| 89 |
+
"Mixture of Experts Model", "Agentic AI", "Multimodal LLMs",
|
| 90 |
+
"Vision Language Models", "Policy learning using DQN", "RLHF"
|
| 91 |
+
];
|
| 92 |
+
|
| 93 |
+
const activePolyline = polylines.find(p => p.isActive);
|
| 94 |
+
|
| 95 |
+
const generateHighLineData = () => {
|
| 96 |
+
return topicLegendItems.map((topic, i) => {
|
| 97 |
+
const res = resources.find(r => r.module === topic);
|
| 98 |
+
return { x: i + 1, y: res?.high_line || 0.8 };
|
| 99 |
+
});
|
| 100 |
+
};
|
| 101 |
+
const highLineChartData = generateHighLineData();
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
return (
|
| 105 |
+
<div className="bg-white h-full flex flex-col">
|
| 106 |
+
{/* Header */}
|
| 107 |
+
<div className="px-6 py-5 border-b border-gray-100 bg-white sticky top-0 z-10">
|
| 108 |
+
<div className="flex items-center justify-between mb-1">
|
| 109 |
+
<h2 className="text-lg font-black text-slate-900 tracking-tight uppercase">Control <span className="text-brand">Center</span></h2>
|
| 110 |
+
<button
|
| 111 |
+
onClick={onStartTutorial}
|
| 112 |
+
className="text-slate-300 hover:text-brand transition-all hover:scale-110 active:scale-95"
|
| 113 |
+
>
|
| 114 |
+
<HelpCircle className="w-5 h-5" />
|
| 115 |
+
</button>
|
| 116 |
+
</div>
|
| 117 |
+
<p className="text-[10px] text-slate-400 font-bold uppercase tracking-widest">Manage your neural sync and analysis</p>
|
| 118 |
+
</div>
|
| 119 |
+
|
| 120 |
+
{/* Profile Section */}
|
| 121 |
+
<div className="px-6 py-6 border-b border-gray-100 bg-slate-50/30 flex items-center gap-6">
|
| 122 |
+
<div className="relative w-16 h-16 shrink-0">
|
| 123 |
+
<svg className="w-full h-full transform -rotate-90 overflow-visible">
|
| 124 |
+
<circle cx="32" cy="32" r="28" fill="none" stroke="#E2E8F0" strokeWidth="4" />
|
| 125 |
+
<motion.circle
|
| 126 |
+
cx="32" cy="32" r="28" fill="none" stroke="#6366F1" strokeWidth="4"
|
| 127 |
+
strokeDasharray={2 * Math.PI * 28}
|
| 128 |
+
initial={{ strokeDashoffset: 2 * Math.PI * 28 }}
|
| 129 |
+
animate={{ strokeDashoffset: (2 * Math.PI * 28) * (1 - (agent.exp || 0) / 100) }}
|
| 130 |
+
transition={{ duration: 1.5 }}
|
| 131 |
+
strokeLinecap="round"
|
| 132 |
+
/>
|
| 133 |
+
</svg>
|
| 134 |
+
<div className="absolute inset-2 rounded-full overflow-hidden bg-white border-2 border-white shadow-sm">
|
| 135 |
+
<img
|
| 136 |
+
src="https://api.dicebear.com/7.x/avataaars/svg?seed=Felix&backgroundColor=f1f5f9"
|
| 137 |
+
alt="Agent Avatar"
|
| 138 |
+
className="w-full h-full object-cover"
|
| 139 |
+
/>
|
| 140 |
+
</div>
|
| 141 |
+
</div>
|
| 142 |
+
<div className="flex flex-col">
|
| 143 |
+
<h4 className="text-[10px] font-black text-brand tracking-[0.2em] mb-1">STUDENT</h4>
|
| 144 |
+
<div className="flex items-center gap-3">
|
| 145 |
+
<motion.div
|
| 146 |
+
animate={{ scale: [1, 1.05, 1], filter: ["drop-shadow(0 0 0px rgba(99,102,241,0))", "drop-shadow(0 0 8px rgba(99,102,241,0.5))", "drop-shadow(0 0 0px rgba(99,102,241,0))"] }}
|
| 147 |
+
transition={{ duration: 3, repeat: Infinity }}
|
| 148 |
+
className="text-lg bg-brand text-white px-3 py-1 rounded-xl font-black shadow-lg shadow-brand/20 border border-brand/20"
|
| 149 |
+
>
|
| 150 |
+
LVL {agent.level}
|
| 151 |
+
</motion.div>
|
| 152 |
+
<div className="flex flex-col">
|
| 153 |
+
<div className="flex items-center gap-2">
|
| 154 |
+
<span className="text-[14px] text-slate-900 font-black tracking-tight">{agent.totalReward} pts</span>
|
| 155 |
+
{(learningData.xp_earned ?? 0) > 0 && (
|
| 156 |
+
<span className="text-[10px] font-black text-emerald-600 bg-emerald-50 px-1.5 py-0.5 rounded-md border border-emerald-100">
|
| 157 |
+
+{learningData.xp_earned}
|
| 158 |
+
</span>
|
| 159 |
+
)}
|
| 160 |
+
</div>
|
| 161 |
+
<span className="text-[8px] text-slate-400 font-bold uppercase tracking-widest leading-none mt-1">STUDY POINTS</span>
|
| 162 |
+
</div>
|
| 163 |
+
</div>
|
| 164 |
+
</div>
|
| 165 |
+
</div>
|
| 166 |
+
{/* Scrollable Content Area */}
|
| 167 |
+
<div className="flex-1 overflow-y-auto overflow-x-hidden scrollbar-thin scrollbar-thumb-gray-200 scrollbar-track-transparent">
|
| 168 |
+
{/* Control Actions */}
|
| 169 |
+
<div className="p-6 space-y-3 bg-gray-50/50 border-b border-gray-100">
|
| 170 |
+
<label className="text-xs font-semibold text-gray-500 uppercase tracking-wider mb-2 block">Actions</label>
|
| 171 |
+
<button
|
| 172 |
+
onClick={() => setShowSummaryModal(true)}
|
| 173 |
+
disabled={isLoading}
|
| 174 |
+
className="w-full flex items-center justify-center gap-2 py-2.5 px-4 bg-blue-600 hover:bg-blue-700 disabled:bg-gray-400 text-white text-sm rounded-xl font-medium transition-all shadow-sm hover:shadow-md"
|
| 175 |
+
>
|
| 176 |
+
<BookOpen className="w-4 h-4" />
|
| 177 |
+
Summarize Learning
|
| 178 |
+
</button>
|
| 179 |
+
|
| 180 |
+
<div className="grid grid-cols-2 gap-3">
|
| 181 |
+
<button
|
| 182 |
+
onClick={handleShowPolyline}
|
| 183 |
+
className="flex items-center justify-center gap-2 py-2.5 px-4 bg-white border border-gray-200 hover:bg-gray-50 text-gray-700 text-sm rounded-xl font-medium transition-all"
|
| 184 |
+
>
|
| 185 |
+
<Activity className="w-4 h-4 text-blue-500" />
|
| 186 |
+
Current Polyline
|
| 187 |
+
</button>
|
| 188 |
+
|
| 189 |
+
<button
|
| 190 |
+
onClick={() => setShowPolylineListModal(true)}
|
| 191 |
+
className="flex items-center justify-center gap-2 py-2.5 px-4 bg-white border border-gray-200 hover:bg-gray-50 text-gray-700 text-sm rounded-xl font-medium transition-all"
|
| 192 |
+
>
|
| 193 |
+
<Map className="w-4 h-4 text-purple-500" />
|
| 194 |
+
History
|
| 195 |
+
</button>
|
| 196 |
+
|
| 197 |
+
<button
|
| 198 |
+
onClick={onPlayPath}
|
| 199 |
+
disabled={learningPath.length < 2}
|
| 200 |
+
className="col-span-2 flex items-center justify-center gap-2 py-2.5 px-4 bg-white border border-gray-200 hover:bg-gray-50 disabled:bg-gray-50 disabled:text-gray-400 text-gray-700 text-sm rounded-xl font-medium transition-all"
|
| 201 |
+
>
|
| 202 |
+
<PlayCircle className="w-4 h-4 text-green-500" />
|
| 203 |
+
Play Path Animation
|
| 204 |
+
</button>
|
| 205 |
+
</div>
|
| 206 |
+
|
| 207 |
+
<button
|
| 208 |
+
onClick={onToggleSimulation}
|
| 209 |
+
className={`w-full flex items-center justify-center gap-2 py-2.5 px-4 border text-sm rounded-xl font-medium transition-all ${isSimulationRunning
|
| 210 |
+
? 'bg-red-50 border-red-200 text-red-600 hover:bg-red-100'
|
| 211 |
+
: 'bg-indigo-50 border-indigo-200 text-indigo-600 hover:bg-indigo-100'
|
| 212 |
+
}`}
|
| 213 |
+
>
|
| 214 |
+
<PlayCircle className="w-4 h-4" />
|
| 215 |
+
{isSimulationRunning ? 'Stop DQN Simulation' : 'Start DQN Simulation'}
|
| 216 |
+
</button>
|
| 217 |
+
</div>
|
| 218 |
+
|
| 219 |
+
{/* Learning Stats / Insights */}
|
| 220 |
+
{(learningData.strengths.length > 0 || learningData.recommendations.length > 0 || learningData.ai_analysis) && (
|
| 221 |
+
<div className="px-6 py-4 bg-white border-b border-gray-100">
|
| 222 |
+
<h3 className="text-xs font-bold text-gray-500 uppercase tracking-wider mb-3">Insights</h3>
|
| 223 |
+
|
| 224 |
+
{learningData.ai_analysis && (
|
| 225 |
+
<div className="mb-4 p-3 bg-indigo-50/50 border border-indigo-100 rounded-xl">
|
| 226 |
+
<div className="flex items-center gap-2 mb-1.5 text-indigo-600">
|
| 227 |
+
<Sparkles className="w-3.5 h-3.5" />
|
| 228 |
+
<span className="text-[10px] font-bold uppercase tracking-wider">AI feedback</span>
|
| 229 |
+
</div>
|
| 230 |
+
<p className="text-xs text-indigo-900 leading-relaxed font-medium italic">
|
| 231 |
+
"{learningData.ai_analysis}"
|
| 232 |
+
</p>
|
| 233 |
+
</div>
|
| 234 |
+
)}
|
| 235 |
+
|
| 236 |
+
<div className="flex flex-wrap gap-4 mb-3">
|
| 237 |
+
{learningData.strengths.length > 0 && (
|
| 238 |
+
<div className="flex-1 min-w-[120px]">
|
| 239 |
+
<span className="text-[10px] font-bold text-slate-400 uppercase tracking-widest mb-1 block">Strengths</span>
|
| 240 |
+
<div className="flex flex-wrap gap-1">
|
| 241 |
+
{learningData.strengths.slice(0, 2).map((strength, i) => (
|
| 242 |
+
<span key={i} className="px-1.5 py-0.5 bg-green-50 text-green-700 text-[9px] font-bold rounded border border-green-100">
|
| 243 |
+
{strength}
|
| 244 |
+
</span>
|
| 245 |
+
))}
|
| 246 |
+
</div>
|
| 247 |
+
</div>
|
| 248 |
+
)}
|
| 249 |
+
</div>
|
| 250 |
+
</div>
|
| 251 |
+
)}
|
| 252 |
+
|
| 253 |
+
{/* Bookmarks Section */}
|
| 254 |
+
<div className="px-6 py-4 bg-white border-b border-gray-100">
|
| 255 |
+
<div className="flex items-center justify-between mb-4">
|
| 256 |
+
<h3 className="text-sm font-bold text-gray-900 flex items-center gap-2">
|
| 257 |
+
<Bookmark className="w-4 h-4 text-blue-600 fill-blue-600" />
|
| 258 |
+
Bookmarks
|
| 259 |
+
</h3>
|
| 260 |
+
<span className="text-xs bg-blue-50 text-blue-600 px-2 py-0.5 rounded-full font-bold">{bookmarks.length}</span>
|
| 261 |
+
</div>
|
| 262 |
+
|
| 263 |
+
<div className="space-y-2 max-h-48 overflow-y-auto pr-1">
|
| 264 |
+
{bookmarks.length > 0 ? (
|
| 265 |
+
bookmarks.map(id => {
|
| 266 |
+
const resource = resources.find(r => r.id === id);
|
| 267 |
+
if (!resource) return null;
|
| 268 |
+
return (
|
| 269 |
+
<div
|
| 270 |
+
key={id}
|
| 271 |
+
className="flex items-center justify-between p-2.5 rounded-xl border border-gray-100 hover:border-blue-200 hover:bg-blue-50/30 transition-all group shadow-sm bg-white"
|
| 272 |
+
>
|
| 273 |
+
<div
|
| 274 |
+
className="flex-1 min-w-0 cursor-pointer"
|
| 275 |
+
onClick={() => onResourceClick(resource)}
|
| 276 |
+
>
|
| 277 |
+
<h4 className="text-xs font-bold text-gray-800 truncate group-hover:text-blue-600 transition-colors">
|
| 278 |
+
{resource.title}
|
| 279 |
+
</h4>
|
| 280 |
+
<p className="text-[9px] text-gray-400 uppercase tracking-widest font-black">
|
| 281 |
+
{resource.module || 'Resource'}
|
| 282 |
+
</p>
|
| 283 |
+
</div>
|
| 284 |
+
<button
|
| 285 |
+
onClick={() => toggleBookmark(id)}
|
| 286 |
+
className="text-blue-600 hover:text-blue-700 p-1.5 rounded-lg hover:bg-white transition-all ml-2"
|
| 287 |
+
title="Remove Bookmark"
|
| 288 |
+
>
|
| 289 |
+
<Bookmark className="w-3.5 h-3.5 fill-current" />
|
| 290 |
+
</button>
|
| 291 |
+
</div>
|
| 292 |
+
);
|
| 293 |
+
})
|
| 294 |
+
) : (
|
| 295 |
+
<p className="text-[10px] text-gray-400 text-center py-4 italic font-medium">No bookmarks saved yet.</p>
|
| 296 |
+
)}
|
| 297 |
+
</div>
|
| 298 |
+
</div>
|
| 299 |
+
|
| 300 |
+
{/* Learning Timeline */}
|
| 301 |
+
<div className="p-6 bg-white">
|
| 302 |
+
<div className="flex items-center justify-between mb-4">
|
| 303 |
+
<h3 className="text-sm font-bold text-gray-900">Activity Log</h3>
|
| 304 |
+
<span className="text-xs bg-gray-100 text-gray-600 px-2 py-0.5 rounded-full">{learningPath.length} items</span>
|
| 305 |
+
</div>
|
| 306 |
+
|
| 307 |
+
<div className="space-y-0">
|
| 308 |
+
<div className="relative pl-4 border-l-2 border-gray-100 space-y-6 py-2">
|
| 309 |
+
{learningData.activityLog && learningData.activityLog.length > 0 ? (
|
| 310 |
+
learningData.activityLog.slice(0, 15).map((log) => (
|
| 311 |
+
<div key={log.id} className="relative group">
|
| 312 |
+
<div className={`absolute -left-[21px] top-1.5 w-4 h-4 bg-white border-2 rounded-full group-hover:scale-110 transition-transform duration-200 flex items-center justify-center p-0.5
|
| 313 |
+
${log.type === 'visit' ? 'border-green-500 text-green-500' :
|
| 314 |
+
log.type === 'summary' ? 'border-purple-500 text-purple-500' :
|
| 315 |
+
log.type === 'optimal' ? 'border-amber-500 text-amber-500' :
|
| 316 |
+
log.type === 'search' ? 'border-blue-500 text-blue-500' : 'border-blue-400 text-blue-400'}`}>
|
| 317 |
+
{log.type === 'visit' && <CheckCircle className="w-full h-full" />}
|
| 318 |
+
{log.type === 'summary' && <Sparkles className="w-full h-full" />}
|
| 319 |
+
{log.type === 'optimal' && <TrendingUp className="w-full h-full" />}
|
| 320 |
+
{log.type === 'search' && <Search className="w-full h-full" />}
|
| 321 |
+
{log.type === 'start' && <PlayCircle className="w-full h-full" />}
|
| 322 |
+
</div>
|
| 323 |
+
<div className="flex flex-col">
|
| 324 |
+
<div className="flex items-center gap-2 mb-0.5">
|
| 325 |
+
<span className="text-[10px] text-gray-400 font-bold uppercase tracking-wider">{log.timestamp}</span>
|
| 326 |
+
{log.type === 'optimal' && <span className="text-[10px] bg-amber-100 text-amber-700 px-1.5 py-0.5 rounded font-bold uppercase tracking-tighter">AI Optimized</span>}
|
| 327 |
+
</div>
|
| 328 |
+
<span className="text-sm font-semibold text-gray-800 leading-tight group-hover:text-blue-600 transition-colors">
|
| 329 |
+
{log.title}
|
| 330 |
+
</span>
|
| 331 |
+
</div>
|
| 332 |
+
</div>
|
| 333 |
+
))
|
| 334 |
+
) : (
|
| 335 |
+
<div className="relative">
|
| 336 |
+
<div className="absolute -left-[21px] top-1.5 w-3 h-3 bg-gray-200 rounded-full"></div>
|
| 337 |
+
<div className="flex flex-col">
|
| 338 |
+
<span className="text-xs text-gray-400">Environment Ready</span>
|
| 339 |
+
<span className="text-sm text-gray-500">Initialized learning grid</span>
|
| 340 |
+
</div>
|
| 341 |
+
</div>
|
| 342 |
+
)}
|
| 343 |
+
</div>
|
| 344 |
+
</div>
|
| 345 |
+
</div>
|
| 346 |
+
</div>
|
| 347 |
+
|
| 348 |
+
{/* Summary Modal */}
|
| 349 |
+
{showSummaryModal && (
|
| 350 |
+
<div className="fixed inset-0 bg-gray-900/40 backdrop-blur-sm flex items-center justify-center z-50 p-4 animate-in fade-in duration-200">
|
| 351 |
+
<div className="bg-white rounded-2xl shadow-2xl max-w-lg w-full overflow-hidden scale-100 animate-in zoom-in-95 duration-200">
|
| 352 |
+
<div className="p-6 border-b border-gray-100 flex justify-between items-center">
|
| 353 |
+
<h3 className="text-lg font-bold text-gray-900">Summarize Learning</h3>
|
| 354 |
+
<button
|
| 355 |
+
onClick={() => setShowSummaryModal(false)}
|
| 356 |
+
className="p-1 rounded-full hover:bg-gray-100 text-gray-400 hover:text-gray-600 transition-colors"
|
| 357 |
+
>
|
| 358 |
+
<X className="w-5 h-5" />
|
| 359 |
+
</button>
|
| 360 |
+
</div>
|
| 361 |
+
|
| 362 |
+
<div className="p-6 space-y-4">
|
| 363 |
+
<div>
|
| 364 |
+
<label className="block text-sm font-medium text-gray-700 mb-1.5">Title</label>
|
| 365 |
+
<input
|
| 366 |
+
type="text"
|
| 367 |
+
value={title}
|
| 368 |
+
onChange={(e) => setTitle(e.target.value)}
|
| 369 |
+
placeholder="e.g., Introduction to Transformers"
|
| 370 |
+
className="w-full px-4 py-2.5 bg-gray-50 border border-gray-200 rounded-xl focus:ring-2 focus:ring-blue-500/20 focus:border-blue-500 outline-none transition-all placeholder:text-gray-400"
|
| 371 |
+
/>
|
| 372 |
+
</div>
|
| 373 |
+
|
| 374 |
+
<div>
|
| 375 |
+
<label className="block text-sm font-medium text-gray-700 mb-1.5">Key Takeaways</label>
|
| 376 |
+
<textarea
|
| 377 |
+
value={summary}
|
| 378 |
+
onChange={(e) => setSummary(e.target.value)}
|
| 379 |
+
placeholder="Describe what you learned..."
|
| 380 |
+
className="w-full h-32 px-4 py-3 bg-gray-50 border border-gray-200 rounded-xl resize-none focus:ring-2 focus:ring-blue-500/20 focus:border-blue-500 outline-none transition-all placeholder:text-gray-400"
|
| 381 |
+
/>
|
| 382 |
+
</div>
|
| 383 |
+
</div>
|
| 384 |
+
|
| 385 |
+
<div className="p-6 bg-gray-50 border-t border-gray-100 flex justify-end gap-3">
|
| 386 |
+
<button
|
| 387 |
+
onClick={() => setShowSummaryModal(false)}
|
| 388 |
+
className="px-4 py-2 text-sm font-medium text-gray-600 hover:text-gray-800 hover:bg-gray-200/50 rounded-lg transition-colors"
|
| 389 |
+
>
|
| 390 |
+
Cancel
|
| 391 |
+
</button>
|
| 392 |
+
<button
|
| 393 |
+
onClick={handleSummarySubmit}
|
| 394 |
+
disabled={!title.trim() || !summary.trim() || isLoading}
|
| 395 |
+
className="px-6 py-2 bg-blue-600 hover:bg-blue-700 disabled:bg-blue-400 disabled:cursor-not-allowed text-white text-sm font-medium rounded-lg shadow-sm shadow-blue-200 transition-all transform active:scale-95"
|
| 396 |
+
>
|
| 397 |
+
{isLoading ? 'Processing...' : 'Save Summary'}
|
| 398 |
+
</button>
|
| 399 |
+
</div>
|
| 400 |
+
</div>
|
| 401 |
+
</div>
|
| 402 |
+
)}
|
| 403 |
+
|
| 404 |
+
{/* Polyline Chart Modal */}
|
| 405 |
+
{showPolylineModal && selectedPolyline && (
|
| 406 |
+
<div className="fixed inset-0 bg-gray-900/40 backdrop-blur-sm flex items-center justify-center z-50 p-4">
|
| 407 |
+
<div className="bg-white rounded-2xl shadow-2xl max-w-4xl w-full max-h-[90vh] overflow-y-auto">
|
| 408 |
+
<div className="p-6 border-b border-gray-100 flex justify-between items-center sticky top-0 bg-white z-10">
|
| 409 |
+
<div>
|
| 410 |
+
<h3 className="text-xl font-bold text-gray-900">Learning Analysis</h3>
|
| 411 |
+
<div className="flex flex-col sm:flex-row sm:items-center gap-2 sm:gap-6 mt-1">
|
| 412 |
+
<p className="text-sm text-gray-500">Polyline visualization of your learning path</p>
|
| 413 |
+
<div className="flex items-center gap-3 text-xs font-semibold text-gray-600 bg-gray-50 px-2 py-1 rounded-md">
|
| 414 |
+
<span className="flex items-center gap-1.5"><div className="w-3 h-0.5 bg-blue-600"></div> Current Score</span>
|
| 415 |
+
<span className="flex items-center gap-1.5"><div className="w-3 h-0.5 border-t-2 border-dashed border-red-500"></div> Highline Target</span>
|
| 416 |
+
</div>
|
| 417 |
+
</div>
|
| 418 |
+
</div>
|
| 419 |
+
<button
|
| 420 |
+
onClick={() => setShowPolylineModal(false)}
|
| 421 |
+
className="p-1.5 rounded-full hover:bg-gray-100 text-gray-400 hover:text-gray-600 transition-colors"
|
| 422 |
+
>
|
| 423 |
+
<X className="w-5 h-5" />
|
| 424 |
+
</button>
|
| 425 |
+
</div>
|
| 426 |
+
|
| 427 |
+
<div className="p-6">
|
| 428 |
+
{/* Keywords Detected */}
|
| 429 |
+
{selectedPolyline.keywords_found && selectedPolyline.keywords_found.length > 0 && (
|
| 430 |
+
<div className="mb-6">
|
| 431 |
+
<h4 className="text-xs font-semibold text-gray-500 uppercase tracking-wider mb-3">Detected Keywords</h4>
|
| 432 |
+
<div className="flex flex-wrap gap-2">
|
| 433 |
+
{selectedPolyline.keywords_found.map((keyword: string, idx: number) => (
|
| 434 |
+
<span key={idx} className="px-3 py-1 bg-blue-50 text-blue-700 text-sm font-medium rounded-full border border-blue-100">
|
| 435 |
+
{keyword}
|
| 436 |
+
</span>
|
| 437 |
+
))}
|
| 438 |
+
</div>
|
| 439 |
+
</div>
|
| 440 |
+
)}
|
| 441 |
+
|
| 442 |
+
{/* AI Analysis Section */}
|
| 443 |
+
{(selectedPolyline.ai_analysis || (selectedPolyline.dominant_topics && selectedPolyline.dominant_topics.length > 0)) && (
|
| 444 |
+
<div className="mb-6 bg-gradient-to-br from-indigo-50 to-purple-50 border border-indigo-100 rounded-xl p-5">
|
| 445 |
+
<div className="flex items-start gap-4">
|
| 446 |
+
<div className="p-2.5 bg-white rounded-lg shadow-sm text-indigo-600 shrink-0">
|
| 447 |
+
<Sparkles className="w-5 h-5" />
|
| 448 |
+
</div>
|
| 449 |
+
<div>
|
| 450 |
+
<h4 className="text-sm font-bold text-gray-900 mb-1">AI Path Analysis</h4>
|
| 451 |
+
{selectedPolyline.ai_analysis && (
|
| 452 |
+
<p className="text-sm text-gray-700 leading-relaxed max-w-2xl mb-3">
|
| 453 |
+
{selectedPolyline.ai_analysis}
|
| 454 |
+
</p>
|
| 455 |
+
)}
|
| 456 |
+
|
| 457 |
+
{selectedPolyline.dominant_topics && selectedPolyline.dominant_topics.length > 0 && (
|
| 458 |
+
<div className="flex flex-wrap items-center gap-2">
|
| 459 |
+
<span className="text-xs font-bold text-gray-500 uppercase tracking-wider">Key Focus:</span>
|
| 460 |
+
{selectedPolyline.dominant_topics.map((topic: string, i: number) => (
|
| 461 |
+
<span key={i} className="px-2 py-1 bg-white/60 text-indigo-700 text-xs font-semibold rounded border border-indigo-100 shadow-sm">
|
| 462 |
+
{topic}
|
| 463 |
+
</span>
|
| 464 |
+
))}
|
| 465 |
+
</div>
|
| 466 |
+
)}
|
| 467 |
+
</div>
|
| 468 |
+
</div>
|
| 469 |
+
</div>
|
| 470 |
+
)}
|
| 471 |
+
|
| 472 |
+
{/* Chart Container */}
|
| 473 |
+
<div className="bg-gray-50/50 border border-gray-200 rounded-2xl p-6 mb-6">
|
| 474 |
+
<div className="relative h-80 w-full">
|
| 475 |
+
<svg width="100%" height="100%" viewBox="0 0 600 300" className="overflow-visible">
|
| 476 |
+
{/* Grid lines */}
|
| 477 |
+
<defs>
|
| 478 |
+
<pattern id="grid" width="33.33" height="50" patternUnits="userSpaceOnUse">
|
| 479 |
+
<path d="M 33.33 0 L 0 0 0 50" fill="none" stroke="#e5e7eb" strokeWidth="1" strokeDasharray="3,3" />
|
| 480 |
+
</pattern>
|
| 481 |
+
</defs>
|
| 482 |
+
<rect width="100%" height="100%" fill="url(#grid)" opacity="0.6" />
|
| 483 |
+
|
| 484 |
+
{/* Y-axis labels */}
|
| 485 |
+
<g className="text-[10px] fill-gray-400">
|
| 486 |
+
<text x="-10" y="30" textAnchor="end">1.0</text>
|
| 487 |
+
<text x="-10" y="155" textAnchor="end">0.5</text>
|
| 488 |
+
<text x="-10" y="280" textAnchor="end">0.0</text>
|
| 489 |
+
</g>
|
| 490 |
+
|
| 491 |
+
{/* X-axis labels */}
|
| 492 |
+
{Array.from({ length: 18 }, (_, i) => i + 1).map(i => (
|
| 493 |
+
<text key={i} x={33.33 * i - 16} y="315" fontSize="10" fill="#9ca3af" textAnchor="middle">{i}</text>
|
| 494 |
+
))}
|
| 495 |
+
|
| 496 |
+
{/* Chart area */}
|
| 497 |
+
<path
|
| 498 |
+
d={`M 0 300 ${generateChartData(selectedPolyline).map((point, i) =>
|
| 499 |
+
`L ${33.33 * (i + 1)} ${280 - point.y * 250}`
|
| 500 |
+
).join(' ')} L 600 300 Z`}
|
| 501 |
+
fill="url(#gradient)"
|
| 502 |
+
opacity="0.1"
|
| 503 |
+
/>
|
| 504 |
+
<defs>
|
| 505 |
+
<linearGradient id="gradient" x1="0" x2="0" y1="0" y2="1">
|
| 506 |
+
<stop offset="0%" stopColor="#2563eb" />
|
| 507 |
+
<stop offset="100%" stopColor="#2563eb" stopOpacity="0" />
|
| 508 |
+
</linearGradient>
|
| 509 |
+
</defs>
|
| 510 |
+
|
| 511 |
+
<polyline
|
| 512 |
+
fill="none"
|
| 513 |
+
stroke="#2563eb"
|
| 514 |
+
strokeWidth="3"
|
| 515 |
+
strokeLinecap="round"
|
| 516 |
+
strokeLinejoin="round"
|
| 517 |
+
points={generateChartData(selectedPolyline).map((point, i) =>
|
| 518 |
+
`${33.33 * (i + 1)},${280 - point.y * 250}`
|
| 519 |
+
).join(' ')}
|
| 520 |
+
className="drop-shadow-sm"
|
| 521 |
+
/>
|
| 522 |
+
|
| 523 |
+
{/* High Line Overlay */}
|
| 524 |
+
<polyline
|
| 525 |
+
fill="none"
|
| 526 |
+
stroke="#ef4444"
|
| 527 |
+
strokeWidth="2"
|
| 528 |
+
strokeLinecap="round"
|
| 529 |
+
strokeLinejoin="round"
|
| 530 |
+
strokeDasharray="4 4"
|
| 531 |
+
points={highLineChartData.map((point, i) =>
|
| 532 |
+
`${33.33 * (i + 1)},${280 - point.y * 250}`
|
| 533 |
+
).join(' ')}
|
| 534 |
+
className="opacity-70"
|
| 535 |
+
/>
|
| 536 |
+
|
| 537 |
+
{/* Data points */}
|
| 538 |
+
{generateChartData(selectedPolyline).map((point, i) => (
|
| 539 |
+
<circle
|
| 540 |
+
key={i}
|
| 541 |
+
cx={33.33 * (i + 1)}
|
| 542 |
+
cy={280 - point.y * 250}
|
| 543 |
+
r="4"
|
| 544 |
+
className="fill-white stroke-blue-600 stroke-2 hover:r-6 hover:stroke-4 transition-all cursor-pointer"
|
| 545 |
+
/>
|
| 546 |
+
))}
|
| 547 |
+
|
| 548 |
+
{/* High Line Data points */}
|
| 549 |
+
{highLineChartData.map((point, i) => (
|
| 550 |
+
<circle
|
| 551 |
+
key={`hl-${i}`}
|
| 552 |
+
cx={33.33 * (i + 1)}
|
| 553 |
+
cy={280 - point.y * 250}
|
| 554 |
+
r="3"
|
| 555 |
+
className="fill-white stroke-red-500 stroke-2 opacity-80"
|
| 556 |
+
/>
|
| 557 |
+
))}
|
| 558 |
+
</svg>
|
| 559 |
+
|
| 560 |
+
{/* Axis titles */}
|
| 561 |
+
<div className="absolute -left-12 top-1/2 transform -rotate-90 -translate-y-1/2 text-xs font-semibold text-gray-400 tracking-wider">
|
| 562 |
+
ASSIMILATION SCORE
|
| 563 |
+
</div>
|
| 564 |
+
<div className="absolute -bottom-10 left-1/2 transform -translate-x-1/2 text-xs font-semibold text-gray-400 tracking-wider">
|
| 565 |
+
TOPIC INDEX
|
| 566 |
+
</div>
|
| 567 |
+
</div>
|
| 568 |
+
</div>
|
| 569 |
+
|
| 570 |
+
{/* Topic Legend */}
|
| 571 |
+
<div className="grid grid-cols-2 md:grid-cols-3 gap-3">
|
| 572 |
+
{topicLegendItems.map((item, index) => (
|
| 573 |
+
<div key={index} className="flex items-start gap-2 p-2 rounded-lg hover:bg-gray-50 transition-colors">
|
| 574 |
+
<span className="flex-shrink-0 flex items-center justify-center w-5 h-5 bg-gray-100 rounded text-[10px] font-bold text-gray-600">
|
| 575 |
+
{index + 1}
|
| 576 |
+
</span>
|
| 577 |
+
<span className="text-xs text-gray-600 leading-tight" title={item}>{item}</span>
|
| 578 |
+
</div>
|
| 579 |
+
))}
|
| 580 |
+
</div>
|
| 581 |
+
</div>
|
| 582 |
+
</div>
|
| 583 |
+
</div>
|
| 584 |
+
)}
|
| 585 |
+
|
| 586 |
+
{/* Polylines List Modal — Journey History (Revamped Timeline) */}
|
| 587 |
+
{showPolylineListModal && (
|
| 588 |
+
<div className="fixed inset-0 bg-gray-900/50 backdrop-blur-sm flex items-center justify-center z-50 p-4">
|
| 589 |
+
<div className="bg-white rounded-2xl shadow-2xl w-full max-w-2xl max-h-[90vh] flex flex-col overflow-hidden">
|
| 590 |
+
|
| 591 |
+
{/* Header */}
|
| 592 |
+
<div className="px-6 py-5 border-b border-gray-100 flex justify-between items-center bg-white">
|
| 593 |
+
<div>
|
| 594 |
+
<h3 className="text-xl font-bold text-gray-900">Journey History</h3>
|
| 595 |
+
<p className="text-sm text-gray-400 mt-0.5">Your learning assimilation over time</p>
|
| 596 |
+
</div>
|
| 597 |
+
<button
|
| 598 |
+
onClick={() => setShowPolylineListModal(false)}
|
| 599 |
+
className="p-1.5 rounded-full hover:bg-gray-100 text-gray-400 hover:text-gray-600 transition-colors"
|
| 600 |
+
>
|
| 601 |
+
<X className="w-5 h-5" />
|
| 602 |
+
</button>
|
| 603 |
+
</div>
|
| 604 |
+
|
| 605 |
+
{/* Timeline Body */}
|
| 606 |
+
<div className="flex-1 overflow-y-auto px-6 py-6">
|
| 607 |
+
{(() => {
|
| 608 |
+
const filteredPolylines = polylines.filter(p => !['learning-path-1', 'dqn-simulation', 'high_line', 'current_average'].includes(p.id));
|
| 609 |
+
|
| 610 |
+
if (filteredPolylines.length === 0) {
|
| 611 |
+
return (
|
| 612 |
+
<div className="flex flex-col items-center justify-center py-16 text-center">
|
| 613 |
+
<div className="w-16 h-16 rounded-2xl bg-gray-50 border border-dashed border-gray-200 flex items-center justify-center mb-4">
|
| 614 |
+
<BookOpen className="w-8 h-8 text-gray-300" />
|
| 615 |
+
</div>
|
| 616 |
+
<p className="text-gray-500 font-medium">No history yet</p>
|
| 617 |
+
<p className="text-sm text-gray-400 mt-1">Submit a learning summary to start tracking your journey.</p>
|
| 618 |
+
</div>
|
| 619 |
+
);
|
| 620 |
+
}
|
| 621 |
+
|
| 622 |
+
return (
|
| 623 |
+
<div className="relative">
|
| 624 |
+
{/* Vertical spine */}
|
| 625 |
+
<div className="absolute left-5 top-0 bottom-0 w-0.5 bg-gradient-to-b from-blue-200 via-purple-200 to-gray-100" />
|
| 626 |
+
|
| 627 |
+
<div className="space-y-8">
|
| 628 |
+
{filteredPolylines.map((polyline, index) => {
|
| 629 |
+
const chartData = generateChartData(polyline);
|
| 630 |
+
const peakScore = chartData.length ? Math.max(...chartData.map(d => d.y)) : 0;
|
| 631 |
+
|
| 632 |
+
return (
|
| 633 |
+
<div key={polyline.id} className="relative flex gap-5 group">
|
| 634 |
+
{/* Numbered bubble */}
|
| 635 |
+
<div className="flex-shrink-0 w-10 h-10 rounded-full flex items-center justify-center font-bold text-sm shadow-sm z-10
|
| 636 |
+
bg-gradient-to-br from-blue-500 to-indigo-600 text-white border-2 border-white">
|
| 637 |
+
{index + 1}
|
| 638 |
+
</div>
|
| 639 |
+
|
| 640 |
+
{/* Card */}
|
| 641 |
+
<div className="flex-1 bg-white border border-gray-100 rounded-2xl shadow-sm hover:shadow-md transition-shadow overflow-hidden">
|
| 642 |
+
|
| 643 |
+
{/* Card top bar */}
|
| 644 |
+
<div className="px-4 py-3 border-b border-gray-50 flex items-center justify-between bg-gray-50/50">
|
| 645 |
+
<div className="flex items-center gap-2 min-w-0">
|
| 646 |
+
<h4 className="font-semibold text-gray-900 text-sm truncate max-w-[200px]">
|
| 647 |
+
{polyline.name || `Summary #${index + 1}`}
|
| 648 |
+
</h4>
|
| 649 |
+
{polyline.next_recommendation && (
|
| 650 |
+
<span className={`flex-shrink-0 text-[9px] font-bold px-1.5 py-0.5 rounded uppercase tracking-wider
|
| 651 |
+
${polyline.next_recommendation.reason === 'dqn'
|
| 652 |
+
? 'bg-indigo-100 text-indigo-700'
|
| 653 |
+
: 'bg-amber-100 text-amber-700'
|
| 654 |
+
}`}>
|
| 655 |
+
{polyline.next_recommendation.reason === 'dqn' ? '🧠 DQN' : '⚡ Fallback'}
|
| 656 |
+
</span>
|
| 657 |
+
)}
|
| 658 |
+
</div>
|
| 659 |
+
<div className="flex items-center gap-2 flex-shrink-0">
|
| 660 |
+
{polyline.assimilation_position && (
|
| 661 |
+
<span className="text-[10px] font-mono bg-blue-50 text-blue-600 px-2 py-0.5 rounded-lg border border-blue-100">
|
| 662 |
+
📍 ({polyline.assimilation_position.x}, {polyline.assimilation_position.y})
|
| 663 |
+
</span>
|
| 664 |
+
)}
|
| 665 |
+
{polyline.confidence && (
|
| 666 |
+
<span className="text-[10px] text-gray-400 font-medium">
|
| 667 |
+
{(polyline.confidence * 100).toFixed(0)}%
|
| 668 |
+
</span>
|
| 669 |
+
)}
|
| 670 |
+
</div>
|
| 671 |
+
</div>
|
| 672 |
+
|
| 673 |
+
<div className="p-4 space-y-3">
|
| 674 |
+
{/* Keywords */}
|
| 675 |
+
{polyline.keywords_found && polyline.keywords_found.length > 0 && (
|
| 676 |
+
<div className="flex flex-wrap gap-1">
|
| 677 |
+
{polyline.keywords_found.slice(0, 5).map((kw, i) => (
|
| 678 |
+
<span key={i} className="px-2 py-0.5 bg-blue-50 text-blue-600 text-[10px] font-medium rounded-full border border-blue-100">
|
| 679 |
+
{kw}
|
| 680 |
+
</span>
|
| 681 |
+
))}
|
| 682 |
+
{polyline.keywords_found.length > 5 && (
|
| 683 |
+
<span className="px-2 py-0.5 bg-gray-100 text-gray-500 text-[10px] rounded-full">
|
| 684 |
+
+{polyline.keywords_found.length - 5}
|
| 685 |
+
</span>
|
| 686 |
+
)}
|
| 687 |
+
</div>
|
| 688 |
+
)}
|
| 689 |
+
|
| 690 |
+
{/* Dominant topics */}
|
| 691 |
+
{polyline.dominant_topics && polyline.dominant_topics.length > 0 && (
|
| 692 |
+
<div className="flex flex-wrap gap-1">
|
| 693 |
+
{polyline.dominant_topics.map((topic, i) => (
|
| 694 |
+
<span key={i} className="px-2 py-0.5 bg-purple-50 text-purple-600 text-[10px] font-semibold rounded-full border border-purple-100">
|
| 695 |
+
★ {topic}
|
| 696 |
+
</span>
|
| 697 |
+
))}
|
| 698 |
+
</div>
|
| 699 |
+
)}
|
| 700 |
+
|
| 701 |
+
{/* Next recommendation from navigator */}
|
| 702 |
+
{polyline.next_recommendation && (
|
| 703 |
+
<div className="flex items-center gap-2 bg-indigo-50 border border-indigo-100 rounded-xl px-3 py-2">
|
| 704 |
+
<Sparkles className="w-3.5 h-3.5 text-indigo-500 flex-shrink-0" />
|
| 705 |
+
<div className="min-w-0">
|
| 706 |
+
<span className="text-[9px] font-bold text-indigo-400 uppercase tracking-wider block leading-none mb-0.5">Next Recommended</span>
|
| 707 |
+
<span className="text-xs font-medium text-indigo-800 truncate block">{polyline.next_recommendation.title}</span>
|
| 708 |
+
</div>
|
| 709 |
+
{polyline.next_recommendation.module && (
|
| 710 |
+
<span className="flex-shrink-0 ml-auto text-[9px] text-indigo-400 font-medium max-w-[80px] text-right truncate">
|
| 711 |
+
{polyline.next_recommendation.module}
|
| 712 |
+
</span>
|
| 713 |
+
)}
|
| 714 |
+
</div>
|
| 715 |
+
)}
|
| 716 |
+
|
| 717 |
+
{/* Mini sparkline + peak */}
|
| 718 |
+
<div className="flex items-end gap-3">
|
| 719 |
+
<div className="flex-1 bg-gray-50 rounded-xl p-2 border border-gray-100">
|
| 720 |
+
<svg width="100%" height="50" viewBox="0 0 280 50" preserveAspectRatio="none">
|
| 721 |
+
<defs>
|
| 722 |
+
<linearGradient id={`sg-${polyline.id}`} x1="0" x2="0" y1="0" y2="1">
|
| 723 |
+
<stop offset="0%" stopColor="#6366f1" stopOpacity="0.25" />
|
| 724 |
+
<stop offset="100%" stopColor="#6366f1" stopOpacity="0" />
|
| 725 |
+
</linearGradient>
|
| 726 |
+
</defs>
|
| 727 |
+
<path
|
| 728 |
+
d={`M 0 50 ${chartData.map((p, i) => `L ${(280 / 18) * (i + 1)} ${46 - p.y * 40}`).join(' ')} L 280 50 Z`}
|
| 729 |
+
fill={`url(#sg-${polyline.id})`}
|
| 730 |
+
/>
|
| 731 |
+
<polyline
|
| 732 |
+
fill="none"
|
| 733 |
+
stroke={index === polylines.length - 1 ? '#6366f1' : '#94a3b8'}
|
| 734 |
+
strokeWidth="2"
|
| 735 |
+
strokeLinecap="round"
|
| 736 |
+
strokeLinejoin="round"
|
| 737 |
+
points={chartData.map((p, i) =>
|
| 738 |
+
`${(280 / 18) * (i + 1)},${46 - p.y * 40}`
|
| 739 |
+
).join(' ')}
|
| 740 |
+
/>
|
| 741 |
+
<polyline
|
| 742 |
+
fill="none"
|
| 743 |
+
stroke="#ef4444"
|
| 744 |
+
strokeWidth="1.5"
|
| 745 |
+
strokeLinecap="round"
|
| 746 |
+
strokeLinejoin="round"
|
| 747 |
+
strokeDasharray="2 3"
|
| 748 |
+
points={highLineChartData.map((p, i) =>
|
| 749 |
+
`${(280 / 18) * (i + 1)},${46 - p.y * 40}`
|
| 750 |
+
).join(' ')}
|
| 751 |
+
className="opacity-70"
|
| 752 |
+
/>
|
| 753 |
+
</svg>
|
| 754 |
+
</div>
|
| 755 |
+
<div className="text-right flex-shrink-0 w-12">
|
| 756 |
+
<span className="text-sm font-bold text-gray-800 block">{(peakScore * 100).toFixed(0)}%</span>
|
| 757 |
+
<span className="text-[10px] text-gray-400">peak</span>
|
| 758 |
+
</div>
|
| 759 |
+
</div>
|
| 760 |
+
|
| 761 |
+
{/* View details */}
|
| 762 |
+
<button
|
| 763 |
+
onClick={() => {
|
| 764 |
+
onShowPolyline(polyline.id);
|
| 765 |
+
setSelectedPolyline(polyline);
|
| 766 |
+
setShowPolylineListModal(false);
|
| 767 |
+
setShowPolylineModal(true);
|
| 768 |
+
}}
|
| 769 |
+
className="w-full text-center text-xs font-semibold text-blue-600 hover:text-blue-700 hover:bg-blue-50 py-2 rounded-lg transition-colors border border-blue-100"
|
| 770 |
+
>
|
| 771 |
+
View Full Analysis →
|
| 772 |
+
</button>
|
| 773 |
+
</div>
|
| 774 |
+
</div>
|
| 775 |
+
</div>
|
| 776 |
+
);
|
| 777 |
+
})}
|
| 778 |
+
</div>
|
| 779 |
+
</div>
|
| 780 |
+
);
|
| 781 |
+
})()}
|
| 782 |
+
</div>
|
| 783 |
+
</div>
|
| 784 |
+
</div>
|
| 785 |
+
)}
|
| 786 |
+
</div>
|
| 787 |
+
);
|
| 788 |
+
};
|
src/components/Dashboard/DashboardTutorial.tsx
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState, useEffect, useRef } from 'react';
|
| 2 |
+
import { motion, AnimatePresence } from 'framer-motion';
|
| 3 |
+
import { RobotAssistant } from './RobotAssistant';
|
| 4 |
+
import { X, ChevronRight, CheckCircle2 } from 'lucide-react';
|
| 5 |
+
|
| 6 |
+
interface TutorialStep {
|
| 7 |
+
targetId: string;
|
| 8 |
+
title: string;
|
| 9 |
+
description: string;
|
| 10 |
+
robotMessage: string;
|
| 11 |
+
offset: { x: number; y: number };
|
| 12 |
+
}
|
| 13 |
+
|
| 14 |
+
const dashboardSteps: TutorialStep[] = [
|
| 15 |
+
{
|
| 16 |
+
targetId: 'hero-title',
|
| 17 |
+
title: 'Mission Control',
|
| 18 |
+
description: 'Welcome to your central command. This is where high-level objectives are deployed.',
|
| 19 |
+
robotMessage: "Systems ONLINE! I'm your tactical interface. Let's start the tour!",
|
| 20 |
+
offset: { x: 50, y: 150 }
|
| 21 |
+
},
|
| 22 |
+
{
|
| 23 |
+
targetId: 'progress-bar',
|
| 24 |
+
title: 'Neural Synchronization',
|
| 25 |
+
description: 'Your level progress represents your synchronization with the core matrix.',
|
| 26 |
+
robotMessage: "Keep this bar full! We need maximum sync for deep-dive missions.",
|
| 27 |
+
offset: { x: 50, y: 100 }
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
targetId: 'missions-matrix',
|
| 31 |
+
title: 'Mission Matrix',
|
| 32 |
+
description: 'Select a node to begin deployment. Each card acts as a secure cover page for your mission intel.',
|
| 33 |
+
robotMessage: "The Matrix is loaded with fresh intel! Overload these nodes to gain XP.",
|
| 34 |
+
offset: { x: -350, y: -50 }
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
targetId: 'intel-feed',
|
| 38 |
+
title: 'Intel Feed',
|
| 39 |
+
description: 'Direct streams from the NLP Academy. Stay sharp with decrypted field reports.',
|
| 40 |
+
robotMessage: "Intel is power, recruit! Check the feed before every mission.",
|
| 41 |
+
offset: { x: 50, y: -250 }
|
| 42 |
+
}
|
| 43 |
+
];
|
| 44 |
+
|
| 45 |
+
const navigatorSteps: TutorialStep[] = [
|
| 46 |
+
{
|
| 47 |
+
targetId: 'matrix-grid',
|
| 48 |
+
title: 'Navigator Matrix',
|
| 49 |
+
description: 'Interact with the 2D grid to navigate through complex neural architectures.',
|
| 50 |
+
robotMessage: "Check the grid! We use DQN algorithms here to find the optimal learning path.",
|
| 51 |
+
offset: { x: 200, y: 200 }
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
targetId: 'control-center',
|
| 55 |
+
title: 'Control Center',
|
| 56 |
+
description: 'Analyze your progress, manage bookmarks, and run simulations.',
|
| 57 |
+
robotMessage: "This is the brain! Run simulations to see how the agent optimizes objectives.",
|
| 58 |
+
offset: { x: -400, y: 100 }
|
| 59 |
+
}
|
| 60 |
+
];
|
| 61 |
+
|
| 62 |
+
interface DashboardTutorialProps {
|
| 63 |
+
onComplete: () => void;
|
| 64 |
+
page?: 'dashboard' | 'navigator';
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
const DashboardTutorial: React.FC<DashboardTutorialProps> = ({ onComplete, page = 'dashboard' }) => {
|
| 68 |
+
const [currentStep, setCurrentStep] = useState(0);
|
| 69 |
+
const [robotPos, setRobotPos] = useState({ x: 0, y: 0 });
|
| 70 |
+
const [targetRect, setTargetRect] = useState<DOMRect | null>(null);
|
| 71 |
+
|
| 72 |
+
const steps = page === 'dashboard' ? dashboardSteps : navigatorSteps;
|
| 73 |
+
|
| 74 |
+
useEffect(() => {
|
| 75 |
+
const updatePosition = () => {
|
| 76 |
+
const step = steps[currentStep];
|
| 77 |
+
if (!step) return;
|
| 78 |
+
|
| 79 |
+
const element = document.getElementById(step.targetId);
|
| 80 |
+
if (element) {
|
| 81 |
+
// Scroll element into view first
|
| 82 |
+
element.scrollIntoView({ behavior: 'smooth', block: 'center' });
|
| 83 |
+
|
| 84 |
+
// Small delay to allow scroll to complete before measuring
|
| 85 |
+
setTimeout(() => {
|
| 86 |
+
const rect = element.getBoundingClientRect();
|
| 87 |
+
setTargetRect(rect);
|
| 88 |
+
|
| 89 |
+
// Calculate desired positions
|
| 90 |
+
let nx = rect.left + step.offset.x;
|
| 91 |
+
let ny = rect.top + step.offset.y;
|
| 92 |
+
|
| 93 |
+
// Viewport clamping (Robot is ~100x100 scaled 2x = 200x200)
|
| 94 |
+
const margin = 20;
|
| 95 |
+
nx = Math.max(margin, Math.min(nx, window.innerWidth - 220));
|
| 96 |
+
ny = Math.max(margin, Math.min(ny, window.innerHeight - 220));
|
| 97 |
+
|
| 98 |
+
setRobotPos({ x: nx, y: ny });
|
| 99 |
+
}, 600);
|
| 100 |
+
}
|
| 101 |
+
};
|
| 102 |
+
|
| 103 |
+
updatePosition();
|
| 104 |
+
window.addEventListener('resize', updatePosition);
|
| 105 |
+
return () => window.removeEventListener('resize', updatePosition);
|
| 106 |
+
}, [currentStep, steps]);
|
| 107 |
+
|
| 108 |
+
const nextStep = () => {
|
| 109 |
+
if (currentStep < steps.length - 1) {
|
| 110 |
+
setCurrentStep(prev => prev + 1);
|
| 111 |
+
} else {
|
| 112 |
+
onComplete();
|
| 113 |
+
}
|
| 114 |
+
};
|
| 115 |
+
|
| 116 |
+
const step = steps[currentStep];
|
| 117 |
+
|
| 118 |
+
return (
|
| 119 |
+
<div className="fixed inset-0 z-[110] pointer-events-none overflow-hidden">
|
| 120 |
+
{/* Dynamic Reveal Highlight */}
|
| 121 |
+
<AnimatePresence>
|
| 122 |
+
{targetRect && (
|
| 123 |
+
<motion.div
|
| 124 |
+
initial={{ opacity: 0 }}
|
| 125 |
+
animate={{ opacity: 1 }}
|
| 126 |
+
exit={{ opacity: 0 }}
|
| 127 |
+
className="absolute inset-0 bg-slate-950/40 backdrop-blur-[2px] pointer-events-auto"
|
| 128 |
+
style={{
|
| 129 |
+
clipPath: `polygon(
|
| 130 |
+
0% 0%, 0% 100%,
|
| 131 |
+
${targetRect.left - 20}px 100%,
|
| 132 |
+
${targetRect.left - 20}px ${targetRect.top - 20}px,
|
| 133 |
+
${targetRect.right + 20}px ${targetRect.top - 20}px,
|
| 134 |
+
${targetRect.right + 20}px ${targetRect.bottom + 20}px,
|
| 135 |
+
${targetRect.left - 20}px ${targetRect.bottom + 20}px,
|
| 136 |
+
${targetRect.left - 20}px 100%,
|
| 137 |
+
100% 100%, 100% 0%
|
| 138 |
+
)`
|
| 139 |
+
}}
|
| 140 |
+
onClick={onComplete}
|
| 141 |
+
/>
|
| 142 |
+
)}
|
| 143 |
+
</AnimatePresence>
|
| 144 |
+
|
| 145 |
+
{/* Moving Robot Assistant */}
|
| 146 |
+
<motion.div
|
| 147 |
+
animate={{
|
| 148 |
+
x: robotPos.x,
|
| 149 |
+
y: robotPos.y,
|
| 150 |
+
scale: 2
|
| 151 |
+
}}
|
| 152 |
+
transition={{ type: "spring", stiffness: 100, damping: 20 }}
|
| 153 |
+
className="absolute top-0 left-0"
|
| 154 |
+
>
|
| 155 |
+
<RobotAssistant tutorialMessage={step?.robotMessage || "Synchronizing..."} />
|
| 156 |
+
</motion.div>
|
| 157 |
+
|
| 158 |
+
{/* Instructional Speech Bubble */}
|
| 159 |
+
<motion.div
|
| 160 |
+
animate={{
|
| 161 |
+
x: Math.max(20, Math.min(robotPos.x + 100, window.innerWidth - 340)),
|
| 162 |
+
y: Math.max(20, Math.min(robotPos.y - 180, window.innerHeight - 250))
|
| 163 |
+
}}
|
| 164 |
+
transition={{ type: "spring", stiffness: 80, damping: 20 }}
|
| 165 |
+
className="absolute top-0 left-0 w-80 bg-white rounded-[32px] p-6 shadow-2xl border-4 border-brand/20 pointer-events-auto"
|
| 166 |
+
>
|
| 167 |
+
<div className="flex items-center justify-between mb-4">
|
| 168 |
+
<h3 className="text-lg font-black text-slate-900 uppercase tracking-tighter">{step?.title}</h3>
|
| 169 |
+
<button onClick={onComplete} className="text-slate-300 hover:text-brand transition-colors">
|
| 170 |
+
<X size={16} />
|
| 171 |
+
</button>
|
| 172 |
+
</div>
|
| 173 |
+
|
| 174 |
+
<p className="text-slate-500 text-sm italic mb-6">"{step?.description}"</p>
|
| 175 |
+
|
| 176 |
+
<div className="flex items-center justify-between">
|
| 177 |
+
<div className="flex gap-1">
|
| 178 |
+
{steps.map((_, idx) => (
|
| 179 |
+
<div
|
| 180 |
+
key={idx}
|
| 181 |
+
className={`h-1 rounded-full transition-all ${idx === currentStep ? 'w-4 bg-brand' : 'w-1 bg-slate-200'}`}
|
| 182 |
+
/>
|
| 183 |
+
))}
|
| 184 |
+
</div>
|
| 185 |
+
<button
|
| 186 |
+
onClick={nextStep}
|
| 187 |
+
className="bg-brand text-white px-5 py-2 rounded-xl font-black text-[9px] uppercase tracking-widest flex items-center gap-2 hover:bg-brand-light transition-all shadow-lg active:scale-95"
|
| 188 |
+
>
|
| 189 |
+
{currentStep === steps.length - 1 ? 'Finish' : 'Next'}
|
| 190 |
+
{currentStep === steps.length - 1 ? <CheckCircle2 size={12} /> : <ChevronRight size={12} />}
|
| 191 |
+
</button>
|
| 192 |
+
</div>
|
| 193 |
+
|
| 194 |
+
{/* Tail for the speech bubble */}
|
| 195 |
+
<div className="absolute -bottom-4 left-10 w-8 h-8 bg-white border-b-4 border-l-4 border-brand/20 rotate-[-45deg] rounded-bl-xl" />
|
| 196 |
+
</motion.div>
|
| 197 |
+
|
| 198 |
+
{/* Skip Button */}
|
| 199 |
+
<button
|
| 200 |
+
onClick={onComplete}
|
| 201 |
+
className="absolute bottom-12 right-12 px-6 py-3 bg-white/10 backdrop-blur-md rounded-2xl text-[10px] font-black uppercase tracking-[0.3em] text-white/40 hover:text-white border border-white/5 pointer-events-auto"
|
| 202 |
+
>
|
| 203 |
+
Skip Tutorial
|
| 204 |
+
</button>
|
| 205 |
+
</div>
|
| 206 |
+
);
|
| 207 |
+
};
|
| 208 |
+
|
| 209 |
+
export default DashboardTutorial;
|
src/components/Dashboard/MissionGrid.tsx
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { motion } from 'framer-motion';
|
| 2 |
+
import { Trophy, ArrowUpRight, Clock, Users, Bookmark, Sparkles } from 'lucide-react';
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
import { useAppContext } from '../../context/AppContext';
|
| 7 |
+
import { useNavigate } from 'react-router-dom';
|
| 8 |
+
|
| 9 |
+
const MissionGrid = () => {
|
| 10 |
+
const { resources, agent, bookmarks, toggleBookmark } = useAppContext();
|
| 11 |
+
const navigate = useNavigate();
|
| 12 |
+
|
| 13 |
+
// Dynamically map curriculum queue isolating unvisited remaining modules
|
| 14 |
+
const unvisitedNodes = resources.filter(r => !agent.visitedResources.includes(r.id)).slice(0, 4);
|
| 15 |
+
|
| 16 |
+
// Transform the explicit resource API payloads into compatible Mission display blocks natively synced
|
| 17 |
+
const dynamicMissions = unvisitedNodes.map((r, index) => ({
|
| 18 |
+
id: r.id,
|
| 19 |
+
title: r.title,
|
| 20 |
+
description: `Explore parameters defining the behavior surrounding ${r.module} architecture sets natively inside our remote neural matrices.`,
|
| 21 |
+
image: r.youtube_url ? `https://img.youtube.com/vi/${r.youtube_url.split('v=')[1]?.split('&')[0] || r.youtube_url.split('/').pop()}/maxresdefault.jpg` : "https://images.unsplash.com/photo-1677442136019-21780ecad995?q=80&w=800&auto=format&fit=crop",
|
| 22 |
+
bannerColor: index % 2 === 0 ? 'from-brand/20 to-brand-dark/40' : 'from-slate-600/20 to-slate-900/40',
|
| 23 |
+
score: r.reward || 150,
|
| 24 |
+
stage: r.difficulty || 1,
|
| 25 |
+
duration: "45m",
|
| 26 |
+
participants: Math.floor(Math.random() * 1500) + 500,
|
| 27 |
+
isBookmarked: bookmarks.includes(r.id)
|
| 28 |
+
}));
|
| 29 |
+
return (
|
| 30 |
+
<div id="missions-matrix" className="grid grid-cols-1 md:grid-cols-2 gap-8 pb-12">
|
| 31 |
+
{dynamicMissions.map((mission, idx) => (
|
| 32 |
+
<motion.div
|
| 33 |
+
key={mission.id}
|
| 34 |
+
initial={{ opacity: 0, y: 10 }}
|
| 35 |
+
animate={{ opacity: 1, y: 0 }}
|
| 36 |
+
transition={{ delay: idx * 0.1, duration: 0.5 }}
|
| 37 |
+
className="group relative bg-white rounded-2xl overflow-hidden shadow-sm border border-slate-200 hover:border-brand/40 transition-all duration-300"
|
| 38 |
+
>
|
| 39 |
+
{/* Card Header Image */}
|
| 40 |
+
<div className="relative h-48 w-full overflow-hidden bg-slate-100">
|
| 41 |
+
<img
|
| 42 |
+
src={mission.image}
|
| 43 |
+
alt={mission.title}
|
| 44 |
+
className="w-full h-full object-cover transition-transform duration-700 group-hover:scale-105"
|
| 45 |
+
/>
|
| 46 |
+
<div className={`absolute inset-0 bg-gradient-to-t ${mission.bannerColor} opacity-40`} />
|
| 47 |
+
<div className="absolute inset-0 bg-gradient-to-t from-black/20 to-transparent" />
|
| 48 |
+
|
| 49 |
+
<div className="absolute top-4 left-4">
|
| 50 |
+
<div className="bg-white/80 backdrop-blur-md px-3 py-1 rounded-lg flex items-center gap-2 border border-white/20 shadow-sm">
|
| 51 |
+
<Sparkles size={12} className="text-brand" />
|
| 52 |
+
<span className="text-[10px] font-bold uppercase text-slate-800 tracking-wider">Stage {mission.stage}</span>
|
| 53 |
+
</div>
|
| 54 |
+
</div>
|
| 55 |
+
|
| 56 |
+
<div className="absolute top-4 right-4">
|
| 57 |
+
<button
|
| 58 |
+
onClick={(e) => { e.stopPropagation(); toggleBookmark(mission.id); }}
|
| 59 |
+
className={`w-8 h-8 ${mission.isBookmarked ? 'bg-brand/90 text-white' : 'bg-white/80 text-slate-600'} backdrop-blur-md rounded-lg flex items-center justify-center shadow-sm hover:scale-105 transition-all border border-white/20`}
|
| 60 |
+
>
|
| 61 |
+
<Bookmark size={14} className={mission.isBookmarked ? 'fill-current' : ''} />
|
| 62 |
+
</button>
|
| 63 |
+
</div>
|
| 64 |
+
|
| 65 |
+
<div className="absolute bottom-4 left-4">
|
| 66 |
+
<div className="bg-brand px-3 py-1 rounded-lg flex items-center gap-2 shadow-lg border border-white/20">
|
| 67 |
+
<Trophy size={12} className="text-white" />
|
| 68 |
+
<span className="text-[10px] font-bold text-white">+{mission.score} Score</span>
|
| 69 |
+
</div>
|
| 70 |
+
</div>
|
| 71 |
+
</div>
|
| 72 |
+
|
| 73 |
+
{/* Card Body */}
|
| 74 |
+
<div className="p-6 flex flex-col gap-4">
|
| 75 |
+
<div className="flex flex-col gap-1">
|
| 76 |
+
<h3 className="text-xl font-bold text-slate-900 group-hover:text-brand transition-colors">
|
| 77 |
+
{mission.title}
|
| 78 |
+
</h3>
|
| 79 |
+
<div className="flex items-center gap-3">
|
| 80 |
+
<span className="text-[10px] items-center font-bold text-slate-400 uppercase tracking-widest flex gap-1.5 align-middle">
|
| 81 |
+
<Clock size={12} className="text-slate-300"/> {mission.duration}
|
| 82 |
+
</span>
|
| 83 |
+
<span className="text-[10px] items-center font-bold text-slate-400 uppercase tracking-widest flex gap-1.5 align-middle">
|
| 84 |
+
<Users size={12} className="text-slate-300"/> {mission.participants.toLocaleString()} Active
|
| 85 |
+
</span>
|
| 86 |
+
</div>
|
| 87 |
+
</div>
|
| 88 |
+
|
| 89 |
+
<p className="text-slate-500 text-sm leading-relaxed font-medium line-clamp-2">
|
| 90 |
+
{mission.description}
|
| 91 |
+
</p>
|
| 92 |
+
|
| 93 |
+
<div className="pt-4 flex justify-end">
|
| 94 |
+
<motion.button
|
| 95 |
+
whileHover={{ y: -1 }}
|
| 96 |
+
whileTap={{ scale: 0.98 }}
|
| 97 |
+
onClick={() => navigate(`/navigator/course?resource=${mission.id}`)}
|
| 98 |
+
className="bg-brand text-white px-6 py-2 rounded-xl font-bold text-[11px] uppercase tracking-wider flex items-center gap-2 hover:bg-brand-dark transition-all shadow-sm shadow-brand/10 border border-white/10"
|
| 99 |
+
>
|
| 100 |
+
Engage <ArrowUpRight size={14} />
|
| 101 |
+
</motion.button>
|
| 102 |
+
</div>
|
| 103 |
+
</div>
|
| 104 |
+
</motion.div>
|
| 105 |
+
))}
|
| 106 |
+
</div>
|
| 107 |
+
);
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
export default MissionGrid;
|
src/components/Dashboard/RobotAssistant.tsx
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState, useEffect, useRef } from 'react';
|
| 2 |
+
import { motion, AnimatePresence, useSpring } from 'framer-motion';
|
| 3 |
+
|
| 4 |
+
interface RobotAssistantProps {
|
| 5 |
+
tutorialMessage?: string;
|
| 6 |
+
layoutId?: string;
|
| 7 |
+
size?: 'sm' | 'md' | 'lg' | 'xl';
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
export const RobotAssistant: React.FC<RobotAssistantProps> = ({
|
| 11 |
+
tutorialMessage,
|
| 12 |
+
layoutId = "robot-assistant",
|
| 13 |
+
size = 'md'
|
| 14 |
+
}) => {
|
| 15 |
+
const [messages] = useState([
|
| 16 |
+
"Careful there.",
|
| 17 |
+
"Interaction registered.",
|
| 18 |
+
"That was unnecessary 🙂",
|
| 19 |
+
"Processing request...",
|
| 20 |
+
"Scanning patterns.",
|
| 21 |
+
"Always helpful.",
|
| 22 |
+
"System sync: Stable.",
|
| 23 |
+
"Data points collected."
|
| 24 |
+
]);
|
| 25 |
+
|
| 26 |
+
const [activeMessage, setActiveMessage] = useState<string | null>(null);
|
| 27 |
+
const [isHovered, setIsHovered] = useState(false);
|
| 28 |
+
const containerRef = useRef<HTMLDivElement>(null);
|
| 29 |
+
|
| 30 |
+
// Mouse tracking for eyes
|
| 31 |
+
const mouseX = useSpring(0, { stiffness: 100, damping: 25 });
|
| 32 |
+
const mouseY = useSpring(0, { stiffness: 100, damping: 25 });
|
| 33 |
+
|
| 34 |
+
useEffect(() => {
|
| 35 |
+
const handleMouseMove = (e: MouseEvent) => {
|
| 36 |
+
if (containerRef.current) {
|
| 37 |
+
const rect = containerRef.current.getBoundingClientRect();
|
| 38 |
+
const centerX = rect.left + rect.width / 2;
|
| 39 |
+
const centerY = rect.top + rect.height / 2;
|
| 40 |
+
|
| 41 |
+
// Smoother, slower tracking
|
| 42 |
+
const dx = (e.clientX - centerX) / (window.innerWidth / 4);
|
| 43 |
+
const dy = (e.clientY - centerY) / (window.innerHeight / 4);
|
| 44 |
+
|
| 45 |
+
mouseX.set(Math.max(-8, Math.min(8, dx * 10)));
|
| 46 |
+
mouseY.set(Math.max(-8, Math.min(8, dy * 10)));
|
| 47 |
+
}
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
window.addEventListener('mousemove', handleMouseMove);
|
| 51 |
+
return () => window.removeEventListener('mousemove', handleMouseMove);
|
| 52 |
+
}, [mouseX, mouseY]);
|
| 53 |
+
|
| 54 |
+
const handleClick = () => {
|
| 55 |
+
const randomIndex = Math.floor(Math.random() * messages.length);
|
| 56 |
+
setActiveMessage(messages[randomIndex]);
|
| 57 |
+
|
| 58 |
+
// Clear message after 2s
|
| 59 |
+
setTimeout(() => setActiveMessage(null), 2000);
|
| 60 |
+
};
|
| 61 |
+
|
| 62 |
+
return (
|
| 63 |
+
<motion.div
|
| 64 |
+
layoutId={layoutId}
|
| 65 |
+
className="relative group/robot"
|
| 66 |
+
ref={containerRef}
|
| 67 |
+
>
|
| 68 |
+
{/* Speech Bubble (Minimal Tooltip Style) */}
|
| 69 |
+
<AnimatePresence>
|
| 70 |
+
{activeMessage && (
|
| 71 |
+
<motion.div
|
| 72 |
+
initial={{ opacity: 0, y: 10, scale: 0.9 }}
|
| 73 |
+
animate={{ opacity: 1, y: 0, scale: 1 }}
|
| 74 |
+
exit={{ opacity: 0, y: 10, scale: 0.9 }}
|
| 75 |
+
className="absolute -top-12 left-1/2 -translate-x-1/2 z-50 pointer-events-none whitespace-nowrap"
|
| 76 |
+
>
|
| 77 |
+
<div className="text-[10px] font-bold text-slate-600 bg-white shadow-xl shadow-slate-200/50 px-3 py-1.5 rounded-xl border border-slate-100 flex items-center gap-2">
|
| 78 |
+
<div className="w-1.5 h-1.5 bg-brand rounded-full animate-pulse" />
|
| 79 |
+
{activeMessage}
|
| 80 |
+
</div>
|
| 81 |
+
</motion.div>
|
| 82 |
+
)}
|
| 83 |
+
</AnimatePresence>
|
| 84 |
+
|
| 85 |
+
{/* Robot Container */}
|
| 86 |
+
<motion.div
|
| 87 |
+
onClick={handleClick}
|
| 88 |
+
onMouseEnter={() => setIsHovered(true)}
|
| 89 |
+
onMouseLeave={() => setIsHovered(false)}
|
| 90 |
+
whileHover={{ y: -2 }}
|
| 91 |
+
animate={{
|
| 92 |
+
y: [-2, 2, -2],
|
| 93 |
+
}}
|
| 94 |
+
transition={{
|
| 95 |
+
y: { duration: 5, repeat: Infinity, ease: "easeInOut" },
|
| 96 |
+
}}
|
| 97 |
+
className="cursor-pointer flex flex-col items-center gap-4 relative group transition-all"
|
| 98 |
+
>
|
| 99 |
+
{/* Subtle Backdrop Glow */}
|
| 100 |
+
<div className="absolute inset-0 bg-brand/3 rounded-full blur-2xl opacity-0 group-hover:opacity-100 transition-opacity" />
|
| 101 |
+
|
| 102 |
+
{/* Robot head/SVG */}
|
| 103 |
+
<div className={`relative transition-all duration-500 ${
|
| 104 |
+
size === 'xl' ? 'w-80 h-80' :
|
| 105 |
+
size === 'lg' ? 'w-48 h-48' :
|
| 106 |
+
size === 'md' ? 'w-24 h-24' : 'w-12 h-12'
|
| 107 |
+
}`}>
|
| 108 |
+
<svg viewBox="0 0 120 120" className="w-full h-full overflow-visible">
|
| 109 |
+
{/* Robot Body (Midnight Slate) */}
|
| 110 |
+
<defs>
|
| 111 |
+
<radialGradient id="robot-body-shine" cx="50%" cy="40%" r="50%" fx="50%" fy="40%">
|
| 112 |
+
<stop offset="0%" stopColor="#334155" />
|
| 113 |
+
<stop offset="100%" stopColor="#1E293B" />
|
| 114 |
+
</radialGradient>
|
| 115 |
+
<filter id="inner-glow">
|
| 116 |
+
<feFlood floodColor="#334155" floodOpacity="0.3" result="offsetColor"/>
|
| 117 |
+
<feComposite in="offsetColor" in2="SourceAlpha" operator="in" result="offsetBlur"/>
|
| 118 |
+
<feGaussianBlur in="offsetBlur" stdDeviation="2" result="blur"/>
|
| 119 |
+
<feComposite in="SourceAlpha" in2="blur" operator="out" result="glow"/>
|
| 120 |
+
</filter>
|
| 121 |
+
</defs>
|
| 122 |
+
<motion.rect
|
| 123 |
+
x="20" y="30" width="80" height="70" rx="16"
|
| 124 |
+
fill="url(#robot-body-shine)"
|
| 125 |
+
stroke="#0F172A" strokeWidth="2"
|
| 126 |
+
className="shadow-2xl"
|
| 127 |
+
/>
|
| 128 |
+
<rect x="28" y="38" width="64" height="44" rx="10" fill="#020617" filter="url(#inner-glow)" />
|
| 129 |
+
|
| 130 |
+
{/* Screen Eyes (Proper SVG Rects) */}
|
| 131 |
+
<motion.g animate={{ scale: isHovered ? 1.05 : 1 }} transition={{ duration: 0.3 }}>
|
| 132 |
+
{/* Left Eye Shell */}
|
| 133 |
+
<rect x="38" y="50" width="14" height="20" rx="3" fill="#334155" />
|
| 134 |
+
<motion.rect
|
| 135 |
+
x="41" y="54" width="8" height="12" rx="2" fill="#64748B"
|
| 136 |
+
style={{ x: mouseX, y: mouseY }}
|
| 137 |
+
animate={{ height: [12, 1, 12, 12, 12] }}
|
| 138 |
+
transition={{ duration: 4, repeat: Infinity, times: [0, 0.05, 0.1, 0.8, 1] }}
|
| 139 |
+
/>
|
| 140 |
+
|
| 141 |
+
{/* Right Eye Shell */}
|
| 142 |
+
<rect x="68" y="50" width="14" height="20" rx="3" fill="#334155" />
|
| 143 |
+
<motion.rect
|
| 144 |
+
x="71" y="54" width="8" height="12" rx="2" fill="#64748B"
|
| 145 |
+
style={{ x: mouseX, y: mouseY }}
|
| 146 |
+
animate={{ height: [12, 1, 12, 12, 12] }}
|
| 147 |
+
transition={{ duration: 4, repeat: Infinity, times: [0, 0.05, 0.1, 0.8, 1] }}
|
| 148 |
+
/>
|
| 149 |
+
</motion.g>
|
| 150 |
+
|
| 151 |
+
{/* Antenna */}
|
| 152 |
+
<rect x="58" y="12" width="4" height="18" fill="#CBD5E1" />
|
| 153 |
+
<motion.circle
|
| 154 |
+
cx="60" cy="12" r="4" fill="#64748B"
|
| 155 |
+
animate={{
|
| 156 |
+
opacity: [1, 0.6, 1],
|
| 157 |
+
scale: [1, 1.2, 1],
|
| 158 |
+
}}
|
| 159 |
+
transition={{ duration: 3, repeat: Infinity }}
|
| 160 |
+
/>
|
| 161 |
+
</svg>
|
| 162 |
+
</div>
|
| 163 |
+
</motion.div>
|
| 164 |
+
</motion.div>
|
| 165 |
+
);
|
| 166 |
+
};
|
src/components/Dashboard/StatisticCard.tsx
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React from 'react';
|
| 2 |
+
import { motion } from 'framer-motion';
|
| 3 |
+
import { Target, Clock, TrendingUp, MoreHorizontal } from 'lucide-react';
|
| 4 |
+
import { useAppContext } from '../../context/AppContext';
|
| 5 |
+
|
| 6 |
+
export const StatisticCard: React.FC = () => {
|
| 7 |
+
const { agent, resources } = useAppContext();
|
| 8 |
+
|
| 9 |
+
// Dynamic Telemetry Calculations
|
| 10 |
+
const tasksCompleted = agent.visitedResources.length;
|
| 11 |
+
const timeSpent = `${(tasksCompleted * 45 / 60).toFixed(1)}h`; // 45 mins per module
|
| 12 |
+
const progressPercent = agent.totalReward % 100;
|
| 13 |
+
const stageFormatted = `STAGE ${(agent.level || 0).toString().padStart(2, '0')}`;
|
| 14 |
+
|
| 15 |
+
// Find Active Objective (Next Unvisited Module)
|
| 16 |
+
const unvisitedNodes = resources.filter(r => !agent.visitedResources.includes(r.id));
|
| 17 |
+
const currentObjective = unvisitedNodes.length > 0 ? unvisitedNodes[0].title : "Matrix Complete";
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
return (
|
| 22 |
+
<motion.div
|
| 23 |
+
initial={{ opacity: 0, y: 10 }}
|
| 24 |
+
animate={{ opacity: 1, y: 0 }}
|
| 25 |
+
className="bg-white rounded-[32px] p-8 shadow-[0_8px_30px_rgb(0,0,0,0.02)] border border-slate-100 w-full flex flex-col gap-8 relative overflow-hidden group"
|
| 26 |
+
>
|
| 27 |
+
{/* Complex Background Accents */}
|
| 28 |
+
<div className="absolute top-0 right-0 w-48 h-48 bg-brand/5 rounded-full blur-[80px] -translate-y-1/2 translate-x-1/2 opacity-60" />
|
| 29 |
+
<div className="absolute bottom-0 left-0 w-32 h-32 bg-emerald-500/5 rounded-full blur-[60px] translate-y-1/2 -translate-x-1/2 opacity-40" />
|
| 30 |
+
<div className="absolute inset-0 bg-[url('https://www.transparenttextures.com/patterns/carbon-fibre.png')] opacity-[0.015] pointer-events-none" />
|
| 31 |
+
|
| 32 |
+
{/* Header */}
|
| 33 |
+
<div className="flex justify-between items-center relative z-10 px-1">
|
| 34 |
+
<div className="flex items-center gap-4">
|
| 35 |
+
<div className="relative">
|
| 36 |
+
<div className="absolute inset-0 bg-brand/10 rounded-2xl blur-lg animate-pulse opacity-40" />
|
| 37 |
+
<div className="w-12 h-12 rounded-2xl bg-white border border-slate-100 flex items-center justify-center overflow-hidden shadow-sm relative z-10 p-0.5">
|
| 38 |
+
<img
|
| 39 |
+
src="https://api.dicebear.com/7.x/avataaars/svg?seed=Felix&backgroundColor=f1f5f9"
|
| 40 |
+
alt="Avatar"
|
| 41 |
+
className="w-full h-full object-cover rounded-[14px]"
|
| 42 |
+
/>
|
| 43 |
+
</div>
|
| 44 |
+
</div>
|
| 45 |
+
<div>
|
| 46 |
+
<div className="flex items-center gap-2 mb-1.5">
|
| 47 |
+
<div className="w-1.5 h-1.5 bg-brand rounded-full shadow-[0_0_8px_rgba(108,99,255,0.6)]" />
|
| 48 |
+
<h3 className="text-[11px] font-black text-slate-400 uppercase tracking-[0.2em] leading-none">Intelligence Hub</h3>
|
| 49 |
+
</div>
|
| 50 |
+
<p className="text-sm font-bold text-slate-900">Analysis: Learner</p>
|
| 51 |
+
</div>
|
| 52 |
+
</div>
|
| 53 |
+
<button className="w-10 h-10 flex items-center justify-center text-slate-300 hover:text-slate-600 hover:bg-slate-50 rounded-xl transition-all">
|
| 54 |
+
<MoreHorizontal size={20} />
|
| 55 |
+
</button>
|
| 56 |
+
</div>
|
| 57 |
+
|
| 58 |
+
{/* Analytical Metrics Section */}
|
| 59 |
+
<div className="flex flex-col gap-6 relative z-10 px-1">
|
| 60 |
+
{/* Tasks Analytics */}
|
| 61 |
+
<div className="flex flex-col gap-3 group transition-all">
|
| 62 |
+
<div className="flex items-end justify-between">
|
| 63 |
+
<div className="flex items-center gap-2.5 px-3 py-1.5 bg-slate-50/80 border border-slate-100/50 rounded-full">
|
| 64 |
+
<div className="text-emerald-500">
|
| 65 |
+
<Target size={14} />
|
| 66 |
+
</div>
|
| 67 |
+
<span className="text-[10px] font-bold text-slate-500 uppercase tracking-widest">Summaries</span>
|
| 68 |
+
</div>
|
| 69 |
+
<div className="flex items-baseline gap-1.5">
|
| 70 |
+
<span className="text-2xl font-black text-slate-900 tracking-tighter">{tasksCompleted}</span>
|
| 71 |
+
<span className="text-[10px] font-bold text-emerald-600 bg-emerald-50 px-1.5 py-0.5 rounded-md">+12%</span>
|
| 72 |
+
</div>
|
| 73 |
+
</div>
|
| 74 |
+
</div>
|
| 75 |
+
|
| 76 |
+
{/* Objective Progress */}
|
| 77 |
+
<div className="p-5 bg-gradient-to-br from-slate-50/80 to-white rounded-3xl border border-slate-100 hover:border-brand/20 transition-all shadow-[0_4px_20px_rgba(0,0,0,0.01)] group/objective">
|
| 78 |
+
<div className="flex flex-col gap-4">
|
| 79 |
+
<div className="flex justify-between items-center">
|
| 80 |
+
<div className="flex items-center gap-2.5">
|
| 81 |
+
<div className="p-1.5 bg-brand/5 rounded-lg">
|
| 82 |
+
<Target size={14} className="text-brand shadow-[0_0_10px_rgba(108,99,255,0.2)]" />
|
| 83 |
+
</div>
|
| 84 |
+
<span className="text-[9px] font-black text-slate-400 uppercase tracking-widest">Active Objective</span>
|
| 85 |
+
</div>
|
| 86 |
+
<div className="px-2 py-1 bg-white border border-slate-100 rounded-lg shadow-sm">
|
| 87 |
+
<span className="text-[10px] font-black text-brand tracking-tight">{stageFormatted}</span>
|
| 88 |
+
</div>
|
| 89 |
+
</div>
|
| 90 |
+
<h4 className="text-xs font-bold text-slate-700 leading-tight group-hover/objective:text-slate-900 transition-colors uppercase tracking-tight">{currentObjective}</h4>
|
| 91 |
+
<div className="w-full bg-slate-200/60 h-2 rounded-full overflow-hidden p-[1px] shadow-inner">
|
| 92 |
+
<motion.div
|
| 93 |
+
initial={{ width: 0 }}
|
| 94 |
+
animate={{ width: `${progressPercent}%` }}
|
| 95 |
+
transition={{ duration: 1.2, delay: 0.5 }}
|
| 96 |
+
className="bg-brand h-full rounded-full shadow-[0_0_12px_rgba(108,99,255,0.3)] relative overflow-hidden"
|
| 97 |
+
>
|
| 98 |
+
<div className="absolute inset-0 bg-gradient-to-r from-transparent via-white/20 to-transparent animate-shimmer" />
|
| 99 |
+
</motion.div>
|
| 100 |
+
</div>
|
| 101 |
+
</div>
|
| 102 |
+
</div>
|
| 103 |
+
|
| 104 |
+
{/* Time Multi-Metric */}
|
| 105 |
+
<div className="flex items-center justify-between pt-3">
|
| 106 |
+
<div className="flex items-center gap-4">
|
| 107 |
+
<div className="w-11 h-11 flex items-center justify-center bg-indigo-50/50 border border-indigo-100/50 text-indigo-500 rounded-2xl shadow-sm">
|
| 108 |
+
<Clock size={18} />
|
| 109 |
+
</div>
|
| 110 |
+
<div>
|
| 111 |
+
<p className="text-[9px] font-black text-slate-400 uppercase tracking-widest leading-none mb-1.5">Time Invested</p>
|
| 112 |
+
<p className="text-base font-black text-slate-900 leading-none">{timeSpent}</p>
|
| 113 |
+
</div>
|
| 114 |
+
</div>
|
| 115 |
+
<div className="flex flex-col items-end">
|
| 116 |
+
<div className="flex items-center gap-1.5 text-emerald-500 mb-2 px-2 py-1 bg-emerald-50/50 rounded-lg border border-emerald-100/50">
|
| 117 |
+
<TrendingUp size={12} />
|
| 118 |
+
<span className="text-[10px] font-black tracking-tighter">Sync: {progressPercent}%</span>
|
| 119 |
+
</div>
|
| 120 |
+
<div className="w-14 h-1.5 bg-slate-100 rounded-full overflow-hidden p-[1px]">
|
| 121 |
+
<div className="w-full h-full bg-emerald-500/20 rounded-full" />
|
| 122 |
+
</div>
|
| 123 |
+
</div>
|
| 124 |
+
</div>
|
| 125 |
+
</div>
|
| 126 |
+
|
| 127 |
+
{/* High-Fidelity Footer */}
|
| 128 |
+
<div className="pt-4 border-t border-slate-50 mt-2">
|
| 129 |
+
<div className="flex items-center justify-center gap-2">
|
| 130 |
+
<div className="w-1 h-1 bg-emerald-400 rounded-full" />
|
| 131 |
+
<p className="text-[8px] text-slate-300 font-bold uppercase tracking-[0.4em]">
|
| 132 |
+
Security Matrix : Operational
|
| 133 |
+
</p>
|
| 134 |
+
</div>
|
| 135 |
+
</div>
|
| 136 |
+
</motion.div>
|
| 137 |
+
);
|
| 138 |
+
};
|
src/components/GridVisualization.tsx
ADDED
|
@@ -0,0 +1,876 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState } from 'react';
|
| 2 |
+
import { motion } from 'framer-motion';
|
| 3 |
+
import { Resource, Agent, GridPosition, Polyline } from '../types';
|
| 4 |
+
import { BookOpen, Play, FileText, PenTool, RefreshCw, MapPin, Sparkles, Search, X, ChevronRight, ZoomIn, ZoomOut } from 'lucide-react';
|
| 5 |
+
import { nlpApi } from '../services/nlpApi';
|
| 6 |
+
const avatar = "https://api.dicebear.com/7.x/avataaars/svg?seed=Felix&backgroundColor=f1f5f9";
|
| 7 |
+
|
| 8 |
+
interface GridVisualizationProps {
|
| 9 |
+
resources: Resource[];
|
| 10 |
+
agent: Agent;
|
| 11 |
+
polylines: Polyline[];
|
| 12 |
+
onResourceClick: (resource: Resource) => void;
|
| 13 |
+
onAgentMove: (position: GridPosition) => void;
|
| 14 |
+
isSimulationRunning: boolean;
|
| 15 |
+
dqnPathInfo: { resource: Resource | null, reward: number } | null;
|
| 16 |
+
onRefreshDQNPath: () => void;
|
| 17 |
+
isPlaying?: boolean;
|
| 18 |
+
playbackPath?: GridPosition[];
|
| 19 |
+
onPlaybackComplete?: () => void;
|
| 20 |
+
}
|
| 21 |
+
|
| 22 |
+
const GRID_SIZE = 20;
|
| 23 |
+
|
| 24 |
+
const ResourceIcon = ({ type }: { type: Resource['type'] }) => {
|
| 25 |
+
const iconProps = { size: 14, className: "text-white drop-shadow-sm" };
|
| 26 |
+
|
| 27 |
+
switch (type) {
|
| 28 |
+
case 'book':
|
| 29 |
+
return <BookOpen {...iconProps} />;
|
| 30 |
+
case 'video':
|
| 31 |
+
return <Play {...iconProps} />;
|
| 32 |
+
case 'quiz':
|
| 33 |
+
return <FileText {...iconProps} />;
|
| 34 |
+
case 'assignment':
|
| 35 |
+
return <PenTool {...iconProps} />;
|
| 36 |
+
default:
|
| 37 |
+
return <BookOpen {...iconProps} />;
|
| 38 |
+
}
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
const SocialAgent: React.FC<{ isMoving: boolean }> = ({ isMoving }) => {
|
| 42 |
+
return (
|
| 43 |
+
<motion.div
|
| 44 |
+
className="relative flex flex-col items-center justify-center -translate-y-4"
|
| 45 |
+
animate={isMoving ? {
|
| 46 |
+
scale: [1, 1.1, 1],
|
| 47 |
+
rotateY: 0
|
| 48 |
+
} : { rotateY: 0 }}
|
| 49 |
+
transition={{ duration: 0.3 }}
|
| 50 |
+
>
|
| 51 |
+
{/* Tooltip */}
|
| 52 |
+
<div className="absolute -bottom-8 bg-[#1A1F2E] text-white text-[10px] font-medium px-2 py-0.5 rounded shadow-lg whitespace-nowrap z-50">
|
| 53 |
+
You are here
|
| 54 |
+
<div className="absolute -top-1 left-1/2 -translate-x-1/2 w-0 h-0 border-l-4 border-l-transparent border-r-4 border-r-transparent border-b-4 border-b-[#1A1F2E]" />
|
| 55 |
+
</div>
|
| 56 |
+
|
| 57 |
+
{/* Main Avatar Container */}
|
| 58 |
+
<div className="relative w-12 h-12 rounded-full border-2 border-white shadow-xl overflow-visible p-0.5 bg-white">
|
| 59 |
+
<img
|
| 60 |
+
src={avatar}
|
| 61 |
+
alt="User Profile"
|
| 62 |
+
className="w-full h-full rounded-full object-cover"
|
| 63 |
+
/>
|
| 64 |
+
{/* Online Indicator */}
|
| 65 |
+
<div className="absolute top-0 right-0 w-3 h-3 bg-green-500 border-2 border-white rounded-full shadow-sm animate-pulse" />
|
| 66 |
+
</div>
|
| 67 |
+
</motion.div>
|
| 68 |
+
);
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
export const GridVisualization: React.FC<GridVisualizationProps> = ({
|
| 72 |
+
resources,
|
| 73 |
+
agent,
|
| 74 |
+
polylines,
|
| 75 |
+
onResourceClick,
|
| 76 |
+
onAgentMove,
|
| 77 |
+
isSimulationRunning,
|
| 78 |
+
dqnPathInfo,
|
| 79 |
+
onRefreshDQNPath,
|
| 80 |
+
isPlaying = false,
|
| 81 |
+
playbackPath = [],
|
| 82 |
+
onPlaybackComplete
|
| 83 |
+
}) => {
|
| 84 |
+
const [selectedResource, setSelectedResource] = useState<Resource | null>(null);
|
| 85 |
+
const [hoveredResource, setHoveredResource] = useState<string | null>(null);
|
| 86 |
+
const hoverTimeout = React.useRef<NodeJS.Timeout | null>(null);
|
| 87 |
+
const [pathProgress, setPathProgress] = useState(0);
|
| 88 |
+
const [isSearching, setIsSearching] = useState(false);
|
| 89 |
+
const [searchQuery, setSearchQuery] = useState('');
|
| 90 |
+
const [zoomLevel, setZoomLevel] = useState(1.5);
|
| 91 |
+
const gridRef = React.useRef<HTMLDivElement>(null);
|
| 92 |
+
|
| 93 |
+
// Auto-scroll to agent on load or movement
|
| 94 |
+
React.useEffect(() => {
|
| 95 |
+
if (gridRef.current && agent.position) {
|
| 96 |
+
const scrollY = (agent.position.y * (1000 / GRID_SIZE) * zoomLevel) - (gridRef.current.clientHeight / 2);
|
| 97 |
+
const scrollX = (agent.position.x * (1000 / GRID_SIZE) * zoomLevel) - (gridRef.current.clientWidth / 2);
|
| 98 |
+
|
| 99 |
+
gridRef.current.scrollTo({
|
| 100 |
+
top: scrollY,
|
| 101 |
+
left: scrollX,
|
| 102 |
+
behavior: 'smooth'
|
| 103 |
+
});
|
| 104 |
+
}
|
| 105 |
+
}, [agent.position.x, agent.position.y, zoomLevel]);
|
| 106 |
+
const [videoResource, setVideoResource] = useState<Resource | null>(null);
|
| 107 |
+
const [chatMessages, setChatMessages] = useState<{ role: string; content: string }[]>([]);
|
| 108 |
+
const [chatInput, setChatInput] = useState('');
|
| 109 |
+
const [chatLoading, setChatLoading] = useState(false);
|
| 110 |
+
// @ts-ignore
|
| 111 |
+
const chatEndRef = React.useRef<HTMLDivElement | null>(null);
|
| 112 |
+
|
| 113 |
+
// Tracking agent movement for animation
|
| 114 |
+
const [prevPosition, setPrevPosition] = useState<GridPosition>(agent.position);
|
| 115 |
+
const [showTravelArrow, setShowTravelArrow] = useState(false);
|
| 116 |
+
|
| 117 |
+
React.useEffect(() => {
|
| 118 |
+
if (agent.position.x !== prevPosition.x || agent.position.y !== prevPosition.y) {
|
| 119 |
+
setShowTravelArrow(true);
|
| 120 |
+
const timer = setTimeout(() => {
|
| 121 |
+
setShowTravelArrow(false);
|
| 122 |
+
setPrevPosition(agent.position);
|
| 123 |
+
}, 1500); // Duration of arrow visibility
|
| 124 |
+
return () => clearTimeout(timer);
|
| 125 |
+
}
|
| 126 |
+
}, [agent.position, prevPosition]);
|
| 127 |
+
|
| 128 |
+
// Convert YouTube URL to embed URL
|
| 129 |
+
const getYouTubeEmbedUrl = (url: string): string | null => {
|
| 130 |
+
if (!url) return null;
|
| 131 |
+
// Handle youtu.be/ID and youtube.com/watch?v=ID formats
|
| 132 |
+
let videoId = '';
|
| 133 |
+
if (url.includes('youtu.be/')) {
|
| 134 |
+
videoId = url.split('youtu.be/')[1]?.split(/[?&#]/)[0] || '';
|
| 135 |
+
} else if (url.includes('watch?v=')) {
|
| 136 |
+
videoId = url.split('watch?v=')[1]?.split(/[?&#]/)[0] || '';
|
| 137 |
+
} else if (url.includes('youtube.com/embed/')) {
|
| 138 |
+
return url; // Already embed URL
|
| 139 |
+
}
|
| 140 |
+
return videoId ? `https://www.youtube.com/embed/${videoId}?autoplay=1&rel=0` : null;
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
const handleMouseEnter = (id: string) => {
|
| 144 |
+
if (hoverTimeout.current) clearTimeout(hoverTimeout.current);
|
| 145 |
+
setHoveredResource(id);
|
| 146 |
+
};
|
| 147 |
+
|
| 148 |
+
const handleMouseLeave = () => {
|
| 149 |
+
hoverTimeout.current = setTimeout(() => {
|
| 150 |
+
setHoveredResource(null);
|
| 151 |
+
}, 200);
|
| 152 |
+
};
|
| 153 |
+
|
| 154 |
+
// Handle Playback Animation
|
| 155 |
+
React.useEffect(() => {
|
| 156 |
+
if (isPlaying && playbackPath.length > 1) {
|
| 157 |
+
setPathProgress(0);
|
| 158 |
+
const totalDuration = playbackPath.length * 800; // 800ms per segment
|
| 159 |
+
const startTime = Date.now();
|
| 160 |
+
|
| 161 |
+
const interval = setInterval(() => {
|
| 162 |
+
const elapsed = Date.now() - startTime;
|
| 163 |
+
const progress = Math.min(elapsed / totalDuration, 1);
|
| 164 |
+
setPathProgress(progress);
|
| 165 |
+
|
| 166 |
+
if (progress >= 1) {
|
| 167 |
+
clearInterval(interval);
|
| 168 |
+
if (onPlaybackComplete) onPlaybackComplete();
|
| 169 |
+
}
|
| 170 |
+
}, 16);
|
| 171 |
+
|
| 172 |
+
return () => clearInterval(interval);
|
| 173 |
+
}
|
| 174 |
+
}, [isPlaying, playbackPath, onPlaybackComplete]);
|
| 175 |
+
|
| 176 |
+
const handleCellClick = (x: number, y: number) => {
|
| 177 |
+
const resource = resources.find(r => r.position.x === x && r.position.y === y);
|
| 178 |
+
if (resource) {
|
| 179 |
+
setSelectedResource(resource);
|
| 180 |
+
onResourceClick(resource);
|
| 181 |
+
} else {
|
| 182 |
+
onAgentMove({ x, y });
|
| 183 |
+
}
|
| 184 |
+
};
|
| 185 |
+
|
| 186 |
+
const renderNeuralRoad = () => {
|
| 187 |
+
return polylines.filter(p => p.isActive).map(polyline => (
|
| 188 |
+
<svg key={polyline.id} className="absolute inset-0 w-full h-full pointer-events-none z-10">
|
| 189 |
+
<path
|
| 190 |
+
d={polyline.path.map((pos, i) =>
|
| 191 |
+
`${i === 0 ? 'M' : 'L'} ${(pos.x + 0.5) * 50} ${(pos.y + 0.5) * 50}`
|
| 192 |
+
).join(' ')}
|
| 193 |
+
fill="none"
|
| 194 |
+
stroke={polyline.color}
|
| 195 |
+
strokeWidth="12"
|
| 196 |
+
strokeLinecap="round"
|
| 197 |
+
strokeLinejoin="round"
|
| 198 |
+
className="opacity-20 filter blur-[2px]"
|
| 199 |
+
/>
|
| 200 |
+
<path
|
| 201 |
+
d={polyline.path.map((pos, i) =>
|
| 202 |
+
`${i === 0 ? 'M' : 'L'} ${(pos.x + 0.5) * 50} ${(pos.y + 0.5) * 50}`
|
| 203 |
+
).join(' ')}
|
| 204 |
+
fill="none"
|
| 205 |
+
stroke={polyline.color}
|
| 206 |
+
strokeWidth="4"
|
| 207 |
+
strokeLinecap="round"
|
| 208 |
+
strokeLinejoin="round"
|
| 209 |
+
strokeDasharray="1 8"
|
| 210 |
+
className="opacity-40"
|
| 211 |
+
/>
|
| 212 |
+
</svg>
|
| 213 |
+
));
|
| 214 |
+
};
|
| 215 |
+
|
| 216 |
+
const renderPlaybackOverlay = () => {
|
| 217 |
+
if (!isPlaying || playbackPath.length < 2) return null;
|
| 218 |
+
|
| 219 |
+
// Convert grid coordinates to percentages (assuming 1fr grid)
|
| 220 |
+
// Grid matches 1000x1000px container
|
| 221 |
+
const CELL_SIZE = 1000 / GRID_SIZE;
|
| 222 |
+
const HALF_CELL = CELL_SIZE / 2;
|
| 223 |
+
|
| 224 |
+
const pathSegments = [];
|
| 225 |
+
for (let i = 0; i < playbackPath.length - 1; i++) {
|
| 226 |
+
const start = playbackPath[i];
|
| 227 |
+
const end = playbackPath[i + 1];
|
| 228 |
+
|
| 229 |
+
const x1 = start.x * CELL_SIZE + HALF_CELL;
|
| 230 |
+
const y1 = start.y * CELL_SIZE + HALF_CELL;
|
| 231 |
+
const x2 = end.x * CELL_SIZE + HALF_CELL;
|
| 232 |
+
const y2 = end.y * CELL_SIZE + HALF_CELL;
|
| 233 |
+
|
| 234 |
+
// Calculate control point for arc
|
| 235 |
+
// Midpoint
|
| 236 |
+
const mx = (x1 + x2) / 2;
|
| 237 |
+
const my = (y1 + y2) / 2;
|
| 238 |
+
// Vector
|
| 239 |
+
const dx = x2 - x1;
|
| 240 |
+
const dy = y2 - y1;
|
| 241 |
+
// Perpendicular vector (scaled) - Fixed arc height
|
| 242 |
+
const arcHeight = 30;
|
| 243 |
+
// Normalize
|
| 244 |
+
const len = Math.sqrt(dx * dx + dy * dy) || 1;
|
| 245 |
+
const px = -dy / len * arcHeight;
|
| 246 |
+
const py = dx / len * arcHeight;
|
| 247 |
+
|
| 248 |
+
const cx = mx + px;
|
| 249 |
+
const cy = my + py;
|
| 250 |
+
|
| 251 |
+
const d = `M ${x1} ${y1} Q ${cx} ${cy} ${x2} ${y2}`;
|
| 252 |
+
pathSegments.push({ d, id: i }); // Add index to stagger animation
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
return (
|
| 256 |
+
<div className="absolute inset-0 z-50 pointer-events-none">
|
| 257 |
+
<svg width="100%" height="100%" viewBox="0 0 1000 1000" className="overflow-visible">
|
| 258 |
+
<defs>
|
| 259 |
+
<filter id="glow" x="-20%" y="-20%" width="140%" height="140%">
|
| 260 |
+
<feGaussianBlur stdDeviation="2" result="blur" />
|
| 261 |
+
<feComposite in="SourceGraphic" in2="blur" operator="over" />
|
| 262 |
+
</filter>
|
| 263 |
+
<linearGradient id="pathGradient" x1="0%" y1="0%" x2="100%" y2="0%">
|
| 264 |
+
<stop offset="0%" stopColor="#3b82f6" />
|
| 265 |
+
<stop offset="50%" stopColor="#8b5cf6" />
|
| 266 |
+
<stop offset="100%" stopColor="#d946ef" />
|
| 267 |
+
</linearGradient>
|
| 268 |
+
<marker id="arrowhead-swallowtail" markerWidth="6" markerHeight="6" refX="5" refY="3" orient="auto">
|
| 269 |
+
<path d="M 0 0 L 6 3 L 0 6 L 2 3 z" fill="#d946ef" />
|
| 270 |
+
</marker>
|
| 271 |
+
</defs>
|
| 272 |
+
{pathSegments.map((seg, idx) => {
|
| 273 |
+
const segmentProgress = Math.max(0, Math.min(1, (pathProgress * playbackPath.length) - idx));
|
| 274 |
+
const dashLen = 1000;
|
| 275 |
+
|
| 276 |
+
if (segmentProgress <= 0) return null;
|
| 277 |
+
|
| 278 |
+
return (
|
| 279 |
+
<g key={idx}>
|
| 280 |
+
{/* Background trace */}
|
| 281 |
+
<path d={seg.d} fill="none" stroke="rgba(139, 92, 246, 0.1)" strokeWidth="4" strokeLinecap="round" />
|
| 282 |
+
|
| 283 |
+
{/* Animated Glowing Line */}
|
| 284 |
+
<path
|
| 285 |
+
d={seg.d}
|
| 286 |
+
fill="none"
|
| 287 |
+
stroke="url(#pathGradient)"
|
| 288 |
+
strokeWidth="3"
|
| 289 |
+
strokeLinecap="round"
|
| 290 |
+
strokeDasharray="8 6"
|
| 291 |
+
markerEnd="url(#arrowhead-swallowtail)"
|
| 292 |
+
filter="url(#glow)"
|
| 293 |
+
strokeDashoffset={dashLen * (1 - segmentProgress)}
|
| 294 |
+
/>
|
| 295 |
+
</g>
|
| 296 |
+
);
|
| 297 |
+
})}
|
| 298 |
+
</svg>
|
| 299 |
+
</div>
|
| 300 |
+
);
|
| 301 |
+
};
|
| 302 |
+
|
| 303 |
+
const renderGrid = () => {
|
| 304 |
+
const cells = [];
|
| 305 |
+
|
| 306 |
+
for (let y = 0; y < GRID_SIZE; y++) {
|
| 307 |
+
for (let x = 0; x < GRID_SIZE; x++) {
|
| 308 |
+
const resource = resources.find(r => r.position.x === x && r.position.y === y);
|
| 309 |
+
const isAgent = agent.position.x === x && agent.position.y === y;
|
| 310 |
+
|
| 311 |
+
// Dynamic Tooltip Positioning
|
| 312 |
+
let tooltipClass = "absolute bottom-full left-1/2 -translate-x-1/2 mb-2";
|
| 313 |
+
let arrowClass = "absolute left-1/2 -translate-x-1/2 -bottom-2";
|
| 314 |
+
|
| 315 |
+
if (x < 4) {
|
| 316 |
+
tooltipClass = "absolute bottom-full left-0 mb-2";
|
| 317 |
+
arrowClass = "absolute left-4 -bottom-2";
|
| 318 |
+
} else if (x > 15) {
|
| 319 |
+
tooltipClass = "absolute bottom-full right-0 mb-2";
|
| 320 |
+
arrowClass = "absolute right-4 -bottom-2";
|
| 321 |
+
}
|
| 322 |
+
|
| 323 |
+
if (y < 4) {
|
| 324 |
+
if (x < 4) {
|
| 325 |
+
tooltipClass = "absolute top-full left-0 mt-2";
|
| 326 |
+
arrowClass = "absolute left-4 -top-2";
|
| 327 |
+
} else if (x > 15) {
|
| 328 |
+
tooltipClass = "absolute top-full right-0 mt-2";
|
| 329 |
+
arrowClass = "absolute right-4 -top-2";
|
| 330 |
+
} else {
|
| 331 |
+
tooltipClass = "absolute top-full left-1/2 -translate-x-1/2 mt-2";
|
| 332 |
+
arrowClass = "absolute left-1/2 -translate-x-1/2 -top-2";
|
| 333 |
+
}
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
const showTooltip = resource && hoveredResource === resource.id;
|
| 337 |
+
|
| 338 |
+
cells.push(
|
| 339 |
+
<div
|
| 340 |
+
key={`${x}-${y}`}
|
| 341 |
+
className={`
|
| 342 |
+
relative transition-all duration-300
|
| 343 |
+
${isAgent ? 'z-40' : 'z-30'}
|
| 344 |
+
${showTooltip ? 'z-[100]' : ''}
|
| 345 |
+
`}
|
| 346 |
+
style={{
|
| 347 |
+
gridColumn: x + 1,
|
| 348 |
+
gridRow: y + 1,
|
| 349 |
+
}}
|
| 350 |
+
onClick={() => handleCellClick(x, y)}
|
| 351 |
+
>
|
| 352 |
+
{resource && (
|
| 353 |
+
<div
|
| 354 |
+
className="absolute inset-0 m-auto flex items-center justify-center w-full h-full"
|
| 355 |
+
onMouseEnter={() => handleMouseEnter(resource.id)}
|
| 356 |
+
onMouseLeave={handleMouseLeave}
|
| 357 |
+
>
|
| 358 |
+
{/* Circular Reference Marker */}
|
| 359 |
+
<div className={`
|
| 360 |
+
relative flex items-center justify-center
|
| 361 |
+
w-10 h-10 transform transition-all duration-300
|
| 362 |
+
${selectedResource?.id === resource.id ? 'scale-110' : 'hover:scale-110 hover:-translate-y-1'}
|
| 363 |
+
`}>
|
| 364 |
+
{/* Outer Glow */}
|
| 365 |
+
<div className={`absolute inset-0 rounded-full blur-[8px] opacity-25 ${
|
| 366 |
+
resource.visited ? 'bg-green-400' :
|
| 367 |
+
resource.difficulty <= 2 ? 'bg-purple-400' :
|
| 368 |
+
resource.difficulty <= 4 ? 'bg-blue-400' :
|
| 369 |
+
resource.difficulty <= 6 ? 'bg-amber-400' : 'bg-red-400'
|
| 370 |
+
}`} />
|
| 371 |
+
|
| 372 |
+
{/* Pin Head — green when visited */}
|
| 373 |
+
<div className={`
|
| 374 |
+
w-8 h-8 rounded-full shadow-lg flex items-center justify-center border-2 border-white
|
| 375 |
+
transition-colors duration-500
|
| 376 |
+
${resource.visited ? 'bg-emerald-500' :
|
| 377 |
+
resource.difficulty <= 2 ? 'bg-[#A855F7]' :
|
| 378 |
+
resource.difficulty <= 4 ? 'bg-[#3B82F6]' :
|
| 379 |
+
resource.difficulty <= 6 ? 'bg-[#F59E0B]' : 'bg-[#EF4444]'}
|
| 380 |
+
`}>
|
| 381 |
+
<ResourceIcon type={resource.type} />
|
| 382 |
+
</div>
|
| 383 |
+
|
| 384 |
+
<div
|
| 385 |
+
className={`${tooltipClass} w-72 transition-opacity duration-200 z-50 ${showTooltip ? 'opacity-100 pointer-events-auto' : 'opacity-0 pointer-events-none'}`}
|
| 386 |
+
onMouseEnter={() => handleMouseEnter(resource.id)}
|
| 387 |
+
onMouseLeave={handleMouseLeave}
|
| 388 |
+
>
|
| 389 |
+
<div className="bg-white rounded-xl shadow-2xl border border-gray-100 overflow-hidden cursor-default" onClick={(e) => e.stopPropagation()}>
|
| 390 |
+
<div className={`h-32 w-full relative ${resource.visited ? 'bg-gradient-to-tr from-emerald-500 to-teal-400' : 'bg-gradient-to-tr from-blue-600 to-indigo-500'}`}>
|
| 391 |
+
<div className="absolute inset-0 opacity-20" style={{ backgroundImage: 'radial-gradient(circle at 2px 2px, white 1px, transparent 0)', backgroundSize: '16px 16px' }}></div>
|
| 392 |
+
<div className="absolute inset-0 flex items-center justify-center">
|
| 393 |
+
<ResourceIcon type={resource.type} />
|
| 394 |
+
<div className="absolute transform scale-[5] opacity-10"><ResourceIcon type={resource.type} /></div>
|
| 395 |
+
</div>
|
| 396 |
+
</div>
|
| 397 |
+
|
| 398 |
+
<div className="p-4 text-left">
|
| 399 |
+
<div className="flex justify-between items-start mb-2">
|
| 400 |
+
<h4 className="font-bold text-gray-900 text-lg leading-tight line-clamp-2">{resource.title}</h4>
|
| 401 |
+
{resource.visited && (
|
| 402 |
+
<span className="bg-green-100 text-green-700 text-[10px] font-bold px-2 py-0.5 rounded uppercase tracking-wide shrink-0 ml-2">
|
| 403 |
+
Completed
|
| 404 |
+
</span>
|
| 405 |
+
)}
|
| 406 |
+
</div>
|
| 407 |
+
|
| 408 |
+
<div className="flex items-center gap-2 text-xs text-gray-500 mb-3">
|
| 409 |
+
<span className="capitalize">{resource.type} Lesson</span>
|
| 410 |
+
<span>•</span>
|
| 411 |
+
<span>{['Beginner', 'Intermediate', 'Advanced', 'Expert', 'Master'][resource.difficulty - 1] || 'Intermediate'}</span>
|
| 412 |
+
</div>
|
| 413 |
+
|
| 414 |
+
<div className="flex items-center gap-4 text-xs text-gray-500 font-medium mb-4">
|
| 415 |
+
<div className="flex items-center gap-1">
|
| 416 |
+
<span className="text-gray-400">⏱</span>
|
| 417 |
+
<span>{10 + (resource.title.length % 20)} mins</span>
|
| 418 |
+
</div>
|
| 419 |
+
<div className="flex items-center gap-1">
|
| 420 |
+
<span className="text-emerald-400">🎯</span>
|
| 421 |
+
<span>Target: {(resource.high_line ? resource.high_line * 100 : 80).toFixed(0)}%</span>
|
| 422 |
+
</div>
|
| 423 |
+
<div className="flex items-center gap-1">
|
| 424 |
+
<span className="text-amber-400">★</span>
|
| 425 |
+
<span>{resource.base_points ?? 50} pts</span>
|
| 426 |
+
</div>
|
| 427 |
+
</div>
|
| 428 |
+
|
| 429 |
+
<button
|
| 430 |
+
onClick={(e) => {
|
| 431 |
+
e.stopPropagation();
|
| 432 |
+
if (isAgent && resource.youtube_url) {
|
| 433 |
+
setVideoResource(resource);
|
| 434 |
+
setHoveredResource(null);
|
| 435 |
+
} else {
|
| 436 |
+
handleCellClick(x, y);
|
| 437 |
+
}
|
| 438 |
+
}}
|
| 439 |
+
className={`w-full py-2.5 ${isAgent ? 'bg-indigo-600 hover:bg-indigo-700 border-indigo-500/20 shadow-indigo-500/20' : 'bg-blue-600 hover:bg-blue-700 border-blue-500/20 shadow-blue-500/10'} text-white text-sm font-semibold rounded-xl transition-all flex items-center justify-center gap-2 shadow-lg active:scale-95 transform duration-100 border`}
|
| 440 |
+
>
|
| 441 |
+
{isAgent ? '▶ Start Lesson' : 'Travel to Lesson'}
|
| 442 |
+
<span className="text-lg leading-none">→</span>
|
| 443 |
+
</button>
|
| 444 |
+
</div>
|
| 445 |
+
</div>
|
| 446 |
+
<div className={`w-4 h-4 bg-white transform rotate-45 shadow-lg z-[-1] ${arrowClass}`}></div>
|
| 447 |
+
</div>
|
| 448 |
+
</div>
|
| 449 |
+
</div>
|
| 450 |
+
)}
|
| 451 |
+
|
| 452 |
+
{/* Resource cell placeholder */}
|
| 453 |
+
</div>
|
| 454 |
+
);
|
| 455 |
+
}
|
| 456 |
+
}
|
| 457 |
+
return cells;
|
| 458 |
+
};
|
| 459 |
+
|
| 460 |
+
const handleChatSubmit = async (e: React.FormEvent) => {
|
| 461 |
+
e.preventDefault();
|
| 462 |
+
if (!chatInput.trim() || !videoResource || chatLoading) return;
|
| 463 |
+
|
| 464 |
+
const userMessage = chatInput;
|
| 465 |
+
setChatInput('');
|
| 466 |
+
setChatMessages(prev => [...prev, { role: 'user', content: userMessage }]);
|
| 467 |
+
setChatLoading(true);
|
| 468 |
+
|
| 469 |
+
try {
|
| 470 |
+
const data = await nlpApi.chat(videoResource.title, userMessage, chatMessages);
|
| 471 |
+
setChatMessages(prev => [...prev, { role: 'ai', content: data.answer }]);
|
| 472 |
+
} catch (error) {
|
| 473 |
+
console.error('Chat error:', error);
|
| 474 |
+
setChatMessages(prev => [...prev, { role: 'ai', content: 'Sorry, I failed to process your request.' }]);
|
| 475 |
+
} finally {
|
| 476 |
+
setChatLoading(false);
|
| 477 |
+
}
|
| 478 |
+
};
|
| 479 |
+
|
| 480 |
+
return (
|
| 481 |
+
<div className="flex flex-col h-full bg-gray-50/30 overflow-hidden">
|
| 482 |
+
<div className="flex-none px-6 py-4 bg-white/90 backdrop-blur-sm border-b border-gray-100 flex justify-between items-center z-10">
|
| 483 |
+
<div>
|
| 484 |
+
<h2 className="text-lg font-bold text-gray-900 flex items-center gap-2">
|
| 485 |
+
<MapPin className="w-5 h-5 text-blue-600" />
|
| 486 |
+
Learning Environment
|
| 487 |
+
</h2>
|
| 488 |
+
<p className="text-xs text-gray-500 mt-0.5">Explore resources and track your journey</p>
|
| 489 |
+
</div>
|
| 490 |
+
|
| 491 |
+
<div className="flex items-center gap-4">
|
| 492 |
+
<div className="flex items-center bg-white border border-gray-100 rounded-lg shadow-sm p-1">
|
| 493 |
+
<button
|
| 494 |
+
onClick={() => setZoomLevel(prev => Math.max(1, prev - 0.25))}
|
| 495 |
+
className="p-1.5 hover:bg-gray-50 text-gray-500 transition-colors rounded-md"
|
| 496 |
+
title="Zoom Out"
|
| 497 |
+
>
|
| 498 |
+
<ZoomOut size={16} />
|
| 499 |
+
</button>
|
| 500 |
+
<div className="w-px h-4 bg-gray-100 mx-1"></div>
|
| 501 |
+
<button
|
| 502 |
+
onClick={() => setZoomLevel(1.5)}
|
| 503 |
+
className="px-2 py-1 hover:bg-gray-50 text-gray-400 text-[10px] font-bold transition-colors rounded-md"
|
| 504 |
+
title="Reset Zoom"
|
| 505 |
+
>
|
| 506 |
+
{Math.round(zoomLevel * 100)}%
|
| 507 |
+
</button>
|
| 508 |
+
<div className="w-px h-4 bg-gray-100 mx-1"></div>
|
| 509 |
+
<button
|
| 510 |
+
onClick={() => setZoomLevel(prev => Math.min(3, prev + 0.25))}
|
| 511 |
+
className="p-1.5 hover:bg-gray-50 text-gray-500 transition-colors rounded-md"
|
| 512 |
+
title="Zoom In"
|
| 513 |
+
>
|
| 514 |
+
<ZoomIn size={16} />
|
| 515 |
+
</button>
|
| 516 |
+
</div>
|
| 517 |
+
|
| 518 |
+
<button
|
| 519 |
+
onClick={() => setIsSearching(true)}
|
| 520 |
+
className="flex items-center gap-2 px-3 py-1.5 bg-white border border-blue-200 hover:bg-blue-50 text-blue-600 text-xs font-semibold rounded-lg transition-all shadow-sm"
|
| 521 |
+
>
|
| 522 |
+
<Search className="w-3.5 h-3.5" />
|
| 523 |
+
<span>Search</span>
|
| 524 |
+
</button>
|
| 525 |
+
|
| 526 |
+
{isSimulationRunning && (
|
| 527 |
+
<button
|
| 528 |
+
onClick={onRefreshDQNPath}
|
| 529 |
+
className="flex items-center gap-2 px-3 py-1.5 bg-white border border-red-200 hover:bg-red-50 text-red-600 text-xs font-semibold rounded-lg transition-all shadow-sm group"
|
| 530 |
+
>
|
| 531 |
+
<RefreshCw className="w-3.5 h-3.5 group-hover:rotate-180 transition-transform duration-500" />
|
| 532 |
+
<span>Optimize Path</span>
|
| 533 |
+
</button>
|
| 534 |
+
)}
|
| 535 |
+
|
| 536 |
+
<div className="flex items-center gap-3 bg-gray-50 px-3 py-1.5 rounded-lg border border-gray-100">
|
| 537 |
+
<div className="flex items-center gap-2">
|
| 538 |
+
<div className="w-5 h-5 rounded-full border border-blue-200 overflow-hidden bg-white shadow-sm">
|
| 539 |
+
<img src={avatar} alt="Avatar" className="w-full h-full object-cover" />
|
| 540 |
+
</div>
|
| 541 |
+
<span className="text-xs font-medium text-gray-600">You (Agent)</span>
|
| 542 |
+
</div>
|
| 543 |
+
<div className="w-px h-3 bg-gray-200"></div>
|
| 544 |
+
<div className="flex items-center gap-1.5">
|
| 545 |
+
<div className="w-2.5 h-2.5 bg-[#A855F7] rounded-full"></div>
|
| 546 |
+
<span className="text-xs font-medium text-gray-600">Resource</span>
|
| 547 |
+
</div>
|
| 548 |
+
</div>
|
| 549 |
+
</div>
|
| 550 |
+
</div>
|
| 551 |
+
|
| 552 |
+
{isSimulationRunning && dqnPathInfo?.resource && (
|
| 553 |
+
<div className="absolute top-20 right-6 z-20 animate-in slide-in-from-top-4 fade-in duration-300 pointer-events-none">
|
| 554 |
+
<div className="bg-white/95 backdrop-blur-md px-4 py-3 rounded-xl shadow-lg border border-red-100 ring-1 ring-red-50 w-64 pointer-events-auto">
|
| 555 |
+
<div className="flex items-start justify-between gap-3">
|
| 556 |
+
<div>
|
| 557 |
+
<span className="text-[10px] font-bold tracking-wider text-red-500 uppercase">Recommendation</span>
|
| 558 |
+
<h3 className="text-sm font-semibold text-gray-900 mt-0.5 line-clamp-1">{dqnPathInfo.resource.title}</h3>
|
| 559 |
+
</div>
|
| 560 |
+
<div className="flex flex-col items-end">
|
| 561 |
+
<span className="text-lg font-bold text-red-600 leading-none">+{dqnPathInfo.reward}</span>
|
| 562 |
+
</div>
|
| 563 |
+
</div>
|
| 564 |
+
</div>
|
| 565 |
+
</div>
|
| 566 |
+
)}
|
| 567 |
+
|
| 568 |
+
<div className="flex-1 w-full relative overflow-auto bg-gray-50/30" ref={gridRef}>
|
| 569 |
+
<div className="min-w-full min-h-full flex items-center justify-center p-8">
|
| 570 |
+
<div
|
| 571 |
+
className="grid gap-0 bg-[#F8FAFC] rounded-3xl shadow-2xl p-4 shrink-0 relative overflow-hidden"
|
| 572 |
+
style={{
|
| 573 |
+
width: `${1000 * zoomLevel}px`,
|
| 574 |
+
height: `${1000 * zoomLevel}px`,
|
| 575 |
+
gridTemplateColumns: `repeat(${GRID_SIZE}, 1fr)`,
|
| 576 |
+
gridTemplateRows: `repeat(${GRID_SIZE}, 1fr)`,
|
| 577 |
+
backgroundImage: `radial-gradient(#E2E8F0 1.5px, transparent 1.5px)`,
|
| 578 |
+
backgroundSize: `${24 * zoomLevel}px ${24 * zoomLevel}px`,
|
| 579 |
+
transition: 'width 0.3s ease-out, height 0.3s ease-out'
|
| 580 |
+
}}
|
| 581 |
+
>
|
| 582 |
+
{/* Map Terrain: color-coded 0°-90° quarter-circle arcs with hover tooltips */}
|
| 583 |
+
<div className="absolute inset-0 pointer-events-none z-0">
|
| 584 |
+
<svg className="absolute inset-0 w-full h-full" viewBox="0 0 1000 1000" preserveAspectRatio="none">
|
| 585 |
+
{/* Color-coded concentric arcs — each arc belongs to a tier */}
|
| 586 |
+
{(() => {
|
| 587 |
+
// Tier boundaries based on radii 3,7,11,15 grid units × 50px/unit
|
| 588 |
+
// Boundaries (midpoints between radii): 0-250, 250-450, 450-650, 650-900
|
| 589 |
+
const tierDefs = [
|
| 590 |
+
{ name: 'Fundamentals', color: '#A855F7', from: 0, to: 250 },
|
| 591 |
+
{ name: 'Intermediate', color: '#3B82F6', from: 250, to: 450 },
|
| 592 |
+
{ name: 'Advance', color: '#F59E0B', from: 450, to: 650 },
|
| 593 |
+
{ name: 'Mastery', color: '#EF4444', from: 650, to: 900 },
|
| 594 |
+
];
|
| 595 |
+
const allArcs = [100,200,300,400,500,600,700,800,900];
|
| 596 |
+
return allArcs.map((r) => {
|
| 597 |
+
const tier = tierDefs.find(t => r > t.from && r <= t.to) || tierDefs[tierDefs.length - 1];
|
| 598 |
+
return (
|
| 599 |
+
<path key={`arc-${r}`}
|
| 600 |
+
d={`M ${r} 1000 A ${r} ${r} 0 0 0 0 ${1000 - r}`}
|
| 601 |
+
fill="none"
|
| 602 |
+
stroke={tier.color}
|
| 603 |
+
strokeWidth="1.5"
|
| 604 |
+
strokeDasharray="5 7"
|
| 605 |
+
opacity="0.5"
|
| 606 |
+
style={{ pointerEvents: 'stroke', cursor: 'default' }}
|
| 607 |
+
>
|
| 608 |
+
<title>{tier.name}</title>
|
| 609 |
+
</path>
|
| 610 |
+
);
|
| 611 |
+
});
|
| 612 |
+
})()}
|
| 613 |
+
{/* Radial spokes every 10° */}
|
| 614 |
+
{[10,20,30,40,50,60,70,80].map((deg) => {
|
| 615 |
+
const rad = (deg * Math.PI) / 180;
|
| 616 |
+
return <line key={`sp-${deg}`}
|
| 617 |
+
x1={0} y1={1000}
|
| 618 |
+
x2={950 * Math.cos(rad)}
|
| 619 |
+
y2={1000 - 950 * Math.sin(rad)}
|
| 620 |
+
stroke="#94A3B8" strokeWidth="0.8"
|
| 621 |
+
strokeDasharray="4 7" opacity="0.35"
|
| 622 |
+
/>;
|
| 623 |
+
})}
|
| 624 |
+
{/* X-axis (bottom) and Y-axis (left) */}
|
| 625 |
+
<line x1="0" y1="999" x2="980" y2="999" stroke="#94A3B8" strokeWidth="1.5"/>
|
| 626 |
+
<line x1="1" y1="1000" x2="1" y2="20" stroke="#94A3B8" strokeWidth="1.5"/>
|
| 627 |
+
{/* Tier boundary tick marks */}
|
| 628 |
+
{[250,450,650].map(x => (
|
| 629 |
+
<line key={`tick-${x}`} x1={x} y1={992} x2={x} y2={1000} stroke="#94A3B8" strokeWidth="1.5"/>
|
| 630 |
+
))}
|
| 631 |
+
</svg>
|
| 632 |
+
{/* Bottom tier labels aligned to arc radii */}
|
| 633 |
+
<div className="absolute bottom-1 left-0 w-full pointer-events-none">
|
| 634 |
+
{[
|
| 635 |
+
{ label: 'FUNDAMENTALS', pct: '12.5%', color: 'text-purple-400' },
|
| 636 |
+
{ label: 'INTERMEDIATE', pct: '35%', color: 'text-blue-400' },
|
| 637 |
+
{ label: 'ADVANCE', pct: '55%', color: 'text-amber-400' },
|
| 638 |
+
{ label: 'MASTERY', pct: '77.5%', color: 'text-red-400' },
|
| 639 |
+
].map((t) => (
|
| 640 |
+
<span key={t.label}
|
| 641 |
+
className={`absolute text-[9px] font-black uppercase tracking-[0.15em] -translate-x-1/2 ${t.color}`}
|
| 642 |
+
style={{ left: t.pct, bottom: 0 }}>
|
| 643 |
+
{t.label}
|
| 644 |
+
</span>
|
| 645 |
+
))}
|
| 646 |
+
</div>
|
| 647 |
+
</div>
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
{renderGrid()}
|
| 651 |
+
{renderPlaybackOverlay()}
|
| 652 |
+
|
| 653 |
+
{/* Animated Student Agent Marker */}
|
| 654 |
+
<motion.div
|
| 655 |
+
key={`agent-at-${agent.position.x}-${agent.position.y}`}
|
| 656 |
+
className="absolute pointer-events-none z-40 flex items-center justify-center"
|
| 657 |
+
initial={false}
|
| 658 |
+
transition={{
|
| 659 |
+
left: { type: "spring", damping: 25, stiffness: 120 },
|
| 660 |
+
top: { type: "spring", damping: 25, stiffness: 120 },
|
| 661 |
+
scale: { duration: 0.4 },
|
| 662 |
+
rotate: { duration: 0.4 }
|
| 663 |
+
}}
|
| 664 |
+
animate={{
|
| 665 |
+
left: `${(agent.position.x + 0.5) * (100 / GRID_SIZE)}%`,
|
| 666 |
+
top: `${(agent.position.y + 0.5) * (100 / GRID_SIZE)}%`,
|
| 667 |
+
scale: isPlaying ? [1, 1.15, 1] : 1, // Subtle bounce effect during playback
|
| 668 |
+
rotateY: 0 // Force orientation reset to clear potential mirrored state
|
| 669 |
+
}}
|
| 670 |
+
style={{
|
| 671 |
+
width: `${100 / GRID_SIZE}%`,
|
| 672 |
+
height: `${100 / GRID_SIZE}%`,
|
| 673 |
+
x: '-50%',
|
| 674 |
+
y: '-50%',
|
| 675 |
+
}}
|
| 676 |
+
>
|
| 677 |
+
<SocialAgent isMoving={isPlaying} />
|
| 678 |
+
</motion.div>
|
| 679 |
+
|
| 680 |
+
{showTravelArrow && (
|
| 681 |
+
<svg className="absolute inset-0 w-full h-full pointer-events-none z-30">
|
| 682 |
+
<defs>
|
| 683 |
+
<linearGradient id="travelGradient" x1="0%" y1="0%" x2="100%" y2="100%">
|
| 684 |
+
<stop offset="0%" stopColor="#ef4444" stopOpacity="0.8" />
|
| 685 |
+
<stop offset="100%" stopColor="#ec4899" stopOpacity="0.8" />
|
| 686 |
+
</linearGradient>
|
| 687 |
+
<marker id="travel-arrowhead" markerWidth="12" markerHeight="8" refX="10" refY="4" orient="auto">
|
| 688 |
+
<path d="M0,0 L12,4 L0,8 Z" fill="#ef4444" />
|
| 689 |
+
</marker>
|
| 690 |
+
</defs>
|
| 691 |
+
<line
|
| 692 |
+
x1={`${(prevPosition.x + 0.5) * (100 / GRID_SIZE)}%`}
|
| 693 |
+
y1={`${(prevPosition.y + 0.5) * (100 / GRID_SIZE)}%`}
|
| 694 |
+
x2={`${(agent.position.x + 0.5) * (100 / GRID_SIZE)}%`}
|
| 695 |
+
y2={`${(agent.position.y + 0.5) * (100 / GRID_SIZE)}%`}
|
| 696 |
+
stroke="url(#travelGradient)"
|
| 697 |
+
strokeWidth="4"
|
| 698 |
+
strokeDasharray="12 6"
|
| 699 |
+
className="animate-pulse"
|
| 700 |
+
markerEnd="url(#travel-arrowhead)"
|
| 701 |
+
opacity="0.8"
|
| 702 |
+
>
|
| 703 |
+
<animate attributeName="stroke-dashoffset" from="100" to="0" dur="2s" repeatCount="indefinite" />
|
| 704 |
+
</line>
|
| 705 |
+
</svg>
|
| 706 |
+
)}
|
| 707 |
+
</div>
|
| 708 |
+
</div>
|
| 709 |
+
</div>
|
| 710 |
+
|
| 711 |
+
{selectedResource && (
|
| 712 |
+
<div className="flex-none w-full bg-white/95 backdrop-blur-md p-4 border-t border-blue-100 z-10 animate-in slide-in-from-bottom-4 fade-in duration-300">
|
| 713 |
+
<div className="max-w-4xl mx-auto flex items-center gap-4">
|
| 714 |
+
<div className={`p-3 rounded-lg ${selectedResource.visited ? 'bg-green-100 text-green-600' : 'bg-blue-100 text-blue-600'}`}>
|
| 715 |
+
<ResourceIcon type={selectedResource.type} />
|
| 716 |
+
</div>
|
| 717 |
+
<div className="flex-1">
|
| 718 |
+
<h3 className="font-semibold text-gray-900">{selectedResource.title}</h3>
|
| 719 |
+
<div className="flex items-center gap-3 mt-1 text-sm text-gray-500">
|
| 720 |
+
<span className="px-2 py-0.5 bg-gray-100 rounded text-xs font-medium uppercase tracking-wide">{selectedResource.type}</span>
|
| 721 |
+
</div>
|
| 722 |
+
</div>
|
| 723 |
+
<div className="flex flex-col items-end border-l border-gray-100 pl-4">
|
| 724 |
+
<span className="text-xs text-gray-400 uppercase font-semibold">Reward</span>
|
| 725 |
+
<span className="text-xl font-bold text-gray-900">+{selectedResource.reward}</span>
|
| 726 |
+
</div>
|
| 727 |
+
</div>
|
| 728 |
+
</div>
|
| 729 |
+
)}
|
| 730 |
+
|
| 731 |
+
{isSearching && (
|
| 732 |
+
<div className="fixed inset-0 bg-gray-950/40 backdrop-blur-md flex items-center justify-center z-[250] p-4 animate-in fade-in duration-300">
|
| 733 |
+
<div className="bg-white/95 backdrop-blur-2xl rounded-[2rem] shadow-2xl w-full max-w-lg overflow-hidden border border-white/20 animate-in zoom-in-95 duration-300 flex flex-col max-h-[80vh]">
|
| 734 |
+
<div className="px-6 py-5 border-b border-gray-100 flex items-center justify-between bg-white/50">
|
| 735 |
+
<div className="flex items-center gap-3">
|
| 736 |
+
<div className="w-10 h-10 rounded-xl bg-blue-600 flex items-center justify-center text-white shadow-lg shadow-blue-500/20">
|
| 737 |
+
<Search className="w-5 h-5" />
|
| 738 |
+
</div>
|
| 739 |
+
<div>
|
| 740 |
+
<h3 className="text-lg font-bold text-gray-900">Resource Navigator</h3>
|
| 741 |
+
</div>
|
| 742 |
+
</div>
|
| 743 |
+
<button
|
| 744 |
+
onClick={() => setIsSearching(false)}
|
| 745 |
+
className="p-2 rounded-full hover:bg-gray-100 text-gray-400 hover:text-gray-600 transition-colors"
|
| 746 |
+
>
|
| 747 |
+
<X className="w-5 h-5" />
|
| 748 |
+
</button>
|
| 749 |
+
</div>
|
| 750 |
+
|
| 751 |
+
<div className="p-4 border-b border-gray-100 bg-gray-50/50">
|
| 752 |
+
<div className="relative">
|
| 753 |
+
<Search className="absolute left-4 top-1/2 -translate-y-1/2 w-4 h-4 text-gray-400" />
|
| 754 |
+
<input
|
| 755 |
+
autoFocus
|
| 756 |
+
type="text"
|
| 757 |
+
placeholder="Filter resources..."
|
| 758 |
+
value={searchQuery}
|
| 759 |
+
onChange={(e) => setSearchQuery(e.target.value)}
|
| 760 |
+
className="w-full pl-11 pr-4 py-3 bg-white border border-gray-200 rounded-2xl outline-none focus:ring-2 focus:ring-blue-500/20 focus:border-blue-500 transition-all font-medium text-gray-800"
|
| 761 |
+
/>
|
| 762 |
+
</div>
|
| 763 |
+
</div>
|
| 764 |
+
|
| 765 |
+
<div className="flex-1 overflow-y-auto p-4 space-y-2">
|
| 766 |
+
{resources
|
| 767 |
+
.filter(r => r.title.toLowerCase().includes(searchQuery.toLowerCase()))
|
| 768 |
+
.map(resource => (
|
| 769 |
+
<button
|
| 770 |
+
key={resource.id}
|
| 771 |
+
onClick={() => {
|
| 772 |
+
onAgentMove(resource.position);
|
| 773 |
+
onResourceClick(resource);
|
| 774 |
+
setIsSearching(false);
|
| 775 |
+
setSearchQuery('');
|
| 776 |
+
}}
|
| 777 |
+
className="w-full flex items-center gap-4 p-3 rounded-2xl hover:bg-blue-50/50 border border-transparent hover:border-blue-100 transition-all group text-left"
|
| 778 |
+
>
|
| 779 |
+
<div className={`p-2.5 rounded-xl ${resource.visited ? 'bg-green-100 text-green-600' : 'bg-blue-100 text-blue-600'} group-hover:scale-110 transition-transform`}>
|
| 780 |
+
<ResourceIcon type={resource.type} />
|
| 781 |
+
</div>
|
| 782 |
+
<div className="flex-1 min-w-0">
|
| 783 |
+
<h4 className="font-semibold text-gray-900 text-sm truncate">{resource.title}</h4>
|
| 784 |
+
</div>
|
| 785 |
+
<ChevronRight className="w-4 h-4 text-gray-300 group-hover:text-blue-500 group-hover:translate-x-1 transition-all" />
|
| 786 |
+
</button>
|
| 787 |
+
))}
|
| 788 |
+
</div>
|
| 789 |
+
</div>
|
| 790 |
+
</div>
|
| 791 |
+
)}
|
| 792 |
+
|
| 793 |
+
{videoResource && videoResource.youtube_url && (
|
| 794 |
+
<div
|
| 795 |
+
className="fixed inset-0 bg-gray-950/80 backdrop-blur-3xl flex items-center justify-center z-[200] p-4 sm:p-8"
|
| 796 |
+
onClick={() => { setVideoResource(null); setChatMessages([]); setChatInput(''); }}
|
| 797 |
+
>
|
| 798 |
+
<div
|
| 799 |
+
className="bg-[#0f111a]/95 backdrop-blur-md rounded-[2.5rem] shadow-[0_0_80px_-15px_rgba(79,70,229,0.25)] w-full max-w-7xl h-[90vh] overflow-hidden flex flex-col border border-white/10 animate-in zoom-in-90 fade-in duration-500 ease-out relative z-10"
|
| 800 |
+
onClick={(e) => e.stopPropagation()}
|
| 801 |
+
>
|
| 802 |
+
<div className="bg-[#131620]/80 backdrop-blur-3xl px-8 py-5 flex items-center justify-between border-b border-white/5 flex-shrink-0 relative z-20">
|
| 803 |
+
<div className="flex items-center gap-5 text-white min-w-0">
|
| 804 |
+
<div className="w-12 h-12 rounded-2xl bg-gradient-to-br from-red-500 via-rose-500 to-pink-600 flex items-center justify-center flex-shrink-0 shadow-lg shadow-red-500/30 border border-white/10">
|
| 805 |
+
<Play size={20} className="text-white fill-current ml-0.5" />
|
| 806 |
+
</div>
|
| 807 |
+
<div className="min-w-0">
|
| 808 |
+
<h3 className="font-bold text-lg truncate text-white tracking-tight drop-shadow-sm">{videoResource.title}</h3>
|
| 809 |
+
</div>
|
| 810 |
+
</div>
|
| 811 |
+
<button
|
| 812 |
+
onClick={() => { setVideoResource(null); setChatMessages([]); setChatInput(''); }}
|
| 813 |
+
className="w-12 h-12 rounded-full bg-white/5 hover:bg-red-500/20 hover:border-red-500/30 text-gray-400 hover:text-red-400 transition-all flex items-center justify-center border border-white/5 group"
|
| 814 |
+
>
|
| 815 |
+
<X size={22} className="group-hover:rotate-90 transition-transform duration-300" />
|
| 816 |
+
</button>
|
| 817 |
+
</div>
|
| 818 |
+
|
| 819 |
+
<div className="flex flex-1 min-h-0 bg-[#090b10]">
|
| 820 |
+
<div className="flex-[65] relative bg-black overflow-hidden z-10">
|
| 821 |
+
<iframe
|
| 822 |
+
className="w-full h-full"
|
| 823 |
+
src={getYouTubeEmbedUrl(videoResource.youtube_url) || ''}
|
| 824 |
+
title={videoResource.title}
|
| 825 |
+
frameBorder="0"
|
| 826 |
+
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
|
| 827 |
+
allowFullScreen
|
| 828 |
+
/>
|
| 829 |
+
</div>
|
| 830 |
+
|
| 831 |
+
<div className="flex-[35] flex flex-col bg-[#0f111a] border-l border-white/5 relative overflow-hidden z-20">
|
| 832 |
+
<div className="bg-gradient-to-r from-indigo-500/10 to-purple-500/10 backdrop-blur-2xl px-6 py-5 flex items-center gap-4 border-b border-white/5 flex-shrink-0 relative z-30">
|
| 833 |
+
<div className="w-10 h-10 rounded-[14px] bg-gradient-to-tr from-indigo-500 via-purple-500 to-fuchsia-500 flex items-center justify-center shadow-lg border border-white/10 ring-2 ring-white/5">
|
| 834 |
+
<Sparkles size={18} className="text-white" />
|
| 835 |
+
</div>
|
| 836 |
+
<div>
|
| 837 |
+
<h4 className="font-extrabold text-white text-[15px] tracking-tight">Learning Assistant</h4>
|
| 838 |
+
</div>
|
| 839 |
+
</div>
|
| 840 |
+
|
| 841 |
+
<div className="flex-1 overflow-y-auto p-6 space-y-5 relative z-20">
|
| 842 |
+
{chatMessages.map((msg, i) => (
|
| 843 |
+
<div key={i} className={`flex ${msg.role === 'user' ? 'justify-end' : 'justify-start'}`}>
|
| 844 |
+
<div className={`max-w-[85%] px-5 py-3.5 rounded-2xl text-[15px] leading-relaxed relative ${msg.role === 'user'
|
| 845 |
+
? 'bg-indigo-600 text-white rounded-br-sm'
|
| 846 |
+
: 'bg-white/5 border border-white/10 text-gray-200 rounded-bl-sm'
|
| 847 |
+
}`}>
|
| 848 |
+
{msg.content}
|
| 849 |
+
</div>
|
| 850 |
+
</div>
|
| 851 |
+
))}
|
| 852 |
+
{chatLoading && <div className="text-gray-500 text-xs animate-pulse">Thinking...</div>}
|
| 853 |
+
</div>
|
| 854 |
+
|
| 855 |
+
<form onSubmit={handleChatSubmit} className="p-4 border-t border-white/5 relative z-30">
|
| 856 |
+
<div className="relative">
|
| 857 |
+
<input
|
| 858 |
+
type="text"
|
| 859 |
+
value={chatInput}
|
| 860 |
+
onChange={(e) => setChatInput(e.target.value)}
|
| 861 |
+
placeholder="Ask Sider AI..."
|
| 862 |
+
className="w-full bg-white/5 border border-white/10 rounded-xl px-4 py-3 text-white text-sm outline-none focus:border-indigo-500/50 transition-all"
|
| 863 |
+
/>
|
| 864 |
+
<button type="submit" className="absolute right-2 top-1/2 -translate-y-1/2 p-1.5 text-indigo-400 hover:text-indigo-300">
|
| 865 |
+
<ChevronRight size={18} />
|
| 866 |
+
</button>
|
| 867 |
+
</div>
|
| 868 |
+
</form>
|
| 869 |
+
</div>
|
| 870 |
+
</div>
|
| 871 |
+
</div>
|
| 872 |
+
</div>
|
| 873 |
+
)}
|
| 874 |
+
</div>
|
| 875 |
+
);
|
| 876 |
+
};
|
src/components/Layout/AuthLayout.tsx
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React from 'react';
|
| 2 |
+
|
| 3 |
+
interface AuthLayoutProps {
|
| 4 |
+
children: React.ReactNode;
|
| 5 |
+
illustration?: React.ReactNode;
|
| 6 |
+
}
|
| 7 |
+
|
| 8 |
+
export const AuthLayout: React.FC<AuthLayoutProps> = ({ children, illustration }) => {
|
| 9 |
+
return (
|
| 10 |
+
<div className="min-h-screen w-full flex items-center justify-center bg-[#F8FAFC] overflow-hidden relative font-['Poppins']">
|
| 11 |
+
{/* Dynamic Background Gradients - Lighter & Softer */}
|
| 12 |
+
<div className="absolute top-[-10%] left-[-10%] w-[50%] h-[50%] bg-brand/10 rounded-full blur-[120px] animate-pulse" />
|
| 13 |
+
<div className="absolute bottom-[-10%] right-[-10%] w-[50%] h-[50%] bg-brand-light/10 rounded-full blur-[120px] animate-pulse" />
|
| 14 |
+
<div className="absolute top-[20%] right-[10%] w-[30%] h-[30%] bg-[#E0E7FF]/20 rounded-full blur-[100px]" />
|
| 15 |
+
|
| 16 |
+
<div className="container mx-auto px-4 z-10 flex items-center justify-center">
|
| 17 |
+
<div className="flex w-full max-w-5xl bg-white/40 backdrop-blur-md rounded-[40px] border border-white/60 overflow-hidden shadow-[0_20px_50px_rgba(0,0,0,0.05)]">
|
| 18 |
+
{/* Illustration Side (Desktop) */}
|
| 19 |
+
{illustration && (
|
| 20 |
+
<div className="hidden lg:flex lg:w-1/2 p-12 items-center justify-center bg-gradient-to-br from-brand/5 to-brand-dark/5 border-r border-white/40">
|
| 21 |
+
{illustration}
|
| 22 |
+
</div>
|
| 23 |
+
)}
|
| 24 |
+
|
| 25 |
+
{/* Form Side */}
|
| 26 |
+
<div className={`w-full ${illustration ? 'lg:w-1/2' : ''} p-8 lg:p-12`}>
|
| 27 |
+
{children}
|
| 28 |
+
</div>
|
| 29 |
+
</div>
|
| 30 |
+
</div>
|
| 31 |
+
</div>
|
| 32 |
+
);
|
| 33 |
+
};
|
src/components/Layout/DashboardLayout.tsx
ADDED
|
@@ -0,0 +1,483 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState } from 'react';
|
| 2 |
+
import { motion, AnimatePresence } from 'framer-motion';
|
| 3 |
+
import {
|
| 4 |
+
LayoutDashboard,
|
| 5 |
+
BookOpen,
|
| 6 |
+
Bookmark,
|
| 7 |
+
Settings,
|
| 8 |
+
LogOut,
|
| 9 |
+
Search,
|
| 10 |
+
Bell,
|
| 11 |
+
Menu,
|
| 12 |
+
X,
|
| 13 |
+
ChevronRight,
|
| 14 |
+
TrendingUp,
|
| 15 |
+
User,
|
| 16 |
+
Activity,
|
| 17 |
+
Award
|
| 18 |
+
} from 'lucide-react';
|
| 19 |
+
import { Link, useLocation, useNavigate } from 'react-router-dom';
|
| 20 |
+
import { useAppContext } from '../../context/AppContext';
|
| 21 |
+
|
| 22 |
+
interface DashboardLayoutProps {
|
| 23 |
+
children: React.ReactNode;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
// Primary navigation entries for the sidebar (icon, display label, router path).
// 'Settings' deliberately points at '#': its Link click is intercepted further
// down to open the settings modal instead of navigating.
const sidebarItems = [
  { icon: LayoutDashboard, label: 'Dashboard', path: '/dashboard' },
  { icon: BookOpen, label: 'Navigator', path: '/navigator' },
  { icon: Bookmark, label: 'Resources', path: '/resources' },
  { icon: Settings, label: 'Settings', path: '#' },
];
|
| 32 |
+
|
| 33 |
+
export const DashboardLayout: React.FC<DashboardLayoutProps> = ({ children }) => {
|
| 34 |
+
const [isSidebarOpen, setIsSidebarOpen] = useState(true);
|
| 35 |
+
const [isMobileMenuOpen, setIsMobileMenuOpen] = useState(false);
|
| 36 |
+
const location = useLocation();
|
| 37 |
+
const navigate = useNavigate();
|
| 38 |
+
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
|
| 39 |
+
const [isProfileOpen, setIsProfileOpen] = useState(false);
|
| 40 |
+
const [isNotificationsOpen, setIsNotificationsOpen] = useState(false);
|
| 41 |
+
const { agent, levelUpMessage, setLevelUpMessage, notifications, markNotificationsAsRead } = useAppContext();
|
| 42 |
+
|
| 43 |
+
const progressPercent = agent.totalReward % 100;
|
| 44 |
+
const unreadCount = notifications.filter(n => !n.read).length;
|
| 45 |
+
|
| 46 |
+
// Sign the user out by routing back to the login screen.
// NOTE(review): only navigates — no visible session/token/context teardown
// happens here; confirm none is required before relying on this as a logout.
const handleSignOut = () => {
  // Basic sign out
  navigate('/login');
};
|
| 50 |
+
|
| 51 |
+
return (
|
| 52 |
+
<div className="min-h-screen bg-[#F0F4F8] text-slate-800 font-['Poppins'] overflow-hidden flex transition-colors duration-500">
|
| 53 |
+
{/* Background Blobs - Subtle & Airy */}
|
| 54 |
+
<div className="fixed top-[-10%] left-[-10%] w-[40%] h-[40%] bg-brand/5 rounded-full blur-[120px] pointer-events-none" />
|
| 55 |
+
<div className="fixed bottom-[-10%] right-[-10%] w-[40%] h-[40%] bg-brand-light/5 rounded-full blur-[120px] pointer-events-none" />
|
| 56 |
+
|
| 57 |
+
{/* Sidebar - Desktop */}
|
| 58 |
+
<motion.aside
|
| 59 |
+
initial={false}
|
| 60 |
+
animate={{ width: isSidebarOpen ? 280 : 88 }}
|
| 61 |
+
className="hidden lg:flex flex-col bg-white/60 backdrop-blur-xl border-r border-slate-200 relative z-30 h-screen transition-all duration-300 shadow-xl shadow-slate-200/50"
|
| 62 |
+
>
|
| 63 |
+
<div className="p-6 flex items-center gap-3">
|
| 64 |
+
<AnimatePresence>
|
| 65 |
+
{isSidebarOpen && (
|
| 66 |
+
<motion.span
|
| 67 |
+
initial={{ opacity: 0, x: -10 }}
|
| 68 |
+
animate={{ opacity: 1, x: 0 }}
|
| 69 |
+
exit={{ opacity: 0, x: -10 }}
|
| 70 |
+
className="text-xl font-extrabold whitespace-nowrap bg-clip-text text-transparent bg-gradient-to-r from-brand to-brand-dark uppercase tracking-tighter"
|
| 71 |
+
>
|
| 72 |
+
Navigated Learning
|
| 73 |
+
</motion.span>
|
| 74 |
+
)}
|
| 75 |
+
</AnimatePresence>
|
| 76 |
+
</div>
|
| 77 |
+
|
| 78 |
+
<nav className="flex-1 px-4 py-10 space-y-3">
|
| 79 |
+
{sidebarItems.map((item) => {
|
| 80 |
+
const isActive = location.pathname === item.path;
|
| 81 |
+
return (
|
| 82 |
+
<Link
|
| 83 |
+
key={item.path}
|
| 84 |
+
to={item.path}
|
| 85 |
+
onClick={(e) => {
|
| 86 |
+
if (item.label === 'Settings') {
|
| 87 |
+
e.preventDefault();
|
| 88 |
+
setIsSettingsOpen(true);
|
| 89 |
+
}
|
| 90 |
+
}}
|
| 91 |
+
className={`
|
| 92 |
+
flex items-center gap-4 px-4 py-3.5 rounded-2xl transition-all duration-300 group relative
|
| 93 |
+
${isActive ? 'bg-white text-brand shadow-lg shadow-brand/10' : 'text-slate-500 hover:bg-white/40 hover:text-brand'}
|
| 94 |
+
`}
|
| 95 |
+
>
|
| 96 |
+
{isActive && (
|
| 97 |
+
<motion.div layoutId="activePill" className="absolute left-0 w-1.5 h-6 bg-brand rounded-full" />
|
| 98 |
+
)}
|
| 99 |
+
<item.icon size={22} className={`${isActive ? 'text-brand' : 'group-hover:scale-110 transition-transform'}`} />
|
| 100 |
+
{isSidebarOpen && <span className="font-bold">{item.label}</span>}
|
| 101 |
+
{isActive && isSidebarOpen && (
|
| 102 |
+
<motion.div layoutId="activeTab" className="ml-auto">
|
| 103 |
+
<ChevronRight size={16} />
|
| 104 |
+
</motion.div>
|
| 105 |
+
)}
|
| 106 |
+
</Link>
|
| 107 |
+
);
|
| 108 |
+
})}
|
| 109 |
+
</nav>
|
| 110 |
+
|
| 111 |
+
<div className="p-6 border-t border-slate-100">
|
| 112 |
+
<button
|
| 113 |
+
onClick={handleSignOut}
|
| 114 |
+
className="flex items-center gap-4 px-4 py-3 w-full rounded-2xl text-slate-400 hover:bg-red-50 hover:text-red-500 transition-all duration-300 font-bold">
|
| 115 |
+
<LogOut size={22} />
|
| 116 |
+
{isSidebarOpen && <span>Sign Out</span>}
|
| 117 |
+
</button>
|
| 118 |
+
</div>
|
| 119 |
+
|
| 120 |
+
{/* Toggle Button */}
|
| 121 |
+
<button
|
| 122 |
+
onClick={() => setIsSidebarOpen(!isSidebarOpen)}
|
| 123 |
+
className="absolute -right-3 top-20 w-7 h-7 bg-white rounded-full flex items-center justify-center border border-slate-200 shadow-xl hover:scale-110 hover:border-brand transition-all group"
|
| 124 |
+
>
|
| 125 |
+
<ChevronRight size={14} className={`text-slate-400 group-hover:text-brand transition-transform duration-300 ${isSidebarOpen ? 'rotate-180' : ''}`} />
|
| 126 |
+
</button>
|
| 127 |
+
</motion.aside>
|
| 128 |
+
|
| 129 |
+
{/* Main Content Area */}
|
| 130 |
+
<div className="flex-1 flex flex-col h-screen overflow-hidden">
|
| 131 |
+
{/* Navbar */}
|
| 132 |
+
<header className="h-20 bg-white/40 backdrop-blur-md border-b border-white/80 flex items-center justify-between px-6 lg:px-12 shrink-0 z-20">
|
| 133 |
+
<div className="flex items-center gap-4 flex-1">
|
| 134 |
+
<button
|
| 135 |
+
className="lg:hidden p-2 text-slate-500 hover:text-brand transition-colors"
|
| 136 |
+
onClick={() => setIsMobileMenuOpen(true)}
|
| 137 |
+
>
|
| 138 |
+
<Menu size={24} />
|
| 139 |
+
</button>
|
| 140 |
+
|
| 141 |
+
<div className="relative w-full max-w-md hidden sm:block">
|
| 142 |
+
<Search className="absolute left-4 top-1/2 -translate-y-1/2 text-slate-400" size={18} />
|
| 143 |
+
<input
|
| 144 |
+
type="text"
|
| 145 |
+
placeholder="Find a module or project..."
|
| 146 |
+
className="w-full bg-white/80 border border-slate-200 rounded-2xl py-3 pl-12 pr-4 outline-none focus:border-brand focus:ring-4 focus:ring-brand/10 transition-all text-sm shadow-sm"
|
| 147 |
+
/>
|
| 148 |
+
</div>
|
| 149 |
+
</div>
|
| 150 |
+
|
| 151 |
+
<div className="flex items-center gap-4 lg:gap-8">
|
| 152 |
+
<button
|
| 153 |
+
onClick={() => {
|
| 154 |
+
setIsNotificationsOpen(!isNotificationsOpen);
|
| 155 |
+
if (!isNotificationsOpen) markNotificationsAsRead();
|
| 156 |
+
}}
|
| 157 |
+
className="relative p-2.5 bg-white border border-slate-100 rounded-xl text-slate-500 hover:text-brand hover:border-brand/20 transition-all group shadow-sm"
|
| 158 |
+
>
|
| 159 |
+
<Bell size={20} />
|
| 160 |
+
{unreadCount > 0 && (
|
| 161 |
+
<span className="absolute top-2 right-2 w-2.5 h-2.5 bg-brand rounded-full ring-4 ring-white" />
|
| 162 |
+
)}
|
| 163 |
+
</button>
|
| 164 |
+
|
| 165 |
+
<div className="h-8 w-px bg-slate-200 hidden sm:block" />
|
| 166 |
+
|
| 167 |
+
<div className="flex items-center gap-4 pl-2 group">
|
| 168 |
+
<div className="text-right hidden sm:block">
|
| 169 |
+
<p className="text-sm font-extrabold text-slate-900 group-hover:text-brand transition-colors">Learner</p>
|
| 170 |
+
<div className="flex items-center gap-2 justify-end mt-0.5">
|
| 171 |
+
<div className="w-16 h-1 bg-slate-100 rounded-full overflow-hidden">
|
| 172 |
+
<motion.div
|
| 173 |
+
initial={{ width: 0 }}
|
| 174 |
+
animate={{ width: `${progressPercent}%` }}
|
| 175 |
+
className="h-full bg-brand"
|
| 176 |
+
/>
|
| 177 |
+
</div>
|
| 178 |
+
<p className="text-[9px] text-slate-400 font-bold uppercase tracking-widest">{progressPercent}% SYNC</p>
|
| 179 |
+
</div>
|
| 180 |
+
</div>
|
| 181 |
+
<motion.div
|
| 182 |
+
whileHover={{ scale: 1.05 }}
|
| 183 |
+
onClick={() => setIsProfileOpen(true)}
|
| 184 |
+
className="w-11 h-11 rounded-2xl bg-gradient-to-tr from-brand to-brand-light p-0.5 shadow-lg shadow-brand/20 cursor-pointer"
|
| 185 |
+
>
|
| 186 |
+
<div className="w-full h-full rounded-[14px] bg-white flex items-center justify-center overflow-hidden">
|
| 187 |
+
<img src="https://api.dicebear.com/7.x/avataaars/svg?seed=Felix" alt="Avatar" className="w-full h-full object-cover" />
|
| 188 |
+
</div>
|
| 189 |
+
</motion.div>
|
| 190 |
+
</div>
|
| 191 |
+
</div>
|
| 192 |
+
</header>
|
| 193 |
+
|
| 194 |
+
{/* Scrollable Content */}
|
| 195 |
+
<main className="flex-1 overflow-y-auto custom-scrollbar p-6 lg:p-12 relative">
|
| 196 |
+
<motion.div
|
| 197 |
+
initial={{ opacity: 0, y: 20 }}
|
| 198 |
+
animate={{ opacity: 1, y: 0 }}
|
| 199 |
+
transition={{ duration: 0.5 }}
|
| 200 |
+
>
|
| 201 |
+
{children}
|
| 202 |
+
</motion.div>
|
| 203 |
+
</main>
|
| 204 |
+
</div>
|
| 205 |
+
|
| 206 |
+
{/* Mobile Menu Overlay */}
|
| 207 |
+
<AnimatePresence>
|
| 208 |
+
{isMobileMenuOpen && (
|
| 209 |
+
<>
|
| 210 |
+
<motion.div
|
| 211 |
+
initial={{ opacity: 0 }}
|
| 212 |
+
animate={{ opacity: 1 }}
|
| 213 |
+
exit={{ opacity: 0 }}
|
| 214 |
+
onClick={() => setIsMobileMenuOpen(false)}
|
| 215 |
+
className="fixed inset-0 bg-black/60 backdrop-blur-sm z-40 lg:hidden"
|
| 216 |
+
/>
|
| 217 |
+
<motion.aside
|
| 218 |
+
initial={{ x: '-100%' }}
|
| 219 |
+
animate={{ x: 0 }}
|
| 220 |
+
exit={{ x: '-100%' }}
|
| 221 |
+
className="fixed inset-y-0 left-0 w-72 bg-white border-r border-slate-200 z-50 lg:hidden p-6 shadow-2xl"
|
| 222 |
+
>
|
| 223 |
+
<div className="flex items-center justify-between mb-10">
|
| 224 |
+
<div className="flex items-center gap-3">
|
| 225 |
+
<span className="text-lg font-black text-brand uppercase tracking-tighter">Navigated Learning</span>
|
| 226 |
+
</div>
|
| 227 |
+
<button onClick={() => setIsMobileMenuOpen(false)} className="p-2 text-slate-400 hover:text-brand transition-colors">
|
| 228 |
+
<X size={24} />
|
| 229 |
+
</button>
|
| 230 |
+
</div>
|
| 231 |
+
|
| 232 |
+
<nav className="space-y-3">
|
| 233 |
+
{sidebarItems.map((item) => {
|
| 234 |
+
const isActive = location.pathname === item.path;
|
| 235 |
+
return (
|
| 236 |
+
<Link
|
| 237 |
+
key={item.path}
|
| 238 |
+
to={item.path}
|
| 239 |
+
onClick={() => setIsMobileMenuOpen(false)}
|
| 240 |
+
className={`flex items-center gap-4 px-4 py-3.5 rounded-2xl transition-all ${isActive ? 'bg-brand/10 text-brand font-bold' : 'text-slate-500 hover:bg-slate-50'}`}
|
| 241 |
+
>
|
| 242 |
+
<item.icon size={20} className={isActive ? 'text-brand' : ''} />
|
| 243 |
+
<span>{item.label}</span>
|
| 244 |
+
</Link>
|
| 245 |
+
);
|
| 246 |
+
})}
|
| 247 |
+
</nav>
|
| 248 |
+
</motion.aside>
|
| 249 |
+
</>
|
| 250 |
+
)}
|
| 251 |
+
</AnimatePresence>
|
| 252 |
+
|
| 253 |
+
<style dangerouslySetInnerHTML={{ __html: `
|
| 254 |
+
.custom-scrollbar::-webkit-scrollbar {
|
| 255 |
+
width: 6px;
|
| 256 |
+
}
|
| 257 |
+
.custom-scrollbar::-webkit-scrollbar-track {
|
| 258 |
+
background: transparent;
|
| 259 |
+
}
|
| 260 |
+
.custom-scrollbar::-webkit-scrollbar-thumb {
|
| 261 |
+
background: rgba(255, 255, 255, 0.1);
|
| 262 |
+
border-radius: 10px;
|
| 263 |
+
}
|
| 264 |
+
.custom-scrollbar::-webkit-scrollbar-thumb:hover {
|
| 265 |
+
background: rgba(255, 255, 255, 0.2);
|
| 266 |
+
}
|
| 267 |
+
`}} />
|
| 268 |
+
|
| 269 |
+
{/* Notifications Dropdown */}
|
| 270 |
+
<AnimatePresence>
|
| 271 |
+
{isNotificationsOpen && (
|
| 272 |
+
<>
|
| 273 |
+
<div className="fixed inset-0 z-[140]" onClick={() => setIsNotificationsOpen(false)} />
|
| 274 |
+
<motion.div
|
| 275 |
+
initial={{ opacity: 0, y: 10, scale: 0.95 }}
|
| 276 |
+
animate={{ opacity: 1, y: 0, scale: 1 }}
|
| 277 |
+
exit={{ opacity: 0, y: 10, scale: 0.95 }}
|
| 278 |
+
className="fixed top-24 right-12 w-80 bg-white rounded-3xl shadow-2xl border border-slate-100 z-[150] overflow-hidden"
|
| 279 |
+
>
|
| 280 |
+
<div className="p-5 border-b border-slate-100 bg-slate-50/50 flex justify-between items-center">
|
| 281 |
+
<h3 className="font-bold text-slate-800">Intelligence Briefing</h3>
|
| 282 |
+
<span className="text-[10px] font-black text-brand uppercase tracking-widest">{notifications.length} Events</span>
|
| 283 |
+
</div>
|
| 284 |
+
<div className="max-h-[400px] overflow-y-auto">
|
| 285 |
+
{notifications.length > 0 ? (
|
| 286 |
+
notifications.map((n) => (
|
| 287 |
+
<div key={n.id} className="p-5 border-b border-slate-50 last:border-0 hover:bg-slate-50 transition-colors">
|
| 288 |
+
<div className="flex gap-4">
|
| 289 |
+
<div className={`w-2 h-2 rounded-full mt-1.5 shrink-0 ${n.type === 'success' ? 'bg-green-500' : 'bg-brand'}`} />
|
| 290 |
+
<div>
|
| 291 |
+
<p className="text-xs font-medium text-slate-700 leading-relaxed">{n.message}</p>
|
| 292 |
+
<p className="text-[9px] text-slate-400 font-bold uppercase mt-2">{new Date(n.timestamp).toLocaleTimeString()}</p>
|
| 293 |
+
</div>
|
| 294 |
+
</div>
|
| 295 |
+
</div>
|
| 296 |
+
))
|
| 297 |
+
) : (
|
| 298 |
+
<div className="p-10 text-center text-slate-400">
|
| 299 |
+
<p className="text-xs font-bold uppercase tracking-widest">Awaiting Intel...</p>
|
| 300 |
+
</div>
|
| 301 |
+
)}
|
| 302 |
+
</div>
|
| 303 |
+
</motion.div>
|
| 304 |
+
</>
|
| 305 |
+
)}
|
| 306 |
+
</AnimatePresence>
|
| 307 |
+
|
| 308 |
+
{/* Profile Modal */}
|
| 309 |
+
<AnimatePresence>
|
| 310 |
+
{isProfileOpen && (
|
| 311 |
+
<div className="fixed inset-0 bg-slate-900/60 backdrop-blur-md z-[100] flex items-center justify-center p-4">
|
| 312 |
+
<motion.div
|
| 313 |
+
initial={{ scale: 0.9, opacity: 0, y: 20 }}
|
| 314 |
+
animate={{ scale: 1, opacity: 1, y: 0 }}
|
| 315 |
+
exit={{ scale: 0.9, opacity: 0, y: 20 }}
|
| 316 |
+
className="bg-white rounded-[3rem] shadow-2xl w-full max-w-lg overflow-hidden relative border border-white/20"
|
| 317 |
+
>
|
| 318 |
+
{/* Profile Background Header */}
|
| 319 |
+
<div className="h-40 bg-gradient-to-br from-brand to-brand-dark relative">
|
| 320 |
+
<button
|
| 321 |
+
onClick={() => setIsProfileOpen(false)}
|
| 322 |
+
className="absolute top-6 right-6 p-2 bg-white/20 hover:bg-white/40 text-white rounded-full transition-all"
|
| 323 |
+
>
|
| 324 |
+
<X size={20} />
|
| 325 |
+
</button>
|
| 326 |
+
</div>
|
| 327 |
+
|
| 328 |
+
<div className="px-10 pb-10 -mt-16 relative">
|
| 329 |
+
<div className="flex items-end justify-between mb-8">
|
| 330 |
+
<div className="w-32 h-32 rounded-[2.5rem] bg-white p-1.5 shadow-2xl border border-slate-100">
|
| 331 |
+
<div className="w-full h-full rounded-[2rem] overflow-hidden bg-slate-50">
|
| 332 |
+
<img src="https://api.dicebear.com/7.x/avataaars/svg?seed=Felix" alt="Profile" className="w-full h-full object-cover" />
|
| 333 |
+
</div>
|
| 334 |
+
</div>
|
| 335 |
+
<div className="flex flex-col items-end pb-2">
|
| 336 |
+
<span className="px-4 py-1.5 bg-brand text-white text-[10px] font-black uppercase tracking-widest rounded-xl shadow-lg shadow-brand/20">Stage {agent.level} Explorer</span>
|
| 337 |
+
<p className="text-[10px] text-slate-400 font-bold uppercase tracking-widest mt-2">ID: Learner_NL_01</p>
|
| 338 |
+
</div>
|
| 339 |
+
</div>
|
| 340 |
+
|
| 341 |
+
<div className="mb-8">
|
| 342 |
+
<h2 className="text-3xl font-black text-slate-900 uppercase tracking-tighter">Learner</h2>
|
| 343 |
+
<p className="text-slate-500 font-medium mt-1">Neural Architect & NLP Specialist</p>
|
| 344 |
+
</div>
|
| 345 |
+
|
| 346 |
+
<div className="grid grid-cols-2 gap-4 mb-8">
|
| 347 |
+
<div className="bg-slate-50 p-5 rounded-3xl border border-slate-100">
|
| 348 |
+
<div className="flex items-center gap-2 text-brand mb-2">
|
| 349 |
+
<Activity size={16} />
|
| 350 |
+
<span className="text-[9px] font-black uppercase tracking-widest">Total XP</span>
|
| 351 |
+
</div>
|
| 352 |
+
<p className="text-2xl font-black text-slate-900 tracking-tight">{agent.totalReward}</p>
|
| 353 |
+
</div>
|
| 354 |
+
<div className="bg-slate-50 p-5 rounded-3xl border border-slate-100">
|
| 355 |
+
<div className="flex items-center gap-2 text-green-500 mb-2">
|
| 356 |
+
<Award size={16} />
|
| 357 |
+
<span className="text-[9px] font-black uppercase tracking-widest">Matrix Clear</span>
|
| 358 |
+
</div>
|
| 359 |
+
<p className="text-2xl font-black text-slate-900 tracking-tight">{agent.visitedResources.length}/18</p>
|
| 360 |
+
</div>
|
| 361 |
+
</div>
|
| 362 |
+
|
| 363 |
+
<div className="space-y-3">
|
| 364 |
+
<div className="flex justify-between items-end">
|
| 365 |
+
<span className="text-[10px] font-black text-slate-400 uppercase tracking-widest">Current Stage Synchronization</span>
|
| 366 |
+
<span className="text-lg font-black text-brand tracking-tighter">{progressPercent}%</span>
|
| 367 |
+
</div>
|
| 368 |
+
<div className="h-3 bg-slate-100 rounded-full overflow-hidden border border-slate-200">
|
| 369 |
+
<motion.div
|
| 370 |
+
initial={{ width: 0 }}
|
| 371 |
+
animate={{ width: `${progressPercent}%` }}
|
| 372 |
+
className="h-full bg-brand"
|
| 373 |
+
/>
|
| 374 |
+
</div>
|
| 375 |
+
</div>
|
| 376 |
+
|
| 377 |
+
<div className="mt-10 flex gap-4">
|
| 378 |
+
<button className="flex-1 py-4 bg-slate-900 text-white rounded-2xl font-black text-[10px] uppercase tracking-widest shadow-xl shadow-slate-900/20 hover:scale-[1.02] active:scale-95 transition-all flex items-center justify-center gap-3">
|
| 379 |
+
Edit Profile <TrendingUp size={18} />
|
| 380 |
+
</button>
|
| 381 |
+
<button
|
| 382 |
+
onClick={handleSignOut}
|
| 383 |
+
className="p-4 bg-slate-100 text-slate-500 rounded-2xl hover:bg-red-50 hover:text-red-500 transition-all active:scale-95"
|
| 384 |
+
>
|
| 385 |
+
<LogOut size={20} />
|
| 386 |
+
</button>
|
| 387 |
+
</div>
|
| 388 |
+
</div>
|
| 389 |
+
</motion.div>
|
| 390 |
+
</div>
|
| 391 |
+
)}
|
| 392 |
+
</AnimatePresence>
|
| 393 |
+
|
| 394 |
+
{/* Settings Modal */}
|
| 395 |
+
<AnimatePresence>
|
| 396 |
+
{isSettingsOpen && (
|
| 397 |
+
<div className="fixed inset-0 bg-slate-900/40 backdrop-blur-sm z-[100] flex items-center justify-center p-4">
|
| 398 |
+
<motion.div
|
| 399 |
+
initial={{ scale: 0.95, opacity: 0 }}
|
| 400 |
+
animate={{ scale: 1, opacity: 1 }}
|
| 401 |
+
exit={{ scale: 0.95, opacity: 0 }}
|
| 402 |
+
className="bg-white rounded-[2rem] shadow-2xl w-full max-w-md overflow-hidden relative"
|
| 403 |
+
>
|
| 404 |
+
<div className="p-6 border-b border-slate-100 flex items-center justify-between bg-slate-50/50">
|
| 405 |
+
<div className="flex items-center gap-3">
|
| 406 |
+
<div className="p-2 bg-slate-200/50 rounded-xl text-slate-600">
|
| 407 |
+
<Settings size={20} />
|
| 408 |
+
</div>
|
| 409 |
+
<h3 className="font-bold text-lg text-slate-800">Preferences</h3>
|
| 410 |
+
</div>
|
| 411 |
+
<button onClick={() => setIsSettingsOpen(false)} className="p-2 text-slate-400 hover:bg-slate-200/50 hover:text-slate-600 rounded-full transition-colors">
|
| 412 |
+
<X size={20} />
|
| 413 |
+
</button>
|
| 414 |
+
</div>
|
| 415 |
+
|
| 416 |
+
<div className="p-6 space-y-6 bg-white">
|
| 417 |
+
<div>
|
| 418 |
+
<h4 className="text-xs font-bold text-slate-400 uppercase tracking-wider mb-4">Neural Settings</h4>
|
| 419 |
+
<div className="space-y-3">
|
| 420 |
+
<div className="flex items-center justify-between p-3 rounded-2xl border border-slate-100 bg-slate-50/50">
|
| 421 |
+
<div>
|
| 422 |
+
<p className="text-sm font-semibold text-slate-700">High-Performance Engine</p>
|
| 423 |
+
<p className="text-[10px] text-slate-400 font-medium mt-0.5">Accelerate matrix polyline generation</p>
|
| 424 |
+
</div>
|
| 425 |
+
<div className="w-10 h-6 bg-brand rounded-full relative cursor-pointer shadow-inner">
|
| 426 |
+
<div className="absolute right-1 top-1 w-4 h-4 bg-white rounded-full shadow-sm" />
|
| 427 |
+
</div>
|
| 428 |
+
</div>
|
| 429 |
+
</div>
|
| 430 |
+
</div>
|
| 431 |
+
|
| 432 |
+
<div>
|
| 433 |
+
<h4 className="text-xs font-bold text-slate-400 uppercase tracking-wider mb-4">Danger Zone</h4>
|
| 434 |
+
<button
|
| 435 |
+
onClick={async () => {
|
| 436 |
+
try {
|
| 437 |
+
const res = await fetch('http://localhost:5000/api/reset', { method: 'POST' });
|
| 438 |
+
if (res.ok) window.location.reload();
|
| 439 |
+
} catch (e) {
|
| 440 |
+
console.error(e);
|
| 441 |
+
}
|
| 442 |
+
}}
|
| 443 |
+
className="w-full flex items-center justify-between p-3 rounded-2xl border border-red-100 bg-red-50 hover:bg-red-100 transition-colors group"
|
| 444 |
+
>
|
| 445 |
+
<div className="text-left">
|
| 446 |
+
<p className="text-sm font-semibold text-red-600 group-hover:text-red-700">Wipe Database Memory</p>
|
| 447 |
+
<p className="text-[10px] text-red-400 mt-0.5">Permanently resets all learning progress & histories</p>
|
| 448 |
+
</div>
|
| 449 |
+
<LogOut size={18} className="text-red-400 group-hover:text-red-600" />
|
| 450 |
+
</button>
|
| 451 |
+
</div>
|
| 452 |
+
</div>
|
| 453 |
+
</motion.div>
|
| 454 |
+
</div>
|
| 455 |
+
)}
|
| 456 |
+
</AnimatePresence>
|
| 457 |
+
|
| 458 |
+
{/* Global Level Up Toast Notification */}
|
| 459 |
+
<AnimatePresence>
|
| 460 |
+
{levelUpMessage && (
|
| 461 |
+
<motion.div
|
| 462 |
+
initial={{ opacity: 0, y: -50, scale: 0.9 }}
|
| 463 |
+
animate={{ opacity: 1, y: 0, scale: 1 }}
|
| 464 |
+
exit={{ opacity: 0, y: -20, scale: 0.9 }}
|
| 465 |
+
className="fixed top-8 left-1/2 -translate-x-1/2 z-[200] bg-[#0f111a] text-white px-8 py-5 rounded-[2rem] shadow-[0_0_80px_-15px_rgba(79,70,229,0.5)] border border-white/10 flex items-center gap-6"
|
| 466 |
+
>
|
| 467 |
+
<div className="w-14 h-14 rounded-2xl bg-gradient-to-tr from-brand via-purple-500 to-pink-500 flex items-center justify-center shadow-inner relative overflow-hidden">
|
| 468 |
+
<div className="absolute inset-0 bg-[linear-gradient(45deg,transparent_25%,rgba(255,255,255,0.3)_50%,transparent_75%,transparent_100%)] bg-[length:200%_200%] animate-[shine_2s_infinite]" />
|
| 469 |
+
<span className="text-xl font-black drop-shadow-md">{agent.level}</span>
|
| 470 |
+
</div>
|
| 471 |
+
<div>
|
| 472 |
+
<p className="text-[10px] font-bold text-brand uppercase tracking-[0.3em] mb-1">Rank Up</p>
|
| 473 |
+
<h3 className="text-xl font-bold tracking-tight">{levelUpMessage}</h3>
|
| 474 |
+
</div>
|
| 475 |
+
<button onClick={() => setLevelUpMessage(null)} className="ml-4 p-2 text-white/50 hover:text-white transition-colors">
|
| 476 |
+
<X size={20} />
|
| 477 |
+
</button>
|
| 478 |
+
</motion.div>
|
| 479 |
+
)}
|
| 480 |
+
</AnimatePresence>
|
| 481 |
+
</div>
|
| 482 |
+
);
|
| 483 |
+
};
|
src/components/LearningRoadmap.tsx
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState } from 'react';
|
| 2 |
+
import { LearningActivity, LearningMap } from '../types';
|
| 3 |
+
import { CheckCircle, Clock, Lock, ArrowDown, Target } from 'lucide-react';
|
| 4 |
+
|
| 5 |
+
interface LearningRoadmapProps {
|
| 6 |
+
learningMap: LearningMap;
|
| 7 |
+
onActivitySelect: (activityId: string) => void;
|
| 8 |
+
}
|
| 9 |
+
|
| 10 |
+
export const LearningRoadmap: React.FC<LearningRoadmapProps> = ({
|
| 11 |
+
learningMap,
|
| 12 |
+
onActivitySelect
|
| 13 |
+
}) => {
|
| 14 |
+
const [selectedActivity, setSelectedActivity] = useState<string | null>(null);
|
| 15 |
+
|
| 16 |
+
const isActivityUnlocked = (activity: LearningActivity) => {
|
| 17 |
+
if (activity.prerequisites.length === 0) return true;
|
| 18 |
+
return activity.prerequisites.every(prereqId =>
|
| 19 |
+
learningMap.activities.find(a => a.id === prereqId)?.completed
|
| 20 |
+
);
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
const getActivityIcon = (activity: LearningActivity) => {
|
| 24 |
+
if (activity.completed) {
|
| 25 |
+
return <CheckCircle className="w-5 h-5 text-green-500" />;
|
| 26 |
+
} else if (isActivityUnlocked(activity)) {
|
| 27 |
+
return <Target className="w-5 h-5 text-blue-500" />;
|
| 28 |
+
} else {
|
| 29 |
+
return <Lock className="w-5 h-5 text-gray-400" />;
|
| 30 |
+
}
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
const getActivityStyle = (activity: LearningActivity) => {
|
| 34 |
+
if (activity.completed) {
|
| 35 |
+
return 'bg-green-50 border-green-200 text-green-800 shadow-sm';
|
| 36 |
+
} else if (isActivityUnlocked(activity)) {
|
| 37 |
+
return 'bg-blue-50 border-blue-200 text-blue-800 hover:bg-blue-100 cursor-pointer shadow-sm hover:shadow-md transform hover:scale-[1.02] transition-all duration-200';
|
| 38 |
+
} else {
|
| 39 |
+
return 'bg-gray-50 border-gray-200 text-gray-500';
|
| 40 |
+
}
|
| 41 |
+
};
|
| 42 |
+
|
| 43 |
+
const handleActivityClick = (activity: LearningActivity) => {
|
| 44 |
+
if (isActivityUnlocked(activity) && !activity.completed) {
|
| 45 |
+
setSelectedActivity(activity.id);
|
| 46 |
+
onActivitySelect(activity.id);
|
| 47 |
+
}
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
return (
|
| 51 |
+
<div className="bg-white p-3 lg:p-4 rounded-lg shadow-lg h-full flex flex-col">
|
| 52 |
+
<div className="mb-6">
|
| 53 |
+
<h2 className="text-sm lg:text-base font-semibold text-gray-800 mb-2">Learning Roadmap</h2>
|
| 54 |
+
<div className="flex items-center justify-between">
|
| 55 |
+
<p className="text-xs text-gray-600">Track your learning journey</p>
|
| 56 |
+
<div className="flex items-center space-x-2">
|
| 57 |
+
<div className="text-xs text-gray-600">Progress:</div>
|
| 58 |
+
<div className="text-xs font-semibold text-blue-600">
|
| 59 |
+
{Math.round(learningMap.progressPercentage)}%
|
| 60 |
+
</div>
|
| 61 |
+
</div>
|
| 62 |
+
</div>
|
| 63 |
+
<div className="w-full bg-gray-200 rounded-full h-2 mt-2">
|
| 64 |
+
<div
|
| 65 |
+
className="bg-gradient-to-r from-blue-500 to-blue-600 h-2 rounded-full transition-all duration-500"
|
| 66 |
+
style={{ width: `${learningMap.progressPercentage}%` }}
|
| 67 |
+
></div>
|
| 68 |
+
</div>
|
| 69 |
+
</div>
|
| 70 |
+
|
| 71 |
+
<div className="space-y-3 flex-1 overflow-y-auto">
|
| 72 |
+
{learningMap.activities.map((activity, index) => (
|
| 73 |
+
<div key={activity.id} className="relative">
|
| 74 |
+
<div
|
| 75 |
+
className={`
|
| 76 |
+
p-4 rounded-lg border-2 transition-all duration-300
|
| 77 |
+
${getActivityStyle(activity)}
|
| 78 |
+
${selectedActivity === activity.id ? 'ring-2 ring-blue-400 ring-offset-2' : ''}
|
| 79 |
+
`}
|
| 80 |
+
onClick={() => handleActivityClick(activity)}
|
| 81 |
+
>
|
| 82 |
+
<div className="flex items-start space-x-2">
|
| 83 |
+
<div className="flex-shrink-0 mt-1">
|
| 84 |
+
{getActivityIcon(activity)}
|
| 85 |
+
</div>
|
| 86 |
+
<div className="flex-1 min-w-0">
|
| 87 |
+
<h3 className="font-semibold text-sm mb-1">{activity.name}</h3>
|
| 88 |
+
<p className="text-xs opacity-80 mb-2">{activity.description}</p>
|
| 89 |
+
|
| 90 |
+
<div className="flex flex-col gap-1">
|
| 91 |
+
<div className="flex items-center space-x-3">
|
| 92 |
+
<div className="flex items-center space-x-1">
|
| 93 |
+
<Clock className="w-3 h-3" />
|
| 94 |
+
<span>{activity.estimatedTime}</span>
|
| 95 |
+
</div>
|
| 96 |
+
<div className="flex items-center space-x-1">
|
| 97 |
+
<div className={`
|
| 98 |
+
w-3 h-3 rounded-full shadow-sm
|
| 99 |
+
${activity.difficulty <= 2 ? 'bg-green-400' :
|
| 100 |
+
activity.difficulty <= 4 ? 'bg-yellow-400' : 'bg-red-400'}
|
| 101 |
+
`}></div>
|
| 102 |
+
<span>Level {activity.difficulty}</span>
|
| 103 |
+
</div>
|
| 104 |
+
</div>
|
| 105 |
+
|
| 106 |
+
{activity.prerequisites.length > 0 && (
|
| 107 |
+
<div className="text-xs text-gray-500">
|
| 108 |
+
Prerequisites: {activity.prerequisites.length}
|
| 109 |
+
</div>
|
| 110 |
+
)}
|
| 111 |
+
</div>
|
| 112 |
+
</div>
|
| 113 |
+
</div>
|
| 114 |
+
|
| 115 |
+
{learningMap.currentActivity === activity.id && (
|
| 116 |
+
<div className="absolute -left-2 top-1/2 transform -translate-y-1/2">
|
| 117 |
+
<div className="w-1 h-8 bg-gradient-to-b from-blue-400 to-blue-600 rounded-full shadow-sm"></div>
|
| 118 |
+
</div>
|
| 119 |
+
)}
|
| 120 |
+
</div>
|
| 121 |
+
|
| 122 |
+
{index < learningMap.activities.length - 1 && (
|
| 123 |
+
<div className="flex justify-center py-2">
|
| 124 |
+
<div className="flex flex-col items-center">
|
| 125 |
+
<div className="w-px h-4 bg-gray-300"></div>
|
| 126 |
+
<ArrowDown className="w-4 h-4 text-gray-400" />
|
| 127 |
+
<div className="w-px h-4 bg-gray-300"></div>
|
| 128 |
+
</div>
|
| 129 |
+
</div>
|
| 130 |
+
)}
|
| 131 |
+
</div>
|
| 132 |
+
))}
|
| 133 |
+
</div>
|
| 134 |
+
|
| 135 |
+
{/* Legend */}
|
| 136 |
+
<div className="mt-6 p-3 bg-gradient-to-r from-gray-50 to-gray-100 rounded-lg border">
|
| 137 |
+
<h4 className="text-xs font-semibold text-gray-700 mb-2">Legend</h4>
|
| 138 |
+
<div className="grid grid-cols-3 gap-2 text-xs">
|
| 139 |
+
<div className="flex items-center space-x-1">
|
| 140 |
+
<CheckCircle className="w-3 h-3 text-green-500" />
|
| 141 |
+
<span className="text-gray-600">Completed</span>
|
| 142 |
+
</div>
|
| 143 |
+
<div className="flex items-center space-x-1">
|
| 144 |
+
<Target className="w-3 h-3 text-blue-500" />
|
| 145 |
+
<span className="text-gray-600">Available</span>
|
| 146 |
+
</div>
|
| 147 |
+
<div className="flex items-center space-x-1">
|
| 148 |
+
<Lock className="w-3 h-3 text-gray-400" />
|
| 149 |
+
<span className="text-gray-600">Locked</span>
|
| 150 |
+
</div>
|
| 151 |
+
</div>
|
| 152 |
+
</div>
|
| 153 |
+
</div>
|
| 154 |
+
);
|
| 155 |
+
};
|
src/components/ui/AnimatedButton.tsx
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React from 'react';
import { motion } from 'framer-motion';
import { LucideIcon } from 'lucide-react';

interface AnimatedButtonProps {
  children: React.ReactNode;
  onClick?: () => void;
  variant?: 'primary' | 'secondary';
  icon?: LucideIcon;
  className?: string;
  id?: string;
}

/**
 * Brand-styled call-to-action button with a fade/slide-in entrance and
 * hover-lift / press-shrink motion. Supports a primary (filled) and a
 * secondary (outlined) variant, plus an optional leading icon.
 */
export const AnimatedButton: React.FC<AnimatedButtonProps> = ({
  children,
  onClick,
  variant = 'primary',
  icon: IconComponent,
  className = '',
  id
}) => {
  const primary = variant === 'primary';

  // Variant-specific colour/border treatment; shared layout classes live in the JSX below.
  const variantClasses = primary
    ? 'bg-brand hover:bg-brand-dark text-white shadow-lg shadow-brand/10 border border-white/10'
    : 'bg-white border border-slate-200 text-slate-600 hover:border-brand/40 hover:text-brand shadow-sm';

  return (
    <motion.button
      id={id}
      onClick={onClick}
      whileHover={{ y: -2 }}
      whileTap={{ scale: 0.98 }}
      initial={{ opacity: 0, y: 10 }}
      animate={{
        opacity: 1,
        y: 0,
      }}
      transition={{
        opacity: { duration: 0.4 },
        y: { duration: 0.4, ease: "easeOut" }
      }}
      className={`
        relative overflow-hidden group flex items-center justify-center gap-3 px-8 py-3.5 rounded-xl font-bold uppercase tracking-wider text-[11px] transition-all duration-300
        ${variantClasses}
        ${className}
      `}
    >
      {/* Subtle Highlight (Primary) */}
      {primary && (
        <div className="absolute inset-0 bg-white/10 opacity-0 group-hover:opacity-100 transition-opacity duration-300" />
      )}

      {/* Content */}
      <span className="relative z-10 flex items-center gap-2">
        {IconComponent && <IconComponent size={16} className="transition-transform group-hover:rotate-0" />}
        {children}
      </span>

      {/* Professional Border Highlight */}
      {primary && (
        <div className="absolute inset-x-0 inset-y-0 border border-white/20 rounded-xl pointer-events-none" />
      )}
    </motion.button>
  );
};
|
src/components/ui/Button.tsx
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React from 'react';
|
| 2 |
+
import { motion, HTMLMotionProps } from 'framer-motion';
|
| 3 |
+
|
| 4 |
+
interface ButtonProps extends HTMLMotionProps<'button'> {
|
| 5 |
+
variant?: 'primary' | 'secondary' | 'outline' | 'ghost';
|
| 6 |
+
isLoading?: boolean;
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
export const Button: React.FC<ButtonProps> = ({
|
| 10 |
+
variant = 'primary',
|
| 11 |
+
isLoading,
|
| 12 |
+
children,
|
| 13 |
+
className = '',
|
| 14 |
+
...props
|
| 15 |
+
}) => {
|
| 16 |
+
const baseStyles = "px-6 py-3 rounded-2xl font-semibold transition-all duration-300 flex items-center justify-center gap-2 active:scale-95 disabled:opacity-50 disabled:cursor-not-allowed";
|
| 17 |
+
|
| 18 |
+
const variants = {
|
| 19 |
+
primary: "bg-gradient-to-r from-brand to-brand-dark text-white shadow-lg shadow-brand/30 hover:shadow-brand/50 hover:scale-[1.02]",
|
| 20 |
+
secondary: "bg-white/40 backdrop-blur-md text-brand border border-brand/20 hover:bg-white/60 hover:border-brand/40 shadow-sm",
|
| 21 |
+
outline: "border-2 border-brand text-brand hover:bg-brand hover:text-white",
|
| 22 |
+
ghost: "text-brand hover:bg-brand/10"
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
return (
|
| 26 |
+
<motion.button
|
| 27 |
+
whileHover={{ scale: 1.02 }}
|
| 28 |
+
whileTap={{ scale: 0.98 }}
|
| 29 |
+
className={`${baseStyles} ${variants[variant]} ${className}`}
|
| 30 |
+
disabled={isLoading || props.disabled}
|
| 31 |
+
{...props}
|
| 32 |
+
>
|
| 33 |
+
{isLoading ? (
|
| 34 |
+
<div className="w-5 h-5 border-2 border-white/30 border-t-white rounded-full animate-spin" />
|
| 35 |
+
) : children}
|
| 36 |
+
</motion.button>
|
| 37 |
+
);
|
| 38 |
+
};
|
src/components/ui/CustomCursor.tsx
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import React, { useEffect, useState } from 'react';
import { motion, useSpring, useMotionValue } from 'framer-motion';

/**
 * Replaces the native cursor with an animated HUD-style cursor:
 * a lead dot, three spring-lagged trailing dots, a pulsing outer ring
 * that enlarges over clickable elements, and a faint crosshair.
 * Listens globally on window for mousemove/mouseover and hides the
 * native cursor via an injected `cursor: none` style.
 * NOTE(review): no pointer-type check, so this also renders (and hides
 * the cursor) on touch-only devices — confirm whether that is intended.
 */
export const CustomCursor: React.FC = () => {
  // Raw pointer position; starts off-screen until the first mousemove.
  const cursorX = useMotionValue(-100);
  const cursorY = useMotionValue(-100);

  // True while the pointer is over a clickable element (see handleMouseOver).
  const [isHovering, setIsHovering] = useState(false);

  // Trailing effect: three dots, each following the cursor through a spring
  // with progressively softer physics so they lag further behind.
  const trail1X = useSpring(cursorX, { damping: 20, stiffness: 200 });
  const trail1Y = useSpring(cursorY, { damping: 20, stiffness: 200 });

  const trail2X = useSpring(cursorX, { damping: 30, stiffness: 150 });
  const trail2Y = useSpring(cursorY, { damping: 30, stiffness: 150 });

  const trail3X = useSpring(cursorX, { damping: 40, stiffness: 100 });
  const trail3Y = useSpring(cursorY, { damping: 40, stiffness: 100 });

  useEffect(() => {
    // Feed the raw pointer position into the motion values (bypasses React
    // re-renders; the springs above react to these directly).
    const moveCursor = (e: MouseEvent) => {
      cursorX.set(e.clientX);
      cursorY.set(e.clientY);
    };

    // Treat anything inside a button/link/form control as "clickable" so the
    // outer ring can swell as a hover affordance.
    const handleMouseOver = (e: MouseEvent) => {
      const target = e.target as HTMLElement;
      const isClickable = target.closest('button, a, input, select, [role="button"]');
      setIsHovering(!!isClickable);
    };

    window.addEventListener('mousemove', moveCursor);
    window.addEventListener('mouseover', handleMouseOver);

    return () => {
      window.removeEventListener('mousemove', moveCursor);
      window.removeEventListener('mouseover', handleMouseOver);
    };
  }, [cursorX, cursorY]);

  // Render data for the three trailing dots; smaller/fainter the further back.
  // NOTE(review): `delay` is never read below — dead field, or a planned
  // stagger that was replaced by the per-trail spring configs.
  const trails = [
    { x: trail1X, y: trail1Y, scale: 0.8, opacity: 0.6, delay: 0 },
    { x: trail2X, y: trail2Y, scale: 0.6, opacity: 0.4, delay: 0.05 },
    { x: trail3X, y: trail3Y, scale: 0.4, opacity: 0.2, delay: 0.1 },
  ];

  return (
    <>
      {/* Hide the native cursor everywhere this component is mounted. */}
      <style>{`
        body, a, button, input, select {
          cursor: none !important;
        }
      `}</style>

      {/* Trailing Dots (The "Dragging" effect) */}
      {trails.map((trail, i) => (
        <motion.div
          key={i}
          className="fixed top-0 left-0 bg-brand rounded-full pointer-events-none z-[9996]"
          style={{
            width: 12,
            height: 12,
            x: trail.x,
            y: trail.y,
            scale: trail.scale,
            opacity: trail.opacity,
            translateX: '-50%',
            translateY: '-50%',
          }}
        />
      ))}

      {/* Main Lead Dot — tracks the raw cursor position with no lag. */}
      <motion.div
        className="fixed top-0 left-0 w-3 h-3 bg-brand rounded-full pointer-events-none z-[9999] mix-blend-difference"
        style={{
          x: cursorX,
          y: cursorY,
          translateX: '-50%',
          translateY: '-50%',
        }}
      />

      {/* Pulsing Outer Ring — follows the first (tightest) trail spring and
          swells/tints when hovering something clickable. */}
      <motion.div
        className="fixed top-0 left-0 w-10 h-10 border-2 border-brand/40 rounded-full pointer-events-none z-[9998]"
        animate={{
          scale: isHovering ? 1.6 : 1,
          borderColor: isHovering ? 'rgba(59, 130, 246, 0.8)' : 'rgba(59, 130, 246, 0.3)',
          backgroundColor: isHovering ? 'rgba(59, 130, 246, 0.15)' : 'rgba(0,0,0,0)',
        }}
        style={{
          x: trail1X,
          y: trail1Y,
          translateX: '-50%',
          translateY: '-50%',
        }}
        transition={{ type: 'spring', damping: 20, stiffness: 150 }}
      />

      {/* Technical HUD Crosshair (Centered on lead) */}
      <motion.div
        className="fixed top-0 left-0 pointer-events-none z-[9997]"
        style={{
          x: cursorX,
          y: cursorY,
          translateX: '-50%',
          translateY: '-50%',
        }}
      >
        <div className="absolute top-1/2 left-1/2 -translate-x-1/2 -translate-y-1/2 w-8 h-[1px] bg-brand/30"></div>
        <div className="absolute top-1/2 left-1/2 -translate-x-1/2 -translate-y-1/2 h-8 w-[1px] bg-brand/30"></div>
      </motion.div>
    </>
  );
};
|
src/components/ui/GlassCard.tsx
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React from 'react';
|
| 2 |
+
import { motion } from 'framer-motion';
|
| 3 |
+
|
| 4 |
+
interface GlassCardProps {
|
| 5 |
+
children: React.ReactNode;
|
| 6 |
+
className?: string;
|
| 7 |
+
animate?: any;
|
| 8 |
+
transition?: any;
|
| 9 |
+
}
|
| 10 |
+
|
| 11 |
+
export const GlassCard: React.FC<GlassCardProps> = ({ children, className = '', ...props }) => {
|
| 12 |
+
return (
|
| 13 |
+
<motion.div
|
| 14 |
+
{...props}
|
| 15 |
+
className={`
|
| 16 |
+
bg-white/70 backdrop-blur-xl border border-white/40
|
| 17 |
+
shadow-[0_8px_32px_0_rgba(108,99,255,0.1)]
|
| 18 |
+
rounded-3xl p-8 ${className}
|
| 19 |
+
`}
|
| 20 |
+
>
|
| 21 |
+
{children}
|
| 22 |
+
</motion.div>
|
| 23 |
+
);
|
| 24 |
+
};
|
src/components/ui/Input.tsx
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import React, { useState } from 'react';
import { motion, AnimatePresence } from 'framer-motion';
import { Eye, EyeOff } from 'lucide-react';

interface InputProps extends React.InputHTMLAttributes<HTMLInputElement> {
  /** Floating label text (also used as the visually hidden placeholder). */
  label: string;
  /** Validation message animated in below the field when present. */
  error?: string;
  type?: string;
}

/**
 * Glassmorphism text input with a floating label, an animated error
 * message, and a show/hide toggle for password fields.
 */
export const Input: React.FC<InputProps> = ({ label, error, type = 'text', className = '', ...props }) => {
  const [isFocused, setIsFocused] = useState(false);
  const [showPassword, setShowPassword] = useState(false);
  // BUG FIX: track content for *uncontrolled* usage. The original floated
  // the label only on `isFocused || props.value`, so an uncontrolled input
  // (typed text, or a defaultValue) dropped the label back over the entered
  // text as soon as the field blurred.
  const [hasContent, setHasContent] = useState(Boolean(props.value ?? props.defaultValue));
  const isPassword = type === 'password';
  // Label floats while focused, or whenever the field has content
  // (controlled via props.value, uncontrolled via hasContent).
  const isFloating = isFocused || hasContent || Boolean(props.value);

  return (
    <div className={`relative w-full mb-6 ${className}`}>
      <div className={`
        relative flex items-center transition-all duration-300 rounded-2xl border
        ${isFocused ? 'border-brand ring-4 ring-brand/10' : 'border-slate-200'}
        bg-white/60 backdrop-blur-md overflow-hidden
      `}>
        <input
          {...props}
          // Password fields render as plain text while the eye toggle is on.
          type={isPassword && showPassword ? 'text' : type}
          onFocus={(e) => {
            setIsFocused(true);
            props.onFocus?.(e);  // preserve any caller-supplied handler
          }}
          onBlur={(e) => {
            setIsFocused(false);
            props.onBlur?.(e);
          }}
          onChange={(e) => {
            setHasContent(e.target.value.length > 0);
            props.onChange?.(e);
          }}
          className="peer w-full px-4 py-3 bg-transparent text-slate-800 outline-none placeholder-transparent"
          placeholder={label}
        />

        <label className={`
          absolute left-4 transition-all duration-300 pointer-events-none text-slate-400
          ${isFloating ? '-top-2 text-xs text-brand font-bold bg-white px-2 rounded-full shadow-sm' : 'top-3.5 text-base'}
        `}>
          {label}
        </label>

        {isPassword && (
          <button
            type="button"
            onClick={() => setShowPassword(!showPassword)}
            className="p-3 text-slate-400 hover:text-brand transition-colors"
          >
            {showPassword ? <EyeOff size={20} /> : <Eye size={20} />}
          </button>
        )}
      </div>

      <AnimatePresence>
        {error && (
          <motion.p
            initial={{ opacity: 0, y: -10 }}
            animate={{ opacity: 1, y: 0 }}
            exit={{ opacity: 0, y: -10 }}
            className="text-red-400 text-sm mt-1 px-2"
          >
            {error}
          </motion.p>
        )}
      </AnimatePresence>
    </div>
  );
};
|
src/components/ui/Mascot.tsx
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import React, { useEffect, useRef, useState } from 'react';
import { motion, useSpring, useTransform } from 'framer-motion';

interface MascotProps {
  // Which character to draw; each gets its own hand-built SVG.
  type: 'fox' | 'owl' | 'robot';
  // While true the mascot "covers its eyes" (fox), squints (owl),
  // or blanks its screen and shows "ENCRYPTED" (robot).
  isPasswordFocused: boolean;
  // Pointer position in viewport coordinates, supplied by the parent.
  mousePos: { x: number; y: number };
}

/**
 * Animated SVG mascot whose eyes (and whole body, slightly) follow the
 * mouse, with a special "don't look" pose while a password field is
 * focused.
 *
 * NOTE(review): `useTransform` is called inside the `switch` branches of
 * `renderMascot`, which runs during render — the set of hooks therefore
 * differs per `type`. This violates the Rules of Hooks if `type` ever
 * changes between renders; confirm `type` is effectively constant per
 * mount, or hoist the transforms.
 * NOTE(review): `useSpring(targetPos.x, …)` is seeded from state; whether
 * the spring retargets when `targetPos` changes depends on the installed
 * framer-motion version's handling of a changed numeric source — verify,
 * or drive it explicitly with `.set()` in the effect.
 */
export const Mascot: React.FC<MascotProps> = ({ type, isPasswordFocused, mousePos }) => {
  const containerRef = useRef<HTMLDivElement>(null);
  // Desired eye/body displacement (px), derived from the pointer below.
  const [targetPos, setTargetPos] = useState({ x: 0, y: 0 });

  useEffect(() => {
    if (containerRef.current) {
      // Vector from the mascot's center to the pointer.
      const rect = containerRef.current.getBoundingClientRect();
      const centerX = rect.left + rect.width / 2;
      const centerY = rect.top + rect.height / 2;

      // Calculate pixel distance for more responsive tracking
      const distX = mousePos.x - centerX;
      const distY = mousePos.y - centerY;

      // Max displacement: scale down by 30 and clamp to ±12px so the
      // mascot leans toward the pointer without leaving its spot.
      const maxMove = 12;
      const dx = Math.max(-maxMove, Math.min(maxMove, distX / 30));
      const dy = Math.max(-maxMove, Math.min(maxMove, distY / 30));

      setTargetPos({ x: dx, y: dy });
    }
  }, [mousePos]);

  // Smoothed displacement; all eye/body offsets below derive from these.
  const x = useSpring(targetPos.x, { stiffness: 100, damping: 25 });
  const y = useSpring(targetPos.y, { stiffness: 100, damping: 25 });

  // Returns the SVG for the selected character (see hooks caveat above).
  const renderMascot = () => {
    switch (type) {
      case 'fox':
        return (
          <svg viewBox="0 0 100 100" className="w-full h-full drop-shadow-2xl overflow-visible">
            <defs>
              {/* Clip the pupils so they cannot wander outside the eye whites. */}
              <clipPath id="fox-eye-left">
                <circle cx="40" cy="62" r="6" />
              </clipPath>
              <clipPath id="fox-eye-right">
                <circle cx="60" cy="62" r="6" />
              </clipPath>
            </defs>
            {/* Body — gentle breathing scale loop */}
            <motion.path
              d="M20 80 Q50 95 80 80 Q90 60 80 40 Q50 30 20 40 Q10 60 20 80"
              fill="#FF6B35"
              animate={{ scale: [1, 1.02, 1] }}
              transition={{ duration: 3, repeat: Infinity }}
            />
            {/* Ears */}
            <path d="M25 45 L15 20 L40 35" fill="#FF6B35" />
            <path d="M75 45 L85 20 L60 35" fill="#FF6B35" />

            {/* Face White */}
            <path d="M30 65 Q50 75 70 65 Q65 50 35 50 Z" fill="white" />

            {/* Eyes — squeeze shut (scaleY 0) while a password is focused */}
            <motion.g animate={isPasswordFocused ? { scaleY: 0, scaleX: 1.2 } : { scaleY: 1, scaleX: 1 }}>
              {/* Left Eye */}
              <circle cx="40" cy="62" r="6" fill="white" />
              <motion.circle
                cx={40} cy={62} r="3.5" fill="#1A1A1A"
                clipPath="url(#fox-eye-left)"
                style={{ x: useTransform(x, v => v * 0.4), y: useTransform(y, v => v * 0.4) }}
              />
              {/* Right Eye */}
              <circle cx="60" cy="62" r="6" fill="white" />
              <motion.circle
                cx={60} cy={62} r="3.5" fill="#1A1A1A"
                clipPath="url(#fox-eye-right)"
                style={{ x: useTransform(x, v => v * 0.4), y: useTransform(y, v => v * 0.4) }}
              />
            </motion.g>

            {/* Paws — spring up to cover the eyes while a password is focused */}
            <motion.g
              initial={{ y: 20, opacity: 0 }}
              animate={isPasswordFocused ? { y: 0, opacity: 1 } : { y: 20, opacity: 0 }}
              transition={{ type: 'spring', stiffness: 300, damping: 20 }}
            >
              <path d="M30 85 Q35 70 45 75" stroke="#FF6B35" strokeWidth="6" strokeLinecap="round" fill="none" />
              <path d="M70 85 Q65 70 55 75" stroke="#FF6B35" strokeWidth="6" strokeLinecap="round" fill="none" />
            </motion.g>
          </svg>
        );
      case 'owl':
        return (
          <svg viewBox="0 0 100 100" className="w-full h-full drop-shadow-2xl overflow-visible">
            <defs>
              <clipPath id="owl-eye-left">
                <circle cx="35" cy="50" r="15" />
              </clipPath>
              <clipPath id="owl-eye-right">
                <circle cx="65" cy="50" r="15" />
              </clipPath>
            </defs>
            {/* Body — slow bobbing motion */}
            <motion.ellipse
              cx="50" cy="60" rx="35" ry="30" fill="#4A4E69"
              animate={{ y: [0, -2, 0] }}
              transition={{ duration: 4, repeat: Infinity }}
            />
            {/* Big Eyes Shell */}
            <circle cx="35" cy="50" r="15" fill="white" />
            <circle cx="65" cy="50" r="15" fill="white" />

            {/* Pupils — squint (scaleY 0.1) during password entry */}
            <motion.g animate={isPasswordFocused ? { scaleY: 0.1 } : { scaleY: 1 }}>
              <motion.circle
                cx="35" cy="50" r="9" fill="#22223B"
                clipPath="url(#owl-eye-left)"
                style={{ x: useTransform(x, v => v * 0.6), y: useTransform(y, val => val * 0.8) }}
              />
              <motion.circle
                cx="65" cy="50" r="9" fill="#22223B"
                clipPath="url(#owl-eye-right)"
                style={{ x: useTransform(x, v => v * 0.6), y: useTransform(y, val => val * 0.8) }}
              />
            </motion.g>

            {/* Beak */}
            <path d="M45 60 L50 70 L55 60" fill="#F4A261" />

            {/* Wings — morph inward (toward the face) during password entry */}
            <motion.path
              d="M15 60 Q0 40 35 45"
              fill="none" stroke="#4A4E69" strokeWidth="8" strokeLinecap="round"
              animate={isPasswordFocused ? { d: "M15 60 Q20 30 40 45" } : { d: "M15 60 Q0 40 35 45" }}
            />
            <motion.path
              d="M85 60 Q100 40 65 45"
              fill="none" stroke="#4A4E69" strokeWidth="8" strokeLinecap="round"
              animate={isPasswordFocused ? { d: "M85 60 Q80 30 60 45" } : { d: "M85 60 Q100 40 65 45" }}
            />
          </svg>
        );
      case 'robot':
        return (
          <svg viewBox="0 0 100 100" className="w-full h-full drop-shadow-2xl overflow-visible">
            <defs>
              {/* Keep the eye rects inside the screen bezel. */}
              <clipPath id="robot-screen">
                <rect x="25" y="35" width="50" height="40" rx="8" />
              </clipPath>
            </defs>
            {/* Case (Midnight Slate) */}
            <defs>
              <radialGradient id="robot-mascot-shine" cx="50%" cy="40%" r="50%" fx="50%" fy="40%">
                <stop offset="0%" stopColor="#334155" />
                <stop offset="100%" stopColor="#1E293B" />
              </radialGradient>
            </defs>
            <motion.rect
              x="20" y="30" width="60" height="50" rx="12"
              fill="url(#robot-mascot-shine)"
              stroke="#0F172A" strokeWidth="2"
              animate={{ rotate: [-0.5, 0.5, -0.5] }}
              transition={{ duration: 2, repeat: Infinity, ease: "easeInOut" }}
            />
            <rect x="25" y="35" width="50" height="40" rx="8" fill="#020617" />

            {/* Screen Eyes (Proper SVG Rects) — hidden while a password is focused;
                the height keyframes produce a periodic "blink". */}
            <motion.g
              clipPath="url(#robot-screen)"
              animate={isPasswordFocused ? { opacity: 0 } : { opacity: 1 }}
            >
              {/* Left Eye */}
              <motion.rect
                x="35" y="48" width="8" height="12" rx="2" fill="#64748B"
                style={{ x: useTransform(x, v => v * 0.4), y: useTransform(y, v => v * 0.4) }}
                animate={{ height: [12, 1, 12] }}
                transition={{ times: [0, 0.05, 0.1], duration: 3, repeat: Infinity, repeatDelay: 2 }}
              />
              {/* Right Eye */}
              <motion.rect
                x="57" y="48" width="8" height="12" rx="2" fill="#64748B"
                style={{ x: useTransform(x, v => v * 0.4), y: useTransform(y, v => v * 0.4) }}
                animate={{ height: [12, 1, 12] }}
                transition={{ times: [0, 0.05, 0.1], duration: 3, repeat: Infinity, repeatDelay: 2 }}
              />
            </motion.g>

            {/* "ENCRYPTED" readout — fades in only during password entry */}
            <motion.text
              x="50" y="60" textAnchor="middle" fill="#EF4444" fontSize="10" fontWeight="black"
              initial={{ opacity: 0 }}
              animate={isPasswordFocused ? { opacity: 1 } : { opacity: 0 }}
            >
              ENCRYPTED
            </motion.text>

            {/* Antenna with a pulsing tip light */}
            <rect x="48" y="15" width="4" height="15" fill="#94A3B8" />
            <motion.circle
              cx="50" cy="15" r="4" fill="#64748B"
              animate={{ opacity: [1, 0.4, 1], scale: [1, 1.2, 1] }}
              transition={{ duration: 1.5, repeat: Infinity }}
            />
          </svg>
        );
    }
  };

  return (
    <motion.div
      ref={containerRef}
      className="w-32 h-32 lg:w-48 lg:h-48 flex items-center justify-center pointer-events-none"
      // The whole mascot drifts and tilts slightly toward the pointer,
      // scaled down relative to the eye movement.
      style={{
        x: useTransform(x, v => v * 0.6),
        y: useTransform(y, v => v * 0.6),
        rotate: useTransform(x, v => v * 0.3),
      }}
      initial={{ scale: 0.9, opacity: 0 }}
      animate={{ scale: 1, opacity: 1 }}
      transition={{ duration: 0.5, ease: "easeOut" }}
    >
      {renderMascot()}
    </motion.div>
  );
};
|