Deploy ApplyMap Docker Space
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .dockerignore +14 -0
- .gitattributes +1 -0
- Dockerfile +49 -0
- README.md +9 -9
- apps/api/Dockerfile +12 -0
- apps/api/alembic.ini +41 -0
- apps/api/alembic/env.py +57 -0
- apps/api/alembic/versions/001_initial.py +212 -0
- apps/api/requirements.txt +19 -0
- apps/api/src/__init__.py +0 -0
- apps/api/src/config.py +30 -0
- apps/api/src/database.py +23 -0
- apps/api/src/main.py +47 -0
- apps/api/src/models/__init__.py +26 -0
- apps/api/src/models/achievement.py +86 -0
- apps/api/src/models/report.py +133 -0
- apps/api/src/models/university.py +84 -0
- apps/api/src/models/user.py +77 -0
- apps/api/src/repositories/__init__.py +0 -0
- apps/api/src/routes/__init__.py +0 -0
- apps/api/src/routes/achievements.py +309 -0
- apps/api/src/routes/admin.py +184 -0
- apps/api/src/routes/auth.py +122 -0
- apps/api/src/routes/profile.py +67 -0
- apps/api/src/routes/reports.py +281 -0
- apps/api/src/routes/universities.py +217 -0
- apps/api/src/schema_maintenance.py +90 -0
- apps/api/src/schemas/__init__.py +17 -0
- apps/api/src/schemas/achievement.py +120 -0
- apps/api/src/schemas/report.py +111 -0
- apps/api/src/schemas/university.py +181 -0
- apps/api/src/schemas/user.py +116 -0
- apps/api/src/services/__init__.py +0 -0
- apps/api/src/services/achievement_import_service.py +824 -0
- apps/api/src/services/auth_service.py +76 -0
- apps/api/src/services/chancellor_analysis.py +301 -0
- apps/api/src/services/counselor_knowledge.py +70 -0
- apps/api/src/services/optimization_engine.py +420 -0
- apps/api/src/services/report_advisor.py +273 -0
- apps/api/src/services/rewrite_service.py +379 -0
- apps/api/src/services/university_advisor.py +280 -0
- apps/api/src/services/university_filters.py +97 -0
- apps/api/src/services/university_recommender.py +248 -0
- apps/web/Dockerfile +23 -0
- apps/web/components.json +16 -0
- apps/web/next-env.d.ts +5 -0
- apps/web/next.config.mjs +23 -0
- apps/web/package.json +57 -0
- apps/web/postcss.config.js +6 -0
- apps/web/public/applymap-logo.png +3 -0
.dockerignore
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.turbo
|
| 3 |
+
node_modules
|
| 4 |
+
apps/web/node_modules
|
| 5 |
+
apps/web/.next
|
| 6 |
+
apps/api/.venv
|
| 7 |
+
apps/api/venv
|
| 8 |
+
apps/api/__pycache__
|
| 9 |
+
apps/api/src/**/__pycache__
|
| 10 |
+
apps/api/alembic/**/__pycache__
|
| 11 |
+
*.log
|
| 12 |
+
.env
|
| 13 |
+
.env.*
|
| 14 |
+
!.env.example
|
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
apps/web/public/applymap-logo.png filter=lfs diff=lfs merge=lfs -text
|
Dockerfile
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1
FROM node:20-bookworm-slim

# Runtime configuration for the single-container Space (Next.js web + FastAPI API).
# NOTE: DEBIAN_FRONTEND is deliberately NOT set here — baking it into ENV leaks a
# build-time flag into every running container; it is scoped to the apt RUN below.
ENV NEXT_TELEMETRY_DISABLED=1 \
    PORT=7860 \
    NEXT_PUBLIC_API_URL=/ \
    INTERNAL_API_URL=http://127.0.0.1:8000

# OS packages: toolchain, libpq headers, and Python for the FastAPI backend that
# runs alongside the Node frontend. update+install in one layer; clean apt lists
# in the same layer so the cache is never baked into the image.
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
    && apt-get install -y --no-install-recommends \
        bash \
        build-essential \
        ca-certificates \
        curl \
        libpq-dev \
        python3 \
        python3-pip \
        python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Unprivileged user; uid 1000 is the Hugging Face Spaces convention.
RUN useradd -m -u 1000 user

WORKDIR /home/user/app

# Copy dependency manifests before the rest of the source so the install layers
# stay cached until a lockfile actually changes.
COPY --chown=user:user package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY --chown=user:user apps/web/package.json apps/web/package.json

RUN corepack enable \
    && corepack prepare pnpm@8.15.0 --activate \
    && pnpm install --filter @applymap/web... --frozen-lockfile

COPY --chown=user:user apps/api/requirements.txt apps/api/requirements.txt

RUN python3 -m venv /home/user/venv \
    && /home/user/venv/bin/pip install --no-cache-dir --upgrade pip \
    && /home/user/venv/bin/pip install --no-cache-dir -r apps/api/requirements.txt

# Application source — the most frequently changing layers go last.
COPY --chown=user:user apps/web apps/web
COPY --chown=user:user apps/api apps/api
COPY --chown=user:user start.sh start.sh

RUN chmod +x start.sh \
    && pnpm --filter @applymap/web build

USER user

# Documentation only; the Space routes traffic to $PORT (7860).
EXPOSE 7860

CMD ["./start.sh"]
README.md
CHANGED
|
@@ -1,13 +1,13 @@
|
|
| 1 |
---
|
| 2 |
title: ApplyMap
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk:
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
pinned: false
|
| 10 |
-
short_description: Program designed to help students to apply for universities
|
| 11 |
---
|
| 12 |
|
| 13 |
-
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
title: ApplyMap
|
| 3 |
+
emoji: 🧭
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: green
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
startup_duration_timeout: 30m
|
|
|
|
|
|
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# ApplyMap
|
| 12 |
+
|
| 13 |
+
Accessible admissions guidance platform for Kazakhstan-focused applicants.
|
apps/api/Dockerfile
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# syntax=docker/dockerfile:1
FROM python:3.11-slim

WORKDIR /app

# Install dependencies before copying source so this layer stays cached until
# requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Run as an unprivileged user (stable numeric uid so runAsNonRoot checks pass).
# Port 8000 is unprivileged, so no extra capabilities are needed.
RUN useradd --create-home --uid 10001 appuser
COPY --chown=appuser:appuser . .
USER appuser

# Documentation only; the container still needs -p/--publish at run time.
EXPOSE 8000

CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "8000"]
|
apps/api/alembic.ini
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[alembic]
|
| 2 |
+
script_location = alembic
|
| 3 |
+
prepend_sys_path = .
|
| 4 |
+
version_path_separator = os
|
| 5 |
+
sqlalchemy.url = postgresql://applymap:applymap@localhost:5432/applymap_db
|
| 6 |
+
|
| 7 |
+
[post_write_hooks]
|
| 8 |
+
|
| 9 |
+
[loggers]
|
| 10 |
+
keys = root,sqlalchemy,alembic
|
| 11 |
+
|
| 12 |
+
[handlers]
|
| 13 |
+
keys = console
|
| 14 |
+
|
| 15 |
+
[formatters]
|
| 16 |
+
keys = generic
|
| 17 |
+
|
| 18 |
+
[logger_root]
|
| 19 |
+
level = WARN
|
| 20 |
+
handlers = console
|
| 21 |
+
qualname =
|
| 22 |
+
|
| 23 |
+
[logger_sqlalchemy]
|
| 24 |
+
level = WARN
|
| 25 |
+
handlers =
|
| 26 |
+
qualname = sqlalchemy.engine
|
| 27 |
+
|
| 28 |
+
[logger_alembic]
|
| 29 |
+
level = INFO
|
| 30 |
+
handlers =
|
| 31 |
+
qualname = alembic
|
| 32 |
+
|
| 33 |
+
[handler_console]
|
| 34 |
+
class = StreamHandler
|
| 35 |
+
args = (sys.stderr,)
|
| 36 |
+
level = NOTSET
|
| 37 |
+
formatter = generic
|
| 38 |
+
|
| 39 |
+
[formatter_generic]
|
| 40 |
+
format = %(levelname)-5.5s [%(name)s] %(message)s
|
| 41 |
+
datefmt = %H:%M:%S
|
apps/api/alembic/env.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Alembic migration environment for the ApplyMap API."""
import os
import sys
from logging.config import fileConfig

from sqlalchemy import engine_from_config, pool

from alembic import context

# Make the app package importable (alembic/ lives next to src/).
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from src.database import Base
from src.models import *  # noqa: F401 - import all models so metadata is populated

config = context.config

# Prefer DATABASE_URL from the environment over the alembic.ini default.
database_url = os.environ.get("DATABASE_URL")
if database_url:
    config.set_main_option("sqlalchemy.url", database_url)

if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# All model tables registered on the shared declarative base.
target_metadata = Base.metadata


def run_migrations_offline() -> None:
    """Emit migration SQL as a script without connecting to a database."""
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url,
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )
    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online() -> None:
    """Run migrations against a live engine built from the alembic config."""
    connectable = engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
        )
        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
apps/api/alembic/versions/001_initial.py
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Initial schema

Revision ID: 001
Revises:
Create Date: 2024-01-01 00:00:00.000000

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

revision: str = "001"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None

# ENUM types created implicitly by sa.Enum(...) in upgrade(); downgrade() must
# drop them explicitly or a later re-upgrade fails with "type already exists".
_ENUM_TYPE_NAMES = (
    "userrole",
    "weightpreset",
    "sourcetype",
    "reliabilitytier",
    "achievementtype",
    "impactscope",
    "leadershiplevel",
    "reportstatus",
    "recommendationtype",
    "confidencelabel",
    "sourcesection",
)


def upgrade() -> None:
    """Create all application tables and their indexes."""
    # users
    op.create_table(
        "users",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("email", sa.String(255), unique=True, nullable=False),
        sa.Column("password_hash", sa.String(255), nullable=True),
        sa.Column("role", sa.Enum("student", "admin", name="userrole"), default="student", nullable=False),
        sa.Column("full_name", sa.String(255), nullable=True),
        sa.Column("country", sa.String(100), nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
        sa.Column("updated_at", sa.DateTime, nullable=False),
    )
    op.create_index("ix_users_email", "users", ["email"])

    # student_profiles
    op.create_table(
        "student_profiles",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), unique=True, nullable=False),
        sa.Column("graduation_year", sa.Integer, nullable=True),
        sa.Column("curriculum", sa.String(100), nullable=True),
        sa.Column("intended_major", sa.String(255), nullable=True),
        sa.Column("sat_score", sa.Integer, nullable=True),
        sa.Column("act_score", sa.Integer, nullable=True),
        sa.Column("ielts_score", sa.String(10), nullable=True),
        sa.Column("toefl_score", sa.Integer, nullable=True),
        sa.Column("budget_range", sa.String(100), nullable=True),
        sa.Column("aid_needed", sa.Boolean, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
        sa.Column("updated_at", sa.DateTime, nullable=False),
    )

    # universities
    op.create_table(
        "universities",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("slug", sa.String(100), unique=True, nullable=False),
        sa.Column("name", sa.String(255), nullable=False),
        sa.Column("country", sa.String(100), nullable=False),
        sa.Column("application_system", sa.String(100), nullable=True),
        sa.Column("short_description", sa.Text, nullable=True),
        sa.Column("weight_preset", sa.Enum("research_heavy", "leadership_heavy", "balanced_holistic", "community_service_heavy", name="weightpreset"), nullable=False),
        sa.Column("is_active", sa.Boolean, default=True, nullable=False),
        sa.Column("created_at", sa.DateTime, nullable=False),
        sa.Column("updated_at", sa.DateTime, nullable=False),
    )
    op.create_index("ix_universities_slug", "universities", ["slug"])

    # university_policy_entries
    op.create_table(
        "university_policy_entries",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("university_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("universities.id", ondelete="CASCADE"), nullable=False),
        sa.Column("title", sa.String(500), nullable=False),
        sa.Column("content", sa.Text, nullable=False),
        sa.Column("source_url", sa.String(1000), nullable=True),
        sa.Column("source_title", sa.String(500), nullable=True),
        sa.Column("source_type", sa.Enum("official", "public_example", name="sourcetype"), nullable=False),
        sa.Column("reliability_tier", sa.Enum("A", "B", "C", "D", name="reliabilitytier"), nullable=False),
        sa.Column("excerpt", sa.Text, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
        sa.Column("updated_at", sa.DateTime, nullable=False),
    )
    op.create_index("ix_university_policy_entries_university_id", "university_policy_entries", ["university_id"])

    # achievements
    op.create_table(
        "achievements",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False),
        sa.Column("type", sa.Enum("activity", "honor", name="achievementtype"), nullable=False),
        sa.Column("title", sa.String(500), nullable=False),
        sa.Column("organization_name", sa.String(255), nullable=True),
        sa.Column("role_title", sa.String(255), nullable=True),
        sa.Column("description_raw", sa.Text, nullable=True),
        sa.Column("category", sa.String(100), nullable=True),
        sa.Column("start_date", sa.Date, nullable=True),
        sa.Column("end_date", sa.Date, nullable=True),
        sa.Column("hours_per_week", sa.Float, nullable=True),
        sa.Column("weeks_per_year", sa.Integer, nullable=True),
        sa.Column("impact_scope", sa.Enum("school", "local", "regional", "national", "international", "family", "personal", name="impactscope"), nullable=True),
        sa.Column("leadership_level", sa.Enum("none", "member", "lead", "founder", "captain", name="leadershiplevel"), nullable=True),
        sa.Column("major_relevance_score", sa.Float, nullable=True),
        sa.Column("continuity_score", sa.Float, nullable=True),
        sa.Column("selectivity_score", sa.Float, nullable=True),
        sa.Column("distinctiveness_score", sa.Float, nullable=True),
        sa.Column("truth_risk_flag", sa.Boolean, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
        sa.Column("updated_at", sa.DateTime, nullable=False),
    )
    op.create_index("ix_achievements_user_id", "achievements", ["user_id"])

    # achievement_evidence_files
    op.create_table(
        "achievement_evidence_files",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("achievement_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False),
        sa.Column("file_url", sa.String(1000), nullable=False),
        sa.Column("file_name", sa.String(500), nullable=False),
        sa.Column("mime_type", sa.String(100), nullable=True),
        sa.Column("uploaded_at", sa.DateTime, nullable=False),
    )

    # target_universities
    op.create_table(
        "target_universities",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False),
        sa.Column("university_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("universities.id", ondelete="CASCADE"), nullable=False),
        sa.Column("priority_order", sa.Integer, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
    )
    op.create_index("ix_target_universities_user_id", "target_universities", ["user_id"])

    # optimization_reports
    op.create_table(
        "optimization_reports",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="CASCADE"), nullable=False),
        sa.Column("university_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("universities.id", ondelete="CASCADE"), nullable=False),
        sa.Column("status", sa.Enum("pending", "processing", "completed", "failed", name="reportstatus"), nullable=False),
        sa.Column("summary_text", sa.Text, nullable=True),
        sa.Column("version_number", sa.Integer, default=1, nullable=False),
        sa.Column("created_at", sa.DateTime, nullable=False),
        sa.Column("completed_at", sa.DateTime, nullable=True),
    )
    op.create_index("ix_optimization_reports_user_id", "optimization_reports", ["user_id"])

    # report_recommendations
    op.create_table(
        "report_recommendations",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("report_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("optimization_reports.id", ondelete="CASCADE"), nullable=False),
        sa.Column("achievement_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False),
        sa.Column("recommendation_type", sa.Enum("keep", "remove", "merge", "rewrite", "reorder", name="recommendationtype"), nullable=False),
        sa.Column("suggested_rank", sa.Integer, nullable=True),
        sa.Column("rationale", sa.Text, nullable=True),
        sa.Column("confidence_label", sa.Enum("low", "medium", "high", name="confidencelabel"), nullable=False),
        sa.Column("created_at", sa.DateTime, nullable=False),
    )

    # rewrite_variants
    op.create_table(
        "rewrite_variants",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("achievement_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False),
        sa.Column("report_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("optimization_reports.id", ondelete="CASCADE"), nullable=False),
        sa.Column("style_mode", sa.String(50), nullable=False),
        sa.Column("text", sa.Text, nullable=False),
        sa.Column("character_count", sa.Integer, nullable=False),
        sa.Column("is_recommended", sa.Boolean, default=False),
        sa.Column("explanation", sa.Text, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
    )

    # source_references
    op.create_table(
        "source_references",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("report_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("optimization_reports.id", ondelete="CASCADE"), nullable=False),
        sa.Column("university_policy_entry_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("university_policy_entries.id", ondelete="CASCADE"), nullable=False),
        sa.Column("section", sa.Enum("official_guidance", "public_examples", "recommendation_support", name="sourcesection"), nullable=False),
        sa.Column("note", sa.Text, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
    )

    # admin_audit_logs
    op.create_table(
        "admin_audit_logs",
        sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
        sa.Column("admin_user_id", postgresql.UUID(as_uuid=True), sa.ForeignKey("users.id", ondelete="SET NULL"), nullable=True),
        sa.Column("action", sa.String(255), nullable=False),
        sa.Column("entity_type", sa.String(100), nullable=True),
        sa.Column("entity_id", sa.String(255), nullable=True),
        sa.Column("metadata_json", postgresql.JSON, nullable=True),
        sa.Column("created_at", sa.DateTime, nullable=False),
    )


def downgrade() -> None:
    """Drop all tables (reverse dependency order) and their ENUM types."""
    op.drop_table("admin_audit_logs")
    op.drop_table("source_references")
    op.drop_table("rewrite_variants")
    op.drop_table("report_recommendations")
    op.drop_table("optimization_reports")
    op.drop_table("target_universities")
    op.drop_table("achievement_evidence_files")
    op.drop_table("achievements")
    op.drop_table("university_policy_entries")
    op.drop_table("universities")
    op.drop_table("student_profiles")
    op.drop_table("users")
    # Drop the PostgreSQL ENUM types that sa.Enum(...) created implicitly in
    # upgrade(); without this, re-running upgrade() after a downgrade fails.
    bind = op.get_bind()
    for enum_name in _ENUM_TYPE_NAMES:
        sa.Enum(name=enum_name).drop(bind, checkfirst=True)
apps/api/requirements.txt
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.111.0
|
| 2 |
+
uvicorn[standard]==0.29.0
|
| 3 |
+
sqlalchemy==2.0.30
|
| 4 |
+
alembic==1.13.1
|
| 5 |
+
psycopg2-binary==2.9.9
|
| 6 |
+
pydantic==2.7.1
|
| 7 |
+
pydantic-settings==2.2.1
|
| 8 |
+
python-jose[cryptography]==3.3.0
|
| 9 |
+
passlib[bcrypt]==1.7.4
|
| 10 |
+
bcrypt==4.0.1
|
| 11 |
+
python-multipart==0.0.9
|
| 12 |
+
httpx==0.27.0
|
| 13 |
+
email-validator==2.1.1
|
| 14 |
+
python-dotenv==1.0.1
|
| 15 |
+
aiofiles==23.2.1
|
| 16 |
+
Pillow==10.3.0
|
| 17 |
+
boto3==1.34.100
|
| 18 |
+
pdfplumber==0.11.4
|
| 19 |
+
python-docx==1.1.2
|
apps/api/src/__init__.py
ADDED
|
File without changes
|
apps/api/src/config.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Application settings loaded from the environment (and .env in development)."""
from pydantic_settings import BaseSettings
from typing import List


class Settings(BaseSettings):
    """Central configuration; every field can be overridden via environment variables."""

    # Database / auth
    DATABASE_URL: str = "postgresql://applymap:applymap@localhost:5432/applymap_db"
    SECRET_KEY: str = "dev-secret-key-change-in-production"  # override in production
    ALGORITHM: str = "HS256"
    ACCESS_TOKEN_EXPIRE_MINUTES: int = 60
    REFRESH_TOKEN_EXPIRE_DAYS: int = 30
    BACKEND_CORS_ORIGINS: List[str] = ["http://localhost:3000"]

    # AI Chancellor
    GEMINI_API_KEY: str = ""
    GEMINI_MODEL: str = "gemini-2.5-flash"
    GOOGLE_SEARCH_API_KEY: str = ""
    GOOGLE_SEARCH_ENGINE_ID: str = ""

    # S3 / Storage
    S3_BUCKET_NAME: str = ""
    AWS_ACCESS_KEY_ID: str = ""
    AWS_SECRET_ACCESS_KEY: str = ""
    AWS_REGION: str = "us-east-1"

    class Config:
        env_file = ".env"
        extra = "ignore"  # tolerate unrelated variables in the environment


# Module-level singleton used throughout the app.
settings = Settings()
apps/api/src/database.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""SQLAlchemy engine, session factory, and declarative base for the API."""
from sqlalchemy import create_engine
# declarative_base moved to sqlalchemy.orm in 1.4/2.0; the old
# sqlalchemy.ext.declarative location is deprecated (requirements pin 2.0.30).
from sqlalchemy.orm import declarative_base, sessionmaker

from .config import settings

# pool_pre_ping transparently recycles stale pooled connections.
engine = create_engine(
    settings.DATABASE_URL,
    pool_pre_ping=True,
    pool_size=10,
    max_overflow=20,
)

# Session factory; each request obtains its own session via get_db().
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Declarative base shared by all ORM models.
Base = declarative_base()


def get_db():
    """FastAPI dependency: yield a DB session and always close it afterwards."""
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
apps/api/src/main.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""FastAPI application entry point for the ApplyMap API."""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse

from .config import settings
from .database import Base, engine
from .routes import auth, profile, achievements, universities, reports, admin
from .schema_maintenance import ensure_application_schema

# Create tables on startup (use Alembic migrations in production)
Base.metadata.create_all(bind=engine)
ensure_application_schema()

app = FastAPI(
    title="ApplyMap API",
    description="Source-backed Common App optimization for international applicants",
    version="0.1.0",
    docs_url="/docs",
    redoc_url="/redoc",
)

# CORS — origins come from settings so deployments can widen the allow-list.
app.add_middleware(
    CORSMiddleware,
    allow_origins=settings.BACKEND_CORS_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Register every feature router on the app.
for _module in (auth, profile, achievements, universities, reports, admin):
    app.include_router(_module.router)


@app.get("/health")
def health_check():
    """Liveness probe used by the container orchestrator."""
    return {"status": "ok", "service": "applymap-api"}


@app.get("/")
def root():
    """Landing endpoint pointing callers at the interactive docs."""
    return {"message": "ApplyMap API", "docs": "/docs"}
apps/api/src/models/__init__.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Aggregate all ORM models so importing this package populates Base.metadata."""
from .user import User, StudentProfile
from .achievement import Achievement, AchievementEvidenceFile
from .university import University, UniversityPolicyEntry
from .report import (
    TargetUniversity,
    OptimizationReport,
    ReportRecommendation,
    RewriteVariant,
    SourceReference,
    AdminAuditLog,
)

# Public surface of the models package (also what `from src.models import *` exports).
__all__ = [
    "User",
    "StudentProfile",
    "Achievement",
    "AchievementEvidenceFile",
    "University",
    "UniversityPolicyEntry",
    "TargetUniversity",
    "OptimizationReport",
    "ReportRecommendation",
    "RewriteVariant",
    "SourceReference",
    "AdminAuditLog",
]
apps/api/src/models/achievement.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
from datetime import datetime, date
|
| 3 |
+
from sqlalchemy import Column, String, Boolean, DateTime, Date, ForeignKey, Enum, Integer, Float, Text
|
| 4 |
+
from sqlalchemy.dialects.postgresql import UUID
|
| 5 |
+
from sqlalchemy.orm import relationship
|
| 6 |
+
import enum
|
| 7 |
+
|
| 8 |
+
from ..database import Base
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class AchievementType(str, enum.Enum):
|
| 12 |
+
activity = "activity"
|
| 13 |
+
honor = "honor"
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class ImpactScope(str, enum.Enum):
|
| 17 |
+
school = "school"
|
| 18 |
+
local = "local"
|
| 19 |
+
regional = "regional"
|
| 20 |
+
national = "national"
|
| 21 |
+
international = "international"
|
| 22 |
+
family = "family"
|
| 23 |
+
personal = "personal"
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class LeadershipLevel(str, enum.Enum):
|
| 27 |
+
none = "none"
|
| 28 |
+
member = "member"
|
| 29 |
+
lead = "lead"
|
| 30 |
+
founder = "founder"
|
| 31 |
+
captain = "captain"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class Achievement(Base):
    """One student activity or honor, plus the derived "Chancellor" scores.

    Owned by a single user (row is removed when the user is deleted via the
    CASCADE foreign key). Score columns are nullable because they are computed
    after creation.
    """

    __tablename__ = "achievements"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    user_id = Column(UUID(as_uuid=True), ForeignKey("users.id", ondelete="CASCADE"), nullable=False, index=True)

    # Core descriptive fields supplied by the student.
    type = Column(Enum(AchievementType), nullable=False)
    title = Column(String(500), nullable=False)
    organization_name = Column(String(255), nullable=True)
    role_title = Column(String(255), nullable=True)
    description_raw = Column(Text, nullable=True)
    category = Column(String(100), nullable=True)  # e.g. "Science", "Arts", "Community Service"

    # Time commitment, Common-App style.
    start_date = Column(Date, nullable=True)
    end_date = Column(Date, nullable=True)
    hours_per_week = Column(Float, nullable=True)
    weeks_per_year = Column(Integer, nullable=True)

    impact_scope = Column(Enum(ImpactScope), nullable=True)
    leadership_level = Column(Enum(LeadershipLevel), nullable=True)

    # Computed Chancellor scores (0-10)
    major_relevance_score = Column(Float, nullable=True)
    continuity_score = Column(Float, nullable=True)
    selectivity_score = Column(Float, nullable=True)
    distinctiveness_score = Column(Float, nullable=True)

    # Set when the claim looks potentially unverifiable/exaggerated.
    truth_risk_flag = Column(Boolean, nullable=True, default=False)

    # NOTE(review): datetime.utcnow is deprecated in Python 3.12+; a later
    # migration to timezone-aware timestamps should touch all models at once.
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    # Relationships
    user = relationship("User", back_populates="achievements")
    evidence_files = relationship("AchievementEvidenceFile", back_populates="achievement", cascade="all, delete-orphan")
    recommendations = relationship("ReportRecommendation", back_populates="achievement", cascade="all, delete-orphan")
    rewrite_variants = relationship("RewriteVariant", back_populates="achievement", cascade="all, delete-orphan")
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class AchievementEvidenceFile(Base):
    """A file uploaded as evidence for an achievement (certificate, photo, ...).

    Carries both the achievement FK and a denormalized user FK so ownership
    checks do not need a join; both cascade-delete with their parent rows.
    """

    __tablename__ = "achievement_evidence_files"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    achievement_id = Column(UUID(as_uuid=True), ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False)
    user_id = Column(UUID(as_uuid=True), ForeignKey("users.id", ondelete="CASCADE"), nullable=False)
    file_url = Column(String(1000), nullable=False)
    file_name = Column(String(500), nullable=False)
    mime_type = Column(String(100), nullable=True)
    uploaded_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    # Relationships
    achievement = relationship("Achievement", back_populates="evidence_files")
    user = relationship("User", back_populates="evidence_files")
|
apps/api/src/models/report.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey, Enum, Integer, Text, JSON
|
| 4 |
+
from sqlalchemy.dialects.postgresql import UUID
|
| 5 |
+
from sqlalchemy.orm import relationship
|
| 6 |
+
import enum
|
| 7 |
+
|
| 8 |
+
from ..database import Base
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ReportStatus(str, enum.Enum):
    """Lifecycle state of an optimization report generation job."""

    pending = "pending"
    processing = "processing"
    completed = "completed"
    failed = "failed"


class RecommendationType(str, enum.Enum):
    """Action the report suggests for a single achievement."""

    keep = "keep"
    remove = "remove"
    merge = "merge"
    rewrite = "rewrite"
    reorder = "reorder"


class ConfidenceLabel(str, enum.Enum):
    """Qualitative confidence attached to a recommendation."""

    low = "low"
    medium = "medium"
    high = "high"


class SourceSection(str, enum.Enum):
    """Which part of a report a cited source supports."""

    official_guidance = "official_guidance"
    public_examples = "public_examples"
    recommendation_support = "recommendation_support"
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class TargetUniversity(Base):
    """Join row: a university on a student's application list.

    ``fit_category`` is a free string defaulting to "target" (presumably
    safety/target/reach — confirm against the web client).
    """

    __tablename__ = "target_universities"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    user_id = Column(UUID(as_uuid=True), ForeignKey("users.id", ondelete="CASCADE"), nullable=False, index=True)
    university_id = Column(UUID(as_uuid=True), ForeignKey("universities.id", ondelete="CASCADE"), nullable=False)
    priority_order = Column(Integer, nullable=True)
    fit_category = Column(String(20), default="target", nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    # Relationships
    user = relationship("User", back_populates="target_universities")
    university = relationship("University", back_populates="target_universities")
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class OptimizationReport(Base):
    """A generated activities-list optimization report for one (user, university) pair.

    Child rows (recommendations, rewrite variants, source references) are
    delete-orphan cascaded, so dropping a report cleans up its output.
    """

    __tablename__ = "optimization_reports"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    user_id = Column(UUID(as_uuid=True), ForeignKey("users.id", ondelete="CASCADE"), nullable=False, index=True)
    university_id = Column(UUID(as_uuid=True), ForeignKey("universities.id", ondelete="CASCADE"), nullable=False)
    status = Column(Enum(ReportStatus), default=ReportStatus.pending, nullable=False)
    summary_text = Column(Text, nullable=True)
    # Frozen advisor inputs at generation time, so the report stays reproducible.
    advisor_snapshot_json = Column(JSON, nullable=True)
    version_number = Column(Integer, default=1, nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    completed_at = Column(DateTime, nullable=True)

    # Relationships
    user = relationship("User", back_populates="reports")
    university = relationship("University", back_populates="reports")
    recommendations = relationship("ReportRecommendation", back_populates="report", cascade="all, delete-orphan")
    rewrite_variants = relationship("RewriteVariant", back_populates="report", cascade="all, delete-orphan")
    source_references = relationship("SourceReference", back_populates="report", cascade="all, delete-orphan")
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class ReportRecommendation(Base):
    """One suggested action (keep/remove/merge/rewrite/reorder) for an achievement in a report."""

    __tablename__ = "report_recommendations"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    report_id = Column(UUID(as_uuid=True), ForeignKey("optimization_reports.id", ondelete="CASCADE"), nullable=False)
    achievement_id = Column(UUID(as_uuid=True), ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False)
    recommendation_type = Column(Enum(RecommendationType), nullable=False)
    # Only meaningful for reorder-style recommendations.
    suggested_rank = Column(Integer, nullable=True)
    rationale = Column(Text, nullable=True)
    confidence_label = Column(Enum(ConfidenceLabel), nullable=False, default=ConfidenceLabel.medium)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    # Relationships
    report = relationship("OptimizationReport", back_populates="recommendations")
    achievement = relationship("Achievement", back_populates="recommendations")
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class RewriteVariant(Base):
    """A candidate rewritten description for an achievement, produced for a report."""

    __tablename__ = "rewrite_variants"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    achievement_id = Column(UUID(as_uuid=True), ForeignKey("achievements.id", ondelete="CASCADE"), nullable=False)
    report_id = Column(UUID(as_uuid=True), ForeignKey("optimization_reports.id", ondelete="CASCADE"), nullable=False)
    style_mode = Column(String(50), nullable=False)  # factual, impact_first, understated
    text = Column(Text, nullable=False)
    # Cached length so the UI can show character budgets without recomputing.
    character_count = Column(Integer, nullable=False)
    is_recommended = Column(Boolean, default=False)
    explanation = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    # Relationships
    achievement = relationship("Achievement", back_populates="rewrite_variants")
    report = relationship("OptimizationReport", back_populates="rewrite_variants")
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class SourceReference(Base):
    """Link between a report section and the university policy entry that backs it."""

    __tablename__ = "source_references"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    report_id = Column(UUID(as_uuid=True), ForeignKey("optimization_reports.id", ondelete="CASCADE"), nullable=False)
    university_policy_entry_id = Column(UUID(as_uuid=True), ForeignKey("university_policy_entries.id", ondelete="CASCADE"), nullable=False)
    section = Column(Enum(SourceSection), nullable=False)
    note = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)

    # Relationships
    report = relationship("OptimizationReport", back_populates="source_references")
    policy_entry = relationship("UniversityPolicyEntry", back_populates="source_references")
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class AdminAuditLog(Base):
    """Append-only record of admin actions.

    ``admin_user_id`` is SET NULL on user deletion so the audit trail survives
    account removal; ``entity_type``/``entity_id`` identify the affected row.
    """

    __tablename__ = "admin_audit_logs"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    admin_user_id = Column(UUID(as_uuid=True), ForeignKey("users.id", ondelete="SET NULL"), nullable=True)
    action = Column(String(255), nullable=False)
    entity_type = Column(String(100), nullable=True)
    entity_id = Column(String(255), nullable=True)
    metadata_json = Column(JSON, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
|
apps/api/src/models/university.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey, Enum, Text, Integer, JSON
|
| 4 |
+
from sqlalchemy.dialects.postgresql import UUID
|
| 5 |
+
from sqlalchemy.orm import relationship
|
| 6 |
+
import enum
|
| 7 |
+
|
| 8 |
+
from ..database import Base
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class WeightPreset(str, enum.Enum):
    """Named weighting profile used when scoring achievements for a university."""

    research_heavy = "research_heavy"
    leadership_heavy = "leadership_heavy"
    balanced_holistic = "balanced_holistic"
    community_service_heavy = "community_service_heavy"


class SourceType(str, enum.Enum):
    """Provenance of a policy entry: official guidance vs. a public example."""

    official = "official"
    public_example = "public_example"


class ReliabilityTier(str, enum.Enum):
    """Editorial reliability grade for a source, A (best) through D."""

    A = "A"
    B = "B"
    C = "C"
    D = "D"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class University(Base):
    """A university the platform knows about, with admissions and financial-aid metadata."""

    __tablename__ = "universities"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    slug = Column(String(100), unique=True, nullable=False, index=True)
    name = Column(String(255), nullable=False)
    country = Column(String(100), nullable=False)
    application_system = Column(String(100), nullable=True)  # CommonApp, Coalition, etc.
    application_source_url = Column(String(1000), nullable=True)
    short_description = Column(Text, nullable=True)
    weight_preset = Column(Enum(WeightPreset), nullable=False, default=WeightPreset.balanced_holistic)
    region = Column(String(100), nullable=True)
    city = Column(String(255), nullable=True)
    is_common_app = Column(Boolean, default=False, nullable=False)
    # JSON lists — exact element schema defined by the import/admin tooling.
    teaching_languages = Column(JSON, nullable=True)
    major_strengths = Column(JSON, nullable=True)
    education_years_required = Column(Integer, nullable=True)
    school_years_note = Column(Text, nullable=True)

    # Financial-aid metadata.
    aid_type = Column(String(100), nullable=True)
    aid_strength = Column(Integer, nullable=True)
    selectivity_score = Column(Integer, nullable=True)
    full_ride_possible = Column(Boolean, default=False, nullable=False)
    full_tuition_possible = Column(Boolean, default=False, nullable=False)
    aid_notes = Column(Text, nullable=True)
    funding_source_url = Column(String(1000), nullable=True)
    funding_source_title = Column(String(500), nullable=True)
    eligibility_notes = Column(Text, nullable=True)

    # Soft-delete / visibility switch.
    is_active = Column(Boolean, default=True, nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    # Relationships
    policy_entries = relationship("UniversityPolicyEntry", back_populates="university", cascade="all, delete-orphan")
    target_universities = relationship("TargetUniversity", back_populates="university")
    reports = relationship("OptimizationReport", back_populates="university")
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
class UniversityPolicyEntry(Base):
    """A curated piece of admissions guidance for one university, with provenance."""

    __tablename__ = "university_policy_entries"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    university_id = Column(UUID(as_uuid=True), ForeignKey("universities.id", ondelete="CASCADE"), nullable=False, index=True)
    title = Column(String(500), nullable=False)
    content = Column(Text, nullable=False)
    source_url = Column(String(1000), nullable=True)
    source_title = Column(String(500), nullable=True)
    source_type = Column(Enum(SourceType), nullable=False)
    reliability_tier = Column(Enum(ReliabilityTier), nullable=False, default=ReliabilityTier.B)
    excerpt = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    # Relationships
    university = relationship("University", back_populates="policy_entries")
    source_references = relationship("SourceReference", back_populates="policy_entry")
|
apps/api/src/models/user.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import uuid
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from sqlalchemy import Column, String, Boolean, DateTime, ForeignKey, Enum, Integer, JSON
|
| 4 |
+
from sqlalchemy.dialects.postgresql import UUID
|
| 5 |
+
from sqlalchemy.orm import relationship
|
| 6 |
+
import enum
|
| 7 |
+
|
| 8 |
+
from ..database import Base
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class UserRole(str, enum.Enum):
    """Authorization role for an account: regular student or platform admin."""

    student = "student"
    admin = "admin"
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class User(Base):
    """An account (student or admin); all student-owned data cascades from here."""

    __tablename__ = "users"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    email = Column(String(255), unique=True, nullable=False, index=True)
    password_hash = Column(String(255), nullable=True)  # nullable for OAuth users
    role = Column(Enum(UserRole), default=UserRole.student, nullable=False)
    full_name = Column(String(255), nullable=True)
    country = Column(String(100), nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    # Relationships
    profile = relationship("StudentProfile", back_populates="user", uselist=False, cascade="all, delete-orphan")
    achievements = relationship("Achievement", back_populates="user", cascade="all, delete-orphan")
    target_universities = relationship("TargetUniversity", back_populates="user", cascade="all, delete-orphan")
    reports = relationship("OptimizationReport", back_populates="user", cascade="all, delete-orphan")
    evidence_files = relationship("AchievementEvidenceFile", back_populates="user", cascade="all, delete-orphan")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class StudentProfile(Base):
    """One-to-one academic/financial profile for a student account.

    IELTS fields are stored as short strings (band scores like "7.5");
    TOEFL/SAT/ACT/Duolingo are integers.
    """

    __tablename__ = "student_profiles"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    user_id = Column(UUID(as_uuid=True), ForeignKey("users.id", ondelete="CASCADE"), unique=True, nullable=False)
    graduation_year = Column(Integer, nullable=True)
    curriculum = Column(String(100), nullable=True)  # IB, AP, A-Level, etc.
    intended_major = Column(String(255), nullable=True)

    # Test scores
    sat_score = Column(Integer, nullable=True)
    sat_math = Column(Integer, nullable=True)
    sat_ebrw = Column(Integer, nullable=True)
    act_score = Column(Integer, nullable=True)
    ielts_score = Column(String(10), nullable=True)  # e.g. "7.5"
    ielts_listening = Column(String(10), nullable=True)
    ielts_reading = Column(String(10), nullable=True)
    ielts_writing = Column(String(10), nullable=True)
    ielts_speaking = Column(String(10), nullable=True)
    toefl_score = Column(Integer, nullable=True)
    toefl_reading = Column(Integer, nullable=True)
    toefl_listening = Column(Integer, nullable=True)
    toefl_speaking = Column(Integer, nullable=True)
    toefl_writing = Column(Integer, nullable=True)
    duolingo_score = Column(Integer, nullable=True)
    a_level_subjects = Column(String(500), nullable=True)
    a_level_predicted = Column(String(255), nullable=True)
    ap_subjects = Column(String(500), nullable=True)
    ib_predicted_score = Column(Integer, nullable=True)
    # Kazakhstan-specific credentials (UNT exam, NIS school GPA).
    unt_score = Column(Integer, nullable=True)
    nis_grade12_certificate_gpa = Column(String(50), nullable=True)

    # Financial
    budget_range = Column(String(100), nullable=True)  # e.g. "50k-75k"
    aid_needed = Column(Boolean, nullable=True)
    application_preferences_json = Column(JSON, nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False)

    # Relationships
    user = relationship("User", back_populates="profile")
|
apps/api/src/repositories/__init__.py
ADDED
|
File without changes
|
apps/api/src/routes/__init__.py
ADDED
|
File without changes
|
apps/api/src/routes/achievements.py
ADDED
|
@@ -0,0 +1,309 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, status
|
| 4 |
+
from sqlalchemy.orm import Session
|
| 5 |
+
from typing import List, Optional
|
| 6 |
+
from uuid import UUID
|
| 7 |
+
|
| 8 |
+
from ..database import get_db
|
| 9 |
+
from ..schemas.achievement import (
|
| 10 |
+
AchievementCreate,
|
| 11 |
+
AchievementUpdate,
|
| 12 |
+
AchievementOut,
|
| 13 |
+
EvidenceFileOut,
|
| 14 |
+
AchievementImportOut,
|
| 15 |
+
)
|
| 16 |
+
from ..models.achievement import Achievement, AchievementEvidenceFile, AchievementType
|
| 17 |
+
from ..routes.auth import get_current_user
|
| 18 |
+
from ..services.chancellor_analysis import estimate_chancellor_scores
|
| 19 |
+
from ..services.achievement_import_service import decode_import_file, parse_achievement_import
|
| 20 |
+
|
| 21 |
+
router = APIRouter(prefix="/api/achievements", tags=["achievements"])
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@router.get("", response_model=dict)
def list_achievements(
    type: Optional[AchievementType] = None,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Return all achievements owned by the caller, newest first.

    The parameter is deliberately named ``type`` (shadowing the builtin)
    because FastAPI derives the public query-string key ``?type=`` from it.
    """
    filters = [Achievement.user_id == current_user.id]
    if type:
        filters.append(Achievement.type == type)
    rows = (
        db.query(Achievement)
        .filter(*filters)
        .order_by(Achievement.created_at.desc())
        .all()
    )
    return {
        "data": [AchievementOut.model_validate(row).model_dump() for row in rows],
        "message": "OK",
    }
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@router.post("", response_model=dict, status_code=status.HTTP_201_CREATED)
def create_achievement(
    payload: AchievementCreate,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Create an achievement for the caller and return the persisted row (201)."""
    fields = payload.model_dump()
    # Merge in the computed Chancellor scores before the row is inserted.
    fields.update(estimate_chancellor_scores(payload, current_user))
    record = Achievement(
        user_id=current_user.id,
        **fields,
    )
    db.add(record)
    db.commit()
    db.refresh(record)
    return {
        "data": AchievementOut.model_validate(record).model_dump(),
        "message": "Achievement created",
    }
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@router.get("/{achievement_id}", response_model=dict)
def get_achievement(
    achievement_id: UUID,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Fetch one achievement; 404 unless it exists AND belongs to the caller."""
    record = (
        db.query(Achievement)
        .filter(Achievement.id == achievement_id)
        .filter(Achievement.user_id == current_user.id)
        .first()
    )
    if record is None:
        raise HTTPException(status_code=404, detail="Achievement not found")
    return {
        "data": AchievementOut.model_validate(record).model_dump(),
        "message": "OK",
    }
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@router.put("/{achievement_id}", response_model=dict)
def update_achievement(
    achievement_id: UUID,
    payload: AchievementUpdate,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Apply a partial update to the caller's achievement, then rescore it.

    Only fields the client explicitly sent are written (``exclude_unset``);
    Chancellor scores are re-derived from the updated ORM row afterwards.
    """
    record = (
        db.query(Achievement)
        .filter(Achievement.id == achievement_id)
        .filter(Achievement.user_id == current_user.id)
        .first()
    )
    if record is None:
        raise HTTPException(status_code=404, detail="Achievement not found")

    for name, value in payload.model_dump(exclude_unset=True).items():
        setattr(record, name, value)

    # Recompute the derived scores now that the source fields changed.
    for name, value in estimate_chancellor_scores(record, current_user).items():
        setattr(record, name, value)

    db.commit()
    db.refresh(record)
    return {
        "data": AchievementOut.model_validate(record).model_dump(),
        "message": "Achievement updated",
    }
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@router.delete("/{achievement_id}", response_model=dict)
def delete_achievement(
    achievement_id: UUID,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Delete the caller's achievement (child rows cascade); 404 if not owned."""
    record = (
        db.query(Achievement)
        .filter(Achievement.id == achievement_id)
        .filter(Achievement.user_id == current_user.id)
        .first()
    )
    if record is None:
        raise HTTPException(status_code=404, detail="Achievement not found")
    db.delete(record)
    db.commit()
    return {"data": None, "message": "Achievement deleted"}
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
@router.post("/{achievement_id}/upload", response_model=dict)
async def upload_evidence(
    achievement_id: UUID,
    file: UploadFile = File(...),
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Attach an evidence-file record to one of the caller's achievements.

    Raises:
        HTTPException 404: achievement missing or not owned by the caller.
        HTTPException 400: upload has no filename. ``UploadFile.filename`` is
            Optional, while ``file_name`` is NOT NULL in the schema, so without
            this guard a nameless part would surface as a 500 IntegrityError
            at commit time.
    """
    achievement = db.query(Achievement).filter(
        Achievement.id == achievement_id,
        Achievement.user_id == current_user.id,
    ).first()
    if not achievement:
        raise HTTPException(status_code=404, detail="Achievement not found")

    if not file.filename:
        raise HTTPException(status_code=400, detail="Uploaded file must have a filename")

    # In production, upload to S3; for now, store locally
    # NOTE(review): the upload's bytes are never actually written anywhere yet —
    # file_url is a placeholder path and the referenced file will not exist on
    # disk. Persist the content (local dir or S3) before relying on this URL.
    file_url = f"/uploads/{achievement_id}/{file.filename}"

    evidence = AchievementEvidenceFile(
        achievement_id=achievement_id,
        user_id=current_user.id,
        file_url=file_url,
        file_name=file.filename,
        mime_type=file.content_type,
    )
    db.add(evidence)
    db.commit()
    db.refresh(evidence)

    return {
        "data": EvidenceFileOut.model_validate(evidence).model_dump(),
        "message": "File uploaded",
    }
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@router.post("/import-all", response_model=dict, status_code=status.HTTP_201_CREATED)
async def import_all_achievements(
    file: UploadFile = File(...),
    word_limit: int = Form(22),
    clarification_answers: Optional[str] = Form(None),
    previous_import_ids: Optional[str] = Form(None),
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Parse an uploaded document into Achievement rows for the current user.

    Flow: validate the word limit, decode the file, parse optional JSON form
    fields, run the import parser, optionally delete a previous import batch,
    persist the new rows, then build Common-App-style "top activities" and
    "top honors" selections from the parser's ranking.
    """
    if word_limit < 5 or word_limit > 40:
        raise HTTPException(status_code=400, detail="Word limit must be between 5 and 40.")

    raw_bytes = await file.read()
    try:
        # decode_import_file may reject unsupported formats/encodings via ValueError.
        raw_text = decode_import_file(file.filename or "import.txt", raw_bytes)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc

    # `clarification_answers` arrives as a JSON object string; keep only
    # entries whose stringified value is non-blank after stripping.
    parsed_clarification_answers: dict[str, str] = {}
    if clarification_answers:
        try:
            raw_answers = json.loads(clarification_answers)
            if isinstance(raw_answers, dict):
                parsed_clarification_answers = {
                    str(key): str(value).strip()
                    for key, value in raw_answers.items()
                    if str(value).strip()
                }
        except json.JSONDecodeError as exc:
            raise HTTPException(status_code=400, detail="Invalid clarification answers JSON.") from exc

    # `previous_import_ids` is a JSON array of UUID strings; ValueError covers
    # malformed UUIDs as well as malformed JSON numbers.
    parsed_previous_ids: list[UUID] = []
    if previous_import_ids:
        try:
            raw_ids = json.loads(previous_import_ids)
            if isinstance(raw_ids, list):
                parsed_previous_ids = [UUID(str(value)) for value in raw_ids]
        except (json.JSONDecodeError, ValueError) as exc:
            raise HTTPException(status_code=400, detail="Invalid previous import ids JSON.") from exc

    parsed = parse_achievement_import(
        raw_text,
        current_user,
        word_limit,
        parsed_clarification_answers,
    )

    # Re-import semantics: drop the rows produced by the prior run (scoped to
    # this user) before inserting the replacements.
    if parsed_previous_ids:
        db.query(Achievement).filter(
            Achievement.user_id == current_user.id,
            Achievement.id.in_(parsed_previous_ids),
        ).delete(synchronize_session=False)
        db.flush()

    imported_achievements: list[Achievement] = []
    # Pair each ORM row with its parser dict so ranking data survives the insert.
    selection_items: list[tuple[Achievement, dict]] = []

    for item in parsed["items"]:
        achievement = Achievement(
            user_id=current_user.id,
            type=AchievementType(item["type"]),
            title=item["title"],
            organization_name=item["organization_name"],
            role_title=item["role_title"],
            description_raw=item["description_raw"],
            category=item["category"],
            hours_per_week=item["hours_per_week"],
            weeks_per_year=item["weeks_per_year"],
            impact_scope=item["impact_scope"],
            leadership_level=item["leadership_level"],
            truth_risk_flag=item["truth_risk_flag"],
            major_relevance_score=item["major_relevance_score"],
            selectivity_score=item["selectivity_score"],
            continuity_score=item["continuity_score"],
            distinctiveness_score=item["distinctiveness_score"],
        )
        db.add(achievement)
        imported_achievements.append(achievement)
        selection_items.append((achievement, item))

    db.commit()

    # Refresh so DB-generated fields (ids, timestamps) are available below.
    for achievement in imported_achievements:
        db.refresh(achievement)

    activity_selection = []
    honor_selection = []

    for achievement, item in selection_items:
        rank = item.get("recommended_rank")
        # Unranked items (missing, None, or 0) are excluded from selections.
        if not rank:
            continue
        selection_item = {
            "achievement_id": achievement.id,
            "type": achievement.type,
            "rank": rank,
            "title": achievement.title,
            "common_app_text": item["common_app_text"],
            "word_count": len(item["common_app_text"].split()),
            "character_count": len(item["common_app_text"]),
            "common_app_position": item.get("common_app_position"),
            "common_app_organization": item.get("common_app_organization"),
            "common_app_activity_description": item.get("common_app_activity_description"),
            "common_app_honor_description": item.get("common_app_honor_description"),
            "position_character_count": len(item.get("common_app_position") or ""),
            "organization_character_count": len(item.get("common_app_organization") or ""),
            "activity_description_character_count": len(item.get("common_app_activity_description") or ""),
            "honor_character_count": len(item.get("common_app_honor_description") or ""),
            "selection_reason": item.get("selection_reason") or None,
            "verification_notes": item.get("verification_notes") or [],
            "missing_or_unclear_facts": item.get("missing_or_unclear_facts") or [],
        }
        # Common App caps: 10 activities, 5 honors — presumably the source of
        # these cutoffs (TODO confirm).
        if achievement.type == AchievementType.activity and rank <= 10:
            activity_selection.append(selection_item)
        if achievement.type == AchievementType.honor and rank <= 5:
            honor_selection.append(selection_item)

    activity_selection.sort(key=lambda item: item["rank"])
    honor_selection.sort(key=lambda item: item["rank"])

    return {
        "data": AchievementImportOut(
            file_name=file.filename or "import.txt",
            word_limit=word_limit,
            imported_count=len(imported_achievements),
            strongest_angle=parsed["strongest_angle"],
            needs_student_clarification=parsed.get("needs_student_clarification", False),
            clarifying_questions=parsed.get("clarifying_questions", []),
            additional_information_recommended=parsed.get("additional_information_recommended", False),
            additional_information_reason=parsed.get("additional_information_reason") or None,
            additional_information_draft=parsed.get("additional_information_draft") or None,
            formatting_notes=parsed.get("formatting_notes", []),
            extraction_notes=parsed.get("extraction_notes", []),
            source_excerpts=parsed.get("source_excerpts", []),
            # Append a synthetic "saved to vault" step to the parser's steps.
            processing_steps=[
                *parsed.get("processing_steps", []),
                {
                    "key": "save_vault",
                    "label": "Save imported achievements",
                    "status": "complete",
                    "detail": f"Saved {len(imported_achievements)} extracted items to Achievement Vault.",
                },
            ],
            imported_achievements=[
                AchievementOut.model_validate(achievement).model_dump()
                for achievement in imported_achievements
            ],
            top_activities=activity_selection,
            top_honors=honor_selection,
        ).model_dump(),
        "message": "Achievements imported",
    }
|
apps/api/src/routes/admin.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Depends, HTTPException, status
|
| 2 |
+
from sqlalchemy.orm import Session
|
| 3 |
+
from typing import List, Optional
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
|
| 7 |
+
from ..database import get_db
|
| 8 |
+
from ..schemas.university import UniversityCreate, UniversityUpdate, UniversityOut, PolicyEntryCreate, PolicyEntryUpdate, PolicyEntryOut
|
| 9 |
+
from ..models.university import University, UniversityPolicyEntry
|
| 10 |
+
from ..models.report import AdminAuditLog
|
| 11 |
+
from ..routes.auth import get_admin_user
|
| 12 |
+
|
| 13 |
+
router = APIRouter(prefix="/api/admin", tags=["admin"])
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def log_action(
    db: Session,
    admin_id,
    action: str,
    entity_type: str,
    entity_id: str,
    metadata: Optional[dict] = None,
):
    """Queue an admin audit-log row on the session.

    The row is only `db.add()`-ed, never committed here, so it joins the
    caller's transaction and rolls back together with the audited change.
    (Typing fix: the default `None` requires `Optional[dict]`, not `dict`.)
    """
    log = AdminAuditLog(
        admin_user_id=admin_id,
        action=action,
        entity_type=entity_type,
        entity_id=entity_id,
        metadata_json=metadata or {},
    )
    db.add(log)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@router.post("/universities", response_model=dict, status_code=status.HTTP_201_CREATED)
def create_university(
    payload: UniversityCreate,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """Create a university (admin only); the slug must be unique."""
    slug_taken = (
        db.query(University).filter(University.slug == payload.slug).first()
        is not None
    )
    if slug_taken:
        raise HTTPException(status_code=400, detail="Slug already in use")

    university = University(**payload.model_dump())
    db.add(university)
    db.flush()  # flush so the new row's id is available for the audit entry

    log_action(db, admin.id, "create_university", "university", str(university.id))
    db.commit()
    db.refresh(university)

    return {
        "data": UniversityOut.model_validate(university).model_dump(),
        "message": "University created",
    }
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@router.put("/universities/{university_id}", response_model=dict)
def update_university(
    university_id: UUID,
    payload: UniversityUpdate,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """Partially update a university (admin only); only supplied fields change."""
    university = (
        db.query(University).filter(University.id == university_id).first()
    )
    if university is None:
        raise HTTPException(status_code=404, detail="University not found")

    changes = payload.model_dump(exclude_unset=True)
    for name, value in changes.items():
        setattr(university, name, value)

    log_action(db, admin.id, "update_university", "university", str(university_id))
    db.commit()
    db.refresh(university)

    return {
        "data": UniversityOut.model_validate(university).model_dump(),
        "message": "University updated",
    }
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@router.delete("/universities/{university_id}", response_model=dict)
def delete_university(
    university_id: UUID,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """Delete a university (admin only), writing an audit entry first."""
    university = (
        db.query(University).filter(University.id == university_id).first()
    )
    if university is None:
        raise HTTPException(status_code=404, detail="University not found")

    # Audit before deletion so the entry and the delete share one transaction.
    log_action(db, admin.id, "delete_university", "university", str(university_id))
    db.delete(university)
    db.commit()

    return {"data": None, "message": "University deleted"}
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
@router.post("/universities/{university_id}/sources", response_model=dict, status_code=status.HTTP_201_CREATED)
def create_policy_entry(
    university_id: UUID,
    payload: PolicyEntryCreate,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """Create a policy/source entry under an existing university (admin only)."""
    parent = (
        db.query(University).filter(University.id == university_id).first()
    )
    if parent is None:
        raise HTTPException(status_code=404, detail="University not found")

    entry = UniversityPolicyEntry(university_id=university_id, **payload.model_dump())
    db.add(entry)
    db.flush()  # flush so the entry id exists for the audit record

    log_action(db, admin.id, "create_policy_entry", "policy_entry", str(entry.id))
    db.commit()
    db.refresh(entry)

    return {
        "data": PolicyEntryOut.model_validate(entry).model_dump(),
        "message": "Policy entry created",
    }
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
@router.put("/sources/{entry_id}", response_model=dict)
def update_policy_entry(
    entry_id: UUID,
    payload: PolicyEntryUpdate,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """Partially update a policy entry (admin only); only supplied fields change."""
    entry = (
        db.query(UniversityPolicyEntry)
        .filter(UniversityPolicyEntry.id == entry_id)
        .first()
    )
    if entry is None:
        raise HTTPException(status_code=404, detail="Policy entry not found")

    for name, value in payload.model_dump(exclude_unset=True).items():
        setattr(entry, name, value)

    log_action(db, admin.id, "update_policy_entry", "policy_entry", str(entry_id))
    db.commit()
    db.refresh(entry)

    return {
        "data": PolicyEntryOut.model_validate(entry).model_dump(),
        "message": "Policy entry updated",
    }
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
@router.delete("/sources/{entry_id}", response_model=dict)
def delete_policy_entry(
    entry_id: UUID,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """Delete a policy entry (admin only), writing an audit entry first."""
    entry = (
        db.query(UniversityPolicyEntry)
        .filter(UniversityPolicyEntry.id == entry_id)
        .first()
    )
    if entry is None:
        raise HTTPException(status_code=404, detail="Policy entry not found")

    # Audit before deletion so both changes commit (or roll back) together.
    log_action(db, admin.id, "delete_policy_entry", "policy_entry", str(entry_id))
    db.delete(entry)
    db.commit()

    return {"data": None, "message": "Policy entry deleted"}
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@router.get("/audit-logs", response_model=dict)
def list_audit_logs(
    limit: int = 50,
    offset: int = 0,
    admin=Depends(get_admin_user),
    db: Session = Depends(get_db),
):
    """List admin audit-log entries, newest first, with simple pagination.

    Robustness fix: `limit` is clamped to 1..200 and `offset` floored at 0 so
    a crafted query string cannot request a negative or unbounded page.
    """
    limit = max(1, min(limit, 200))
    offset = max(0, offset)

    logs = (
        db.query(AdminAuditLog)
        .order_by(AdminAuditLog.created_at.desc())
        .offset(offset)
        .limit(limit)
        .all()
    )
    return {
        "data": [
            {
                "id": str(entry.id),
                "action": entry.action,
                "entity_type": entry.entity_type,
                "entity_id": entry.entity_id,
                "created_at": entry.created_at.isoformat(),
            }
            for entry in logs
        ],
        "message": "OK",
    }
|
apps/api/src/routes/auth.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Depends, HTTPException, status, Response, Request
|
| 2 |
+
from sqlalchemy.orm import Session
|
| 3 |
+
from datetime import timedelta
|
| 4 |
+
|
| 5 |
+
from ..database import get_db
|
| 6 |
+
from ..schemas.user import UserCreate, UserLogin, UserOut, TokenOut
|
| 7 |
+
from ..services.auth_service import (
|
| 8 |
+
create_user, authenticate_user, create_access_token,
|
| 9 |
+
get_user_by_email, decode_token, get_user_by_id,
|
| 10 |
+
)
|
| 11 |
+
from ..config import settings
|
| 12 |
+
|
| 13 |
+
router = APIRouter(prefix="/api/auth", tags=["auth"])
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def get_current_user(request: Request, db: Session = Depends(get_db)):
    """Dependency: resolve the authenticated user.

    The token is read from the `access_token` cookie first, then from a
    `Bearer` Authorization header. Any failure raises 401.
    """
    def _unauthorized(detail: str) -> HTTPException:
        return HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=detail)

    token = request.cookies.get("access_token")
    if not token:
        bearer = request.headers.get("Authorization")
        if bearer and bearer.startswith("Bearer "):
            token = bearer[7:]  # drop the "Bearer " prefix

    if not token:
        raise _unauthorized("Not authenticated")

    payload = decode_token(token)
    if not payload:
        raise _unauthorized("Invalid token")

    user_id = payload.get("sub")
    if not user_id:
        raise _unauthorized("Invalid token")

    user = get_user_by_id(db, user_id)
    if not user:
        raise _unauthorized("User not found")

    return user
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def get_admin_user(current_user=Depends(get_current_user)):
    """Dependency: pass the current user through only if their role is admin."""
    if current_user.role.value == "admin":
        return current_user
    raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin access required")
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@router.post("/signup", response_model=dict, status_code=status.HTTP_201_CREATED)
def signup(payload: UserCreate, response: Response, db: Session = Depends(get_db)):
    """Register a new account, set the auth cookie, and return token + user."""
    if get_user_by_email(db, payload.email):
        raise HTTPException(status_code=400, detail="Email already registered")

    user = create_user(
        db,
        email=payload.email,
        password=payload.password,
        full_name=payload.full_name,
        country=payload.country,
    )

    ttl_minutes = settings.ACCESS_TOKEN_EXPIRE_MINUTES
    token = create_access_token(
        data={"sub": str(user.id)},
        expires_delta=timedelta(minutes=ttl_minutes),
    )

    # NOTE(review): cookie is httponly/lax but not `secure=True` — confirm the
    # deployment is HTTPS-only before production.
    response.set_cookie(
        key="access_token",
        value=token,
        httponly=True,
        samesite="lax",
        max_age=ttl_minutes * 60,
    )

    body = TokenOut(access_token=token, user=UserOut.model_validate(user))
    return {
        "data": body.model_dump(),
        "message": "Account created successfully",
    }
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@router.post("/login", response_model=dict)
def login(payload: UserLogin, response: Response, db: Session = Depends(get_db)):
    """Authenticate credentials, set the auth cookie, and return token + user."""
    user = authenticate_user(db, payload.email, payload.password)
    if user is None:
        # Same message for unknown email and wrong password.
        raise HTTPException(status_code=401, detail="Invalid email or password")

    ttl_minutes = settings.ACCESS_TOKEN_EXPIRE_MINUTES
    token = create_access_token(
        data={"sub": str(user.id)},
        expires_delta=timedelta(minutes=ttl_minutes),
    )

    response.set_cookie(
        key="access_token",
        value=token,
        httponly=True,
        samesite="lax",
        max_age=ttl_minutes * 60,
    )

    body = TokenOut(access_token=token, user=UserOut.model_validate(user))
    return {
        "data": body.model_dump(),
        "message": "Logged in successfully",
    }
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@router.post("/logout")
def logout(response: Response):
    """Log out by clearing the `access_token` cookie."""
    response.delete_cookie("access_token")
    return {"data": None, "message": "Logged out successfully"}
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
@router.get("/me", response_model=dict)
def me(current_user=Depends(get_current_user)):
    """Return the authenticated user's public representation."""
    data = UserOut.model_validate(current_user).model_dump()
    return {"data": data, "message": "OK"}
|
apps/api/src/routes/profile.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Depends, HTTPException
|
| 2 |
+
from sqlalchemy.orm import Session
|
| 3 |
+
|
| 4 |
+
from ..database import get_db
|
| 5 |
+
from ..schemas.user import ProfileCreate, ProfileUpdate, ProfileOut, UserOut, UserUpdate
|
| 6 |
+
from ..models.user import StudentProfile
|
| 7 |
+
from ..routes.auth import get_current_user
|
| 8 |
+
|
| 9 |
+
router = APIRouter(prefix="/api/profile", tags=["profile"])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@router.get("", response_model=dict)
def get_profile(current_user=Depends(get_current_user), db: Session = Depends(get_db)):
    """Fetch the caller's profile, lazily creating an empty row on first access."""
    profile = (
        db.query(StudentProfile)
        .filter(StudentProfile.user_id == current_user.id)
        .first()
    )
    if profile is None:
        # First visit: persist an empty profile so later reads/updates have a row.
        profile = StudentProfile(user_id=current_user.id)
        db.add(profile)
        db.commit()
        db.refresh(profile)

    data = {
        "user": UserOut.model_validate(current_user).model_dump(),
        "profile": ProfileOut.model_validate(profile).model_dump(),
    }
    return {"data": data, "message": "OK"}
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@router.put("", response_model=dict)
def update_profile(
    payload: ProfileUpdate,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Upsert the caller's profile using only the fields supplied in the body."""
    profile = (
        db.query(StudentProfile)
        .filter(StudentProfile.user_id == current_user.id)
        .first()
    )
    if profile is None:
        # No profile yet — create one and apply the update to it directly.
        profile = StudentProfile(user_id=current_user.id)
        db.add(profile)

    for name, value in payload.model_dump(exclude_unset=True).items():
        setattr(profile, name, value)

    db.commit()
    db.refresh(profile)

    return {
        "data": ProfileOut.model_validate(profile).model_dump(),
        "message": "Profile updated",
    }
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@router.put("/user", response_model=dict)
def update_user(
    payload: UserUpdate,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Apply the supplied fields of UserUpdate to the authenticated user."""
    changes = payload.model_dump(exclude_unset=True)
    for name, value in changes.items():
        setattr(current_user, name, value)

    db.commit()
    db.refresh(current_user)

    return {
        "data": UserOut.model_validate(current_user).model_dump(),
        "message": "User updated",
    }
|
apps/api/src/routes/reports.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Depends, HTTPException, status, BackgroundTasks
|
| 2 |
+
from sqlalchemy.orm import Session
|
| 3 |
+
from typing import List
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
import json
|
| 6 |
+
|
| 7 |
+
from ..database import get_db
|
| 8 |
+
from ..schemas.report import ReportOut, ReportDetailOut, TargetUniversityCreate, TargetUniversityOut
|
| 9 |
+
from ..models.report import (
|
| 10 |
+
OptimizationReport, TargetUniversity, ReportStatus,
|
| 11 |
+
)
|
| 12 |
+
from ..models.achievement import Achievement
|
| 13 |
+
from ..models.university import University
|
| 14 |
+
from ..models.user import User, StudentProfile
|
| 15 |
+
from ..routes.auth import get_current_user
|
| 16 |
+
from ..services.optimization_engine import run_optimization
|
| 17 |
+
from ..services.rewrite_service import generate_rewrite_variants
|
| 18 |
+
|
| 19 |
+
router = APIRouter(prefix="/api", tags=["reports"])
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# --- Target Universities ---
|
| 23 |
+
|
| 24 |
+
@router.get("/targets", response_model=dict)
def list_targets(current_user=Depends(get_current_user), db: Session = Depends(get_db)):
    """List the caller's target universities ordered by priority."""
    rows = (
        db.query(TargetUniversity)
        .filter(TargetUniversity.user_id == current_user.id)
        .order_by(TargetUniversity.priority_order)
        .all()
    )
    serialized = [TargetUniversityOut.model_validate(row).model_dump() for row in rows]
    return {"data": serialized, "message": "OK"}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
@router.post("/targets", response_model=dict, status_code=status.HTTP_201_CREATED)
def add_target(
    payload: TargetUniversityCreate,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Add a university to the caller's target list.

    Unknown fit categories are coerced to "target"; unknown universities and
    duplicates are rejected.
    """
    fit_category = payload.fit_category
    if fit_category not in {"dream", "target", "safe"}:
        fit_category = "target"

    # The referenced university must exist.
    university_exists = (
        db.query(University).filter(University.id == payload.university_id).first()
        is not None
    )
    if not university_exists:
        raise HTTPException(status_code=404, detail="University not found")

    # Reject duplicates for this user.
    duplicate = (
        db.query(TargetUniversity)
        .filter(
            TargetUniversity.user_id == current_user.id,
            TargetUniversity.university_id == payload.university_id,
        )
        .first()
    )
    if duplicate:
        raise HTTPException(status_code=400, detail="University already in targets")

    target = TargetUniversity(
        user_id=current_user.id,
        university_id=payload.university_id,
        priority_order=payload.priority_order,
        fit_category=fit_category,
    )
    db.add(target)
    db.commit()
    db.refresh(target)

    return {
        "data": TargetUniversityOut.model_validate(target).model_dump(),
        "message": "University added to targets",
    }
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
@router.delete("/targets/{target_id}", response_model=dict)
def remove_target(
    target_id: UUID,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Remove one of the caller's target universities; 404 if missing/not owned."""
    target = (
        db.query(TargetUniversity)
        .filter(
            TargetUniversity.id == target_id,
            TargetUniversity.user_id == current_user.id,
        )
        .first()
    )
    if target is None:
        raise HTTPException(status_code=404, detail="Target not found")

    db.delete(target)
    db.commit()
    return {"data": None, "message": "Target removed"}
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
# --- Reports ---
|
| 92 |
+
|
| 93 |
+
def _run_report_generation(db: Session, report_id: UUID):
    """Background task to generate the report.

    Marks the report `processing`, runs the optimization engine, then creates
    rewrite variants for kept/rewrite recommendations. On any exception the
    report is marked `failed` (with the error text in `summary_text`) and the
    exception is re-raised for the caller to observe.
    """
    report = db.query(OptimizationReport).filter(OptimizationReport.id == report_id).first()
    if not report:
        # Report row disappeared (or bad id) — nothing to do.
        return

    try:
        # Commit the status change immediately so observers see "processing".
        report.status = ReportStatus.processing
        db.commit()

        university = db.query(University).filter(University.id == report.university_id).first()
        user = db.query(User).filter(User.id == report.user_id).first()
        profile = db.query(StudentProfile).filter(StudentProfile.user_id == report.user_id).first()
        achievements = db.query(Achievement).filter(Achievement.user_id == report.user_id).all()

        run_optimization(
            db,
            report,
            achievements,
            university,
            profile=profile,
            # User (and thus country) may be missing; pass None in that case.
            user_country=user.country if user else None,
        )

        # Generate rewrite variants for top kept recommendations
        # (local import — presumably avoids a circular import at module load;
        # TODO confirm).
        from ..models.report import ReportRecommendation, RecommendationType
        kept_recs = db.query(ReportRecommendation).filter(
            ReportRecommendation.report_id == report.id,
            ReportRecommendation.recommendation_type.in_([
                RecommendationType.keep,
                RecommendationType.rewrite,
            ]),
        ).all()

        for rec in kept_recs[:15]:  # Limit rewrites to top 15
            achievement = db.query(Achievement).filter(Achievement.id == rec.achievement_id).first()
            if achievement:
                variants = generate_rewrite_variants(db, achievement, report)
                db.add_all(variants)

        db.commit()

    except Exception as e:
        # Persist the failure before re-raising so the row reflects reality
        # even though the exception propagates to the caller.
        report.status = ReportStatus.failed
        report.summary_text = f"Generation failed: {str(e)}"
        db.commit()
        raise
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@router.post("/reports/generate", response_model=dict, status_code=status.HTTP_201_CREATED)
def generate_report(
    university_id: UUID,
    background_tasks: BackgroundTasks,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Create and (synchronously, for the MVP) generate an optimization report.

    Rejects unknown universities and duplicate in-flight reports. Returns the
    report row, whose status may be `failed` if generation raised.
    """
    university = db.query(University).filter(University.id == university_id).first()
    if not university:
        raise HTTPException(status_code=404, detail="University not found")

    # Only one pending/processing report per (user, university) at a time.
    existing = db.query(OptimizationReport).filter(
        OptimizationReport.user_id == current_user.id,
        OptimizationReport.university_id == university_id,
        OptimizationReport.status.in_([ReportStatus.pending, ReportStatus.processing]),
    ).first()
    if existing:
        raise HTTPException(status_code=400, detail="A report for this university is already being processed")

    # Version number = count of all prior reports for this pair, plus one.
    prev_reports = db.query(OptimizationReport).filter(
        OptimizationReport.user_id == current_user.id,
        OptimizationReport.university_id == university_id,
    ).count()

    report = OptimizationReport(
        user_id=current_user.id,
        university_id=university_id,
        status=ReportStatus.pending,
        version_number=prev_reports + 1,
    )
    db.add(report)
    db.commit()
    db.refresh(report)

    # Run synchronously for MVP (can be moved to background task with proper async setup)
    try:
        _run_report_generation(db, report.id)
    except Exception:
        # _run_report_generation marks the report failed and commits before
        # re-raising, so swallow here and return the failed row to the client.
        pass
    finally:
        # Bug fix: refresh must run even on failure — previously it only ran on
        # success, so a failed generation returned a stale `pending` status.
        db.refresh(report)

    return {
        "data": ReportOut.model_validate(report).model_dump(),
        "message": "Report generated",
    }
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
@router.get("/reports", response_model=dict)
def list_reports(current_user=Depends(get_current_user), db: Session = Depends(get_db)):
    """Return every report owned by the caller, newest first."""
    rows = (
        db.query(OptimizationReport)
        .filter(OptimizationReport.user_id == current_user.id)
        .order_by(OptimizationReport.created_at.desc())
        .all()
    )
    payload = [ReportOut.model_validate(row).model_dump() for row in rows]
    return {"data": payload, "message": "OK"}
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@router.get("/reports/{report_id}", response_model=dict)
def get_report(
    report_id: UUID,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Fetch one report (with recommendations/variants/sources) owned by the caller.

    Ownership is enforced in the query itself, so a foreign report id yields 404.
    """
    report = (
        db.query(OptimizationReport)
        .filter(
            OptimizationReport.id == report_id,
            OptimizationReport.user_id == current_user.id,
        )
        .first()
    )
    if report is None:
        raise HTTPException(status_code=404, detail="Report not found")

    return {
        "data": ReportDetailOut.model_validate(report).model_dump(),
        "message": "OK",
    }
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
@router.get("/reports/{report_id}/export", response_model=dict)
def export_report(
    report_id: UUID,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Return a flattened, export-friendly view of one report owned by the caller."""
    report = (
        db.query(OptimizationReport)
        .filter(
            OptimizationReport.id == report_id,
            OptimizationReport.user_id == current_user.id,
        )
        .first()
    )
    if report is None:
        raise HTTPException(status_code=404, detail="Report not found")

    detail = ReportDetailOut.model_validate(report).model_dump()

    # achievement_id -> title, keeping the first occurrence (mirrors the
    # first-match semantics of a next() scan over the recommendations).
    title_by_achievement = {}
    for rec in detail["recommendations"]:
        title_by_achievement.setdefault(rec["achievement_id"], rec["achievement"]["title"])

    # Only recommendations that received a rank are exported.
    ranked_recommendations = []
    for rec in detail["recommendations"]:
        if rec["suggested_rank"] is None:
            continue
        ranked_recommendations.append(
            {
                "rank": rec["suggested_rank"],
                "type": rec["recommendation_type"],
                "title": rec["achievement"]["title"],
                "rationale": rec["rationale"],
                "confidence": rec["confidence_label"],
            }
        )

    # Only the recommended rewrite variant(s) per achievement are exported.
    recommended_variants = []
    for variant in detail["rewrite_variants"]:
        if not variant["is_recommended"]:
            continue
        recommended_variants.append(
            {
                "achievement_title": title_by_achievement.get(variant["achievement_id"], "Unknown"),
                "style": variant["style_mode"],
                "text": variant["text"],
                "char_count": variant["character_count"],
            }
        )

    cited_sources = [
        {
            "section": ref["section"],
            "title": ref["policy_entry"]["title"],
            "source_type": ref["policy_entry"]["source_type"],
            "reliability_tier": ref["policy_entry"]["reliability_tier"],
            "url": ref["policy_entry"]["source_url"],
        }
        for ref in detail["source_references"]
    ]

    export = {
        "report_id": str(report.id),
        "university": detail["university"]["name"],
        "generated_at": detail["created_at"],
        "summary": detail["summary_text"],
        "recommendations": ranked_recommendations,
        "rewrite_variants": recommended_variants,
        "sources": cited_sources,
    }
    return {"data": export, "message": "OK"}
|
apps/api/src/routes/universities.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, Depends, HTTPException, Query, status
|
| 2 |
+
from sqlalchemy.orm import Session
|
| 3 |
+
from typing import Optional
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
|
| 6 |
+
from ..database import get_db
|
| 7 |
+
from ..schemas.university import (
|
| 8 |
+
UniversityOut,
|
| 9 |
+
UniversityListOut,
|
| 10 |
+
PolicyEntryOut,
|
| 11 |
+
CommonAppRecommendationRequest,
|
| 12 |
+
UniversityAdvisorRequest,
|
| 13 |
+
)
|
| 14 |
+
from ..models.achievement import Achievement
|
| 15 |
+
from ..models.university import University, UniversityPolicyEntry
|
| 16 |
+
from ..routes.auth import get_current_user
|
| 17 |
+
from ..services.university_filters import enrich_university, filter_universities
|
| 18 |
+
from ..services.university_advisor import (
|
| 19 |
+
SearchNotConfiguredError,
|
| 20 |
+
generate_university_action_plan,
|
| 21 |
+
search_university_sources,
|
| 22 |
+
)
|
| 23 |
+
from ..services.university_recommender import recommend_common_app_universities
|
| 24 |
+
|
| 25 |
+
router = APIRouter(prefix="/api/universities", tags=["universities"])
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@router.get("", response_model=dict)
def list_universities(
    search: Optional[str] = None,
    country: Optional[str] = None,
    region: Optional[str] = None,
    application_system: Optional[str] = None,
    teaching_language: Optional[str] = None,
    major: Optional[str] = None,
    school_years: Optional[int] = None,
    full_ride_only: bool = False,
    common_app_only: bool = False,
    aid_type: Optional[str] = None,
    sort_by: str = "name",
    sort_dir: str = "asc",
    db: Session = Depends(get_db),
):
    """List active universities.

    All rows are loaded name-ordered, enriched, and then filtered/sorted in
    memory by filter_universities using the supplied query parameters.
    """
    active_rows = (
        db.query(University)
        .filter(University.is_active == True)
        .order_by(University.name)
        .all()
    )
    enriched = [enrich_university(row) for row in active_rows]
    matched = filter_universities(
        enriched,
        search=search,
        country=country,
        region=region,
        application_system=application_system,
        teaching_language=teaching_language,
        major=major,
        school_years=school_years,
        full_ride_only=full_ride_only,
        common_app_only=common_app_only,
        aid_type=aid_type,
        sort_by=sort_by,
        sort_dir=sort_dir,
    )
    return {
        "data": [UniversityListOut.model_validate(u).model_dump() for u in matched],
        "message": "OK",
    }
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@router.post("/recommendations/common-app", response_model=dict, status_code=status.HTTP_201_CREATED)
def recommend_common_app(
    payload: CommonAppRecommendationRequest,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Generate Common App university recommendations from selected achievements.

    Merges the request's preferences over the stored profile preferences,
    optionally persists the merged result, resolves the selected honor/activity
    achievement ids against the caller's achievements, and scores the funded
    Common App shortlist against them.
    """
    if not payload.top_honor_ids and not payload.top_activity_ids:
        raise HTTPException(status_code=400, detail="Select up to 5 honors and up to 10 activities first")

    profile = current_user.profile
    if not profile:
        raise HTTPException(status_code=400, detail="Complete your profile before generating recommendations")

    # Request preferences override stored ones; derived keys override both.
    existing_preferences = profile.application_preferences_json or {}
    preferences = {
        **existing_preferences,
        **payload.preferences,
        # Selection ids are capped (5 honors / 10 activities) and stringified for JSON storage.
        "top_honor_ids": [str(item) for item in payload.top_honor_ids[:5]],
        "top_activity_ids": [str(item) for item in payload.top_activity_ids[:10]],
        "intended_major": payload.preferences.get("intended_major") or profile.intended_major,
        "curriculum": profile.curriculum,
        "graduation_year": profile.graduation_year,
    }
    if payload.save_preferences:
        # Persist the merged preferences back onto the profile before scoring.
        profile.application_preferences_json = preferences
        db.commit()
        db.refresh(profile)

    # Resolve requested ids against the caller's achievements; ids that are
    # missing or of the wrong type are silently dropped.
    achievements = db.query(Achievement).filter(Achievement.user_id == current_user.id).all()
    by_id = {str(achievement.id): achievement for achievement in achievements}
    selected_honors = [
        by_id[str(item)]
        for item in payload.top_honor_ids[:5]
        if str(item) in by_id and by_id[str(item)].type.value == "honor"
    ]
    selected_activities = [
        by_id[str(item)]
        for item in payload.top_activity_ids[:10]
        if str(item) in by_id and by_id[str(item)].type.value == "activity"
    ]
    if not selected_honors and not selected_activities:
        raise HTTPException(status_code=400, detail="Selected achievements were not found")

    # Build the funded Common App candidate pool, strongest aid first.
    universities = [
        enrich_university(university)
        for university in db.query(University).filter(University.is_active == True).all()
    ]
    common_app_universities = filter_universities(
        universities,
        common_app_only=True,
        # school_years is user-provided and may be a non-numeric string; only
        # digit-strings are forwarded as an int filter.
        school_years=int(preferences["school_years"]) if str(preferences.get("school_years") or "").isdigit() else None,
        sort_by="aid_strength",
        sort_dir="desc",
    )

    recommendations = recommend_common_app_universities(
        selected_honors=selected_honors,
        selected_activities=selected_activities,
        preferences=preferences,
        universities=common_app_universities,
    )
    return {
        "data": {
            "recommendations": recommendations,
            "selected_honors": len(selected_honors),
            "selected_activities": len(selected_activities),
            "available_common_app_universities": len(common_app_universities),
            "category_note": "Safe means relative safety within the funded Common App shortlist, not guaranteed admission or aid.",
        },
        "message": "Recommendations generated",
    }
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@router.post("/advisor/plan", response_model=dict)
def university_advisor_plan(
    payload: UniversityAdvisorRequest,
    current_user=Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Produce an action plan for a named university, backed by live web search.

    Search failures are non-fatal: a degraded plan is generated from saved
    profile data and a warning is prepended to the plan's source notes.
    """
    profile = current_user.profile
    # Request value wins; fall back to the profile's intended major when present.
    intended_major = payload.intended_major or (profile.intended_major if profile else None)
    search_warning = None
    try:
        search_results = search_university_sources(payload.university_name, intended_major)
    except SearchNotConfiguredError:
        # Known configuration gap: explain exactly which env vars are missing.
        search_results = []
        search_warning = (
            "Google Custom Search is not configured. Set GOOGLE_SEARCH_API_KEY and "
            "GOOGLE_SEARCH_ENGINE_ID to enable source-backed live search."
        )
    except Exception:
        # Any other search failure: degrade gracefully with a generic warning.
        search_results = []
        search_warning = (
            "Google Custom Search is currently unavailable or misconfigured. The plan below is limited "
            "to saved profile data and cannot confirm current university facts."
        )

    # Only the 25 most recent achievements are fed into plan generation.
    achievements = (
        db.query(Achievement)
        .filter(Achievement.user_id == current_user.id)
        .order_by(Achievement.created_at.desc())
        .limit(25)
        .all()
    )
    plan = generate_university_action_plan(
        university_name=payload.university_name,
        user=current_user,
        achievements=achievements,
        search_results=search_results,
    )
    if search_warning:
        # Prepend the warning so it is the first source note the client shows.
        plan.setdefault("source_notes", [])
        plan["source_notes"] = [search_warning, *plan["source_notes"]]
    return {
        "data": {
            "university_name": payload.university_name,
            "sources": search_results,
            "plan": plan,
        },
        "message": "Advisor plan generated",
    }
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
@router.get("/{university_id}", response_model=dict)
def get_university(university_id: UUID, db: Session = Depends(get_db)):
    """Fetch a single university by primary key; 404 when absent."""
    record = db.query(University).filter(University.id == university_id).first()
    if record is None:
        raise HTTPException(status_code=404, detail="University not found")
    return {
        "data": UniversityOut.model_validate(record).model_dump(),
        "message": "OK",
    }
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
@router.get("/{university_id}/sources", response_model=dict)
def get_university_sources(university_id: UUID, db: Session = Depends(get_db)):
    """List every policy/source entry attached to a university; 404 when the university is absent."""
    university = db.query(University).filter(University.id == university_id).first()
    if university is None:
        raise HTTPException(status_code=404, detail="University not found")

    entries = (
        db.query(UniversityPolicyEntry)
        .filter(UniversityPolicyEntry.university_id == university_id)
        .all()
    )
    payload = [PolicyEntryOut.model_validate(entry).model_dump() for entry in entries]
    return {"data": payload, "message": "OK"}
|
apps/api/src/schema_maintenance.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sqlalchemy import inspect, text
|
| 2 |
+
|
| 3 |
+
from .database import engine
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def ensure_application_schema() -> None:
    """Apply lightweight in-place schema upgrades (additive ALTER TABLEs only).

    Inspects the live database and adds any columns the application expects
    but the schema lacks. Only ADD COLUMN statements are issued — nothing is
    dropped or altered — so this is safe to run repeatedly at startup.
    NOTE(review): assumes the four inspected tables already exist (e.g. via
    Alembic migrations) — confirm startup ordering.
    """
    inspector = inspect(engine)
    # Snapshot the current column sets once, up front.
    student_profile_columns = {column["name"] for column in inspector.get_columns("student_profiles")}
    university_columns = {column["name"] for column in inspector.get_columns("universities")}
    report_columns = {column["name"] for column in inspector.get_columns("optimization_reports")}
    target_university_columns = {column["name"] for column in inspector.get_columns("target_universities")}

    if "application_preferences_json" not in student_profile_columns:
        # Use JSONB on PostgreSQL; plain JSON elsewhere (e.g. SQLite).
        column_type = "JSONB" if engine.dialect.name == "postgresql" else "JSON"
        with engine.begin() as connection:
            connection.execute(
                text(f"ALTER TABLE student_profiles ADD COLUMN application_preferences_json {column_type}")
            )

    # Test-score and curriculum columns expected on student_profiles.
    student_profile_column_defs = {
        "sat_math": "INTEGER",
        "sat_ebrw": "INTEGER",
        "ielts_listening": "VARCHAR(10)",
        "ielts_reading": "VARCHAR(10)",
        "ielts_writing": "VARCHAR(10)",
        "ielts_speaking": "VARCHAR(10)",
        "toefl_reading": "INTEGER",
        "toefl_listening": "INTEGER",
        "toefl_speaking": "INTEGER",
        "toefl_writing": "INTEGER",
        "duolingo_score": "INTEGER",
        "a_level_subjects": "VARCHAR(500)",
        "a_level_predicted": "VARCHAR(255)",
        "ap_subjects": "VARCHAR(500)",
        "ib_predicted_score": "INTEGER",
        "unt_score": "INTEGER",
        "nis_grade12_certificate_gpa": "VARCHAR(50)",
    }
    missing_student_profile_columns = [
        (name, column_type)
        for name, column_type in student_profile_column_defs.items()
        if name not in student_profile_columns
    ]
    if missing_student_profile_columns:
        # One transaction for all missing student_profiles columns.
        with engine.begin() as connection:
            for name, column_type in missing_student_profile_columns:
                connection.execute(text(f"ALTER TABLE student_profiles ADD COLUMN {name} {column_type}"))

    json_type = "JSONB" if engine.dialect.name == "postgresql" else "JSON"
    # Location / application-system / financial-aid columns expected on universities.
    university_column_defs = {
        "application_source_url": "VARCHAR(1000)",
        "region": "VARCHAR(100)",
        "city": "VARCHAR(255)",
        "is_common_app": "BOOLEAN DEFAULT FALSE NOT NULL",
        "teaching_languages": json_type,
        "major_strengths": json_type,
        "education_years_required": "INTEGER",
        "school_years_note": "TEXT",
        "aid_type": "VARCHAR(100)",
        "aid_strength": "INTEGER",
        "selectivity_score": "INTEGER",
        "full_ride_possible": "BOOLEAN DEFAULT FALSE NOT NULL",
        "full_tuition_possible": "BOOLEAN DEFAULT FALSE NOT NULL",
        "aid_notes": "TEXT",
        "funding_source_url": "VARCHAR(1000)",
        "funding_source_title": "VARCHAR(500)",
        "eligibility_notes": "TEXT",
    }

    missing_university_columns = [
        (name, column_type)
        for name, column_type in university_column_defs.items()
        if name not in university_columns
    ]
    if missing_university_columns:
        # One transaction for all missing universities columns.
        with engine.begin() as connection:
            for name, column_type in missing_university_columns:
                connection.execute(text(f"ALTER TABLE universities ADD COLUMN {name} {column_type}"))

    if "advisor_snapshot_json" not in report_columns:
        with engine.begin() as connection:
            connection.execute(
                text(f"ALTER TABLE optimization_reports ADD COLUMN advisor_snapshot_json {json_type}")
            )

    if "fit_category" not in target_university_columns:
        # NOT NULL requires a default so existing rows backfill to 'target'.
        with engine.begin() as connection:
            connection.execute(
                text("ALTER TABLE target_universities ADD COLUMN fit_category VARCHAR(20) DEFAULT 'target' NOT NULL")
            )
|
apps/api/src/schemas/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .user import UserCreate, UserLogin, UserOut, UserUpdate, ProfileCreate, ProfileUpdate, ProfileOut, TokenOut
|
| 2 |
+
from .achievement import AchievementCreate, AchievementUpdate, AchievementOut, EvidenceFileOut
|
| 3 |
+
from .university import UniversityCreate, UniversityUpdate, UniversityOut, PolicyEntryCreate, PolicyEntryUpdate, PolicyEntryOut
|
| 4 |
+
from .report import (
|
| 5 |
+
TargetUniversityCreate, TargetUniversityOut,
|
| 6 |
+
ReportOut, ReportDetailOut, RecommendationOut,
|
| 7 |
+
RewriteVariantOut, SourceReferenceOut,
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
"UserCreate", "UserLogin", "UserOut", "UserUpdate", "ProfileCreate", "ProfileUpdate", "ProfileOut", "TokenOut",
|
| 12 |
+
"AchievementCreate", "AchievementUpdate", "AchievementOut", "EvidenceFileOut",
|
| 13 |
+
"UniversityCreate", "UniversityUpdate", "UniversityOut", "PolicyEntryCreate", "PolicyEntryUpdate", "PolicyEntryOut",
|
| 14 |
+
"TargetUniversityCreate", "TargetUniversityOut",
|
| 15 |
+
"ReportOut", "ReportDetailOut", "RecommendationOut",
|
| 16 |
+
"RewriteVariantOut", "SourceReferenceOut",
|
| 17 |
+
]
|
apps/api/src/schemas/achievement.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import Optional, List
|
| 3 |
+
from datetime import datetime, date
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
from ..models.achievement import AchievementType, ImpactScope, LeadershipLevel
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class AchievementCreate(BaseModel):
    """Request payload for creating an achievement (honor or activity)."""

    type: AchievementType
    title: str = Field(max_length=500)
    organization_name: Optional[str] = Field(None, max_length=255)
    role_title: Optional[str] = Field(None, max_length=255)
    description_raw: Optional[str] = None
    category: Optional[str] = Field(None, max_length=100)
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    # Time commitment bounds: at most 168 hours in a week, 52 weeks in a year.
    hours_per_week: Optional[float] = Field(None, ge=0, le=168)
    weeks_per_year: Optional[int] = Field(None, ge=0, le=52)
    impact_scope: Optional[ImpactScope] = None
    leadership_level: Optional[LeadershipLevel] = None
    truth_risk_flag: Optional[bool] = None
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class AchievementUpdate(BaseModel):
    """Partial-update payload for an achievement; every field is optional.

    Note the achievement's type is intentionally absent — it cannot be changed
    after creation via this schema.
    """

    title: Optional[str] = Field(None, max_length=500)
    organization_name: Optional[str] = Field(None, max_length=255)
    role_title: Optional[str] = Field(None, max_length=255)
    description_raw: Optional[str] = None
    category: Optional[str] = Field(None, max_length=100)
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    # Same time-commitment bounds as AchievementCreate.
    hours_per_week: Optional[float] = Field(None, ge=0, le=168)
    weeks_per_year: Optional[int] = Field(None, ge=0, le=52)
    impact_scope: Optional[ImpactScope] = None
    leadership_level: Optional[LeadershipLevel] = None
    truth_risk_flag: Optional[bool] = None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class EvidenceFileOut(BaseModel):
    """Serialized evidence file attached to an achievement."""

    id: UUID
    file_url: str
    file_name: str
    mime_type: Optional[str] = None
    uploaded_at: datetime

    # Populated from ORM objects (attribute access, not dict keys).
    model_config = {"from_attributes": True}
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class AchievementOut(BaseModel):
    """Full serialized achievement, including computed scores and evidence files."""

    id: UUID
    user_id: UUID
    type: AchievementType
    title: str
    organization_name: Optional[str] = None
    role_title: Optional[str] = None
    description_raw: Optional[str] = None
    category: Optional[str] = None
    start_date: Optional[date] = None
    end_date: Optional[date] = None
    hours_per_week: Optional[float] = None
    weeks_per_year: Optional[int] = None
    impact_scope: Optional[ImpactScope] = None
    leadership_level: Optional[LeadershipLevel] = None
    # Scoring fields — presumably populated by the optimization/analysis
    # pipeline; None until scored. TODO confirm against the scoring service.
    major_relevance_score: Optional[float] = None
    continuity_score: Optional[float] = None
    selectivity_score: Optional[float] = None
    distinctiveness_score: Optional[float] = None
    truth_risk_flag: Optional[bool] = None
    created_at: datetime
    updated_at: datetime
    evidence_files: List[EvidenceFileOut] = []

    # Populated from ORM objects (attribute access, not dict keys).
    model_config = {"from_attributes": True}
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class AchievementImportSelectionItem(BaseModel):
    """One ranked item in an import result (Common App activity or honor).

    Carries the Common-App-formatted text fields alongside per-field
    character counts so clients can show limit compliance.
    """

    achievement_id: UUID
    type: AchievementType
    rank: int
    title: str
    common_app_text: str
    word_count: int
    character_count: int
    # Common App form fields (activities use position/organization/description;
    # honors use the honor description).
    common_app_position: Optional[str] = None
    common_app_organization: Optional[str] = None
    common_app_activity_description: Optional[str] = None
    common_app_honor_description: Optional[str] = None
    # Character counts matching the optional fields above.
    position_character_count: Optional[int] = None
    organization_character_count: Optional[int] = None
    activity_description_character_count: Optional[int] = None
    honor_character_count: Optional[int] = None
    selection_reason: Optional[str] = None
    verification_notes: List[str] = []
    missing_or_unclear_facts: List[str] = []
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class AchievementImportStep(BaseModel):
    """One step in the import pipeline's progress trace shown to the client."""

    key: str
    label: str
    status: str
    detail: str
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class AchievementImportOut(BaseModel):
    """Result envelope for a document-based achievement import."""

    file_name: str
    word_limit: int
    imported_count: int
    strongest_angle: str
    # Set when the source document was ambiguous and the student should answer
    # the clarifying_questions before trusting the import.
    needs_student_clarification: bool = False
    clarifying_questions: List[str] = []
    # Suggests using the Common App "Additional Information" section.
    additional_information_recommended: bool = False
    additional_information_reason: Optional[str] = None
    additional_information_draft: Optional[str] = None
    formatting_notes: List[str] = []
    extraction_notes: List[str] = []
    source_excerpts: List[str] = []
    processing_steps: List[AchievementImportStep] = []
    imported_achievements: List[AchievementOut]
    top_activities: List[AchievementImportSelectionItem]
    top_honors: List[AchievementImportSelectionItem]
|
apps/api/src/schemas/report.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel
|
| 2 |
+
from typing import Optional, List
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
from ..models.report import ReportStatus, RecommendationType, ConfidenceLabel, SourceSection
|
| 6 |
+
from .achievement import AchievementOut
|
| 7 |
+
from .university import UniversityListOut, PolicyEntryOut
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TargetUniversityCreate(BaseModel):
    """Request payload for adding a university to the student's target list."""

    university_id: UUID
    priority_order: Optional[int] = None
    # One of the fit buckets; defaults to 'target' (vs. e.g. safety/reach).
    fit_category: str = "target"
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TargetUniversityOut(BaseModel):
    """Serialized target-list entry, embedding its university summary."""

    id: UUID
    user_id: UUID
    university_id: UUID
    priority_order: Optional[int] = None
    fit_category: str = "target"
    created_at: datetime
    university: UniversityListOut

    # Populated from ORM objects (attribute access, not dict keys).
    model_config = {"from_attributes": True}
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class RecommendationOut(BaseModel):
    """Serialized per-achievement recommendation inside a report."""

    id: UUID
    report_id: UUID
    achievement_id: UUID
    recommendation_type: RecommendationType
    # None when the recommendation is to drop the achievement (no rank assigned).
    suggested_rank: Optional[int] = None
    rationale: Optional[str] = None
    confidence_label: ConfidenceLabel
    created_at: datetime
    achievement: AchievementOut

    # Populated from ORM objects (attribute access, not dict keys).
    model_config = {"from_attributes": True}
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class RewriteVariantOut(BaseModel):
    """Serialized rewrite variant of an achievement description for a report."""

    id: UUID
    achievement_id: UUID
    report_id: UUID
    style_mode: str
    text: str
    character_count: int
    # True for the variant the system suggests the student use.
    is_recommended: bool
    explanation: Optional[str] = None
    created_at: datetime

    # Populated from ORM objects (attribute access, not dict keys).
    model_config = {"from_attributes": True}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class SourceReferenceOut(BaseModel):
    """Serialized citation linking a report section to a university policy entry."""

    id: UUID
    report_id: UUID
    university_policy_entry_id: UUID
    section: SourceSection
    note: Optional[str] = None
    created_at: datetime
    policy_entry: PolicyEntryOut

    # Populated from ORM objects (attribute access, not dict keys).
    model_config = {"from_attributes": True}
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class AdvisorProgramOut(BaseModel):
    """A program highlighted in the advisor snapshot, with funding context."""

    name: str
    why_it_matters: str
    funding_note: str
    priority: str
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class AdvisorActionOut(BaseModel):
    """A single action item in the advisor snapshot's action plan."""

    title: str
    detail: str
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
class AdvisorSnapshotOut(BaseModel):
    """Structured advisor snapshot embedded in a report's advisor_snapshot_json."""

    title: str
    subtitle: str
    target_major: str
    report_note: str
    focus_areas: List[str] = []
    research_programs: List[AdvisorProgramOut] = []
    funding_plan: List[str] = []
    action_plan: List[AdvisorActionOut] = []
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class ReportOut(BaseModel):
    """List/summary API shape for a generated report."""

    id: UUID
    user_id: UUID
    university_id: UUID
    status: ReportStatus
    summary_text: Optional[str] = None
    # Parsed advisor snapshot; None until the report has been generated.
    advisor_snapshot_json: Optional[AdvisorSnapshotOut] = None
    version_number: int
    created_at: datetime
    completed_at: Optional[datetime] = None
    university: UniversityListOut

    model_config = {"from_attributes": True}
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class ReportDetailOut(ReportOut):
    """Detail API shape: ReportOut plus its nested child collections."""

    recommendations: List[RecommendationOut] = []
    rewrite_variants: List[RewriteVariantOut] = []
    source_references: List[SourceReferenceOut] = []

    model_config = {"from_attributes": True}
|
apps/api/src/schemas/university.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import Optional, List
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
from ..models.university import WeightPreset, SourceType, ReliabilityTier
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class PolicyEntryCreate(BaseModel):
    """Payload for creating a university policy knowledge-base entry."""

    title: str = Field(max_length=500)
    content: str
    source_url: Optional[str] = None
    source_title: Optional[str] = None
    source_type: SourceType
    # Default tier B — presumably "good but not official"; confirm with ReliabilityTier.
    reliability_tier: ReliabilityTier = ReliabilityTier.B
    excerpt: Optional[str] = None
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class PolicyEntryUpdate(BaseModel):
    """Partial-update payload for a policy entry; None means 'leave unchanged'."""

    title: Optional[str] = Field(None, max_length=500)
    content: Optional[str] = None
    source_url: Optional[str] = None
    source_title: Optional[str] = None
    source_type: Optional[SourceType] = None
    reliability_tier: Optional[ReliabilityTier] = None
    excerpt: Optional[str] = None
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class PolicyEntryOut(BaseModel):
    """API shape of a stored policy entry."""

    id: UUID
    university_id: UUID
    title: str
    content: str
    source_url: Optional[str] = None
    source_title: Optional[str] = None
    source_type: SourceType
    reliability_tier: ReliabilityTier
    excerpt: Optional[str] = None
    created_at: datetime
    updated_at: datetime

    model_config = {"from_attributes": True}
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class UniversityCreate(BaseModel):
    """Admin payload for creating a university record."""

    # Identity
    slug: str = Field(max_length=100)
    name: str = Field(max_length=255)
    country: str = Field(max_length=100)
    application_system: Optional[str] = None
    application_source_url: Optional[str] = None
    short_description: Optional[str] = None
    weight_preset: WeightPreset = WeightPreset.balanced_holistic
    # Location / application metadata
    region: Optional[str] = None
    city: Optional[str] = None
    is_common_app: bool = False
    teaching_languages: List[str] = []
    major_strengths: List[str] = []
    education_years_required: Optional[int] = None
    school_years_note: Optional[str] = None
    # Financial-aid metadata
    aid_type: Optional[str] = None
    aid_strength: Optional[int] = None
    selectivity_score: Optional[int] = None
    full_ride_possible: bool = False
    full_tuition_possible: bool = False
    aid_notes: Optional[str] = None
    funding_source_url: Optional[str] = None
    funding_source_title: Optional[str] = None
    eligibility_notes: Optional[str] = None
    is_active: bool = True
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class UniversityUpdate(BaseModel):
    """Partial-update payload; None means 'leave unchanged'. Slug is immutable."""

    name: Optional[str] = Field(None, max_length=255)
    country: Optional[str] = Field(None, max_length=100)
    application_system: Optional[str] = None
    application_source_url: Optional[str] = None
    short_description: Optional[str] = None
    weight_preset: Optional[WeightPreset] = None
    region: Optional[str] = None
    city: Optional[str] = None
    is_common_app: Optional[bool] = None
    teaching_languages: Optional[List[str]] = None
    major_strengths: Optional[List[str]] = None
    education_years_required: Optional[int] = None
    school_years_note: Optional[str] = None
    aid_type: Optional[str] = None
    aid_strength: Optional[int] = None
    selectivity_score: Optional[int] = None
    full_ride_possible: Optional[bool] = None
    full_tuition_possible: Optional[bool] = None
    aid_notes: Optional[str] = None
    funding_source_url: Optional[str] = None
    funding_source_title: Optional[str] = None
    eligibility_notes: Optional[str] = None
    is_active: Optional[bool] = None
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class UniversityOut(BaseModel):
    """Full API shape of a university, including its policy entries."""

    id: UUID
    slug: str
    name: str
    country: str
    application_system: Optional[str] = None
    application_source_url: Optional[str] = None
    short_description: Optional[str] = None
    weight_preset: WeightPreset
    region: Optional[str] = None
    city: Optional[str] = None
    is_common_app: bool = False
    teaching_languages: List[str] = []
    major_strengths: List[str] = []
    education_years_required: Optional[int] = None
    school_years_note: Optional[str] = None
    aid_type: Optional[str] = None
    aid_strength: Optional[int] = None
    selectivity_score: Optional[int] = None
    full_ride_possible: bool = False
    full_tuition_possible: bool = False
    aid_notes: Optional[str] = None
    funding_source_url: Optional[str] = None
    funding_source_title: Optional[str] = None
    eligibility_notes: Optional[str] = None
    is_active: bool
    created_at: datetime
    updated_at: datetime
    policy_entries: List[PolicyEntryOut] = []

    model_config = {"from_attributes": True}
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class UniversityListOut(BaseModel):
    """List API shape of a university — UniversityOut minus policy entries/timestamps."""

    id: UUID
    slug: str
    name: str
    country: str
    application_system: Optional[str] = None
    short_description: Optional[str] = None
    weight_preset: WeightPreset
    is_active: bool
    region: Optional[str] = None
    city: Optional[str] = None
    is_common_app: bool = False
    application_source_url: Optional[str] = None
    teaching_languages: List[str] = []
    major_strengths: List[str] = []
    education_years_required: Optional[int] = None
    school_years_note: Optional[str] = None
    aid_type: Optional[str] = None
    aid_strength: Optional[int] = None
    selectivity_score: Optional[int] = None
    full_ride_possible: bool = False
    full_tuition_possible: bool = False
    aid_notes: Optional[str] = None
    funding_source_url: Optional[str] = None
    funding_source_title: Optional[str] = None
    eligibility_notes: Optional[str] = None

    model_config = {"from_attributes": True}
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
class CommonAppRecommendationRequest(BaseModel):
    """Request body for Common App university recommendations.

    The max_length caps mirror Common App slots (5 honors, 10 activities).
    """

    top_honor_ids: List[UUID] = Field(default_factory=list, max_length=5)
    top_activity_ids: List[UUID] = Field(default_factory=list, max_length=10)
    # Free-form preference flags; schema defined by the frontend — TODO confirm.
    preferences: dict = Field(default_factory=dict)
    save_preferences: bool = True
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class UniversityAdvisorRequest(BaseModel):
    """Request body for ad-hoc advisor guidance about a named university."""

    university_name: str = Field(min_length=2, max_length=255)
    intended_major: Optional[str] = None
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class CommonAppRecommendationOut(BaseModel):
    """One recommended university with the reasoning behind the pick."""

    university_id: UUID
    slug: str
    name: str
    country: str
    # Presumably a bucket such as reach/target/safety — confirm with the recommender.
    category: str
    rationale: str
    fit_notes: Optional[str] = None
    aid_notes: Optional[str] = None
    funding_source_url: Optional[str] = None
|
apps/api/src/schemas/user.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, EmailStr, Field, field_validator
|
| 2 |
+
from typing import Optional, Any
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from uuid import UUID
|
| 5 |
+
from ..models.user import UserRole
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class UserCreate(BaseModel):
    """Registration payload."""

    email: EmailStr
    password: str = Field(min_length=8)
    full_name: Optional[str] = None
    country: Optional[str] = None

    @field_validator("password")
    @classmethod
    def password_strength(cls, v: str) -> str:
        # NOTE(review): Field(min_length=8) already rejects short passwords before
        # this runs, so the raise below appears unreachable; kept as the hook where
        # real strength rules (digits, symbols, ...) would go.
        if len(v) < 8:
            raise ValueError("Password must be at least 8 characters")
        return v
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class UserLogin(BaseModel):
    """Login payload."""

    email: EmailStr
    password: str
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class UserUpdate(BaseModel):
    """Self-service account update; only mutable profile basics."""

    full_name: Optional[str] = None
    country: Optional[str] = None
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class UserOut(BaseModel):
    """Public API shape of a user account (no credentials)."""

    id: UUID
    email: str
    role: UserRole
    full_name: Optional[str] = None
    country: Optional[str] = None
    created_at: datetime

    model_config = {"from_attributes": True}
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class ProfileCreate(BaseModel):
    """Academic-profile payload; every field is optional so students can fill
    it in incrementally. IELTS bands are strings — presumably to preserve
    half-band values like "7.5" exactly; confirm with the model layer."""

    graduation_year: Optional[int] = None
    curriculum: Optional[str] = None
    intended_major: Optional[str] = None
    # Standardized tests
    sat_score: Optional[int] = None
    sat_math: Optional[int] = None
    sat_ebrw: Optional[int] = None
    act_score: Optional[int] = None
    # English proficiency
    ielts_score: Optional[str] = None
    ielts_listening: Optional[str] = None
    ielts_reading: Optional[str] = None
    ielts_writing: Optional[str] = None
    ielts_speaking: Optional[str] = None
    toefl_score: Optional[int] = None
    toefl_reading: Optional[int] = None
    toefl_listening: Optional[int] = None
    toefl_speaking: Optional[int] = None
    toefl_writing: Optional[int] = None
    duolingo_score: Optional[int] = None
    # Curriculum-specific results
    a_level_subjects: Optional[str] = None
    a_level_predicted: Optional[str] = None
    ap_subjects: Optional[str] = None
    ib_predicted_score: Optional[int] = None
    unt_score: Optional[int] = None
    nis_grade12_certificate_gpa: Optional[str] = None
    # Financial context
    budget_range: Optional[str] = None
    aid_needed: Optional[bool] = None
    application_preferences_json: Optional[dict[str, Any]] = None
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class ProfileUpdate(ProfileCreate):
    """Update payload — identical to ProfileCreate (all fields optional)."""

    pass
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class ProfileOut(BaseModel):
    """API shape of a stored profile: ProfileCreate fields plus identity/timestamps."""

    id: UUID
    user_id: UUID
    graduation_year: Optional[int] = None
    curriculum: Optional[str] = None
    intended_major: Optional[str] = None
    sat_score: Optional[int] = None
    sat_math: Optional[int] = None
    sat_ebrw: Optional[int] = None
    act_score: Optional[int] = None
    ielts_score: Optional[str] = None
    ielts_listening: Optional[str] = None
    ielts_reading: Optional[str] = None
    ielts_writing: Optional[str] = None
    ielts_speaking: Optional[str] = None
    toefl_score: Optional[int] = None
    toefl_reading: Optional[int] = None
    toefl_listening: Optional[int] = None
    toefl_speaking: Optional[int] = None
    toefl_writing: Optional[int] = None
    duolingo_score: Optional[int] = None
    a_level_subjects: Optional[str] = None
    a_level_predicted: Optional[str] = None
    ap_subjects: Optional[str] = None
    ib_predicted_score: Optional[int] = None
    unt_score: Optional[int] = None
    nis_grade12_certificate_gpa: Optional[str] = None
    budget_range: Optional[str] = None
    aid_needed: Optional[bool] = None
    application_preferences_json: Optional[dict[str, Any]] = None
    created_at: datetime
    updated_at: datetime

    model_config = {"from_attributes": True}
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class TokenOut(BaseModel):
    """Auth response: bearer token plus the authenticated user."""

    access_token: str
    token_type: str = "bearer"
    user: UserOut
|
apps/api/src/services/__init__.py
ADDED
|
File without changes
|
apps/api/src/services/achievement_import_service.py
ADDED
|
@@ -0,0 +1,824 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
import re
|
| 4 |
+
from typing import Any, Optional
|
| 5 |
+
|
| 6 |
+
import httpx
|
| 7 |
+
|
| 8 |
+
from ..config import settings
|
| 9 |
+
from ..models.achievement import AchievementType, ImpactScope, LeadershipLevel
|
| 10 |
+
from .counselor_knowledge import CHANCELLOR_COUNSELOR_FRAMEWORK
|
| 11 |
+
|
| 12 |
+
# Hard limits that keep imports bounded.
MAX_IMPORT_BYTES = 10_000_000  # reject uploads over ~10 MB
MAX_IMPORT_CHARS = 80_000  # cap on extracted text length
DEFAULT_WORD_LIMIT = 22  # default per-entry word budget
MAX_IMPORTED_ITEMS = 60  # cap on parsed achievements per import
# Slot counts match Common App limits (10 activities, 5 honors).
MAX_TOP_ACTIVITIES = 10
MAX_TOP_HONORS = 5
# Character caps — presumably mirroring Common App field limits; confirm.
COMMON_APP_ACTIVITY_POSITION_LIMIT = 50
COMMON_APP_ACTIVITY_ORGANIZATION_LIMIT = 100
COMMON_APP_ACTIVITY_DESCRIPTION_LIMIT = 150
COMMON_APP_HONOR_DESCRIPTION_LIMIT = 100
|
| 22 |
+
|
| 23 |
+
# JSON Schema the extraction model's response must conform to. Top level holds
# the overall analysis; "items" holds one entry per parsed achievement.
IMPORT_SCHEMA = {
    "type": "object",
    "properties": {
        "strongest_angle": {"type": "string"},
        "needs_student_clarification": {"type": "boolean"},
        "clarifying_questions": {"type": "array", "items": {"type": "string"}},
        "additional_information_recommended": {"type": "boolean"},
        "additional_information_reason": {"type": "string"},
        "additional_information_draft": {"type": "string"},
        "formatting_notes": {"type": "array", "items": {"type": "string"}},
        "extraction_notes": {"type": "array", "items": {"type": "string"}},
        "items": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "source_index": {"type": "integer"},
                    "type": {"type": "string", "enum": ["activity", "honor"]},
                    "title": {"type": "string"},
                    "organization_name": {"type": ["string", "null"]},
                    "role_title": {"type": ["string", "null"]},
                    "description_raw": {"type": ["string", "null"]},
                    "category": {"type": ["string", "null"]},
                    "hours_per_week": {"type": ["number", "null"]},
                    "weeks_per_year": {"type": ["integer", "null"]},
                    "impact_scope": {"type": ["string", "null"]},
                    "leadership_level": {"type": ["string", "null"]},
                    "truth_risk_flag": {"type": "boolean"},
                    "major_relevance_score": {"type": "number"},
                    "selectivity_score": {"type": "number"},
                    "continuity_score": {"type": "number"},
                    "distinctiveness_score": {"type": "number"},
                    "selection_reason": {"type": "string"},
                    "common_app_text": {"type": "string"},
                    "common_app_position": {"type": ["string", "null"]},
                    "common_app_organization": {"type": ["string", "null"]},
                    "common_app_activity_description": {"type": ["string", "null"]},
                    "common_app_honor_description": {"type": ["string", "null"]},
                    "verification_queries": {"type": "array", "items": {"type": "string"}},
                    "verification_notes": {"type": "array", "items": {"type": "string"}},
                    "missing_or_unclear_facts": {"type": "array", "items": {"type": "string"}},
                    "recommended_rank": {"type": ["integer", "null"]},
                },
                "required": [
                    "source_index",
                    "type",
                    "title",
                    "truth_risk_flag",
                    "major_relevance_score",
                    "selectivity_score",
                    "continuity_score",
                    "distinctiveness_score",
                    "selection_reason",
                    "common_app_text",
                    "common_app_position",
                    "common_app_organization",
                    "common_app_activity_description",
                    "common_app_honor_description",
                    "verification_queries",
                    "verification_notes",
                    "missing_or_unclear_facts",
                    "recommended_rank",
                ],
            },
        },
    },
    "required": [
        "strongest_angle",
        "needs_student_clarification",
        "clarifying_questions",
        "additional_information_recommended",
        "additional_information_reason",
        "additional_information_draft",
        "formatting_notes",
        "extraction_notes",
        "items",
    ],
}
|
| 101 |
+
|
| 102 |
+
# Substrings suggesting an item is an honor/award rather than an activity.
HONOR_KEYWORDS = (
    "award",
    "winner",
    "won",
    "prize",
    "medal",
    "honor",
    "honour",
    "olympiad",
    "scholarship",
    "finalist",
    "laureate",
    "distinction",
    "champion",
)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def _profile_context(user: Optional[Any]) -> dict[str, Any]:
|
| 120 |
+
profile = getattr(user, "profile", None)
|
| 121 |
+
if profile is None:
|
| 122 |
+
return {
|
| 123 |
+
"country": getattr(user, "country", None),
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
return {
|
| 127 |
+
"country": getattr(user, "country", None),
|
| 128 |
+
"graduation_year": getattr(profile, "graduation_year", None),
|
| 129 |
+
"curriculum": getattr(profile, "curriculum", None),
|
| 130 |
+
"intended_major": getattr(profile, "intended_major", None),
|
| 131 |
+
"sat_score": getattr(profile, "sat_score", None),
|
| 132 |
+
"act_score": getattr(profile, "act_score", None),
|
| 133 |
+
"ielts_score": getattr(profile, "ielts_score", None),
|
| 134 |
+
"toefl_score": getattr(profile, "toefl_score", None),
|
| 135 |
+
"application_preferences_json": getattr(profile, "application_preferences_json", None),
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def _compact_whitespace(value: str) -> str:
|
| 140 |
+
return re.sub(r"\s+", " ", value or "").strip()
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def _normalize_student_facing_text(value: str) -> str:
    """Compact whitespace, swap "Republican"-style regional terms for
    "National", and capitalize the first alphabetic character."""
    text = _compact_whitespace(value)
    # These Kazakh/Russian-derived terms read better for US admissions readers.
    for pattern in (r"\bRepublican\b", r"\bRespublikalyk\b", r"\bRespublikanskiy\b"):
        text = re.sub(pattern, "National", text, flags=re.IGNORECASE)
    first_alpha = next((i for i, ch in enumerate(text) if ch.isalpha()), None)
    if first_alpha is not None:
        text = text[:first_alpha] + text[first_alpha].upper() + text[first_alpha + 1:]
    return text
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _preserve_source_structure(value: str) -> str:
|
| 156 |
+
lines = []
|
| 157 |
+
for line in (value or "").replace("\r\n", "\n").replace("\r", "\n").split("\n"):
|
| 158 |
+
cleaned = re.sub(r"[ \t\f\v]+", " ", line).strip()
|
| 159 |
+
if cleaned:
|
| 160 |
+
lines.append(cleaned)
|
| 161 |
+
elif lines and lines[-1] != "":
|
| 162 |
+
lines.append("")
|
| 163 |
+
return re.sub(r"\n{3,}", "\n\n", "\n".join(lines)).strip()
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def _source_excerpts(raw_text: str, *, max_items: int = 8, max_chars: int = 240) -> list[str]:
    """Pick up to max_items high-signal snippets from raw_text.

    Chunks are split on blank-line gaps or bullet markers, scored by keyword
    hits (2 points each) plus 1 point for containing any digit, and returned
    best-first with ties broken by original order.
    """
    scored: list[tuple[int, int, str]] = []
    keywords = (
        "award",
        "winner",
        "honor",
        "activities",
        "president",
        "captain",
        "founder",
        "research",
        "olympiad",
        "competition",
        "volunteer",
        "raised",
        "published",
        "selected",
        "national",
        "international",
        "hr/wk",
        "wk/yr",
    )
    # Split on newline runs or bullet characters (•, *).
    for chunk in re.split(r"(?:\n\s*){1,}|(?:\s*[•\u2022*]\s+)", raw_text):
        text = _compact_whitespace(chunk)
        if len(text) < 18:  # fragments this short are rarely informative
            continue
        lower = text.lower()
        score = sum(2 for keyword in keywords if keyword in lower)
        if any(char.isdigit() for char in text):
            score += 1
        # len(scored) acts as the insertion index for stable tie-breaking.
        scored.append((score, len(scored), _truncate_characters(text, max_chars)))
    scored.sort(key=lambda item: (-item[0], item[1]))
    return [text for _, _, text in scored[:max_items]]
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def _count_words(value: str) -> int:
|
| 202 |
+
return len(re.findall(r"\b[\w'-]+\b", value))
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def _truncate_characters(value: str, limit: int) -> str:
|
| 206 |
+
if len(value) <= limit:
|
| 207 |
+
return value
|
| 208 |
+
truncated = value[:limit]
|
| 209 |
+
last_space = truncated.rfind(" ")
|
| 210 |
+
if last_space > max(0, limit - 24):
|
| 211 |
+
truncated = truncated[:last_space]
|
| 212 |
+
return truncated.rstrip(",.;: ")
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def _clean_string_list(value: Any, *, max_items: int = 6, max_chars: int = 260) -> list[str]:
    """Coerce an arbitrary value into a bounded list of compact strings.

    Non-list input yields []. Falsy items (None, 0, "") are dropped; each kept
    item is whitespace-compacted and truncated to max_chars; at most max_items
    strings are returned.
    """
    if not isinstance(value, list):
        return []

    strings: list[str] = []
    for item in value:
        text = _compact_whitespace(str(item or ""))
        if text:
            strings.append(_truncate_characters(text, max_chars))
        if len(strings) >= max_items:
            break
    return strings
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def _enforce_common_app_limit(value: str, word_limit: int, achievement_type: str) -> str:
    """Trim text to the word limit, then to the Common App character cap
    for its achievement type (150 chars for activities, 100 for honors)."""
    words = _compact_whitespace(value).split()
    # word_limit <= 0 disables word trimming.
    if word_limit > 0 and len(words) > word_limit:
        value = " ".join(words[:word_limit])
    if achievement_type == AchievementType.activity.value:
        value = _truncate_characters(value, COMMON_APP_ACTIVITY_DESCRIPTION_LIMIT)
    if achievement_type == AchievementType.honor.value:
        value = _truncate_characters(value, COMMON_APP_HONOR_DESCRIPTION_LIMIT)
    return _compact_whitespace(value)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def _activity_position(item: dict[str, Any]) -> str:
    """Position/role string for a Common App activity, capped at 50 chars.

    Falls back from the model-suggested position to role_title, then title.
    """
    value = _compact_whitespace(str(item.get("common_app_position") or item.get("role_title") or item.get("title") or ""))
    return _truncate_characters(value, COMMON_APP_ACTIVITY_POSITION_LIMIT)
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def _activity_organization(item: dict[str, Any]) -> str:
    """Organization string for a Common App activity, capped at 100 chars."""
    value = _compact_whitespace(str(item.get("common_app_organization") or item.get("organization_name") or ""))
    return _truncate_characters(value, COMMON_APP_ACTIVITY_ORGANIZATION_LIMIT)
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def _activity_description(item: dict[str, Any], word_limit: int) -> str:
    """Activity description under word and Common App character limits.

    Prefers the model's dedicated description, then the generic Common App
    text, then the raw description; if all are empty, a fallback is built.
    """
    value = _compact_whitespace(
        str(item.get("common_app_activity_description") or item.get("common_app_text") or item.get("description_raw") or "")
    )
    if not value:
        # _fallback_common_app_text is defined elsewhere in this module — not
        # visible here; presumably synthesizes text from the item's fields.
        value = _fallback_common_app_text(item, word_limit)
    return _enforce_common_app_limit(value, word_limit, AchievementType.activity.value)
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def _honor_description(item: dict[str, Any], word_limit: int) -> str:
    """Honor description under word and Common App character limits.

    Note: unlike _activity_description, an empty result gets no fallback text.
    """
    value = _compact_whitespace(
        str(item.get("common_app_honor_description") or item.get("common_app_text") or item.get("title") or "")
    )
    return _enforce_common_app_limit(value, word_limit, AchievementType.honor.value)
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
def _coerce_enum(enum_cls: Any, value: Any) -> Any:
|
| 267 |
+
if value in (None, "", "null"):
|
| 268 |
+
return None
|
| 269 |
+
try:
|
| 270 |
+
return enum_cls(value)
|
| 271 |
+
except ValueError:
|
| 272 |
+
return None
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def _clamp_score(value: Any) -> float:
|
| 276 |
+
try:
|
| 277 |
+
return round(max(0.0, min(10.0, float(value))), 1)
|
| 278 |
+
except (TypeError, ValueError):
|
| 279 |
+
return 5.0
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def _extract_pdf_text(raw_bytes: bytes) -> str:
    """Extract text from PDF bytes using pdfplumber (imported lazily).

    Raises ValueError with an install hint if pdfplumber is unavailable.
    Pages with no extractable text are skipped.
    """
    import io

    try:
        import pdfplumber
    except ImportError:
        raise ValueError("PDF support requires pdfplumber. Run: pip install pdfplumber")

    pages_text: list[str] = []
    with pdfplumber.open(io.BytesIO(raw_bytes)) as pdf:
        for page in pdf.pages:
            page_text = page.extract_text() or ""
            if page_text.strip():
                pages_text.append(page_text)
    return "\n".join(pages_text)
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def _extract_docx_text(raw_bytes: bytes) -> str:
    """Extract paragraph text from DOCX bytes using python-docx (lazy import).

    Raises ValueError with an install hint if python-docx is unavailable.
    Empty paragraphs are dropped.
    """
    import io

    try:
        from docx import Document
    except ImportError:
        raise ValueError("DOCX support requires python-docx. Run: pip install python-docx")

    doc = Document(io.BytesIO(raw_bytes))
    paragraphs = [p.text for p in doc.paragraphs if p.text.strip()]
    return "\n".join(paragraphs)
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def decode_import_file(file_name: str, raw_bytes: bytes) -> str:
    """Decode an uploaded achievements file into normalized text.

    Dispatches on the file extension: PDF and DOCX go through dedicated
    extractors, everything else is decoded with a list of candidate encodings.
    Raises ValueError for oversized, unsupported-extension, unreadable, or
    empty uploads. The result is capped at MAX_IMPORT_CHARS characters.
    """
    if len(raw_bytes) > MAX_IMPORT_BYTES:
        raise ValueError("File is too large for import. Keep it under 10 MB.")

    extension = os.path.splitext(file_name or "")[1].lower()
    if extension and extension not in {".txt", ".md", ".csv", ".json", ".pdf", ".docx"}:
        raise ValueError(
            "Import supports .txt, .md, .csv, .json, .pdf, and .docx files."
        )

    if extension == ".pdf":
        text = _extract_pdf_text(raw_bytes)
    elif extension == ".docx":
        text = _extract_docx_text(raw_bytes)
    else:
        # NOTE: latin-1 decodes any byte sequence, so this loop succeeds in
        # practice; the failure branch is defensive.
        text = None
        for encoding in ("utf-8", "utf-8-sig", "utf-16", "cp1251", "latin-1"):
            try:
                text = raw_bytes.decode(encoding)
            except UnicodeDecodeError:
                continue
            break
        if text is None:
            raise ValueError("Could not read the uploaded file as text.")

    text = _preserve_source_structure(text)
    if not text:
        raise ValueError("The uploaded file is empty.")
    return text[:MAX_IMPORT_CHARS]
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _fallback_title(line: str) -> str:
    """Derive a short title from a raw line.

    Takes everything before the first '.', ';', ':', or '-', trims it, and caps
    it at 120 characters; blank results become "Untitled achievement".
    """
    compacted = _compact_whitespace(line)
    match = re.search(r"[.;:-]", compacted)
    head = compacted[: match.start()] if match else compacted
    return head.strip()[:120] or "Untitled achievement"
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def _fallback_type(line: str) -> AchievementType:
    """Classify a raw line as an honor when it mentions any honor keyword; otherwise an activity."""
    lowered = line.lower()
    for keyword in HONOR_KEYWORDS:
        if keyword in lowered:
            return AchievementType.honor
    return AchievementType.activity
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def _fallback_common_app_text(item: dict[str, Any], word_limit: int) -> str:
    """Synthesize Common App wording from role, organization, and raw description.

    Falls back to the item title when all three fields are empty, then clamps
    the result to the Common App limit for the item's type.
    """
    fragments = (
        item.get(key) for key in ("role_title", "organization_name", "description_raw")
    )
    value = _compact_whitespace(". ".join(part for part in fragments if part))
    if not value:
        value = item["title"]
    return _enforce_common_app_limit(value, word_limit, item["type"])
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def _fallback_parse(raw_text: str, user: Optional[Any], word_limit: int) -> dict[str, Any]:
    """Heuristic, AI-free extraction used when Gemini is unavailable.

    Splits the raw text on newline runs and bullet markers, treats each
    non-empty chunk as one achievement candidate, scores it locally, and
    returns a result dict shaped like the Gemini response so downstream
    normalization works unchanged. Always flags the result for student
    clarification since no model verified the wording.
    """
    # Imported lazily — presumably to avoid a circular import between the two
    # service modules (TODO confirm against chancellor_analysis's imports).
    from .chancellor_analysis import _heuristic_scores

    # One candidate per newline run or bullet ("-", "*", "•"); empties dropped.
    lines = [
        _compact_whitespace(line)
        for line in re.split(r"(?:\r?\n)+|(?:\s*[-*•]\s+)", raw_text)
        if _compact_whitespace(line)
    ]

    items: list[dict[str, Any]] = []
    for index, line in enumerate(lines[:MAX_IMPORTED_ITEMS], start=1):
        item_type = _fallback_type(line)
        # Structured fields default to None/False: the heuristic path never
        # invents facts that are not literally present in the line.
        base_item = {
            "source_index": index,
            "type": item_type.value,
            "title": _fallback_title(line),
            "organization_name": None,
            "role_title": None,
            "description_raw": line,
            "category": None,
            "hours_per_week": None,
            "weeks_per_year": None,
            "impact_scope": None,
            "leadership_level": None,
            "truth_risk_flag": False,
            "selection_reason": "Selected by heuristic fallback because AI extraction was unavailable.",
        }
        scores = _heuristic_scores(base_item, user)
        item = {
            **base_item,
            **scores,
            "common_app_text": "",
            "common_app_position": None,
            "common_app_organization": None,
            "common_app_activity_description": None,
            "common_app_honor_description": None,
            "verification_queries": [],
            "verification_notes": [],
            "missing_or_unclear_facts": ["AI extraction was unavailable; verify the wording against the original evidence."],
            "recommended_rank": None,
        }
        # Wording is built after the merge so it can read type/title/scores.
        item["common_app_text"] = _fallback_common_app_text(item, word_limit)
        items.append(item)

    strongest_angle = (
        "Present the profile as a focused, evidence-backed student story with the strongest sustained work first."
    )
    # Mirror the Gemini response schema exactly so _normalize_items can
    # consume either source interchangeably.
    return {
        "strongest_angle": strongest_angle,
        "needs_student_clarification": True,
        "clarifying_questions": [
            "Please confirm titles, dates, roles, and measurable outcomes before using the generated Common App wording."
        ],
        "additional_information_recommended": False,
        "additional_information_reason": "",
        "additional_information_draft": "",
        "formatting_notes": ["Gemini extraction was unavailable, so ApplyMap used a conservative local fallback."],
        "extraction_notes": [
            f"Local fallback split the source into {len(items)} candidate achievement lines."
        ],
        "items": items,
    }
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def _import_prompt(
    raw_text: str,
    user: Optional[Any],
    word_limit: int,
    clarification_answers: Optional[dict[str, str]] = None,
) -> str:
    """Build the full Gemini prompt for achievement-import extraction.

    Bundles the student profile, Common App character limits, source excerpts,
    any clarification answers, and the raw source text into a JSON payload,
    then prepends the counselor framework plus detailed task instructions and
    anti-hallucination constraints.
    """
    # Everything the model needs is embedded as JSON at the end of the prompt.
    payload = {
        "student_profile": _profile_context(user),
        "word_limit": word_limit,
        "common_app_limits": {
            "activities_max_items": MAX_TOP_ACTIVITIES,
            "activity_position_leadership_description_chars": COMMON_APP_ACTIVITY_POSITION_LIMIT,
            "activity_organization_name_chars": COMMON_APP_ACTIVITY_ORGANIZATION_LIMIT,
            "activity_description_chars": COMMON_APP_ACTIVITY_DESCRIPTION_LIMIT,
            "honors_max_items": MAX_TOP_HONORS,
            "honor_title_description_chars": COMMON_APP_HONOR_DESCRIPTION_LIMIT,
        },
        "source_excerpts": _source_excerpts(raw_text),
        "student_clarification_answers": clarification_answers or {},
        "raw_source_text": raw_text,
    }
    # The prompt text below is part of the model contract — do not reword
    # casually; numbered tasks map to fields enforced by IMPORT_SCHEMA.
    return (
        "You are ApplyMap Chancellor, helping an international student convert a messy mixed-achievement note file "
        "into a clean, factual application-ready shortlist.\n\n"
        f"{CHANCELLOR_COUNSELOR_FRAMEWORK}\n\n"
        "Tasks:\n"
        "1. Extract every distinct student achievement from the raw text before ranking. Merge only true duplicates.\n"
        "2. Classify each item as either 'activity' or 'honor'.\n"
        "3. Fill structured fields conservatively. If a field is missing, use null instead of inventing facts.\n"
        "4. Score each item from 0 to 10 on major_relevance_score, selectivity_score, continuity_score, and distinctiveness_score.\n"
        "5. Recommend the strongest top 10 activities and top 5 academic honors for a Common App-style application. Use recommended_rank "
        "for selected items and null for the rest.\n"
        "6. For activities, fill separate Common App fields: common_app_position <= 50 characters, "
        "common_app_organization <= 100 characters, and common_app_activity_description <= 150 characters. "
        "Use the activity description for accomplishments and measurable impact, not role repetition.\n"
        "7. For honors, fill common_app_honor_description as one title/description block <= 100 characters.\n"
        "8. strongest_angle must explain the single best overall application angle in one sentence.\n"
        "9. If there are inconsistencies in years, roles, award level, school grade, hours, or metrics, set "
        "needs_student_clarification=true and write short clarifying_questions before the student should trust final wording.\n"
        "10. Recommend Additional Information only when it is genuinely needed to clarify important context that cannot fit "
        "in the activity/honor fields, unusual school/curriculum context, or multiple related awards. If recommended, write a "
        "ready-to-paste concise additional_information_draft; otherwise leave it blank.\n\n"
        "11. If student_clarification_answers includes an answer to a missing detail, use that answer to improve the fields, "
        "scores, ranking, and Common App wording. Do not keep asking the same question unless the answer is still unclear.\n\n"
        "Important constraints:\n"
        "- Do not invent achievements, outcomes, metrics, organizations, dates, leadership roles, or awards.\n"
        "- Output all student-facing fields in polished English even when the source is Russian, Kazakh, or mixed-language.\n"
        "- Fix lowercase or informal source phrasing into proper English capitalization and grammar.\n"
        "- Preserve years, date ranges, school grade, event names, number of students served, placements, and supported metrics. "
        "Do not remove these facts just to make the sentence shorter.\n"
        "- Never replace a concrete source detail with a guessed or more impressive detail. If the source says gift cards, "
        "lessons, mentoring, or another specific activity, translate that detail directly; do not invent tournaments, "
        "research, publications, awards, or program names.\n"
        "- Translate Kazakhstan award level words like Republican/Respublikalyk/Respublikanskiy as National unless the "
        "official English title clearly uses Republican.\n"
        "- If the source says the student mentored five 8th graders as an 11th grader in 2024-2025 and organized events, "
        "keep those facts in concise English instead of reducing the entry to a generic mentoring sentence.\n"
        "- If the source sounds uncertain or inflated, set truth_risk_flag to true.\n"
        "- Prefer concrete, specific language over hype.\n"
        "- Preserve Kazakhstan/NIS/IB/A-Level context when present.\n"
        "- Treat MESK written in Russian or Kazakh as NIS Grade 12 Certificate in English output.\n"
        "- For Korean university targets, do not assume Common App. Korea entries may need Study in Korea, KAIST Apply, "
        "UwayApply, JinhakApply, or a university-specific format. Mark Korea-specific wording as application-ready, and "
        "ask for the target portal if the limit is unclear.\n"
        "- Apply the College Essay Guy-style approach: active verbs, measurable impact, no filler, no repeated role wording, "
        "selectivity where supported, and abbreviations only when they improve clarity.\n"
        "- If the student omits participant counts, selection rates, dates, or exact award level, add those to "
        "missing_or_unclear_facts and propose verification_queries. Do not fabricate counts.\n"
        "- The shortlist should reward spike, depth, selectivity, continuity, and distinctive impact.\n"
        "- A weak selected item is worse than leaving a slot empty. Only rank items that are truly shortlist-worthy.\n\n"
        f"Input JSON:\n{json.dumps(payload, ensure_ascii=False, default=str)}"
    )
|
| 505 |
+
|
| 506 |
+
|
| 507 |
+
def _extract_gemini_text(response_payload: dict[str, Any]) -> str:
|
| 508 |
+
candidates = response_payload.get("candidates") or []
|
| 509 |
+
content = candidates[0].get("content") if candidates else {}
|
| 510 |
+
parts = content.get("parts") or []
|
| 511 |
+
return str(parts[0].get("text", "")) if parts else ""
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def _gemini_parse(
    raw_text: str,
    user: Optional[Any],
    word_limit: int,
    clarification_answers: Optional[dict[str, str]] = None,
) -> Optional[dict[str, Any]]:
    """Ask Gemini to extract structured achievements; return the parsed JSON or None.

    Returns None when no API key is configured, on any HTTP or JSON failure,
    or when the response is not a dict carrying an "items" list — callers fall
    back to the local heuristic parser in every None case.
    """
    api_key = settings.GEMINI_API_KEY.strip()
    if not api_key:
        return None

    model_name = (settings.GEMINI_MODEL or "gemini-2.5-flash").strip()
    endpoint = f"https://generativelanguage.googleapis.com/v1beta/models/{model_name}:generateContent"
    prompt = _import_prompt(raw_text, user, word_limit, clarification_answers)
    request_body = {
        "contents": [{"parts": [{"text": prompt}]}],
        "generationConfig": {
            # Low temperature + a response schema keep the extraction factual
            # and machine-parseable.
            "temperature": 0.1,
            "maxOutputTokens": 30000,
            "responseMimeType": "application/json",
            "responseJsonSchema": IMPORT_SCHEMA,
        },
    }

    try:
        with httpx.Client(timeout=90.0) as client:
            response = client.post(
                endpoint,
                headers={
                    "x-goog-api-key": api_key,
                    "Content-Type": "application/json",
                },
                json=request_body,
            )
            response.raise_for_status()
            parsed = json.loads(_extract_gemini_text(response.json()))
    except (httpx.HTTPError, json.JSONDecodeError, KeyError, TypeError, ValueError):
        return None

    if isinstance(parsed, dict) and isinstance(parsed.get("items"), list):
        return parsed
    return None
|
| 554 |
+
|
| 555 |
+
|
| 556 |
+
def _local_score(item: dict[str, Any]) -> float:
|
| 557 |
+
return (
|
| 558 |
+
float(item.get("major_relevance_score") or 0)
|
| 559 |
+
+ float(item.get("selectivity_score") or 0)
|
| 560 |
+
+ float(item.get("continuity_score") or 0)
|
| 561 |
+
+ float(item.get("distinctiveness_score") or 0)
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
class SearchNotConfiguredError(RuntimeError):
    """Raised when Google Custom Search credentials are missing from settings."""
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def _google_search(query: str, *, num: int = 3) -> list[dict[str, str]]:
    """Run a Google Custom Search and return compacted title/url/snippet dicts.

    Raises SearchNotConfiguredError when the API key or engine id is not set;
    results without a link are dropped.
    """
    api_key = settings.GOOGLE_SEARCH_API_KEY.strip()
    engine_id = settings.GOOGLE_SEARCH_ENGINE_ID.strip()
    if not (api_key and engine_id):
        raise SearchNotConfiguredError("Google Custom Search is not configured")

    query_params = {
        "key": api_key,
        "cx": engine_id,
        "q": query,
        "num": num,
        "safe": "active",
        "hl": "en",
    }
    with httpx.Client(timeout=5.0) as client:
        response = client.get(
            "https://www.googleapis.com/customsearch/v1",
            params=query_params,
        )
        response.raise_for_status()
        raw_items = response.json().get("items") or []

    formatted: list[dict[str, str]] = []
    for entry in raw_items:
        if not entry.get("link"):
            continue
        formatted.append(
            {
                "title": _compact_whitespace(str(entry.get("title") or "")),
                "url": str(entry.get("link") or ""),
                "snippet": _compact_whitespace(str(entry.get("snippet") or "")),
            }
        )
    return formatted
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
def _default_verification_query(item: dict[str, Any], user: Optional[Any]) -> str:
    """Build a default Google query from the quoted title/organization, the student's country, and fixed keywords."""
    country = _compact_whitespace(str(getattr(user, "country", "") or ""))
    pieces: list[str] = []
    title = item.get("title")
    if title:
        pieces.append(f'"{title}"')
    organization = item.get("organization_name")
    if organization:
        pieces.append(f'"{organization}"')
    if country:
        pieces.append(country)
    pieces.append("participants results award official")
    return _compact_whitespace(" ".join(pieces))
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
def _format_search_note(result: dict[str, str]) -> str:
    """Render one search result as a single truncated evidence note."""
    title = _truncate_characters(result.get("title") or "Search result", 80)
    snippet = _truncate_characters(result.get("snippet") or "No snippet available", 150)
    url = result.get("url") or ""
    note = f"Source candidate: {title} - {snippet} ({url})"
    return _truncate_characters(note, 300)
|
| 617 |
+
|
| 618 |
+
|
| 619 |
+
def _attach_google_verification(items: list[dict[str, Any]], user: Optional[Any]) -> list[str]:
    """Run one Google query per item and attach evidence notes in place.

    Mutates each item's "verification_notes" and "missing_or_unclear_facts"
    lists. Returns human-readable progress notes; when search is unconfigured
    or a request fails mid-run, returns a single explanatory note instead and
    leaves the remaining items unchecked.
    """
    if not settings.GOOGLE_SEARCH_API_KEY.strip() or not settings.GOOGLE_SEARCH_ENGINE_ID.strip():
        return [
            "Google Search is not configured. Set GOOGLE_SEARCH_API_KEY and GOOGLE_SEARCH_ENGINE_ID to verify achievements online."
        ]

    notes: list[str] = []
    for item in items:
        # Prefer the model-suggested query; otherwise synthesize one from the
        # item's title/organization and the student's country.
        query = (item.get("verification_queries") or [_default_verification_query(item, user)])[0]
        if not query:
            item.setdefault("missing_or_unclear_facts", []).append("No searchable title or organization was available.")
            continue
        try:
            search_results = _google_search(query, num=3)
        except (SearchNotConfiguredError, httpx.HTTPError):
            # Abort the whole pass on the first failure: any notes collected
            # so far are discarded in favor of one actionable message.
            return [
                "Google verification is currently unavailable. Ask the student for official links, certificates, or organizer pages for unsupported claims."
            ]

        if not search_results:
            item.setdefault("missing_or_unclear_facts", []).append(
                "No Google result found for this item; ask the student for an official source or certificate."
            )
            continue

        # Cap evidence to the top three results per item.
        item.setdefault("verification_notes", []).extend(_format_search_note(result) for result in search_results[:3])
        notes.append(f"Checked Google for: {query}")

    return notes
|
| 648 |
+
|
| 649 |
+
|
| 650 |
+
def _normalize_items(result: dict[str, Any], word_limit: int) -> dict[str, Any]:
    """Sanitize a parsed import result (Gemini or fallback) into the canonical shape.

    Drops items without a usable title, coerces enums and scores into valid
    ranges, normalizes all student-facing text, enforces Common App character
    limits, and makes the per-type field split consistent: activities get
    position/organization/description fields, honors get a single honor line.
    Top-level strings and lists are compacted and length-capped as well.
    """
    normalized_items: list[dict[str, Any]] = []

    for index, raw_item in enumerate(result.get("items") or [], start=1):
        title = _normalize_student_facing_text(str(raw_item.get("title") or ""))
        # A title is the minimum viable item; skip anything without one.
        if not title:
            continue

        # Unknown/invalid type strings default to "activity".
        item_type = _coerce_enum(AchievementType, raw_item.get("type")) or AchievementType.activity
        normalized = {
            "source_index": int(raw_item.get("source_index") or index),
            "type": item_type.value,
            "title": title[:500],
            "organization_name": _normalize_student_facing_text(str(raw_item.get("organization_name") or "")) or None,
            "role_title": _normalize_student_facing_text(str(raw_item.get("role_title") or "")) or None,
            "description_raw": _normalize_student_facing_text(str(raw_item.get("description_raw") or "")) or None,
            "category": _normalize_student_facing_text(str(raw_item.get("category") or "")) or None,
            "hours_per_week": raw_item.get("hours_per_week"),
            "weeks_per_year": raw_item.get("weeks_per_year"),
            "impact_scope": (_coerce_enum(ImpactScope, raw_item.get("impact_scope")) or None),
            "leadership_level": (_coerce_enum(LeadershipLevel, raw_item.get("leadership_level")) or None),
            "truth_risk_flag": bool(raw_item.get("truth_risk_flag")),
            # Scores are clamped to [0, 10]; invalid values become 5.0.
            "major_relevance_score": _clamp_score(raw_item.get("major_relevance_score")),
            "selectivity_score": _clamp_score(raw_item.get("selectivity_score")),
            "continuity_score": _clamp_score(raw_item.get("continuity_score")),
            "distinctiveness_score": _clamp_score(raw_item.get("distinctiveness_score")),
            "selection_reason": _normalize_student_facing_text(str(raw_item.get("selection_reason") or "")),
            "common_app_text": _enforce_common_app_limit(
                _normalize_student_facing_text(str(raw_item.get("common_app_text") or "")),
                word_limit,
                item_type.value,
            ),
            "common_app_position": _normalize_student_facing_text(str(raw_item.get("common_app_position") or "")) or None,
            "common_app_organization": _normalize_student_facing_text(str(raw_item.get("common_app_organization") or "")) or None,
            "common_app_activity_description": _normalize_student_facing_text(
                str(raw_item.get("common_app_activity_description") or "")
            )
            or None,
            "common_app_honor_description": _normalize_student_facing_text(str(raw_item.get("common_app_honor_description") or ""))
            or None,
            "verification_queries": _clean_string_list(raw_item.get("verification_queries"), max_items=3, max_chars=160),
            "verification_notes": _clean_string_list(raw_item.get("verification_notes"), max_items=5, max_chars=300),
            "missing_or_unclear_facts": _clean_string_list(
                raw_item.get("missing_or_unclear_facts"), max_items=6, max_chars=180
            ),
            "recommended_rank": raw_item.get("recommended_rank"),
        }
        if not normalized["common_app_text"]:
            normalized["common_app_text"] = _fallback_common_app_text(normalized, word_limit)
        # Enforce a clean per-type field split: the opposite type's fields are
        # explicitly nulled so downstream rendering never mixes them.
        if item_type == AchievementType.activity:
            normalized["common_app_position"] = _activity_position(normalized)
            normalized["common_app_organization"] = _activity_organization(normalized) or None
            normalized["common_app_activity_description"] = _activity_description(normalized, word_limit)
            normalized["common_app_text"] = normalized["common_app_activity_description"]
            normalized["common_app_honor_description"] = None
        else:
            normalized["common_app_honor_description"] = _honor_description(normalized, word_limit)
            normalized["common_app_text"] = normalized["common_app_honor_description"]
            normalized["common_app_position"] = None
            normalized["common_app_organization"] = None
            normalized["common_app_activity_description"] = None
        normalized_items.append(normalized)

    return {
        "strongest_angle": _compact_whitespace(str(result.get("strongest_angle") or "")),
        "needs_student_clarification": bool(result.get("needs_student_clarification")),
        "clarifying_questions": _clean_string_list(result.get("clarifying_questions"), max_items=8, max_chars=220),
        "additional_information_recommended": bool(result.get("additional_information_recommended")),
        "additional_information_reason": _compact_whitespace(str(result.get("additional_information_reason") or "")),
        "additional_information_draft": _compact_whitespace(str(result.get("additional_information_draft") or "")),
        "formatting_notes": _clean_string_list(result.get("formatting_notes"), max_items=8, max_chars=220),
        "extraction_notes": _clean_string_list(result.get("extraction_notes"), max_items=8, max_chars=220),
        "items": normalized_items,
    }
|
| 724 |
+
|
| 725 |
+
|
| 726 |
+
def parse_achievement_import(
    raw_text: str,
    user: Optional[Any],
    word_limit: int,
    clarification_answers: Optional[dict[str, str]] = None,
) -> dict[str, Any]:
    """End-to-end import pipeline: extract, normalize, rank, verify, and report.

    Tries Gemini extraction first and falls back to local heuristics. After
    normalization, items are split into activities and honors, ranked by the
    model's recommended_rank (local scores break ties and fill gaps), capped
    at the Common App maxima, optionally verified via Google Search, and
    returned with human-readable processing steps describing each stage.
    """
    parsed = _gemini_parse(raw_text, user, word_limit, clarification_answers)
    used_gemini = parsed is not None
    if parsed is None:
        parsed = _fallback_parse(raw_text, user, word_limit)
    normalized = _normalize_items(parsed, word_limit)
    items = normalized["items"]

    activities = [item for item in items if item["type"] == AchievementType.activity.value]
    honors = [item for item in items if item["type"] == AchievementType.honor.value]

    # Ranked items first (ascending rank), unranked items last ordered by
    # descending local score.
    ranked_activities = sorted(
        activities,
        key=lambda item: (
            item.get("recommended_rank") is None,
            item.get("recommended_rank") or 99,
            -_local_score(item),
        ),
    )
    ranked_honors = sorted(
        honors,
        key=lambda item: (
            item.get("recommended_rank") is None,
            item.get("recommended_rank") or 99,
            -_local_score(item),
        ),
    )

    # If the model ranked nothing, assign ranks locally from the score sums.
    if not any(item.get("recommended_rank") for item in ranked_activities):
        for rank, item in enumerate(sorted(activities, key=_local_score, reverse=True)[:MAX_TOP_ACTIVITIES], start=1):
            item["recommended_rank"] = rank
        ranked_activities = sorted(activities, key=lambda item: item.get("recommended_rank") or 99)

    if not any(item.get("recommended_rank") for item in ranked_honors):
        for rank, item in enumerate(sorted(honors, key=_local_score, reverse=True)[:MAX_TOP_HONORS], start=1):
            item["recommended_rank"] = rank
        ranked_honors = sorted(honors, key=lambda item: item.get("recommended_rank") or 99)

    normalized["strongest_angle"] = normalized["strongest_angle"] or (
        "Lead with the most selective, sustained, and distinctive work, then support it with the strongest honors."
    )
    # Shortlists keep only in-range ranks and are hard-capped at the Common
    # App maxima even if the model over-ranked.
    normalized["top_activities"] = [
        item
        for item in ranked_activities
        if item.get("recommended_rank") and item["recommended_rank"] <= MAX_TOP_ACTIVITIES
    ][:MAX_TOP_ACTIVITIES]
    normalized["top_honors"] = [
        item for item in ranked_honors if item.get("recommended_rank") and item["recommended_rank"] <= MAX_TOP_HONORS
    ][:MAX_TOP_HONORS]
    # Only shortlisted items are verified online; notes are mutated in place.
    verification_notes = _attach_google_verification(
        [*normalized["top_activities"], *normalized["top_honors"]],
        user,
    )
    normalized["formatting_notes"].extend(verification_notes)
    normalized["source_excerpts"] = _source_excerpts(raw_text)
    # Student-facing progress report of what the pipeline actually did.
    normalized["processing_steps"] = [
        {
            "key": "read_file",
            "label": "Read uploaded file",
            "status": "complete",
            "detail": f"Extracted {len(raw_text):,} characters while preserving line breaks and bullet structure.",
        },
        {
            "key": "extract_candidates",
            "label": "Extract achievement candidates",
            "status": "complete",
            "detail": f"Built {len(items)} structured candidates from the source text.",
        },
        {
            "key": "rank_shortlist",
            "label": "Rank activities and honors",
            "status": "complete",
            "detail": f"Selected {len(normalized['top_activities'])} activities and {len(normalized['top_honors'])} honors for Common App.",
        },
        {
            "key": "format_common_app",
            "label": "Format Common App fields",
            "status": "complete",
            "detail": "Enforced 50/100/150-character activity fields and 100-character honor lines.",
        },
        {
            "key": "verify_claims",
            "label": "Check uncertainty and verification needs",
            "status": "complete",
            "detail": "Generated clarification questions and verification notes for unsupported claims.",
        },
        {
            "key": "ai_engine",
            "label": "AI extraction engine",
            "status": "complete",
            "detail": "Gemini returned structured JSON." if used_gemini else "Gemini was unavailable or returned invalid JSON; used local fallback.",
        },
    ]
    return normalized
|
apps/api/src/services/auth_service.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime, timedelta, timezone
from typing import Optional
from uuid import UUID

from jose import JWTError, jwt
from passlib.context import CryptContext
from sqlalchemy.orm import Session

from ..config import settings
from ..models.user import User, StudentProfile
|
| 10 |
+
|
| 11 |
+
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check a plaintext password against its stored bcrypt hash."""
    matches = pwd_context.verify(plain_password, hashed_password)
    return matches
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_password_hash(password: str) -> str:
    """Hash a plaintext password with the configured bcrypt context."""
    hashed = pwd_context.hash(password)
    return hashed
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a signed JWT whose "exp" claim is now (UTC) plus the token lifetime.

    Uses ``expires_delta`` when given, otherwise falls back to
    settings.ACCESS_TOKEN_EXPIRE_MINUTES. The input claims dict is copied,
    never mutated.
    """
    to_encode = data.copy()
    # datetime.utcnow() is deprecated (Python 3.12) and returns a naive
    # datetime; an aware UTC timestamp makes the "exp" claim unambiguous.
    now = datetime.now(timezone.utc)
    if expires_delta:
        expire = now + expires_delta
    else:
        expire = now + timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
    to_encode.update({"exp": expire})
    return jwt.encode(to_encode, settings.SECRET_KEY, algorithm=settings.ALGORITHM)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def decode_token(token: str) -> Optional[dict]:
    """Decode and validate a JWT, returning its claims dict, or None when invalid or expired."""
    try:
        return jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM])
    except JWTError:
        return None
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def get_user_by_email(db: Session, email: str) -> Optional[User]:
    """Look up a user by exact email match; None when no such user exists."""
    query = db.query(User).filter(User.email == email)
    return query.first()
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def get_user_by_id(db: Session, user_id: UUID) -> Optional[User]:
    """Look up a user by primary-key UUID; None when no such user exists."""
    query = db.query(User).filter(User.id == user_id)
    return query.first()
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def create_user(db: Session, email: str, password: str, full_name: Optional[str] = None, country: Optional[str] = None) -> User:
    """Create a user plus an empty student profile in one committed transaction.

    The password is bcrypt-hashed before storage. Returns the refreshed User
    instance. Note: no duplicate-email pre-check here — a unique constraint
    violation would surface from the database on commit (TODO confirm the
    schema enforces uniqueness).
    """
    hashed = get_password_hash(password)
    user = User(
        email=email,
        password_hash=hashed,
        full_name=full_name,
        country=country,
    )
    db.add(user)
    # Flush (not commit) so the database assigns user.id, which the profile
    # row needs below, while keeping both inserts in the same transaction.
    db.flush()

    # Every new user starts with an empty student profile.
    profile = StudentProfile(user_id=user.id)
    db.add(profile)

    db.commit()
    # Refresh to pick up server-side defaults (timestamps, etc.).
    db.refresh(user)
    return user
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def authenticate_user(db: Session, email: str, password: str) -> Optional[User]:
    """Return the user matching email and password.

    Returns None for an unknown email, an account without a stored password
    hash (e.g. social login), or a password that fails verification.
    """
    user = get_user_by_email(db, email)
    if not user or not user.password_hash:
        return None
    return user if verify_password(password, user.password_hash) else None
|
apps/api/src/services/chancellor_analysis.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import Any, Optional
|
| 3 |
+
|
| 4 |
+
import httpx
|
| 5 |
+
|
| 6 |
+
from ..config import settings
|
| 7 |
+
from .counselor_knowledge import CHANCELLOR_COUNSELOR_FRAMEWORK
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# The four LLM-estimated sub-scores; the tuple order is reused to build
# SCORE_SCHEMA's "required" list and to validate parsed responses.
SCORE_KEYS = (
    "major_relevance_score",
    "selectivity_score",
    "continuity_score",
    "distinctiveness_score",
)

# JSON Schema sent to Gemini as generationConfig.responseJsonSchema so the
# model must reply with exactly these four numeric fields, each in [0, 10].
SCORE_SCHEMA = {
    "type": "object",
    "properties": {
        "major_relevance_score": {
            "type": "number",
            "minimum": 0,
            "maximum": 10,
            "description": "Alignment between the achievement and the student's intended major or academic direction.",
        },
        "selectivity_score": {
            "type": "number",
            "minimum": 0,
            "maximum": 10,
            "description": "Competitiveness, award level, and selection difficulty behind the achievement.",
        },
        "continuity_score": {
            "type": "number",
            "minimum": 0,
            "maximum": 10,
            "description": "Sustained commitment based on duration, hours, and ongoing responsibility.",
        },
        "distinctiveness_score": {
            "type": "number",
            "minimum": 0,
            "maximum": 10,
            "description": "How uncommon, self-directed, leadership-heavy, impact-heavy, or spike-relevant the achievement is.",
        },
    },
    "required": list(SCORE_KEYS),
    "additionalProperties": False,
}

# Scoring philosophy injected verbatim into the Gemini prompt by _gemini_prompt.
ADMISSIONS_FRAMEWORK = """
Use this admissions strategy framework:
- Depth beats breadth. A few world-class or deeply developed achievements should score higher than many generic activities.
- Favor a strong, authentic spike: a sustained area where the student shows uncommon initiative, impact, rigor, or visibility.
- Impact and leadership matter most when they are specific: built, published, founded, led, scaled, won, selected, presented, or served a defined audience.
- Authenticity matters. Do not reward over-polished, vague, buzzword-heavy, or all-perfect claims without concrete evidence.
- Major strategy matters. If the intended major is crowded for the student's context, reward achievements that make the profile more distinctive or cross-disciplinary.
- Home-country context matters for international students. Global impact is stronger when it is also tied back to a real local or national context.
- Be conservative when evidence is missing. Never invent facts.
""".strip()
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _value(source: Any, field: str) -> Any:
|
| 62 |
+
if isinstance(source, dict):
|
| 63 |
+
return source.get(field)
|
| 64 |
+
return getattr(source, field, None)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _enum_value(value: Any) -> str:
|
| 68 |
+
if value is None:
|
| 69 |
+
return ""
|
| 70 |
+
return getattr(value, "value", str(value))
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _text(source: Any) -> str:
    """Concatenate the achievement's searchable text fields into one lowercase string."""
    field_names = ("title", "organization_name", "role_title", "description_raw", "category")
    chunks = [_value(source, name) for name in field_names]
    return " ".join(str(chunk) for chunk in chunks if chunk).lower()
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _profile_major(user: Optional[Any]) -> str:
|
| 85 |
+
profile = getattr(user, "profile", None)
|
| 86 |
+
intended_major = getattr(profile, "intended_major", None)
|
| 87 |
+
return str(intended_major).lower() if intended_major else ""
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _contains_any(text: str, keywords: list[str]) -> bool:
|
| 91 |
+
return any(keyword in text for keyword in keywords)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def _clamp(value: float) -> float:
|
| 95 |
+
return round(max(0.0, min(10.0, value)), 1)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _profile_context(user: Optional[Any]) -> dict[str, Any]:
|
| 99 |
+
profile = getattr(user, "profile", None)
|
| 100 |
+
if profile is None:
|
| 101 |
+
return {}
|
| 102 |
+
|
| 103 |
+
return {
|
| 104 |
+
"country": getattr(user, "country", None),
|
| 105 |
+
"graduation_year": getattr(profile, "graduation_year", None),
|
| 106 |
+
"curriculum": getattr(profile, "curriculum", None),
|
| 107 |
+
"intended_major": getattr(profile, "intended_major", None),
|
| 108 |
+
"sat_score": getattr(profile, "sat_score", None),
|
| 109 |
+
"act_score": getattr(profile, "act_score", None),
|
| 110 |
+
"ielts_score": getattr(profile, "ielts_score", None),
|
| 111 |
+
"toefl_score": getattr(profile, "toefl_score", None),
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _achievement_context(source: Any) -> dict[str, Any]:
    """Project the achievement fields the scorer cares about into a plain dict.

    Enum-typed fields are unwrapped to their string values; everything else
    is passed through as-is (None when absent).
    """
    enum_fields = {"type", "impact_scope", "leadership_level"}
    field_order = (
        "type",
        "title",
        "organization_name",
        "role_title",
        "description_raw",
        "category",
        "hours_per_week",
        "weeks_per_year",
        "impact_scope",
        "leadership_level",
    )
    context: dict[str, Any] = {}
    for name in field_order:
        raw = _value(source, name)
        context[name] = _enum_value(raw) if name in enum_fields else raw
    return context
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def _gemini_prompt(source: Any, user: Optional[Any]) -> str:
    """Assemble the full Gemini scoring prompt: instructions, frameworks, input JSON."""
    payload = {
        "student_profile": _profile_context(user),
        "achievement": _achievement_context(source),
    }
    # default=str keeps non-JSON-native values (UUIDs, dates, ...) serializable.
    return (
        "You are ApplyMap Chancellor, an admissions evaluation assistant for international applicants. "
        "Score one student achievement. Use the framework below, but only use facts present in the input.\n\n"
        "Kazakhstan context: treat UNT/ENT, NIS selection context, the NIS Grade 12 Certificate, IB, and A-levels "
        "as relevant academic contexts when they appear. NIS applicants may have selective STEM-focused, "
        "trilingual, Cambridge-aligned academic backgrounds. MESK in Russian/Kazakh user language maps to "
        "NIS Grade 12 Certificate in English output.\n\n"
        f"{ADMISSIONS_FRAMEWORK}\n\n"
        f"{CHANCELLOR_COUNSELOR_FRAMEWORK}\n\n"
        "Score each field from 0 to 10, using one decimal place when useful:\n"
        "- major_relevance_score: fit with intended major, academic direction, and profile strategy.\n"
        "- selectivity_score: competitiveness, award level, and selection difficulty.\n"
        "- continuity_score: sustained commitment based on time, duration, and responsibility.\n"
        "- distinctiveness_score: uncommon spike, initiative, leadership, originality, visibility, or impact.\n\n"
        "Be conservative when evidence is missing. Return JSON only.\n\n"
        f"Input JSON:\n{json.dumps(payload, ensure_ascii=False, default=str)}"
    )
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def _extract_gemini_text(response_payload: dict[str, Any]) -> str:
|
| 155 |
+
candidates = response_payload.get("candidates") or []
|
| 156 |
+
content = candidates[0].get("content") if candidates else {}
|
| 157 |
+
parts = content.get("parts") or []
|
| 158 |
+
return str(parts[0].get("text", "")) if parts else ""
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def _scores_from_mapping(value: Any) -> Optional[dict[str, float]]:
    """Validate a parsed LLM payload into clamped float scores; None when invalid.

    Rejects non-dicts, missing keys, booleans (bool is an int subclass and
    would silently coerce), and values that cannot convert to float.
    """
    if not isinstance(value, dict):
        return None

    validated: dict[str, float] = {}
    for key in SCORE_KEYS:
        raw = value.get(key)
        if raw is None or isinstance(raw, bool):
            return None
        try:
            numeric = float(raw)
        except (TypeError, ValueError):
            return None
        validated[key] = _clamp(numeric)
    return validated
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def _gemini_scores(source: Any, user: Optional[Any]) -> Optional[dict[str, float]]:
    """Score one achievement via the Gemini API.

    Returns the four validated sub-scores, or None when no API key is
    configured, the HTTP call fails, or the response does not parse into the
    expected shape — callers fall back to the keyword heuristics.
    """
    api_key = settings.GEMINI_API_KEY.strip()
    if not api_key:
        return None

    model = (settings.GEMINI_MODEL or "gemini-2.5-flash").strip()
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
    request_payload = {
        "contents": [{"parts": [{"text": _gemini_prompt(source, user)}]}],
        "generationConfig": {
            # Low temperature + enforced JSON schema for stable, parseable scores.
            "temperature": 0.1,
            "responseMimeType": "application/json",
            "responseJsonSchema": SCORE_SCHEMA,
        },
    }

    try:
        with httpx.Client(timeout=12.0) as client:
            response = client.post(
                url,
                headers={
                    "x-goog-api-key": api_key,
                    "Content-Type": "application/json",
                },
                json=request_payload,
            )
            response.raise_for_status()
            response_text = _extract_gemini_text(response.json())
            return _scores_from_mapping(json.loads(response_text))
    except (httpx.HTTPError, json.JSONDecodeError, KeyError, TypeError, ValueError):
        # Any transport/parse/shape failure degrades silently to heuristics.
        return None
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def _heuristic_scores(source: Any, user: Optional[Any] = None) -> dict[str, float]:
    """Keyword/metadata fallback scorer used when the Gemini call is unavailable.

    Produces the same four SCORE_KEYS fields; every component is clamped to
    [0, 10]. Scores are rough signals derived from declared scope/leadership,
    reported hours, and keyword matches in the achievement's text fields.
    """
    text = _text(source)
    scope = _enum_value(_value(source, "impact_scope"))
    leadership = _enum_value(_value(source, "leadership_level"))
    hours = _value(source, "hours_per_week") or 0
    weeks = _value(source, "weeks_per_year") or 0
    intended_major = _profile_major(user)

    # Major relevance: direct term overlap with the stated major dominates;
    # without a stated major, generic academic keywords give a small bump.
    major_score = 5.0
    if intended_major:
        # Split "Computer Science/Math, AI" style strings into words; terms of
        # length <= 2 are dropped to avoid matching noise like "of" or "cs".
        major_terms = [
            term
            for term in intended_major.replace("/", " ").replace(",", " ").split()
            if len(term) > 2
        ]
        major_score = 7.0 if any(term in text for term in major_terms) else 4.5
    elif _contains_any(
        text,
        [
            "research",
            "science",
            "math",
            "physics",
            "chemistry",
            "biology",
            "robot",
            "programming",
            "engineering",
            "economics",
        ],
    ):
        major_score = 6.0
    if _contains_any(text, ["coursework", "project", "lab", "olympiad", "competition", "internship"]):
        major_score += 1.0

    # Selectivity: base value from the declared scope, boosted by award and
    # elite-competition keywords.
    scope_base = {
        "international": 8.0,
        "national": 7.0,
        "regional": 5.5,
        "local": 4.5,
        "school": 3.5,
        "family": 3.0,
        "personal": 2.5,
    }
    selectivity_score = scope_base.get(scope, 4.5)
    if _contains_any(
        text,
        ["selected", "selective", "winner", "finalist", "medal", "olympiad", "scholarship", "first place", "top "],
    ):
        selectivity_score += 1.5
    if _contains_any(text, ["imo", "ipho", "ibo", "nasa", "global", "international"]):
        selectivity_score += 1.0

    # Continuity: estimated yearly hours, plus a bonus for duration language.
    annual_hours = float(hours) * float(weeks)
    if annual_hours >= 250:
        continuity_score = 8.0
    elif annual_hours >= 120:
        continuity_score = 6.5
    elif annual_hours >= 40:
        continuity_score = 5.0
    elif weeks or hours:
        continuity_score = 3.5
    else:
        # No time data at all scores slightly above the tiny-commitment case —
        # presumably because absent data is weaker evidence than a reported
        # low commitment; confirm intent.
        continuity_score = 4.0
    if _contains_any(text, ["year", "years", "since", "ongoing", "weekly", "semester"]):
        continuity_score += 1.0

    # Distinctiveness: leadership role, reach, creation/impact keywords; short
    # passive descriptions ("helped", "member") are slightly penalized.
    distinctiveness_score = 4.5
    if leadership in {"lead", "captain", "founder"}:
        distinctiveness_score += 1.5
    if scope in {"national", "international"}:
        distinctiveness_score += 1.0
    if _contains_any(
        text,
        ["founded", "created", "built", "published", "patent", "research", "startup", "world champion", "first place"],
    ):
        distinctiveness_score += 1.5
    if _contains_any(text, ["press", "media", "tedx", "conference", "downloads", "users"]):
        distinctiveness_score += 1.0
    if _contains_any(text, ["helped", "member", "participated"]) and len(text) < 120:
        distinctiveness_score -= 0.5

    return {
        "major_relevance_score": _clamp(major_score),
        "selectivity_score": _clamp(selectivity_score),
        "continuity_score": _clamp(continuity_score),
        "distinctiveness_score": _clamp(distinctiveness_score),
    }
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def estimate_chancellor_scores(source: Any, user: Optional[Any] = None) -> dict[str, float]:
    """Score an achievement, preferring Gemini and falling back to heuristics."""
    llm_scores = _gemini_scores(source, user)
    if llm_scores:
        return llm_scores
    return _heuristic_scores(source, user)
|
apps/api/src/services/counselor_knowledge.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
CHANCELLOR_COUNSELOR_FRAMEWORK = """
|
| 2 |
+
Use this internal admissions counseling framework silently. Do not mention these materials to the student.
|
| 3 |
+
|
| 4 |
+
Achievement extraction and Common App writing:
|
| 5 |
+
- Preserve every distinct achievement from the uploaded file before ranking. Do not collapse unrelated awards, projects,
|
| 6 |
+
leadership roles, jobs, family duties, research, and service into one item.
|
| 7 |
+
- Rank for quality, not quantity: sustained depth, selective recognition, independent initiative, measurable impact,
|
| 8 |
+
leadership, intellectual vitality, and connection to the student's intended direction matter most.
|
| 9 |
+
- Write copy-paste-ready English. Use strong active verbs, concrete metrics, and compressed phrasing. Remove filler,
|
| 10 |
+
adjectives that do not add evidence, and repeated role wording.
|
| 11 |
+
- Always translate Russian, Kazakh, or mixed-language notes into polished English output. Fix capitalization when the
|
| 12 |
+
source starts in lowercase or uses informal phrasing.
|
| 13 |
+
- Preserve years, school grade, number of students served, event names, placements, participant counts, and other
|
| 14 |
+
concrete facts when they appear in the source. Compress wording, but do not remove the meaning.
|
| 15 |
+
- Never replace a concrete source detail with a more impressive or more likely-sounding detail. If the source says
|
| 16 |
+
gift-card events, lessons, mentoring, or another specific activity, keep that meaning instead of inventing
|
| 17 |
+
tournaments, research, publications, or competitions.
|
| 18 |
+
- For Kazakhstan award levels, translate "Republican" / "Respublikalyk" / "Respublikanskiy" as "National" unless an
|
| 19 |
+
official English title clearly uses a different wording.
|
| 20 |
+
- Activities need three fields: position/leadership, organization, and description. Use the description for impact,
|
| 21 |
+
scale, selectivity, outputs, audiences, and results.
|
| 22 |
+
- Honors need one compact title/description line. Include level, placement, and selectivity only when supported.
|
| 23 |
+
- If facts conflict or are missing, ask direct questions before treating the wording as final.
|
| 24 |
+
|
| 25 |
+
Low GPA, weak SAT/ACT, or academic dips:
|
| 26 |
+
- Recommend Additional Information only when it explains a material transcript/test-score weakness, an access barrier,
|
| 27 |
+
a serious disruption, or school context that changes interpretation.
|
| 28 |
+
- The best explanation is concise: context, accountability, concrete recovery, and evidence that the issue is resolved
|
| 29 |
+
or no longer defines the student's academic ability.
|
| 30 |
+
- Acceptable contexts can include diagnosed learning differences, health issues, family instability or caretaking,
|
| 31 |
+
unsafe home environment, immigration/language transition, financial hardship, or unusually heavy work obligations.
|
| 32 |
+
- Do not blame teachers, claim the student simply did not care, use generic pandemic excuses without specifics,
|
| 33 |
+
compare to classmates, or over-explain. If there is no meaningful context, advise not to force an explanation.
|
| 34 |
+
- For low testing, prefer strategy over excuse: use stronger coursework, external academic evidence, competitions,
|
| 35 |
+
research, predicted/official exam scores, or test-optional positioning when appropriate.
|
| 36 |
+
|
| 37 |
+
Ultra-selective admissions strategy:
|
| 38 |
+
- Stats alone rarely differentiate a strong international applicant. Look for one or two unusually strong spikes rather
|
| 39 |
+
than many generic activities.
|
| 40 |
+
- Strong applications show academic evidence beyond grades: selective awards, original research, advanced coursework,
|
| 41 |
+
publications, technical artifacts, public work, or rigorous external validation.
|
| 42 |
+
- Overrepresented majors need sharper differentiation. A technical student can stand out through policy, ethics,
|
| 43 |
+
education, local impact, research, or another authentic cross-disciplinary angle.
|
| 44 |
+
- Essays and activity descriptions should reveal judgment, motivation, and personality through concrete choices,
|
| 45 |
+
not buzzwords or an all-perfect persona.
|
| 46 |
+
- For Kazakhstan/NIS students, preserve local context: NIS, NIS Grade 12 Certificate, UNT/ENT, research schools,
|
| 47 |
+
olympiad stages, language background, 11 vs 12 years of schooling, and access to counseling.
|
| 48 |
+
- MESK in Russian/Kazakh means NIS Grade 12 Certificate in English output.
|
| 49 |
+
|
| 50 |
+
Korea application context:
|
| 51 |
+
- Do not treat Korean universities as Common App schools by default. South Korea applications commonly use Study in
|
| 52 |
+
Korea resources, university-specific portals, or third-party Korean application portals depending on the university.
|
| 53 |
+
- For KAIST, UNIST, POSTECH, SNU, Yonsei, Korea University, and similar Korean targets, produce concise
|
| 54 |
+
application-ready English that follows the target platform's own limit when known. Do not force the 50/100/150
|
| 55 |
+
Common App fields unless the application system is explicitly Common App.
|
| 56 |
+
- For KAIST-style short fields, prioritize title, year, placement/selectivity, exact role, and quantified output.
|
| 57 |
+
- Internal Korea drafting checklist from the user's local counselor material: KAIST-style portals may separate honors
|
| 58 |
+
and extracurriculars into about 5 slots each and can use very short 200-byte descriptions; SNU/Yonsei-style portals
|
| 59 |
+
can use about 300-byte activity descriptions; Korea University may vary around 300-500 bytes; GKS forms may list
|
| 60 |
+
awards separately while the Personal Statement carries the explanation. Treat these as drafting heuristics, not
|
| 61 |
+
confirmed current rules.
|
| 62 |
+
- When Korea-specific limits are uncertain, state that the limit must be checked against the current official portal
|
| 63 |
+
instead of inventing a number.
|
| 64 |
+
|
| 65 |
+
Source discipline:
|
| 66 |
+
- Separate confirmed facts from student claims. Never invent participant counts, selection rates, official titles,
|
| 67 |
+
award levels, deadlines, scholarships, or program requirements.
|
| 68 |
+
- When using search results, prefer official university, organizer, government, or program pages. If a fact is not
|
| 69 |
+
confirmed, mark it as unconfirmed and ask the student for a certificate, official link, or clearer detail.
|
| 70 |
+
""".strip()
|
apps/api/src/services/optimization_engine.py
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Optimization engine for ranking and recommending achievements.
|
| 3 |
+
Uses weighted scoring based on university-specific weight presets.
|
| 4 |
+
"""
|
| 5 |
+
from typing import List, Dict, Tuple, Optional
|
| 6 |
+
from dataclasses import dataclass
|
| 7 |
+
from sqlalchemy.orm import Session
|
| 8 |
+
from uuid import UUID
|
| 9 |
+
|
| 10 |
+
from ..models.achievement import Achievement, AchievementType, ImpactScope, LeadershipLevel
|
| 11 |
+
from ..models.university import University, WeightPreset
|
| 12 |
+
from ..models.user import StudentProfile
|
| 13 |
+
from ..models.report import (
|
| 14 |
+
OptimizationReport, ReportRecommendation, SourceReference,
|
| 15 |
+
RecommendationType, ConfidenceLabel, SourceSection, ReportStatus
|
| 16 |
+
)
|
| 17 |
+
from .report_advisor import build_report_advisor_snapshot
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Weight configurations per preset
# Each preset maps criterion name -> relative weight; "duplication_penalty" is
# not a weight but a flat amount subtracted in score_achievement() for
# near-duplicate items.
# NOTE(review): keys are WeightPreset members while the annotation says str —
# consistent only if WeightPreset is a str-valued Enum; confirm.
WEIGHT_PRESETS: Dict[str, Dict[str, float]] = {
    WeightPreset.research_heavy: {
        "impact_scope": 0.15,
        "selectivity": 0.25,
        "leadership": 0.10,
        "continuity": 0.15,
        "major_relevance": 0.25,
        "distinctiveness": 0.10,
        "clarity": 0.05,
        "duplication_penalty": 2.0,
    },
    WeightPreset.leadership_heavy: {
        "impact_scope": 0.20,
        "selectivity": 0.10,
        "leadership": 0.30,
        "continuity": 0.15,
        "major_relevance": 0.10,
        "distinctiveness": 0.10,
        "clarity": 0.05,
        "duplication_penalty": 2.0,
    },
    WeightPreset.balanced_holistic: {
        "impact_scope": 0.20,
        "selectivity": 0.15,
        "leadership": 0.15,
        "continuity": 0.15,
        "major_relevance": 0.15,
        "distinctiveness": 0.15,
        "clarity": 0.05,
        "duplication_penalty": 2.0,
    },
    WeightPreset.community_service_heavy: {
        "impact_scope": 0.30,
        "selectivity": 0.10,
        "leadership": 0.15,
        "continuity": 0.20,
        "major_relevance": 0.10,
        "distinctiveness": 0.10,
        "clarity": 0.05,
        "duplication_penalty": 2.0,
    },
}

# Numeric mappings for enum values
# Broader reach scores higher; personal/family scope lowest. Same str-vs-enum
# key caveat as WEIGHT_PRESETS above.
IMPACT_SCOPE_MAP: Dict[str, float] = {
    ImpactScope.school: 3.0,
    ImpactScope.local: 4.0,
    ImpactScope.regional: 6.0,
    ImpactScope.national: 8.5,
    ImpactScope.international: 10.0,
    ImpactScope.family: 2.0,
    ImpactScope.personal: 1.0,
}

# Formal-responsibility ladder: founder > captain > lead > member > none.
LEADERSHIP_MAP: Dict[str, float] = {
    LeadershipLevel.none: 1.0,
    LeadershipLevel.member: 4.0,
    LeadershipLevel.lead: 7.0,
    LeadershipLevel.captain: 8.5,
    LeadershipLevel.founder: 10.0,
}
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@dataclass
class ScoredAchievement:
    """One achievement together with its weighted score and scoring metadata."""

    achievement: Achievement
    raw_score: float  # weighted criterion sum (see score_achievement)
    breakdown: Dict[str, float]  # per-criterion weighted contributions
    is_duplicate: bool  # flagged as near-duplicate by _detect_duplicates
    duplicate_of: Optional[UUID]  # id of the earlier achievement it duplicates
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _get_impact_score(achievement: Achievement) -> float:
    """Numeric score for the achievement's impact scope; 5.0 when unset/unknown."""
    scope = achievement.impact_scope
    if not scope:
        return 5.0  # midpoint default for missing data
    return IMPACT_SCOPE_MAP.get(scope, 5.0)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def _get_leadership_score(achievement: Achievement) -> float:
    """Numeric score for the leadership level; 4.0 (member-level) when unset/unknown."""
    level = achievement.leadership_level
    return LEADERSHIP_MAP.get(level, 4.0) if level else 4.0
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def _get_clarity_score(achievement: Achievement) -> float:
|
| 106 |
+
"""Estimate clarity from description completeness."""
|
| 107 |
+
score = 0.0
|
| 108 |
+
if achievement.description_raw and len(achievement.description_raw) > 30:
|
| 109 |
+
score += 4.0
|
| 110 |
+
if achievement.role_title:
|
| 111 |
+
score += 2.0
|
| 112 |
+
if achievement.organization_name:
|
| 113 |
+
score += 2.0
|
| 114 |
+
if achievement.hours_per_week:
|
| 115 |
+
score += 1.0
|
| 116 |
+
if achievement.start_date:
|
| 117 |
+
score += 1.0
|
| 118 |
+
return min(score, 10.0)
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def _detect_duplicates(achievements: List[Achievement]) -> Dict[UUID, Optional[UUID]]:
|
| 122 |
+
"""Detect duplicate achievements based on organization and overlapping titles."""
|
| 123 |
+
duplicate_map: Dict[UUID, Optional[UUID]] = {}
|
| 124 |
+
seen: List[Achievement] = []
|
| 125 |
+
|
| 126 |
+
for ach in achievements:
|
| 127 |
+
is_dup = False
|
| 128 |
+
for prev in seen:
|
| 129 |
+
# Same org and similar title suggests duplication
|
| 130 |
+
if (
|
| 131 |
+
ach.organization_name
|
| 132 |
+
and prev.organization_name
|
| 133 |
+
and ach.organization_name.lower().strip() == prev.organization_name.lower().strip()
|
| 134 |
+
):
|
| 135 |
+
ach_words = set((ach.title or "").lower().split())
|
| 136 |
+
prev_words = set((prev.title or "").lower().split())
|
| 137 |
+
if ach_words and prev_words:
|
| 138 |
+
overlap = len(ach_words & prev_words) / max(len(ach_words), len(prev_words))
|
| 139 |
+
if overlap > 0.5:
|
| 140 |
+
duplicate_map[ach.id] = prev.id
|
| 141 |
+
is_dup = True
|
| 142 |
+
break
|
| 143 |
+
if not is_dup:
|
| 144 |
+
duplicate_map[ach.id] = None
|
| 145 |
+
seen.append(ach)
|
| 146 |
+
|
| 147 |
+
return duplicate_map
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def score_achievement(
    achievement: Achievement,
    weights: Dict[str, float],
    is_duplicate: bool = False,
) -> Tuple[float, Dict[str, float]]:
    """Compute (weighted total, per-criterion breakdown) for one achievement.

    Missing numeric sub-scores default to the 5.0 midpoint; enum-backed
    criteria come from the _get_* helpers. Duplicates lose the preset's flat
    "duplication_penalty" from the total (the breakdown is left untouched).
    """
    def _or_midpoint(value: Optional[float]) -> float:
        # Neutral default for sub-scores that were never estimated.
        return 5.0 if value is None else value

    criteria = {
        "impact_scope": _get_impact_score(achievement),
        "selectivity": _or_midpoint(achievement.selectivity_score),
        "leadership": _get_leadership_score(achievement),
        "continuity": _or_midpoint(achievement.continuity_score),
        "major_relevance": _or_midpoint(achievement.major_relevance_score),
        "distinctiveness": _or_midpoint(achievement.distinctiveness_score),
        "clarity": _get_clarity_score(achievement),
    }

    breakdown = {name: value * weights[name] for name, value in criteria.items()}
    total = sum(breakdown.values())

    if is_duplicate:
        total -= weights["duplication_penalty"]

    return total, breakdown
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def _calculate_confidence(achievement: Achievement, score: float, all_scores: List[float]) -> ConfidenceLabel:
    """Determine confidence label based on data completeness and score spread.

    Completeness counts how many of seven achievement fields are populated;
    spread measures the max-min gap across all_scores (defaulting to "medium"
    when fewer than two scores exist). The two signals combine into a
    high/medium/low ConfidenceLabel.
    """
    populated = [
        achievement.impact_scope,
        achievement.selectivity_score is not None,
        achievement.leadership_level,
        achievement.continuity_score is not None,
        achievement.major_relevance_score is not None,
        achievement.distinctiveness_score is not None,
        achievement.description_raw,
    ]
    filled_fields = sum(1 for flag in populated if flag)

    completeness = "high" if filled_fields >= 6 else "medium" if filled_fields >= 3 else "low"

    # With a single score there is no meaningful spread; treat it as medium.
    spread = "medium"
    if len(all_scores) > 1:
        gap = max(all_scores) - min(all_scores)
        spread = "low" if gap < 1.0 else "medium" if gap < 3.0 else "high"

    if completeness == "high" and spread != "low":
        return ConfidenceLabel.high
    if completeness == "low" or spread == "low":
        return ConfidenceLabel.low
    return ConfidenceLabel.medium
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _generate_rationale(
    achievement: Achievement,
    recommendation_type: RecommendationType,
    rank: Optional[int],
    weights: Dict[str, float],  # currently unused here; kept for signature stability
    breakdown: Dict[str, float],
    university: University,
) -> str:
    """Generate a human-readable rationale for the recommendation.

    Builds one to several sentences depending on the recommendation type
    (keep / remove / rewrite / merge / reorder) and joins them with spaces.
    `rank` is only meaningful for keep/reorder; `breakdown` is only consulted
    for keep (to surface the two strongest weighted factors).
    """
    lines = []

    if recommendation_type == RecommendationType.keep:
        # NOTE(review): this assumes university.weight_preset is a plain string;
        # run_optimization guards the same attribute with getattr(..., "value", ...)
        # (enum vs. str) — confirm which type actually reaches this point.
        lines.append(f"Recommended for {university.name} (#{rank}) based on {university.weight_preset.replace('_', ' ')} criteria.")

        # Find top contributing factors
        sorted_factors = sorted(breakdown.items(), key=lambda x: x[1], reverse=True)
        top_factors = [f[0].replace("_", " ") for f in sorted_factors[:2]]
        lines.append(f"Strongest factors: {', '.join(top_factors)}.")

        # Call out exceptional scope and leadership explicitly.
        if achievement.impact_scope in (ImpactScope.national, ImpactScope.international):
            lines.append(f"Noteworthy {achievement.impact_scope.value}-level impact.")

        if achievement.leadership_level in (LeadershipLevel.founder, LeadershipLevel.captain):
            lines.append(f"Leadership role ({achievement.leadership_level.value}) adds significant weight.")

    elif recommendation_type == RecommendationType.remove:
        lines.append(f"Not recommended for inclusion in your {university.name} application.")
        lines.append("Score falls below the threshold given your other achievements and this university's priorities.")
        if achievement.impact_scope in (ImpactScope.personal, ImpactScope.family):
            lines.append("Limited external impact scope reduces fit for this university profile.")

    elif recommendation_type == RecommendationType.rewrite:
        lines.append(f"Strong potential — keep for {university.name} but the description needs sharpening.")
        # 50 chars is a looser bar than the 30-char rewrite trigger upstream, so
        # not every rewrite recommendation gets this extra sentence.
        if not achievement.description_raw or len(achievement.description_raw) < 50:
            lines.append("Current description is too brief to convey full impact.")
        lines.append("Use the Rewrite Studio to tighten language and lead with impact.")

    elif recommendation_type == RecommendationType.merge:
        lines.append("Overlaps significantly with another entry. Consider merging to avoid duplication.")
        lines.append("Admissions readers notice repetition — a single strong entry outperforms two weak ones.")

    elif recommendation_type == RecommendationType.reorder:
        lines.append(f"Include at position #{rank}. Ordering here is important — earlier positions receive more attention.")

    return " ".join(lines)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def run_optimization(
    db: Session,
    report: OptimizationReport,
    achievements: List[Achievement],
    university: University,
    profile: StudentProfile | None = None,
    user_country: str | None = None,
) -> None:
    """
    Run the full optimization pipeline and populate the report with recommendations.

    Steps:
      1. Split achievements into activities and honors; detect near-duplicates.
      2. Score and rank each group with the university's weight preset.
      3. Emit keep/rewrite/remove/merge recommendations (top 10 activities,
         top 5 honors are kept; short descriptions are flagged for rewrite).
      4. Attach source references from the university's policy entries.
      5. Write the summary text and advisor snapshot, mark the report
         completed, and commit.
    """
    # Local import replaces the previous ``__import__("datetime")`` hack.
    from datetime import datetime

    weights = WEIGHT_PRESETS.get(university.weight_preset, WEIGHT_PRESETS[WeightPreset.balanced_holistic])

    activities = [a for a in achievements if a.type == AchievementType.activity]
    honors = [a for a in achievements if a.type == AchievementType.honor]

    dup_map_activities = _detect_duplicates(activities)
    dup_map_honors = _detect_duplicates(honors)

    def score_list(items: List[Achievement], dup_map: dict) -> List[ScoredAchievement]:
        """Score every item, tagging duplicates, and sort best-first."""
        scored = []
        for ach in items:
            dup_of = dup_map.get(ach.id)
            raw, breakdown = score_achievement(ach, weights, is_duplicate=dup_of is not None)
            scored.append(ScoredAchievement(
                achievement=ach,
                raw_score=raw,
                breakdown=breakdown,
                is_duplicate=dup_of is not None,
                duplicate_of=dup_of,
            ))
        scored.sort(key=lambda x: x.raw_score, reverse=True)
        return scored

    def build_recommendations(
        scored_list: List[ScoredAchievement],
        all_scores: List[float],
        keep_limit: int,
    ) -> List[ReportRecommendation]:
        """Turn a ranked list into ReportRecommendation rows.

        Shared by activities (keep_limit=10) and honors (keep_limit=5):
        duplicates become merge, the top keep_limit become keep (or rewrite
        when the description is missing/short), the rest become remove.
        """
        recs = []
        for rank, scored in enumerate(scored_list, start=1):
            ach = scored.achievement
            if scored.is_duplicate:
                rec_type = RecommendationType.merge
                suggested_rank = None
            elif rank <= keep_limit:
                # Short or missing descriptions are kept but flagged for rewriting.
                if not ach.description_raw or len(ach.description_raw) < 30:
                    rec_type = RecommendationType.rewrite
                else:
                    rec_type = RecommendationType.keep
                suggested_rank = rank
            else:
                rec_type = RecommendationType.remove
                suggested_rank = None

            confidence = _calculate_confidence(ach, scored.raw_score, all_scores)
            rationale = _generate_rationale(ach, rec_type, suggested_rank, weights, scored.breakdown, university)

            recs.append(ReportRecommendation(
                report_id=report.id,
                achievement_id=ach.id,
                recommendation_type=rec_type,
                suggested_rank=suggested_rank,
                rationale=rationale,
                confidence_label=confidence,
            ))
        return recs

    scored_activities = score_list(activities, dup_map_activities)
    scored_honors = score_list(honors, dup_map_honors)

    recommendations = build_recommendations(
        scored_activities, [s.raw_score for s in scored_activities], keep_limit=10
    )
    recommendations += build_recommendations(
        scored_honors, [s.raw_score for s in scored_honors], keep_limit=5
    )
    db.add_all(recommendations)

    # Add source references from university policy entries.
    for entry in university.policy_entries:
        section = (
            SourceSection.official_guidance
            if entry.source_type.value == "official"
            else SourceSection.public_examples
        )
        db.add(SourceReference(
            report_id=report.id,
            university_policy_entry_id=entry.id,
            section=section,
            note=f"Referenced for {university.name} application context.",
        ))

    # Build summary text. Major preference order: profile -> university
    # strengths -> generic placeholder.
    target_major = (
        profile.intended_major if profile and profile.intended_major else None
    ) or (university.major_strengths[0] if university.major_strengths else None) or "your target major"
    # weight_preset may be an enum or a plain string; getattr handles both.
    weight_label = getattr(university.weight_preset, "value", university.weight_preset).replace("_", " ")
    # Highlight factors with substantial weight (>= 0.20); skip the penalty term.
    weight_emphasis = ", ".join(
        key.replace("_", " ")
        for key, value in weights.items()
        if isinstance(value, float) and value >= 0.20 and key != "duplication_penalty"
    )
    funding_note = (
        "A full-funding route is visible in the current dataset."
        if university.full_ride_possible
        else "Funding still needs careful verification before this school stays in the core list."
    )

    report.summary_text = (
        f"Advisor ready for {university.name} v{report.version_number}. "
        f"Focus major: {target_major}. "
        f"University profile: {weight_label}. "
        f"Weight emphasis: {weight_emphasis}. "
        f"{funding_note}"
    )
    report.advisor_snapshot_json = build_report_advisor_snapshot(
        university=university,
        profile=profile,
        user_country=user_country,
        report_note=report.summary_text,
    )
    report.status = ReportStatus.completed
    # Naive UTC timestamp, matching the previous utcnow() behavior.
    report.completed_at = datetime.utcnow()

    db.commit()
|
apps/api/src/services/report_advisor.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from typing import Any
|
| 4 |
+
|
| 5 |
+
from ..models.university import University
|
| 6 |
+
from ..models.user import StudentProfile
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Curated summer/research program suggestions, keyed by university slug
# ("mit") or by a track fallback key ("default_cs", "default_engineering").
# Each entry carries a display name, a pitch, a funding caveat, and a
# priority tag ("full-funding" | "scholarship" | "verify") used downstream
# to order/filter suggestions.
PROGRAM_LIBRARY: dict[str, list[dict[str, str]]] = {
    "mit": [
        {
            "name": "MIT PRIMES",
            "why_it_matters": "High-signal math and computer science research track that reads as real academic depth, not just another club.",
            "funding_note": "Treat as top priority if you can access the remote track, but verify the current international eligibility and cost rules.",
            "priority": "verify",
        },
        {
            "name": "Research Science Institute (RSI)",
            "why_it_matters": "Elite research credential with immediate value for STEM-heavy applications and a much stronger narrative than a generic summer school.",
            "funding_note": "Prioritize because the program has historically been fully funded for admitted students, but still verify the current cycle.",
            "priority": "full-funding",
        },
        {
            "name": "MITES",
            "why_it_matters": "Strong MIT-branded pre-college signal for STEM readiness and academic stretch.",
            "funding_note": "Useful only if the current cycle accepts your profile and funding route. Verify international access before planning around it.",
            "priority": "verify",
        },
        {
            "name": "Beaver Works Summer Institute",
            "why_it_matters": "Good applied CS, AI, and robotics proof if you need a stronger technical build before applications.",
            "funding_note": "Do not assume full funding. Apply only if scholarship or sponsored access is available.",
            "priority": "scholarship",
        },
    ],
    "default_cs": [
        {
            "name": "Research Science Institute (RSI)",
            "why_it_matters": "Recognizable research signal for highly selective STEM admissions.",
            "funding_note": "Prioritize first because it has historically offered a strong funding route for admitted students, but verify the current cycle.",
            "priority": "full-funding",
        },
        {
            "name": "Pioneer Academics",
            "why_it_matters": "Produces an actual research output and helps move your story from project-builder to research-capable applicant.",
            "funding_note": "Apply only if scholarship support is available. Do not treat it as an automatic full-funding option.",
            "priority": "scholarship",
        },
        {
            "name": "PROMYS",
            "why_it_matters": "Strong fit if your target major needs proof of mathematical rigor behind CS or AI ambitions.",
            "funding_note": "Useful when aid is available. Verify current scholarship access for international students.",
            "priority": "verify",
        },
        {
            "name": "YYGS IST",
            "why_it_matters": "Not as strong as true research, but still better than a generic enrichment camp if you need a branded academic program.",
            "funding_note": "Only worth it with substantial aid or sponsorship.",
            "priority": "scholarship",
        },
    ],
    "default_engineering": [
        {
            "name": "Research Science Institute (RSI)",
            "why_it_matters": "Adds hard research credibility for engineering-heavy applications.",
            "funding_note": "Prioritize because the funding path has historically been strong for admitted students, but verify the current cycle.",
            "priority": "full-funding",
        },
        {
            "name": "Beaver Works Summer Institute",
            "why_it_matters": "Strong applied engineering and robotics signal when you need a build-heavy program.",
            "funding_note": "Use only with scholarship support or sponsor backing.",
            "priority": "scholarship",
        },
        {
            "name": "PROMYS",
            "why_it_matters": "Helpful if your engineering target expects serious math underneath the build work.",
            "funding_note": "Verify current financial aid options for international students.",
            "priority": "verify",
        },
    ],
}

# Substring keyword lists used by _detect_track to classify a major/strengths
# blob into one of four tracks; matched in the order cs -> engineering ->
# business -> life_sciences, first hit wins.
MAJOR_KEYWORDS = {
    "cs": ["computer science", "cs", "artificial intelligence", "ai", "machine learning", "software"],
    "engineering": ["engineering", "electrical", "mechanical", "robotics"],
    "business": ["business", "economics", "finance", "management"],
    "life_sciences": ["biology", "biotech", "medicine", "neuroscience", "chemistry"],
}
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _normalize(value: str | None) -> str:
|
| 93 |
+
return (value or "").strip().lower()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _dedupe(values: list[str]) -> list[str]:
|
| 97 |
+
seen: set[str] = set()
|
| 98 |
+
output: list[str] = []
|
| 99 |
+
for value in values:
|
| 100 |
+
if not value:
|
| 101 |
+
continue
|
| 102 |
+
if value in seen:
|
| 103 |
+
continue
|
| 104 |
+
seen.add(value)
|
| 105 |
+
output.append(value)
|
| 106 |
+
return output
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _includes_any(value: str, terms: list[str]) -> bool:
|
| 110 |
+
return any(term in value for term in terms)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def _detect_track(university: University, major: str) -> str:
    """Classify the intended major plus the university's strengths into a track.

    Tracks are checked in fixed priority order (cs first); returns "general"
    when no keyword list matches.
    """
    haystack = " ".join(
        [
            _normalize(major),
            _normalize(" ".join(university.major_strengths or [])),
        ]
    )

    for track in ("cs", "engineering", "business", "life_sciences"):
        if _includes_any(haystack, MAJOR_KEYWORDS[track]):
            return track
    return "general"
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _programs_for(university: University, major: str) -> list[dict[str, str]]:
    """Pick the curated program list for this university, falling back to a
    track-level default (cs/engineering) and finally to a generic suggestion."""
    slug = _normalize(university.slug)
    if slug in PROGRAM_LIBRARY:
        return PROGRAM_LIBRARY[slug]

    track_defaults = {"cs": "default_cs", "engineering": "default_engineering"}
    track = _detect_track(university, major)
    if track in track_defaults:
        return PROGRAM_LIBRARY[track_defaults[track]]

    # No curated list: suggest the generic research-program route.
    return [
        {
            "name": "Professor-led summer research program",
            "why_it_matters": "You still need a real academic signal tied to the target major, not only extracurricular activity.",
            "funding_note": "Choose only options with a clear scholarship or sponsor route.",
            "priority": "verify",
        }
    ]
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def _focus_areas(university: University, major: str, profile: StudentProfile | None) -> list[str]:
    """Build up to five focus-area sentences for the advisor snapshot.

    Candidate sentences are assembled in priority order (major anchor first,
    then application/curriculum/eligibility context, then a weight-preset
    framing sentence); empty strings are produced for facts that are missing
    and removed by _dedupe, which also caps the list at five entries.
    """
    # Pull the student's saved "school_years" preference, if the profile has one.
    school_years = None
    preferences = profile.application_preferences_json if profile else None
    if isinstance(preferences, dict):
        raw_school_years = preferences.get("school_years")
        if raw_school_years is not None:
            school_years = str(raw_school_years)

    # Order matters: only the first five non-empty, distinct sentences survive.
    focus = [
        f"Target major: {major}. Keep the advisor anchored to this major, not to the whole student profile.",
        f"Application route: {university.application_system}." if university.application_system else "",
        f"Curriculum context: {profile.curriculum}." if profile and profile.curriculum else "",
        (
            f"Academic baseline: the school expects {university.education_years_required}+ years of schooling."
            if university.education_years_required
            else ""
        ),
        f"Your saved school-years setting is {school_years}." if school_years else "",
        f"School-years note: {university.school_years_note}" if university.school_years_note else "",
        (
            f"The university already reads strongest for: {', '.join((university.major_strengths or [])[:4])}."
            if university.major_strengths
            else ""
        ),
        # Exactly one of the four preset sentences below can be non-empty;
        # getattr handles weight_preset being either an enum or a plain string.
        (
            "This school profile rewards research depth and proof of technical rigor more than generic leadership packaging."
            if getattr(university.weight_preset, "value", university.weight_preset) == "research_heavy"
            else ""
        ),
        (
            "This school profile rewards leadership and initiative, but it still needs major-specific substance."
            if getattr(university.weight_preset, "value", university.weight_preset) == "leadership_heavy"
            else ""
        ),
        (
            "This school profile is balanced, so academic rigor, fit, and a clear narrative all matter."
            if getattr(university.weight_preset, "value", university.weight_preset) == "balanced_holistic"
            else ""
        ),
        (
            "This school profile values community impact, but the story still has to stay connected to the intended major."
            if getattr(university.weight_preset, "value", university.weight_preset) == "community_service_heavy"
            else ""
        ),
    ]

    return _dedupe(focus)[:5]
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _funding_plan(university: University, user_country: str | None) -> list[str]:
    """Build the funding-plan sentences for the advisor snapshot.

    Always leads with a shortlist verdict driven by full_ride_possible, then
    appends dataset facts (aid type/notes, eligibility) when present, and ends
    with a verification reminder. Empty strings from missing facts are removed
    by _dedupe; unlike _focus_areas, the list is not capped.
    """
    country_label = user_country or "your country"
    funding = [
        (
            f"This university stays on the shortlist because a full-funding route appears possible for an international applicant from {country_label}."
            if university.full_ride_possible
            else "Do not position this university as full-ride-safe yet. Keep it only if you can cover the gap or find a separate sponsor route."
        ),
        f"Funding route in the dataset: {university.aid_type.replace('_', ' ')}." if university.aid_type else "",
        f"Aid note: {university.aid_notes}" if university.aid_notes else "",
        f"Eligibility check: {university.eligibility_notes}" if university.eligibility_notes else "",
        # The URL itself is not surfaced; it only selects which reminder is shown.
        (
            "Before final submission, verify the current aid policy on the university funding page."
            if university.funding_source_url
            else "Before final submission, verify the current aid policy on the official admissions and financial aid pages."
        ),
    ]
    return _dedupe(funding)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def _action_plan(university: University, major: str, programs: list[dict[str, str]]) -> list[dict[str, str]]:
|
| 223 |
+
top_programs = ", ".join(program["name"] for program in programs[:3])
|
| 224 |
+
return [
|
| 225 |
+
{
|
| 226 |
+
"title": "Lock the exact application lane",
|
| 227 |
+
"detail": f"Keep this advisor strictly on {major} at {university.name}. Do not dilute it into a generic student summary.",
|
| 228 |
+
},
|
| 229 |
+
{
|
| 230 |
+
"title": "Add one real research signal",
|
| 231 |
+
"detail": (
|
| 232 |
+
f"Prioritize named programs like {top_programs} instead of generic competitions or random certificates."
|
| 233 |
+
if top_programs
|
| 234 |
+
else "Prioritize a named research program with a real output, not another generic extracurricular."
|
| 235 |
+
),
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"title": "Build one flagship major-aligned artifact",
|
| 239 |
+
"detail": "Ship one serious project, paper, or technical build that can carry the application narrative for this major.",
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"title": "Protect the funding story",
|
| 243 |
+
"detail": (
|
| 244 |
+
"Keep this school only if the international full-funding route still checks out on the current official page."
|
| 245 |
+
if university.full_ride_possible
|
| 246 |
+
else "Treat funding as a blocker, not a footnote. If full funding is not realistic, move the school out of the core list."
|
| 247 |
+
),
|
| 248 |
+
},
|
| 249 |
+
]
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def build_report_advisor_snapshot(
    *,
    university: University,
    profile: StudentProfile | None,
    user_country: str | None,
    report_note: str,
) -> dict[str, Any]:
    """Assemble the JSON-serializable advisor snapshot stored on a report.

    The target major falls back from the student's intended major, to the
    university's first listed strength, to a generic placeholder.
    """
    profile_major = profile.intended_major if profile and profile.intended_major else None
    strength_major = university.major_strengths[0] if university.major_strengths else None
    major = profile_major or strength_major or "your target major"

    programs = _programs_for(university, major)

    return {
        "title": f"{university.name} advisor",
        "subtitle": f"Focused on {major} and the funding reality for an international applicant.",
        "target_major": major,
        "report_note": report_note,
        "focus_areas": _focus_areas(university, major, profile),
        "research_programs": programs,
        "funding_plan": _funding_plan(university, user_country),
        "action_plan": _action_plan(university, major, programs),
    }
|
apps/api/src/services/rewrite_service.py
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Rewrite service for generating style variants of achievement descriptions.
|
| 3 |
+
Uses Gemini when available, then falls back to conservative local formatting.
|
| 4 |
+
"""
|
| 5 |
+
import json
|
| 6 |
+
import re
|
| 7 |
+
from typing import Any, Dict, List, Optional, Tuple
|
| 8 |
+
|
| 9 |
+
import httpx
|
| 10 |
+
from sqlalchemy.orm import Session
|
| 11 |
+
|
| 12 |
+
from ..config import settings
|
| 13 |
+
from ..models.achievement import Achievement, AchievementType
|
| 14 |
+
from ..models.report import OptimizationReport, RewriteVariant
|
| 15 |
+
from .counselor_knowledge import CHANCELLOR_COUNSELOR_FRAMEWORK
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Per-platform character limits applied to generated descriptions.
COMMON_APP_ACTIVITY_DESC_LIMIT = 150
COMMON_APP_HONOR_DESC_LIMIT = 100
KAIST_DESC_LIMIT = 200
KOREA_DEFAULT_DESC_LIMIT = 300

# The three rewrite styles, in generation order: (style_mode, flag, description).
# NOTE(review): the boolean middle element's meaning is not shown in this chunk
# (only impact_first is True) — presumably an "emphasize impact" switch for the
# prompt; confirm against the consuming code.
STYLE_ORDER: list[tuple[str, bool, str]] = [
    (
        "factual",
        False,
        "Clean English wording that preserves the verified facts without hype.",
    ),
    (
        "impact_first",
        True,
        "Leads with the strongest outcome, scope, or selectivity before explaining the role.",
    ),
    (
        "understated",
        False,
        "Concise, restrained version that keeps the student's voice factual.",
    ),
]

# JSON Schema constraining the model's structured output: an object with a
# "variants" array whose items each carry style_mode/text/explanation.
REWRITE_SCHEMA = {
    "type": "object",
    "properties": {
        "variants": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "style_mode": {
                        "type": "string",
                        "enum": ["factual", "impact_first", "understated"],
                    },
                    "text": {"type": "string"},
                    "explanation": {"type": "string"},
                },
                "required": ["style_mode", "text", "explanation"],
            },
        },
    },
    "required": ["variants"],
}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def _compact_whitespace(value: str) -> str:
|
| 65 |
+
return re.sub(r"\s+", " ", value or "").strip()
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _truncate_to_limit(text: str, limit: int) -> str:
    """Hard truncate text to a character limit, breaking at a word boundary when possible."""
    text = _compact_whitespace(text)
    if len(text) <= limit:
        return text

    cut = text[:limit]
    boundary = cut.rfind(" ")
    # Only back up to the word boundary when it costs fewer than 24 characters.
    if boundary > limit - 24:
        cut = cut[:boundary]
    return cut.rstrip(",.;: ")
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _has_cyrillic(value: str) -> bool:
|
| 81 |
+
return bool(re.search(r"[\u0400-\u04FF]", value or ""))
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _ascii_punctuation(value: str) -> str:
|
| 85 |
+
replacements = {
|
| 86 |
+
"\u2018": "'",
|
| 87 |
+
"\u2019": "'",
|
| 88 |
+
"\u201c": '"',
|
| 89 |
+
"\u201d": '"',
|
| 90 |
+
"\u2013": "-",
|
| 91 |
+
"\u2014": "-",
|
| 92 |
+
"\u2026": "...",
|
| 93 |
+
"\u00a0": " ",
|
| 94 |
+
}
|
| 95 |
+
for source, target in replacements.items():
|
| 96 |
+
value = value.replace(source, target)
|
| 97 |
+
return value
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _normalize_award_level(value: str) -> str:
|
| 101 |
+
value = re.sub(r"\bRepublican\b", "National", value, flags=re.IGNORECASE)
|
| 102 |
+
value = re.sub(r"\bRespublikalyk\b", "National", value, flags=re.IGNORECASE)
|
| 103 |
+
value = re.sub(r"\bRespublikanskiy\b", "National", value, flags=re.IGNORECASE)
|
| 104 |
+
return value
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def _capitalize_first(value: str) -> str:
|
| 108 |
+
if not value:
|
| 109 |
+
return value
|
| 110 |
+
for index, char in enumerate(value):
|
| 111 |
+
if char.isalpha():
|
| 112 |
+
return value[:index] + char.upper() + value[index + 1 :]
|
| 113 |
+
return value
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def _extract_year_phrases(value: str) -> list[str]:
    """Find up to two distinct year or year-range phrases (2000-2099) in the text.

    Matches forms like "2024", "2024-2025", and "2024-25", with a hyphen,
    en dash, or em dash between the endpoints; results are normalized to ASCII
    hyphens with no surrounding spaces, preserving first-seen order.
    """
    years: list[str] = []
    for match in re.finditer(r"\b20\d{2}(?:\s*[-\u2013\u2014]\s*(?:20)?\d{2})?\b", value or ""):
        year = _ascii_punctuation(match.group(0))
        # Collapse "2024 - 2025" style spacing around the dash to "2024-2025".
        year = re.sub(r"\s*-\s*", "-", year)
        if year not in years:
            years.append(year)
    # Two phrases are enough for the rewrite prompt; drop the rest.
    return years[:2]
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def _preserve_required_year(text: str, required_years: list[str], limit: int) -> str:
    """Ensure at least one required year phrase survives in the generated text.

    If none of the required years appear, the first one is appended — placed
    before a trailing period when the text ends with one — and the base text is
    truncated if needed so the combined result stays within `limit` characters.
    """
    if not text or not required_years:
        return text
    if any(year in text for year in required_years):
        return text

    suffix = f", {required_years[0]}"
    if text.endswith("."):
        # Keep the period terminal: "... events." -> "... events 2024."
        suffix = f" {required_years[0]}."
        base = text[:-1]
    else:
        base = text
    if len(base) + len(suffix) <= limit:
        return f"{base}{suffix}"

    # Not enough room: shorten the base so the year suffix still fits.
    base = _truncate_to_limit(base, max(1, limit - len(suffix)))
    return f"{base}{suffix}"
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def _clean_generated_text(text: str, *, limit: int, required_years: list[str]) -> str:
    """Normalize model output into submission-ready English text.

    Pipeline (order matters): collapse whitespace, ASCII-fy punctuation, strip
    wrapping quotes, normalize award-level wording, capitalize the first letter,
    reject any text still containing Cyrillic, re-attach a required year, and
    enforce the character limit. Returns "" when the Cyrillic check fails so the
    caller can fall back to a deterministic rewrite.
    """
    text = _ascii_punctuation(_compact_whitespace(text)).strip(" \"'")
    text = _normalize_award_level(text)
    text = _capitalize_first(text)
    if _has_cyrillic(text):
        # Untranslated output is unusable for the application form.
        return ""
    text = _preserve_required_year(text, required_years, limit)
    return _truncate_to_limit(text, limit)
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _achievement_type(achievement: Achievement) -> str:
    """Return the achievement type as a plain string, defaulting to 'activity'."""
    raw = getattr(achievement.type, "value", achievement.type)
    if raw:
        return str(raw)
    return str(AchievementType.activity.value)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def _target_format(report: OptimizationReport, achievement: Achievement) -> dict[str, Any]:
    """Pick the application format (label, character limit, prompt instruction).

    Korean targets (detected by country or well-known school tokens) get
    Korea/KAIST-specific limits; all other targets default to Common App honors
    or activities depending on the achievement type.
    """
    university = getattr(report, "university", None)
    name = str(getattr(university, "name", "") or "")
    country = str(getattr(university, "country", "") or "")
    application_system = str(getattr(university, "application_system", "") or "")
    # Single lowercase haystack so the token checks below cover name, country,
    # and application system at once.
    haystack = f"{name} {country} {application_system}".lower()

    if "korea" in haystack or any(token in haystack for token in ["kaist", "unist", "postech", "yonsei"]):
        if "kaist" in haystack:
            return {
                "label": "KAIST Apply",
                "limit": KAIST_DESC_LIMIT,
                "limit_unit": "English bytes/chars",
                "is_common_app": False,
                "instruction": "Use KAIST-style concise English. Prioritize year, placement/selectivity, role, and quantified output.",
            }
        return {
            "label": "Korean university application",
            "limit": KOREA_DEFAULT_DESC_LIMIT,
            "limit_unit": "English bytes/chars",
            "is_common_app": False,
            "instruction": "Do not assume Common App. Use concise Study in Korea or university-portal-ready English.",
        }

    # Non-Korean targets: honors vs. activities use different Common App limits.
    if _achievement_type(achievement) == AchievementType.honor.value:
        return {
            "label": "Common App honors",
            "limit": COMMON_APP_HONOR_DESC_LIMIT,
            "limit_unit": "characters",
            "is_common_app": True,
            "instruction": "Write one Common App honor title/description block.",
        }

    return {
        "label": "Common App activities",
        "limit": COMMON_APP_ACTIVITY_DESC_LIMIT,
        "limit_unit": "characters",
        "is_common_app": True,
        "instruction": "Write a Common App activity description. Do not repeat the position field.",
    }
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _extract_key_facts(achievement: Achievement) -> Dict[str, Any]:
    """Extract factual elements from the achievement for use in rewrites."""
    pairs: list = [
        ("type", _achievement_type(achievement)),
        ("title", achievement.title),
        ("organization_name", achievement.organization_name),
        ("role_title", achievement.role_title),
        ("description_raw", achievement.description_raw),
        ("category", achievement.category),
        ("start_date", achievement.start_date.isoformat() if achievement.start_date else None),
        ("end_date", achievement.end_date.isoformat() if achievement.end_date else None),
        ("hours_per_week", achievement.hours_per_week),
        ("weeks_per_year", achievement.weeks_per_year),
        ("impact_scope", getattr(achievement.impact_scope, "value", achievement.impact_scope)),
        ("leadership_level", getattr(achievement.leadership_level, "value", achievement.leadership_level)),
    ]
    # Empty strings and None carry no factual signal for the rewrite prompt.
    return {key: value for key, value in pairs if value not in (None, "")}
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def _source_text(facts: dict[str, Any]) -> str:
|
| 222 |
+
return " ".join(str(value) for value in facts.values() if value not in (None, ""))
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def _fallback_text(achievement: Achievement, facts: dict[str, Any], limit: int, required_years: list[str]) -> str:
    """Build a safe, deterministic rewrite from raw facts when LLM output is unusable."""
    role = str(facts.get("role_title") or "")
    organization = facts.get("organization_name")
    body = str(facts.get("description_raw") or facts.get("title") or "")
    pieces = [role, f"at {facts['organization_name']}" if organization else "", body]
    candidate = " ".join(piece for piece in pieces if piece)
    cleaned = _clean_generated_text(candidate, limit=limit, required_years=required_years)
    if cleaned:
        return cleaned
    # Last resort: a fixed English notice so the UI never shows an empty variant.
    return _truncate_to_limit(
        "English rewrite unavailable; verify exact facts before submission.",
        limit,
    )
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def _rewrite_prompt(
    *,
    achievement: Achievement,
    facts: dict[str, Any],
    target_format: dict[str, Any],
    required_years: list[str],
) -> str:
    """Build the full Gemini prompt for the three-variant achievement rewrite.

    NOTE(review): ``achievement`` is not referenced in this body — the facts
    dict already carries its data; confirm before removing the parameter.
    """
    payload = {
        "achievement_facts": facts,
        "target_format": target_format,
        "required_years_from_source": required_years,
    }
    return (
        "You are ApplyMap Chancellor. Rewrite one achievement into copy-paste-ready application text.\n\n"
        f"{CHANCELLOR_COUNSELOR_FRAMEWORK}\n\n"
        "Rules:\n"
        "- Return exactly three variants: factual, impact_first, and understated.\n"
        "- Output only polished English text. Translate Russian, Kazakh, or mixed-language input into English.\n"
        "- Fix capitalization, grammar, and informal phrasing.\n"
        "- Preserve years, date ranges, grade level, number of people served, event names, placements, and supported metrics.\n"
        "- Never replace a concrete source detail with a guessed or more impressive detail. If the source says gift cards, "
        "lessons, mentoring, or another specific activity, translate that detail directly; do not invent tournaments, "
        "research, publications, awards, or program names.\n"
        "- Translate Kazakhstan award level words like Republican/Respublikalyk/Respublikanskiy as National unless an official "
        "English title clearly uses Republican.\n"
        "- Do not invent facts, participant counts, selection rates, titles, or outcomes.\n"
        "- Use ASCII punctuation. Do not output Cyrillic text.\n"
        f"- Target format: {target_format['label']}; limit: {target_format['limit']} {target_format['limit_unit']}.\n"
        f"- {target_format['instruction']}\n"
        "- For Korean universities, do not call the output Common App wording unless the application system is Common App.\n"
        "- Keep the meaning even when compressing. If the source says the student mentored five 8th graders as an 11th grader "
        "in 2024-2025 and organized events, keep those facts in concise English.\n"
        "- Each text must be no longer than the target limit.\n"
        "- Explanations should be one short sentence and should not mention internal materials.\n\n"
        f"Input JSON:\n{json.dumps(payload, ensure_ascii=False, default=str)}"
    )
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def _extract_gemini_text(response_payload: dict[str, Any]) -> str:
|
| 279 |
+
candidates = response_payload.get("candidates") or []
|
| 280 |
+
content = candidates[0].get("content") if candidates else {}
|
| 281 |
+
parts = content.get("parts") or []
|
| 282 |
+
return str(parts[0].get("text", "")) if parts else ""
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def _gemini_rewrite_variants(
    achievement: Achievement,
    facts: dict[str, Any],
    target_format: dict[str, Any],
    required_years: list[str],
) -> Optional[dict[str, tuple[str, str]]]:
    """Ask Gemini for the three style variants of one achievement.

    Returns a mapping of style_mode -> (cleaned text, explanation) containing
    only variants that survived cleaning, or None when the API key is missing,
    the request fails, or nothing usable came back (caller then falls back).
    """
    api_key = settings.GEMINI_API_KEY.strip()
    if not api_key:
        return None

    model = (settings.GEMINI_MODEL or "gemini-2.5-flash").strip()
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
    payload = {
        "contents": [
            {
                "parts": [
                    {
                        "text": _rewrite_prompt(
                            achievement=achievement,
                            facts=facts,
                            target_format=target_format,
                            required_years=required_years,
                        )
                    }
                ]
            }
        ],
        "generationConfig": {
            # Low temperature: rewrites must stay close to the source facts.
            "temperature": 0.1,
            "maxOutputTokens": 4096,
            "responseMimeType": "application/json",
            "responseJsonSchema": REWRITE_SCHEMA,
        },
    }

    try:
        with httpx.Client(timeout=45.0) as client:
            response = client.post(
                url,
                headers={"x-goog-api-key": api_key, "Content-Type": "application/json"},
                json=payload,
            )
            response.raise_for_status()
            parsed = json.loads(_extract_gemini_text(response.json()))
    # AttributeError added: malformed candidate payloads (e.g. a safety-blocked
    # candidate with no "content" object) previously escaped this handler.
    except (httpx.HTTPError, json.JSONDecodeError, AttributeError, KeyError, TypeError, ValueError):
        return None

    # The schema requests an object, but guard in case the model returns
    # something else (e.g. a bare list) — .get on a list would raise.
    if not isinstance(parsed, dict):
        return None

    variants: dict[str, tuple[str, str]] = {}
    for raw_variant in parsed.get("variants") or []:
        if not isinstance(raw_variant, dict):
            continue
        style_mode = str(raw_variant.get("style_mode") or "")
        if style_mode not in {style for style, _, _ in STYLE_ORDER}:
            continue
        text = _clean_generated_text(
            str(raw_variant.get("text") or ""),
            limit=int(target_format["limit"]),
            required_years=required_years,
        )
        if not text:
            continue
        explanation = _compact_whitespace(str(raw_variant.get("explanation") or "Generated from verified student facts."))
        variants[style_mode] = (text, explanation)

    return variants or None
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def generate_rewrite_variants(
    db: Session,
    achievement: Achievement,
    report: OptimizationReport,
) -> List[RewriteVariant]:
    """
    Generate 3 style variants for an achievement.

    Tries Gemini first; any style the model failed to produce falls back to a
    deterministic text built from the achievement's own facts, paired with the
    style's default explanation from STYLE_ORDER.
    Returns RewriteVariant objects (not yet committed to DB).

    NOTE(review): ``db`` is not used by this body; presumably kept for
    interface stability with the route layer — confirm before removing.
    """
    facts = _extract_key_facts(achievement)
    target_format = _target_format(report, achievement)
    # Year phrases found in the source must survive into every variant.
    required_years = _extract_year_phrases(_source_text(facts))
    generated = _gemini_rewrite_variants(achievement, facts, target_format, required_years) or {}

    fallback = _fallback_text(achievement, facts, int(target_format["limit"]), required_years)
    variants = []
    for style_mode, is_recommended, default_explanation in STYLE_ORDER:
        text, explanation = generated.get(style_mode, (fallback, default_explanation))
        variant = RewriteVariant(
            achievement_id=achievement.id,
            report_id=report.id,
            style_mode=style_mode,
            text=text,
            character_count=len(text),
            is_recommended=is_recommended,
            explanation=explanation,
        )
        variants.append(variant)

    return variants
|
apps/api/src/services/university_advisor.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import Any
|
| 3 |
+
from urllib.parse import urlparse
|
| 4 |
+
|
| 5 |
+
import httpx
|
| 6 |
+
|
| 7 |
+
from ..config import settings
|
| 8 |
+
from .chancellor_analysis import ADMISSIONS_FRAMEWORK
|
| 9 |
+
from .counselor_knowledge import CHANCELLOR_COUNSELOR_FRAMEWORK
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Gemini structured-output schema for the per-university action plan. Every
# top-level section is required so the UI can render each block without null
# checks; empty lists are the "nothing to report" signal.
ADVISOR_SCHEMA = {
    "type": "object",
    "properties": {
        "summary": {"type": "string"},
        "exams_to_prioritize": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "exam": {"type": "string"},
                    "why": {"type": "string"},
                    "priority": {"type": "string", "enum": ["high", "medium", "low"]},
                },
                "required": ["exam", "why", "priority"],
            },
        },
        "profile_actions": {
            "type": "array",
            "items": {"type": "string"},
        },
        "low_value_activities": {
            "type": "array",
            "items": {"type": "string"},
        },
        "research_or_summer_programs": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "why_it_helps": {"type": "string"},
                    # source_url is optional: programs may come from snippets
                    # whose canonical URL is not known.
                    "source_url": {"type": "string"},
                },
                "required": ["name", "why_it_helps"],
            },
        },
        "source_notes": {
            "type": "array",
            "items": {"type": "string"},
        },
    },
    "required": [
        "summary",
        "exams_to_prioritize",
        "profile_actions",
        "low_value_activities",
        "research_or_summer_programs",
        "source_notes",
    ],
}
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class SearchNotConfiguredError(RuntimeError):
    """Raised when the Google Custom Search credentials are missing from settings."""

    pass
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _google_search(query: str, *, num: int = 5) -> list[dict[str, str]]:
    """Run one Google Custom Search query and return simplified result dicts.

    Each returned dict has "title", "url", and "snippet" keys; items without a
    link are dropped. Raises SearchNotConfiguredError when either the API key
    or the engine ID is missing, and httpx.HTTPStatusError on non-2xx replies.
    """
    api_key = settings.GOOGLE_SEARCH_API_KEY.strip()
    engine_id = settings.GOOGLE_SEARCH_ENGINE_ID.strip()
    if not api_key or not engine_id:
        raise SearchNotConfiguredError("Google Custom Search is not configured")

    with httpx.Client(timeout=12.0) as client:
        response = client.get(
            "https://www.googleapis.com/customsearch/v1",
            params={
                "key": api_key,
                "cx": engine_id,
                "q": query,
                "num": num,
                "safe": "active",
                "hl": "en",
            },
        )
        response.raise_for_status()

    items = response.json().get("items") or []
    return [
        {
            "title": str(item.get("title") or ""),
            "url": str(item.get("link") or ""),
            "snippet": str(item.get("snippet") or ""),
        }
        for item in items
        if item.get("link")
    ]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _source_tier(url: str, university_name: str) -> str:
|
| 101 |
+
host = urlparse(url).netloc.lower()
|
| 102 |
+
name_tokens = [token for token in university_name.lower().replace("-", " ").split() if len(token) > 3]
|
| 103 |
+
has_name_token = any(token in host for token in name_tokens)
|
| 104 |
+
if has_name_token and (".edu" in host or ".ac." in host or host.endswith(".edu")):
|
| 105 |
+
return "official"
|
| 106 |
+
if has_name_token:
|
| 107 |
+
return "likely_official"
|
| 108 |
+
if ".edu" in host or ".ac." in host:
|
| 109 |
+
return "education_domain"
|
| 110 |
+
return "third_party"
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def search_university_sources(university_name: str, intended_major: str | None = None) -> list[dict[str, str]]:
    """Run a fixed set of admissions-focused queries, dedupe by URL, tag each hit.

    Results are annotated with the originating query and a source-tier label,
    and collection stops as soon as 12 unique URLs have been gathered.
    """
    major = intended_major or "undergraduate"
    queries = [
        f"{university_name} official undergraduate admissions international students requirements",
        f"{university_name} official scholarships financial aid international students",
        f"{university_name} official English taught programs {major}",
        f"{university_name} official research summer programs high school students {major}",
    ]

    collected: list[dict[str, str]] = []
    seen_urls: set[str] = set()
    for query in queries:
        for item in _google_search(query, num=5):
            url = item["url"]
            if url in seen_urls:
                continue
            seen_urls.add(url)
            entry = dict(item)
            entry["query"] = query
            entry["source_tier"] = _source_tier(url, university_name)
            collected.append(entry)
            # Cap the payload handed to the model.
            if len(collected) >= 12:
                return collected
    return collected
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def _profile_payload(user: Any) -> dict[str, Any]:
|
| 143 |
+
profile = getattr(user, "profile", None)
|
| 144 |
+
return {
|
| 145 |
+
"country": getattr(user, "country", None),
|
| 146 |
+
"graduation_year": getattr(profile, "graduation_year", None),
|
| 147 |
+
"curriculum": getattr(profile, "curriculum", None),
|
| 148 |
+
"intended_major": getattr(profile, "intended_major", None),
|
| 149 |
+
"sat_score": getattr(profile, "sat_score", None),
|
| 150 |
+
"sat_math": getattr(profile, "sat_math", None),
|
| 151 |
+
"sat_ebrw": getattr(profile, "sat_ebrw", None),
|
| 152 |
+
"act_score": getattr(profile, "act_score", None),
|
| 153 |
+
"ielts_score": getattr(profile, "ielts_score", None),
|
| 154 |
+
"toefl_score": getattr(profile, "toefl_score", None),
|
| 155 |
+
"duolingo_score": getattr(profile, "duolingo_score", None),
|
| 156 |
+
"a_level_subjects": getattr(profile, "a_level_subjects", None),
|
| 157 |
+
"ib_predicted_score": getattr(profile, "ib_predicted_score", None),
|
| 158 |
+
"unt_score": getattr(profile, "unt_score", None),
|
| 159 |
+
"nis_grade12_certificate_gpa": getattr(profile, "nis_grade12_certificate_gpa", None),
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def _achievement_payload(achievements: list[Any]) -> list[dict[str, Any]]:
|
| 164 |
+
return [
|
| 165 |
+
{
|
| 166 |
+
"type": getattr(item.type, "value", item.type),
|
| 167 |
+
"title": item.title,
|
| 168 |
+
"organization_name": item.organization_name,
|
| 169 |
+
"role_title": item.role_title,
|
| 170 |
+
"description_raw": item.description_raw,
|
| 171 |
+
"category": item.category,
|
| 172 |
+
"impact_scope": getattr(item.impact_scope, "value", item.impact_scope),
|
| 173 |
+
"leadership_level": getattr(item.leadership_level, "value", item.leadership_level),
|
| 174 |
+
"major_relevance_score": item.major_relevance_score,
|
| 175 |
+
"selectivity_score": item.selectivity_score,
|
| 176 |
+
"continuity_score": item.continuity_score,
|
| 177 |
+
"distinctiveness_score": item.distinctiveness_score,
|
| 178 |
+
}
|
| 179 |
+
for item in achievements
|
| 180 |
+
]
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def _prompt(
    *,
    university_name: str,
    user: Any,
    achievements: list[Any],
    search_results: list[dict[str, str]],
) -> str:
    """Build the Gemini prompt for the per-university action plan.

    Embeds the student profile, serialized achievements, and the raw search
    results as an Input JSON block, plus grounding rules that forbid inventing
    requirements not present in the search results.
    """
    payload = {
        "target_university": university_name,
        "student_profile": _profile_payload(user),
        "student_achievements": _achievement_payload(achievements),
        "google_search_results": search_results,
    }
    return (
        "You are ApplyMap Chancellor. Give a concise, factual action plan for one target university. "
        "Use only the student profile, achievements, and the supplied Google Custom Search results. "
        "Do not invent admission requirements, scores, deadlines, program names, or scholarships. "
        "If a fact is not supported by a source result, write that it cannot be confirmed from the current sources.\n\n"
        "Kazakhstan context: interpret UNT/ENT, NIS Grade 12 Certificate, NIS school context, IB, A-levels, "
        "and 11 vs 12 years of schooling as important fit factors. MESK in Russian/Kazakh user language maps "
        "to NIS Grade 12 Certificate in English.\n\n"
        f"{ADMISSIONS_FRAMEWORK}\n\n"
        f"{CHANCELLOR_COUNSELOR_FRAMEWORK}\n\n"
        "Be direct. Avoid motivational filler. Identify exams that could materially improve the application, "
        "activities that are low-value for this target, and research or summer programs only when they appear "
        "in the supplied source results. If google_search_results is empty, say current university facts cannot "
        "be confirmed and give only general next steps that do not depend on current requirements. Return JSON only.\n\n"
        f"Input JSON:\n{json.dumps(payload, ensure_ascii=False, default=str)}"
    )
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
def _extract_text(response_payload: dict[str, Any]) -> str:
|
| 215 |
+
candidates = response_payload.get("candidates") or []
|
| 216 |
+
content = candidates[0].get("content") if candidates else {}
|
| 217 |
+
parts = content.get("parts") or []
|
| 218 |
+
return str(parts[0].get("text", "")) if parts else ""
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def generate_university_action_plan(
    *,
    university_name: str,
    user: Any,
    achievements: list[Any],
    search_results: list[dict[str, str]],
) -> dict[str, Any]:
    """Generate the Chancellor action plan for one university via Gemini.

    Returns a dict shaped like ADVISOR_SCHEMA. Degrades gracefully: a stub
    plan explaining the problem is returned when Gemini is not configured or
    when the request/response parsing fails — the caller never sees an
    exception from this function's handled error set.
    """
    api_key = settings.GEMINI_API_KEY.strip()
    if not api_key:
        # Without a key we cannot call the API; return an explanatory stub.
        return {
            "summary": "Gemini is not configured, so ApplyMap cannot generate a source-backed action plan yet.",
            "exams_to_prioritize": [],
            "profile_actions": [],
            "low_value_activities": [],
            "research_or_summer_programs": [],
            "source_notes": ["Set GEMINI_API_KEY to enable the Chancellor action plan."],
        }

    model = (settings.GEMINI_MODEL or "gemini-2.5-flash").strip()
    url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
    request_payload = {
        "contents": [
            {
                "parts": [
                    {
                        "text": _prompt(
                            university_name=university_name,
                            user=user,
                            achievements=achievements,
                            search_results=search_results,
                        )
                    }
                ]
            }
        ],
        "generationConfig": {
            # Low temperature: the plan must stay factual and reproducible.
            "temperature": 0.1,
            "responseMimeType": "application/json",
            "responseJsonSchema": ADVISOR_SCHEMA,
        },
    }

    try:
        with httpx.Client(timeout=25.0) as client:
            response = client.post(
                url,
                headers={"x-goog-api-key": api_key, "Content-Type": "application/json"},
                json=request_payload,
            )
            response.raise_for_status()
            # NOTE(review): AttributeError from a malformed candidate payload
            # is not in the except tuple below; confirm _extract_text guards a
            # missing/None "content" object.
            return json.loads(_extract_text(response.json()))
    except (httpx.HTTPError, json.JSONDecodeError, KeyError, TypeError, ValueError):
        return {
            "summary": "The Chancellor could not generate a reliable JSON plan from the current sources.",
            "exams_to_prioritize": [],
            "profile_actions": [],
            "low_value_activities": [],
            "research_or_summer_programs": [],
            "source_notes": ["Retry with a more specific university name or after checking API configuration."],
        }
|
apps/api/src/services/university_filters.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Iterable
|
| 2 |
+
|
| 3 |
+
from ..models.university import University
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def enrich_university(university: University) -> dict[str, Any]:
    """Serialize a University row into the plain dict used by filtering and sorting.

    Key order matches the original hand-written mapping; the two list columns
    are normalized so a NULL in the database becomes an empty list.
    """
    data: dict[str, Any] = {}
    for field in (
        "id",
        "slug",
        "name",
        "country",
        "application_system",
        "application_source_url",
        "short_description",
        "weight_preset",
        "is_active",
        "region",
        "city",
        "is_common_app",
    ):
        data[field] = getattr(university, field)
    # List columns may be NULL; downstream code joins/iterates them directly.
    data["teaching_languages"] = university.teaching_languages or []
    data["major_strengths"] = university.major_strengths or []
    for field in (
        "education_years_required",
        "school_years_note",
        "aid_type",
        "aid_strength",
        "selectivity_score",
        "full_ride_possible",
        "full_tuition_possible",
        "aid_notes",
        "funding_source_url",
        "funding_source_title",
        "eligibility_notes",
        "created_at",
        "updated_at",
    ):
        data[field] = getattr(university, field)
    return data
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def filter_universities(
|
| 39 |
+
universities: Iterable[dict[str, Any]],
|
| 40 |
+
*,
|
| 41 |
+
search: str | None = None,
|
| 42 |
+
country: str | None = None,
|
| 43 |
+
region: str | None = None,
|
| 44 |
+
application_system: str | None = None,
|
| 45 |
+
teaching_language: str | None = None,
|
| 46 |
+
major: str | None = None,
|
| 47 |
+
school_years: int | None = None,
|
| 48 |
+
full_ride_only: bool = False,
|
| 49 |
+
common_app_only: bool = False,
|
| 50 |
+
aid_type: str | None = None,
|
| 51 |
+
sort_by: str = "name",
|
| 52 |
+
sort_dir: str = "asc",
|
| 53 |
+
) -> list[dict[str, Any]]:
|
| 54 |
+
result = list(universities)
|
| 55 |
+
|
| 56 |
+
if search:
|
| 57 |
+
needle = search.lower()
|
| 58 |
+
result = [
|
| 59 |
+
item for item in result
|
| 60 |
+
if needle in item["name"].lower()
|
| 61 |
+
or needle in (item.get("country") or "").lower()
|
| 62 |
+
or needle in " ".join(item.get("major_strengths") or []).lower()
|
| 63 |
+
]
|
| 64 |
+
if country:
|
| 65 |
+
result = [item for item in result if (item.get("country") or "").lower() == country.lower()]
|
| 66 |
+
if region:
|
| 67 |
+
result = [item for item in result if (item.get("region") or "").lower() == region.lower()]
|
| 68 |
+
if application_system:
|
| 69 |
+
needle = application_system.lower()
|
| 70 |
+
result = [item for item in result if needle in (item.get("application_system") or "").lower()]
|
| 71 |
+
if teaching_language:
|
| 72 |
+
result = [
|
| 73 |
+
item for item in result
|
| 74 |
+
if teaching_language.lower() in [language.lower() for language in item.get("teaching_languages") or []]
|
| 75 |
+
]
|
| 76 |
+
if major:
|
| 77 |
+
major_needle = major.lower()
|
| 78 |
+
result = [
|
| 79 |
+
item for item in result
|
| 80 |
+
if major_needle in " ".join(item.get("major_strengths") or []).lower()
|
| 81 |
+
]
|
| 82 |
+
if school_years:
|
| 83 |
+
result = [
|
| 84 |
+
item for item in result
|
| 85 |
+
if not item.get("education_years_required")
|
| 86 |
+
or int(item["education_years_required"]) <= school_years
|
| 87 |
+
]
|
| 88 |
+
if full_ride_only:
|
| 89 |
+
result = [item for item in result if item.get("full_ride_possible")]
|
| 90 |
+
if common_app_only:
|
| 91 |
+
result = [item for item in result if item.get("is_common_app")]
|
| 92 |
+
if aid_type:
|
| 93 |
+
result = [item for item in result if item.get("aid_type") == aid_type]
|
| 94 |
+
|
| 95 |
+
sort_key = sort_by if sort_by in {"name", "country", "aid_type", "aid_strength", "selectivity_score", "education_years_required"} else "name"
|
| 96 |
+
reverse = sort_dir == "desc"
|
| 97 |
+
return sorted(result, key=lambda item: (item.get(sort_key) is None, item.get(sort_key)), reverse=reverse)
|
apps/api/src/services/university_recommender.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
from typing import Any
|
| 3 |
+
|
| 4 |
+
import httpx
|
| 5 |
+
|
| 6 |
+
from ..config import settings
|
| 7 |
+
from ..models.achievement import Achievement
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# JSON Schema passed to Gemini via generationConfig.responseJsonSchema so the
# model returns structured recommendations instead of free-form text.
RECOMMENDATION_SCHEMA = {
    "type": "object",
    "properties": {
        # Optional overall narrative for the whole shortlist.
        "summary": {"type": "string"},
        "recommendations": {
            "type": "array",
            # Hard cap mirrors the product limit of 20 recommended universities.
            "maxItems": 20,
            "items": {
                "type": "object",
                "properties": {
                    # Must match a slug from allowed_common_app_universities in the prompt.
                    "slug": {"type": "string"},
                    "category": {"type": "string", "enum": ["dream", "target", "safe"]},
                    "rationale": {"type": "string"},
                    "fit_notes": {"type": "string"},
                },
                "required": ["slug", "category", "rationale"],
            },
        },
    },
    "required": ["recommendations"],
}
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _achievement_payload(achievement: Achievement) -> dict[str, Any]:
|
| 34 |
+
scores = [
|
| 35 |
+
achievement.major_relevance_score,
|
| 36 |
+
achievement.selectivity_score,
|
| 37 |
+
achievement.continuity_score,
|
| 38 |
+
achievement.distinctiveness_score,
|
| 39 |
+
]
|
| 40 |
+
numeric_scores = [score for score in scores if isinstance(score, (int, float))]
|
| 41 |
+
return {
|
| 42 |
+
"id": str(achievement.id),
|
| 43 |
+
"type": achievement.type.value,
|
| 44 |
+
"title": achievement.title,
|
| 45 |
+
"organization_name": achievement.organization_name,
|
| 46 |
+
"role_title": achievement.role_title,
|
| 47 |
+
"description_raw": achievement.description_raw,
|
| 48 |
+
"category": achievement.category,
|
| 49 |
+
"impact_scope": getattr(achievement.impact_scope, "value", achievement.impact_scope),
|
| 50 |
+
"leadership_level": getattr(achievement.leadership_level, "value", achievement.leadership_level),
|
| 51 |
+
"hours_per_week": achievement.hours_per_week,
|
| 52 |
+
"weeks_per_year": achievement.weeks_per_year,
|
| 53 |
+
"chancellor_score_average": round(sum(numeric_scores) / len(numeric_scores), 1) if numeric_scores else None,
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _prompt(
    *,
    selected_honors: list[Achievement],
    selected_activities: list[Achievement],
    preferences: dict[str, Any],
    universities: list[dict[str, Any]],
) -> str:
    """Render the full Gemini prompt: fixed instructions plus the input JSON payload."""

    def _university_summary(item: dict[str, Any]) -> dict[str, Any]:
        # Forward only the fields the model needs; everything else stays server-side.
        return {
            "slug": item["slug"],
            "name": item["name"],
            "country": item["country"],
            "major_strengths": item.get("major_strengths"),
            "aid_type": item.get("aid_type"),
            "aid_strength": item.get("aid_strength"),
            "selectivity_score": item.get("selectivity_score"),
            "education_years_required": item.get("education_years_required"),
            "school_years_note": item.get("school_years_note"),
            "aid_notes": item.get("aid_notes"),
        }

    payload = {
        "student_preferences": preferences,
        "selected_top_honors": [_achievement_payload(honor) for honor in selected_honors],
        "selected_top_activities": [_achievement_payload(activity) for activity in selected_activities],
        "allowed_common_app_universities": [_university_summary(item) for item in universities],
    }
    instructions = (
        "You are SourceLock Chancellor. Recommend up to 20 Common App universities using only the selected "
        "top 5 honors, selected top 10 activities, and saved student preferences in the input JSON. "
        "Do not use unselected achievements. Do not recommend universities outside allowed_common_app_universities.\n\n"
        "Categorize results as dream, target, or safe. For a high-need international applicant, safe means "
        "relative safety within this funded/Common App shortlist, not guaranteed admission or aid. Prefer about "
        "4 dream, 10 target, and 6 safe when enough universities are available.\n\n"
        "Consider intended major, preferred countries/regions, school years, teaching language, full-ride need, "
        "aid route quality, and selectivity. Return JSON only.\n\n"
    )
    # default=str keeps non-JSON-native values (UUIDs, dates) serializable.
    return instructions + f"Input JSON:\n{json.dumps(payload, ensure_ascii=False, default=str)}"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def _extract_text(response_payload: dict[str, Any]) -> str:
|
| 98 |
+
candidates = response_payload.get("candidates") or []
|
| 99 |
+
content = candidates[0].get("content") if candidates else {}
|
| 100 |
+
parts = content.get("parts") or []
|
| 101 |
+
return str(parts[0].get("text", "")) if parts else ""
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def _gemini_recommendations(
    *,
    selected_honors: list[Achievement],
    selected_activities: list[Achievement],
    preferences: dict[str, Any],
    universities: list[dict[str, Any]],
) -> list[dict[str, Any]] | None:
    """Ask Gemini for a recommendation shortlist.

    Returns None whenever the LLM result cannot be used — no API key
    configured, transport/HTTP failure, malformed JSON, or an empty usable
    result — so the caller can fall back to the heuristic ranking.
    """
    api_key = settings.GEMINI_API_KEY.strip()
    if not api_key:
        return None

    model = (settings.GEMINI_MODEL or "gemini-2.5-flash").strip()
    endpoint = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
    prompt_text = _prompt(
        selected_honors=selected_honors,
        selected_activities=selected_activities,
        preferences=preferences,
        universities=universities,
    )
    body = {
        "contents": [{"parts": [{"text": prompt_text}]}],
        "generationConfig": {
            # Low temperature keeps the shortlist stable across identical inputs.
            "temperature": 0.15,
            "responseMimeType": "application/json",
            "responseJsonSchema": RECOMMENDATION_SCHEMA,
        },
    }

    try:
        with httpx.Client(timeout=20.0) as client:
            response = client.post(
                endpoint,
                headers={"x-goog-api-key": api_key, "Content-Type": "application/json"},
                json=body,
            )
            response.raise_for_status()
            payload = json.loads(_extract_text(response.json()))
    except (httpx.HTTPError, json.JSONDecodeError, KeyError, TypeError, ValueError):
        # Any failure here is non-fatal: the caller falls back to heuristics.
        return None

    allowed = {item["slug"]: item for item in universities}
    recommendations: list[dict[str, Any]] = []
    for entry in payload.get("recommendations", []):
        # Silently drop hallucinated slugs outside the allowed catalog.
        university = allowed.get(entry.get("slug"))
        if university is None:
            continue
        category = entry.get("category")
        if category not in {"dream", "target", "safe"}:
            category = "target"
        recommendations.append(
            _merge_recommendation(university, category, entry.get("rationale") or "", entry.get("fit_notes"))
        )
        if len(recommendations) == 20:
            break
    return recommendations or None
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def _merge_recommendation(university: dict[str, Any], category: str, rationale: str, fit_notes: str | None) -> dict[str, Any]:
|
| 158 |
+
return {
|
| 159 |
+
"university_id": university["id"],
|
| 160 |
+
"slug": university["slug"],
|
| 161 |
+
"name": university["name"],
|
| 162 |
+
"country": university["country"],
|
| 163 |
+
"category": category,
|
| 164 |
+
"rationale": rationale,
|
| 165 |
+
"fit_notes": fit_notes,
|
| 166 |
+
"aid_notes": university.get("aid_notes"),
|
| 167 |
+
"funding_source_url": university.get("funding_source_url"),
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def _score_university(university: dict[str, Any], preferences: dict[str, Any], achievement_text: str) -> float:
|
| 172 |
+
score = float(university.get("aid_strength") or 0)
|
| 173 |
+
major = str(preferences.get("intended_major") or preferences.get("major") or "").lower()
|
| 174 |
+
preferred_countries = [str(item).lower() for item in preferences.get("preferred_countries", []) if item]
|
| 175 |
+
preferred_regions = [str(item).lower() for item in preferences.get("preferred_regions", []) if item]
|
| 176 |
+
teaching_language = str(preferences.get("teaching_language") or "").lower()
|
| 177 |
+
school_years = preferences.get("school_years")
|
| 178 |
+
|
| 179 |
+
strengths = " ".join(university.get("major_strengths") or []).lower()
|
| 180 |
+
if major and any(term in strengths for term in major.replace("/", " ").split() if len(term) > 2):
|
| 181 |
+
score += 18
|
| 182 |
+
if preferred_countries and university["country"].lower() in preferred_countries:
|
| 183 |
+
score += 10
|
| 184 |
+
if preferred_regions and str(university.get("region") or "").lower() in preferred_regions:
|
| 185 |
+
score += 8
|
| 186 |
+
if teaching_language and teaching_language in [language.lower() for language in university.get("teaching_languages") or []]:
|
| 187 |
+
score += 5
|
| 188 |
+
if preferences.get("needs_full_ride") and university.get("full_ride_possible"):
|
| 189 |
+
score += 12
|
| 190 |
+
if school_years and university.get("education_years_required") and int(school_years) < int(university["education_years_required"]):
|
| 191 |
+
score -= 40
|
| 192 |
+
if "research" in achievement_text and university.get("weight_preset") == "research_heavy":
|
| 193 |
+
score += 6
|
| 194 |
+
return score
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def _fallback_recommendations(
    *,
    selected_honors: list[Achievement],
    selected_activities: list[Achievement],
    preferences: dict[str, Any],
    universities: list[dict[str, Any]],
) -> list[dict[str, Any]]:
    """Deterministic, LLM-free ranking used when Gemini is unavailable or fails."""
    fragments = []
    for item in [*selected_honors, *selected_activities]:
        fragments.append(f"{item.title} {item.description_raw or ''} {item.category or ''}")
    achievement_text = " ".join(fragments).lower()

    def _sort_key(item: dict[str, Any]) -> tuple[float, float]:
        # Primary: heuristic fit score. Secondary: negated selectivity, so under
        # reverse=True ties are broken toward the less selective school.
        return (
            _score_university(item, preferences, achievement_text),
            -(item.get("selectivity_score") or 0),
        )

    ranked = sorted(universities, key=_sort_key, reverse=True)[:20]

    rationale = "Heuristic fallback based on major fit, funding route, school-year compatibility, and selected achievements."
    shortlist = []
    for index, university in enumerate(ranked):
        # Fixed split over the ranked list: 4 dream / 10 target / 6 safe.
        if index < 4:
            category = "dream"
        elif index < 14:
            category = "target"
        else:
            category = "safe"
        shortlist.append(_merge_recommendation(university, category, rationale, university.get("eligibility_notes")))
    return shortlist
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def recommend_common_app_universities(
    *,
    selected_honors: list[Achievement],
    selected_activities: list[Achievement],
    preferences: dict[str, Any],
    universities: list[dict[str, Any]],
) -> list[dict[str, Any]]:
    """Public entry point: try the Gemini shortlist first, then fall back.

    The fallback runs whenever the LLM path yields None or an empty list.
    """
    shared_kwargs = {
        "selected_honors": selected_honors,
        "selected_activities": selected_activities,
        "preferences": preferences,
        "universities": universities,
    }
    llm_shortlist = _gemini_recommendations(**shared_kwargs)
    if llm_shortlist:
        return llm_shortlist
    return _fallback_recommendations(**shared_kwargs)
|
apps/web/Dockerfile
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# syntax=docker/dockerfile:1
# Multi-stage build for the Next.js app using pnpm and `output: "standalone"`.
FROM node:20-alpine AS base
# Pin the pnpm major so rebuilds don't silently pick up a breaking release.
RUN npm i -g pnpm@9

FROM base AS deps
WORKDIR /app
# Include the lockfile when present so installs are reproducible and the
# dependency layer stays cached until package.json / the lockfile change.
COPY package.json pnpm-lock.yaml* ./
RUN pnpm install

FROM base AS builder
WORKDIR /app
COPY --from=deps /app/node_modules ./node_modules
COPY . .
RUN pnpm build

FROM base AS runner
WORKDIR /app
# key=value form; the legacy space-separated ENV form is deprecated.
ENV NODE_ENV=production
# Copy only the standalone server output; build tooling never reaches this stage.
COPY --from=builder --chown=node:node /app/public ./public
COPY --from=builder --chown=node:node /app/.next/standalone ./
COPY --from=builder --chown=node:node /app/.next/static ./.next/static
# Run as the unprivileged "node" user shipped with the official base image.
USER node

EXPOSE 3000
CMD ["node", "server.js"]
|
apps/web/components.json
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"$schema": "https://ui.shadcn.com/schema.json",
|
| 3 |
+
"style": "default",
|
| 4 |
+
"rsc": true,
|
| 5 |
+
"tsx": true,
|
| 6 |
+
"tailwind": {
|
| 7 |
+
"config": "tailwind.config.ts",
|
| 8 |
+
"css": "src/app/globals.css",
|
| 9 |
+
"baseColor": "slate",
|
| 10 |
+
"cssVariables": true
|
| 11 |
+
},
|
| 12 |
+
"aliases": {
|
| 13 |
+
"components": "@/components",
|
| 14 |
+
"utils": "@/lib/utils"
|
| 15 |
+
}
|
| 16 |
+
}
|
apps/web/next-env.d.ts
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/// <reference types="next" />
|
| 2 |
+
/// <reference types="next/image-types/global" />
|
| 3 |
+
|
| 4 |
+
// NOTE: This file should not be edited
|
| 5 |
+
// see https://nextjs.org/docs/basic-features/typescript for more information.
|
apps/web/next.config.mjs
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Next.js configuration for the web app.
const nextConfig = {
  // Proxy same-origin /api/* requests to the FastAPI backend so the browser
  // never needs CORS; INTERNAL_API_URL points at the API container in
  // deployment and falls back to localhost for local development.
  async rewrites() {
    return [
      {
        source: "/api/:path*",
        destination: `${process.env.INTERNAL_API_URL || "http://127.0.0.1:8000"}/api/:path*`,
      },
    ];
  },
  images: {
    // NOTE(review): hostname "**" lets next/image optimize images from ANY
    // https host — confirm this wide-open allowlist is intentional.
    remotePatterns: [
      {
        protocol: "https",
        hostname: "**",
      },
    ],
  },
  experimental: {
    // Currently empty; placeholder for packages that must stay external to
    // the server-components bundle.
    serverComponentsExternalPackages: [],
  },
};

export default nextConfig;
|
apps/web/package.json
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "@applymap/web",
|
| 3 |
+
"version": "0.0.1",
|
| 4 |
+
"private": true,
|
| 5 |
+
"scripts": {
|
| 6 |
+
"dev": "next dev",
|
| 7 |
+
"build": "next build",
|
| 8 |
+
"start": "next start",
|
| 9 |
+
"lint": "next lint",
|
| 10 |
+
"type-check": "tsc --noEmit"
|
| 11 |
+
},
|
| 12 |
+
"dependencies": {
|
| 13 |
+
"next": "14.2.3",
|
| 14 |
+
"react": "^18.3.1",
|
| 15 |
+
"react-dom": "^18.3.1",
|
| 16 |
+
"next-auth": "^4.24.7",
|
| 17 |
+
"@tanstack/react-query": "^5.40.0",
|
| 18 |
+
"@tanstack/react-query-devtools": "^5.40.0",
|
| 19 |
+
"react-hook-form": "^7.51.5",
|
| 20 |
+
"zod": "^3.23.8",
|
| 21 |
+
"@hookform/resolvers": "^3.6.0",
|
| 22 |
+
"axios": "^1.7.2",
|
| 23 |
+
"class-variance-authority": "^0.7.0",
|
| 24 |
+
"clsx": "^2.1.1",
|
| 25 |
+
"tailwind-merge": "^2.3.0",
|
| 26 |
+
"tailwindcss-animate": "^1.0.7",
|
| 27 |
+
"lucide-react": "^0.395.0",
|
| 28 |
+
"@radix-ui/react-accordion": "^1.2.0",
|
| 29 |
+
"@radix-ui/react-alert-dialog": "^1.1.1",
|
| 30 |
+
"@radix-ui/react-avatar": "^1.1.0",
|
| 31 |
+
"@radix-ui/react-checkbox": "^1.1.1",
|
| 32 |
+
"@radix-ui/react-dialog": "^1.1.1",
|
| 33 |
+
"@radix-ui/react-dropdown-menu": "^2.1.1",
|
| 34 |
+
"@radix-ui/react-label": "^2.1.0",
|
| 35 |
+
"@radix-ui/react-progress": "^1.1.0",
|
| 36 |
+
"@radix-ui/react-radio-group": "^1.2.0",
|
| 37 |
+
"@radix-ui/react-select": "^2.1.1",
|
| 38 |
+
"@radix-ui/react-separator": "^1.1.0",
|
| 39 |
+
"@radix-ui/react-slot": "^1.1.0",
|
| 40 |
+
"@radix-ui/react-tabs": "^1.1.0",
|
| 41 |
+
"@radix-ui/react-toast": "^1.2.1",
|
| 42 |
+
"@radix-ui/react-tooltip": "^1.1.1",
|
| 43 |
+
"date-fns": "^3.6.0",
|
| 44 |
+
"sonner": "^1.5.0"
|
| 45 |
+
},
|
| 46 |
+
"devDependencies": {
|
| 47 |
+
"typescript": "^5.4.5",
|
| 48 |
+
"@types/node": "^20.14.2",
|
| 49 |
+
"@types/react": "^18.3.3",
|
| 50 |
+
"@types/react-dom": "^18.3.0",
|
| 51 |
+
"tailwindcss": "^3.4.4",
|
| 52 |
+
"postcss": "^8.4.38",
|
| 53 |
+
"autoprefixer": "^10.4.19",
|
| 54 |
+
"eslint": "^8.57.0",
|
| 55 |
+
"eslint-config-next": "14.2.3"
|
| 56 |
+
}
|
| 57 |
+
}
|
apps/web/postcss.config.js
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// PostCSS pipeline: Tailwind generates the utility CSS, then Autoprefixer
// adds vendor prefixes for the supported browsers.
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
};
|
apps/web/public/applymap-logo.png
ADDED
|
Git LFS Details
|