Commit 5dccc28
Parent(s): none (initial commit)

Initial commit: DocsQA app

- .env.example +15 -0
- .gitignore +18 -0
- README.md +139 -0
- app/__init__.py +1 -0
- app/config.py +35 -0
- app/database.py +41 -0
- app/main.py +374 -0
- app/models.py +69 -0
- app/schemas.py +35 -0
- app/security.py +39 -0
- app/services/agent.py +104 -0
- app/services/document_service.py +212 -0
- app/services/pdf_utils.py +41 -0
- app/services/storage_service.py +79 -0
- app/services/vector_store.py +151 -0
- app/services/web_search.py +72 -0
- app/static/style.css +680 -0
- app/templates/index.html +577 -0
- pyproject.toml +35 -0
.env.example ADDED
@@ -0,0 +1,15 @@
+GROQ_API_KEY=your-groq-api-key
+SECRET_KEY=replace-me-with-a-long-random-string
+DATABASE_URL=postgresql+psycopg://postgres.your-project-ref:your-password@aws-0-region.pooler.supabase.com:6543/postgres
+UPLOAD_DIRECTORY=./uploads
+STORAGE_BACKEND=local
+SUPABASE_URL=
+SUPABASE_SERVICE_ROLE_KEY=
+SUPABASE_STORAGE_BUCKET=documents
+SUPABASE_STORAGE_PREFIX=docsqa
+ACCESS_TOKEN_EXPIRE_MINUTES=720
+MODEL_NAME=llama-3.1-8b-instant
+EMBEDDING_MODEL=mixedbread-ai/mxbai-embed-large-v1
+EMBEDDING_DIMENSIONS=1024
+WEB_SEARCH_PROVIDER=duckduckgo
+TAVILY_API_KEY=
.gitignore ADDED
@@ -0,0 +1,18 @@
+.DS_Store
+.env
+.env.*
+!.env.example
+.venv/
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+.pytest_cache/
+.mypy_cache/
+.ruff_cache/
+.coverage
+htmlcov/
+docsqa_langgraph_assignment.egg-info/
+uploads/
+.idea/
+.vscode/
README.md ADDED
@@ -0,0 +1,139 @@
+# DocsQA LangGraph Assignment
+
+RAG-powered research assistant with:
+
+- Auth (register/login/logout) using HTTP-only cookie sessions
+- Multi-file PDF upload (up to 5 files/request, max 10 pages/file)
+- Duplicate detection by SHA-256 hash with cross-user document reuse
+- Vector indexing in Supabase Postgres + `pgvector`
+- LangGraph agent with document retrieval + web search fallback
+- Session conversation memory for follow-up questions
+- Source citations in answers for both document and web evidence
+- Chat-style UI with markdown rendering
+
+## Architecture
+
+- Backend: FastAPI + SQLAlchemy
+- Agent: LangGraph ReAct agent
+- LLM: Groq chat model
+- Vector store: Supabase Postgres with `pgvector`
+- Search fallback: Tavily (preferred) or DuckDuckGo when available
+
+## Chunking Strategy
+
+- Splitter: recursive character splitter (`chunk_size=1200`, `chunk_overlap=200`)
+- Why:
+  - 1200 characters keep enough local context for legal/business clauses
+  - a 200-character overlap reduces boundary loss between adjacent chunks
+  - a good balance between retrieval accuracy and embedding cost
+- Indexing is page-aware: each stored chunk carries `page_number` metadata.
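A minimal sketch of the page-aware chunking described above, assuming LangChain's `RecursiveCharacterTextSplitter`. The committed implementation lives in `app/services/vector_store.py`, which is not included in this excerpt, so the function name `chunk_pages` and the dict layout here are illustrative rather than the committed code:

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(chunk_size=1200, chunk_overlap=200)


def chunk_pages(pages: list[tuple[int, str]]) -> list[dict]:
    """Split each (page_number, text) pair so every chunk keeps its source page."""
    chunks: list[dict] = []
    chunk_index = 0
    for page_number, text in pages:
        for piece in splitter.split_text(text):
            chunks.append(
                {
                    "content": piece,
                    "page_number": page_number,  # page-aware citation metadata
                    "chunk_index": chunk_index,  # unique per document
                }
            )
            chunk_index += 1
    return chunks
```

Splitting within each page (rather than across the whole document) is what lets every chunk cite an exact page later.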
+## Retrieval Approach
+
+- Retrieval method: cosine similarity search in `pgvector`
+- Pipeline:
+  - determine the relevant user-owned document hashes
+  - embed the query
+  - retrieve the top-k chunks across the selected docs
+- Returned evidence includes:
+  - document filename
+  - page number
+  - excerpt text
+- The final assistant answer is instructed to cite these in a human-friendly source section.
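The pipeline above ends in a cosine-distance query against `pgvector`. The committed query lives in `app/services/vector_store.py` (not shown in this excerpt); this sketch assumes pgvector's SQLAlchemy comparator `cosine_distance` and mirrors the match shape that `vector_search` in `app/services/agent.py` consumes:

```python
from sqlalchemy import select
from sqlalchemy.orm import Session

from app.models import DocumentChunk


def similarity_search(db: Session, query_embedding: list[float], file_hashes: list[str], k: int = 4) -> list[dict]:
    """Return the k nearest chunks (cosine distance) among the selected documents."""
    distance = DocumentChunk.embedding.cosine_distance(query_embedding)
    stmt = (
        select(DocumentChunk, distance.label("distance"))
        .where(DocumentChunk.file_hash.in_(file_hashes))  # user-scoped filter
        .order_by(distance)
        .limit(k)
    )
    return [
        {
            "content": chunk.content,
            "distance": dist,
            "metadata": {
                "document_id": chunk.document_id,
                "filename": chunk.filename,
                "page_number": chunk.page_number,
            },
        }
        for chunk, dist in db.execute(stmt)
    ]
```

Filtering by `file_hash` before ordering keeps retrieval scoped to documents the current user actually owns.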
+## Agent Routing Logic
+
+- Default behavior: prefer `vector_search` for questions answerable from the uploaded docs.
+- If document evidence is insufficient, the agent can call the `web_search` tool.
+- Web search output is normalized to citation-friendly rows (title, URL, snippet).
+- The prompt requires:
+  - vector citations: document + page + excerpt
+  - web citations: website title + URL
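The routing decision itself is small; this is the conditional edge from `app/services/agent.py` later in this diff, reproduced here for context:

```python
from typing import Literal

from langchain_core.messages import AIMessage
from langgraph.graph import MessagesState


def _route_tools(state: MessagesState) -> Literal["tools", "__end__"]:
    # If the model requested a tool call (vector_search or web_search),
    # execute it; otherwise the reply is final and the graph ends.
    last = state["messages"][-1]
    if isinstance(last, AIMessage) and last.tool_calls:
        return "tools"
    return "__end__"
```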
+## Bonus Feature
+
+**Implemented bonus:** User-scoped retrieval with automatic document dedup reuse.
+
+- If two users upload the same file, processing/indexing is reused by file hash.
+- Ownership is still enforced via the `user_documents` mapping, so retrieval stays user-scoped.
+- Why chosen: it materially improves performance and cost while preserving access boundaries.
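A condensed view of the hash-based reuse, adapted from `DocumentService.get_or_create_document` further down in this diff; `link_or_reuse` is an illustrative name, not committed code:

```python
import hashlib

from sqlalchemy import select
from sqlalchemy.orm import Session

from app.models import Document, User, UserDocument


def link_or_reuse(db: Session, user: User, content: bytes) -> Document | None:
    """Return an already-indexed document for this content hash, linked to the user."""
    file_hash = hashlib.sha256(content).hexdigest()
    document = db.scalar(select(Document).where(Document.file_hash == file_hash))
    if document is None:
        return None  # caller stores, chunks, embeds, and indexes the new file once
    link = db.scalar(
        select(UserDocument).where(
            UserDocument.user_id == user.id,
            UserDocument.document_id == document.id,
        )
    )
    if link is None:
        # Processing is shared across users, but ownership is a per-user row.
        db.add(UserDocument(user_id=user.id, document_id=document.id))
    return document
```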
+## Problems Faced and Fixes
+
+- Dependency mismatch (`transformers`/`sentence-transformers`/`torch`) causing startup errors.
+  - Added a robust local fallback embedding path to keep the app functional.
+- Optional web-search dependency (`ddgs`) missing.
+  - Added a graceful web-tool fallback and direct Tavily tool support.
+- Passlib bcrypt backend issues.
+  - Switched new password hashing to `pbkdf2_sha256` while retaining bcrypt verify compatibility.
+- Template/render and response UX issues.
+  - Reworked the frontend into a stable chat-style UI with clean result handling.
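The password-hashing fix above is visible in `app/security.py` later in this diff; in isolation, the committed `CryptContext` setup behaves like this (the sample password is a placeholder):

```python
from passlib.context import CryptContext

# New hashes use PBKDF2-SHA256; bcrypt stays in the scheme list so existing
# bcrypt hashes still verify, and deprecated="auto" marks them for rehash.
pwd_context = CryptContext(schemes=["pbkdf2_sha256", "bcrypt"], deprecated="auto")

hashed = pwd_context.hash("example-password")
assert pwd_context.verify("example-password", hashed)
```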
+## If I Had More Time
+
+- Add proper migration tooling (Alembic) instead of the startup `ALTER TABLE`.
+- Add reranking for higher retrieval precision on long multi-document queries.
+- Add persistent server-side conversation storage (Redis/Postgres) for multi-worker deployments.
+- Add an automated evaluation suite for citation faithfulness and retrieval quality.
+
+## Environment Setup
+
+```bash
+cp .env.example .env
+```
+
+Required:
+
+- `GROQ_API_KEY`
+- `SECRET_KEY`
+- `DATABASE_URL` (Supabase transaction pooler recommended)
+
+Optional:
+
+- `TAVILY_API_KEY` (for Tavily web search)
+
+Storage (optional, recommended for deployment):
+
+- `STORAGE_BACKEND=local` or `supabase`
+- `SUPABASE_URL`
+- `SUPABASE_SERVICE_ROLE_KEY`
+- `SUPABASE_STORAGE_BUCKET` (default: `documents`)
+- `SUPABASE_STORAGE_PREFIX` (default: `docsqa`)
+
+Recommended `DATABASE_URL` format:
+
+`postgresql+psycopg://<user>:<password>@<pooler-host>:6543/postgres?sslmode=require`
+
+## Install and Run
+
+```bash
+python3 -m venv .venv
+source .venv/bin/activate
+pip install -e .
+uvicorn app.main:app --reload
+```
+
+Open: `http://127.0.0.1:8000`
+
+## File Storage Mode
+
+- Local dev default: `STORAGE_BACKEND=local` (writes under `UPLOAD_DIRECTORY`).
+- Deployment recommendation: `STORAGE_BACKEND=supabase` to store PDFs in Supabase Storage instead of on local disk.
+## API Endpoints
+
+- `POST /register`
+- `POST /login`
+- `POST /logout`
+- `POST /upload`
+- `GET /documents`
+- `GET /documents/{document_id}/pdf`
+- `DELETE /documents/{document_id}`
+- `POST /ask`
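A quick end-to-end call sequence against these endpoints, sketched with `requests`; the email, password, file name, and question are placeholders. Per `app/main.py` below, `/register` and `/login` take form fields, `/upload` takes multipart `file` parts, and `/ask` takes JSON:

```python
import requests

BASE = "http://127.0.0.1:8000"
session = requests.Session()  # keeps the HTTP-only access_token cookie

session.post(f"{BASE}/register", data={"email": "me@example.com", "password": "s3cret-pass"})

with open("contract.pdf", "rb") as fh:
    session.post(f"{BASE}/upload", files=[("file", ("contract.pdf", fh, "application/pdf"))])

print(session.get(f"{BASE}/documents").json())

reply = session.post(f"{BASE}/ask", json={"query": "What does the termination clause say?"}).json()
print(reply["answer"])
print(reply["sources"])
```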
+## test_documents
+
+Sample PDFs used during development are in `test_documents/`.
+
+## Deployment and Loom
+
+- Live deployed URL: _add your deployed link here_
+- Loom walkthrough (<5 min): _add your Loom link here_
app/__init__.py ADDED
@@ -0,0 +1 @@
+# Package marker.
app/config.py ADDED
@@ -0,0 +1,35 @@
+from functools import lru_cache
+from pathlib import Path
+
+from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+class Settings(BaseSettings):
+    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")
+
+    app_name: str = "DocsQA Assignment"
+    secret_key: str = "change-me"
+    algorithm: str = "HS256"
+    access_token_expire_minutes: int = 720
+    database_url: str = "postgresql+psycopg://postgres:postgres@localhost:5432/postgres"
+    upload_directory: str = "./uploads"
+    storage_backend: str = "local"
+    supabase_url: str | None = None
+    supabase_service_role_key: str | None = None
+    supabase_storage_bucket: str = "documents"
+    supabase_storage_prefix: str = "docsqa"
+    model_name: str = "llama-3.1-8b-instant"
+    embedding_model: str = "mixedbread-ai/mxbai-embed-large-v1"
+    embedding_dimensions: int = 1024
+    groq_api_key: str | None = None
+    web_search_provider: str = "duckduckgo"
+    tavily_api_key: str | None = None
+
+    @property
+    def upload_path(self) -> Path:
+        return Path(self.upload_directory)
+
+
+@lru_cache
+def get_settings() -> Settings:
+    return Settings()
app/database.py ADDED
@@ -0,0 +1,41 @@
+from collections.abc import Generator
+
+from sqlalchemy import create_engine, text
+from sqlalchemy.engine import make_url
+from sqlalchemy.exc import OperationalError
+from sqlalchemy.orm import DeclarativeBase, Session, sessionmaker
+
+from app.config import get_settings
+
+
+settings = get_settings()
+
+engine = create_engine(settings.database_url, pool_pre_ping=True)
+SessionLocal = sessionmaker(bind=engine, autoflush=False, autocommit=False, expire_on_commit=False)
+
+
+class Base(DeclarativeBase):
+    pass
+
+
+def init_db() -> None:
+    try:
+        with engine.begin() as connection:
+            connection.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
+            connection.execute(text("ALTER TABLE IF EXISTS document_chunks ADD COLUMN IF NOT EXISTS page_number INTEGER"))
+        Base.metadata.create_all(bind=engine)
+    except OperationalError as exc:
+        host = make_url(settings.database_url).host or "<unknown>"
+        raise RuntimeError(
+            "Database connection failed for host "
+            f"'{host}'. If you are using Supabase, use the Transaction Pooler URL "
+            "(port 6543) from the Supabase dashboard with sslmode=require."
+        ) from exc
+
+
+def get_db() -> Generator[Session, None, None]:
+    db = SessionLocal()
+    try:
+        yield db
+    finally:
+        db.close()
app/main.py ADDED
@@ -0,0 +1,374 @@
+import re
+from typing import Any
+
+from fastapi import Cookie, Depends, FastAPI, File, Form, HTTPException, Request, UploadFile, status
+from fastapi.responses import HTMLResponse, JSONResponse
+from fastapi.responses import StreamingResponse
+from langchain_core.messages import HumanMessage, ToolMessage
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
+from sqlalchemy.exc import OperationalError
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+from app.database import get_db, init_db
+from app.models import Document, User, UserDocument
+from app.schemas import AskRequest, AskResponse, UserCreate, UserLogin
+from app.security import create_access_token, decode_access_token, hash_password, verify_password
+from app.services.agent import build_agent
+from app.services.document_service import DocumentService
+from app.services.pdf_utils import count_pdf_pages_from_bytes
+from app.services.storage_service import StorageService
+
+
+init_db()
+
+app = FastAPI(title="DocsQA Assignment")
+app.mount("/static", StaticFiles(directory="app/static"), name="static")
+templates = Jinja2Templates(directory="app/templates")
+document_service = DocumentService()
+storage_service = StorageService()
+MAX_UPLOAD_FILES = 5
+MAX_PDF_PAGES = 10
+
+
+def _message_content_to_text(content: Any) -> str:
+    if isinstance(content, str):
+        return content
+    if isinstance(content, list):
+        chunks: list[str] = []
+        for item in content:
+            if isinstance(item, dict):
+                text_value = item.get("text")
+                if isinstance(text_value, str):
+                    chunks.append(text_value)
+            else:
+                chunks.append(str(item))
+        return "\n".join(chunks)
+    return str(content)
+
+
+def _parse_vector_sources(tool_output: str) -> list[dict[str, str]]:
+    lines = tool_output.splitlines()
+    sources: list[dict[str, str]] = []
+    current_document_id = ""
+    current_doc = ""
+    current_page = ""
+
+    for line in lines:
+        match = re.match(r"^\s*\d+\.\s+document_id=(.*?)\s+\|\s+document=(.*?)\s+\|\s+page=(.*?)\s+\|\s+distance=", line)
+        if match:
+            current_document_id = match.group(1).strip()
+            current_doc = match.group(2).strip()
+            current_page = match.group(3).strip()
+            continue
+        excerpt_match = re.match(r"^\s*excerpt:\s*(.*)$", line)
+        if excerpt_match and current_doc:
+            excerpt = excerpt_match.group(1).strip()
+            sources.append(
+                {
+                    "document_id": current_document_id,
+                    "document": current_doc,
+                    "page": current_page or "unknown",
+                    "excerpt": excerpt,
+                }
+            )
+            current_document_id = ""
+            current_doc = ""
+            current_page = ""
+    return sources
+
+
+def _parse_web_sources(tool_output: str) -> list[dict[str, str]]:
+    lines = tool_output.splitlines()
+    sources: list[dict[str, str]] = []
+    current_title = ""
+    current_url = ""
+
+    for line in lines:
+        title_match = re.match(r"^\s*\d+\.\s+title:\s*(.*)$", line)
+        if title_match:
+            current_title = title_match.group(1).strip()
+            current_url = ""
+            continue
+        url_match = re.match(r"^\s*url:\s*(.*)$", line)
+        if url_match and current_title:
+            current_url = url_match.group(1).strip()
+            sources.append({"title": current_title, "url": current_url})
+            current_title = ""
+            current_url = ""
+    return sources
+
+
+def _extract_current_turn_tool_messages(messages: list[Any]) -> list[ToolMessage]:
+    turn_tools_reversed: list[ToolMessage] = []
+    for message in reversed(messages):
+        if isinstance(message, HumanMessage):
+            break
+        if isinstance(message, ToolMessage):
+            turn_tools_reversed.append(message)
+    return list(reversed(turn_tools_reversed))
+
+
+def _extract_sources_from_messages(messages: list[Any]) -> dict[str, list[dict[str, str]]]:
+    vector_sources: list[dict[str, str]] = []
+    web_sources: list[dict[str, str]] = []
+
+    for message in _extract_current_turn_tool_messages(messages):
+        tool_name = (message.name or "").strip()
+        text = _message_content_to_text(message.content)
+        if tool_name == "vector_search":
+            vector_sources.extend(_parse_vector_sources(text))
+        elif tool_name == "web_search":
+            web_sources.extend(_parse_web_sources(text))
+
+    seen_vector: set[tuple[str, str, str]] = set()
+    deduped_vector: list[dict[str, str]] = []
+    for item in vector_sources:
+        key = (item.get("document", ""), item.get("page", ""), item.get("excerpt", ""))
+        if key in seen_vector:
+            continue
+        seen_vector.add(key)
+        deduped_vector.append(item)
+
+    seen_web: set[tuple[str, str]] = set()
+    deduped_web: list[dict[str, str]] = []
+    for item in web_sources:
+        key = (item.get("title", ""), item.get("url", ""))
+        if key in seen_web:
+            continue
+        seen_web.add(key)
+        deduped_web.append(item)
+
+    return {"vector": deduped_vector, "web": deduped_web}
+
+
+def _strip_sources_from_answer(answer: str) -> str:
+    # Remove any trailing "Sources" section and source-status lines from model text.
+    cleaned = re.sub(r"(?is)\n+\s*(?:#+\s*)?sources\s*:?\s*\n.*$", "", answer).strip()
+    lines = cleaned.splitlines()
+    filtered = []
+    for line in lines:
+        if re.match(r"^\s*source(s)?\s*:", line, flags=re.IGNORECASE):
+            continue
+        if re.match(r"^\s*no sources were used for this response\.?\s*$", line, flags=re.IGNORECASE):
+            continue
+        if re.match(r"^\s*no citations available for this turn\.?\s*$", line, flags=re.IGNORECASE):
+            continue
+        filtered.append(line)
+    return "\n".join(filtered).strip()
+
+
+def get_current_user(
+    access_token: str | None = Cookie(default=None),
+    db: Session = Depends(get_db),
+) -> User:
+    if not access_token:
+        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated")
+    user_id = decode_access_token(access_token)
+    if not user_id:
+        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token")
+    try:
+        user = db.get(User, int(user_id))
+    except OperationalError as exc:
+        raise HTTPException(
+            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+            detail="Database is temporarily unavailable. Please try again in a moment.",
+        ) from exc
+    if not user:
+        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User not found")
+    return user
+
+
+@app.get("/", response_class=HTMLResponse)
+def home(request: Request, access_token: str | None = Cookie(default=None), db: Session = Depends(get_db)):
+    user = None
+    documents = []
+    db_unavailable = False
+    if access_token:
+        user_id = decode_access_token(access_token)
+        if user_id:
+            try:
+                user = db.get(User, int(user_id))
+                if user:
+                    documents = document_service.list_user_documents(db, user)
+            except OperationalError:
+                # Keep homepage responsive during transient DNS/DB outages.
+                db_unavailable = True
+                user = None
+                documents = []
+    return templates.TemplateResponse(
+        request=request,
+        name="index.html",
+        context={"request": request, "user": user, "documents": documents, "db_unavailable": db_unavailable},
+    )
+
+
+@app.post("/register")
+def register(email: str = Form(...), password: str = Form(...), db: Session = Depends(get_db)):
+    payload = UserCreate(email=email, password=password)
+    existing = db.scalar(select(User).where(User.email == payload.email))
+    if existing:
+        raise HTTPException(status_code=400, detail="Email already registered")
+    user = User(email=payload.email, password_hash=hash_password(payload.password))
+    db.add(user)
+    db.commit()
+    token = create_access_token(str(user.id))
+    response = JSONResponse(
+        status_code=status.HTTP_201_CREATED,
+        content={"message": "Registered successfully", "email": user.email},
+    )
+    response.set_cookie("access_token", token, httponly=True, samesite="lax", path="/")
+    return response
+
+
+@app.post("/login")
+def login(email: str = Form(...), password: str = Form(...), db: Session = Depends(get_db)):
+    payload = UserLogin(email=email, password=password)
+    user = db.scalar(select(User).where(User.email == payload.email))
+    if not user or not verify_password(payload.password, user.password_hash):
+        raise HTTPException(status_code=400, detail="Invalid credentials")
+    token = create_access_token(str(user.id))
+    response = JSONResponse(content={"message": "Login successful", "email": user.email})
+    response.set_cookie("access_token", token, httponly=True, samesite="lax", path="/")
+    return response
+
+
+@app.post("/logout")
+def logout(access_token: str | None = Cookie(default=None)):
+    response = JSONResponse(content={"message": "Logged out successfully"})
+    response.delete_cookie("access_token", path="/")
+    return response
+
+
+@app.post("/upload")
+async def upload_document(
+    files: list[UploadFile] = File(..., alias="file"),
+    db: Session = Depends(get_db),
+    user: User = Depends(get_current_user),
+):
+    if not files:
+        raise HTTPException(status_code=400, detail="At least one PDF file is required")
+    if len(files) > MAX_UPLOAD_FILES:
+        raise HTTPException(status_code=400, detail=f"Upload supports up to {MAX_UPLOAD_FILES} files at a time")
+
+    results = []
+    for file in files:
+        filename = file.filename or ""
+        if not filename.lower().endswith(".pdf"):
+            raise HTTPException(status_code=400, detail=f"Only PDF files are supported: {filename or '<unnamed file>'}")
+
+        content, file_hash = await document_service.save_upload(file)
+        page_count = count_pdf_pages_from_bytes(content)
+        if page_count > MAX_PDF_PAGES:
+            raise HTTPException(
+                status_code=400,
+                detail=f"{filename} has {page_count} pages. Maximum allowed is {MAX_PDF_PAGES} pages per file.",
+            )
+
+        document, created, processed = document_service.get_or_create_document(
+            db=db,
+            user=user,
+            upload=file,
+            content=content,
+            file_hash=file_hash,
+        )
+        results.append(
+            {
+                "filename": document.filename,
+                "created": created,
+                "processed": processed,
+                "page_count": document.page_count,
+            }
+        )
+
+    db.commit()
+    return {
+        "message": "Upload handled successfully",
+        "count": len(results),
+        "documents": results,
+    }
+
+
+@app.get("/documents")
+def list_documents(db: Session = Depends(get_db), user: User = Depends(get_current_user)):
+    documents = document_service.list_user_documents(db, user)
+    return [
+        {
+            "id": document.id,
+            "filename": document.filename,
+            "file_hash": document.file_hash,
+            "page_count": document.page_count,
+            "summary": document.summary,
+        }
+        for document in documents
+    ]
+
+
+@app.get("/documents/{document_id}/pdf")
+def get_document_pdf(document_id: int, db: Session = Depends(get_db), user: User = Depends(get_current_user)):
+    link = db.scalar(
+        select(UserDocument.id).where(UserDocument.document_id == document_id, UserDocument.user_id == user.id)
+    )
+    if not link:
+        raise HTTPException(status_code=404, detail="Document not found for this user.")
+
+    document = db.get(Document, document_id)
+    if document is None:
+        raise HTTPException(status_code=404, detail="Document not found.")
+
+    try:
+        content = storage_service.read_file_bytes(file_path=document.file_path)
+    except Exception as exc:
+        raise HTTPException(status_code=500, detail="Unable to load document content.") from exc
+
+    headers = {"Content-Disposition": f'inline; filename="{document.filename}"'}
+    return StreamingResponse(iter([content]), media_type="application/pdf", headers=headers)
+
+
+@app.delete("/documents/{document_id}")
+def delete_document(document_id: int, db: Session = Depends(get_db), user: User = Depends(get_current_user)):
+    try:
+        result = document_service.delete_user_document(db, user=user, document_id=document_id)
+    except ValueError as exc:
+        raise HTTPException(status_code=404, detail=str(exc)) from exc
+    db.commit()
+    return {
+        "message": f"Removed {result['filename']} from your account.",
+        "deleted_shared_document": result["deleted_shared_document"],
+    }
+
+
+@app.post("/ask", response_model=AskResponse)
+def ask_question(
+    payload: AskRequest,
+    db: Session = Depends(get_db),
+    user: User = Depends(get_current_user),
+    access_token: str | None = Cookie(default=None),
+):
+    document_service.ensure_page_metadata_for_user(db=db, user=user)
+    agent = build_agent(db=db, user=user)
+    session_key = access_token or f"user:{user.id}"
+    config = {"configurable": {"thread_id": session_key}}
+    previous_messages: list[Any] = []
+    try:
+        state = agent.get_state(config)
+        values = getattr(state, "values", {}) or {}
+        maybe_messages = values.get("messages", [])
+        if isinstance(maybe_messages, list):
+            previous_messages = maybe_messages
+    except Exception:
+        # If state read fails, continue safely and parse from result fallback.
+        previous_messages = []
+
+    result = agent.invoke({"messages": [("user", payload.query)]}, config=config)
+    final_message = result["messages"][-1].content
+    answer = final_message if isinstance(final_message, str) else str(final_message)
+    answer = _strip_sources_from_answer(answer)
+    all_messages = result.get("messages", [])
+    if isinstance(all_messages, list) and len(all_messages) >= len(previous_messages):
+        current_turn_messages = all_messages[len(previous_messages):]
+    else:
+        current_turn_messages = all_messages if isinstance(all_messages, list) else []
+    sources = _extract_sources_from_messages(current_turn_messages)
+    return AskResponse(answer=answer, sources=sources)
app/models.py ADDED
@@ -0,0 +1,69 @@
+from datetime import datetime
+
+from sqlalchemy import DateTime, ForeignKey, Integer, String, Text, UniqueConstraint, func
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+from pgvector.sqlalchemy import Vector
+
+from app.config import get_settings
+
+from app.database import Base
+
+settings = get_settings()
+
+
+class User(Base):
+    __tablename__ = "users"
+
+    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
+    email: Mapped[str] = mapped_column(String(255), unique=True, nullable=False, index=True)
+    password_hash: Mapped[str] = mapped_column(String(255), nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now(), nullable=False)
+
+    documents: Mapped[list["UserDocument"]] = relationship(back_populates="user", cascade="all, delete-orphan")
+
+
+class Document(Base):
+    __tablename__ = "documents"
+
+    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
+    filename: Mapped[str] = mapped_column(String(512), nullable=False)
+    file_hash: Mapped[str] = mapped_column(String(64), unique=True, nullable=False, index=True)
+    file_path: Mapped[str] = mapped_column(String(1024), nullable=False)
+    page_count: Mapped[int] = mapped_column(Integer, default=0, nullable=False)
+    summary: Mapped[str] = mapped_column(Text, default="", nullable=False)
+    extracted_preview: Mapped[str] = mapped_column(Text, default="", nullable=False)
+    processing_status: Mapped[str] = mapped_column(String(32), default="completed", nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now(), nullable=False)
+
+    users: Mapped[list["UserDocument"]] = relationship(back_populates="document", cascade="all, delete-orphan")
+    chunks: Mapped[list["DocumentChunk"]] = relationship(back_populates="document", cascade="all, delete-orphan")
+
+
+class UserDocument(Base):
+    __tablename__ = "user_documents"
+    __table_args__ = (UniqueConstraint("user_id", "document_id", name="uq_user_document"),)
+
+    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
+    user_id: Mapped[int] = mapped_column(ForeignKey("users.id"), nullable=False, index=True)
+    document_id: Mapped[int] = mapped_column(ForeignKey("documents.id"), nullable=False, index=True)
+    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now(), nullable=False)
+
+    user: Mapped["User"] = relationship(back_populates="documents")
+    document: Mapped["Document"] = relationship(back_populates="users")
+
+
+class DocumentChunk(Base):
+    __tablename__ = "document_chunks"
+    __table_args__ = (UniqueConstraint("document_id", "chunk_index", name="uq_document_chunk"),)
+
+    id: Mapped[int] = mapped_column(Integer, primary_key=True, index=True)
+    document_id: Mapped[int] = mapped_column(ForeignKey("documents.id"), nullable=False, index=True)
+    file_hash: Mapped[str] = mapped_column(String(64), nullable=False, index=True)
+    filename: Mapped[str] = mapped_column(String(512), nullable=False)
+    chunk_index: Mapped[int] = mapped_column(Integer, nullable=False)
+    page_number: Mapped[int | None] = mapped_column(Integer, nullable=True, index=True)
+    content: Mapped[str] = mapped_column(Text, nullable=False)
+    embedding: Mapped[list[float]] = mapped_column(Vector(settings.embedding_dimensions), nullable=False)
+    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now(), nullable=False)
+
+    document: Mapped["Document"] = relationship(back_populates="chunks")
app/schemas.py ADDED
@@ -0,0 +1,35 @@
+from pydantic import BaseModel, EmailStr, Field
+
+
+class UserCreate(BaseModel):
+    email: EmailStr
+    password: str
+
+
+class UserLogin(BaseModel):
+    email: EmailStr
+    password: str
+
+
+class TokenPayload(BaseModel):
+    sub: str
+
+
+class AskRequest(BaseModel):
+    query: str
+
+
+class AskResponse(BaseModel):
+    answer: str
+    sources: dict[str, list[dict[str, str]]] = Field(default_factory=lambda: {"vector": [], "web": []})
+
+
+class DocumentResponse(BaseModel):
+    id: int
+    filename: str
+    file_hash: str
+    summary: str
+    page_count: int
+
+    class Config:
+        from_attributes = True
app/security.py ADDED
@@ -0,0 +1,39 @@
+from datetime import datetime, timedelta, timezone
+
+from jose import JWTError, jwt
+from passlib.context import CryptContext
+
+from app.config import get_settings
+
+
+# Use PBKDF2 for new hashes to avoid bcrypt backend/version issues and
+# bcrypt's 72-byte password input limit. Keep bcrypt for legacy verification.
+pwd_context = CryptContext(
+    schemes=["pbkdf2_sha256", "bcrypt"],
+    deprecated="auto",
+)
+settings = get_settings()
+
+
+def hash_password(password: str) -> str:
+    return pwd_context.hash(password)
+
+
+def verify_password(password: str, password_hash: str) -> bool:
+    return pwd_context.verify(password, password_hash)
+
+
+def create_access_token(subject: str) -> str:
+    expires_delta = timedelta(minutes=settings.access_token_expire_minutes)
+    expire = datetime.now(timezone.utc) + expires_delta
+    payload = {"sub": subject, "exp": expire}
+    return jwt.encode(payload, settings.secret_key, algorithm=settings.algorithm)
+
+
+def decode_access_token(token: str) -> str | None:
+    try:
+        payload = jwt.decode(token, settings.secret_key, algorithms=[settings.algorithm])
+        subject = payload.get("sub")
+        return str(subject) if subject is not None else None
+    except JWTError:
+        return None
app/services/agent.py ADDED
@@ -0,0 +1,104 @@
+from typing import Literal
+
+from langchain_core.messages import AIMessage, SystemMessage
+from langchain_core.tools import StructuredTool
+from langchain_groq import ChatGroq
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.graph import END, START, MessagesState, StateGraph
+from langgraph.prebuilt import ToolNode
+from pydantic import BaseModel, Field
+from sqlalchemy.orm import Session
+
+from app.config import get_settings
+from app.models import User
+from app.services.document_service import DocumentService
+from app.services.vector_store import VectorStoreService
+from app.services.web_search import build_web_search_tool
+
+
+class VectorSearchInput(BaseModel):
+    query: str = Field(..., description="The user question to answer from uploaded documents.")
+    file_hashes: list[str] | None = Field(
+        default=None,
+        description="Optional document hashes to filter search. Leave empty to auto-resolve relevant documents for the current user.",
+    )
+
+
+LANGGRAPH_CHECKPOINTER = MemorySaver()
+
+
+def _route_tools(state: MessagesState) -> Literal["tools", "__end__"]:
+    last = state["messages"][-1]
+    if isinstance(last, AIMessage) and last.tool_calls:
+        return "tools"
+    return "__end__"
+
+
+def build_agent(*, db: Session, user: User):
+    settings = get_settings()
+    if not settings.groq_api_key:
+        raise RuntimeError("GROQ_API_KEY is required for agent responses.")
+    llm = ChatGroq(api_key=settings.groq_api_key, model=settings.model_name, temperature=0)
+    document_service = DocumentService()
+    vector_store = VectorStoreService()
+    web_search_tool = build_web_search_tool()
+
+    def vector_search(query: str, file_hashes: list[str] | None = None) -> str:
+        resolved_hashes = file_hashes or document_service.resolve_relevant_document_hashes(db, user=user, query=query)
+        if not resolved_hashes:
+            return "No uploaded documents are available for this user."
+        matches = vector_store.similarity_search(db=db, query=query, file_hashes=resolved_hashes, k=4)
+        if not matches:
+            return f"No vector matches found for hashes: {resolved_hashes}"
+        lines = ["Vector evidence (cite document + page + excerpt in final answer):"]
+        for index, match in enumerate(matches, start=1):
+            page_number = match["metadata"].get("page_number")
+            page_label = str(page_number) if page_number is not None else "unknown"
+            document_id = match["metadata"].get("document_id")
+            lines.append(
+                f"{index}. document_id={document_id} | document={match['metadata']['filename']} | page={page_label} | distance={match['distance']:.4f}"
+            )
+            lines.append(f" excerpt: {match['content'][:900].replace(chr(10), ' ')}")
+        return "\n\n".join(lines)
+
+    vector_tool = StructuredTool.from_function(
+        func=vector_search,
+        name="vector_search",
+        description=(
+            "Searches the current user's uploaded documents. "
+            "If file hashes are omitted, the tool first finds the most relevant document hashes from stored metadata and summary, "
+            "then applies those hashes as a vector-search filter."
+        ),
+        args_schema=VectorSearchInput,
+    )
+
+    tools = [vector_tool]
+    prompt = (
+        "You are a document QA agent. Prefer vector_search for questions about the user's uploaded documents. "
+        "Do NOT include any 'Sources' section, citation list, footnotes, chunk ids, or hashes in the final answer text. "
+        "Only provide the concise user-facing answer. "
+        "Citation metadata is handled separately by the application. "
+        "Do not claim evidence that is not present in tool outputs."
+    )
+    if web_search_tool is not None:
+        tools.append(web_search_tool)
+        prompt += " Use web search only when the answer depends on external or current information."
+    else:
+        prompt += " Web search is currently unavailable in this environment."
+
+    llm_with_tools = llm.bind_tools(tools)
+    tool_node = ToolNode(tools)
+    system_prompt = SystemMessage(content=prompt)
+
+    def agent_node(state: MessagesState):
+        response = llm_with_tools.invoke([system_prompt, *state["messages"]])
+        return {"messages": [response]}
+
+    graph = StateGraph(MessagesState)
+    graph.add_node("agent", agent_node)
+    graph.add_node("tools", tool_node)
+    graph.add_edge(START, "agent")
+    graph.add_conditional_edges("agent", _route_tools, {"tools": "tools", "__end__": END})
+    graph.add_edge("tools", "agent")
+
+    return graph.compile(checkpointer=LANGGRAPH_CHECKPOINTER)
app/services/document_service.py ADDED
@@ -0,0 +1,212 @@
+import hashlib
+
+from fastapi import UploadFile
+from langchain_groq import ChatGroq
+from sqlalchemy import func, select
+from sqlalchemy.orm import Session
+
+from app.config import get_settings
+from app.models import Document, DocumentChunk, User, UserDocument
+from app.services.pdf_utils import extract_pdf_pages_from_bytes, extract_pdf_text_from_bytes
+from app.services.storage_service import StorageService
+from app.services.vector_store import VectorStoreService
+
+
+class DocumentService:
+    def __init__(self) -> None:
+        self.settings = get_settings()
+        self.storage = StorageService()
+        self.vector_store = VectorStoreService()
+        self.summarizer = None
+
+    async def save_upload(self, upload: UploadFile) -> tuple[bytes, str]:
+        content = await upload.read()
+        file_hash = hashlib.sha256(content).hexdigest()
+        return content, file_hash
+
+    def get_or_create_document(self, *, db: Session, user: User, upload: UploadFile, content: bytes, file_hash: str) -> tuple[Document, bool, bool]:
+        existing_document = db.scalar(select(Document).where(Document.file_hash == file_hash))
+        created = False
+        processed = False
+
+        if existing_document is None:
+            file_path = self.storage.save_pdf(
+                file_hash=file_hash,
+                filename=upload.filename or "document.pdf",
+                content=content,
+            )
+            preview_text, page_count = extract_pdf_text_from_bytes(content, max_pages=10)
+            full_pages, _ = extract_pdf_pages_from_bytes(content)
+            summary = self._summarize_preview(preview_text, upload.filename or "document.pdf")
+            existing_document = Document(
+                filename=upload.filename or "document.pdf",
+                file_hash=file_hash,
+                file_path=file_path,
+                page_count=page_count,
+                summary=summary,
+                extracted_preview=preview_text[:8000],
+                processing_status="completed",
+            )
+            db.add(existing_document)
+            db.flush()
+            self.vector_store.add_document(
+                db=db,
+                document_id=existing_document.id,
+                file_hash=file_hash,
+                filename=existing_document.filename,
+                pages=full_pages,
+            )
+            created = True
+            processed = True
+        else:
+            needs_page_reindex = db.scalar(
+                select(DocumentChunk.id)
+                .where(DocumentChunk.document_id == existing_document.id, DocumentChunk.page_number.is_(None))
+                .limit(1)
+            )
+            if needs_page_reindex:
+                content_bytes = self.storage.read_file_bytes(file_path=existing_document.file_path)
+                full_pages, _ = extract_pdf_pages_from_bytes(content_bytes)
+                self.vector_store.add_document(
+                    db=db,
+                    document_id=existing_document.id,
+                    file_hash=existing_document.file_hash,
+                    filename=existing_document.filename,
+                    pages=full_pages,
+                )
+                processed = True
+
+        link = db.scalar(
+            select(UserDocument).where(
+                UserDocument.user_id == user.id,
+                UserDocument.document_id == existing_document.id,
+            )
+        )
+        if link is None:
+            db.add(UserDocument(user_id=user.id, document_id=existing_document.id))
+            db.flush()
+
+        return existing_document, created, processed
+
+    def list_user_documents(self, db: Session, user: User) -> list[Document]:
+        stmt = (
+            select(Document)
+            .join(UserDocument, UserDocument.document_id == Document.id)
+            .where(UserDocument.user_id == user.id)
+            .order_by(Document.created_at.desc())
+        )
+        return list(db.scalars(stmt))
+
+    def delete_user_document(self, db: Session, *, user: User, document_id: int) -> dict[str, str | bool]:
+        link = db.scalar(
+            select(UserDocument).where(
+                UserDocument.user_id == user.id,
+                UserDocument.document_id == document_id,
+            )
+        )
+        if link is None:
+            raise ValueError("Document not found for this user.")
+
+        document = db.get(Document, document_id)
+        if document is None:
+            raise ValueError("Document does not exist.")
+
+        db.delete(link)
+        db.flush()
+
+        remaining_links = db.scalar(select(func.count()).select_from(UserDocument).where(UserDocument.document_id == document_id)) or 0
+        deleted_shared_document = False
+
+        if remaining_links == 0:
+            db.delete(document)
+            db.flush()
+            self.storage.delete_file(file_path=document.file_path)
+            deleted_shared_document = True
+
+        return {
+            "filename": document.filename,
+            "deleted_shared_document": deleted_shared_document,
+        }
+
+    def resolve_relevant_document_hashes(self, db: Session, *, user: User, query: str, limit: int = 5) -> list[str]:
+        stopwords = {
+            "the",
+            "and",
+            "for",
+            "with",
+            "from",
+            "that",
+            "this",
+            "what",
+            "who",
+            "how",
+            "are",
+            "was",
+            "were",
+            "is",
+            "of",
+            "about",
+            "tell",
+            "more",
+            "please",
+            "can",
+            "you",
+            "your",
+        }
+        terms = [term.strip() for term in query.lower().split() if len(term.strip()) > 2 and term.strip() not in stopwords]
+        docs = self.list_user_documents(db, user)
+        scored: list[tuple[int, str]] = []
+        for doc in docs:
+            haystack = f"{doc.filename} {doc.summary} {doc.extracted_preview}".lower()
+            filename_score = sum(3 for term in terms if term in (doc.filename or "").lower())
+            body_score = sum(1 for term in terms if term in haystack)
+            score = filename_score + body_score
+            if score > 0:
+                scored.append((score, doc.file_hash))
+        scored.sort(reverse=True)
+        hashes = [file_hash for _, file_hash in scored[:limit]]
+        if hashes:
+            return hashes
+        return [doc.file_hash for doc in docs[:limit]]
+
+    def ensure_page_metadata_for_user(self, *, db: Session, user: User) -> None:
+        docs = self.list_user_documents(db, user)
+        changed = False
+        for doc in docs:
+            needs_page_reindex = db.scalar(
+                select(DocumentChunk.id)
+                .where(DocumentChunk.document_id == doc.id, DocumentChunk.page_number.is_(None))
+                .limit(1)
+            )
+            if not needs_page_reindex:
+                continue
+            try:
+                content_bytes = self.storage.read_file_bytes(file_path=doc.file_path)
+            except Exception:
+                continue
+            full_pages, _ = extract_pdf_pages_from_bytes(content_bytes)
+            self.vector_store.add_document(
+                db=db,
+                document_id=doc.id,
+                file_hash=doc.file_hash,
+                filename=doc.filename,
+                pages=full_pages,
+            )
+            changed = True
+        if changed:
+            db.commit()
+
+    def _summarize_preview(self, preview_text: str, filename: str) -> str:
+        if not preview_text.strip():
+            return f"No text could be extracted from the first pages of {filename}."
+        if not self.settings.groq_api_key:
+            return preview_text[:1200]
+        if self.summarizer is None:
+            self.summarizer = ChatGroq(api_key=self.settings.groq_api_key, model=self.settings.model_name, temperature=0)
+        prompt = (
+            "Summarize the following document preview in 6-8 concise bullet-style sentences. "
+            "Focus on purpose, key topics, and likely use cases.\n\n"
+            f"Filename: {filename}\n\nPreview:\n{preview_text[:16000]}"
+        )
+        response = self.summarizer.invoke(prompt)
+        return response.content if isinstance(response.content, str) else str(response.content)
app/services/pdf_utils.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from pathlib import Path
from io import BytesIO

from pypdf import PdfReader


def extract_pdf_pages(file_path: Path, max_pages: int | None = None) -> tuple[list[tuple[int, str]], int]:
    reader = PdfReader(str(file_path))
    total_pages = len(reader.pages)
    page_limit = min(total_pages, max_pages) if max_pages else total_pages
    pages: list[tuple[int, str]] = []
    for index, page in enumerate(reader.pages[:page_limit], start=1):
        pages.append((index, (page.extract_text() or "").strip()))
    return pages, total_pages


def extract_pdf_text(file_path: Path, max_pages: int | None = None) -> tuple[str, int]:
    pages, total_pages = extract_pdf_pages(file_path, max_pages=max_pages)
    text = "\n\n".join(page_text for _, page_text in pages).strip()
    return text, total_pages


def extract_pdf_pages_from_bytes(content: bytes, max_pages: int | None = None) -> tuple[list[tuple[int, str]], int]:
    reader = PdfReader(BytesIO(content))
    total_pages = len(reader.pages)
    page_limit = min(total_pages, max_pages) if max_pages else total_pages
    pages: list[tuple[int, str]] = []
    for index, page in enumerate(reader.pages[:page_limit], start=1):
        pages.append((index, (page.extract_text() or "").strip()))
    return pages, total_pages


def extract_pdf_text_from_bytes(content: bytes, max_pages: int | None = None) -> tuple[str, int]:
    pages, total_pages = extract_pdf_pages_from_bytes(content, max_pages=max_pages)
    text = "\n\n".join(page_text for _, page_text in pages).strip()
    return text, total_pages


def count_pdf_pages_from_bytes(content: bytes) -> int:
    reader = PdfReader(BytesIO(content))
    return len(reader.pages)
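
Usage sketch for the helpers above (the sample file name is illustrative, not part of the commit). Both byte-based extractors cap work at `max_pages` while still reporting the document's true page count, which supports the 10-pages-per-file rule from the README:

from pathlib import Path

from app.services.pdf_utils import count_pdf_pages_from_bytes, extract_pdf_text_from_bytes

pdf_bytes = Path("sample.pdf").read_bytes()

total_pages = count_pdf_pages_from_bytes(pdf_bytes)              # full page count
text, total_again = extract_pdf_text_from_bytes(pdf_bytes, max_pages=10)
# `text` holds at most the first 10 pages joined by blank lines;
# `total_again` still reports the real page count (equal to total_pages).
print(total_pages, len(text))
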
app/services/storage_service.py
ADDED
@@ -0,0 +1,79 @@
from pathlib import Path

from app.config import get_settings


class StorageService:
    def __init__(self) -> None:
        self.settings = get_settings()
        self.backend = self.settings.storage_backend.lower().strip()
        self._client = None
        self.settings.upload_path.mkdir(parents=True, exist_ok=True)

    def save_pdf(self, *, file_hash: str, filename: str, content: bytes) -> str:
        if self._use_supabase():
            key = self._build_supabase_key(file_hash=file_hash, filename=filename)
            self._supabase().storage.from_(self.settings.supabase_storage_bucket).upload(
                path=key,
                file=content,
                file_options={"content-type": "application/pdf", "upsert": "true"},
            )
            return f"supabase://{self.settings.supabase_storage_bucket}/{key}"

        suffix = Path(filename).suffix or ".pdf"
        target = self.settings.upload_path / f"{file_hash}{suffix}"
        if not target.exists():
            target.write_bytes(content)
        return str(target)

    def read_file_bytes(self, *, file_path: str) -> bytes:
        if file_path.startswith("supabase://"):
            bucket, key = self._parse_supabase_path(file_path)
            result = self._supabase().storage.from_(bucket).download(key)
            if isinstance(result, bytes):
                return result
            return bytes(result)
        return Path(file_path).read_bytes()

    def delete_file(self, *, file_path: str) -> None:
        if file_path.startswith("supabase://"):
            bucket, key = self._parse_supabase_path(file_path)
            try:
                self._supabase().storage.from_(bucket).remove([key])
            except Exception:
                pass
            return
        local_path = Path(file_path)
        if local_path.exists():
            try:
                local_path.unlink()
            except OSError:
                pass

    def _use_supabase(self) -> bool:
        return (
            self.backend == "supabase"
            and bool(self.settings.supabase_url)
            and bool(self.settings.supabase_service_role_key)
        )

    def _supabase(self):
        if self._client is None:
            from supabase import create_client

            self._client = create_client(self.settings.supabase_url, self.settings.supabase_service_role_key)
        return self._client

    def _build_supabase_key(self, *, file_hash: str, filename: str) -> str:
        suffix = Path(filename).suffix or ".pdf"
        base_prefix = self.settings.supabase_storage_prefix.strip("/ ")
        if base_prefix:
            return f"{base_prefix}/{file_hash}{suffix}"
        return f"{file_hash}{suffix}"

    def _parse_supabase_path(self, file_path: str) -> tuple[str, str]:
        without_scheme = file_path.removeprefix("supabase://")
        parts = without_scheme.split("/", 1)
        if len(parts) != 2:
            raise ValueError(f"Invalid supabase path: {file_path}")
        return parts[0], parts[1]
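
A design note on `StorageService`: `save_pdf` returns an opaque location string, either a plain local path or a `supabase://bucket/key` URI, and `read_file_bytes`/`delete_file` dispatch on that prefix, so callers never branch on the backend. A small sketch of the scheme round trip; the helper names are ours, and "documents"/"docsqa" mirror the `.env.example` defaults rather than guarantees:

def build_supabase_path(bucket: str, prefix: str, file_hash: str, suffix: str = ".pdf") -> str:
    # Mirrors _build_supabase_key plus the scheme wrapper from save_pdf.
    key = f"{prefix}/{file_hash}{suffix}" if prefix else f"{file_hash}{suffix}"
    return f"supabase://{bucket}/{key}"


def parse_supabase_path(file_path: str) -> tuple[str, str]:
    # Mirrors _parse_supabase_path: bucket is everything before the first slash.
    bucket, _, key = file_path.removeprefix("supabase://").partition("/")
    if not key:
        raise ValueError(f"Invalid supabase path: {file_path}")
    return bucket, key


stored = build_supabase_path("documents", "docsqa", "ab12cd34")
assert stored == "supabase://documents/docsqa/ab12cd34.pdf"
assert parse_supabase_path(stored) == ("documents", "docsqa/ab12cd34.pdf")
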
app/services/vector_store.py
ADDED
@@ -0,0 +1,151 @@
import hashlib
import math
import re
from typing import Any

from sqlalchemy import delete, select
from sqlalchemy.orm import Session

from app.config import get_settings
from app.models import DocumentChunk


class SimpleTextSplitter:
    def __init__(self, *, chunk_size: int, chunk_overlap: int) -> None:
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    def split_text(self, text: str) -> list[str]:
        normalized = text.strip()
        if not normalized:
            return []
        if len(normalized) <= self.chunk_size:
            return [normalized]

        chunks: list[str] = []
        start = 0
        step = max(1, self.chunk_size - self.chunk_overlap)
        text_length = len(normalized)
        while start < text_length:
            end = min(text_length, start + self.chunk_size)
            chunk = normalized[start:end].strip()
            if chunk:
                chunks.append(chunk)
            if end >= text_length:
                break
            start += step
        return chunks


class LocalHashEmbeddings:
    def __init__(self, dimensions: int) -> None:
        self.dimensions = dimensions

    def embed_documents(self, texts: list[str]) -> list[list[float]]:
        return [self._embed_text(text) for text in texts]

    def embed_query(self, text: str) -> list[float]:
        return self._embed_text(text)

    def _embed_text(self, text: str) -> list[float]:
        vector = [0.0] * self.dimensions
        tokens = re.findall(r"\w+", text.lower())
        if not tokens:
            return vector

        for token in tokens:
            digest = hashlib.sha256(token.encode("utf-8")).digest()
            bucket = int.from_bytes(digest[:4], "big") % self.dimensions
            sign = 1.0 if digest[4] % 2 == 0 else -1.0
            vector[bucket] += sign

        norm = math.sqrt(sum(value * value for value in vector))
        if norm == 0:
            return vector
        return [value / norm for value in vector]


class VectorStoreService:
    def __init__(self) -> None:
        self.splitter = SimpleTextSplitter(chunk_size=1200, chunk_overlap=200)
        self.embeddings = None

    def _get_embeddings(self) -> Any:
        settings = get_settings()
        if self.embeddings is None:
            try:
                from langchain_huggingface import HuggingFaceEmbeddings

                self.embeddings = HuggingFaceEmbeddings(
                    model_name=settings.embedding_model,
                    model_kwargs={"device": "cpu"},
                    encode_kwargs={"normalize_embeddings": True},
                )
            except Exception:
                # Keep the app usable when transformer/torch dependencies are unavailable.
                self.embeddings = LocalHashEmbeddings(settings.embedding_dimensions)
        return self.embeddings

    def add_document(self, *, db: Session, document_id: int, file_hash: str, filename: str, pages: list[tuple[int, str]]) -> None:
        chunk_rows: list[tuple[int | None, str]] = []
        for page_number, page_text in pages:
            if not page_text.strip():
                continue
            page_chunks = self.splitter.split_text(page_text)
            chunk_rows.extend((page_number, chunk) for chunk in page_chunks if chunk.strip())
        chunks = [chunk for _, chunk in chunk_rows]
        if not chunks:
            return
        embeddings_client = self._get_embeddings()
        embeddings = embeddings_client.embed_documents(chunks)
        db.execute(delete(DocumentChunk).where(DocumentChunk.document_id == document_id))
        rows = [
            DocumentChunk(
                document_id=document_id,
                file_hash=file_hash,
                filename=filename,
                chunk_index=index,
                page_number=page_number,
                content=chunk,
                embedding=embedding,
            )
            for index, ((page_number, chunk), embedding) in enumerate(zip(chunk_rows, embeddings, strict=False))
        ]
        db.add_all(rows)
        db.flush()

    def similarity_search(self, *, db: Session, query: str, file_hashes: list[str], k: int = 4) -> list[dict[str, Any]]:
        if not file_hashes:
            return []
        query_embedding = self._get_embeddings().embed_query(query)
        stmt = (
            select(
                DocumentChunk.document_id,
                DocumentChunk.content,
                DocumentChunk.filename,
                DocumentChunk.file_hash,
                DocumentChunk.chunk_index,
                DocumentChunk.page_number,
                DocumentChunk.embedding.cosine_distance(query_embedding).label("distance"),
            )
            .where(DocumentChunk.file_hash.in_(file_hashes))
            .order_by(DocumentChunk.embedding.cosine_distance(query_embedding))
            .limit(k)
        )
        results = db.execute(stmt).all()
        matches: list[dict[str, Any]] = []
        for row in results:
            matches.append(
                {
                    "content": row.content,
                    "metadata": {
                        "document_id": row.document_id,
                        "filename": row.filename,
                        "file_hash": row.file_hash,
                        "chunk_index": row.chunk_index,
                        "page_number": row.page_number,
                    },
                    "distance": row.distance,
                }
            )
        return matches
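
`LocalHashEmbeddings` is the classic signed feature-hashing trick: each token lands in a bucket chosen by the first four bytes of its SHA-256 digest, gets a plus-or-minus sign from the fifth byte, and the vector is L2-normalized so pgvector's `cosine_distance` stays meaningful. A self-contained check of the properties that matter, with function and variable names of our own:

import hashlib
import math
import re


def hash_embed(text: str, dims: int = 256) -> list[float]:
    # Same scheme as LocalHashEmbeddings._embed_text, reduced to a function.
    vec = [0.0] * dims
    for token in re.findall(r"\w+", text.lower()):
        digest = hashlib.sha256(token.encode("utf-8")).digest()
        bucket = int.from_bytes(digest[:4], "big") % dims
        vec[bucket] += 1.0 if digest[4] % 2 == 0 else -1.0
    norm = math.sqrt(sum(v * v for v in vec))
    return [v / norm for v in vec] if norm else vec


def cosine(a: list[float], b: list[float]) -> float:
    # Vectors are already unit-length, so the dot product is the cosine.
    return sum(x * y for x, y in zip(a, b))


a = hash_embed("postgres vector search")
b = hash_embed("postgres vector search")
c = hash_embed("chocolate cake recipe")
assert abs(cosine(a, b) - 1.0) < 1e-9   # deterministic: same text, same vector
print(round(cosine(a, c), 3))           # typically near 0 for disjoint token sets

Note this fallback is purely lexical, matching only on shared tokens; the HuggingFace path is the real semantic model, and this one exists (per the comment above) to keep the app running when torch is unavailable.
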
app/services/web_search.py
ADDED
@@ -0,0 +1,72 @@
import json

from langchain_community.tools import DuckDuckGoSearchResults
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field

from app.config import get_settings


class WebSearchInput(BaseModel):
    model_config = ConfigDict(extra="allow")
    query: str = Field(default="", description="Search query for external/current information.")
    cursor: int | None = Field(default=None, description="Optional cursor from model/tool planning. Ignored.")
    id: str | int | None = Field(default=None, description="Optional id from model/tool planning. Ignored.")


def _build_tavily_tool(api_key: str):
    try:
        from tavily import TavilyClient
    except Exception:
        return None

    client = TavilyClient(api_key=api_key)

    def tavily_search(query: str = "", cursor: int | None = None, id: str | int | None = None, **kwargs) -> str:
        query_text = query.strip()
        _ = (cursor, id, kwargs)
        if not query_text:
            return "Web search was requested without a query."
        try:
            result = client.search(query=query_text, search_depth="advanced")
        except Exception as exc:
            return f"Tavily search failed: {exc}"
        rows = result.get("results", []) if isinstance(result, dict) else []
        if not rows:
            return "No web results found."

        lines = ["Web search results (cite website URLs used):"]
        for index, row in enumerate(rows[:5], start=1):
            title = str(row.get("title") or "Untitled result")
            url = str(row.get("url") or "").strip()
            snippet = str(row.get("content") or "").strip().replace("\n", " ")
            snippet = snippet[:500]
            lines.append(f"{index}. title: {title}")
            lines.append(f"   url: {url or 'N/A'}")
            if snippet:
                lines.append(f"   snippet: {snippet}")
        lines.append("\nRaw response:")
        lines.append(json.dumps(result, ensure_ascii=True))
        return "\n".join(lines)

    return StructuredTool.from_function(
        func=tavily_search,
        name="web_search",
        description="Search the web for current/external information using Tavily.",
        args_schema=WebSearchInput,
    )


def build_web_search_tool():
    settings = get_settings()
    provider = settings.web_search_provider.lower()

    if provider == "tavily":
        if not settings.tavily_api_key:
            return None
        return _build_tavily_tool(settings.tavily_api_key)

    try:
        return DuckDuckGoSearchResults(num_results=5)
    except Exception:
        return None
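
Tool selection here is config-driven: `WEB_SEARCH_PROVIDER=tavily` plus a key yields the Tavily-backed `StructuredTool`, anything else tries DuckDuckGo, and every failure path returns `None` instead of raising, so the agent can quietly fall back to retrieval-only answers. An illustrative call, assuming the standard LangChain tool `.invoke` interface (output abbreviated):

from app.services.web_search import build_web_search_tool

tool = build_web_search_tool()
if tool is None:
    print("Web search unavailable; the agent will answer from documents only.")
else:
    # Both backends accept a {"query": ...} payload through the tool interface.
    print(tool.invoke({"query": "latest pgvector release"})[:300])
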
app/static/style.css
ADDED
@@ -0,0 +1,680 @@
:root {
  --bg-1: #fff7eb;
  --bg-2: #f8e7cf;
  --ink-1: #1f1b16;
  --ink-2: #554b40;
  --card: rgba(255, 252, 247, 0.84);
  --card-strong: rgba(255, 252, 247, 0.95);
  --line: rgba(100, 77, 45, 0.18);
  --line-strong: rgba(100, 77, 45, 0.34);
  --brand-1: #b24a00;
  --brand-2: #ff7b00;
  --brand-3: #ffd8a8;
  --ok-bg: #f0fdf4;
  --ok-line: #86efac;
  --error-bg: #fef2f2;
  --error-line: #fca5a5;
}

*,
*::before,
*::after {
  box-sizing: border-box;
}

body {
  margin: 0;
  min-height: 100vh;
  color: var(--ink-1);
  font-family: "Space Grotesk", "Helvetica Neue", sans-serif;
  background:
    linear-gradient(180deg, var(--bg-1), var(--bg-2)),
    radial-gradient(circle at 10% 10%, rgba(255, 179, 71, 0.32), transparent 45%);
  position: relative;
}

.bg-orb {
  position: fixed;
  border-radius: 999px;
  filter: blur(70px);
  pointer-events: none;
  z-index: 0;
  opacity: 0.65;
}

.orb-one {
  width: 420px;
  height: 420px;
  left: -100px;
  top: -110px;
  background: rgba(255, 133, 28, 0.42);
}

.orb-two {
  width: 360px;
  height: 360px;
  right: -100px;
  top: 220px;
  background: rgba(255, 208, 143, 0.6);
}

.shell {
  position: relative;
  z-index: 1;
  width: min(1460px, calc(100% - 2.5rem));
  margin: 0 auto;
  padding: 1.6rem 0 3.4rem;
}

.hero {
  margin-bottom: 1rem;
}

.workspace-strip {
  margin-bottom: 0.95rem;
  display: flex;
  justify-content: space-between;
  align-items: flex-start;
  gap: 1rem;
}

.workspace-strip h1 {
  font-size: clamp(1.65rem, 3.5vw, 2.2rem);
  margin-top: 0.35rem;
}

.hero-topline {
  display: flex;
  align-items: center;
  justify-content: space-between;
  gap: 1rem;
}

.eyebrow {
  margin: 0;
  text-transform: uppercase;
  letter-spacing: 0.18em;
  font-size: 0.72rem;
  color: var(--brand-1);
  font-weight: 700;
}

h1,
h2,
h3 {
  margin: 0;
  letter-spacing: -0.02em;
}

h1 {
  font-size: clamp(1.8rem, 4vw, 2.7rem);
  margin-top: 0.65rem;
}

h2 {
  font-size: 1.25rem;
}

h3 {
  font-size: 1rem;
}

.lede,
p,
label,
textarea,
input,
button {
  font-size: 0.98rem;
  line-height: 1.5;
}

.lede {
  margin-top: 0.75rem;
  color: var(--ink-2);
  max-width: 72ch;
}

.db-warning {
  margin-top: 0.75rem;
  border: 1px solid var(--error-line);
  background: var(--error-bg);
  color: #991b1b;
  border-radius: 10px;
  padding: 0.55rem 0.7rem;
}

.muted {
  color: var(--ink-2);
  margin: 0.35rem 0 0;
}

.grid {
  display: grid;
  gap: 1rem;
}

.grid.two {
  grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
}

.card {
  border: 1px solid var(--line);
  border-radius: 20px;
  background: var(--card);
  backdrop-filter: blur(10px);
  box-shadow:
    0 8px 22px rgba(70, 50, 20, 0.08),
    0 28px 60px rgba(70, 50, 20, 0.07);
  padding: 1.45rem;
  animation: fade-up 0.4s ease both;
}

.panel {
  display: grid;
  gap: 0.9rem;
}

.panel-head p {
  margin: 0.25rem 0 0;
  color: var(--ink-2);
}

.panel-head-inline {
  display: flex;
  justify-content: space-between;
  align-items: center;
  gap: 0.8rem;
}

.badge {
  border: 1px solid rgba(178, 74, 0, 0.22);
  background: linear-gradient(135deg, rgba(255, 187, 107, 0.24), rgba(255, 123, 0, 0.12));
  color: #8d3600;
  padding: 0.3rem 0.7rem;
  border-radius: 999px;
  font-size: 0.78rem;
  white-space: nowrap;
}

.toolbar {
  display: flex;
  justify-content: space-between;
  align-items: center;
  gap: 1rem;
  margin-bottom: 1rem;
}

.app-layout {
  display: grid;
  grid-template-columns: minmax(360px, 430px) 1fr;
  gap: 1.2rem;
  align-items: start;
}

.sidebar-panel {
  position: sticky;
  top: 1rem;
  max-height: calc(100vh - 2rem);
  overflow: auto;
  gap: 1rem;
  padding: 1.55rem;
}

.sidebar-docs {
  max-height: 52vh;
  overflow: auto;
  padding-right: 0.25rem;
}

.user-email {
  overflow-wrap: anywhere;
}

label,
input,
textarea,
button {
  display: block;
  width: 100%;
}

label {
  color: #453d34;
  font-weight: 500;
}

input,
textarea {
  margin-top: 0.38rem;
  border: 1px solid var(--line);
  border-radius: 12px;
  background: var(--card-strong);
  padding: 0.75rem 0.85rem;
  color: var(--ink-1);
  transition: border-color 0.2s ease, box-shadow 0.2s ease;
}

input:focus,
textarea:focus {
  outline: none;
  border-color: var(--brand-2);
  box-shadow: 0 0 0 3px rgba(255, 123, 0, 0.2);
}

textarea {
  min-height: 146px;
  resize: vertical;
}

button {
  border: none;
  border-radius: 12px;
  padding: 0.8rem 1rem;
  color: #fff;
  cursor: pointer;
  font-weight: 700;
  background: linear-gradient(135deg, var(--brand-1), var(--brand-2));
  transition: transform 0.12s ease, filter 0.2s ease;
}

button:hover {
  filter: brightness(1.06);
}

button:active {
  transform: translateY(1px);
}

button:disabled {
  opacity: 0.76;
  cursor: wait;
}

button.secondary {
  background: linear-gradient(135deg, #2f2f2f, #151515);
}

button.danger {
  background: linear-gradient(135deg, #b42318, #e11d48);
}

.result {
  margin: 0;
  white-space: pre-wrap;
  overflow-wrap: anywhere;
  border-radius: 12px;
  border: 1px solid var(--ok-line);
  background: var(--ok-bg);
  color: #0f5132;
  font-family: "IBM Plex Mono", monospace;
  font-size: 0.85rem;
  padding: 0.8rem;
  min-height: 76px;
}

.hidden {
  display: none;
}

.result:empty {
  display: none;
}

.result.error {
  background: var(--error-bg);
  border-color: var(--error-line);
  color: #991b1b;
}

.docs {
  display: grid;
  gap: 0.75rem;
}

.chat-shell {
  gap: 0.75rem;
}

.chat-panel {
  min-height: 78vh;
  padding: 1.55rem;
}

.chat-thread {
  border: 1px solid var(--line);
  background: rgba(255, 250, 243, 0.7);
  border-radius: 14px;
  padding: 0.9rem;
  min-height: 56vh;
  max-height: 68vh;
  overflow-y: auto;
  display: grid;
  gap: 0.65rem;
  align-content: start;
  grid-auto-rows: max-content;
}

.chat-msg {
  display: flex;
}

.chat-msg.user {
  justify-content: flex-end;
}

.chat-msg.assistant {
  justify-content: flex-start;
}

.chat-bubble {
  border-radius: 14px;
  padding: 0.6rem 0.75rem;
  max-width: min(92%, 900px);
  overflow-wrap: anywhere;
  border: 1px solid var(--line);
}

.chat-bubble-user {
  background: linear-gradient(135deg, var(--brand-1), var(--brand-2));
  color: #fff;
  border-color: transparent;
}

.chat-bubble-assistant {
  background: var(--card-strong);
  color: var(--ink-1);
}

.chat-pending {
  opacity: 0.8;
  font-style: italic;
}

.chat-error {
  border-color: var(--error-line);
  background: var(--error-bg);
  color: #991b1b;
}

.chat-composer {
  display: grid;
  gap: 0.6rem;
  padding-top: 0.35rem;
  border-top: 1px solid var(--line);
  background: linear-gradient(180deg, rgba(255, 252, 247, 0), rgba(255, 252, 247, 0.92) 35%);
}

.chat-composer textarea {
  min-height: 118px;
  font-size: 1rem;
}

.chat-markdown p,
.chat-markdown ul,
.chat-markdown ol {
  margin: 0.3rem 0;
}

.chat-markdown p:first-child {
  margin-top: 0;
}

.chat-markdown p:last-child {
  margin-bottom: 0;
}

.chat-markdown pre,
.chat-markdown code {
  font-family: "IBM Plex Mono", monospace;
}

.chat-markdown pre {
  background: #f6ead8;
  border: 1px solid #e8ccb0;
  border-radius: 10px;
  padding: 0.65rem;
  overflow-x: auto;
}

.chat-markdown table {
  border-collapse: collapse;
  width: 100%;
  display: block;
  overflow-x: auto;
  margin: 0.5rem 0;
}

.chat-markdown th,
.chat-markdown td {
  border: 1px solid var(--line-strong);
  padding: 0.5rem;
  text-align: left;
  vertical-align: top;
  min-width: 120px;
}

.message-panel {
  margin-top: 0.1rem;
  min-height: auto;
}

.source-dropdown {
  margin-top: 0.5rem;
  border-top: 1px solid var(--line);
  padding-top: 0.45rem;
}

.source-dropdown summary {
  cursor: pointer;
  font-weight: 700;
  color: #6d3d13;
  list-style: none;
}

.source-dropdown summary::-webkit-details-marker {
  display: none;
}

.source-dropdown summary::before {
  content: "▸";
  display: inline-block;
  margin-right: 0.35rem;
  transition: transform 0.15s ease;
}

.source-dropdown[open] summary::before {
  transform: rotate(90deg);
}

.chat-bubble .panel,
.chat-bubble .card,
.chat-bubble .result {
  min-height: 0;
}

.sources-panel {
  display: grid;
  gap: 0.7rem;
  margin-top: 0.55rem;
}

.sources-panel h4 {
  margin: 0.2rem 0 0.45rem;
  font-size: 0.9rem;
  color: #6d3d13;
}

.source-list {
  display: grid;
  gap: 0.55rem;
}

.source-card {
  border: 1px solid var(--line);
  background: #fff8ef;
  border-radius: 10px;
  padding: 0.65rem;
}

.source-meta {
  display: flex;
  justify-content: space-between;
  align-items: center;
  gap: 0.6rem;
  margin-bottom: 0.35rem;
}

.source-doc {
  font-weight: 700;
  font-size: 0.86rem;
}

.source-page {
  font-size: 0.78rem;
  color: #6d3d13;
}

.source-excerpt {
  margin: 0;
  font-size: 0.9rem;
  line-height: 1.48;
}

.source-excerpt mark {
  background: #ffe29d;
  color: #402100;
  border-radius: 3px;
  padding: 0 2px;
}

.source-actions {
  margin-top: 0.5rem;
}

.source-link {
  display: inline-block;
  text-decoration: none;
  font-size: 0.82rem;
  font-weight: 700;
  padding: 0.35rem 0.58rem;
  border: 1px solid #ddbb97;
  border-radius: 8px;
  color: #6d3d13;
  background: #fdf2e4;
}

.source-web-list {
  margin: 0;
  padding-left: 1.1rem;
}

.doc {
  display: grid;
  border: 1px solid var(--line);
  border-radius: 14px;
  padding: 1.1rem;
  background: var(--card-strong);
  gap: 0.75rem;
}

.doc-actions {
  margin-top: 0.75rem;
  display: flex;
  justify-content: space-between;
  align-items: center;
}

.doc-actions button {
  width: 100%;
  padding: 0.55rem 0.9rem;
  font-size: 0.82rem;
}

.doc-head {
  display: flex;
  justify-content: space-between;
  align-items: center;
  gap: 0.8rem;
}

.doc-pages {
  color: #684b29;
  font-size: 0.8rem;
  padding: 0.2rem 0.55rem;
  border-radius: 999px;
  border: 1px solid rgba(178, 74, 0, 0.24);
  background: rgba(255, 206, 140, 0.3);
}

.hash {
  margin: 0.55rem 0 0.3rem;
  color: #5d5247;
}

code {
  font-family: "IBM Plex Mono", monospace;
  font-size: 0.76rem;
}

.summary {
  margin: 0;
  color: #2d251d;
}

@keyframes fade-up {
  from {
    opacity: 0;
    transform: translateY(8px);
  }
  to {
    opacity: 1;
    transform: translateY(0);
  }
}

@media (max-width: 720px) {
  .shell {
    width: min(1120px, calc(100% - 1rem));
    padding-top: 0.95rem;
  }

  .toolbar {
    flex-direction: column;
    align-items: stretch;
  }

  .hero-topline,
  .panel-head-inline,
  .doc-head {
    flex-direction: column;
    align-items: flex-start;
  }

  .app-layout {
    grid-template-columns: 1fr;
  }

  .workspace-strip {
    flex-direction: column;
    align-items: flex-start;
  }

  .sidebar-panel {
    position: static;
    max-height: none;
  }

  .sidebar-docs {
    max-height: none;
  }

  .chat-panel {
    min-height: 68vh;
  }

  .chat-thread {
    min-height: 46vh;
    max-height: 56vh;
  }
}
app/templates/index.html
ADDED
@@ -0,0 +1,577 @@
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8" />
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
| 6 |
+
<title>DocsQA Assignment</title>
|
| 7 |
+
<link rel="preconnect" href="https://fonts.googleapis.com" />
|
| 8 |
+
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
|
| 9 |
+
<link
|
| 10 |
+
href="https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@400;500;600;700&family=IBM+Plex+Mono:wght@400;500&display=swap"
|
| 11 |
+
rel="stylesheet"
|
| 12 |
+
/>
|
| 13 |
+
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
|
| 14 |
+
<link rel="stylesheet" href="/static/style.css" />
|
| 15 |
+
</head>
|
| 16 |
+
<body>
|
| 17 |
+
<div class="bg-orb orb-one"></div>
|
| 18 |
+
<div class="bg-orb orb-two"></div>
|
| 19 |
+
<main class="shell">
|
| 20 |
+
{% if not user %}
|
| 21 |
+
<section class="hero card">
|
| 22 |
+
<div class="hero-topline">
|
| 23 |
+
<p class="eyebrow">LangGraph Assignment</p>
|
| 24 |
+
<span class="badge">FastAPI + Supabase + PGVector</span>
|
| 25 |
+
</div>
|
| 26 |
+
<h1>DocsQA Workspace</h1>
|
| 27 |
+
<p class="lede">
|
| 28 |
+
Upload PDFs, avoid duplicate reprocessing by file hash, and ask an agent that uses user-scoped document retrieval with optional web search.
|
| 29 |
+
</p>
|
| 30 |
+
{% if db_unavailable %}
|
| 31 |
+
<p class="db-warning">
|
| 32 |
+
Database connection is temporarily unavailable. This is usually a transient DNS/network issue with the Supabase host. Please retry shortly.
|
| 33 |
+
</p>
|
| 34 |
+
{% endif %}
|
| 35 |
+
</section>
|
| 36 |
+
{% else %}
|
| 37 |
+
<section class="workspace-strip card">
|
| 38 |
+
<div>
|
| 39 |
+
<p class="eyebrow">LangGraph Assignment</p>
|
| 40 |
+
<h1>DocsQA Workspace</h1>
|
| 41 |
+
<p class="muted">Private document chat with structured sources.</p>
|
| 42 |
+
</div>
|
| 43 |
+
<span class="badge">FastAPI + Supabase + PGVector</span>
|
| 44 |
+
</section>
|
| 45 |
+
{% endif %}
|
| 46 |
+
|
| 47 |
+
{% if not user %}
|
| 48 |
+
<section class="grid two auth-grid">
|
| 49 |
+
<form class="card panel" id="register-form" method="post" action="/register">
|
| 50 |
+
<div class="panel-head">
|
| 51 |
+
<h2>Create account</h2>
|
| 52 |
+
<p>Start by creating your personal docs workspace.</p>
|
| 53 |
+
</div>
|
| 54 |
+
<label>Email <input type="email" name="email" required /></label>
|
| 55 |
+
<label>Password <input type="password" name="password" required /></label>
|
| 56 |
+
<button type="submit">Register</button>
|
| 57 |
+
<pre class="result ok" id="register-result"></pre>
|
| 58 |
+
</form>
|
| 59 |
+
|
| 60 |
+
<form class="card panel" id="login-form" method="post" action="/login">
|
| 61 |
+
<div class="panel-head">
|
| 62 |
+
<h2>Sign in</h2>
|
| 63 |
+
<p>Continue with your existing account.</p>
|
| 64 |
+
</div>
|
| 65 |
+
<label>Email <input type="email" name="email" required /></label>
|
| 66 |
+
<label>Password <input type="password" name="password" required /></label>
|
| 67 |
+
<button type="submit">Login</button>
|
| 68 |
+
<pre class="result ok" id="login-result"></pre>
|
| 69 |
+
</form>
|
| 70 |
+
</section>
|
| 71 |
+
{% else %}
|
| 72 |
+
<section class="app-layout">
|
| 73 |
+
<aside class="card panel sidebar-panel">
|
| 74 |
+
<div class="panel-head">
|
| 75 |
+
<h2 class="user-email">{{ user.email }}</h2>
|
| 76 |
+
<p class="muted">Your uploaded docs are private to this account.</p>
|
| 77 |
+
</div>
|
| 78 |
+
|
| 79 |
+
<form id="upload-form" class="panel">
|
| 80 |
+
<label class="muted">Upload PDFs (max 5 files, 10 pages each)</label>
|
| 81 |
+
<input type="file" id="file" name="file" accept="application/pdf" multiple required />
|
| 82 |
+
<button type="submit">Upload</button>
|
| 83 |
+
</form>
|
| 84 |
+
<pre class="result" id="upload-result"></pre>
|
| 85 |
+
|
| 86 |
+
<div class="panel-head panel-head-inline">
|
| 87 |
+
<h3>Your documents</h3>
|
| 88 |
+
<span class="badge">{{ documents|length }}</span>
|
| 89 |
+
</div>
|
| 90 |
+
<div class="docs sidebar-docs">
|
| 91 |
+
{% for document in documents %}
|
| 92 |
+
<article class="doc">
|
| 93 |
+
<header class="doc-head">
|
| 94 |
+
<h3>{{ document.filename }}</h3>
|
| 95 |
+
<span class="doc-pages">{{ document.page_count }} pages</span>
|
| 96 |
+
</header>
|
| 97 |
+
<div class="doc-actions">
|
| 98 |
+
<button
|
| 99 |
+
type="button"
|
| 100 |
+
class="danger doc-delete-btn"
|
| 101 |
+
data-document-id="{{ document.id }}"
|
| 102 |
+
data-document-name="{{ document.filename }}"
|
| 103 |
+
>
|
| 104 |
+
Delete
|
| 105 |
+
</button>
|
| 106 |
+
</div>
|
| 107 |
+
</article>
|
| 108 |
+
{% else %}
|
| 109 |
+
<p class="muted">No documents uploaded yet.</p>
|
| 110 |
+
{% endfor %}
|
| 111 |
+
</div>
|
| 112 |
+
|
| 113 |
+
<form method="post" action="/logout" id="logout-form">
|
| 114 |
+
<button type="submit" class="secondary">Sign out</button>
|
| 115 |
+
</form>
|
| 116 |
+
</aside>
|
| 117 |
+
|
| 118 |
+
<section class="card panel chat-shell chat-panel">
|
| 119 |
+
<div class="panel-head panel-head-inline">
|
| 120 |
+
<h2>DocsQA Chat</h2>
|
| 121 |
+
<span class="badge">Markdown enabled</span>
|
| 122 |
+
</div>
|
| 123 |
+
<div id="chat-thread" class="chat-thread">
|
| 124 |
+
<article class="chat-msg assistant">
|
| 125 |
+
<div class="chat-bubble chat-bubble-assistant chat-markdown">
|
| 126 |
+
<p>Ask anything about your uploaded PDFs and I will answer with citations from retrieved chunks.</p>
|
| 127 |
+
</div>
|
| 128 |
+
</article>
|
| 129 |
+
</div>
|
| 130 |
+
<form id="ask-form" class="chat-composer">
|
| 131 |
+
<textarea
|
| 132 |
+
id="query"
|
| 133 |
+
rows="3"
|
| 134 |
+
placeholder="Message DocsQA..."
|
| 135 |
+
required
|
| 136 |
+
></textarea>
|
| 137 |
+
<button type="submit">Send</button>
|
| 138 |
+
</form>
|
| 139 |
+
</section>
|
| 140 |
+
</section>
|
| 141 |
+
{% endif %}
|
| 142 |
+
</main>
|
| 143 |
+
|
| 144 |
+
<script>
|
| 145 |
+
const registerForm = document.getElementById("register-form");
|
| 146 |
+
const loginForm = document.getElementById("login-form");
|
| 147 |
+
const logoutForm = document.getElementById("logout-form");
|
| 148 |
+
const registerResult = document.getElementById("register-result");
|
| 149 |
+
const loginResult = document.getElementById("login-result");
|
| 150 |
+
const uploadForm = document.getElementById("upload-form");
|
| 151 |
+
const askForm = document.getElementById("ask-form");
|
| 152 |
+
const uploadResult = document.getElementById("upload-result");
|
| 153 |
+
const chatThread = document.getElementById("chat-thread");
|
| 154 |
+
const queryInput = document.getElementById("query");
|
| 155 |
+
const docDeleteButtons = document.querySelectorAll(".doc-delete-btn");
|
| 156 |
+
|
| 157 |
+
const safeJson = async (response) => {
|
| 158 |
+
try {
|
| 159 |
+
return await response.json();
|
| 160 |
+
} catch {
|
| 161 |
+
return { detail: "Unexpected non-JSON response" };
|
| 162 |
+
}
|
| 163 |
+
};
|
| 164 |
+
|
| 165 |
+
const prettyError = (body) => {
|
| 166 |
+
if (!body) return "Request failed.";
|
| 167 |
+
if (typeof body.detail === "string") return body.detail;
|
| 168 |
+
if (typeof body.message === "string") return body.message;
|
| 169 |
+
return "Request failed.";
|
| 170 |
+
};
|
| 171 |
+
|
| 172 |
+
const setBusy = (form, busy) => {
|
| 173 |
+
if (!form) return;
|
| 174 |
+
const button = form.querySelector("button[type='submit']");
|
| 175 |
+
if (!button) return;
|
| 176 |
+
button.disabled = busy;
|
| 177 |
+
if (busy) {
|
| 178 |
+
button.dataset.originalText = button.textContent;
|
| 179 |
+
button.textContent = "Please wait...";
|
| 180 |
+
} else if (button.dataset.originalText) {
|
| 181 |
+
button.textContent = button.dataset.originalText;
|
| 182 |
+
}
|
| 183 |
+
};
|
| 184 |
+
|
| 185 |
+
const escapeHtml = (value) =>
|
| 186 |
+
value
|
| 187 |
+
.replaceAll("&", "&")
|
| 188 |
+
.replaceAll("<", "<")
|
| 189 |
+
.replaceAll(">", ">");
|
| 190 |
+
|
| 191 |
+
const normalizeTabularAnswer = (text) => {
|
| 192 |
+
const lines = text.split("\n");
|
| 193 |
+
const out = [];
|
| 194 |
+
let i = 0;
|
| 195 |
+
|
| 196 |
+
while (i < lines.length) {
|
| 197 |
+
const line = lines[i];
|
| 198 |
+
if (!line.includes("\t")) {
|
| 199 |
+
out.push(line);
|
| 200 |
+
i += 1;
|
| 201 |
+
continue;
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
const rows = [];
|
| 205 |
+
while (i < lines.length && lines[i].includes("\t")) {
|
| 206 |
+
rows.push(lines[i].split("\t").map((cell) => cell.trim()));
|
| 207 |
+
i += 1;
|
| 208 |
+
}
|
| 209 |
+
|
| 210 |
+
if (rows.length < 2) {
|
| 211 |
+
out.push(...rows.map((row) => row.join(" ")));
|
| 212 |
+
continue;
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
const columnCount = Math.max(...rows.map((row) => row.length));
|
| 216 |
+
const paddedRows = rows.map((row) => {
|
| 217 |
+
const copy = [...row];
|
| 218 |
+
while (copy.length < columnCount) copy.push("");
|
| 219 |
+
return copy.map((cell) => cell.replaceAll("|", "\\|").replaceAll("\n", "<br>"));
|
| 220 |
+
});
|
| 221 |
+
|
| 222 |
+
out.push(`| ${paddedRows[0].join(" | ")} |`);
|
| 223 |
+
out.push(`| ${Array(columnCount).fill("---").join(" | ")} |`);
|
| 224 |
+
for (const row of paddedRows.slice(1)) {
|
| 225 |
+
out.push(`| ${row.join(" | ")} |`);
|
| 226 |
+
}
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
return out.join("\n");
|
| 230 |
+
};
|
| 231 |
+
|
| 232 |
+
const sanitizeHtml = (html) => {
|
| 233 |
+
const parser = new DOMParser();
|
| 234 |
+
const doc = parser.parseFromString(html, "text/html");
|
| 235 |
+
doc.querySelectorAll("script,style,iframe,object,embed").forEach((node) => node.remove());
|
| 236 |
+
for (const el of doc.querySelectorAll("*")) {
|
| 237 |
+
for (const attr of [...el.attributes]) {
|
| 238 |
+
const name = attr.name.toLowerCase();
|
| 239 |
+
const value = attr.value.toLowerCase();
|
| 240 |
+
if (name.startsWith("on")) {
|
| 241 |
+
el.removeAttribute(attr.name);
|
| 242 |
+
}
|
| 243 |
+
if ((name === "href" || name === "src") && value.startsWith("javascript:")) {
|
| 244 |
+
el.removeAttribute(attr.name);
|
| 245 |
+
}
|
| 246 |
+
}
|
| 247 |
+
}
|
| 248 |
+
return doc.body.innerHTML;
|
| 249 |
+
};
|
| 250 |
+
|
| 251 |
+
const renderMarkdown = (text) => {
|
| 252 |
+
const normalized = normalizeTabularAnswer(text || "");
|
| 253 |
+
if (window.marked?.parse) {
|
| 254 |
+
const html = window.marked.parse(normalized, { gfm: true, breaks: true });
|
| 255 |
+
return sanitizeHtml(html);
|
| 256 |
+
}
|
| 257 |
+
return `<pre>${escapeHtml(normalized)}</pre>`;
|
| 258 |
+
};
|
| 259 |
+
|
| 260 |
+
const stripSourceStatusLines = (text) => {
|
| 261 |
+
if (!text) return "";
|
| 262 |
+
const withoutSourcesSection = text.replace(/\n+\s*(?:#+\s*)?sources\s*:?\s*\n[\s\S]*$/i, "").trim();
|
| 263 |
+
return withoutSourcesSection
|
| 264 |
+
.split("\n")
|
| 265 |
+
.filter((line) => !/^\s*no sources were used for this response\.?\s*$/i.test(line.trim()))
|
| 266 |
+
.filter((line) => !/^\s*no citations available for this turn\.?\s*$/i.test(line.trim()))
|
| 267 |
+
.join("\n")
|
| 268 |
+
.trim();
|
| 269 |
+
};
|
| 270 |
+
|
| 271 |
+
const buildSourcesMarkdown = (sources) => {
|
| 272 |
+
const vectorSources = Array.isArray(sources?.vector) ? sources.vector : [];
|
| 273 |
+
const webSources = Array.isArray(sources?.web) ? sources.web : [];
|
| 274 |
+
const lines = [];
|
| 275 |
+
|
| 276 |
+
if (vectorSources.length) {
|
| 277 |
+
lines.push("### Document Sources");
|
| 278 |
+
for (const src of vectorSources) {
|
| 279 |
+
const doc = src.document || "Unknown document";
|
| 280 |
+
const page = src.page || "unknown";
|
| 281 |
+
const excerpt = src.excerpt || "";
|
| 282 |
+
lines.push(`- **${doc}**, page **${page}**`);
|
| 283 |
+
if (excerpt) {
|
| 284 |
+
lines.push(` - "${excerpt}"`);
|
| 285 |
+
}
|
| 286 |
+
}
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
if (webSources.length) {
|
| 290 |
+
if (lines.length) lines.push("");
|
| 291 |
+
lines.push("### Web Sources");
|
| 292 |
+
for (const src of webSources) {
|
| 293 |
+
const title = src.title || "Untitled source";
|
| 294 |
+
const url = src.url || "";
|
| 295 |
+
if (url && url !== "N/A") {
|
| 296 |
+
lines.push(`- [${title}](${url})`);
|
| 297 |
+
} else {
|
| 298 |
+
lines.push(`- ${title}`);
|
| 299 |
+
}
|
| 300 |
+
}
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
if (lines.length) return lines.join("\n");
|
| 304 |
+
return "_No citations available for this turn._";
|
| 305 |
+
};
|
| 306 |
+
|
| 307 |
+
const sourceStopwords = new Set([
|
| 308 |
+
"the", "and", "for", "with", "from", "that", "this", "what", "who", "how", "are", "was", "were", "is",
|
| 309 |
+
"of", "about", "tell", "more", "please", "can", "you", "your", "according", "resume"
|
| 310 |
+
]);
|
| 311 |
+
|
| 312 |
+
const extractQueryTerms = (queryText) => {
|
| 313 |
+
const raw = (queryText || "").toLowerCase().match(/[a-z0-9_]+/g) || [];
|
| 314 |
+
const deduped = [];
|
| 315 |
+
const seen = new Set();
|
| 316 |
+
for (const term of raw) {
|
| 317 |
+
if (term.length < 3 || sourceStopwords.has(term) || seen.has(term)) continue;
|
| 318 |
+
seen.add(term);
|
| 319 |
+
deduped.push(term);
|
| 320 |
+
}
|
| 321 |
+
return deduped;
|
| 322 |
+
};
|
| 323 |
+
|
| 324 |
+
const escapeRegex = (value) => value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
|
| 325 |
+
|
| 326 |
+
const highlightMatches = (text, terms) => {
|
| 327 |
+
const plain = text || "";
|
| 328 |
+
if (!terms.length) return escapeHtml(plain);
|
| 329 |
+
const pattern = new RegExp(`\\b(${terms.map(escapeRegex).join("|")})\\b`, "gi");
|
| 330 |
+
return escapeHtml(plain).replace(pattern, "<mark>$1</mark>");
|
| 331 |
+
};
|
| 332 |
+
|
| 333 |
+
const renderSourcesHtml = (sources, queryText = "") => {
|
| 334 |
+
const vectorSources = Array.isArray(sources?.vector) ? sources.vector : [];
|
| 335 |
+
const webSources = Array.isArray(sources?.web) ? sources.web : [];
|
| 336 |
+
const terms = extractQueryTerms(queryText);
|
| 337 |
+
|
| 338 |
+
const sections = [];
|
| 339 |
+
|
| 340 |
+
if (vectorSources.length) {
|
| 341 |
+
const vectorCards = vectorSources
|
| 342 |
+
.map((src) => {
|
| 343 |
+
const documentId = (src.document_id || "").toString().trim();
|
| 344 |
+
const doc = escapeHtml(src.document || "Unknown document");
|
| 345 |
+
const page = escapeHtml(src.page || "unknown");
|
| 346 |
+
const excerptHtml = highlightMatches(src.excerpt || "", terms);
|
| 347 |
+
const pageNumber = Number.parseInt(src.page || "", 10);
|
| 348 |
+
const pageAnchor = Number.isFinite(pageNumber) && pageNumber > 0 ? `#page=${pageNumber}` : "";
|
| 349 |
+
const pdfUrl = documentId ? `/documents/${encodeURIComponent(documentId)}/pdf${pageAnchor}` : "";
|
| 350 |
+
return `
|
| 351 |
+
<article class="source-card">
|
| 352 |
+
<div class="source-meta">
|
| 353 |
+
<span class="source-doc">${doc}</span>
|
| 354 |
+
<span class="source-page">Page ${page}</span>
|
| 355 |
+
</div>
|
| 356 |
+
<p class="source-excerpt">${excerptHtml || "No excerpt available."}</p>
|
| 357 |
+
${
|
| 358 |
+
pdfUrl
|
| 359 |
+
? `<div class="source-actions"><a class="source-link" href="${pdfUrl}" target="_blank" rel="noopener noreferrer">Open PDF</a></div>`
|
| 360 |
+
: ""
|
| 361 |
+
}
|
| 362 |
+
</article>
|
| 363 |
+
`;
|
| 364 |
+
})
|
| 365 |
+
.join("");
|
| 366 |
+
sections.push(`<section><h4>Document Sources</h4><div class="source-list">${vectorCards}</div></section>`);
|
| 367 |
+
}
|
| 368 |
+
|
| 369 |
+
if (webSources.length) {
|
| 370 |
+
const webItems = webSources
|
| 371 |
+
.map((src) => {
|
| 372 |
+
const title = escapeHtml(src.title || "Untitled source");
|
| 373 |
+
const url = src.url || "";
|
| 374 |
+
if (url && url !== "N/A") {
|
| 375 |
+
return `<li><a href="${escapeHtml(url)}" target="_blank" rel="noopener noreferrer">${title}</a></li>`;
|
| 376 |
+
}
|
| 377 |
+
return `<li>${title}</li>`;
|
| 378 |
+
})
|
| 379 |
+
.join("");
|
| 380 |
+
sections.push(`<section><h4>Web Sources</h4><ul class="source-web-list">${webItems}</ul></section>`);
|
| 381 |
+
}
|
| 382 |
+
|
| 383 |
+
if (!sections.length) {
|
| 384 |
+
return `<p class="muted">No citations available for this turn.</p>`;
|
| 385 |
+
}
|
| 386 |
+
return sections.join("");
|
| 387 |
+
};

const renderAssistantResponse = (container, fullAnswerText, sourceDict = null, queryText = "") => {
  if (!container) return;
  const answerContent = stripSourceStatusLines(fullAnswerText) || "Response received.";
  const sourcesContent = renderSourcesHtml(sourceDict, queryText);

  container.innerHTML = "";

  const answerPanel = document.createElement("div");
  answerPanel.className = "message-panel";
  answerPanel.innerHTML = renderMarkdown(answerContent);

  container.appendChild(answerPanel);

  const details = document.createElement("details");
  details.className = "source-dropdown";
  const summary = document.createElement("summary");
  summary.textContent = "Sources";
  const body = document.createElement("div");
  body.className = "sources-panel";
  body.innerHTML = sourcesContent;
  details.appendChild(summary);
  details.appendChild(body);
  container.appendChild(details);
};
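
// Design note: the answer gets its own panel while the citations sit inside a
// native <details>/<summary> disclosure, so the thread stays compact and no
// extra JS is needed to toggle the sources open and closed.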

const appendMessage = ({ role, text, markdown = false, pending = false, isError = false }) => {
  if (!chatThread) return null;
  const row = document.createElement("article");
  row.className = `chat-msg ${role}`;

  const bubble = document.createElement("div");
  bubble.className = `chat-bubble ${role === "user" ? "chat-bubble-user" : "chat-bubble-assistant"}`;
  if (markdown) bubble.classList.add("chat-markdown");
  if (pending) bubble.classList.add("chat-pending");
  if (isError) bubble.classList.add("chat-error");

  if (markdown) {
    bubble.innerHTML = renderMarkdown(text);
  } else {
    bubble.textContent = text;
  }

  row.appendChild(bubble);
  chatThread.appendChild(row);
  chatThread.scrollTop = chatThread.scrollHeight;
  return bubble;
};
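
// Usage sketch (illustrative): the ask handler below creates its placeholder
// bubble with
//   appendMessage({ role: "assistant", text: "Thinking...", pending: true });
// and fills the returned element in place once /ask responds.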

registerForm?.addEventListener("submit", async (event) => {
  event.preventDefault();
  setBusy(registerForm, true);
  const formData = new FormData(registerForm);
  const response = await fetch("/register", { method: "POST", body: formData });
  const body = await safeJson(response);
  registerResult.textContent = response.ok
    ? "Registration successful. Redirecting to your workspace..."
    : prettyError(body);
  registerResult.classList.toggle("error", !response.ok);
  setBusy(registerForm, false);
  if (response.ok) {
    window.location.reload();
  }
});

loginForm?.addEventListener("submit", async (event) => {
  event.preventDefault();
  setBusy(loginForm, true);
  const formData = new FormData(loginForm);
  const response = await fetch("/login", { method: "POST", body: formData });
  const body = await safeJson(response);
  loginResult.textContent = response.ok
    ? "Login successful. Redirecting..."
    : prettyError(body);
  loginResult.classList.toggle("error", !response.ok);
  setBusy(loginForm, false);
  if (response.ok) {
    window.location.reload();
  }
});

logoutForm?.addEventListener("submit", async (event) => {
  event.preventDefault();
  const response = await fetch("/logout", { method: "POST" });
  if (response.ok) {
    window.location.reload();
  }
});

uploadForm?.addEventListener("submit", async (event) => {
  event.preventDefault();
  setBusy(uploadForm, true);
  const formData = new FormData();
  const fileInput = document.getElementById("file");
  const files = Array.from(fileInput?.files || []);
  if (!files.length) {
    uploadResult.textContent = "Please choose at least one PDF.";
    uploadResult.classList.add("error");
    setBusy(uploadForm, false);
    return;
  }
  for (const file of files) {
    formData.append("file", file);
  }
  const response = await fetch("/upload", { method: "POST", body: formData });
  const body = await safeJson(response);
  if (response.ok) {
    const createdCount = (body.documents || []).filter((doc) => doc.created).length;
    const reusedCount = (body.documents || []).length - createdCount;
    uploadResult.textContent =
      `Uploaded ${body.count} file(s). ${createdCount} indexed, ${reusedCount} reused.\n` +
      (body.documents || [])
        .map((doc) => `- ${doc.filename} (${doc.page_count} pages)`)
        .join("\n");
  } else {
    uploadResult.textContent = prettyError(body);
  }
  uploadResult.classList.toggle("error", !response.ok);
  setBusy(uploadForm, false);
  if (response.ok) {
    window.location.reload();
  }
});
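
// Note: every selected PDF is appended under the same "file" key, the multipart
// convention a FastAPI endpoint accepting a list of uploads expects; the key
// must match the parameter name of the /upload route in app/main.py.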

docDeleteButtons.forEach((button) => {
  button.addEventListener("click", async () => {
    const documentId = button.dataset.documentId;
    const documentName = button.dataset.documentName || "this document";
    if (!documentId) return;
    const confirmed = window.confirm(`Delete ${documentName} from your documents?`);
    if (!confirmed) return;

    button.disabled = true;
    const response = await fetch(`/documents/${documentId}`, { method: "DELETE" });
    const body = await safeJson(response);
    if (!response.ok) {
      button.disabled = false;
      uploadResult.textContent = prettyError(body);
      uploadResult.classList.add("error");
      return;
    }
    window.location.reload();
  });
});

askForm?.addEventListener("submit", async (event) => {
  event.preventDefault();
  const query = queryInput?.value?.trim() || "";
  if (!query) return;

  appendMessage({ role: "user", text: query });
  if (queryInput) queryInput.value = "";
  setBusy(askForm, true);
  const pendingBubble = appendMessage({ role: "assistant", text: "Thinking...", markdown: false, pending: true });

  const response = await fetch("/ask", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const body = await safeJson(response);
  const target = pendingBubble || appendMessage({ role: "assistant", text: "", markdown: false });
  if (!target) {
    setBusy(askForm, false);
    return;
  }
  target.classList.remove("chat-pending");

  if (response.ok) {
    const answerText = body.answer || "Response received.";
    target.classList.add("chat-markdown");
    renderAssistantResponse(target, answerText, body.sources || null, query);
  } else {
    const message = prettyError(body);
    target.classList.add("chat-error");
    target.textContent = message;
  }
  chatThread.scrollTop = chatThread.scrollHeight;
  setBusy(askForm, false);
});
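
// Contract assumed by this handler (inferred from the template; app/main.py is
// the authoritative schema):
//   POST /ask   body: {"query": "..."}
//   200 OK      body: {"answer": "...", "sources": {"vector": [...], "web": [...]}}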

queryInput?.addEventListener("keydown", (event) => {
  if (event.key === "Enter" && !event.shiftKey) {
    event.preventDefault();
    askForm?.requestSubmit();
  }
});
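
// Enter submits the question; Shift+Enter keeps the default behavior and
// inserts a newline. requestSubmit() is used instead of submit() because only
// the former fires the form's "submit" event and so reaches the handler above.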
</script>
</body>
</html>
pyproject.toml
ADDED
@@ -0,0 +1,35 @@
[project]
name = "docsqa-langgraph-assignment"
version = "0.1.0"
description = "LangGraph-powered document QA assignment app with login, upload deduplication, vector search, and web search."
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "fastapi>=0.115.0",
    "jinja2>=3.1.4",
    "langchain-community>=0.3.0",
    "langchain-groq>=0.2.0",
    "langchain-huggingface>=0.1.0",
    "langchain-text-splitters>=0.3.0",
    "langgraph>=0.2.35",
    "passlib[bcrypt]>=1.7.4",
    "pgvector>=0.3.6",
    "psycopg[binary]>=3.2.1",
    "pydantic-settings>=2.5.2",
    "pypdf>=5.0.1",
    "python-jose[cryptography]>=3.3.0",
    "python-multipart>=0.0.9",
    "sqlalchemy>=2.0.35",
    "sentence-transformers>=3.0.1",
    "uvicorn[standard]>=0.30.6",
    "email-validator>=2.2.0",
    "tavily-python==0.7.23",
    "supabase>=2.7.4",
]

[build-system]
requires = ["setuptools>=68.0"]
build-backend = "setuptools.build_meta"

[tool.setuptools.packages.find]
include = ["app*"]