from typing import List

from fastapi import FastAPI, HTTPException
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from pydantic import BaseModel

from .config import config

# Single application instance served by uvicorn as `rag.service:app` (see note at end of file).
app = FastAPI(title="Simple RAG Service", version="0.1.0")


class QueryRequest(BaseModel):
    """Request body for POST /query."""

    # Natural-language search string; embedded and matched against the vector store.
    query: str
    # Number of documents to retrieve; when omitted (None), config.top_k is used.
    top_k: int | None = None


class RetrievedDoc(BaseModel):
    """One retrieved document in the POST /query response."""

    # Document origin taken from stored metadata; "unknown" when no source was recorded.
    source: str
    # The document's raw text content.
    content: str


# Lazily-initialized module-level singletons, created on first use by
# _load_embeddings / _load_vector_store rather than at import time.
_embeddings = None
_vector_store = None


def _load_embeddings():
    """Return the shared embedding model, constructing it on first call."""
    global _embeddings
    if _embeddings is not None:
        return _embeddings
    # Built once per process; model name and encode batch size come from config.
    _embeddings = HuggingFaceEmbeddings(
        model_name=config.embedding_model,
        encode_kwargs={"batch_size": config.batch_size},
    )
    return _embeddings


def _load_vector_store():
    """Return the shared Chroma vector store, opening it on first call."""
    global _vector_store
    if _vector_store is not None:
        return _vector_store
    # Opened once per process against the on-disk collection named in config.
    _vector_store = Chroma(
        collection_name=config.collection_name,
        embedding_function=_load_embeddings(),
        persist_directory=str(config.persist_dir),
    )
    return _vector_store


# Retrieval-only endpoint: no LLM or RetrievalQA chain is constructed; the
# retrieved documents are returned to the caller directly.
@app.post("/query", response_model=List[RetrievedDoc])
async def query(req: QueryRequest):
    """Retrieve the top-k documents most similar to the query string.

    Uses the request's top_k when provided, otherwise config.top_k.
    Raises HTTP 422 when top_k is not a positive integer — previously a
    negative value was passed straight to the vector store, and an explicit
    top_k=0 was silently replaced by the config default (falsy-zero bug).
    """
    k = req.top_k if req.top_k is not None else config.top_k
    if k < 1:
        raise HTTPException(status_code=422, detail="top_k must be a positive integer")
    vs = _load_vector_store()
    retriever = vs.as_retriever(search_kwargs={"k": k})
    docs = retriever.get_relevant_documents(req.query)
    # Fall back to "unknown" when ingestion recorded no source path in metadata.
    return [
        RetrievedDoc(
            source=(d.metadata or {}).get("source", "unknown"),
            content=d.page_content,
        )
        for d in docs
    ]


@app.get("/health")
async def health():
    """Liveness probe; unconditionally reports the service as up."""
    payload = {"status": "ok"}
    return payload

# To run: uvicorn rag.service:app --reload
