from build_database import get_embedding, client, collection
import requests


def search_similar(query: str, top_k: int = 5, embedding_url=None, model_name=None):
    """Return the ``top_k`` stored chunks most similar to *query*.

    Embeds the query via ``get_embedding`` and runs a vector search against
    the Chroma ``collection`` built by ``build_database``.

    Args:
        query: Natural-language search text.
        top_k: Number of nearest chunks to return (default 5, matching
            ``ask_rag``; previously this parameter had no default).
        embedding_url: Optional override passed through to ``get_embedding``.
        model_name: Optional override passed through to ``get_embedding``.

    Returns:
        A list of ``{"file": <source file>, "content": <chunk text>}`` dicts,
        ordered by similarity (most similar first).
    """
    query_emb = get_embedding(query, embedding_url, model_name)
    results = collection.query(query_embeddings=[query_emb], n_results=top_k)
    # Chroma returns one result list per query embedding; we sent exactly one.
    docs = results["documents"][0]
    metas = results["metadatas"][0]
    return [
        {"file": meta["file"], "content": doc}
        for doc, meta in zip(docs, metas)
    ]


def ask_rag(
    query: str,
    top_k: int = 5,
    *,
    api_url: str = "http://0.0.0.0:10003/v1/chat/completions",
    timeout: float = 120.0,
) -> str:
    """Answer *query* with retrieval-augmented generation.

    Retrieves the ``top_k`` most relevant law chunks, builds a Chinese legal
    prompt containing them, and asks an OpenAI-compatible chat endpoint.

    Args:
        query: The user's question.
        top_k: How many retrieved chunks to include as context.
        api_url: Chat-completions endpoint (keyword-only; default preserves
            the previously hard-coded URL).
        timeout: Seconds before the HTTP request is aborted. The original
            code had no timeout and could hang indefinitely.

    Returns:
        The assistant's answer text.

    Raises:
        requests.HTTPError: If the chat endpoint returns a non-2xx status.
        requests.Timeout: If the request exceeds ``timeout`` seconds.
    """
    results = search_similar(query, top_k)
    context = "\n\n".join([f"[{r['file']}] {r['content']}" for r in results])

    prompt = f"""你是一名专业的中国法律助手。以下是与问题相关的法律条文：
{context}

请根据这些条文回答下列问题：
{query}
"""

    response = requests.post(
        api_url,
        json={
            "model": "chat",
            "messages": [
                {"role": "system", "content": "你是一个严谨的法律助理。"},
                {"role": "user", "content": prompt},
            ],
        },
        timeout=timeout,
    )
    # Fail loudly on HTTP errors instead of a confusing KeyError on the body.
    response.raise_for_status()
    data = response.json()
    print(f"prompt: {prompt}")
    return data["choices"][0]["message"]["content"]


if __name__ == "__main__":
    # 如果数据库为空，则构建
    assert collection.count() != 0

    query = "黑土地保护法中提到了哪些财政投入的内容？"
    answer = ask_rag(query)
    print("🧾 答案：", answer)
