# Alternative retriever without reranking; the reranked version below is used instead:
# from retriever.vectordb import search_documents
from retriever.vectordb_rerank import search_documents

from generator.prompt_builder import build_prompt
from generator.llm_inference import generate_answer
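
# Helper contracts assumed from the call sites in rag_pipeline below
# (inferred from this file only, not confirmed against the helper modules):
#   search_documents(query: str, top_k: int) -> retrieved documents
#   build_prompt(query: str, context_docs) -> str
#   generate_answer(prompt: str) -> str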

def rag_pipeline(query: str, top_k: int = 5) -> str:
    """Run the RAG pipeline end to end.

    1. Retrieve documents relevant to the user's question.
    2. Build a prompt with the retrieved documents.
    3. Generate an answer from the prompt.
    """
    # 1. Retrieval: fetch the top_k most relevant documents for the query.
    context_docs = search_documents(query, top_k=top_k)
    print("context_docs:", context_docs)
    # 2. Prompt assembly: combine the query with the retrieved context.
    prompt = build_prompt(query, context_docs)
    print("prompt:", prompt)
    # 3. Model inference: generate the final answer from the prompt.
    output = generate_answer(prompt)
    print("output:", output)
    return output
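
# Usage sketch (assumption: this module is run directly from the project root,
# with the retriever/ and generator/ packages importable; the query string and
# top_k value are illustrative, not from the source):
if __name__ == "__main__":
    answer = rag_pipeline("What is retrieval-augmented generation?", top_k=3)
    print(answer)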