Spaces:
Sleeping
Sleeping
Fix RAG context injection + updated retrieval logic
Browse files — main_api.py (+0 lines added, −4 lines removed)
main_api.py
CHANGED
|
@@ -242,10 +242,6 @@ def chat(req: ChatRequest):
     system_content = SYSTEM_PROMPT
     if trimmed_context:
         system_content += "\n\n===== RETRIEVED CONTEXT =====\n" + trimmed_context
-
-    # Always include recent conversation (to maintain chat flow)
-    system_content += "\n\n===== RECENT CHAT =====\n" + recent_chat
-
     # build prompt messages as list of simple dicts (call_llm expects same message format as in chatbot_graph)
     # chatbot_graph.call_llm expects langchain messages (SystemMessage/HumanMessage) — we built that in original file.
     # create messages as minimal objects that call_llm can accept (we rely on original call_llm).
Resulting file (main_api.py, lines 242–247 after the change):

    system_content = SYSTEM_PROMPT
    if trimmed_context:
        system_content += "\n\n===== RETRIEVED CONTEXT =====\n" + trimmed_context
    # build prompt messages as list of simple dicts (call_llm expects same message format as in chatbot_graph)
    # chatbot_graph.call_llm expects langchain messages (SystemMessage/HumanMessage) — we built that in original file.
    # create messages as minimal objects that call_llm can accept (we rely on original call_llm).