|
|
from langchain_core.messages import AIMessage
|
|
|
from data import debug_print,llm1
|
|
|
from nodes.intent import get_pretty_state_string,CreditCardState
|
|
|
from langchain_core.messages import AIMessage, SystemMessage, HumanMessage
|
|
|
|
|
|
|
|
|
|
|
|
async def agent_node(state: CreditCardState):
    """LangGraph node: ask the LLM to pick the best credit card for the user's query.

    Reads ``raw_query`` (required), and optionally ``preferences`` and
    ``ranked_cards``, from the graph state; builds a single prompt and invokes
    ``llm1`` asynchronously.

    Args:
        state: Current ``CreditCardState``. ``raw_query`` must be present;
            ``preferences`` and ``ranked_cards`` are read defensively.

    Returns:
        dict: ``{"messages": [AIMessage]}`` — the model's recommendation, or an
        error message flagged with ``additional_kwargs={"error": True}`` if
        generation failed.
    """
    debug_print("NODE", f"Entered agent_node with state:\n{get_pretty_state_string(state)}\n")

    full_query = state["raw_query"]

    # FIX: use .get() so a state dict without a "preferences" key doesn't
    # KeyError (the original indexed state["preferences"] directly, while
    # "ranked_cards" below already used .get() — this makes the two consistent).
    preferences = state.get("preferences")
    selected_preferences = f"\nUser Preferences: {preferences}\n" if preferences else ""

    # Flatten the ranked-card list into "Card N: name; description" lines.
    ranked_cards_raw = state.get("ranked_cards", [])
    if isinstance(ranked_cards_raw, list):
        formatted_cards = "\n".join(
            f"Card {idx}: {card.get('name', 'Unnamed Card')}; {card.get('description', '')}"
            for idx, card in enumerate(ranked_cards_raw, 1)
        )
    else:
        # Non-list payloads (e.g. a pre-formatted string) are passed through.
        formatted_cards = str(ranked_cards_raw)
    ranked_cards = formatted_cards.strip()

    input_message = f"""## User Query: {full_query}
{selected_preferences}
Ranked Cards: {ranked_cards}

### Instructions:
You are given a user query and a list of ranked cards.
1. First, assess whether the provided card list is sufficient and relevant to confidently answer the user's query.
 - If the list is not relevant or lacks information, state that more information is needed.
2. If the list is relevant, follow these strict rules to select and explain the best one:
 1. Analyze the user's need from the given query.
 2. Select the best card based only on the details in the descriptions.
 3. Explain why it's the best choice (mention only what's explicitly written).
 4. Do not assume any benefit that is not stated.
 5. Use simple, structured output with no symbols like * or #.
 6. If the user asks for FD-based cards or is a beginner, assume all given cards are FD-based and choose the best.
"""

    messages_for_llm_input = [
        SystemMessage(content="You are a credit card recommendation agent."),
        HumanMessage(content=input_message),
    ]

    try:
        debug_print("AGENT", "Starting vLLM agent generation via ChatOpenAI...")
        # FIX: the sampling parameters were previously passed inside ``config=``,
        # but RunnableConfig has no "max_tokens"/"temperature"/"top_p" keys, so
        # they were silently ignored. BaseChatModel.ainvoke forwards extra
        # keyword arguments to the underlying generation call, so pass them as
        # model kwargs instead.
        response_obj = await llm1.ainvoke(
            messages_for_llm_input,
            max_tokens=512,
            temperature=0.7,
            top_p=0.9,
        )
        response_text = response_obj.content
        debug_print("AGENT", f"Decoded Response (truncated):\n{response_text[:500]}...")
    except Exception as e:
        # Broad catch is deliberate at this node boundary: any provider/transport
        # failure is converted into a user-visible error message rather than
        # crashing the graph.
        error_str = str(e)
        debug_print("ERROR", f"Error during generation in agent_node: {error_str}")
        return {
            "messages": [
                AIMessage(
                    content=f"Oops! An error occurred during AI generation: {error_str}",
                    additional_kwargs={"error": True},
                )
            ]
        }

    return {"messages": [AIMessage(content=response_text)]}