add tool call from fe
main.py CHANGED
@@ -363,6 +363,10 @@ class FollowupQueryModel(BaseModel):
     )
     conversation_id: str = Field(default_factory=lambda: str(uuid4()), description="Unique identifier for the conversation")
     user_id: str = Field(..., description="Unique identifier for the user")
+    tool_call: Literal["web", "news", "auto"] = Field(
+        default="auto",
+        description="Type of tool to call (web, news, auto)"
+    )
 
     class Config:
         schema_extra = {
@@ -370,7 +374,8 @@ class FollowupQueryModel(BaseModel):
                 "query": "How can I improve my productivity?",
                 "model_id": "openai/gpt-4o-mini",
                 "conversation_id": "123e4567-e89b-12d3-a456-426614174000",
-                "user_id": "user123"
+                "user_id": "user123",
+                "tool_call": "auto"
             }
         }
 
@@ -519,33 +524,45 @@ def followup_agent(query: FollowupQueryModel, background_tasks: BackgroundTasks,
 
     def process_response():
         full_response = ""
-        for content in chat_with_llama_stream(limited_conversation, model=query.model_id):
-            yield content
-            full_response += content
-
-        logger.info(f"LLM RAW response for query: {query.query}: {full_response}")
-        response_content, interact, tools = parse_followup_and_tools(full_response)
-
-        result = {
-            "clarification": interact,
-            "tools": tools
-        }
-
-        yield "<json>"+ json.dumps(result)+"</json>"
 
+        # Check if tool_call is specified and call the tool directly
+        if query.tool_call in ["web", "news"]:
+            search_query = query.query
+            search_response = search_assistant_api(search_query, query.tool_call, model=query.model_id)
+
+            yield "<report>"
+            for content in search_response():
+                yield content
+                full_response += content
+            yield "</report>"
+        else:
+            for content in chat_with_llama_stream(limited_conversation, model=query.model_id):
+                yield content
+                full_response += content
 
-
-
-
-
-
-
-
-
-
-
-
+            logger.info(f"LLM RAW response for query: {query.query}: {full_response}")
+            response_content, interact, tools = parse_followup_and_tools(full_response)
+
+            result = {
+                "clarification": interact,
+                "tools": tools
+            }
+
+            yield "<json>"+ json.dumps(result)+"</json>"
+
+
+            # Process tool if present
+            if tools and len(tools) > 0:
+                tool = tools[0]  # Assume only one tool is present
+                if tool["name"] in ["news", "web"]:
+                    search_query = tool["input"]
+                    search_response = search_assistant_api(search_query, tool["name"], model=query.model_id)
+
+                    yield "<report>"
+                    for content in search_response():
+                        yield content
+                        full_response += content
+                    yield "</report>"
 
     # Add the assistant's response to the conversation history
     conversations[query.conversation_id].append({"role": "assistant", "content": full_response})
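For reference, below is a minimal client-side sketch of how a frontend might exercise the new tool_call field and consume the streamed output. The endpoint URL and example payload values are assumptions (the diff shows the followup_agent handler but not its route); the field names and the <report>/<json> markers come from the change above.

import json
import re

import requests

# Hypothetical URL: the route for followup_agent is not shown in the diff,
# so adjust host and path to match the running app.
URL = "http://localhost:8000/followup-agent"

payload = {
    "query": "How can I improve my productivity?",
    "model_id": "openai/gpt-4o-mini",
    "conversation_id": "123e4567-e89b-12d3-a456-426614174000",
    "user_id": "user123",
    # New in this commit: "web" or "news" forces a direct search call,
    # while "auto" (the default) keeps the original LLM-driven flow.
    "tool_call": "news",
}

# Stream the response and accumulate the raw text.
with requests.post(URL, json=payload, stream=True) as resp:
    resp.raise_for_status()
    chunks = []
    for chunk in resp.iter_content(chunk_size=1024):
        if chunk:
            chunks.append(chunk.decode("utf-8"))
    full_text = "".join(chunks)

# With tool_call="web"/"news" the search report arrives wrapped in <report> tags;
# on the "auto" path the clarification/tools payload arrives inside <json> tags.
report = re.search(r"<report>(.*?)</report>", full_text, re.DOTALL)
meta = re.search(r"<json>(.*?)</json>", full_text, re.DOTALL)
if report:
    print("Report:", report.group(1)[:200], "...")
if meta:
    print("Parsed result:", json.loads(meta.group(1)))

Sending "tool_call": "auto" (or omitting it, since it is the default) preserves the previous behaviour, so existing clients should not need changes.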