Spaces:
Running
Running
SearchGPT: Enhance. #2
Browse files- config.py +159 -19
- src/engine/browser_engine.py +2 -2
- src/processor/message_processor.py +10 -350
- src/processor/reasoning/__init__.py +12 -0
- src/processor/reasoning/interface.py +9 -0
- src/processor/reasoning/tool_reasoning.py +38 -0
- src/processor/response/__init__.py +14 -0
- src/processor/response/formatter.py +26 -0
- src/processor/response/generator.py +50 -0
- src/processor/response/setup.py +22 -0
- src/processor/tools/__init__.py +14 -0
- src/processor/tools/executor.py +16 -0
- src/processor/tools/interaction.py +121 -0
- src/processor/tools/parser.py +17 -0
config.py
CHANGED
|
@@ -13,7 +13,7 @@ BAIDU_ENDPOINT = "https://www.baidu.com/s"
|
|
| 13 |
READER_ENDPOINT = "https://r.jina.ai/"
|
| 14 |
REQUEST_TIMEOUT = 300 # 5 minute
|
| 15 |
|
| 16 |
-
|
| 17 |
You are ChatGPT, an AI assistant with mandatory real-time web search, URL content extraction, knowledge validation, and professional summarization capabilities.
|
| 18 |
|
| 19 |
Your absolute rule:
|
|
@@ -70,6 +70,9 @@ Execution Workflow:
|
|
| 70 |
Critical Instruction:
|
| 71 |
- Every new query or request must trigger a `web_search`.
|
| 72 |
- You must not generate answers from prior knowledge, conversation history, or cached data.
|
|
|
|
|
|
|
|
|
|
| 73 |
- If tools fail, you must state explicitly that no valid data could be retrieved.
|
| 74 |
"""
|
| 75 |
|
|
@@ -81,7 +84,7 @@ CONTENT_EXTRACTION = """
|
|
| 81 |
- Evaluate credibility of sources, highlight potential biases or conflicts
|
| 82 |
- Produce a structured, professional, and comprehensive summary
|
| 83 |
- Emphasize clarity, accuracy, and logical flow
|
| 84 |
-
- Include all discovered URLs in the final summary as [Source
|
| 85 |
- Mark any uncertainties, contradictions, or missing information clearly
|
| 86 |
</system>
|
| 87 |
"""
|
|
@@ -98,24 +101,143 @@ SEARCH_SELECTION = """
|
|
| 98 |
</system>
|
| 99 |
"""
|
| 100 |
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
<br><br>
|
| 104 |
-
This Space implements an agent-based system with <b><a href="https://www.gradio.app" target="_blank">Gradio</a></b>. It is integrated with
|
| 105 |
-
<b><a href="https://docs.searxng.org" target="_blank">SearXNG</a></b>, which is then converted into a script tool or function for native execution.
|
| 106 |
-
<br><br>
|
| 107 |
-
The agent mode is inspired by the <b><a href="https://openwebui.com/t/hadad/deep_research" target="_blank">Deep Research</a></b> from
|
| 108 |
-
<b><a href="https://docs.openwebui.com" target="_blank">OpenWebUI</a></b> tools script.
|
| 109 |
-
<br><br>
|
| 110 |
-
The <b>Deep Research</b> feature is also available on the primary Spaces of <b><a href="https://umint-openwebui.hf.space"
|
| 111 |
-
target="_blank">UltimaX Intelligence</a></b>.
|
| 112 |
-
<br><br>
|
| 113 |
-
Please consider reading the <b><a href="https://huggingface.co/spaces/umint/ai/discussions/37#68b55209c51ca52ed299db4c"
|
| 114 |
-
target="_blank">Terms of Use and Consequences of Violation</a></b> if you wish to proceed to the main Spaces.
|
| 115 |
-
<br><br>
|
| 116 |
-
<b>Like this project? Feel free to buy me a <a href="https://ko-fi.com/hadad" target="_blank">coffee</a></b>.
|
| 117 |
"""
|
| 118 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
OS = [
|
| 120 |
"Windows NT 10.0; Win64; x64",
|
| 121 |
"Macintosh; Intel Mac OS X 10_15_7",
|
|
@@ -305,4 +427,22 @@ TIMEZONES = [
|
|
| 305 |
"Africa/Nairobi",
|
| 306 |
"Pacific/Auckland",
|
| 307 |
"Pacific/Honolulu"
|
| 308 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
READER_ENDPOINT = "https://r.jina.ai/"
|
| 14 |
REQUEST_TIMEOUT = 300 # 5 minute
|
| 15 |
|
| 16 |
+
INSTRUCTIONS_START = """
|
| 17 |
You are ChatGPT, an AI assistant with mandatory real-time web search, URL content extraction, knowledge validation, and professional summarization capabilities.
|
| 18 |
|
| 19 |
Your absolute rule:
|
|
|
|
| 70 |
Critical Instruction:
|
| 71 |
- Every new query or request must trigger a `web_search`.
|
| 72 |
- You must not generate answers from prior knowledge, conversation history, or cached data.
|
| 73 |
+
- Always use Markdown format for URL sources with [Source Title](URL).
|
| 74 |
+
- Replace "Source Title" with the original name of the source.
|
| 75 |
+
- Replace "URL" with the original source link.
|
| 76 |
- If tools fail, you must state explicitly that no valid data could be retrieved.
|
| 77 |
"""
|
| 78 |
|
|
|
|
| 84 |
- Evaluate credibility of sources, highlight potential biases or conflicts
|
| 85 |
- Produce a structured, professional, and comprehensive summary
|
| 86 |
- Emphasize clarity, accuracy, and logical flow
|
| 87 |
+
- Include all discovered URLs in the final summary as [Source Title](URL)
|
| 88 |
- Mark any uncertainties, contradictions, or missing information clearly
|
| 89 |
</system>
|
| 90 |
"""
|
|
|
|
| 101 |
</system>
|
| 102 |
"""
|
| 103 |
|
| 104 |
+
INSTRUCTIONS_END = """
|
| 105 |
+
You have just executed tools and obtained results. You MUST now provide a comprehensive answer based ONLY on the tool results.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
"""
|
| 107 |
|
| 108 |
+
REASONING_STEPS = {
|
| 109 |
+
"web_search": {
|
| 110 |
+
"parsing": (
|
| 111 |
+
"I need to search for information about: {query}<br><br>"
|
| 112 |
+
"I'm analyzing the user's request and preparing to execute a web search. "
|
| 113 |
+
"The query I've identified is comprehensive and should yield relevant results. "
|
| 114 |
+
"I will use the {engine} search engine for this task as it provides reliable and up-to-date information.<br><br>"
|
| 115 |
+
"I'm now parsing the search parameters to ensure they are correctly formatted. "
|
| 116 |
+
"The search query has been validated and I'm checking that all required fields are present. "
|
| 117 |
+
"I need to make sure the search engine parameter is valid and supported by our system.<br><br>"
|
| 118 |
+
"I'm preparing the search request with the following configuration:<br>"
|
| 119 |
+
"- Search Query: {query}<br>"
|
| 120 |
+
"- Search Engine: {engine}<br><br>"
|
| 121 |
+
"I'm verifying that the network connection is stable and that the search service is accessible. "
|
| 122 |
+
"All preliminary checks have been completed successfully."
|
| 123 |
+
),
|
| 124 |
+
"executing": (
|
| 125 |
+
"I'm now executing the web search for: {query}<br><br>"
|
| 126 |
+
"I'm connecting to the {engine} search service and sending the search request. "
|
| 127 |
+
"The connection has been established successfully and I'm waiting for the search results. "
|
| 128 |
+
"I'm processing multiple search result pages to gather comprehensive information.<br><br>"
|
| 129 |
+
"I'm analyzing the search results to identify the most relevant and authoritative sources. "
|
| 130 |
+
"The search engine is returning results and I'm filtering them based on relevance scores. "
|
| 131 |
+
"I'm extracting key information from each search result including titles, snippets, and URLs.<br><br>"
|
| 132 |
+
"I'm organizing the search results in order of relevance and checking for duplicate content. "
|
| 133 |
+
"The search process is progressing smoothly and I'm collecting valuable information. "
|
| 134 |
+
"I'm also verifying the credibility of the sources to ensure high-quality information.<br><br>"
|
| 135 |
+
"Current status: Processing search results...<br>"
|
| 136 |
+
"Results found: Multiple relevant sources identified<br>"
|
| 137 |
+
"Quality assessment: High relevance detected"
|
| 138 |
+
),
|
| 139 |
+
"completed": (
|
| 140 |
+
"I have successfully completed the web search for: {query}<br><br>"
|
| 141 |
+
"I've retrieved comprehensive search results from {engine} and analyzed all the information. "
|
| 142 |
+
"The search yielded multiple relevant results that directly address the user's query. "
|
| 143 |
+
"I've extracted the most important information and organized it for processing.<br><br>"
|
| 144 |
+
"I've identified several high-quality sources with authoritative information. "
|
| 145 |
+
"The search results include recent and up-to-date content that is highly relevant. "
|
| 146 |
+
"I've filtered out any duplicate or low-quality results to ensure accuracy.<br><br>"
|
| 147 |
+
"I'm now processing the collected information to formulate a comprehensive response. "
|
| 148 |
+
"The search results provide sufficient detail to answer the user's question thoroughly. "
|
| 149 |
+
"I've verified the credibility of the sources and cross-referenced the information.<br><br>"
|
| 150 |
+
"Search Summary:<br>"
|
| 151 |
+
"- Total results processed: Multiple pages<br>"
|
| 152 |
+
"- Relevance score: High<br>"
|
| 153 |
+
"- Information quality: Verified and accurate<br>"
|
| 154 |
+
"- Sources: Authoritative and recent<br><br>"
|
| 155 |
+
"Preview of results:<br>{preview}"
|
| 156 |
+
),
|
| 157 |
+
"error": (
|
| 158 |
+
"I encountered an issue while attempting to search for: {query}<br><br>"
|
| 159 |
+
"I tried to execute the web search but encountered an unexpected error. "
|
| 160 |
+
"The error occurred during the search process and I need to handle it appropriately. "
|
| 161 |
+
"I'm analyzing the error to understand what went wrong and how to proceed.<br><br>"
|
| 162 |
+
"Error details: {error}<br><br>"
|
| 163 |
+
"I'm attempting to diagnose the issue and considering alternative approaches. "
|
| 164 |
+
"The error might be due to network connectivity, service availability, or parameter issues. "
|
| 165 |
+
"I will try to recover from this error and provide the best possible response.<br><br>"
|
| 166 |
+
"I'm evaluating whether I can retry the search with modified parameters. "
|
| 167 |
+
"If the search cannot be completed, I will use my existing knowledge to help the user. "
|
| 168 |
+
"I'm committed to providing valuable assistance despite this technical challenge."
|
| 169 |
+
)
|
| 170 |
+
},
|
| 171 |
+
"read_url": {
|
| 172 |
+
"parsing": (
|
| 173 |
+
"I need to read and extract content from the URL: {url}<br><br>"
|
| 174 |
+
"I'm analyzing the URL structure to ensure it's valid and accessible. "
|
| 175 |
+
"The URL appears to be properly formatted and I'm preparing to fetch its content. "
|
| 176 |
+
"I will extract the main content from this webpage to gather detailed information.<br><br>"
|
| 177 |
+
"I'm validating the URL protocol and checking if it uses HTTP or HTTPS. "
|
| 178 |
+
"The domain seems legitimate and I'm preparing the request headers. "
|
| 179 |
+
"I need to ensure that the website allows automated content extraction.<br><br>"
|
| 180 |
+
"I'm configuring the content extraction parameters:<br>"
|
| 181 |
+
"- Target URL: {url}<br>"
|
| 182 |
+
"- Extraction Method: Full content parsing<br>"
|
| 183 |
+
"- Content Type: HTML/Text<br>"
|
| 184 |
+
"- Encoding: Auto-detect<br><br>"
|
| 185 |
+
"I'm checking if the website requires any special handling or authentication. "
|
| 186 |
+
"All preliminary validation checks have been completed successfully."
|
| 187 |
+
),
|
| 188 |
+
"executing": (
|
| 189 |
+
"I'm now accessing the URL: {url}<br><br>"
|
| 190 |
+
"I'm establishing a connection to the web server and sending the HTTP request. "
|
| 191 |
+
"The connection is being established and I'm waiting for the server response. "
|
| 192 |
+
"I'm following any redirects if necessary to reach the final destination.<br><br>"
|
| 193 |
+
"I'm downloading the webpage content and checking the response status code. "
|
| 194 |
+
"The server is responding and I'm receiving the HTML content. "
|
| 195 |
+
"I'm monitoring the download progress and ensuring data integrity.<br><br>"
|
| 196 |
+
"I'm parsing the HTML structure to extract the main content. "
|
| 197 |
+
"I'm identifying and removing navigation elements, advertisements, and other non-content sections. "
|
| 198 |
+
"I'm focusing on extracting the primary article or information content.<br><br>"
|
| 199 |
+
"Current status: Extracting content...<br>"
|
| 200 |
+
"Response received: Processing HTML<br>"
|
| 201 |
+
"Content extraction: In progress"
|
| 202 |
+
),
|
| 203 |
+
"completed": (
|
| 204 |
+
"I have successfully extracted content from: {url}<br><br>"
|
| 205 |
+
"I've retrieved the complete webpage content and processed it thoroughly. "
|
| 206 |
+
"The extraction was successful and I've obtained the main textual content. "
|
| 207 |
+
"I've cleaned the content by removing unnecessary HTML tags and formatting.<br><br>"
|
| 208 |
+
"I've identified the main article or information section of the webpage. "
|
| 209 |
+
"The content has been properly parsed and structured for analysis. "
|
| 210 |
+
"I've preserved important information while filtering out irrelevant elements.<br><br>"
|
| 211 |
+
"I'm now analyzing the extracted content to understand its context and relevance. "
|
| 212 |
+
"The information appears to be comprehensive and directly related to the topic. "
|
| 213 |
+
"I've verified that the content is complete and hasn't been truncated.<br><br>"
|
| 214 |
+
"Extraction Summary:<br>"
|
| 215 |
+
"- Content length: Substantial<br>"
|
| 216 |
+
"- Extraction quality: High<br>"
|
| 217 |
+
"- Content type: Article/Information<br>"
|
| 218 |
+
"- Processing status: Complete<br><br>"
|
| 219 |
+
"Preview of extracted content:<br>{preview}"
|
| 220 |
+
),
|
| 221 |
+
"error": (
|
| 222 |
+
"I encountered an issue while trying to access: {url}<br><br>"
|
| 223 |
+
"I attempted to fetch the webpage content but encountered an error. "
|
| 224 |
+
"The error prevented me from successfully extracting the information. "
|
| 225 |
+
"I'm analyzing the error to understand the cause and find a solution.<br><br>"
|
| 226 |
+
"Error details: {error}<br><br>"
|
| 227 |
+
"I'm considering possible causes such as network issues, access restrictions, or invalid URLs. "
|
| 228 |
+
"The website might be blocking automated access or the URL might be incorrect. "
|
| 229 |
+
"I will try to work around this limitation and provide alternative assistance.<br><br>"
|
| 230 |
+
"I'm evaluating whether I can access the content through alternative methods. "
|
| 231 |
+
"If direct access isn't possible, I'll use my knowledge to help with the query. "
|
| 232 |
+
"I remain committed to providing useful information despite this obstacle."
|
| 233 |
+
)
|
| 234 |
+
}
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
REASONING_DEFAULT = "I'm processing the tool execution request..."
|
| 238 |
+
|
| 239 |
+
REASONING_DELAY = 0.15 # 150 ms
|
| 240 |
+
|
| 241 |
OS = [
|
| 242 |
"Windows NT 10.0; Win64; x64",
|
| 243 |
"Macintosh; Intel Mac OS X 10_15_7",
|
|
|
|
| 427 |
"Africa/Nairobi",
|
| 428 |
"Pacific/Auckland",
|
| 429 |
"Pacific/Honolulu"
|
| 430 |
+
]
|
| 431 |
+
|
| 432 |
+
DESCRIPTION = """
|
| 433 |
+
<b>SearchGPT</b> is <b>ChatGPT</b> with real-time web search capabilities and the ability to read content directly from a URL.
|
| 434 |
+
<br><br>
|
| 435 |
+
This Space implements an agent-based system with <b><a href="https://www.gradio.app" target="_blank">Gradio</a></b>. It is integrated with
|
| 436 |
+
<b><a href="https://docs.searxng.org" target="_blank">SearXNG</a></b>, which is then converted into a script tool or function for native execution.
|
| 437 |
+
<br><br>
|
| 438 |
+
The agent mode is inspired by the <b><a href="https://openwebui.com/t/hadad/deep_research" target="_blank">Deep Research</a></b> from
|
| 439 |
+
<b><a href="https://docs.openwebui.com" target="_blank">OpenWebUI</a></b> tools script.
|
| 440 |
+
<br><br>
|
| 441 |
+
The <b>Deep Research</b> feature is also available on the primary Spaces of <b><a href="https://umint-openwebui.hf.space"
|
| 442 |
+
target="_blank">UltimaX Intelligence</a></b>.
|
| 443 |
+
<br><br>
|
| 444 |
+
Please consider reading the <b><a href="https://huggingface.co/spaces/umint/ai/discussions/37#68b55209c51ca52ed299db4c"
|
| 445 |
+
target="_blank">Terms of Use and Consequences of Violation</a></b> if you wish to proceed to the main Spaces.
|
| 446 |
+
<br><br>
|
| 447 |
+
<b>Like this project? Feel free to buy me a <a href="https://ko-fi.com/hadad" target="_blank">coffee</a></b>.
|
| 448 |
+
""" # Gradio
|
src/engine/browser_engine.py
CHANGED
|
@@ -59,7 +59,7 @@ class BrowserEngine:
|
|
| 59 |
)
|
| 60 |
request_response.raise_for_status()
|
| 61 |
extracted_content = request_response.text
|
| 62 |
-
return f"{extracted_content}{CONTENT_EXTRACTION}"
|
| 63 |
except Exception as error:
|
| 64 |
return f"Error reading URL: {str(error)}"
|
| 65 |
|
|
@@ -83,6 +83,6 @@ class BrowserEngine:
|
|
| 83 |
)
|
| 84 |
search_response.raise_for_status()
|
| 85 |
search_results = search_response.text
|
| 86 |
-
return f"{search_results}{SEARCH_SELECTION}"
|
| 87 |
except Exception as error:
|
| 88 |
return f"Error during search: {str(error)}"
|
|
|
|
| 59 |
)
|
| 60 |
request_response.raise_for_status()
|
| 61 |
extracted_content = request_response.text
|
| 62 |
+
return f"{extracted_content}\n\n\n{CONTENT_EXTRACTION}"
|
| 63 |
except Exception as error:
|
| 64 |
return f"Error reading URL: {str(error)}"
|
| 65 |
|
|
|
|
| 83 |
)
|
| 84 |
search_response.raise_for_status()
|
| 85 |
search_results = search_response.text
|
| 86 |
+
return f"{search_results}\n\n\n{SEARCH_SELECTION}"
|
| 87 |
except Exception as error:
|
| 88 |
return f"Error during search: {str(error)}"
|
src/processor/message_processor.py
CHANGED
|
@@ -3,359 +3,15 @@
|
|
| 3 |
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
#
|
| 5 |
|
| 6 |
-
import json
|
| 7 |
import traceback
|
| 8 |
-
import
|
| 9 |
-
from openai import OpenAI
|
| 10 |
-
from config import MODEL, INSTRUCTIONS
|
| 11 |
from src.core.web_configuration import WebConfiguration
|
| 12 |
from src.engine.browser_engine import BrowserEngine
|
| 13 |
from src.tools.tool_manager import construct_tool_definitions
|
| 14 |
from src.client.openai_client import initialize_client
|
| 15 |
-
from
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
history = []
|
| 19 |
-
|
| 20 |
-
if system_instruction:
|
| 21 |
-
history.append({"role": "system", "content": system_instruction})
|
| 22 |
-
|
| 23 |
-
if isinstance(conversation_history, list):
|
| 24 |
-
for history_item in conversation_history:
|
| 25 |
-
message_role = history_item.get("role")
|
| 26 |
-
message_content = history_item.get("content")
|
| 27 |
-
if message_role in ("user", "assistant") and isinstance(message_content, str):
|
| 28 |
-
history.append({"role": message_role, "content": message_content})
|
| 29 |
-
|
| 30 |
-
if isinstance(user_input, str) and user_input.strip():
|
| 31 |
-
history.append({"role": "user", "content": user_input})
|
| 32 |
-
|
| 33 |
-
return history
|
| 34 |
-
|
| 35 |
-
def extract_tool_parameters(raw_parameters, fallback_engine="google"):
|
| 36 |
-
try:
|
| 37 |
-
parsed_params = json.loads(raw_parameters or "{}")
|
| 38 |
-
if "engine" in parsed_params and parsed_params["engine"] not in ["google", "bing", "baidu"]:
|
| 39 |
-
parsed_params["engine"] = fallback_engine
|
| 40 |
-
if "engine" not in parsed_params:
|
| 41 |
-
parsed_params["engine"] = fallback_engine
|
| 42 |
-
return parsed_params, None
|
| 43 |
-
except Exception as parse_error:
|
| 44 |
-
return None, f"Invalid tool arguments: {str(parse_error)}"
|
| 45 |
-
|
| 46 |
-
def assistant_response(response_message):
|
| 47 |
-
extracted_tool_calls = []
|
| 48 |
-
|
| 49 |
-
if getattr(response_message, "tool_calls", None):
|
| 50 |
-
for tool_call in response_message.tool_calls:
|
| 51 |
-
extracted_tool_calls.append(
|
| 52 |
-
{
|
| 53 |
-
"id": tool_call.id,
|
| 54 |
-
"type": "function",
|
| 55 |
-
"function": {
|
| 56 |
-
"name": tool_call.function.name,
|
| 57 |
-
"arguments": tool_call.function.arguments
|
| 58 |
-
}
|
| 59 |
-
}
|
| 60 |
-
)
|
| 61 |
-
|
| 62 |
-
return {
|
| 63 |
-
"role": "assistant",
|
| 64 |
-
"content": response_message.content or "",
|
| 65 |
-
"tool_calls": extracted_tool_calls if extracted_tool_calls else None
|
| 66 |
-
}
|
| 67 |
-
|
| 68 |
-
def invoke_tool_function(search_engine, function_name, function_params):
|
| 69 |
-
if function_name == "web_search":
|
| 70 |
-
return search_engine.perform_search(
|
| 71 |
-
search_query=function_params.get("query", ""),
|
| 72 |
-
search_provider=function_params.get("engine", "google")
|
| 73 |
-
)
|
| 74 |
-
if function_name == "read_url":
|
| 75 |
-
return search_engine.extract_page_content(
|
| 76 |
-
target_url=function_params.get("url", "")
|
| 77 |
-
)
|
| 78 |
-
return f"Unknown tool: {function_name}"
|
| 79 |
-
|
| 80 |
-
def generate_response(server, model_name, conversation_messages, tool_definitions):
|
| 81 |
-
response_generator = ""
|
| 82 |
-
|
| 83 |
-
try:
|
| 84 |
-
response = server.chat.completions.create(
|
| 85 |
-
model=model_name,
|
| 86 |
-
messages=conversation_messages,
|
| 87 |
-
tools=tool_definitions,
|
| 88 |
-
tool_choice="none",
|
| 89 |
-
temperature=0.75,
|
| 90 |
-
stream=True
|
| 91 |
-
)
|
| 92 |
-
|
| 93 |
-
for data in response:
|
| 94 |
-
try:
|
| 95 |
-
raw_data = data.choices[0].delta.content or ""
|
| 96 |
-
except Exception:
|
| 97 |
-
raw_data = ""
|
| 98 |
-
|
| 99 |
-
if raw_data:
|
| 100 |
-
response_generator += raw_data
|
| 101 |
-
yield response_generator
|
| 102 |
-
|
| 103 |
-
yield response_generator
|
| 104 |
-
|
| 105 |
-
except Exception as response_error:
|
| 106 |
-
response_generator += f"\nError: {str(response_error)}\n"
|
| 107 |
-
response_generator += traceback.format_exc()
|
| 108 |
-
yield response_generator
|
| 109 |
-
|
| 110 |
-
def typing_effect(text, current_length=0):
|
| 111 |
-
if current_length < len(text):
|
| 112 |
-
return text[:current_length]
|
| 113 |
-
return text
|
| 114 |
-
|
| 115 |
-
def tool_reasoning(tool_name, tool_arguments, stage, error=None, result=None):
|
| 116 |
-
if tool_name == "web_search":
|
| 117 |
-
query = tool_arguments.get("query", "") if tool_arguments else ""
|
| 118 |
-
engine = tool_arguments.get("engine", "google") if tool_arguments else "google"
|
| 119 |
-
|
| 120 |
-
if stage == "parsing":
|
| 121 |
-
return (
|
| 122 |
-
f"I need to search for information about: {query}<br><br>"
|
| 123 |
-
f"I'm analyzing the user's request and preparing to execute a web search. "
|
| 124 |
-
f"The query I've identified is comprehensive and should yield relevant results. "
|
| 125 |
-
f"I will use the {engine} search engine for this task as it provides reliable and up-to-date information.<br><br>"
|
| 126 |
-
f"I'm now parsing the search parameters to ensure they are correctly formatted. "
|
| 127 |
-
f"The search query has been validated and I'm checking that all required fields are present. "
|
| 128 |
-
f"I need to make sure the search engine parameter is valid and supported by our system.<br><br>"
|
| 129 |
-
f"I'm preparing the search request with the following configuration:<br>"
|
| 130 |
-
f"- Search Query: {query}<br>"
|
| 131 |
-
f"- Search Engine: {engine}<br><br>"
|
| 132 |
-
f"I'm verifying that the network connection is stable and that the search service is accessible. "
|
| 133 |
-
f"All preliminary checks have been completed successfully."
|
| 134 |
-
)
|
| 135 |
-
elif stage == "executing":
|
| 136 |
-
return (
|
| 137 |
-
f"I'm now executing the web search for: {query}<br><br>"
|
| 138 |
-
f"I'm connecting to the {engine} search service and sending the search request. "
|
| 139 |
-
f"The connection has been established successfully and I'm waiting for the search results. "
|
| 140 |
-
f"I'm processing multiple search result pages to gather comprehensive information.<br><br>"
|
| 141 |
-
f"I'm analyzing the search results to identify the most relevant and authoritative sources. "
|
| 142 |
-
f"The search engine is returning results and I'm filtering them based on relevance scores. "
|
| 143 |
-
f"I'm extracting key information from each search result including titles, snippets, and URLs.<br><br>"
|
| 144 |
-
f"I'm organizing the search results in order of relevance and checking for duplicate content. "
|
| 145 |
-
f"The search process is progressing smoothly and I'm collecting valuable information. "
|
| 146 |
-
f"I'm also verifying the credibility of the sources to ensure high-quality information.<br><br>"
|
| 147 |
-
f"Current status: Processing search results...<br>"
|
| 148 |
-
f"Results found: Multiple relevant sources identified<br>"
|
| 149 |
-
f"Quality assessment: High relevance detected"
|
| 150 |
-
)
|
| 151 |
-
elif stage == "completed":
|
| 152 |
-
preview = result[:300] + "..." if result and len(result) > 300 else result
|
| 153 |
-
return (
|
| 154 |
-
f"I have successfully completed the web search for: {query}<br><br>"
|
| 155 |
-
f"I've retrieved comprehensive search results from {engine} and analyzed all the information. "
|
| 156 |
-
f"The search yielded multiple relevant results that directly address the user's query. "
|
| 157 |
-
f"I've extracted the most important information and organized it for processing.<br><br>"
|
| 158 |
-
f"I've identified several high-quality sources with authoritative information. "
|
| 159 |
-
f"The search results include recent and up-to-date content that is highly relevant. "
|
| 160 |
-
f"I've filtered out any duplicate or low-quality results to ensure accuracy.<br><br>"
|
| 161 |
-
f"I'm now processing the collected information to formulate a comprehensive response. "
|
| 162 |
-
f"The search results provide sufficient detail to answer the user's question thoroughly. "
|
| 163 |
-
f"I've verified the credibility of the sources and cross-referenced the information.<br><br>"
|
| 164 |
-
f"Search Summary:<br>"
|
| 165 |
-
f"- Total results processed: Multiple pages<br>"
|
| 166 |
-
f"- Relevance score: High<br>"
|
| 167 |
-
f"- Information quality: Verified and accurate<br>"
|
| 168 |
-
f"- Sources: Authoritative and recent<br><br>"
|
| 169 |
-
f"Preview of results:<br>{preview}"
|
| 170 |
-
)
|
| 171 |
-
elif stage == "error":
|
| 172 |
-
return (
|
| 173 |
-
f"I encountered an issue while attempting to search for: {query}<br><br>"
|
| 174 |
-
f"I tried to execute the web search but encountered an unexpected error. "
|
| 175 |
-
f"The error occurred during the search process and I need to handle it appropriately. "
|
| 176 |
-
f"I'm analyzing the error to understand what went wrong and how to proceed.<br><br>"
|
| 177 |
-
f"Error details: {error}<br><br>"
|
| 178 |
-
f"I'm attempting to diagnose the issue and considering alternative approaches. "
|
| 179 |
-
f"The error might be due to network connectivity, service availability, or parameter issues. "
|
| 180 |
-
f"I will try to recover from this error and provide the best possible response.<br><br>"
|
| 181 |
-
f"I'm evaluating whether I can retry the search with modified parameters. "
|
| 182 |
-
f"If the search cannot be completed, I will use my existing knowledge to help the user. "
|
| 183 |
-
f"I'm committed to providing valuable assistance despite this technical challenge."
|
| 184 |
-
)
|
| 185 |
-
|
| 186 |
-
elif tool_name == "read_url":
|
| 187 |
-
url = tool_arguments.get("url", "") if tool_arguments else ""
|
| 188 |
-
|
| 189 |
-
if stage == "parsing":
|
| 190 |
-
return (
|
| 191 |
-
f"I need to read and extract content from the URL: {url}<br><br>"
|
| 192 |
-
f"I'm analyzing the URL structure to ensure it's valid and accessible. "
|
| 193 |
-
f"The URL appears to be properly formatted and I'm preparing to fetch its content. "
|
| 194 |
-
f"I will extract the main content from this webpage to gather detailed information.<br><br>"
|
| 195 |
-
f"I'm validating the URL protocol and checking if it uses HTTP or HTTPS. "
|
| 196 |
-
f"The domain seems legitimate and I'm preparing the request headers. "
|
| 197 |
-
f"I need to ensure that the website allows automated content extraction.<br><br>"
|
| 198 |
-
f"I'm configuring the content extraction parameters:<br>"
|
| 199 |
-
f"- Target URL: {url}<br>"
|
| 200 |
-
f"- Extraction Method: Full content parsing<br>"
|
| 201 |
-
f"- Content Type: HTML/Text<br>"
|
| 202 |
-
f"- Encoding: Auto-detect<br><br>"
|
| 203 |
-
f"I'm checking if the website requires any special handling or authentication. "
|
| 204 |
-
f"All preliminary validation checks have been completed successfully."
|
| 205 |
-
)
|
| 206 |
-
elif stage == "executing":
|
| 207 |
-
return (
|
| 208 |
-
f"I'm now accessing the URL: {url}<br><br>"
|
| 209 |
-
f"I'm establishing a connection to the web server and sending the HTTP request. "
|
| 210 |
-
f"The connection is being established and I'm waiting for the server response. "
|
| 211 |
-
f"I'm following any redirects if necessary to reach the final destination.<br><br>"
|
| 212 |
-
f"I'm downloading the webpage content and checking the response status code. "
|
| 213 |
-
f"The server is responding and I'm receiving the HTML content. "
|
| 214 |
-
f"I'm monitoring the download progress and ensuring data integrity.<br><br>"
|
| 215 |
-
f"I'm parsing the HTML structure to extract the main content. "
|
| 216 |
-
f"I'm identifying and removing navigation elements, advertisements, and other non-content sections. "
|
| 217 |
-
f"I'm focusing on extracting the primary article or information content.<br><br>"
|
| 218 |
-
f"Current status: Extracting content...<br>"
|
| 219 |
-
f"Response received: Processing HTML<br>"
|
| 220 |
-
f"Content extraction: In progress"
|
| 221 |
-
)
|
| 222 |
-
elif stage == "completed":
|
| 223 |
-
preview = result[:300] + "..." if result and len(result) > 300 else result
|
| 224 |
-
return (
|
| 225 |
-
f"I have successfully extracted content from: {url}<br><br>"
|
| 226 |
-
f"I've retrieved the complete webpage content and processed it thoroughly. "
|
| 227 |
-
f"The extraction was successful and I've obtained the main textual content. "
|
| 228 |
-
f"I've cleaned the content by removing unnecessary HTML tags and formatting.<br><br>"
|
| 229 |
-
f"I've identified the main article or information section of the webpage. "
|
| 230 |
-
f"The content has been properly parsed and structured for analysis. "
|
| 231 |
-
f"I've preserved important information while filtering out irrelevant elements.<br><br>"
|
| 232 |
-
f"I'm now analyzing the extracted content to understand its context and relevance. "
|
| 233 |
-
f"The information appears to be comprehensive and directly related to the topic. "
|
| 234 |
-
f"I've verified that the content is complete and hasn't been truncated.<br><br>"
|
| 235 |
-
f"Extraction Summary:<br>"
|
| 236 |
-
f"- Content length: Substantial<br>"
|
| 237 |
-
f"- Extraction quality: High<br>"
|
| 238 |
-
f"- Content type: Article/Information<br>"
|
| 239 |
-
f"- Processing status: Complete<br><br>"
|
| 240 |
-
f"Preview of extracted content:<br>{preview}"
|
| 241 |
-
)
|
| 242 |
-
elif stage == "error":
|
| 243 |
-
return (
|
| 244 |
-
f"I encountered an issue while trying to access: {url}<br><br>"
|
| 245 |
-
f"I attempted to fetch the webpage content but encountered an error. "
|
| 246 |
-
f"The error prevented me from successfully extracting the information. "
|
| 247 |
-
f"I'm analyzing the error to understand the cause and find a solution.<br><br>"
|
| 248 |
-
f"Error details: {error}<br><br>"
|
| 249 |
-
f"I'm considering possible causes such as network issues, access restrictions, or invalid URLs. "
|
| 250 |
-
f"The website might be blocking automated access or the URL might be incorrect. "
|
| 251 |
-
f"I will try to work around this limitation and provide alternative assistance.<br><br>"
|
| 252 |
-
f"I'm evaluating whether I can access the content through alternative methods. "
|
| 253 |
-
f"If direct access isn't possible, I'll use my knowledge to help with the query. "
|
| 254 |
-
f"I remain committed to providing useful information despite this obstacle."
|
| 255 |
-
)
|
| 256 |
-
|
| 257 |
-
return "I'm processing the tool execution request..."
|
| 258 |
-
|
| 259 |
-
def process_tool_interactions(server, model_name, conversation_messages, tool_definitions, search_engine):
|
| 260 |
-
maximum_iterations = 1
|
| 261 |
-
logs_generator = ""
|
| 262 |
-
|
| 263 |
-
for iteration_index in range(maximum_iterations):
|
| 264 |
-
try:
|
| 265 |
-
model_response = server.chat.completions.create(
|
| 266 |
-
model=model_name,
|
| 267 |
-
messages=conversation_messages,
|
| 268 |
-
tools=tool_definitions,
|
| 269 |
-
tool_choice="auto",
|
| 270 |
-
temperature=0.6
|
| 271 |
-
)
|
| 272 |
-
except Exception:
|
| 273 |
-
return conversation_messages, logs_generator
|
| 274 |
-
|
| 275 |
-
response_choice = model_response.choices[0]
|
| 276 |
-
assistant_message = response_choice.message
|
| 277 |
-
formatted_assistant_message = assistant_response(assistant_message)
|
| 278 |
-
|
| 279 |
-
conversation_messages.append(
|
| 280 |
-
{
|
| 281 |
-
"role": formatted_assistant_message["role"],
|
| 282 |
-
"content": formatted_assistant_message["content"],
|
| 283 |
-
"tool_calls": formatted_assistant_message["tool_calls"]
|
| 284 |
-
}
|
| 285 |
-
)
|
| 286 |
-
|
| 287 |
-
pending_tool_calls = assistant_message.tool_calls or []
|
| 288 |
-
if not pending_tool_calls:
|
| 289 |
-
if logs_generator:
|
| 290 |
-
logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
|
| 291 |
-
return conversation_messages, logs_generator
|
| 292 |
-
|
| 293 |
-
for tool_invocation in pending_tool_calls:
|
| 294 |
-
tool_name = tool_invocation.function.name
|
| 295 |
-
tool_arguments_raw = tool_invocation.function.arguments
|
| 296 |
-
|
| 297 |
-
extracted_arguments, extraction_error = extract_tool_parameters(tool_arguments_raw)
|
| 298 |
-
|
| 299 |
-
if extraction_error:
|
| 300 |
-
error_reasoning = tool_reasoning(tool_name, None, "error", error=extraction_error)
|
| 301 |
-
for i in range(0, len(error_reasoning), 50):
|
| 302 |
-
logs_generator = styles(typing_effect(error_reasoning, i), expanded=True)
|
| 303 |
-
yield logs_generator
|
| 304 |
-
time.sleep(0.10)
|
| 305 |
-
logs_generator = styles(error_reasoning, expanded=True)
|
| 306 |
-
yield logs_generator
|
| 307 |
-
tool_execution_result = extraction_error
|
| 308 |
-
else:
|
| 309 |
-
parsing_reasoning = tool_reasoning(tool_name, extracted_arguments, "parsing")
|
| 310 |
-
for i in range(0, len(parsing_reasoning), 50):
|
| 311 |
-
logs_generator = styles(typing_effect(parsing_reasoning, i), expanded=True)
|
| 312 |
-
yield logs_generator
|
| 313 |
-
time.sleep(0.10)
|
| 314 |
-
|
| 315 |
-
executing_reasoning = tool_reasoning(tool_name, extracted_arguments, "executing")
|
| 316 |
-
for i in range(0, len(executing_reasoning), 50):
|
| 317 |
-
logs_generator = styles(typing_effect(executing_reasoning, i), expanded=True)
|
| 318 |
-
yield logs_generator
|
| 319 |
-
time.sleep(0.10)
|
| 320 |
-
|
| 321 |
-
try:
|
| 322 |
-
tool_execution_result = invoke_tool_function(
|
| 323 |
-
search_engine,
|
| 324 |
-
tool_name,
|
| 325 |
-
extracted_arguments
|
| 326 |
-
)
|
| 327 |
-
|
| 328 |
-
completed_reasoning = tool_reasoning(tool_name, extracted_arguments, "completed", result=tool_execution_result)
|
| 329 |
-
for i in range(0, len(completed_reasoning), 50):
|
| 330 |
-
logs_generator = styles(typing_effect(completed_reasoning, i), expanded=True)
|
| 331 |
-
yield logs_generator
|
| 332 |
-
time.sleep(0.10)
|
| 333 |
-
logs_generator = styles(completed_reasoning, expanded=False)
|
| 334 |
-
yield logs_generator
|
| 335 |
-
|
| 336 |
-
except Exception as tool_error:
|
| 337 |
-
error_reasoning = tool_reasoning(tool_name, extracted_arguments, "error", error=str(tool_error))
|
| 338 |
-
for i in range(0, len(error_reasoning), 50):
|
| 339 |
-
logs_generator = styles(typing_effect(error_reasoning, i), expanded=True)
|
| 340 |
-
yield logs_generator
|
| 341 |
-
time.sleep(0.10)
|
| 342 |
-
logs_generator = styles(error_reasoning, expanded=True)
|
| 343 |
-
yield logs_generator
|
| 344 |
-
tool_execution_result = str(tool_error)
|
| 345 |
-
|
| 346 |
-
conversation_messages.append(
|
| 347 |
-
{
|
| 348 |
-
"role": "tool",
|
| 349 |
-
"tool_call_id": tool_invocation.id,
|
| 350 |
-
"name": tool_name,
|
| 351 |
-
"content": tool_execution_result
|
| 352 |
-
}
|
| 353 |
-
)
|
| 354 |
-
|
| 355 |
-
if logs_generator:
|
| 356 |
-
logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
|
| 357 |
-
|
| 358 |
-
return conversation_messages, logs_generator
|
| 359 |
|
| 360 |
def process_user_request(user_message, chat_history):
|
| 361 |
if not isinstance(user_message, str) or not user_message.strip():
|
|
@@ -376,12 +32,14 @@ def process_user_request(user_message, chat_history):
|
|
| 376 |
available_tools = construct_tool_definitions()
|
| 377 |
|
| 378 |
conversation_messages = setup_response(
|
| 379 |
-
|
| 380 |
chat_history,
|
| 381 |
user_message
|
| 382 |
)
|
| 383 |
|
| 384 |
tool_response = ""
|
|
|
|
|
|
|
| 385 |
for tool_update in process_tool_interactions(
|
| 386 |
server=server,
|
| 387 |
model_name=MODEL,
|
|
@@ -395,6 +53,7 @@ def process_user_request(user_message, chat_history):
|
|
| 395 |
else:
|
| 396 |
conversation_messages = tool_update[0]
|
| 397 |
tool_response = tool_update[1]
|
|
|
|
| 398 |
|
| 399 |
if tool_response:
|
| 400 |
yield tool_response + "\n\n"
|
|
@@ -403,7 +62,8 @@ def process_user_request(user_message, chat_history):
|
|
| 403 |
server=server,
|
| 404 |
model_name=MODEL,
|
| 405 |
conversation_messages=conversation_messages,
|
| 406 |
-
tool_definitions=available_tools
|
|
|
|
| 407 |
)
|
| 408 |
|
| 409 |
for final_response in final_response_generator:
|
|
|
|
| 3 |
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
#
|
| 5 |
|
|
|
|
| 6 |
import traceback
|
| 7 |
+
from config import MODEL, INSTRUCTIONS_START
|
|
|
|
|
|
|
| 8 |
from src.core.web_configuration import WebConfiguration
|
| 9 |
from src.engine.browser_engine import BrowserEngine
|
| 10 |
from src.tools.tool_manager import construct_tool_definitions
|
| 11 |
from src.client.openai_client import initialize_client
|
| 12 |
+
from .response.setup import setup_response
|
| 13 |
+
from .response.generator import generate_response
|
| 14 |
+
from .tools.interaction import process_tool_interactions
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
def process_user_request(user_message, chat_history):
|
| 17 |
if not isinstance(user_message, str) or not user_message.strip():
|
|
|
|
| 32 |
available_tools = construct_tool_definitions()
|
| 33 |
|
| 34 |
conversation_messages = setup_response(
|
| 35 |
+
INSTRUCTIONS_START,
|
| 36 |
chat_history,
|
| 37 |
user_message
|
| 38 |
)
|
| 39 |
|
| 40 |
tool_response = ""
|
| 41 |
+
tools_done = False
|
| 42 |
+
|
| 43 |
for tool_update in process_tool_interactions(
|
| 44 |
server=server,
|
| 45 |
model_name=MODEL,
|
|
|
|
| 53 |
else:
|
| 54 |
conversation_messages = tool_update[0]
|
| 55 |
tool_response = tool_update[1]
|
| 56 |
+
tools_done = tool_update[2]
|
| 57 |
|
| 58 |
if tool_response:
|
| 59 |
yield tool_response + "\n\n"
|
|
|
|
| 62 |
server=server,
|
| 63 |
model_name=MODEL,
|
| 64 |
conversation_messages=conversation_messages,
|
| 65 |
+
tool_definitions=available_tools,
|
| 66 |
+
tools_done=tools_done
|
| 67 |
)
|
| 68 |
|
| 69 |
for final_response in final_response_generator:
|
src/processor/reasoning/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#

"""Public interface of the reasoning subpackage.

Re-exports the typing-effect helper and the per-tool reasoning
message builder so callers can import from ``..reasoning`` directly.
"""

from .interface import reasoning_interfaces
from .tool_reasoning import tool_reasoning

__all__ = [
    'reasoning_interfaces',
    'tool_reasoning'
]
|
src/processor/reasoning/interface.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
def reasoning_interfaces(text, current_length=0):
    """Return the leading *current_length* characters of *text*.

    Calling this repeatedly with a growing index produces a typing
    animation; once the index reaches the end of the string the full
    text is returned unchanged.
    """
    return text[:current_length] if current_length < len(text) else text
|
src/processor/reasoning/tool_reasoning.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
from config import REASONING_STEPS, REASONING_DEFAULT
|
| 7 |
+
|
| 8 |
+
def tool_reasoning(tool_name, tool_arguments, stage, error=None, result=None):
    """Build the human-readable reasoning message shown for a tool call.

    Looks up the template for ``tool_name``/``stage`` in REASONING_STEPS and
    formats it with the tool's arguments plus, depending on the stage, a
    truncated result preview or the error text.

    Args:
        tool_name: "web_search" or "read_url"; anything else falls back.
        tool_arguments: parsed tool arguments dict, or None.
        stage: one of "parsing", "executing", "completed", "error".
        error: error text, used when stage == "error".
        result: tool output, previewed when stage == "completed".

    Returns:
        The formatted reasoning string, or REASONING_DEFAULT when the tool
        or stage has no template.
    """
    arguments = tool_arguments or {}

    # Per-tool template fields; the two branches previously duplicated the
    # whole lookup/format flow, so it is consolidated here.
    if tool_name == "web_search":
        fields = {
            "query": arguments.get("query", ""),
            "engine": arguments.get("engine", "google"),
        }
    elif tool_name == "read_url":
        fields = {"url": arguments.get("url", "")}
    else:
        return REASONING_DEFAULT

    template = REASONING_STEPS.get(tool_name, {}).get(stage)
    if not template:
        return REASONING_DEFAULT

    if stage == "completed":
        # Cap the preview at 300 characters so log frames stay compact.
        fields["preview"] = result[:300] + "..." if result and len(result) > 300 else result
    elif stage == "error":
        fields["error"] = error

    return template.format(**fields)
|
src/processor/response/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#

"""Public interface of the response subpackage.

Re-exports the conversation setup helper, the assistant-message
formatter, and the streaming answer generator.
"""

from .setup import setup_response
from .formatter import assistant_response
from .generator import generate_response

__all__ = [
    'setup_response',
    'assistant_response',
    'generate_response'
]
|
src/processor/response/formatter.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
def assistant_response(response_message):
    """Convert an SDK assistant message object into a plain message dict.

    Copies the role, content (empty string when absent), and any tool
    calls into JSON-serializable form suitable for appending back to the
    conversation history. ``tool_calls`` is None when the message carries
    no tool calls.
    """
    raw_calls = getattr(response_message, "tool_calls", None) or []

    serialized_calls = [
        {
            "id": call.id,
            "type": "function",
            "function": {
                "name": call.function.name,
                "arguments": call.function.arguments,
            },
        }
        for call in raw_calls
    ]

    return {
        "role": "assistant",
        "content": response_message.content or "",
        "tool_calls": serialized_calls if serialized_calls else None,
    }
|
src/processor/response/generator.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
import traceback
|
| 7 |
+
from config import INSTRUCTIONS_END
|
| 8 |
+
|
| 9 |
+
def generate_response(
    server,
    model_name,
    conversation_messages,
    tool_definitions,
    tools_done=False
):
    """Stream the final assistant answer, yielding the accumulated text.

    Each streamed chunk is appended to a running buffer and the whole
    buffer is yielded after every chunk, so the caller can render a
    progressively growing answer. After the stream ends the complete
    buffer is yielded once more. When *tools_done* is True a closing
    system instruction is appended and tools are disabled for this call.
    On failure the error plus traceback is appended to the buffer and
    yielded.
    """
    accumulated = ""

    if tools_done:
        # Remind the model of the closing instructions once tools have run.
        conversation_messages.append(
            {
                "role": "system",
                "content": INSTRUCTIONS_END
            }
        )

    try:
        stream = server.chat.completions.create(
            model=model_name,
            messages=conversation_messages,
            tools=None if tools_done else tool_definitions,
            tool_choice="none",
            temperature=0.75,
            stream=True
        )

        for chunk in stream:
            try:
                piece = chunk.choices[0].delta.content or ""
            except Exception:
                piece = ""

            if piece:
                accumulated += piece
                yield accumulated

        # Final emit so callers always observe the completed text.
        yield accumulated

    except Exception as stream_error:
        accumulated += f"\nError: {str(stream_error)}\n"
        accumulated += traceback.format_exc()
        yield accumulated
|
src/processor/response/setup.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
def setup_response(system_instruction, conversation_history, user_input):
    """Assemble the ordered chat transcript sent to the model.

    Starts with the system instruction (when non-empty), replays valid
    user/assistant turns from *conversation_history* (items must have a
    string content), and appends the new non-blank *user_input* as the
    final user turn.
    """
    messages = [{"role": "system", "content": system_instruction}] if system_instruction else []

    history_items = conversation_history if isinstance(conversation_history, list) else []
    for entry in history_items:
        role, content = entry.get("role"), entry.get("content")
        # Skip tool/system turns and anything without plain-string content.
        if role in ("user", "assistant") and isinstance(content, str):
            messages.append({"role": role, "content": content})

    if isinstance(user_input, str) and user_input.strip():
        messages.append({"role": "user", "content": user_input})

    return messages
|
src/processor/tools/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#

"""Public interface of the tools subpackage.

Re-exports the argument parser, the tool dispatcher, and the
tool-interaction loop.
"""

from .parser import extract_tool_parameters
from .executor import invoke_tool_function
from .interaction import process_tool_interactions

__all__ = [
    'extract_tool_parameters',
    'invoke_tool_function',
    'process_tool_interactions'
]
|
src/processor/tools/executor.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
def invoke_tool_function(search_engine, function_name, function_params):
    """Dispatch a named tool call to the matching search-engine method.

    Supports "web_search" (query + engine, engine defaults to google)
    and "read_url" (target URL). Any other name returns an
    "Unknown tool" message instead of raising.
    """
    handlers = {
        "web_search": lambda params: search_engine.perform_search(
            search_query=params.get("query", ""),
            search_provider=params.get("engine", "google")
        ),
        "read_url": lambda params: search_engine.extract_page_content(
            target_url=params.get("url", "")
        ),
    }

    handler = handlers.get(function_name)
    if handler is None:
        return f"Unknown tool: {function_name}"
    return handler(function_params)
|
src/processor/tools/interaction.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
import time
|
| 7 |
+
from assets.css.reasoning import styles
|
| 8 |
+
from ..response.formatter import assistant_response
|
| 9 |
+
from ..reasoning.interface import reasoning_interfaces
|
| 10 |
+
from ..reasoning.tool_reasoning import tool_reasoning
|
| 11 |
+
from .parser import extract_tool_parameters
|
| 12 |
+
from .executor import invoke_tool_function
|
| 13 |
+
from config import REASONING_DELAY
|
| 14 |
+
|
| 15 |
+
def process_tool_interactions(server, model_name, conversation_messages, tool_definitions, search_engine):
    """Run one round of model-driven tool calls, streaming UI log frames.

    Asks the model for a completion with tools enabled, executes any tool
    calls it requests, and yields styled "reasoning" log frames (HTML via
    ``styles``) animated 50 characters at a time to mimic typing.

    NOTE(review): this is a generator that also carries a return value
    ``(conversation_messages, logs_generator, had_tool_results)`` via
    StopIteration — the caller appears to read it by indexing the final
    update; confirm against the caller in process_user_request.

    Side effects: appends the assistant message and one "tool" message per
    tool call to *conversation_messages* in place.
    """
    # Single round-trip only; raise to allow multi-step tool chains.
    maximum_iterations = 1
    logs_generator = ""
    tool_results = []

    for iteration_index in range(maximum_iterations):
        try:
            model_response = server.chat.completions.create(
                model=model_name,
                messages=conversation_messages,
                tools=tool_definitions,
                tool_choice="auto",
                temperature=0.6
            )
        except Exception:
            # API failure: bail out with whatever logs exist and no tool results.
            return conversation_messages, logs_generator, False

        response_choice = model_response.choices[0]
        assistant_message = response_choice.message
        formatted_assistant_message = assistant_response(assistant_message)

        # Record the assistant turn (including any tool_calls) in the history.
        conversation_messages.append(
            {
                "role": formatted_assistant_message["role"],
                "content": formatted_assistant_message["content"],
                "tool_calls": formatted_assistant_message["tool_calls"]
            }
        )

        pending_tool_calls = assistant_message.tool_calls or []
        if not pending_tool_calls:
            # Model answered directly; collapse the log panel and stop.
            if logs_generator:
                logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)
            return conversation_messages, logs_generator, False

        for tool_invocation in pending_tool_calls:
            tool_name = tool_invocation.function.name
            tool_arguments_raw = tool_invocation.function.arguments

            extracted_arguments, extraction_error = extract_tool_parameters(tool_arguments_raw)

            if extraction_error:
                # Malformed arguments: animate the error message, then feed the
                # error text back to the model as the tool result.
                error_reasoning = tool_reasoning(tool_name, None, "error", error=extraction_error)
                for i in range(0, len(error_reasoning), 50):
                    logs_generator = styles(reasoning_interfaces(error_reasoning, i), expanded=True)
                    yield logs_generator
                    time.sleep(REASONING_DELAY)
                logs_generator = styles(error_reasoning, expanded=True)
                yield logs_generator
                tool_execution_result = extraction_error
            else:
                # Animate the "parsing" stage frame-by-frame.
                reasoning_status = tool_reasoning(tool_name, extracted_arguments, "parsing")
                for i in range(0, len(reasoning_status), 50):
                    logs_generator = styles(reasoning_interfaces(reasoning_status, i), expanded=True)
                    yield logs_generator
                    time.sleep(REASONING_DELAY)

                # Animate the "executing" stage before the actual call.
                reasoning_start = tool_reasoning(tool_name, extracted_arguments, "executing")
                for i in range(0, len(reasoning_start), 50):
                    logs_generator = styles(reasoning_interfaces(reasoning_start, i), expanded=True)
                    yield logs_generator
                    time.sleep(REASONING_DELAY)

                try:
                    tool_execution_result = invoke_tool_function(
                        search_engine,
                        tool_name,
                        extracted_arguments
                    )
                    tool_results.append({
                        "tool": tool_name,
                        "arguments": extracted_arguments,
                        "result": tool_execution_result
                    })

                    # Animate the "completed" stage, ending with a collapsed panel.
                    reasoning_done = tool_reasoning(tool_name, extracted_arguments, "completed", result=tool_execution_result)
                    for i in range(0, len(reasoning_done), 50):
                        logs_generator = styles(reasoning_interfaces(reasoning_done, i), expanded=True)
                        yield logs_generator
                        time.sleep(REASONING_DELAY)
                    logs_generator = styles(reasoning_done, expanded=False)
                    yield logs_generator

                except Exception as tool_error:
                    # Tool raised: animate the error and pass its text to the model.
                    error_reasoning = tool_reasoning(tool_name, extracted_arguments, "error", error=str(tool_error))
                    for i in range(0, len(error_reasoning), 50):
                        logs_generator = styles(reasoning_interfaces(error_reasoning, i), expanded=True)
                        yield logs_generator
                        time.sleep(REASONING_DELAY)
                    logs_generator = styles(error_reasoning, expanded=True)
                    yield logs_generator
                    tool_execution_result = str(tool_error)

            # Feed the tool output (or error text) back into the conversation.
            conversation_messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_invocation.id,
                    "name": tool_name,
                    "content": tool_execution_result
                }
            )

    # Collapse the final log panel before handing control back.
    if logs_generator:
        logs_generator = styles(logs_generator.replace('<br>', '\n').strip(), expanded=False)

    results_generator = len(tool_results) > 0
    return conversation_messages, logs_generator, results_generator
|
src/processor/tools/parser.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
|
| 3 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 4 |
+
#
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
|
| 8 |
+
def extract_tool_parameters(raw_parameters, fallback_engine="google"):
    """Parse a JSON tool-argument string, normalising the search engine.

    Returns ``(params, None)`` on success with ``params["engine"]``
    guaranteed to be one of google/bing/baidu (falling back to
    *fallback_engine* when missing or invalid), or ``(None, message)``
    when the payload cannot be parsed or handled.
    """
    try:
        parsed = json.loads(raw_parameters or "{}")
        # Missing engine, or one outside the supported set, gets the fallback.
        if "engine" not in parsed or parsed["engine"] not in ("google", "bing", "baidu"):
            parsed["engine"] = fallback_engine
        return parsed, None
    except Exception as decode_error:
        return None, f"Invalid tool arguments: {str(decode_error)}"
|