# specialized-agents-api / search_utils.py
# Last updated by pvanand (commit 990bb7f, verified)
import json
import logging
from functools import lru_cache

import requests

from config import get_api_keys
# Module-level logger named after this module, per the standard logging convention.
logger = logging.getLogger(__name__)
# API keys loaded once at import time; must contain "BRAVE_API_KEY" (read below).
api_keys = get_api_keys()
def internet_search(query, search_type="web", num_results=20):
    """Query the Brave Search API and return simplified result dicts.

    Args:
        query: Search string.
        search_type: "web" selects the web endpoint; any other value selects news.
        num_results: Maximum number of processed results to return.

    Returns:
        List of dicts with keys "title", "snippet", "last_updated", "url".
        Returns an empty list on any network, HTTP, or payload failure
        (best-effort contract: callers already treat [] as "no data").
    """
    logger.info(f"Performing internet search for query: {query}, type: {search_type}")
    endpoint = "web" if search_type == "web" else "news"
    url = f"https://api.search.brave.com/res/v1/{endpoint}/search"
    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip",
        "X-Subscription-Token": api_keys["BRAVE_API_KEY"]
    }
    params = {"q": query}
    try:
        # Timeout prevents a hung request from blocking the caller indefinitely.
        response = requests.get(url, headers=headers, params=params, timeout=10)
    except requests.RequestException as exc:
        logger.error(f"Search request failed: {exc}")
        return []
    if response.status_code != 200:
        logger.error(f"Failed to fetch search results. Status code: {response.status_code}")
        return []
    payload = response.json()
    # Web and news responses nest their result lists differently; use .get so a
    # malformed payload degrades to [] instead of raising KeyError.
    if search_type == "web":
        search_data = payload.get("web", {}).get("results", [])
    else:
        search_data = payload.get("results", [])
    processed_results = [
        {
            "title": item["title"],
            "snippet": item["extra_snippets"][0],
            "last_updated": item.get("age", ""),
            "url": item.get("url", "")
        }
        for item in search_data
        if item.get("extra_snippets")  # skip items with no usable snippet
    ][:num_results]
    logger.info(f"Retrieved {len(processed_results)} search results")
    return processed_results
@lru_cache(maxsize=100)
def cached_internet_search(query: str):
    """Return news-search results for *query*, memoized (LRU, up to 100 queries)."""
    logger.info(f"Performing cached internet search for query: {query}")
    results = internet_search(query, search_type="news")
    return results
def analyze_data(query, data_type="news"):
    """Fetch search data for *query* and build the LLM chat messages for it.

    Args:
        query: User query string.
        data_type: "news" uses the cached news search; any other value uses
            a fresh web search.

    Returns:
        Tuple ``(messages, data)`` where *messages* is a system+user chat
        message list and *data* is the raw search-result list. Returns
        ``(None, None)`` when the search returned no data.
    """
    logger.info(f"Analyzing {data_type} for query: {query}")
    if data_type == "news":
        data = cached_internet_search(query)
        prompt_generator = generate_news_prompt
        system_prompt = NEWS_ASSISTANT_PROMPT
    else:
        data = internet_search(query, search_type="web")
        prompt_generator = generate_search_prompt
        system_prompt = SEARCH_ASSISTANT_PROMPT
    if not data:
        logger.error(f"Failed to fetch {data_type} data")
        # Return a 2-tuple so callers that unpack the result
        # ("messages, data = analyze_data(...)") do not crash with
        # "cannot unpack non-iterable NoneType" on failure.
        return None, None
    prompt = prompt_generator(query, data)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ]
    logger.info(f"{data_type.capitalize()} analysis completed")
    return messages, data
def search_assistant_api(query, data_type, model="openai/gpt-4o-mini"):
    """Build a zero-argument generator that streams an LLM answer for *query*.

    Args:
        query: User query string.
        data_type: "news" or "web" — forwarded to analyze_data.
        model: Model identifier passed through to chat_with_llama_stream.

    Returns:
        A zero-argument generator function; iterating its generator yields the
        LLM response chunks, then a "<json><ref>...</ref></json>" trailer with
        the raw search data serialized as JSON.

    Raises:
        HTTPException: status 500 when the underlying search returned no data.

    NOTE(review): HTTPException (fastapi) and chat_with_llama_stream are not
    imported in this module's visible imports — confirm they are provided
    elsewhere.
    """
    logger.info(f"Received {data_type} assistant query: {query}")
    # analyze_data signals failure with a falsy result; guard BEFORE unpacking
    # so a failed fetch raises a clean 500 instead of a TypeError. Handles both
    # a bare None and a (None, None) failure tuple.
    result = analyze_data(query, data_type)
    if not result or not result[0]:
        logger.error(f"Failed to fetch {data_type} data")
        raise HTTPException(status_code=500, detail=f"Failed to fetch {data_type} data")
    messages, search_data = result

    def process_response():
        logger.info(f"Generating response using LLM: {messages}")
        full_response = ""
        for content in chat_with_llama_stream(messages, model=model):
            full_response += content
            yield content
        logger.info(f"Completed {data_type} assistant response for query: {query}")
        logger.info(f"LLM Response: {full_response}")
        # Trailer lets the client recover the raw sources after the stream ends.
        yield "<json><ref>"+ json.dumps(search_data)+"</ref></json>"
    return process_response