import json
import logging
from functools import lru_cache

import requests
from fastapi import HTTPException  # assumed: the HTTPException raised below matches FastAPI's

from config import get_api_keys
# Hypothetical import paths for project helpers referenced below; the real
# module layout is not shown in this file.
from llm_client import chat_with_llama_stream
from prompts import (
    NEWS_ASSISTANT_PROMPT,
    SEARCH_ASSISTANT_PROMPT,
    generate_news_prompt,
    generate_search_prompt,
)

logger = logging.getLogger(__name__)

api_keys = get_api_keys()

def internet_search(query, search_type="web", num_results=20):
    logger.info(f"Performing internet search for query: {query}, type: {search_type}")
    endpoint = "web" if search_type == "web" else "news"
    url = f"https://api.search.brave.com/res/v1/{endpoint}/search"

    headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip",
        "X-Subscription-Token": api_keys["BRAVE_API_KEY"],
    }
    # Ask Brave for num_results hits up front; results are sliced again below
    # as a safeguard.
    params = {"q": query, "count": num_results}

    response = requests.get(url, headers=headers, params=params, timeout=10)

    if response.status_code != 200:
        logger.error(f"Failed to fetch search results. Status code: {response.status_code}")
        return []

    payload = response.json()
    # Web responses nest results under "web"; news responses put them at the top level.
    search_data = payload["web"]["results"] if search_type == "web" else payload["results"]

    processed_results = [
        {
            "title": item["title"],
            "snippet": item["extra_snippets"][0],
            "last_updated": item.get("age", ""),
            "url": item.get("url", ""),
        }
        for item in search_data
        if item.get("extra_snippets")  # skip results without a usable snippet
    ][:num_results]

    logger.info(f"Retrieved {len(processed_results)} search results")
    return processed_results
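
# Example (hypothetical query):
#   internet_search("python 3.13 release", search_type="news", num_results=5)
#   -> [{"title": ..., "snippet": ..., "last_updated": ..., "url": ...}, ...]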

@lru_cache(maxsize=100)
def cached_internet_search(query: str):
    # Memoised per exact query string; note that empty (failed) results are
    # cached too, and callers share the same returned list object.
    logger.info(f"Performing cached internet search for query: {query}")
    return internet_search(query, search_type="news")

def analyze_data(query, data_type="news"):
    logger.info(f"Analyzing {data_type} for query: {query}")

    if data_type == "news":
        data = cached_internet_search(query)
        prompt_generator = generate_news_prompt
        system_prompt = NEWS_ASSISTANT_PROMPT
    else:
        data = internet_search(query, search_type="web")
        prompt_generator = generate_search_prompt
        system_prompt = SEARCH_ASSISTANT_PROMPT

    if not data:
        logger.error(f"Failed to fetch {data_type} data")
        # Return a pair so the caller can unpack unconditionally
        # (returning bare None would raise a TypeError at the unpack site).
        return None, None

    prompt = prompt_generator(query, data)
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt}
    ]

    logger.info(f"{data_type.capitalize()} analysis completed")
    return messages, data

def search_assistant_api(query, data_type, model="openai/gpt-4o-mini"):
    logger.info(f"Received {data_type} assistant query: {query}")
    messages, search_data = analyze_data(query, data_type)

    if not messages:
        logger.error(f"Failed to fetch {data_type} data")
        raise HTTPException(status_code=500, detail=f"Failed to fetch {data_type} data")

    def process_response():
        logger.info(f"Generating response using LLM: {messages}")
        full_response = ""
        for content in chat_with_llama_stream(messages, model=model):
            full_response += content
            yield content
        logger.info(f"Completed {data_type} assistant response for query: {query}")
        logger.info(f"LLM Response: {full_response}")
        # Trailing marker lets the client separate the streamed answer from
        # the JSON-encoded source references.
        yield "<json><ref>" + json.dumps(search_data) + "</ref></json>"

    return process_response
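
# Minimal usage sketch (illustrative, not part of the module's public surface).
# Assumes config supplies a valid BRAVE_API_KEY and that chat_with_llama_stream
# yields text chunks for the given messages/model.
if __name__ == "__main__":
    stream = search_assistant_api("latest fusion energy breakthroughs", data_type="news")
    for chunk in stream():
        print(chunk, end="", flush=True)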