File size: 1,627 Bytes
69a077e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
# chat_agent.py
import asyncio
import os
import re

from openai import OpenAI

from main import WebScrapingOrchestrator

class SimpleChatAgent:
    """Chat agent backed by a Nebius-hosted LLM that can also scrape URLs on request."""

    def __init__(self):
        """Create the Nebius OpenAI-compatible client and the scraping orchestrator.

        Reads the API key from the ``NEBIUS_API_KEY`` environment variable;
        if unset the client is created with ``api_key=None`` and requests
        will fail at call time.
        """
        self.client = OpenAI(
            base_url="https://api.studio.nebius.com/v1/",
            api_key=os.environ.get("NEBIUS_API_KEY"),
        )
        self.model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
        self.orchestrator = WebScrapingOrchestrator()

    async def handle_query(self, user_input, history):
        """Route a user message: scrape a URL when asked, otherwise chat with the LLM.

        Parameters
        ----------
        user_input : str
            The latest user message.
        history : list[tuple[str, str]]
            Prior (user, assistant) message pairs, oldest first.

        Returns
        -------
        str
            A formatted scrape summary, a scrape error message, or the
            LLM's reply text.
        """
        # Scraping path: the message mentions "scrape" and contains a URL.
        url_match = re.search(r"(https?://[^\s]+)", user_input)
        if "scrape" in user_input.lower() and url_match:
            url = url_match.group(1)
            result = await self.orchestrator.process_url(url)
            if "error" in result:
                return f"❌ Error scraping {url}: {result['error']}"
            # Use .get() defaults so a partially-populated scrape result
            # produces a degraded summary instead of raising KeyError.
            llm_data = result.get("llm_ready_data", {})
            topics = ", ".join(llm_data.get("main_topics", []))
            summary = llm_data.get("text_summary", "")[:500]
            return (
                f"✅ Scraped Data from {result['title']}:\n"
                f"- Topics: {topics}\n"
                f"- Summary: {summary}..."
            )

        # Rebuild the full conversation so the model sees prior context.
        messages = []
        for user_msg, bot_msg in history:
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": bot_msg})
        messages.append({"role": "user", "content": user_input})

        # The OpenAI client call is synchronous; run it in a worker thread
        # so this async handler does not block the event loop.
        response = await asyncio.to_thread(
            self.client.chat.completions.create,
            model=self.model,
            messages=messages,
            temperature=0.6,
        )
        return response.choices[0].message.content