rdune71 committed on
Commit e441606 · 1 Parent(s): 3af622c

self healing/weather update

src/llm/enhanced_provider.py CHANGED
@@ -1,32 +1,41 @@
 import json
 from typing import List, Dict, Optional, Union
 from src.llm.base_provider import LLMProvider
-from src.services.context_enrichment import context_service
+from src.services.context_provider import context_provider
 
 class EnhancedLLMProvider(LLMProvider):
-    """Base provider with context enrichment"""
+    """Base provider with intelligent context enrichment"""
 
     def __init__(self, model_name: str, timeout: int = 30, max_retries: int = 3):
         super().__init__(model_name, timeout, max_retries)
 
-    def _enrich_context(self, conversation_history: List[Dict]) -> List[Dict]:
-        """Add current context to conversation"""
+    def _enrich_context_intelligently(self, conversation_history: List[Dict]) -> List[Dict]:
+        """Add context only when it's actually relevant"""
+        if not conversation_history:
+            return conversation_history
+
         # Get the last user message to determine context needs
         last_user_message = ""
         for msg in reversed(conversation_history):
             if msg["role"] == "user":
                 last_user_message = msg["content"]
                 break
-
-        # Get current context
-        context = context_service.get_current_context(last_user_message)
 
-        # Add context as system message at the beginning
-        context_message = {
-            "role": "system",
-            "content": f"[Current Context: {context['current_time']} | Weather: {context['weather']}]"
-        }
+        # Get intelligent context
+        context_string = context_provider.get_context_for_llm(
+            last_user_message,
+            conversation_history
+        )
+
+        # Only add context if it's relevant
+        if context_string:
+            context_message = {
+                "role": "system",
+                "content": context_string
+            }
+            # Insert context at the beginning
+            enriched_history = [context_message] + conversation_history
+            return enriched_history
 
-        # Insert context at the beginning
-        enriched_history = [context_message] + conversation_history
-        return enriched_history
+        # Return original history if no context needed
+        return conversation_history
 
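For illustration, a minimal usage sketch (not part of the commit) of how the new enrichment step behaves from a caller's point of view; it assumes EnhancedLLMProvider is directly instantiable, and the sample history is hypothetical:

    from src.llm.enhanced_provider import EnhancedLLMProvider

    provider = EnhancedLLMProvider("demo-model")  # assumption: base class is instantiable
    history = [{"role": "user", "content": "Do I need an umbrella in London today?"}]

    enriched = provider._enrich_context_intelligently(history)
    # When context_provider returns a context string, enriched[0] is a new
    # system message, e.g. {"role": "system", "content": "[Context: Weather in London: ...]"}.
    # For queries with no time/weather cues, the history comes back unchanged.
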
src/llm/hf_provider.py CHANGED
@@ -3,8 +3,7 @@ import logging
 from typing import List, Dict, Optional, Union
 from src.llm.enhanced_provider import EnhancedLLMProvider
 from utils.config import config
-from src.services.context_enrichment import context_service
-
+from src.services.context_provider import context_provider
 logger = logging.getLogger(__name__)
 
 try:
@@ -36,8 +35,8 @@ class HuggingFaceProvider(EnhancedLLMProvider):
     def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
         """Generate a response synchronously"""
         try:
-            # Enrich context
-            enriched_history = self._enrich_context(conversation_history)
+            # Intelligently enrich context
+            enriched_history = self._enrich_context_intelligently(conversation_history)
 
             response = self.client.chat.completions.create(
                 model=self.model_name,
@@ -53,7 +52,7 @@ class HuggingFaceProvider(EnhancedLLMProvider):
             if self._is_scale_to_zero_error(e):
                 logger.info("HF endpoint is scaling up, waiting...")
                 time.sleep(60)  # Wait for endpoint to initialize
-                # Retry once
+                # Retry once after waiting
                 response = self.client.chat.completions.create(
                     model=self.model_name,
                     messages=conversation_history,
@@ -67,8 +66,8 @@ class HuggingFaceProvider(EnhancedLLMProvider):
     def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
         """Generate a response with streaming support"""
         try:
-            # Enrich context
-            enriched_history = self._enrich_context(conversation_history)
+            # Intelligently enrich context
+            enriched_history = self._enrich_context_intelligently(conversation_history)
 
             response = self.client.chat.completions.create(
                 model=self.model_name,
@@ -90,7 +89,7 @@ class HuggingFaceProvider(EnhancedLLMProvider):
             if self._is_scale_to_zero_error(e):
                 logger.info("HF endpoint is scaling up, waiting...")
                 time.sleep(60)  # Wait for endpoint to initialize
-                # Retry once
+                # Retry once after waiting
                 response = self.client.chat.completions.create(
                     model=self.model_name,
                     messages=conversation_history,
@@ -107,27 +106,36 @@ class HuggingFaceProvider(EnhancedLLMProvider):
                 return chunks
             raise
 
-    def _enrich_context(self, conversation_history: List[Dict]) -> List[Dict]:
-        """Add current context to conversation"""
+    def _enrich_context_intelligently(self, conversation_history: List[Dict]) -> List[Dict]:
+        """Intelligently add context only when relevant"""
+        if not conversation_history:
+            return conversation_history
+
         # Get the last user message to determine context needs
         last_user_message = ""
         for msg in reversed(conversation_history):
             if msg["role"] == "user":
                 last_user_message = msg["content"]
                 break
-
-        # Get current context
-        context = context_service.get_current_context(last_user_message)
 
-        # Add context as system message at the beginning
-        context_message = {
-            "role": "system",
-            "content": f"[Current Context: {context['current_time']} | Weather: {context['weather']}]"
-        }
+        # Get intelligent context
+        context_string = context_provider.get_context_for_llm(
+            last_user_message,
+            conversation_history
+        )
+
+        # Only add context if it's relevant
+        if context_string:
+            context_message = {
+                "role": "system",
+                "content": context_string
+            }
+            # Insert context at the beginning
+            enriched_history = [context_message] + conversation_history
+            return enriched_history
 
-        # Insert context at the beginning
-        enriched_history = [context_message] + conversation_history
-        return enriched_history
+        # Return original history if no context needed
+        return conversation_history
 
     def _is_scale_to_zero_error(self, error: Exception) -> bool:
         """Check if the error is related to scale-to-zero initialization"""
@@ -136,7 +144,6 @@ class HuggingFaceProvider(EnhancedLLMProvider):
             "503",
             "service unavailable",
             "initializing",
-            "cold start",
-            "timeout"
+            "cold start"
         ]
         return any(indicator in error_str for indicator in scale_to_zero_indicators)
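
The generate and stream paths above share a cold-start retry pattern: catch the error, wait for the endpoint to spin up, then retry exactly once. A standalone sketch of that pattern, where `client` stands in for the provider's OpenAI-compatible client (an assumption for illustration):

    import time

    def generate_with_cold_start_retry(client, model_name, messages, wait_seconds=60):
        """Call the chat endpoint once; if the failure looks like a
        scale-to-zero cold start, wait and retry a single time."""
        indicators = ("503", "service unavailable", "initializing", "cold start")
        try:
            return client.chat.completions.create(model=model_name, messages=messages)
        except Exception as e:
            if any(s in str(e).lower() for s in indicators):
                time.sleep(wait_seconds)  # give the endpoint time to initialize
                return client.chat.completions.create(model=model_name, messages=messages)
            raise
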
src/llm/ollama_provider.py CHANGED
@@ -1,17 +1,17 @@
 import requests
 import logging
 import re
-import time
 from typing import List, Dict, Optional, Union
 from src.llm.base_provider import LLMProvider
 from utils.config import config
+from src.services.context_provider import context_provider
 
 logger = logging.getLogger(__name__)
 
 class OllamaProvider(LLMProvider):
-    """Ollama LLM provider implementation with enhanced error handling"""
+    """Ollama LLM provider implementation"""
 
-    def __init__(self, model_name: str, timeout: int = 30, max_retries: int = 2):
+    def __init__(self, model_name: str, timeout: int = 60, max_retries: int = 3):
         super().__init__(model_name, timeout, max_retries)
         self.host = self._sanitize_host(config.ollama_host or "http://localhost:11434")
         self.headers = {
@@ -30,12 +30,12 @@ class OllamaProvider(LLMProvider):
         return host
 
     def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
-        """Generate a response synchronously with better error handling"""
+        """Generate a response synchronously"""
         try:
             return self._retry_with_backoff(self._generate_impl, prompt, conversation_history)
         except Exception as e:
             logger.error(f"Ollama generation failed: {e}")
-            raise Exception(f"Ollama is not responding. Please check your connection or try the HF Endpoint.")
+            return None
 
     def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
         """Generate a response with streaming support"""
@@ -43,16 +43,44 @@
             return self._retry_with_backoff(self._stream_generate_impl, prompt, conversation_history)
         except Exception as e:
             logger.error(f"Ollama stream generation failed: {e}")
-            raise Exception(f"Ollama is not responding. Please check your connection or try the HF Endpoint.")
+            return None
+
+    def validate_model(self) -> bool:
+        """Validate if the model is available"""
+        try:
+            response = requests.get(
+                f"{self.host}/api/tags",
+                headers=self.headers,
+                timeout=self.timeout
+            )
+            if response.status_code == 200:
+                models = response.json().get("models", [])
+                model_names = [model.get("name") for model in models]
+                return self.model_name in model_names
+            elif response.status_code == 404:
+                # Try alternative endpoint
+                response2 = requests.get(
+                    f"{self.host}",
+                    headers=self.headers,
+                    timeout=self.timeout
+                )
+                return response2.status_code == 200
+            return False
+        except Exception as e:
+            logger.error(f"Model validation failed: {e}")
+            return False
 
     def _generate_impl(self, prompt: str, conversation_history: List[Dict]) -> str:
-        """Implementation of synchronous generation with enhanced debugging"""
+        """Implementation of synchronous generation with intelligent context"""
         try:
            url = f"{self.host}/api/chat"
 
+            # Intelligently enrich context
+            enriched_history = self._enrich_context_intelligently(conversation_history)
+
            # Prepare messages - ensure proper format
            messages = []
-            for msg in conversation_history:
+            for msg in enriched_history:
                if isinstance(msg, dict) and "role" in msg and "content" in msg:
                    messages.append({
                        "role": msg["role"],
@@ -69,14 +97,12 @@
             logger.info(f"Ollama request payload: {payload}")
             logger.info(f"Ollama headers: {self.headers}")
 
-            # Use shorter timeout for better UX
             response = requests.post(
                 url,
                 json=payload,
                 headers=self.headers,
-                timeout=30  # Reduced from 60 to 30 seconds
+                timeout=self.timeout
             )
-
             logger.info(f"Ollama response status: {response.status_code}")
             logger.info(f"Ollama response headers: {dict(response.headers)}")
 
@@ -106,15 +132,13 @@
     def _stream_generate_impl(self, prompt: str, conversation_history: List[Dict]) -> List[str]:
         """Implementation of streaming generation"""
         try:
+            # Intelligently enrich context
+            enriched_history = self._enrich_context_intelligently(conversation_history)
+
             url = f"{self.host}/api/chat"
-            messages = conversation_history.copy()
-            # Add the current prompt if not already in history
-            if not messages or messages[-1].get("content") != prompt:
-                messages.append({"role": "user", "content": prompt})
-
             payload = {
                 "model": self.model_name,
-                "messages": messages,
+                "messages": enriched_history,
                 "stream": True
             }
 
@@ -142,3 +166,34 @@
         except Exception as e:
             logger.error(f"Ollama stream generation failed: {e}")
             raise
+
+    def _enrich_context_intelligently(self, conversation_history: List[Dict]) -> List[Dict]:
+        """Intelligently add context only when relevant"""
+        if not conversation_history:
+            return conversation_history
+
+        # Get the last user message to determine context needs
+        last_user_message = ""
+        for msg in reversed(conversation_history):
+            if msg["role"] == "user":
+                last_user_message = msg["content"]
+                break
+
+        # Get intelligent context
+        context_string = context_provider.get_context_for_llm(
+            last_user_message,
+            conversation_history
+        )
+
+        # Only add context if it's relevant
+        if context_string:
+            context_message = {
+                "role": "system",
+                "content": context_string
+            }
+            # Insert context at the beginning
+            enriched_history = [context_message] + conversation_history
+            return enriched_history
+
+        # Return original history if no context needed
+        return conversation_history
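
The new validate_model method queries Ollama's /api/tags endpoint, which lists locally pulled models. A standalone sketch of that check, runnable against a local Ollama daemon (the default host mirrors the provider's fallback; the model name is illustration only):

    import requests

    def ollama_model_available(model_name: str,
                               host: str = "http://localhost:11434",
                               timeout: int = 60) -> bool:
        """Return True if model_name appears in Ollama's local model list."""
        try:
            response = requests.get(f"{host}/api/tags", timeout=timeout)
            if response.status_code == 200:
                models = response.json().get("models", [])
                return model_name in [m.get("name") for m in models]
            return False
        except requests.RequestException:
            return False

    # e.g. ollama_model_available("llama3")
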
src/services/context_provider.py ADDED
@@ -0,0 +1,61 @@
+import requests
+from typing import Dict, Any, Optional
+from utils.config import config
+from src.services.smart_context import smart_context
+
+class ContextProvider:
+    """Provides context data only when relevant"""
+
+    def __init__(self):
+        self.openweather_api_key = getattr(config, 'openweather_api_key', None)
+
+    def get_context_for_llm(self, user_query: str,
+                            conversation_history: list = None) -> Optional[str]:
+        """Generate context string only when relevant for LLM consumption"""
+        # Get smart context detection
+        context_info = smart_context.get_relevant_context(user_query, conversation_history)
+
+        context_parts = []
+
+        # Add time context if relevant
+        if context_info['include_time'] and 'time_data' in context_info:
+            time_data = context_info['time_data']
+            context_parts.append(f"Current time: {time_data['current_time']}")
+
+        # Add weather context if relevant
+        if context_info['include_weather']:
+            weather_data = self._get_weather_data(context_info['detected_location'] or 'New York')
+            if weather_data:
+                context_parts.append(weather_data)
+
+        # Only return context if there's something relevant
+        if context_parts:
+            return f"[Context: {', '.join(context_parts)}]"
+
+        return None  # No context needed
+
+    def _get_weather_data(self, location: str) -> Optional[str]:
+        """Get weather data for a specific location"""
+        if not self.openweather_api_key:
+            return None
+
+        try:
+            url = "http://api.openweathermap.org/data/2.5/weather"
+            params = {
+                'q': location,
+                'appid': self.openweather_api_key,
+                'units': 'metric'
+            }
+
+            response = requests.get(url, params=params, timeout=5)
+            if response.status_code == 200:
+                data = response.json()
+                return (f"Weather in {data['name']}: {data['weather'][0]['description']}, "
+                        f"{data['main']['temp']}°C, humidity {data['main']['humidity']}%")
+        except Exception:
+            pass
+
+        return None
+
+# Global instance
+context_provider = ContextProvider()
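
A hypothetical call pattern for the new provider (the queries are illustration only; the weather line appears only when an OpenWeatherMap key is configured and the API call succeeds):

    from src.services.context_provider import context_provider

    # Weather cue plus a known location -> a context string is returned
    context_provider.get_context_for_llm("What's the weather in Tokyo?")
    # e.g. "[Context: Weather in Tokyo: scattered clouds, 21.3°C, humidity 60%]"

    # No time/weather cues -> None, so callers inject nothing
    context_provider.get_context_for_llm("Explain Python decorators")
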
src/services/smart_context.py ADDED
@@ -0,0 +1,116 @@
+import time
+import re
+from typing import Dict, Any, List, Optional
+from datetime import datetime
+from utils.config import config
+
+class SmartContextDetector:
+    """Intelligently detects when context is relevant to user queries"""
+
+    def __init__(self):
+        # Keywords that suggest time/weather relevance
+        self.time_indicators = [
+            'time', 'date', 'today', 'now', 'current', 'moment',
+            'morning', 'afternoon', 'evening', 'night', 'weekend',
+            'what time', 'what day', 'what year', 'what month'
+        ]
+
+        self.weather_indicators = [
+            'weather', 'temperature', 'rain', 'snow', 'sunny', 'cloudy',
+            'forecast', 'climate', 'season', 'hot', 'cold', 'warm', 'cool',
+            'umbrella', 'jacket', 'outdoor', 'outside'
+        ]
+
+        self.location_indicators = [
+            'in', 'at', 'near', 'around', 'local', 'here', 'there'
+        ]
+
+    def should_include_time_context(self, user_query: str,
+                                    conversation_history: List[Dict] = None) -> bool:
+        """Determine if time context is relevant to the query"""
+        query_lower = user_query.lower()
+
+        # Direct time references
+        if any(indicator in query_lower for indicator in self.time_indicators):
+            return True
+
+        # Check conversation context for time-related discussions
+        if conversation_history:
+            recent_messages = conversation_history[-3:]  # Last 3 messages
+            context_text = " ".join([msg.get('content', '') for msg in recent_messages]).lower()
+            if any(word in context_text for word in self.time_indicators):
+                return True
+
+        return False
+
+    def should_include_weather_context(self, user_query: str,
+                                       conversation_history: List[Dict] = None) -> bool:
+        """Determine if weather context is relevant to the query"""
+        query_lower = user_query.lower()
+
+        # Direct weather references
+        if any(indicator in query_lower for indicator in self.weather_indicators):
+            return True
+
+        # Location + activity combinations that often involve weather
+        location_mentioned = any(loc.lower() in query_lower for loc in
+                                 ['new york', 'london', 'tokyo', 'paris', 'los angeles', 'sydney', 'singapore', 'mumbai'])
+        activity_mentioned = any(activity in query_lower for activity in
+                                 ['outdoor', 'outside', 'walk', 'run', 'travel', 'trip', 'plans', 'going'])
+
+        if location_mentioned and activity_mentioned:
+            return True
+
+        # Check conversation context
+        if conversation_history:
+            recent_messages = conversation_history[-3:]
+            context_text = " ".join([msg.get('content', '') for msg in recent_messages]).lower()
+            if any(word in context_text for word in self.weather_indicators):
+                return True
+
+        return False
+
+    def extract_location_if_relevant(self, user_query: str) -> Optional[str]:
+        """Extract location if query suggests location-specific context"""
+        # Common locations
+        locations = {
+            'new york': ['new york', 'ny', 'nyc'],
+            'london': ['london', 'uk', 'england'],
+            'tokyo': ['tokyo', 'japan', 'jp'],
+            'paris': ['paris', 'france', 'fr'],
+            'los angeles': ['los angeles', 'la', 'california'],
+            'sydney': ['sydney', 'australia', 'au'],
+            'singapore': ['singapore', 'sg'],
+            'mumbai': ['mumbai', 'india', 'in']
+        }
+
+        query_lower = user_query.lower()
+        for location, aliases in locations.items():
+            if any(alias in query_lower for alias in aliases):
+                return location
+
+        return None
+
+    def get_relevant_context(self, user_query: str,
+                             conversation_history: List[Dict] = None) -> Dict[str, Any]:
+        """Get only the context that's actually relevant"""
+        context = {
+            'include_time': self.should_include_time_context(user_query, conversation_history),
+            'include_weather': self.should_include_weather_context(user_query, conversation_history),
+            'detected_location': self.extract_location_if_relevant(user_query),
+            'timestamp': time.time()
+        }
+
+        # Add actual context data only if needed
+        if context['include_time']:
+            now = datetime.now()
+            context['time_data'] = {
+                'current_time': now.strftime("%A, %B %d, %Y at %I:%M %p"),
+                'day_of_week': now.strftime("%A"),
+                'is_business_hours': 9 <= now.hour <= 17
+            }
+
+        return context
+
+# Global instance
+smart_context = SmartContextDetector()
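
A hypothetical check of the detector's behaviour (queries are illustration only):

    from src.services.smart_context import smart_context

    ctx = smart_context.get_relevant_context("What time is it in Paris?")
    # ctx['include_time'] is True ('time' matches a time indicator),
    # ctx['detected_location'] == 'paris', and ctx['time_data'] holds the
    # formatted server-local time.

    ctx = smart_context.get_relevant_context("Refactor this function for me")
    # Both include_time and include_weather are False, so callers add no context.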