rdune71 committed
Commit 8c617d5 · 1 Parent(s): 0757010

Implemented timestamp injection at multiple levels to ensure AI always has access to current date/time

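The change repeats one small pattern everywhere: build a system message carrying the current wall-clock time and prepend it to the message list before the model call. Since both the coordinator and the providers do this, a request routed through both layers carries the timestamp twice — redundancy the commit title ("at multiple levels") suggests is deliberate. A minimal standalone sketch of the pattern (the make_time_context helper name is illustrative, not something this commit adds):

from datetime import datetime

def make_time_context() -> dict:
    # Illustrative helper: builds the same system message each call site
    # in this commit constructs inline.
    current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
    return {"role": "system", "content": f"[Current Date & Time: {current_time}]"}

# Prepend, so the model sees the timestamp ahead of the conversation turns.
conversation = [make_time_context()] + [{"role": "user", "content": "What day is it?"}]
print(conversation[0]["content"])  # e.g. [Current Date & Time: Friday, June 06, 2025 at 03:15 PM]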
core/coordinator.py CHANGED
@@ -123,7 +123,14 @@ Your role is to: 1. Provide immediate, concise responses using available informa
         try:
             # Get conversation history
             session = session_manager.get_session(user_id)
-            conversation_history = session.get("conversation", []).copy()
+
+            # Inject current time into context
+            current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+            time_context = {
+                "role": "system",
+                "content": f"[Current Date & Time: {current_time}]"
+            }
+            conversation_history = [time_context] + session.get("conversation", []).copy()
 
             yield {
                 'type': 'coordination_status',
@@ -253,6 +260,14 @@ Your role is to: 1. Provide immediate, concise responses using available informa
         # Prepare enhanced conversation for HF with hierarchical context
         enhanced_history = history.copy()
 
+        # Inject current time into HF context too
+        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+        time_context = {
+            "role": "system",
+            "content": f"[Current Date & Time: {current_time}]"
+        }
+        enhanced_history = [time_context] + enhanced_history
+
         # Add system instructions for HF
         enhanced_history.insert(0, {
             "role": "system",
@@ -310,6 +325,14 @@ Your role is to: 1. Provide immediate, concise responses using available informa
         # Prepare conversation with hierarchical context
         enhanced_history = history.copy()
 
+        # Inject current time into Ollama context too
+        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+        time_context = {
+            "role": "system",
+            "content": f"[Current Date & Time: {current_time}]"
+        }
+        enhanced_history = [time_context] + enhanced_history
+
         # Add system instruction for Ollama's role
         enhanced_history.insert(0, {
             "role": "system",
core/providers/huggingface.py CHANGED
@@ -1,5 +1,6 @@
 import time
 import logging
+from datetime import datetime
 from typing import List, Dict, Optional, Union
 from core.providers.base import LLMProvider
 from utils.config import config
@@ -13,18 +14,18 @@ except ImportError:
 
 class HuggingFaceProvider(LLMProvider):
     """Hugging Face LLM provider implementation"""
-
+
     def __init__(self, model_name: str, timeout: int = 30, max_retries: int = 3):
         super().__init__(model_name, timeout, max_retries)
         logger.info(f"Initializing HuggingFaceProvider with:")
         logger.info(f"  HF_API_URL: {config.hf_api_url}")
         logger.info(f"  HF_TOKEN SET: {bool(config.hf_token)}")
-
+
         if not HUGGINGFACE_SDK_AVAILABLE:
             raise ImportError("Hugging Face provider requires 'openai' package")
         if not config.hf_token:
             raise ValueError("HF_TOKEN not set - required for Hugging Face provider")
-
+
         # Make sure NO proxies parameter is included
         try:
             self.client = OpenAI(
@@ -66,10 +67,15 @@ class HuggingFaceProvider(LLMProvider):
 
     def _generate_impl(self, prompt: str, conversation_history: List[Dict]) -> str:
         """Implementation of synchronous generation with proper configuration"""
+        # Inject current time as first message
+        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+        time_context = {"role": "system", "content": f"[Current Date & Time: {current_time}]"}
+        enhanced_history = [time_context] + conversation_history
+
         try:
             response = self.client.chat.completions.create(
                 model=self.model_name,
-                messages=conversation_history,
+                messages=enhanced_history,
                 max_tokens=8192,  # Set to 8192 as requested
                 temperature=0.7,
                 top_p=0.9,
@@ -85,7 +91,7 @@ class HuggingFaceProvider(LLMProvider):
                 # Retry once after waiting
                 response = self.client.chat.completions.create(
                     model=self.model_name,
-                    messages=conversation_history,
+                    messages=enhanced_history,
                     max_tokens=8192,  # Set to 8192 as requested
                     temperature=0.7,
                     top_p=0.9,
@@ -98,10 +104,15 @@ class HuggingFaceProvider(LLMProvider):
 
     def _stream_generate_impl(self, prompt: str, conversation_history: List[Dict]) -> List[str]:
         """Implementation of streaming generation with proper configuration"""
+        # Inject current time as first message
+        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+        time_context = {"role": "system", "content": f"[Current Date & Time: {current_time}]"}
+        enhanced_history = [time_context] + conversation_history
+
         try:
             response = self.client.chat.completions.create(
                 model=self.model_name,
-                messages=conversation_history,
+                messages=enhanced_history,
                 max_tokens=8192,  # Set to 8192 as requested
                 temperature=0.7,
                 top_p=0.9,
@@ -109,13 +120,13 @@ class HuggingFaceProvider(LLMProvider):
                 presence_penalty=0.1,
                 stream=True  # Enable streaming
             )
-
+
             chunks = []
             for chunk in response:
                 content = chunk.choices[0].delta.content
                 if content:
                     chunks.append(content)
-
+
             return chunks
         except Exception as e:
             # Handle scale-to-zero behavior
@@ -125,7 +136,7 @@ class HuggingFaceProvider(LLMProvider):
                 # Retry once after waiting
                 response = self.client.chat.completions.create(
                     model=self.model_name,
-                    messages=conversation_history,
+                    messages=enhanced_history,
                     max_tokens=8192,  # Set to 8192 as requested
                     temperature=0.7,
                     top_p=0.9,
@@ -133,13 +144,13 @@ class HuggingFaceProvider(LLMProvider):
                 presence_penalty=0.1,
                 stream=True  # Enable streaming
             )
-
+
             chunks = []
             for chunk in response:
                 content = chunk.choices[0].delta.content
                 if content:
                     chunks.append(content)
-
+
             return chunks
         else:
             raise
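A placement detail worth seeing in isolation: enhanced_history is built before the try block, so the scale-to-zero retry reuses the same timestamped history rather than falling back to the raw conversation_history. A condensed sketch of that shape (generate_once is illustrative; the real wait/detection logic is outside these hunks):

import time
from datetime import datetime

def generate_once(client, model_name, conversation_history):
    # Build once, ahead of the try, so both attempts share it.
    now = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
    time_context = {"role": "system", "content": f"[Current Date & Time: {now}]"}
    enhanced_history = [time_context] + conversation_history
    try:
        return client.chat.completions.create(model=model_name, messages=enhanced_history)
    except Exception:
        time.sleep(30)  # stand-in for the provider's scale-to-zero wait
        return client.chat.completions.create(model=model_name, messages=enhanced_history)

Because [time_context] + conversation_history builds a new list, the caller's history is never mutated, so repeated calls don't accumulate stale timestamps.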
core/providers/ollama.py CHANGED
@@ -1,6 +1,7 @@
 import requests
 import logging
 import re
+from datetime import datetime
 from typing import List, Dict, Optional, Union
 from core.providers.base import LLMProvider
 from utils.config import config
@@ -9,7 +10,7 @@ logger = logging.getLogger(__name__)
 
 class OllamaProvider(LLMProvider):
     """Ollama LLM provider implementation"""
-
+
     def __init__(self, model_name: str, timeout: int = 60, max_retries: int = 3):  # Increased timeout from 30 to 60
         super().__init__(model_name, timeout, max_retries)
         self.host = self._sanitize_host(config.ollama_host or "http://localhost:11434")
@@ -77,6 +78,12 @@ class OllamaProvider(LLMProvider):
         """Implementation of synchronous generation"""
         url = f"{self.host}/api/chat"
         messages = conversation_history.copy()
+
+        # Inject current time as first message
+        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+        time_context = {"role": "system", "content": f"[Current Date & Time: {current_time}]"}
+        messages = [time_context] + messages
+
         # Add the current prompt if not already in history
         if not messages or messages[-1].get("content") != prompt:
             messages.append({"role": "user", "content": prompt})
@@ -99,6 +106,12 @@ class OllamaProvider(LLMProvider):
         """Implementation of streaming generation"""
         url = f"{self.host}/api/chat"
         messages = conversation_history.copy()
+
+        # Inject current time as first message
+        current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")
+        time_context = {"role": "system", "content": f"[Current Date & Time: {current_time}]"}
+        messages = [time_context] + messages
+
         # Add the current prompt if not already in history
         if not messages or messages[-1].get("content") != prompt:
             messages.append({"role": "user", "content": prompt})
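For reference, the injected message simply rides along in the standard Ollama /api/chat payload. A rough picture of what the provider now sends (model name and prompt are illustrative; the actual payload assembly lives outside these hunks):

import requests
from datetime import datetime

host = "http://localhost:11434"
current_time = datetime.now().strftime("%A, %B %d, %Y at %I:%M %p")

payload = {
    "model": "llama3",  # illustrative model name
    "messages": [
        {"role": "system", "content": f"[Current Date & Time: {current_time}]"},
        {"role": "user", "content": "What's today's date?"},
    ],
    "stream": False,
}
resp = requests.post(f"{host}/api/chat", json=payload, timeout=60)
print(resp.json()["message"]["content"])  # non-streaming responses return a single message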