yangdx committed
Commit 81c566b · 1 Parent(s): b66d9e3

Improve OpenAI LLM logging with more detailed debug information

Files changed (1):
  1. lightrag/llm/openai.py +7 -2
lightrag/llm/openai.py CHANGED
@@ -90,11 +90,13 @@ async def openai_complete_if_cache(
     messages.extend(history_messages)
     messages.append({"role": "user", "content": prompt})
 
-    logger.debug("===== Sending Query to LLM =====")
+    logger.debug("===== Entering func of LLM =====")
     logger.debug(f"Model: {model} Base URL: {base_url}")
     logger.debug(f"Additional kwargs: {kwargs}")
-    verbose_debug(f"Query: {prompt}")
+    logger.debug(f"Num of history messages: {len(history_messages)}")
     verbose_debug(f"System prompt: {system_prompt}")
+    verbose_debug(f"Query: {prompt}")
+    logger.debug("===== Sending Query to LLM =====")
 
     try:
         if "response_format" in kwargs:
@@ -163,6 +165,9 @@ async def openai_complete_if_cache(
             "total_tokens": getattr(response.usage, "total_tokens", 0),
         }
         token_tracker.add_usage(token_counts)
+
+    logger.debug(f"Response content len: {len(content)}")
+    verbose_debug(f"Response: {response}")
 
     return content
 
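For context, a minimal sketch of how the new debug output could be surfaced when calling this function. It assumes verbose_debug is the level-gated helper from lightrag.utils, switched on via set_verbose_debug, and that openai_complete_if_cache keeps its usual signature; the model name, prompt, and API key below are illustrative placeholders, not part of this commit.

import asyncio
import logging

from lightrag.llm.openai import openai_complete_if_cache
from lightrag.utils import set_verbose_debug  # assumed gate for verbose_debug()

# DEBUG level surfaces the plain logger.debug(...) lines added in this commit:
# "Entering func", model/base URL, kwargs, history count, response content length.
logging.basicConfig(level=logging.DEBUG)

# Assumption: this switch additionally enables the verbose_debug(...) payloads
# (system prompt, full query, raw response object).
set_verbose_debug(True)

async def main():
    # Illustrative arguments; substitute a real model, endpoint, and key.
    content = await openai_complete_if_cache(
        "gpt-4o-mini",
        "What does LightRAG do?",
        system_prompt="You are a helpful assistant.",
        history_messages=[],
        api_key="sk-...",
    )
    print(content)

asyncio.run(main())

With both switches on, the log shows the banner lines and metadata at DEBUG level, plus the prompt and raw response only when verbose debugging is enabled, which is the split this commit formalizes.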