yangdx committed
Commit 8eee239 · 1 Parent(s): bc4398a

Replace verbose_debug with logger.debug for token logging.


- Removed unused verbose_debug import
- Updated debug logging in kg_query
- Updated debug logging in mix_kg_vector_query
- Updated debug logging in kg_query_with_keywords
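
The change only reroutes these messages from the custom verbose_debug helper onto the standard logger, so they surface whenever the library's logger is set to DEBUG. A minimal sketch of enabling that from application code, assuming lightrag's shared logger is logging.getLogger("lightrag") as defined in lightrag/utils.py:

    import logging

    # Prompt-token messages are now emitted at DEBUG level, so they only
    # appear when the "lightrag" logger is set to DEBUG.
    # Assumption: the library's shared logger is logging.getLogger("lightrag").
    logging.basicConfig(level=logging.INFO)
    logging.getLogger("lightrag").setLevel(logging.DEBUG)

    # The query paths touched here (kg_query, mix_kg_vector_query,
    # kg_query_with_keywords) then log lines such as:
    #   [kg_query]Prompt Tokens: 1234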

Files changed (1)
  1. lightrag/operate.py +3 -4
lightrag/operate.py CHANGED
@@ -24,7 +24,6 @@ from .utils import (
     CacheData,
     statistic_data,
     get_conversation_turns,
-    verbose_debug,
 )
 from .base import (
     BaseGraphStorage,
@@ -689,7 +688,7 @@ async def kg_query(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    verbose_debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
+    logger.debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
 
     response = await use_model_func(
         query,
@@ -978,7 +977,7 @@ async def mix_kg_vector_query(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    verbose_debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
+    logger.debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
 
     # 6. Generate response
     response = await use_model_func(
@@ -1808,7 +1807,7 @@ async def kg_query_with_keywords(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    verbose_debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
+    logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
 
     response = await use_model_func(
         query,
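
For reference, the value being logged is just the tiktoken length of the combined query and system prompt. A rough standalone equivalent, assuming a cl100k_base encoding (encode_string_by_tiktoken in lightrag may resolve the encoding from a model name instead):

    import tiktoken

    def count_prompt_tokens(query: str, sys_prompt: str) -> int:
        # Approximation of len(encode_string_by_tiktoken(query + sys_prompt));
        # the encoding name here is an assumption, not lightrag's exact default.
        enc = tiktoken.get_encoding("cl100k_base")
        return len(enc.encode(query + sys_prompt))

    print(count_prompt_tokens("example query", "example system prompt"))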