yangdx committed
Commit 10a6fea · 1 Parent(s): a264709

Fix linting
Files changed (2)
  1. lightrag/operate.py +1 -2
  2. lightrag/utils.py +6 -6
lightrag/operate.py CHANGED
@@ -148,7 +148,7 @@ async def _handle_entity_relation_summary(
 
     # Use LLM function with cache
     summary = await use_llm_func_with_cache(
-        use_prompt,
+        use_prompt,
         use_llm_func,
         llm_response_cache=llm_response_cache,
         max_tokens=summary_max_tokens,
@@ -446,7 +446,6 @@ async def extract_entities(
     pipeline_status_lock=None,
     llm_response_cache: BaseKVStorage | None = None,
 ) -> None:
-
     use_llm_func: callable = global_config["llm_model_func"]
     entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]
 
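For context, a minimal sketch of the call shape this hunk settles on: use_prompt is passed positionally as the prompt text, ahead of the LLM callable. The wrapper function summarize_description and its default values below are illustrative stand-ins; only the inner call mirrors the diff.

# Hypothetical wrapper illustrating the call pattern from _handle_entity_relation_summary;
# summarize_description and its defaults are stand-ins, not code from this commit.
from lightrag.utils import use_llm_func_with_cache


async def summarize_description(
    use_prompt: str,
    use_llm_func,
    llm_response_cache=None,
    summary_max_tokens: int = 500,
) -> str:
    # Use LLM function with cache (prompt first, then the LLM callable)
    summary = await use_llm_func_with_cache(
        use_prompt,
        use_llm_func,
        llm_response_cache=llm_response_cache,
        max_tokens=summary_max_tokens,
    )
    return summary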
lightrag/utils.py CHANGED
@@ -913,18 +913,18 @@ def lazy_external_import(module_name: str, class_name: str) -> Callable[..., Any]:
 
 
 async def use_llm_func_with_cache(
-    input_text: str,
+    input_text: str,
     use_llm_func: callable,
-    llm_response_cache: 'BaseKVStorage | None' = None,
+    llm_response_cache: "BaseKVStorage | None" = None,
     max_tokens: int = None,
     history_messages: list[dict[str, str]] = None,
-    cache_type: str = "extract"
+    cache_type: str = "extract",
 ) -> str:
     """Call LLM function with cache support
-
+
     If cache is available and enabled (determined by handle_cache based on mode),
     retrieve result from cache; otherwise call LLM function and save result to cache.
-
+
     Args:
         input_text: Input text to send to LLM
         use_llm_func: LLM function to call
@@ -932,7 +932,7 @@ async def use_llm_func_with_cache(
         max_tokens: Maximum tokens for generation
         history_messages: History messages list
         cache_type: Type of cache
-
+
     Returns:
         LLM response text
     """