zrguo committed
Commit 5370a5b · 2 parents: d5271fb, 28b7304

Merge pull request #634 from magicyuan876/main

Files changed (2):
  1. lightrag/lightrag.py +2 -3
  2. lightrag/utils.py +1 -3
lightrag/lightrag.py CHANGED
@@ -469,9 +469,8 @@ class LightRAG:
                 error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
                 logger.error(error_msg)
                 continue
-
-            finally:
-                # Ensure all indexes are updated after each document
+            else:
+                # Only update index when processing succeeds
                 await self._insert_done()
 
     def insert_custom_chunks(self, full_text: str, text_chunks: list[str]):
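The behavioral point of this hunk: a `finally:` block runs even when the `except` branch exits the iteration via `continue`, so the old code updated indexes after every document, including failed ones. An `else:` block runs only when the `try` body completes without raising. A minimal, self-contained sketch of that control-flow difference (the per-document pipeline and the index helper are hypothetical stand-ins, not LightRAG's actual internals):

```python
import asyncio
import logging
import traceback

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.ERROR)

async def process_document(doc_id: str) -> None:
    # Hypothetical stand-in for the per-document processing pipeline.
    if doc_id == "bad":
        raise ValueError("simulated failure")

async def insert_done() -> None:
    # Hypothetical stand-in for self._insert_done().
    print("indexes updated")

async def process_all(doc_ids: list[str]) -> None:
    for doc_id in doc_ids:
        try:
            await process_document(doc_id)
        except Exception as e:
            error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
            logger.error(error_msg)
            continue
        else:
            # `else` runs only when the try block raised nothing; a
            # `finally` here would run even after the `continue` above,
            # updating indexes for documents that failed.
            await insert_done()

asyncio.run(process_all(["good", "bad", "also-good"]))
```

Running the sketch logs one failure for "bad" and prints "indexes updated" twice, once per successful document.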
lightrag/utils.py CHANGED
@@ -479,9 +479,7 @@ async def handle_cache(hashing_kv, args_hash, prompt, mode="default"):
     quantized = min_val = max_val = None
     if is_embedding_cache_enabled:
         # Use embedding cache
-        embedding_model_func = hashing_kv.global_config[
-            "embedding_func"
-        ].func  # ["func"]
+        embedding_model_func = hashing_kv.global_config["embedding_func"]["func"]
         llm_model_func = hashing_kv.global_config.get("llm_model_func")
 
         current_embedding = await embedding_model_func([prompt])
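This hunk swaps attribute access (`.func`) for key access (`["func"]`) on `global_config["embedding_func"]`. A plausible reason, which is an assumption on my part and not stated in the PR: if the config object is flattened into plain dicts (for example via `dataclasses.asdict`), the embedding wrapper's fields become dictionary keys, and attribute access raises `AttributeError`. A self-contained sketch with a hypothetical `EmbeddingFunc` shape:

```python
from dataclasses import dataclass, asdict
from typing import Any, Callable

# Hypothetical wrapper shape; LightRAG's actual EmbeddingFunc may differ.
@dataclass
class EmbeddingFunc:
    embedding_dim: int
    func: Callable[..., Any]

async def fake_embed(texts: list[str]) -> list[list[float]]:
    return [[0.0, 0.0, 0.0] for _ in texts]

# Once flattened with asdict, the wrapper's fields are keys, not attributes.
global_config = {"embedding_func": asdict(EmbeddingFunc(3, fake_embed))}

try:
    global_config["embedding_func"].func  # old access pattern
except AttributeError as e:
    print("attribute access fails:", e)

embedding_model_func = global_config["embedding_func"]["func"]  # fixed pattern
print(embedding_model_func.__name__)  # -> fake_embed
```

Under that assumption, the old `.func` lookup fails at the first cached-embedding request, while the `["func"]` form retrieves the callable from the dict.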