Fix bug
Fixes https://github.com/HKUDS/LightRAG/issues/306
Main changes:

- Added validation when storing text chunk data, ensuring only valid data is stored
- Added an empty-list check before processing text chunks
- Filter out invalid data before truncating text chunks
- Added more warning log messages

Query changes:

- Added validity checks on chunks, filtering out invalid chunks (see the sketch after this list)
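The pattern behind these checks is small enough to show in isolation. Below is a minimal, self-contained sketch; the in-memory `store`, the `get_by_ids` stand-in, and the sample records are hypothetical, standing in for LightRAG's `text_chunks_db`, whose lookups can return `None` for a missing id or a record without a `"content"` key when storage is damaged.

```python
# Minimal sketch of the validity filter this commit adds. The in-memory
# store and sample data are hypothetical; in LightRAG the chunks come from
# text_chunks_db.get_by_ids(), which can yield None (missing id) or a
# record without "content" (damaged storage).

store = {
    "chunk-1": {"content": "LightRAG builds a graph index."},
    "chunk-2": {"tokens": 120},  # damaged record: no "content" key
}

def get_by_ids(ids: list[str]) -> list[dict | None]:
    """Stand-in for the KV storage lookup: missing ids come back as None."""
    return [store.get(i) for i in ids]

chunks = get_by_ids(["chunk-1", "chunk-2", "chunk-3"])

# The commit's guard: keep only chunks that exist and carry content.
valid_chunks = [c for c in chunks if c is not None and "content" in c]

assert valid_chunks == [{"content": "LightRAG builds a graph index."}]
```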
lightrag/operate.py (+46 -13):
```diff
@@ -990,23 +990,37 @@ async def _find_related_text_unit_from_relationships(
     for index, unit_list in enumerate(text_units):
         for c_id in unit_list:
             if c_id not in all_text_units_lookup:
-                all_text_units_lookup[c_id] = {
-                    "data": await text_chunks_db.get_by_id(c_id),
-                    "order": index,
-                }
+                chunk_data = await text_chunks_db.get_by_id(c_id)
+                # Only store valid data
+                if chunk_data is not None and "content" in chunk_data:
+                    all_text_units_lookup[c_id] = {
+                        "data": chunk_data,
+                        "order": index,
+                    }
+
+    if not all_text_units_lookup:
+        logger.warning("No valid text chunks found")
+        return []
 
-    if any([v is None for v in all_text_units_lookup.values()]):
-        logger.warning("Text chunks are missing, maybe the storage is damaged")
-    all_text_units = [
-        {"id": k, **v} for k, v in all_text_units_lookup.items() if v is not None
-    ]
+    all_text_units = [{"id": k, **v} for k, v in all_text_units_lookup.items()]
     all_text_units = sorted(all_text_units, key=lambda x: x["order"])
-    all_text_units = truncate_list_by_token_size(
-        all_text_units,
+
+    # Ensure all text chunks have content
+    valid_text_units = [
+        t for t in all_text_units if t["data"] is not None and "content" in t["data"]
+    ]
+
+    if not valid_text_units:
+        logger.warning("No valid text chunks after filtering")
+        return []
+
+    truncated_text_units = truncate_list_by_token_size(
+        valid_text_units,
         key=lambda x: x["data"]["content"],
         max_token_size=query_param.max_token_for_text_unit,
     )
-    all_text_units: list[TextChunkSchema] = [t["data"] for t in all_text_units]
+
+    all_text_units: list[TextChunkSchema] = [t["data"] for t in truncated_text_units]
 
     return all_text_units
 
@@ -1050,24 +1064,43 @@ async def naive_query(
     results = await chunks_vdb.query(query, top_k=query_param.top_k)
     if not len(results):
         return PROMPTS["fail_response"]
+
     chunks_ids = [r["id"] for r in results]
     chunks = await text_chunks_db.get_by_ids(chunks_ids)
 
+    # Filter out invalid chunks
+    valid_chunks = [
+        chunk for chunk in chunks if chunk is not None and "content" in chunk
+    ]
+
+    if not valid_chunks:
+        logger.warning("No valid chunks found after filtering")
+        return PROMPTS["fail_response"]
+
     maybe_trun_chunks = truncate_list_by_token_size(
-        chunks,
+        valid_chunks,
         key=lambda x: x["content"],
         max_token_size=query_param.max_token_for_text_unit,
     )
+
+    if not maybe_trun_chunks:
+        logger.warning("No chunks left after truncation")
+        return PROMPTS["fail_response"]
+
     logger.info(f"Truncate {len(chunks)} to {len(maybe_trun_chunks)} chunks")
     section = "\n--New Chunk--\n".join([c["content"] for c in maybe_trun_chunks])
+
     if query_param.only_need_context:
         return section
+
     sys_prompt_temp = PROMPTS["naive_rag_response"]
     sys_prompt = sys_prompt_temp.format(
         content_data=section, response_type=query_param.response_type
     )
+
     if query_param.only_need_prompt:
         return sys_prompt
+
     response = await use_model_func(
         query,
         system_prompt=sys_prompt,
```
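For context on the truncation guard in both hunks: `truncate_list_by_token_size` (from `lightrag/utils.py`) keeps only a prefix of the list that fits the token budget, so it can legitimately return an empty list. The sketch below is a simplified stand-in, not the library's implementation; the real helper counts tokens with tiktoken, while this one approximates tokens by whitespace splitting to stay self-contained.

```python
from typing import Any, Callable

def truncate_list_by_token_size(
    list_data: list[Any], key: Callable[[Any], str], max_token_size: int
) -> list[Any]:
    """Simplified stand-in: keep the longest prefix whose token total fits.

    The real LightRAG helper counts tokens with tiktoken; whitespace
    splitting is used here only to keep the sketch runnable on its own.
    """
    tokens = 0
    for i, item in enumerate(list_data):
        tokens += len(key(item).split())  # stand-in token count
        if tokens > max_token_size:
            return list_data[:i]
    return list_data

chunks = [{"content": "a chunk with more tokens than the budget allows"}]
# A budget smaller than the first chunk leaves nothing, which is exactly
# the empty result the new `if not maybe_trun_chunks` guard reports.
print(truncate_list_by_token_size(
    chunks, key=lambda x: x["content"], max_token_size=3
))  # []
```

This is why `naive_query` now checks `if not maybe_trun_chunks` before building the prompt: a single oversized chunk combined with a small `max_token_for_text_unit` budget would otherwise produce an empty context silently.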