DavIvek committed · Commit 4852dde · Parent(s): 4644ee6

wip fix Memgraph get_knowledge_graph issues

Files changed:
- lightrag/api/README.md (+3 -2)
- lightrag/kg/memgraph_impl.py (+21 -15)
lightrag/api/README.md
CHANGED
@@ -179,9 +179,9 @@ The command-line `workspace` argument and the `WORKSPACE` environment variable i
 - **For local file-based databases, data isolation is achieved through workspace subdirectories:** `JsonKVStorage`, `JsonDocStatusStorage`, `NetworkXStorage`, `NanoVectorDBStorage`, `FaissVectorDBStorage`.
 - **For databases that store data in collections, it's done by adding a workspace prefix to the collection name:** `RedisKVStorage`, `RedisDocStatusStorage`, `MilvusVectorDBStorage`, `QdrantVectorDBStorage`, `MongoKVStorage`, `MongoDocStatusStorage`, `MongoVectorDBStorage`, `MongoGraphStorage`, `PGGraphStorage`.
 - **For relational databases, data isolation is achieved by adding a `workspace` field to the tables for logical data separation:** `PGKVStorage`, `PGVectorStorage`, `PGDocStatusStorage`.
-- **For
+- **For graph databases, logical data isolation is achieved through labels:** `Neo4JStorage`, `MemgraphStorage`
 
-To maintain compatibility with legacy data, the default workspace for PostgreSQL is `default` and for Neo4j is `base` when no workspace is configured. For all external storages, the system provides dedicated workspace environment variables to override the common `WORKSPACE` environment variable configuration. These storage-specific workspace environment variables are: `REDIS_WORKSPACE`, `MILVUS_WORKSPACE`, `QDRANT_WORKSPACE`, `MONGODB_WORKSPACE`, `POSTGRES_WORKSPACE`, `NEO4J_WORKSPACE`.
+To maintain compatibility with legacy data, the default workspace for PostgreSQL is `default` and for Neo4j is `base` when no workspace is configured. For all external storages, the system provides dedicated workspace environment variables to override the common `WORKSPACE` environment variable configuration. These storage-specific workspace environment variables are: `REDIS_WORKSPACE`, `MILVUS_WORKSPACE`, `QDRANT_WORKSPACE`, `MONGODB_WORKSPACE`, `POSTGRES_WORKSPACE`, `NEO4J_WORKSPACE`, `MEMGRAPH_WORKSPACE`.
 
 ### Multiple workers for Gunicorn + Uvicorn
 

@@ -394,6 +394,7 @@ MongoKVStorage MongoDB
 NetworkXStorage NetworkX (default)
 Neo4JStorage Neo4J
 PGGraphStorage PostgreSQL with AGE plugin
+MemgraphStorage Memgraph
 ```
 
 > Testing has shown that Neo4J delivers superior performance in production environments compared to PostgreSQL with AGE plugin.
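The README change above describes label-based workspace isolation for the new Memgraph backend and a dedicated `MEMGRAPH_WORKSPACE` override. As a rough sketch of how a deployment might use this, the snippet below sets a shared workspace and a Memgraph-specific override; `WORKSPACE` and `MEMGRAPH_WORKSPACE` come from the README text, while `MEMGRAPH_URI` and the exact way the server reads these variables are assumptions for illustration only.

```python
import os

# Illustrative sketch only: a shared workspace for all storage backends,
# overridden for Memgraph through its dedicated environment variable.
os.environ.setdefault("WORKSPACE", "team_a")               # common workspace
os.environ.setdefault("MEMGRAPH_WORKSPACE", "team_a_kg")   # Memgraph-specific override

# Assumed connection setting for this sketch; check the project's env
# template for the variable names the server actually reads.
os.environ.setdefault("MEMGRAPH_URI", "bolt://localhost:7687")
```

Because the isolation is purely label-based, several LightRAG instances can point at one Memgraph database and keep their graphs separated by workspace, without provisioning extra database instances.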
lightrag/kg/memgraph_impl.py
CHANGED
@@ -435,7 +435,7 @@ class MemgraphStorage(BaseGraphStorage):
 
     async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None:
         """
-        Upsert a node in the
+        Upsert a node in the Memgraph database.
 
         Args:
             node_id: The unique identifier for the node (used as label)

@@ -448,7 +448,7 @@ class MemgraphStorage(BaseGraphStorage):
         properties = node_data
         entity_type = properties["entity_type"]
         if "entity_id" not in properties:
-            raise ValueError("
+            raise ValueError("Memgraph: node properties must contain an 'entity_id' field")
 
         try:
             async with self._driver.session(database=self._DATABASE) as session:

@@ -817,28 +817,34 @@ class MemgraphStorage(BaseGraphStorage):
                 WITH start
                 CALL {{
                     WITH start
-                    MATCH path = (start)-[*0..{max_depth}]-(node)
+                    MATCH path = (start)-[*BFS 0..{max_depth}]-(node)
                     WITH nodes(path) AS path_nodes, relationships(path) AS path_rels
                     UNWIND path_nodes AS n
                     WITH collect(DISTINCT n) AS all_nodes, collect(DISTINCT path_rels) AS all_rel_lists
                     WITH all_nodes, reduce(r = [], x IN all_rel_lists | r + x) AS all_rels
                     RETURN all_nodes, all_rels
                 }}
-                WITH all_nodes AS nodes, all_rels AS relationships, size(all_nodes) AS
+                WITH all_nodes AS nodes, all_rels AS relationships, size(all_nodes) AS total_nodes_found
                 WITH
-                … (7 removed lines not recoverable from this view)
+                    CASE
+                        WHEN total_nodes_found <= {max_nodes} THEN nodes
+                        ELSE nodes[0..{max_nodes}]
+                    END AS limited_nodes,
+                    relationships,
+                    total_nodes_found,
+                    total_nodes_found > {max_nodes} AS is_truncated
+
+                UNWIND relationships AS rel
+                WITH limited_nodes, rel, total_nodes_found, is_truncated
+                WHERE startNode(rel) IN limited_nodes AND endNode(rel) IN limited_nodes
+                WITH limited_nodes, collect(DISTINCT rel) AS limited_relationships, total_nodes_found, is_truncated
                 RETURN
-                … (4 removed lines not recoverable from this view)
+                    [node IN limited_nodes | {{node: node}}] AS node_info,
+                    limited_relationships AS relationships,
+                    total_nodes_found,
+                    is_truncated
                 """
+
             result_set = None
             try:
                 result_set = await session.run(
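To show how the reshaped `get_knowledge_graph` query might be exercised outside the storage class, here is a standalone sketch that runs a query of the same shape against Memgraph through the Bolt-compatible `neo4j` async driver (the same driver family `self._driver` appears to come from). The `*BFS` expansion, the `CASE`-based cap at `max_nodes`, and the returned keys (`node_info`, `relationships`, `total_nodes_found`, `is_truncated`) follow the diff; the start-node `MATCH` on `entity_id`, the connection settings, and the helper names are assumptions for illustration, not part of this commit.

```python
import asyncio

from neo4j import AsyncGraphDatabase  # Memgraph accepts Bolt connections from this driver

URI = "bolt://localhost:7687"  # assumed local Memgraph instance, no auth configured


def build_subgraph_query(max_depth: int, max_nodes: int) -> str:
    # Query of the same shape as the one introduced in this commit: BFS expansion
    # up to max_depth, a CASE-based cap at max_nodes, and an is_truncated flag.
    # Depth and node limits are interpolated (as in the commit) because Cypher
    # does not allow parameters in variable-length bounds; the starting MATCH
    # on entity_id is an assumption for this sketch.
    return f"""
    MATCH (start {{entity_id: $entity_id}})
    WITH start
    CALL {{
        WITH start
        MATCH path = (start)-[*BFS 0..{max_depth}]-(node)
        WITH nodes(path) AS path_nodes, relationships(path) AS path_rels
        UNWIND path_nodes AS n
        WITH collect(DISTINCT n) AS all_nodes, collect(DISTINCT path_rels) AS all_rel_lists
        WITH all_nodes, reduce(r = [], x IN all_rel_lists | r + x) AS all_rels
        RETURN all_nodes, all_rels
    }}
    WITH all_nodes AS nodes, all_rels AS relationships, size(all_nodes) AS total_nodes_found
    WITH
        CASE WHEN total_nodes_found <= {max_nodes} THEN nodes
             ELSE nodes[0..{max_nodes}] END AS limited_nodes,
        relationships, total_nodes_found,
        total_nodes_found > {max_nodes} AS is_truncated
    UNWIND relationships AS rel
    WITH limited_nodes, rel, total_nodes_found, is_truncated
    WHERE startNode(rel) IN limited_nodes AND endNode(rel) IN limited_nodes
    WITH limited_nodes, collect(DISTINCT rel) AS limited_relationships, total_nodes_found, is_truncated
    RETURN
        [node IN limited_nodes | {{node: node}}] AS node_info,
        limited_relationships AS relationships,
        total_nodes_found,
        is_truncated
    """


async def fetch_subgraph(entity_id: str, max_depth: int = 3, max_nodes: int = 1000):
    query = build_subgraph_query(max_depth, max_nodes)
    async with AsyncGraphDatabase.driver(URI, auth=None) as driver:
        async with driver.session() as session:
            result = await session.run(query, entity_id=entity_id)
            record = await result.single()  # one aggregated row, or None if nothing matched
            if record is None:
                return [], [], 0, False
            return (
                record["node_info"],
                record["relationships"],
                record["total_nodes_found"],
                record["is_truncated"],
            )


if __name__ == "__main__":
    nodes, rels, total, truncated = asyncio.run(fetch_subgraph("some entity"))
    print(f"returned {len(nodes)} nodes and {len(rels)} relationships "
          f"({total} found, truncated={truncated})")
```

The `is_truncated` flag is what lets a caller distinguish "this is the whole neighborhood" from "the neighborhood was cut off at `max_nodes`", and the `WHERE startNode(rel) IN limited_nodes AND endNode(rel) IN limited_nodes` filter keeps the returned relationships consistent with the capped node list.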