Datasets:
Document dangling CITES targets and fix PyG example to filter them
Browse files
README.md
CHANGED
|
@@ -128,6 +128,14 @@ Relation semantics:
|
|
| 128 |
- `BELONGS_TO` — `Paper → Concept`
|
| 129 |
- `COLLABORATES_WITH` — `Author → Author` (co-authorship; symmetric, may appear in both directions)
|
| 130 |
| 131 |
## Usage
|
| 132 |
|
| 133 |
### Load with the `datasets` library
|
|
@@ -159,7 +167,7 @@ edges = pd.read_parquet("hf://datasets/jugalgajjar/CS-Knowledge-Graph-Dataset/10
|
|
| 159 |
### Build a PyTorch Geometric graph
|
| 160 |
|
| 161 |
```python
|
| 162 |
-
import
|
| 163 |
import torch
|
| 164 |
from torch_geometric.data import HeteroData
|
| 165 |
from datasets import load_dataset
|
|
@@ -168,6 +176,7 @@ scale = "10k"
|
|
| 168 |
nodes = load_dataset("jugalgajjar/CS-Knowledge-Graph-Dataset", f"{scale}_nodes", split="train").to_pandas()
|
| 169 |
edges = load_dataset("jugalgajjar/CS-Knowledge-Graph-Dataset", f"{scale}_edges", split="train").to_pandas()
|
| 170 |
|
|
|
|
| 171 |
data = HeteroData()
|
| 172 |
id_maps = {}
|
| 173 |
for ntype, group in nodes.groupby("node_type"):
|
|
@@ -175,17 +184,23 @@ for ntype, group in nodes.groupby("node_type"):
|
|
| 175 |
id_maps[ntype] = {nid: i for i, nid in enumerate(ids)}
|
| 176 |
data[ntype].num_nodes = len(ids)
|
| 177 |
|
| 178 |
-
#
|
| 179 |
type_from_prefix = {"paper": "Paper", "author": "Author", "venue": "Venue", "concept": "Concept"}
|
| 180 |
def ntype_of(nid: str) -> str:
|
| 181 |
return type_from_prefix[nid.split("_", 1)[0]]
|
| 182 |
|
| 183 |
for relation, group in edges.groupby("relation"):
|
| 184 |
src_type = ntype_of(group["source"].iloc[0])
|
| 185 |
dst_type = ntype_of(group["target"].iloc[0])
|
| 186 |
-
src = group["source"].map(id_maps[src_type]).to_numpy()
|
| 187 |
-
dst = group["target"].map(id_maps[dst_type]).to_numpy()
|
| 188 |
-
data[src_type, relation, dst_type].edge_index = torch.tensor([src, dst])
|
|
|
|
|
|
|
| 189 |
```
|
| 190 |
|
| 191 |
## Raw SQLite databases
|
|
|
|
| 128 |
- `BELONGS_TO` — `Paper → Concept`
|
| 129 |
- `COLLABORATES_WITH` — `Author → Author` (co-authorship; symmetric, may appear in both directions)
|
| 130 |
|
| 131 |
+
**Dangling `CITES` targets.** Each scale is built from a Computer Science slice
|
| 132 |
+
of OpenAlex, so the `nodes` table only contains CS papers (plus their authors,
|
| 133 |
+
venues, and concepts). However, those CS papers may cite papers from outside
|
| 134 |
+
CS — those external papers appear as `target` in `CITES` edges but are **not**
|
| 135 |
+
present in the `nodes` table. Filter or add placeholder nodes as appropriate
|
| 136 |
+
for your task. Sources are always present in `nodes`; only `CITES` targets can
|
| 137 |
+
be dangling.
|
| 138 |
+
|
| 139 |
## Usage
|
| 140 |
|
| 141 |
### Load with the `datasets` library
|
|
|
|
| 167 |
### Build a PyTorch Geometric graph
|
| 168 |
|
| 169 |
```python
|
| 170 |
+
import numpy as np
|
| 171 |
import torch
|
| 172 |
from torch_geometric.data import HeteroData
|
| 173 |
from datasets import load_dataset
|
|
|
|
| 176 |
nodes = load_dataset("jugalgajjar/CS-Knowledge-Graph-Dataset", f"{scale}_nodes", split="train").to_pandas()
|
| 177 |
edges = load_dataset("jugalgajjar/CS-Knowledge-Graph-Dataset", f"{scale}_edges", split="train").to_pandas()
|
| 178 |
|
| 179 |
+
# Build per-type id -> contiguous index maps
|
| 180 |
data = HeteroData()
|
| 181 |
id_maps = {}
|
| 182 |
for ntype, group in nodes.groupby("node_type"):
|
|
|
|
| 184 |
id_maps[ntype] = {nid: i for i, nid in enumerate(ids)}
|
| 185 |
data[ntype].num_nodes = len(ids)
|
| 186 |
|
| 187 |
+
# Each node_id is prefixed with its type
|
| 188 |
type_from_prefix = {"paper": "Paper", "author": "Author", "venue": "Venue", "concept": "Concept"}
|
| 189 |
def ntype_of(nid: str) -> str:
|
| 190 |
return type_from_prefix[nid.split("_", 1)[0]]
|
| 191 |
|
| 192 |
+
# Drop edges whose target isn't in the node set (only CITES targets can dangle — cross-domain citations).
|
| 193 |
+
node_id_set = set(nodes["node_id"])
|
| 194 |
+
edges = edges[edges["target"].isin(node_id_set)].reset_index(drop=True)
|
| 195 |
+
|
| 196 |
for relation, group in edges.groupby("relation"):
|
| 197 |
src_type = ntype_of(group["source"].iloc[0])
|
| 198 |
dst_type = ntype_of(group["target"].iloc[0])
|
| 199 |
+
src = group["source"].map(id_maps[src_type]).to_numpy(dtype=np.int64)
|
| 200 |
+
dst = group["target"].map(id_maps[dst_type]).to_numpy(dtype=np.int64)
|
| 201 |
+
data[src_type, relation, dst_type].edge_index = torch.from_numpy(np.stack([src, dst]))
|
| 202 |
+
|
| 203 |
+
print(data)
|
| 204 |
```
|
| 205 |
|
| 206 |
## Raw SQLite databases
|