extract-entities: support local parquet/jsonl paths for mounted-bucket workflows
Browse files- README.md +16 -0
- extract-entities.py +41 -4
README.md
CHANGED
|
@@ -53,6 +53,22 @@ hf jobs uv run --flavor t4-small --secrets HF_TOKEN \
|
|
| 53 |
--batch-size 32
|
| 54 |
```
|
| 55 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
## Recommended entity-type vocabularies
|
| 57 |
|
| 58 |
GLiNER is open-vocabulary, so any string works. Some starting points:
|
|
|
|
| 53 |
--batch-size 32
|
| 54 |
```
|
| 55 |
|
| 56 |
+
## Reading from local files or a mounted bucket
|
| 57 |
+
|
| 58 |
+
The `input_dataset` argument also accepts local file paths (parquet, jsonl, json, csv). Useful when the input is staged in a [Storage Bucket](https://huggingface.co/docs/hub/storage-buckets) — typical pattern for multi-stage pipelines where an upstream Job has prepared the data:
|
| 59 |
+
|
| 60 |
+
```bash
|
| 61 |
+
hf jobs uv run --flavor t4-small --secrets HF_TOKEN \
|
| 62 |
+
-v hf://buckets/yourname/working-data:/input \
|
| 63 |
+
https://huggingface.co/datasets/uv-scripts/gliner/raw/main/extract-entities.py \
|
| 64 |
+
/input/data.parquet \
|
| 65 |
+
yourname/output-entities \
|
| 66 |
+
--text-column text --entity-types Person Organization Location \
|
| 67 |
+
--device cuda --batch-size 32
|
| 68 |
+
```
|
| 69 |
+
|
| 70 |
+
Local paths are detected heuristically — anything starting with `/`, `./`, or `../`, ending in a known data extension (`.parquet`, `.jsonl`, `.json`, `.csv`), or containing a `*` glob alongside one of those extensions is treated as a file path; otherwise the argument is interpreted as a HF dataset ID.
|
| 71 |
+
|
| 72 |
## Recommended entity-type vocabularies
|
| 73 |
|
| 74 |
GLiNER is open-vocabulary, so any string works. Some starting points:
|
extract-entities.py
CHANGED
|
@@ -63,8 +63,21 @@ def parse_args():
|
|
| 63 |
formatter_class=argparse.RawDescriptionHelpFormatter,
|
| 64 |
epilog=__doc__,
|
| 65 |
)
|
| 66 |
-
p.add_argument(
|
| 67 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
p.add_argument(
|
| 69 |
"--text-column",
|
| 70 |
default="text",
|
|
@@ -200,11 +213,35 @@ entities: list of {{
|
|
| 200 |
"""
|
| 201 |
|
| 202 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 203 |
def main():
|
| 204 |
args = parse_args()
|
| 205 |
device = resolve_device(args.device)
|
| 206 |
-
|
| 207 |
-
ds = load_dataset(args.input_dataset, split=args.split, streaming=False)
|
| 208 |
|
| 209 |
if args.text_column not in ds.column_names:
|
| 210 |
sys.exit(
|
|
|
|
| 63 |
formatter_class=argparse.RawDescriptionHelpFormatter,
|
| 64 |
epilog=__doc__,
|
| 65 |
)
|
| 66 |
+
p.add_argument(
|
| 67 |
+
"input_dataset",
|
| 68 |
+
help=(
|
| 69 |
+
"Input. Either a HF dataset ID (e.g. 'org/dataset') or a local path "
|
| 70 |
+
"to parquet/jsonl file(s) — useful when running in HF Jobs with a mounted "
|
| 71 |
+
"bucket: '-v hf://buckets/<ns>/<bucket>:/input' then pass '/input/cards.parquet'."
|
| 72 |
+
),
|
| 73 |
+
)
|
| 74 |
+
p.add_argument(
|
| 75 |
+
"output_dataset",
|
| 76 |
+
help=(
|
| 77 |
+
"Output HF dataset ID (e.g. 'user/output'). The script always pushes results "
|
| 78 |
+
"to a HF dataset repo regardless of where input came from."
|
| 79 |
+
),
|
| 80 |
+
)
|
| 81 |
p.add_argument(
|
| 82 |
"--text-column",
|
| 83 |
default="text",
|
|
|
|
| 213 |
"""
|
| 214 |
|
| 215 |
|
| 216 |
+
def is_local_path(s: str) -> bool:
    """Heuristic: decide whether *s* names local data file(s) rather than a HF dataset ID.

    Returns True when the string:
      * starts with an explicit path prefix ('/', './', '../'), or
      * ends in a known data extension (.parquet/.jsonl/.json/.csv), or
      * is a glob pattern ('*') that mentions a known data extension anywhere.
    Anything else is treated as a Hub dataset ID (e.g. 'org/dataset').
    """
    data_exts = (".parquet", ".jsonl", ".json", ".csv")
    if s.startswith(("/", "./", "../")):
        return True
    # str.endswith accepts a tuple — one call instead of any(...).
    if s.endswith(data_exts):
        return True
    # Globs like 'dir/*.parquet/part-0' may not *end* with the extension.
    return "*" in s and any(ext in s for ext in data_exts)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def load_input(spec: str, split: str):
    """Load either a HF dataset by ID, or a local parquet/jsonl path (incl. globs)."""
    if not is_local_path(spec):
        log.info("Loading HF dataset '%s' split=%s ...", spec, split)
        return load_dataset(spec, split=split, streaming=False)

    # Pick the `datasets` builder from the extension. '.parquet' is matched
    # anywhere in the string (glob-friendly) and doubles as the fallback for
    # extension-less paths; the others must terminate the path.
    if ".parquet" in spec:
        builder = "parquet"
    elif spec.endswith(".jsonl") or spec.endswith(".json"):
        builder = "json"
    elif spec.endswith(".csv"):
        builder = "csv"
    else:
        builder = "parquet"
    log.info("Loading local %s file(s): %s", builder, spec)
    # data_files-based loads always expose their rows under the 'train' split.
    return load_dataset(builder, data_files=spec, split="train")
|
| 239 |
+
|
| 240 |
+
|
| 241 |
def main():
|
| 242 |
args = parse_args()
|
| 243 |
device = resolve_device(args.device)
|
| 244 |
+
ds = load_input(args.input_dataset, args.split)
|
|
|
|
| 245 |
|
| 246 |
if args.text_column not in ds.column_names:
|
| 247 |
sys.exit(
|