"""Preprocess the classic TREC question-classification dataset.

Fetch TREC from source, split it, save as Parquet, and add metadata.

Run: python preprocess.py --val-ratio 0.1 --seed 42 --out-dir data
"""

import argparse
import json
import random
from pathlib import Path
from typing import Sequence, TypedDict

import requests
from datasets import Dataset, DatasetDict

OUT_DIR = Path(__file__).parent / "data"
METADATA_PATH = OUT_DIR / "metadata.json"
SEED = 42
VAL_RATIO = 0.1
URLS = {
"train": "https://cogcomp.seas.upenn.edu/Data/QA/QC/train_5500.label",
"test": "https://cogcomp.seas.upenn.edu/Data/QA/QC/TREC_10.label",
}
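
# Human-readable descriptions of the 6 coarse and 50 fine question classes
# from the Li & Roth (2002) TREC question-classification taxonomy.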
COARSE_DESC = {
"ABBR": "abbreviation", "ENTY": "entities", "DESC": "description and abstract concepts",
"HUM": "human beings", "LOC": "locations", "NUM": "numeric values"
}
FINE_DESC = {
    "ABBR:abb": "abbreviation", "ABBR:exp": "expression abbreviated",
    "ENTY:animal": "animals", "ENTY:body": "organs of body", "ENTY:color": "colors", "ENTY:cremat": "creative works",
    "ENTY:currency": "currency names", "ENTY:dismed": "diseases and medicine", "ENTY:event": "events", "ENTY:food": "food",
    "ENTY:instru": "musical instrument", "ENTY:lang": "languages", "ENTY:letter": "letters like a-z", "ENTY:other": "other entities",
    "ENTY:plant": "plants", "ENTY:product": "products", "ENTY:religion": "religions", "ENTY:sport": "sports",
    "ENTY:substance": "elements and substances", "ENTY:symbol": "symbols and signs", "ENTY:techmeth": "techniques and methods",
    "ENTY:termeq": "equivalent terms", "ENTY:veh": "vehicles", "ENTY:word": "words with a special property",
    "DESC:def": "definition of something", "DESC:desc": "description of something", "DESC:manner": "manner of an action", "DESC:reason": "reasons",
    "HUM:gr": "a group/organization", "HUM:ind": "an individual", "HUM:title": "title of a person", "HUM:desc": "description of a person",
    "LOC:city": "cities", "LOC:country": "countries", "LOC:mount": "mountains", "LOC:other": "other locations", "LOC:state": "states",
    "NUM:code": "codes", "NUM:count": "counts", "NUM:date": "dates", "NUM:dist": "distances", "NUM:money": "prices", "NUM:ord": "ranks",
    "NUM:other": "other numbers", "NUM:period": "duration", "NUM:perc": "percentages", "NUM:speed": "speed", "NUM:temp": "temperature",
    "NUM:volsize": "size/area/volume", "NUM:weight": "weight",
}


class TrecExample(TypedDict):
    """One TREC question with its coarse and fine labels and descriptions."""

    text: str
    coarse_label: str
    coarse_description: str
    fine_label: str
    fine_description: str


def fetch(url: str) -> list[bytes]:
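    """Download one TREC label file and return its raw lines as bytes."""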
r = requests.get(url, timeout=30)
r.raise_for_status()
return r.content.splitlines()


def parse(lines: Sequence[bytes]) -> list[TrecExample]:
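    """Parse raw label-file lines into TrecExample rows.

    Each line pairs a fine label with the question text, e.g.
    b"DESC:manner How did serfdom develop in and then leave Russia ?";
    the coarse label is the part of the fine label before the colon.
    """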
rows: list[TrecExample] = []
for b in lines:
line = b.decode("utf-8", errors="replace").strip()
if not line or " " not in line:
continue
fine, text = line.split(" ", 1)
coarse = fine.split(":", 1)[0]
rows.append(
{
"text": text.strip(),
"coarse_label": coarse,
"coarse_description": COARSE_DESC.get(coarse, ""),
"fine_label": fine,
"fine_description": FINE_DESC.get(fine, ""),
}
)
return rows


def extract_metadata(ds: DatasetDict) -> dict:
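    """Summarize row counts, feature types, and the sorted label sets per field."""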
num_rows = {name: len(split) for name, split in ds.items()}
first_split = next(iter(ds.values()))
features = {name: repr(feat) for name, feat in first_split.features.items()}
coarse_labels = {label for split in ds.values() for label in split["coarse_label"]}
fine_labels = {label for split in ds.values() for label in split["fine_label"]}
label_maps = {
"coarse_label": sorted(coarse_labels),
"fine_label": sorted(fine_labels),
}
    return {
        "num_rows": num_rows,
        "features": features,
        "label_maps": label_maps,
    }


if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("--val-ratio", type=float, default=VAL_RATIO, help="Fraction of training set for validation")
ap.add_argument("--seed", type=int, default=SEED, help="Random seed for shuffling")
ap.add_argument("--out-dir", type=Path, help="Output directory for Parquet files")
ap.add_argument("--metadata-path", type=Path, help="Path for metadata.json")
args = ap.parse_args()
out_dir = args.out_dir or OUT_DIR
metadata_path = args.metadata_path or METADATA_PATH
train = parse(fetch(URLS["train"]))
test = parse(fetch(URLS["test"]))
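
    # Shuffle the training questions deterministically, then hold out the
    # first val-ratio fraction as the validation split.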
rng = random.Random(args.seed)
rng.shuffle(train)
n_val = int(len(train) * args.val_ratio)
validation = train[:n_val]
train = train[n_val:]
data = DatasetDict(
{
"train": Dataset.from_list(train),
"validation": Dataset.from_list(validation),
"test": Dataset.from_list(test),
}
)
out_dir.mkdir(exist_ok=True, parents=True)
for name, split in data.items():
split.to_parquet(str(out_dir / f"{name}.parquet"))
    metadata_path.parent.mkdir(exist_ok=True, parents=True)
    metadata_path.write_text(json.dumps(extract_metadata(data), indent=2))
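
    # Quick sanity check (a sketch, assuming the `datasets` Parquet loader;
    # uncomment to read a split back and inspect the first row):
    #   from datasets import load_dataset
    #   ds = load_dataset("parquet", data_files={"train": str(out_dir / "train.parquet")})
    #   print(ds["train"][0])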