# ancient-scripts-datasets / scripts / ingest_ceipom.py
# Commit 98e5288 (unverified) — Phase 8: Add 24 new ancient/proto-language
# lexicons (12,911 entries) + scripts
#!/usr/bin/env python3
"""Ingest Oscan, Umbrian, Venetic, and Faliscan word data from CEIPoM.
Source: Corpus of the Epigraphy of the Italian Peninsula in the 1st Millennium BCE
URL: https://github.com/ReubenJPitts/Corpus-of-the-Epigraphy-of-the-Italian-Peninsula-in-the-1st-Millennium-BCE
License: CC BY-SA 4.0
Citation: Pitts (2022), DOI: 10.5281/zenodo.6475427
CEIPoM provides analysis.csv (UTF-16) with linguistic annotations and
tokens.csv (UTF-16) with attested word forms. For Oscan/Umbrian, the
Standard_aligned field gives standardized phonological forms.
Faliscan is classified under Language=Latin with Language_variety=Faliscan
in texts.csv; extraction requires a join via Text_ID through texts.csv.
Iron Rule: Data comes from downloaded CSV files. No hardcoded word lists.
Usage:
python scripts/ingest_ceipom.py [--dry-run] [--language ISO]
"""
from __future__ import annotations
import argparse
import csv
import io
import json
import logging
import sys
import unicodedata
from pathlib import Path
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding="utf-8")
ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT / "cognate_pipeline" / "src"))
sys.path.insert(0, str(ROOT / "scripts"))
from cognate_pipeline.normalise.sound_class import ipa_to_sound_class # noqa: E402
from transliteration_maps import transliterate # noqa: E402
logger = logging.getLogger(__name__)

# Output/input directories, all relative to the repository root.
LEXICON_DIR = ROOT / "data" / "training" / "lexicons"
AUDIT_TRAIL_DIR = ROOT / "data" / "training" / "audit_trails"
RAW_DIR = ROOT / "data" / "training" / "raw"
CEIPOM_DIR = RAW_DIR / "ceipom"

# Base URL for raw files on the CEIPoM GitHub repository (master branch);
# file names are appended directly to this string.
CEIPOM_REPO = (
    "https://raw.githubusercontent.com/ReubenJPitts/"
    "Corpus-of-the-Epigraphy-of-the-Italian-Peninsula-in-the-1st-Millennium-BCE"
    "/master/"
)

# ISO 639-3 code -> value of the Language column in analysis.csv.
LANGUAGE_MAP = {
    "osc": "Oscan",
    "xum": "Umbrian",
    "xve": "Venetic",
    "xfa": "Faliscan",
}

# Languages identified via Language_variety in texts.csv rather than
# Language in analysis.csv (because CEIPoM classifies them under a parent).
VARIETY_LANGUAGES = {
    "xfa": "Faliscan",  # classified as Language=Latin, Language_variety=Faliscan
}
def download_if_needed():
    """Fetch the three CEIPoM CSV files into the local cache directory.

    Files already present under CEIPOM_DIR are reused untouched; only
    missing ones are downloaded from the raw-GitHub mirror.
    """
    import urllib.request

    CEIPOM_DIR.mkdir(parents=True, exist_ok=True)
    for fname in ("analysis.csv", "tokens.csv", "texts.csv"):
        target = CEIPOM_DIR / fname
        if target.exists():
            logger.info("Using cached: %s (%d bytes)", target, target.stat().st_size)
            continue
        url = CEIPOM_REPO + fname
        logger.info("Downloading %s ...", url)
        headers = {"User-Agent": "PhaiPhon/1.0 (ancient-scripts-datasets)"}
        request = urllib.request.Request(url, headers=headers)
        with urllib.request.urlopen(request, timeout=120) as resp:
            payload = resp.read()
        with open(target, "wb") as fh:
            fh.write(payload)
        logger.info("Downloaded %d bytes", len(payload))
def _read_csv(path: Path):
    """Parse *path* as CSV and return a list of dict rows.

    CEIPoM ships UTF-16 files, so that encoding is attempted first;
    UTF-8 (with or without BOM) is the fallback for re-saved copies.
    """
    for encoding in ("utf-16", "utf-8-sig", "utf-8"):
        try:
            with open(path, "r", encoding=encoding) as handle:
                rows = list(csv.DictReader(handle))
        except (UnicodeDecodeError, UnicodeError):
            continue
        return rows
    raise RuntimeError(f"Cannot read {path}")
def load_ceipom_data():
    """Read the cached CEIPoM CSVs and build the lookup structures.

    Returns a 3-tuple of:
      * the raw rows of tokens.csv,
      * a Token_ID -> analysis.csv row mapping (first occurrence wins),
      * a Text_ID -> Language_variety mapping (non-empty varieties only,
        used for variety-classified languages such as Faliscan).
    """
    tokens = _read_csv(CEIPOM_DIR / "tokens.csv")
    analyses = _read_csv(CEIPOM_DIR / "analysis.csv")
    texts = _read_csv(CEIPOM_DIR / "texts.csv")

    # First analysis row per token wins, mirroring a guarded insert.
    tok_to_analysis = {}
    for row in analyses:
        tok_to_analysis.setdefault(row["Token_ID"], row)

    text_variety = {}
    for row in texts:
        variety = row.get("Language_variety", "").strip()
        if variety:
            text_variety[row["Text_ID"]] = variety

    return tokens, tok_to_analysis, text_variety
def extract_words(token_rows, tok_to_analysis, lang_name: str, iso: str,
                  text_variety=None, variety_languages=None):
    """Extract unique word forms for one language.

    For most languages a token belongs to the language when its
    analysis.csv row has Language == *lang_name*.  For "variety"
    languages (e.g. Faliscan, filed under Language=Latin), membership is
    decided by texts.csv Language_variety instead, via *text_variety*
    keyed by Text_ID.

    Args:
        token_rows: raw rows from tokens.csv.
        tok_to_analysis: Token_ID -> analysis.csv row.
        lang_name: CEIPoM Language column value (e.g. "Oscan").
        iso: ISO 639-3 code, used as the key into *variety_languages*.
        text_variety: Text_ID -> Language_variety mapping; required for
            variety-based filtering.
        variety_languages: iso -> variety-name mapping; defaults to the
            module-level VARIETY_LANGUAGES table (parameterized so the
            filter table can be overridden, e.g. in tests).

    Returns:
        dict mapping word form -> {"meaning", "pos", "standard"};
        the first occurrence of each form wins.
    """
    if variety_languages is None:
        variety_languages = VARIETY_LANGUAGES
    words = {}  # word_form -> {meaning, pos, standard}
    variety_name = variety_languages.get(iso)
    use_variety = variety_name is not None and text_variety is not None
    for tok in token_rows:
        tid = tok["Token_ID"]
        if use_variety:
            # Filter by Language_variety in texts.csv.
            if text_variety.get(tok["Text_ID"]) != variety_name:
                continue
            a = tok_to_analysis.get(tid, {})
        else:
            # Filter by Language in analysis.csv.  (Single lookup; the
            # original fetched the same row a second time afterwards.)
            a = tok_to_analysis.get(tid)
            if not a or a["Language"] != lang_name:
                continue
        # csv.DictReader yields None for fields missing from short rows,
        # so guard before calling .strip().
        word = (tok.get("Token_clean") or "").strip()
        if not word or word == "-":
            continue
        # NFC normalize so equal forms compare equal.
        word = unicodedata.normalize("NFC", word)
        # Single characters are usually abbreviations in inscriptions.
        if len(word) < 2:
            continue
        # Implausibly long entries are likely data errors.
        if len(word) > 50:
            continue
        # Brackets mark fragmentary/restored readings.
        if "[" in word or "]" in word:
            continue
        # Numerals are not lexical entries.
        if word.isdigit():
            continue
        if word not in words:
            # Same None-guard for the analysis fields.
            standard = a.get("Standard_aligned") or ""
            words[word] = {
                "meaning": a.get("Meaning") or "",
                "pos": a.get("Part_of_speech") or "",
                "standard": "" if standard == "-" else standard,
            }
    return words
def load_existing_words(tsv_path: Path) -> set[str]:
    """Return the set of Word-column values already present in *tsv_path*.

    A missing file yields an empty set.  The header row and blank lines
    are skipped, and the trailing newline is stripped before splitting
    so a tab-less line cannot contribute a "word\\n" entry that would
    never match later membership tests.
    """
    existing: set[str] = set()
    if tsv_path.exists():
        with open(tsv_path, "r", encoding="utf-8") as f:
            for line in f:
                if line.startswith("Word\t"):
                    continue
                word = line.rstrip("\n").split("\t")[0]
                if word:  # ignore blank lines
                    existing.add(word)
    return existing
def main():
    """CLI entry point: ingest CEIPoM word forms into the lexicon TSVs.

    Downloads/caches the CEIPoM CSVs, extracts unique word forms per
    language, transliterates them to IPA and sound classes, appends
    previously-unseen words to data/training/lexicons/<iso>.tsv, writes
    a JSONL audit trail per language, and prints a summary table.
    With --dry-run, nothing is written; only the summary is printed.
    """
    parser = argparse.ArgumentParser(description="Ingest CEIPoM Italic languages")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--language", "-l",
                        help="Specific ISO code (osc, xum, xve, xfa)")
    args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(levelname)s: %(message)s",
        datefmt="%H:%M:%S",
    )
    download_if_needed()
    logger.info("Loading CEIPoM data...")
    token_rows, tok_to_analysis, text_variety = load_ceipom_data()
    logger.info("Loaded %d tokens, %d analyses, %d text varieties",
                len(token_rows), len(tok_to_analysis), len(text_variety))
    # Optionally restrict the run to a single language.
    if args.language:
        if args.language not in LANGUAGE_MAP:
            logger.error("Unknown: %s. Available: %s",
                         args.language, ", ".join(LANGUAGE_MAP))
            sys.exit(1)
        langs = {args.language: LANGUAGE_MAP[args.language]}
    else:
        langs = LANGUAGE_MAP
    results = []  # per-language summary dicts for the final table
    for iso, lang_name in langs.items():
        tsv_path = LEXICON_DIR / f"{iso}.tsv"
        existing = load_existing_words(tsv_path)
        logger.info("%s (%s): %d existing entries", iso, lang_name, len(existing))
        # Extract words
        words = extract_words(token_rows, tok_to_analysis, lang_name, iso,
                              text_variety=text_variety)
        logger.info("%s: %d unique word forms from CEIPoM", iso, len(words))
        # Process new entries
        new_entries = []
        audit_trail = []
        skipped = 0
        for word, info in sorted(words.items()):
            if word in existing:
                skipped += 1
                continue
            # For Oscan/Umbrian, prefer Standard_aligned for transliteration.
            # For Faliscan, always use Token_clean: Standard_aligned maps to
            # Latin equivalents (e.g. "de-dit" for Faliscan "porded"), which
            # would corrupt the Faliscan phonology.
            # For Venetic (no Standard_aligned), also use Token_clean.
            if iso in VARIETY_LANGUAGES:
                source_form = word  # always Token_clean for variety langs
            else:
                source_form = info["standard"] if info["standard"] else word
            # Best-effort transliteration: fall back to the source form
            # (then the raw word if empty) rather than dropping the entry.
            try:
                ipa = transliterate(source_form, iso)
            except Exception:
                ipa = source_form
            if not ipa:
                ipa = word
            # Sound-class string is optional metadata; empty on failure.
            try:
                sca = ipa_to_sound_class(ipa)
            except Exception:
                sca = ""
            new_entries.append({
                "word": word,
                "ipa": ipa,
                "sca": sca,
            })
            # Mark as seen so duplicates within this run are skipped too.
            existing.add(word)
            audit_trail.append({
                "word": word,
                "standard_aligned": info["standard"],
                "ipa": ipa,
                "pos": info["pos"],
                "meaning": info["meaning"],
                "source": "ceipom",
            })
        logger.info("%s: %d new, %d skipped", iso, len(new_entries), skipped)
        if args.dry_run:
            # Record the summary without touching any files.
            results.append({
                "iso": iso, "name": lang_name,
                "existing": len(existing) - len(new_entries),
                "new": len(new_entries), "total": len(existing),
            })
            continue
        # Write TSV
        if new_entries:
            LEXICON_DIR.mkdir(parents=True, exist_ok=True)
            if not tsv_path.exists():
                with open(tsv_path, "w", encoding="utf-8") as f:
                    f.write("Word\tIPA\tSCA\tSource\tConcept_ID\tCognate_Set_ID\n")
            with open(tsv_path, "a", encoding="utf-8") as f:
                for e in new_entries:
                    f.write(f"{e['word']}\t{e['ipa']}\t{e['sca']}\tceipom\t-\t-\n")
        # Save audit trail
        if audit_trail:
            AUDIT_TRAIL_DIR.mkdir(parents=True, exist_ok=True)
            audit_path = AUDIT_TRAIL_DIR / f"ceipom_ingest_{iso}.jsonl"
            with open(audit_path, "w", encoding="utf-8") as f:
                for r in audit_trail:
                    f.write(json.dumps(r, ensure_ascii=False) + "\n")
        results.append({
            "iso": iso, "name": lang_name,
            "existing": len(existing) - len(new_entries),
            "new": len(new_entries), "total": len(existing),
        })
    # Final summary table (shared by dry and real runs).
    mode = "DRY RUN: " if args.dry_run else ""
    print(f"\n{mode}CEIPoM Italic Language Ingestion:")
    print("=" * 60)
    total_new = 0
    for r in results:
        print(f" {r['iso']:8s} {r['name']:15s} existing={r['existing']:>5d}, "
              f"new={r['new']:>5d}, total={r['total']:>5d}")
        total_new += r["new"]
    print(f"\n Total new entries: {total_new}")
    print("=" * 60)
# Script entry point: run the full ingestion when executed directly.
if __name__ == "__main__":
    main()