| |
| """Build Linear A model-ready training data from libation tables and lineara.xyz. |
| |
| Reads: |
| - libation_tables.json (inscription metadata + transcriptions) |
| - sign_to_ipa.json (syllabogram → IPA mapping) |
| - LinearAInscriptions.js (fetched from lineara.xyz, cached locally) |
| |
| Outputs: |
| - linear_a_words.tsv (all unique word forms with IPA and SCA) |
| - linear_a_corpus.txt (one inscription per line, IPA-converted words) |
| """ |
|
|
| from __future__ import annotations |
|
|
| import json |
| import re |
| import sys |
| import urllib.request |
| from pathlib import Path |
| from typing import Dict, List, Optional, Tuple |
|
|
# Directory containing this script; all inputs/outputs are resolved relative to it.
_HERE = Path(__file__).resolve().parent
# Local cache of the lineara.xyz inscription data (downloaded once, then reused).
_CACHE_JS = _HERE / "LinearAInscriptions.js"
# Upstream source for the inscription data.
_LINEARA_URL = "https://www.lineara.xyz/LinearAInscriptions.js"
|
|
|
|
def load_sign_map(path: Path) -> Dict[str, str]:
    """Load sign_to_ipa.json (syllabogram -> IPA mapping)."""
    return json.loads(path.read_text(encoding="utf-8"))
|
|
|
|
| |
# Alternate spellings occasionally seen in transcriptions, normalized to the
# canonical sign-map key.
_SIGN_ALIASES = {
    "pa₃": "*56",
    "pa3": "*56",
}


def syllables_to_ipa(word: str, sign_map: Dict[str, str]) -> Optional[str]:
    """Convert a hyphen-delimited transliteration to IPA.

    E.g. "A-TA-I-*301-WA-JA" → "ataiΘwaja"

    Returns None if any syllable is unrecognizable (damaged, numeric, etc.).
    """
    pieces: List[str] = []
    for raw_syllable in word.split("-"):
        sign = raw_syllable.lower().strip()
        if not sign:
            continue
        sign = _SIGN_ALIASES.get(sign, sign)
        if sign not in sign_map:
            # One unknown sign invalidates the whole word.
            return None
        pieces.append(sign_map[sign])
    return "".join(pieces) if pieces else None
|
|
|
|
def ipa_to_sca(ipa: str) -> str:
    """Generate an uppercase SCA (Sound Class Approximation) from IPA.

    Placeholder signs Θ and Φ are kept as-is; every other character is
    uppercased.  (The earlier ``ch.isascii() and ch.isalpha()`` branch was
    dead code — its fallback did exactly the same thing.)

    Args:
        ipa: IPA string as produced by syllables_to_ipa.

    Returns:
        The uppercase SCA string.
    """
    return "".join(ch if ch in ("Θ", "Φ") else ch.upper() for ch in ipa)
|
|
|
|
def fetch_lineara_js(cache_path: Path) -> str:
    """Fetch LinearAInscriptions.js, caching locally.

    Returns the cached text when cache_path exists; otherwise downloads the
    file, writes the cache, and returns the body.
    """
    if not cache_path.exists():
        print(f"Fetching {_LINEARA_URL} ...")
        request = urllib.request.Request(
            _LINEARA_URL, headers={"User-Agent": "LinearA-build/1.0"}
        )
        with urllib.request.urlopen(request, timeout=30) as response:
            body = response.read().decode("utf-8")
        cache_path.write_text(body, encoding="utf-8")
        print(f" Cached to {cache_path}")
        return body
    return cache_path.read_text(encoding="utf-8")
|
|
|
|
def parse_lineara_js(js_text: str) -> Dict[str, List[str]]:
    """Parse the JS Map to extract transliteratedWords per inscription ID.

    Entry keys look like '["IOZa2", {' — a site prefix, "Za", a number, and
    an optional side letter.

    Args:
        js_text: raw text of LinearAInscriptions.js.

    Returns:
        dict of inscription_id → list of transliterated word strings.
        Entries without a "transliteratedWords" field are omitted.
    """
    inscriptions: Dict[str, List[str]] = {}

    entry_pattern = re.compile(r'\["([A-Za-z]+Za\d+[a-z]?)",\s*\{')
    tw_pattern = re.compile(r'"transliteratedWords"\s*:\s*\[([^\]]*)\]')

    pos = 0
    while pos < len(js_text):
        m = entry_pattern.search(js_text, pos)
        if not m:
            break
        entry_id = m.group(1)
        entry_start = m.start()

        # Bound the search window at the start of the NEXT entry.  Without
        # this, an entry lacking "transliteratedWords" would wrongly pick up
        # the next entry's words.  Also cap the window to keep the regex
        # scan cheap on very large entries.
        next_entry = entry_pattern.search(js_text, m.end())
        window_end = next_entry.start() if next_entry else len(js_text)
        window_end = min(window_end, entry_start + 10000)
        chunk = js_text[entry_start:window_end]

        tw_match = tw_pattern.search(chunk)
        if tw_match:
            raw = tw_match.group(1)
            words: List[str] = []
            for item in re.findall(r'"([^"]*)"', raw):
                # Skip separator glyphs (⦁, Linear A word divider), stray
                # newlines, and empty strings.
                if item in ("\u2981", "\U00010101", "\n", ""):
                    continue
                words.append(item)
            inscriptions[entry_id] = words

        pos = m.end()

    return inscriptions
|
|
|
|
def normalize_inscription_id(json_id: str) -> str:
    """Convert JSON id like 'IO Za 2' to lineara.xyz key like 'IOZa2'."""
    # Dropping every space character is exactly equivalent to replace(" ", "").
    return "".join(json_id.split(" "))
|
|
|
|
def is_transliterable_word(word: str) -> bool:
    """Check if a word from lineara.xyz is a transliterable syllabic word.

    Filters out:
    - Empty / whitespace-only strings
    - Pure numbers (commodity counts)
    - Words with embedded Unicode Linear A characters (U+10600–U+1077F),
      i.e. signs with no transliteration

    Args:
        word: a raw transliterated-word string from the JS data.

    Returns:
        True if the word looks convertible by syllables_to_ipa.
    """
    if not word:
        return False
    clean = word.strip()
    if not clean:
        return False
    # Digit-only tokens are commodity counts, not words.
    if re.fullmatch(r"\d+", clean):
        return False
    # Any embedded Linear A codepoint means an untransliterable sign.
    return not any(0x10600 <= ord(ch) <= 0x1077F for ch in word)
|
|
|
|
def build_corpus(
    libation_path: Path,
    sign_map: Dict[str, str],
    lineara_inscriptions: Dict[str, List[str]],
) -> Tuple[List[Dict[str, str]], List[str]]:
    """Build word list and corpus lines from all sources.

    Args:
        libation_path: path to libation_tables.json; must contain an
            "inscriptions" list of dicts with "id" and, optionally,
            "word_segmentation".
        sign_map: syllabogram → IPA mapping (see load_sign_map).
        lineara_inscriptions: inscription_id → transliterated words, as
            returned by parse_lineara_js.  Not modified by this function.

    Returns:
        (word_records, corpus_lines) where:
        - word_records: list of dicts with Word, IPA, SCA, Source,
          Concept_ID, Cognate_Set_ID, sorted by IPA
        - corpus_lines: list of strings, one per inscription (IPA words
          space-separated)
    """
    with libation_path.open("r", encoding="utf-8") as f:
        data = json.load(f)

    all_words: Dict[str, Dict[str, str]] = {}
    corpus_lines: List[str] = []
    inscriptions_used = 0

    for insc in data["inscriptions"]:
        norm_id = normalize_inscription_id(insc["id"])
        source_tag = "linear_a_za"

        # Prefer lineara.xyz words.  Copy the list before extending: the
        # original code aliased the dict entry, so extending it with side
        # variants mutated the caller's lineara_inscriptions in place.
        words_raw: List[str] = []
        if norm_id in lineara_inscriptions:
            words_raw = list(lineara_inscriptions[norm_id])
            for key, extra_words in lineara_inscriptions.items():
                # Pick up side/face variants keyed with a suffix,
                # e.g. "IOZa2a" for "IOZa2".
                if key != norm_id and key.startswith(norm_id):
                    words_raw.extend(extra_words)

        # Fall back to the libation-table segmentation when lineara.xyz has
        # nothing for this inscription.
        if not words_raw and insc.get("word_segmentation"):
            for seg in insc["word_segmentation"]:
                # Bracketed segments are damaged/restored readings; skip.
                if seg.startswith("["):
                    continue
                for part in seg.split():
                    words_raw.append(part.upper())

        if not words_raw:
            continue

        line_ipa_words: List[str] = []
        for word_raw in words_raw:
            if not is_transliterable_word(word_raw):
                continue
            word_clean = word_raw.strip()
            ipa = syllables_to_ipa(word_clean, sign_map)
            if ipa is None:
                # Contains an unmapped/damaged sign — drop the word.
                continue
            if ipa not in all_words:
                all_words[ipa] = {
                    "Word": word_clean,
                    "IPA": ipa,
                    "SCA": ipa_to_sca(ipa),
                    "Source": source_tag,
                    "Concept_ID": "-",
                    "Cognate_Set_ID": "-",
                }
            line_ipa_words.append(ipa)

        if line_ipa_words:
            corpus_lines.append(" ".join(line_ipa_words))
            inscriptions_used += 1

    print(f"Inscriptions with data: {inscriptions_used}")
    print(f"Unique word forms: {len(all_words)}")
    print(f"Corpus lines: {len(corpus_lines)}")

    word_records = sorted(all_words.values(), key=lambda r: r["IPA"])
    return word_records, corpus_lines
|
|
|
|
def write_tsv(records: List[Dict[str, str]], path: Path) -> None:
    """Write word records to TSV."""
    cols = ["Word", "IPA", "SCA", "Source", "Concept_ID", "Cognate_Set_ID"]
    rows = ["\t".join(cols)]
    rows.extend("\t".join(record[col] for col in cols) for record in records)
    with path.open("w", encoding="utf-8", newline="") as out:
        out.write("\n".join(rows) + "\n")
    print(f"Wrote {len(records)} rows to {path}")
|
|
|
|
def write_corpus(lines: List[str], path: Path) -> None:
    """Write corpus lines."""
    with path.open("w", encoding="utf-8", newline="") as out:
        out.writelines(text + "\n" for text in lines)
    print(f"Wrote {len(lines)} lines to {path}")
|
|
|
|
def main() -> None:
    """Entry point: load inputs, build the corpus, write the outputs."""
    sign_map_path = _HERE / "sign_to_ipa.json"
    libation_path = _HERE / "libation_tables.json"
    words_out = _HERE / "linear_a_words.tsv"
    corpus_out = _HERE / "linear_a_corpus.txt"

    # Both input files are required; fail fast with a clear message.
    for required in (sign_map_path, libation_path):
        if not required.exists():
            print(f"ERROR: {required} not found", file=sys.stderr)
            sys.exit(1)

    sign_map = load_sign_map(sign_map_path)
    print(f"Loaded {len(sign_map)} sign mappings")

    js_text = fetch_lineara_js(_CACHE_JS)
    lineara_inscriptions = parse_lineara_js(js_text)
    za_count = len(lineara_inscriptions)
    print(f"Parsed {za_count} Za inscriptions from lineara.xyz")

    word_records, corpus_lines = build_corpus(libation_path, sign_map, lineara_inscriptions)
    write_tsv(word_records, words_out)
    write_corpus(corpus_lines, corpus_out)

    print("\nDone. Files created:")
    print(f" {words_out}")
    print(f" {corpus_out}")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|