"""
Phase 2 verification: corrected URL patterns for eDiAna and deeper checks.
"""

import json
import random
import time
import sys
import io
from pathlib import Path

# Force UTF-8 console output on Windows; replace characters the console
# cannot encode rather than crashing on ancient-script glyphs.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')

import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from urllib.parse import quote

AUDIT_DIR = Path(r"C:\Users\alvin\hf-ancient-scripts\data\training\audit_trails")

HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
REQUEST_DELAY = 1.5  # seconds between requests, to stay polite to the hosts
random.seed(42)      # deterministic sampling across verification runs


def load_audit_trail(filepath):
    """Read a JSONL audit trail, skipping blank and malformed lines."""
    entries = []
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                try:
                    entries.append(json.loads(line))
                except json.JSONDecodeError:
                    pass
    return entries
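

# Hedged convenience helper, not called by the verifiers below: a quick look
# at one audit file. The field set printed is whatever the JSONL actually
# contains; nothing here assumes a fixed schema.
def summarize_audit_trail(filepath):
    """Print the entry count and the union of keys seen in an audit file."""
    entries = load_audit_trail(filepath)
    keys = set()
    for e in entries:
        keys.update(e.keys())
    print(f"{filepath.name}: {len(entries)} entries, fields={sorted(keys)}")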


def check_url(url, timeout=15):
    """GET url; return (status_code, ok, text), where text is a 5000-char
    body prefix on success, empty on HTTP errors, or the exception message
    on transport failure."""
    try:
        resp = requests.get(url, headers=HEADERS, timeout=timeout,
                            allow_redirects=True, verify=False)
        return resp.status_code, resp.ok, resp.text[:5000] if resp.ok else ""
    except Exception as e:
        return None, False, str(e)
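

# Hedged hardening sketch, not wired into the verifiers: retry transport
# failures and 5xx responses with exponential backoff. The attempt/backoff
# defaults are arbitrary, not tuned against these hosts.
def check_url_with_retry(url, timeout=15, attempts=3, backoff=2.0):
    """Like check_url, but retry on connection errors and server errors."""
    status, ok, text = None, False, ""
    for attempt in range(attempts):
        status, ok, text = check_url(url, timeout=timeout)
        # Give up early on success or on definitive client-side statuses.
        if ok or (status is not None and status < 500):
            return status, ok, text
        time.sleep(backoff * (2 ** attempt))
    return status, ok, text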


def verify_ediana_corrected():
    print("=" * 70)
    print("eDiAna VERIFICATION (Corrected URL: /dictionary.php?lemma=ID)")
    print("=" * 70)

    audit_files = sorted(AUDIT_DIR.glob("ediana_*.jsonl"))

    for af in audit_files:
        lang = af.stem.replace("ediana_", "")
        entries = load_audit_trail(af)
        if not entries:
            print(f"\n --- {lang}: EMPTY ---")
            continue

        print(f"\n --- Language: {lang} ({len(entries)} entries) ---")
        samples = random.sample(entries, min(5, len(entries)))

        for entry in samples:
            word = entry.get("word", "?")
            gloss = entry.get("gloss", "?")
            entry_id = entry.get("entry_id", "")

            if entry_id:
                url = f"https://ediana.gwi.uni-muenchen.de/dictionary.php?lemma={entry_id}"
                status, ok, text = check_url(url, timeout=15)

                # Strip reconstruction markers (*, hyphens, parentheses)
                # before the substring test, so marked-up forms can still
                # match the rendered page text.
                word_found = False
                if ok and text:
                    clean_word = word.replace("*", "").replace("-", "").replace("(", "").replace(")", "").strip()
                    if clean_word and len(clean_word) >= 2:
                        word_found = clean_word.lower() in text.lower()

                status_str = f"HTTP {status}"
                if ok:
                    status_str += f" | word_on_page={'YES' if word_found else 'NO'}"
                print(f" id={entry_id:5s} word={word[:25]:25s} gloss={gloss[:30]:30s} -> {status_str}")
                time.sleep(REQUEST_DELAY)
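

# Hedged refinement, not wired in: restrict the match to heading markup
# instead of the whole page, to avoid false hits from navigation or other
# lemmas. That eDiAna renders the lemma inside <h1>-<h3> tags is an
# unverified assumption about its HTML.
def word_in_heading(text, clean_word):
    """True if clean_word occurs inside any <h1>-<h3> element of text."""
    import re
    matches = re.findall(r"<(h[1-3])[^>]*>(.*?)</\1>", text, re.S | re.I)
    return any(clean_word.lower() in body.lower() for _, body in matches)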


def verify_palaeolexicon_deep():
    print("\n" + "=" * 70)
    print("PALAEOLEXICON DEEP VERIFICATION (gloss + word match)")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "palaeolexicon_ett.jsonl")
    print(f"\n Etruscan: {len(entries)} entries")

    # Prefer well-attested Etruscan words; fall back to a random sample.
    known_words = ["ais", "clan", "sec", "huth", "avil", "tur", "lautni"]
    matches = [e for e in entries if e.get("word") in known_words]

    if not matches:
        matches = random.sample(entries, min(5, len(entries)))

    for entry in matches[:7]:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        word_id = entry.get("word_id")

        if word_id:
            url = f"https://www.palaeolexicon.com/Word/Show/{word_id}"
            status, ok, text = check_url(url, timeout=15)

            word_found = ok and word.lower() in text.lower()
            gloss_parts = [g.strip() for g in gloss.split(",")]
            gloss_found = ok and any(g.lower() in text.lower() for g in gloss_parts if len(g) > 2)

            print(f" word={word:15s} gloss={gloss:30s} word_id={word_id}")
            print(f" URL: {url}")
            print(f" word_match={word_found} gloss_match={gloss_found} HTTP={status}")
            time.sleep(REQUEST_DELAY)
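

# Hedged variant of the gloss check above: split on semicolons as well as
# commas. That Palaeolexicon glosses also use ";" between senses is an
# assumption about the data, not something this script verifies.
def gloss_candidates(gloss):
    """Split a gloss string into sense candidates longer than two chars."""
    import re
    return [g.strip() for g in re.split(r"[,;]", gloss) if len(g.strip()) > 2]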


def verify_tir_corrected():
    print("\n" + "=" * 70)
    print("TIR RAETICA (Corrected URL: tir.univie.ac.at)")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "tir_raetica_xrr.jsonl")
    print(f" {len(entries)} entries")

    # Sanity check: the wiki's word category page should exist.
    cat_url = "https://tir.univie.ac.at/wiki/Category:Word"
    status, ok, text = check_url(cat_url)
    print(f"\n Category:Word -> HTTP {status}")

    samples = random.sample(entries, min(5, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")

        url = f"https://tir.univie.ac.at/wiki/{quote(word)}"
        status, ok, text = check_url(url, timeout=15)

        word_found = ok and word.lower() in text.lower()
        status_str = f"HTTP {status}"
        if ok:
            # MediaWiki serves a search page when the exact title is missing.
            is_search = "There were no results" in text or "Search results" in text
            status_str += f" | content={'SEARCH_REDIRECT' if is_search else 'CONTENT_PAGE'}"
            status_str += f" | word_on_page={'YES' if word_found else 'NO'}"

        print(f" word={word:20s} gloss={gloss[:25]:25s} -> {status_str}")
        time.sleep(REQUEST_DELAY)
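

# Hedged alternative to scraping titles: standard MediaWiki installs expose
# an Action API that reports missing pages explicitly. That api.php lives at
# this path on tir.univie.ac.at is an assumption, not verified here.
def tir_page_exists(title):
    """Ask the (assumed) MediaWiki Action API whether a wiki page exists."""
    api = "https://tir.univie.ac.at/api.php"
    params = {"action": "query", "titles": title, "format": "json"}
    try:
        resp = requests.get(api, headers=HEADERS, params=params,
                            timeout=15, verify=False)
        pages = resp.json().get("query", {}).get("pages", {})
        # The API marks missing pages with the sentinel page id "-1".
        return resp.ok and "-1" not in pages
    except Exception:
        return False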


def verify_oracc_deep():
    print("\n" + "=" * 70)
    print("ORACC/eCUT DEEP VERIFICATION")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "oracc_ecut_xur.jsonl")
    print(f" {len(entries)} entries")

    # Probe several plausible entry points for the eCUT project.
    urls_to_try = [
        "https://oracc.museum.upenn.edu/ecut/",
        "https://oracc.museum.upenn.edu/ecut/corpus",
        "https://oracc.museum.upenn.edu/ecut/pager",
        "https://oracc.museum.upenn.edu/ecut/xur/index.html",
        "https://oracc.museum.upenn.edu/ecut/glossary/xur",
    ]

    for url in urls_to_try:
        status, ok, text = check_url(url, timeout=15)
        has_content = len(text) > 500 if ok else False
        print(f" {url}")
        print(f" -> HTTP {status} | content={'YES' if has_content else 'NO/SMALL'}")
        time.sleep(REQUEST_DELAY)

    json_urls = [
        "https://oracc.museum.upenn.edu/ecut/signlist",
    ]
    for url in json_urls:
        status, ok, text = check_url(url, timeout=15)
        print(f" {url}")
        print(f" -> HTTP {status}")
        time.sleep(REQUEST_DELAY)

    print("\n Sample Urartian words in audit trail:")
    samples = random.sample(entries, min(10, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        gloss = entry.get("gloss", "?")
        print(f" word={word:20s} gloss={gloss}")


def verify_avesta_deep():
    print("\n" + "=" * 70)
    print("AVESTA.ORG DEEP VERIFICATION")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "avesta_org_ave.jsonl")

    # The dictionary index page; all word checks run against its text.
    dict_url = "https://avesta.org/avdict/avdict.htm"
    status, ok, text = check_url(dict_url, timeout=15)
    print(f" Dictionary page: HTTP {status} | length={len(text) if ok else 0} chars")

    # Drop obvious non-lexical entries before checking against the page.
    real_entries = [e for e in entries if e.get("word") not in ("NOTE", "Example", "swift", "AVESTA")]
    print(f"\n Checking {len(real_entries)} entries against dictionary page:")

    found_count = 0
    for entry in real_entries:
        word = entry.get("word", "")
        gloss = entry.get("gloss", "")
        if ok and word and len(word) >= 2:
            in_dict = word.lower() in text.lower()
            if in_dict:
                found_count += 1
            print(f" word={word:20s} gloss={gloss:20s} in_dictionary={'YES' if in_dict else 'NO'}")

    print(f"\n Found {found_count}/{len(real_entries)} words in dictionary page")

    print("\n DATA QUALITY ASSESSMENT:")
    print(" The avesta.org audit trail entries have a suspicious pattern:")
    print(" - The 'word' and 'gloss' fields often look like adjacent Avestan words,")
    print("   not word-definition pairs.")
    print(" - Examples: word='aiwi' gloss='druxt' (both are Avestan words)")
    print(" - Examples: word='aurvat' gloss='aspa' (both are Avestan words)")
    print(" - The 'gloss' field appears to contain the NEXT word in the text,")
    print("   not an English translation.")
    print(" - NOTE: avesta.org data is NOT used in the ave.tsv lexicon (which uses Wiktionary).")
    print(" - The audit trail exists but was apparently never imported into the lexicon.")


def verify_wiktionary_deep():
    print("\n" + "=" * 70)
    print("WIKTIONARY EXPANSION DEEP VERIFICATION")
    print("=" * 70)

    entries = load_audit_trail(AUDIT_DIR / "wiktionary_expansion_ine-pro.jsonl")
    print(f"\n PIE (ine-pro): {len(entries)} entries")

    samples = random.sample(entries, min(5, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        page_title = entry.get("page_title", "")
        if page_title:
            url = f"https://en.wiktionary.org/wiki/{quote(page_title)}"
            status, ok, text = check_url(url, timeout=15)
            # Case-sensitive containment check, unlike the lower-cased
            # checks in the other verifiers.
            word_found = ok and word in text
            print(f" word={word:20s} page={page_title[:40]:40s} HTTP={status} found={word_found}")
            time.sleep(REQUEST_DELAY)

    entries = load_audit_trail(AUDIT_DIR / "wiktionary_expansion_uga.jsonl")
    print(f"\n Ugaritic (uga): {len(entries)} entries")

    samples = random.sample(entries, min(5, len(entries)))
    for entry in samples:
        word = entry.get("word", "?")
        page_title = entry.get("page_title", "")
        if page_title:
            url = f"https://en.wiktionary.org/wiki/{quote(page_title)}"
            status, ok, text = check_url(url, timeout=15)
            print(f" word={word:20s} page={page_title[:40]:40s} HTTP={status}")
            time.sleep(REQUEST_DELAY)
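

# Hedged alternative, not wired in: Wikimedia wikis expose a REST endpoint
# returning just the parsed page HTML, which avoids skin chrome that can
# cause false substring hits on the full desktop page.
def wiktionary_page_html(title):
    """Fetch parsed page HTML via the Wikimedia REST API (None on failure)."""
    # Slashes in titles (e.g. Reconstruction: subpages) must be encoded.
    url = f"https://en.wiktionary.org/api/rest_v1/page/html/{quote(title, safe='')}"
    try:
        resp = requests.get(url, headers=HEADERS, timeout=15)
        return resp.text if resp.ok else None
    except Exception:
        return None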


def main():
    print("PHASE 2: DEEP SOURCE VERIFICATION")
    print("=" * 70)
    print(f"Date: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")

    verify_ediana_corrected()
    verify_palaeolexicon_deep()
    verify_tir_corrected()
    verify_oracc_deep()
    verify_avesta_deep()
    verify_wiktionary_deep()

    print("\n" + "=" * 70)
    print("PHASE 2 COMPLETE")
    print("=" * 70)


if __name__ == "__main__":
    main()