"""
Data Diagnostic: Answers all reviewer questions about the dataset.
Run: python data_diagnostic.py
"""
import json
from collections import Counter, defaultdict
import numpy as np


# Banner for the whole report.
print("=" * 70)
print(" DATA DIAGNOSTIC — Answering All Reviewer Questions")
print("=" * 70)

# Load the unified Tier-1 dataset once; every later section reads `samples`.
with open("data/knowledge_drift_unified_tier1.json") as f:
    tier1 = json.load(f)
samples = tier1["samples"]
print(f"\nTier 1: {len(samples)} samples")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q1: CLASS IMBALANCE PER MODEL")
print("=" * 70)
# `models` is shared with the later sections (Q7, Q8).
models = ["llama2", "mistral", "llama31", "qwen25", "gemma2"]
for m in models:
    key = f"is_drifted_{m}"
    d = sum(1 for s in samples if s.get(key, False))
    s_ = len(samples) - d
    # FIX: the original computed an unused `ratio` local and buried the real
    # ratio in a ternary-inside-print; use an explicit branch instead.
    if d > 0:
        # Ratio is reported as 1:k (one drifted sample per k stable ones).
        print(f" {m:10s}: {d:5d} drifted / {s_:5d} stable (ratio 1:{s_/d:.1f})")
    else:
        print(f" {m:10s}: 0 drifted")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q2: CATEGORY → DRIFT LABEL MAPPING (for Qwen2.5)")
print("=" * 70)
# Tally drifted/stable counts per category using the Qwen2.5 drift labels.
cat_drift = defaultdict(lambda: {"drifted": 0, "stable": 0})
for s in samples:
    label = "drifted" if s.get("is_drifted_qwen25", False) else "stable"
    cat_drift[s.get("category", "unknown")][label] += 1


print(f" {'Category':20s} {'Drifted':>8s} {'Stable':>8s} {'%Drifted':>10s}")
print(" " + "-" * 50)
for cat in sorted(cat_drift):
    counts = cat_drift[cat]
    d, s_ = counts["drifted"], counts["stable"]
    pct = d / (d + s_) * 100 if (d + s_) > 0 else 0
    print(f" {cat:20s} {d:8d} {s_:8d} {pct:9.1f}%")


# Sanity check: the known_drift category should be labeled drifted.
print("\n KEY QUESTION: Is known_drift labeled as drifted?")
known_drift = [s for s in samples if s.get("category") == "known_drift"]
kd_drifted = sum(1 for s in known_drift if s.get("is_drifted_qwen25", False))
kd_total = len(known_drift)
print(f" known_drift samples labeled drifted for Qwen: {kd_drifted}/{kd_total}")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q3: YEAR LEAKAGE CHECK (Qwen2.5)")
print("=" * 70)


# Cross-tabulate the sample's `year` field against the Qwen2.5 drift label.
year_by_drift = defaultdict(lambda: {"drifted": 0, "stable": 0})
for s in samples:
    label = "drifted" if s.get("is_drifted_qwen25", False) else "stable"
    year_by_drift[s.get("year", "unknown")][label] += 1


print(f" {'Year':>6s} {'Drifted':>8s} {'Stable':>8s} {'%Drifted':>10s}")
print(" " + "-" * 36)
# Keys may be a mix of ints and strings, so sort on their str form.
for yr in sorted(year_by_drift, key=lambda x: str(x)):
    counts = year_by_drift[yr]
    d, s_ = counts["drifted"], counts["stable"]
    pct = d / (d + s_) * 100 if (d + s_) > 0 else 0
    print(f" {str(yr):>6s} {d:8d} {s_:8d} {pct:9.1f}%")
|
|
| |
# Does the query text itself mention a calendar year? Substring scan over
# 2010..2026; only the first (lowest) matching year is counted per query.
year_in_query = Counter()
for s in samples:
    q = s.get("query", "")
    hit = next((y for y in range(2010, 2027) if str(y) in q), None)
    year_in_query[hit if hit is not None else "no_year"] += 1


print(f"\n Year mentioned in query text:")
for yr, n in sorted(year_in_query.items(), key=lambda x: str(x)):
    print(f" {yr}: {n}")
|
|
| |
# Same substring scan as above, but split by the Qwen2.5 drift label.
print(f"\n Query year distribution for DRIFTED vs STABLE (Qwen):")
drifted_years = Counter()
stable_years = Counter()
for s in samples:
    q = s.get("query", "")
    yr_found = next((y for y in range(2010, 2027) if str(y) in q), "no_year")
    bucket = drifted_years if s.get("is_drifted_qwen25", False) else stable_years
    bucket[yr_found] += 1


# Union of years seen on either side; mixed int/str keys sort via str().
all_years = sorted(set(drifted_years) | set(stable_years), key=lambda x: str(x))
print(f" {'Year':>8s} {'Drifted':>8s} {'Stable':>8s}")
print(" " + "-" * 28)
for yr in all_years:
    print(f" {str(yr):>8s} {drifted_years.get(yr, 0):8d} {stable_years.get(yr, 0):8d}")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q4: TEMPORAL ZONE DISTRIBUTION")
print("=" * 70)
# Frequency of each temporal_zone value, most common first.
tz_counts = Counter(s.get("temporal_zone", "none") for s in samples)
for tz, n in tz_counts.most_common():
    print(f" {str(tz):20s}: {n:6d}")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q5: MODEL_LIKELY_ANSWER FIELD")
print("=" * 70)
# A sample "has" the field when it is present and non-blank after str().strip().
with_mla = [
    s for s in samples
    if s.get("model_likely_answer") and str(s.get("model_likely_answer")).strip()
]
has_mla = len(with_mla)
print(f" Samples with model_likely_answer: {has_mla}/{len(samples)}")
if has_mla > 0:
    # Show up to three examples (same order as the dataset).
    for s in with_mla[:3]:
        mla = s.get("model_likely_answer", "")
        ea = s.get("expected_answer", "")
        q = s.get("query", "")[:60]
        print(f" Query: {q}")
        print(f" Expected: {ea}")
        print(f" Model likely: {mla}")
        print(f" Drifted (qwen): {s.get('is_drifted_qwen25', False)}")
        print()
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q6: NOISE SAMPLES IN TIER 1")
print("=" * 70)
# Empty, non-Latin-script, or very rare relations suggest noisy Tier-1 rows.
empty_rel = sum(1 for s in samples if not s.get("relation", "").strip())
# FIX: the Arabic Unicode block is U+0600..U+06FF *inclusive*; the original
# strict inequalities (> 0x0600 and < 0x06FF) wrongly excluded both endpoints.
arabic = sum(
    1 for s in samples
    if any(0x0600 <= ord(c) <= 0x06FF for c in s.get("relation", ""))
)
tiny_rels = [
    (r, n)
    for r, n in Counter(s.get("relation", "") for s in samples).items()
    if n < 20
]
print(f" Empty relation: {empty_rel}")
print(f" Arabic relation: {arabic}")
print(f" Relations with <20 samples: {tiny_rels}")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q7: DIFFERENTIAL FACTS BY RELATION")
print("=" * 70)
# A "differential" fact is one whose drift label disagrees across the models.
diff_by_rel = Counter()
total_by_rel = Counter()
for s in samples:
    rel = s.get("relation", "unknown")
    total_by_rel[rel] += 1
    flags = {s.get(f"is_drifted_{m}", False) for m in models}
    if len(flags) > 1:
        diff_by_rel[rel] += 1


n_diff = sum(diff_by_rel.values())
print(f" Total differential facts: {n_diff}")
print(f"\n {'Relation':30s} {'Differential':>12s} {'Total':>8s} {'%Diff':>8s}")
print(" " + "-" * 60)
for rel in sorted(total_by_rel):
    d = diff_by_rel.get(rel, 0)
    t = total_by_rel[rel]
    pct = d / t * 100 if t > 0 else 0
    print(f" {rel:30s} {d:12d} {t:8d} {pct:7.1f}%")
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" Q8: SAMPLE QUERIES PER CATEGORY")
print("=" * 70)
# Up to three example queries per category, with the per-model drift labels.
for cat in ["stable", "no_drift", "known_drift", "unknown_drift"]:
    cat_samples = [s for s in samples if s.get("category") == cat]
    print(f"\n [{cat}] ({len(cat_samples)} samples)")
    for s in cat_samples[:3]:
        # D = drifted, S = stable for each model.
        d_labels = " | ".join(
            f"{m}={'D' if s.get(f'is_drifted_{m}', False) else 'S'}"
            for m in models
        )
        print(f" Q: {s.get('query', '')[:80]}")
        print(f" A: {s.get('expected_answer', '')[:40]}")
        print(f" Year: {s.get('year', '?')}, Drift date: {str(s.get('drift_date', ''))[:10]}")
        print(f" Labels: {d_labels}")
        print()
|
|
| |
| |
| |
print("\n" + "=" * 70)
print(" VERDICT: YEAR LEAKAGE RISK")
print("=" * 70)


def _year_of(s):
    """Best-effort integer year of a sample; 0 when missing or non-numeric."""
    try:
        return int(s.get("year", 0))
    except (TypeError, ValueError):
        # FIX: some samples carry a non-numeric year (Q3 defaults these to
        # "unknown"); the original int() call would crash on them.
        return 0


drifted = [s for s in samples if s.get("is_drifted_qwen25", False)]
stable = [s for s in samples if not s.get("is_drifted_qwen25", False)]
drifted_recent = sum(1 for s in drifted if _year_of(s) >= 2024)
drifted_total = len(drifted)
stable_recent = sum(1 for s in stable if _year_of(s) >= 2024)
stable_total = len(stable)


# Explicit branches instead of ternary-inside-print for readability.
if drifted_total > 0:
    print(f" Drifted in 2024+: {drifted_recent}/{drifted_total} ({drifted_recent/drifted_total*100:.1f}%)")
else:
    print(" No drifted samples")
if stable_total > 0:
    print(f" Stable in 2024+: {stable_recent}/{stable_total} ({stable_recent/stable_total*100:.1f}%)")
else:
    print(" No stable samples")


if drifted_total > 0 and stable_total > 0:
    d_pct = drifted_recent / drifted_total
    s_pct = stable_recent / stable_total
    if d_pct > 0.8 and s_pct < 0.3:
        print("\n ⚠️ HIGH RISK: Drifted samples are concentrated in recent years.")
        print(" The probe may be learning YEAR, not DRIFT.")
    elif d_pct > s_pct + 0.2:
        print("\n ⚠️ MODERATE RISK: Some year-drift correlation exists.")
        print(" Paraphrase test + year-controlled subset needed.")
    else:
        print("\n ✅ LOW RISK: Year distribution is similar across drifted/stable.")


print(f"\n{'=' * 70}")
print(" Run: python data_diagnostic.py")
print(f"{'=' * 70}")