| | """ |
| | Evaluate word segmentation quality of UDD-1 treebank. |
| | |
| | Analyses: |
| | 1. Syllable distribution per token (by UPOS) |
| | 2. Anomalous token detection (long tokens, cross-boundary merges, legal terms) |
| | 3. Inconsistent segmentation (bigram vs single token) |
| | 4. Comparison with underthesea word_tokenize() (optional) |
| | 5. Manual review samples |
| | 6. Dictionary-based validation (optional) — uses Viet74K/UTS_Dictionary |
| | """ |
| |
|
| | import argparse |
| | import random |
| | import re |
| | import sys |
| | from collections import Counter, defaultdict |
| | from os.path import dirname, join, exists |
| |
|
| |
|
| | |
# Common Vietnamese legal-domain two-syllable terms used to probe
# segmentation consistency: each should normally appear as ONE token,
# so occurrences split into two adjacent tokens signal inconsistency.
LEGAL_TERMS = [
    "vụ án", "hợp đồng", "tài sản", "pháp luật", "quy định",
    "nghị định", "cơ quan", "tổ chức", "cá nhân", "trách nhiệm",
    "quyền lợi", "nghĩa vụ", "xử phạt", "vi phạm", "bồi thường",
    "thẩm quyền", "giải quyết", "khiếu nại", "tố cáo", "hình sự",
]
| |
|
| |
|
def parse_conllu(filepath):
    """Parse a CoNLL-U file into a list of sentence dicts.

    Each dict carries the sentence metadata (``sent_id``, ``text``) plus
    parallel lists for the token columns: ``tokens``, ``lemmas``, ``upos``,
    ``xpos``, ``head``, ``deprel``.
    """
    def fresh():
        # One record per sentence; parallel lists share indices.
        return {
            "sent_id": None,
            "text": None,
            "tokens": [],
            "upos": [],
            "xpos": [],
            "deprel": [],
            "head": [],
            "lemmas": [],
        }

    sentences = []
    current = fresh()

    with open(filepath, "r", encoding="utf-8") as f:
        for raw in f:
            raw = raw.rstrip("\n")
            if not raw.strip():
                # Blank line terminates the current sentence (if non-empty).
                if current["tokens"]:
                    sentences.append(current)
                    current = fresh()
            elif raw.startswith("#"):
                if raw.startswith("# sent_id"):
                    current["sent_id"] = raw.split("=", 1)[1].strip()
                elif raw.startswith("# text"):
                    current["text"] = raw.split("=", 1)[1].strip()
            else:
                cols = raw.split("\t")
                if len(cols) >= 10:
                    # Skip multiword-token ranges ("1-2") and empty nodes ("1.1").
                    if "-" in cols[0] or "." in cols[0]:
                        continue
                    current["tokens"].append(cols[1])
                    current["lemmas"].append(cols[2])
                    current["upos"].append(cols[3])
                    current["xpos"].append(cols[4])
                    current["head"].append(cols[6])
                    current["deprel"].append(cols[7])

    # Flush a trailing sentence when the file has no final blank line.
    if current["tokens"]:
        sentences.append(current)

    return sentences
| |
|
| |
|
def count_syllables(token):
    """Return the number of whitespace-separated syllables in *token*."""
    return sum(1 for _ in token.split())
| |
|
| |
|
| | |
| |
|
def analysis_syllable_distribution(sentences):
    """Tally per-token syllable counts, overall and broken down by UPOS.

    Returns ``(overall, by_upos, total_tokens)`` where *overall* is a
    Counter keyed by syllable count and *by_upos* maps each UPOS tag to
    such a Counter.
    """
    overall = Counter()
    by_upos = defaultdict(Counter)
    total_tokens = 0

    for sent in sentences:
        for tok, tag in zip(sent["tokens"], sent["upos"]):
            syl = len(tok.split())  # inline syllable count (space-separated)
            overall[syl] += 1
            by_upos[tag][syl] += 1
            total_tokens += 1

    return overall, by_upos, total_tokens
| |
|
| |
|
def format_syllable_report(overall, by_upos, total_tokens):
    """Format the syllable distribution (section 1) as markdown.

    Args:
        overall: Counter mapping syllable count -> number of tokens.
        by_upos: mapping UPOS tag -> Counter of syllable counts.
        total_tokens: total token count (denominator for percentages).

    Returns:
        The markdown section as a single string.

    Bug fix: the original loop emitted one "4+" row PER length >= 4 when
    the distribution contained lengths of 5+ but no exact 4, and then
    appended an aggregate "4+" row on top, duplicating counts; it also
    raised ValueError (``max()`` on empty sequence) for an empty Counter.
    All lengths >= 4 are now always collapsed into a single row.
    """
    lines = []
    lines.append("## 1. Syllable Distribution per Token")
    lines.append("")
    lines.append("### 1.1 Overall Distribution")
    lines.append("")
    lines.append("| Syllables | Count | Percentage |")
    lines.append("|---:|---:|---:|")

    # Individual rows for 1-3 syllables.
    for n in sorted(k for k in overall if k < 4):
        count = overall[n]
        pct = count / total_tokens * 100
        lines.append(f"| {n} | {count:,} | {pct:.2f}% |")

    # Single aggregate row for everything >= 4 syllables.
    count4 = sum(overall[k] for k in overall if k >= 4)
    if count4:
        pct = count4 / total_tokens * 100
        lines.append(f"| 4+ | {count4:,} | {pct:.2f}% |")

    lines.append("")
    lines.append("### 1.2 Distribution by UPOS")
    lines.append("")

    # Restrict the breakdown to the 10 most frequent tags.
    upos_totals = {upos: sum(counts.values()) for upos, counts in by_upos.items()}
    top_upos = sorted(upos_totals, key=upos_totals.get, reverse=True)[:10]

    lines.append("| UPOS | 1-syl | 2-syl | 3-syl | 4+-syl | Total | Avg syl |")
    lines.append("|:---|---:|---:|---:|---:|---:|---:|")

    for upos in top_upos:
        counts = by_upos[upos]
        total = upos_totals[upos]
        s1 = counts.get(1, 0)
        s2 = counts.get(2, 0)
        s3 = counts.get(3, 0)
        s4p = sum(counts[k] for k in counts if k >= 4)
        avg = sum(k * counts[k] for k in counts) / total if total else 0
        lines.append(
            f"| {upos} | {s1:,} | {s2:,} | {s3:,} | {s4p:,} | {total:,} | {avg:.2f} |"
        )

    lines.append("")
    return "\n".join(lines)
| |
|
| |
|
| | |
| |
|
def analysis_anomalous_tokens(sentences, legal_terms=None):
    """Find anomalous tokens in the treebank.

    Three checks:
      1. Long tokens (4+ syllables) — likely over-merged.
      2. Non-PROPN tokens with an uppercase letter starting a non-initial
         syllable — likely a merge across a word/sentence boundary.
      3. Legal-term consistency — each term counted both as a single token
         and as a split adjacent-token bigram.

    Args:
        sentences: parsed sentence dicts (see ``parse_conllu``).
        legal_terms: optional list of lowercase terms to check; defaults
            to the module-level ``LEGAL_TERMS`` (backward compatible).

    Returns:
        (long_tokens, long_token_counter, cross_boundary,
         cross_boundary_counter, legal_term_stats)

    Changes vs. the original: unused ``enumerate`` indices removed; the
    hard-coded term list is now a parameter with the old behavior as the
    default.
    """
    terms = LEGAL_TERMS if legal_terms is None else legal_terms

    # --- Check 1: long tokens --------------------------------------------
    long_tokens = []
    for sent in sentences:
        for token, upos in zip(sent["tokens"], sent["upos"]):
            n = len(token.split())
            if n >= 4:
                long_tokens.append({
                    "sent_id": sent["sent_id"],
                    "token": token,
                    "upos": upos,
                    "syllables": n,
                })

    long_token_counter = Counter(t["token"] for t in long_tokens)

    # --- Check 2: cross-boundary merges ----------------------------------
    cross_boundary = []
    for sent in sentences:
        for token, upos in zip(sent["tokens"], sent["upos"]):
            # Proper nouns legitimately capitalize inner syllables.
            if upos == "PROPN" or " " not in token:
                continue
            syllables = token.split()
            # Uppercase after the first syllable suggests two words merged.
            if any(s[0].isupper() for s in syllables[1:] if s):
                cross_boundary.append({
                    "sent_id": sent["sent_id"],
                    "token": token,
                    "upos": upos,
                })

    cross_boundary_counter = Counter(t["token"] for t in cross_boundary)

    # --- Check 3: legal term consistency ---------------------------------
    legal_term_stats = {}
    for term in terms:
        parts = term.split()
        as_single = 0
        as_split = 0

        for sent in sentences:
            tokens = sent["tokens"]
            # Term kept as one token.
            for token in tokens:
                if token.lower() == term:
                    as_single += 1
            # Term split across two adjacent tokens (bigram terms only).
            if len(parts) == 2:
                for j in range(len(tokens) - 1):
                    if tokens[j].lower() == parts[0] and tokens[j + 1].lower() == parts[1]:
                        as_split += 1

        total = as_single + as_split
        if total > 0:
            legal_term_stats[term] = {
                "as_single": as_single,
                "as_split": as_split,
                "total": total,
                # Share of the dominant variant: 100% means fully consistent.
                "consistency": max(as_single, as_split) / total * 100,
            }

    return long_tokens, long_token_counter, cross_boundary, cross_boundary_counter, legal_term_stats
| |
|
| |
|
def format_anomalous_report(long_tokens, long_token_counter, cross_boundary,
                            cross_boundary_counter, legal_term_stats):
    """Format the anomalous-token analysis (section 2) as markdown.

    Args:
        long_tokens: list of {sent_id, token, upos, syllables} dicts.
        long_token_counter: Counter over long-token surface forms.
        cross_boundary: list of {sent_id, token, upos} dicts.
        cross_boundary_counter: Counter over cross-boundary forms.
        legal_term_stats: term -> {as_single, as_split, total, consistency}.

    Returns:
        The markdown section as a single string.

    Performance fix: the original ran a ``next(...)`` linear scan over
    ``long_tokens`` / ``cross_boundary`` for EVERY table row (O(rows * n));
    first-occurrence lookups are now precomputed in one pass each.
    """
    # First occurrence wins — same value next(...) used to return.
    first_long = {}
    for t in long_tokens:
        first_long.setdefault(t["token"], t)
    first_cross = {}
    for t in cross_boundary:
        first_cross.setdefault(t["token"], t)

    lines = []
    lines.append("## 2. Anomalous Token Detection")
    lines.append("")

    # --- 2a: tokens of four or more syllables ----------------------------
    lines.append("### 2a. Long Tokens (4+ syllables)")
    lines.append("")
    lines.append(f"Total occurrences: {len(long_tokens):,}")
    lines.append(f"Unique tokens: {len(long_token_counter):,}")
    lines.append("")
    lines.append("**Top 30 by frequency:**")
    lines.append("")
    lines.append("| Token | Count | UPOS | Syllables |")
    lines.append("|:---|---:|:---|---:|")
    for token, count in long_token_counter.most_common(30):
        upos = first_long[token]["upos"]
        n_syl = len(token.split())
        lines.append(f"| {token} | {count} | {upos} | {n_syl} |")
    lines.append("")

    # --- 2b: suspected merges across word boundaries ---------------------
    lines.append("### 2b. Possible Cross-Boundary Merges")
    lines.append("")
    lines.append("Tokens (non-PROPN) with uppercase letters after spaces, suggesting")
    lines.append("incorrect merging of adjacent words.")
    lines.append("")
    lines.append(f"Total occurrences: {len(cross_boundary):,}")
    lines.append(f"Unique tokens: {len(cross_boundary_counter):,}")
    lines.append("")
    if cross_boundary_counter:
        lines.append("| Token | Count | UPOS | Example sent_id |")
        lines.append("|:---|---:|:---|:---|")
        for token, count in cross_boundary_counter.most_common(30):
            example = first_cross[token]
            lines.append(
                f"| {token} | {count} | {example['upos']} | {example['sent_id']} |"
            )
    else:
        lines.append("No cross-boundary merges detected.")
    lines.append("")

    # --- 2c: single-vs-split counts for known legal terms ----------------
    lines.append("### 2c. Legal Term Segmentation Consistency")
    lines.append("")
    lines.append("| Term | As Single Token | As Split Tokens | Total | Consistency |")
    lines.append("|:---|---:|---:|---:|---:|")
    for term in sorted(legal_term_stats, key=lambda t: legal_term_stats[t]["total"], reverse=True):
        s = legal_term_stats[term]
        dominant = "single" if s["as_single"] >= s["as_split"] else "split"
        lines.append(
            f"| {term} | {s['as_single']:,} | {s['as_split']:,} | {s['total']:,} | {s['consistency']:.1f}% ({dominant}) |"
        )
    lines.append("")
    return "\n".join(lines)
| |
|
| |
|
| | |
| |
|
def analysis_inconsistency(sentences):
    """Find forms that occur both as one token and as two adjacent tokens.

    Returns a dict mapping each such (lowercased) form to
    ``{"as_split": ..., "as_single": ...}`` occurrence counts.
    """
    # Every token surface form seen anywhere, lowercased.
    seen_forms = {tok.lower() for sent in sentences for tok in sent["tokens"]}

    # Count adjacent pairs whose joined form also exists as one token.
    split_counts = Counter()
    for sent in sentences:
        toks = sent["tokens"]
        for left, right in zip(toks, toks[1:]):
            joined = left.lower() + " " + right.lower()
            if joined in seen_forms:
                split_counts[joined] += 1

    # Count how often those same forms appear as a single token.
    single_counts = Counter()
    for sent in sentences:
        for tok in sent["tokens"]:
            low = tok.lower()
            if low in split_counts:
                single_counts[low] += 1

    return {
        form: {
            "as_split": split_counts[form],
            "as_single": single_counts.get(form, 0),
        }
        for form in split_counts
    }
| |
|
| |
|
def format_inconsistency_report(inconsistencies):
    """Format the inconsistency analysis (section 3) as markdown."""
    header = [
        "## 3. Inconsistent Segmentation",
        "",
        "Cases where two adjacent tokens appear elsewhere as a single token,",
        "or vice versa. Sorted by total occurrences.",
        "",
    ]

    if not inconsistencies:
        return "\n".join(header + ["No inconsistencies found.", ""])

    body = [
        f"Total inconsistent forms: {len(inconsistencies):,}",
        "",
        "| Token | As Single | As Split (bigram) | Total |",
        "|:---|---:|---:|---:|",
    ]

    # Largest combined count first; show at most 50 rows.
    ranked = sorted(
        inconsistencies.items(),
        key=lambda kv: kv[1]["as_single"] + kv[1]["as_split"],
        reverse=True,
    )
    for token, stats in ranked[:50]:
        total = stats["as_single"] + stats["as_split"]
        body.append(
            f"| {token} | {stats['as_single']:,} | {stats['as_split']:,} | {total:,} |"
        )
    body.append("")
    return "\n".join(header + body)
| |
|
| |
|
| | |
| |
|
def analysis_compare_tokenize(sentences, sample_size=300):
    """Compare treebank tokenization with underthesea ``word_tokenize()``.

    Returns ``(results, stats)``, or ``(None, None)`` when underthesea is
    not installed or no sentence carries raw text.
    """
    try:
        from underthesea import word_tokenize
    except ImportError:
        # Optional dependency missing -> analysis is skipped.
        return None, None

    candidates = [s for s in sentences if s["text"]]
    if not candidates:
        return None, None

    random.seed(42)  # fixed seed for a reproducible sample
    sample = random.sample(candidates, min(sample_size, len(candidates)))

    results = []
    match_count = 0
    diff_categories = Counter()

    for sent in sample:
        parser_tokens = sent["tokens"]
        # word_tokenize joins multi-syllable words with "_"; normalize to spaces.
        wt_tokens = [t.replace("_", " ") for t in word_tokenize(sent["text"])]

        is_match = parser_tokens == wt_tokens
        diffs = [] if is_match else find_token_diffs(parser_tokens, wt_tokens)
        if is_match:
            match_count += 1
        for d in diffs:
            diff_categories[d["type"]] += 1

        results.append({
            "sent_id": sent["sent_id"],
            "match": is_match,
            "parser_tokens": parser_tokens,
            "wt_tokens": wt_tokens,
            "diffs": diffs,
        })

    return results, {
        "sample_size": len(sample),
        "match_count": match_count,
        "mismatch_count": len(sample) - match_count,
        "match_rate": match_count / len(sample) * 100 if sample else 0,
        "diff_categories": diff_categories,
    }
| |
|
| |
|
def find_token_diffs(parser_tokens, wt_tokens):
    """Align two tokenizations syllable-by-syllable and classify diffs.

    Each recorded diff is ``{"type", "parser", "wt"}`` where *type* is
    ``parser_merges``, ``parser_splits`` or ``boundary_shift``.
    """
    def flatten(tokens):
        # (syllable, owning-token-index) pairs in reading order.
        return [(syl, idx) for idx, tok in enumerate(tokens) for syl in tok.split()]

    p_syls = flatten(parser_tokens)
    w_syls = flatten(wt_tokens)

    diffs = []
    pi = wi = 0
    while pi < len(p_syls) and wi < len(w_syls):
        p_syl, p_idx = p_syls[pi]
        w_syl, w_idx = w_syls[wi]
        # Same syllable assigned to different token positions -> boundary diff.
        if p_syl == w_syl and p_idx != w_idx:
            p_tok = parser_tokens[p_idx]
            w_tok = wt_tokens[w_idx]
            p_len = len(p_tok.split())
            w_len = len(w_tok.split())
            if p_len > w_len:
                kind = "parser_merges"
            elif p_len < w_len:
                kind = "parser_splits"
            else:
                kind = "boundary_shift"
            diffs.append({"type": kind, "parser": p_tok, "wt": w_tok})
        # Lockstep advance in both branches, exactly as before.
        pi += 1
        wi += 1

    return diffs
| |
|
| |
|
def format_compare_report(results, stats):
    """Format the word_tokenize comparison (section 4) as markdown."""
    lines = ["## 4. Comparison with `word_tokenize()`", ""]

    # Analysis was skipped entirely.
    if results is None:
        lines.append("**Skipped**: `underthesea` not available or not requested. "
                     "Use `--compare-tokenize` to enable.")
        lines.append("")
        return "\n".join(lines)

    lines.append(f"Sample size: {stats['sample_size']} sentences")
    lines.append(f"- Exact match: {stats['match_count']} ({stats['match_rate']:.1f}%)")
    lines.append(f"- Mismatch: {stats['mismatch_count']} ({100 - stats['match_rate']:.1f}%)")
    lines.append("")

    if stats["match_rate"] > 99.0:
        # Near-total agreement implies both tools share one segmenter.
        lines.extend([
            "### Finding: Shared Tokenizer",
            "",
            "The near-100% match rate confirms that Underthesea's `dependency_parse()` "
            "internally uses the same `word_tokenize()` model for segmentation. "
            "This means segmentation errors in UDD-1 are inherent to the Underthesea "
            "tokenizer and cannot be detected by comparing against `word_tokenize()`. "
            "A meaningful comparison would require an independent segmentation tool "
            "(e.g., VnCoreNLP, pyvi) or gold-standard segmented data.",
            "",
        ])

    if stats["diff_categories"]:
        lines.extend([
            "### Difference Categories",
            "",
            "| Category | Count | Description |",
            "|:---|---:|:---|",
        ])
        descs = {
            "parser_merges": "Parser joins tokens that word_tokenize keeps separate",
            "parser_splits": "Parser splits tokens that word_tokenize joins",
            "boundary_shift": "Different token boundary placement",
        }
        for cat, count in stats["diff_categories"].most_common():
            lines.append(f"| {cat} | {count} | {descs.get(cat, cat)} |")
        lines.append("")

    mismatched = [r for r in results if not r["match"]]
    if mismatched:
        lines.append("### Sample Mismatches (first 20)")
        lines.append("")
        for r in mismatched[:20]:
            lines.append(f"**{r['sent_id']}**")
            lines.append(f"- Parser: `{'` `'.join(r['parser_tokens'])}`")
            lines.append(f"- word_tokenize: `{'` `'.join(r['wt_tokens'])}`")
            if r["diffs"]:
                shown = [f"{d['type']}: \"{d['parser']}\" vs \"{d['wt']}\"" for d in r["diffs"][:5]]
                lines.append(f"- Diffs: {'; '.join(shown)}")
            lines.append("")

    lines.append("")
    return "\n".join(lines)
| |
|
| |
|
| | |
| |
|
def analysis_manual_samples(sentences, long_tokens, cross_boundary, inconsistencies,
                            compare_results=None, n_samples=100):
    """Pick sentences for manual review: ~30% flagged-suspicious, rest random.

    A sentence is "suspicious" if it contains a long token, a
    cross-boundary merge candidate, or a form from *inconsistencies*.
    Sampling is seeded (42) so the selection is reproducible.
    """
    # Sentence ids already flagged by the earlier analyses.
    flagged_ids = {t["sent_id"] for t in long_tokens} | {t["sent_id"] for t in cross_boundary}

    bad_forms = set(inconsistencies.keys()) if inconsistencies else set()
    for sent in sentences:
        if any(tok.lower() in bad_forms for tok in sent["tokens"]):
            flagged_ids.add(sent["sent_id"])

    by_id = {s["sent_id"]: s for s in sentences}

    # Cap the suspicious share at 30% of the requested sample.
    n_flagged = min(int(n_samples * 0.3), len(flagged_ids))
    n_random = n_samples - n_flagged

    random.seed(42)
    picked_flagged = random.sample(sorted(flagged_ids), min(n_flagged, len(flagged_ids)))
    pool = [s["sent_id"] for s in sentences if s["sent_id"] not in flagged_ids]
    picked_random = random.sample(pool, min(n_random, len(pool)))

    # Lookup of word_tokenize comparison results, if any were computed.
    wt_by_id = {}
    if compare_results:
        for r in compare_results:
            wt_by_id[r["sent_id"]] = r

    samples = []
    for sid in picked_flagged + picked_random:
        sent = by_id.get(sid)
        if not sent:
            continue

        flags = []
        # Re-derive the per-sentence flags so each sample is self-describing.
        for token in sent["tokens"]:
            if len(token.split()) >= 4:
                flags.append(f"long_token: \"{token}\"")
        for token, upos in zip(sent["tokens"], sent["upos"]):
            if upos != "PROPN" and " " in token:
                syllables = token.split()
                if any(s[0].isupper() for s in syllables[1:] if s):
                    flags.append(f"cross_boundary: \"{token}\"")
        for i in range(len(sent["tokens"]) - 1):
            bigram = sent["tokens"][i].lower() + " " + sent["tokens"][i + 1].lower()
            if bigram in bad_forms:
                flags.append(f"inconsistent: \"{sent['tokens'][i]}\" + \"{sent['tokens'][i+1]}\" (also as \"{bigram}\")")

        entry = wt_by_id.get(sid)
        samples.append({
            "sent_id": sid,
            "text": sent["text"],
            "tokens": sent["tokens"],
            "upos": sent["upos"],
            "flags": flags,
            "wt_tokens": entry["wt_tokens"] if entry else None,
            "is_suspicious": sid in flagged_ids,
        })

    return samples
| |
|
| |
|
def format_samples_report(samples):
    """Format the manual review samples (section 5) as markdown."""
    n_flagged = sum(1 for s in samples if s["is_suspicious"])
    n_rand = len(samples) - n_flagged

    lines = [
        "## 5. Manual Review Samples",
        "",
        f"Total samples: {len(samples)} ({n_flagged} suspicious, {n_rand} random)",
        "",
    ]

    for idx, s in enumerate(samples, 1):
        tag = "SUSPICIOUS" if s["is_suspicious"] else "RANDOM"
        lines.append(f"### Sample {idx} [{tag}] — {s['sent_id']}")
        lines.append("")
        if s["text"]:
            lines.append(f"**Text:** {s['text']}")
        lines.append(f"**Tokens:** `{'` `'.join(s['tokens'])}`")
        lines.append(f"**UPOS:** {' '.join(s['upos'])}")
        if s["wt_tokens"]:
            lines.append(f"**word_tokenize:** `{'` `'.join(s['wt_tokens'])}`")
        if s["flags"]:
            lines.append(f"**Flags:** {'; '.join(s['flags'])}")
        lines.append("")

    return "\n".join(lines)
| |
|
| |
|
| | |
| |
|
def load_dictionary():
    """Load a Vietnamese word list from underthesea, trying two sources.

    Returns ``(word_set, source_name)`` with lowercased, stripped entries,
    or ``(None, None)`` when neither source can be loaded.
    """
    # Preferred source: the bundled Viet74K word list.
    try:
        from underthesea.corpus import viet_dict_74K
        entries = viet_dict_74K.words
        return {w.lower().strip() for w in entries if w.strip()}, "Viet74K"
    except Exception:
        pass
    # Fallback: the UTS dictionary dataset.
    try:
        from underthesea.datasets.uts_dictionary import UTSDictionary
        entries = UTSDictionary().words
        return {w.lower().strip() for w in entries if w.strip()}, "UTS_Dictionary"
    except Exception:
        pass
    return None, None
| |
|
| |
|
def analysis_dictionary_validation(sentences, dict_set):
    """Validate word segmentation against a dictionary.

    Checks:
    A) Token coverage: is each token in the dictionary?
    B) Under-segmentation: multi-syllable tokens NOT in dictionary whose
       individual syllables all ARE (possible over-merge)
    C) Over-segmentation: adjacent token pairs/triples that form a
       dictionary word (possible under-merge)

    Args:
        sentences: parsed sentence dicts (see ``parse_conllu``).
        dict_set: set of lowercased dictionary entries.

    Returns:
        A dict of counters/lists; see the return statement for keys.

    Changes vs. the original: unused ``enumerate`` index removed; the
    constant ``func_upos`` set is hoisted out of the per-sentence loop.
    """
    total_tokens = 0
    in_dict = 0
    not_in_dict = 0
    oov_by_upos = defaultdict(Counter)
    oov_counter = Counter()
    in_dict_by_upos = Counter()
    total_by_upos = Counter()

    under_seg_candidates = []
    multi_oov_counter = Counter()

    over_seg_counter = Counter()
    over_seg_examples = {}

    # Two adjacent function words rarely form a real content word, so such
    # pairs are excluded from the bigram over-segmentation check.
    func_upos = {"ADP", "AUX", "CCONJ", "SCONJ", "DET", "PART", "PUNCT"}

    for sent in sentences:
        tokens = sent["tokens"]
        upos_list = sent["upos"]

        # --- A/B: per-token coverage and over-merge detection ------------
        for token, upos in zip(tokens, upos_list):
            t_lower = token.lower().strip()
            total_tokens += 1
            total_by_upos[upos] += 1

            # Punctuation, numbers and symbols are not dictionary words;
            # count them as "known" so they don't distort coverage.
            if upos == "PUNCT" or upos == "NUM" or upos == "SYM":
                in_dict += 1
                in_dict_by_upos[upos] += 1
                continue

            if t_lower in dict_set:
                in_dict += 1
                in_dict_by_upos[upos] += 1
            else:
                not_in_dict += 1
                oov_counter[t_lower] += 1
                oov_by_upos[upos][t_lower] += 1

                # Whole form unknown but every syllable known -> the parts
                # were probably separate words that got merged.
                syllables = t_lower.split()
                if len(syllables) >= 2 and all(s in dict_set for s in syllables):
                    multi_oov_counter[t_lower] += 1
                    under_seg_candidates.append({
                        "sent_id": sent["sent_id"],
                        "token": token,
                        "upos": upos,
                        "syllables": syllables,
                    })

        # --- C: adjacent pairs forming a dictionary word ------------------
        for i in range(len(tokens) - 1):
            if upos_list[i] in func_upos and upos_list[i + 1] in func_upos:
                continue
            if upos_list[i] == "PUNCT" or upos_list[i + 1] == "PUNCT":
                continue
            bigram = tokens[i].lower().strip() + " " + tokens[i + 1].lower().strip()
            if bigram in dict_set:
                over_seg_counter[bigram] += 1
                over_seg_examples.setdefault(bigram, sent["sent_id"])

        # --- C: adjacent triples forming a dictionary word ----------------
        for i in range(len(tokens) - 2):
            if upos_list[i] == "PUNCT" or upos_list[i + 2] == "PUNCT":
                continue
            trigram = (tokens[i].lower().strip() + " " +
                       tokens[i + 1].lower().strip() + " " +
                       tokens[i + 2].lower().strip())
            if trigram in dict_set:
                over_seg_counter[trigram] += 1
                over_seg_examples.setdefault(trigram, sent["sent_id"])

    return {
        "total_tokens": total_tokens,
        "in_dict": in_dict,
        "not_in_dict": not_in_dict,
        "coverage": in_dict / total_tokens * 100 if total_tokens else 0,
        "oov_counter": oov_counter,
        "oov_by_upos": oov_by_upos,
        "in_dict_by_upos": in_dict_by_upos,
        "total_by_upos": total_by_upos,
        "multi_oov_counter": multi_oov_counter,
        "under_seg_candidates": under_seg_candidates,
        "over_seg_counter": over_seg_counter,
        "over_seg_examples": over_seg_examples,
    }
| |
|
| |
|
def format_dictionary_report(stats, dict_name, dict_size=0):
    """Format the dictionary validation (section 6) as markdown."""
    out = []
    add = out.append
    add("## 6. Dictionary-Based Validation")
    add("")

    # Analysis was skipped entirely.
    if stats is None:
        add("**Skipped**: Dictionary not available or not requested. "
            "Use `--dict-validate` to enable.")
        add("")
        return "\n".join(out)

    add(f"**Dictionary:** {dict_name}")
    add(f"**Dictionary size:** {dict_size:,} entries")
    add("")

    # --- 6a: coverage ----------------------------------------------------
    add("### 6a. Token Coverage")
    add("")
    add("| Metric | Count | Percentage |")
    add("|:---|---:|---:|")
    add(f"| In dictionary | {stats['in_dict']:,} | {stats['coverage']:.1f}% |")
    oov_share = stats['not_in_dict'] / stats['total_tokens'] * 100
    add(f"| Out-of-vocabulary (OOV) | {stats['not_in_dict']:,} | {oov_share:.1f}% |")
    # NOTE(review): "excl. PUNCT/NUM/SYM" is the upstream label, but
    # total_tokens counts every token (those tags are treated as in-dict
    # rather than excluded) — confirm the intended wording.
    add(f"| Total (excl. PUNCT/NUM/SYM) | {stats['total_tokens']:,} | 100% |")
    add("")

    # Coverage per tag (12 most frequent).
    add("**Coverage by UPOS** (top tags):")
    add("")
    add("| UPOS | In Dict | Total | Coverage |")
    add("|:---|---:|---:|---:|")
    frequent = sorted(stats["total_by_upos"], key=stats["total_by_upos"].get, reverse=True)[:12]
    for upos in frequent:
        total = stats["total_by_upos"][upos]
        known = stats["in_dict_by_upos"].get(upos, 0)
        share = known / total * 100 if total else 0
        add(f"| {upos} | {known:,} | {total:,} | {share:.1f}% |")
    add("")

    # Most frequent OOV forms.
    add("**Top 30 OOV tokens:**")
    add("")
    add("| Token | Count | UPOS |")
    add("|:---|---:|:---|")
    for token, count in stats["oov_counter"].most_common(30):
        # Show the first UPOS bucket that contains this form.
        tag = "?"
        for upos, forms in stats["oov_by_upos"].items():
            if token in forms:
                tag = upos
                break
        add(f"| {token} | {count} | {tag} |")
    add("")

    # --- 6b: over-merged tokens ------------------------------------------
    add("### 6b. Possible Under-Segmentation (Over-Merged Tokens)")
    add("")
    add("Multi-syllable tokens NOT in dictionary, but all individual syllables ARE")
    add("in dictionary. These may be incorrectly merged by the tokenizer.")
    add("")
    n_under = sum(stats["multi_oov_counter"].values())
    add(f"Total occurrences: {n_under:,}")
    add(f"Unique forms: {len(stats['multi_oov_counter']):,}")
    add("")
    if stats["multi_oov_counter"]:
        add("| Token | Count | Sub-parts |")
        add("|:---|---:|:---|")
        for token, count in stats["multi_oov_counter"].most_common(40):
            add(f"| {token} | {count} | {' + '.join(token.split())} |")
    add("")

    # --- 6c: under-merged tokens ------------------------------------------
    add("### 6c. Possible Over-Segmentation (Under-Merged Tokens)")
    add("")
    add("Adjacent tokens that together form a word found in the dictionary.")
    add("These may be incorrectly split by the tokenizer.")
    add("")
    n_over = sum(stats["over_seg_counter"].values())
    add(f"Total occurrences: {n_over:,}")
    add(f"Unique dictionary words split: {len(stats['over_seg_counter']):,}")
    add("")
    if stats["over_seg_counter"]:
        add("| Dictionary Word | Times Split | Example sent_id |")
        add("|:---|---:|:---|")
        for word, count in stats["over_seg_counter"].most_common(50):
            add(f"| {word} | {count} | {stats['over_seg_examples'].get(word, '?')} |")
    add("")

    # --- 6d: summary ------------------------------------------------------
    add("### 6d. Summary")
    add("")
    add(f"- **Dictionary coverage**: {stats['coverage']:.1f}% of tokens are known words")
    add(f"- **Possible over-merges**: {len(stats['multi_oov_counter']):,} unique multi-syllable "
        f"OOV forms ({n_under:,} occurrences)")
    add(f"- **Possible under-merges**: {len(stats['over_seg_counter']):,} unique dictionary words "
        f"found split ({n_over:,} occurrences)")
    add("")

    return "\n".join(out)
| |
|
| |
|
| | |
| |
|
def main():
    """CLI entry point: parse treebank files, run analyses, write report.

    Bug fix: ``dict_set`` was previously assigned only inside the
    ``--dict-validate`` branch, so the ``len(dict_set)`` expression below
    raised NameError on every run without that flag. It is now
    initialized to None up front.
    """
    parser = argparse.ArgumentParser(description="Evaluate UDD-1 word segmentation quality")
    parser.add_argument(
        "-i", "--input", nargs="+",
        help="Input CoNLL-U files. If not specified, uses default UDD-1 files.",
    )
    parser.add_argument(
        "--all-files", action="store_true",
        help="Use all UDD-1 files (train, dev, test)",
    )
    parser.add_argument(
        "-o", "--output", default="SEGMENTATION_EVAL.md",
        help="Output markdown report file (default: SEGMENTATION_EVAL.md)",
    )
    parser.add_argument(
        "--compare-tokenize", action="store_true",
        help="Compare with underthesea word_tokenize() (requires underthesea)",
    )
    parser.add_argument(
        "--sample-size", type=int, default=300,
        help="Number of sentences to sample for word_tokenize comparison (default: 300)",
    )
    parser.add_argument(
        "--review-samples", type=int, default=100,
        help="Number of manual review samples (default: 100)",
    )
    parser.add_argument(
        "--dict-validate", action="store_true",
        help="Validate segmentation against Vietnamese dictionary (requires underthesea)",
    )
    args = parser.parse_args()

    base_dir = dirname(dirname(__file__))

    # Resolve the input file list from the CLI flags.
    if args.all_files:
        input_files = [
            join(base_dir, "vi_udd-ud-train.conllu"),
            join(base_dir, "vi_udd-ud-dev.conllu"),
            join(base_dir, "vi_udd-ud-test.conllu"),
        ]
    elif args.input:
        input_files = args.input
    else:
        input_files = [join(base_dir, "vi_udd-ud-train.conllu")]

    print(f"Parsing {len(input_files)} file(s)...")
    all_sentences = []
    for filepath in input_files:
        if not exists(filepath):
            print(f" WARNING: {filepath} not found, skipping")
            continue
        sents = parse_conllu(filepath)
        print(f" {filepath}: {len(sents):,} sentences")
        all_sentences.extend(sents)

    print(f"Total: {len(all_sentences):,} sentences, "
          f"{sum(len(s['tokens']) for s in all_sentences):,} tokens")
    print()

    # Report header.
    report_parts = []
    report_parts.append("# UDD-1 Word Segmentation Evaluation")
    report_parts.append("")
    report_parts.append(f"**Files analyzed:** {', '.join(f.split('/')[-1] for f in input_files)}")
    report_parts.append(f"**Total sentences:** {len(all_sentences):,}")
    report_parts.append(f"**Total tokens:** {sum(len(s['tokens']) for s in all_sentences):,}")
    report_parts.append("")

    # Section 1: syllable distribution.
    print("Analysis 1: Syllable distribution...")
    overall, by_upos, total_tokens = analysis_syllable_distribution(all_sentences)
    report_parts.append(format_syllable_report(overall, by_upos, total_tokens))

    # Section 2: anomalous tokens.
    print("Analysis 2: Anomalous tokens...")
    long_tokens, long_counter, cross_boundary, cross_counter, legal_stats = \
        analysis_anomalous_tokens(all_sentences)
    report_parts.append(format_anomalous_report(
        long_tokens, long_counter, cross_boundary, cross_counter, legal_stats))

    # Section 3: inconsistent segmentation.
    print("Analysis 3: Inconsistent segmentation...")
    inconsistencies = analysis_inconsistency(all_sentences)
    report_parts.append(format_inconsistency_report(inconsistencies))

    # Section 4 (optional): comparison with word_tokenize().
    compare_results = None
    compare_stats = None
    if args.compare_tokenize:
        print(f"Analysis 4: Comparing with word_tokenize() (sample={args.sample_size})...")
        compare_results, compare_stats = analysis_compare_tokenize(
            all_sentences, sample_size=args.sample_size)
    else:
        print("Analysis 4: Skipped (use --compare-tokenize to enable)")
    report_parts.append(format_compare_report(compare_results, compare_stats))

    # Section 5: manual review samples.
    print(f"Analysis 5: Manual review samples (n={args.review_samples})...")
    samples = analysis_manual_samples(
        all_sentences, long_tokens, cross_boundary, inconsistencies,
        compare_results=compare_results, n_samples=args.review_samples,
    )
    report_parts.append(format_samples_report(samples))

    # Section 6 (optional): dictionary validation.
    # dict_set MUST be initialized here: it is read below even when
    # --dict-validate is not given (previously a NameError).
    dict_set = None
    dict_stats = None
    dict_name = None
    if args.dict_validate:
        print("Analysis 6: Dictionary-based validation...")
        dict_set, dict_name = load_dictionary()
        if dict_set:
            print(f" Dictionary: {dict_name} ({len(dict_set):,} entries)")
            dict_stats = analysis_dictionary_validation(all_sentences, dict_set)
        else:
            print(" WARNING: No dictionary available, skipping")
    else:
        print("Analysis 6: Skipped (use --dict-validate to enable)")
    dict_size = len(dict_set) if dict_set else 0
    report_parts.append(format_dictionary_report(dict_stats, dict_name, dict_size))

    # Resolve the output path (relative paths go under the treebank dir).
    output_path = args.output
    if not output_path.startswith("/"):
        output_path = join(base_dir, output_path)

    report = "\n".join(report_parts)
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(report)

    # Console summary.
    print(f"\nReport written to: {output_path}")
    print(f" Total inconsistent forms: {len(inconsistencies):,}")
    print(f" Long tokens (4+ syl): {len(long_tokens):,} occurrences")
    print(f" Cross-boundary candidates: {len(cross_boundary):,} occurrences")
    if compare_stats:
        print(f" word_tokenize match rate: {compare_stats['match_rate']:.1f}%")
    if dict_stats:
        print(f" Dictionary coverage: {dict_stats['coverage']:.1f}%")
        n_under = sum(dict_stats["multi_oov_counter"].values())
        n_over = sum(dict_stats["over_seg_counter"].values())
        print(f" Possible over-merges: {len(dict_stats['multi_oov_counter']):,} forms ({n_under:,} occ)")
        print(f" Possible under-merges: {len(dict_stats['over_seg_counter']):,} forms ({n_over:,} occ)")
| |
|
| |
|
# Script entry point: run the full evaluation when invoked directly.
if __name__ == "__main__":
    main()
| |
|