# sayit-archive-tw / scripts/parse_transcripts.py
# Source: audreyt (commit bf38a2e, verified)
# Initial dataset: 1,931 transcripts, 59K SFT pairs, 85K RAG chunks, 765 bilingual terms
#!/usr/bin/env python3
"""Parse ~2,000 Audrey Tang transcript markdown files into structured JSONL.
Reads from: /Users/au/w/transcript/*.md
Writes to: dataset/data/turns.jsonl (one JSON object per speaker turn)
dataset/data/metadata.json (statistics)
"""
from __future__ import annotations
import json
import glob
import html
import os
import re
import sys
from collections import Counter
from pathlib import Path
from typing import Optional
# Location of the ~2,000 source transcript markdown files.
TRANSCRIPT_DIR = "/Users/au/w/transcript"
# Output directory: <repo root>/data, resolved relative to this script.
OUTPUT_DIR = Path(__file__).resolve().parent.parent / "data"
# Helper files in the transcript directory that are not transcripts.
SKIP_FILES = {"lexicon.md", "z.md"}
# Audrey's names across languages
AUDREY_NAMES = {"audrey tang", "唐鳳"}
# Regex patterns
# H1 header carrying a full date, e.g. "# 2020-01-01 Some title".
HEADER_RE = re.compile(r"^#\s+(\d{4}-\d{2}-\d{2})\s+(.+)$")
# H1 header carrying only a year, e.g. "# 1999年全國司法改革會議".
HEADER_YEAR_ONLY_RE = re.compile(r"^#\s+(\d{4})年(.+)$")
# Speaker heading: "### Name:" (ASCII or fullwidth colon at end of line).
SPEAKER_RE = re.compile(r"^###\s+(.+?)\s*[::]\s*$")
# Blockquote line ("> ..."), scanned for stage directions.
STAGE_DIR_RE = re.compile(r"^>\s*(.+)$")
# Any single HTML tag.
HTML_TAG_RE = re.compile(r"<[^>]+>")
# A whole <iframe>...</iframe> embed block (possibly spanning lines).
IFRAME_BLOCK_RE = re.compile(r"<iframe[\s\S]*?</iframe>", re.IGNORECASE)
def is_cjk(char: str) -> bool:
    """True if *char* lies in the CJK Unified Ideographs block (U+4E00-U+9FFF)."""
    return 0x4E00 <= ord(char) <= 0x9FFF
def detect_language(text: str) -> str:
    """Classify *text* as 'zh' when more than 30% of its non-space
    characters are CJK Unified Ideographs (U+4E00-U+9FFF), else 'en'."""
    codepoints = [ord(c) for c in text if not c.isspace()]
    if not codepoints:
        return "en"
    han = sum(1 for cp in codepoints if 0x4E00 <= cp <= 0x9FFF)
    return "zh" if han / len(codepoints) > 0.30 else "en"
def normalize_speaker(name: str) -> str:
    """Collapse internal whitespace runs to single spaces and trim the ends."""
    tokens = name.split()
    return " ".join(tokens)
def is_audrey(speaker: str) -> bool:
    """True when *speaker*, case-folded and trimmed, is one of Audrey's names."""
    normalized = speaker.lower().strip()
    return normalized in AUDREY_NAMES
def extract_stage_directions(text: str) -> list[str]:
    """Pull parenthetical stage directions like (laughter) or (笑) from *text*.

    Both ASCII and fullwidth parentheses are matched; each result is
    returned wrapped in ASCII parentheses.
    """
    paren_pattern = r"[((]([^))]+)[))]"
    return [f"({m.group(1)})" for m in re.finditer(paren_pattern, text)]
def clean_text(text: str) -> str:
    """Clean turn text: decode HTML entities, strip HTML tags, normalize whitespace."""
    text = html.unescape(text)
    # Drop embedded players first, then any remaining HTML tags.
    text = re.sub(r"<iframe[\s\S]*?</iframe>", "", text, flags=re.IGNORECASE)
    text = re.sub(r"<[^>]+>", "", text)
    # Markdown links: keep the visible text, drop the URL ([text](url) -> text).
    text = re.sub(r"\[([^\]]*)\]\([^)]+\)", r"\1", text)
    # Strip each line; empty lines survive as paragraph separators.
    text = "\n".join(line.strip() for line in text.split("\n"))
    # Squash runs of blank lines down to a single blank line.
    text = re.sub(r"\n{3,}", "\n\n", text)
    return text.strip()
def parse_file(filepath: str) -> dict | None:
    """Parse a single transcript .md file into structured speaker turns.

    Returns a dict with keys: date, title, source_file, turns, language,
    skip_reason.  When the file must be skipped, skip_reason is a non-None
    string (and only source_file is additionally guaranteed); otherwise
    skip_reason is None.

    Layout handled:
      * Only the first non-empty line is considered as an H1 header:
        either "# YYYY-MM-DD Title" or the year-only form "# YYYY年...".
        Missing date/title fall back to the filename.
      * "### Speaker:" lines open turns; consecutive headings with the
        same speaker are merged into one turn.
      * Blockquote ("> ...") lines are mined for parenthetical stage
        directions and excluded from spoken text.
      * Files with no speaker headings at all become a single Audrey Tang
        monologue (display name chosen by detected language).
    """
    filename = os.path.basename(filepath)
    if filename in SKIP_FILES:
        # Fix: report the actual filename; previously this f-string had no
        # placeholder and emitted the literal "(unknown)".
        return {"skip_reason": f"excluded file: {filename}", "source_file": filename}

    with open(filepath, "r", encoding="utf-8") as f:
        content = f.read()
    if not content.strip():
        return {"skip_reason": "empty file", "source_file": filename}

    lines = content.split("\n")

    # --- H1 header: only the first non-empty line is inspected. ---
    date = None
    title = None
    header_line_idx = None
    for i, line in enumerate(lines):
        stripped = line.strip()
        if not stripped:
            continue
        m = HEADER_RE.match(stripped)
        if m:
            date = m.group(1)
            title = m.group(2).strip()
            header_line_idx = i
            break
        m2 = HEADER_YEAR_ONLY_RE.match(stripped)
        if m2:
            # e.g. "# 1999年全國司法改革會議" - has year but no full date
            date = None
            title = stripped.lstrip("# ").strip()
            header_line_idx = i
            break
        # First non-empty line is not a recognized header (speaker line or
        # plain body text); stop looking — the body starts at line 0.
        break

    # --- Fall back to the filename for date and title. ---
    if date is None:
        fm = re.match(r"(\d{4}-\d{2}-\d{2})", filename)
        if fm:
            date = fm.group(1)
    if title is None:
        # Derive title from filename; assumes a "YYYY-MM-DD-" prefix
        # (11 characters) followed by the hyphenated title.
        title_part = filename[11:]
        title_part = title_part.rsplit(".md", 1)[0]
        title = title_part.replace("-", " ")
    if date is None:
        return {"skip_reason": "no date found in header or filename", "source_file": filename}

    # --- Split the body into speaker turns. ---
    body_start = (header_line_idx + 1) if header_line_idx is not None else 0
    body_lines = lines[body_start:]
    turns = []
    current_speaker = None
    current_paragraphs = []
    current_stage_dirs = []
    has_speaker_blocks = False

    def flush_turn():
        # Emit accumulated paragraphs as one turn, then reset the buffers.
        # (current_speaker is only read here, so no nonlocal is needed for it.)
        nonlocal current_paragraphs, current_stage_dirs
        if current_speaker and current_paragraphs:
            text = clean_text("\n\n".join(current_paragraphs))
            if text:
                turns.append({
                    "speaker": current_speaker,
                    "text": text,
                    "stage_directions": current_stage_dirs[:],
                })
        current_paragraphs = []
        current_stage_dirs = []

    for line in body_lines:
        stripped = line.strip()
        # Speaker heading?
        sm = SPEAKER_RE.match(stripped)
        if sm:
            has_speaker_blocks = True
            new_speaker = normalize_speaker(sm.group(1))
            if new_speaker == current_speaker:
                # Same speaker continues: keep accumulating under the same
                # turn, separated by a paragraph break.
                if current_paragraphs:
                    current_paragraphs.append("")  # blank separator
            else:
                flush_turn()
                current_speaker = new_speaker
            continue
        # Blockquote? Carries stage directions only — its text is not
        # part of the spoken turn.
        sd = STAGE_DIR_RE.match(stripped)
        if sd:
            dirs = extract_stage_directions(sd.group(1).strip())
            if dirs:
                current_stage_dirs.extend(dirs)
            continue
        # Regular text line (ignored until the first speaker appears).
        if current_speaker:
            if stripped:
                current_paragraphs.append(stripped)
            elif current_paragraphs and current_paragraphs[-1] != "":
                current_paragraphs.append("")  # paragraph break
    flush_turn()

    # --- No "### Speaker:" lines: treat as a monologue by Audrey Tang. ---
    if not has_speaker_blocks:
        text_lines = []
        for line in body_lines:
            stripped = line.strip()
            if not stripped:
                continue
            if STAGE_DIR_RE.match(stripped):
                continue
            # Skip lines that are just links/embeds.
            if stripped.startswith("http") or stripped.startswith("<"):
                continue
            text_lines.append(stripped)
        if not text_lines:
            return {"skip_reason": "no meaningful content", "source_file": filename}
        full_text = clean_text("\n\n".join(text_lines))
        if not full_text:
            return {"skip_reason": "no meaningful content after cleaning", "source_file": filename}
        # Pick the display name matching the transcript's language.
        lang = detect_language(full_text)
        speaker = "唐鳳" if lang == "zh" else "Audrey Tang"
        turns = [{
            "speaker": speaker,
            "text": full_text,
            "stage_directions": [],
        }]

    if not turns:
        return {"skip_reason": "no turns extracted", "source_file": filename}

    # Language is detected over the whole transcript, not per turn.
    all_text = " ".join(t["text"] for t in turns)
    return {
        "date": date,
        "title": title,
        "source_file": filename,
        "turns": turns,
        "language": detect_language(all_text),
        "skip_reason": None,
    }
def make_turn_id(source_file: str, turn_index: int) -> str:
    """Build a stable turn ID: "<filename without .md>/<zero-padded index>"."""
    stem = source_file.rsplit(".md", 1)[0]
    return "{}/{:03d}".format(stem, turn_index)
def main():
    """Parse every transcript, write turns.jsonl + metadata.json, print a summary."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    md_files = sorted(glob.glob(os.path.join(TRANSCRIPT_DIR, "*.md")))
    print(f"Found {len(md_files)} .md files")

    turn_records = []
    skipped_files = []
    parsed_total = 0
    speakers = Counter()
    languages = Counter()
    all_dates = []

    for path in md_files:
        result = parse_file(path)
        if result is None:
            continue
        if result.get("skip_reason"):
            skipped_files.append({
                "file": result["source_file"],
                "reason": result["skip_reason"],
            })
            continue

        parsed_total += 1
        languages[result["language"]] += 1
        all_dates.append(result["date"])

        file_turns = result["turns"]
        for idx, turn in enumerate(file_turns):
            who = turn["speaker"]
            speakers[who] += 1
            turn_records.append({
                "id": make_turn_id(result["source_file"], idx),
                "date": result["date"],
                "title": result["title"],
                "source_file": result["source_file"],
                "speaker": who,
                "text": turn["text"],
                "turn_index": idx,
                "is_audrey": is_audrey(who),
                "language": result["language"],
                "stage_directions": turn["stage_directions"],
                "total_turns": len(file_turns),
            })

    # One JSON object per line (JSONL).
    turns_path = OUTPUT_DIR / "turns.jsonl"
    with open(turns_path, "w", encoding="utf-8") as out:
        out.writelines(json.dumps(rec, ensure_ascii=False) + "\n" for rec in turn_records)

    # Aggregate statistics for metadata.json.
    audrey_total = sum(1 for rec in turn_records if rec["is_audrey"])
    ordered_dates = sorted(all_dates) if all_dates else []
    leading_speakers = speakers.most_common(20)
    metadata = {
        "total_files_found": len(md_files),
        "total_files_parsed": parsed_total,
        "total_files_skipped": len(skipped_files),
        "total_turns": len(turn_records),
        "total_audrey_turns": audrey_total,
        "language_distribution": dict(languages),
        "date_range": {
            "earliest": ordered_dates[0] if ordered_dates else None,
            "latest": ordered_dates[-1] if ordered_dates else None,
        },
        "top_speakers": [{"speaker": s, "count": c} for s, c in leading_speakers],
        "skipped_files": skipped_files,
    }
    meta_path = OUTPUT_DIR / "metadata.json"
    with open(meta_path, "w", encoding="utf-8") as out:
        json.dump(metadata, out, ensure_ascii=False, indent=2)

    # Console summary.
    print(f"\nParsed {parsed_total} files, skipped {len(skipped_files)}")
    print(f"Total turns: {len(turn_records)}")
    print(f"Audrey turns: {audrey_total}")
    print(f"Language distribution: {dict(languages)}")
    if ordered_dates:
        print(f"Date range: {ordered_dates[0]} to {ordered_dates[-1]}")
    print("Top 10 speakers:")
    for who, count in leading_speakers[:10]:
        print(f" {who}: {count}")
    print(f"\nOutput: {turns_path}")
    print(f"Metadata: {meta_path}")
# CLI entry point.
if __name__ == "__main__":
    main()