# -----------------------------------------------------------------------------
# Author: Jiawei Liu
# Date: 2025-10-29
# -----------------------------------------------------------------------------
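"""Segment IMO notes markdown into (problem, solution) JSONL records.

For each `*notes.md` file under `md/`, pair the numbered problems from the
'## Problems' section with their per-problem solution sections and write one
JSON object per pair to a mirrored path under `segmented/`.
"""
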
import json
import re
from pathlib import Path

from rapidfuzz import fuzz


def clean_text(text: str) -> str:
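    """Remove the trailing 'Available online at ...' footer from the text."""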
    text = re.sub(
        r"\nAvailable online at.+?\.\s\s$", "", text, flags=re.DOTALL | re.MULTILINE
    )
    return text


def extract_problems(markdown_text: str) -> dict:
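    """Extract the numbered problems from the '## Problems' section.

    Returns a dict mapping each problem label (e.g. '1') to a tuple of
    (problem text, matched delimiter text).
    """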
    problems = {}
    # 1. Find the block of text containing the problems.
    #    The block starts at '## Problems' and ends just before
    #    '## \(S 1\) Solutions to Day 1'.
    problems_section = re.search(
        r"^## Problems\s*$(.*?)^##.*Solutions to Day 1",
        markdown_text,
        re.DOTALL | re.MULTILINE,
    )
    if not problems_section:
        print(" - '## Problems' section not found.")
        return problems
    problems_block = problems_section.group(1)
    # 2. Split the block into individual problems.
    #    Each problem starts with a number, a dot, and a space (e.g. '1. ').
    matches = list(re.finditer(r"^(\d+)\.\s+", problems_block, flags=re.MULTILINE))
    # Each problem's text runs from the end of its own marker to the start of
    # the next marker (or to the end of the block for the last problem).
    for i, m in enumerate(matches):
        problem_label = m.group(1)
        problem = problems_block[
            m.end() : matches[i + 1].start()
            if i + 1 < len(matches)
            else len(problems_block)
        ]
        problems[problem_label] = (problem.strip(), m.group())
    print(f" - Extracted {len(problems)} problems.")
    return problems


def extract_solutions(markdown_text: str) -> dict:
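    """Extract per-problem solutions from the 'Solutions to Day 1' section.

    Returns a dict mapping each problem label (the N in 'IMO <year>/N' from
    the solution header) to a tuple of (solution text, matched header text).
    """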
    solutions = {}
    # 1. Find the block of text containing the solutions.
    #    The block starts at '## \(S 1\) Solutions to Day 1' and runs to the
    #    end of the document.
    solutions_section = re.search(
        r"^##.*?Solutions to Day 1\s*$(.*)",
        markdown_text,
        re.DOTALL | re.MULTILINE,
    )
    if not solutions_section:
        print(" - Solutions section not found.")
        return solutions
    solutions_block = solutions_section.group(1)
    # 2. Split the block into individual solutions. Each solution starts with
    #    a header such as:
    #    ## \(\S 1.1\) IMO 2005/1, proposed by Bogdan Enescu (ROU)
    matches = list(
        re.finditer(r"^##.*?IMO\s\d+/(\d+).*?\n$", solutions_block, flags=re.MULTILINE)
    )
    # Each solution's text runs from the end of its header to the start of the
    # next header (or to the end of the block for the last solution).
    for i, m in enumerate(matches):
        problem_label = m.group(1)
        solution = solutions_block[
            m.end() : matches[i + 1].start()
            if i + 1 < len(matches)
            else len(solutions_block)
        ]
        solutions[problem_label] = (solution.strip(), m.group())
    print(f" - Extracted {len(solutions)} solutions.")
    return solutions


def join(problems: dict, solutions: dict) -> list:
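    """Pair each problem with its solution by problem label.

    The restated problem text inside each solution is located with
    rapidfuzz's partial_ratio_alignment and removed, so the solution
    keeps only the actual write-up.
    """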
    pairs = []
    for problem_label, (problem, p_match) in problems.items():
        # Fall back to an empty solution if no matching header was found,
        # instead of crashing on unpacking None.
        solution, s_match = solutions.get(problem_label, ("", ""))
        if solution:
            # Clean the solution by removing the span that overlaps with the
            # problem statement, located via fuzzy alignment. Slicing out the
            # aligned span avoids replace() deleting repeated substrings
            # elsewhere in the solution.
            problem_align = fuzz.partial_ratio_alignment(solution, problem)
            solution = (
                solution[: problem_align.src_start]
                + solution[problem_align.src_end :]
            )
            solution = re.sub(
                r"^\s*## Problem statement", "", solution, flags=re.IGNORECASE
            ).strip()
        if not solution:
            print(f" - Warning: No solution found for problem {problem_label}.")
        pairs.append((problem, solution, problem_label, p_match, s_match))
    return pairs


def write_pairs(output_file: Path, pairs: list, year: str, project_root: Path) -> None:
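    """Write the (problem, solution) pairs to `output_file` as JSON Lines."""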
    output_jsonl_text = ""
    for problem, solution, problem_label, p_match, s_match in pairs:
        output_jsonl_text += (
            json.dumps(
                {
                    "year": year,
                    "tier": "T0",
                    "problem_label": problem_label,
                    "problem_type": None,
                    "exam": "IMO",
                    "problem": problem,
                    "solution": solution,
                    "metadata": {
                        "resource_path": output_file.relative_to(
                            project_root
                        ).as_posix(),
                        "problem_match": p_match,
                        "solution_match": s_match,
                    },
                },
                ensure_ascii=False,
            )
            + "\n"
        )
    output_file.write_text(output_jsonl_text, encoding="utf-8")


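# Directory layout assumed by the driver below (derived from its path logic):
#   <base>/md/**/*notes.md       input markdown notes
#   <base>/segmented/**/*.jsonl  output problem/solution pairs
# where <base> is the grandparent directory of this script and its parent is
# treated as the project root for relative resource paths.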
if __name__ == "__main__":
    compet_base_path = Path(__file__).resolve().parent.parent
    compet_md_path = compet_base_path / "md"
    seg_output_path = compet_base_path / "segmented"
    project_root = compet_base_path.parent
    num_problems = 0
    num_solutions = 0
    for md_file in compet_md_path.glob("**/*notes.md"):
        print(f"Processing {md_file}...")
        output_file = seg_output_path / md_file.relative_to(
            compet_md_path
        ).with_suffix(".jsonl")
        output_file.parent.mkdir(parents=True, exist_ok=True)
        # Read the markdown file. A leading newline is prepended so the
        # footer pattern in clean_text(), which anchors on '\n', can also
        # match at the very top of the file.
        markdown_text = "\n" + md_file.read_text(encoding="utf-8")
        markdown_text = clean_text(markdown_text)
        problems = extract_problems(markdown_text)
        solutions = extract_solutions(markdown_text)
        num_problems += len(problems)
        num_solutions += len(solutions)
        pairs = join(problems, solutions)
        # The year is taken from the four-digit run in the file name
        # (e.g. '...2005notes' -> '2005'); fall back to an empty string
        # rather than crashing if the name has no year.
        year_match = re.search(r"\d{4}", output_file.stem)
        year = year_match.group() if year_match else ""
        write_pairs(output_file, pairs, year, project_root)
        print()
    print(f"Total problems extracted: {num_problems}")
    print(f"Total solutions extracted: {num_solutions}")