# Upload-page residue (uploader, commit message, commit hash) — kept as a
# comment so the file remains valid Python:
# LxYxvv's picture
# add IMO 2005 - 2025
# 088662b
# -----------------------------------------------------------------------------
# Author: Jiawei Liu
# Date: 2025-10-29
# -----------------------------------------------------------------------------
import json
import re
from pathlib import Path
from rapidfuzz import fuzz
def clean_text(text: str):
    """Return *text* with the trailing 'Available online at ...' footer removed.

    The footer (as emitted by the markdown converter) spans from a line starting
    with 'Available online at' up to a period followed by two whitespace
    characters at an end of line.
    """
    footer = re.compile(
        r"\nAvailable online at.+?\.\s\s$", re.DOTALL | re.MULTILINE
    )
    return footer.sub("", text)
def extract_problems(markdown_text: str):
    """Map problem number -> (statement, matched label) for the '## Problems' section.

    The section is delimited by the '## Problems' heading and the Day-1
    solutions heading; each statement inside it starts on its own line as
    '<n>. '.
    """
    problems = {}
    # Locate the statements block between the two headings.
    section = re.search(
        r"^## Problems\s*$(.*?)^##.*Solutions to Day 1",
        markdown_text,
        re.DOTALL | re.MULTILINE,
    )
    if section is None:
        print(" - Not found '## Problems' section.")
        return problems
    block = section.group(1)
    # Every '<n>. ' line marks the start of one problem statement.
    markers = list(re.finditer(r"^(\d+)\.\s+", block, flags=re.MULTILINE))
    # Pair each marker with its successor (None for the last one) to slice the block.
    for marker, nxt in zip(markers, markers[1:] + [None]):
        end = len(block) if nxt is None else nxt.start()
        problems[marker.group(1)] = (block[marker.end() : end].strip(), marker.group())
    print(f" - Extracted {len(problems)} problems.")
    return problems
def extract_solutions(markdown_text):
    """Map problem number -> (solution text, matched heading) for the solutions section.

    The section runs from the Day-1 solutions heading to the end of the
    document. Per-problem headings look like
    '## \\(\\S 1.1\\) IMO 2005/1, proposed by Bogdan Enescu (ROU)'.
    """
    solutions = {}
    # Grab everything after the Day-1 solutions heading.
    section = re.search(
        r"^##.*?Solutions to Day 1\s*$(.*)",
        markdown_text,
        re.DOTALL | re.MULTILINE,
    )
    if section is None:
        print(" - Not found Solutions section.")
        return solutions
    block = section.group(1)
    # One heading per solution; the problem number is the digits after 'IMO <year>/'.
    headings = list(
        re.finditer(r"^##.*?IMO\s\d+\/(\d+).*?\n$", block, flags=re.MULTILINE)
    )
    for idx, heading in enumerate(headings):
        end = headings[idx + 1].start() if idx + 1 < len(headings) else len(block)
        solutions[heading.group(1)] = (block[heading.end() : end].strip(), heading.group())
    print(f" - Extracted {len(solutions)} solutions.")
    return solutions
def join(problems: dict, solutions: dict):
    """Pair each problem with its solution and strip restated problem text.

    Args:
        problems: mapping of problem label -> (statement, label match string).
        solutions: mapping of problem label -> (solution text, heading match string).

    Returns:
        List of (problem, solution, problem_label, p_match, s_match) tuples.
        A problem with no matching solution yields an empty solution and a
        ``None`` s_match (with a warning) instead of crashing.
    """
    pairs = []
    for problem_label, (problem, p_match) in problems.items():
        entry = solutions.get(problem_label)
        if entry is None:
            # BUG FIX: the original unpacked .get()'s result unconditionally,
            # raising TypeError whenever a label had no solution.
            print(f" - Warning: No solution found for problem {problem_label}.")
            pairs.append((problem, "", problem_label, p_match, None))
            continue
        solution, s_match = entry
        # Solutions often restate the problem; locate the overlapping span
        # via fuzzy alignment and cut it out.
        problem_align = fuzz.partial_ratio_alignment(solution, problem)
        # BUG FIX: slice out the aligned span in place. The original used
        # str.replace(span, ""), which deletes EVERY occurrence of that
        # substring, potentially mangling the solution body.
        solution = solution[: problem_align.src_start] + solution[problem_align.src_end :]
        solution = re.sub(
            r"^\s*## Problem statement", "", solution, flags=re.IGNORECASE
        ).strip()
        if not solution:
            print(f" - Warning: No solution found for problem {problem_label}.")
        pairs.append((problem, solution, problem_label, p_match, s_match))
    return pairs
def write_pairs(output_file: Path, pairs: list, year: str, project_root: Path):
    """Serialize problem/solution pairs to *output_file* as JSON lines.

    Args:
        output_file: destination ``.jsonl`` path (must live under *project_root*).
        pairs: tuples of (problem, solution, problem_label, p_match, s_match).
        year: competition year, stored verbatim on every record.
        project_root: base path used to compute the relative resource path.
    """
    # The resource path is identical for every record; compute it once.
    resource_path = output_file.relative_to(project_root).as_posix()
    lines = []
    for problem, solution, problem_label, p_match, s_match in pairs:
        record = {
            "year": year,
            "tier": "T0",
            "problem_label": problem_label,
            "problem_type": None,
            "exam": "IMO",
            "problem": problem,
            "solution": solution,
            "metadata": {
                "resource_path": resource_path,
                "problem_match": p_match,
                "solution_match": s_match,
            },
        }
        lines.append(json.dumps(record, ensure_ascii=False) + "\n")
    output_file.write_text("".join(lines), encoding="utf-8")
if __name__ == "__main__":
    # Layout relative to this script: <base>/{md,segmented}; the project root
    # is one level above the competition base directory.
    base_dir = Path(__file__).resolve().parent.parent
    md_dir = base_dir / "md"
    seg_dir = base_dir / "segmented"
    repo_root = base_dir.parent
    total_problems = 0
    total_solutions = 0
    for md_file in md_dir.glob("**/*notes.md"):
        print(f"Processing {md_file}...")
        out_file = (seg_dir / md_file.relative_to(md_dir)).with_suffix(".jsonl")
        out_file.parent.mkdir(parents=True, exist_ok=True)
        # The leading newline guarantees a heading on the very first line of
        # the file still matches the '^## ...' MULTILINE patterns.
        text = clean_text("\n" + md_file.read_text(encoding="utf-8"))
        problems = extract_problems(text)
        solutions = extract_solutions(text)
        total_problems += len(problems)
        total_solutions += len(solutions)
        pairs = join(problems, solutions)
        # The year is the first 4-digit run in the output file name.
        year = re.search(r"\d{4}", out_file.stem).group()
        write_pairs(out_file, pairs, year, repo_root)
        print()
    print(f"Total problems extracted: {total_problems}")
    print(f"Total solutions extracted: {total_solutions}")