# chess-clement-v2 / tokenizer.py
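# -----------------------------------------------------------------------------
# Previous version (one token per full move), kept commented out for reference:
# -----------------------------------------------------------------------------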
# """
# Custom Chess Tokenizer for the Chess Challenge.
# This tokenizer treats each move as a single token using the extended UCI notation
# from the Lichess dataset (e.g., WPe2e4, BNg8f6).
# The dataset format uses:
# - W/B prefix for White/Black
# - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
# - Source and destination squares (e.g., e2e4)
# - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
# """
# from __future__ import annotations
# import json
# import os
# from pathlib import Path
# from typing import Dict, List, Optional
# from transformers import PreTrainedTokenizer
# class ChessTokenizer(PreTrainedTokenizer):
# """
# A custom tokenizer for chess moves using extended UCI notation.
# This tokenizer maps each possible chess move to a unique token ID.
# The vocabulary is built from the training dataset to ensure all moves
# encountered during training have a corresponding token.
# Example:
# >>> tokenizer = ChessTokenizer()
# >>> tokenizer.encode("WPe2e4 BPe7e5")
# [1, 42, 87, 2] # [BOS, e2e4, e7e5, EOS]
# """
# model_input_names = ["input_ids", "attention_mask"]
# vocab_files_names = {"vocab_file": "vocab.json"}
# # Special tokens
# PAD_TOKEN = "[PAD]"
# BOS_TOKEN = "[BOS]"
# EOS_TOKEN = "[EOS]"
# UNK_TOKEN = "[UNK]"
# def __init__(
# self,
# vocab_file: Optional[str] = None,
# vocab: Optional[Dict[str, int]] = None,
# **kwargs,
# ):
# """
# Initialize the chess tokenizer.
# Args:
# vocab_file: Path to a JSON file containing the vocabulary mapping.
# vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
# **kwargs: Additional arguments passed to PreTrainedTokenizer.
# """
# # Initialize special tokens
# self._pad_token = self.PAD_TOKEN
# self._bos_token = self.BOS_TOKEN
# self._eos_token = self.EOS_TOKEN
# self._unk_token = self.UNK_TOKEN
# # Remove any duplicate special-token entries passed through kwargs
# # to avoid "multiple values for keyword" errors when loading from disk.
# kwargs.pop("pad_token", None)
# kwargs.pop("bos_token", None)
# kwargs.pop("eos_token", None)
# kwargs.pop("unk_token", None)
# # Load or create vocabulary
# if vocab is not None:
# self._vocab = vocab
# elif vocab_file is not None and os.path.exists(vocab_file):
# with open(vocab_file, "r", encoding="utf-8") as f:
# self._vocab = json.load(f)
# else:
# # Create a minimal vocabulary with just special tokens
# # The full vocabulary should be built from the dataset
# self._vocab = self._create_default_vocab()
# # Create reverse mapping
# self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
# # Call parent init AFTER setting up vocab
# super().__init__(
# pad_token=self._pad_token,
# bos_token=self._bos_token,
# eos_token=self._eos_token,
# unk_token=self._unk_token,
# **kwargs,
# )
# def _create_default_vocab(self) -> Dict[str, int]:
# """
# Create a minimal default vocabulary with just special tokens.
# For the full vocabulary, use `build_vocab_from_dataset()`.
# This minimal vocab is just a placeholder - you should build from data.
# """
# special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
# vocab = {token: idx for idx, token in enumerate(special_tokens)}
# return vocab
# @classmethod
# def build_vocab_from_iterator(
# cls,
# iterator,
# min_frequency: int = 1,
# ) -> "ChessTokenizer":
# """
# Build a tokenizer vocabulary from an iterator of game strings.
# Args:
# iterator: An iterator yielding game strings (space-separated moves).
# min_frequency: Minimum frequency for a token to be included.
# Returns:
# A ChessTokenizer with the built vocabulary.
# """
# from collections import Counter
# token_counts = Counter()
# for game in iterator:
# moves = game.strip().split()
# token_counts.update(moves)
# # Filter by frequency
# tokens = [
# token for token, count in token_counts.items()
# if count >= min_frequency
# ]
# # Sort for reproducibility
# tokens = sorted(tokens)
# # Build vocabulary
# special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
# vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}
# return cls(vocab=vocab)
# @classmethod
# def build_vocab_from_dataset(
# cls,
# dataset_name: str = "dlouapre/lichess_2025-01_1M",
# split: str = "train",
# column: str = "text",
# min_frequency: int = 500,
# max_samples: Optional[int] = 100000,
# ) -> "ChessTokenizer":
# """
# Build a tokenizer vocabulary from a Hugging Face dataset.
# Args:
# dataset_name: Name of the dataset on Hugging Face Hub.
# split: Dataset split to use.
# column: Column containing the game strings.
# min_frequency: Minimum frequency for a token to be included (default: 500).
# max_samples: Maximum number of samples to process (default: 100k).
# Returns:
# A ChessTokenizer with the built vocabulary.
# """
# from datasets import load_dataset
# dataset = load_dataset(dataset_name, split=split)
# if max_samples is not None:
# dataset = dataset.select(range(min(max_samples, len(dataset))))
# def game_iterator():
# for example in dataset:
# yield example[column]
# return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
# @property
# def vocab_size(self) -> int:
# """Return the size of the vocabulary."""
# return len(self._vocab)
# def get_vocab(self) -> Dict[str, int]:
# """Return the vocabulary as a dictionary."""
# return dict(self._vocab)
# def _tokenize(self, text: str) -> List[str]:
# """
# Tokenize a string of moves into a list of tokens.
# Args:
# text: A string of space-separated moves.
# Returns:
# List of move tokens.
# """
# return text.strip().split()
# def _convert_token_to_id(self, token: str) -> int:
# """Convert a token to its ID."""
# return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))
# def _convert_id_to_token(self, index: int) -> str:
# """Convert an ID to its token."""
# return self._ids_to_tokens.get(index, self.UNK_TOKEN)
# def convert_tokens_to_string(self, tokens: List[str]) -> str:
# """Convert a list of tokens back to a string."""
# # Filter out special tokens for cleaner output
# special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
# return " ".join(t for t in tokens if t not in special)
# def save_vocabulary(
# self,
# save_directory: str,
# filename_prefix: Optional[str] = None,
# ) -> tuple:
# """
# Save the vocabulary to a JSON file.
# Args:
# save_directory: Directory to save the vocabulary.
# filename_prefix: Optional prefix for the filename.
# Returns:
# Tuple containing the path to the saved vocabulary file.
# """
# if not os.path.isdir(save_directory):
# os.makedirs(save_directory, exist_ok=True)
# vocab_file = os.path.join(
# save_directory,
# (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
# )
# with open(vocab_file, "w", encoding="utf-8") as f:
# json.dump(self._vocab, f, ensure_ascii=False, indent=2)
# return (vocab_file,)
# def count_vocab_from_dataset(
# dataset_name: str = "dlouapre/lichess_2025-01_1M",
# split: str = "train",
# column: str = "text",
# max_samples: Optional[int] = 10000,
# ) -> Dict[str, int]:
# """
# Count token frequencies in a dataset (useful for vocabulary analysis).
# Args:
# dataset_name: Name of the dataset on Hugging Face Hub.
# split: Dataset split to use.
# column: Column containing the game strings.
# max_samples: Maximum number of samples to process.
# Returns:
# Dictionary mapping tokens to their frequencies.
# """
# from collections import Counter
# from datasets import load_dataset
# dataset = load_dataset(dataset_name, split=split)
# if max_samples is not None:
# dataset = dataset.select(range(min(max_samples, len(dataset))))
# token_counts = Counter()
# for example in dataset:
# moves = example[column].strip().split()
# token_counts.update(moves)
# return dict(token_counts)
"""
Improved Chess Tokenizer (composable tokens) for the Chess Challenge.
Instead of using one token per full move (huge vocab, many UNKs),
we use a small fixed vocabulary:
- Special tokens: [PAD], [BOS], [EOS], [UNK]
- Space token: [SP] -> " "
- Colors: W, B
- Pieces: P, N, B, R, Q, K
- Squares: a1..h8 (64 tokens)
- Symbols: (, ), =, x, +, *, o, O
This keeps vocab small, avoids UNKs, and helps the model learn move syntax.
"""
from __future__ import annotations
import json
import os
from typing import Dict, List, Optional
from transformers import PreTrainedTokenizer
class ChessTokenizer(PreTrainedTokenizer):
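    """
    Composable-token chess tokenizer.

    Each move in extended UCI notation (e.g., WPe2e4) is split into its
    color, piece, and square components; whitespace between moves is encoded
    as the explicit [SP] token.
    """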
model_input_names = ["input_ids", "attention_mask"]
vocab_files_names = {"vocab_file": "vocab.json"}
# Special tokens
PAD_TOKEN = "[PAD]"
BOS_TOKEN = "[BOS]"
EOS_TOKEN = "[EOS]"
UNK_TOKEN = "[UNK]"
# Explicit space token (VERY IMPORTANT for the evaluator's MoveGenerator)
SPACE_TOKEN = "[SP]"
def __init__(
self,
vocab_file: Optional[str] = None,
vocab: Optional[Dict[str, int]] = None,
**kwargs,
):
# Avoid duplicate kwargs errors when loading from disk
kwargs.pop("pad_token", None)
kwargs.pop("bos_token", None)
kwargs.pop("eos_token", None)
kwargs.pop("unk_token", None)
self._pad_token = self.PAD_TOKEN
self._bos_token = self.BOS_TOKEN
self._eos_token = self.EOS_TOKEN
self._unk_token = self.UNK_TOKEN
if vocab is not None:
self._vocab = dict(vocab)
elif vocab_file is not None and os.path.exists(vocab_file):
with open(vocab_file, "r", encoding="utf-8") as f:
self._vocab = json.load(f)
else:
self._vocab = self._build_fixed_vocab()
self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
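        # Call the parent constructor only after the vocab exists; it consults
        # the vocabulary while registering the special tokens.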
super().__init__(
pad_token=self._pad_token,
bos_token=self._bos_token,
eos_token=self._eos_token,
unk_token=self._unk_token,
**kwargs,
)
@staticmethod
def _all_squares() -> List[str]:
files = "abcdefgh"
ranks = "12345678"
        return [f + r for r in ranks for f in files]  # a1, b1, ..., h1, a2, ..., h8
def _build_fixed_vocab(self) -> Dict[str, int]:
# Order matters: deterministic IDs
special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.SPACE_TOKEN]
colors = ["W", "B"]
pieces = ["P", "N", "B", "R", "Q", "K"]
squares = self._all_squares()
symbols = ["(", ")", "=", "x", "+", "*", "o", "O"]
        # "B" appears in both colors and pieces; dedupe while preserving order
        # so that IDs stay contiguous and the highest ID equals vocab_size - 1.
        tokens = list(dict.fromkeys(special + colors + pieces + squares + symbols))
        return {tok: i for i, tok in enumerate(tokens)}
# ---------------------------------------------------------------------
# Compatibility helpers used by your training script
# ---------------------------------------------------------------------
@classmethod
def build_vocab_from_dataset(
cls,
dataset_name: str = "dlouapre/lichess_2025-01_1M",
split: str = "train",
column: str = "text",
min_frequency: int = 1,
max_samples: Optional[int] = None,
) -> "ChessTokenizer":
"""
Kept for backwards compatibility with train.py.
With this tokenizer we use a fixed vocab, so we do NOT need to scan dataset.
"""
return cls(vocab=cls()._build_fixed_vocab())
@classmethod
def build_vocab_from_iterator(cls, iterator, min_frequency: int = 1) -> "ChessTokenizer":
"""Kept for compatibility; fixed vocab anyway."""
return cls(vocab=cls()._build_fixed_vocab())
# ---------------------------------------------------------------------
# Required overrides
# ---------------------------------------------------------------------
@property
def vocab_size(self) -> int:
return len(self._vocab)
def get_vocab(self) -> Dict[str, int]:
return dict(self._vocab)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab.get(token, self._vocab[self.UNK_TOKEN])
def _convert_id_to_token(self, index: int) -> str:
return self._ids_to_tokens.get(index, self.UNK_TOKEN)
def _tokenize(self, text: str) -> List[str]:
"""
Tokenize by scanning characters and recognizing:
- special tokens like [BOS]
- whitespace -> [SP]
- squares like e2
- single-char symbols / letters (W,B,P,N,B,R,Q,K,()=x+*oO)
"""
tokens: List[str] = []
i = 0
n = len(text)
# Fast access
vocab = self._vocab
squares_set = set(self._all_squares())
while i < n:
ch = text[i]
# Whitespace -> SPACE_TOKEN
if ch.isspace():
tokens.append(self.SPACE_TOKEN)
i += 1
continue
# Special tokens [BOS] [EOS] etc
if ch == "[":
j = text.find("]", i)
if j != -1:
cand = text[i : j + 1]
if cand in vocab:
tokens.append(cand)
i = j + 1
continue
# If malformed, fall through as unknown char
# Square like e2
if i + 1 < n:
cand2 = text[i : i + 2]
if cand2 in squares_set:
tokens.append(cand2)
i += 2
continue
            # Treat alternative separators as [SP]
            if ch in "|_":
                tokens.append(self.SPACE_TOKEN)
                i += 1
                continue
# Single-char token
if ch in vocab:
tokens.append(ch)
i += 1
continue
# Otherwise unknown
tokens.append(self.UNK_TOKEN)
i += 1
return tokens
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Reconstruct the exact string:
- [SP] becomes " "
- other tokens concatenate (NO extra spaces)
- skip special tokens except [SP]
"""
        out = []
        for tok in tokens:
            if tok == self.SPACE_TOKEN:
                out.append(" ")
            elif tok in {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}:
                # Skip special tokens in text output ([SP] is handled above)
                continue
            else:
                out.append(tok)
        return "".join(out)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
if not os.path.isdir(save_directory):
os.makedirs(save_directory, exist_ok=True)
vocab_file = os.path.join(
save_directory,
(filename_prefix + "-" if filename_prefix else "") + "vocab.json",
)
with open(vocab_file, "w", encoding="utf-8") as f:
json.dump(self._vocab, f, ensure_ascii=False, indent=2)
return (vocab_file,)
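

# Minimal usage sketch (illustrative): encode a short game and round-trip it.
# The game string below is made-up example data, not taken from the dataset.
if __name__ == "__main__":
    tok = ChessTokenizer()
    game = "WPe2e4 BPe7e5 WNg1f3"
    ids = tok.encode(game, add_special_tokens=False)
    print("tokens:", tok.convert_ids_to_tokens(ids))
    print("round-trip:", tok.decode(ids))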