"""
Custom Chess Tokenizer for the Chess Challenge.

This tokenizer uses a DECOMPOSED format compatible with the evaluator:

    "WPe2e4" -> ["WP", "e2_f", "e4_t"]

The decomposed format uses:
- Piece token: "WP", "BN", etc. (color + piece)
- Source square with _f suffix: "e2_f", "g1_f", etc.
- Destination square with _t suffix: "e4_t", "f3_t", etc.
- Optional suffix for annotations: "(x)", "(+)", "(+*)", "(o)", "(O)"

The dataset format uses:
- W/B prefix for White/Black
- Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
- Source and destination squares (e.g., e2e4)
- Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
"""

from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using the DECOMPOSED format.

    This tokenizer decomposes each move into sub-tokens:
    - Piece: "WP", "BN", etc.
    - Source square with _f suffix: "e2_f", "g1_f", etc.
    - Destination square with _t suffix: "e4_t", "f3_t", etc.
    - Optional suffix: "(x)", "(+)", etc.

    This format is compatible with the evaluator's 'decomposed' detection.

    Example:
        >>> tokenizer = ChessTokenizer.build_vocab_from_dataset()
        >>> tokenizer.tokenize("WPe2e4 BPe7e5")
        ['WP', 'e2_f', 'e4_t', 'BP', 'e7_f', 'e5_t']
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Drop any caller-supplied special tokens so the class-level defaults
        # defined above always win.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load the vocabulary from an explicit dict, from a JSON file, or fall
        # back to the minimal default (special tokens only).
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocabulary must exist before PreTrainedTokenizer.__init__ runs,
        # because the base class consults it when registering special tokens.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )
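
    # Expected shape of the vocabulary mapping (illustrative entries; the exact
    # IDs of non-special tokens depend on the sorted order used by the builders):
    #
    #   {"[PAD]": 0, "[BOS]": 1, "[EOS]": 2, "[UNK]": 3, "(+)": ..., "WP": ..., "e2_f": ...}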

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a minimal default vocabulary with just special tokens.

        For the full vocabulary, use `build_vocab_from_dataset()`.
        This minimal vocab is just a placeholder - you should build from data.
        """
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens)}
        return vocab

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Decomposes each move into tokens: piece, source_f, dest_t, and optional suffix.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from collections import Counter

        token_counts = Counter()

        for game in iterator:
            moves = game.strip().split()
            for move in moves:
                if len(move) < 6:
                    # Too short to decompose; count it as a single token.
                    token_counts[move] += 1
                    continue

                # Decompose into piece, source square (_f), destination square
                # (_t), and any trailing annotation suffix.
                piece = move[:2]
                source = move[2:4] + "_f"
                dest = move[4:6] + "_t"
                suffix = move[6:] if len(move) > 6 else None

                token_counts[piece] += 1
                token_counts[source] += 1
                token_counts[dest] += 1
                if suffix:
                    token_counts[suffix] += 1

        # Keep only tokens that occur at least min_frequency times.
        tokens = [
            token for token, count in token_counts.items()
            if count >= min_frequency
        ]

        # Sort for a deterministic vocabulary order.
        tokens = sorted(tokens)

        # Special tokens come first so they always get stable, low IDs.
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab)
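
    # Minimal usage sketch (illustrative; not executed at import time):
    #
    #   games = ["WPe2e4 BPe7e5 WNg1f3", "WPd2d4 BNg8f6"]
    #   tok = ChessTokenizer.build_vocab_from_iterator(games, min_frequency=1)
    #   tok.tokenize("WPe2e4")  # -> ['WP', 'e2_f', 'e4_t']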

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included (default: 500).
            max_samples: Maximum number of samples to process (default: 100k).

        Returns:
            A ChessTokenizer with the built vocabulary.
        """
        from datasets import load_dataset

        dataset = load_dataset(dataset_name, split=split)

        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        def game_iterator():
            for example in dataset:
                yield example[column]

        return cls.build_vocab_from_iterator(game_iterator(), min_frequency=min_frequency)
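
    # Hedged usage sketch: building from the default dataset downloads data from
    # the Hugging Face Hub; the reduced sample size and frequency cutoff below
    # are illustrative choices, not recommended settings.
    #
    #   tok = ChessTokenizer.build_vocab_from_dataset(max_samples=10_000, min_frequency=10)
    #   print(tok.vocab_size)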

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into decomposed tokens.

        Each move like "WPe2e4" becomes ["WP", "e2_f", "e4_t"].
        Moves with suffixes like "WPe2e4(x)" become ["WP", "e2_f", "e4_t", "(x)"].

        Args:
            text: A string of space-separated moves.

        Returns:
            List of decomposed tokens.
        """
        moves = text.strip().split()
        tokens = []

        for move in moves:
            if len(move) < 6:
                # Too short to decompose; keep it as a single token.
                tokens.append(move)
                continue

            # Decompose into piece, source square (_f), destination square
            # (_t), and any trailing annotation suffix.
            piece = move[:2]
            source = move[2:4] + "_f"
            dest = move[4:6] + "_t"
            suffix = move[6:] if len(move) > 6 else None

            tokens.extend([piece, source, dest])
            if suffix:
                tokens.append(suffix)

        return tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert decomposed tokens back to a string of moves.

        Joins tokens in [piece, source_f, dest_t, optional_suffix] order.
        E.g., ["WP", "e2_f", "e4_t"] -> "WP e2_f e4_t"

        For the evaluator's decomposed format, we keep the tokens space-separated
        rather than recombining them into compact moves.
        """
        # Drop special tokens before joining.
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        filtered = [t for t in tokens if t not in special]
        return " ".join(filtered)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
        """
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)
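
    # Hedged round-trip sketch: persist the vocabulary and rebuild a tokenizer
    # from the written file (the directory name is illustrative).
    #
    #   (path,) = tokenizer.save_vocabulary("./chess_tokenizer")
    #   reloaded = ChessTokenizer(vocab_file=path)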


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count decomposed token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping decomposed tokens to their frequencies.
    """
    from collections import Counter
    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)

    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()

    for example in dataset:
        moves = example[column].strip().split()
        for move in moves:
            if len(move) < 6:
                token_counts[move] += 1
                continue

            # Same decomposition as the tokenizer: piece, source (_f),
            # destination (_t), and optional suffix.
            piece = move[:2]
            source = move[2:4] + "_f"
            dest = move[4:6] + "_t"
            suffix = move[6:] if len(move) > 6 else None

            token_counts[piece] += 1
            token_counts[source] += 1
            token_counts[dest] += 1
            if suffix:
                token_counts[suffix] += 1

    return dict(token_counts)
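

# Hedged end-to-end sketch (assumes the `datasets` library and network access;
# the sample size, frequency cutoff, and output directory are illustrative).
if __name__ == "__main__":
    tokenizer = ChessTokenizer.build_vocab_from_dataset(
        max_samples=10_000,
        min_frequency=10,
    )
    print(f"Vocab size: {tokenizer.vocab_size}")
    print(tokenizer.tokenize("WPe2e4 BPe7e5 WNg1f3"))
    tokenizer.save_vocabulary("./chess_tokenizer")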