from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer that decomposes extended-UCI chess moves into
    component tokens (color, piece, from-square, to-square, promotion,
    and move modifiers) over a small fixed vocabulary.

    Example:
        >>> tokenizer = ChessTokenizer()
        >>> tokenizer.encode("WPe2e4 BPe7e5")
        [1, 4, 6, 45, 47, 5, 6, 50, 48, 2]  # [BOS, components..., EOS]
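        >>> tokenizer.tokenize("WPe2e4 BPe7e5")  # component tokens for the same game (illustrative)
        ['[W]', 'P', 'e2', 'e4', '[B]', 'P', 'e7', 'e5']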
| | """ |
| | |
| | model_input_names = ["input_ids", "attention_mask"] |
| | vocab_files_names = {"vocab_file": "vocab.json"} |
| | |

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Move components
    COLORS = ["[W]", "[B]"]
    PIECES = ["P", "N", "B", "R", "Q", "K"]
    FILES = ["a", "b", "c", "d", "e", "f", "g", "h"]
    RANKS = ["1", "2", "3", "4", "5", "6", "7", "8"]
    SQUARES = [f + r for f in FILES for r in RANKS]

    # Move modifiers: captures, check/mate markers, promotions, castling
    MODIFIERS = [
        "x",
        "+",
        "#",
        "+*",
        "=Q",
        "=R",
        "=B",
        "=N",
        "O-O",
        "O-O-O",
        "o",
        "O",
    ]

    # Extended-UCI move: color, piece, from/to squares, optional promotion,
    # optional parenthesised modifier suffix such as "(x+)".
    MOVE_PATTERN = re.compile(
        r'^([WB])'           # color
        r'([PNBRQK])'        # piece
        r'([a-h][1-8])'      # from-square
        r'([a-h][1-8])'      # to-square
        r'(=[QRBN])?'        # optional promotion
        r'(\([xoO+*]+\))?$'  # optional modifier suffix
    )

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Args:
            vocab_file: Path to a vocab.json file mapping tokens to ids.
            vocab: An explicit token-to-id mapping (takes precedence over vocab_file).
            **kwargs: Forwarded to ``PreTrainedTokenizer.__init__``.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # Special tokens are fixed; drop any user-supplied overrides so they
        # are not passed twice to the parent constructor.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Load or build the vocabulary before calling the parent constructor,
        # which may query it.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create the fixed vocabulary from chess components.

        Unlike the standard tokenizer, this creates a small fixed vocab of
        88 tokens (4 special + 2 colors + 6 pieces + 64 squares +
        12 modifiers) for the decomposed move representation.
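
        Example (default construction uses this vocabulary):
            >>> ChessTokenizer().vocab_size
            88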
| | """ |
| | tokens = [] |
| | |
| | |
| | tokens.extend([self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]) |
| | |
| | |
| | tokens.extend(self.COLORS) |
| | |
| | |
| | tokens.extend(self.PIECES) |
| | |
| | |
| | tokens.extend(self.SQUARES) |
| | |
| | |
| | tokens.extend(self.MODIFIERS) |
| | |
| | return {token: idx for idx, token in enumerate(tokens)} |
| | |
    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Note: For the decomposed tokenizer, this ignores the iterator and
        creates the fixed vocabulary. Provided for API compatibility.

        Args:
            iterator: An iterator yielding game strings (ignored).
            min_frequency: Minimum frequency for a token (ignored).

        Returns:
            A ChessTokenizer with the fixed decomposed vocabulary.
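
        Example (the iterator is ignored; the fixed vocabulary is returned):
            >>> tok = ChessTokenizer.build_vocab_from_iterator(iter([]))
            >>> tok.vocab_size
            88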
| | """ |
| | |
| | return cls() |
| | |
    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "moves",
        min_frequency: int = 1,
        max_samples: Optional[int] = None,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Note: For the decomposed tokenizer, this ignores the dataset and
        creates the fixed vocabulary. Provided for API compatibility.

        Args:
            dataset_name: Name of the dataset on the Hugging Face Hub (ignored).
            split: Dataset split to use (ignored).
            column: Column containing move strings (ignored).
            min_frequency: Minimum frequency for inclusion (ignored).
            max_samples: Maximum samples to process (ignored).

        Returns:
            A ChessTokenizer with the fixed decomposed vocabulary.
        """
        print("Note: Decomposed tokenizer uses a fixed vocabulary (88 tokens)")
        return cls()

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _parse_move(self, move: str) -> List[str]:
        """
        Parse a single move into component tokens.

        Args:
            move: Move in extended UCI format (e.g., "WPe2e4", "BNg8f6(x+)").

        Returns:
            List of component tokens.
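
        Example (illustrative):
            >>> ChessTokenizer()._parse_move("BNg8f6(x+)")
            ['[B]', 'N', 'g8', 'f6', 'x', '+']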
| | """ |
| | match = self.MOVE_PATTERN.match(move) |
| | |
| | if not match: |
| | return [self.UNK_TOKEN] |
| | |
| | tokens = [] |
| | |
| | |
| | color = match.group(1) |
| | tokens.append(f"[{color}]") |
| | |
| | |
| | tokens.append(match.group(2)) |
| | |
| | |
| | tokens.append(match.group(3)) |
| | |
| | |
| | tokens.append(match.group(4)) |
| | |
| | |
| | if match.group(5): |
| | tokens.append(match.group(5)) |
| | |
| | |
| | if match.group(6): |
| | suffix = match.group(6) |
| | suffix_content = suffix[1:-1] |
| | |
| | if "x" in suffix_content: |
| | tokens.append("x") |
| | if "+*" in suffix_content: |
| | tokens.append("+*") |
| | elif "+" in suffix_content: |
| | tokens.append("+") |
| | if suffix_content == "o": |
| | tokens.append("o") |
| | elif suffix_content == "O": |
| | tokens.append("O") |
| | |
| | return tokens |
| | |
    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into component tokens.

        Args:
            text: Space-separated moves in extended UCI format.

        Returns:
            List of component tokens.
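
        Example (illustrative):
            >>> ChessTokenizer()._tokenize("WPe2e4 BPe7e5")
            ['[W]', 'P', 'e2', 'e4', '[B]', 'P', 'e7', 'e5']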
| | """ |
| | tokens = [] |
| | moves = text.strip().split() |
| | |
| | for move in moves: |
| | move_tokens = self._parse_move(move) |
| | tokens.extend(move_tokens) |
| | |
| | return tokens |
| | |
    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert tokens back to move string.

        Reconstructs moves from component tokens.
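
        Example (illustrative):
            >>> tok = ChessTokenizer()
            >>> tok.convert_tokens_to_string(["[W]", "P", "e2", "e4", "[B]", "P", "e7", "e5"])
            'WPe2e4 BPe7e5'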
| | """ |
| | special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN} |
| | |
| | result = [] |
| | current_move = [] |
| | |
| | for token in tokens: |
| | if token in special: |
| | if current_move: |
| | result.append(self._reconstruct_move(current_move)) |
| | current_move = [] |
| | continue |
| | |
| | current_move.append(token) |
| | |
| | |
| | if self._is_complete_move(current_move): |
| | result.append(self._reconstruct_move(current_move)) |
| | current_move = [] |
| | |
| | |
| | if current_move: |
| | result.append(self._reconstruct_move(current_move)) |
| | |
| | return " ".join(result) |
| | |
    def _is_complete_move(self, tokens: List[str]) -> bool:
        """Check if tokens form a complete move."""
        if len(tokens) < 4:
            return False

        if (tokens[0] in self.COLORS and
            tokens[1] in self.PIECES and
            tokens[2] in self.SQUARES and
            tokens[3] in self.SQUARES):

            if len(tokens) == 4:
                return True

            remaining = tokens[4:]
            for t in remaining:
                if t in self.COLORS:
                    return True
                if t not in self.MODIFIERS and not t.startswith("="):
                    return True

            return True

        return False

    def _reconstruct_move(self, tokens: List[str]) -> str:
        """Reconstruct a move string from component tokens."""
        if not tokens:
            return ""

        if len(tokens) >= 4:
            # "[W]" / "[B]" -> "W" / "B"
            color = tokens[0]
            if color in self.COLORS:
                color = color[1]

            move = color + "".join(tokens[1:4])

            # Re-attach promotion and parenthesised modifier suffixes
            suffixes = []
            for t in tokens[4:]:
                if t.startswith("="):
                    move += t
                elif t in ["x", "+", "+*", "o", "O"]:
                    suffixes.append(t)

            if suffixes:
                move += "(" + "".join(suffixes) + ")"

            return move

        return "".join(tokens)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save the vocabulary to a file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the vocabulary file.

        Returns:
            Tuple containing the path to the saved vocabulary file.
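
        Example (illustrative; the target directory name is hypothetical):
            >>> ChessTokenizer().save_vocabulary("./chess_tokenizer")  # doctest: +SKIP
            ('./chess_tokenizer/vocab.json',)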
| | """ |
| | if not os.path.isdir(save_directory): |
| | os.makedirs(save_directory, exist_ok=True) |
| | |
| | vocab_file = os.path.join( |
| | save_directory, |
| | (filename_prefix + "-" if filename_prefix else "") + "vocab.json", |
| | ) |
| | |
| | with open(vocab_file, "w", encoding="utf-8") as f: |
| | json.dump(self._vocab, f, ensure_ascii=False, indent=2) |
| | |
| | return (vocab_file,) |
| |
|
| |
|
def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "moves",
    max_samples: Optional[int] = None,
) -> Dict[str, int]:
    """
    Count token frequencies in a dataset.

    Note: For the decomposed tokenizer, this counts component frequencies
    rather than whole-move frequencies.

    Args:
        dataset_name: Name of the dataset.
        split: Dataset split.
        column: Column with moves.
        max_samples: Max samples to process.

    Returns:
        Dictionary of token frequencies.
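
    Example (illustrative; requires downloading the dataset, so not run):
        >>> counts = count_vocab_from_dataset(max_samples=1000)  # doctest: +SKIP
        >>> counts["[W]"]  # number of white-move color tokens  # doctest: +SKIP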
| | """ |
| | from collections import Counter |
| | from datasets import load_dataset |
| | |
| | tokenizer = ChessTokenizer() |
| | |
| | dataset = load_dataset(dataset_name, split=split) |
| | if max_samples: |
| | dataset = dataset.select(range(min(max_samples, len(dataset)))) |
| | |
| | counts = Counter() |
| | for example in dataset: |
| | tokens = tokenizer.tokenize(example[column]) |
| | counts.update(tokens) |
| | |
| | return dict(counts) |
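

# Minimal usage sketch (illustrative; the __main__ guard is not part of the
# original API, and the commented values assume the default fixed vocabulary).
if __name__ == "__main__":
    tokenizer = ChessTokenizer()
    print(f"Vocab size: {tokenizer.vocab_size}")  # 88 with the default vocab

    game = "WPe2e4 BPe7e5 WNg1f3"
    tokens = tokenizer.tokenize(game)
    print(tokens)  # ['[W]', 'P', 'e2', 'e4', '[B]', 'P', 'e7', 'e5', '[W]', 'N', 'g1', 'f3']
    print(tokenizer.convert_tokens_to_string(tokens))  # round-trips to the input string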