"""Chess move tokenizer built on Hugging Face's ``PreTrainedTokenizer``.

Moves are expected in a piece-annotated coordinate format such as
``WPe2e4`` (color, piece, from-square, to-square, optional promotion)
and are decomposed into structural tokens: ``[MOVE]``, the from-square,
the to-square, and an optional promotion token.
"""

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    """Tokenizer that decomposes piece-annotated chess moves into
    ``[MOVE]`` / square / promotion tokens, so a small fixed vocabulary
    covers every possible move."""

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens.
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    # Marker emitted before each decomposed move.
    MOVE_TOKEN = "[MOVE]"

    # Matches moves such as "WPe2e4": color, piece, from-square, to-square,
    # plus any trailing annotation (promotion, capture, check, ...).
    _MOVE_RE = re.compile(
        r"^(?P<color>[WB])(?P<piece>[PNBRQK])"
        r"(?P<from>[a-h][1-8])(?P<to>[a-h][1-8])(?P<rest>.*)$"
    )
    # Extracts an optional promotion piece from the trailing annotation,
    # e.g. "=Q" or "q".
    _PROMO_RE = re.compile(r"=?([QRBNqrbn])")

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # This tokenizer fixes its special tokens; drop caller-supplied ones.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        # Resolve the vocabulary: explicit dict > vocab file > default.
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must exist before the parent constructor runs, since
        # it resolves the special tokens through the vocab.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        # Fallback vocabulary containing only the special tokens.
        special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.MOVE_TOKEN]
        return {t: i for i, t in enumerate(special)}

    @classmethod
    def build_structured_vocab(cls) -> "ChessTokenizer":
        """Build the full structured vocabulary: the special tokens, the
        64 board squares, and the 4 promotion pieces."""
        special = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN, cls.MOVE_TOKEN]

        files = "abcdefgh"
        ranks = "12345678"
        squares = [f"{f}{r}" for f in files for r in ranks]

        promo = [f"promo_{p}" for p in ("q", "r", "b", "n")]

        tokens = special + squares + promo
        vocab = {t: i for i, t in enumerate(tokens)}
        return cls(vocab=vocab)
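
    # Resulting layout for reference: ids 0-4 are the special tokens,
    # ids 5-68 the squares a1..h8 (file-major order), and ids 69-72 the
    # promotion tokens, for 73 entries in total.
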
    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """Kept for API compatibility. Because moves are decomposed into
        structural tokens, the fixed structured vocabulary already covers
        any dataset, so no corpus pass is needed; the parameters are
        accepted but unused."""
        return cls.build_structured_vocab()

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        # Return a copy so callers cannot mutate the internal mapping.
        return dict(self._vocab)

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # Keep [MOVE] markers but drop padding/control tokens.
        drop = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in drop)

    def _decompose_one_move(self, move_tok: str) -> List[str]:
        """Decompose a single move string into [MOVE], from-square,
        to-square, and an optional promotion token."""
        m = self._MOVE_RE.match(move_tok)
        if not m:
            return [self.UNK_TOKEN]

        from_sq = m.group("from")
        to_sq = m.group("to")
        rest = m.group("rest") or ""

        out = [self.MOVE_TOKEN, from_sq, to_sq]

        # Append a promotion token if the trailing annotation names one.
        pm = self._PROMO_RE.search(rest)
        if pm:
            p = pm.group(1).lower()
            if p in ("q", "r", "b", "n"):
                out.append(f"promo_{p}")

        return out
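
    # Illustrative examples of the decomposition:
    #   "WPe2e4"   -> ["[MOVE]", "e2", "e4"]
    #   "WPe7e8=Q" -> ["[MOVE]", "e7", "e8", "promo_q"]
    #   "bogus"    -> ["[UNK]"]   (anything _MOVE_RE cannot parse)
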
    def _tokenize(self, text: str) -> List[str]:
        """Split whitespace-separated moves and decompose each one;
        tokens already in the vocabulary pass through unchanged."""
        text = text.strip()
        if not text:
            return []

        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN, self.MOVE_TOKEN}

        # Fast path for a single token.
        if " " not in text:
            if text in special:
                return [text]
            if text in self._vocab:
                return [text]
            return self._decompose_one_move(text)

        out: List[str] = []
        for part in text.split():
            if part in special:
                out.append(part)
            elif part in self._vocab:
                out.append(part)
            else:
                out.extend(self._decompose_one_move(part))
        return out
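
    # Illustrative example: "WPe2e4 BPe7e5" tokenizes to
    # ["[MOVE]", "e2", "e4", "[MOVE]", "e7", "e5"].
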
    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """Write the vocabulary to ``<prefix->vocab.json`` in
        ``save_directory`` and return the file path."""
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)

        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)

        return (vocab_file,)

def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """Count whole-move string frequencies in a dataset column.

    Useful for inspecting the corpus; the structured tokenizer itself
    does not need these counts. ``datasets`` is imported lazily so it
    stays an optional dependency.
    """
    from collections import Counter

    from datasets import load_dataset

    dataset = load_dataset(dataset_name, split=split)
    if max_samples is not None:
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    token_counts = Counter()
    for example in dataset:
        moves = example[column].strip().split()
        token_counts.update(moves)

    return dict(token_counts)
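

if __name__ == "__main__":
    # Minimal usage sketch (illustrative): the game fragment below is a
    # made-up example in the "WPe2e4"-style encoding that _MOVE_RE expects,
    # not a sample from the dataset.
    tok = ChessTokenizer.build_structured_vocab()
    print("vocab size:", tok.vocab_size)  # 73 with the structured layout

    game = "WPe2e4 BPe7e5 WNg1f3"
    tokens = tok.tokenize(game)
    print(tokens)  # one [MOVE] marker followed by from/to squares per move

    ids = tok.convert_tokens_to_ids(tokens)
    print(ids)
    print(tok.convert_tokens_to_string(tok.convert_ids_to_tokens(ids)))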