"""Chess move tokenizer (Chess Challenge submission by MaximeMuhlethaler)."""
from __future__ import annotations
import json
import os
import re
import shutil
from typing import Dict, List, Optional
from transformers import PreTrainedTokenizer

# A board square, e.g. "e4".
REGEX_CASE = re.compile(r"([a-h][1-8])")
# An optional promotion suffix at the end of a move: "q", "=Q" or "(Q)".
REGEX_PROMO = re.compile(r"[=\(]?([qrbnQRBN])[\)]?$")
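# Examples (direct consequences of the patterns above):
#   REGEX_CASE.findall("e2e4")            -> ["e2", "e4"]
#   REGEX_PROMO.search("a7a8q").group(1)  -> "q"
#   REGEX_PROMO.search("a7a8=Q").group(1) -> "Q"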

class ChessTokenizer(PreTrainedTokenizer):
    """
    Tokenizer that processes chess moves square by square.
    Deterministic vocabulary: special tokens + squares (a1..h8) + promotion pieces.
    """
    vocab_files_names = {"vocab_file": "vocab.json"}
    model_input_names = ["input_ids", "attention_mask"]

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"
    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        # Drop any special-token kwargs so they cannot clash with the
        # explicit values passed to super().__init__() below.
        for cle in ["pad_token", "bos_token", "eos_token", "unk_token"]:
            kwargs.pop(cle, None)
        if vocab:
            self.map_token_id = vocab
        elif vocab_file and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self.map_token_id = json.load(f)
        else:
            self.map_token_id = self._generer_vocabulaire()
        self.map_id_token = {i: t for t, i in self.map_token_id.items()}
        # The vocabulary must exist before calling the base constructor,
        # which may query get_vocab() / vocab_size.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )
    def _generer_vocabulaire(self) -> Dict[str, int]:
        """Generates the fixed list of required tokens."""
        liste_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        colonnes = "abcdefgh"
        lignes = "12345678"
        cases = [f"{c}{l}" for c in colonnes for l in lignes]
        liste_tokens.extend(cases)
        pieces_promo = ["q", "r", "b", "n"]
        liste_tokens.extend(pieces_promo)
        return {t: i for i, t in enumerate(liste_tokens)}
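    # Resulting layout: 4 specials + 64 squares + 4 promotion pieces = 72 ids,
    # i.e. [PAD]=0, [BOS]=1, [EOS]=2, [UNK]=3, a1=4 ... h8=67, then q, r, b, n.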
    @property
    def vocab_size(self) -> int:
        return len(self.map_token_id)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self.map_token_id)
    def _tokenize(self, text: str) -> List[str]:
        """
        Turns a whitespace-separated sequence of moves into a list of tokens.
        """
        resultat = []
        mouvements = text.strip().split()
        for mv in mouvements:
            cases_trouvees = REGEX_CASE.findall(mv)
            if len(cases_trouvees) >= 2:
                # UCI-style move: keep the origin and destination squares,
                # plus the promotion piece if one is present.
                resultat.extend(cases_trouvees[:2])
                match_promo = REGEX_PROMO.search(mv)
                if match_promo:
                    resultat.append(match_promo.group(1).lower())
            elif mv in self.map_token_id:
                resultat.append(mv)
            else:
                resultat.append(self.UNK_TOKEN)
        return resultat
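    # Example: "e2e4 e7e5 a7a8q" -> ["e2", "e4", "e7", "e5", "a7", "a8", "q"].
    # A move without two recognizable squares (e.g. SAN "Nf3") becomes [UNK]
    # unless it already exists verbatim in the vocabulary.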
    def _convert_token_to_id(self, token: str) -> int:
        return self.map_token_id.get(token, self.map_token_id[self.UNK_TOKEN])

    def _convert_id_to_token(self, index: int) -> str:
        return self.map_id_token.get(index, self.UNK_TOKEN)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Rebuilds the move string from the tokens.
        Logic: pairs of squares are joined into moves; a promotion token
        is appended to the preceding move.
        """
        sortie = []
        tampon_cases = []
        exclus = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        promotions = {"q", "r", "b", "n"}
        for t in tokens:
            if t in exclus:
                continue
            if t in promotions:
                if sortie:
                    sortie[-1] += t
            else:
                tampon_cases.append(t)
                if len(tampon_cases) == 2:
                    coup_complet = "".join(tampon_cases)
                    sortie.append(coup_complet)
                    tampon_cases = []
        return " ".join(sortie)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
        """Saves the vocabulary to disk."""
        os.makedirs(save_directory, exist_ok=True)
        nom_fichier = "vocab.json"
        if filename_prefix:
            nom_fichier = f"{filename_prefix}-{nom_fichier}"
        chemin_complet = os.path.join(save_directory, nom_fichier)
        with open(chemin_complet, "w", encoding="utf-8") as f:
            json.dump(self.map_token_id, f, ensure_ascii=False, indent=2)
        return (chemin_complet,)
    def save_pretrained(self, save_directory: str, **kwargs):
        """
        Standard save, plus a copy of this tokenizer.py script so the
        custom class can be loaded from the Hugging Face Hub.
        """
        super().save_pretrained(save_directory, **kwargs)
        source = os.path.abspath(__file__)
        dest = os.path.join(save_directory, "tokenizer.py")
        if source != dest:
            shutil.copy(source, dest)
        # Point AutoTokenizer at this module/class in the saved config.
        chem_config = os.path.join(save_directory, "tokenizer_config.json")
        if os.path.exists(chem_config):
            with open(chem_config, "r", encoding="utf-8") as f:
                cfg = json.load(f)
            cfg["auto_map"] = {"AutoTokenizer": "tokenizer.ChessTokenizer"}
            with open(chem_config, "w", encoding="utf-8") as f:
                json.dump(cfg, f, indent=2)
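        # Once saved, the tokenizer can be reloaded from that directory with
        # AutoTokenizer.from_pretrained(save_directory, trust_remote_code=True).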

try:
    # Register the class so AutoTokenizer can resolve it via the auto_map
    # written in save_pretrained.
    ChessTokenizer.register_for_auto_class("AutoTokenizer")
except Exception:
    # Older transformers versions may not support auto-class registration.
    pass
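
if __name__ == "__main__":
    # Minimal smoke test, a sketch assuming a local `transformers` install:
    # round-trip a short opening line through the default vocabulary.
    tok = ChessTokenizer()
    ids = tok.encode("e2e4 e7e5 g1f3 b8c6", add_special_tokens=False)
    print(ids)               # ids drawn from the fixed 72-token vocabulary
    print(tok.decode(ids))   # should print: e2e4 e7e5 g1f3 b8c6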