lichess_sf / lichess_sf.py
mauricett's picture
Update lichess_sf.py
92dd194 verified
raw
history blame
3.33 kB
import datasets
import zstandard as zstd
import io
class LichessConfig(datasets.BuilderConfig):
    """BuilderConfig that carries the list of feature names to expose."""

    def __init__(self, features, **kwargs):
        super().__init__(**kwargs)
        # Feature names consumed by Lichess._info() when declaring the schema.
        self.features = features
class Lichess(datasets.GeneratorBasedBuilder):
    """Dataset builder for Lichess games annotated with Stockfish scores.

    Each raw shard is a zstd-compressed text file in which every game spans
    two lines: "<WhiteElo> <BlackElo>" followed by
    "<fens>;<moves>;<scores>", where each of the three fields is a
    comma-separated list.
    """

    BUILDER_CONFIG_CLASS = LichessConfig
    BUILDER_CONFIGS = [LichessConfig(features=["WhiteElo",
                                               "BlackElo",
                                               "fens",
                                               "moves",
                                               "scores"])]

    def _info(self):
        """Declare the dataset schema.

        Elo columns are small positive integers; the list-valued columns
        ("fens", "moves", "scores") are declared as "null" so their type is
        resolved from the generated examples.
        """
        features_dict = {feature: datasets.Value("uint16")
                         for feature in self.config.features}
        for list_feature in ("fens", "moves", "scores"):
            features_dict[list_feature] = datasets.Value("null")
        return datasets.DatasetInfo(datasets.Features(features_dict))

    def _get_filepaths(self):
        """Return the repo-relative paths of all 2023 shards.

        Order is shard-major (all months of shard 0, then shard 1, ...) so
        consecutive files cover different time periods.
        """
        months = [f"{m:02d}" for m in range(1, 13)]
        shards = ["0", "1", "2", "3"]
        paths = []
        for shard in shards:
            paths.extend(f"fen/2023/{month}/{shard}_fen.zst"
                         for month in months)
        return paths

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download every shard and expose them all as a single TRAIN split."""
        filepaths = self._get_filepaths()
        downloaded_files = dl_manager.download(filepaths)
        generator = datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                            gen_kwargs={'filepaths': downloaded_files})
        return [generator]

    def _generate_examples(self, filepaths):
        """Each worker receives a random set of the .zst files (the raw dataset).
        Each worker will cycle through its set of files. They read a single game
        from file 1, then a single game from file 2, etc. ...
        The purpose is to create batches that contain games from a diverse mix
        of time periods. -> Reduces distribution shift.
        """
        files = [open(filepath, "rb") for filepath in filepaths]
        try:
            # One decompressor/text wrapper per shard so we can interleave reads.
            dctxs = [zstd.ZstdDecompressor() for _ in files]
            stream_readers = [dctx.stream_reader(file)
                              for dctx, file in zip(dctxs, files)]
            pgns = [io.TextIOWrapper(sr) for sr in stream_readers]

            n = 0
            n_files = len(files)
            # Approximate number of positions per .zst file; caps total yield.
            n_positions = 2 * 10**6
            while n < n_files * n_positions:
                # Cycle through the different shards round-robin.
                pgn = pgns[n % n_files]
                elos = pgn.readline()
                game = pgn.readline()
                if not game:
                    # First exhausted shard ends generation for this worker.
                    break
                white_elo, black_elo = elos.split(" ")
                fens, moves, scores = game.split(";")
                _id = n
                n += 1
                yield _id, {"WhiteElo": int(white_elo),
                            "BlackElo": int(black_elo),
                            "fens": fens.split(","),
                            "moves": moves.split(","),
                            "scores": scores.rstrip("\n").split(",")}
        finally:
            # The original leaked these handles; always release them, even if
            # a parse error or generator close interrupts iteration.
            for file in files:
                file.close()