Additional scripts - convert, sort, split, Mamba train
sort_split.py - sorts the rows (games) by length, then splits the result into multiple parquet files. Useful for training models like Mamba with a flexible context size (sequence length): each split contains games of similar length, so batches need less padding and training runs faster. See train_bygame.py below, though that script isn't directly relevant to this DATASET repo. A quick sanity check for the resulting splits follows the script.
import pandas as pd
import pyarrow.parquet as pq
import os
import numpy as np
import math
def sort_and_split_parquet(input_file, output_dir, n_splits):
    # Load the parquet file
    df = pq.read_table(input_file).to_pandas()
    # Sort by the length of the 'tokenized' column
    df['length'] = df['tokenized'].apply(len)
    df_sorted = df.sort_values(by='length').drop(columns=['length'])
    # Calculate the number of rows per split
    total_rows = len(df_sorted)
    rows_per_split = math.ceil(total_rows / n_splits)
    print("Dataset sorted, splitting...")
    # Split and save each part
    for i in range(n_splits):
        start_row = i * rows_per_split
        end_row = min(start_row + rows_per_split, total_rows)
        split_df = df_sorted.iloc[start_row:end_row]
        # Save the split DataFrame as a parquet file
        split_file_name = f"train_{i}.parquet"
        split_df.to_parquet(os.path.join(output_dir, split_file_name))
        first_game_length = len(split_df.iloc[0]['tokenized'])
        last_game_length = len(split_df.iloc[-1]['tokenized'])
        print(f"Saved {split_file_name}... Game lengths: {first_game_length} - {last_game_length}")
# Example usage
input_file = 'data/chess/train.parquet'
output_dir = 'data/chess'
n_splits = 250
sort_and_split_parquet(input_file, output_dir, n_splits)
print("Done.")
csv2pqt.py
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import numpy as np
import tiktoken
import pickle
from sklearn.model_selection import train_test_split
import random
def tokenize_game(game, stoi):
    # Remove the prefix and tokenize the game
    game_cleaned = game.split('\n\n', 1)[1] if '\n\n' in game else game
    return np.array([stoi[char] for char in game_cleaned], dtype=np.uint8)

if __name__ == "__main__":
    dataset_path = "gt1_8kElo_all.zip"
    meta_path = "data/chess/meta.pkl"
    # Load metadata for tokenization
    with open(meta_path, "rb") as f:
        meta = pickle.load(f)
    stoi = meta["stoi"]
    # Read CSV with headers
    df = pd.read_csv(dataset_path)
    print(df.iloc[random.randint(0, len(df) - 1)])
    # Tokenize games in the 'transcript' column
    df['tokenized'] = df['transcript'].apply(lambda x: tokenize_game(x, stoi))
    # Split dataset into training and validation
    train_df, val_df = train_test_split(df, test_size=0.01, random_state=42)

    # Define a function to write the DataFrame to a Parquet file with multiple rows per row group
    def write_parquet_with_row_groups(df, file_name, rows_per_group=100):
        table = pa.Table.from_pandas(df[['tokenized']])
        writer = pq.ParquetWriter(file_name, table.schema)
        for i in range(0, len(df), rows_per_group):
            writer.write_table(table.slice(i, min(rows_per_group, len(df) - i)))
        writer.close()

    write_parquet_with_row_groups(train_df, 'train_lich.parquet')
    write_parquet_with_row_groups(val_df, 'val_lich.parquet')
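If you don't already have the meta.pkl referenced above, here is a minimal sketch of how a compatible one could be built. The structure (a dict with vocab_size, stoi, and itos) follows the nanoGPT convention that these scripts expect; collecting the character set directly from the transcripts is my assumption here, not the repo's actual vocabulary, so use the repo's meta.pkl if one is provided.

import pickle
import pandas as pd

# Assumption: a character-level vocabulary gathered from the transcripts themselves.
df = pd.read_csv("gt1_8kElo_all.zip")
chars = sorted(set("".join(df['transcript'])))
stoi = {ch: i for i, ch in enumerate(chars)}
itos = {i: ch for ch, i in stoi.items()}
meta = {"vocab_size": len(chars), "stoi": stoi, "itos": itos}
with open("data/chess/meta.pkl", "wb") as f:
    pickle.dump(meta, f)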
train_bygame.py (see https://github.com/alxndrTL/mamba.py)
import os
import time
import math
import pickle
from contextlib import nullcontext
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
from mamba_lm import MambaLM, MambaLMConfig, from_pretrained
import pyarrow.parquet as pq
import random
from torch.utils.data import Dataset, DataLoader
import glob
# -----------------------------------------------------------------------------
# default config values designed for Mamba model training
# I/O
out_dir = 'out'
eval_interval = 2000
log_interval = 1
eval_iters = 5
eval_only = False
always_save_checkpoint = True
init_from = 'resume' # 'scratch', 'resume', or Mamba model name
# wandb logging
wandb_log = False
wandb_project = 'mamba'
wandb_run_name = 'mamba_run' # modify as needed
# data
dataset = 'chess' # specify your dataset
gradient_accumulation_steps = 5 * 8
batch_size = 12
base_batch_size = batch_size
effective_batch_size = batch_size
max_seq_len = 1024 # a training-only parameter for controlling VRAM use
# model
n_layer = 12
d_model = 768
dt_rank = 'auto'
d_state = 16
expand_factor = 2
bias = False
conv_bias = True
pscan = True
vocab_size = 32000
# optimizer settings
learning_rate = 6e-4
max_iters = 600000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0
# learning rate decay settings
decay_lr = True
warmup_iters = 2000
lr_decay_iters = 600000
min_lr = 6e-5
# DDP settings
backend = 'nccl'
# system
device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = 'bfloat16' if torch.cuda.is_bf16_supported() else 'float32'
compile = False # set to True if using PyTorch 2.0
# -----------------------------------------------------------------------------
config_keys = [k for k, v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
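# Example invocation, assuming the nanoGPT-style configurator.py that the line above mirrors
# (positional args are exec'd as config files, --key=value pairs override the globals above;
# my_config.py is a hypothetical file name):
#   python train_bygame.py my_config.py --batch_size=8 --max_seq_len=2600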
# -----------------------------------------------------------------------------
mamba_config = MambaLMConfig(
    d_model=d_model, # adjust as needed
    n_layers=n_layer, # adjust as needed
    dt_rank=dt_rank,
    d_state=d_state,
    expand_factor=expand_factor,
    bias=bias,
    conv_bias=conv_bias,
    pscan=pscan,
    vocab_size=vocab_size # adjust based on your dataset
)
# DDP and other initializations
ddp = int(os.environ.get('RANK', -1)) != -1
if ddp:
    init_process_group(backend=backend)
    ddp_rank = int(os.environ['RANK'])
    ddp_local_rank = int(os.environ['LOCAL_RANK'])
    ddp_world_size = int(os.environ['WORLD_SIZE'])
    device = f'cuda:{ddp_local_rank}'
    torch.cuda.set_device(device)
    master_process = ddp_rank == 0
    seed_offset = ddp_rank
    assert gradient_accumulation_steps % ddp_world_size == 0
    gradient_accumulation_steps //= ddp_world_size
else:
    master_process = True
    seed_offset = 0
    ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * max_seq_len
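# with the defaults above (grad accum 40, world size 1, batch size 12, max_seq_len 1024) this is
# 40 * 1 * 12 * 1024 = 491,520 tokens per iteration - an upper bound here, since each batch is only
# padded/truncated to the longest game in that batch (see get_batch below)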
if master_process:
    os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
device_type = 'cuda' if 'cuda' in device else 'cpu'
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
# poor man's data loader
data_dir = os.path.join('data', dataset)
current_train_file_index = 0
train_file_update_interval = eval_interval // 20
train_files = glob.glob(os.path.join(data_dir, 'train*.parquet'))
train_datasets = []
for f in train_files:
    dataset = pq.read_table(f).to_pandas()
    dataset = dataset[dataset['tokenized'].apply(len) >= 8]
    train_datasets.append(dataset)
val_data = pq.read_table(os.path.join(data_dir, 'val.parquet')).to_pandas()
val_data = val_data[val_data['tokenized'].apply(len) >= 8]
truncated_games_count = 0
total_games_count = 0
def get_batch(split):
    global truncated_games_count, total_games_count
    # Randomly select batch_size games
    dataset = train_datasets[current_train_file_index] if split == 'train' else val_data # Use the correct DataFrame based on the split
    sample_df = dataset.sample(batch_size)
    games = sample_df['tokenized'].tolist()
    # Prepare sequences tensor for the batch
    max_length_in_batch = min(max(len(game) for game in games), max_seq_len)
    sequences = torch.zeros((batch_size, max_length_in_batch), dtype=torch.int64)
    for i, game in enumerate(games):
        total_games_count += 1
        if len(game) > max_seq_len:
            truncated_games_count += 1
            # Randomly decide truncation strategy
            truncation_choice = random.choice(['beginning', 'end', 'end2', 'random'])
            if truncation_choice == 'beginning':
                # Truncate the beginning (keep the end of the game)
                truncated_game = game[-max_seq_len:]
            elif truncation_choice.startswith('end'):
                # Truncate the end (keep the beginning of the game)
                truncated_game = game[:max_seq_len]
            else:
                # Random start index (truncate both beginning and end)
                start_idx = random.randint(0, len(game) - max_seq_len)
                truncated_game = game[start_idx:start_idx + max_seq_len]
            sequences[i, :len(truncated_game)] = torch.tensor(truncated_game, dtype=torch.int64)
            # Report the percentage of truncated games
            if truncated_games_count > 0 and truncated_games_count % 50 == 0:
                truncated_percentage = (truncated_games_count / total_games_count) * 100
                print(f"Percentage of truncated games: {truncated_percentage:.2f}%\t\t({truncated_games_count}/{total_games_count})")
        else:
            sequences[i, :len(game)] = torch.tensor(game, dtype=torch.int64)
    if device_type == 'cuda':
        sequences = sequences.pin_memory().to(device, non_blocking=True)
    else:
        sequences = sequences.to(device)
    return sequences
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
# attempt to derive vocab_size from the dataset
meta_path = os.path.join(data_dir, 'meta.pkl')
meta_vocab_size = None
if os.path.exists(meta_path):
    with open(meta_path, 'rb') as f:
        meta = pickle.load(f)
    meta_vocab_size = meta['vocab_size']
    print(f"found vocab_size = {meta_vocab_size} (inside {meta_path})")
# Model initialization
if init_from == 'scratch':
    print("Initializing a new Mamba model from scratch")
    if meta_vocab_size is None:
        print(f"defaulting to vocab_size of {vocab_size}")
    else:
        mamba_config.vocab_size = meta_vocab_size
    model = MambaLM(mamba_config)
elif init_from == 'resume':
    print(f"Resuming training from {out_dir}")
    ckpt_path = os.path.join(out_dir, 'ckpt.pt')
    checkpoint = torch.load(ckpt_path, map_location=device)
    mamba_config = checkpoint['model_args']
    model = MambaLM(mamba_config)
    state_dict = checkpoint['model']
    # fix the keys of the state dictionary :(
    # honestly no idea how checkpoints sometimes get this prefix, have to debug more
    unwanted_prefix = '_orig_mod.'
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
    if 'effective_batch_size' not in checkpoint['config']:
        print("Checkpoint was saved without `effective_batch_size`; assuming the current value (it will be saved with the next checkpoint). This is used to correct `iter_num` when the effective batch size is changed.")
        checkpoint['config']['effective_batch_size'] = effective_batch_size
    iter_num = int(checkpoint['iter_num'] * (checkpoint['config']['effective_batch_size'] / effective_batch_size))
    best_val_loss = checkpoint['best_val_loss']
elif init_from.startswith('state-spaces'):
    print(f"Initializing from Mamba pre-trained weights: {init_from}")
    model = from_pretrained(init_from)
    mamba_config = model.config
else:
    raise ValueError("Invalid init_from value")
model.to(device)
print(f'Model with {sum([p.numel() for p in model.parameters()])} parameters loaded.')
# Optimizer and GradScaler
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay, betas=(beta1, beta2))
scaler = torch.cuda.amp.GradScaler(enabled=dtype == 'float16')
if init_from == 'resume':
    optimizer.load_state_dict(checkpoint['optimizer'])
    checkpoint = None
# Compile the model if using PyTorch 2.0
if compile:
print("compiling the model... (takes a ~minute)")
model = torch.compile(model)
# Wrap model in DDP container if necessary
if ddp:
    model = DDP(model, device_ids=[ddp_local_rank])
@torch.no_grad()
def estimate_loss():
    global current_train_file_index  # needed so the file switch below actually affects get_batch
    out = {}
    model.eval()
    for split in ['train']: # ['train', 'val']:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            if split == 'train' and k % (train_file_update_interval // 1.5) == 0:
                current_train_file_index = random.randint(0, len(train_files) - 1)
                print(f"Switched to file: {train_files[current_train_file_index]}\t\t(eval)")
            tokens = get_batch(split) # Fetch tokens in the correct format
            logits = model(tokens[:, :-1]) # Predict next tokens (ignore last token)
            # The targets are the tokens shifted by one position
            targets = tokens[:, 1:].reshape(-1) # Flatten targets for cross-entropy
            # Compute cross-entropy loss between logits and targets
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets)
            losses[k] = loss.item()
        split = 'val' # Temporary hack
        out[split] = losses.mean()
    model.train()
    return out
# learning rate decay scheduler (cosine with warmup)
def get_lr(it):
    # 1) linear warmup for warmup_iters steps
    if it < warmup_iters:
        return learning_rate * it / warmup_iters
    # 2) if it > lr_decay_iters, return min learning rate
    if it > lr_decay_iters:
        return min_lr
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
    assert 0 <= decay_ratio <= 1
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # coeff ranges 0..1
    return min_lr + coeff * (learning_rate - min_lr)
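# For reference, with the defaults above (learning_rate=6e-4, min_lr=6e-5, warmup_iters=2000,
# lr_decay_iters=600000) this schedule gives roughly:
#   get_lr(1000)   -> 3e-4     (halfway through warmup)
#   get_lr(2000)   -> 6e-4     (peak, end of warmup)
#   get_lr(301000) -> ~3.3e-4  (midpoint of the cosine decay)
#   get_lr(600000) -> 6e-5     (floor)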
# Logging setup
if wandb_log and master_process:
    import wandb
    wandb.init(project=wandb_project, name=wandb_run_name, config=config)
# Training loop
local_iter_num = 0 # Number of iterations in the lifetime of this process
raw_model = model.module if ddp else model # Unwrap DDP container if needed
t0 = time.time()
while True:
    if iter_num % train_file_update_interval == 0:
        current_train_file_index = random.randint(0, len(train_files) - 1)
        print(f"Switched to file: {train_files[current_train_file_index]}")
    # Determine and set the learning rate for this iteration
    lr = get_lr(iter_num) if decay_lr else learning_rate
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    # Evaluate the loss on train/val sets and write checkpoints
    if iter_num % eval_interval == 0 and master_process:
        losses = estimate_loss()
        print(f"\nstep {iter_num}: 'val' loss {losses['val']:.4f}") # Temporary hack
        # print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
        if wandb_log:
            wandb.log({
                "iter": iter_num,
                # "train/loss": losses['train'], # Temporary hack
                "val/loss": losses['val'],
                "lr": lr,
            })
        if losses['val'] < best_val_loss or always_save_checkpoint:
            best_val_loss = losses['val']
            if iter_num > 0:
                checkpoint = {
                    'model': raw_model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'model_args': mamba_config,
                    'iter_num': iter_num,
                    'best_val_loss': best_val_loss,
                    'config': config,
                }
                print(f"saving checkpoint to {out_dir}\n")
                torch.save(checkpoint, os.path.join(out_dir, 'ckpt.pt'))
    if iter_num == 0 and eval_only:
        break
    # Forward and backward pass
    for micro_step in range(gradient_accumulation_steps):
        if ddp:
            model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1)
        sequences = get_batch('train') # Fetch the training data
        with ctx:
            logits = model(sequences[:, :-1]) # Forward pass, exclude last token for input
            # Compute loss (assuming next token prediction task)
            targets = sequences[:, 1:].reshape(-1) # Shifted by one for next token prediction
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets)
            loss = loss / gradient_accumulation_steps
        scaler.scale(loss).backward()
    # clip the gradient
    if grad_clip != 0.0:
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
    # step the optimizer and scaler if training in fp16
    scaler.step(optimizer)
    scaler.update()
    # flush the gradients as soon as we can, no need for this memory anymore
    optimizer.zero_grad(set_to_none=True)
    # timing and logging
    t1 = time.time()
    dt = t1 - t0
    t0 = t1
    if iter_num % log_interval == 0 and master_process:
        # get loss as float. note: this is a CPU-GPU sync point
        # scale up to undo the division above, approximating the true total loss (exact would have been a sum)
        lossf = loss.item() * gradient_accumulation_steps
        print(f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms")
        if wandb_log:
            wandb.log({
                "iter": iter_num,
                "train/loss": lossf,
                "lr": lr,
            })
    iter_num += 1
    local_iter_num += 1
    # termination conditions
    if iter_num > max_iters:
        checkpoint = {
            'model': raw_model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'model_args': mamba_config,
            'iter_num': iter_num,
            'best_val_loss': best_val_loss,
            'config': config,
        }
        print(f"saving checkpoint to {out_dir}")
        torch.save(checkpoint, os.path.join(out_dir, 'ckpt_final.pt'))
        break
if ddp:
    destroy_process_group()
I would be curious how Mamba compares to a GPT for learning to play chess. It seems like a decent test of how useful Mamba's hidden state is. Have you had any results?
Me too! I'm still training - just starting, really; I've been doing short-run experiments until now. My training resources aren't great: a 7900 XTX, with the inefficiencies that come with ROCm (and what that means for Mamba speed and VRAM use in particular, since I have to use the mamba.py repo rather than the original CUDA/nvcc implementation). I was testing with models closer to 50M parameters, but the one I'm training right now is 20.9M (D=256, N=14, 48 layers - it's deep because of mamba.py issues with scaling D and N). Despite the default in the script above, its max_seq_len is 2600 (each batch's sequence length is the longest game in the batch, unless that exceeds this variable, in which case the over-long games are truncated to that length, randomly cutting off the beginning, the end, or some of both), so it will see the full game for almost every game in the (Stockfish) dataset. It's currently about 239k games in; over 150 games against Stockfish level 0 it's playing 35% illegal moves, and the longest game was 36 moves. Time will tell ... a fair bit of time :)
Happy to share everything else I have, though the above should be enough to get started if you or anyone wants to give it a shot.
Update (will use Twitter for further updates): at 1.2 million games seen, it's matching the 25M-parameter transformer at 2 million games seen!