| import os |
| import time |
| import math |
| import pickle |
| from contextlib import nullcontext |
|
|
| import logging |
|
|
| import numpy as np |
| import torch |
| from torch.nn.parallel import DistributedDataParallel as DDP |
| from torch.distributed import init_process_group, destroy_process_group |
|
|
| from model import GPTConfig, GPT |
|
|
| |
| |
| |
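| # default config values; any of these can be overridden from the command line |
| # or a config file via configurator.py below |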
| out_dir = 'out' |
| eval_interval = 2000 |
| log_interval = 1 |
| eval_iters = 200 |
| eval_only = False |
| always_save_checkpoint = True |
| init_from = 'scratch' |
| |
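| # wandb logging |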
| wandb_log = False |
| wandb_project = 'owt' |
| wandb_run_name = 'gpt2' |
| |
| dataset = 'openwebtext' |
| gradient_accumulation_steps = 5 * 8 |
| batch_size = 12 |
| block_size = 1024 |
| |
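| # model architecture |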
| n_layer = 12 |
| n_head = 12 |
| n_embd = 768 |
| dropout = 0.0 |
| bias = False |
| |
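| # AdamW optimizer |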
| learning_rate = 6e-4 |
| max_iters = 600000 |
| weight_decay = 1e-1 |
| beta1 = 0.9 |
| beta2 = 0.95 |
| grad_clip = 1.0 |
| |
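| # learning rate decay settings |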
| decay_lr = True |
| warmup_iters = 2000 |
| lr_decay_iters = 600000 |
| min_lr = 6e-5 |
| |
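| # DDP settings |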
| backend = 'nccl' |
| |
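| # system |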
| device = 'cuda' |
| dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' |
| compile = True |
| |
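| # collect the plain config values above, let configurator.py override them from the |
| # command line or a config file, then snapshot the final values for logging/checkpoints |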
| config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))] |
| exec(open('configurator.py').read()) |
| config = {k: globals()[k] for k in config_keys} |
| |
|
|
| # console logging is configured here; a file handler is added on the master process below |
|
|
| logging.basicConfig( |
| level=logging.INFO, |
| format='%(asctime)s %(levelname)s: %(message)s', |
| handlers=[logging.StreamHandler()] |
| ) |
| logger = logging.getLogger("Train") |
|
|
| |
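| # DDP setup: torchrun sets RANK / LOCAL_RANK / WORLD_SIZE; otherwise run single-process |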
| ddp = int(os.environ.get('RANK', -1)) != -1 |
| if ddp: |
| init_process_group(backend=backend) |
| ddp_rank = int(os.environ['RANK']) |
| ddp_local_rank = int(os.environ['LOCAL_RANK']) |
| ddp_world_size = int(os.environ['WORLD_SIZE']) |
| device = f'cuda:{ddp_local_rank}' |
| torch.cuda.set_device(device) |
| master_process = ddp_rank == 0 |
| seed_offset = ddp_rank |
| |
| |
| assert gradient_accumulation_steps % ddp_world_size == 0 |
| gradient_accumulation_steps //= ddp_world_size |
| else: |
| |
| master_process = True |
| seed_offset = 0 |
| ddp_world_size = 1 |
| tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size |
| logger.info(f"tokens per iteration will be: {tokens_per_iter:,}") |
|
|
|
|
| if master_process: |
| os.makedirs(out_dir, exist_ok=True) |
| # separate directory for the persistent training log of this run |
| log_dir = "/home/350m_fineweb" |
| os.makedirs(log_dir, exist_ok=True) |
| log_file = os.path.join(log_dir, "training.log") |
|
|
| file_handler = logging.FileHandler(log_file) |
| file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s')) |
| logger.addHandler(file_handler) |
| |
| logger.info(f"File logging started: {log_file}") |
|
|
| torch.manual_seed(1337 + seed_offset) |
| torch.backends.cuda.matmul.allow_tf32 = True |
| torch.backends.cudnn.allow_tf32 = True |
| device_type = 'cuda' if 'cuda' in device else 'cpu' |
| |
| ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype] |
| ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype) |
|
|
| |
|
|
| # data sources: assumed layout is data/<dataset>/{train,val}.bin with uint16 token ids, |
| # keyed by name; get_batch below currently only samples from 'fineweb' |
| data_sources = {'fineweb': os.path.join('data', dataset)} |
| data_handles = { |
| split: { |
| name: np.memmap(os.path.join(path, f'{split}.bin'), dtype=np.uint16, mode='r') |
| for name, path in data_sources.items() |
| } |
| for split in ['train', 'val'] |
| } |
|
|
| def get_batch(split): |
| source = 'fineweb' |
| data = data_handles[split][source] |
| |
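| # sample batch_size random offsets and build (input, shifted-target) windows of block_size |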
| ix = torch.randint(len(data) - block_size, (batch_size,)) |
| x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix]) |
| y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix]) |
| |
| if device_type == 'cuda': |
| |
| x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True) |
| else: |
| x, y = x.to(device), y.to(device) |
| return x, y |
|
|
| |
| iter_num = 0 |
| best_val_loss = 1e9 |
|
|
| |
| meta_path = os.path.join(data_sources['fineweb'], 'meta.pkl') |
| meta_vocab_size = None |
| if os.path.exists(meta_path): |
| with open(meta_path, 'rb') as f: |
| meta = pickle.load(f) |
| meta_vocab_size = meta['vocab_size'] |
| logger.info(f"found vocab_size = {meta_vocab_size} (inside {meta_path})") |
|
|
| |
| model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=block_size, |
| bias=bias, vocab_size=None, dropout=dropout) |
| if init_from == 'scratch': |
| |
| logger.info("Initializing a new model from scratch") |
| |
| if meta_vocab_size is None: |
| logger.info("defaulting to GPT-2 vocab_size of 50304 (50257 rounded up for efficiency)") |
| model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304 |
| gptconf = GPTConfig(**model_args) |
| model = GPT(gptconf) |
| elif init_from == 'resume': |
| logger.info(f"Resuming training from {out_dir}") |
| |
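| # resume from the most recent checkpoint in out_dir (lexicographically last ckpt_*.pt) |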
| ckpt_path = os.path.join(out_dir, sorted( |
| [f for f in os.listdir(out_dir) if f.startswith("ckpt_") and f.endswith(".pt")] |
| )[-1]) |
| checkpoint = torch.load(ckpt_path, map_location=device) |
| checkpoint_model_args = checkpoint['model_args'] |
| |
| |
| for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']: |
| model_args[k] = checkpoint_model_args[k] |
| |
| gptconf = GPTConfig(**model_args) |
| model = GPT(gptconf) |
| state_dict = checkpoint['model'] |
| |
| |
| unwanted_prefix = '_orig_mod.' |
| for k,v in list(state_dict.items()): |
| if k.startswith(unwanted_prefix): |
| state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) |
| model.load_state_dict(state_dict) |
| iter_num = checkpoint['iter_num'] |
| best_val_loss = checkpoint['best_val_loss'] |
| elif init_from.startswith('gpt2'): |
| logger.info(f"Initializing from OpenAI GPT-2 weights: {init_from}") |
| |
| override_args = dict(dropout=dropout) |
| model = GPT.from_pretrained(init_from, override_args) |
| |
| for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']: |
| model_args[k] = getattr(model.config, k) |
| |
| if block_size < model.config.block_size: |
| model.crop_block_size(block_size) |
| model_args['block_size'] = block_size |
| model.to(device) |
|
|
| |
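| # initialize a GradScaler; it is effectively a no-op unless training in float16 |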
| scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16')) |
|
|
| |
| optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type) |
| if init_from == 'resume': |
| optimizer.load_state_dict(checkpoint['optimizer']) |
| checkpoint = None |
|
|
| |
| if compile: |
| logger.info("compiling the model... (takes a ~minute)") |
| unoptimized_model = model |
| model = torch.compile(model) |
|
|
| |
| if ddp: |
| model = DDP(model, device_ids=[ddp_local_rank]) |
|
|
| |
| @torch.no_grad() |
| def estimate_loss(): |
| out = {} |
| model.eval() |
| for split in ['train', 'val']: |
| losses = torch.zeros(eval_iters) |
| for k in range(eval_iters): |
| X, Y = get_batch(split) |
| with ctx: |
| logits, loss = model(X, Y) |
| losses[k] = loss.item() |
| out[split] = losses.mean() |
| model.train() |
| return out |
|
|
| |
| def get_lr(it): |
| |
| if it < warmup_iters: |
| return learning_rate * (it + 1) / (warmup_iters + 1) |
| |
| if it > lr_decay_iters: |
| return min_lr |
| |
| decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters) |
| assert 0 <= decay_ratio <= 1 |
| coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) |
| return min_lr + coeff * (learning_rate - min_lr) |
|
|
| |
| if wandb_log and master_process: |
| import wandb |
| wandb.init(project=wandb_project, name=wandb_run_name, config=config) |
|
|
| |
| X, Y = get_batch('train') |
| t0 = time.time() |
| local_iter_num = 0 |
| raw_model = model.module if ddp else model |
| running_mfu = -1.0 |
| while True: |
|
|
| |
| lr = get_lr(iter_num) if decay_lr else learning_rate |
| for param_group in optimizer.param_groups: |
| param_group['lr'] = lr |
|
|
| |
| if iter_num % eval_interval == 0 and master_process: |
| losses = estimate_loss() |
| logger.info(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}") |
| if wandb_log: |
| wandb.log({ |
| "iter": iter_num, |
| "train/loss": losses['train'], |
| "val/loss": losses['val'], |
| "lr": lr, |
| "mfu": running_mfu*100, |
| }) |
| if losses['val'] < best_val_loss or always_save_checkpoint: |
| best_val_loss = losses['val'] |
| if iter_num > 0: |
| checkpoint = { |
| 'model': raw_model.state_dict(), |
| 'optimizer': optimizer.state_dict(), |
| 'model_args': model_args, |
| 'iter_num': iter_num, |
| 'best_val_loss': best_val_loss, |
| 'config': config, |
| } |
| logger.info(f"💾 SAVING CHECKPOINT TO {out_dir}") |
| ckpt_name = f"ckpt_{iter_num:07d}.pt" |
| ckpt_path = os.path.join(out_dir, ckpt_name) |
| torch.save(checkpoint, ckpt_path) |
| if iter_num == 0 and eval_only: |
| break |
|
|
| |
| |
| for micro_step in range(gradient_accumulation_steps): |
| if ddp: |
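| # only sync gradients on the last micro step; earlier micro steps accumulate locally |
| # (toggling require_backward_grad_sync is equivalent to wrapping them in model.no_sync()) |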
| |
| |
| |
| |
| model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1) |
| with ctx: |
| logits, loss = model(X, Y) |
| loss = loss / gradient_accumulation_steps |
| |
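| # prefetch the next batch on the CPU while the GPU is still busy with this step |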
| X, Y = get_batch('train') |
| |
| scaler.scale(loss).backward() |
| |
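| # clip the gradient (unscale first so the threshold applies to the true gradient values) |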
| if grad_clip != 0.0: |
| scaler.unscale_(optimizer) |
| torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) |
| |
| scaler.step(optimizer) |
| scaler.update() |
| |
| optimizer.zero_grad(set_to_none=True) |
|
|
| |
| t1 = time.time() |
| dt = t1 - t0 |
| t0 = t1 |
| if iter_num % log_interval == 0 and master_process: |
| |
| |
| # undo the division above to approximate the true loss (loss.item() is a CPU-GPU sync point) |
| lossf = loss.item() * gradient_accumulation_steps |
| if local_iter_num >= 5: |
| mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt) |
| running_mfu = mfu if running_mfu == -1.0 else 0.9*running_mfu + 0.1*mfu |
| |
| if logger: |
| log_msg = f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms, mfu {running_mfu*100:.2f}%" |
| logger.info(log_msg) |
|
|
|
|
| if iter_num % 100 == 0: |
|
|
| remaining_iters = max_iters - iter_num |
| est_seconds = remaining_iters * dt |
| days = int(est_seconds // 86400) |
| hours = int((est_seconds % 86400) // 3600) |
| minutes = int((est_seconds % 3600) // 60) |
|
|
| logger.info(f"⏳ ETA: approx. {days}d {hours}h {minutes}m remaining until iteration {max_iters}") |
| logger.info("📝 LIVE SAMPLE:") |
|
|
| model.eval() |
| |
| with torch.no_grad(): |
| import tiktoken |
| enc = tiktoken.get_encoding("gpt2") |
| |
| prompt = "Artificial Intelligence is " |
| start_ids = enc.encode(prompt, allowed_special={"<|endoftext|>"}) |
| context = torch.tensor(start_ids, dtype=torch.long, device=device).unsqueeze(0) |
|
|
| generated_tokens = raw_model.generate(context, max_new_tokens=200)[0].tolist() |
| |
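| # keep only token ids the tokenizer knows; the model's vocab is padded beyond 50257 |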
| valid_tokens = [t for t in generated_tokens if t < enc.n_vocab] |
| |
| try: |
| decoded_text = enc.decode(valid_tokens, errors='replace') |
| logger.info(f"\n{decoded_text}") |
| except Exception as e: |
| logger.error(f"Sampling error: {e}") |
| |
| model.train() |
| logger.info("-" * 50) |
| iter_num += 1 |
| local_iter_num += 1 |
|
|
| |
| if iter_num > max_iters: |
| break |
|
|
| if ddp: |
| destroy_process_group() |