Add files using upload-large-folder tool
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/code/code_completion_exp/train_pythia/train.py +606 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/config.yaml +146 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/output.log +87 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/requirements.txt +245 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/wandb-metadata.json +70 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/wandb-summary.json +1 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/logs/debug-core.log +16 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/logs/debug-internal.log +13 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/code/code_completion_exp/train_pythia/train.py +606 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/config.yaml +146 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/output.log +1056 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/requirements.txt +245 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/wandb-metadata.json +70 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/wandb-summary.json +1 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug-core.log +16 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug-internal.log +13 -0
- lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug.log +24 -0
- lr_sweep/pythia_1b_lr_5e-5/.hydra/config.yaml +49 -0
- lr_sweep/pythia_1b_lr_5e-5/.hydra/hydra.yaml +167 -0
- lr_sweep/pythia_1b_lr_5e-5/model_final.pt +3 -0
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/code/code_completion_exp/train_pythia/train.py
ADDED
@@ -0,0 +1,606 @@
"""
Training pipeline for Pythia (decoder-only transformer) on the code completion task.

Configuration via Hydra + OmegaConf, logging to Trackio.
DDP support via Accelerate for multi-GPU training.

Usage:
    # Basic run (single GPU)
    python train.py

    # Multi-GPU with Accelerate
    accelerate launch train.py

    # Multi-GPU with an explicit number of GPUs
    accelerate launch --num_processes=4 train.py

    # Override parameters via CLI
    python train.py training.lr=1e-4 training.epochs=5

    # Select a different model config
    python train.py model=pythia_160m

    # Multirun (sweep)
    python train.py --multirun training.lr=1e-4,3e-4,1e-3

    # Disable logging
    python train.py tracking.enabled=false
"""

import os
import math
import time
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets import load_from_disk

import hydra
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, OmegaConf
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoConfig,
    PreTrainedTokenizerBase,
)
from accelerate import Accelerator
from accelerate.utils import set_seed as accelerate_set_seed

# Ensure repo root is on sys.path (needed when running from subdirectory)
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))

# Shared training library
from training_lib.utils import AverageMeter, log_message
from training_lib.checkpointing import save_checkpoint, load_checkpoint
from training_lib.schedulers import get_lr_scheduler
from training_lib.tracking import init_tracking, log_metrics, finish_tracking
from training_lib.validation import run_validation


# ============================================================================
# DATA
# ============================================================================


class CodeCompletionCollator:
    """Collate function for batching code completion examples."""

    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        max_context_len: int = 1024,
        max_target_len: int = 256,
    ):
        self.tokenizer = tokenizer
        self.max_context_len = max_context_len
        self.max_target_len = max_target_len
        self.pad_token_id = tokenizer.pad_token_id

    def __call__(self, batch: list[dict]) -> dict:
        contexts = [item["context"] for item in batch]
        targets = [item["target"] for item in batch]

        encoded_contexts = self.tokenizer(
            contexts,
            add_special_tokens=True,
            truncation=True,
            max_length=self.max_context_len,
            return_tensors=None,
        )
        encoded_targets = self.tokenizer(
            targets,
            add_special_tokens=False,
            truncation=True,
            max_length=self.max_target_len,
            return_tensors=None,
        )

        input_ids_list = []
        context_lengths = []

        for ctx_ids, tgt_ids in zip(
            encoded_contexts["input_ids"], encoded_targets["input_ids"]
        ):
            tgt_ids = tgt_ids + [self.tokenizer.eos_token_id]
            context_lengths.append(len(ctx_ids))
            input_ids_list.append(ctx_ids + tgt_ids)

        max_len = max(len(ids) for ids in input_ids_list)

        padded_input_ids = []
        attention_mask = []

        for ids in input_ids_list:
            padding_len = max_len - len(ids)
            padded_input_ids.append(ids + [self.pad_token_id] * padding_len)
            attention_mask.append([1] * len(ids) + [0] * padding_len)

        return {
            "input_ids": torch.tensor(padded_input_ids, dtype=torch.long),
            "attention_mask": torch.tensor(attention_mask, dtype=torch.long),
            "context_lengths": torch.tensor(context_lengths, dtype=torch.long),
        }


def create_dataloaders(
    cfg: DictConfig, tokenizer: PreTrainedTokenizerBase
) -> dict[str, DataLoader]:
    """Create DataLoaders for train and validation."""
    dataset_dict = load_from_disk(cfg.data.path)

    collator = CodeCompletionCollator(
        tokenizer=tokenizer,
        max_context_len=cfg.data.max_context_len,
        max_target_len=cfg.data.max_target_len,
    )

    dataloaders = {}

    if "train" in dataset_dict:
        train_dataset = dataset_dict["train"]
        max_train = cfg.data.get("max_train_samples", None)
        if max_train is not None:
            train_dataset = train_dataset.select(range(min(max_train, len(train_dataset))))
        dataloaders["train"] = DataLoader(
            train_dataset,
            batch_size=cfg.training.batch_size,
            shuffle=True,
            collate_fn=collator,
            num_workers=cfg.data.num_workers,
            pin_memory=cfg.data.pin_memory,
        )

    if "validation" in dataset_dict:
        val_dataset = dataset_dict["validation"]
        max_val = cfg.data.get("max_val_samples", None)
        if max_val is not None:
            val_dataset = val_dataset.select(range(min(max_val, len(val_dataset))))
        eval_batch_size = cfg.training.get("eval_batch_size", cfg.training.batch_size)
        dataloaders["validation"] = DataLoader(
            val_dataset,
            batch_size=eval_batch_size,
            shuffle=False,
            collate_fn=collator,
            num_workers=cfg.data.num_workers,
            pin_memory=cfg.data.pin_memory,
        )

    return dataloaders


# ============================================================================
# LOSS FUNCTIONS
# ============================================================================


def compute_loss(
    logits: torch.Tensor,
    input_ids: torch.Tensor,
    context_lengths: torch.Tensor,
    attention_mask: torch.Tensor,
) -> dict:
    """Compute the loss for an autoregressive model."""
    batch_size, seq_len, vocab_size = logits.shape

    shift_logits = logits[:, :-1, :].contiguous()
    shift_labels = input_ids[:, 1:].contiguous()
    shift_mask = attention_mask[:, 1:].contiguous()

    # Only target positions contribute: position ctx_len - 1 is the first
    # one whose next-token prediction is a target token.
    target_mask = torch.zeros_like(shift_labels, dtype=torch.bool)
    for i in range(batch_size):
        ctx_len = context_lengths[i].item()
        target_mask[i, ctx_len - 1 :] = True

    final_mask = target_mask & shift_mask.bool()

    if final_mask.sum() > 0:
        loss = F.cross_entropy(
            shift_logits[final_mask], shift_labels[final_mask], reduction="mean"
        )
    else:
        loss = torch.tensor(0.0, device=logits.device)

    return {"loss": loss}


def _pythia_forward_loss(
    model: nn.Module,
    batch: dict,
    cfg: DictConfig,
    accelerator: Accelerator,
) -> dict:
    """Forward + loss for a plain HF causal LM (attention_mask= kwarg, .logits)."""
    input_ids = batch["input_ids"]
    attention_mask = batch["attention_mask"]
    context_lengths = batch["context_lengths"]
    output = model(input_ids, attention_mask=attention_mask)
    return compute_loss(output.logits, input_ids, context_lengths, attention_mask)


# ============================================================================
# PARAMETER GROUPING
# ============================================================================


def group_params(model: nn.Module, weight_decay: float) -> list[dict]:
    """Group parameters for the optimizer."""
    decay_params = []
    no_decay_params = []

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue

        if "bias" in name or "LayerNorm" in name or "layernorm" in name:
            no_decay_params.append(param)
        else:
            decay_params.append(param)

    return [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": no_decay_params, "weight_decay": 0.0},
    ]


# ============================================================================
# TRAINING LOOP
# ============================================================================


def train_epoch(
    model: nn.Module,
    dataloader: DataLoader,
    optimizer: torch.optim.Optimizer,
    scheduler,
    cfg: DictConfig,
    epoch: int,
    global_step: int,
    accelerator: Accelerator,
    val_dataloader: DataLoader | None = None,
    best_val_loss: float = float("inf"),
) -> tuple[int, float]:
    """One training epoch. Returns (global_step, best_val_loss)."""
    model.train()

    loss_meter = AverageMeter()

    optimizer.zero_grad()
    accumulated_loss = 0.0
    accumulated_steps = 0

    epoch_start_time = time.time()
    step_start_time = time.time()

    for batch_idx, batch in enumerate(dataloader):
        input_ids = batch["input_ids"]
        attention_mask = batch["attention_mask"]
        context_lengths = batch["context_lengths"]

        with accelerator.autocast():
            output = model(input_ids, attention_mask=attention_mask)
            logits = output.logits
            loss_dict = compute_loss(
                logits, input_ids, context_lengths, attention_mask
            )

        loss = loss_dict["loss"] / cfg.training.gradient_accumulation_steps
        accelerator.backward(loss)

        accumulated_loss += loss_dict["loss"].item()
        accumulated_steps += 1

        if accumulated_steps == cfg.training.gradient_accumulation_steps:
            if cfg.training.max_grad_norm > 0:
                accelerator.clip_grad_norm_(
                    model.parameters(), cfg.training.max_grad_norm
                )

            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            avg_loss = accumulated_loss / cfg.training.gradient_accumulation_steps
            loss_meter.update(avg_loss)

            global_step += 1

            if global_step % cfg.logging.log_interval == 0:
                step_time = time.time() - step_start_time
                current_lr = scheduler.get_last_lr()[0]

                metrics = {
                    "train/loss": loss_meter.val,
                    "train/loss_avg": loss_meter.avg,
                    "train/lr": current_lr,
                    "train/epoch": epoch,
                    "train/step_time": step_time / cfg.logging.log_interval,
                }

                log_metrics(metrics, step=global_step)

                log_message(
                    f"Epoch {epoch} | Step {global_step} | "
                    f"Loss: {loss_meter.avg:.4f} | "
                    f"LR: {current_lr:.2e}",
                    cfg,
                    accelerator,
                )

                step_start_time = time.time()

            if (
                cfg.logging.save_interval > 0
                and global_step % cfg.logging.save_interval == 0
            ):
                save_checkpoint(
                    model, optimizer, scheduler, global_step, epoch, cfg, accelerator
                )

            eval_interval = cfg.logging.get("eval_interval", 0)
            if (
                eval_interval > 0
                and val_dataloader is not None
                and global_step % eval_interval == 0
            ):
                val_metrics = run_validation(
                    model=model,
                    dataloader=val_dataloader,
                    cfg=cfg,
                    global_step=global_step,
                    accelerator=accelerator,
                    forward_loss_fn=_pythia_forward_loss,
                )

                if val_metrics["val/loss"] < best_val_loss:
                    best_val_loss = val_metrics["val/loss"]
                    if accelerator.is_main_process:
                        best_model_path = Path(cfg.paths.output_dir) / "model_best.pt"
                        unwrapped_model = accelerator.unwrap_model(model)
                        torch.save(unwrapped_model.state_dict(), best_model_path)
                        log_message(
                            f"New best model saved! Val loss: {best_val_loss:.4f}",
                            cfg,
                            accelerator,
                        )

                    log_metrics(
                        {
                            "best/val_loss": best_val_loss,
                            "best/val_perplexity": val_metrics["val/perplexity"],
                            "best/step": global_step,
                        },
                        step=global_step,
                    )

                model.train()

            accumulated_loss = 0.0
            accumulated_steps = 0

    epoch_time = time.time() - epoch_start_time

    log_message(
        f"Epoch {epoch} completed in {epoch_time:.2f}s | "
        f"Loss: {loss_meter.avg:.4f}",
        cfg,
        accelerator,
    )

    log_metrics({
        "epoch/loss": loss_meter.avg,
        "epoch/time": epoch_time,
    })

    return global_step, best_val_loss


# ============================================================================
# MAIN
# ============================================================================


@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
    """Main training entry point with DDP support via Accelerate."""

    # === Performance: Enable TF32 for faster matmuls on Ampere+ GPUs ===
    torch.set_float32_matmul_precision('high')

    # === Accelerator Setup ===
    mixed_precision = "bf16" if cfg.training.use_amp else "no"

    accelerator = Accelerator(
        mixed_precision=mixed_precision,
        gradient_accumulation_steps=cfg.training.gradient_accumulation_steps,
    )

    # === Setup ===
    accelerate_set_seed(cfg.seed)

    if cfg.paths.output_dir is None:
        cfg.paths.output_dir = HydraConfig.get().runtime.output_dir

    OmegaConf.resolve(cfg)

    log_message(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'not set')}", cfg, accelerator)
    log_message(f"Number of processes: {accelerator.num_processes}", cfg, accelerator)
    log_message(f"Process index: {accelerator.process_index}", cfg, accelerator)
    log_message(f"Mixed precision: {mixed_precision}", cfg, accelerator)

    log_message("=" * 60, cfg, accelerator)
    log_message("Pythia Training Pipeline (Hydra + Trackio + Accelerate)", cfg, accelerator)
    log_message("=" * 60, cfg, accelerator)
    log_message(f"Config:\n{OmegaConf.to_yaml(cfg)}", cfg, accelerator)

    # === Trackio Init ===
    init_tracking(cfg, accelerator)

    # === Tokenizer ===
    log_message("Initializing tokenizer...", cfg, accelerator)
    tokenizer = AutoTokenizer.from_pretrained(cfg.model.name)

    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id

    # === Model ===
    log_message("Loading model...", cfg, accelerator)

    # Flash Attention 2
    torch_dtype = torch.bfloat16 if cfg.training.use_amp else torch.float32

    if cfg.model.checkpoint_path:
        model = AutoModelForCausalLM.from_pretrained(
            cfg.model.name,
            attn_implementation="flash_attention_2",
            torch_dtype=torch_dtype,
        )
        checkpoint = torch.load(cfg.model.checkpoint_path, map_location="cpu")
        model.load_state_dict(checkpoint["model_state_dict"] if "model_state_dict" in checkpoint else checkpoint)
        log_message(f"Loaded checkpoint: {cfg.model.checkpoint_path}", cfg, accelerator)
    elif cfg.model.from_scratch:
        config = AutoConfig.from_pretrained(cfg.model.name)
        config._attn_implementation = "flash_attention_2"
        model = AutoModelForCausalLM.from_config(config, torch_dtype=torch_dtype)
        log_message(f"Initialized from scratch: {cfg.model.name}", cfg, accelerator)
    else:
        model = AutoModelForCausalLM.from_pretrained(
            cfg.model.name,
            attn_implementation="flash_attention_2",
            torch_dtype=torch_dtype,
        )
        log_message(f"Loaded pretrained: {cfg.model.name}", cfg, accelerator)

    model.train()

    # Log model info
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    log_message(f"Total params: {total_params:,}", cfg, accelerator)
    log_message(f"Trainable params: {trainable_params:,}", cfg, accelerator)

    # === Data ===
    log_message("Creating dataloaders...", cfg, accelerator)
    dataloaders = create_dataloaders(cfg, tokenizer)

    train_dataloader = dataloaders["train"]
    val_dataloader = dataloaders.get("validation", None)

    log_message(f"Train dataset size: {len(train_dataloader.dataset)}", cfg, accelerator)
    log_message(f"Train batches per epoch (before DDP split): {len(train_dataloader)}", cfg, accelerator)

    if val_dataloader:
        log_message(f"Validation dataset size: {len(val_dataloader.dataset)}", cfg, accelerator)
        log_message(f"Validation batches: {len(val_dataloader)}", cfg, accelerator)
    else:
        log_message("No validation dataset found", cfg, accelerator)

    # === Optimizer ===
    log_message("Creating optimizer...", cfg, accelerator)
    param_groups = group_params(model, cfg.training.weight_decay)

    optimizer = torch.optim.AdamW(
        param_groups,
        lr=cfg.training.lr,
        betas=tuple(cfg.training.betas),
        eps=cfg.training.eps,
    )

    # === Scheduler ===
    steps_per_epoch = math.ceil(
        len(train_dataloader) / accelerator.num_processes
    )
    total_steps = (
        cfg.training.epochs
        * steps_per_epoch
        // cfg.training.gradient_accumulation_steps
    )
    scheduler = get_lr_scheduler(optimizer, cfg, total_steps)

    log_message(
        f"Total steps: {total_steps}, Steps per epoch: {steps_per_epoch}",
        cfg,
        accelerator,
    )

    # === Accelerate Prepare ===
    log_message("Preparing model, optimizer, and dataloaders with Accelerate...", cfg, accelerator)

    if val_dataloader is not None:
        model, optimizer, train_dataloader, val_dataloader, scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, val_dataloader, scheduler
        )
    else:
        model, optimizer, train_dataloader, scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, scheduler
        )

    log_message(f"Train batches per epoch (after DDP split): {len(train_dataloader)}", cfg, accelerator)

    # === Resume ===
    global_step = 0
    start_epoch = 1

    if cfg.training.resume and cfg.training.resume_checkpoint:
        global_step, start_epoch = load_checkpoint(
            model, optimizer, scheduler, cfg.training.resume_checkpoint, cfg, accelerator
        )
        start_epoch += 1

    # === Training Loop ===
    log_message("Starting training...", cfg, accelerator)

    best_val_loss = float("inf")

    try:
        for epoch in range(start_epoch, cfg.training.epochs + 1):
            log_message(f"\n{'=' * 60}", cfg, accelerator)
            log_message(f"EPOCH {epoch}/{cfg.training.epochs}", cfg, accelerator)
            log_message(f"{'=' * 60}", cfg, accelerator)

            global_step, best_val_loss = train_epoch(
                model=model,
                dataloader=train_dataloader,
                optimizer=optimizer,
                scheduler=scheduler,
                cfg=cfg,
                epoch=epoch,
                global_step=global_step,
                accelerator=accelerator,
                val_dataloader=val_dataloader,
                best_val_loss=best_val_loss,
            )

            if cfg.logging.save_every_epoch:
                save_checkpoint(
                    model, optimizer, scheduler, global_step, epoch, cfg, accelerator
                )

    except KeyboardInterrupt:
        log_message("Training interrupted by user", cfg, accelerator)
        save_checkpoint(model, optimizer, scheduler, global_step, epoch, cfg, accelerator)

    # === Final Save ===
    log_message("\nTraining completed!", cfg, accelerator)

    if accelerator.is_main_process:
        final_model_path = Path(cfg.paths.output_dir) / "model_final.pt"
        unwrapped_model = accelerator.unwrap_model(model)
        torch.save(unwrapped_model.state_dict(), final_model_path)
        log_message(f"Final model: {final_model_path}", cfg, accelerator)

    accelerator.wait_for_everyone()
    finish_tracking()


if __name__ == "__main__":
    main()
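The masking in compute_loss above is what makes this script completion-specific: cross-entropy is taken only over predictions of target tokens, never over the context or the padding. Below is a minimal, self-contained sketch of the same shift-and-mask step on a toy batch; every number in it is invented for illustration.

import torch
import torch.nn.functional as F

# One sequence of 6 tokens: the first 3 are context, then 2 target tokens,
# then 1 padding token (illustrative values only).
vocab_size = 10
input_ids = torch.tensor([[5, 2, 7, 1, 4, 0]])
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 0]])
context_lengths = torch.tensor([3])
logits = torch.randn(1, 6, vocab_size)

# Shift so that position t predicts token t + 1, as in compute_loss.
shift_logits = logits[:, :-1, :]
shift_labels = input_ids[:, 1:]
shift_mask = attention_mask[:, 1:]

# Position ctx_len - 1 is the first one whose next-token prediction
# is a target token, so the target mask starts there.
target_mask = torch.zeros_like(shift_labels, dtype=torch.bool)
target_mask[0, context_lengths[0] - 1:] = True
final_mask = target_mask & shift_mask.bool()

print(final_mask)  # tensor([[False, False,  True,  True, False]])
loss = F.cross_entropy(shift_logits[final_mask], shift_labels[final_mask])

The last position is excluded by the attention mask because its "next token" would be padding.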
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/config.yaml
ADDED
@@ -0,0 +1,146 @@
_wandb:
  value:
    cli_version: 0.24.0
    code_path: code/code_completion_exp/train_pythia/train.py
    e:
      5e04zafr99ci3kv9t06h2zc7nrpy0bwg:
        args:
        - tracking=wandb
        - tracking.project=code-completion_lr-sweep
        - tracking.run_name=pythia_1b_lr_2e-5
        - training.lr=2e-5
        - paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
        - model=pythia_1b
        - data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full
        codePath: code_completion_exp/train_pythia/train.py
        codePathLocal: train.py
        cpu_count: 64
        cpu_count_logical: 128
        cudaVersion: "12.2"
        disk:
          /:
            total: "265214230528"
            used: "56100974592"
        email: nikita@local.ru
        executable: /venv/bytellm/bin/python
        git:
          commit: f111e13281aa0dc58e24302edab5b0d5c2024586
          remote: https://github.com/naryst/byte-llms-code.git
        gpu: NVIDIA H100 80GB HBM3
        gpu_count: 4
        gpu_nvidia:
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-b60cdcab-2033-2009-41de-be646c953a20
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-9982b420-4520-4238-c378-ec5a46015474
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134
        host: 7504e518d24a
        memory:
          total: "1081679683584"
        os: Linux-5.4.0-176-generic-x86_64-with-glibc2.35
        program: /workspace/byte-llms-code/code_completion_exp/train_pythia/train.py
        python: CPython 3.12.0
        root: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
        startedAt: "2026-04-25T17:54:39.294048Z"
        writerId: 5e04zafr99ci3kv9t06h2zc7nrpy0bwg
    m: []
    python_version: 3.12.0
    t:
      "1":
      - 1
      - 11
      - 49
      - 50
      - 51
      - 71
      - 105
      "2":
      - 1
      - 11
      - 49
      - 50
      - 51
      - 71
      - 105
      "3":
      - 2
      - 13
      - 16
      - 61
      "4": 3.12.0
      "5": 0.24.0
      "6": 4.57.6
      "12": 0.24.0
      "13": linux-x86_64
data:
  value:
    max_context_len: 4096
    max_target_len: 256
    max_train_samples: 20000
    max_val_samples: 2000
    num_workers: 4
    path: /workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full
    pin_memory: true
device:
  value: cuda
logging:
  value:
    eval_interval: 1000
    log_interval: 10
    save_every_epoch: true
    save_interval: 3000
model:
  value:
    checkpoint_path: null
    from_scratch: false
    name: EleutherAI/pythia-1b
paths:
  value:
    output_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
seed:
  value: 42
tracking:
  value:
    backend: wandb
    base_url: https://wandb.platun0v.ru
    enabled: true
    entity: null
    local_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
    project: code-completion_lr-sweep
    run_name: pythia_1b_lr_2e-5
training:
  value:
    batch_size: 4
    betas:
    - 0.9
    - 0.95
    decay_ratio: 0.2
    epochs: 1
    eps: 1e-08
    eval_batch_size: 12
    gradient_accumulation_steps: 4
    lr: 2e-05
    lr_scheduler: wsd
    max_grad_norm: 1
    min_lr_ratio: 0.1
    resume: false
    resume_checkpoint: null
    use_amp: true
    warmup_ratio: 0.1
    warmup_steps: 100
    weight_decay: 0.1
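The learning-rate trace in output.log below matches these knobs in outline: warmup toward lr 2e-05, a flat plateau, then decay to min_lr_ratio * lr = 2e-06. The actual schedule comes from training_lib.schedulers.get_lr_scheduler, which is not included in this upload, so the following is only a generic warmup-stable-decay (WSD) sketch under the assumption of linear warmup and cosine decay; in the logged run the LR already hits its floor around step 310 of 625, so the real implementation evidently places the decay phase differently.

import math

def wsd_lr(step, total_steps, base_lr,
           warmup_ratio=0.1, decay_ratio=0.2, min_lr_ratio=0.1):
    # Generic WSD schedule; hypothetical, not the repo's get_lr_scheduler.
    warmup_steps = int(total_steps * warmup_ratio)
    decay_steps = int(total_steps * decay_ratio)
    stable_end = total_steps - decay_steps
    if step < warmup_steps:                        # linear warmup
        return base_lr * (step + 1) / warmup_steps
    if step < stable_end:                          # stable plateau
        return base_lr
    t = (step - stable_end) / max(1, decay_steps)  # cosine decay to the floor
    return base_lr * (min_lr_ratio + (1 - min_lr_ratio) * 0.5 * (1 + math.cos(math.pi * t)))

# With this run's settings (total_steps=625, lr=2e-05):
print(wsd_lr(0, 625, 2e-5), wsd_lr(300, 625, 2e-5), wsd_lr(624, 625, 2e-5))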
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/output.log
ADDED
@@ -0,0 +1,87 @@
[2026-04-25 17:54:40] Initializing tokenizer...
[2026-04-25 17:54:40] Loading model...
`torch_dtype` is deprecated! Use `dtype` instead!
[2026-04-25 17:54:43] Loaded pretrained: EleutherAI/pythia-1b
[2026-04-25 17:54:43] Total params: 1,011,781,632
[2026-04-25 17:54:43] Trainable params: 1,011,781,632
[2026-04-25 17:54:43] Creating dataloaders...
[2026-04-25 17:54:43] Train dataset size: 20000
[2026-04-25 17:54:43] Train batches per epoch (before DDP split): 5000
[2026-04-25 17:54:43] Validation dataset size: 2000
[2026-04-25 17:54:43] Validation batches: 167
[2026-04-25 17:54:43] Creating optimizer...
[2026-04-25 17:54:43] Total steps: 625, Steps per epoch: 2500
[2026-04-25 17:54:43] Preparing model, optimizer, and dataloaders with Accelerate...
[2026-04-25 17:54:45] Train batches per epoch (after DDP split): 2500
[2026-04-25 17:54:45] Starting training...
[2026-04-25 17:54:45]
============================================================
[2026-04-25 17:54:45] EPOCH 1/1
[2026-04-25 17:54:45] ============================================================
[2026-04-25 17:54:48] Epoch 1 | Step 10 | Loss: 2.2990 | LR: 7.81e-06
[2026-04-25 17:54:51] Epoch 1 | Step 20 | Loss: 1.8267 | LR: 1.36e-05
[2026-04-25 17:54:53] Epoch 1 | Step 30 | Loss: 1.6114 | LR: 1.94e-05
[2026-04-25 17:54:56] Epoch 1 | Step 40 | Loss: 1.5023 | LR: 2.00e-05
[2026-04-25 17:54:58] Epoch 1 | Step 50 | Loss: 1.4291 | LR: 2.00e-05
[2026-04-25 17:55:01] Epoch 1 | Step 60 | Loss: 1.3708 | LR: 2.00e-05
[2026-04-25 17:55:03] Epoch 1 | Step 70 | Loss: 1.3249 | LR: 2.00e-05
[2026-04-25 17:55:06] Epoch 1 | Step 80 | Loss: 1.2878 | LR: 2.00e-05
[2026-04-25 17:55:09] Epoch 1 | Step 90 | Loss: 1.2478 | LR: 2.00e-05
[2026-04-25 17:55:11] Epoch 1 | Step 100 | Loss: 1.2470 | LR: 2.00e-05
[2026-04-25 17:55:14] Epoch 1 | Step 110 | Loss: 1.2379 | LR: 2.00e-05
[2026-04-25 17:55:16] Epoch 1 | Step 120 | Loss: 1.2292 | LR: 2.00e-05
[2026-04-25 17:55:19] Epoch 1 | Step 130 | Loss: 1.2023 | LR: 2.00e-05
[2026-04-25 17:55:22] Epoch 1 | Step 140 | Loss: 1.1906 | LR: 2.00e-05
[2026-04-25 17:55:24] Epoch 1 | Step 150 | Loss: 1.1838 | LR: 2.00e-05
[2026-04-25 17:55:27] Epoch 1 | Step 160 | Loss: 1.1693 | LR: 2.00e-05
[2026-04-25 17:55:29] Epoch 1 | Step 170 | Loss: 1.1607 | LR: 2.00e-05
[2026-04-25 17:55:32] Epoch 1 | Step 180 | Loss: 1.1611 | LR: 2.00e-05
[2026-04-25 17:55:34] Epoch 1 | Step 190 | Loss: 1.1570 | LR: 2.00e-05
[2026-04-25 17:55:37] Epoch 1 | Step 200 | Loss: 1.1495 | LR: 2.00e-05
[2026-04-25 17:55:40] Epoch 1 | Step 210 | Loss: 1.1443 | LR: 2.00e-05
[2026-04-25 17:55:42] Epoch 1 | Step 220 | Loss: 1.1430 | LR: 2.00e-05
[2026-04-25 17:55:45] Epoch 1 | Step 230 | Loss: 1.1414 | LR: 2.00e-05
[2026-04-25 17:55:47] Epoch 1 | Step 240 | Loss: 1.1347 | LR: 2.00e-05
[2026-04-25 17:55:50] Epoch 1 | Step 250 | Loss: 1.1281 | LR: 2.00e-05
[2026-04-25 17:55:53] Epoch 1 | Step 260 | Loss: 1.1317 | LR: 1.89e-05
[2026-04-25 17:55:55] Epoch 1 | Step 270 | Loss: 1.1303 | LR: 1.58e-05
[2026-04-25 17:55:58] Epoch 1 | Step 280 | Loss: 1.1275 | LR: 1.16e-05
[2026-04-25 17:56:00] Epoch 1 | Step 290 | Loss: 1.1248 | LR: 7.17e-06
[2026-04-25 17:56:03] Epoch 1 | Step 300 | Loss: 1.1198 | LR: 3.72e-06
[2026-04-25 17:56:06] Epoch 1 | Step 310 | Loss: 1.1188 | LR: 2.07e-06
[2026-04-25 17:56:08] Epoch 1 | Step 320 | Loss: 1.1224 | LR: 2.00e-06
[2026-04-25 17:56:11] Epoch 1 | Step 330 | Loss: 1.1196 | LR: 2.00e-06
[2026-04-25 17:56:13] Epoch 1 | Step 340 | Loss: 1.1193 | LR: 2.00e-06
[2026-04-25 17:56:15] Epoch 1 | Step 350 | Loss: 1.1143 | LR: 2.00e-06
[2026-04-25 17:56:18] Epoch 1 | Step 360 | Loss: 1.1109 | LR: 2.00e-06
[2026-04-25 17:56:21] Epoch 1 | Step 370 | Loss: 1.1074 | LR: 2.00e-06
[2026-04-25 17:56:23] Epoch 1 | Step 380 | Loss: 1.1095 | LR: 2.00e-06
[2026-04-25 17:56:26] Epoch 1 | Step 390 | Loss: 1.1066 | LR: 2.00e-06
[2026-04-25 17:56:28] Epoch 1 | Step 400 | Loss: 1.1040 | LR: 2.00e-06
[2026-04-25 17:56:31] Epoch 1 | Step 410 | Loss: 1.1061 | LR: 2.00e-06
[2026-04-25 17:56:34] Epoch 1 | Step 420 | Loss: 1.1029 | LR: 2.00e-06
[2026-04-25 17:56:36] Epoch 1 | Step 430 | Loss: 1.1003 | LR: 2.00e-06
[2026-04-25 17:56:39] Epoch 1 | Step 440 | Loss: 1.0993 | LR: 2.00e-06
[2026-04-25 17:56:41] Epoch 1 | Step 450 | Loss: 1.0988 | LR: 2.00e-06
[2026-04-25 17:56:44] Epoch 1 | Step 460 | Loss: 1.1001 | LR: 2.00e-06
[2026-04-25 17:56:46] Epoch 1 | Step 470 | Loss: 1.1021 | LR: 2.00e-06
[2026-04-25 17:56:49] Epoch 1 | Step 480 | Loss: 1.1025 | LR: 2.00e-06
[2026-04-25 17:56:51] Epoch 1 | Step 490 | Loss: 1.1045 | LR: 2.00e-06
[2026-04-25 17:56:54] Epoch 1 | Step 500 | Loss: 1.1021 | LR: 2.00e-06
[2026-04-25 17:56:57] Epoch 1 | Step 510 | Loss: 1.0999 | LR: 2.00e-06
[2026-04-25 17:56:59] Epoch 1 | Step 520 | Loss: 1.0994 | LR: 2.00e-06
[2026-04-25 17:57:02] Epoch 1 | Step 530 | Loss: 1.0979 | LR: 2.00e-06
[2026-04-25 17:57:04] Epoch 1 | Step 540 | Loss: 1.0996 | LR: 2.00e-06
[2026-04-25 17:57:07] Epoch 1 | Step 550 | Loss: 1.0987 | LR: 2.00e-06
[2026-04-25 17:57:10] Epoch 1 | Step 560 | Loss: 1.0978 | LR: 2.00e-06
[2026-04-25 17:57:12] Epoch 1 | Step 570 | Loss: 1.0967 | LR: 2.00e-06
[2026-04-25 17:57:15] Epoch 1 | Step 580 | Loss: 1.0964 | LR: 2.00e-06
[2026-04-25 17:57:17] Epoch 1 | Step 590 | Loss: 1.0982 | LR: 2.00e-06
[2026-04-25 17:57:20] Epoch 1 | Step 600 | Loss: 1.0986 | LR: 2.00e-06
[2026-04-25 17:57:22] Epoch 1 | Step 610 | Loss: 1.0997 | LR: 2.00e-06
[2026-04-25 17:57:25] Epoch 1 | Step 620 | Loss: 1.0978 | LR: 2.00e-06
[2026-04-25 17:57:26] Epoch 1 completed in 161.35s | Loss: 1.0968
[2026-04-25 17:57:33] Checkpoint saved: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5/checkpoints/checkpoint_step_625.pt
[2026-04-25 17:57:39]
Training completed!
[2026-04-25 17:57:41] Final model: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5/model_final.pt
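The logged counts fit the script's arithmetic: steps_per_epoch = ceil(batches_before_split / num_processes) and total_steps = epochs * steps_per_epoch // gradient_accumulation_steps. Since the 5000 pre-split batches became 2500 after accelerator.prepare, the run appears to have used 2 processes, even though the host metadata lists 4 GPUs; that process count is inferred from the log, not stated anywhere. A quick check:

import math

# Figures from the config and this log; num_processes is inferred.
samples, per_gpu_batch, grad_accum, epochs = 20000, 4, 4, 1

batches_before_split = samples // per_gpu_batch                    # 5000, as logged
num_processes = batches_before_split // 2500                       # 2, from the post-split count
steps_per_epoch = math.ceil(batches_before_split / num_processes)  # 2500, as logged
total_steps = epochs * steps_per_epoch // grad_accum               # 625, as logged

effective_batch = per_gpu_batch * grad_accum * num_processes       # 32 sequences per optimizer step
print(total_steps, effective_batch)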
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/requirements.txt
ADDED
@@ -0,0 +1,245 @@
setuptools==78.1.1
wheel==0.45.1
pip==25.2
webencodings==0.5.1
triton==3.2.0
pytz==2025.2
pydub==0.25.1
pure_eval==0.2.3
ptyprocess==0.7.0
nvidia-ml-py==13.590.48
nvidia-cusparselt-cu12==0.6.2
mpmath==1.3.0
ipython-genutils==0.2.0
fastjsonschema==2.21.2
brotli==1.2.0
antlr4-python3-runtime==4.9.3
xxhash==3.6.0
widgetsnbextension==4.0.14
websocket-client==1.9.0
webcolors==24.11.1
wcwidth==0.2.14
urllib3==2.5.0
uri-template==1.3.0
tzdata==2025.2
typing_extensions==4.15.0
types-python-dateutil==2.9.0.20251008
traitlets==5.14.3
tqdm==4.67.1
tornado==6.5.2
tomlkit==0.13.3
tinycss2==1.4.0
tabulate==0.9.0
sympy==1.13.1
soupsieve==2.8
sniffio==1.3.1
smmap==5.0.2
six==1.17.0
shellingham==1.5.4
Send2Trash==1.8.3
semantic-version==2.10.0
safetensors==0.6.2
rpds-py==0.27.1
rfc3986-validator==0.1.1
regex==2025.9.18
pyzmq==27.1.0
PyYAML==6.0.3
python-multipart==0.0.22
python-json-logger==4.0.0
python-dotenv==1.2.1
pyparsing==3.2.5
PyJWT==2.8.0
Pygments==2.19.2
pycparser==2.23
pyarrow==22.0.0
psutil==7.1.0
protobuf==6.33.4
propcache==0.4.1
prometheus_client==0.23.1
portalocker==3.2.0
platformdirs==4.5.0
pillow==11.3.0
pexpect==4.9.0
pathspec==1.0.4
parso==0.8.5
pandocfilters==1.5.1
packaging==25.0
orjson==3.11.6
opt_einsum==3.4.0
nvidia-nvtx-cu12==12.4.127
nvidia-nvjitlink-cu12==12.4.127
nvidia-nccl-cu12==2.21.5
nvidia-curand-cu12==10.3.5.147
nvidia-cufile-cu12==1.13.1.3
nvidia-cufft-cu12==11.2.1.3
nvidia-cuda-runtime-cu12==12.4.127
nvidia-cuda-nvrtc-cu12==12.4.127
nvidia-cuda-cupti-cu12==12.4.127
nvidia-cublas-cu12==12.4.5.8
numpy==2.3.3
ninja==1.13.0
networkx==3.5
nest-asyncio==1.6.0
narwhals==2.15.0
mypy_extensions==1.1.0
multidict==6.7.0
mistune==3.1.4
mdurl==0.1.2
MarkupSafe==3.0.3
lxml==6.0.2
librt==0.8.0
lark==1.3.0
kiwisolver==1.4.9
jupyterlab_widgets==3.0.15
jupyterlab_pygments==0.3.0
jsonpointer==3.0.0
json5==0.12.1
itsdangerous==2.2.0
idna==3.10
hf-xet==1.1.10
h11==0.16.0
groovy==0.1.2
fsspec==2025.9.0
frozenlist==1.8.0
fqdn==1.5.1
fonttools==4.60.1
filelock==3.19.1
ffmpy==1.0.0
executing==2.2.1
einops==0.8.1
dill==0.4.0
defusedxml==0.7.1
decorator==5.2.1
debugpy==1.8.17
dacite==1.9.2
cycler==0.12.1
comm==0.2.3
colorama==0.4.6
click==8.3.1
charset-normalizer==3.4.3
certifi==2025.10.5
bleach==6.2.0
babel==2.17.0
attrs==25.4.0
async-lru==2.0.5
asttokens==3.0.0
annotated-types==0.7.0
annotated-doc==0.0.4
aiohappyeyeballs==2.6.1
aiofiles==24.1.0
yarl==1.22.0
uvicorn==0.40.0
typing-inspection==0.4.2
terminado==0.18.1
stack-data==0.6.3
sentry-sdk==2.50.0
scipy==1.17.0
sacrebleu==2.6.0
rfc3987-syntax==1.1.0
rfc3339-validator==0.1.4
requests==2.32.5
reportlab==4.4.9
referencing==0.36.2
python-dateutil==2.9.0.post0
pydantic_core==2.41.5
prompt_toolkit==3.0.52
plotly==6.5.2
pathlib2==2.3.7.post1
orderedmultidict==1.0.2
optree==0.17.0
omegaconf==2.3.0
nvidia-cusparse-cu12==12.3.1.170
nvidia-cudnn-cu12==9.1.0.70
mypy==1.19.1
multiprocess==0.70.16
matplotlib-inline==0.1.7
markdown-it-py==4.0.0
jupyter_core==5.8.1
Jinja2==3.1.6
jedi==0.19.2
ipython_pygments_lexers==1.1.1
httpcore==1.0.9
gitdb==4.0.12
ftfy==6.3.1
contourpy==1.3.3
cffi==2.0.0
beautifulsoup4==4.14.2
anyio==4.11.0
aiosignal==1.4.0
starlette==0.50.0
rich==14.2.0
pydantic==2.12.5
pandas==2.3.3
nvidia-cusolver-cu12==11.6.1.9
matplotlib==3.10.7
jupyter_server_terminals==0.5.3
jupyter_client==8.6.3
jsonschema-specifications==2025.9.1
ipython==9.6.0
hydra-core==1.3.2
huggingface-hub==0.35.3
httpx==0.28.1
GitPython==3.1.46
furl==2.1.4
cryptography==46.0.4
arrow==1.3.0
argon2-cffi-bindings==25.1.0
aiohttp==3.13.1
wandb==0.24.0
typer==0.21.1
torch==2.6.0
tokenizers==0.22.1
seaborn==0.13.2
safehttpx==0.1.7
jsonschema==4.25.1
joypy==0.2.6
isoduration==20.11.0
ipywidgets==8.1.7
ipykernel==6.30.1
gradio_client==2.0.3
fastapi==0.128.0
Authlib==1.6.6
argon2-cffi==25.1.0
transformers==4.57.6
nbformat==5.10.4
mlstm_kernels==2.0.2
jupyter-console==6.6.3
gradio==6.5.1
datasets==4.3.0
clearml==1.16.4
accelerate==1.10.1
xlstm==2.0.4
nbclient==0.10.2
jupyter-events==0.12.0
trackio==0.15.0
nbconvert==7.16.6
jupyter_server==2.17.0
notebook_shim==0.2.4
jupyterlab_server==2.27.3
jupyter-lsp==2.3.0
nbclassic==1.3.3
jupyterlab==4.4.9
notebook==7.4.7
jupyter_contrib_core==0.4.2
jupyter==1.1.1
jupyter_nbextensions_configurator==0.6.4
causal-conv1d==1.5.0.post8
flash_attn==2.7.4.post1
mamba-ssm==2.2.4
hnet==0.0.1
autocommand==2.2.2
backports.tarfile==1.2.0
importlib_metadata==8.0.0
inflect==7.3.1
jaraco.collections==5.1.0
jaraco.context==5.3.0
jaraco.functools==4.0.1
jaraco.text==3.12.1
more-itertools==10.3.0
packaging==24.2
platformdirs==4.2.2
tomli==2.0.1
typeguard==4.3.0
typing_extensions==4.12.2
wheel==0.45.1
zipp==3.19.2
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/wandb-metadata.json
ADDED
@@ -0,0 +1,70 @@
{
  "os": "Linux-5.4.0-176-generic-x86_64-with-glibc2.35",
  "python": "CPython 3.12.0",
  "startedAt": "2026-04-25T17:54:39.294048Z",
  "args": [
    "tracking=wandb",
    "tracking.project=code-completion_lr-sweep",
    "tracking.run_name=pythia_1b_lr_2e-5",
    "training.lr=2e-5",
    "paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5",
    "model=pythia_1b",
    "data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full"
  ],
  "program": "/workspace/byte-llms-code/code_completion_exp/train_pythia/train.py",
  "codePath": "code_completion_exp/train_pythia/train.py",
  "codePathLocal": "train.py",
  "git": {
    "remote": "https://github.com/naryst/byte-llms-code.git",
    "commit": "f111e13281aa0dc58e24302edab5b0d5c2024586"
  },
  "email": "nikita@local.ru",
  "root": "/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5",
  "host": "7504e518d24a",
  "executable": "/venv/bytellm/bin/python",
  "cpu_count": 64,
  "cpu_count_logical": 128,
  "gpu": "NVIDIA H100 80GB HBM3",
  "gpu_count": 4,
  "disk": {
    "/": {
      "total": "265214230528",
      "used": "56100974592"
    }
  },
  "memory": {
    "total": "1081679683584"
  },
  "gpu_nvidia": [
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-b60cdcab-2033-2009-41de-be646c953a20"
    },
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-9982b420-4520-4238-c378-ec5a46015474"
    },
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f"
    },
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134"
    }
  ],
  "cudaVersion": "12.2",
  "writerId": "5e04zafr99ci3kv9t06h2zc7nrpy0bwg"
}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/files/wandb-summary.json
ADDED
@@ -0,0 +1 @@
{"train/loss_avg":1.097824421620387,"train/lr":2.0000000000000003e-06,"_runtime":181,"epoch/time":161.35113525390625,"epoch/loss":1.0968492313854397,"train/step_time":0.2730685234069824,"train/epoch":1,"_step":620,"_wandb":{"runtime":181},"train/loss":1.161327913403511,"_timestamp":1.7771398465400174e+09}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/logs/debug-core.log
ADDED
@@ -0,0 +1,16 @@
{"time":"2026-04-25T17:54:39.376387108Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmpxb4kgtwq/port-56987.txt","pid":56987,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false,"enable-dcgm-profiling":false}
{"time":"2026-04-25T17:54:39.376793032Z","level":"INFO","msg":"server: will exit if parent process dies","ppid":56987}
{"time":"2026-04-25T17:54:39.376773037Z","level":"INFO","msg":"server: accepting connections","addr":{"Name":"/tmp/wandb-56987-57050-2526965137/socket","Net":"unix"}}
{"time":"2026-04-25T17:54:39.56499173Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"1(@)"}
{"time":"2026-04-25T17:54:39.586802371Z","level":"INFO","msg":"handleInformInit: received","streamId":"8mll1jbb","id":"1(@)"}
{"time":"2026-04-25T17:54:39.984087312Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"8mll1jbb","id":"1(@)"}
{"time":"2026-04-25T17:57:42.5333524Z","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"8mll1jbb","id":"1(@)"}
{"time":"2026-04-25T17:57:42.53378667Z","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"8mll1jbb","id":"1(@)"}
{"time":"2026-04-25T17:57:42.55447515Z","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"1(@)"}
{"time":"2026-04-25T17:57:42.554487559Z","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"1(@)"}
{"time":"2026-04-25T17:57:42.554492637Z","level":"INFO","msg":"server is shutting down"}
{"time":"2026-04-25T17:57:42.554492194Z","level":"INFO","msg":"connection: closing","id":"1(@)"}
{"time":"2026-04-25T17:57:42.554533617Z","level":"INFO","msg":"connection: closed successfully","id":"1(@)"}
{"time":"2026-04-25T17:57:42.554537717Z","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"1(@)"}
{"time":"2026-04-25T17:57:42.554537595Z","level":"INFO","msg":"server: listener closed","addr":{"Name":"/tmp/wandb-56987-57050-2526965137/socket","Net":"unix"}}
{"time":"2026-04-25T17:57:42.554555452Z","level":"INFO","msg":"server is closed"}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_175439-8mll1jbb/logs/debug-internal.log
ADDED
@@ -0,0 +1,13 @@
{"time":"2026-04-25T17:54:39.586910823Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"}
{"time":"2026-04-25T17:54:39.983922284Z","level":"INFO","msg":"stream: created new stream","id":"8mll1jbb"}
{"time":"2026-04-25T17:54:39.983982891Z","level":"INFO","msg":"handler: started","stream_id":"8mll1jbb"}
{"time":"2026-04-25T17:54:39.984081738Z","level":"INFO","msg":"stream: started","id":"8mll1jbb"}
{"time":"2026-04-25T17:54:39.9840977Z","level":"INFO","msg":"sender: started","stream_id":"8mll1jbb"}
{"time":"2026-04-25T17:54:39.984097461Z","level":"INFO","msg":"writer: started","stream_id":"8mll1jbb"}
{"time":"2026-04-25T17:54:40.110275288Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"}
{"time":"2026-04-25T17:57:42.363767521Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
{"time":"2026-04-25T17:57:42.530173527Z","level":"INFO","msg":"handler: operation stats","stats":{}}
{"time":"2026-04-25T17:57:42.533374933Z","level":"INFO","msg":"stream: closing","id":"8mll1jbb"}
{"time":"2026-04-25T17:57:42.533385998Z","level":"INFO","msg":"handler: closed","stream_id":"8mll1jbb"}
{"time":"2026-04-25T17:57:42.533445218Z","level":"INFO","msg":"sender: closed","stream_id":"8mll1jbb"}
{"time":"2026-04-25T17:57:42.5334496Z","level":"INFO","msg":"stream: closed","id":"8mll1jbb"}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/code/code_completion_exp/train_pythia/train.py
ADDED
@@ -0,0 +1,606 @@
"""
Training pipeline for Pythia (decoder-only transformer) on the Code Completion task.

Configuration via Hydra + OmegaConf, logging to Trackio.
DDP support via Accelerate for multi-GPU training.

Usage:
    # Basic run (single GPU)
    python train.py

    # Multi-GPU with Accelerate
    accelerate launch train.py

    # Multi-GPU with an explicit number of GPUs
    accelerate launch --num_processes=4 train.py

    # Override parameters via the CLI
    python train.py training.lr=1e-4 training.epochs=5

    # Select a different model config
    python train.py model=pythia_160m

    # Multirun (sweep)
    python train.py --multirun training.lr=1e-4,3e-4,1e-3

    # Disable logging
    python train.py tracking.enabled=false
"""

import os
import math
import time
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets import load_from_disk

import hydra
from hydra.core.hydra_config import HydraConfig
from omegaconf import DictConfig, OmegaConf
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoConfig,
    PreTrainedTokenizerBase,
)
from accelerate import Accelerator
from accelerate.utils import set_seed as accelerate_set_seed

# Ensure repo root is on sys.path (needed when running from subdirectory)
import sys
sys.path.insert(0, str(Path(__file__).resolve().parents[2]))

# Shared training library
from training_lib.utils import AverageMeter, log_message
from training_lib.checkpointing import save_checkpoint, load_checkpoint
from training_lib.schedulers import get_lr_scheduler
from training_lib.tracking import init_tracking, log_metrics, finish_tracking
from training_lib.validation import run_validation


# ============================================================================
# DATA
# ============================================================================


class CodeCompletionCollator:
    """Collate function for batching code completion examples."""

    def __init__(
        self,
        tokenizer: PreTrainedTokenizerBase,
        max_context_len: int = 1024,
        max_target_len: int = 256,
    ):
        self.tokenizer = tokenizer
        self.max_context_len = max_context_len
        self.max_target_len = max_target_len
        self.pad_token_id = tokenizer.pad_token_id

    def __call__(self, batch: list[dict]) -> dict:
        contexts = [item["context"] for item in batch]
        targets = [item["target"] for item in batch]

        encoded_contexts = self.tokenizer(
            contexts,
            add_special_tokens=True,
            truncation=True,
            max_length=self.max_context_len,
            return_tensors=None,
        )
        encoded_targets = self.tokenizer(
            targets,
            add_special_tokens=False,
            truncation=True,
            max_length=self.max_target_len,
            return_tensors=None,
        )

        input_ids_list = []
        context_lengths = []

        # Concatenate context + target (+ EOS); remember where the context ends
        # so the loss can be restricted to target tokens.
        for ctx_ids, tgt_ids in zip(
            encoded_contexts["input_ids"], encoded_targets["input_ids"]
        ):
            tgt_ids = tgt_ids + [self.tokenizer.eos_token_id]
            context_lengths.append(len(ctx_ids))
            input_ids_list.append(ctx_ids + tgt_ids)

        max_len = max(len(ids) for ids in input_ids_list)

        padded_input_ids = []
        attention_mask = []

        for ids in input_ids_list:
            padding_len = max_len - len(ids)
            padded_input_ids.append(ids + [self.pad_token_id] * padding_len)
            attention_mask.append([1] * len(ids) + [0] * padding_len)

        return {
            "input_ids": torch.tensor(padded_input_ids, dtype=torch.long),
            "attention_mask": torch.tensor(attention_mask, dtype=torch.long),
            "context_lengths": torch.tensor(context_lengths, dtype=torch.long),
        }


def create_dataloaders(
    cfg: DictConfig, tokenizer: PreTrainedTokenizerBase
) -> dict[str, DataLoader]:
    """Create DataLoaders for train and validation."""
    dataset_dict = load_from_disk(cfg.data.path)

    collator = CodeCompletionCollator(
        tokenizer=tokenizer,
        max_context_len=cfg.data.max_context_len,
        max_target_len=cfg.data.max_target_len,
    )

    dataloaders = {}

    if "train" in dataset_dict:
        train_dataset = dataset_dict["train"]
        max_train = cfg.data.get("max_train_samples", None)
        if max_train is not None:
            train_dataset = train_dataset.select(range(min(max_train, len(train_dataset))))
        dataloaders["train"] = DataLoader(
            train_dataset,
            batch_size=cfg.training.batch_size,
            shuffle=True,
            collate_fn=collator,
            num_workers=cfg.data.num_workers,
            pin_memory=cfg.data.pin_memory,
        )

    if "validation" in dataset_dict:
        val_dataset = dataset_dict["validation"]
        max_val = cfg.data.get("max_val_samples", None)
        if max_val is not None:
            val_dataset = val_dataset.select(range(min(max_val, len(val_dataset))))
        eval_batch_size = cfg.training.get("eval_batch_size", cfg.training.batch_size)
        dataloaders["validation"] = DataLoader(
            val_dataset,
            batch_size=eval_batch_size,
            shuffle=False,
            collate_fn=collator,
            num_workers=cfg.data.num_workers,
            pin_memory=cfg.data.pin_memory,
        )

    return dataloaders


# ============================================================================
# LOSS FUNCTIONS
# ============================================================================


def compute_loss(
    logits: torch.Tensor,
    input_ids: torch.Tensor,
    context_lengths: torch.Tensor,
    attention_mask: torch.Tensor,
) -> dict:
    """Compute the loss for the autoregressive model."""
    batch_size, seq_len, vocab_size = logits.shape

    # Standard causal LM shift: logits at position t predict token t+1.
    shift_logits = logits[:, :-1, :].contiguous()
    shift_labels = input_ids[:, 1:].contiguous()
    shift_mask = attention_mask[:, 1:].contiguous()

    # Loss is computed on target (completion) positions only; position
    # ctx_len - 1 is the first one whose prediction is a target token.
    target_mask = torch.zeros_like(shift_labels, dtype=torch.bool)
    for i in range(batch_size):
        ctx_len = context_lengths[i].item()
        target_mask[i, ctx_len - 1 :] = True

    final_mask = target_mask & shift_mask.bool()

    if final_mask.sum() > 0:
        loss = F.cross_entropy(
            shift_logits[final_mask], shift_labels[final_mask], reduction="mean"
        )
    else:
        loss = torch.tensor(0.0, device=logits.device)

    return {"loss": loss}


def _pythia_forward_loss(
    model: nn.Module,
    batch: dict,
    cfg: DictConfig,
    accelerator: Accelerator,
) -> dict:
    """Forward + loss for a plain HF causal LM (attention_mask= kwarg, .logits)."""
    input_ids = batch["input_ids"]
    attention_mask = batch["attention_mask"]
    context_lengths = batch["context_lengths"]
    output = model(input_ids, attention_mask=attention_mask)
    return compute_loss(output.logits, input_ids, context_lengths, attention_mask)


# ============================================================================
# PARAMETER GROUPING
# ============================================================================


def group_params(model: nn.Module, weight_decay: float) -> list[dict]:
    """Group parameters for the optimizer (no weight decay on biases/LayerNorm)."""
    decay_params = []
    no_decay_params = []

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue

        if "bias" in name or "LayerNorm" in name or "layernorm" in name:
            no_decay_params.append(param)
        else:
            decay_params.append(param)

    return [
        {"params": decay_params, "weight_decay": weight_decay},
        {"params": no_decay_params, "weight_decay": 0.0},
    ]


# ============================================================================
# TRAINING LOOP
# ============================================================================


def train_epoch(
    model: nn.Module,
    dataloader: DataLoader,
    optimizer: torch.optim.Optimizer,
    scheduler,
    cfg: DictConfig,
    epoch: int,
    global_step: int,
    accelerator: Accelerator,
    val_dataloader: DataLoader | None = None,
    best_val_loss: float = float("inf"),
) -> tuple[int, float]:
    """One training epoch. Returns (global_step, best_val_loss)."""
    model.train()

    loss_meter = AverageMeter()

    optimizer.zero_grad()
    accumulated_loss = 0.0
    accumulated_steps = 0

    epoch_start_time = time.time()
    step_start_time = time.time()

    for batch_idx, batch in enumerate(dataloader):
        input_ids = batch["input_ids"]
        attention_mask = batch["attention_mask"]
        context_lengths = batch["context_lengths"]

        with accelerator.autocast():
            output = model(input_ids, attention_mask=attention_mask)
            logits = output.logits
            loss_dict = compute_loss(
                logits, input_ids, context_lengths, attention_mask
            )

        loss = loss_dict["loss"] / cfg.training.gradient_accumulation_steps
        accelerator.backward(loss)

        accumulated_loss += loss_dict["loss"].item()
        accumulated_steps += 1

        if accumulated_steps == cfg.training.gradient_accumulation_steps:
            if cfg.training.max_grad_norm > 0:
                accelerator.clip_grad_norm_(
                    model.parameters(), cfg.training.max_grad_norm
                )

            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            avg_loss = accumulated_loss / cfg.training.gradient_accumulation_steps
            loss_meter.update(avg_loss)

            global_step += 1

            if global_step % cfg.logging.log_interval == 0:
                step_time = time.time() - step_start_time
                current_lr = scheduler.get_last_lr()[0]

                metrics = {
                    "train/loss": loss_meter.val,
                    "train/loss_avg": loss_meter.avg,
                    "train/lr": current_lr,
                    "train/epoch": epoch,
                    "train/step_time": step_time / cfg.logging.log_interval,
                }

                log_metrics(metrics, step=global_step)

                log_message(
                    f"Epoch {epoch} | Step {global_step} | "
                    f"Loss: {loss_meter.avg:.4f} | "
                    f"LR: {current_lr:.2e}",
                    cfg,
                    accelerator,
                )

                step_start_time = time.time()

            if (
                cfg.logging.save_interval > 0
                and global_step % cfg.logging.save_interval == 0
            ):
                save_checkpoint(
                    model, optimizer, scheduler, global_step, epoch, cfg, accelerator
                )

            eval_interval = cfg.logging.get("eval_interval", 0)
            if (
                eval_interval > 0
                and val_dataloader is not None
                and global_step % eval_interval == 0
            ):
                val_metrics = run_validation(
                    model=model,
                    dataloader=val_dataloader,
                    cfg=cfg,
                    global_step=global_step,
                    accelerator=accelerator,
                    forward_loss_fn=_pythia_forward_loss,
                )

                if val_metrics["val/loss"] < best_val_loss:
                    best_val_loss = val_metrics["val/loss"]
                    if accelerator.is_main_process:
                        best_model_path = Path(cfg.paths.output_dir) / "model_best.pt"
                        unwrapped_model = accelerator.unwrap_model(model)
                        torch.save(unwrapped_model.state_dict(), best_model_path)
                        log_message(
                            f"New best model saved! Val loss: {best_val_loss:.4f}",
                            cfg,
                            accelerator
                        )

                    log_metrics(
                        {
                            "best/val_loss": best_val_loss,
                            "best/val_perplexity": val_metrics["val/perplexity"],
                            "best/step": global_step,
                        },
                        step=global_step,
                    )

                model.train()

            accumulated_loss = 0.0
            accumulated_steps = 0

    epoch_time = time.time() - epoch_start_time

    log_message(
        f"Epoch {epoch} completed in {epoch_time:.2f}s | "
        f"Loss: {loss_meter.avg:.4f}",
        cfg,
        accelerator,
    )

    log_metrics({
        "epoch/loss": loss_meter.avg,
        "epoch/time": epoch_time,
    })

    return global_step, best_val_loss


# ============================================================================
# MAIN
# ============================================================================


@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
    """Main training entry point with DDP support via Accelerate."""

    # === Performance: Enable TF32 for faster matmuls on Ampere+ GPUs ===
    torch.set_float32_matmul_precision('high')

    # === Accelerator Setup ===
    mixed_precision = "bf16" if cfg.training.use_amp else "no"

    accelerator = Accelerator(
        mixed_precision=mixed_precision,
        gradient_accumulation_steps=cfg.training.gradient_accumulation_steps,
    )

    # === Setup ===
    accelerate_set_seed(cfg.seed)

    if cfg.paths.output_dir is None:
        cfg.paths.output_dir = HydraConfig.get().runtime.output_dir

    OmegaConf.resolve(cfg)

    log_message(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES', 'not set')}", cfg, accelerator)
    log_message(f"Number of processes: {accelerator.num_processes}", cfg, accelerator)
    log_message(f"Process index: {accelerator.process_index}", cfg, accelerator)
    log_message(f"Mixed precision: {mixed_precision}", cfg, accelerator)

    log_message("=" * 60, cfg, accelerator)
    log_message("Pythia Training Pipeline (Hydra + Trackio + Accelerate)", cfg, accelerator)
    log_message("=" * 60, cfg, accelerator)
    log_message(f"Config:\n{OmegaConf.to_yaml(cfg)}", cfg, accelerator)

    # === Trackio Init ===
    init_tracking(cfg, accelerator)

    # === Tokenizer ===
    log_message("Initializing tokenizer...", cfg, accelerator)
    tokenizer = AutoTokenizer.from_pretrained(cfg.model.name)

    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id

    # === Model ===
    log_message("Loading model...", cfg, accelerator)

    # Flash Attention 2
    torch_dtype = torch.bfloat16 if cfg.training.use_amp else torch.float32

    if cfg.model.checkpoint_path:
        model = AutoModelForCausalLM.from_pretrained(
            cfg.model.name,
            attn_implementation="flash_attention_2",
            torch_dtype=torch_dtype,
        )
        checkpoint = torch.load(cfg.model.checkpoint_path, map_location="cpu")
        model.load_state_dict(checkpoint["model_state_dict"] if "model_state_dict" in checkpoint else checkpoint)
        log_message(f"Loaded checkpoint: {cfg.model.checkpoint_path}", cfg, accelerator)
    elif cfg.model.from_scratch:
        config = AutoConfig.from_pretrained(cfg.model.name)
        config._attn_implementation = "flash_attention_2"
        model = AutoModelForCausalLM.from_config(config, torch_dtype=torch_dtype)
        log_message(f"Initialized from scratch: {cfg.model.name}", cfg, accelerator)
    else:
        model = AutoModelForCausalLM.from_pretrained(
            cfg.model.name,
            attn_implementation="flash_attention_2",
            torch_dtype=torch_dtype,
        )
        log_message(f"Loaded pretrained: {cfg.model.name}", cfg, accelerator)

    model.train()

    # Log model info
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    log_message(f"Total params: {total_params:,}", cfg, accelerator)
    log_message(f"Trainable params: {trainable_params:,}", cfg, accelerator)

    # === Data ===
    log_message("Creating dataloaders...", cfg, accelerator)
    dataloaders = create_dataloaders(cfg, tokenizer)

    train_dataloader = dataloaders["train"]
    val_dataloader = dataloaders.get("validation", None)

    log_message(f"Train dataset size: {len(train_dataloader.dataset)}", cfg, accelerator)
    log_message(f"Train batches per epoch (before DDP split): {len(train_dataloader)}", cfg, accelerator)

    if val_dataloader:
        log_message(f"Validation dataset size: {len(val_dataloader.dataset)}", cfg, accelerator)
        log_message(f"Validation batches: {len(val_dataloader)}", cfg, accelerator)
    else:
        log_message("No validation dataset found", cfg, accelerator)

    # === Optimizer ===
    log_message("Creating optimizer...", cfg, accelerator)
    param_groups = group_params(model, cfg.training.weight_decay)

    optimizer = torch.optim.AdamW(
        param_groups,
        lr=cfg.training.lr,
        betas=tuple(cfg.training.betas),
        eps=cfg.training.eps,
    )

    # === Scheduler ===
    steps_per_epoch = math.ceil(
        len(train_dataloader) / accelerator.num_processes
    )
    total_steps = (
        cfg.training.epochs
        * steps_per_epoch
        // cfg.training.gradient_accumulation_steps
    )
    scheduler = get_lr_scheduler(optimizer, cfg, total_steps)

    log_message(
        f"Total steps: {total_steps}, Steps per epoch: {steps_per_epoch}",
        cfg,
        accelerator
    )

    # === Accelerate Prepare ===
    log_message("Preparing model, optimizer, and dataloaders with Accelerate...", cfg, accelerator)

    if val_dataloader is not None:
        model, optimizer, train_dataloader, val_dataloader, scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, val_dataloader, scheduler
        )
    else:
        model, optimizer, train_dataloader, scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, scheduler
        )

    log_message(f"Train batches per epoch (after DDP split): {len(train_dataloader)}", cfg, accelerator)

    # === Resume ===
    global_step = 0
    start_epoch = 1

    if cfg.training.resume and cfg.training.resume_checkpoint:
        global_step, start_epoch = load_checkpoint(
            model, optimizer, scheduler, cfg.training.resume_checkpoint, cfg, accelerator
        )
        start_epoch += 1

    # === Training Loop ===
    log_message("Starting training...", cfg, accelerator)

    best_val_loss = float("inf")

    try:
        for epoch in range(start_epoch, cfg.training.epochs + 1):
            log_message(f"\n{'=' * 60}", cfg, accelerator)
            log_message(f"EPOCH {epoch}/{cfg.training.epochs}", cfg, accelerator)
            log_message(f"{'=' * 60}", cfg, accelerator)

            global_step, best_val_loss = train_epoch(
                model=model,
                dataloader=train_dataloader,
                optimizer=optimizer,
                scheduler=scheduler,
                cfg=cfg,
                epoch=epoch,
                global_step=global_step,
                accelerator=accelerator,
                val_dataloader=val_dataloader,
                best_val_loss=best_val_loss,
            )

            if cfg.logging.save_every_epoch:
                save_checkpoint(
                    model, optimizer, scheduler, global_step, epoch, cfg, accelerator
                )

    except KeyboardInterrupt:
        log_message("Training interrupted by user", cfg, accelerator)
        save_checkpoint(model, optimizer, scheduler, global_step, epoch, cfg, accelerator)

    # === Final Save ===
    log_message("\nTraining completed!", cfg, accelerator)

    if accelerator.is_main_process:
        final_model_path = Path(cfg.paths.output_dir) / "model_final.pt"
        unwrapped_model = accelerator.unwrap_model(model)
        torch.save(unwrapped_model.state_dict(), final_model_path)
        log_message(f"Final model: {final_model_path}", cfg, accelerator)

    accelerator.wait_for_everyone()
    finish_tracking()


if __name__ == "__main__":
    main()
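Editor's note: the completion-only loss masking in compute_loss above is the part of this script that is easiest to get wrong, so here is a minimal, self-contained sketch of the same scheme on a toy batch. All tensor values below are made up for illustration; only the masking logic mirrors the uploaded file.

    # Toy demonstration of completion-only loss masking (values are illustrative).
    import torch
    import torch.nn.functional as F

    batch_size, seq_len, vocab = 1, 6, 8
    logits = torch.randn(batch_size, seq_len, vocab)
    input_ids = torch.randint(0, vocab, (batch_size, seq_len))
    attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
    context_lengths = torch.tensor([4])  # first 4 tokens = context, last 2 = target

    # Causal shift: logits at position t score the token at t+1.
    shift_logits = logits[:, :-1, :]
    shift_labels = input_ids[:, 1:]
    shift_mask = attention_mask[:, 1:].bool()

    # Position ctx_len - 1 is the first whose prediction is a target token,
    # so everything from there on counts toward the loss.
    target_mask = torch.zeros_like(shift_labels, dtype=torch.bool)
    for i in range(batch_size):
        target_mask[i, context_lengths[i].item() - 1 :] = True

    final_mask = target_mask & shift_mask
    loss = F.cross_entropy(shift_logits[final_mask], shift_labels[final_mask])

Context tokens still attend normally in the forward pass; they are simply excluded from the cross-entropy, so the model is optimized only for the completion it is asked to produce.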
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/config.yaml
ADDED
@@ -0,0 +1,146 @@
_wandb:
  value:
    cli_version: 0.24.0
    code_path: code/code_completion_exp/train_pythia/train.py
    e:
      p4yngwo0m9zjwjsftw16p6b9r2nywedy:
        args:
        - tracking=wandb
        - tracking.project=code-completion_lr-sweep
        - tracking.run_name=pythia_1b_lr_2e-5
        - training.lr=2e-5
        - paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
        - model=pythia_1b
        - data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full
        codePath: code_completion_exp/train_pythia/train.py
        codePathLocal: train.py
        cpu_count: 64
        cpu_count_logical: 128
        cudaVersion: "12.2"
        disk:
          /:
            total: "265214230528"
            used: "96701419520"
        email: nikita@local.ru
        executable: /venv/bytellm/bin/python
        git:
          commit: f111e13281aa0dc58e24302edab5b0d5c2024586
          remote: https://github.com/naryst/byte-llms-code.git
        gpu: NVIDIA H100 80GB HBM3
        gpu_count: 4
        gpu_nvidia:
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-b60cdcab-2033-2009-41de-be646c953a20
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-9982b420-4520-4238-c378-ec5a46015474
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f
        - architecture: Hopper
          cudaCores: 16896
          memoryTotal: "85520809984"
          name: NVIDIA H100 80GB HBM3
          uuid: GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134
        host: 7504e518d24a
        memory:
          total: "1081679683584"
        os: Linux-5.4.0-176-generic-x86_64-with-glibc2.35
        program: /workspace/byte-llms-code/code_completion_exp/train_pythia/train.py
        python: CPython 3.12.0
        root: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
        startedAt: "2026-04-25T18:48:22.988796Z"
        writerId: p4yngwo0m9zjwjsftw16p6b9r2nywedy
    m: []
    python_version: 3.12.0
    t:
      "1":
      - 1
      - 11
      - 49
      - 50
      - 51
      - 71
      - 105
      "2":
      - 1
      - 11
      - 49
      - 50
      - 51
      - 71
      - 105
      "3":
      - 2
      - 13
      - 16
      - 61
      "4": 3.12.0
      "5": 0.24.0
      "6": 4.57.6
      "12": 0.24.0
      "13": linux-x86_64
data:
  value:
    max_context_len: 4096
    max_target_len: 256
    max_train_samples: null
    max_val_samples: 2000
    num_workers: 4
    path: /workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full
    pin_memory: true
device:
  value: cuda
logging:
  value:
    eval_interval: 2000
    log_interval: 10
    save_every_epoch: false
    save_interval: 0
model:
  value:
    checkpoint_path: null
    from_scratch: false
    name: EleutherAI/pythia-1b
paths:
  value:
    output_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
seed:
  value: 42
tracking:
  value:
    backend: wandb
    base_url: https://wandb.platun0v.ru
    enabled: true
    entity: null
    local_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5
    project: code-completion_lr-sweep
    run_name: pythia_1b_lr_2e-5
    training: null
training:
  value:
    batch_size: 4
    betas:
    - 0.9
    - 0.95
    decay_ratio: 0.2
    epochs: 1
    eps: 1e-08
    eval_batch_size: 12
    gradient_accumulation_steps: 4
    lr: 2e-05
    lr_scheduler: wsd
    max_grad_norm: 1
    min_lr_ratio: 0.1
    resume: false
    resume_checkpoint: null
    use_amp: true
    warmup_ratio: 0.1
    warmup_steps: 100
    weight_decay: 0.1
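Editor's note: the step counts in the output.log below follow directly from this config and the script's scheduler setup. A short worked check, with num_processes=2 as an assumption inferred from the "before/after DDP split" lines in the log (79100 batches becoming 39550 steps per epoch):

    # Worked check of the step bookkeeping; num_processes=2 is an assumption.
    import math

    train_samples = 316397          # from output.log
    batch_size = 4                  # training.batch_size
    grad_accum = 4                  # training.gradient_accumulation_steps
    epochs = 1                      # training.epochs
    num_processes = 2               # assumed: 79100 batches -> 39550 per process

    batches_per_epoch = math.ceil(train_samples / batch_size)       # 79100
    steps_per_epoch = math.ceil(batches_per_epoch / num_processes)  # 39550
    total_steps = epochs * steps_per_epoch // grad_accum            # 9887

This reproduces the logged "Total steps: 9887, Steps per epoch: 39550" and is the horizon the wsd scheduler is built over.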
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/output.log
ADDED
|
@@ -0,0 +1,1056 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[2026-04-25 18:48:24] Initializing tokenizer...
|
| 2 |
+
[2026-04-25 18:48:24] Loading model...
|
| 3 |
+
`torch_dtype` is deprecated! Use `dtype` instead!
|
| 4 |
+
[2026-04-25 18:48:27] Loaded pretrained: EleutherAI/pythia-1b
|
| 5 |
+
[2026-04-25 18:48:27] Total params: 1,011,781,632
|
| 6 |
+
[2026-04-25 18:48:27] Trainable params: 1,011,781,632
|
| 7 |
+
[2026-04-25 18:48:27] Creating dataloaders...
|
| 8 |
+
[2026-04-25 18:48:27] Train dataset size: 316397
|
| 9 |
+
[2026-04-25 18:48:27] Train batches per epoch (before DDP split): 79100
|
| 10 |
+
[2026-04-25 18:48:27] Validation dataset size: 2000
|
| 11 |
+
[2026-04-25 18:48:27] Validation batches: 167
|
| 12 |
+
[2026-04-25 18:48:27] Creating optimizer...
|
| 13 |
+
[2026-04-25 18:48:27] Total steps: 9887, Steps per epoch: 39550
|
| 14 |
+
[2026-04-25 18:48:27] Preparing model, optimizer, and dataloaders with Accelerate...
|
| 15 |
+
[2026-04-25 18:48:28] Train batches per epoch (after DDP split): 39550
|
| 16 |
+
[2026-04-25 18:48:28] Starting training...
|
| 17 |
+
[2026-04-25 18:48:28]
|
| 18 |
+
============================================================
|
| 19 |
+
[2026-04-25 18:48:28] EPOCH 1/1
|
| 20 |
+
[2026-04-25 18:48:28] ============================================================
|
| 21 |
+
[2026-04-25 18:48:32] Epoch 1 | Step 10 | Loss: 2.5532 | LR: 2.36e-06
|
| 22 |
+
[2026-04-25 18:48:34] Epoch 1 | Step 20 | Loss: 2.4795 | LR: 2.73e-06
|
| 23 |
+
[2026-04-25 18:48:36] Epoch 1 | Step 30 | Loss: 2.3311 | LR: 3.09e-06
|
| 24 |
+
[2026-04-25 18:48:39] Epoch 1 | Step 40 | Loss: 2.2161 | LR: 3.46e-06
|
| 25 |
+
[2026-04-25 18:48:42] Epoch 1 | Step 50 | Loss: 2.0812 | LR: 3.82e-06
|
| 26 |
+
[2026-04-25 18:48:44] Epoch 1 | Step 60 | Loss: 1.9707 | LR: 4.19e-06
|
| 27 |
+
[2026-04-25 18:48:47] Epoch 1 | Step 70 | Loss: 1.8690 | LR: 4.55e-06
|
| 28 |
+
[2026-04-25 18:48:50] Epoch 1 | Step 80 | Loss: 1.8062 | LR: 4.91e-06
|
| 29 |
+
[2026-04-25 18:48:52] Epoch 1 | Step 90 | Loss: 1.7392 | LR: 5.28e-06
|
| 30 |
+
[2026-04-25 18:48:54] Epoch 1 | Step 100 | Loss: 1.6842 | LR: 5.64e-06
|
| 31 |
+
[2026-04-25 18:48:57] Epoch 1 | Step 110 | Loss: 1.6506 | LR: 6.01e-06
|
| 32 |
+
[2026-04-25 18:48:59] Epoch 1 | Step 120 | Loss: 1.6147 | LR: 6.37e-06
|
| 33 |
+
[2026-04-25 18:49:02] Epoch 1 | Step 130 | Loss: 1.5910 | LR: 6.74e-06
|
| 34 |
+
[2026-04-25 18:49:04] Epoch 1 | Step 140 | Loss: 1.5694 | LR: 7.10e-06
|
| 35 |
+
[2026-04-25 18:49:07] Epoch 1 | Step 150 | Loss: 1.5360 | LR: 7.47e-06
|
| 36 |
+
[2026-04-25 18:49:09] Epoch 1 | Step 160 | Loss: 1.5069 | LR: 7.83e-06
|
| 37 |
+
[2026-04-25 18:49:12] Epoch 1 | Step 170 | Loss: 1.4848 | LR: 8.19e-06
|
| 38 |
+
[2026-04-25 18:49:14] Epoch 1 | Step 180 | Loss: 1.4571 | LR: 8.56e-06
|
| 39 |
+
[2026-04-25 18:49:17] Epoch 1 | Step 190 | Loss: 1.4424 | LR: 8.92e-06
|
| 40 |
+
[2026-04-25 18:49:19] Epoch 1 | Step 200 | Loss: 1.4271 | LR: 9.29e-06
|
| 41 |
+
[2026-04-25 18:49:22] Epoch 1 | Step 210 | Loss: 1.4197 | LR: 9.65e-06
|
| 42 |
+
[2026-04-25 18:49:24] Epoch 1 | Step 220 | Loss: 1.4071 | LR: 1.00e-05
|
| 43 |
+
[2026-04-25 18:49:26] Epoch 1 | Step 230 | Loss: 1.3887 | LR: 1.04e-05
|
| 44 |
+
[2026-04-25 18:49:29] Epoch 1 | Step 240 | Loss: 1.3747 | LR: 1.07e-05
|
| 45 |
+
[2026-04-25 18:49:31] Epoch 1 | Step 250 | Loss: 1.3629 | LR: 1.11e-05
|
| 46 |
+
[2026-04-25 18:49:34] Epoch 1 | Step 260 | Loss: 1.3584 | LR: 1.15e-05
|
| 47 |
+
[2026-04-25 18:49:37] Epoch 1 | Step 270 | Loss: 1.3467 | LR: 1.18e-05
|
| 48 |
+
[2026-04-25 18:49:39] Epoch 1 | Step 280 | Loss: 1.3339 | LR: 1.22e-05
|
| 49 |
+
[2026-04-25 18:49:41] Epoch 1 | Step 290 | Loss: 1.3252 | LR: 1.26e-05
|
| 50 |
+
[2026-04-25 18:49:44] Epoch 1 | Step 300 | Loss: 1.3174 | LR: 1.29e-05
|
| 51 |
+
[2026-04-25 18:49:47] Epoch 1 | Step 310 | Loss: 1.3085 | LR: 1.33e-05
|
| 52 |
+
[2026-04-25 18:49:49] Epoch 1 | Step 320 | Loss: 1.2988 | LR: 1.37e-05
|
| 53 |
+
[2026-04-25 18:49:52] Epoch 1 | Step 330 | Loss: 1.2891 | LR: 1.40e-05
|
| 54 |
+
[2026-04-25 18:49:54] Epoch 1 | Step 340 | Loss: 1.2812 | LR: 1.44e-05
|
| 55 |
+
[2026-04-25 18:49:57] Epoch 1 | Step 350 | Loss: 1.2758 | LR: 1.48e-05
|
| 56 |
+
[2026-04-25 18:49:59] Epoch 1 | Step 360 | Loss: 1.2672 | LR: 1.51e-05
|
| 57 |
+
[2026-04-25 18:50:02] Epoch 1 | Step 370 | Loss: 1.2578 | LR: 1.55e-05
|
| 58 |
+
[2026-04-25 18:50:04] Epoch 1 | Step 380 | Loss: 1.2505 | LR: 1.58e-05
|
| 59 |
+
[2026-04-25 18:50:07] Epoch 1 | Step 390 | Loss: 1.2448 | LR: 1.62e-05
|
| 60 |
+
[2026-04-25 18:50:10] Epoch 1 | Step 400 | Loss: 1.2397 | LR: 1.66e-05
|
| 61 |
+
[2026-04-25 18:50:12] Epoch 1 | Step 410 | Loss: 1.2351 | LR: 1.69e-05
|
| 62 |
+
[2026-04-25 18:50:15] Epoch 1 | Step 420 | Loss: 1.2302 | LR: 1.73e-05
|
| 63 |
+
[2026-04-25 18:50:17] Epoch 1 | Step 430 | Loss: 1.2292 | LR: 1.77e-05
|
| 64 |
+
[2026-04-25 18:50:20] Epoch 1 | Step 440 | Loss: 1.2219 | LR: 1.80e-05
|
| 65 |
+
[2026-04-25 18:50:23] Epoch 1 | Step 450 | Loss: 1.2174 | LR: 1.84e-05
|
| 66 |
+
[2026-04-25 18:50:25] Epoch 1 | Step 460 | Loss: 1.2144 | LR: 1.88e-05
|
| 67 |
+
[2026-04-25 18:50:28] Epoch 1 | Step 470 | Loss: 1.2094 | LR: 1.91e-05
|
| 68 |
+
[2026-04-25 18:50:30] Epoch 1 | Step 480 | Loss: 1.2067 | LR: 1.95e-05
|
| 69 |
+
[2026-04-25 18:50:33] Epoch 1 | Step 490 | Loss: 1.2017 | LR: 1.99e-05
|
| 70 |
+
[2026-04-25 18:50:35] Epoch 1 | Step 500 | Loss: 1.1974 | LR: 2.00e-05
|
| 71 |
+
[2026-04-25 18:50:37] Epoch 1 | Step 510 | Loss: 1.1927 | LR: 2.00e-05
|
| 72 |
+
[2026-04-25 18:50:40] Epoch 1 | Step 520 | Loss: 1.1904 | LR: 2.00e-05
|
| 73 |
+
[2026-04-25 18:50:42] Epoch 1 | Step 530 | Loss: 1.1863 | LR: 2.00e-05
|
| 74 |
+
[2026-04-25 18:50:45] Epoch 1 | Step 540 | Loss: 1.1815 | LR: 2.00e-05
|
| 75 |
+
[2026-04-25 18:50:48] Epoch 1 | Step 550 | Loss: 1.1778 | LR: 2.00e-05
|
| 76 |
+
[2026-04-25 18:50:50] Epoch 1 | Step 560 | Loss: 1.1744 | LR: 2.00e-05
|
| 77 |
+
[2026-04-25 18:50:52] Epoch 1 | Step 570 | Loss: 1.1728 | LR: 2.00e-05
|
| 78 |
+
[2026-04-25 18:50:55] Epoch 1 | Step 580 | Loss: 1.1712 | LR: 2.00e-05
|
| 79 |
+
[2026-04-25 18:50:57] Epoch 1 | Step 590 | Loss: 1.1700 | LR: 2.00e-05
|
| 80 |
+
[2026-04-25 18:51:00] Epoch 1 | Step 600 | Loss: 1.1683 | LR: 2.00e-05
|
| 81 |
+
[2026-04-25 18:51:03] Epoch 1 | Step 610 | Loss: 1.1683 | LR: 2.00e-05
|
| 82 |
+
[2026-04-25 18:51:05] Epoch 1 | Step 620 | Loss: 1.1674 | LR: 2.00e-05
|
| 83 |
+
[2026-04-25 18:51:07] Epoch 1 | Step 630 | Loss: 1.1651 | LR: 2.00e-05
|
| 84 |
+
[2026-04-25 18:51:10] Epoch 1 | Step 640 | Loss: 1.1644 | LR: 2.00e-05
|
| 85 |
+
[2026-04-25 18:51:12] Epoch 1 | Step 650 | Loss: 1.1624 | LR: 2.00e-05
|
| 86 |
+
[2026-04-25 18:51:15] Epoch 1 | Step 660 | Loss: 1.1616 | LR: 2.00e-05
|
| 87 |
+
[2026-04-25 18:51:17] Epoch 1 | Step 670 | Loss: 1.1591 | LR: 2.00e-05
|
| 88 |
+
[2026-04-25 18:51:20] Epoch 1 | Step 680 | Loss: 1.1578 | LR: 2.00e-05
|
| 89 |
+
[2026-04-25 18:51:23] Epoch 1 | Step 690 | Loss: 1.1557 | LR: 2.00e-05
|
| 90 |
+
[2026-04-25 18:51:25] Epoch 1 | Step 700 | Loss: 1.1555 | LR: 2.00e-05
|
| 91 |
+
[2026-04-25 18:51:27] Epoch 1 | Step 710 | Loss: 1.1538 | LR: 2.00e-05
|
| 92 |
+
[2026-04-25 18:51:30] Epoch 1 | Step 720 | Loss: 1.1524 | LR: 2.00e-05
|
| 93 |
+
[2026-04-25 18:51:32] Epoch 1 | Step 730 | Loss: 1.1515 | LR: 2.00e-05
|
| 94 |
+
[2026-04-25 18:51:35] Epoch 1 | Step 740 | Loss: 1.1496 | LR: 2.00e-05
|
| 95 |
+
[2026-04-25 18:51:37] Epoch 1 | Step 750 | Loss: 1.1476 | LR: 2.00e-05
|
| 96 |
+
[2026-04-25 18:51:40] Epoch 1 | Step 760 | Loss: 1.1481 | LR: 2.00e-05
|
| 97 |
+
[2026-04-25 18:51:42] Epoch 1 | Step 770 | Loss: 1.1486 | LR: 2.00e-05
|
| 98 |
+
[2026-04-25 18:51:45] Epoch 1 | Step 780 | Loss: 1.1472 | LR: 2.00e-05
|
| 99 |
+
[2026-04-25 18:51:47] Epoch 1 | Step 790 | Loss: 1.1457 | LR: 2.00e-05
|
| 100 |
+
[2026-04-25 18:51:50] Epoch 1 | Step 800 | Loss: 1.1437 | LR: 2.00e-05
|
| 101 |
+
[2026-04-25 18:51:52] Epoch 1 | Step 810 | Loss: 1.1427 | LR: 2.00e-05
|
| 102 |
+
[2026-04-25 18:51:55] Epoch 1 | Step 820 | Loss: 1.1412 | LR: 2.00e-05
|
| 103 |
+
[2026-04-25 18:51:57] Epoch 1 | Step 830 | Loss: 1.1406 | LR: 2.00e-05
|
| 104 |
+
[2026-04-25 18:52:00] Epoch 1 | Step 840 | Loss: 1.1392 | LR: 2.00e-05
|
| 105 |
+
[2026-04-25 18:52:03] Epoch 1 | Step 850 | Loss: 1.1368 | LR: 2.00e-05
|
| 106 |
+
[2026-04-25 18:52:05] Epoch 1 | Step 860 | Loss: 1.1367 | LR: 2.00e-05
|
| 107 |
+
[2026-04-25 18:52:07] Epoch 1 | Step 870 | Loss: 1.1369 | LR: 2.00e-05
|
| 108 |
+
[2026-04-25 18:52:10] Epoch 1 | Step 880 | Loss: 1.1365 | LR: 2.00e-05
|
| 109 |
+
[2026-04-25 18:52:12] Epoch 1 | Step 890 | Loss: 1.1358 | LR: 2.00e-05
|
| 110 |
+
[2026-04-25 18:52:15] Epoch 1 | Step 900 | Loss: 1.1344 | LR: 2.00e-05
|
| 111 |
+
[2026-04-25 18:52:17] Epoch 1 | Step 910 | Loss: 1.1343 | LR: 2.00e-05
|
| 112 |
+
[2026-04-25 18:52:20] Epoch 1 | Step 920 | Loss: 1.1349 | LR: 2.00e-05
|
| 113 |
+
[2026-04-25 18:52:22] Epoch 1 | Step 930 | Loss: 1.1340 | LR: 2.00e-05
|
| 114 |
+
[2026-04-25 18:52:25] Epoch 1 | Step 940 | Loss: 1.1336 | LR: 2.00e-05
|
| 115 |
+
[2026-04-25 18:52:27] Epoch 1 | Step 950 | Loss: 1.1315 | LR: 2.00e-05
|
| 116 |
+
[2026-04-25 18:52:31] Epoch 1 | Step 960 | Loss: 1.1304 | LR: 2.00e-05
|
| 117 |
+
[2026-04-25 18:52:33] Epoch 1 | Step 970 | Loss: 1.1299 | LR: 2.00e-05
|
| 118 |
+
[2026-04-25 18:52:35] Epoch 1 | Step 980 | Loss: 1.1287 | LR: 2.00e-05
|
| 119 |
+
[2026-04-25 18:52:38] Epoch 1 | Step 990 | Loss: 1.1267 | LR: 2.00e-05
|
| 120 |
+
[2026-04-25 18:52:41] Epoch 1 | Step 1000 | Loss: 1.1258 | LR: 2.00e-05
|
| 121 |
+
[2026-04-25 18:52:43] Epoch 1 | Step 1010 | Loss: 1.1258 | LR: 2.00e-05
|
| 122 |
+
[2026-04-25 18:52:46] Epoch 1 | Step 1020 | Loss: 1.1252 | LR: 2.00e-05
|
| 123 |
+
[2026-04-25 18:52:48] Epoch 1 | Step 1030 | Loss: 1.1253 | LR: 2.00e-05
|
| 124 |
+
[2026-04-25 18:52:51] Epoch 1 | Step 1040 | Loss: 1.1235 | LR: 2.00e-05
|
| 125 |
+
[2026-04-25 18:52:53] Epoch 1 | Step 1050 | Loss: 1.1223 | LR: 2.00e-05
|
| 126 |
+
[2026-04-25 18:52:55] Epoch 1 | Step 1060 | Loss: 1.1201 | LR: 2.00e-05
|
| 127 |
+
[2026-04-25 18:52:58] Epoch 1 | Step 1070 | Loss: 1.1190 | LR: 2.00e-05
|
| 128 |
+
[2026-04-25 18:53:00] Epoch 1 | Step 1080 | Loss: 1.1196 | LR: 2.00e-05
|
| 129 |
+
[2026-04-25 18:53:03] Epoch 1 | Step 1090 | Loss: 1.1210 | LR: 2.00e-05
|
| 130 |
+
[2026-04-25 18:53:05] Epoch 1 | Step 1100 | Loss: 1.1201 | LR: 2.00e-05
|
| 131 |
+
[2026-04-25 18:53:08] Epoch 1 | Step 1110 | Loss: 1.1202 | LR: 2.00e-05
|
| 132 |
+
[2026-04-25 18:53:10] Epoch 1 | Step 1120 | Loss: 1.1203 | LR: 2.00e-05
|
| 133 |
+
[2026-04-25 18:53:13] Epoch 1 | Step 1130 | Loss: 1.1203 | LR: 2.00e-05
|
| 134 |
+
[2026-04-25 18:53:16] Epoch 1 | Step 1140 | Loss: 1.1196 | LR: 2.00e-05
|
| 135 |
+
[2026-04-25 18:53:18] Epoch 1 | Step 1150 | Loss: 1.1174 | LR: 2.00e-05
|
| 136 |
+
[2026-04-25 18:53:21] Epoch 1 | Step 1160 | Loss: 1.1177 | LR: 2.00e-05
|
| 137 |
+
[2026-04-25 18:53:23] Epoch 1 | Step 1170 | Loss: 1.1182 | LR: 2.00e-05
|
| 138 |
+
[2026-04-25 18:53:26] Epoch 1 | Step 1180 | Loss: 1.1176 | LR: 2.00e-05
|
| 139 |
+
[2026-04-25 18:53:29] Epoch 1 | Step 1190 | Loss: 1.1175 | LR: 2.00e-05
|
| 140 |
+
[2026-04-25 18:53:31] Epoch 1 | Step 1200 | Loss: 1.1163 | LR: 2.00e-05
|
| 141 |
+
[2026-04-25 18:53:34] Epoch 1 | Step 1210 | Loss: 1.1145 | LR: 2.00e-05
|
| 142 |
+
[2026-04-25 18:53:36] Epoch 1 | Step 1220 | Loss: 1.1127 | LR: 2.00e-05
|
| 143 |
+
[2026-04-25 18:53:39] Epoch 1 | Step 1230 | Loss: 1.1126 | LR: 2.00e-05
|
| 144 |
+
[2026-04-25 18:53:42] Epoch 1 | Step 1240 | Loss: 1.1121 | LR: 2.00e-05
[2026-04-25 18:53:44] Epoch 1 | Step 1250 | Loss: 1.1107 | LR: 2.00e-05
[2026-04-25 18:53:47] Epoch 1 | Step 1260 | Loss: 1.1101 | LR: 2.00e-05
[2026-04-25 18:53:49] Epoch 1 | Step 1270 | Loss: 1.1081 | LR: 2.00e-05
[2026-04-25 18:53:51] Epoch 1 | Step 1280 | Loss: 1.1084 | LR: 2.00e-05
[2026-04-25 18:53:54] Epoch 1 | Step 1290 | Loss: 1.1084 | LR: 2.00e-05
[2026-04-25 18:53:56] Epoch 1 | Step 1300 | Loss: 1.1073 | LR: 2.00e-05
[2026-04-25 18:53:59] Epoch 1 | Step 1310 | Loss: 1.1068 | LR: 2.00e-05
[2026-04-25 18:54:01] Epoch 1 | Step 1320 | Loss: 1.1067 | LR: 2.00e-05
[2026-04-25 18:54:04] Epoch 1 | Step 1330 | Loss: 1.1055 | LR: 2.00e-05
[2026-04-25 18:54:06] Epoch 1 | Step 1340 | Loss: 1.1052 | LR: 2.00e-05
[2026-04-25 18:54:09] Epoch 1 | Step 1350 | Loss: 1.1055 | LR: 2.00e-05
[2026-04-25 18:54:11] Epoch 1 | Step 1360 | Loss: 1.1049 | LR: 2.00e-05
[2026-04-25 18:54:14] Epoch 1 | Step 1370 | Loss: 1.1044 | LR: 2.00e-05
[2026-04-25 18:54:17] Epoch 1 | Step 1380 | Loss: 1.1049 | LR: 2.00e-05
[2026-04-25 18:54:19] Epoch 1 | Step 1390 | Loss: 1.1052 | LR: 2.00e-05
[2026-04-25 18:54:22] Epoch 1 | Step 1400 | Loss: 1.1049 | LR: 2.00e-05
[2026-04-25 18:54:24] Epoch 1 | Step 1410 | Loss: 1.1035 | LR: 2.00e-05
[2026-04-25 18:54:27] Epoch 1 | Step 1420 | Loss: 1.1031 | LR: 2.00e-05
[2026-04-25 18:54:29] Epoch 1 | Step 1430 | Loss: 1.1026 | LR: 2.00e-05
[2026-04-25 18:54:32] Epoch 1 | Step 1440 | Loss: 1.1017 | LR: 2.00e-05
[2026-04-25 18:54:34] Epoch 1 | Step 1450 | Loss: 1.1011 | LR: 2.00e-05
[2026-04-25 18:54:37] Epoch 1 | Step 1460 | Loss: 1.0998 | LR: 2.00e-05
[2026-04-25 18:54:40] Epoch 1 | Step 1470 | Loss: 1.0997 | LR: 2.00e-05
[2026-04-25 18:54:42] Epoch 1 | Step 1480 | Loss: 1.0997 | LR: 2.00e-05
[2026-04-25 18:54:45] Epoch 1 | Step 1490 | Loss: 1.0997 | LR: 2.00e-05
[2026-04-25 18:54:47] Epoch 1 | Step 1500 | Loss: 1.0988 | LR: 2.00e-05
[2026-04-25 18:54:50] Epoch 1 | Step 1510 | Loss: 1.0988 | LR: 2.00e-05
[2026-04-25 18:54:52] Epoch 1 | Step 1520 | Loss: 1.0988 | LR: 2.00e-05
[2026-04-25 18:54:54] Epoch 1 | Step 1530 | Loss: 1.0982 | LR: 2.00e-05
[2026-04-25 18:54:57] Epoch 1 | Step 1540 | Loss: 1.0984 | LR: 2.00e-05
[2026-04-25 18:54:59] Epoch 1 | Step 1550 | Loss: 1.0983 | LR: 2.00e-05
[2026-04-25 18:55:02] Epoch 1 | Step 1560 | Loss: 1.0975 | LR: 2.00e-05
[2026-04-25 18:55:05] Epoch 1 | Step 1570 | Loss: 1.0979 | LR: 2.00e-05
[2026-04-25 18:55:07] Epoch 1 | Step 1580 | Loss: 1.0972 | LR: 2.00e-05
[2026-04-25 18:55:09] Epoch 1 | Step 1590 | Loss: 1.0970 | LR: 2.00e-05
[2026-04-25 18:55:12] Epoch 1 | Step 1600 | Loss: 1.0969 | LR: 2.00e-05
[2026-04-25 18:55:14] Epoch 1 | Step 1610 | Loss: 1.0955 | LR: 2.00e-05
[2026-04-25 18:55:17] Epoch 1 | Step 1620 | Loss: 1.0946 | LR: 2.00e-05
[2026-04-25 18:55:19] Epoch 1 | Step 1630 | Loss: 1.0948 | LR: 2.00e-05
[2026-04-25 18:55:22] Epoch 1 | Step 1640 | Loss: 1.0944 | LR: 2.00e-05
[2026-04-25 18:55:24] Epoch 1 | Step 1650 | Loss: 1.0936 | LR: 2.00e-05
[2026-04-25 18:55:26] Epoch 1 | Step 1660 | Loss: 1.0928 | LR: 2.00e-05
[2026-04-25 18:55:29] Epoch 1 | Step 1670 | Loss: 1.0933 | LR: 2.00e-05
[2026-04-25 18:55:31] Epoch 1 | Step 1680 | Loss: 1.0932 | LR: 2.00e-05
[2026-04-25 18:55:34] Epoch 1 | Step 1690 | Loss: 1.0927 | LR: 2.00e-05
[2026-04-25 18:55:36] Epoch 1 | Step 1700 | Loss: 1.0914 | LR: 2.00e-05
[2026-04-25 18:55:39] Epoch 1 | Step 1710 | Loss: 1.0909 | LR: 2.00e-05
[2026-04-25 18:55:41] Epoch 1 | Step 1720 | Loss: 1.0902 | LR: 2.00e-05
[2026-04-25 18:55:44] Epoch 1 | Step 1730 | Loss: 1.0898 | LR: 2.00e-05
[2026-04-25 18:55:46] Epoch 1 | Step 1740 | Loss: 1.0898 | LR: 2.00e-05
[2026-04-25 18:55:49] Epoch 1 | Step 1750 | Loss: 1.0906 | LR: 2.00e-05
[2026-04-25 18:55:51] Epoch 1 | Step 1760 | Loss: 1.0898 | LR: 2.00e-05
[2026-04-25 18:55:54] Epoch 1 | Step 1770 | Loss: 1.0899 | LR: 2.00e-05
[2026-04-25 18:55:56] Epoch 1 | Step 1780 | Loss: 1.0895 | LR: 2.00e-05
[2026-04-25 18:55:59] Epoch 1 | Step 1790 | Loss: 1.0895 | LR: 2.00e-05
[2026-04-25 18:56:01] Epoch 1 | Step 1800 | Loss: 1.0887 | LR: 2.00e-05
[2026-04-25 18:56:04] Epoch 1 | Step 1810 | Loss: 1.0886 | LR: 2.00e-05
[2026-04-25 18:56:07] Epoch 1 | Step 1820 | Loss: 1.0890 | LR: 2.00e-05
[2026-04-25 18:56:09] Epoch 1 | Step 1830 | Loss: 1.0890 | LR: 2.00e-05
[2026-04-25 18:56:12] Epoch 1 | Step 1840 | Loss: 1.0890 | LR: 2.00e-05
[2026-04-25 18:56:14] Epoch 1 | Step 1850 | Loss: 1.0884 | LR: 2.00e-05
[2026-04-25 18:56:17] Epoch 1 | Step 1860 | Loss: 1.0884 | LR: 2.00e-05
[2026-04-25 18:56:19] Epoch 1 | Step 1870 | Loss: 1.0880 | LR: 2.00e-05
[2026-04-25 18:56:22] Epoch 1 | Step 1880 | Loss: 1.0874 | LR: 2.00e-05
[2026-04-25 18:56:25] Epoch 1 | Step 1890 | Loss: 1.0876 | LR: 2.00e-05
[2026-04-25 18:56:27] Epoch 1 | Step 1900 | Loss: 1.0872 | LR: 2.00e-05
[2026-04-25 18:56:30] Epoch 1 | Step 1910 | Loss: 1.0872 | LR: 2.00e-05
[2026-04-25 18:56:32] Epoch 1 | Step 1920 | Loss: 1.0874 | LR: 2.00e-05
[2026-04-25 18:56:35] Epoch 1 | Step 1930 | Loss: 1.0871 | LR: 2.00e-05
[2026-04-25 18:56:37] Epoch 1 | Step 1940 | Loss: 1.0865 | LR: 2.00e-05
[2026-04-25 18:56:40] Epoch 1 | Step 1950 | Loss: 1.0860 | LR: 2.00e-05
[2026-04-25 18:56:43] Epoch 1 | Step 1960 | Loss: 1.0859 | LR: 2.00e-05
[2026-04-25 18:56:45] Epoch 1 | Step 1970 | Loss: 1.0858 | LR: 2.00e-05
[2026-04-25 18:56:47] Epoch 1 | Step 1980 | Loss: 1.0861 | LR: 2.00e-05
[2026-04-25 18:56:50] Epoch 1 | Step 1990 | Loss: 1.0861 | LR: 2.00e-05
[2026-04-25 18:56:52] Epoch 1 | Step 2000 | Loss: 1.0859 | LR: 2.00e-05
[2026-04-25 18:56:52] Validation | Batch 10/84 | Loss: 1.0209
[2026-04-25 18:56:53] Validation | Batch 20/84 | Loss: 1.0159
[2026-04-25 18:56:53] Validation | Batch 30/84 | Loss: 1.0893
[2026-04-25 18:56:54] Validation | Batch 40/84 | Loss: 1.0904
[2026-04-25 18:56:54] Validation | Batch 50/84 | Loss: 1.0944
[2026-04-25 18:56:55] Validation | Batch 60/84 | Loss: 1.0662
[2026-04-25 18:56:55] Validation | Batch 70/84 | Loss: 1.0474
[2026-04-25 18:56:56] Validation | Batch 80/84 | Loss: 1.0540
[2026-04-25 18:56:56] Validation | Batch 84/84 | Loss: 1.0472
[2026-04-25 18:56:56] Validation | Loss: 1.0472 | PPL: 2.92 | Time: 3.82s
[2026-04-25 18:56:58] New best model saved! Val loss: 1.0472
[2026-04-25 18:57:01] Epoch 1 | Step 2010 | Loss: 1.0858 | LR: 2.00e-05
[2026-04-25 18:57:04] Epoch 1 | Step 2020 | Loss: 1.0856 | LR: 2.00e-05
[2026-04-25 18:57:06] Epoch 1 | Step 2030 | Loss: 1.0857 | LR: 2.00e-05
[2026-04-25 18:57:08] Epoch 1 | Step 2040 | Loss: 1.0855 | LR: 2.00e-05
[2026-04-25 18:57:11] Epoch 1 | Step 2050 | Loss: 1.0854 | LR: 2.00e-05
[2026-04-25 18:57:14] Epoch 1 | Step 2060 | Loss: 1.0849 | LR: 2.00e-05
[2026-04-25 18:57:16] Epoch 1 | Step 2070 | Loss: 1.0838 | LR: 2.00e-05
[2026-04-25 18:57:18] Epoch 1 | Step 2080 | Loss: 1.0832 | LR: 2.00e-05
[2026-04-25 18:57:21] Epoch 1 | Step 2090 | Loss: 1.0835 | LR: 2.00e-05
[2026-04-25 18:57:23] Epoch 1 | Step 2100 | Loss: 1.0835 | LR: 2.00e-05
[2026-04-25 18:57:26] Epoch 1 | Step 2110 | Loss: 1.0834 | LR: 2.00e-05
[2026-04-25 18:57:29] Epoch 1 | Step 2120 | Loss: 1.0830 | LR: 2.00e-05
[2026-04-25 18:57:31] Epoch 1 | Step 2130 | Loss: 1.0830 | LR: 2.00e-05
[2026-04-25 18:57:34] Epoch 1 | Step 2140 | Loss: 1.0826 | LR: 2.00e-05
[2026-04-25 18:57:36] Epoch 1 | Step 2150 | Loss: 1.0824 | LR: 2.00e-05
[2026-04-25 18:57:38] Epoch 1 | Step 2160 | Loss: 1.0827 | LR: 2.00e-05
[2026-04-25 18:57:41] Epoch 1 | Step 2170 | Loss: 1.0822 | LR: 2.00e-05
[2026-04-25 18:57:43] Epoch 1 | Step 2180 | Loss: 1.0817 | LR: 2.00e-05
[2026-04-25 18:57:46] Epoch 1 | Step 2190 | Loss: 1.0818 | LR: 2.00e-05
[2026-04-25 18:57:48] Epoch 1 | Step 2200 | Loss: 1.0816 | LR: 2.00e-05
[2026-04-25 18:57:51] Epoch 1 | Step 2210 | Loss: 1.0813 | LR: 2.00e-05
[2026-04-25 18:57:53] Epoch 1 | Step 2220 | Loss: 1.0818 | LR: 2.00e-05
[2026-04-25 18:57:56] Epoch 1 | Step 2230 | Loss: 1.0824 | LR: 2.00e-05
[2026-04-25 18:57:58] Epoch 1 | Step 2240 | Loss: 1.0829 | LR: 2.00e-05
[2026-04-25 18:58:01] Epoch 1 | Step 2250 | Loss: 1.0831 | LR: 2.00e-05
[2026-04-25 18:58:03] Epoch 1 | Step 2260 | Loss: 1.0829 | LR: 2.00e-05
[2026-04-25 18:58:06] Epoch 1 | Step 2270 | Loss: 1.0829 | LR: 2.00e-05
[2026-04-25 18:58:08] Epoch 1 | Step 2280 | Loss: 1.0829 | LR: 2.00e-05
[2026-04-25 18:58:11] Epoch 1 | Step 2290 | Loss: 1.0837 | LR: 2.00e-05
[2026-04-25 18:58:13] Epoch 1 | Step 2300 | Loss: 1.0836 | LR: 2.00e-05
[2026-04-25 18:58:15] Epoch 1 | Step 2310 | Loss: 1.0833 | LR: 2.00e-05
[2026-04-25 18:58:18] Epoch 1 | Step 2320 | Loss: 1.0834 | LR: 2.00e-05
[2026-04-25 18:58:20] Epoch 1 | Step 2330 | Loss: 1.0831 | LR: 2.00e-05
[2026-04-25 18:58:23] Epoch 1 | Step 2340 | Loss: 1.0827 | LR: 2.00e-05
[2026-04-25 18:58:25] Epoch 1 | Step 2350 | Loss: 1.0823 | LR: 2.00e-05
[2026-04-25 18:58:28] Epoch 1 | Step 2360 | Loss: 1.0824 | LR: 2.00e-05
[2026-04-25 18:58:30] Epoch 1 | Step 2370 | Loss: 1.0822 | LR: 2.00e-05
[2026-04-25 18:58:33] Epoch 1 | Step 2380 | Loss: 1.0817 | LR: 2.00e-05
[2026-04-25 18:58:35] Epoch 1 | Step 2390 | Loss: 1.0818 | LR: 2.00e-05
[2026-04-25 18:58:38] Epoch 1 | Step 2400 | Loss: 1.0812 | LR: 2.00e-05
[2026-04-25 18:58:40] Epoch 1 | Step 2410 | Loss: 1.0814 | LR: 2.00e-05
[2026-04-25 18:58:43] Epoch 1 | Step 2420 | Loss: 1.0813 | LR: 2.00e-05
[2026-04-25 18:58:45] Epoch 1 | Step 2430 | Loss: 1.0815 | LR: 2.00e-05
[2026-04-25 18:58:48] Epoch 1 | Step 2440 | Loss: 1.0810 | LR: 2.00e-05
[2026-04-25 18:58:50] Epoch 1 | Step 2450 | Loss: 1.0806 | LR: 2.00e-05
[2026-04-25 18:58:53] Epoch 1 | Step 2460 | Loss: 1.0804 | LR: 2.00e-05
[2026-04-25 18:58:56] Epoch 1 | Step 2470 | Loss: 1.0805 | LR: 2.00e-05
[2026-04-25 18:58:58] Epoch 1 | Step 2480 | Loss: 1.0805 | LR: 2.00e-05
[2026-04-25 18:59:01] Epoch 1 | Step 2490 | Loss: 1.0800 | LR: 2.00e-05
[2026-04-25 18:59:03] Epoch 1 | Step 2500 | Loss: 1.0796 | LR: 2.00e-05
[2026-04-25 18:59:06] Epoch 1 | Step 2510 | Loss: 1.0797 | LR: 2.00e-05
[2026-04-25 18:59:08] Epoch 1 | Step 2520 | Loss: 1.0789 | LR: 2.00e-05
[2026-04-25 18:59:10] Epoch 1 | Step 2530 | Loss: 1.0785 | LR: 2.00e-05
[2026-04-25 18:59:13] Epoch 1 | Step 2540 | Loss: 1.0783 | LR: 2.00e-05
[2026-04-25 18:59:16] Epoch 1 | Step 2550 | Loss: 1.0776 | LR: 2.00e-05
[2026-04-25 18:59:18] Epoch 1 | Step 2560 | Loss: 1.0775 | LR: 2.00e-05
[2026-04-25 18:59:21] Epoch 1 | Step 2570 | Loss: 1.0779 | LR: 2.00e-05
[2026-04-25 18:59:24] Epoch 1 | Step 2580 | Loss: 1.0781 | LR: 2.00e-05
[2026-04-25 18:59:26] Epoch 1 | Step 2590 | Loss: 1.0783 | LR: 2.00e-05
[2026-04-25 18:59:29] Epoch 1 | Step 2600 | Loss: 1.0782 | LR: 2.00e-05
[2026-04-25 18:59:31] Epoch 1 | Step 2610 | Loss: 1.0781 | LR: 2.00e-05
[2026-04-25 18:59:33] Epoch 1 | Step 2620 | Loss: 1.0777 | LR: 2.00e-05
[2026-04-25 18:59:36] Epoch 1 | Step 2630 | Loss: 1.0774 | LR: 2.00e-05
[2026-04-25 18:59:38] Epoch 1 | Step 2640 | Loss: 1.0774 | LR: 2.00e-05
[2026-04-25 18:59:41] Epoch 1 | Step 2650 | Loss: 1.0770 | LR: 2.00e-05
[2026-04-25 18:59:43] Epoch 1 | Step 2660 | Loss: 1.0771 | LR: 2.00e-05
[2026-04-25 18:59:46] Epoch 1 | Step 2670 | Loss: 1.0768 | LR: 2.00e-05
[2026-04-25 18:59:48] Epoch 1 | Step 2680 | Loss: 1.0765 | LR: 2.00e-05
[2026-04-25 18:59:51] Epoch 1 | Step 2690 | Loss: 1.0763 | LR: 2.00e-05
[2026-04-25 18:59:54] Epoch 1 | Step 2700 | Loss: 1.0759 | LR: 2.00e-05
[2026-04-25 18:59:56] Epoch 1 | Step 2710 | Loss: 1.0753 | LR: 2.00e-05
[2026-04-25 18:59:59] Epoch 1 | Step 2720 | Loss: 1.0754 | LR: 2.00e-05
[2026-04-25 19:00:01] Epoch 1 | Step 2730 | Loss: 1.0751 | LR: 2.00e-05
[2026-04-25 19:00:03] Epoch 1 | Step 2740 | Loss: 1.0755 | LR: 2.00e-05
[2026-04-25 19:00:06] Epoch 1 | Step 2750 | Loss: 1.0756 | LR: 2.00e-05
[2026-04-25 19:00:08] Epoch 1 | Step 2760 | Loss: 1.0752 | LR: 2.00e-05
[2026-04-25 19:00:11] Epoch 1 | Step 2770 | Loss: 1.0750 | LR: 2.00e-05
[2026-04-25 19:00:13] Epoch 1 | Step 2780 | Loss: 1.0752 | LR: 2.00e-05
[2026-04-25 19:00:16] Epoch 1 | Step 2790 | Loss: 1.0751 | LR: 2.00e-05
[2026-04-25 19:00:18] Epoch 1 | Step 2800 | Loss: 1.0748 | LR: 2.00e-05
[2026-04-25 19:00:21] Epoch 1 | Step 2810 | Loss: 1.0748 | LR: 2.00e-05
[2026-04-25 19:00:23] Epoch 1 | Step 2820 | Loss: 1.0746 | LR: 2.00e-05
[2026-04-25 19:00:26] Epoch 1 | Step 2830 | Loss: 1.0743 | LR: 2.00e-05
[2026-04-25 19:00:28] Epoch 1 | Step 2840 | Loss: 1.0749 | LR: 2.00e-05
[2026-04-25 19:00:30] Epoch 1 | Step 2850 | Loss: 1.0748 | LR: 2.00e-05
[2026-04-25 19:00:33] Epoch 1 | Step 2860 | Loss: 1.0745 | LR: 2.00e-05
[2026-04-25 19:00:35] Epoch 1 | Step 2870 | Loss: 1.0746 | LR: 2.00e-05
[2026-04-25 19:00:38] Epoch 1 | Step 2880 | Loss: 1.0742 | LR: 2.00e-05
[2026-04-25 19:00:41] Epoch 1 | Step 2890 | Loss: 1.0740 | LR: 2.00e-05
[2026-04-25 19:00:43] Epoch 1 | Step 2900 | Loss: 1.0735 | LR: 2.00e-05
[2026-04-25 19:00:46] Epoch 1 | Step 2910 | Loss: 1.0734 | LR: 2.00e-05
[2026-04-25 19:00:49] Epoch 1 | Step 2920 | Loss: 1.0736 | LR: 2.00e-05
[2026-04-25 19:00:51] Epoch 1 | Step 2930 | Loss: 1.0734 | LR: 2.00e-05
[2026-04-25 19:00:54] Epoch 1 | Step 2940 | Loss: 1.0729 | LR: 2.00e-05
[2026-04-25 19:00:56] Epoch 1 | Step 2950 | Loss: 1.0731 | LR: 2.00e-05
[2026-04-25 19:00:59] Epoch 1 | Step 2960 | Loss: 1.0732 | LR: 2.00e-05
[2026-04-25 19:01:02] Epoch 1 | Step 2970 | Loss: 1.0732 | LR: 2.00e-05
[2026-04-25 19:01:04] Epoch 1 | Step 2980 | Loss: 1.0729 | LR: 2.00e-05
[2026-04-25 19:01:07] Epoch 1 | Step 2990 | Loss: 1.0732 | LR: 2.00e-05
[2026-04-25 19:01:09] Epoch 1 | Step 3000 | Loss: 1.0730 | LR: 2.00e-05
[2026-04-25 19:01:12] Epoch 1 | Step 3010 | Loss: 1.0731 | LR: 2.00e-05
[2026-04-25 19:01:14] Epoch 1 | Step 3020 | Loss: 1.0728 | LR: 2.00e-05
[2026-04-25 19:01:17] Epoch 1 | Step 3030 | Loss: 1.0726 | LR: 2.00e-05
[2026-04-25 19:01:20] Epoch 1 | Step 3040 | Loss: 1.0720 | LR: 2.00e-05
[2026-04-25 19:01:22] Epoch 1 | Step 3050 | Loss: 1.0715 | LR: 2.00e-05
[2026-04-25 19:01:24] Epoch 1 | Step 3060 | Loss: 1.0714 | LR: 2.00e-05
[2026-04-25 19:01:27] Epoch 1 | Step 3070 | Loss: 1.0711 | LR: 2.00e-05
[2026-04-25 19:01:30] Epoch 1 | Step 3080 | Loss: 1.0712 | LR: 2.00e-05
[2026-04-25 19:01:32] Epoch 1 | Step 3090 | Loss: 1.0708 | LR: 2.00e-05
[2026-04-25 19:01:34] Epoch 1 | Step 3100 | Loss: 1.0706 | LR: 2.00e-05
[2026-04-25 19:01:37] Epoch 1 | Step 3110 | Loss: 1.0703 | LR: 2.00e-05
[2026-04-25 19:01:39] Epoch 1 | Step 3120 | Loss: 1.0707 | LR: 2.00e-05
[2026-04-25 19:01:42] Epoch 1 | Step 3130 | Loss: 1.0704 | LR: 2.00e-05
[2026-04-25 19:01:44] Epoch 1 | Step 3140 | Loss: 1.0704 | LR: 2.00e-05
[2026-04-25 19:01:47] Epoch 1 | Step 3150 | Loss: 1.0706 | LR: 2.00e-05
[2026-04-25 19:01:50] Epoch 1 | Step 3160 | Loss: 1.0706 | LR: 2.00e-05
[2026-04-25 19:01:52] Epoch 1 | Step 3170 | Loss: 1.0705 | LR: 2.00e-05
[2026-04-25 19:01:55] Epoch 1 | Step 3180 | Loss: 1.0705 | LR: 2.00e-05
[2026-04-25 19:01:57] Epoch 1 | Step 3190 | Loss: 1.0700 | LR: 2.00e-05
[2026-04-25 19:01:59] Epoch 1 | Step 3200 | Loss: 1.0698 | LR: 2.00e-05
[2026-04-25 19:02:02] Epoch 1 | Step 3210 | Loss: 1.0696 | LR: 2.00e-05
[2026-04-25 19:02:05] Epoch 1 | Step 3220 | Loss: 1.0691 | LR: 2.00e-05
[2026-04-25 19:02:07] Epoch 1 | Step 3230 | Loss: 1.0695 | LR: 2.00e-05
[2026-04-25 19:02:09] Epoch 1 | Step 3240 | Loss: 1.0693 | LR: 2.00e-05
[2026-04-25 19:02:12] Epoch 1 | Step 3250 | Loss: 1.0694 | LR: 2.00e-05
[2026-04-25 19:02:15] Epoch 1 | Step 3260 | Loss: 1.0692 | LR: 2.00e-05
[2026-04-25 19:02:17] Epoch 1 | Step 3270 | Loss: 1.0690 | LR: 2.00e-05
[2026-04-25 19:02:20] Epoch 1 | Step 3280 | Loss: 1.0686 | LR: 2.00e-05
[2026-04-25 19:02:22] Epoch 1 | Step 3290 | Loss: 1.0684 | LR: 2.00e-05
[2026-04-25 19:02:25] Epoch 1 | Step 3300 | Loss: 1.0684 | LR: 2.00e-05
[2026-04-25 19:02:27] Epoch 1 | Step 3310 | Loss: 1.0682 | LR: 2.00e-05
[2026-04-25 19:02:30] Epoch 1 | Step 3320 | Loss: 1.0682 | LR: 2.00e-05
[2026-04-25 19:02:33] Epoch 1 | Step 3330 | Loss: 1.0680 | LR: 2.00e-05
[2026-04-25 19:02:35] Epoch 1 | Step 3340 | Loss: 1.0681 | LR: 2.00e-05
[2026-04-25 19:02:38] Epoch 1 | Step 3350 | Loss: 1.0677 | LR: 2.00e-05
[2026-04-25 19:02:40] Epoch 1 | Step 3360 | Loss: 1.0675 | LR: 2.00e-05
[2026-04-25 19:02:43] Epoch 1 | Step 3370 | Loss: 1.0676 | LR: 2.00e-05
[2026-04-25 19:02:45] Epoch 1 | Step 3380 | Loss: 1.0671 | LR: 2.00e-05
[2026-04-25 19:02:48] Epoch 1 | Step 3390 | Loss: 1.0673 | LR: 2.00e-05
[2026-04-25 19:02:51] Epoch 1 | Step 3400 | Loss: 1.0677 | LR: 2.00e-05
[2026-04-25 19:02:53] Epoch 1 | Step 3410 | Loss: 1.0675 | LR: 2.00e-05
[2026-04-25 19:02:56] Epoch 1 | Step 3420 | Loss: 1.0671 | LR: 2.00e-05
[2026-04-25 19:02:59] Epoch 1 | Step 3430 | Loss: 1.0671 | LR: 2.00e-05
[2026-04-25 19:03:01] Epoch 1 | Step 3440 | Loss: 1.0672 | LR: 2.00e-05
[2026-04-25 19:03:04] Epoch 1 | Step 3450 | Loss: 1.0670 | LR: 2.00e-05
[2026-04-25 19:03:06] Epoch 1 | Step 3460 | Loss: 1.0669 | LR: 2.00e-05
[2026-04-25 19:03:09] Epoch 1 | Step 3470 | Loss: 1.0668 | LR: 2.00e-05
[2026-04-25 19:03:11] Epoch 1 | Step 3480 | Loss: 1.0667 | LR: 2.00e-05
[2026-04-25 19:03:13] Epoch 1 | Step 3490 | Loss: 1.0665 | LR: 2.00e-05
[2026-04-25 19:03:16] Epoch 1 | Step 3500 | Loss: 1.0662 | LR: 2.00e-05
[2026-04-25 19:03:19] Epoch 1 | Step 3510 | Loss: 1.0664 | LR: 2.00e-05
[2026-04-25 19:03:21] Epoch 1 | Step 3520 | Loss: 1.0660 | LR: 2.00e-05
[2026-04-25 19:03:24] Epoch 1 | Step 3530 | Loss: 1.0662 | LR: 2.00e-05
[2026-04-25 19:03:26] Epoch 1 | Step 3540 | Loss: 1.0659 | LR: 2.00e-05
[2026-04-25 19:03:29] Epoch 1 | Step 3550 | Loss: 1.0658 | LR: 2.00e-05
[2026-04-25 19:03:32] Epoch 1 | Step 3560 | Loss: 1.0657 | LR: 2.00e-05
[2026-04-25 19:03:34] Epoch 1 | Step 3570 | Loss: 1.0656 | LR: 2.00e-05
[2026-04-25 19:03:37] Epoch 1 | Step 3580 | Loss: 1.0655 | LR: 2.00e-05
[2026-04-25 19:03:39] Epoch 1 | Step 3590 | Loss: 1.0654 | LR: 2.00e-05
[2026-04-25 19:03:42] Epoch 1 | Step 3600 | Loss: 1.0650 | LR: 2.00e-05
[2026-04-25 19:03:44] Epoch 1 | Step 3610 | Loss: 1.0648 | LR: 2.00e-05
[2026-04-25 19:03:47] Epoch 1 | Step 3620 | Loss: 1.0646 | LR: 2.00e-05
[2026-04-25 19:03:49] Epoch 1 | Step 3630 | Loss: 1.0649 | LR: 2.00e-05
[2026-04-25 19:03:51] Epoch 1 | Step 3640 | Loss: 1.0651 | LR: 2.00e-05
[2026-04-25 19:03:54] Epoch 1 | Step 3650 | Loss: 1.0651 | LR: 2.00e-05
[2026-04-25 19:03:56] Epoch 1 | Step 3660 | Loss: 1.0650 | LR: 2.00e-05
[2026-04-25 19:03:59] Epoch 1 | Step 3670 | Loss: 1.0646 | LR: 2.00e-05
[2026-04-25 19:04:01] Epoch 1 | Step 3680 | Loss: 1.0646 | LR: 2.00e-05
[2026-04-25 19:04:04] Epoch 1 | Step 3690 | Loss: 1.0644 | LR: 2.00e-05
[2026-04-25 19:04:06] Epoch 1 | Step 3700 | Loss: 1.0642 | LR: 2.00e-05
[2026-04-25 19:04:09] Epoch 1 | Step 3710 | Loss: 1.0640 | LR: 2.00e-05
[2026-04-25 19:04:12] Epoch 1 | Step 3720 | Loss: 1.0639 | LR: 2.00e-05
[2026-04-25 19:04:14] Epoch 1 | Step 3730 | Loss: 1.0640 | LR: 2.00e-05
[2026-04-25 19:04:16] Epoch 1 | Step 3740 | Loss: 1.0641 | LR: 2.00e-05
[2026-04-25 19:04:19] Epoch 1 | Step 3750 | Loss: 1.0638 | LR: 2.00e-05
[2026-04-25 19:04:22] Epoch 1 | Step 3760 | Loss: 1.0639 | LR: 2.00e-05
[2026-04-25 19:04:24] Epoch 1 | Step 3770 | Loss: 1.0640 | LR: 2.00e-05
[2026-04-25 19:04:27] Epoch 1 | Step 3780 | Loss: 1.0639 | LR: 2.00e-05
[2026-04-25 19:04:29] Epoch 1 | Step 3790 | Loss: 1.0639 | LR: 2.00e-05
[2026-04-25 19:04:32] Epoch 1 | Step 3800 | Loss: 1.0641 | LR: 2.00e-05
[2026-04-25 19:04:34] Epoch 1 | Step 3810 | Loss: 1.0635 | LR: 2.00e-05
[2026-04-25 19:04:37] Epoch 1 | Step 3820 | Loss: 1.0633 | LR: 2.00e-05
[2026-04-25 19:04:39] Epoch 1 | Step 3830 | Loss: 1.0631 | LR: 2.00e-05
[2026-04-25 19:04:42] Epoch 1 | Step 3840 | Loss: 1.0632 | LR: 2.00e-05
[2026-04-25 19:04:44] Epoch 1 | Step 3850 | Loss: 1.0629 | LR: 2.00e-05
[2026-04-25 19:04:47] Epoch 1 | Step 3860 | Loss: 1.0627 | LR: 2.00e-05
[2026-04-25 19:04:49] Epoch 1 | Step 3870 | Loss: 1.0627 | LR: 2.00e-05
[2026-04-25 19:04:52] Epoch 1 | Step 3880 | Loss: 1.0621 | LR: 2.00e-05
[2026-04-25 19:04:55] Epoch 1 | Step 3890 | Loss: 1.0619 | LR: 2.00e-05
[2026-04-25 19:04:57] Epoch 1 | Step 3900 | Loss: 1.0619 | LR: 2.00e-05
[2026-04-25 19:05:00] Epoch 1 | Step 3910 | Loss: 1.0621 | LR: 2.00e-05
[2026-04-25 19:05:02] Epoch 1 | Step 3920 | Loss: 1.0622 | LR: 2.00e-05
[2026-04-25 19:05:04] Epoch 1 | Step 3930 | Loss: 1.0620 | LR: 2.00e-05
[2026-04-25 19:05:07] Epoch 1 | Step 3940 | Loss: 1.0620 | LR: 2.00e-05
[2026-04-25 19:05:10] Epoch 1 | Step 3950 | Loss: 1.0617 | LR: 2.00e-05
[2026-04-25 19:05:12] Epoch 1 | Step 3960 | Loss: 1.0617 | LR: 2.00e-05
[2026-04-25 19:05:15] Epoch 1 | Step 3970 | Loss: 1.0615 | LR: 2.00e-05
[2026-04-25 19:05:18] Epoch 1 | Step 3980 | Loss: 1.0614 | LR: 2.00e-05
[2026-04-25 19:05:20] Epoch 1 | Step 3990 | Loss: 1.0612 | LR: 1.99e-05
[2026-04-25 19:05:22] Epoch 1 | Step 4000 | Loss: 1.0611 | LR: 1.99e-05
[2026-04-25 19:05:23] Validation | Batch 10/84 | Loss: 0.9893
[2026-04-25 19:05:23] Validation | Batch 20/84 | Loss: 0.9886
[2026-04-25 19:05:24] Validation | Batch 30/84 | Loss: 1.0646
[2026-04-25 19:05:24] Validation | Batch 40/84 | Loss: 1.0736
[2026-04-25 19:05:25] Validation | Batch 50/84 | Loss: 1.0745
[2026-04-25 19:05:25] Validation | Batch 60/84 | Loss: 1.0514
[2026-04-25 19:05:26] Validation | Batch 70/84 | Loss: 1.0341
[2026-04-25 19:05:26] Validation | Batch 80/84 | Loss: 1.0417
[2026-04-25 19:05:26] Validation | Batch 84/84 | Loss: 1.0352
[2026-04-25 19:05:27] Validation | Loss: 1.0352 | PPL: 2.88 | Time: 3.75s
[2026-04-25 19:05:29] New best model saved! Val loss: 1.0352
[2026-04-25 19:05:31] Epoch 1 | Step 4010 | Loss: 1.0610 | LR: 1.99e-05
[2026-04-25 19:05:34] Epoch 1 | Step 4020 | Loss: 1.0611 | LR: 1.98e-05
[2026-04-25 19:05:36] Epoch 1 | Step 4030 | Loss: 1.0608 | LR: 1.97e-05
[2026-04-25 19:05:38] Epoch 1 | Step 4040 | Loss: 1.0604 | LR: 1.97e-05
[2026-04-25 19:05:41] Epoch 1 | Step 4050 | Loss: 1.0602 | LR: 1.96e-05
[2026-04-25 19:05:43] Epoch 1 | Step 4060 | Loss: 1.0596 | LR: 1.95e-05
[2026-04-25 19:05:46] Epoch 1 | Step 4070 | Loss: 1.0596 | LR: 1.94e-05
[2026-04-25 19:05:48] Epoch 1 | Step 4080 | Loss: 1.0596 | LR: 1.93e-05
[2026-04-25 19:05:51] Epoch 1 | Step 4090 | Loss: 1.0597 | LR: 1.92e-05
[2026-04-25 19:05:53] Epoch 1 | Step 4100 | Loss: 1.0598 | LR: 1.91e-05
[2026-04-25 19:05:56] Epoch 1 | Step 4110 | Loss: 1.0597 | LR: 1.89e-05
[2026-04-25 19:05:58] Epoch 1 | Step 4120 | Loss: 1.0599 | LR: 1.88e-05
[2026-04-25 19:06:01] Epoch 1 | Step 4130 | Loss: 1.0597 | LR: 1.86e-05
[2026-04-25 19:06:04] Epoch 1 | Step 4140 | Loss: 1.0598 | LR: 1.85e-05
[2026-04-25 19:06:06] Epoch 1 | Step 4150 | Loss: 1.0603 | LR: 1.83e-05
[2026-04-25 19:06:09] Epoch 1 | Step 4160 | Loss: 1.0605 | LR: 1.82e-05
[2026-04-25 19:06:11] Epoch 1 | Step 4170 | Loss: 1.0603 | LR: 1.80e-05
[2026-04-25 19:06:14] Epoch 1 | Step 4180 | Loss: 1.0603 | LR: 1.78e-05
[2026-04-25 19:06:17] Epoch 1 | Step 4190 | Loss: 1.0601 | LR: 1.76e-05
[2026-04-25 19:06:19] Epoch 1 | Step 4200 | Loss: 1.0604 | LR: 1.74e-05
[2026-04-25 19:06:22] Epoch 1 | Step 4210 | Loss: 1.0603 | LR: 1.72e-05
[2026-04-25 19:06:25] Epoch 1 | Step 4220 | Loss: 1.0607 | LR: 1.70e-05
[2026-04-25 19:06:27] Epoch 1 | Step 4230 | Loss: 1.0608 | LR: 1.68e-05
[2026-04-25 19:06:30] Epoch 1 | Step 4240 | Loss: 1.0609 | LR: 1.66e-05
[2026-04-25 19:06:33] Epoch 1 | Step 4250 | Loss: 1.0609 | LR: 1.63e-05
[2026-04-25 19:06:35] Epoch 1 | Step 4260 | Loss: 1.0605 | LR: 1.61e-05
[2026-04-25 19:06:37] Epoch 1 | Step 4270 | Loss: 1.0608 | LR: 1.59e-05
[2026-04-25 19:06:40] Epoch 1 | Step 4280 | Loss: 1.0606 | LR: 1.56e-05
[2026-04-25 19:06:42] Epoch 1 | Step 4290 | Loss: 1.0603 | LR: 1.54e-05
[2026-04-25 19:06:45] Epoch 1 | Step 4300 | Loss: 1.0604 | LR: 1.51e-05
[2026-04-25 19:06:47] Epoch 1 | Step 4310 | Loss: 1.0605 | LR: 1.49e-05
[2026-04-25 19:06:50] Epoch 1 | Step 4320 | Loss: 1.0606 | LR: 1.46e-05
[2026-04-25 19:06:53] Epoch 1 | Step 4330 | Loss: 1.0604 | LR: 1.43e-05
[2026-04-25 19:06:55] Epoch 1 | Step 4340 | Loss: 1.0603 | LR: 1.41e-05
[2026-04-25 19:06:57] Epoch 1 | Step 4350 | Loss: 1.0600 | LR: 1.38e-05
[2026-04-25 19:07:00] Epoch 1 | Step 4360 | Loss: 1.0599 | LR: 1.35e-05
[2026-04-25 19:07:03] Epoch 1 | Step 4370 | Loss: 1.0600 | LR: 1.32e-05
[2026-04-25 19:07:05] Epoch 1 | Step 4380 | Loss: 1.0598 | LR: 1.30e-05
[2026-04-25 19:07:07] Epoch 1 | Step 4390 | Loss: 1.0598 | LR: 1.27e-05
[2026-04-25 19:07:10] Epoch 1 | Step 4400 | Loss: 1.0596 | LR: 1.24e-05
[2026-04-25 19:07:12] Epoch 1 | Step 4410 | Loss: 1.0591 | LR: 1.21e-05
[2026-04-25 19:07:15] Epoch 1 | Step 4420 | Loss: 1.0594 | LR: 1.18e-05
[2026-04-25 19:07:17] Epoch 1 | Step 4430 | Loss: 1.0593 | LR: 1.16e-05
[2026-04-25 19:07:20] Epoch 1 | Step 4440 | Loss: 1.0596 | LR: 1.13e-05
[2026-04-25 19:07:23] Epoch 1 | Step 4450 | Loss: 1.0594 | LR: 1.10e-05
[2026-04-25 19:07:25] Epoch 1 | Step 4460 | Loss: 1.0597 | LR: 1.07e-05
[2026-04-25 19:07:28] Epoch 1 | Step 4470 | Loss: 1.0595 | LR: 1.04e-05
[2026-04-25 19:07:30] Epoch 1 | Step 4480 | Loss: 1.0593 | LR: 1.01e-05
[2026-04-25 19:07:33] Epoch 1 | Step 4490 | Loss: 1.0591 | LR: 9.84e-06
[2026-04-25 19:07:35] Epoch 1 | Step 4500 | Loss: 1.0592 | LR: 9.55e-06
[2026-04-25 19:07:38] Epoch 1 | Step 4510 | Loss: 1.0588 | LR: 9.27e-06
[2026-04-25 19:07:40] Epoch 1 | Step 4520 | Loss: 1.0585 | LR: 8.99e-06
[2026-04-25 19:07:43] Epoch 1 | Step 4530 | Loss: 1.0582 | LR: 8.72e-06
[2026-04-25 19:07:46] Epoch 1 | Step 4540 | Loss: 1.0581 | LR: 8.44e-06
[2026-04-25 19:07:49] Epoch 1 | Step 4550 | Loss: 1.0577 | LR: 8.17e-06
[2026-04-25 19:07:51] Epoch 1 | Step 4560 | Loss: 1.0577 | LR: 7.90e-06
[2026-04-25 19:07:54] Epoch 1 | Step 4570 | Loss: 1.0577 | LR: 7.63e-06
[2026-04-25 19:07:56] Epoch 1 | Step 4580 | Loss: 1.0575 | LR: 7.37e-06
[2026-04-25 19:07:59] Epoch 1 | Step 4590 | Loss: 1.0573 | LR: 7.11e-06
[2026-04-25 19:08:01] Epoch 1 | Step 4600 | Loss: 1.0571 | LR: 6.85e-06
[2026-04-25 19:08:03] Epoch 1 | Step 4610 | Loss: 1.0570 | LR: 6.60e-06
[2026-04-25 19:08:06] Epoch 1 | Step 4620 | Loss: 1.0570 | LR: 6.35e-06
[2026-04-25 19:08:08] Epoch 1 | Step 4630 | Loss: 1.0569 | LR: 6.11e-06
[2026-04-25 19:08:11] Epoch 1 | Step 4640 | Loss: 1.0568 | LR: 5.87e-06
[2026-04-25 19:08:13] Epoch 1 | Step 4650 | Loss: 1.0568 | LR: 5.64e-06
[2026-04-25 19:08:16] Epoch 1 | Step 4660 | Loss: 1.0566 | LR: 5.41e-06
[2026-04-25 19:08:18] Epoch 1 | Step 4670 | Loss: 1.0564 | LR: 5.19e-06
[2026-04-25 19:08:21] Epoch 1 | Step 4680 | Loss: 1.0565 | LR: 4.98e-06
[2026-04-25 19:08:24] Epoch 1 | Step 4690 | Loss: 1.0563 | LR: 4.77e-06
[2026-04-25 19:08:26] Epoch 1 | Step 4700 | Loss: 1.0565 | LR: 4.56e-06
[2026-04-25 19:08:29] Epoch 1 | Step 4710 | Loss: 1.0563 | LR: 4.37e-06
[2026-04-25 19:08:31] Epoch 1 | Step 4720 | Loss: 1.0562 | LR: 4.18e-06
[2026-04-25 19:08:33] Epoch 1 | Step 4730 | Loss: 1.0562 | LR: 3.99e-06
[2026-04-25 19:08:36] Epoch 1 | Step 4740 | Loss: 1.0560 | LR: 3.82e-06
[2026-04-25 19:08:38] Epoch 1 | Step 4750 | Loss: 1.0559 | LR: 3.65e-06
[2026-04-25 19:08:40] Epoch 1 | Step 4760 | Loss: 1.0557 | LR: 3.49e-06
[2026-04-25 19:08:43] Epoch 1 | Step 4770 | Loss: 1.0554 | LR: 3.33e-06
[2026-04-25 19:08:46] Epoch 1 | Step 4780 | Loss: 1.0554 | LR: 3.19e-06
[2026-04-25 19:08:48] Epoch 1 | Step 4790 | Loss: 1.0553 | LR: 3.05e-06
[2026-04-25 19:08:51] Epoch 1 | Step 4800 | Loss: 1.0551 | LR: 2.92e-06
[2026-04-25 19:08:53] Epoch 1 | Step 4810 | Loss: 1.0548 | LR: 2.80e-06
[2026-04-25 19:08:56] Epoch 1 | Step 4820 | Loss: 1.0545 | LR: 2.68e-06
[2026-04-25 19:08:58] Epoch 1 | Step 4830 | Loss: 1.0542 | LR: 2.58e-06
[2026-04-25 19:09:01] Epoch 1 | Step 4840 | Loss: 1.0541 | LR: 2.48e-06
[2026-04-25 19:09:04] Epoch 1 | Step 4850 | Loss: 1.0542 | LR: 2.39e-06
[2026-04-25 19:09:06] Epoch 1 | Step 4860 | Loss: 1.0544 | LR: 2.32e-06
[2026-04-25 19:09:09] Epoch 1 | Step 4870 | Loss: 1.0545 | LR: 2.24e-06
[2026-04-25 19:09:11] Epoch 1 | Step 4880 | Loss: 1.0544 | LR: 2.18e-06
[2026-04-25 19:09:14] Epoch 1 | Step 4890 | Loss: 1.0541 | LR: 2.13e-06
[2026-04-25 19:09:17] Epoch 1 | Step 4900 | Loss: 1.0541 | LR: 2.09e-06
[2026-04-25 19:09:19] Epoch 1 | Step 4910 | Loss: 1.0539 | LR: 2.05e-06
[2026-04-25 19:09:22] Epoch 1 | Step 4920 | Loss: 1.0539 | LR: 2.03e-06
[2026-04-25 19:09:24] Epoch 1 | Step 4930 | Loss: 1.0538 | LR: 2.01e-06
[2026-04-25 19:09:26] Epoch 1 | Step 4940 | Loss: 1.0537 | LR: 2.00e-06
[2026-04-25 19:09:29] Epoch 1 | Step 4950 | Loss: 1.0536 | LR: 2.00e-06
[2026-04-25 19:09:31] Epoch 1 | Step 4960 | Loss: 1.0536 | LR: 2.00e-06
[2026-04-25 19:09:34] Epoch 1 | Step 4970 | Loss: 1.0534 | LR: 2.00e-06
[2026-04-25 19:09:36] Epoch 1 | Step 4980 | Loss: 1.0533 | LR: 2.00e-06
[2026-04-25 19:09:39] Epoch 1 | Step 4990 | Loss: 1.0530 | LR: 2.00e-06
[2026-04-25 19:09:41] Epoch 1 | Step 5000 | Loss: 1.0532 | LR: 2.00e-06
[2026-04-25 19:09:44] Epoch 1 | Step 5010 | Loss: 1.0530 | LR: 2.00e-06
[2026-04-25 19:09:46] Epoch 1 | Step 5020 | Loss: 1.0527 | LR: 2.00e-06
[2026-04-25 19:09:48] Epoch 1 | Step 5030 | Loss: 1.0527 | LR: 2.00e-06
[2026-04-25 19:09:51] Epoch 1 | Step 5040 | Loss: 1.0525 | LR: 2.00e-06
[2026-04-25 19:09:53] Epoch 1 | Step 5050 | Loss: 1.0524 | LR: 2.00e-06
[2026-04-25 19:09:56] Epoch 1 | Step 5060 | Loss: 1.0523 | LR: 2.00e-06
[2026-04-25 19:09:59] Epoch 1 | Step 5070 | Loss: 1.0522 | LR: 2.00e-06
[2026-04-25 19:10:01] Epoch 1 | Step 5080 | Loss: 1.0523 | LR: 2.00e-06
[2026-04-25 19:10:04] Epoch 1 | Step 5090 | Loss: 1.0523 | LR: 2.00e-06
[2026-04-25 19:10:07] Epoch 1 | Step 5100 | Loss: 1.0521 | LR: 2.00e-06
[2026-04-25 19:10:09] Epoch 1 | Step 5110 | Loss: 1.0520 | LR: 2.00e-06
[2026-04-25 19:10:12] Epoch 1 | Step 5120 | Loss: 1.0521 | LR: 2.00e-06
[2026-04-25 19:10:14] Epoch 1 | Step 5130 | Loss: 1.0520 | LR: 2.00e-06
[2026-04-25 19:10:17] Epoch 1 | Step 5140 | Loss: 1.0518 | LR: 2.00e-06
[2026-04-25 19:10:19] Epoch 1 | Step 5150 | Loss: 1.0516 | LR: 2.00e-06
[2026-04-25 19:10:22] Epoch 1 | Step 5160 | Loss: 1.0511 | LR: 2.00e-06
[2026-04-25 19:10:24] Epoch 1 | Step 5170 | Loss: 1.0511 | LR: 2.00e-06
[2026-04-25 19:10:27] Epoch 1 | Step 5180 | Loss: 1.0510 | LR: 2.00e-06
[2026-04-25 19:10:29] Epoch 1 | Step 5190 | Loss: 1.0510 | LR: 2.00e-06
[2026-04-25 19:10:32] Epoch 1 | Step 5200 | Loss: 1.0509 | LR: 2.00e-06
[2026-04-25 19:10:35] Epoch 1 | Step 5210 | Loss: 1.0508 | LR: 2.00e-06
[2026-04-25 19:10:37] Epoch 1 | Step 5220 | Loss: 1.0507 | LR: 2.00e-06
[2026-04-25 19:10:40] Epoch 1 | Step 5230 | Loss: 1.0506 | LR: 2.00e-06
[2026-04-25 19:10:43] Epoch 1 | Step 5240 | Loss: 1.0506 | LR: 2.00e-06
[2026-04-25 19:10:45] Epoch 1 | Step 5250 | Loss: 1.0506 | LR: 2.00e-06
[2026-04-25 19:10:48] Epoch 1 | Step 5260 | Loss: 1.0505 | LR: 2.00e-06
[2026-04-25 19:10:50] Epoch 1 | Step 5270 | Loss: 1.0504 | LR: 2.00e-06
[2026-04-25 19:10:53] Epoch 1 | Step 5280 | Loss: 1.0501 | LR: 2.00e-06
[2026-04-25 19:10:55] Epoch 1 | Step 5290 | Loss: 1.0498 | LR: 2.00e-06
[2026-04-25 19:10:58] Epoch 1 | Step 5300 | Loss: 1.0497 | LR: 2.00e-06
[2026-04-25 19:11:00] Epoch 1 | Step 5310 | Loss: 1.0498 | LR: 2.00e-06
[2026-04-25 19:11:03] Epoch 1 | Step 5320 | Loss: 1.0496 | LR: 2.00e-06
[2026-04-25 19:11:05] Epoch 1 | Step 5330 | Loss: 1.0496 | LR: 2.00e-06
[2026-04-25 19:11:08] Epoch 1 | Step 5340 | Loss: 1.0494 | LR: 2.00e-06
[2026-04-25 19:11:10] Epoch 1 | Step 5350 | Loss: 1.0493 | LR: 2.00e-06
[2026-04-25 19:11:13] Epoch 1 | Step 5360 | Loss: 1.0493 | LR: 2.00e-06
[2026-04-25 19:11:15] Epoch 1 | Step 5370 | Loss: 1.0493 | LR: 2.00e-06
[2026-04-25 19:11:17] Epoch 1 | Step 5380 | Loss: 1.0491 | LR: 2.00e-06
[2026-04-25 19:11:20] Epoch 1 | Step 5390 | Loss: 1.0489 | LR: 2.00e-06
[2026-04-25 19:11:23] Epoch 1 | Step 5400 | Loss: 1.0486 | LR: 2.00e-06
[2026-04-25 19:11:25] Epoch 1 | Step 5410 | Loss: 1.0486 | LR: 2.00e-06
[2026-04-25 19:11:28] Epoch 1 | Step 5420 | Loss: 1.0484 | LR: 2.00e-06
[2026-04-25 19:11:30] Epoch 1 | Step 5430 | Loss: 1.0483 | LR: 2.00e-06
[2026-04-25 19:11:32] Epoch 1 | Step 5440 | Loss: 1.0484 | LR: 2.00e-06
[2026-04-25 19:11:35] Epoch 1 | Step 5450 | Loss: 1.0485 | LR: 2.00e-06
[2026-04-25 19:11:38] Epoch 1 | Step 5460 | Loss: 1.0483 | LR: 2.00e-06
[2026-04-25 19:11:40] Epoch 1 | Step 5470 | Loss: 1.0481 | LR: 2.00e-06
[2026-04-25 19:11:43] Epoch 1 | Step 5480 | Loss: 1.0481 | LR: 2.00e-06
[2026-04-25 19:11:45] Epoch 1 | Step 5490 | Loss: 1.0482 | LR: 2.00e-06
[2026-04-25 19:11:48] Epoch 1 | Step 5500 | Loss: 1.0481 | LR: 2.00e-06
[2026-04-25 19:11:50] Epoch 1 | Step 5510 | Loss: 1.0482 | LR: 2.00e-06
[2026-04-25 19:11:53] Epoch 1 | Step 5520 | Loss: 1.0481 | LR: 2.00e-06
[2026-04-25 19:11:55] Epoch 1 | Step 5530 | Loss: 1.0480 | LR: 2.00e-06
[2026-04-25 19:11:58] Epoch 1 | Step 5540 | Loss: 1.0476 | LR: 2.00e-06
[2026-04-25 19:12:00] Epoch 1 | Step 5550 | Loss: 1.0476 | LR: 2.00e-06
[2026-04-25 19:12:03] Epoch 1 | Step 5560 | Loss: 1.0475 | LR: 2.00e-06
[2026-04-25 19:12:05] Epoch 1 | Step 5570 | Loss: 1.0478 | LR: 2.00e-06
[2026-04-25 19:12:08] Epoch 1 | Step 5580 | Loss: 1.0476 | LR: 2.00e-06
[2026-04-25 19:12:10] Epoch 1 | Step 5590 | Loss: 1.0473 | LR: 2.00e-06
[2026-04-25 19:12:13] Epoch 1 | Step 5600 | Loss: 1.0475 | LR: 2.00e-06
[2026-04-25 19:12:16] Epoch 1 | Step 5610 | Loss: 1.0475 | LR: 2.00e-06
[2026-04-25 19:12:18] Epoch 1 | Step 5620 | Loss: 1.0474 | LR: 2.00e-06
[2026-04-25 19:12:20] Epoch 1 | Step 5630 | Loss: 1.0474 | LR: 2.00e-06
[2026-04-25 19:12:23] Epoch 1 | Step 5640 | Loss: 1.0474 | LR: 2.00e-06
[2026-04-25 19:12:26] Epoch 1 | Step 5650 | Loss: 1.0473 | LR: 2.00e-06
[2026-04-25 19:12:28] Epoch 1 | Step 5660 | Loss: 1.0471 | LR: 2.00e-06
[2026-04-25 19:12:30] Epoch 1 | Step 5670 | Loss: 1.0470 | LR: 2.00e-06
[2026-04-25 19:12:33] Epoch 1 | Step 5680 | Loss: 1.0467 | LR: 2.00e-06
[2026-04-25 19:12:35] Epoch 1 | Step 5690 | Loss: 1.0467 | LR: 2.00e-06
[2026-04-25 19:12:38] Epoch 1 | Step 5700 | Loss: 1.0466 | LR: 2.00e-06
[2026-04-25 19:12:41] Epoch 1 | Step 5710 | Loss: 1.0467 | LR: 2.00e-06
[2026-04-25 19:12:43] Epoch 1 | Step 5720 | Loss: 1.0467 | LR: 2.00e-06
[2026-04-25 19:12:46] Epoch 1 | Step 5730 | Loss: 1.0466 | LR: 2.00e-06
[2026-04-25 19:12:48] Epoch 1 | Step 5740 | Loss: 1.0467 | LR: 2.00e-06
[2026-04-25 19:12:51] Epoch 1 | Step 5750 | Loss: 1.0466 | LR: 2.00e-06
[2026-04-25 19:12:54] Epoch 1 | Step 5760 | Loss: 1.0465 | LR: 2.00e-06
[2026-04-25 19:12:56] Epoch 1 | Step 5770 | Loss: 1.0466 | LR: 2.00e-06
[2026-04-25 19:12:59] Epoch 1 | Step 5780 | Loss: 1.0464 | LR: 2.00e-06
[2026-04-25 19:13:01] Epoch 1 | Step 5790 | Loss: 1.0465 | LR: 2.00e-06
[2026-04-25 19:13:04] Epoch 1 | Step 5800 | Loss: 1.0467 | LR: 2.00e-06
[2026-04-25 19:13:06] Epoch 1 | Step 5810 | Loss: 1.0466 | LR: 2.00e-06
[2026-04-25 19:13:09] Epoch 1 | Step 5820 | Loss: 1.0464 | LR: 2.00e-06
[2026-04-25 19:13:11] Epoch 1 | Step 5830 | Loss: 1.0463 | LR: 2.00e-06
[2026-04-25 19:13:14] Epoch 1 | Step 5840 | Loss: 1.0463 | LR: 2.00e-06
[2026-04-25 19:13:16] Epoch 1 | Step 5850 | Loss: 1.0464 | LR: 2.00e-06
[2026-04-25 19:13:18] Epoch 1 | Step 5860 | Loss: 1.0463 | LR: 2.00e-06
[2026-04-25 19:13:21] Epoch 1 | Step 5870 | Loss: 1.0463 | LR: 2.00e-06
[2026-04-25 19:13:24] Epoch 1 | Step 5880 | Loss: 1.0463 | LR: 2.00e-06
[2026-04-25 19:13:26] Epoch 1 | Step 5890 | Loss: 1.0463 | LR: 2.00e-06
[2026-04-25 19:13:29] Epoch 1 | Step 5900 | Loss: 1.0462 | LR: 2.00e-06
[2026-04-25 19:13:32] Epoch 1 | Step 5910 | Loss: 1.0462 | LR: 2.00e-06
[2026-04-25 19:13:34] Epoch 1 | Step 5920 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:13:37] Epoch 1 | Step 5930 | Loss: 1.0460 | LR: 2.00e-06
[2026-04-25 19:13:40] Epoch 1 | Step 5940 | Loss: 1.0458 | LR: 2.00e-06
[2026-04-25 19:13:42] Epoch 1 | Step 5950 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:13:45] Epoch 1 | Step 5960 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:13:47] Epoch 1 | Step 5970 | Loss: 1.0460 | LR: 2.00e-06
[2026-04-25 19:13:50] Epoch 1 | Step 5980 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:13:53] Epoch 1 | Step 5990 | Loss: 1.0461 | LR: 2.00e-06
[2026-04-25 19:13:55] Epoch 1 | Step 6000 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:13:56] Validation | Batch 10/84 | Loss: 0.9715
[2026-04-25 19:13:56] Validation | Batch 20/84 | Loss: 0.9678
[2026-04-25 19:13:57] Validation | Batch 30/84 | Loss: 1.0471
[2026-04-25 19:13:57] Validation | Batch 40/84 | Loss: 1.0534
[2026-04-25 19:13:57] Validation | Batch 50/84 | Loss: 1.0527
[2026-04-25 19:13:58] Validation | Batch 60/84 | Loss: 1.0254
[2026-04-25 19:13:58] Validation | Batch 70/84 | Loss: 1.0086
[2026-04-25 19:13:59] Validation | Batch 80/84 | Loss: 1.0154
[2026-04-25 19:13:59] Validation | Batch 84/84 | Loss: 1.0084
[2026-04-25 19:13:59] Validation | Loss: 1.0084 | PPL: 2.80 | Time: 3.77s
[2026-04-25 19:14:02] New best model saved! Val loss: 1.0084
[2026-04-25 19:14:04] Epoch 1 | Step 6010 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:07] Epoch 1 | Step 6020 | Loss: 1.0457 | LR: 2.00e-06
[2026-04-25 19:14:09] Epoch 1 | Step 6030 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:12] Epoch 1 | Step 6040 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:15] Epoch 1 | Step 6050 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:17] Epoch 1 | Step 6060 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:20] Epoch 1 | Step 6070 | Loss: 1.0457 | LR: 2.00e-06
[2026-04-25 19:14:22] Epoch 1 | Step 6080 | Loss: 1.0458 | LR: 2.00e-06
[2026-04-25 19:14:25] Epoch 1 | Step 6090 | Loss: 1.0458 | LR: 2.00e-06
[2026-04-25 19:14:27] Epoch 1 | Step 6100 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:30] Epoch 1 | Step 6110 | Loss: 1.0459 | LR: 2.00e-06
[2026-04-25 19:14:33] Epoch 1 | Step 6120 | Loss: 1.0458 | LR: 2.00e-06
[2026-04-25 19:14:35] Epoch 1 | Step 6130 | Loss: 1.0457 | LR: 2.00e-06
[2026-04-25 19:14:38] Epoch 1 | Step 6140 | Loss: 1.0453 | LR: 2.00e-06
[2026-04-25 19:14:40] Epoch 1 | Step 6150 | Loss: 1.0452 | LR: 2.00e-06
[2026-04-25 19:14:43] Epoch 1 | Step 6160 | Loss: 1.0451 | LR: 2.00e-06
[2026-04-25 19:14:45] Epoch 1 | Step 6170 | Loss: 1.0453 | LR: 2.00e-06
[2026-04-25 19:14:48] Epoch 1 | Step 6180 | Loss: 1.0450 | LR: 2.00e-06
[2026-04-25 19:14:50] Epoch 1 | Step 6190 | Loss: 1.0448 | LR: 2.00e-06
[2026-04-25 19:14:53] Epoch 1 | Step 6200 | Loss: 1.0446 | LR: 2.00e-06
[2026-04-25 19:14:55] Epoch 1 | Step 6210 | Loss: 1.0447 | LR: 2.00e-06
[2026-04-25 19:14:58] Epoch 1 | Step 6220 | Loss: 1.0448 | LR: 2.00e-06
[2026-04-25 19:15:01] Epoch 1 | Step 6230 | Loss: 1.0446 | LR: 2.00e-06
[2026-04-25 19:15:03] Epoch 1 | Step 6240 | Loss: 1.0446 | LR: 2.00e-06
[2026-04-25 19:15:06] Epoch 1 | Step 6250 | Loss: 1.0443 | LR: 2.00e-06
[2026-04-25 19:15:08] Epoch 1 | Step 6260 | Loss: 1.0443 | LR: 2.00e-06
[2026-04-25 19:15:11] Epoch 1 | Step 6270 | Loss: 1.0442 | LR: 2.00e-06
[2026-04-25 19:15:13] Epoch 1 | Step 6280 | Loss: 1.0440 | LR: 2.00e-06
[2026-04-25 19:15:16] Epoch 1 | Step 6290 | Loss: 1.0439 | LR: 2.00e-06
[2026-04-25 19:15:19] Epoch 1 | Step 6300 | Loss: 1.0439 | LR: 2.00e-06
[2026-04-25 19:15:21] Epoch 1 | Step 6310 | Loss: 1.0439 | LR: 2.00e-06
[2026-04-25 19:15:24] Epoch 1 | Step 6320 | Loss: 1.0439 | LR: 2.00e-06
[2026-04-25 19:15:26] Epoch 1 | Step 6330 | Loss: 1.0441 | LR: 2.00e-06
[2026-04-25 19:15:29] Epoch 1 | Step 6340 | Loss: 1.0441 | LR: 2.00e-06
[2026-04-25 19:15:31] Epoch 1 | Step 6350 | Loss: 1.0441 | LR: 2.00e-06
[2026-04-25 19:15:34] Epoch 1 | Step 6360 | Loss: 1.0441 | LR: 2.00e-06
[2026-04-25 19:15:36] Epoch 1 | Step 6370 | Loss: 1.0441 | LR: 2.00e-06
[2026-04-25 19:15:38] Epoch 1 | Step 6380 | Loss: 1.0441 | LR: 2.00e-06
[2026-04-25 19:15:41] Epoch 1 | Step 6390 | Loss: 1.0439 | LR: 2.00e-06
[2026-04-25 19:15:43] Epoch 1 | Step 6400 | Loss: 1.0438 | LR: 2.00e-06
[2026-04-25 19:15:46] Epoch 1 | Step 6410 | Loss: 1.0437 | LR: 2.00e-06
[2026-04-25 19:15:48] Epoch 1 | Step 6420 | Loss: 1.0436 | LR: 2.00e-06
[2026-04-25 19:15:51] Epoch 1 | Step 6430 | Loss: 1.0435 | LR: 2.00e-06
[2026-04-25 19:15:53] Epoch 1 | Step 6440 | Loss: 1.0435 | LR: 2.00e-06
[2026-04-25 19:15:55] Epoch 1 | Step 6450 | Loss: 1.0434 | LR: 2.00e-06
[2026-04-25 19:15:58] Epoch 1 | Step 6460 | Loss: 1.0430 | LR: 2.00e-06
[2026-04-25 19:16:00] Epoch 1 | Step 6470 | Loss: 1.0430 | LR: 2.00e-06
[2026-04-25 19:16:03] Epoch 1 | Step 6480 | Loss: 1.0431 | LR: 2.00e-06
[2026-04-25 19:16:05] Epoch 1 | Step 6490 | Loss: 1.0432 | LR: 2.00e-06
[2026-04-25 19:16:08] Epoch 1 | Step 6500 | Loss: 1.0430 | LR: 2.00e-06
[2026-04-25 19:16:10] Epoch 1 | Step 6510 | Loss: 1.0428 | LR: 2.00e-06
[2026-04-25 19:16:13] Epoch 1 | Step 6520 | Loss: 1.0426 | LR: 2.00e-06
[2026-04-25 19:16:15] Epoch 1 | Step 6530 | Loss: 1.0424 | LR: 2.00e-06
[2026-04-25 19:16:18] Epoch 1 | Step 6540 | Loss: 1.0423 | LR: 2.00e-06
[2026-04-25 19:16:20] Epoch 1 | Step 6550 | Loss: 1.0422 | LR: 2.00e-06
[2026-04-25 19:16:22] Epoch 1 | Step 6560 | Loss: 1.0421 | LR: 2.00e-06
[2026-04-25 19:16:25] Epoch 1 | Step 6570 | Loss: 1.0421 | LR: 2.00e-06
[2026-04-25 19:16:28] Epoch 1 | Step 6580 | Loss: 1.0420 | LR: 2.00e-06
[2026-04-25 19:16:30] Epoch 1 | Step 6590 | Loss: 1.0419 | LR: 2.00e-06
[2026-04-25 19:16:33] Epoch 1 | Step 6600 | Loss: 1.0418 | LR: 2.00e-06
[2026-04-25 19:16:35] Epoch 1 | Step 6610 | Loss: 1.0418 | LR: 2.00e-06
[2026-04-25 19:16:38] Epoch 1 | Step 6620 | Loss: 1.0417 | LR: 2.00e-06
[2026-04-25 19:16:40] Epoch 1 | Step 6630 | Loss: 1.0416 | LR: 2.00e-06
[2026-04-25 19:16:43] Epoch 1 | Step 6640 | Loss: 1.0416 | LR: 2.00e-06
[2026-04-25 19:16:45] Epoch 1 | Step 6650 | Loss: 1.0417 | LR: 2.00e-06
[2026-04-25 19:16:48] Epoch 1 | Step 6660 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:16:50] Epoch 1 | Step 6670 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:16:53] Epoch 1 | Step 6680 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:16:55] Epoch 1 | Step 6690 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:16:58] Epoch 1 | Step 6700 | Loss: 1.0413 | LR: 2.00e-06
[2026-04-25 19:17:00] Epoch 1 | Step 6710 | Loss: 1.0413 | LR: 2.00e-06
[2026-04-25 19:17:03] Epoch 1 | Step 6720 | Loss: 1.0412 | LR: 2.00e-06
[2026-04-25 19:17:05] Epoch 1 | Step 6730 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:17:08] Epoch 1 | Step 6740 | Loss: 1.0412 | LR: 2.00e-06
[2026-04-25 19:17:10] Epoch 1 | Step 6750 | Loss: 1.0411 | LR: 2.00e-06
[2026-04-25 19:17:13] Epoch 1 | Step 6760 | Loss: 1.0411 | LR: 2.00e-06
[2026-04-25 19:17:15] Epoch 1 | Step 6770 | Loss: 1.0410 | LR: 2.00e-06
[2026-04-25 19:17:18] Epoch 1 | Step 6780 | Loss: 1.0410 | LR: 2.00e-06
[2026-04-25 19:17:20] Epoch 1 | Step 6790 | Loss: 1.0411 | LR: 2.00e-06
[2026-04-25 19:17:23] Epoch 1 | Step 6800 | Loss: 1.0412 | LR: 2.00e-06
[2026-04-25 19:17:25] Epoch 1 | Step 6810 | Loss: 1.0412 | LR: 2.00e-06
[2026-04-25 19:17:28] Epoch 1 | Step 6820 | Loss: 1.0413 | LR: 2.00e-06
[2026-04-25 19:17:30] Epoch 1 | Step 6830 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:17:33] Epoch 1 | Step 6840 | Loss: 1.0415 | LR: 2.00e-06
[2026-04-25 19:17:35] Epoch 1 | Step 6850 | Loss: 1.0415 | LR: 2.00e-06
[2026-04-25 19:17:38] Epoch 1 | Step 6860 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:17:40] Epoch 1 | Step 6870 | Loss: 1.0413 | LR: 2.00e-06
[2026-04-25 19:17:43] Epoch 1 | Step 6880 | Loss: 1.0413 | LR: 2.00e-06
[2026-04-25 19:17:45] Epoch 1 | Step 6890 | Loss: 1.0414 | LR: 2.00e-06
[2026-04-25 19:17:48] Epoch 1 | Step 6900 | Loss: 1.0413 | LR: 2.00e-06
[2026-04-25 19:17:50] Epoch 1 | Step 6910 | Loss: 1.0410 | LR: 2.00e-06
[2026-04-25 19:17:53] Epoch 1 | Step 6920 | Loss: 1.0410 | LR: 2.00e-06
[2026-04-25 19:17:55] Epoch 1 | Step 6930 | Loss: 1.0410 | LR: 2.00e-06
[2026-04-25 19:17:58] Epoch 1 | Step 6940 | Loss: 1.0409 | LR: 2.00e-06
[2026-04-25 19:18:01] Epoch 1 | Step 6950 | Loss: 1.0408 | LR: 2.00e-06
[2026-04-25 19:18:03] Epoch 1 | Step 6960 | Loss: 1.0408 | LR: 2.00e-06
[2026-04-25 19:18:06] Epoch 1 | Step 6970 | Loss: 1.0407 | LR: 2.00e-06
[2026-04-25 19:18:08] Epoch 1 | Step 6980 | Loss: 1.0407 | LR: 2.00e-06
[2026-04-25 19:18:10] Epoch 1 | Step 6990 | Loss: 1.0404 | LR: 2.00e-06
[2026-04-25 19:18:13] Epoch 1 | Step 7000 | Loss: 1.0403 | LR: 2.00e-06
[2026-04-25 19:18:15] Epoch 1 | Step 7010 | Loss: 1.0402 | LR: 2.00e-06
[2026-04-25 19:18:18] Epoch 1 | Step 7020 | Loss: 1.0403 | LR: 2.00e-06
[2026-04-25 19:18:20] Epoch 1 | Step 7030 | Loss: 1.0402 | LR: 2.00e-06
[2026-04-25 19:18:23] Epoch 1 | Step 7040 | Loss: 1.0403 | LR: 2.00e-06
[2026-04-25 19:18:25] Epoch 1 | Step 7050 | Loss: 1.0401 | LR: 2.00e-06
[2026-04-25 19:18:28] Epoch 1 | Step 7060 | Loss: 1.0400 | LR: 2.00e-06
[2026-04-25 19:18:30] Epoch 1 | Step 7070 | Loss: 1.0401 | LR: 2.00e-06
[2026-04-25 19:18:33] Epoch 1 | Step 7080 | Loss: 1.0400 | LR: 2.00e-06
[2026-04-25 19:18:35] Epoch 1 | Step 7090 | Loss: 1.0400 | LR: 2.00e-06
[2026-04-25 19:18:38] Epoch 1 | Step 7100 | Loss: 1.0398 | LR: 2.00e-06
[2026-04-25 19:18:40] Epoch 1 | Step 7110 | Loss: 1.0397 | LR: 2.00e-06
[2026-04-25 19:18:43] Epoch 1 | Step 7120 | Loss: 1.0398 | LR: 2.00e-06
[2026-04-25 19:18:45] Epoch 1 | Step 7130 | Loss: 1.0396 | LR: 2.00e-06
[2026-04-25 19:18:48] Epoch 1 | Step 7140 | Loss: 1.0395 | LR: 2.00e-06
[2026-04-25 19:18:50] Epoch 1 | Step 7150 | Loss: 1.0396 | LR: 2.00e-06
[2026-04-25 19:18:53] Epoch 1 | Step 7160 | Loss: 1.0394 | LR: 2.00e-06
[2026-04-25 19:18:55] Epoch 1 | Step 7170 | Loss: 1.0394 | LR: 2.00e-06
[2026-04-25 19:18:58] Epoch 1 | Step 7180 | Loss: 1.0394 | LR: 2.00e-06
[2026-04-25 19:19:00] Epoch 1 | Step 7190 | Loss: 1.0395 | LR: 2.00e-06
[2026-04-25 19:19:03] Epoch 1 | Step 7200 | Loss: 1.0394 | LR: 2.00e-06
[2026-04-25 19:19:06] Epoch 1 | Step 7210 | Loss: 1.0392 | LR: 2.00e-06
[2026-04-25 19:19:08] Epoch 1 | Step 7220 | Loss: 1.0393 | LR: 2.00e-06
[2026-04-25 19:19:11] Epoch 1 | Step 7230 | Loss: 1.0393 | LR: 2.00e-06
[2026-04-25 19:19:13] Epoch 1 | Step 7240 | Loss: 1.0393 | LR: 2.00e-06
[2026-04-25 19:19:16] Epoch 1 | Step 7250 | Loss: 1.0392 | LR: 2.00e-06
[2026-04-25 19:19:19] Epoch 1 | Step 7260 | Loss: 1.0392 | LR: 2.00e-06
[2026-04-25 19:19:21] Epoch 1 | Step 7270 | Loss: 1.0393 | LR: 2.00e-06
[2026-04-25 19:19:24] Epoch 1 | Step 7280 | Loss: 1.0393 | LR: 2.00e-06
[2026-04-25 19:19:26] Epoch 1 | Step 7290 | Loss: 1.0391 | LR: 2.00e-06
[2026-04-25 19:19:29] Epoch 1 | Step 7300 | Loss: 1.0389 | LR: 2.00e-06
[2026-04-25 19:19:31] Epoch 1 | Step 7310 | Loss: 1.0387 | LR: 2.00e-06
[2026-04-25 19:19:34] Epoch 1 | Step 7320 | Loss: 1.0385 | LR: 2.00e-06
[2026-04-25 19:19:36] Epoch 1 | Step 7330 | Loss: 1.0386 | LR: 2.00e-06
[2026-04-25 19:19:39] Epoch 1 | Step 7340 | Loss: 1.0387 | LR: 2.00e-06
[2026-04-25 19:19:42] Epoch 1 | Step 7350 | Loss: 1.0388 | LR: 2.00e-06
[2026-04-25 19:19:44] Epoch 1 | Step 7360 | Loss: 1.0387 | LR: 2.00e-06
[2026-04-25 19:19:47] Epoch 1 | Step 7370 | Loss: 1.0385 | LR: 2.00e-06
[2026-04-25 19:19:49] Epoch 1 | Step 7380 | Loss: 1.0383 | LR: 2.00e-06
[2026-04-25 19:19:52] Epoch 1 | Step 7390 | Loss: 1.0382 | LR: 2.00e-06
[2026-04-25 19:19:54] Epoch 1 | Step 7400 | Loss: 1.0381 | LR: 2.00e-06
[2026-04-25 19:19:57] Epoch 1 | Step 7410 | Loss: 1.0382 | LR: 2.00e-06
[2026-04-25 19:19:59] Epoch 1 | Step 7420 | Loss: 1.0382 | LR: 2.00e-06
[2026-04-25 19:20:02] Epoch 1 | Step 7430 | Loss: 1.0381 | LR: 2.00e-06
[2026-04-25 19:20:04] Epoch 1 | Step 7440 | Loss: 1.0381 | LR: 2.00e-06
[2026-04-25 19:20:07] Epoch 1 | Step 7450 | Loss: 1.0380 | LR: 2.00e-06
[2026-04-25 19:20:09] Epoch 1 | Step 7460 | Loss: 1.0379 | LR: 2.00e-06
[2026-04-25 19:20:12] Epoch 1 | Step 7470 | Loss: 1.0379 | LR: 2.00e-06
[2026-04-25 19:20:14] Epoch 1 | Step 7480 | Loss: 1.0379 | LR: 2.00e-06
[2026-04-25 19:20:17] Epoch 1 | Step 7490 | Loss: 1.0380 | LR: 2.00e-06
[2026-04-25 19:20:19] Epoch 1 | Step 7500 | Loss: 1.0380 | LR: 2.00e-06
[2026-04-25 19:20:22] Epoch 1 | Step 7510 | Loss: 1.0381 | LR: 2.00e-06
[2026-04-25 19:20:24] Epoch 1 | Step 7520 | Loss: 1.0380 | LR: 2.00e-06
[2026-04-25 19:20:26] Epoch 1 | Step 7530 | Loss: 1.0379 | LR: 2.00e-06
[2026-04-25 19:20:29] Epoch 1 | Step 7540 | Loss: 1.0378 | LR: 2.00e-06
[2026-04-25 19:20:31] Epoch 1 | Step 7550 | Loss: 1.0379 | LR: 2.00e-06
[2026-04-25 19:20:34] Epoch 1 | Step 7560 | Loss: 1.0379 | LR: 2.00e-06
[2026-04-25 19:20:36] Epoch 1 | Step 7570 | Loss: 1.0378 | LR: 2.00e-06
[2026-04-25 19:20:39] Epoch 1 | Step 7580 | Loss: 1.0377 | LR: 2.00e-06
[2026-04-25 19:20:41] Epoch 1 | Step 7590 | Loss: 1.0376 | LR: 2.00e-06
[2026-04-25 19:20:43] Epoch 1 | Step 7600 | Loss: 1.0375 | LR: 2.00e-06
[2026-04-25 19:20:46] Epoch 1 | Step 7610 | Loss: 1.0374 | LR: 2.00e-06
[2026-04-25 19:20:48] Epoch 1 | Step 7620 | Loss: 1.0373 | LR: 2.00e-06
[2026-04-25 19:20:51] Epoch 1 | Step 7630 | Loss: 1.0372 | LR: 2.00e-06
[2026-04-25 19:20:53] Epoch 1 | Step 7640 | Loss: 1.0371 | LR: 2.00e-06
[2026-04-25 19:20:56] Epoch 1 | Step 7650 | Loss: 1.0370 | LR: 2.00e-06
[2026-04-25 19:20:59] Epoch 1 | Step 7660 | Loss: 1.0369 | LR: 2.00e-06
[2026-04-25 19:21:01] Epoch 1 | Step 7670 | Loss: 1.0367 | LR: 2.00e-06
[2026-04-25 19:21:04] Epoch 1 | Step 7680 | Loss: 1.0367 | LR: 2.00e-06
[2026-04-25 19:21:06] Epoch 1 | Step 7690 | Loss: 1.0368 | LR: 2.00e-06
[2026-04-25 19:21:09] Epoch 1 | Step 7700 | Loss: 1.0367 | LR: 2.00e-06
[2026-04-25 19:21:12] Epoch 1 | Step 7710 | Loss: 1.0364 | LR: 2.00e-06
[2026-04-25 19:21:14] Epoch 1 | Step 7720 | Loss: 1.0365 | LR: 2.00e-06
[2026-04-25 19:21:17] Epoch 1 | Step 7730 | Loss: 1.0367 | LR: 2.00e-06
[2026-04-25 19:21:19] Epoch 1 | Step 7740 | Loss: 1.0368 | LR: 2.00e-06
[2026-04-25 19:21:22] Epoch 1 | Step 7750 | Loss: 1.0368 | LR: 2.00e-06
[2026-04-25 19:21:25] Epoch 1 | Step 7760 | Loss: 1.0366 | LR: 2.00e-06
[2026-04-25 19:21:27] Epoch 1 | Step 7770 | Loss: 1.0365 | LR: 2.00e-06
[2026-04-25 19:21:30] Epoch 1 | Step 7780 | Loss: 1.0364 | LR: 2.00e-06
[2026-04-25 19:21:32] Epoch 1 | Step 7790 | Loss: 1.0363 | LR: 2.00e-06
[2026-04-25 19:21:35] Epoch 1 | Step 7800 | Loss: 1.0362 | LR: 2.00e-06
[2026-04-25 19:21:37] Epoch 1 | Step 7810 | Loss: 1.0363 | LR: 2.00e-06
[2026-04-25 19:21:40] Epoch 1 | Step 7820 | Loss: 1.0364 | LR: 2.00e-06
[2026-04-25 19:21:42] Epoch 1 | Step 7830 | Loss: 1.0363 | LR: 2.00e-06
[2026-04-25 19:21:45] Epoch 1 | Step 7840 | Loss: 1.0362 | LR: 2.00e-06
[2026-04-25 19:21:47] Epoch 1 | Step 7850 | Loss: 1.0360 | LR: 2.00e-06
[2026-04-25 19:21:50] Epoch 1 | Step 7860 | Loss: 1.0360 | LR: 2.00e-06
[2026-04-25 19:21:53] Epoch 1 | Step 7870 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:21:55] Epoch 1 | Step 7880 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:21:58] Epoch 1 | Step 7890 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:22:01] Epoch 1 | Step 7900 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:22:04] Epoch 1 | Step 7910 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:22:06] Epoch 1 | Step 7920 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:22:09] Epoch 1 | Step 7930 | Loss: 1.0360 | LR: 2.00e-06
[2026-04-25 19:22:11] Epoch 1 | Step 7940 | Loss: 1.0360 | LR: 2.00e-06
[2026-04-25 19:22:14] Epoch 1 | Step 7950 | Loss: 1.0362 | LR: 2.00e-06
[2026-04-25 19:22:16] Epoch 1 | Step 7960 | Loss: 1.0362 | LR: 2.00e-06
[2026-04-25 19:22:19] Epoch 1 | Step 7970 | Loss: 1.0362 | LR: 2.00e-06
[2026-04-25 19:22:21] Epoch 1 | Step 7980 | Loss: 1.0361 | LR: 2.00e-06
[2026-04-25 19:22:24] Epoch 1 | Step 7990 | Loss: 1.0360 | LR: 2.00e-06
[2026-04-25 19:22:26] Epoch 1 | Step 8000 | Loss: 1.0360 | LR: 2.00e-06
[2026-04-25 19:22:27] Validation | Batch 10/84 | Loss: 0.9692
[2026-04-25 19:22:27] Validation | Batch 20/84 | Loss: 0.9655
[2026-04-25 19:22:28] Validation | Batch 30/84 | Loss: 1.0454
[2026-04-25 19:22:28] Validation | Batch 40/84 | Loss: 1.0514
[2026-04-25 19:22:28] Validation | Batch 50/84 | Loss: 1.0510
[2026-04-25 19:22:29] Validation | Batch 60/84 | Loss: 1.0237
[2026-04-25 19:22:29] Validation | Batch 70/84 | Loss: 1.0070
[2026-04-25 19:22:30] Validation | Batch 80/84 | Loss: 1.0139
[2026-04-25 19:22:30] Validation | Batch 84/84 | Loss: 1.0070
[2026-04-25 19:22:30] Validation | Loss: 1.0070 | PPL: 2.80 | Time: 3.75s
[2026-04-25 19:22:33] New best model saved! Val loss: 1.0070
[2026-04-25 19:22:36] Epoch 1 | Step 8010 | Loss: 1.0359 | LR: 2.00e-06
[2026-04-25 19:22:38] Epoch 1 | Step 8020 | Loss: 1.0357 | LR: 2.00e-06
[2026-04-25 19:22:41] Epoch 1 | Step 8030 | Loss: 1.0356 | LR: 2.00e-06
[2026-04-25 19:22:44] Epoch 1 | Step 8040 | Loss: 1.0357 | LR: 2.00e-06
[2026-04-25 19:22:46] Epoch 1 | Step 8050 | Loss: 1.0356 | LR: 2.00e-06
[2026-04-25 19:22:49] Epoch 1 | Step 8060 | Loss: 1.0356 | LR: 2.00e-06
[2026-04-25 19:22:51] Epoch 1 | Step 8070 | Loss: 1.0355 | LR: 2.00e-06
[2026-04-25 19:22:54] Epoch 1 | Step 8080 | Loss: 1.0355 | LR: 2.00e-06
[2026-04-25 19:22:57] Epoch 1 | Step 8090 | Loss: 1.0353 | LR: 2.00e-06
[2026-04-25 19:22:59] Epoch 1 | Step 8100 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:23:02] Epoch 1 | Step 8110 | Loss: 1.0353 | LR: 2.00e-06
[2026-04-25 19:23:04] Epoch 1 | Step 8120 | Loss: 1.0353 | LR: 2.00e-06
[2026-04-25 19:23:07] Epoch 1 | Step 8130 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:23:09] Epoch 1 | Step 8140 | Loss: 1.0353 | LR: 2.00e-06
[2026-04-25 19:23:12] Epoch 1 | Step 8150 | Loss: 1.0353 | LR: 2.00e-06
[2026-04-25 19:23:14] Epoch 1 | Step 8160 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:23:17] Epoch 1 | Step 8170 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:23:19] Epoch 1 | Step 8180 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:23:22] Epoch 1 | Step 8190 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:23:24] Epoch 1 | Step 8200 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:23:27] Epoch 1 | Step 8210 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:23:29] Epoch 1 | Step 8220 | Loss: 1.0349 | LR: 2.00e-06
[2026-04-25 19:23:32] Epoch 1 | Step 8230 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:23:34] Epoch 1 | Step 8240 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:23:37] Epoch 1 | Step 8250 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:23:39] Epoch 1 | Step 8260 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:23:42] Epoch 1 | Step 8270 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:23:44] Epoch 1 | Step 8280 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:23:47] Epoch 1 | Step 8290 | Loss: 1.0353 | LR: 2.00e-06
[2026-04-25 19:23:50] Epoch 1 | Step 8300 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:23:52] Epoch 1 | Step 8310 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:23:54] Epoch 1 | Step 8320 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:23:56] Epoch 1 | Step 8330 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:23:59] Epoch 1 | Step 8340 | Loss: 1.0352 | LR: 2.00e-06
[2026-04-25 19:24:02] Epoch 1 | Step 8350 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:24:04] Epoch 1 | Step 8360 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:24:06] Epoch 1 | Step 8370 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:24:09] Epoch 1 | Step 8380 | Loss: 1.0349 | LR: 2.00e-06
[2026-04-25 19:24:11] Epoch 1 | Step 8390 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:24:14] Epoch 1 | Step 8400 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:24:16] Epoch 1 | Step 8410 | Loss: 1.0350 | LR: 2.00e-06
[2026-04-25 19:24:19] Epoch 1 | Step 8420 | Loss: 1.0351 | LR: 2.00e-06
[2026-04-25 19:24:21] Epoch 1 | Step 8430 | Loss: 1.0349 | LR: 2.00e-06
[2026-04-25 19:24:24] Epoch 1 | Step 8440 | Loss: 1.0349 | LR: 2.00e-06
[2026-04-25 19:24:26] Epoch 1 | Step 8450 | Loss: 1.0349 | LR: 2.00e-06
[2026-04-25 19:24:29] Epoch 1 | Step 8460 | Loss: 1.0349 | LR: 2.00e-06
[2026-04-25 19:24:31] Epoch 1 | Step 8470 | Loss: 1.0348 | LR: 2.00e-06
|
| 912 |
+
[2026-04-25 19:24:34] Epoch 1 | Step 8480 | Loss: 1.0348 | LR: 2.00e-06
|
| 913 |
+
[2026-04-25 19:24:36] Epoch 1 | Step 8490 | Loss: 1.0346 | LR: 2.00e-06
|
| 914 |
+
[2026-04-25 19:24:39] Epoch 1 | Step 8500 | Loss: 1.0347 | LR: 2.00e-06
|
| 915 |
+
[2026-04-25 19:24:41] Epoch 1 | Step 8510 | Loss: 1.0346 | LR: 2.00e-06
|
| 916 |
+
[2026-04-25 19:24:44] Epoch 1 | Step 8520 | Loss: 1.0346 | LR: 2.00e-06
|
| 917 |
+
[2026-04-25 19:24:46] Epoch 1 | Step 8530 | Loss: 1.0346 | LR: 2.00e-06
|
| 918 |
+
[2026-04-25 19:24:49] Epoch 1 | Step 8540 | Loss: 1.0347 | LR: 2.00e-06
|
| 919 |
+
[2026-04-25 19:24:51] Epoch 1 | Step 8550 | Loss: 1.0347 | LR: 2.00e-06
|
| 920 |
+
[2026-04-25 19:24:54] Epoch 1 | Step 8560 | Loss: 1.0347 | LR: 2.00e-06
|
| 921 |
+
[2026-04-25 19:24:56] Epoch 1 | Step 8570 | Loss: 1.0347 | LR: 2.00e-06
|
| 922 |
+
[2026-04-25 19:24:58] Epoch 1 | Step 8580 | Loss: 1.0346 | LR: 2.00e-06
|
| 923 |
+
[2026-04-25 19:25:01] Epoch 1 | Step 8590 | Loss: 1.0345 | LR: 2.00e-06
|
| 924 |
+
[2026-04-25 19:25:04] Epoch 1 | Step 8600 | Loss: 1.0343 | LR: 2.00e-06
|
| 925 |
+
[2026-04-25 19:25:06] Epoch 1 | Step 8610 | Loss: 1.0344 | LR: 2.00e-06
|
| 926 |
+
[2026-04-25 19:25:09] Epoch 1 | Step 8620 | Loss: 1.0344 | LR: 2.00e-06
|
| 927 |
+
[2026-04-25 19:25:11] Epoch 1 | Step 8630 | Loss: 1.0342 | LR: 2.00e-06
|
| 928 |
+
[2026-04-25 19:25:14] Epoch 1 | Step 8640 | Loss: 1.0343 | LR: 2.00e-06
|
| 929 |
+
[2026-04-25 19:25:17] Epoch 1 | Step 8650 | Loss: 1.0344 | LR: 2.00e-06
|
| 930 |
+
[2026-04-25 19:25:19] Epoch 1 | Step 8660 | Loss: 1.0342 | LR: 2.00e-06
|
| 931 |
+
[2026-04-25 19:25:22] Epoch 1 | Step 8670 | Loss: 1.0343 | LR: 2.00e-06
|
| 932 |
+
[2026-04-25 19:25:24] Epoch 1 | Step 8680 | Loss: 1.0343 | LR: 2.00e-06
|
| 933 |
+
[2026-04-25 19:25:26] Epoch 1 | Step 8690 | Loss: 1.0342 | LR: 2.00e-06
|
| 934 |
+
[2026-04-25 19:25:29] Epoch 1 | Step 8700 | Loss: 1.0342 | LR: 2.00e-06
|
| 935 |
+
[2026-04-25 19:25:32] Epoch 1 | Step 8710 | Loss: 1.0339 | LR: 2.00e-06
|
| 936 |
+
[2026-04-25 19:25:34] Epoch 1 | Step 8720 | Loss: 1.0338 | LR: 2.00e-06
|
| 937 |
+
[2026-04-25 19:25:37] Epoch 1 | Step 8730 | Loss: 1.0338 | LR: 2.00e-06
|
| 938 |
+
[2026-04-25 19:25:39] Epoch 1 | Step 8740 | Loss: 1.0339 | LR: 2.00e-06
|
| 939 |
+
[2026-04-25 19:25:42] Epoch 1 | Step 8750 | Loss: 1.0339 | LR: 2.00e-06
|
| 940 |
+
[2026-04-25 19:25:44] Epoch 1 | Step 8760 | Loss: 1.0338 | LR: 2.00e-06
|
| 941 |
+
[2026-04-25 19:25:47] Epoch 1 | Step 8770 | Loss: 1.0337 | LR: 2.00e-06
|
| 942 |
+
[2026-04-25 19:25:50] Epoch 1 | Step 8780 | Loss: 1.0337 | LR: 2.00e-06
|
| 943 |
+
[2026-04-25 19:25:52] Epoch 1 | Step 8790 | Loss: 1.0337 | LR: 2.00e-06
|
| 944 |
+
[2026-04-25 19:25:55] Epoch 1 | Step 8800 | Loss: 1.0335 | LR: 2.00e-06
|
| 945 |
+
[2026-04-25 19:25:57] Epoch 1 | Step 8810 | Loss: 1.0334 | LR: 2.00e-06
|
| 946 |
+
[2026-04-25 19:26:00] Epoch 1 | Step 8820 | Loss: 1.0334 | LR: 2.00e-06
|
| 947 |
+
[2026-04-25 19:26:02] Epoch 1 | Step 8830 | Loss: 1.0334 | LR: 2.00e-06
|
| 948 |
+
[2026-04-25 19:26:05] Epoch 1 | Step 8840 | Loss: 1.0334 | LR: 2.00e-06
|
| 949 |
+
[2026-04-25 19:26:08] Epoch 1 | Step 8850 | Loss: 1.0333 | LR: 2.00e-06
|
| 950 |
+
[2026-04-25 19:26:10] Epoch 1 | Step 8860 | Loss: 1.0333 | LR: 2.00e-06
|
| 951 |
+
[2026-04-25 19:26:13] Epoch 1 | Step 8870 | Loss: 1.0334 | LR: 2.00e-06
|
| 952 |
+
[2026-04-25 19:26:15] Epoch 1 | Step 8880 | Loss: 1.0333 | LR: 2.00e-06
|
| 953 |
+
[2026-04-25 19:26:18] Epoch 1 | Step 8890 | Loss: 1.0332 | LR: 2.00e-06
|
| 954 |
+
[2026-04-25 19:26:20] Epoch 1 | Step 8900 | Loss: 1.0330 | LR: 2.00e-06
|
| 955 |
+
[2026-04-25 19:26:22] Epoch 1 | Step 8910 | Loss: 1.0331 | LR: 2.00e-06
|
| 956 |
+
[2026-04-25 19:26:25] Epoch 1 | Step 8920 | Loss: 1.0329 | LR: 2.00e-06
|
| 957 |
+
[2026-04-25 19:26:27] Epoch 1 | Step 8930 | Loss: 1.0327 | LR: 2.00e-06
|
| 958 |
+
[2026-04-25 19:26:30] Epoch 1 | Step 8940 | Loss: 1.0328 | LR: 2.00e-06
|
| 959 |
+
[2026-04-25 19:26:32] Epoch 1 | Step 8950 | Loss: 1.0328 | LR: 2.00e-06
|
| 960 |
+
[2026-04-25 19:26:35] Epoch 1 | Step 8960 | Loss: 1.0328 | LR: 2.00e-06
|
| 961 |
+
[2026-04-25 19:26:37] Epoch 1 | Step 8970 | Loss: 1.0328 | LR: 2.00e-06
|
| 962 |
+
[2026-04-25 19:26:40] Epoch 1 | Step 8980 | Loss: 1.0327 | LR: 2.00e-06
|
| 963 |
+
[2026-04-25 19:26:42] Epoch 1 | Step 8990 | Loss: 1.0326 | LR: 2.00e-06
|
| 964 |
+
[2026-04-25 19:26:45] Epoch 1 | Step 9000 | Loss: 1.0325 | LR: 2.00e-06
|
| 965 |
+
[2026-04-25 19:26:48] Epoch 1 | Step 9010 | Loss: 1.0326 | LR: 2.00e-06
|
| 966 |
+
[2026-04-25 19:26:50] Epoch 1 | Step 9020 | Loss: 1.0327 | LR: 2.00e-06
|
| 967 |
+
[2026-04-25 19:26:53] Epoch 1 | Step 9030 | Loss: 1.0326 | LR: 2.00e-06
|
| 968 |
+
[2026-04-25 19:26:55] Epoch 1 | Step 9040 | Loss: 1.0326 | LR: 2.00e-06
|
| 969 |
+
[2026-04-25 19:26:58] Epoch 1 | Step 9050 | Loss: 1.0325 | LR: 2.00e-06
|
| 970 |
+
[2026-04-25 19:27:00] Epoch 1 | Step 9060 | Loss: 1.0326 | LR: 2.00e-06
|
| 971 |
+
[2026-04-25 19:27:03] Epoch 1 | Step 9070 | Loss: 1.0324 | LR: 2.00e-06
|
| 972 |
+
[2026-04-25 19:27:05] Epoch 1 | Step 9080 | Loss: 1.0325 | LR: 2.00e-06
|
| 973 |
+
[2026-04-25 19:27:07] Epoch 1 | Step 9090 | Loss: 1.0324 | LR: 2.00e-06
|
| 974 |
+
[2026-04-25 19:27:10] Epoch 1 | Step 9100 | Loss: 1.0324 | LR: 2.00e-06
|
| 975 |
+
[2026-04-25 19:27:13] Epoch 1 | Step 9110 | Loss: 1.0325 | LR: 2.00e-06
|
| 976 |
+
[2026-04-25 19:27:15] Epoch 1 | Step 9120 | Loss: 1.0326 | LR: 2.00e-06
|
| 977 |
+
[2026-04-25 19:27:17] Epoch 1 | Step 9130 | Loss: 1.0324 | LR: 2.00e-06
|
| 978 |
+
[2026-04-25 19:27:20] Epoch 1 | Step 9140 | Loss: 1.0324 | LR: 2.00e-06
|
| 979 |
+
[2026-04-25 19:27:22] Epoch 1 | Step 9150 | Loss: 1.0325 | LR: 2.00e-06
|
| 980 |
+
[2026-04-25 19:27:25] Epoch 1 | Step 9160 | Loss: 1.0324 | LR: 2.00e-06
|
| 981 |
+
[2026-04-25 19:27:27] Epoch 1 | Step 9170 | Loss: 1.0322 | LR: 2.00e-06
|
| 982 |
+
[2026-04-25 19:27:30] Epoch 1 | Step 9180 | Loss: 1.0322 | LR: 2.00e-06
|
| 983 |
+
[2026-04-25 19:27:32] Epoch 1 | Step 9190 | Loss: 1.0319 | LR: 2.00e-06
|
| 984 |
+
[2026-04-25 19:27:35] Epoch 1 | Step 9200 | Loss: 1.0319 | LR: 2.00e-06
|
| 985 |
+
[2026-04-25 19:27:38] Epoch 1 | Step 9210 | Loss: 1.0320 | LR: 2.00e-06
|
| 986 |
+
[2026-04-25 19:27:40] Epoch 1 | Step 9220 | Loss: 1.0319 | LR: 2.00e-06
|
| 987 |
+
[2026-04-25 19:27:43] Epoch 1 | Step 9230 | Loss: 1.0318 | LR: 2.00e-06
|
| 988 |
+
[2026-04-25 19:27:45] Epoch 1 | Step 9240 | Loss: 1.0316 | LR: 2.00e-06
|
| 989 |
+
[2026-04-25 19:27:48] Epoch 1 | Step 9250 | Loss: 1.0315 | LR: 2.00e-06
|
| 990 |
+
[2026-04-25 19:27:50] Epoch 1 | Step 9260 | Loss: 1.0314 | LR: 2.00e-06
|
| 991 |
+
[2026-04-25 19:27:53] Epoch 1 | Step 9270 | Loss: 1.0313 | LR: 2.00e-06
|
| 992 |
+
[2026-04-25 19:27:55] Epoch 1 | Step 9280 | Loss: 1.0313 | LR: 2.00e-06
|
| 993 |
+
[2026-04-25 19:27:58] Epoch 1 | Step 9290 | Loss: 1.0313 | LR: 2.00e-06
|
| 994 |
+
[2026-04-25 19:28:00] Epoch 1 | Step 9300 | Loss: 1.0313 | LR: 2.00e-06
|
| 995 |
+
[2026-04-25 19:28:03] Epoch 1 | Step 9310 | Loss: 1.0312 | LR: 2.00e-06
|
| 996 |
+
[2026-04-25 19:28:05] Epoch 1 | Step 9320 | Loss: 1.0312 | LR: 2.00e-06
|
| 997 |
+
[2026-04-25 19:28:08] Epoch 1 | Step 9330 | Loss: 1.0311 | LR: 2.00e-06
|
| 998 |
+
[2026-04-25 19:28:10] Epoch 1 | Step 9340 | Loss: 1.0310 | LR: 2.00e-06
|
| 999 |
+
[2026-04-25 19:28:13] Epoch 1 | Step 9350 | Loss: 1.0310 | LR: 2.00e-06
|
| 1000 |
+
[2026-04-25 19:28:15] Epoch 1 | Step 9360 | Loss: 1.0309 | LR: 2.00e-06
|
| 1001 |
+
[2026-04-25 19:28:18] Epoch 1 | Step 9370 | Loss: 1.0309 | LR: 2.00e-06
|
| 1002 |
+
[2026-04-25 19:28:20] Epoch 1 | Step 9380 | Loss: 1.0309 | LR: 2.00e-06
|
| 1003 |
+
[2026-04-25 19:28:23] Epoch 1 | Step 9390 | Loss: 1.0307 | LR: 2.00e-06
|
| 1004 |
+
[2026-04-25 19:28:25] Epoch 1 | Step 9400 | Loss: 1.0308 | LR: 2.00e-06
|
| 1005 |
+
[2026-04-25 19:28:28] Epoch 1 | Step 9410 | Loss: 1.0308 | LR: 2.00e-06
|
| 1006 |
+
[2026-04-25 19:28:31] Epoch 1 | Step 9420 | Loss: 1.0309 | LR: 2.00e-06
|
| 1007 |
+
[2026-04-25 19:28:33] Epoch 1 | Step 9430 | Loss: 1.0309 | LR: 2.00e-06
|
| 1008 |
+
[2026-04-25 19:28:36] Epoch 1 | Step 9440 | Loss: 1.0309 | LR: 2.00e-06
|
| 1009 |
+
[2026-04-25 19:28:38] Epoch 1 | Step 9450 | Loss: 1.0310 | LR: 2.00e-06
|
| 1010 |
+
[2026-04-25 19:28:41] Epoch 1 | Step 9460 | Loss: 1.0309 | LR: 2.00e-06
|
| 1011 |
+
[2026-04-25 19:28:43] Epoch 1 | Step 9470 | Loss: 1.0308 | LR: 2.00e-06
|
| 1012 |
+
[2026-04-25 19:28:46] Epoch 1 | Step 9480 | Loss: 1.0306 | LR: 2.00e-06
|
| 1013 |
+
[2026-04-25 19:28:48] Epoch 1 | Step 9490 | Loss: 1.0306 | LR: 2.00e-06
|
| 1014 |
+
[2026-04-25 19:28:50] Epoch 1 | Step 9500 | Loss: 1.0306 | LR: 2.00e-06
|
| 1015 |
+
[2026-04-25 19:28:53] Epoch 1 | Step 9510 | Loss: 1.0306 | LR: 2.00e-06
|
| 1016 |
+
[2026-04-25 19:28:56] Epoch 1 | Step 9520 | Loss: 1.0305 | LR: 2.00e-06
|
| 1017 |
+
[2026-04-25 19:28:58] Epoch 1 | Step 9530 | Loss: 1.0306 | LR: 2.00e-06
|
| 1018 |
+
[2026-04-25 19:29:01] Epoch 1 | Step 9540 | Loss: 1.0304 | LR: 2.00e-06
|
| 1019 |
+
[2026-04-25 19:29:03] Epoch 1 | Step 9550 | Loss: 1.0305 | LR: 2.00e-06
|
| 1020 |
+
[2026-04-25 19:29:06] Epoch 1 | Step 9560 | Loss: 1.0305 | LR: 2.00e-06
|
| 1021 |
+
[2026-04-25 19:29:08] Epoch 1 | Step 9570 | Loss: 1.0305 | LR: 2.00e-06
|
| 1022 |
+
[2026-04-25 19:29:11] Epoch 1 | Step 9580 | Loss: 1.0307 | LR: 2.00e-06
|
| 1023 |
+
[2026-04-25 19:29:13] Epoch 1 | Step 9590 | Loss: 1.0306 | LR: 2.00e-06
|
| 1024 |
+
[2026-04-25 19:29:16] Epoch 1 | Step 9600 | Loss: 1.0305 | LR: 2.00e-06
|
| 1025 |
+
[2026-04-25 19:29:19] Epoch 1 | Step 9610 | Loss: 1.0304 | LR: 2.00e-06
|
| 1026 |
+
[2026-04-25 19:29:22] Epoch 1 | Step 9620 | Loss: 1.0305 | LR: 2.00e-06
|
| 1027 |
+
[2026-04-25 19:29:24] Epoch 1 | Step 9630 | Loss: 1.0306 | LR: 2.00e-06
|
| 1028 |
+
[2026-04-25 19:29:27] Epoch 1 | Step 9640 | Loss: 1.0306 | LR: 2.00e-06
|
| 1029 |
+
[2026-04-25 19:29:30] Epoch 1 | Step 9650 | Loss: 1.0306 | LR: 2.00e-06
|
| 1030 |
+
[2026-04-25 19:29:32] Epoch 1 | Step 9660 | Loss: 1.0306 | LR: 2.00e-06
|
| 1031 |
+
[2026-04-25 19:29:35] Epoch 1 | Step 9670 | Loss: 1.0306 | LR: 2.00e-06
|
| 1032 |
+
[2026-04-25 19:29:37] Epoch 1 | Step 9680 | Loss: 1.0305 | LR: 2.00e-06
|
| 1033 |
+
[2026-04-25 19:29:40] Epoch 1 | Step 9690 | Loss: 1.0305 | LR: 2.00e-06
|
| 1034 |
+
[2026-04-25 19:29:42] Epoch 1 | Step 9700 | Loss: 1.0305 | LR: 2.00e-06
|
| 1035 |
+
[2026-04-25 19:29:45] Epoch 1 | Step 9710 | Loss: 1.0305 | LR: 2.00e-06
|
| 1036 |
+
[2026-04-25 19:29:48] Epoch 1 | Step 9720 | Loss: 1.0304 | LR: 2.00e-06
|
| 1037 |
+
[2026-04-25 19:29:50] Epoch 1 | Step 9730 | Loss: 1.0305 | LR: 2.00e-06
|
| 1038 |
+
[2026-04-25 19:29:52] Epoch 1 | Step 9740 | Loss: 1.0304 | LR: 2.00e-06
|
| 1039 |
+
[2026-04-25 19:29:55] Epoch 1 | Step 9750 | Loss: 1.0303 | LR: 2.00e-06
|
| 1040 |
+
[2026-04-25 19:29:57] Epoch 1 | Step 9760 | Loss: 1.0303 | LR: 2.00e-06
|
| 1041 |
+
[2026-04-25 19:30:00] Epoch 1 | Step 9770 | Loss: 1.0303 | LR: 2.00e-06
|
| 1042 |
+
[2026-04-25 19:30:02] Epoch 1 | Step 9780 | Loss: 1.0303 | LR: 2.00e-06
|
| 1043 |
+
[2026-04-25 19:30:05] Epoch 1 | Step 9790 | Loss: 1.0302 | LR: 2.00e-06
|
| 1044 |
+
[2026-04-25 19:30:08] Epoch 1 | Step 9800 | Loss: 1.0302 | LR: 2.00e-06
|
| 1045 |
+
[2026-04-25 19:30:10] Epoch 1 | Step 9810 | Loss: 1.0301 | LR: 2.00e-06
|
| 1046 |
+
[2026-04-25 19:30:13] Epoch 1 | Step 9820 | Loss: 1.0301 | LR: 2.00e-06
|
| 1047 |
+
[2026-04-25 19:30:16] Epoch 1 | Step 9830 | Loss: 1.0301 | LR: 2.00e-06
|
| 1048 |
+
[2026-04-25 19:30:18] Epoch 1 | Step 9840 | Loss: 1.0302 | LR: 2.00e-06
|
| 1049 |
+
[2026-04-25 19:30:21] Epoch 1 | Step 9850 | Loss: 1.0301 | LR: 2.00e-06
|
| 1050 |
+
[2026-04-25 19:30:23] Epoch 1 | Step 9860 | Loss: 1.0300 | LR: 2.00e-06
|
| 1051 |
+
[2026-04-25 19:30:26] Epoch 1 | Step 9870 | Loss: 1.0301 | LR: 2.00e-06
|
| 1052 |
+
[2026-04-25 19:30:28] Epoch 1 | Step 9880 | Loss: 1.0301 | LR: 2.00e-06
|
| 1053 |
+
[2026-04-25 19:30:30] Epoch 1 completed in 2521.97s | Loss: 1.0301
|
| 1054 |
+
[2026-04-25 19:30:30]
|
| 1055 |
+
Training completed!
|
| 1056 |
+
[2026-04-25 19:30:33] Final model: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5/model_final.pt
|
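The output.log above uses one fixed format per logged step: `[timestamp] Epoch E | Step S | Loss: L | LR: R`. For comparing the runs in this lr sweep offline, a small parser is enough to recover the (step, loss, lr) series. The snippet below is a hedged sketch based only on the line format visible above; the regex and function name are illustrative, not part of train.py.

```python
import re

# Matches lines like:
# [2026-04-25 19:21:14] Epoch 1 | Step 7720 | Loss: 1.0365 | LR: 2.00e-06
STEP_RE = re.compile(
    r"\[(?P<ts>[\d\- :]+)\] Epoch (?P<epoch>\d+) \| "
    r"Step (?P<step>\d+) \| Loss: (?P<loss>[\d.]+) \| LR: (?P<lr>[\deE.+-]+)"
)

def parse_train_log(path):
    """Yield (step, loss, lr) tuples from an output.log written by train.py."""
    with open(path) as f:
        for line in f:
            m = STEP_RE.match(line.strip())
            if m:
                yield int(m["step"]), float(m["loss"]), float(m["lr"])

# Usage: points = list(parse_train_log(".../files/output.log"))
```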
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/requirements.txt
ADDED
@@ -0,0 +1,245 @@
setuptools==78.1.1
wheel==0.45.1
pip==25.2
webencodings==0.5.1
triton==3.2.0
pytz==2025.2
pydub==0.25.1
pure_eval==0.2.3
ptyprocess==0.7.0
nvidia-ml-py==13.590.48
nvidia-cusparselt-cu12==0.6.2
mpmath==1.3.0
ipython-genutils==0.2.0
fastjsonschema==2.21.2
brotli==1.2.0
antlr4-python3-runtime==4.9.3
xxhash==3.6.0
widgetsnbextension==4.0.14
websocket-client==1.9.0
webcolors==24.11.1
wcwidth==0.2.14
urllib3==2.5.0
uri-template==1.3.0
tzdata==2025.2
typing_extensions==4.15.0
types-python-dateutil==2.9.0.20251008
traitlets==5.14.3
tqdm==4.67.1
tornado==6.5.2
tomlkit==0.13.3
tinycss2==1.4.0
tabulate==0.9.0
sympy==1.13.1
soupsieve==2.8
sniffio==1.3.1
smmap==5.0.2
six==1.17.0
shellingham==1.5.4
Send2Trash==1.8.3
semantic-version==2.10.0
safetensors==0.6.2
rpds-py==0.27.1
rfc3986-validator==0.1.1
regex==2025.9.18
pyzmq==27.1.0
PyYAML==6.0.3
python-multipart==0.0.22
python-json-logger==4.0.0
python-dotenv==1.2.1
pyparsing==3.2.5
PyJWT==2.8.0
Pygments==2.19.2
pycparser==2.23
pyarrow==22.0.0
psutil==7.1.0
protobuf==6.33.4
propcache==0.4.1
prometheus_client==0.23.1
portalocker==3.2.0
platformdirs==4.5.0
pillow==11.3.0
pexpect==4.9.0
pathspec==1.0.4
parso==0.8.5
pandocfilters==1.5.1
packaging==25.0
orjson==3.11.6
opt_einsum==3.4.0
nvidia-nvtx-cu12==12.4.127
nvidia-nvjitlink-cu12==12.4.127
nvidia-nccl-cu12==2.21.5
nvidia-curand-cu12==10.3.5.147
nvidia-cufile-cu12==1.13.1.3
nvidia-cufft-cu12==11.2.1.3
nvidia-cuda-runtime-cu12==12.4.127
nvidia-cuda-nvrtc-cu12==12.4.127
nvidia-cuda-cupti-cu12==12.4.127
nvidia-cublas-cu12==12.4.5.8
numpy==2.3.3
ninja==1.13.0
networkx==3.5
nest-asyncio==1.6.0
narwhals==2.15.0
mypy_extensions==1.1.0
multidict==6.7.0
mistune==3.1.4
mdurl==0.1.2
MarkupSafe==3.0.3
lxml==6.0.2
librt==0.8.0
lark==1.3.0
kiwisolver==1.4.9
jupyterlab_widgets==3.0.15
jupyterlab_pygments==0.3.0
jsonpointer==3.0.0
json5==0.12.1
itsdangerous==2.2.0
idna==3.10
hf-xet==1.1.10
h11==0.16.0
groovy==0.1.2
fsspec==2025.9.0
frozenlist==1.8.0
fqdn==1.5.1
fonttools==4.60.1
filelock==3.19.1
ffmpy==1.0.0
executing==2.2.1
einops==0.8.1
dill==0.4.0
defusedxml==0.7.1
decorator==5.2.1
debugpy==1.8.17
dacite==1.9.2
cycler==0.12.1
comm==0.2.3
colorama==0.4.6
click==8.3.1
charset-normalizer==3.4.3
certifi==2025.10.5
bleach==6.2.0
babel==2.17.0
attrs==25.4.0
async-lru==2.0.5
asttokens==3.0.0
annotated-types==0.7.0
annotated-doc==0.0.4
aiohappyeyeballs==2.6.1
aiofiles==24.1.0
yarl==1.22.0
uvicorn==0.40.0
typing-inspection==0.4.2
terminado==0.18.1
stack-data==0.6.3
sentry-sdk==2.50.0
scipy==1.17.0
sacrebleu==2.6.0
rfc3987-syntax==1.1.0
rfc3339-validator==0.1.4
requests==2.32.5
reportlab==4.4.9
referencing==0.36.2
python-dateutil==2.9.0.post0
pydantic_core==2.41.5
prompt_toolkit==3.0.52
plotly==6.5.2
pathlib2==2.3.7.post1
orderedmultidict==1.0.2
optree==0.17.0
omegaconf==2.3.0
nvidia-cusparse-cu12==12.3.1.170
nvidia-cudnn-cu12==9.1.0.70
mypy==1.19.1
multiprocess==0.70.16
matplotlib-inline==0.1.7
markdown-it-py==4.0.0
jupyter_core==5.8.1
Jinja2==3.1.6
jedi==0.19.2
ipython_pygments_lexers==1.1.1
httpcore==1.0.9
gitdb==4.0.12
ftfy==6.3.1
contourpy==1.3.3
cffi==2.0.0
beautifulsoup4==4.14.2
anyio==4.11.0
aiosignal==1.4.0
starlette==0.50.0
rich==14.2.0
pydantic==2.12.5
pandas==2.3.3
nvidia-cusolver-cu12==11.6.1.9
matplotlib==3.10.7
jupyter_server_terminals==0.5.3
jupyter_client==8.6.3
jsonschema-specifications==2025.9.1
ipython==9.6.0
hydra-core==1.3.2
huggingface-hub==0.35.3
httpx==0.28.1
GitPython==3.1.46
furl==2.1.4
cryptography==46.0.4
arrow==1.3.0
argon2-cffi-bindings==25.1.0
aiohttp==3.13.1
wandb==0.24.0
typer==0.21.1
torch==2.6.0
tokenizers==0.22.1
seaborn==0.13.2
safehttpx==0.1.7
jsonschema==4.25.1
joypy==0.2.6
isoduration==20.11.0
ipywidgets==8.1.7
ipykernel==6.30.1
gradio_client==2.0.3
fastapi==0.128.0
Authlib==1.6.6
argon2-cffi==25.1.0
transformers==4.57.6
nbformat==5.10.4
mlstm_kernels==2.0.2
jupyter-console==6.6.3
gradio==6.5.1
datasets==4.3.0
clearml==1.16.4
accelerate==1.10.1
xlstm==2.0.4
nbclient==0.10.2
jupyter-events==0.12.0
trackio==0.15.0
nbconvert==7.16.6
jupyter_server==2.17.0
notebook_shim==0.2.4
jupyterlab_server==2.27.3
jupyter-lsp==2.3.0
nbclassic==1.3.3
jupyterlab==4.4.9
notebook==7.4.7
jupyter_contrib_core==0.4.2
jupyter==1.1.1
jupyter_nbextensions_configurator==0.6.4
causal-conv1d==1.5.0.post8
flash_attn==2.7.4.post1
mamba-ssm==2.2.4
hnet==0.0.1
autocommand==2.2.2
backports.tarfile==1.2.0
importlib_metadata==8.0.0
inflect==7.3.1
jaraco.collections==5.1.0
jaraco.context==5.3.0
jaraco.functools==4.0.1
jaraco.text==3.12.1
more-itertools==10.3.0
packaging==24.2
platformdirs==4.2.2
tomli==2.0.1
typeguard==4.3.0
typing_extensions==4.12.2
wheel==0.45.1
zipp==3.19.2
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/wandb-metadata.json
ADDED
@@ -0,0 +1,70 @@
{
  "os": "Linux-5.4.0-176-generic-x86_64-with-glibc2.35",
  "python": "CPython 3.12.0",
  "startedAt": "2026-04-25T18:48:22.988796Z",
  "args": [
    "tracking=wandb",
    "tracking.project=code-completion_lr-sweep",
    "tracking.run_name=pythia_1b_lr_2e-5",
    "training.lr=2e-5",
    "paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5",
    "model=pythia_1b",
    "data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full"
  ],
  "program": "/workspace/byte-llms-code/code_completion_exp/train_pythia/train.py",
  "codePath": "code_completion_exp/train_pythia/train.py",
  "codePathLocal": "train.py",
  "git": {
    "remote": "https://github.com/naryst/byte-llms-code.git",
    "commit": "f111e13281aa0dc58e24302edab5b0d5c2024586"
  },
  "email": "nikita@local.ru",
  "root": "/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5",
  "host": "7504e518d24a",
  "executable": "/venv/bytellm/bin/python",
  "cpu_count": 64,
  "cpu_count_logical": 128,
  "gpu": "NVIDIA H100 80GB HBM3",
  "gpu_count": 4,
  "disk": {
    "/": {
      "total": "265214230528",
      "used": "96701419520"
    }
  },
  "memory": {
    "total": "1081679683584"
  },
  "gpu_nvidia": [
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-b60cdcab-2033-2009-41de-be646c953a20"
    },
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-9982b420-4520-4238-c378-ec5a46015474"
    },
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-e26ebaac-aaa6-3eed-17ab-a3dce303a76f"
    },
    {
      "name": "NVIDIA H100 80GB HBM3",
      "memoryTotal": "85520809984",
      "cudaCores": 16896,
      "architecture": "Hopper",
      "uuid": "GPU-9dfc6dba-0be6-4a10-1027-336cc0e65134"
    }
  ],
  "cudaVersion": "12.2",
  "writerId": "p4yngwo0m9zjwjsftw16p6b9r2nywedy"
}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/files/wandb-summary.json
ADDED
@@ -0,0 +1 @@
{"train/step_time":0.25149242877960204,"_timestamp":1.7771454307587473e+09,"val/perplexity":2.798185577734647,"best/val_perplexity":2.798185577734647,"best/step":8000,"_step":9880,"epoch/loss":1.030084548864682,"_wandb":{"runtime":2529},"train/loss_avg":1.030100240602666,"best/val_loss":1.0069655648299627,"train/epoch":1,"val/loss":1.0069655648299627,"_runtime":2529,"val/time":3.7501397132873535,"train/loss":0.7768054381012917,"epoch/time":2521.9737315177917,"train/lr":2.0000000000000003e-06}
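Every run in this sweep ships a one-line wandb-summary.json like the one above, carrying `best/val_loss` and `best/val_perplexity`, so the sweep can be ranked without the wandb UI. Below is a minimal sketch; the glob pattern assumes the `lr_sweep/<run>/wandb/run-*/files/` layout visible throughout this commit, and the function name is illustrative.

```python
import json
from pathlib import Path

def rank_runs(sweep_root):
    """Rank lr-sweep runs by best/val_loss from their wandb-summary.json files."""
    results = []
    for summary in Path(sweep_root).glob("*/wandb/run-*/files/wandb-summary.json"):
        data = json.loads(summary.read_text())
        if "best/val_loss" in data:
            # parts[-5] is the run directory name, e.g. "pythia_1b_lr_2e-5"
            results.append((data["best/val_loss"], summary.parts[-5]))
    return sorted(results)

# Usage: rank_runs("lr_sweep") -> [(1.00696..., "pythia_1b_lr_2e-5"), ...]
```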
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug-core.log
ADDED
@@ -0,0 +1,16 @@
{"time":"2026-04-25T18:48:23.067514123Z","level":"INFO","msg":"main: starting server","port-filename":"/tmp/tmptoctbjkj/port-87140.txt","pid":87140,"log-level":0,"disable-analytics":false,"shutdown-on-parent-exit":false,"enable-dcgm-profiling":false}
{"time":"2026-04-25T18:48:23.068489647Z","level":"INFO","msg":"server: will exit if parent process dies","ppid":87140}
{"time":"2026-04-25T18:48:23.068478645Z","level":"INFO","msg":"server: accepting connections","addr":{"Name":"/tmp/wandb-87140-87202-3407868505/socket","Net":"unix"}}
{"time":"2026-04-25T18:48:23.255789735Z","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"1(@)"}
{"time":"2026-04-25T18:48:23.273102705Z","level":"INFO","msg":"handleInformInit: received","streamId":"bhvwo83l","id":"1(@)"}
{"time":"2026-04-25T18:48:23.732368965Z","level":"INFO","msg":"handleInformInit: stream started","streamId":"bhvwo83l","id":"1(@)"}
{"time":"2026-04-25T19:30:34.290124991Z","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"bhvwo83l","id":"1(@)"}
{"time":"2026-04-25T19:30:34.290643917Z","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"bhvwo83l","id":"1(@)"}
{"time":"2026-04-25T19:30:34.305560517Z","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"1(@)"}
{"time":"2026-04-25T19:30:34.305589646Z","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"1(@)"}
{"time":"2026-04-25T19:30:34.305594668Z","level":"INFO","msg":"server is shutting down"}
{"time":"2026-04-25T19:30:34.305601132Z","level":"INFO","msg":"connection: closing","id":"1(@)"}
{"time":"2026-04-25T19:30:34.305652804Z","level":"INFO","msg":"connection: closed successfully","id":"1(@)"}
{"time":"2026-04-25T19:30:34.305677559Z","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"1(@)"}
{"time":"2026-04-25T19:30:34.30569399Z","level":"INFO","msg":"server: listener closed","addr":{"Name":"/tmp/wandb-87140-87202-3407868505/socket","Net":"unix"}}
{"time":"2026-04-25T19:30:34.305717752Z","level":"INFO","msg":"server is closed"}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug-internal.log
ADDED
@@ -0,0 +1,13 @@
{"time":"2026-04-25T18:48:23.273198118Z","level":"INFO","msg":"stream: starting","core version":"0.24.0"}
{"time":"2026-04-25T18:48:23.732205852Z","level":"INFO","msg":"stream: created new stream","id":"bhvwo83l"}
{"time":"2026-04-25T18:48:23.732250159Z","level":"INFO","msg":"handler: started","stream_id":"bhvwo83l"}
{"time":"2026-04-25T18:48:23.732362504Z","level":"INFO","msg":"stream: started","id":"bhvwo83l"}
{"time":"2026-04-25T18:48:23.732371188Z","level":"INFO","msg":"writer: started","stream_id":"bhvwo83l"}
{"time":"2026-04-25T18:48:23.7323745Z","level":"INFO","msg":"sender: started","stream_id":"bhvwo83l"}
{"time":"2026-04-25T18:48:23.86036147Z","level":"ERROR","msg":"git repo not found","error":"repository does not exist"}
{"time":"2026-04-25T19:30:34.127544149Z","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
{"time":"2026-04-25T19:30:34.287461914Z","level":"INFO","msg":"handler: operation stats","stats":{}}
{"time":"2026-04-25T19:30:34.290164857Z","level":"INFO","msg":"stream: closing","id":"bhvwo83l"}
{"time":"2026-04-25T19:30:34.29017794Z","level":"INFO","msg":"handler: closed","stream_id":"bhvwo83l"}
{"time":"2026-04-25T19:30:34.290285178Z","level":"INFO","msg":"sender: closed","stream_id":"bhvwo83l"}
{"time":"2026-04-25T19:30:34.290289857Z","level":"INFO","msg":"stream: closed","id":"bhvwo83l"}
lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug.log
ADDED
@@ -0,0 +1,24 @@
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_setup.py:_flush():81] Current SDK version is 0.24.0
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_setup.py:_flush():81] Configure stats pid to 87140
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_setup.py:_flush():81] Loading settings from environment variables
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_init.py:setup_run_log_directory():717] Logging user logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug.log
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_init.py:setup_run_log_directory():718] Logging internal logs to /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5/wandb/run-20260425_184822-bhvwo83l/logs/debug-internal.log
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_init.py:init():844] calling init triggers
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_init.py:init():849] wandb.init called with sweep_config: {}
config: {'model': {'name': 'EleutherAI/pythia-1b', 'checkpoint_path': None, 'from_scratch': False}, 'training': {'epochs': 1, 'batch_size': 4, 'eval_batch_size': 12, 'gradient_accumulation_steps': 4, 'lr': 2e-05, 'weight_decay': 0.1, 'betas': [0.9, 0.95], 'eps': 1e-08, 'lr_scheduler': 'wsd', 'warmup_ratio': 0.1, 'decay_ratio': 0.2, 'warmup_steps': 100, 'min_lr_ratio': 0.1, 'max_grad_norm': 1.0, 'use_amp': True, 'resume': False, 'resume_checkpoint': None}, 'data': {'path': '/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full', 'max_context_len': 4096, 'max_target_len': 256, 'num_workers': 4, 'pin_memory': True, 'max_train_samples': None, 'max_val_samples': 2000}, 'logging': {'log_interval': 10, 'save_interval': 0, 'eval_interval': 2000, 'save_every_epoch': False}, 'tracking': {'enabled': True, 'backend': 'wandb', 'project': 'code-completion_lr-sweep', 'run_name': 'pythia_1b_lr_2e-5', 'entity': None, 'base_url': 'https://wandb.platun0v.ru', 'local_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5'}, 'paths': {'output_dir': '/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_2e-5'}, 'seed': 42, 'device': 'cuda', '_wandb': {'code_path': 'code/code_completion_exp/train_pythia/train.py'}}
2026-04-25 18:48:22,990 INFO MainThread:87140 [wandb_init.py:init():892] starting backend
2026-04-25 18:48:23,256 INFO MainThread:87140 [wandb_init.py:init():895] sending inform_init request
2026-04-25 18:48:23,271 INFO MainThread:87140 [wandb_init.py:init():903] backend started and connected
2026-04-25 18:48:23,275 INFO MainThread:87140 [wandb_init.py:init():973] updated telemetry
2026-04-25 18:48:23,290 INFO MainThread:87140 [wandb_init.py:init():997] communicating run to backend with 90.0 second timeout
2026-04-25 18:48:23,859 INFO MainThread:87140 [wandb_init.py:init():1044] starting run threads in backend
2026-04-25 18:48:24,017 INFO MainThread:87140 [wandb_run.py:_console_start():2529] atexit reg
2026-04-25 18:48:24,018 INFO MainThread:87140 [wandb_run.py:_redirect():2377] redirect: wrap_raw
2026-04-25 18:48:24,018 INFO MainThread:87140 [wandb_run.py:_redirect():2446] Wrapping output streams.
2026-04-25 18:48:24,018 INFO MainThread:87140 [wandb_run.py:_redirect():2469] Redirects installed.
2026-04-25 18:48:24,020 INFO MainThread:87140 [wandb_init.py:init():1084] run started, returning control to user process
2026-04-25 19:30:33,277 INFO MainThread:87140 [wandb_run.py:_finish():2295] finishing run nikita/code-completion_lr-sweep/bhvwo83l
2026-04-25 19:30:33,278 INFO MainThread:87140 [wandb_run.py:_atexit_cleanup():2494] got exitcode: 0
2026-04-25 19:30:33,278 INFO MainThread:87140 [wandb_run.py:_restore():2476] restore
2026-04-25 19:30:33,278 INFO MainThread:87140 [wandb_run.py:_restore():2482] restore done
2026-04-25 19:30:34,289 INFO MainThread:87140 [wandb_run.py:_footer_sync_info():3870] logging synced files
lr_sweep/pythia_1b_lr_5e-5/.hydra/config.yaml
ADDED
@@ -0,0 +1,49 @@
model:
  name: EleutherAI/pythia-1b
  checkpoint_path: null
  from_scratch: false
training:
  epochs: 1
  batch_size: 4
  eval_batch_size: 12
  gradient_accumulation_steps: 4
  lr: 5.0e-05
  weight_decay: 0.1
  betas:
  - 0.9
  - 0.95
  eps: 1.0e-08
  lr_scheduler: wsd
  warmup_ratio: 0.1
  decay_ratio: 0.2
  warmup_steps: 100
  min_lr_ratio: 0.1
  max_grad_norm: 1.0
  use_amp: true
  resume: false
  resume_checkpoint: null
data:
  path: /workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full
  max_context_len: 4096
  max_target_len: 256
  num_workers: 4
  pin_memory: true
  max_train_samples: null
  max_val_samples: 2000
logging:
  log_interval: 10
  save_interval: 0
  eval_interval: 2000
  save_every_epoch: false
tracking:
  enabled: true
  backend: wandb
  project: code-completion_lr-sweep
  run_name: pythia_1b_lr_5e-5
  entity: null
  base_url: https://wandb.platun0v.ru
  local_dir: ${paths.output_dir}
paths:
  output_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5
seed: 42
device: cuda
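The training block selects `lr_scheduler: wsd`, a warmup-stable-decay schedule: linear warmup over `warmup_steps`, a plateau at the peak LR, then a decay down to `min_lr_ratio` times the peak. The floor is the one detail the logs confirm: the 2e-5 run above ends at LR 2.00e-06 = 0.1 × 2e-5. The sketch below is illustrative only; the actual schedule lives in train.py, and the exact warmup/decay boundaries (including whether the scheduler counts micro-steps or optimizer steps) are assumptions here.

```python
def wsd_lr(step, total_steps, peak_lr, warmup_steps=100,
           decay_ratio=0.2, min_lr_ratio=0.1):
    """Warmup-stable-decay: linear warmup, flat plateau, linear decay to a floor.

    Illustrative sketch matching the .hydra/config.yaml fields above; not
    train.py's actual implementation.
    """
    decay_start = int(total_steps * (1 - decay_ratio))
    floor = peak_lr * min_lr_ratio
    if step < warmup_steps:          # linear warmup from 0 to peak
        return peak_lr * step / warmup_steps
    if step < decay_start:           # stable plateau at peak
        return peak_lr
    # linear decay from peak to the floor over the final decay_ratio of steps
    frac = (step - decay_start) / max(1, total_steps - decay_start)
    return peak_lr - (peak_lr - floor) * frac

# At the end of training this returns peak_lr * min_lr_ratio, e.g.
# wsd_lr(9880, 9880, 2e-5) == 2e-6, the floor seen in the 2e-5 run's log.
```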
lr_sweep/pythia_1b_lr_5e-5/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,167 @@
hydra:
  run:
    dir: ${paths.output_dir}
  sweep:
    dir: outputs/multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
    subdir: ${hydra.job.num}
  launcher:
    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
  sweeper:
    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
    max_batch_size: null
    params: null
  help:
    app_name: ${hydra.job.name}
    header: '${hydra.help.app_name} is powered by Hydra.

      '
    footer: 'Powered by Hydra (https://hydra.cc)

      Use --hydra-help to view Hydra specific help

      '
    template: '${hydra.help.header}

      == Configuration groups ==

      Compose your configuration from those groups (group=option)


      $APP_CONFIG_GROUPS


      == Config ==

      Override anything in the config (foo.bar=value)


      $CONFIG


      ${hydra.help.footer}

      '
  hydra_help:
    template: 'Hydra (${hydra.runtime.version})

      See https://hydra.cc for more info.


      == Flags ==

      $FLAGS_HELP


      == Configuration groups ==

      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
      to command line)


      $HYDRA_CONFIG_GROUPS


      Use ''--cfg hydra'' to Show the Hydra config.

      '
    hydra_help: ???
  hydra_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][HYDRA] %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
    root:
      level: INFO
      handlers:
      - console
    loggers:
      logging_example:
        level: DEBUG
    disable_existing_loggers: false
  job_logging:
    version: 1
    formatters:
      simple:
        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
    handlers:
      console:
        class: logging.StreamHandler
        formatter: simple
        stream: ext://sys.stdout
      file:
        class: logging.FileHandler
        formatter: simple
        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
    root:
      level: INFO
      handlers:
      - console
      - file
    disable_existing_loggers: false
  env: {}
  mode: RUN
  searchpath: []
  callbacks: {}
  output_subdir: .hydra
  overrides:
    hydra:
    - hydra.mode=RUN
    task:
    - tracking=wandb
    - tracking.project=code-completion_lr-sweep
    - tracking.run_name=pythia_1b_lr_5e-5
    - training.lr=5e-5
    - paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5
    - model=pythia_1b
    - data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full
  job:
    name: train
    chdir: false
    override_dirname: data.path=/workspace/byte-llms-code/code_completion_exp/datasets/data_V4_full,model=pythia_1b,paths.output_dir=/workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5,tracking.project=code-completion_lr-sweep,tracking.run_name=pythia_1b_lr_5e-5,tracking=wandb,training.lr=5e-5
    id: ???
    num: ???
    config_name: config
    env_set: {}
    env_copy: []
    config:
      override_dirname:
        kv_sep: '='
        item_sep: ','
        exclude_keys: []
  runtime:
    version: 1.3.2
    version_base: '1.3'
    cwd: /workspace/byte-llms-code/code_completion_exp/train_pythia
    config_sources:
    - path: hydra.conf
      schema: pkg
      provider: hydra
    - path: /workspace/byte-llms-code/code_completion_exp/train_pythia/configs
      schema: file
      provider: main
    - path: ''
      schema: structured
      provider: schema
    output_dir: /workspace/byte-llms-code/outputs/lr_sweep/pythia_1b_lr_5e-5
    choices:
      paths: default
      tracking: wandb
      logging: default
      data: default
      training: default
      model: pythia_1b
      hydra/env: default
      hydra/callbacks: null
      hydra/job_logging: default
      hydra/hydra_logging: default
      hydra/hydra_help: default
      hydra/help: default
      hydra/sweeper: basic
      hydra/launcher: basic
      hydra/output: default
  verbose: false
lr_sweep/pythia_1b_lr_5e-5/model_final.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3eb6ba8eddf018bea4e77013ab036276e6bc1035c5a700e9f909e4d1a396f060
size 2023640586
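model_final.pt is uploaded as a Git LFS pointer (roughly 2.0 GB of weights). Assuming train.py saved a plain `state_dict` with `torch.save` — an assumption, since the saving code is not part of this diff — the checkpoint would be restored onto the base EleutherAI/pythia-1b architecture roughly like this:

```python
import torch
from transformers import GPTNeoXForCausalLM

# Assumption: model_final.pt holds a bare state_dict; if train.py instead
# wrapped it in a dict (e.g. under a "model" key), unwrap it first.
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-1b")
state = torch.load("lr_sweep/pythia_1b_lr_5e-5/model_final.pt",
                   map_location="cpu", weights_only=True)
model.load_state_dict(state)
model.eval()
```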